Skip to content

Commit eefefa7

Browse files
ricardon
authored and
Peter Zijlstra
committed
sched/fair: Only do asym_packing load balancing from fully idle SMT cores
When balancing load between cores, all the SMT siblings of the destination CPU, if any, must be idle. Otherwise, pulling new tasks degrades the throughput of the busy SMT siblings. The overall throughput of the system remains the same. When balancing load within an SMT core this consideration is not relevant. Follow the priorities that hardware indicates. Suggested-by: Valentin Schneider <vschneid@redhat.com> Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Zhang Rui <rui.zhang@intel.com> Link: https://lore.kernel.org/r/20230406203148.19182-3-ricardo.neri-calderon@linux.intel.com
1 parent 8b36d07 commit eefefa7

File tree

1 file changed

+40
-16
lines changed

1 file changed

+40
-16
lines changed

kernel/sched/fair.c

Lines changed: 40 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -9330,6 +9330,25 @@ group_type group_classify(unsigned int imbalance_pct,
93309330
return group_has_spare;
93319331
}
93329332

9333+
/**
9334+
* sched_use_asym_prio - Check whether asym_packing priority must be used
9335+
* @sd: The scheduling domain of the load balancing
9336+
* @cpu: A CPU
9337+
*
9338+
* Always use CPU priority when balancing load between SMT siblings. When
9339+
* balancing load between cores, it is not sufficient that @cpu is idle. Only
9340+
* use CPU priority if the whole core is idle.
9341+
*
9342+
* Returns: True if the priority of @cpu must be followed. False otherwise.
9343+
*/
9344+
static bool sched_use_asym_prio(struct sched_domain *sd, int cpu)
9345+
{
9346+
if (!sched_smt_active())
9347+
return true;
9348+
9349+
return sd->flags & SD_SHARE_CPUCAPACITY || is_core_idle(cpu);
9350+
}
9351+
93339352
/**
93349353
* asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
93359354
* @dst_cpu: Destination CPU of the load balancing
@@ -9340,6 +9359,9 @@ group_type group_classify(unsigned int imbalance_pct,
93409359
* Check the state of the SMT siblings of both @sds::local and @sg and decide
93419360
* if @dst_cpu can pull tasks.
93429361
*
9362+
* This function must be called only if all the SMT siblings of @dst_cpu are
9363+
* idle, if any.
9364+
*
93439365
* If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
93449366
* the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
93459367
* only if @dst_cpu has higher priority.
@@ -9349,8 +9371,7 @@ group_type group_classify(unsigned int imbalance_pct,
93499371
* Bigger imbalances in the number of busy CPUs will be dealt with in
93509372
* update_sd_pick_busiest().
93519373
*
9352-
* If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
9353-
* of @dst_cpu are idle and @sg has lower priority.
9374+
* If @sg does not have SMT siblings, only pull tasks if @sg has lower priority.
93549375
*
93559376
* Return: true if @dst_cpu can pull tasks, false otherwise.
93569377
*/
@@ -9398,15 +9419,8 @@ static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
93989419
return false;
93999420
}
94009421

9401-
/*
9402-
* @sg does not have SMT siblings. Ensure that @sds::local does not end
9403-
* up with more than one busy SMT sibling and only pull tasks if there
9404-
* are not busy CPUs (i.e., no CPU has running tasks).
9405-
*/
9406-
if (!sds->local_stat.sum_nr_running)
9407-
return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
9408-
9409-
return false;
9422+
/* If we are here, @dst_cpu has SMT siblings and they are also idle. */
9423+
return sched_asym_prefer(dst_cpu, sg->asym_prefer_cpu);
94109424
#else
94119425
/* Always return false so that callers deal with non-SMT cases. */
94129426
return false;
@@ -9417,7 +9431,11 @@ static inline bool
94179431
sched_asym(struct lb_env *env, struct sd_lb_stats *sds, struct sg_lb_stats *sgs,
94189432
struct sched_group *group)
94199433
{
9420-
/* Only do SMT checks if either local or candidate have SMT siblings */
9434+
/* Ensure that the whole local core is idle, if applicable. */
9435+
if (!sched_use_asym_prio(env->sd, env->dst_cpu))
9436+
return false;
9437+
9438+
/* Only do SMT checks if either local or candidate have SMT siblings. */
94219439
if ((sds->local->flags & SD_SHARE_CPUCAPACITY) ||
94229440
(group->flags & SD_SHARE_CPUCAPACITY))
94239441
return asym_smt_can_pull_tasks(env->dst_cpu, sds, sgs, group);
@@ -10632,11 +10650,13 @@ static inline bool
1063210650
asym_active_balance(struct lb_env *env)
1063310651
{
1063410652
/*
10635-
* ASYM_PACKING needs to force migrate tasks from busy but
10636-
* lower priority CPUs in order to pack all tasks in the
10637-
* highest priority CPUs.
10653+
* ASYM_PACKING needs to force migrate tasks from busy but lower
10654+
* priority CPUs in order to pack all tasks in the highest priority
10655+
* CPUs. When done between cores, do it only if the whole core is idle.
10656+
1063810657
*/
1063910658
return env->idle != CPU_NOT_IDLE && (env->sd->flags & SD_ASYM_PACKING) &&
10659+
sched_use_asym_prio(env->sd, env->dst_cpu) &&
1064010660
sched_asym_prefer(env->dst_cpu, env->src_cpu);
1064110661
}
1064210662

@@ -11371,9 +11391,13 @@ static void nohz_balancer_kick(struct rq *rq)
1137111391
* When ASYM_PACKING; see if there's a more preferred CPU
1137211392
* currently idle; in which case, kick the ILB to move tasks
1137311393
* around.
11394+
*
11395+
* When balancing between cores, all the SMT siblings of the
11396+
* preferred CPU must be idle.
1137411397
*/
1137511398
for_each_cpu_and(i, sched_domain_span(sd), nohz.idle_cpus_mask) {
11376-
if (sched_asym_prefer(i, cpu)) {
11399+
if (sched_use_asym_prio(sd, i) &&
11400+
sched_asym_prefer(i, cpu)) {
1137711401
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
1137811402
goto unlock;
1137911403
}

0 commit comments

Comments
 (0)