@@ -9330,6 +9330,25 @@ group_type group_classify(unsigned int imbalance_pct,
9330
9330
return group_has_spare ;
9331
9331
}
9332
9332
9333
+ /**
9334
+ * sched_use_asym_prio - Check whether asym_packing priority must be used
9335
+ * @sd: The scheduling domain of the load balancing
9336
+ * @cpu: A CPU
9337
+ *
9338
+ * Always use CPU priority when balancing load between SMT siblings. When
9339
+ * balancing load between cores, it is not sufficient that @cpu is idle. Only
9340
+ * use CPU priority if the whole core is idle.
9341
+ *
9342
+ * Returns: True if the priority of @cpu must be followed. False otherwise.
9343
+ */
9344
+ static bool sched_use_asym_prio (struct sched_domain * sd , int cpu )
9345
+ {
9346
+ if (!sched_smt_active ())
9347
+ return true;
9348
+
9349
+ return sd -> flags & SD_SHARE_CPUCAPACITY || is_core_idle (cpu );
9350
+ }
9351
+
9333
9352
/**
9334
9353
* asym_smt_can_pull_tasks - Check whether the load balancing CPU can pull tasks
9335
9354
* @dst_cpu: Destination CPU of the load balancing
@@ -9340,6 +9359,9 @@ group_type group_classify(unsigned int imbalance_pct,
9340
9359
* Check the state of the SMT siblings of both @sds::local and @sg and decide
9341
9360
* if @dst_cpu can pull tasks.
9342
9361
*
9362
+ * This function must be called only if all the SMT siblings of @dst_cpu are
9363
+ * idle, if any.
9364
+ *
9343
9365
* If @dst_cpu does not have SMT siblings, it can pull tasks if two or more of
9344
9366
* the SMT siblings of @sg are busy. If only one CPU in @sg is busy, pull tasks
9345
9367
* only if @dst_cpu has higher priority.
@@ -9349,8 +9371,7 @@ group_type group_classify(unsigned int imbalance_pct,
9349
9371
* Bigger imbalances in the number of busy CPUs will be dealt with in
9350
9372
* update_sd_pick_busiest().
9351
9373
*
9352
- * If @sg does not have SMT siblings, only pull tasks if all of the SMT siblings
9353
- * of @dst_cpu are idle and @sg has lower priority.
9374
+ * If @sg does not have SMT siblings, only pull tasks if @sg has lower priority.
9354
9375
*
9355
9376
* Return: true if @dst_cpu can pull tasks, false otherwise.
9356
9377
*/
@@ -9398,15 +9419,8 @@ static bool asym_smt_can_pull_tasks(int dst_cpu, struct sd_lb_stats *sds,
9398
9419
return false;
9399
9420
}
9400
9421
9401
- /*
9402
- * @sg does not have SMT siblings. Ensure that @sds::local does not end
9403
- * up with more than one busy SMT sibling and only pull tasks if there
9404
- * are not busy CPUs (i.e., no CPU has running tasks).
9405
- */
9406
- if (!sds -> local_stat .sum_nr_running )
9407
- return sched_asym_prefer (dst_cpu , sg -> asym_prefer_cpu );
9408
-
9409
- return false;
9422
+ /* If we are here, @dst_cpu has SMT siblings and they are also idle. */
9423
+ return sched_asym_prefer (dst_cpu , sg -> asym_prefer_cpu );
9410
9424
#else
9411
9425
/* Always return false so that callers deal with non-SMT cases. */
9412
9426
return false;
@@ -9417,7 +9431,11 @@ static inline bool
9417
9431
sched_asym (struct lb_env * env , struct sd_lb_stats * sds , struct sg_lb_stats * sgs ,
9418
9432
struct sched_group * group )
9419
9433
{
9420
- /* Only do SMT checks if either local or candidate have SMT siblings */
9434
+ /* Ensure that the whole local core is idle, if applicable. */
9435
+ if (!sched_use_asym_prio (env -> sd , env -> dst_cpu ))
9436
+ return false;
9437
+
9438
+ /* Only do SMT checks if either local or candidate have SMT siblings. */
9421
9439
if ((sds -> local -> flags & SD_SHARE_CPUCAPACITY ) ||
9422
9440
(group -> flags & SD_SHARE_CPUCAPACITY ))
9423
9441
return asym_smt_can_pull_tasks (env -> dst_cpu , sds , sgs , group );
@@ -10632,11 +10650,13 @@ static inline bool
10632
10650
asym_active_balance (struct lb_env * env )
10633
10651
{
10634
10652
/*
10635
- * ASYM_PACKING needs to force migrate tasks from busy but
10636
- * lower priority CPUs in order to pack all tasks in the
10637
- * highest priority CPUs.
10653
+ * ASYM_PACKING needs to force migrate tasks from busy but lower
10654
+ * priority CPUs in order to pack all tasks in the highest priority
10655
+ * CPUs. When done between cores, do it only if the whole core is
10656
+ * idle.
10638
10657
*/
10639
10658
return env -> idle != CPU_NOT_IDLE && (env -> sd -> flags & SD_ASYM_PACKING ) &&
10659
+ sched_use_asym_prio (env -> sd , env -> dst_cpu ) &&
10640
10660
sched_asym_prefer (env -> dst_cpu , env -> src_cpu );
10641
10661
}
10642
10662
@@ -11371,9 +11391,13 @@ static void nohz_balancer_kick(struct rq *rq)
11371
11391
* When ASYM_PACKING; see if there's a more preferred CPU
11372
11392
* currently idle; in which case, kick the ILB to move tasks
11373
11393
* around.
11394
+ *
11395
+ * When balancing between cores, all the SMT siblings of the
11396
+ * preferred CPU must be idle.
11374
11397
*/
11375
11398
for_each_cpu_and (i , sched_domain_span (sd ), nohz .idle_cpus_mask ) {
11376
- if (sched_asym_prefer (i , cpu )) {
11399
+ if (sched_use_asym_prio (sd , i ) &&
11400
+ sched_asym_prefer (i , cpu )) {
11377
11401
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK ;
11378
11402
goto unlock ;
11379
11403
}
0 commit comments