@@ -2806,4 +2806,6 @@ static inline unsigned long rlimit_max(unsigned int limit)
#endif /* __KERNEL__ */
+extern int find_idlest_prefer_cpu(struct cpumask *prefer,
+ struct cpumask *allowed, int prev_cpu);
#endif
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include <linux/profile.h>
#include <linux/interrupt.h>
+#include <linux/export.h>
#include <trace/events/sched.h>
@@ -2809,6 +2810,35 @@ unlock:
return new_cpu;
}
+
+/*
+ * This API is used to find the most idle cpu from both the preferred and
+ * the allowed cpuset (such as a cgroup-controlled cpuset). It helps the
+ * per-cpu thread model pick an allowed local cpu to be scheduled on.
+ * If the two cpusets intersect, the cpu is chosen from the intersection;
+ * if they do not intersect, the cpu is chosen from the allowed cpuset.
+ * prev_cpu helps preserve cache locality when prev_cpu is not busy.
+ */
+int find_idlest_prefer_cpu(struct cpumask *prefer, struct cpumask *allowed,
+ int prev_cpu)
+{
+ unsigned long load, min_load = ULONG_MAX;
+ int check, i, idlest = -1;
+
+ check = cpumask_intersects(prefer, allowed);
+ /* Traverse only the allowed CPUs */
+ if (check == 0)
+ prefer = allowed;
+ for_each_cpu_and(i, prefer, allowed) {
+ load = weighted_cpuload(i);
+ if (load < min_load || (load == min_load && i == prev_cpu)) {
+ min_load = load;
+ idlest = i;
+ }
+ }
+ return idlest;
+}
+EXPORT_SYMBOL(find_idlest_prefer_cpu);
#endif /* CONFIG_SMP */
static unsigned long