@@ -98,6 +98,15 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
s64 ret;
int cpu;
+ /*
+ * The calls to get/put_online_cpus_atomic() are strictly not
+ * necessary, since CPU hotplug is explicitly handled via the
+ * hotplug callback which synchronizes through fbc->lock.
+ * But we add them here anyway to make it easier for the debug
+ * code under CONFIG_DEBUG_HOTPLUG_CPU to validate the correctness
+ * of hotplug synchronization.
+ */
+ get_online_cpus_atomic();
raw_spin_lock(&fbc->lock);
ret = fbc->count;
for_each_online_cpu(cpu) {
@@ -105,6 +114,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
ret += *pcount;
}
raw_spin_unlock(&fbc->lock);
+ put_online_cpus_atomic();
return ret;
}
EXPORT_SYMBOL(__percpu_counter_sum);
Once stop_machine() is gone from the CPU offline path, we won't be able to depend on disabling preemption to prevent CPUs from going offline from under us. Use the get/put_online_cpus_atomic() APIs to prevent CPUs from going offline, while invoking from atomic context. Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> --- lib/percpu_counter.c | 10 ++++++++++ 1 file changed, 10 insertions(+) -- To unsubscribe from this list: send the line "unsubscribe netdev" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html