
[RFC,v2,1/6] Add cpu_queue_job_on_node() to do node local jobs

Message ID 20180628025501.20676-2-stewart@linux.ibm.com
State RFC
Series Faster fast reboot (2x!)

Commit Message

Stewart Smith June 28, 2018, 2:54 a.m. UTC
From: Nicholas Piggin <npiggin@gmail.com>

For things like clearing memory on fast reboot, running these jobs
node-local is a good idea.

I made it strict (run on this chip_id or return NULL) because I figured
firmware might have particular requirements like that. But it's easy to
make it a fallback: just test if (!cpu) cpu = cpu_find_job_target(-1);
-- otherwise you'd have to put the fallback into the caller.
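
For illustration, a caller-side fallback could look roughly like this
(sketch only, not part of this patch; queue_clear_job() and clear_job_fn()
are made-up names):

	static struct cpu_job *queue_clear_job(uint32_t chip_id, void *arg)
	{
		struct cpu_job *job;

		/* Prefer a free thread on the target node... */
		job = cpu_queue_job_on_node(chip_id, "clear-mem",
					    clear_job_fn, arg);
		/* ...and fall back to any available thread otherwise */
		if (!job)
			job = cpu_queue_job(NULL, "clear-mem",
					    clear_job_fn, arg);
		return job;
	}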

This is from Nick Piggin and I haven't really looked at or reviewed it
yet, but hey, it seems to work :)
---
 core/cpu.c    | 77 ++++++++++++++++++++++++++++++++++++++++-----------
 include/cpu.h |  4 +++
 2 files changed, 65 insertions(+), 16 deletions(-)

Patch

diff --git a/core/cpu.c b/core/cpu.c
index a8936fab0fe2..ede966b356e8 100644
--- a/core/cpu.c
+++ b/core/cpu.c
@@ -106,7 +106,11 @@  static void cpu_wake(struct cpu_thread *cpu)
 	}
 }
 
-static struct cpu_thread *cpu_find_job_target(void)
+/*
+ * If chip_id is >= 0, schedule the job on that node.
+ * Otherwise schedule the job anywhere.
+ */
+static struct cpu_thread *cpu_find_job_target(int32_t chip_id)
 {
 	struct cpu_thread *cpu, *best, *me = this_cpu();
 	uint32_t best_count;
@@ -126,6 +130,8 @@  static struct cpu_thread *cpu_find_job_target(void)
 	for_each_available_cpu(cpu) {
 		if (cpu == me || !cpu_is_thread0(cpu) || cpu->job_has_no_return)
 			continue;
+		if (chip_id >= 0 && cpu->chip_id != chip_id)
+			continue;
 		if (cpu->job_count)
 			continue;
 		lock(&cpu->job_lock);
@@ -145,6 +151,8 @@  static struct cpu_thread *cpu_find_job_target(void)
 	for_each_available_cpu(cpu) {
 		if (cpu == me || cpu->job_has_no_return)
 			continue;
+		if (chip_id >= 0 && cpu->chip_id != chip_id)
+			continue;
 		if (!best || cpu->job_count < best_count) {
 			best = cpu;
 			best_count = cpu->job_count;
@@ -167,6 +175,26 @@  static struct cpu_thread *cpu_find_job_target(void)
 	return NULL;
 }
 
+/* job_lock is held, returns with it released */
+static void queue_job_on_cpu(struct cpu_thread *cpu, struct cpu_job *job)
+{
+	/* That's bad, the job will never run */
+	if (cpu->job_has_no_return) {
+		prlog(PR_WARNING, "WARNING ! Job %s scheduled on CPU 0x%x"
+		      " which has a no-return job on its queue !\n",
+		      job->name, cpu->pir);
+		backtrace();
+	}
+	list_add_tail(&cpu->job_queue, &job->link);
+	if (job->no_return)
+		cpu->job_has_no_return = true;
+	else
+		cpu->job_count++;
+	if (pm_enabled)
+		cpu_wake(cpu);
+	unlock(&cpu->job_lock);
+}
+
 struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
 				const char *name,
 				void (*func)(void *data), void *data,
@@ -196,7 +224,7 @@  struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
 
 	/* Pick a candidate. Returns with target queue locked */
 	if (cpu == NULL)
-		cpu = cpu_find_job_target();
+		cpu = cpu_find_job_target(-1);
 	else if (cpu != this_cpu())
 		lock(&cpu->job_lock);
 	else
@@ -209,21 +237,37 @@  struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu,
 		return job;
 	}
 
-	/* That's bad, the job will never run */
-	if (cpu->job_has_no_return) {
-		prlog(PR_WARNING, "WARNING ! Job %s scheduled on CPU 0x%x"
-		      " which has a no-return job on its queue !\n",
-		      job->name, cpu->pir);
-		backtrace();
+	queue_job_on_cpu(cpu, job);
+
+	return job;
+}
+
+struct cpu_job *cpu_queue_job_on_node(uint32_t chip_id,
+				const char *name,
+				void (*func)(void *data), void *data)
+{
+	struct cpu_thread *cpu;
+	struct cpu_job *job;
+
+	job = zalloc(sizeof(struct cpu_job));
+	if (!job)
+		return NULL;
+	job->func = func;
+	job->data = data;
+	job->name = name;
+	job->complete = false;
+	job->no_return = false;
+
+	/* Pick a candidate. Returns with target queue locked */
+	cpu = cpu_find_job_target(chip_id);
+
+	/* No free thread on that node, let the caller deal with it */
+	if (cpu == NULL) {
+		free(job);
+		return NULL;
 	}
-	list_add_tail(&cpu->job_queue, &job->link);
-	if (no_return)
-		cpu->job_has_no_return = true;
-	else
-		cpu->job_count++;
-	if (pm_enabled)
-		cpu_wake(cpu);
-	unlock(&cpu->job_lock);
+
+	queue_job_on_cpu(cpu, job);
 
 	return job;
 }
diff --git a/include/cpu.h b/include/cpu.h
index 2ca59b9648fa..ae3185723673 100644
--- a/include/cpu.h
+++ b/include/cpu.h
@@ -280,6 +280,10 @@  static inline struct cpu_job *cpu_queue_job(struct cpu_thread *cpu,
 	return __cpu_queue_job(cpu, name, func, data, false);
 }
 
+extern struct cpu_job *cpu_queue_job_on_node(uint32_t chip_id,
+				       const char *name,
+				       void (*func)(void *data), void *data);
+
 
 /* Poll job status, returns true if completed */
 extern bool cpu_poll_job(struct cpu_job *job);
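
For the fast reboot memory clearing case mentioned in the commit message,
a caller would queue one job per chip and then wait for them all. A rough
sketch of that usage (not part of this patch), assuming skiboot's existing
for_each_chip(), MAX_CHIPS and cpu_wait_job() helpers plus a hypothetical
clear_chip_mem() job function:

	struct proc_chip *chip;
	struct cpu_job *jobs[MAX_CHIPS];
	int njobs = 0;

	for_each_chip(chip) {
		/* Prefer a thread on this chip... */
		jobs[njobs] = cpu_queue_job_on_node(chip->id, "clear-mem",
						    clear_chip_mem, chip);
		/* ...fall back to any available thread if it has none */
		if (!jobs[njobs])
			jobs[njobs] = cpu_queue_job(NULL, "clear-mem",
						    clear_chip_mem, chip);
		if (jobs[njobs])
			njobs++;
	}

	/* Wait for completion; the second argument frees each job */
	while (njobs)
		cpu_wait_job(jobs[--njobs], true);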