
[1/2] sched: Prevent divide by zero when cpu_power is 0

Message ID 1295364863-9028-2-git-send-email-stefan.bader@canonical.com
State Accepted
Delegated to: Stefan Bader

Commit Message

Stefan Bader Jan. 18, 2011, 3:34 p.m. UTC
From: Andrew Dickinson <whydna@whydna.net>

This patch fixes a corner case where we crash with a
divide_error in find_busiest_group.

I don't fully understand what causes sds.total_pwr to become zero in
find_busiest_group, but this patch guards against the resulting
divide-by-zero.

I also added safeguards around other routines in the scheduler code
where we divide by power; that is more of a just-in-case and I'm
open to debate on it.
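
The guard pattern is simply a conditional divide: compute the scaled load
unconditionally, and only normalize by the power when it is non-zero (in
find_busiest_group we instead bail out to out_balanced when sds.total_pwr
is 0).  A minimal stand-alone sketch of the same pattern follows; the names
LOAD_SCALE and scaled_avg_load and the numbers used are illustrative, not
the kernel's actual symbols:

#include <stdio.h>

/* Illustrative stand-in for SCHED_LOAD_SCALE (the fixed-point load unit). */
#define LOAD_SCALE 1024UL

/*
 * Scale a raw group load by LOAD_SCALE and normalize it by the group's
 * cpu power, skipping the division when the power is 0 so we can never
 * take a divide_error.  The scaled (un-normalized) value is returned in
 * that case, matching what the patched kernel code does.
 */
static unsigned long scaled_avg_load(unsigned long group_load,
				     unsigned long cpu_power)
{
	unsigned long avg_load = group_load * LOAD_SCALE;

	if (cpu_power)
		avg_load /= cpu_power;

	return avg_load;
}

int main(void)
{
	/* Normal case: a power of 1024 leaves the load value unchanged. */
	printf("%lu\n", scaled_avg_load(512, 1024));	/* prints 512 */
	/* Corner case: zero power no longer crashes; value stays scaled. */
	printf("%lu\n", scaled_avg_load(512, 0));	/* prints 524288 */
	return 0;
}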

BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=16991
BugLink: http://bugs.launchpad.net/bugs/614853

Signed-off-by: Andrew Dickinson <whydna@whydna.net>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
---
 kernel/sched.c      |   10 +++++++---
 kernel/sched_fair.c |    4 +++-
 2 files changed, 10 insertions(+), 4 deletions(-)

Comments

Andy Whitcroft Jan. 19, 2011, 2:14 p.m. UTC | #1
On Tue, Jan 18, 2011 at 04:34:22PM +0100, Stefan Bader wrote:
> From: Andrew Dickinson <whydna@whydna.net>
> 
> This patch fixes a corner case where we crash with a
> divide_error in find_busiest_group.
> 
> I don't fully understand what causes sds.total_pwr to become zero in
> find_busiest_group, but this patch guards against the resulting
> divide-by-zero.
> 
> I also added safeguards around other routines in the scheduler code
> where we divide by power; that is more of a just-in-case and I'm
> open to debate on it.
> 
> BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=16991
> BugLink: http://bugs.launchpad.net/bugs/614853
> 
> Signed-off-by: Andrew Dickinson <whydna@whydna.net>
> Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
> ---
>  kernel/sched.c      |   10 +++++++---
>  kernel/sched_fair.c |    4 +++-
>  2 files changed, 10 insertions(+), 4 deletions(-)
> 
> diff --git a/kernel/sched.c b/kernel/sched.c
> index 7dd8aad..d4a4b14 100644
> --- a/kernel/sched.c
> +++ b/kernel/sched.c
> @@ -3836,7 +3836,9 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
>  	}
>  
>  	/* Adjust by relative CPU power of the group */
> -	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
> +	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE);
> +	if (group->cpu_power)
> +		sgs->avg_load /= group->cpu_power;
>  
>  	/*
>  	 * Consider the group unbalanced when the imbalance is larger
> @@ -4119,7 +4121,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
>  	if (balance && !(*balance))
>  		goto ret;
>  
> -	if (!sds.busiest || sds.busiest_nr_running == 0)
> +	if (!sds.busiest || sds.busiest_nr_running == 0 || sds.total_pwr == 0)
>  		goto out_balanced;
>  
>  	if (sds.this_load >= sds.max_load)
> @@ -4184,7 +4186,9 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
>  		 * the load can be moved away from the cpu that is potentially
>  		 * running at a lower capacity.
>  		 */
> -		wl = (wl * SCHED_LOAD_SCALE) / power;
> +		wl = (wl * SCHED_LOAD_SCALE);
> +		if (power)
> +			wl /= power;
>  
>  		if (wl > max_load) {
>  			max_load = wl;
> diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
> index 01e311e..3087249 100644
> --- a/kernel/sched_fair.c
> +++ b/kernel/sched_fair.c
> @@ -1344,7 +1344,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
>  		}
>  
>  		/* Adjust by relative CPU power of the group */
> -		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
> +		avg_load = (avg_load * SCHED_LOAD_SCALE);
> +		if (group->cpu_power)
> +			avg_load /= group->cpu_power;
>  
>  		if (local_group) {
>  			this_load = avg_load;

This one looks ok.  It avoids doing the divide whenever it would trip
over a zero divisor.

Acked-by: Andy Whitcroft <apw@canonical.com>

-apw

Patch

diff --git a/kernel/sched.c b/kernel/sched.c
index 7dd8aad..d4a4b14 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3836,7 +3836,9 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	}
 
 	/* Adjust by relative CPU power of the group */
-	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
+	sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE);
+	if (group->cpu_power)
+		sgs->avg_load /= group->cpu_power;
 
 	/*
 	 * Consider the group unbalanced when the imbalance is larger
@@ -4119,7 +4121,7 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (balance && !(*balance))
 		goto ret;
 
-	if (!sds.busiest || sds.busiest_nr_running == 0)
+	if (!sds.busiest || sds.busiest_nr_running == 0 || sds.total_pwr == 0)
 		goto out_balanced;
 
 	if (sds.this_load >= sds.max_load)
@@ -4184,7 +4186,9 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 		 * the load can be moved away from the cpu that is potentially
 		 * running at a lower capacity.
 		 */
-		wl = (wl * SCHED_LOAD_SCALE) / power;
+		wl = (wl * SCHED_LOAD_SCALE);
+		if (power)
+			wl /= power;
 
 		if (wl > max_load) {
 			max_load = wl;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 01e311e..3087249 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1344,7 +1344,9 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		}
 
 		/* Adjust by relative CPU power of the group */
-		avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
+		avg_load = (avg_load * SCHED_LOAD_SCALE);
+		if (group->cpu_power)
+			avg_load /= group->cpu_power;
 
 		if (local_group) {
 			this_load = avg_load;