author     Andrew Morton <akpm@linux-foundation.org>  2007-05-08 20:27:06 -0700
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-08 20:41:15 -0700
commit     d5f9f942c601fdebe57f7805e4b4fbad9c28ada8
tree       6a212ceabd844fbaf517a13a9f885be5ab1c59b5
parent     da4e8ca376a1b3dca470eba14dcec321a6a27b8b
download   linux-d5f9f942c601fdebe57f7805e4b4fbad9c28ada8.tar.gz
revert 'sched: redundant reschedule when set_user_nice() boosts a prio of a task from the "expired" array'
Revert commit bd53f96ca54a21c07e7a0ae1886fa623d370b85f.

Con says:

This is no good, sorry. The one I saw originally was with the staircase
deadline cpu scheduler in situ and was different.

  #define TASK_PREEMPTS_CURR(p, rq) \
     ((p)->prio < (rq)->curr->prio)

was changed by that commit to

  #define TASK_PREEMPTS_CURR(p, rq) \
     (((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active))

This will fail to wake up a runqueue for a task that has been migrated to the
expired array of an otherwise idle runqueue, which can happen with SMP
balancing.
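
For illustration, here is a minimal stand-alone C sketch of that failure
mode. The struct rq, struct task, and priority values below are simplified,
hypothetical stand-ins for the real scheduler structures, not kernel code;
only the two macro bodies are taken from the patch:

  #include <stdio.h>

  struct prio_array { int nr_active; };

  struct task {
          int prio;
          struct prio_array *array;   /* array this task is queued on */
  };

  struct rq {
          struct prio_array arrays[2];
          struct prio_array *active, *expired;
          struct task *curr;          /* task currently running here */
  };

  /* The version being reverted: only active-array tasks preempt. */
  #define TASK_PREEMPTS_CURR_STRICT(p, rq) \
          (((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active))

  /* The restored version: a plain priority comparison. */
  #define TASK_PREEMPTS_CURR_PLAIN(p, rq) \
          ((p)->prio < (rq)->curr->prio)

  int main(void)
  {
          struct rq rq = { .active = &rq.arrays[0], .expired = &rq.arrays[1] };
          struct task idle  = { .prio = 140, .array = rq.active };  /* idle task */
          struct task moved = { .prio = 120, .array = rq.expired };

          rq.curr = &idle;    /* the runqueue is otherwise idle */

          /* SMP balancing has put "moved" on the expired array, so the
           * strict test is false and the idle CPU is never rescheduled,
           * while the plain test correctly triggers resched_task(). */
          printf("strict preempts: %d\n", TASK_PREEMPTS_CURR_STRICT(&moved, &rq));
          printf("plain  preempts: %d\n", TASK_PREEMPTS_CURR_PLAIN(&moved, &rq));
          return 0;
  }

Compiled and run, the strict form prints 0 (the idle CPU is never woken)
while the plain form prints 1, matching the description above.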

Cc: Dmitry Adamushko <dmitry.adamushko@gmail.com>
Cc: Con Kolivas <kernel@kolivas.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 kernel/sched.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a3a04085e794..66bd7ff23f18 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -169,7 +169,7 @@ unsigned long long __attribute__((weak)) sched_clock(void)
 		(MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
 
 #define TASK_PREEMPTS_CURR(p, rq) \
-	(((p)->prio < (rq)->curr->prio) && ((p)->array == (rq)->active))
+	((p)->prio < (rq)->curr->prio)
 
 #define SCALE_PRIO(x, prio) \
 	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
@@ -4076,13 +4076,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	struct prio_array *array;
 	unsigned long flags;
 	struct rq *rq;
-	int delta;
+	int oldprio;
 
 	BUG_ON(prio < 0 || prio > MAX_PRIO);
 
 	rq = task_rq_lock(p, &flags);
 
-	delta = prio - p->prio;
+	oldprio = p->prio;
 	array = p->array;
 	if (array)
 		dequeue_task(p, array);
@@ -4098,11 +4098,13 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 		enqueue_task(p, array);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if our priority became higher
-		 * than the current's.
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
 		 */
-		if (TASK_PREEMPTS_CURR(p, rq) ||
-				(delta > 0 && task_running(rq, p)))
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
 			resched_task(rq->curr);
 	}
 	task_rq_unlock(rq, &flags);
@@ -4150,12 +4152,10 @@ void set_user_nice(struct task_struct *p, long nice)
 		enqueue_task(p, array);
 		inc_raw_weighted_load(rq, p);
 		/*
-		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or if our priority became higher
-		 * than the current's.
+		 * If the task increased its priority or is running and
+		 * lowered its priority, then reschedule its CPU:
 		 */
-		if (TASK_PREEMPTS_CURR(p, rq) ||
-				(delta > 0 && task_running(rq, p)))
+		if (delta < 0 || (delta > 0 && task_running(rq, p)))
 			resched_task(rq->curr);
 	}
 out_unlock:
@@ -4382,11 +4382,13 @@ recheck:
 		__activate_task(p, rq);
 		/*
 		 * Reschedule if we are currently running on this runqueue and
-		 * our priority decreased, or our priority became higher
-		 * than the current's.
+		 * our priority decreased, or if we are not currently running on
+		 * this runqueue and our priority is higher than the current's
 		 */
-		if (TASK_PREEMPTS_CURR(p, rq) ||
-				(task_running(rq, p) && p->prio > oldprio))
+		if (task_running(rq, p)) {
+			if (p->prio > oldprio)
+				resched_task(rq->curr);
+		} else if (TASK_PREEMPTS_CURR(p, rq))
 			resched_task(rq->curr);
 	}
 	__task_rq_unlock(rq);