From: Con Kolivas

Add an above_background_load() function which can be used by other
subsystems to detect if there is anything besides niced tasks running.

Place it in sched.h to allow it to be compiled out if not used.

This version relies on DEFAULT_WEIGHT being scaled via the SD cpu
scheduler with SCHED_IDLEPRIO support.

Signed-off-by: Con Kolivas
Acked-by: Ingo Molnar
Cc: Peter Williams
Signed-off-by: Andrew Morton
---

 include/linux/sched.h |    2 +-
 kernel/sched.c        |   46 +++++++++++++++++++++++++++++++++++++---------
 2 files changed, 38 insertions(+), 10 deletions(-)

Index: linux-2.6.21-ck2/include/linux/sched.h
===================================================================
--- linux-2.6.21-ck2.orig/include/linux/sched.h	2007-05-14 19:30:31.000000000 +1000
+++ linux-2.6.21-ck2/include/linux/sched.h	2007-05-14 19:31:55.000000000 +1000
@@ -133,7 +133,7 @@ extern unsigned long nr_uninterruptible(
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long weighted_cpuload(const int cpu);
-
+extern int above_background_load(void);
 
 /*
  * Task state bitmask. NOTE! These bits are also
Index: linux-2.6.21-ck2/kernel/sched.c
===================================================================
--- linux-2.6.21-ck2.orig/kernel/sched.c	2007-05-14 19:31:42.000000000 +1000
+++ linux-2.6.21-ck2/kernel/sched.c	2007-05-14 19:49:51.000000000 +1000
@@ -1082,6 +1082,37 @@ static int effective_prio(struct task_st
 	return p->prio;
 }
 
+static inline unsigned int nice_quota_ms(int nice)
+{
+	unsigned int rr = rr_interval;
+
+	if (nice < -6) {
+		rr *= nice * nice;
+		rr /= 40;
+	} else if (nice > 0)
+		rr = rr / 2 ? : 1;
+	return rr;
+}
+
+#define DEFAULT_WEIGHT	(nice_quota_ms(0) * 20 * PRIO_RANGE)
+
+/*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+ * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
+ * task of nice 0 or enough lower priority tasks to bring up the
+ * weighted_cpuload
+ */
+int above_background_load(void)
+{
+	unsigned long cpu;
+
+	for_each_online_cpu(cpu) {
+		if (weighted_cpuload(cpu) >= DEFAULT_WEIGHT)
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * All tasks have quotas based on rr_interval. RT tasks all get rr_interval.
  * From nice 1 to 19 they are smaller than it only if they are at least one
@@ -1092,16 +1123,13 @@ static int effective_prio(struct task_st
  */
 static inline unsigned int rr_quota(struct task_struct *p)
 {
-	int nice = TASK_NICE(p), rr = rr_interval;
+	unsigned int quota;
 
-	if (!rt_task(p)) {
-		if (nice < -6) {
-			rr *= nice * nice;
-			rr /= 40;
-		} else if (nice > 0)
-			rr = rr / 2 ? : 1;
-	}
-	return MS_TO_US(rr);
+	if (rt_task(p))
+		quota = rr_interval;
+	else
+		quota = nice_quota_ms(TASK_NICE(p));
+	return MS_TO_US(quota);
 }
 
 /* Every time we set the quota we need to set the load weight */
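
For illustration, a minimal sketch of how a subsystem (e.g. something
like swap prefetch) might consume the new hook.  The function name
my_idle_time_work() and its surrounding context are hypothetical, not
part of this patch:

#include <linux/sched.h>

/*
 * Hypothetical caller (illustration only, not part of this patch):
 * defer optional background work while anything other than niced
 * tasks is running on any cpu.
 */
static void my_idle_time_work(void)
{
	if (above_background_load())
		return;	/* at least a nice-0 load exists on some cpu */

	/* ... do opportunistic background work here ... */
}

Polling like this stays cheap because above_background_load() only
walks the online cpus and compares each weighted_cpuload() against
DEFAULT_WEIGHT.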
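To see what the nice scaling in nice_quota_ms() works out to, here is a
standalone userspace sketch of the same arithmetic.  The rr value of
8 ms is an assumption for illustration, not necessarily the scheduler's
actual rr_interval default, and the GNU "?:" shorthand from the kernel
code is expanded to standard C:

#include <stdio.h>

/* Userspace re-implementation of the nice_quota_ms() arithmetic above. */
static unsigned int nice_quota_ms(int nice, unsigned int rr)
{
	if (nice < -6) {
		rr *= nice * nice;	/* e.g. nice -20: 400 / 40 = 10x */
		rr /= 40;
	} else if (nice > 0)
		rr = rr / 2 ? rr / 2 : 1;	/* halved, but at least 1 ms */
	return rr;
}

int main(void)
{
	static const int nices[] = { -20, -10, -7, -6, -1, 0, 1, 19 };
	unsigned int i;

	for (i = 0; i < sizeof(nices) / sizeof(nices[0]); i++)
		printf("nice %3d -> quota %u ms\n",
		       nices[i], nice_quota_ms(nices[i], 8));
	return 0;
}

With rr = 8 this prints 80, 20, 9, 8, 8, 8, 4 and 4 ms respectively:
only nice levels below -6 scale the quota up (by nice^2 / 40), levels
-6 through 0 keep it unchanged, and positive nice halves it.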