Magellan Linux

Annotation of /trunk/kernel26-magellan/patches-2.6.21-r4/0008-2.6.21-sched-ck-add-above-background-load-function.patch



Revision 222
Tue Jun 12 08:03:28 2007 UTC by niro
File size: 2992 bytes
files for 2.6.21-magellan-r4

From: Con Kolivas <kernel@kolivas.org>

Add an above_background_load() function which can be used by other
subsystems to detect if there is anything besides niced tasks running.
Place it in sched.h to allow it to be compiled out if not used.

This version relies on DEFAULT_WEIGHT being scaled via the SD cpu scheduler
with SCHED_IDLEPRIO support.

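A rough sketch of how another subsystem might consume the helper is shown
below; the caller and its do_background_pass() routine are hypothetical and
only illustrate the intended use, they are not part of this patch:

        #include <linux/sched.h>                /* for above_background_load() */

        extern void do_background_pass(void);   /* hypothetical background work */

        /*
         * Hypothetical caller: skip optional background work whenever any
         * cpu carries at least a nice 0 task's worth of weighted load.
         */
        static void maybe_do_background_pass(void)
        {
                if (above_background_load())
                        return;
                do_background_pass();
        }
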
Signed-off-by: Con Kolivas <kernel@kolivas.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
---

 include/linux/sched.h |    2 +-
 kernel/sched.c        |   46 +++++++++++++++++++++++++++++++++++++---------
 2 files changed, 38 insertions(+), 10 deletions(-)

Index: linux-2.6.21-ck2/include/linux/sched.h
===================================================================
--- linux-2.6.21-ck2.orig/include/linux/sched.h	2007-05-14 19:30:31.000000000 +1000
+++ linux-2.6.21-ck2/include/linux/sched.h	2007-05-14 19:31:55.000000000 +1000
@@ -133,7 +133,7 @@ extern unsigned long nr_uninterruptible(
 extern unsigned long nr_active(void);
 extern unsigned long nr_iowait(void);
 extern unsigned long weighted_cpuload(const int cpu);
-
+extern int above_background_load(void);
 
 /*
  * Task state bitmask. NOTE! These bits are also
Index: linux-2.6.21-ck2/kernel/sched.c
===================================================================
--- linux-2.6.21-ck2.orig/kernel/sched.c	2007-05-14 19:31:42.000000000 +1000
+++ linux-2.6.21-ck2/kernel/sched.c	2007-05-14 19:49:51.000000000 +1000
@@ -1082,6 +1082,37 @@ static int effective_prio(struct task_st
 	return p->prio;
 }
 
+static inline unsigned int nice_quota_ms(int nice)
+{
+	unsigned int rr = rr_interval;
+
+	if (nice < -6) {
+		rr *= nice * nice;
+		rr /= 40;
+	} else if (nice > 0)
+		rr = rr / 2 ? : 1;
+	return rr;
+}
+
+#define DEFAULT_WEIGHT (nice_quota_ms(0) * 20 * PRIO_RANGE)
+
+/*
+ * A runqueue laden with a single nice 0 task scores a weighted_cpuload of
+ * SCHED_LOAD_SCALE. This function returns 1 if any cpu is laden with a
+ * task of nice 0 or enough lower priority tasks to bring up the
+ * weighted_cpuload
+ */
+int above_background_load(void)
+{
+	unsigned long cpu;
+
+	for_each_online_cpu(cpu) {
+		if (weighted_cpuload(cpu) >= DEFAULT_WEIGHT)
+			return 1;
+	}
+	return 0;
+}
+
 /*
  * All tasks have quotas based on rr_interval. RT tasks all get rr_interval.
  * From nice 1 to 19 they are smaller than it only if they are at least one
@@ -1092,16 +1123,13 @@ static int effective_prio(struct task_st
  */
 static inline unsigned int rr_quota(struct task_struct *p)
 {
-	int nice = TASK_NICE(p), rr = rr_interval;
+	unsigned int quota;
 
-	if (!rt_task(p)) {
-		if (nice < -6) {
-			rr *= nice * nice;
-			rr /= 40;
-		} else if (nice > 0)
-			rr = rr / 2 ? : 1;
-	}
-	return MS_TO_US(rr);
+	if (rt_task(p))
+		quota = rr_interval;
+	else
+		quota = nice_quota_ms(TASK_NICE(p));
+	return MS_TO_US(quota);
 }
 
 /* Every time we set the quota we need to set the load weight */