Magellan Linux

Contents of /trunk/kernel26-magellan/patches-2.6.16-r12/0004-2.6.16-sched-store-weighted-load-on-up.patch



Revision 72
Mon Jun 5 09:25:38 2006 UTC by niro
File size: 6383 bytes
Version bump to 2.6.16-r12:
- updated to linux-2.6.16.19
- updated to ck11

From akpm@osdl.org Tue Mar 14 12:57:28 2006
Message-Id: <200603140159.k2E1xqtI004517@shell0.pdx.osdl.net>
Subject: + sched-store-weighted-load-on-up.patch added to -mm tree
To: kernel@kolivas.org,
    mingo@elte.hu,
    pwil3058@bigpond.net.au,
    mm-commits@vger.kernel.org
From: akpm@osdl.org
Date: Mon, 13 Mar 2006 17:57:28 -0800

The patch titled

     sched: store weighted load on up

has been added to the -mm tree. Its filename is

     sched-store-weighted-load-on-up.patch

See http://www.zip.com.au/~akpm/linux/patches/stuff/added-to-mm.txt to find
out what to do about this

From: Con Kolivas <kernel@kolivas.org>

Modify the smp nice code to store load_weight on uniprocessor builds as
well, so that relative niceness on a single CPU can be assessed. Also do
some minor cleanups and uninline set_load_weight().

Signed-off-by: Con Kolivas <kernel@kolivas.org>
Cc: Peter Williams <pwil3058@bigpond.net.au>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>

 include/linux/sched.h |    4 ++--
 kernel/sched.c        |   24 ++++++------------------
 2 files changed, 8 insertions(+), 20 deletions(-)
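
The load_weight being stored is derived from the task's static priority
(its nice level). The stand-alone sketch below shows the mapping:
SCALE_PRIO() and static_prio_timeslice() are taken from the diff that
follows, while LOAD_WEIGHT() and PRIO_TO_LOAD_WEIGHT() do not appear in
these hunks and are assumed here to match the wider smpnice series; HZ is
pinned to 1000 so the program runs in user space.

#include <stdio.h>

#define HZ 1000                           /* pinned for this user-space sketch */
#define MAX_RT_PRIO 100
#define MAX_USER_PRIO 40
#define MAX_PRIO (MAX_RT_PRIO + MAX_USER_PRIO)
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define MIN_TIMESLICE (5 * HZ / 1000)
#define DEF_TIMESLICE (100 * HZ / 1000)   /* 100 ms */
#define SCHED_LOAD_SCALE 128UL

#define max(a, b) ((a) > (b) ? (a) : (b))

/* As in the diff below: nice < 0 tasks scale from a 4x base timeslice. */
#define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
        if (static_prio < NICE_TO_PRIO(0))
                return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
        else
                return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}

/* Assumed from the smpnice series, not shown in this patch: the weight is
 * the timeslice rescaled so a nice-0 task weighs SCHED_LOAD_SCALE (128). */
#define LOAD_WEIGHT(lp) (((lp) * SCHED_LOAD_SCALE) / DEF_TIMESLICE)
#define PRIO_TO_LOAD_WEIGHT(prio) LOAD_WEIGHT(static_prio_timeslice(prio))

int main(void)
{
        int nice;

        for (nice = -20; nice <= 19; nice++)
                printf("nice %3d -> load_weight %4lu\n", nice,
                       PRIO_TO_LOAD_WEIGHT(NICE_TO_PRIO(nice)));
        return 0;
}

With these definitions a nice-0 task weighs 128, a nice -20 task 1024 and a
nice 19 task 6, so a runqueue's raw_weighted_load reflects how "nice-heavy"
its tasks are, which is exactly what this patch makes available on UP.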

Index: linux-2.6.16-ck1/include/linux/sched.h
===================================================================
--- linux-2.6.16-ck1.orig/include/linux/sched.h 2006-03-20 20:46:44.000000000 +1100
+++ linux-2.6.16-ck1/include/linux/sched.h 2006-03-20 20:46:46.000000000 +1100
@@ -551,9 +551,9 @@ enum idle_type
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
-#ifdef CONFIG_SMP
 #define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
 
+#ifdef CONFIG_SMP
 #define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
 #define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
 #define SD_BALANCE_EXEC 4 /* Balance on exec */
@@ -702,8 +702,8 @@ struct task_struct {
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
         int oncpu;
 #endif
-        int load_weight; /* for load balancing purposes */
 #endif
+        int load_weight; /* for niceness load balancing purposes */
         int prio, static_prio;
         struct list_head run_list;
         prio_array_t *array;
Index: linux-2.6.16-ck1/kernel/sched.c
===================================================================
--- linux-2.6.16-ck1.orig/kernel/sched.c 2006-03-20 20:46:45.000000000 +1100
+++ linux-2.6.16-ck1/kernel/sched.c 2006-03-20 20:46:46.000000000 +1100
@@ -166,12 +166,12 @@
  */
 
 #define SCALE_PRIO(x, prio) \
-        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
+        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)
 
 static unsigned int static_prio_timeslice(int static_prio)
 {
         if (static_prio < NICE_TO_PRIO(0))
-                return SCALE_PRIO(DEF_TIMESLICE*4, static_prio);
+                return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
         else
                 return SCALE_PRIO(DEF_TIMESLICE, static_prio);
 }
@@ -213,8 +213,8 @@ struct runqueue {
          * remote CPUs use both these fields when doing load calculation.
          */
         unsigned long nr_running;
-#ifdef CONFIG_SMP
         unsigned long raw_weighted_load;
+#ifdef CONFIG_SMP
         unsigned long cpu_load[3];
 #endif
         unsigned long long nr_switches;
@@ -668,7 +668,6 @@ static int effective_prio(task_t *p)
         return prio;
 }
 
-#ifdef CONFIG_SMP
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
@@ -691,9 +690,10 @@ static int effective_prio(task_t *p)
 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
         (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
 
-static inline void set_load_weight(task_t *p)
+static void set_load_weight(task_t *p)
 {
         if (rt_task(p)) {
+#ifdef CONFIG_SMP
                 if (p == task_rq(p)->migration_thread)
                         /*
                          * The migration thread does the actual balancing.
@@ -702,6 +702,7 @@ static inline void set_load_weight(task_
                          */
                         p->load_weight = 0;
                 else
+#endif
                         p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
         } else
                 p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
@@ -716,19 +717,6 @@ static inline void dec_raw_weighted_load
 {
         rq->raw_weighted_load -= p->load_weight;
 }
-#else
-static inline void set_load_weight(task_t *p)
-{
-}
-
-static inline void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-
-static inline void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
-{
-}
-#endif
 
 static inline void inc_nr_running(task_t *p, runqueue_t *rq)
 {
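
On UP the practical effect is that inc_raw_weighted_load() and
dec_raw_weighted_load() are no longer empty stubs, so the runqueue always
carries the summed weight of its runnable tasks. The sketch below uses
simplified stand-ins for task_t and runqueue_t, and the share calculation
is illustrative rather than code from the patch; it shows what the stored
weighted load enables on a single CPU.

#include <stdio.h>

typedef struct { int load_weight; } task_t;
typedef struct {
        unsigned long nr_running;
        unsigned long raw_weighted_load;  /* sum of runnable tasks' weights */
} runqueue_t;

/* After this patch, these helpers do real work on UP as well as SMP. */
static void inc_raw_weighted_load(runqueue_t *rq, const task_t *p)
{
        rq->raw_weighted_load += p->load_weight;
}

static void dec_raw_weighted_load(runqueue_t *rq, const task_t *p)
{
        rq->raw_weighted_load -= p->load_weight;
}

int main(void)
{
        task_t a = { 128 };               /* nice 0, per the mapping above */
        task_t b = { 768 };               /* nice -10 */
        runqueue_t rq = { 2, 0 };

        inc_raw_weighted_load(&rq, &a);
        inc_raw_weighted_load(&rq, &b);

        /* Relative niceness on one CPU can now be assessed: each task's
         * fair share of the queue is its weight / raw_weighted_load. */
        printf("a: %lu%% of queue\n",
               100UL * a.load_weight / rq.raw_weighted_load);
        printf("b: %lu%% of queue\n",
               100UL * b.load_weight / rq.raw_weighted_load);

        dec_raw_weighted_load(&rq, &a);
        dec_raw_weighted_load(&rq, &b);
        return 0;
}

Only the two helpers mirror the patch; the percentages are just one way a
later consumer (presumably the rest of the ck/smpnice work this tree
carries) can use the weighted load once it exists on uniprocessor kernels.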