Contents of /trunk/kernel26-magellan/patches-2.6.16-r12/0008-2.6.16-sched-generic_optims2.patch
Revision 72
Mon Jun 5 09:25:38 2006 UTC (18 years, 3 months ago) by niro
File size: 17840 byte(s)
ver bump to 2.6.16-r12:
- updated to linux-2.6.16.19
- updated to ck11
 include/linux/sched.h |    2 
 kernel/sched.c        |  121 ++++++++++++++++++++++++++------------------------
 2 files changed, 64 insertions(+), 59 deletions(-)

Index: linux-2.6.16-ck1/kernel/sched.c
===================================================================
--- linux-2.6.16-ck1.orig/kernel/sched.c 2006-03-20 20:46:48.000000000 +1100
+++ linux-2.6.16-ck1/kernel/sched.c 2006-03-20 20:46:48.000000000 +1100
@@ -194,7 +194,7 @@ for (domain = rcu_dereference(cpu_rq(cpu
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(const runqueue_t *rq, const task_t *p)
{
return rq->curr == p;
}
@@ -203,7 +203,7 @@ static inline void prepare_lock_switch(r
{
}

-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, task_t *__unused)
{
#ifdef CONFIG_DEBUG_SPINLOCK
/* this is a valid case when another task releases the spinlock */
@@ -213,7 +213,7 @@ static inline void finish_lock_switch(ru
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(const runqueue_t *rq, const task_t *p)
{
#ifdef CONFIG_SMP
return p->oncpu;
@@ -239,7 +239,7 @@ static inline void prepare_lock_switch(r
#endif
}

-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *__unused, task_t *prev)
{
#ifdef CONFIG_SMP
/*
@@ -261,7 +261,7 @@ static inline void finish_lock_switch(ru
* interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
*/
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static inline runqueue_t *task_rq_lock(const task_t *p, unsigned long *flags)
__acquires(rq->lock)
{
struct runqueue *rq;
@@ -611,7 +611,7 @@ static inline void inc_nr_running(task_t
inc_raw_weighted_load(rq, p);
}

-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+static inline void dec_nr_running(const task_t *p, runqueue_t *rq)
{
rq->nr_running--;
dec_raw_weighted_load(rq, p);
@@ -831,7 +831,7 @@ static void fastcall deactivate_task(tas
* the target CPU.
*/
#ifdef CONFIG_SMP
-static void resched_task(task_t *p)
+static void fastcall resched_task(task_t *p)
{
int cpu;

@@ -888,7 +888,7 @@ typedef struct {
* The task's runqueue lock must be held.
* Returns true if you have to wait for migration thread.
*/
-static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+static int migrate_task(task_t *p, const int dest_cpu, migration_req_t *req)
{
runqueue_t *rq = task_rq(p);

@@ -951,7 +951,7 @@ repeat:
* to another CPU then no harm is done and the purpose has been
* achieved as well.
*/
-void kick_process(task_t *p)
+void kick_process(const task_t *p)
{
int cpu;

@@ -1009,7 +1009,7 @@ static inline unsigned long cpu_avg_load
* domain.
*/
static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+find_idlest_group(struct sched_domain *sd, task_t *p, const int this_cpu)
{
struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
@@ -1063,7 +1063,7 @@ nextgroup:
* find_idlest_queue - find the idlest runqueue among the cpus in group.
*/
static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_cpu(struct sched_group *group, task_t *p, const int this_cpu)
{
cpumask_t tmp;
unsigned long load, min_load = ULONG_MAX;
@@ -1096,7 +1096,7 @@ find_idlest_cpu(struct sched_group *grou
*
* preempt must be disabled.
*/
-static int sched_balance_self(int cpu, int flag)
+static int sched_balance_self(int cpu, const int flag)
{
struct task_struct *t = current;
struct sched_domain *tmp, *sd = NULL;
@@ -1148,7 +1148,7 @@ nextlevel:
* Returns the CPU we should wake onto.
*/
#if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, task_t *p)
+static int wake_idle(const int cpu, const task_t *p)
{
cpumask_t tmp;
struct sched_domain *sd;
@@ -1171,7 +1171,7 @@ static int wake_idle(int cpu, task_t *p)
return cpu;
}
#else
-static inline int wake_idle(int cpu, task_t *p)
+static inline int wake_idle(const int cpu, task_t *__unused)
{
return cpu;
}
@@ -1211,7 +1211,7 @@ static void fastcall preempt(const task_
*
* returns failure only if the task is already active.
*/
-static int try_to_wake_up(task_t *p, unsigned int state, int sync)
+static int try_to_wake_up(task_t *p, unsigned int state, const int sync)
{
int cpu, this_cpu, success = 0;
unsigned long flags;
@@ -1369,7 +1369,7 @@ int fastcall wake_up_state(task_t *p, un
* Perform scheduler related setup for a newly forked process p.
* p is forked by current.
*/
-void fastcall sched_fork(task_t *p, int clone_flags)
+void fastcall sched_fork(task_t *p, int __unused)
{
int cpu = get_cpu();

@@ -1406,7 +1406,7 @@ void fastcall sched_fork(task_t *p, int
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
-void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
+void fastcall wake_up_new_task(task_t *p, const unsigned long clone_flags)
{
unsigned long flags;
int this_cpu, cpu;
@@ -1690,7 +1690,7 @@ static void double_lock_balance(runqueue
* allow dest_cpu, which will force the cpu onto dest_cpu. Then
* the cpu_allowed mask is restored.
*/
-static void sched_migrate_task(task_t *p, int dest_cpu)
+static void sched_migrate_task(task_t *p, const int dest_cpu)
{
migration_req_t req;
runqueue_t *rq;
@@ -1869,8 +1869,8 @@ out:
* moved to restore balance via the imbalance parameter.
*/
static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
- unsigned long *imbalance, enum idle_type idle, int *sd_idle)
+find_busiest_group(struct sched_domain *sd, const int this_cpu,
+ unsigned long *imbalance, const enum idle_type idle, int *sd_idle)
{
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2035,7 +2035,7 @@ out_balanced:
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static runqueue_t *find_busiest_queue(struct sched_group *group,
- enum idle_type idle)
+ const enum idle_type idle)
{
unsigned long load, max_load = 0;
runqueue_t *busiest = NULL;
@@ -2066,8 +2066,8 @@ static runqueue_t *find_busiest_queue(st
*
* Called with this_rq unlocked.
*/
-static int load_balance(int this_cpu, runqueue_t *this_rq,
- struct sched_domain *sd, enum idle_type idle)
+static int load_balance(const int this_cpu, runqueue_t *this_rq,
+ struct sched_domain *sd, const enum idle_type idle)
{
struct sched_group *group;
runqueue_t *busiest;
@@ -2192,7 +2192,7 @@ out_one_pinned:
* Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
* this_rq is locked.
*/
-static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
+static int load_balance_newidle(const int this_cpu, runqueue_t *this_rq,
struct sched_domain *sd)
{
struct sched_group *group;
@@ -2252,7 +2252,7 @@ out_balanced:
* idle_balance is called by schedule() if this_cpu is about to become
* idle. Attempts to pull tasks from other CPUs.
*/
-static void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(const int this_cpu, runqueue_t *this_rq)
{
struct sched_domain *sd;

@@ -2274,7 +2274,7 @@ static void idle_balance(int this_cpu, r
*
* Called with busiest_rq locked.
*/
-static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
+static void active_load_balance(runqueue_t *busiest_rq, const int busiest_cpu)
{
struct sched_domain *sd;
runqueue_t *target_rq;
@@ -2328,8 +2328,8 @@ out:
/* Don't have all balancing operations going off at once */
#define CPU_OFFSET(cpu) (HZ * cpu / NR_CPUS)

-static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
- enum idle_type idle)
+static void rebalance_tick(const int this_cpu, runqueue_t *this_rq,
+ enum idle_type idle)
{
unsigned long old_load, this_load;
unsigned long j = jiffies + CPU_OFFSET(this_cpu);
@@ -2416,8 +2416,8 @@ EXPORT_PER_CPU_SYMBOL(kstat);
* This is called on clock ticks and on context switches.
* Bank in p->sched_time the ns elapsed since the last tick or switch.
*/
-static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
- unsigned long long now)
+static inline void update_cpu_clock(task_t *p, const runqueue_t *rq,
+ const unsigned long long now)
{
unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
p->sched_time += now - last;
@@ -2602,7 +2602,7 @@ static inline void wakeup_busy_runqueue(
resched_task(rq->idle);
}

-static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static void wake_sleeping_dependent(const int this_cpu, runqueue_t *this_rq)
{
struct sched_domain *tmp, *sd = NULL;
cpumask_t sibling_map;
@@ -2657,7 +2657,7 @@ static inline unsigned long smt_slice(co
return p->slice * (100 - sd->per_cpu_gain) / 100;
}

-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static int dependent_sleeper(const int this_cpu, runqueue_t *this_rq)
{
struct sched_domain *tmp, *sd = NULL;
cpumask_t sibling_map;
@@ -2765,7 +2765,7 @@ static inline int dependent_sleeper(int

#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)

-void fastcall add_preempt_count(int val)
+void fastcall add_preempt_count(const int val)
{
/*
* Underflow?
@@ -2779,7 +2779,7 @@ void fastcall add_preempt_count(int val)
}
EXPORT_SYMBOL(add_preempt_count);

-void fastcall sub_preempt_count(int val)
+void fastcall sub_preempt_count(const int val)
{
/*
* Underflow?
@@ -2813,8 +2813,8 @@ asmlinkage void __sched schedule(void)
* schedule() atomically, we ignore that path for now.
* Otherwise, whine if we are scheduling when we should not be.
*/
- if (likely(!current->exit_state)) {
- if (unlikely(in_atomic())) {
+ if (unlikely(in_atomic())) {
+ if (!current->exit_state) {
printk(KERN_ERR "scheduling while atomic: "
"%s/0x%08x/%d\n",
current->comm, preempt_count(), current->pid);
@@ -2850,7 +2850,6 @@ need_resched_nonpreemptible:
if (unlikely(prev->flags & PF_DEAD))
prev->state = EXIT_DEAD;

- switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
switch_count = &prev->nvcsw;
if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
@@ -2863,7 +2862,9 @@ need_resched_nonpreemptible:
}
deactivate_task(prev, rq);
}
- }
+ } else
+ switch_count = &prev->nivcsw;
+

cpu = smp_processor_id();
if (unlikely(!rq->nr_running)) {
@@ -3048,7 +3049,7 @@ EXPORT_SYMBOL(default_wake_function);
* zero in this (rare) case, and we handle it by continuing to scan the queue.
*/
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, int sync, void *key)
+ int nr_exclusive, const int sync, void *key)
{
struct list_head *tmp, *next;

@@ -3340,7 +3341,7 @@ long fastcall __sched sleep_on_timeout(w

EXPORT_SYMBOL(sleep_on_timeout);

-void set_user_nice(task_t *p, long nice)
+void set_user_nice(task_t *p, const long nice)
{
unsigned long flags;
runqueue_t *rq;
@@ -3476,7 +3477,7 @@ EXPORT_SYMBOL_GPL(task_nice);
* idle_cpu - is a given cpu idle currently?
* @cpu: the processor in question.
*/
-int idle_cpu(int cpu)
+int idle_cpu(const int cpu)
{
return cpu_curr(cpu) == cpu_rq(cpu)->idle;
}
@@ -3485,7 +3486,7 @@ int idle_cpu(int cpu)
* idle_task - return the idle task for a given cpu.
* @cpu: the processor in question.
*/
-task_t *idle_task(int cpu)
+task_t *idle_task(const int cpu)
{
return cpu_rq(cpu)->idle;
}
@@ -3494,13 +3495,13 @@ task_t *idle_task(int cpu)
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
*/
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline task_t *find_process_by_pid(const pid_t pid)
{
return pid ? find_task_by_pid(pid) : current;
}

/* Actually do priority change: must hold rq lock. */
-static void __setscheduler(struct task_struct *p, int policy, int prio)
+static void __setscheduler(task_t *p, const int policy, const int prio)
{
BUG_ON(task_queued(p));
p->policy = policy;
@@ -3608,7 +3609,8 @@ recheck:
EXPORT_SYMBOL_GPL(sched_setscheduler);

static int
-do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+do_sched_setscheduler(const pid_t pid, int policy,
+ struct sched_param __user *param)
{
int retval;
struct sched_param lparam;
@@ -3635,7 +3637,7 @@ do_sched_setscheduler(pid_t pid, int pol
* @policy: new policy.
* @param: structure containing the new RT priority.
*/
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+asmlinkage long sys_sched_setscheduler(const pid_t pid, int policy,
struct sched_param __user *param)
{
/* negative values for policy are not valid */
@@ -3650,7 +3652,8 @@ asmlinkage long sys_sched_setscheduler(p
* @pid: the pid in question.
* @param: structure containing the new RT priority.
*/
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+asmlinkage long sys_sched_setparam(const pid_t pid,
+ struct sched_param __user *param)
{
return do_sched_setscheduler(pid, -1, param);
}
@@ -3659,7 +3662,7 @@ asmlinkage long sys_sched_setparam(pid_t
* sys_sched_getscheduler - get the policy (scheduling class) of a thread
* @pid: the pid in question.
*/
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+asmlinkage long sys_sched_getscheduler(const pid_t pid)
{
int retval = -EINVAL;
task_t *p;
@@ -3686,7 +3689,8 @@ out_nounlock:
* @pid: the pid in question.
* @param: structure containing the RT priority.
*/
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+asmlinkage long sys_sched_getparam(const pid_t pid,
+ struct sched_param __user *param)
{
struct sched_param lp;
int retval = -EINVAL;
@@ -3721,7 +3725,7 @@ out_unlock:
return retval;
}

-long sched_setaffinity(pid_t pid, cpumask_t new_mask)
+long sched_setaffinity(const pid_t pid, cpumask_t new_mask)
{
task_t *p;
int retval;
@@ -3777,7 +3781,7 @@ static int get_user_cpu_mask(unsigned lo
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to the new cpu mask
*/
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
+asmlinkage long sys_sched_setaffinity(const pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
cpumask_t new_mask;
@@ -3805,7 +3809,7 @@ cpumask_t cpu_online_map __read_mostly =
cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
#endif

-long sched_getaffinity(pid_t pid, cpumask_t *mask)
+long sched_getaffinity(const pid_t pid, cpumask_t *mask)
{
int retval;
task_t *p;
@@ -3836,7 +3840,7 @@ out_unlock:
* @len: length in bytes of the bitmask pointed to by user_mask_ptr
* @user_mask_ptr: user-space pointer to hold the current cpu mask
*/
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+asmlinkage long sys_sched_getaffinity(const pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr)
{
int ret;
@@ -4017,7 +4021,7 @@ long __sched io_schedule_timeout(long ti
* this syscall returns the maximum rt_priority that can be used
* by a given scheduling class.
*/
-asmlinkage long sys_sched_get_priority_max(int policy)
+asmlinkage long sys_sched_get_priority_max(const int policy)
{
int ret = -EINVAL;

@@ -4041,7 +4045,7 @@ asmlinkage long sys_sched_get_priority_m
* this syscall returns the minimum rt_priority that can be used
* by a given scheduling class.
*/
-asmlinkage long sys_sched_get_priority_min(int policy)
+asmlinkage long sys_sched_get_priority_min(const int policy)
{
int ret = -EINVAL;

@@ -4066,7 +4070,8 @@ asmlinkage long sys_sched_get_priority_m
* into the user-space timespec buffer. A value of '0' means infinity.
*/
asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+long sys_sched_rr_get_interval(const pid_t pid,
+ struct timespec __user *interval)
{
int retval = -EINVAL;
struct timespec t;
@@ -4203,7 +4208,7 @@ void show_state(void)
* NOTE: this function does not set the idle thread's NEED_RESCHED
* flag, to make booting more robust.
*/
-void __devinit init_idle(task_t *idle, int cpu)
+void __devinit init_idle(task_t *idle, const int cpu)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long flags;
@@ -4306,7 +4311,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
*/
-static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static void __migrate_task(task_t *p, const int src_cpu, const int dest_cpu)
{
runqueue_t *rq_dest, *rq_src;

Index: linux-2.6.16-ck1/include/linux/sched.h
===================================================================
--- linux-2.6.16-ck1.orig/include/linux/sched.h 2006-03-20 20:46:48.000000000 +1100
+++ linux-2.6.16-ck1/include/linux/sched.h 2006-03-20 20:46:48.000000000 +1100
@@ -1064,7 +1064,7 @@ extern int FASTCALL(wake_up_process(stru
extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
unsigned long clone_flags));
#ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
+ extern void kick_process(const task_t *p);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif