 include/linux/sched.h |    2 
 kernel/sched.c        |  121 ++++++++++++++++++++++++++------------------------
 2 files changed, 64 insertions(+), 59 deletions(-)

Index: linux-2.6.16-ck1/kernel/sched.c
===================================================================
--- linux-2.6.16-ck1.orig/kernel/sched.c	2006-03-20 20:46:48.000000000 +1100
+++ linux-2.6.16-ck1/kernel/sched.c	2006-03-20 20:46:48.000000000 +1100
@@ -194,7 +194,7 @@ for (domain = rcu_dereference(cpu_rq(cpu
 #endif
 
 #ifndef __ARCH_WANT_UNLOCKED_CTXSW
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(const runqueue_t *rq, const task_t *p)
 {
 	return rq->curr == p;
 }
@@ -203,7 +203,7 @@ static inline void prepare_lock_switch(r
 {
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *rq, task_t *__unused)
 {
 #ifdef CONFIG_DEBUG_SPINLOCK
 	/* this is a valid case when another task releases the spinlock */
@@ -213,7 +213,7 @@ static inline void finish_lock_switch(ru
 }
 
 #else /* __ARCH_WANT_UNLOCKED_CTXSW */
-static inline int task_running(runqueue_t *rq, task_t *p)
+static inline int task_running(const runqueue_t *rq, const task_t *p)
 {
 #ifdef CONFIG_SMP
 	return p->oncpu;
@@ -239,7 +239,7 @@ static inline void prepare_lock_switch(r
 #endif
 }
 
-static inline void finish_lock_switch(runqueue_t *rq, task_t *prev)
+static inline void finish_lock_switch(runqueue_t *__unused, task_t *prev)
 {
 #ifdef CONFIG_SMP
 	/*
@@ -261,7 +261,7 @@ static inline void finish_lock_switch(ru
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
-static inline runqueue_t *task_rq_lock(task_t *p, unsigned long *flags)
+static inline runqueue_t *task_rq_lock(const task_t *p, unsigned long *flags)
 	__acquires(rq->lock)
 {
 	struct runqueue *rq;
@@ -611,7 +611,7 @@ static inline void inc_nr_running(task_t
 	inc_raw_weighted_load(rq, p);
 }
 
-static inline void dec_nr_running(task_t *p, runqueue_t *rq)
+static inline void dec_nr_running(const task_t *p, runqueue_t *rq)
 {
 	rq->nr_running--;
 	dec_raw_weighted_load(rq, p);
@@ -831,7 +831,7 @@ static void fastcall deactivate_task(tas
 * the target CPU.
 */
 #ifdef CONFIG_SMP
-static void resched_task(task_t *p)
+static void fastcall resched_task(task_t *p)
 {
 	int cpu;
 
@@ -888,7 +888,7 @@ typedef struct {
 * The task's runqueue lock must be held.
 * Returns true if you have to wait for migration thread.
 */
-static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
+static int migrate_task(task_t *p, const int dest_cpu, migration_req_t *req)
 {
 	runqueue_t *rq = task_rq(p);
 
@@ -951,7 +951,7 @@ repeat:
 * to another CPU then no harm is done and the purpose has been
 * achieved as well.
 */
-void kick_process(task_t *p)
+void kick_process(const task_t *p)
 {
 	int cpu;
 
@@ -1009,7 +1009,7 @@ static inline unsigned long cpu_avg_load
 * domain.
 */
 static struct sched_group *
-find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
+find_idlest_group(struct sched_domain *sd, task_t *p, const int this_cpu)
 {
 	struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long min_load = ULONG_MAX, this_load = 0;
@@ -1063,7 +1063,7 @@ nextgroup:
 * find_idlest_queue - find the idlest runqueue among the cpus in group.
 */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
+find_idlest_cpu(struct sched_group *group, task_t *p, const int this_cpu)
 {
 	cpumask_t tmp;
 	unsigned long load, min_load = ULONG_MAX;
@@ -1096,7 +1096,7 @@ find_idlest_cpu(struct sched_group *grou
 *
 * preempt must be disabled.
 */
-static int sched_balance_self(int cpu, int flag)
+static int sched_balance_self(int cpu, const int flag)
 {
 	struct task_struct *t = current;
 	struct sched_domain *tmp, *sd = NULL;
@@ -1148,7 +1148,7 @@ nextlevel:
 * Returns the CPU we should wake onto.
 */
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
-static int wake_idle(int cpu, task_t *p)
+static int wake_idle(const int cpu, const task_t *p)
 {
 	cpumask_t tmp;
 	struct sched_domain *sd;
@@ -1171,7 +1171,7 @@ static int wake_idle(int cpu, task_t *p)
 	return cpu;
 }
 #else
-static inline int wake_idle(int cpu, task_t *p)
+static inline int wake_idle(const int cpu, task_t *__unused)
 {
 	return cpu;
 }
@@ -1211,7 +1211,7 @@ static void fastcall preempt(const task_
 *
 * returns failure only if the task is already active.
 */
-static int try_to_wake_up(task_t *p, unsigned int state, int sync)
+static int try_to_wake_up(task_t *p, unsigned int state, const int sync)
 {
 	int cpu, this_cpu, success = 0;
 	unsigned long flags;
@@ -1369,7 +1369,7 @@ int fastcall wake_up_state(task_t *p, un
 * Perform scheduler related setup for a newly forked process p.
 * p is forked by current.
 */
-void fastcall sched_fork(task_t *p, int clone_flags)
+void fastcall sched_fork(task_t *p, int __unused)
 {
 	int cpu = get_cpu();
 
@@ -1406,7 +1406,7 @@ void fastcall sched_fork(task_t *p, int 
 * that must be done for every newly created context, then puts the task
 * on the runqueue and wakes it.
 */
-void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
+void fastcall wake_up_new_task(task_t *p, const unsigned long clone_flags)
 {
 	unsigned long flags;
 	int this_cpu, cpu;
@@ -1690,7 +1690,7 @@ static void double_lock_balance(runqueue
 * allow dest_cpu, which will force the cpu onto dest_cpu. Then
 * the cpu_allowed mask is restored.
 */
-static void sched_migrate_task(task_t *p, int dest_cpu)
+static void sched_migrate_task(task_t *p, const int dest_cpu)
 {
 	migration_req_t req;
 	runqueue_t *rq;
@@ -1869,8 +1869,8 @@ out:
 * moved to restore balance via the imbalance parameter.
 */
 static struct sched_group *
-find_busiest_group(struct sched_domain *sd, int this_cpu,
-		   unsigned long *imbalance, enum idle_type idle, int *sd_idle)
+find_busiest_group(struct sched_domain *sd, const int this_cpu,
+		   unsigned long *imbalance, const enum idle_type idle, int *sd_idle)
 {
 	struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
 	unsigned long max_load, avg_load, total_load, this_load, total_pwr;
@@ -2035,7 +2035,7 @@ out_balanced:
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
 static runqueue_t *find_busiest_queue(struct sched_group *group,
-	enum idle_type idle)
+	const enum idle_type idle)
 {
 	unsigned long load, max_load = 0;
 	runqueue_t *busiest = NULL;
@@ -2066,8 +2066,8 @@ static runqueue_t *find_busiest_queue(st
 *
 * Called with this_rq unlocked.
 */
-static int load_balance(int this_cpu, runqueue_t *this_rq,
-			struct sched_domain *sd, enum idle_type idle)
+static int load_balance(const int this_cpu, runqueue_t *this_rq,
+			struct sched_domain *sd, const enum idle_type idle)
 {
 	struct sched_group *group;
 	runqueue_t *busiest;
@@ -2192,7 +2192,7 @@ out_one_pinned:
 * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
 * this_rq is locked.
 */
-static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
+static int load_balance_newidle(const int this_cpu, runqueue_t *this_rq,
 				struct sched_domain *sd)
 {
 	struct sched_group *group;
@@ -2252,7 +2252,7 @@ out_balanced:
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
-static void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(const int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2274,7 +2274,7 @@ static void idle_balance(int this_cpu, r
 *
 * Called with busiest_rq locked.
 */
-static void active_load_balance(runqueue_t *busiest_rq, int busiest_cpu)
+static void active_load_balance(runqueue_t *busiest_rq, const int busiest_cpu)
 {
 	struct sched_domain *sd;
 	runqueue_t *target_rq;
@@ -2328,8 +2328,8 @@ out:
 /* Don't have all balancing operations going off at once */
 #define CPU_OFFSET(cpu)	(HZ * cpu / NR_CPUS)
 
-static void rebalance_tick(int this_cpu, runqueue_t *this_rq,
-			   enum idle_type idle)
+static void rebalance_tick(const int this_cpu, runqueue_t *this_rq,
+				enum idle_type idle)
 {
 	unsigned long old_load, this_load;
 	unsigned long j = jiffies + CPU_OFFSET(this_cpu);
@@ -2416,8 +2416,8 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 * This is called on clock ticks and on context switches.
 * Bank in p->sched_time the ns elapsed since the last tick or switch.
 */
-static inline void update_cpu_clock(task_t *p, runqueue_t *rq,
-				    unsigned long long now)
+static inline void update_cpu_clock(task_t *p, const runqueue_t *rq,
+				    const unsigned long long now)
 {
 	unsigned long long last = max(p->timestamp, rq->timestamp_last_tick);
 	p->sched_time += now - last;
@@ -2602,7 +2602,7 @@ static inline void wakeup_busy_runqueue(
 		resched_task(rq->idle);
 }
 
-static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static void wake_sleeping_dependent(const int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2657,7 +2657,7 @@ static inline unsigned long smt_slice(co
 	return p->slice * (100 - sd->per_cpu_gain) / 100;
 }
 
-static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static int dependent_sleeper(const int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2765,7 +2765,7 @@ static inline int dependent_sleeper(int 
 
 #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
 
-void fastcall add_preempt_count(int val)
+void fastcall add_preempt_count(const int val)
 {
 	/*
 	 * Underflow?
@@ -2779,7 +2779,7 @@ void fastcall add_preempt_count(int val)
 }
 EXPORT_SYMBOL(add_preempt_count);
 
-void fastcall sub_preempt_count(int val)
+void fastcall sub_preempt_count(const int val)
 {
 	/*
 	 * Underflow?
@@ -2813,8 +2813,8 @@ asmlinkage void __sched schedule(void)
 	 * schedule() atomically, we ignore that path for now.
 	 * Otherwise, whine if we are scheduling when we should not be.
 	 */
-	if (likely(!current->exit_state)) {
-		if (unlikely(in_atomic())) {
+	if (unlikely(in_atomic())) {
+		if (!current->exit_state) {
 			printk(KERN_ERR "scheduling while atomic: "
 				"%s/0x%08x/%d\n",
 				current->comm, preempt_count(), current->pid);
@@ -2850,7 +2850,6 @@ need_resched_nonpreemptible:
 	if (unlikely(prev->flags & PF_DEAD))
 		prev->state = EXIT_DEAD;
 
-	switch_count = &prev->nivcsw;
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		switch_count = &prev->nvcsw;
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
@@ -2863,7 +2862,9 @@ need_resched_nonpreemptible:
 			}
 			deactivate_task(prev, rq);
 		}
-	}
+	} else
+		switch_count = &prev->nivcsw;
+
 	cpu = smp_processor_id();
 	if (unlikely(!rq->nr_running)) {
@@ -3048,7 +3049,7 @@ EXPORT_SYMBOL(default_wake_function);
 * zero in this (rare) case, and we handle it by continuing to scan the queue.
 */
 static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
-			     int nr_exclusive, int sync, void *key)
+			     int nr_exclusive, const int sync, void *key)
 {
 	struct list_head *tmp, *next;
 
@@ -3340,7 +3341,7 @@ long fastcall __sched sleep_on_timeout(w
 
 EXPORT_SYMBOL(sleep_on_timeout);
 
-void set_user_nice(task_t *p, long nice)
+void set_user_nice(task_t *p, const long nice)
 {
 	unsigned long flags;
 	runqueue_t *rq;
@@ -3476,7 +3477,7 @@ EXPORT_SYMBOL_GPL(task_nice);
 * idle_cpu - is a given cpu idle currently?
 * @cpu: the processor in question.
 */
-int idle_cpu(int cpu)
+int idle_cpu(const int cpu)
 {
 	return cpu_curr(cpu) == cpu_rq(cpu)->idle;
 }
@@ -3485,7 +3486,7 @@ int idle_cpu(int cpu)
 * idle_task - return the idle task for a given cpu.
 * @cpu: the processor in question.
 */
-task_t *idle_task(int cpu)
+task_t *idle_task(const int cpu)
 {
 	return cpu_rq(cpu)->idle;
 }
@@ -3494,13 +3495,13 @@ task_t *idle_task(int cpu)
 * find_process_by_pid - find a process with a matching PID value.
 * @pid: the pid in question.
 */
-static inline task_t *find_process_by_pid(pid_t pid)
+static inline task_t *find_process_by_pid(const pid_t pid)
 {
 	return pid ? find_task_by_pid(pid) : current;
 }
 
 /* Actually do priority change: must hold rq lock. */
-static void __setscheduler(struct task_struct *p, int policy, int prio)
+static void __setscheduler(task_t *p, const int policy, const int prio)
 {
 	BUG_ON(task_queued(p));
 	p->policy = policy;
@@ -3608,7 +3609,8 @@ recheck:
 EXPORT_SYMBOL_GPL(sched_setscheduler);
 
 static int
-do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
+do_sched_setscheduler(const pid_t pid, int policy,
+		      struct sched_param __user *param)
 {
 	int retval;
 	struct sched_param lparam;
@@ -3635,7 +3637,7 @@ do_sched_setscheduler(pid_t pid, int pol
 * @policy: new policy.
 * @param: structure containing the new RT priority.
 */
-asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
+asmlinkage long sys_sched_setscheduler(const pid_t pid, int policy,
 				       struct sched_param __user *param)
 {
 	/* negative values for policy are not valid */
@@ -3650,7 +3652,8 @@ asmlinkage long sys_sched_setscheduler(p
 * @pid: the pid in question.
 * @param: structure containing the new RT priority.
 */
-asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
+asmlinkage long sys_sched_setparam(const pid_t pid,
+				   struct sched_param __user *param)
 {
 	return do_sched_setscheduler(pid, -1, param);
 }
@@ -3659,7 +3662,7 @@ asmlinkage long sys_sched_setparam(pid_t
 * sys_sched_getscheduler - get the policy (scheduling class) of a thread
 * @pid: the pid in question.
 */
-asmlinkage long sys_sched_getscheduler(pid_t pid)
+asmlinkage long sys_sched_getscheduler(const pid_t pid)
 {
 	int retval = -EINVAL;
 	task_t *p;
@@ -3686,7 +3689,8 @@ out_nounlock:
 * @pid: the pid in question.
 * @param: structure containing the RT priority.
 */
-asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
+asmlinkage long sys_sched_getparam(const pid_t pid,
+				   struct sched_param __user *param)
 {
 	struct sched_param lp;
 	int retval = -EINVAL;
@@ -3721,7 +3725,7 @@ out_unlock:
 	return retval;
 }
 
-long sched_setaffinity(pid_t pid, cpumask_t new_mask)
+long sched_setaffinity(const pid_t pid, cpumask_t new_mask)
 {
 	task_t *p;
 	int retval;
@@ -3777,7 +3781,7 @@ static int get_user_cpu_mask(unsigned lo
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to the new cpu mask
 */
-asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
+asmlinkage long sys_sched_setaffinity(const pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
 	cpumask_t new_mask;
@@ -3805,7 +3809,7 @@ cpumask_t cpu_online_map __read_mostly =
 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
 #endif
 
-long sched_getaffinity(pid_t pid, cpumask_t *mask)
+long sched_getaffinity(const pid_t pid, cpumask_t *mask)
 {
 	int retval;
 	task_t *p;
@@ -3836,7 +3840,7 @@ out_unlock:
 * @len: length in bytes of the bitmask pointed to by user_mask_ptr
 * @user_mask_ptr: user-space pointer to hold the current cpu mask
 */
-asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
+asmlinkage long sys_sched_getaffinity(const pid_t pid, unsigned int len,
 				      unsigned long __user *user_mask_ptr)
 {
 	int ret;
@@ -4017,7 +4021,7 @@ long __sched io_schedule_timeout(long ti
 * this syscall returns the maximum rt_priority that can be used
 * by a given scheduling class.
 */
-asmlinkage long sys_sched_get_priority_max(int policy)
+asmlinkage long sys_sched_get_priority_max(const int policy)
 {
 	int ret = -EINVAL;
 
@@ -4041,7 +4045,7 @@ asmlinkage long sys_sched_get_priority_m
 * this syscall returns the minimum rt_priority that can be used
 * by a given scheduling class.
 */
-asmlinkage long sys_sched_get_priority_min(int policy)
+asmlinkage long sys_sched_get_priority_min(const int policy)
 {
 	int ret = -EINVAL;
 
@@ -4066,7 +4070,8 @@ asmlinkage long sys_sched_get_priority_m
 * into the user-space timespec buffer. A value of '0' means infinity.
 */
 asmlinkage
-long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
+long sys_sched_rr_get_interval(const pid_t pid,
+			       struct timespec __user *interval)
 {
 	int retval = -EINVAL;
 	struct timespec t;
@@ -4203,7 +4208,7 @@ void show_state(void)
 * NOTE: this function does not set the idle thread's NEED_RESCHED
 * flag, to make booting more robust.
 */
-void __devinit init_idle(task_t *idle, int cpu)
+void __devinit init_idle(task_t *idle, const int cpu)
 {
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -4306,7 +4311,7 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
 * So we race with normal scheduler movements, but that's OK, as long
 * as the task is no longer on this CPU.
 */
-static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
+static void __migrate_task(task_t *p, const int src_cpu, const int dest_cpu)
 {
 	runqueue_t *rq_dest, *rq_src;
 
Index: linux-2.6.16-ck1/include/linux/sched.h
===================================================================
--- linux-2.6.16-ck1.orig/include/linux/sched.h	2006-03-20 20:46:48.000000000 +1100
+++ linux-2.6.16-ck1/include/linux/sched.h	2006-03-20 20:46:48.000000000 +1100
@@ -1064,7 +1064,7 @@ extern int FASTCALL(wake_up_process(stru
 extern void FASTCALL(wake_up_new_task(struct task_struct * tsk,
 						unsigned long clone_flags));
 #ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
+ extern void kick_process(const task_t *p);
 #else
 static inline void kick_process(struct task_struct *tsk) { }
 #endif
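
For readers less familiar with the motivation, here is a minimal standalone C sketch (not part of the patch; the structs are simplified stand-ins for runqueue_t/task_t) of what the added const qualifiers buy: a pointer parameter that is only read can be declared const, so an accidental store through it becomes a compile-time error, and each prototype now documents which arguments the function may modify.

/* Standalone illustration only -- simplified stand-ins, not kernel code. */
#include <stdio.h>

struct task { int oncpu; };
struct runqueue { struct task *curr; unsigned long nr_running; };

/* Pure readers can take const pointers, as the patch does for task_running(). */
static int task_running(const struct runqueue *rq, const struct task *p)
{
	return rq->curr == p;
}

/* The runqueue is written here, so only the task pointer can be const. */
static void dec_nr_running(const struct task *p, struct runqueue *rq)
{
	(void)p;		/* p is only inspected, never written */
	rq->nr_running--;
	/* p->oncpu = 0; would now be rejected by the compiler */
}

int main(void)
{
	struct task t = { 1 };
	struct runqueue rq = { &t, 1 };

	dec_nr_running(&t, &rq);
	printf("running=%d nr_running=%lu\n", task_running(&rq, &t), rq.nr_running);
	return 0;
}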