Magellan Linux

Contents of /trunk/kernel26-magellan/patches-2.6.16-r3/0011-2.6.16-sched-idleprio-1.2.patch

Revision 70
Thu May 11 19:09:22 2006 UTC by niro
File size: 6218 bytes
import

 include/linux/init_task.h |  4 +--
 include/linux/sched.h     |  6 +++-
 kernel/sched.c            | 57 ++++++++++++++++++++++++++++++++++++++--------
 3 files changed, 54 insertions(+), 13 deletions(-)

Index: linux-2.6.16-ck1/include/linux/init_task.h
===================================================================
--- linux-2.6.16-ck1.orig/include/linux/init_task.h 2006-03-20 20:46:25.000000000 +1100
+++ linux-2.6.16-ck1/include/linux/init_task.h 2006-03-20 20:46:51.000000000 +1100
@@ -83,8 +83,8 @@ extern struct group_info init_groups;
 .usage = ATOMIC_INIT(2), \
 .flags = 0, \
 .lock_depth = -1, \
- .prio = MAX_PRIO-20, \
- .static_prio = MAX_PRIO-20, \
+ .prio = MAX_PRIO-21, \
+ .static_prio = MAX_PRIO-21, \
 .policy = SCHED_NORMAL, \
 .cpus_allowed = CPU_MASK_ALL, \
 .mm = NULL, \
Index: linux-2.6.16-ck1/include/linux/sched.h
===================================================================
--- linux-2.6.16-ck1.orig/include/linux/sched.h 2006-03-20 20:46:50.000000000 +1100
+++ linux-2.6.16-ck1/include/linux/sched.h 2006-03-20 20:46:51.000000000 +1100
@@ -163,9 +163,10 @@ extern unsigned long weighted_cpuload(co
 #define SCHED_RR 2
 #define SCHED_BATCH 3
 #define SCHED_ISO 4
+#define SCHED_IDLEPRIO 5

 #define SCHED_MIN 0
-#define SCHED_MAX 4
+#define SCHED_MAX 5

 #define SCHED_RANGE(policy) ((policy) >= SCHED_MIN && \
 (policy) <= SCHED_MAX)
@@ -497,10 +498,11 @@ struct signal_struct {
 #define MAX_USER_RT_PRIO 100
 #define MAX_RT_PRIO MAX_USER_RT_PRIO

-#define MAX_PRIO (MAX_RT_PRIO + 40)
+#define MAX_PRIO (MAX_RT_PRIO + 41)

 #define rt_task(p) (unlikely(SCHED_RT((p)->policy)))
 #define iso_task(p) ((p)->policy == SCHED_ISO)
+#define idleprio_task(p) ((p)->policy == SCHED_IDLEPRIO)

 /*
 * Some day this will be a full-fledged user tracking system..
Index: linux-2.6.16-ck1/kernel/sched.c
===================================================================
--- linux-2.6.16-ck1.orig/kernel/sched.c 2006-03-20 20:46:50.000000000 +1100
+++ linux-2.6.16-ck1/kernel/sched.c 2006-03-20 20:46:51.000000000 +1100
@@ -710,7 +710,7 @@ int sched_interactive __read_mostly = 1;
 * As the bonus increases the initial priority starts at a higher "stair" or
 * priority for longer.
 */
-static int effective_prio(const task_t *p)
+static int effective_prio(task_t *p)
 {
 int prio;
 unsigned int full_slice, used_slice = 0;
@@ -730,6 +730,20 @@ static int effective_prio(const task_t *
 return MAX_RT_PRIO - 1;
 }

+ if (idleprio_task(p)) {
+ if (unlikely(p->flags & (PF_NONSLEEP | PF_FREEZE))) {
+ /*
+ * If idleprio is waking up from in kernel activity
+ * or being frozen, reschedule at a normal priority
+ * to begin with.
+ */
+ p->time_slice = p->slice % RR_INTERVAL() ? :
+ RR_INTERVAL();
+ return MAX_PRIO - 2;
+ }
+ return MAX_PRIO - 1;
+ }
+
 full_slice = slice(p);
 if (full_slice > p->slice)
 used_slice = full_slice - p->slice;
@@ -741,8 +755,8 @@ static int effective_prio(const task_t *

 rr = rr_interval(p);
 prio += used_slice / rr;
- if (prio > MAX_PRIO - 1)
- prio = MAX_PRIO - 1;
+ if (prio > MAX_PRIO - 2)
+ prio = MAX_PRIO - 2;
 return prio;
 }

@@ -2470,7 +2484,7 @@ void account_user_time(struct task_struc

 /* Add user time to cpustat. */
 tmp = cputime_to_cputime64(cputime);
- if (TASK_NICE(p) > 0)
+ if (TASK_NICE(p) > 0 || idleprio_task(p))
 cpustat->nice = cputime64_add(cpustat->nice, tmp);
 else
 cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -2766,11 +2780,22 @@ static int dependent_sleeper(const int t
 if ((jiffies % DEF_TIMESLICE) >
 (sd->per_cpu_gain * DEF_TIMESLICE / 100))
 ret = 1;
- } else
+ else if (idleprio_task(p))
+ ret = 1;
+ } else {
 if (smt_curr->static_prio < p->static_prio &&
 !TASK_PREEMPTS_CURR(p, smt_rq) &&
 smt_slice(smt_curr, sd) > slice(p))
 ret = 1;
+ else if (idleprio_task(p) && !idleprio_task(smt_curr) &&
+ smt_curr->slice * sd->per_cpu_gain >
+ slice(smt_curr))
+ /*
+ * With batch tasks they run just the last
+ * per_cpu_gain percent of the smt task's slice.
+ */
+ ret = 1;
+ }

 check_smt_task:
 if ((!smt_curr->mm && smt_curr != smt_rq->idle) ||
@@ -2790,10 +2815,15 @@ check_smt_task:
 if ((jiffies % DEF_TIMESLICE) >
 (sd->per_cpu_gain * DEF_TIMESLICE / 100))
 resched_task(smt_curr);
+ else if (idleprio_task(smt_curr))
+ resched_task(smt_curr);
 } else {
 if (TASK_PREEMPTS_CURR(p, smt_rq) &&
 smt_slice(p, sd) > slice(smt_curr))
 resched_task(smt_curr);
+ else if (idleprio_task(smt_curr) && !idleprio_task(p) &&
+ p->slice * sd->per_cpu_gain > slice(p))
+ resched_task(smt_curr);
 else
 wakeup_busy_runqueue(smt_rq);
 }
@@ -3436,8 +3466,9 @@ void set_user_nice(task_t *p, const long
 * If the task increased its priority or is running and
 * lowered its priority, then reschedule its CPU:
 */
- if (delta < 0 || (delta > 0 && task_running(rq, p)))
- resched_task(rq->curr);
+ if (delta < 0 || ((delta > 0 || idleprio_task(p)) &&
+ task_running(rq, p)))
+ resched_task(rq->curr);
 }
 out_unlock:
 task_rq_unlock(rq, &flags);
@@ -3630,6 +3661,12 @@ recheck:
 return -EPERM;
 }

+ if (!(p->mm) && policy == SCHED_IDLEPRIO)
+ /*
+ * Don't allow kernel threads to be SCHED_IDLEPRIO.
+ */
+ return -EINVAL;
+
 retval = security_task_setscheduler(p, policy, param);
 if (retval)
 return retval;
@@ -3931,8 +3968,8 @@ asmlinkage long sys_sched_yield(void)
 schedstat_inc(rq, yld_cnt);
 current->slice = slice(current);
 current->time_slice = rr_interval(current);
- if (likely(!rt_task(current)))
- newprio = MAX_PRIO - 1;
+ if (likely(!rt_task(current) && !idleprio_task(current)))
+ newprio = MAX_PRIO - 2;

 if (newprio != current->prio) {
 dequeue_task(current, rq);
@@ -4091,6 +4128,7 @@ asmlinkage long sys_sched_get_priority_m
 case SCHED_NORMAL:
 case SCHED_BATCH:
 case SCHED_ISO:
+ case SCHED_IDLEPRIO:
 ret = 0;
 break;
 }
@@ -4116,6 +4154,7 @@ asmlinkage long sys_sched_get_priority_m
 case SCHED_NORMAL:
 case SCHED_BATCH:
 case SCHED_ISO:
+ case SCHED_IDLEPRIO:
 ret = 0;
 }
 return ret;
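
For context, the SCHED_IDLEPRIO policy added above is requested from userspace through the ordinary sched_setscheduler(2) call, the same path that the new !(p->mm) check in the setscheduler hunk guards against kernel threads. The sketch below is illustrative only and is not part of the patch: it assumes a kernel carrying this patch, the value 5 is copied from the "#define SCHED_IDLEPRIO 5" hunk because stock glibc headers do not define it, and, like the other non-realtime policies, a sched_priority of 0 is used.

/*
 * Minimal userspace sketch (not part of the patch): put the calling
 * process under the SCHED_IDLEPRIO policy introduced above.  Assumes a
 * kernel with this patch applied; the value 5 mirrors the sched.h hunk
 * and is not provided by stock glibc headers.
 */
#include <sched.h>
#include <stdio.h>

#ifndef SCHED_IDLEPRIO
#define SCHED_IDLEPRIO 5
#endif

int main(void)
{
	/* Non-realtime policies, including SCHED_IDLEPRIO, take priority 0. */
	struct sched_param param = { .sched_priority = 0 };

	/* pid 0 means "the calling process". */
	if (sched_setscheduler(0, SCHED_IDLEPRIO, &param) == -1) {
		perror("sched_setscheduler(SCHED_IDLEPRIO)");
		return 1;
	}

	/* ... run low-importance background work here ... */
	return 0;
}

As the effective_prio() hunk shows, such a task normally sits at MAX_PRIO - 1, the extra slot created by raising MAX_PRIO from MAX_RT_PRIO + 40 to MAX_RT_PRIO + 41, so in practice it only runs when no higher-priority task wants the CPU.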