Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.17-r7/0009-2.6.17-sched-idleprio-1.9.patch



Revision 199
Fri May 18 11:04:36 2007 UTC by niro
File size: 7179 bytes
-import

Add the SCHED_IDLEPRIO scheduling policy. Tasks set to this policy are only
given cpu time if no other tasks at all wish to have cpu time, thus running
effectively at idle priority. If semaphores or mutexes are held, or the
system is going into suspend, schedule them as SCHED_NORMAL nice 19.
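
As a quick illustration of how a process would opt in from userspace (this
sketch is not part of the patch; glibc knows nothing of SCHED_IDLEPRIO, so
the policy value 5 is lifted from the include/linux/sched.h hunk below):

	#include <sched.h>
	#include <stdio.h>

	#ifndef SCHED_IDLEPRIO
	#define SCHED_IDLEPRIO 5	/* value from the sched.h hunk in this patch */
	#endif

	int main(void)
	{
		/* Non-realtime policies require a sched_priority of 0. */
		struct sched_param sp = { .sched_priority = 0 };

		/* pid 0 means the calling process. */
		if (sched_setscheduler(0, SCHED_IDLEPRIO, &sp) == -1) {
			perror("sched_setscheduler");
			return 1;
		}
		/* From here on, this process only gets cpu time the system has spare. */
		return 0;
	}

Note that the sched_setscheduler() hunk below rejects SCHED_IDLEPRIO for
kernel threads (tasks with no mm), so this only makes sense for ordinary
userspace processes.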

Signed-off-by: Con Kolivas <kernel@kolivas.org>

 include/linux/init_task.h |    4 +-
 include/linux/sched.h     |    9 +++--
 kernel/sched.c            |   75 +++++++++++++++++++++++++++++++++++++++-------
 3 files changed, 72 insertions(+), 16 deletions(-)

Index: linux-ck-dev/include/linux/init_task.h
===================================================================
--- linux-ck-dev.orig/include/linux/init_task.h	2006-06-18 15:23:44.000000000 +1000
+++ linux-ck-dev/include/linux/init_task.h	2006-06-18 15:23:46.000000000 +1000
@@ -85,8 +85,8 @@ extern struct group_info init_groups;
 	.usage		= ATOMIC_INIT(2),		\
 	.flags		= 0,				\
 	.lock_depth	= -1,				\
-	.prio		= MAX_PRIO-20,			\
-	.static_prio	= MAX_PRIO-20,			\
+	.prio		= MAX_PRIO-21,			\
+	.static_prio	= MAX_PRIO-21,			\
 	.policy		= SCHED_NORMAL,			\
 	.cpus_allowed	= CPU_MASK_ALL,			\
 	.mm		= NULL,				\
Index: linux-ck-dev/include/linux/sched.h
===================================================================
--- linux-ck-dev.orig/include/linux/sched.h	2006-06-18 15:23:44.000000000 +1000
+++ linux-ck-dev/include/linux/sched.h	2006-06-18 15:23:46.000000000 +1000
@@ -165,9 +165,10 @@ extern unsigned long weighted_cpuload(co
 #define SCHED_RR		2
 #define SCHED_BATCH		3
 #define SCHED_ISO		4
+#define SCHED_IDLEPRIO		5
 
 #define SCHED_MIN		0
-#define SCHED_MAX		4
+#define SCHED_MAX		5
 
 #define SCHED_RANGE(policy)	((policy) <= SCHED_MAX)
 #define SCHED_RT(policy)	((policy) == SCHED_FIFO || \
@@ -492,12 +493,14 @@ struct signal_struct {
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
 #define ISO_PRIO		(MAX_RT_PRIO - 1)
 
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define MIN_USER_PRIO		(MAX_PRIO - 1)
+#define MAX_PRIO		(MAX_RT_PRIO + 41)
+#define MIN_USER_PRIO		(MAX_PRIO - 2)
+#define IDLEPRIO_PRIO		(MAX_PRIO - 1)
 
 #define rt_task(p)		(unlikely(SCHED_RT((p)->policy)))
 #define batch_task(p)		(unlikely((p)->policy == SCHED_BATCH))
 #define iso_task(p)		(unlikely((p)->policy == SCHED_ISO))
+#define idleprio_task(p)	(unlikely((p)->policy == SCHED_IDLEPRIO))
 
 /*
  * Some day this will be a full-fledged user tracking system..
Index: linux-ck-dev/kernel/sched.c
===================================================================
--- linux-ck-dev.orig/kernel/sched.c	2006-06-18 15:23:38.000000000 +1000
+++ linux-ck-dev/kernel/sched.c	2006-06-18 15:23:46.000000000 +1000
@@ -627,6 +627,12 @@ static void set_load_weight(task_t *p)
 		else
 #endif
 			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else if (idleprio_task(p)) {
+		/*
+		 * We want idleprio_tasks to have a presence on weighting but
+		 * as small as possible
+		 */
+		p->load_weight = 1;
 	} else
 		p->load_weight = TASK_LOAD_WEIGHT(p);
 }
@@ -734,13 +740,24 @@ static inline void slice_overrun(struct
 	} while (unlikely(p->totalrun > ns_slice));
 }
 
+static inline int idleprio_suitable(const struct task_struct *p)
+{
+	return (!p->mutexes_held &&
+		!(p->flags & (PF_FREEZE | PF_NONSLEEP)));
+}
+
+static inline int idleprio(const struct task_struct *p)
+{
+	return (p->prio == IDLEPRIO_PRIO);
+}
+
 /*
  * effective_prio - dynamic priority dependent on bonus.
  * The priority normally decreases by one each RR_INTERVAL.
  * As the bonus increases the initial priority starts at a higher "stair" or
  * priority for longer.
  */
-static int effective_prio(const task_t *p)
+static int effective_prio(task_t *p)
 {
 	int prio;
 	unsigned int full_slice, used_slice = 0;
@@ -760,6 +777,18 @@ static int effective_prio(const task_t *
 		return ISO_PRIO;
 	}
 
+	if (idleprio_task(p)) {
+		if (unlikely(!idleprio_suitable(p))) {
+			/*
+			 * If idleprio tasks are holding a semaphore, mutex,
+			 * or being frozen, schedule at a normal priority.
+			 */
+			p->time_slice = p->slice % RR_INTERVAL ? : RR_INTERVAL;
+			return MIN_USER_PRIO;
+		}
+		return IDLEPRIO_PRIO;
+	}
+
 	full_slice = slice(p);
 	if (full_slice > p->slice)
 		used_slice = full_slice - p->slice;
@@ -2582,7 +2611,7 @@ void account_user_time(struct task_struc
 
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
-	if (TASK_NICE(p) > 0)
+	if (TASK_NICE(p) > 0 || idleprio_task(p))
 		cpustat->nice = cputime64_add(cpustat->nice, tmp);
 	else
 		cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -2710,11 +2739,14 @@ void scheduler_tick(void)
 			}
 		} else
 			p->flags &= ~PF_ISOREF;
-	} else
-		/* SCHED_FIFO tasks never run out of timeslice. */
-		if (unlikely(p->policy == SCHED_FIFO))
-			goto out_unlock;
-
+	} else {
+		if (idleprio_task(p) && !idleprio(p) && idleprio_suitable(p))
+			set_tsk_need_resched(p);
+		else
+		/* SCHED_FIFO tasks never run out of timeslice. */
+		if (unlikely(p->policy == SCHED_FIFO))
+			goto out_unlock;
+	}
 
 	debit = ns_diff(rq->timestamp_last_tick, p->timestamp);
 	p->ns_debit += debit;
@@ -2855,11 +2887,24 @@ static int dependent_sleeper(int this_cp
 			if ((jiffies % DEF_TIMESLICE) >
 				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
 					ret = 1;
-		} else
+			else if (idleprio(p))
+				ret = 1;
+		} else {
 			if (smt_curr->static_prio < p->static_prio &&
 				!TASK_PREEMPTS_CURR(p, smt_rq) &&
 				smt_slice(smt_curr, sd) > slice(p))
 					ret = 1;
+			else if (idleprio(p) && !idleprio_task(smt_curr) &&
+				smt_curr->slice * sd->per_cpu_gain >
+				slice(smt_curr)) {
+				/*
+				 * With idleprio tasks they run just the last
+				 * per_cpu_gain percent of the smt task's
+				 * slice.
+				 */
+				ret = 1;
+			}
+		}
 
 unlock:
 	spin_unlock(&smt_rq->lock);
@@ -3479,8 +3524,9 @@ void set_user_nice(task_t *p, long nice)
 	 * If the task increased its priority or is running and
 	 * lowered its priority, then reschedule its CPU:
 	 */
-	if (delta < 0 || (delta > 0 && task_running(rq, p)))
-		resched_task(rq->curr);
+	if (delta < 0 || ((delta > 0 || idleprio_task(p)) &&
+		task_running(rq, p)))
+			resched_task(rq->curr);
 }
 out_unlock:
 	task_rq_unlock(rq, &flags);
@@ -3673,6 +3719,11 @@ recheck:
 		return -EPERM;
 	}
 
+	if (!(p->mm) && policy == SCHED_IDLEPRIO) {
+		/* Don't allow kernel threads to be SCHED_IDLEPRIO. */
+		return -EINVAL;
+	}
+
 	retval = security_task_setscheduler(p, policy, param);
 	if (retval)
 		return retval;
@@ -3971,7 +4022,7 @@ asmlinkage long sys_sched_yield(void)
 	schedstat_inc(rq, yld_cnt);
 	current->slice = slice(current);
 	current->time_slice = rr_interval(current);
-	if (likely(!rt_task(current)))
+	if (likely(!rt_task(current) && !idleprio(current)))
 		newprio = MIN_USER_PRIO;
 
 	requeue_task(current, rq, newprio);
@@ -4126,6 +4177,7 @@ asmlinkage long sys_sched_get_priority_m
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
 	case SCHED_ISO:
+	case SCHED_IDLEPRIO:
 		ret = 0;
 		break;
 	}
@@ -4151,6 +4203,7 @@ asmlinkage long sys_sched_get_priority_m
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
 	case SCHED_ISO:
+	case SCHED_IDLEPRIO:
 		ret = 0;
 	}
 	return ret;
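
For reference, a hedged sketch of what the priority arithmetic above works
out to, assuming MAX_USER_RT_PRIO = 100 as in mainline 2.6.17: MAX_PRIO grows
from 140 to 141, nice 19 keeps its old slot at 139 (now MIN_USER_PRIO), and
the new slot 140 (IDLEPRIO_PRIO) sits below every nice level:

	#include <stdio.h>

	#define MAX_USER_RT_PRIO	100			/* mainline 2.6.17 value */
	#define MAX_RT_PRIO		MAX_USER_RT_PRIO
	#define MAX_PRIO		(MAX_RT_PRIO + 41)	/* 141, was 140 */
	#define MIN_USER_PRIO		(MAX_PRIO - 2)		/* 139, the nice 19 slot */
	#define IDLEPRIO_PRIO		(MAX_PRIO - 1)		/* 140, below all nice levels */

	int main(void)
	{
		printf("MAX_PRIO=%d MIN_USER_PRIO=%d IDLEPRIO_PRIO=%d init prio=%d\n",
		       MAX_PRIO, MIN_USER_PRIO, IDLEPRIO_PRIO, MAX_PRIO - 21);
		return 0;
	}

The init_task change from MAX_PRIO-20 to MAX_PRIO-21 keeps the boot task at
the same effective priority (120) despite the enlarged range.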