Magellan Linux

Contents of /trunk/kernel26-alx/patches-2.6.20-r6/0007-2.6.20-sched-idleprio-1.1.patch



Revision 1175
Thu Oct 14 12:15:46 2010 UTC by niro
File size: 7317 bytes
-2.6.20-alx-r6 new magellan 0.5.2 kernel
Add the SCHED_IDLEPRIO scheduling policy. Tasks set to this policy are only
given cpu time if no other tasks at all wish to have cpu time, thus running
effectively at idle priority. If semaphores or mutexes are held, or the
system is going into suspend, they are scheduled as SCHED_NORMAL nice 19.

Bugfixes by Sergio Cerlesi & Serge Belyshev.

Signed-off-by: Con Kolivas <kernel@kolivas.org>

 include/linux/init_task.h |    6 ++--
 include/linux/sched.h     |    9 ++++--
 kernel/sched.c            |   68 +++++++++++++++++++++++++++++++++++++++++-----
 3 files changed, 70 insertions(+), 13 deletions(-)

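For illustration only (this snippet is not part of the patch): a userspace
process opts into the new policy through the ordinary sched_setscheduler(2)
call. The sketch below assumes the SCHED_IDLEPRIO value of 5 that this patch
adds to sched.h; glibc headers of the era do not define it, so it is declared
by hand here. On -ck kernels the schedtool utility exposed the same policy
(its -D flag) without writing any code.

#include <sched.h>
#include <stdio.h>

#ifndef SCHED_IDLEPRIO
#define SCHED_IDLEPRIO 5	/* value added by this patch; not in glibc headers */
#endif

int main(void)
{
	struct sched_param sp = { .sched_priority = 0 };	/* non-RT policies require priority 0 */

	/* pid 0 means the calling process */
	if (sched_setscheduler(0, SCHED_IDLEPRIO, &sp) == -1) {
		perror("sched_setscheduler");	/* e.g. EINVAL on kernels without this patch */
		return 1;
	}
	/* From here on, this process only runs when a CPU would otherwise be idle. */
	return 0;
}

Note that, per the sched_setscheduler() hunk below, the kernel refuses this
policy for kernel threads (tasks with no mm), returning -EINVAL.
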
Index: linux-2.6.20-ck1/include/linux/init_task.h
===================================================================
--- linux-2.6.20-ck1.orig/include/linux/init_task.h	2007-02-16 19:01:31.000000000 +1100
+++ linux-2.6.20-ck1/include/linux/init_task.h	2007-02-16 19:01:31.000000000 +1100
@@ -99,9 +99,9 @@ extern struct group_info init_groups;
 	.usage		= ATOMIC_INIT(2),		\
 	.flags		= 0,				\
 	.lock_depth	= -1,				\
-	.prio		= MAX_PRIO-20,			\
-	.static_prio	= MAX_PRIO-20,			\
-	.normal_prio	= MAX_PRIO-20,			\
+	.prio		= MAX_PRIO-21,			\
+	.static_prio	= MAX_PRIO-21,			\
+	.normal_prio	= MAX_PRIO-21,			\
 	.policy		= SCHED_NORMAL,			\
 	.cpus_allowed	= CPU_MASK_ALL,			\
 	.mm		= NULL,				\
Index: linux-2.6.20-ck1/include/linux/sched.h
===================================================================
--- linux-2.6.20-ck1.orig/include/linux/sched.h	2007-02-16 19:01:31.000000000 +1100
+++ linux-2.6.20-ck1/include/linux/sched.h	2007-02-16 19:01:31.000000000 +1100
@@ -35,10 +35,11 @@
 #define SCHED_RR		2
 #define SCHED_BATCH		3
 #define SCHED_ISO		4
+#define SCHED_IDLEPRIO		5
 
 #ifdef __KERNEL__
 
-#define SCHED_MAX		SCHED_ISO
+#define SCHED_MAX		SCHED_IDLEPRIO
 #define SCHED_RANGE(policy)	((policy) <= SCHED_MAX)
 
 struct sched_param {
@@ -529,8 +530,9 @@ struct signal_struct {
 #define MAX_RT_PRIO		MAX_USER_RT_PRIO
 #define ISO_PRIO		(MAX_RT_PRIO - 1)
 
-#define MAX_PRIO		(MAX_RT_PRIO + 40)
-#define MIN_USER_PRIO		(MAX_PRIO - 1)
+#define MAX_PRIO		(MAX_RT_PRIO + 41)
+#define MIN_USER_PRIO		(MAX_PRIO - 2)
+#define IDLEPRIO_PRIO		(MAX_PRIO - 1)
 
 #define rt_prio(prio)		unlikely((prio) < ISO_PRIO)
 #define rt_task(p)		rt_prio((p)->prio)
@@ -539,6 +541,7 @@ struct signal_struct {
 				(policy) == SCHED_RR)
 #define has_rt_policy(p)	unlikely(is_rt_policy((p)->policy))
 #define iso_task(p)		(unlikely((p)->policy == SCHED_ISO))
+#define idleprio_task(p)	(unlikely((p)->policy == SCHED_IDLEPRIO))
 
 /*
  * Some day this will be a full-fledged user tracking system..
Index: linux-2.6.20-ck1/kernel/sched.c
===================================================================
--- linux-2.6.20-ck1.orig/kernel/sched.c	2007-02-16 19:01:31.000000000 +1100
+++ linux-2.6.20-ck1/kernel/sched.c	2007-02-16 19:01:31.000000000 +1100
@@ -696,6 +696,12 @@ static void set_load_weight(struct task_
 		else
 #endif
 			p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+	} else if (idleprio_task(p)) {
+		/*
+		 * We want idleprio_tasks to have a presence on weighting but
+		 * as small as possible
+		 */
+		p->load_weight = 1;
 	} else
 		p->load_weight = TASK_LOAD_WEIGHT(p);
 }
@@ -872,6 +878,17 @@ static inline void recalc_task_prio(stru
 		continue_slice(p);
 }
 
+static inline int idleprio_suitable(struct task_struct *p)
+{
+	return (!p->mutexes_held && !freezing(p) &&
+		!(p->flags & (PF_NONSLEEP | PF_EXITING)));
+}
+
+static inline int idleprio(const struct task_struct *p)
+{
+	return (p->prio == IDLEPRIO_PRIO);
+}
+
 /*
  * __normal_prio - dynamic priority dependent on bonus.
  * The priority normally decreases by one each RR_INTERVAL.
@@ -895,6 +912,18 @@ static inline int __normal_prio(struct t
 		return ISO_PRIO;
 	}
 
+	if (idleprio_task(p)) {
+		if (unlikely(!idleprio_suitable(p))) {
+			/*
+			 * If idleprio tasks are holding a semaphore, mutex,
+			 * or being frozen, schedule at a normal priority.
+			 */
+			p->time_slice = p->slice % RR_INTERVAL ? : RR_INTERVAL;
+			return MIN_USER_PRIO;
+		}
+		return IDLEPRIO_PRIO;
+	}
+
 	full_slice = slice(p);
 	if (full_slice > p->slice)
 		used_slice = full_slice - p->slice;
@@ -1544,6 +1573,8 @@ out_activate:
 out_running:
 	p->state = TASK_RUNNING;
 out:
+	if (idleprio_task(p) && freezing(p) && idleprio(p))
+		requeue_task(p, rq, effective_prio(p));
 	task_rq_unlock(rq, &flags);
 
 	return success;
@@ -2941,7 +2972,7 @@ void account_user_time(struct task_struc
 
 	/* Add user time to cpustat. */
 	tmp = cputime_to_cputime64(cputime);
-	if (TASK_NICE(p) > 0)
+	if (TASK_NICE(p) > 0 || idleprio_task(p))
 		cpustat->nice = cputime64_add(cpustat->nice, tmp);
 	else
 		cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -3051,9 +3082,12 @@ static void task_running_tick(struct rq 
 		} else
 			p->flags &= ~PF_ISOREF;
 	} else {
-		/* SCHED_FIFO tasks never run out of timeslice. */
-		if (unlikely(p->policy == SCHED_FIFO))
-			goto out_unlock;
+		if (idleprio_task(p) && !idleprio(p) && idleprio_suitable(p))
+			set_tsk_need_resched(p);
+		else
+			/* SCHED_FIFO tasks never run out of timeslice. */
+			if (unlikely(p->policy == SCHED_FIFO))
+				goto out_unlock;
 	}
 
 	debit = ns_diff(rq->most_recent_timestamp, p->timestamp);
@@ -3219,11 +3253,23 @@ dependent_sleeper(int this_cpu, struct r
 			if ((jiffies % DEF_TIMESLICE) >
 				(sd->per_cpu_gain * DEF_TIMESLICE / 100))
 					ret = 1;
+			else if (idleprio(p))
+				ret = 1;
 		} else {
 			if (smt_curr->static_prio < p->static_prio &&
 				!TASK_PREEMPTS_CURR(p, smt_rq) &&
 				smt_slice(smt_curr, sd) > slice(p))
 					ret = 1;
+			else if (idleprio(p) && !idleprio_task(smt_curr) &&
+				smt_curr->slice * sd->per_cpu_gain >
+				slice(smt_curr)) {
+					/*
+					 * With idleprio tasks they run just the last
+					 * per_cpu_gain percent of the smt task's
+					 * slice.
+					 */
+					ret = 1;
+			}
 		}
 unlock:
 	spin_unlock(&smt_rq->lock);
@@ -3884,8 +3930,9 @@ void set_user_nice(struct task_struct *p
 	 * If the task increased its priority or is running and
 	 * lowered its priority, then reschedule its CPU:
 	 */
-	if (delta < 0 || (delta > 0 && task_running(rq, p)))
-		resched_task(rq->curr);
+	if (delta < 0 || ((delta > 0 || idleprio_task(p)) &&
+	    task_running(rq, p)))
+		resched_task(rq->curr);
 }
 out_unlock:
 	task_rq_unlock(rq, &flags);
@@ -4086,6 +4133,11 @@ recheck:
 		return -EPERM;
 	}
 
+	if (!(p->mm) && policy == SCHED_IDLEPRIO) {
+		/* Don't allow kernel threads to be SCHED_IDLEPRIO. */
+		return -EINVAL;
+	}
+
 	retval = security_task_setscheduler(p, policy, param);
 	if (retval)
 		return retval;
@@ -4407,7 +4459,7 @@ asmlinkage long sys_sched_yield(void)
 	schedstat_inc(rq, yld_cnt);
 	current->slice = slice(current);
 	current->time_slice = rr_interval(current);
-	if (likely(!rt_task(current)))
+	if (likely(!rt_task(current) && !idleprio(current)))
 		newprio = MIN_USER_PRIO;
 
 	requeue_task(current, rq, newprio);
@@ -4564,6 +4616,7 @@ asmlinkage long sys_sched_get_priority_m
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
 	case SCHED_ISO:
+	case SCHED_IDLEPRIO:
 		ret = 0;
 		break;
 	}
@@ -4589,6 +4642,7 @@ asmlinkage long sys_sched_get_priority_m
 	case SCHED_NORMAL:
 	case SCHED_BATCH:
 	case SCHED_ISO:
+	case SCHED_IDLEPRIO:
 		ret = 0;
 	}
 	return ret;
229 return ret;