/[pkg-src]/trunk/kernel26-magellan/patches-2.6.16-r12/0002-2.6.16-sched-smpnice-apply-review-suggestions.patch |
Contents of /trunk/kernel26-magellan/patches-2.6.16-r12/0002-2.6.16-sched-smpnice-apply-review-suggestions.patch
Parent Directory | Revision Log
Revision 72 -
(show annotations)
(download)
Mon Jun 5 09:25:38 2006 UTC (18 years, 3 months ago) by niro
File size: 4150 byte(s)
ver bump to 2.6.16-r12: - updated to linux-2.6.16.19 - updated to ck11
1 | |
2 | From: Peter Williams <pwil3058@bigpond.net.au> |
3 | |
4 | This patch applies the suggestions made by Con Kolivas for improving the |
5 | smpnice code. |
6 | |
7 | The non-cosmetic part of the patch addresses the fact that the mapping from nice |
8 | values to task load weights for negative nice values does not match the |
9 | implied CPU allocations in the function task_timeslice(). As suggested by |
10 | Con the mapping function now uses the time slice information directly (via |
11 | a slightly modified interface). |
12 | |
13 | Signed-off-by: Peter Williams <pwil3058@bigpond.com.au> |
14 | Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com> |
15 | Cc: "Chen, Kenneth W" <kenneth.w.chen@intel.com> |
16 | Cc: Ingo Molnar <mingo@elte.hu> |
17 | Cc: Nick Piggin <nickpiggin@yahoo.com.au> |
18 | Cc: Con Kolivas <kernel@kolivas.org> |
19 | Cc: John Hawkes <hawkes@sgi.com> |
20 | Signed-off-by: Andrew Morton <akpm@osdl.org> |
21 | include/linux/sched.h | 6 +++--- |
22 | kernel/sched.c | 34 +++++++++++++++++++++------------- |
23 | 2 files changed, 24 insertions(+), 16 deletions(-) |
24 | |
25 | Index: linux-2.6.16-ck1/include/linux/sched.h |
26 | =================================================================== |
27 | --- linux-2.6.16-ck1.orig/include/linux/sched.h 2006-03-20 20:46:44.000000000 +1100 |
28 | +++ linux-2.6.16-ck1/include/linux/sched.h 2006-03-20 20:46:44.000000000 +1100 |
29 | @@ -698,13 +698,13 @@ struct task_struct { |
30 | |
31 | int lock_depth; /* BKL lock depth */ |
32 | |
33 | -#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW) |
34 | +#ifdef CONFIG_SMP |
35 | +#ifdef __ARCH_WANT_UNLOCKED_CTXSW |
36 | int oncpu; |
37 | #endif |
38 | - int prio, static_prio; |
39 | -#ifdef CONFIG_SMP |
40 | int load_weight; /* for load balancing purposes */ |
41 | #endif |
42 | + int prio, static_prio; |
43 | struct list_head run_list; |
44 | prio_array_t *array; |
45 | |
46 | Index: linux-2.6.16-ck1/kernel/sched.c |
47 | =================================================================== |
48 | --- linux-2.6.16-ck1.orig/kernel/sched.c 2006-03-20 20:46:44.000000000 +1100 |
49 | +++ linux-2.6.16-ck1/kernel/sched.c 2006-03-20 20:46:44.000000000 +1100 |
50 | @@ -168,13 +168,19 @@ |
51 | #define SCALE_PRIO(x, prio) \ |
52 | max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE) |
53 | |
54 | -static unsigned int task_timeslice(task_t *p) |
55 | +static unsigned int static_prio_timeslice(int static_prio) |
56 | { |
57 | - if (p->static_prio < NICE_TO_PRIO(0)) |
58 | - return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio); |
59 | + if (static_prio < NICE_TO_PRIO(0)) |
60 | + return SCALE_PRIO(DEF_TIMESLICE*4, static_prio); |
61 | else |
62 | - return SCALE_PRIO(DEF_TIMESLICE, p->static_prio); |
63 | + return SCALE_PRIO(DEF_TIMESLICE, static_prio); |
64 | } |
65 | + |
66 | +static inline unsigned int task_timeslice(task_t *p) |
67 | +{ |
68 | + return static_prio_timeslice(p->static_prio); |
69 | +} |
70 | + |
71 | #define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \ |
72 | < (long long) (sd)->cache_hot_time) |
73 | |
74 | @@ -667,21 +673,23 @@ static int effective_prio(task_t *p) |
75 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
76 | * of tasks with abnormal "nice" values across CPUs the contribution that |
77 | * each task makes to its run queue's load is weighted according to its |
78 | - * scheduling class and "nice" value. |
79 | + * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a |
80 | + * scaled version of the new time slice allocation that they receive on time |
81 | + * slice expiry etc. |
82 | */ |
83 | |
84 | /* |
85 | - * Priority weight for load balancing ranges from 1/20 (nice==19) to 459/20 (RT |
86 | - * priority of 100). |
87 | + * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE |
88 | + * If static_prio_timeslice() is ever changed to break this assumption then |
89 | + * this code will need modification |
90 | */ |
91 | -#define NICE_TO_LOAD_PRIO(nice) \ |
92 | - ((nice >= 0) ? (20 - (nice)) : (20 + (nice) * (nice))) |
93 | +#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE |
94 | #define LOAD_WEIGHT(lp) \ |
95 | - (((lp) * SCHED_LOAD_SCALE) / NICE_TO_LOAD_PRIO(0)) |
96 | -#define NICE_TO_LOAD_WEIGHT(nice) LOAD_WEIGHT(NICE_TO_LOAD_PRIO(nice)) |
97 | -#define PRIO_TO_LOAD_WEIGHT(prio) NICE_TO_LOAD_WEIGHT(PRIO_TO_NICE(prio)) |
98 | + (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO) |
99 | +#define PRIO_TO_LOAD_WEIGHT(prio) \ |
100 | + LOAD_WEIGHT(static_prio_timeslice(prio)) |
101 | #define RTPRIO_TO_LOAD_WEIGHT(rp) \ |
102 | - LOAD_WEIGHT(NICE_TO_LOAD_PRIO(-20) + (rp)) |
103 | + (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp)) |
104 | |
105 | static inline void set_load_weight(task_t *p) |
106 | { |