Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/lib/kernel_lock.c
Revision 630
Wed Mar 4 11:03:09 2009 UTC (15 years, 6 months ago) by niro
File MIME type: text/plain
File size: 5745 byte(s)
Tag kernel26-2.6.12-alx-r9

/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

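/*
 * Illustrative sketch (not part of the original file): how a legacy
 * subsystem typically uses the BKL. The ioctl handler below is a
 * hypothetical example, not code from this tree. Note that the lock
 * nests, and that it is silently dropped and reacquired if the holder
 * sleeps in schedule():
 *
 *	static int legacy_ioctl(unsigned int cmd)
 *	{
 *		lock_kernel();		// recursion-safe, counts depth
 *		do_legacy_work(cmd);	// may sleep; BKL dropped over schedule()
 *		unlock_kernel();	// released only at depth 0
 *		return 0;
 *	}
 */
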
#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
		defined(CONFIG_DEBUG_PREEMPT)

/*
 * Debugging check.
 */
unsigned int smp_processor_id(void)
{
	unsigned long preempt_count = preempt_count();
	int this_cpu = __smp_processor_id();
	cpumask_t this_mask;

	if (likely(preempt_count))
		goto out;

	if (irqs_disabled())
		goto out;

	/*
	 * Kernel threads bound to a single CPU can safely use
	 * smp_processor_id():
	 */
	this_mask = cpumask_of_cpu(this_cpu);

	if (cpus_equal(current->cpus_allowed, this_mask))
		goto out;

	/*
	 * It is valid to assume CPU-locality during early bootup:
	 */
	if (system_state != SYSTEM_RUNNING)
		goto out;

	/*
	 * Avoid recursion:
	 */
	preempt_disable();

	if (!printk_ratelimit())
		goto out_enable;

	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n",
			preempt_count(), current->comm, current->pid);
	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
	dump_stack();

out_enable:
	preempt_enable_no_resched();
out:
	return this_cpu;
}

EXPORT_SYMBOL(smp_processor_id);

#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
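
/*
 * Illustrative sketch (an assumption, not from the original file) of
 * the bug the check above catches: in preemptible code the task can
 * migrate between reading the CPU number and using it.
 *
 *	int cpu = smp_processor_id();	// preemption still enabled
 *	// <-- task may be preempted and migrated to another CPU here
 *	per_cpu(some_counter, cpu)++;	// now touches a remote CPU's data
 *
 * Safe callers either run in a preempt-disabled section (e.g. between
 * get_cpu() and put_cpu()) or are pinned to a single CPU, which is
 * exactly what the cpus_allowed test above checks for.
 */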

#ifdef CONFIG_PREEMPT_BKL
/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);

/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	struct task_struct *task = current;
	int saved_lock_depth = task->lock_depth;

	BUG_ON(saved_lock_depth < 0);

	task->lock_depth = -1;
	preempt_enable_no_resched();

	down(&kernel_sem);

	preempt_disable();
	task->lock_depth = saved_lock_depth;

	return 0;
}
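
/*
 * Illustrative trace (an assumption about the caller, not code from
 * this file): schedule() drops and retakes the BKL around a context
 * switch roughly like this, so a BKL holder can sleep safely:
 *
 *	if (task->lock_depth >= 0)
 *		__release_kernel_lock();	// before switching away
 *	...context switch...
 *	if (task->lock_depth >= 0)
 *		__reacquire_kernel_lock();	// after being rescheduled
 *
 * Setting lock_depth to -1 around the down() above keeps this very code
 * from recursing through schedule() while it sleeps on the semaphore.
 */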

void __lockfunc __release_kernel_lock(void)
{
	up(&kernel_sem);
}

/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
	struct task_struct *task = current;
	int depth = task->lock_depth + 1;

	if (likely(!depth))
		/*
		 * No recursion worries - we set up lock_depth _after_
		 * taking the semaphore:
		 */
		down(&kernel_sem);

	task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	struct task_struct *task = current;

	BUG_ON(task->lock_depth < 0);

	if (likely(--task->lock_depth < 0))
		up(&kernel_sem);
}
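
/*
 * Illustrative trace of the depth bookkeeping (an assumption, not part
 * of the original file); lock_depth starts at -1, meaning "not held":
 *
 *	lock_kernel();		// depth -1 -> 0, down(&kernel_sem)
 *	lock_kernel();		// depth  0 -> 1, semaphore untouched
 *	unlock_kernel();	// depth  1 -> 0, still held
 *	unlock_kernel();	// depth  0 -> -1, up(&kernel_sem)
 */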

#else

/*
 * The 'big kernel lock'
 *
 * This spinlock is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Don't use in new code.
 */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag);


/*
 * Acquire/release the underlying lock from the scheduler.
 *
 * This is called with preemption disabled, and should
 * return an error value if it cannot get the lock and
 * TIF_NEED_RESCHED gets set.
 *
 * If it successfully gets the lock, it should increment
 * the preemption count like any spinlock does.
 *
 * (This works on UP too - _raw_spin_trylock will never
 * return false in that case)
 */
int __lockfunc __reacquire_kernel_lock(void)
{
	while (!_raw_spin_trylock(&kernel_flag)) {
		if (test_thread_flag(TIF_NEED_RESCHED))
			return -EAGAIN;
		cpu_relax();
	}
	preempt_disable();
	return 0;
}

void __lockfunc __release_kernel_lock(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable_no_resched();
}
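
/*
 * Illustrative sketch (an assumption about the caller, not code from
 * this file): when __reacquire_kernel_lock() gives up with -EAGAIN
 * because a reschedule is pending, the scheduler goes back through
 * scheduling and retries, rather than spinning here with preemption
 * disabled; conceptually:
 *
 *	while (__reacquire_kernel_lock() < 0)
 *		schedule();	// service the pending reschedule, retry
 */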

/*
 * These are the BKL spinlocks - we try to be polite about preemption.
 * If SMP is not on (i.e. UP preemption), this all goes away because the
 * _raw_spin_trylock() will always succeed.
 */
#ifdef CONFIG_PREEMPT
static inline void __lock_kernel(void)
{
	preempt_disable();
	if (unlikely(!_raw_spin_trylock(&kernel_flag))) {
		/*
		 * If preemption was disabled even before this
		 * was called, there's nothing we can be polite
		 * about - just spin.
		 */
		if (preempt_count() > 1) {
			_raw_spin_lock(&kernel_flag);
			return;
		}

		/*
		 * Otherwise, let's wait for the kernel lock
		 * with preemption enabled..
		 */
		do {
			preempt_enable();
			while (spin_is_locked(&kernel_flag))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(&kernel_flag));
	}
}
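
/*
 * Note (added commentary, not in the original file): the
 * preempt_disable() at the top of __lock_kernel() itself accounts for
 * one level of preempt_count(), so the "> 1" test really asks "did the
 * caller already have preemption disabled?". The polite loop polls with
 * the read-only spin_is_locked() so waiters don't bounce the lock
 * cacheline, and only calls _raw_spin_trylock() once the lock looks
 * free.
 */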

#else

/*
 * Non-preemption case - just get the spinlock
 */
static inline void __lock_kernel(void)
{
	_raw_spin_lock(&kernel_flag);
}
#endif

static inline void __unlock_kernel(void)
{
	_raw_spin_unlock(&kernel_flag);
	preempt_enable();
}

/*
 * Getting the big kernel lock.
 *
 * This cannot happen asynchronously, so we only need to
 * worry about other CPUs.
 */
void __lockfunc lock_kernel(void)
{
	int depth = current->lock_depth + 1;
	if (likely(!depth))
		__lock_kernel();
	current->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
	BUG_ON(current->lock_depth < 0);
	if (likely(--current->lock_depth < 0))
		__unlock_kernel();
}

#endif

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);