Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/include/asm-alpha/bitops.h
Revision 630 - Wed Mar 4 11:03:09 2009 UTC (15 years, 3 months ago) by niro
File MIME type: text/plain
File size: 10786 byte(s)
Tag kernel26-2.6.12-alx-r9

#ifndef _ALPHA_BITOPS_H
#define _ALPHA_BITOPS_H

#include <linux/config.h>
#include <asm/compiler.h>

/*
 * Copyright 1994, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 *
 * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1).
 */

static inline void
set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bis %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}
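
/*
 * Usage sketch (editor's illustration, not part of the original file;
 * ready_map is a hypothetical bitmap):
 *
 *	static unsigned long ready_map[2];	// 128-bit bitmap
 *	set_bit(72, ready_map);			// sets bit 8 of the third 32-bit word
 *
 * If another CPU modifies the longword between the ldl_l and the stl_c,
 * the store-conditional writes 0 to %0 and the out-of-line "br 1b"
 * retries the whole load/modify/store sequence.
 */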

/*
 * WARNING: non-atomic version.
 */
static inline void
__set_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m |= 1 << (nr & 31);
}
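
/*
 * Note (editor's addition): __set_bit is a plain read-modify-write with
 * no LL/SC pair, so it is only safe when nothing else can touch the same
 * longword concurrently, e.g. on a bitmap not yet published to other
 * CPUs:
 *
 *	unsigned long map[4] = { 0 };	// local, not yet shared
 *	__set_bit(5, map);		// no atomicity needed here
 */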

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static inline void
clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	bic %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m &= ~(1 << (nr & 31));
}

static inline void
change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%3\n"
	"	xor %0,%2,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,2f\n"
	".subsection 2\n"
	"2:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m)
	:"Ir" (1UL << (nr & 31)), "m" (*m));
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ void
__change_bit(unsigned long nr, volatile void * addr)
{
	int *m = ((int *) addr) + (nr >> 5);

	*m ^= 1 << (nr & 31);
}

static inline int
test_and_set_bit(unsigned long nr, volatile void *addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	bne %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}
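
/*
 * Usage sketch (editor's illustration; "flags" is hypothetical): the
 * return value is the old state of the bit, so test_and_set_bit works
 * as a claim/lock primitive:
 *
 *	if (!test_and_set_bit(0, &flags))
 *		...			// bit was clear: we claimed it
 *
 * The "bne %2,2f" skips the store entirely when the bit is already set,
 * and on SMP the trailing mb orders the operation against later memory
 * accesses.
 */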

/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_set_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old | mask;
	return (old & mask) != 0;
}

static inline int
test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	beq %2,2f\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
	"2:\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non-atomic version.
 */
static inline int
__test_and_clear_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old & ~mask;
	return (old & mask) != 0;
}

static inline int
test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long oldbit;
	unsigned long temp;
	int *m = ((int *) addr) + (nr >> 5);

	__asm__ __volatile__(
	"1:	ldl_l %0,%4\n"
	"	and %0,%3,%2\n"
	"	xor %0,%3,%0\n"
	"	stl_c %0,%1\n"
	"	beq %0,3f\n"
#ifdef CONFIG_SMP
	"	mb\n"
#endif
	".subsection 2\n"
	"3:	br 1b\n"
	".previous"
	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");

	return oldbit != 0;
}

/*
 * WARNING: non-atomic version.
 */
static __inline__ int
__test_and_change_bit(unsigned long nr, volatile void * addr)
{
	unsigned long mask = 1 << (nr & 0x1f);
	int *m = ((int *) addr) + (nr >> 5);
	int old = *m;

	*m = old ^ mask;
	return (old & mask) != 0;
}

static inline int
test_bit(int nr, const volatile void * addr)
{
	return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
}
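
/*
 * Example (editor's note): test_bit is an ordinary load with no ordering
 * guarantees. For a longword holding 0x104 (bits 2 and 8 set):
 *
 *	test_bit(2, &word)	-> 1
 *	test_bit(3, &word)	-> 0
 */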

/*
 * ffz = Find First Zero in word. Undefined if no zero exists,
 * so code should check against ~0UL first.
 *
 * Do a binary search on the bits. Due to the nature of large
 * constants on the alpha, it is worthwhile to split the search.
 */
static inline unsigned long ffz_b(unsigned long x)
{
	unsigned long sum, x1, x2, x4;

	x = ~x & -~x;		/* set first 0 bit, clear others */
	x1 = x & 0xAA;
	x2 = x & 0xCC;
	x4 = x & 0xF0;
	sum = x2 ? 2 : 0;
	sum += (x4 != 0) * 4;
	sum += (x1 != 0);

	return sum;
}
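
/*
 * Worked example (editor's note): for x = 0xb7 (binary 10110111), the
 * expression ~x & -~x isolates the lowest zero bit as a mask, giving
 * 0x08. Masking with 0xCC (0x08) adds 2, with 0xF0 (0) adds nothing,
 * with 0xAA (0x08) adds 1, so ffz_b returns 3: bit 3 is the first zero
 * bit of 0xb7.
 */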

static inline unsigned long ffz(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(~word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(word, ~0UL);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(bits);

	return qofs*8 + bofs;
#endif
}
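
/*
 * How the fallback works (editor's gloss): cmpbge(word, ~0UL) produces
 * one mask bit per byte, set iff that byte of word equals 0xff. ffz_b on
 * the mask picks the first byte that still contains a zero bit (qofs),
 * extbl extracts that byte, and a second ffz_b finds the zero bit inside
 * it. E.g. word = 0x00000000ffffffff gives mask 0x0f, qofs = 4, byte 4
 * is 0x00, bofs = 0, so ffz returns 32.
 */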

/*
 * __ffs = Find First set bit in word.  Undefined if no set bit exists.
 */
static inline unsigned long __ffs(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	/* Whee.  EV67 can calculate it directly.  */
	return __kernel_cttz(word);
#else
	unsigned long bits, qofs, bofs;

	bits = __kernel_cmpbge(0, word);
	qofs = ffz_b(bits);
	bits = __kernel_extbl(word, qofs);
	bofs = ffz_b(~bits);

	return qofs*8 + bofs;
#endif
}
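
/*
 * Example (editor's note): __ffs(0x1000UL) == 12. The fallback mirrors
 * ffz: cmpbge(0, word) sets a mask bit for every all-zero byte, ffz_b
 * finds the first byte with a nonzero value, and ffz_b(~bits) locates
 * the set bit within that byte.
 */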

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, and therefore
 * differs in spirit from the above __ffs.
 */

static inline int ffs(int word)
{
	int result = __ffs(word) + 1;
	return word ? result : 0;
}
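
/*
 * Example (editor's note): ffs follows the 1-based libc convention:
 * ffs(0) == 0, ffs(1) == 1, ffs(0x8000) == 16. __ffs(word) is computed
 * even when word is 0, but the result is discarded in that case, so the
 * undefined value is harmless.
 */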

/*
 * fls: find last bit set.
 */
#if defined(__alpha_cix__) && defined(__alpha_fix__)
static inline int fls(int word)
{
	return 64 - __kernel_ctlz(word & 0xffffffff);
}
#else
#define fls	generic_fls
#endif
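
/*
 * Example (editor's note): on the CIX path, fls(1) == 1 and
 * fls(0x80000000) == 32; since ctlz of zero yields 64, fls(0) == 0,
 * matching the generic_fls convention.
 */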

/* Compute the floor and ceiling of log2 for the given integer. */
static inline long floor_log2(unsigned long word)
{
#if defined(__alpha_cix__) && defined(__alpha_fix__)
	return 63 - __kernel_ctlz(word);
#else
	long bit;
	for (bit = -1; word ; bit++)
		word >>= 1;
	return bit;
#endif
}

static inline long ceil_log2(unsigned long word)
{
	long bit = floor_log2(word);
	return bit + (word > (1UL << bit));
}
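
/*
 * Worked example (editor's addition): floor_log2(9) == 3 (the fallback
 * loop shifts 9 -> 4 -> 2 -> 1 -> 0 while stepping bit from -1 to 3),
 * and ceil_log2(9) == 3 + (9 > 8) == 4. For exact powers of two the two
 * results agree: floor_log2(8) == ceil_log2(8) == 3.
 */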

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word.
 */

#if defined(__alpha_cix__) && defined(__alpha_fix__)
/* Whee.  EV67 can calculate it directly.  */
static inline unsigned long hweight64(unsigned long w)
{
	return __kernel_ctpop(w);
}

#define hweight32(x)	(unsigned int) hweight64((x) & 0xfffffffful)
#define hweight16(x)	(unsigned int) hweight64((x) & 0xfffful)
#define hweight8(x)	(unsigned int) hweight64((x) & 0xfful)
#else
static inline unsigned long hweight64(unsigned long w)
{
	unsigned long result;
	for (result = 0; w ; w >>= 1)
		result += (w & 1);
	return result;
}

#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)
#endif
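
/*
 * Example (editor's note): hweight64(0xf0f0f0f0f0f0f0f0UL) == 32 on
 * either path; the EV67 version is a single ctpop, while the fallback
 * loops once per remaining bit of the operand.
 */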

#endif /* __KERNEL__ */

/*
 * Find next zero bit in a bitmap reasonably efficiently.
 */
static inline unsigned long
find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	p += offset >> 6;
	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)	/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}
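
/*
 * Usage sketch (editor's illustration; "map" is hypothetical):
 *
 *	unsigned long map[2] = { ~0UL, 0x1UL };	// bits 0..64 set
 *	find_next_zero_bit(map, 128, 0)		// returns 65
 *
 * The "tmp |= ~0UL >> (64-offset)" step fills the already-scanned low
 * bits with ones so that ffz cannot report a bit below offset.
 */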

/*
 * Find next one bit in a bitmap reasonably efficiently.
 */
static inline unsigned long
find_next_bit(const void * addr, unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr;
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	p += offset >> 6;
	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < 64)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while (size & ~63UL) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp &= ~0UL >> (64 - size);
	if (!tmp)
		return result + size;
found_middle:
	return result + __ffs(tmp);
}

/*
 * The optimizer actually generates good code for this case.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

#ifdef __KERNEL__

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline unsigned long
sched_find_first_bit(unsigned long b[3])
{
	unsigned long b0 = b[0], b1 = b[1], b2 = b[2];
	unsigned long ofs;

	ofs = (b1 ? 64 : 128);
	b1 = (b1 ? b1 : b2);
	ofs = (b0 ? 0 : ofs);
	b0 = (b0 ? b0 : b1);

	return __ffs(b0) + ofs;
}
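
/*
 * Worked example (editor's note): for b = { 0, 0x4UL, 0x1UL }, b1 is
 * nonzero, so ofs becomes 64 and b0 is replaced by b1 = 0x4; the result
 * is __ffs(0x4) + 64 == 66. The branch-free selects compile to Alpha
 * cmov instructions rather than conditional branches.
 */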

#define ext2_set_bit                 __test_and_set_bit
#define ext2_set_bit_atomic(l,n,a)   test_and_set_bit(n,a)
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a)
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ALPHA_BITOPS_H */