Contents of /trunk/kernel26-magellan/patches-2.6.34-r1/0100-2.6.34.1-all-fixes.patch
Parent Directory | Revision Log
Revision 1114 -
(show annotations)
(download)
Sun Aug 22 17:59:15 2010 UTC (14 years, 1 month ago) by niro
File size: 295155 byte(s)
-added
1 | diff --git a/Documentation/hwmon/ltc4245 b/Documentation/hwmon/ltc4245 |
2 | index 02838a4..86b5880 100644 |
3 | --- a/Documentation/hwmon/ltc4245 |
4 | +++ b/Documentation/hwmon/ltc4245 |
5 | @@ -72,9 +72,7 @@ in6_min_alarm 5v output undervoltage alarm |
6 | in7_min_alarm 3v output undervoltage alarm |
7 | in8_min_alarm Vee (-12v) output undervoltage alarm |
8 | |
9 | -in9_input GPIO #1 voltage data |
10 | -in10_input GPIO #2 voltage data |
11 | -in11_input GPIO #3 voltage data |
12 | +in9_input GPIO voltage data |
13 | |
14 | power1_input 12v power usage (mW) |
15 | power2_input 5v power usage (mW) |
16 | diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c |
17 | index a52a27c..6f80665 100644 |
18 | --- a/arch/arm/common/sa1111.c |
19 | +++ b/arch/arm/common/sa1111.c |
20 | @@ -951,8 +951,6 @@ static int sa1111_resume(struct platform_device *dev) |
21 | if (!save) |
22 | return 0; |
23 | |
24 | - spin_lock_irqsave(&sachip->lock, flags); |
25 | - |
26 | /* |
27 | * Ensure that the SA1111 is still here. |
28 | * FIXME: shouldn't do this here. |
29 | @@ -969,6 +967,13 @@ static int sa1111_resume(struct platform_device *dev) |
30 | * First of all, wake up the chip. |
31 | */ |
32 | sa1111_wake(sachip); |
33 | + |
34 | + /* |
35 | + * Only lock for write ops. Also, sa1111_wake must be called with |
36 | + * released spinlock! |
37 | + */ |
38 | + spin_lock_irqsave(&sachip->lock, flags); |
39 | + |
40 | sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0); |
41 | sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1); |
42 | |
43 | diff --git a/arch/arm/mach-mx2/devices.c b/arch/arm/mach-mx2/devices.c |
44 | index b91e412..04f36d8 100644 |
45 | --- a/arch/arm/mach-mx2/devices.c |
46 | +++ b/arch/arm/mach-mx2/devices.c |
47 | @@ -483,8 +483,8 @@ int __init mxc_register_gpios(void) |
48 | #ifdef CONFIG_MACH_MX21 |
49 | static struct resource mx21_usbhc_resources[] = { |
50 | { |
51 | - .start = MX21_BASE_ADDR, |
52 | - .end = MX21_BASE_ADDR + 0x1FFF, |
53 | + .start = MX21_USBOTG_BASE_ADDR, |
54 | + .end = MX21_USBOTG_BASE_ADDR + SZ_8K - 1, |
55 | .flags = IORESOURCE_MEM, |
56 | }, |
57 | { |
58 | diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S |
59 | index 06a90dc..37c8157 100644 |
60 | --- a/arch/arm/mm/cache-v7.S |
61 | +++ b/arch/arm/mm/cache-v7.S |
62 | @@ -91,7 +91,11 @@ ENTRY(v7_flush_kern_cache_all) |
63 | THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) |
64 | bl v7_flush_dcache_all |
65 | mov r0, #0 |
66 | +#ifdef CONFIG_SMP |
67 | + mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable |
68 | +#else |
69 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate |
70 | +#endif |
71 | ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) |
72 | THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) |
73 | mov pc, lr |
74 | diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c |
75 | index 5eb4fd9..ac163de 100644 |
76 | --- a/arch/arm/mm/copypage-feroceon.c |
77 | +++ b/arch/arm/mm/copypage-feroceon.c |
78 | @@ -18,7 +18,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom) |
79 | { |
80 | asm("\ |
81 | stmfd sp!, {r4-r9, lr} \n\ |
82 | - mov ip, %0 \n\ |
83 | + mov ip, %2 \n\ |
84 | 1: mov lr, r1 \n\ |
85 | ldmia r1!, {r2 - r9} \n\ |
86 | pld [lr, #32] \n\ |
87 | @@ -64,7 +64,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom) |
88 | mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ |
89 | ldmfd sp!, {r4-r9, pc}" |
90 | : |
91 | - : "I" (PAGE_SIZE)); |
92 | + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE)); |
93 | } |
94 | |
95 | void feroceon_copy_user_highpage(struct page *to, struct page *from, |
96 | diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c |
97 | index 7c2eb55..cb589cb 100644 |
98 | --- a/arch/arm/mm/copypage-v4wb.c |
99 | +++ b/arch/arm/mm/copypage-v4wb.c |
100 | @@ -27,7 +27,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom) |
101 | { |
102 | asm("\ |
103 | stmfd sp!, {r4, lr} @ 2\n\ |
104 | - mov r2, %0 @ 1\n\ |
105 | + mov r2, %2 @ 1\n\ |
106 | ldmia r1!, {r3, r4, ip, lr} @ 4\n\ |
107 | 1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ |
108 | stmia r0!, {r3, r4, ip, lr} @ 4\n\ |
109 | @@ -44,7 +44,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom) |
110 | mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ |
111 | ldmfd sp!, {r4, pc} @ 3" |
112 | : |
113 | - : "I" (PAGE_SIZE / 64)); |
114 | + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); |
115 | } |
116 | |
117 | void v4wb_copy_user_highpage(struct page *to, struct page *from, |
118 | diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c |
119 | index 172e6a5..30c7d04 100644 |
120 | --- a/arch/arm/mm/copypage-v4wt.c |
121 | +++ b/arch/arm/mm/copypage-v4wt.c |
122 | @@ -25,7 +25,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) |
123 | { |
124 | asm("\ |
125 | stmfd sp!, {r4, lr} @ 2\n\ |
126 | - mov r2, %0 @ 1\n\ |
127 | + mov r2, %2 @ 1\n\ |
128 | ldmia r1!, {r3, r4, ip, lr} @ 4\n\ |
129 | 1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ |
130 | ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ |
131 | @@ -40,7 +40,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom) |
132 | mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ |
133 | ldmfd sp!, {r4, pc} @ 3" |
134 | : |
135 | - : "I" (PAGE_SIZE / 64)); |
136 | + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); |
137 | } |
138 | |
139 | void v4wt_copy_user_highpage(struct page *to, struct page *from, |
140 | diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c |
141 | index 747ad41..f9cde07 100644 |
142 | --- a/arch/arm/mm/copypage-xsc3.c |
143 | +++ b/arch/arm/mm/copypage-xsc3.c |
144 | @@ -34,7 +34,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) |
145 | { |
146 | asm("\ |
147 | stmfd sp!, {r4, r5, lr} \n\ |
148 | - mov lr, %0 \n\ |
149 | + mov lr, %2 \n\ |
150 | \n\ |
151 | pld [r1, #0] \n\ |
152 | pld [r1, #32] \n\ |
153 | @@ -67,7 +67,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom) |
154 | \n\ |
155 | ldmfd sp!, {r4, r5, pc}" |
156 | : |
157 | - : "I" (PAGE_SIZE / 64 - 1)); |
158 | + : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1)); |
159 | } |
160 | |
161 | void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, |
162 | diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c |
163 | index 9d40c34..8ad75e9 100644 |
164 | --- a/arch/arm/mm/fault.c |
165 | +++ b/arch/arm/mm/fault.c |
166 | @@ -393,6 +393,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, |
167 | if (addr < TASK_SIZE) |
168 | return do_page_fault(addr, fsr, regs); |
169 | |
170 | + if (user_mode(regs)) |
171 | + goto bad_area; |
172 | + |
173 | index = pgd_index(addr); |
174 | |
175 | /* |
176 | diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c |
177 | index 0ed29bf..55d07c8 100644 |
178 | --- a/arch/arm/mm/init.c |
179 | +++ b/arch/arm/mm/init.c |
180 | @@ -712,10 +712,10 @@ void __init mem_init(void) |
181 | void free_initmem(void) |
182 | { |
183 | #ifdef CONFIG_HAVE_TCM |
184 | - extern char *__tcm_start, *__tcm_end; |
185 | + extern char __tcm_start, __tcm_end; |
186 | |
187 | - totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)), |
188 | - __phys_to_pfn(__pa(__tcm_end)), |
189 | + totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), |
190 | + __phys_to_pfn(__pa(&__tcm_end)), |
191 | "TCM link"); |
192 | #endif |
193 | |
194 | diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S |
195 | index 66dc2d0..d66cead 100644 |
196 | --- a/arch/arm/vfp/vfphw.S |
197 | +++ b/arch/arm/vfp/vfphw.S |
198 | @@ -277,7 +277,7 @@ ENTRY(vfp_put_double) |
199 | #ifdef CONFIG_VFPv3 |
200 | @ d16 - d31 registers |
201 | .irp dr,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 |
202 | -1: mcrr p11, 3, r1, r2, c\dr @ fmdrr r1, r2, d\dr |
203 | +1: mcrr p11, 3, r0, r1, c\dr @ fmdrr r0, r1, d\dr |
204 | mov pc, lr |
205 | .org 1b + 8 |
206 | .endr |
207 | diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h |
208 | index 8542bc3..93f6c63 100644 |
209 | --- a/arch/blackfin/include/asm/cache.h |
210 | +++ b/arch/blackfin/include/asm/cache.h |
211 | @@ -15,6 +15,8 @@ |
212 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) |
213 | #define SMP_CACHE_BYTES L1_CACHE_BYTES |
214 | |
215 | +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES |
216 | + |
217 | #ifdef CONFIG_SMP |
218 | #define __cacheline_aligned |
219 | #else |
220 | diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h |
221 | index 2797163..7dc0f0f 100644 |
222 | --- a/arch/frv/include/asm/cache.h |
223 | +++ b/arch/frv/include/asm/cache.h |
224 | @@ -17,6 +17,8 @@ |
225 | #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT) |
226 | #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) |
227 | |
228 | +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES |
229 | + |
230 | #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) |
231 | #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES))) |
232 | |
233 | diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h |
234 | index fed3fd3..ecafbe1 100644 |
235 | --- a/arch/m68k/include/asm/cache.h |
236 | +++ b/arch/m68k/include/asm/cache.h |
237 | @@ -8,4 +8,6 @@ |
238 | #define L1_CACHE_SHIFT 4 |
239 | #define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT) |
240 | |
241 | +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES |
242 | + |
243 | #endif |
244 | diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h |
245 | index e03cfa2..6e2fe28 100644 |
246 | --- a/arch/mn10300/include/asm/cache.h |
247 | +++ b/arch/mn10300/include/asm/cache.h |
248 | @@ -21,6 +21,8 @@ |
249 | #define L1_CACHE_DISPARITY L1_CACHE_NENTRIES * L1_CACHE_BYTES |
250 | #endif |
251 | |
252 | +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES |
253 | + |
254 | /* data cache purge registers |
255 | * - read from the register to unconditionally purge that cache line |
256 | * - write address & 0xffffff00 to conditionally purge that cache line |
257 | diff --git a/arch/parisc/math-emu/decode_exc.c b/arch/parisc/math-emu/decode_exc.c |
258 | index 3ca1c61..27a7492 100644 |
259 | --- a/arch/parisc/math-emu/decode_exc.c |
260 | +++ b/arch/parisc/math-emu/decode_exc.c |
261 | @@ -342,6 +342,7 @@ decode_fpu(unsigned int Fpu_register[], unsigned int trap_counts[]) |
262 | return SIGNALCODE(SIGFPE, FPE_FLTINV); |
263 | case DIVISIONBYZEROEXCEPTION: |
264 | update_trap_counts(Fpu_register, aflags, bflags, trap_counts); |
265 | + Clear_excp_register(exception_index); |
266 | return SIGNALCODE(SIGFPE, FPE_FLTDIV); |
267 | case INEXACTEXCEPTION: |
268 | update_trap_counts(Fpu_register, aflags, bflags, trap_counts); |
269 | diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c |
270 | index c09138d..b894721 100644 |
271 | --- a/arch/powerpc/kernel/asm-offsets.c |
272 | +++ b/arch/powerpc/kernel/asm-offsets.c |
273 | @@ -447,6 +447,14 @@ int main(void) |
274 | DEFINE(PGD_T_LOG2, PGD_T_LOG2); |
275 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); |
276 | #endif |
277 | +#ifdef CONFIG_FSL_BOOKE |
278 | + DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); |
279 | + DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); |
280 | + DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); |
281 | + DEFINE(TLBCAM_MAS2, offsetof(struct tlbcam, MAS2)); |
282 | + DEFINE(TLBCAM_MAS3, offsetof(struct tlbcam, MAS3)); |
283 | + DEFINE(TLBCAM_MAS7, offsetof(struct tlbcam, MAS7)); |
284 | +#endif |
285 | |
286 | #ifdef CONFIG_KVM_EXIT_TIMING |
287 | DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, |
288 | diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S |
289 | index 7255265..edd4a57 100644 |
290 | --- a/arch/powerpc/kernel/head_fsl_booke.S |
291 | +++ b/arch/powerpc/kernel/head_fsl_booke.S |
292 | @@ -639,6 +639,13 @@ interrupt_base: |
293 | rlwinm r12,r12,0,16,1 |
294 | mtspr SPRN_MAS1,r12 |
295 | |
296 | + /* Make up the required permissions for kernel code */ |
297 | +#ifdef CONFIG_PTE_64BIT |
298 | + li r13,_PAGE_PRESENT | _PAGE_BAP_SX |
299 | + oris r13,r13,_PAGE_ACCESSED@h |
300 | +#else |
301 | + li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC |
302 | +#endif |
303 | b 4f |
304 | |
305 | /* Get the PGD for the current thread */ |
306 | @@ -646,15 +653,15 @@ interrupt_base: |
307 | mfspr r11,SPRN_SPRG_THREAD |
308 | lwz r11,PGDIR(r11) |
309 | |
310 | -4: |
311 | - /* Make up the required permissions */ |
312 | + /* Make up the required permissions for user code */ |
313 | #ifdef CONFIG_PTE_64BIT |
314 | - li r13,_PAGE_PRESENT | _PAGE_EXEC |
315 | + li r13,_PAGE_PRESENT | _PAGE_BAP_UX |
316 | oris r13,r13,_PAGE_ACCESSED@h |
317 | #else |
318 | li r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC |
319 | #endif |
320 | |
321 | +4: |
322 | FIND_PTE |
323 | andc. r13,r13,r11 /* Check permission */ |
324 | |
325 | diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c |
326 | index 604af29..ecb532b 100644 |
327 | --- a/arch/powerpc/kvm/book3s.c |
328 | +++ b/arch/powerpc/kvm/book3s.c |
329 | @@ -922,6 +922,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
330 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
331 | int i; |
332 | |
333 | + vcpu_load(vcpu); |
334 | + |
335 | sregs->pvr = vcpu->arch.pvr; |
336 | |
337 | sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1; |
338 | @@ -940,6 +942,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
339 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; |
340 | } |
341 | } |
342 | + |
343 | + vcpu_put(vcpu); |
344 | + |
345 | return 0; |
346 | } |
347 | |
348 | @@ -949,6 +954,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
349 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
350 | int i; |
351 | |
352 | + vcpu_load(vcpu); |
353 | + |
354 | kvmppc_set_pvr(vcpu, sregs->pvr); |
355 | |
356 | vcpu3s->sdr1 = sregs->u.s.sdr1; |
357 | @@ -975,6 +982,9 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
358 | |
359 | /* Flush the MMU after messing with the segments */ |
360 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
361 | + |
362 | + vcpu_put(vcpu); |
363 | + |
364 | return 0; |
365 | } |
366 | |
367 | diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c |
368 | index 2a3a195..df0182a 100644 |
369 | --- a/arch/powerpc/kvm/booke.c |
370 | +++ b/arch/powerpc/kvm/booke.c |
371 | @@ -479,6 +479,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
372 | { |
373 | int i; |
374 | |
375 | + vcpu_load(vcpu); |
376 | + |
377 | regs->pc = vcpu->arch.pc; |
378 | regs->cr = kvmppc_get_cr(vcpu); |
379 | regs->ctr = vcpu->arch.ctr; |
380 | @@ -499,6 +501,8 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
381 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
382 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
383 | |
384 | + vcpu_put(vcpu); |
385 | + |
386 | return 0; |
387 | } |
388 | |
389 | @@ -506,6 +510,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
390 | { |
391 | int i; |
392 | |
393 | + vcpu_load(vcpu); |
394 | + |
395 | vcpu->arch.pc = regs->pc; |
396 | kvmppc_set_cr(vcpu, regs->cr); |
397 | vcpu->arch.ctr = regs->ctr; |
398 | @@ -525,6 +531,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
399 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
400 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
401 | |
402 | + vcpu_put(vcpu); |
403 | + |
404 | return 0; |
405 | } |
406 | |
407 | @@ -553,7 +561,12 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
408 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
409 | struct kvm_translation *tr) |
410 | { |
411 | - return kvmppc_core_vcpu_translate(vcpu, tr); |
412 | + int r; |
413 | + |
414 | + vcpu_load(vcpu); |
415 | + r = kvmppc_core_vcpu_translate(vcpu, tr); |
416 | + vcpu_put(vcpu); |
417 | + return r; |
418 | } |
419 | |
420 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
421 | diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c |
422 | index 297fcd2..bf36a9d 100644 |
423 | --- a/arch/powerpc/kvm/powerpc.c |
424 | +++ b/arch/powerpc/kvm/powerpc.c |
425 | @@ -193,7 +193,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
426 | { |
427 | struct kvm_vcpu *vcpu; |
428 | vcpu = kvmppc_core_vcpu_create(kvm, id); |
429 | - kvmppc_create_vcpu_debugfs(vcpu, id); |
430 | + if (!IS_ERR(vcpu)) |
431 | + kvmppc_create_vcpu_debugfs(vcpu, id); |
432 | return vcpu; |
433 | } |
434 | |
435 | diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S |
436 | index 64e2e49..3ac0cd3 100644 |
437 | --- a/arch/powerpc/lib/string.S |
438 | +++ b/arch/powerpc/lib/string.S |
439 | @@ -71,7 +71,7 @@ _GLOBAL(strcmp) |
440 | |
441 | _GLOBAL(strncmp) |
442 | PPC_LCMPI r5,0 |
443 | - beqlr |
444 | + ble- 2f |
445 | mtctr r5 |
446 | addi r5,r3,-1 |
447 | addi r4,r4,-1 |
448 | @@ -82,6 +82,8 @@ _GLOBAL(strncmp) |
449 | beqlr 1 |
450 | bdnzt eq,1b |
451 | blr |
452 | +2: li r3,0 |
453 | + blr |
454 | |
455 | _GLOBAL(strlen) |
456 | addi r4,r3,-1 |
457 | diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c |
458 | index 1ed6b52..cdc7526 100644 |
459 | --- a/arch/powerpc/mm/fsl_booke_mmu.c |
460 | +++ b/arch/powerpc/mm/fsl_booke_mmu.c |
461 | @@ -2,7 +2,7 @@ |
462 | * Modifications by Kumar Gala (galak@kernel.crashing.org) to support |
463 | * E500 Book E processors. |
464 | * |
465 | - * Copyright 2004 Freescale Semiconductor, Inc |
466 | + * Copyright 2004,2010 Freescale Semiconductor, Inc. |
467 | * |
468 | * This file contains the routines for initializing the MMU |
469 | * on the 4xx series of chips. |
470 | @@ -56,19 +56,13 @@ |
471 | |
472 | unsigned int tlbcam_index; |
473 | |
474 | -#define NUM_TLBCAMS (64) |
475 | |
476 | #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) |
477 | #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" |
478 | #endif |
479 | |
480 | -struct tlbcam { |
481 | - u32 MAS0; |
482 | - u32 MAS1; |
483 | - unsigned long MAS2; |
484 | - u32 MAS3; |
485 | - u32 MAS7; |
486 | -} TLBCAM[NUM_TLBCAMS]; |
487 | +#define NUM_TLBCAMS (64) |
488 | +struct tlbcam TLBCAM[NUM_TLBCAMS]; |
489 | |
490 | struct tlbcamrange { |
491 | unsigned long start; |
492 | @@ -109,19 +103,6 @@ unsigned long p_mapped_by_tlbcam(phys_addr_t pa) |
493 | return 0; |
494 | } |
495 | |
496 | -void loadcam_entry(int idx) |
497 | -{ |
498 | - mtspr(SPRN_MAS0, TLBCAM[idx].MAS0); |
499 | - mtspr(SPRN_MAS1, TLBCAM[idx].MAS1); |
500 | - mtspr(SPRN_MAS2, TLBCAM[idx].MAS2); |
501 | - mtspr(SPRN_MAS3, TLBCAM[idx].MAS3); |
502 | - |
503 | - if (mmu_has_feature(MMU_FTR_BIG_PHYS)) |
504 | - mtspr(SPRN_MAS7, TLBCAM[idx].MAS7); |
505 | - |
506 | - asm volatile("isync;tlbwe;isync" : : : "memory"); |
507 | -} |
508 | - |
509 | /* |
510 | * Set up one of the I/D BAT (block address translation) register pairs. |
511 | * The parameters are not checked; in particular size must be a power |
512 | diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h |
513 | index d49a775..0591f25 100644 |
514 | --- a/arch/powerpc/mm/mmu_decl.h |
515 | +++ b/arch/powerpc/mm/mmu_decl.h |
516 | @@ -149,7 +149,15 @@ extern unsigned long mmu_mapin_ram(unsigned long top); |
517 | extern void MMU_init_hw(void); |
518 | extern unsigned long mmu_mapin_ram(unsigned long top); |
519 | extern void adjust_total_lowmem(void); |
520 | - |
521 | +extern void loadcam_entry(unsigned int index); |
522 | + |
523 | +struct tlbcam { |
524 | + u32 MAS0; |
525 | + u32 MAS1; |
526 | + unsigned long MAS2; |
527 | + u32 MAS3; |
528 | + u32 MAS7; |
529 | +}; |
530 | #elif defined(CONFIG_PPC32) |
531 | /* anything 32-bit except 4xx or 8xx */ |
532 | extern void MMU_init_hw(void); |
533 | diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c |
534 | index b9243e7..767b0cf 100644 |
535 | --- a/arch/powerpc/mm/pgtable_32.c |
536 | +++ b/arch/powerpc/mm/pgtable_32.c |
537 | @@ -146,6 +146,14 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags) |
538 | /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ |
539 | flags &= ~(_PAGE_USER | _PAGE_EXEC); |
540 | |
541 | +#ifdef _PAGE_BAP_SR |
542 | + /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format |
543 | + * which means that we just cleared supervisor access... oops ;-) This |
544 | + * restores it |
545 | + */ |
546 | + flags |= _PAGE_BAP_SR; |
547 | +#endif |
548 | + |
549 | return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); |
550 | } |
551 | EXPORT_SYMBOL(ioremap_flags); |
552 | diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c |
553 | index d95679a..d050fc8 100644 |
554 | --- a/arch/powerpc/mm/pgtable_64.c |
555 | +++ b/arch/powerpc/mm/pgtable_64.c |
556 | @@ -265,6 +265,14 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size, |
557 | /* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */ |
558 | flags &= ~(_PAGE_USER | _PAGE_EXEC); |
559 | |
560 | +#ifdef _PAGE_BAP_SR |
561 | + /* _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE format |
562 | + * which means that we just cleared supervisor access... oops ;-) This |
563 | + * restores it |
564 | + */ |
565 | + flags |= _PAGE_BAP_SR; |
566 | +#endif |
567 | + |
568 | if (ppc_md.ioremap) |
569 | return ppc_md.ioremap(addr, size, flags, caller); |
570 | return __ioremap_caller(addr, size, flags, caller); |
571 | diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S |
572 | index bbdc5b5..8656ecf 100644 |
573 | --- a/arch/powerpc/mm/tlb_nohash_low.S |
574 | +++ b/arch/powerpc/mm/tlb_nohash_low.S |
575 | @@ -271,3 +271,31 @@ _GLOBAL(set_context) |
576 | #else |
577 | #error Unsupported processor type ! |
578 | #endif |
579 | + |
580 | +#if defined(CONFIG_FSL_BOOKE) |
581 | +/* |
582 | + * extern void loadcam_entry(unsigned int index) |
583 | + * |
584 | + * Load TLBCAM[index] entry in to the L2 CAM MMU |
585 | + */ |
586 | +_GLOBAL(loadcam_entry) |
587 | + LOAD_REG_ADDR(r4, TLBCAM) |
588 | + mulli r5,r3,TLBCAM_SIZE |
589 | + add r3,r5,r4 |
590 | + lwz r4,TLBCAM_MAS0(r3) |
591 | + mtspr SPRN_MAS0,r4 |
592 | + lwz r4,TLBCAM_MAS1(r3) |
593 | + mtspr SPRN_MAS1,r4 |
594 | + PPC_LL r4,TLBCAM_MAS2(r3) |
595 | + mtspr SPRN_MAS2,r4 |
596 | + lwz r4,TLBCAM_MAS3(r3) |
597 | + mtspr SPRN_MAS3,r4 |
598 | +BEGIN_MMU_FTR_SECTION |
599 | + lwz r4,TLBCAM_MAS7(r3) |
600 | + mtspr SPRN_MAS7,r4 |
601 | +END_MMU_FTR_SECTION_IFSET(MMU_FTR_BIG_PHYS) |
602 | + isync |
603 | + tlbwe |
604 | + isync |
605 | + blr |
606 | +#endif |
607 | diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c |
608 | index 2c9e522..7fd90d0 100644 |
609 | --- a/arch/powerpc/oprofile/op_model_cell.c |
610 | +++ b/arch/powerpc/oprofile/op_model_cell.c |
611 | @@ -1077,7 +1077,7 @@ static int calculate_lfsr(int n) |
612 | index = ENTRIES-1; |
613 | |
614 | /* make sure index is valid */ |
615 | - if ((index > ENTRIES) || (index < 0)) |
616 | + if ((index >= ENTRIES) || (index < 0)) |
617 | index = ENTRIES-1; |
618 | |
619 | return initial_lfsr[index]; |
620 | diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c |
621 | index a8e1d5d..b0760d7 100644 |
622 | --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c |
623 | +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c |
624 | @@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void) |
625 | for(;;); |
626 | } |
627 | |
628 | -static int qcss_tok; /* query-cpu-stopped-state token */ |
629 | - |
630 | -/* Get state of physical CPU. |
631 | - * Return codes: |
632 | - * 0 - The processor is in the RTAS stopped state |
633 | - * 1 - stop-self is in progress |
634 | - * 2 - The processor is not in the RTAS stopped state |
635 | - * -1 - Hardware Error |
636 | - * -2 - Hardware Busy, Try again later. |
637 | - */ |
638 | -static int query_cpu_stopped(unsigned int pcpu) |
639 | -{ |
640 | - int cpu_status, status; |
641 | - |
642 | - status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); |
643 | - if (status != 0) { |
644 | - printk(KERN_ERR |
645 | - "RTAS query-cpu-stopped-state failed: %i\n", status); |
646 | - return status; |
647 | - } |
648 | - |
649 | - return cpu_status; |
650 | -} |
651 | - |
652 | static int pseries_cpu_disable(void) |
653 | { |
654 | int cpu = smp_processor_id(); |
655 | @@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu) |
656 | } else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) { |
657 | |
658 | for (tries = 0; tries < 25; tries++) { |
659 | - cpu_status = query_cpu_stopped(pcpu); |
660 | - if (cpu_status == 0 || cpu_status == -1) |
661 | + cpu_status = smp_query_cpu_stopped(pcpu); |
662 | + if (cpu_status == QCSS_STOPPED || |
663 | + cpu_status == QCSS_HARDWARE_ERROR) |
664 | break; |
665 | cpu_relax(); |
666 | } |
667 | @@ -388,6 +365,7 @@ static int __init pseries_cpu_hotplug_init(void) |
668 | struct device_node *np; |
669 | const char *typep; |
670 | int cpu; |
671 | + int qcss_tok; |
672 | |
673 | for_each_node_by_name(np, "interrupt-controller") { |
674 | typep = of_get_property(np, "compatible", NULL); |
675 | diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h |
676 | index a05f8d4..6c4fd2c 100644 |
677 | --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h |
678 | +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h |
679 | @@ -4,6 +4,14 @@ |
680 | #include <asm/hvcall.h> |
681 | #include <asm/page.h> |
682 | |
683 | +/* Get state of physical CPU from query_cpu_stopped */ |
684 | +int smp_query_cpu_stopped(unsigned int pcpu); |
685 | +#define QCSS_STOPPED 0 |
686 | +#define QCSS_STOPPING 1 |
687 | +#define QCSS_NOT_STOPPED 2 |
688 | +#define QCSS_HARDWARE_ERROR -1 |
689 | +#define QCSS_HARDWARE_BUSY -2 |
690 | + |
691 | static inline long poll_pending(void) |
692 | { |
693 | return plpar_hcall_norets(H_POLL_PENDING); |
694 | diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c |
695 | index 4e7f89a..8979982 100644 |
696 | --- a/arch/powerpc/platforms/pseries/smp.c |
697 | +++ b/arch/powerpc/platforms/pseries/smp.c |
698 | @@ -57,6 +57,28 @@ |
699 | */ |
700 | static cpumask_t of_spin_map; |
701 | |
702 | +/* Query where a cpu is now. Return codes #defined in plpar_wrappers.h */ |
703 | +int smp_query_cpu_stopped(unsigned int pcpu) |
704 | +{ |
705 | + int cpu_status, status; |
706 | + int qcss_tok = rtas_token("query-cpu-stopped-state"); |
707 | + |
708 | + if (qcss_tok == RTAS_UNKNOWN_SERVICE) { |
709 | + printk(KERN_INFO "Firmware doesn't support " |
710 | + "query-cpu-stopped-state\n"); |
711 | + return QCSS_HARDWARE_ERROR; |
712 | + } |
713 | + |
714 | + status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu); |
715 | + if (status != 0) { |
716 | + printk(KERN_ERR |
717 | + "RTAS query-cpu-stopped-state failed: %i\n", status); |
718 | + return status; |
719 | + } |
720 | + |
721 | + return cpu_status; |
722 | +} |
723 | + |
724 | /** |
725 | * smp_startup_cpu() - start the given cpu |
726 | * |
727 | @@ -82,6 +104,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) |
728 | |
729 | pcpu = get_hard_smp_processor_id(lcpu); |
730 | |
731 | + /* Check to see if the CPU out of FW already for kexec */ |
732 | + if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){ |
733 | + cpu_set(lcpu, of_spin_map); |
734 | + return 1; |
735 | + } |
736 | + |
737 | /* Fixup atomic count: it exited inside IRQ handler. */ |
738 | task_thread_info(paca[lcpu].__current)->preempt_count = 0; |
739 | |
740 | diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c |
741 | index 4929286..ee7c713 100644 |
742 | --- a/arch/s390/kvm/kvm-s390.c |
743 | +++ b/arch/s390/kvm/kvm-s390.c |
744 | @@ -341,11 +341,13 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, |
745 | |
746 | rc = kvm_vcpu_init(vcpu, kvm, id); |
747 | if (rc) |
748 | - goto out_free_cpu; |
749 | + goto out_free_sie_block; |
750 | VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu, |
751 | vcpu->arch.sie_block); |
752 | |
753 | return vcpu; |
754 | +out_free_sie_block: |
755 | + free_page((unsigned long)(vcpu->arch.sie_block)); |
756 | out_free_cpu: |
757 | kfree(vcpu); |
758 | out_nomem: |
759 | diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
760 | index 06d9e79..63400e6 100644 |
761 | --- a/arch/x86/include/asm/kvm_host.h |
762 | +++ b/arch/x86/include/asm/kvm_host.h |
763 | @@ -180,6 +180,7 @@ union kvm_mmu_page_role { |
764 | unsigned invalid:1; |
765 | unsigned cr4_pge:1; |
766 | unsigned nxe:1; |
767 | + unsigned cr0_wp:1; |
768 | }; |
769 | }; |
770 | |
771 | @@ -541,6 +542,8 @@ struct kvm_x86_ops { |
772 | int (*get_lpage_level)(void); |
773 | bool (*rdtscp_supported)(void); |
774 | |
775 | + void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry); |
776 | + |
777 | const struct trace_print_flags *exit_reasons_str; |
778 | }; |
779 | |
780 | diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h |
781 | index 4604e6a..d86da72 100644 |
782 | --- a/arch/x86/include/asm/msr-index.h |
783 | +++ b/arch/x86/include/asm/msr-index.h |
784 | @@ -199,8 +199,9 @@ |
785 | #define MSR_IA32_EBL_CR_POWERON 0x0000002a |
786 | #define MSR_IA32_FEATURE_CONTROL 0x0000003a |
787 | |
788 | -#define FEATURE_CONTROL_LOCKED (1<<0) |
789 | -#define FEATURE_CONTROL_VMXON_ENABLED (1<<2) |
790 | +#define FEATURE_CONTROL_LOCKED (1<<0) |
791 | +#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) |
792 | +#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) |
793 | |
794 | #define MSR_IA32_APICBASE 0x0000001b |
795 | #define MSR_IA32_APICBASE_BSP (1<<8) |
796 | diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c |
797 | index f854d89..29e5e6e 100644 |
798 | --- a/arch/x86/kernel/amd_iommu.c |
799 | +++ b/arch/x86/kernel/amd_iommu.c |
800 | @@ -1420,6 +1420,7 @@ static int __attach_device(struct device *dev, |
801 | struct protection_domain *domain) |
802 | { |
803 | struct iommu_dev_data *dev_data, *alias_data; |
804 | + int ret; |
805 | |
806 | dev_data = get_dev_data(dev); |
807 | alias_data = get_dev_data(dev_data->alias); |
808 | @@ -1431,13 +1432,14 @@ static int __attach_device(struct device *dev, |
809 | spin_lock(&domain->lock); |
810 | |
811 | /* Some sanity checks */ |
812 | + ret = -EBUSY; |
813 | if (alias_data->domain != NULL && |
814 | alias_data->domain != domain) |
815 | - return -EBUSY; |
816 | + goto out_unlock; |
817 | |
818 | if (dev_data->domain != NULL && |
819 | dev_data->domain != domain) |
820 | - return -EBUSY; |
821 | + goto out_unlock; |
822 | |
823 | /* Do real assignment */ |
824 | if (dev_data->alias != dev) { |
825 | @@ -1453,10 +1455,14 @@ static int __attach_device(struct device *dev, |
826 | |
827 | atomic_inc(&dev_data->bind); |
828 | |
829 | + ret = 0; |
830 | + |
831 | +out_unlock: |
832 | + |
833 | /* ready */ |
834 | spin_unlock(&domain->lock); |
835 | |
836 | - return 0; |
837 | + return ret; |
838 | } |
839 | |
840 | /* |
841 | @@ -2257,10 +2263,6 @@ int __init amd_iommu_init_dma_ops(void) |
842 | |
843 | iommu_detected = 1; |
844 | swiotlb = 0; |
845 | -#ifdef CONFIG_GART_IOMMU |
846 | - gart_iommu_aperture_disabled = 1; |
847 | - gart_iommu_aperture = 0; |
848 | -#endif |
849 | |
850 | /* Make the driver finally visible to the drivers */ |
851 | dma_ops = &amd_iommu_dma_ops; |
852 | diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c |
853 | index 6360abf..6f8ce75 100644 |
854 | --- a/arch/x86/kernel/amd_iommu_init.c |
855 | +++ b/arch/x86/kernel/amd_iommu_init.c |
856 | @@ -286,8 +286,12 @@ static u8 * __init iommu_map_mmio_space(u64 address) |
857 | { |
858 | u8 *ret; |
859 | |
860 | - if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) |
861 | + if (!request_mem_region(address, MMIO_REGION_LENGTH, "amd_iommu")) { |
862 | + pr_err("AMD-Vi: Can not reserve memory region %llx for mmio\n", |
863 | + address); |
864 | + pr_err("AMD-Vi: This is a BIOS bug. Please contact your hardware vendor\n"); |
865 | return NULL; |
866 | + } |
867 | |
868 | ret = ioremap_nocache(address, MMIO_REGION_LENGTH); |
869 | if (ret != NULL) |
870 | @@ -1313,7 +1317,7 @@ static int __init amd_iommu_init(void) |
871 | ret = amd_iommu_init_dma_ops(); |
872 | |
873 | if (ret) |
874 | - goto free; |
875 | + goto free_disable; |
876 | |
877 | amd_iommu_init_api(); |
878 | |
879 | @@ -1331,9 +1335,10 @@ static int __init amd_iommu_init(void) |
880 | out: |
881 | return ret; |
882 | |
883 | -free: |
884 | +free_disable: |
885 | disable_iommus(); |
886 | |
887 | +free: |
888 | amd_iommu_uninit_devices(); |
889 | |
890 | free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, |
891 | @@ -1352,6 +1357,15 @@ free: |
892 | |
893 | free_unity_maps(); |
894 | |
895 | +#ifdef CONFIG_GART_IOMMU |
896 | + /* |
897 | + * We failed to initialize the AMD IOMMU - try fallback to GART |
898 | + * if possible. |
899 | + */ |
900 | + gart_iommu_init(); |
901 | + |
902 | +#endif |
903 | + |
904 | goto out; |
905 | } |
906 | |
907 | diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c |
908 | index db5bdc8..c5e8b53 100644 |
909 | --- a/arch/x86/kernel/cpu/perf_event.c |
910 | +++ b/arch/x86/kernel/cpu/perf_event.c |
911 | @@ -460,8 +460,11 @@ static int __hw_perf_event_init(struct perf_event *event) |
912 | if (atomic_read(&active_events) == 0) { |
913 | if (!reserve_pmc_hardware()) |
914 | err = -EBUSY; |
915 | - else |
916 | + else { |
917 | err = reserve_bts_hardware(); |
918 | + if (err) |
919 | + release_pmc_hardware(); |
920 | + } |
921 | } |
922 | if (!err) |
923 | atomic_inc(&active_events); |
924 | diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c |
925 | index 03801f2..dfdfe46 100644 |
926 | --- a/arch/x86/kernel/pvclock.c |
927 | +++ b/arch/x86/kernel/pvclock.c |
928 | @@ -109,11 +109,14 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src) |
929 | return pv_tsc_khz; |
930 | } |
931 | |
932 | +static atomic64_t last_value = ATOMIC64_INIT(0); |
933 | + |
934 | cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) |
935 | { |
936 | struct pvclock_shadow_time shadow; |
937 | unsigned version; |
938 | cycle_t ret, offset; |
939 | + u64 last; |
940 | |
941 | do { |
942 | version = pvclock_get_time_values(&shadow, src); |
943 | @@ -123,6 +126,27 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) |
944 | barrier(); |
945 | } while (version != src->version); |
946 | |
947 | + /* |
948 | + * Assumption here is that last_value, a global accumulator, always goes |
949 | + * forward. If we are less than that, we should not be much smaller. |
950 | + * We assume there is an error margin we're inside, and then the correction |
951 | + * does not sacrifice accuracy. |
952 | + * |
953 | + * For reads: global may have changed between test and return, |
954 | + * but this means someone else poked the clock at a later time. |
955 | + * We just need to make sure we are not seeing a backwards event. |
956 | + * |
957 | + * For updates: last_value = ret is not enough, since two vcpus could be |
958 | + * updating at the same time, and one of them could be slightly behind, |
959 | + * making the assumption that last_value always goes forward fail to hold. |
960 | + */ |
961 | + last = atomic64_read(&last_value); |
962 | + do { |
963 | + if (ret < last) |
964 | + return last; |
965 | + last = atomic64_cmpxchg(&last_value, last, ret); |
966 | + } while (unlikely(last != ret)); |
967 | + |
968 | return ret; |
969 | } |
970 | |
971 | diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c |
972 | index c4851ef..47ae912 100644 |
973 | --- a/arch/x86/kernel/setup.c |
974 | +++ b/arch/x86/kernel/setup.c |
975 | @@ -676,6 +676,17 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = { |
976 | DMI_MATCH(DMI_BOARD_NAME, "DG45FC"), |
977 | }, |
978 | }, |
979 | + /* |
980 | + * The Dell Inspiron Mini 1012 has DMI_BIOS_VENDOR = "Dell Inc.", so |
981 | + * match on the product name. |
982 | + */ |
983 | + { |
984 | + .callback = dmi_low_memory_corruption, |
985 | + .ident = "Phoenix BIOS", |
986 | + .matches = { |
987 | + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), |
988 | + }, |
989 | + }, |
990 | #endif |
991 | {} |
992 | }; |
993 | diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c |
994 | index 86c9f91..46b8277 100644 |
995 | --- a/arch/x86/kernel/tboot.c |
996 | +++ b/arch/x86/kernel/tboot.c |
997 | @@ -46,6 +46,7 @@ |
998 | |
999 | /* Global pointer to shared data; NULL means no measured launch. */ |
1000 | struct tboot *tboot __read_mostly; |
1001 | +EXPORT_SYMBOL(tboot); |
1002 | |
1003 | /* timeout for APs (in secs) to enter wait-for-SIPI state during shutdown */ |
1004 | #define AP_WAIT_TIMEOUT 1 |
1005 | diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c |
1006 | index 19a8906..62fd8e6 100644 |
1007 | --- a/arch/x86/kvm/mmu.c |
1008 | +++ b/arch/x86/kvm/mmu.c |
1009 | @@ -223,7 +223,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask, |
1010 | } |
1011 | EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes); |
1012 | |
1013 | -static int is_write_protection(struct kvm_vcpu *vcpu) |
1014 | +static bool is_write_protection(struct kvm_vcpu *vcpu) |
1015 | { |
1016 | return kvm_read_cr0_bits(vcpu, X86_CR0_WP); |
1017 | } |
1018 | @@ -2085,11 +2085,13 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu) |
1019 | direct = 1; |
1020 | if (mmu_check_root(vcpu, root_gfn)) |
1021 | return 1; |
1022 | + spin_lock(&vcpu->kvm->mmu_lock); |
1023 | sp = kvm_mmu_get_page(vcpu, root_gfn, 0, |
1024 | PT64_ROOT_LEVEL, direct, |
1025 | ACC_ALL, NULL); |
1026 | root = __pa(sp->spt); |
1027 | ++sp->root_count; |
1028 | + spin_unlock(&vcpu->kvm->mmu_lock); |
1029 | vcpu->arch.mmu.root_hpa = root; |
1030 | return 0; |
1031 | } |
1032 | @@ -2111,11 +2113,14 @@ static int mmu_alloc_roots(struct kvm_vcpu *vcpu) |
1033 | root_gfn = 0; |
1034 | if (mmu_check_root(vcpu, root_gfn)) |
1035 | return 1; |
1036 | + spin_lock(&vcpu->kvm->mmu_lock); |
1037 | sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30, |
1038 | PT32_ROOT_LEVEL, direct, |
1039 | ACC_ALL, NULL); |
1040 | root = __pa(sp->spt); |
1041 | ++sp->root_count; |
1042 | + spin_unlock(&vcpu->kvm->mmu_lock); |
1043 | + |
1044 | vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; |
1045 | } |
1046 | vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); |
1047 | @@ -2439,6 +2444,7 @@ static int init_kvm_softmmu(struct kvm_vcpu *vcpu) |
1048 | r = paging32_init_context(vcpu); |
1049 | |
1050 | vcpu->arch.mmu.base_role.glevels = vcpu->arch.mmu.root_level; |
1051 | + vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu); |
1052 | |
1053 | return r; |
1054 | } |
1055 | @@ -2478,7 +2484,9 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu) |
1056 | goto out; |
1057 | spin_lock(&vcpu->kvm->mmu_lock); |
1058 | kvm_mmu_free_some_pages(vcpu); |
1059 | + spin_unlock(&vcpu->kvm->mmu_lock); |
1060 | r = mmu_alloc_roots(vcpu); |
1061 | + spin_lock(&vcpu->kvm->mmu_lock); |
1062 | mmu_sync_roots(vcpu); |
1063 | spin_unlock(&vcpu->kvm->mmu_lock); |
1064 | if (r) |
1065 | diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c |
1066 | index 737361f..1185b55 100644 |
1067 | --- a/arch/x86/kvm/svm.c |
1068 | +++ b/arch/x86/kvm/svm.c |
1069 | @@ -129,6 +129,7 @@ static void svm_flush_tlb(struct kvm_vcpu *vcpu); |
1070 | static void svm_complete_interrupts(struct vcpu_svm *svm); |
1071 | |
1072 | static int nested_svm_exit_handled(struct vcpu_svm *svm); |
1073 | +static int nested_svm_intercept(struct vcpu_svm *svm); |
1074 | static int nested_svm_vmexit(struct vcpu_svm *svm); |
1075 | static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, |
1076 | bool has_error_code, u32 error_code); |
1077 | @@ -1384,6 +1385,8 @@ static int nested_svm_check_permissions(struct vcpu_svm *svm) |
1078 | static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, |
1079 | bool has_error_code, u32 error_code) |
1080 | { |
1081 | + int vmexit; |
1082 | + |
1083 | if (!is_nested(svm)) |
1084 | return 0; |
1085 | |
1086 | @@ -1392,19 +1395,24 @@ static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, |
1087 | svm->vmcb->control.exit_info_1 = error_code; |
1088 | svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; |
1089 | |
1090 | - return nested_svm_exit_handled(svm); |
1091 | + vmexit = nested_svm_intercept(svm); |
1092 | + if (vmexit == NESTED_EXIT_DONE) |
1093 | + svm->nested.exit_required = true; |
1094 | + |
1095 | + return vmexit; |
1096 | } |
1097 | |
1098 | -static inline int nested_svm_intr(struct vcpu_svm *svm) |
1099 | +/* This function returns true if it is safe to enable the irq window */ |
1100 | +static inline bool nested_svm_intr(struct vcpu_svm *svm) |
1101 | { |
1102 | if (!is_nested(svm)) |
1103 | - return 0; |
1104 | + return true; |
1105 | |
1106 | if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) |
1107 | - return 0; |
1108 | + return true; |
1109 | |
1110 | if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) |
1111 | - return 0; |
1112 | + return false; |
1113 | |
1114 | svm->vmcb->control.exit_code = SVM_EXIT_INTR; |
1115 | |
1116 | @@ -1417,13 +1425,13 @@ static inline int nested_svm_intr(struct vcpu_svm *svm) |
1117 | */ |
1118 | svm->nested.exit_required = true; |
1119 | trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); |
1120 | - return 1; |
1121 | + return false; |
1122 | } |
1123 | |
1124 | - return 0; |
1125 | + return true; |
1126 | } |
1127 | |
1128 | -static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx) |
1129 | +static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page) |
1130 | { |
1131 | struct page *page; |
1132 | |
1133 | @@ -1431,7 +1439,9 @@ static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, enum km_type idx) |
1134 | if (is_error_page(page)) |
1135 | goto error; |
1136 | |
1137 | - return kmap_atomic(page, idx); |
1138 | + *_page = page; |
1139 | + |
1140 | + return kmap(page); |
1141 | |
1142 | error: |
1143 | kvm_release_page_clean(page); |
1144 | @@ -1440,16 +1450,9 @@ error: |
1145 | return NULL; |
1146 | } |
1147 | |
1148 | -static void nested_svm_unmap(void *addr, enum km_type idx) |
1149 | +static void nested_svm_unmap(struct page *page) |
1150 | { |
1151 | - struct page *page; |
1152 | - |
1153 | - if (!addr) |
1154 | - return; |
1155 | - |
1156 | - page = kmap_atomic_to_page(addr); |
1157 | - |
1158 | - kunmap_atomic(addr, idx); |
1159 | + kunmap(page); |
1160 | kvm_release_page_dirty(page); |
1161 | } |
1162 | |
1163 | @@ -1459,16 +1462,11 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm) |
1164 | u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; |
1165 | bool ret = false; |
1166 | u32 t0, t1; |
1167 | - u8 *msrpm; |
1168 | + u8 val; |
1169 | |
1170 | if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) |
1171 | return false; |
1172 | |
1173 | - msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0); |
1174 | - |
1175 | - if (!msrpm) |
1176 | - goto out; |
1177 | - |
1178 | switch (msr) { |
1179 | case 0 ... 0x1fff: |
1180 | t0 = (msr * 2) % 8; |
1181 | @@ -1489,11 +1487,10 @@ static bool nested_svm_exit_handled_msr(struct vcpu_svm *svm) |
1182 | goto out; |
1183 | } |
1184 | |
1185 | - ret = msrpm[t1] & ((1 << param) << t0); |
1186 | + if (!kvm_read_guest(svm->vcpu.kvm, svm->nested.vmcb_msrpm + t1, &val, 1)) |
1187 | + ret = val & ((1 << param) << t0); |
1188 | |
1189 | out: |
1190 | - nested_svm_unmap(msrpm, KM_USER0); |
1191 | - |
1192 | return ret; |
1193 | } |
1194 | |
1195 | @@ -1525,7 +1522,7 @@ static int nested_svm_exit_special(struct vcpu_svm *svm) |
1196 | /* |
1197 | * If this function returns true, this #vmexit was already handled |
1198 | */ |
1199 | -static int nested_svm_exit_handled(struct vcpu_svm *svm) |
1200 | +static int nested_svm_intercept(struct vcpu_svm *svm) |
1201 | { |
1202 | u32 exit_code = svm->vmcb->control.exit_code; |
1203 | int vmexit = NESTED_EXIT_HOST; |
1204 | @@ -1571,9 +1568,17 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm) |
1205 | } |
1206 | } |
1207 | |
1208 | - if (vmexit == NESTED_EXIT_DONE) { |
1209 | + return vmexit; |
1210 | +} |
1211 | + |
1212 | +static int nested_svm_exit_handled(struct vcpu_svm *svm) |
1213 | +{ |
1214 | + int vmexit; |
1215 | + |
1216 | + vmexit = nested_svm_intercept(svm); |
1217 | + |
1218 | + if (vmexit == NESTED_EXIT_DONE) |
1219 | nested_svm_vmexit(svm); |
1220 | - } |
1221 | |
1222 | return vmexit; |
1223 | } |
1224 | @@ -1615,6 +1620,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) |
1225 | struct vmcb *nested_vmcb; |
1226 | struct vmcb *hsave = svm->nested.hsave; |
1227 | struct vmcb *vmcb = svm->vmcb; |
1228 | + struct page *page; |
1229 | |
1230 | trace_kvm_nested_vmexit_inject(vmcb->control.exit_code, |
1231 | vmcb->control.exit_info_1, |
1232 | @@ -1622,7 +1628,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) |
1233 | vmcb->control.exit_int_info, |
1234 | vmcb->control.exit_int_info_err); |
1235 | |
1236 | - nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, KM_USER0); |
1237 | + nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page); |
1238 | if (!nested_vmcb) |
1239 | return 1; |
1240 | |
1241 | @@ -1635,9 +1641,13 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) |
1242 | nested_vmcb->save.ds = vmcb->save.ds; |
1243 | nested_vmcb->save.gdtr = vmcb->save.gdtr; |
1244 | nested_vmcb->save.idtr = vmcb->save.idtr; |
1245 | + nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); |
1246 | if (npt_enabled) |
1247 | nested_vmcb->save.cr3 = vmcb->save.cr3; |
1248 | + else |
1249 | + nested_vmcb->save.cr3 = svm->vcpu.arch.cr3; |
1250 | nested_vmcb->save.cr2 = vmcb->save.cr2; |
1251 | + nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; |
1252 | nested_vmcb->save.rflags = vmcb->save.rflags; |
1253 | nested_vmcb->save.rip = vmcb->save.rip; |
1254 | nested_vmcb->save.rsp = vmcb->save.rsp; |
1255 | @@ -1712,7 +1722,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) |
1256 | /* Exit nested SVM mode */ |
1257 | svm->nested.vmcb = 0; |
1258 | |
1259 | - nested_svm_unmap(nested_vmcb, KM_USER0); |
1260 | + nested_svm_unmap(page); |
1261 | |
1262 | kvm_mmu_reset_context(&svm->vcpu); |
1263 | kvm_mmu_load(&svm->vcpu); |
1264 | @@ -1723,9 +1733,10 @@ static int nested_svm_vmexit(struct vcpu_svm *svm) |
1265 | static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) |
1266 | { |
1267 | u32 *nested_msrpm; |
1268 | + struct page *page; |
1269 | int i; |
1270 | |
1271 | - nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, KM_USER0); |
1272 | + nested_msrpm = nested_svm_map(svm, svm->nested.vmcb_msrpm, &page); |
1273 | if (!nested_msrpm) |
1274 | return false; |
1275 | |
1276 | @@ -1734,7 +1745,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) |
1277 | |
1278 | svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm); |
1279 | |
1280 | - nested_svm_unmap(nested_msrpm, KM_USER0); |
1281 | + nested_svm_unmap(page); |
1282 | |
1283 | return true; |
1284 | } |
1285 | @@ -1744,8 +1755,9 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) |
1286 | struct vmcb *nested_vmcb; |
1287 | struct vmcb *hsave = svm->nested.hsave; |
1288 | struct vmcb *vmcb = svm->vmcb; |
1289 | + struct page *page; |
1290 | |
1291 | - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0); |
1292 | + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); |
1293 | if (!nested_vmcb) |
1294 | return false; |
1295 | |
1296 | @@ -1819,21 +1831,6 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) |
1297 | svm->vmcb->save.dr6 = nested_vmcb->save.dr6; |
1298 | svm->vmcb->save.cpl = nested_vmcb->save.cpl; |
1299 | |
1300 | - /* We don't want a nested guest to be more powerful than the guest, |
1301 | - so all intercepts are ORed */ |
1302 | - svm->vmcb->control.intercept_cr_read |= |
1303 | - nested_vmcb->control.intercept_cr_read; |
1304 | - svm->vmcb->control.intercept_cr_write |= |
1305 | - nested_vmcb->control.intercept_cr_write; |
1306 | - svm->vmcb->control.intercept_dr_read |= |
1307 | - nested_vmcb->control.intercept_dr_read; |
1308 | - svm->vmcb->control.intercept_dr_write |= |
1309 | - nested_vmcb->control.intercept_dr_write; |
1310 | - svm->vmcb->control.intercept_exceptions |= |
1311 | - nested_vmcb->control.intercept_exceptions; |
1312 | - |
1313 | - svm->vmcb->control.intercept |= nested_vmcb->control.intercept; |
1314 | - |
1315 | svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa; |
1316 | |
1317 | /* cache intercepts */ |
1318 | @@ -1851,13 +1848,38 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm) |
1319 | else |
1320 | svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; |
1321 | |
1322 | + if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { |
1323 | + /* We only want the cr8 intercept bits of the guest */ |
1324 | + svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR8_MASK; |
1325 | + svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK; |
1326 | + } |
1327 | + |
1328 | + /* We don't want to see VMMCALLs from a nested guest */ |
1329 | + svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VMMCALL); |
1330 | + |
1331 | + /* We don't want a nested guest to be more powerful than the guest, |
1332 | + so all intercepts are ORed */ |
1333 | + svm->vmcb->control.intercept_cr_read |= |
1334 | + nested_vmcb->control.intercept_cr_read; |
1335 | + svm->vmcb->control.intercept_cr_write |= |
1336 | + nested_vmcb->control.intercept_cr_write; |
1337 | + svm->vmcb->control.intercept_dr_read |= |
1338 | + nested_vmcb->control.intercept_dr_read; |
1339 | + svm->vmcb->control.intercept_dr_write |= |
1340 | + nested_vmcb->control.intercept_dr_write; |
1341 | + svm->vmcb->control.intercept_exceptions |= |
1342 | + nested_vmcb->control.intercept_exceptions; |
1343 | + |
1344 | + svm->vmcb->control.intercept |= nested_vmcb->control.intercept; |
1345 | + |
1346 | + svm->vmcb->control.lbr_ctl = nested_vmcb->control.lbr_ctl; |
1347 | svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; |
1348 | svm->vmcb->control.int_state = nested_vmcb->control.int_state; |
1349 | svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset; |
1350 | svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; |
1351 | svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; |
1352 | |
1353 | - nested_svm_unmap(nested_vmcb, KM_USER0); |
1354 | + nested_svm_unmap(page); |
1355 | |
1356 | enable_gif(svm); |
1357 | |
1358 | @@ -1883,6 +1905,7 @@ static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb) |
1359 | static int vmload_interception(struct vcpu_svm *svm) |
1360 | { |
1361 | struct vmcb *nested_vmcb; |
1362 | + struct page *page; |
1363 | |
1364 | if (nested_svm_check_permissions(svm)) |
1365 | return 1; |
1366 | @@ -1890,12 +1913,12 @@ static int vmload_interception(struct vcpu_svm *svm) |
1367 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; |
1368 | skip_emulated_instruction(&svm->vcpu); |
1369 | |
1370 | - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0); |
1371 | + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); |
1372 | if (!nested_vmcb) |
1373 | return 1; |
1374 | |
1375 | nested_svm_vmloadsave(nested_vmcb, svm->vmcb); |
1376 | - nested_svm_unmap(nested_vmcb, KM_USER0); |
1377 | + nested_svm_unmap(page); |
1378 | |
1379 | return 1; |
1380 | } |
1381 | @@ -1903,6 +1926,7 @@ static int vmload_interception(struct vcpu_svm *svm) |
1382 | static int vmsave_interception(struct vcpu_svm *svm) |
1383 | { |
1384 | struct vmcb *nested_vmcb; |
1385 | + struct page *page; |
1386 | |
1387 | if (nested_svm_check_permissions(svm)) |
1388 | return 1; |
1389 | @@ -1910,12 +1934,12 @@ static int vmsave_interception(struct vcpu_svm *svm) |
1390 | svm->next_rip = kvm_rip_read(&svm->vcpu) + 3; |
1391 | skip_emulated_instruction(&svm->vcpu); |
1392 | |
1393 | - nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, KM_USER0); |
1394 | + nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page); |
1395 | if (!nested_vmcb) |
1396 | return 1; |
1397 | |
1398 | nested_svm_vmloadsave(svm->vmcb, nested_vmcb); |
1399 | - nested_svm_unmap(nested_vmcb, KM_USER0); |
1400 | + nested_svm_unmap(page); |
1401 | |
1402 | return 1; |
1403 | } |
1404 | @@ -2511,6 +2535,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) |
1405 | { |
1406 | struct vcpu_svm *svm = to_svm(vcpu); |
1407 | |
1408 | + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) |
1409 | + return; |
1410 | + |
1411 | if (irr == -1) |
1412 | return; |
1413 | |
1414 | @@ -2568,13 +2595,11 @@ static void enable_irq_window(struct kvm_vcpu *vcpu) |
1415 | { |
1416 | struct vcpu_svm *svm = to_svm(vcpu); |
1417 | |
1418 | - nested_svm_intr(svm); |
1419 | - |
1420 | /* In case GIF=0 we can't rely on the CPU to tell us when |
1421 | * GIF becomes 1, because that's a separate STGI/VMRUN intercept. |
1422 | * The next time we get that intercept, this function will be |
1423 | * called again though and we'll get the vintr intercept. */ |
1424 | - if (gif_set(svm)) { |
1425 | + if (gif_set(svm) && nested_svm_intr(svm)) { |
1426 | svm_set_vintr(svm); |
1427 | svm_inject_irq(svm, 0x0); |
1428 | } |
1429 | @@ -2614,6 +2639,9 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu) |
1430 | { |
1431 | struct vcpu_svm *svm = to_svm(vcpu); |
1432 | |
1433 | + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) |
1434 | + return; |
1435 | + |
1436 | if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) { |
1437 | int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; |
1438 | kvm_set_cr8(vcpu, cr8); |
1439 | @@ -2625,6 +2653,9 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu) |
1440 | struct vcpu_svm *svm = to_svm(vcpu); |
1441 | u64 cr8; |
1442 | |
1443 | + if (is_nested(svm) && (vcpu->arch.hflags & HF_VINTR_MASK)) |
1444 | + return; |
1445 | + |
1446 | cr8 = kvm_get_cr8(vcpu); |
1447 | svm->vmcb->control.int_ctl &= ~V_TPR_MASK; |
1448 | svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; |
1449 | @@ -2879,6 +2910,20 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu) |
1450 | { |
1451 | } |
1452 | |
1453 | +static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) |
1454 | +{ |
1455 | + switch (func) { |
1456 | + case 0x8000000A: |
1457 | + entry->eax = 1; /* SVM revision 1 */ |
1458 | + entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper |
1459 | + ASID emulation to nested SVM */ |
1460 | + entry->ecx = 0; /* Reserved */ |
1461 | + entry->edx = 0; /* Do not support any additional features */ |
1462 | + |
1463 | + break; |
1464 | + } |
1465 | +} |
1466 | + |
1467 | static const struct trace_print_flags svm_exit_reasons_str[] = { |
1468 | { SVM_EXIT_READ_CR0, "read_cr0" }, |
1469 | { SVM_EXIT_READ_CR3, "read_cr3" }, |
1470 | @@ -3023,6 +3068,8 @@ static struct kvm_x86_ops svm_x86_ops = { |
1471 | .cpuid_update = svm_cpuid_update, |
1472 | |
1473 | .rdtscp_supported = svm_rdtscp_supported, |
1474 | + |
1475 | + .set_supported_cpuid = svm_set_supported_cpuid, |
1476 | }; |
1477 | |
1478 | static int __init svm_init(void) |
1479 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
1480 | index 2f8db0e..3c86c42 100644 |
1481 | --- a/arch/x86/kvm/vmx.c |
1482 | +++ b/arch/x86/kvm/vmx.c |
1483 | @@ -27,6 +27,7 @@ |
1484 | #include <linux/moduleparam.h> |
1485 | #include <linux/ftrace_event.h> |
1486 | #include <linux/slab.h> |
1487 | +#include <linux/tboot.h> |
1488 | #include "kvm_cache_regs.h" |
1489 | #include "x86.h" |
1490 | |
1491 | @@ -1176,9 +1177,16 @@ static __init int vmx_disabled_by_bios(void) |
1492 | u64 msr; |
1493 | |
1494 | rdmsrl(MSR_IA32_FEATURE_CONTROL, msr); |
1495 | - return (msr & (FEATURE_CONTROL_LOCKED | |
1496 | - FEATURE_CONTROL_VMXON_ENABLED)) |
1497 | - == FEATURE_CONTROL_LOCKED; |
1498 | + if (msr & FEATURE_CONTROL_LOCKED) { |
1499 | + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX) |
1500 | + && tboot_enabled()) |
1501 | + return 1; |
1502 | + if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX) |
1503 | + && !tboot_enabled()) |
1504 | + return 1; |
1505 | + } |
1506 | + |
1507 | + return 0; |
1508 | /* locked but not enabled */ |
1509 | } |
1510 | |
1511 | @@ -1186,21 +1194,23 @@ static int hardware_enable(void *garbage) |
1512 | { |
1513 | int cpu = raw_smp_processor_id(); |
1514 | u64 phys_addr = __pa(per_cpu(vmxarea, cpu)); |
1515 | - u64 old; |
1516 | + u64 old, test_bits; |
1517 | |
1518 | if (read_cr4() & X86_CR4_VMXE) |
1519 | return -EBUSY; |
1520 | |
1521 | INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu)); |
1522 | rdmsrl(MSR_IA32_FEATURE_CONTROL, old); |
1523 | - if ((old & (FEATURE_CONTROL_LOCKED | |
1524 | - FEATURE_CONTROL_VMXON_ENABLED)) |
1525 | - != (FEATURE_CONTROL_LOCKED | |
1526 | - FEATURE_CONTROL_VMXON_ENABLED)) |
1527 | + |
1528 | + test_bits = FEATURE_CONTROL_LOCKED; |
1529 | + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; |
1530 | + if (tboot_enabled()) |
1531 | + test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX; |
1532 | + |
1533 | + if ((old & test_bits) != test_bits) { |
1534 | /* enable and lock */ |
1535 | - wrmsrl(MSR_IA32_FEATURE_CONTROL, old | |
1536 | - FEATURE_CONTROL_LOCKED | |
1537 | - FEATURE_CONTROL_VMXON_ENABLED); |
1538 | + wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits); |
1539 | + } |
1540 | write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */ |
1541 | asm volatile (ASM_VMX_VMXON_RAX |
1542 | : : "a"(&phys_addr), "m"(phys_addr) |
1543 | @@ -4115,6 +4125,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) |
1544 | } |
1545 | } |
1546 | |
1547 | +static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry) |
1548 | +{ |
1549 | +} |
1550 | + |
1551 | static struct kvm_x86_ops vmx_x86_ops = { |
1552 | .cpu_has_kvm_support = cpu_has_kvm_support, |
1553 | .disabled_by_bios = vmx_disabled_by_bios, |
1554 | @@ -4186,6 +4200,8 @@ static struct kvm_x86_ops vmx_x86_ops = { |
1555 | .cpuid_update = vmx_cpuid_update, |
1556 | |
1557 | .rdtscp_supported = vmx_rdtscp_supported, |
1558 | + |
1559 | + .set_supported_cpuid = vmx_set_supported_cpuid, |
1560 | }; |
1561 | |
1562 | static int __init vmx_init(void) |
1563 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
1564 | index c4f35b5..a6517a2 100644 |
1565 | --- a/arch/x86/kvm/x86.c |
1566 | +++ b/arch/x86/kvm/x86.c |
1567 | @@ -484,7 +484,7 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0); |
1568 | |
1569 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) |
1570 | { |
1571 | - kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0ful) | (msw & 0x0f)); |
1572 | + kvm_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~0x0eul) | (msw & 0x0f)); |
1573 | } |
1574 | EXPORT_SYMBOL_GPL(kvm_lmsw); |
1575 | |
1576 | @@ -624,48 +624,42 @@ static u32 emulated_msrs[] = { |
1577 | MSR_IA32_MISC_ENABLE, |
1578 | }; |
1579 | |
1580 | -static void set_efer(struct kvm_vcpu *vcpu, u64 efer) |
1581 | +static int set_efer(struct kvm_vcpu *vcpu, u64 efer) |
1582 | { |
1583 | - if (efer & efer_reserved_bits) { |
1584 | - kvm_inject_gp(vcpu, 0); |
1585 | - return; |
1586 | - } |
1587 | + if (efer & efer_reserved_bits) |
1588 | + return 1; |
1589 | |
1590 | if (is_paging(vcpu) |
1591 | - && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) { |
1592 | - kvm_inject_gp(vcpu, 0); |
1593 | - return; |
1594 | - } |
1595 | + && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) |
1596 | + return 1; |
1597 | |
1598 | if (efer & EFER_FFXSR) { |
1599 | struct kvm_cpuid_entry2 *feat; |
1600 | |
1601 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
1602 | - if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) { |
1603 | - kvm_inject_gp(vcpu, 0); |
1604 | - return; |
1605 | - } |
1606 | + if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) |
1607 | + return 1; |
1608 | } |
1609 | |
1610 | if (efer & EFER_SVME) { |
1611 | struct kvm_cpuid_entry2 *feat; |
1612 | |
1613 | feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
1614 | - if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) { |
1615 | - kvm_inject_gp(vcpu, 0); |
1616 | - return; |
1617 | - } |
1618 | + if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) |
1619 | + return 1; |
1620 | } |
1621 | |
1622 | - kvm_x86_ops->set_efer(vcpu, efer); |
1623 | - |
1624 | efer &= ~EFER_LMA; |
1625 | efer |= vcpu->arch.efer & EFER_LMA; |
1626 | |
1627 | + kvm_x86_ops->set_efer(vcpu, efer); |
1628 | + |
1629 | vcpu->arch.efer = efer; |
1630 | |
1631 | vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled; |
1632 | kvm_mmu_reset_context(vcpu); |
1633 | + |
1634 | + return 0; |
1635 | } |
1636 | |
1637 | void kvm_enable_efer_bits(u64 mask) |
1638 | @@ -695,14 +689,22 @@ static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data) |
1639 | |
1640 | static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) |
1641 | { |
1642 | - static int version; |
1643 | + int version; |
1644 | + int r; |
1645 | struct pvclock_wall_clock wc; |
1646 | struct timespec boot; |
1647 | |
1648 | if (!wall_clock) |
1649 | return; |
1650 | |
1651 | - version++; |
1652 | + r = kvm_read_guest(kvm, wall_clock, &version, sizeof(version)); |
1653 | + if (r) |
1654 | + return; |
1655 | + |
1656 | + if (version & 1) |
1657 | + ++version; /* first time write, random junk */ |
1658 | + |
1659 | + ++version; |
1660 | |
1661 | kvm_write_guest(kvm, wall_clock, &version, sizeof(version)); |
1662 | |
1663 | @@ -1086,8 +1088,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) |
1664 | { |
1665 | switch (msr) { |
1666 | case MSR_EFER: |
1667 | - set_efer(vcpu, data); |
1668 | - break; |
1669 | + return set_efer(vcpu, data); |
1670 | case MSR_K7_HWCR: |
1671 | data &= ~(u64)0x40; /* ignore flush filter disable */ |
1672 | if (data != 0) { |
1673 | @@ -1768,6 +1769,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, |
1674 | { |
1675 | int r; |
1676 | |
1677 | + vcpu_load(vcpu); |
1678 | r = -E2BIG; |
1679 | if (cpuid->nent < vcpu->arch.cpuid_nent) |
1680 | goto out; |
1681 | @@ -1779,6 +1781,7 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu, |
1682 | |
1683 | out: |
1684 | cpuid->nent = vcpu->arch.cpuid_nent; |
1685 | + vcpu_put(vcpu); |
1686 | return r; |
1687 | } |
1688 | |
1689 | @@ -1917,6 +1920,9 @@ static void do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, |
1690 | entry->ecx &= kvm_supported_word6_x86_features; |
1691 | break; |
1692 | } |
1693 | + |
1694 | + kvm_x86_ops->set_supported_cpuid(function, entry); |
1695 | + |
1696 | put_cpu(); |
1697 | } |
1698 | |
1699 | @@ -2031,6 +2037,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, |
1700 | int r; |
1701 | unsigned bank_num = mcg_cap & 0xff, bank; |
1702 | |
1703 | + vcpu_load(vcpu); |
1704 | r = -EINVAL; |
1705 | if (!bank_num || bank_num >= KVM_MAX_MCE_BANKS) |
1706 | goto out; |
1707 | @@ -2045,6 +2052,7 @@ static int kvm_vcpu_ioctl_x86_setup_mce(struct kvm_vcpu *vcpu, |
1708 | for (bank = 0; bank < bank_num; bank++) |
1709 | vcpu->arch.mce_banks[bank*4] = ~(u64)0; |
1710 | out: |
1711 | + vcpu_put(vcpu); |
1712 | return r; |
1713 | } |
1714 | |
1715 | @@ -2312,7 +2320,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, |
1716 | r = -EFAULT; |
1717 | if (copy_from_user(&mce, argp, sizeof mce)) |
1718 | goto out; |
1719 | + vcpu_load(vcpu); |
1720 | r = kvm_vcpu_ioctl_x86_set_mce(vcpu, &mce); |
1721 | + vcpu_put(vcpu); |
1722 | break; |
1723 | } |
1724 | case KVM_GET_VCPU_EVENTS: { |
1725 | diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c |
1726 | index 2c505ee..f1fb411 100644 |
1727 | --- a/arch/x86/oprofile/nmi_int.c |
1728 | +++ b/arch/x86/oprofile/nmi_int.c |
1729 | @@ -95,7 +95,10 @@ static void nmi_cpu_save_registers(struct op_msrs *msrs) |
1730 | static void nmi_cpu_start(void *dummy) |
1731 | { |
1732 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); |
1733 | - model->start(msrs); |
1734 | + if (!msrs->controls) |
1735 | + WARN_ON_ONCE(1); |
1736 | + else |
1737 | + model->start(msrs); |
1738 | } |
1739 | |
1740 | static int nmi_start(void) |
1741 | @@ -107,7 +110,10 @@ static int nmi_start(void) |
1742 | static void nmi_cpu_stop(void *dummy) |
1743 | { |
1744 | struct op_msrs const *msrs = &__get_cpu_var(cpu_msrs); |
1745 | - model->stop(msrs); |
1746 | + if (!msrs->controls) |
1747 | + WARN_ON_ONCE(1); |
1748 | + else |
1749 | + model->stop(msrs); |
1750 | } |
1751 | |
1752 | static void nmi_stop(void) |
1753 | diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c |
1754 | index 987267f..a9c6611 100644 |
1755 | --- a/arch/x86/xen/suspend.c |
1756 | +++ b/arch/x86/xen/suspend.c |
1757 | @@ -60,6 +60,6 @@ static void xen_vcpu_notify_restore(void *data) |
1758 | |
1759 | void xen_arch_resume(void) |
1760 | { |
1761 | - smp_call_function(xen_vcpu_notify_restore, |
1762 | - (void *)CLOCK_EVT_NOTIFY_RESUME, 1); |
1763 | + on_each_cpu(xen_vcpu_notify_restore, |
1764 | + (void *)CLOCK_EVT_NOTIFY_RESUME, 1); |
1765 | } |
1766 | diff --git a/arch/xtensa/include/asm/cache.h b/arch/xtensa/include/asm/cache.h |
1767 | index f04c989..ed8cd3c 100644 |
1768 | --- a/arch/xtensa/include/asm/cache.h |
1769 | +++ b/arch/xtensa/include/asm/cache.h |
1770 | @@ -29,5 +29,6 @@ |
1771 | # define CACHE_WAY_SIZE ICACHE_WAY_SIZE |
1772 | #endif |
1773 | |
1774 | +#define ARCH_KMALLOC_MINALIGN L1_CACHE_BYTES |
1775 | |
1776 | #endif /* _XTENSA_CACHE_H */ |
1777 | diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c |
1778 | index 5f127cf..002c0ce 100644 |
1779 | --- a/block/cfq-iosched.c |
1780 | +++ b/block/cfq-iosched.c |
1781 | @@ -2503,15 +2503,10 @@ static void cfq_free_io_context(struct io_context *ioc) |
1782 | __call_for_each_cic(ioc, cic_free_func); |
1783 | } |
1784 | |
1785 | -static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
1786 | +static void cfq_put_cooperator(struct cfq_queue *cfqq) |
1787 | { |
1788 | struct cfq_queue *__cfqq, *next; |
1789 | |
1790 | - if (unlikely(cfqq == cfqd->active_queue)) { |
1791 | - __cfq_slice_expired(cfqd, cfqq, 0); |
1792 | - cfq_schedule_dispatch(cfqd); |
1793 | - } |
1794 | - |
1795 | /* |
1796 | * If this queue was scheduled to merge with another queue, be |
1797 | * sure to drop the reference taken on that queue (and others in |
1798 | @@ -2527,6 +2522,16 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
1799 | cfq_put_queue(__cfqq); |
1800 | __cfqq = next; |
1801 | } |
1802 | +} |
1803 | + |
1804 | +static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) |
1805 | +{ |
1806 | + if (unlikely(cfqq == cfqd->active_queue)) { |
1807 | + __cfq_slice_expired(cfqd, cfqq, 0); |
1808 | + cfq_schedule_dispatch(cfqd); |
1809 | + } |
1810 | + |
1811 | + cfq_put_cooperator(cfqq); |
1812 | |
1813 | cfq_put_queue(cfqq); |
1814 | } |
1815 | @@ -3470,6 +3475,9 @@ split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq) |
1816 | } |
1817 | |
1818 | cic_set_cfqq(cic, NULL, 1); |
1819 | + |
1820 | + cfq_put_cooperator(cfqq); |
1821 | + |
1822 | cfq_put_queue(cfqq); |
1823 | return NULL; |
1824 | } |
1825 | diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c |
1826 | index fc2f26b..c5fef01 100644 |
1827 | --- a/drivers/acpi/video_detect.c |
1828 | +++ b/drivers/acpi/video_detect.c |
1829 | @@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str) |
1830 | ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; |
1831 | if (!strcmp("video", str)) |
1832 | acpi_video_support |= |
1833 | - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; |
1834 | + ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO; |
1835 | } |
1836 | return 1; |
1837 | } |
1838 | diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c |
1839 | index 49cffb6..5abab5d 100644 |
1840 | --- a/drivers/ata/libata-core.c |
1841 | +++ b/drivers/ata/libata-core.c |
1842 | @@ -160,6 +160,10 @@ int libata_allow_tpm = 0; |
1843 | module_param_named(allow_tpm, libata_allow_tpm, int, 0444); |
1844 | MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)"); |
1845 | |
1846 | +static int atapi_an; |
1847 | +module_param(atapi_an, int, 0444); |
1848 | +MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)"); |
1849 | + |
1850 | MODULE_AUTHOR("Jeff Garzik"); |
1851 | MODULE_DESCRIPTION("Library module for ATA devices"); |
1852 | MODULE_LICENSE("GPL"); |
1853 | @@ -2572,7 +2576,8 @@ int ata_dev_configure(struct ata_device *dev) |
1854 | * to enable ATAPI AN to discern between PHY status |
1855 | * changed notifications and ATAPI ANs. |
1856 | */ |
1857 | - if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && |
1858 | + if (atapi_an && |
1859 | + (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && |
1860 | (!sata_pmp_attached(ap) || |
1861 | sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { |
1862 | unsigned int err_mask; |
1863 | diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c |
1864 | index e3877b6..4723648 100644 |
1865 | --- a/drivers/ata/libata-sff.c |
1866 | +++ b/drivers/ata/libata-sff.c |
1867 | @@ -894,7 +894,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) |
1868 | do_write); |
1869 | } |
1870 | |
1871 | - if (!do_write) |
1872 | + if (!do_write && !PageSlab(page)) |
1873 | flush_dcache_page(page); |
1874 | |
1875 | qc->curbytes += qc->sect_size; |
1876 | diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c |
1877 | index 2a98b09..7f3d179 100644 |
1878 | --- a/drivers/ata/sata_nv.c |
1879 | +++ b/drivers/ata/sata_nv.c |
1880 | @@ -1674,7 +1674,6 @@ static void nv_mcp55_freeze(struct ata_port *ap) |
1881 | mask = readl(mmio_base + NV_INT_ENABLE_MCP55); |
1882 | mask &= ~(NV_INT_ALL_MCP55 << shift); |
1883 | writel(mask, mmio_base + NV_INT_ENABLE_MCP55); |
1884 | - ata_sff_freeze(ap); |
1885 | } |
1886 | |
1887 | static void nv_mcp55_thaw(struct ata_port *ap) |
1888 | @@ -1688,7 +1687,6 @@ static void nv_mcp55_thaw(struct ata_port *ap) |
1889 | mask = readl(mmio_base + NV_INT_ENABLE_MCP55); |
1890 | mask |= (NV_INT_MASK_MCP55 << shift); |
1891 | writel(mask, mmio_base + NV_INT_ENABLE_MCP55); |
1892 | - ata_sff_thaw(ap); |
1893 | } |
1894 | |
1895 | static void nv_adma_error_handler(struct ata_port *ap) |
1896 | @@ -2479,8 +2477,7 @@ static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1897 | } |
1898 | |
1899 | pci_set_master(pdev); |
1900 | - return ata_host_activate(host, pdev->irq, ipriv->irq_handler, |
1901 | - IRQF_SHARED, ipriv->sht); |
1902 | + return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht); |
1903 | } |
1904 | |
1905 | #ifdef CONFIG_PM |
1906 | diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c |
1907 | index 08f6549..0553455 100644 |
1908 | --- a/drivers/ata/sata_via.c |
1909 | +++ b/drivers/ata/sata_via.c |
1910 | @@ -575,6 +575,19 @@ static void svia_configure(struct pci_dev *pdev) |
1911 | tmp8 |= NATIVE_MODE_ALL; |
1912 | pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); |
1913 | } |
1914 | + |
1915 | + /* |
1916 | + * vt6421 has problems talking to some drives. The following |
1917 | + * is the magic fix from Joseph Chan <JosephChan@via.com.tw>. |
1918 | + * Please add proper documentation if possible. |
1919 | + * |
1920 | + * https://bugzilla.kernel.org/show_bug.cgi?id=15173 |
1921 | + */ |
1922 | + if (pdev->device == 0x3249) { |
1923 | + pci_read_config_byte(pdev, 0x52, &tmp8); |
1924 | + tmp8 |= 1 << 2; |
1925 | + pci_write_config_byte(pdev, 0x52, tmp8); |
1926 | + } |
1927 | } |
1928 | |
1929 | static int svia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1930 | diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c |
1931 | index f35719a..251acea 100644 |
1932 | --- a/drivers/base/cpu.c |
1933 | +++ b/drivers/base/cpu.c |
1934 | @@ -186,7 +186,7 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, |
1935 | /* display offline cpus < nr_cpu_ids */ |
1936 | if (!alloc_cpumask_var(&offline, GFP_KERNEL)) |
1937 | return -ENOMEM; |
1938 | - cpumask_complement(offline, cpu_online_mask); |
1939 | + cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask); |
1940 | n = cpulist_scnprintf(buf, len, offline); |
1941 | free_cpumask_var(offline); |
1942 | |
1943 | diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c |
1944 | index 4462b11..9ead05d 100644 |
1945 | --- a/drivers/char/ipmi/ipmi_si_intf.c |
1946 | +++ b/drivers/char/ipmi/ipmi_si_intf.c |
1947 | @@ -314,9 +314,14 @@ static void deliver_recv_msg(struct smi_info *smi_info, |
1948 | { |
1949 | /* Deliver the message to the upper layer with the lock |
1950 | released. */ |
1951 | - spin_unlock(&(smi_info->si_lock)); |
1952 | - ipmi_smi_msg_received(smi_info->intf, msg); |
1953 | - spin_lock(&(smi_info->si_lock)); |
1954 | + |
1955 | + if (smi_info->run_to_completion) { |
1956 | + ipmi_smi_msg_received(smi_info->intf, msg); |
1957 | + } else { |
1958 | + spin_unlock(&(smi_info->si_lock)); |
1959 | + ipmi_smi_msg_received(smi_info->intf, msg); |
1960 | + spin_lock(&(smi_info->si_lock)); |
1961 | + } |
1962 | } |
1963 | |
1964 | static void return_hosed_msg(struct smi_info *smi_info, int cCode) |
1965 | diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c |
1966 | index 744f748..a860ec0 100644 |
1967 | --- a/drivers/clocksource/sh_cmt.c |
1968 | +++ b/drivers/clocksource/sh_cmt.c |
1969 | @@ -413,18 +413,10 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) |
1970 | static int sh_cmt_clocksource_enable(struct clocksource *cs) |
1971 | { |
1972 | struct sh_cmt_priv *p = cs_to_sh_cmt(cs); |
1973 | - int ret; |
1974 | |
1975 | p->total_cycles = 0; |
1976 | |
1977 | - ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); |
1978 | - if (ret) |
1979 | - return ret; |
1980 | - |
1981 | - /* TODO: calculate good shift from rate and counter bit width */ |
1982 | - cs->shift = 0; |
1983 | - cs->mult = clocksource_hz2mult(p->rate, cs->shift); |
1984 | - return 0; |
1985 | + return sh_cmt_start(p, FLAG_CLOCKSOURCE); |
1986 | } |
1987 | |
1988 | static void sh_cmt_clocksource_disable(struct clocksource *cs) |
1989 | @@ -451,7 +443,18 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, |
1990 | cs->resume = sh_cmt_clocksource_resume; |
1991 | cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); |
1992 | cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; |
1993 | + |
1994 | + /* clk_get_rate() needs an enabled clock */ |
1995 | + clk_enable(p->clk); |
1996 | + p->rate = clk_get_rate(p->clk) / (p->width == 16) ? 512 : 8; |
1997 | + clk_disable(p->clk); |
1998 | + |
1999 | + /* TODO: calculate good shift from rate and counter bit width */ |
2000 | + cs->shift = 10; |
2001 | + cs->mult = clocksource_hz2mult(p->rate, cs->shift); |
2002 | + |
2003 | pr_info("sh_cmt: %s used as clock source\n", cs->name); |
2004 | + |
2005 | clocksource_register(cs); |
2006 | return 0; |
2007 | } |
2008 | diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c |
2009 | index fc9ff1e..7a24160 100644 |
2010 | --- a/drivers/clocksource/sh_tmu.c |
2011 | +++ b/drivers/clocksource/sh_tmu.c |
2012 | @@ -200,16 +200,8 @@ static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) |
2013 | static int sh_tmu_clocksource_enable(struct clocksource *cs) |
2014 | { |
2015 | struct sh_tmu_priv *p = cs_to_sh_tmu(cs); |
2016 | - int ret; |
2017 | - |
2018 | - ret = sh_tmu_enable(p); |
2019 | - if (ret) |
2020 | - return ret; |
2021 | |
2022 | - /* TODO: calculate good shift from rate and counter bit width */ |
2023 | - cs->shift = 10; |
2024 | - cs->mult = clocksource_hz2mult(p->rate, cs->shift); |
2025 | - return 0; |
2026 | + return sh_tmu_enable(p); |
2027 | } |
2028 | |
2029 | static void sh_tmu_clocksource_disable(struct clocksource *cs) |
2030 | @@ -229,6 +221,16 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p, |
2031 | cs->disable = sh_tmu_clocksource_disable; |
2032 | cs->mask = CLOCKSOURCE_MASK(32); |
2033 | cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; |
2034 | + |
2035 | + /* clk_get_rate() needs an enabled clock */ |
2036 | + clk_enable(p->clk); |
2037 | + /* channel will be configured at parent clock / 4 */ |
2038 | + p->rate = clk_get_rate(p->clk) / 4; |
2039 | + clk_disable(p->clk); |
2040 | + /* TODO: calculate good shift from rate and counter bit width */ |
2041 | + cs->shift = 10; |
2042 | + cs->mult = clocksource_hz2mult(p->rate, cs->shift); |
2043 | + |
2044 | pr_info("sh_tmu: %s used as clock source\n", cs->name); |
2045 | clocksource_register(cs); |
2046 | return 0; |
2047 | diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c |
2048 | index 5045156..991447b 100644 |
2049 | --- a/drivers/firewire/core-card.c |
2050 | +++ b/drivers/firewire/core-card.c |
2051 | @@ -231,7 +231,7 @@ void fw_schedule_bm_work(struct fw_card *card, unsigned long delay) |
2052 | static void fw_card_bm_work(struct work_struct *work) |
2053 | { |
2054 | struct fw_card *card = container_of(work, struct fw_card, work.work); |
2055 | - struct fw_device *root_device; |
2056 | + struct fw_device *root_device, *irm_device; |
2057 | struct fw_node *root_node; |
2058 | unsigned long flags; |
2059 | int root_id, new_root_id, irm_id, local_id; |
2060 | @@ -239,6 +239,7 @@ static void fw_card_bm_work(struct work_struct *work) |
2061 | bool do_reset = false; |
2062 | bool root_device_is_running; |
2063 | bool root_device_is_cmc; |
2064 | + bool irm_is_1394_1995_only; |
2065 | |
2066 | spin_lock_irqsave(&card->lock, flags); |
2067 | |
2068 | @@ -248,12 +249,18 @@ static void fw_card_bm_work(struct work_struct *work) |
2069 | } |
2070 | |
2071 | generation = card->generation; |
2072 | + |
2073 | root_node = card->root_node; |
2074 | fw_node_get(root_node); |
2075 | root_device = root_node->data; |
2076 | root_device_is_running = root_device && |
2077 | atomic_read(&root_device->state) == FW_DEVICE_RUNNING; |
2078 | root_device_is_cmc = root_device && root_device->cmc; |
2079 | + |
2080 | + irm_device = card->irm_node->data; |
2081 | + irm_is_1394_1995_only = irm_device && irm_device->config_rom && |
2082 | + (irm_device->config_rom[2] & 0x000000f0) == 0; |
2083 | + |
2084 | root_id = root_node->node_id; |
2085 | irm_id = card->irm_node->node_id; |
2086 | local_id = card->local_node->node_id; |
2087 | @@ -276,8 +283,15 @@ static void fw_card_bm_work(struct work_struct *work) |
2088 | |
2089 | if (!card->irm_node->link_on) { |
2090 | new_root_id = local_id; |
2091 | - fw_notify("IRM has link off, making local node (%02x) root.\n", |
2092 | - new_root_id); |
2093 | + fw_notify("%s, making local node (%02x) root.\n", |
2094 | + "IRM has link off", new_root_id); |
2095 | + goto pick_me; |
2096 | + } |
2097 | + |
2098 | + if (irm_is_1394_1995_only) { |
2099 | + new_root_id = local_id; |
2100 | + fw_notify("%s, making local node (%02x) root.\n", |
2101 | + "IRM is not 1394a compliant", new_root_id); |
2102 | goto pick_me; |
2103 | } |
2104 | |
2105 | @@ -316,8 +330,8 @@ static void fw_card_bm_work(struct work_struct *work) |
2106 | * root, and thus, IRM. |
2107 | */ |
2108 | new_root_id = local_id; |
2109 | - fw_notify("BM lock failed, making local node (%02x) root.\n", |
2110 | - new_root_id); |
2111 | + fw_notify("%s, making local node (%02x) root.\n", |
2112 | + "BM lock failed", new_root_id); |
2113 | goto pick_me; |
2114 | } |
2115 | } else if (card->bm_generation != generation) { |
2116 | diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c |
2117 | index 18f41d7..10348d3 100644 |
2118 | --- a/drivers/gpu/drm/drm_edid.c |
2119 | +++ b/drivers/gpu/drm/drm_edid.c |
2120 | @@ -335,7 +335,7 @@ static struct drm_display_mode drm_dmt_modes[] = { |
2121 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
2122 | /* 1024x768@85Hz */ |
2123 | { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, |
2124 | - 1072, 1376, 0, 768, 769, 772, 808, 0, |
2125 | + 1168, 1376, 0, 768, 769, 772, 808, 0, |
2126 | DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
2127 | /* 1152x864@75Hz */ |
2128 | { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, |
2129 | diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c |
2130 | index ef3d91d..691701a 100644 |
2131 | --- a/drivers/gpu/drm/i915/i915_gem.c |
2132 | +++ b/drivers/gpu/drm/i915/i915_gem.c |
2133 | @@ -2688,6 +2688,14 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) |
2134 | return -EINVAL; |
2135 | } |
2136 | |
2137 | + /* If the object is bigger than the entire aperture, reject it early |
2138 | + * before evicting everything in a vain attempt to find space. |
2139 | + */ |
2140 | + if (obj->size > dev->gtt_total) { |
2141 | + DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
2142 | + return -E2BIG; |
2143 | + } |
2144 | + |
2145 | search_free: |
2146 | free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, |
2147 | obj->size, alignment, 0); |
2148 | @@ -4231,6 +4239,17 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) |
2149 | int ret; |
2150 | |
2151 | i915_verify_inactive(dev, __FILE__, __LINE__); |
2152 | + |
2153 | + if (obj_priv->gtt_space != NULL) { |
2154 | + if (alignment == 0) |
2155 | + alignment = i915_gem_get_gtt_alignment(obj); |
2156 | + if (obj_priv->gtt_offset & (alignment - 1)) { |
2157 | + ret = i915_gem_object_unbind(obj); |
2158 | + if (ret) |
2159 | + return ret; |
2160 | + } |
2161 | + } |
2162 | + |
2163 | if (obj_priv->gtt_space == NULL) { |
2164 | ret = i915_gem_object_bind_to_gtt(obj, alignment); |
2165 | if (ret) |
2166 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
2167 | index c7502b6..70765cf 100644 |
2168 | --- a/drivers/gpu/drm/i915/intel_display.c |
2169 | +++ b/drivers/gpu/drm/i915/intel_display.c |
2170 | @@ -4155,12 +4155,6 @@ void intel_finish_page_flip(struct drm_device *dev, int pipe) |
2171 | spin_lock_irqsave(&dev->event_lock, flags); |
2172 | work = intel_crtc->unpin_work; |
2173 | if (work == NULL || !work->pending) { |
2174 | - if (work && !work->pending) { |
2175 | - obj_priv = to_intel_bo(work->pending_flip_obj); |
2176 | - DRM_DEBUG_DRIVER("flip finish: %p (%d) not pending?\n", |
2177 | - obj_priv, |
2178 | - atomic_read(&obj_priv->pending_flip)); |
2179 | - } |
2180 | spin_unlock_irqrestore(&dev->event_lock, flags); |
2181 | return; |
2182 | } |
2183 | diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c |
2184 | index 77e40cf..7c28ff1 100644 |
2185 | --- a/drivers/gpu/drm/i915/intel_dp.c |
2186 | +++ b/drivers/gpu/drm/i915/intel_dp.c |
2187 | @@ -1180,16 +1180,6 @@ intel_dp_detect(struct drm_connector *connector) |
2188 | if (HAS_PCH_SPLIT(dev)) |
2189 | return ironlake_dp_detect(connector); |
2190 | |
2191 | - temp = I915_READ(PORT_HOTPLUG_EN); |
2192 | - |
2193 | - I915_WRITE(PORT_HOTPLUG_EN, |
2194 | - temp | |
2195 | - DPB_HOTPLUG_INT_EN | |
2196 | - DPC_HOTPLUG_INT_EN | |
2197 | - DPD_HOTPLUG_INT_EN); |
2198 | - |
2199 | - POSTING_READ(PORT_HOTPLUG_EN); |
2200 | - |
2201 | switch (dp_priv->output_reg) { |
2202 | case DP_B: |
2203 | bit = DPB_HOTPLUG_INT_STATUS; |
2204 | diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h |
2205 | index 034218c..4c7204a 100644 |
2206 | --- a/drivers/gpu/drm/radeon/radeon.h |
2207 | +++ b/drivers/gpu/drm/radeon/radeon.h |
2208 | @@ -566,6 +566,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p, |
2209 | */ |
2210 | int radeon_agp_init(struct radeon_device *rdev); |
2211 | void radeon_agp_resume(struct radeon_device *rdev); |
2212 | +void radeon_agp_suspend(struct radeon_device *rdev); |
2213 | void radeon_agp_fini(struct radeon_device *rdev); |
2214 | |
2215 | |
2216 | diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c |
2217 | index 28e473f..f40dfb7 100644 |
2218 | --- a/drivers/gpu/drm/radeon/radeon_agp.c |
2219 | +++ b/drivers/gpu/drm/radeon/radeon_agp.c |
2220 | @@ -270,3 +270,8 @@ void radeon_agp_fini(struct radeon_device *rdev) |
2221 | } |
2222 | #endif |
2223 | } |
2224 | + |
2225 | +void radeon_agp_suspend(struct radeon_device *rdev) |
2226 | +{ |
2227 | + radeon_agp_fini(rdev); |
2228 | +} |
2229 | diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c |
2230 | index 9916d82..1a4fa9b 100644 |
2231 | --- a/drivers/gpu/drm/radeon/radeon_atombios.c |
2232 | +++ b/drivers/gpu/drm/radeon/radeon_atombios.c |
2233 | @@ -530,6 +530,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) |
2234 | } |
2235 | |
2236 | /* look up gpio for ddc, hpd */ |
2237 | + ddc_bus.valid = false; |
2238 | + hpd.hpd = RADEON_HPD_NONE; |
2239 | if ((le16_to_cpu(path->usDeviceTag) & |
2240 | (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) { |
2241 | for (j = 0; j < con_obj->ucNumberOfObjects; j++) { |
2242 | @@ -585,9 +587,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) |
2243 | break; |
2244 | } |
2245 | } |
2246 | - } else { |
2247 | - hpd.hpd = RADEON_HPD_NONE; |
2248 | - ddc_bus.valid = false; |
2249 | } |
2250 | |
2251 | /* needed for aux chan transactions */ |
2252 | @@ -1174,7 +1173,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct |
2253 | lvds->native_mode.vtotal = lvds->native_mode.vdisplay + |
2254 | le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time); |
2255 | lvds->native_mode.vsync_start = lvds->native_mode.vdisplay + |
2256 | - le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); |
2257 | + le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset); |
2258 | lvds->native_mode.vsync_end = lvds->native_mode.vsync_start + |
2259 | le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth); |
2260 | lvds->panel_pwr_delay = |
2261 | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c |
2262 | index 7b629e3..ed6a724 100644 |
2263 | --- a/drivers/gpu/drm/radeon/radeon_device.c |
2264 | +++ b/drivers/gpu/drm/radeon/radeon_device.c |
2265 | @@ -748,6 +748,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) |
2266 | /* evict remaining vram memory */ |
2267 | radeon_bo_evict_vram(rdev); |
2268 | |
2269 | + radeon_agp_suspend(rdev); |
2270 | + |
2271 | pci_save_state(dev->pdev); |
2272 | if (state.event == PM_EVENT_SUSPEND) { |
2273 | /* Shut down the device */ |
2274 | diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c |
2275 | index bb1c122..c2848e0 100644 |
2276 | --- a/drivers/gpu/drm/radeon/radeon_display.c |
2277 | +++ b/drivers/gpu/drm/radeon/radeon_display.c |
2278 | @@ -978,8 +978,11 @@ void radeon_update_display_priority(struct radeon_device *rdev) |
2279 | /* set display priority to high for r3xx, rv515 chips |
2280 | * this avoids flickering due to underflow to the |
2281 | * display controllers during heavy acceleration. |
2282 | + * Don't force high on rs4xx igp chips as it seems to |
2283 | + * affect the sound card. See kernel bug 15982. |
2284 | */ |
2285 | - if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) |
2286 | + if ((ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515)) && |
2287 | + !(rdev->flags & RADEON_IS_IGP)) |
2288 | rdev->disp_priority = 2; |
2289 | else |
2290 | rdev->disp_priority = 0; |
2291 | diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c |
2292 | index cc5316d..b3ba44c 100644 |
2293 | --- a/drivers/gpu/drm/radeon/radeon_state.c |
2294 | +++ b/drivers/gpu/drm/radeon/radeon_state.c |
2295 | @@ -900,9 +900,10 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev, |
2296 | flags |= RADEON_FRONT; |
2297 | } |
2298 | if (flags & (RADEON_DEPTH|RADEON_STENCIL)) { |
2299 | - if (!dev_priv->have_z_offset) |
2300 | + if (!dev_priv->have_z_offset) { |
2301 | printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n"); |
2302 | - flags &= ~(RADEON_DEPTH | RADEON_STENCIL); |
2303 | + flags &= ~(RADEON_DEPTH | RADEON_STENCIL); |
2304 | + } |
2305 | } |
2306 | |
2307 | if (flags & (RADEON_FRONT | RADEON_BACK)) { |
2308 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
2309 | index 143e788..0010efa 100644 |
2310 | --- a/drivers/hid/hid-core.c |
2311 | +++ b/drivers/hid/hid-core.c |
2312 | @@ -1305,6 +1305,7 @@ static const struct hid_device_id hid_blacklist[] = { |
2313 | { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, |
2314 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, |
2315 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, |
2316 | + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, |
2317 | { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, |
2318 | { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, |
2319 | { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, |
2320 | diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c |
2321 | index 62416e6..3975e03 100644 |
2322 | --- a/drivers/hid/hid-gyration.c |
2323 | +++ b/drivers/hid/hid-gyration.c |
2324 | @@ -73,6 +73,7 @@ static int gyration_event(struct hid_device *hdev, struct hid_field *field, |
2325 | static const struct hid_device_id gyration_devices[] = { |
2326 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, |
2327 | { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, |
2328 | + { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, |
2329 | { } |
2330 | }; |
2331 | MODULE_DEVICE_TABLE(hid, gyration_devices); |
2332 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
2333 | index 09d2764..b681cbf 100644 |
2334 | --- a/drivers/hid/hid-ids.h |
2335 | +++ b/drivers/hid/hid-ids.h |
2336 | @@ -267,6 +267,7 @@ |
2337 | #define USB_VENDOR_ID_GYRATION 0x0c16 |
2338 | #define USB_DEVICE_ID_GYRATION_REMOTE 0x0002 |
2339 | #define USB_DEVICE_ID_GYRATION_REMOTE_2 0x0003 |
2340 | +#define USB_DEVICE_ID_GYRATION_REMOTE_3 0x0008 |
2341 | |
2342 | #define USB_VENDOR_ID_HAPP 0x078b |
2343 | #define USB_DEVICE_ID_UGCI_DRIVING 0x0010 |
2344 | diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c |
2345 | index 65c232a..21d201b 100644 |
2346 | --- a/drivers/hwmon/ltc4245.c |
2347 | +++ b/drivers/hwmon/ltc4245.c |
2348 | @@ -45,9 +45,7 @@ enum ltc4245_cmd { |
2349 | LTC4245_VEEIN = 0x19, |
2350 | LTC4245_VEESENSE = 0x1a, |
2351 | LTC4245_VEEOUT = 0x1b, |
2352 | - LTC4245_GPIOADC1 = 0x1c, |
2353 | - LTC4245_GPIOADC2 = 0x1d, |
2354 | - LTC4245_GPIOADC3 = 0x1e, |
2355 | + LTC4245_GPIOADC = 0x1c, |
2356 | }; |
2357 | |
2358 | struct ltc4245_data { |
2359 | @@ -61,7 +59,7 @@ struct ltc4245_data { |
2360 | u8 cregs[0x08]; |
2361 | |
2362 | /* Voltage registers */ |
2363 | - u8 vregs[0x0f]; |
2364 | + u8 vregs[0x0d]; |
2365 | }; |
2366 | |
2367 | static struct ltc4245_data *ltc4245_update_device(struct device *dev) |
2368 | @@ -86,7 +84,7 @@ static struct ltc4245_data *ltc4245_update_device(struct device *dev) |
2369 | data->cregs[i] = val; |
2370 | } |
2371 | |
2372 | - /* Read voltage registers -- 0x10 to 0x1f */ |
2373 | + /* Read voltage registers -- 0x10 to 0x1c */ |
2374 | for (i = 0; i < ARRAY_SIZE(data->vregs); i++) { |
2375 | val = i2c_smbus_read_byte_data(client, i+0x10); |
2376 | if (unlikely(val < 0)) |
2377 | @@ -128,9 +126,7 @@ static int ltc4245_get_voltage(struct device *dev, u8 reg) |
2378 | case LTC4245_VEEOUT: |
2379 | voltage = regval * -55; |
2380 | break; |
2381 | - case LTC4245_GPIOADC1: |
2382 | - case LTC4245_GPIOADC2: |
2383 | - case LTC4245_GPIOADC3: |
2384 | + case LTC4245_GPIOADC: |
2385 | voltage = regval * 10; |
2386 | break; |
2387 | default: |
2388 | @@ -297,9 +293,7 @@ LTC4245_ALARM(in7_min_alarm, (1 << 2), LTC4245_FAULT2); |
2389 | LTC4245_ALARM(in8_min_alarm, (1 << 3), LTC4245_FAULT2); |
2390 | |
2391 | /* GPIO voltages */ |
2392 | -LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC1); |
2393 | -LTC4245_VOLTAGE(in10_input, LTC4245_GPIOADC2); |
2394 | -LTC4245_VOLTAGE(in11_input, LTC4245_GPIOADC3); |
2395 | +LTC4245_VOLTAGE(in9_input, LTC4245_GPIOADC); |
2396 | |
2397 | /* Power Consumption (virtual) */ |
2398 | LTC4245_POWER(power1_input, LTC4245_12VSENSE); |
2399 | @@ -342,8 +336,6 @@ static struct attribute *ltc4245_attributes[] = { |
2400 | &sensor_dev_attr_in8_min_alarm.dev_attr.attr, |
2401 | |
2402 | &sensor_dev_attr_in9_input.dev_attr.attr, |
2403 | - &sensor_dev_attr_in10_input.dev_attr.attr, |
2404 | - &sensor_dev_attr_in11_input.dev_attr.attr, |
2405 | |
2406 | &sensor_dev_attr_power1_input.dev_attr.attr, |
2407 | &sensor_dev_attr_power2_input.dev_attr.attr, |
2408 | diff --git a/drivers/md/linear.c b/drivers/md/linear.c |
2409 | index 09437e9..0a1042b 100644 |
2410 | --- a/drivers/md/linear.c |
2411 | +++ b/drivers/md/linear.c |
2412 | @@ -282,6 +282,7 @@ static int linear_stop (mddev_t *mddev) |
2413 | rcu_barrier(); |
2414 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
2415 | kfree(conf); |
2416 | + mddev->private = NULL; |
2417 | |
2418 | return 0; |
2419 | } |
2420 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
2421 | index cefd63d..336792e 100644 |
2422 | --- a/drivers/md/md.c |
2423 | +++ b/drivers/md/md.c |
2424 | @@ -508,9 +508,36 @@ static inline int mddev_trylock(mddev_t * mddev) |
2425 | return mutex_trylock(&mddev->reconfig_mutex); |
2426 | } |
2427 | |
2428 | -static inline void mddev_unlock(mddev_t * mddev) |
2429 | -{ |
2430 | - mutex_unlock(&mddev->reconfig_mutex); |
2431 | +static struct attribute_group md_redundancy_group; |
2432 | + |
2433 | +static void mddev_unlock(mddev_t * mddev) |
2434 | +{ |
2435 | + if (mddev->to_remove) { |
2436 | + /* These cannot be removed under reconfig_mutex as |
2437 | + * an access to the files will try to take reconfig_mutex |
2438 | + * while holding the file unremovable, which leads to |
2439 | + * a deadlock. |
2440 | + * So hold open_mutex instead - we are allowed to take |
2441 | + * it while holding reconfig_mutex, and md_run can |
2442 | + * use it to wait for the remove to complete. |
2443 | + */ |
2444 | + struct attribute_group *to_remove = mddev->to_remove; |
2445 | + mddev->to_remove = NULL; |
2446 | + mutex_lock(&mddev->open_mutex); |
2447 | + mutex_unlock(&mddev->reconfig_mutex); |
2448 | + |
2449 | + if (to_remove != &md_redundancy_group) |
2450 | + sysfs_remove_group(&mddev->kobj, to_remove); |
2451 | + if (mddev->pers == NULL || |
2452 | + mddev->pers->sync_request == NULL) { |
2453 | + sysfs_remove_group(&mddev->kobj, &md_redundancy_group); |
2454 | + if (mddev->sysfs_action) |
2455 | + sysfs_put(mddev->sysfs_action); |
2456 | + mddev->sysfs_action = NULL; |
2457 | + } |
2458 | + mutex_unlock(&mddev->open_mutex); |
2459 | + } else |
2460 | + mutex_unlock(&mddev->reconfig_mutex); |
2461 | |
2462 | md_wakeup_thread(mddev->thread); |
2463 | } |
2464 | @@ -2980,6 +3007,23 @@ level_store(mddev_t *mddev, const char *buf, size_t len) |
2465 | /* Looks like we have a winner */ |
2466 | mddev_suspend(mddev); |
2467 | mddev->pers->stop(mddev); |
2468 | + |
2469 | + if (mddev->pers->sync_request == NULL && |
2470 | + pers->sync_request != NULL) { |
2471 | + /* need to add the md_redundancy_group */ |
2472 | + if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) |
2473 | + printk(KERN_WARNING |
2474 | + "md: cannot register extra attributes for %s\n", |
2475 | + mdname(mddev)); |
2476 | + mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); |
2477 | + } |
2478 | + if (mddev->pers->sync_request != NULL && |
2479 | + pers->sync_request == NULL) { |
2480 | + /* need to remove the md_redundancy_group */ |
2481 | + if (mddev->to_remove == NULL) |
2482 | + mddev->to_remove = &md_redundancy_group; |
2483 | + } |
2484 | + |
2485 | module_put(mddev->pers->owner); |
2486 | /* Invalidate devices that are now superfluous */ |
2487 | list_for_each_entry(rdev, &mddev->disks, same_set) |
2488 | @@ -4082,15 +4126,6 @@ static void mddev_delayed_delete(struct work_struct *ws) |
2489 | { |
2490 | mddev_t *mddev = container_of(ws, mddev_t, del_work); |
2491 | |
2492 | - if (mddev->private) { |
2493 | - sysfs_remove_group(&mddev->kobj, &md_redundancy_group); |
2494 | - if (mddev->private != (void*)1) |
2495 | - sysfs_remove_group(&mddev->kobj, mddev->private); |
2496 | - if (mddev->sysfs_action) |
2497 | - sysfs_put(mddev->sysfs_action); |
2498 | - mddev->sysfs_action = NULL; |
2499 | - mddev->private = NULL; |
2500 | - } |
2501 | sysfs_remove_group(&mddev->kobj, &md_bitmap_group); |
2502 | kobject_del(&mddev->kobj); |
2503 | kobject_put(&mddev->kobj); |
2504 | @@ -4248,6 +4283,13 @@ static int do_md_run(mddev_t * mddev) |
2505 | if (mddev->pers) |
2506 | return -EBUSY; |
2507 | |
2508 | + /* These two calls synchronise us with the |
2509 | + * sysfs_remove_group calls in mddev_unlock, |
2510 | + * so they must have completed. |
2511 | + */ |
2512 | + mutex_lock(&mddev->open_mutex); |
2513 | + mutex_unlock(&mddev->open_mutex); |
2514 | + |
2515 | /* |
2516 | * Analyze all RAID superblock(s) |
2517 | */ |
2518 | @@ -4536,8 +4578,8 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) |
2519 | mddev->queue->unplug_fn = NULL; |
2520 | mddev->queue->backing_dev_info.congested_fn = NULL; |
2521 | module_put(mddev->pers->owner); |
2522 | - if (mddev->pers->sync_request && mddev->private == NULL) |
2523 | - mddev->private = (void*)1; |
2524 | + if (mddev->pers->sync_request && mddev->to_remove == NULL) |
2525 | + mddev->to_remove = &md_redundancy_group; |
2526 | mddev->pers = NULL; |
2527 | /* tell userspace to handle 'inactive' */ |
2528 | sysfs_notify_dirent(mddev->sysfs_state); |
2529 | @@ -5496,6 +5538,7 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, |
2530 | int err = 0; |
2531 | void __user *argp = (void __user *)arg; |
2532 | mddev_t *mddev = NULL; |
2533 | + int ro; |
2534 | |
2535 | if (!capable(CAP_SYS_ADMIN)) |
2536 | return -EACCES; |
2537 | @@ -5631,6 +5674,34 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, |
2538 | err = do_md_stop(mddev, 1, 1); |
2539 | goto done_unlock; |
2540 | |
2541 | + case BLKROSET: |
2542 | + if (get_user(ro, (int __user *)(arg))) { |
2543 | + err = -EFAULT; |
2544 | + goto done_unlock; |
2545 | + } |
2546 | + err = -EINVAL; |
2547 | + |
2548 | + /* if the bdev is going readonly the value of mddev->ro |
2549 | + * does not matter, no writes are coming |
2550 | + */ |
2551 | + if (ro) |
2552 | + goto done_unlock; |
2553 | + |
2554 | + /* are we are already prepared for writes? */ |
2555 | + if (mddev->ro != 1) |
2556 | + goto done_unlock; |
2557 | + |
2558 | + /* transitioning to readauto need only happen for |
2559 | + * arrays that call md_write_start |
2560 | + */ |
2561 | + if (mddev->pers) { |
2562 | + err = restart_array(mddev); |
2563 | + if (err == 0) { |
2564 | + mddev->ro = 2; |
2565 | + set_disk_ro(mddev->gendisk, 0); |
2566 | + } |
2567 | + } |
2568 | + goto done_unlock; |
2569 | } |
2570 | |
2571 | /* |
2572 | diff --git a/drivers/md/md.h b/drivers/md/md.h |
2573 | index 8e4c75c..722f5df 100644 |
2574 | --- a/drivers/md/md.h |
2575 | +++ b/drivers/md/md.h |
2576 | @@ -305,6 +305,7 @@ struct mddev_s |
2577 | atomic_t max_corr_read_errors; /* max read retries */ |
2578 | struct list_head all_mddevs; |
2579 | |
2580 | + struct attribute_group *to_remove; |
2581 | /* Generic barrier handling. |
2582 | * If there is a pending barrier request, all other |
2583 | * writes are blocked while the devices are flushed. |
2584 | diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c |
2585 | index e59b10e..84d3bf0 100644 |
2586 | --- a/drivers/md/raid1.c |
2587 | +++ b/drivers/md/raid1.c |
2588 | @@ -418,7 +418,7 @@ static void raid1_end_write_request(struct bio *bio, int error) |
2589 | */ |
2590 | static int read_balance(conf_t *conf, r1bio_t *r1_bio) |
2591 | { |
2592 | - const unsigned long this_sector = r1_bio->sector; |
2593 | + const sector_t this_sector = r1_bio->sector; |
2594 | int new_disk = conf->last_used, disk = new_disk; |
2595 | int wonly_disk = -1; |
2596 | const int sectors = r1_bio->sectors; |
2597 | @@ -434,7 +434,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio) |
2598 | retry: |
2599 | if (conf->mddev->recovery_cp < MaxSector && |
2600 | (this_sector + sectors >= conf->next_resync)) { |
2601 | - /* Choose the first operation device, for consistancy */ |
2602 | + /* Choose the first operational device, for consistancy */ |
2603 | new_disk = 0; |
2604 | |
2605 | for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev); |
2606 | @@ -912,9 +912,10 @@ static int make_request(struct request_queue *q, struct bio * bio) |
2607 | if (test_bit(Faulty, &rdev->flags)) { |
2608 | rdev_dec_pending(rdev, mddev); |
2609 | r1_bio->bios[i] = NULL; |
2610 | - } else |
2611 | + } else { |
2612 | r1_bio->bios[i] = bio; |
2613 | - targets++; |
2614 | + targets++; |
2615 | + } |
2616 | } else |
2617 | r1_bio->bios[i] = NULL; |
2618 | } |
2619 | diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
2620 | index e2766d8..ad945cc 100644 |
2621 | --- a/drivers/md/raid10.c |
2622 | +++ b/drivers/md/raid10.c |
2623 | @@ -494,7 +494,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, |
2624 | */ |
2625 | static int read_balance(conf_t *conf, r10bio_t *r10_bio) |
2626 | { |
2627 | - const unsigned long this_sector = r10_bio->sector; |
2628 | + const sector_t this_sector = r10_bio->sector; |
2629 | int disk, slot, nslot; |
2630 | const int sectors = r10_bio->sectors; |
2631 | sector_t new_distance, current_distance; |
2632 | diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c |
2633 | index 15348c3..6af0a6d 100644 |
2634 | --- a/drivers/md/raid5.c |
2635 | +++ b/drivers/md/raid5.c |
2636 | @@ -5087,7 +5087,9 @@ static int run(mddev_t *mddev) |
2637 | } |
2638 | |
2639 | /* Ok, everything is just fine now */ |
2640 | - if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) |
2641 | + if (mddev->to_remove == &raid5_attrs_group) |
2642 | + mddev->to_remove = NULL; |
2643 | + else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) |
2644 | printk(KERN_WARNING |
2645 | "raid5: failed to create sysfs attributes for %s\n", |
2646 | mdname(mddev)); |
2647 | @@ -5134,7 +5136,8 @@ static int stop(mddev_t *mddev) |
2648 | mddev->queue->backing_dev_info.congested_fn = NULL; |
2649 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
2650 | free_conf(conf); |
2651 | - mddev->private = &raid5_attrs_group; |
2652 | + mddev->private = NULL; |
2653 | + mddev->to_remove = &raid5_attrs_group; |
2654 | return 0; |
2655 | } |
2656 | |
2657 | diff --git a/drivers/media/video/uvc/uvc_ctrl.c b/drivers/media/video/uvc/uvc_ctrl.c |
2658 | index 6d3850b..2194da5 100644 |
2659 | --- a/drivers/media/video/uvc/uvc_ctrl.c |
2660 | +++ b/drivers/media/video/uvc/uvc_ctrl.c |
2661 | @@ -1047,6 +1047,8 @@ int uvc_ctrl_set(struct uvc_video_chain *chain, |
2662 | uvc_ctrl_data(ctrl, UVC_CTRL_DATA_MAX)); |
2663 | step = mapping->get(mapping, UVC_GET_RES, |
2664 | uvc_ctrl_data(ctrl, UVC_CTRL_DATA_RES)); |
2665 | + if (step == 0) |
2666 | + step = 1; |
2667 | |
2668 | xctrl->value = min + (xctrl->value - min + step/2) / step * step; |
2669 | xctrl->value = clamp(xctrl->value, min, max); |
2670 | diff --git a/drivers/misc/vmware_balloon.c b/drivers/misc/vmware_balloon.c |
2671 | index e7161c4..ad8fb09 100644 |
2672 | --- a/drivers/misc/vmware_balloon.c |
2673 | +++ b/drivers/misc/vmware_balloon.c |
2674 | @@ -45,7 +45,7 @@ |
2675 | |
2676 | MODULE_AUTHOR("VMware, Inc."); |
2677 | MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver"); |
2678 | -MODULE_VERSION("1.2.1.0-K"); |
2679 | +MODULE_VERSION("1.2.1.1-k"); |
2680 | MODULE_ALIAS("dmi:*:svnVMware*:*"); |
2681 | MODULE_ALIAS("vmware_vmmemctl"); |
2682 | MODULE_LICENSE("GPL"); |
2683 | @@ -101,6 +101,8 @@ MODULE_LICENSE("GPL"); |
2684 | /* Maximum number of page allocations without yielding processor */ |
2685 | #define VMW_BALLOON_YIELD_THRESHOLD 1024 |
2686 | |
2687 | +/* Maximum number of refused pages we accumulate during inflation cycle */ |
2688 | +#define VMW_BALLOON_MAX_REFUSED 16 |
2689 | |
2690 | /* |
2691 | * Hypervisor communication port definitions. |
2692 | @@ -183,6 +185,7 @@ struct vmballoon { |
2693 | |
2694 | /* transient list of non-balloonable pages */ |
2695 | struct list_head refused_pages; |
2696 | + unsigned int n_refused_pages; |
2697 | |
2698 | /* balloon size in pages */ |
2699 | unsigned int size; |
2700 | @@ -428,14 +431,21 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep) |
2701 | /* inform monitor */ |
2702 | locked = vmballoon_send_lock_page(b, page_to_pfn(page)); |
2703 | if (!locked) { |
2704 | + STATS_INC(b->stats.refused_alloc); |
2705 | + |
2706 | if (b->reset_required) { |
2707 | __free_page(page); |
2708 | return -EIO; |
2709 | } |
2710 | |
2711 | - /* place on list of non-balloonable pages, retry allocation */ |
2712 | + /* |
2713 | + * Place page on the list of non-balloonable pages |
2714 | + * and retry allocation, unless we already accumulated |
2715 | + * too many of them, in which case take a breather. |
2716 | + */ |
2717 | list_add(&page->lru, &b->refused_pages); |
2718 | - STATS_INC(b->stats.refused_alloc); |
2719 | + if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED) |
2720 | + return -EIO; |
2721 | } |
2722 | } while (!locked); |
2723 | |
2724 | @@ -483,6 +493,8 @@ static void vmballoon_release_refused_pages(struct vmballoon *b) |
2725 | __free_page(page); |
2726 | STATS_INC(b->stats.refused_free); |
2727 | } |
2728 | + |
2729 | + b->n_refused_pages = 0; |
2730 | } |
2731 | |
2732 | /* |
2733 | diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c |
2734 | index 2c712af..48a1dbf 100644 |
2735 | --- a/drivers/net/arcnet/com20020-pci.c |
2736 | +++ b/drivers/net/arcnet/com20020-pci.c |
2737 | @@ -164,8 +164,8 @@ static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = { |
2738 | { 0x1571, 0xa204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, |
2739 | { 0x1571, 0xa205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, |
2740 | { 0x1571, 0xa206, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, |
2741 | - { 0x10B5, 0x9030, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, |
2742 | - { 0x10B5, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, |
2743 | + { 0x10B5, 0x9030, 0x10B5, 0x2978, 0, 0, ARC_CAN_10MBIT }, |
2744 | + { 0x10B5, 0x9050, 0x10B5, 0x2273, 0, 0, ARC_CAN_10MBIT }, |
2745 | { 0x14BA, 0x6000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, |
2746 | { 0x10B5, 0x2200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ARC_CAN_10MBIT }, |
2747 | {0,} |
2748 | diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c |
2749 | index 145b1a7..dd70d0c 100644 |
2750 | --- a/drivers/net/can/sja1000/sja1000.c |
2751 | +++ b/drivers/net/can/sja1000/sja1000.c |
2752 | @@ -84,6 +84,20 @@ static struct can_bittiming_const sja1000_bittiming_const = { |
2753 | .brp_inc = 1, |
2754 | }; |
2755 | |
2756 | +static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) |
2757 | +{ |
2758 | + unsigned long flags; |
2759 | + |
2760 | + /* |
2761 | + * The command register needs some locking and time to settle |
2762 | + * the write_reg() operation - especially on SMP systems. |
2763 | + */ |
2764 | + spin_lock_irqsave(&priv->cmdreg_lock, flags); |
2765 | + priv->write_reg(priv, REG_CMR, val); |
2766 | + priv->read_reg(priv, REG_SR); |
2767 | + spin_unlock_irqrestore(&priv->cmdreg_lock, flags); |
2768 | +} |
2769 | + |
2770 | static int sja1000_probe_chip(struct net_device *dev) |
2771 | { |
2772 | struct sja1000_priv *priv = netdev_priv(dev); |
2773 | @@ -297,7 +311,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, |
2774 | |
2775 | can_put_echo_skb(skb, dev, 0); |
2776 | |
2777 | - priv->write_reg(priv, REG_CMR, CMD_TR); |
2778 | + sja1000_write_cmdreg(priv, CMD_TR); |
2779 | |
2780 | return NETDEV_TX_OK; |
2781 | } |
2782 | @@ -346,7 +360,7 @@ static void sja1000_rx(struct net_device *dev) |
2783 | cf->can_id = id; |
2784 | |
2785 | /* release receive buffer */ |
2786 | - priv->write_reg(priv, REG_CMR, CMD_RRB); |
2787 | + sja1000_write_cmdreg(priv, CMD_RRB); |
2788 | |
2789 | netif_rx(skb); |
2790 | |
2791 | @@ -374,7 +388,7 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) |
2792 | cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; |
2793 | stats->rx_over_errors++; |
2794 | stats->rx_errors++; |
2795 | - priv->write_reg(priv, REG_CMR, CMD_CDO); /* clear bit */ |
2796 | + sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */ |
2797 | } |
2798 | |
2799 | if (isrc & IRQ_EI) { |
2800 | diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h |
2801 | index 97a622b..de8e778 100644 |
2802 | --- a/drivers/net/can/sja1000/sja1000.h |
2803 | +++ b/drivers/net/can/sja1000/sja1000.h |
2804 | @@ -167,6 +167,7 @@ struct sja1000_priv { |
2805 | |
2806 | void __iomem *reg_base; /* ioremap'ed address to registers */ |
2807 | unsigned long irq_flags; /* for request_irq() */ |
2808 | + spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */ |
2809 | |
2810 | u16 flags; /* custom mode flags */ |
2811 | u8 ocr; /* output control register */ |
2812 | diff --git a/drivers/net/mlx4/icm.c b/drivers/net/mlx4/icm.c |
2813 | index 57288ca..ef62f17 100644 |
2814 | --- a/drivers/net/mlx4/icm.c |
2815 | +++ b/drivers/net/mlx4/icm.c |
2816 | @@ -175,9 +175,10 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, |
2817 | |
2818 | if (chunk->nsg <= 0) |
2819 | goto fail; |
2820 | + } |
2821 | |
2822 | + if (chunk->npages == MLX4_ICM_CHUNK_LEN) |
2823 | chunk = NULL; |
2824 | - } |
2825 | |
2826 | npages -= 1 << cur_order; |
2827 | } else { |
2828 | diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h |
2829 | index 0a1d4c2..06f1f3c 100644 |
2830 | --- a/drivers/net/wireless/ath/ar9170/hw.h |
2831 | +++ b/drivers/net/wireless/ath/ar9170/hw.h |
2832 | @@ -425,5 +425,6 @@ enum ar9170_txq { |
2833 | |
2834 | #define AR9170_TXQ_DEPTH 32 |
2835 | #define AR9170_TX_MAX_PENDING 128 |
2836 | +#define AR9170_RX_STREAM_MAX_SIZE 65535 |
2837 | |
2838 | #endif /* __AR9170_HW_H */ |
2839 | diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c |
2840 | index c536929..144db02 100644 |
2841 | --- a/drivers/net/wireless/ath/ar9170/main.c |
2842 | +++ b/drivers/net/wireless/ath/ar9170/main.c |
2843 | @@ -2516,7 +2516,7 @@ void *ar9170_alloc(size_t priv_size) |
2844 | * tends to split the streams into separate rx descriptors. |
2845 | */ |
2846 | |
2847 | - skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL); |
2848 | + skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL); |
2849 | if (!skb) |
2850 | goto err_nomem; |
2851 | |
2852 | diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c |
2853 | index e1c2fca..7bae7fd 100644 |
2854 | --- a/drivers/net/wireless/ath/ar9170/usb.c |
2855 | +++ b/drivers/net/wireless/ath/ar9170/usb.c |
2856 | @@ -67,18 +67,28 @@ static struct usb_device_id ar9170_usb_ids[] = { |
2857 | { USB_DEVICE(0x0cf3, 0x1001) }, |
2858 | /* TP-Link TL-WN821N v2 */ |
2859 | { USB_DEVICE(0x0cf3, 0x1002) }, |
2860 | + /* 3Com Dual Band 802.11n USB Adapter */ |
2861 | + { USB_DEVICE(0x0cf3, 0x1010) }, |
2862 | + /* H3C Dual Band 802.11n USB Adapter */ |
2863 | + { USB_DEVICE(0x0cf3, 0x1011) }, |
2864 | /* Cace Airpcap NX */ |
2865 | { USB_DEVICE(0xcace, 0x0300) }, |
2866 | /* D-Link DWA 160 A1 */ |
2867 | { USB_DEVICE(0x07d1, 0x3c10) }, |
2868 | /* D-Link DWA 160 A2 */ |
2869 | { USB_DEVICE(0x07d1, 0x3a09) }, |
2870 | + /* Netgear WNA1000 */ |
2871 | + { USB_DEVICE(0x0846, 0x9040) }, |
2872 | /* Netgear WNDA3100 */ |
2873 | { USB_DEVICE(0x0846, 0x9010) }, |
2874 | /* Netgear WN111 v2 */ |
2875 | { USB_DEVICE(0x0846, 0x9001) }, |
2876 | /* Zydas ZD1221 */ |
2877 | { USB_DEVICE(0x0ace, 0x1221) }, |
2878 | + /* Proxim ORiNOCO 802.11n USB */ |
2879 | + { USB_DEVICE(0x1435, 0x0804) }, |
2880 | + /* WNC Generic 11n USB Dongle */ |
2881 | + { USB_DEVICE(0x1435, 0x0326) }, |
2882 | /* ZyXEL NWD271N */ |
2883 | { USB_DEVICE(0x0586, 0x3417) }, |
2884 | /* Z-Com UB81 BG */ |
2885 | diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c |
2886 | index 3abbe75..ea90997 100644 |
2887 | --- a/drivers/net/wireless/ath/ath5k/base.c |
2888 | +++ b/drivers/net/wireless/ath/ath5k/base.c |
2889 | @@ -1211,6 +1211,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) |
2890 | struct ath5k_hw *ah = sc->ah; |
2891 | struct sk_buff *skb = bf->skb; |
2892 | struct ath5k_desc *ds; |
2893 | + int ret; |
2894 | |
2895 | if (!skb) { |
2896 | skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr); |
2897 | @@ -1237,9 +1238,9 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf) |
2898 | ds = bf->desc; |
2899 | ds->ds_link = bf->daddr; /* link to self */ |
2900 | ds->ds_data = bf->skbaddr; |
2901 | - ah->ah_setup_rx_desc(ah, ds, |
2902 | - skb_tailroom(skb), /* buffer size */ |
2903 | - 0); |
2904 | + ret = ah->ah_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0); |
2905 | + if (ret) |
2906 | + return ret; |
2907 | |
2908 | if (sc->rxlink != NULL) |
2909 | *sc->rxlink = bf->daddr; |
2910 | @@ -2993,13 +2994,15 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw, |
2911 | |
2912 | if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) { |
2913 | if (*new_flags & FIF_PROMISC_IN_BSS) { |
2914 | - rfilt |= AR5K_RX_FILTER_PROM; |
2915 | __set_bit(ATH_STAT_PROMISC, sc->status); |
2916 | } else { |
2917 | __clear_bit(ATH_STAT_PROMISC, sc->status); |
2918 | } |
2919 | } |
2920 | |
2921 | + if (test_bit(ATH_STAT_PROMISC, sc->status)) |
2922 | + rfilt |= AR5K_RX_FILTER_PROM; |
2923 | + |
2924 | /* Note, AR5K_RX_FILTER_MCAST is already enabled */ |
2925 | if (*new_flags & FIF_ALLMULTI) { |
2926 | mfilt[0] = ~0; |
2927 | diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c |
2928 | index 78b5711..20d5414 100644 |
2929 | --- a/drivers/net/wireless/ath/ath9k/hw.c |
2930 | +++ b/drivers/net/wireless/ath/ath9k/hw.c |
2931 | @@ -1241,7 +1241,7 @@ void ath9k_hw_deinit(struct ath_hw *ah) |
2932 | { |
2933 | struct ath_common *common = ath9k_hw_common(ah); |
2934 | |
2935 | - if (common->state <= ATH_HW_INITIALIZED) |
2936 | + if (common->state < ATH_HW_INITIALIZED) |
2937 | goto free_hw; |
2938 | |
2939 | if (!AR_SREV_9100(ah)) |
2940 | @@ -1252,8 +1252,6 @@ void ath9k_hw_deinit(struct ath_hw *ah) |
2941 | free_hw: |
2942 | if (!AR_SREV_9280_10_OR_LATER(ah)) |
2943 | ath9k_hw_rf_free_ext_banks(ah); |
2944 | - kfree(ah); |
2945 | - ah = NULL; |
2946 | } |
2947 | EXPORT_SYMBOL(ath9k_hw_deinit); |
2948 | |
2949 | diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c |
2950 | index 3d4d897..b78308c 100644 |
2951 | --- a/drivers/net/wireless/ath/ath9k/init.c |
2952 | +++ b/drivers/net/wireless/ath/ath9k/init.c |
2953 | @@ -760,6 +760,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc) |
2954 | |
2955 | tasklet_kill(&sc->intr_tq); |
2956 | tasklet_kill(&sc->bcon_tasklet); |
2957 | + |
2958 | + kfree(sc->sc_ah); |
2959 | + sc->sc_ah = NULL; |
2960 | } |
2961 | |
2962 | void ath9k_deinit_device(struct ath_softc *sc) |
2963 | diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c |
2964 | index 1460116..d3ef2a9 100644 |
2965 | --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c |
2966 | +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c |
2967 | @@ -2077,10 +2077,12 @@ static void rs_rate_scale_perform(struct iwl_priv *priv, |
2968 | } |
2969 | /* Else we have enough samples; calculate estimate of |
2970 | * actual average throughput */ |
2971 | - |
2972 | - /* Sanity-check TPT calculations */ |
2973 | - BUG_ON(window->average_tpt != ((window->success_ratio * |
2974 | - tbl->expected_tpt[index] + 64) / 128)); |
2975 | + if (window->average_tpt != ((window->success_ratio * |
2976 | + tbl->expected_tpt[index] + 64) / 128)) { |
2977 | + IWL_ERR(priv, "expected_tpt should have been calculated by now\n"); |
2978 | + window->average_tpt = ((window->success_ratio * |
2979 | + tbl->expected_tpt[index] + 64) / 128); |
2980 | + } |
2981 | |
2982 | /* If we are searching for better modulation mode, check success. */ |
2983 | if (lq_sta->search_better_tbl && |
2984 | diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c |
2985 | index 741e65e..661e36b 100644 |
2986 | --- a/drivers/net/wireless/iwlwifi/iwl-scan.c |
2987 | +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c |
2988 | @@ -561,6 +561,11 @@ static void iwl_bg_start_internal_scan(struct work_struct *work) |
2989 | |
2990 | mutex_lock(&priv->mutex); |
2991 | |
2992 | + if (priv->is_internal_short_scan == true) { |
2993 | + IWL_DEBUG_SCAN(priv, "Internal scan already in progress\n"); |
2994 | + goto unlock; |
2995 | + } |
2996 | + |
2997 | if (!iwl_is_ready_rf(priv)) { |
2998 | IWL_DEBUG_SCAN(priv, "not ready or exit pending\n"); |
2999 | goto unlock; |
3000 | @@ -958,17 +963,27 @@ void iwl_bg_scan_completed(struct work_struct *work) |
3001 | { |
3002 | struct iwl_priv *priv = |
3003 | container_of(work, struct iwl_priv, scan_completed); |
3004 | + bool internal = false; |
3005 | |
3006 | IWL_DEBUG_SCAN(priv, "SCAN complete scan\n"); |
3007 | |
3008 | cancel_delayed_work(&priv->scan_check); |
3009 | |
3010 | - if (!priv->is_internal_short_scan) |
3011 | - ieee80211_scan_completed(priv->hw, false); |
3012 | - else { |
3013 | + mutex_lock(&priv->mutex); |
3014 | + if (priv->is_internal_short_scan) { |
3015 | priv->is_internal_short_scan = false; |
3016 | IWL_DEBUG_SCAN(priv, "internal short scan completed\n"); |
3017 | + internal = true; |
3018 | } |
3019 | + mutex_unlock(&priv->mutex); |
3020 | + |
3021 | + /* |
3022 | + * Do not hold mutex here since this will cause mac80211 to call |
3023 | + * into driver again into functions that will attempt to take |
3024 | + * mutex. |
3025 | + */ |
3026 | + if (!internal) |
3027 | + ieee80211_scan_completed(priv->hw, false); |
3028 | |
3029 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
3030 | return; |
3031 | diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c |
3032 | index 8dd0c03..c243df7 100644 |
3033 | --- a/drivers/net/wireless/iwlwifi/iwl-tx.c |
3034 | +++ b/drivers/net/wireless/iwlwifi/iwl-tx.c |
3035 | @@ -1198,6 +1198,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb) |
3036 | struct ieee80211_sta *sta; |
3037 | struct iwl_station_priv *sta_priv; |
3038 | |
3039 | + rcu_read_lock(); |
3040 | sta = ieee80211_find_sta(priv->vif, hdr->addr1); |
3041 | if (sta) { |
3042 | sta_priv = (void *)sta->drv_priv; |
3043 | @@ -1206,6 +1207,7 @@ static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb) |
3044 | atomic_dec_return(&sta_priv->pending_frames) == 0) |
3045 | ieee80211_sta_block_awake(priv->hw, sta, false); |
3046 | } |
3047 | + rcu_read_unlock(); |
3048 | |
3049 | ieee80211_tx_status_irqsafe(priv->hw, skb); |
3050 | } |
3051 | diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c |
3052 | index 743a6c6..186dc71 100644 |
3053 | --- a/drivers/net/wireless/p54/p54usb.c |
3054 | +++ b/drivers/net/wireless/p54/p54usb.c |
3055 | @@ -80,6 +80,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { |
3056 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ |
3057 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ |
3058 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ |
3059 | + {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ |
3060 | {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ |
3061 | {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ |
3062 | {} |
3063 | diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c |
3064 | index 2131a44..de632ec 100644 |
3065 | --- a/drivers/net/wireless/rtl818x/rtl8180_dev.c |
3066 | +++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c |
3067 | @@ -188,6 +188,7 @@ static void rtl8180_handle_tx(struct ieee80211_hw *dev, unsigned int prio) |
3068 | info->flags |= IEEE80211_TX_STAT_ACK; |
3069 | |
3070 | info->status.rates[0].count = (flags & 0xFF) + 1; |
3071 | + info->status.rates[1].idx = -1; |
3072 | |
3073 | ieee80211_tx_status_irqsafe(dev, skb); |
3074 | if (ring->entries - skb_queue_len(&ring->queue) == 2) |
3075 | diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c |
3076 | index 9423f22..d74b89b 100644 |
3077 | --- a/drivers/net/wireless/wl12xx/wl1251_sdio.c |
3078 | +++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c |
3079 | @@ -160,6 +160,7 @@ disable: |
3080 | sdio_disable_func(func); |
3081 | release: |
3082 | sdio_release_host(func); |
3083 | + wl1251_free_hw(wl); |
3084 | return ret; |
3085 | } |
3086 | |
3087 | diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c |
3088 | index 166b67e..de82183 100644 |
3089 | --- a/drivers/oprofile/cpu_buffer.c |
3090 | +++ b/drivers/oprofile/cpu_buffer.c |
3091 | @@ -30,23 +30,7 @@ |
3092 | |
3093 | #define OP_BUFFER_FLAGS 0 |
3094 | |
3095 | -/* |
3096 | - * Read and write access is using spin locking. Thus, writing to the |
3097 | - * buffer by NMI handler (x86) could occur also during critical |
3098 | - * sections when reading the buffer. To avoid this, there are 2 |
3099 | - * buffers for independent read and write access. Read access is in |
3100 | - * process context only, write access only in the NMI handler. If the |
3101 | - * read buffer runs empty, both buffers are swapped atomically. There |
3102 | - * is potentially a small window during swapping where the buffers are |
3103 | - * disabled and samples could be lost. |
3104 | - * |
3105 | - * Using 2 buffers is a little bit overhead, but the solution is clear |
3106 | - * and does not require changes in the ring buffer implementation. It |
3107 | - * can be changed to a single buffer solution when the ring buffer |
3108 | - * access is implemented as non-locking atomic code. |
3109 | - */ |
3110 | -static struct ring_buffer *op_ring_buffer_read; |
3111 | -static struct ring_buffer *op_ring_buffer_write; |
3112 | +static struct ring_buffer *op_ring_buffer; |
3113 | DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer); |
3114 | |
3115 | static void wq_sync_buffer(struct work_struct *work); |
3116 | @@ -68,12 +52,9 @@ void oprofile_cpu_buffer_inc_smpl_lost(void) |
3117 | |
3118 | void free_cpu_buffers(void) |
3119 | { |
3120 | - if (op_ring_buffer_read) |
3121 | - ring_buffer_free(op_ring_buffer_read); |
3122 | - op_ring_buffer_read = NULL; |
3123 | - if (op_ring_buffer_write) |
3124 | - ring_buffer_free(op_ring_buffer_write); |
3125 | - op_ring_buffer_write = NULL; |
3126 | + if (op_ring_buffer) |
3127 | + ring_buffer_free(op_ring_buffer); |
3128 | + op_ring_buffer = NULL; |
3129 | } |
3130 | |
3131 | #define RB_EVENT_HDR_SIZE 4 |
3132 | @@ -86,11 +67,8 @@ int alloc_cpu_buffers(void) |
3133 | unsigned long byte_size = buffer_size * (sizeof(struct op_sample) + |
3134 | RB_EVENT_HDR_SIZE); |
3135 | |
3136 | - op_ring_buffer_read = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); |
3137 | - if (!op_ring_buffer_read) |
3138 | - goto fail; |
3139 | - op_ring_buffer_write = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); |
3140 | - if (!op_ring_buffer_write) |
3141 | + op_ring_buffer = ring_buffer_alloc(byte_size, OP_BUFFER_FLAGS); |
3142 | + if (!op_ring_buffer) |
3143 | goto fail; |
3144 | |
3145 | for_each_possible_cpu(i) { |
3146 | @@ -162,16 +140,11 @@ struct op_sample |
3147 | *op_cpu_buffer_write_reserve(struct op_entry *entry, unsigned long size) |
3148 | { |
3149 | entry->event = ring_buffer_lock_reserve |
3150 | - (op_ring_buffer_write, sizeof(struct op_sample) + |
3151 | + (op_ring_buffer, sizeof(struct op_sample) + |
3152 | size * sizeof(entry->sample->data[0])); |
3153 | - if (entry->event) |
3154 | - entry->sample = ring_buffer_event_data(entry->event); |
3155 | - else |
3156 | - entry->sample = NULL; |
3157 | - |
3158 | - if (!entry->sample) |
3159 | + if (!entry->event) |
3160 | return NULL; |
3161 | - |
3162 | + entry->sample = ring_buffer_event_data(entry->event); |
3163 | entry->size = size; |
3164 | entry->data = entry->sample->data; |
3165 | |
3166 | @@ -180,25 +153,16 @@ struct op_sample |
3167 | |
3168 | int op_cpu_buffer_write_commit(struct op_entry *entry) |
3169 | { |
3170 | - return ring_buffer_unlock_commit(op_ring_buffer_write, entry->event); |
3171 | + return ring_buffer_unlock_commit(op_ring_buffer, entry->event); |
3172 | } |
3173 | |
3174 | struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu) |
3175 | { |
3176 | struct ring_buffer_event *e; |
3177 | - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); |
3178 | - if (e) |
3179 | - goto event; |
3180 | - if (ring_buffer_swap_cpu(op_ring_buffer_read, |
3181 | - op_ring_buffer_write, |
3182 | - cpu)) |
3183 | + e = ring_buffer_consume(op_ring_buffer, cpu, NULL); |
3184 | + if (!e) |
3185 | return NULL; |
3186 | - e = ring_buffer_consume(op_ring_buffer_read, cpu, NULL); |
3187 | - if (e) |
3188 | - goto event; |
3189 | - return NULL; |
3190 | |
3191 | -event: |
3192 | entry->event = e; |
3193 | entry->sample = ring_buffer_event_data(e); |
3194 | entry->size = (ring_buffer_event_length(e) - sizeof(struct op_sample)) |
3195 | @@ -209,8 +173,7 @@ event: |
3196 | |
3197 | unsigned long op_cpu_buffer_entries(int cpu) |
3198 | { |
3199 | - return ring_buffer_entries_cpu(op_ring_buffer_read, cpu) |
3200 | - + ring_buffer_entries_cpu(op_ring_buffer_write, cpu); |
3201 | + return ring_buffer_entries_cpu(op_ring_buffer, cpu); |
3202 | } |
3203 | |
3204 | static int |
3205 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
3206 | index 27c0e6e..2f2d0ec 100644 |
3207 | --- a/drivers/pci/quirks.c |
3208 | +++ b/drivers/pci/quirks.c |
3209 | @@ -1457,7 +1457,8 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) |
3210 | conf5 &= ~(1 << 24); /* Clear bit 24 */ |
3211 | |
3212 | switch (pdev->device) { |
3213 | - case PCI_DEVICE_ID_JMICRON_JMB360: |
3214 | + case PCI_DEVICE_ID_JMICRON_JMB360: /* SATA single port */ |
3215 | + case PCI_DEVICE_ID_JMICRON_JMB362: /* SATA dual ports */ |
3216 | /* The controller should be in single function ahci mode */ |
3217 | conf1 |= 0x0002A100; /* Set 8, 13, 15, 17 */ |
3218 | break; |
3219 | @@ -1493,12 +1494,14 @@ static void quirk_jmicron_ata(struct pci_dev *pdev) |
3220 | } |
3221 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); |
3222 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); |
3223 | +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); |
3224 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); |
3225 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); |
3226 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); |
3227 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB368, quirk_jmicron_ata); |
3228 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB360, quirk_jmicron_ata); |
3229 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB361, quirk_jmicron_ata); |
3230 | +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB362, quirk_jmicron_ata); |
3231 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB363, quirk_jmicron_ata); |
3232 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB365, quirk_jmicron_ata); |
3233 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB366, quirk_jmicron_ata); |
3234 | @@ -2127,6 +2130,7 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi); |
3235 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi); |
3236 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi); |
3237 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi); |
3238 | +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x5a3f, quirk_disable_msi); |
3239 | |
3240 | /* Go through the list of Hypertransport capabilities and |
3241 | * return 1 if a HT MSI capability is found and enabled */ |
3242 | @@ -2218,15 +2222,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SERVERWORKS, |
3243 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8132_BRIDGE, |
3244 | ht_enable_msi_mapping); |
3245 | |
3246 | -/* The P5N32-SLI Premium motherboard from Asus has a problem with msi |
3247 | +/* The P5N32-SLI motherboards from Asus have a problem with msi |
3248 | * for the MCP55 NIC. It is not yet determined whether the msi problem |
3249 | * also affects other devices. As for now, turn off msi for this device. |
3250 | */ |
3251 | static void __devinit nvenet_msi_disable(struct pci_dev *dev) |
3252 | { |
3253 | - if (dmi_name_in_vendors("P5N32-SLI PREMIUM")) { |
3254 | + if (dmi_name_in_vendors("P5N32-SLI PREMIUM") || |
3255 | + dmi_name_in_vendors("P5N32-E SLI")) { |
3256 | dev_info(&dev->dev, |
3257 | - "Disabling msi for MCP55 NIC on P5N32-SLI Premium\n"); |
3258 | + "Disabling msi for MCP55 NIC on P5N32-SLI\n"); |
3259 | dev->no_msi = 1; |
3260 | } |
3261 | } |
3262 | diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c |
3263 | index 041eee4..6df5dff 100644 |
3264 | --- a/drivers/pcmcia/ds.c |
3265 | +++ b/drivers/pcmcia/ds.c |
3266 | @@ -682,6 +682,7 @@ static void pcmcia_requery(struct pcmcia_socket *s) |
3267 | if (old_funcs != new_funcs) { |
3268 | /* we need to re-start */ |
3269 | pcmcia_card_remove(s, NULL); |
3270 | + s->functions = 0; |
3271 | pcmcia_card_add(s); |
3272 | } |
3273 | } |
3274 | diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c |
3275 | index 83ace27..6bb6cb9 100644 |
3276 | --- a/drivers/pcmcia/yenta_socket.c |
3277 | +++ b/drivers/pcmcia/yenta_socket.c |
3278 | @@ -975,7 +975,7 @@ static irqreturn_t yenta_probe_handler(int irq, void *dev_id) |
3279 | /* probes the PCI interrupt, use only on override functions */ |
3280 | static int yenta_probe_cb_irq(struct yenta_socket *socket) |
3281 | { |
3282 | - u8 reg; |
3283 | + u8 reg = 0; |
3284 | |
3285 | if (!socket->cb_irq) |
3286 | return -1; |
3287 | @@ -989,7 +989,8 @@ static int yenta_probe_cb_irq(struct yenta_socket *socket) |
3288 | } |
3289 | |
3290 | /* generate interrupt, wait */ |
3291 | - reg = exca_readb(socket, I365_CSCINT); |
3292 | + if (!socket->dev->irq) |
3293 | + reg = exca_readb(socket, I365_CSCINT); |
3294 | exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG); |
3295 | cb_writel(socket, CB_SOCKET_EVENT, -1); |
3296 | cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK); |
3297 | diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig |
3298 | index 6c3320d..50601d9 100644 |
3299 | --- a/drivers/platform/x86/Kconfig |
3300 | +++ b/drivers/platform/x86/Kconfig |
3301 | @@ -390,6 +390,7 @@ config EEEPC_WMI |
3302 | depends on ACPI_WMI |
3303 | depends on INPUT |
3304 | depends on EXPERIMENTAL |
3305 | + depends on BACKLIGHT_CLASS_DEVICE |
3306 | select INPUT_SPARSEKMAP |
3307 | ---help--- |
3308 | Say Y here if you want to support WMI-based hotkeys on Eee PC laptops. |
3309 | diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c |
3310 | index e9aa814..aa13875 100644 |
3311 | --- a/drivers/rtc/rtc-cmos.c |
3312 | +++ b/drivers/rtc/rtc-cmos.c |
3313 | @@ -719,6 +719,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) |
3314 | } |
3315 | } |
3316 | |
3317 | + cmos_rtc.dev = dev; |
3318 | + dev_set_drvdata(dev, &cmos_rtc); |
3319 | + |
3320 | cmos_rtc.rtc = rtc_device_register(driver_name, dev, |
3321 | &cmos_rtc_ops, THIS_MODULE); |
3322 | if (IS_ERR(cmos_rtc.rtc)) { |
3323 | @@ -726,8 +729,6 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) |
3324 | goto cleanup0; |
3325 | } |
3326 | |
3327 | - cmos_rtc.dev = dev; |
3328 | - dev_set_drvdata(dev, &cmos_rtc); |
3329 | rename_region(ports, dev_name(&cmos_rtc.rtc->dev)); |
3330 | |
3331 | spin_lock_irq(&rtc_lock); |
3332 | diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c |
3333 | index 4969b60..3793ea6 100644 |
3334 | --- a/drivers/rtc/rtc-s3c.c |
3335 | +++ b/drivers/rtc/rtc-s3c.c |
3336 | @@ -457,8 +457,6 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) |
3337 | pr_debug("s3c2410_rtc: RTCCON=%02x\n", |
3338 | readb(s3c_rtc_base + S3C2410_RTCCON)); |
3339 | |
3340 | - s3c_rtc_setfreq(&pdev->dev, 1); |
3341 | - |
3342 | device_init_wakeup(&pdev->dev, 1); |
3343 | |
3344 | /* register RTC and exit */ |
3345 | @@ -475,6 +473,9 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev) |
3346 | rtc->max_user_freq = 128; |
3347 | |
3348 | platform_set_drvdata(pdev, rtc); |
3349 | + |
3350 | + s3c_rtc_setfreq(&pdev->dev, 1); |
3351 | + |
3352 | return 0; |
3353 | |
3354 | err_nortc: |
3355 | diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c |
3356 | index 88f7446..8c496b5 100644 |
3357 | --- a/drivers/scsi/libsas/sas_ata.c |
3358 | +++ b/drivers/scsi/libsas/sas_ata.c |
3359 | @@ -395,12 +395,13 @@ int sas_ata_init_host_and_port(struct domain_device *found_dev, |
3360 | void sas_ata_task_abort(struct sas_task *task) |
3361 | { |
3362 | struct ata_queued_cmd *qc = task->uldd_task; |
3363 | - struct request_queue *q = qc->scsicmd->device->request_queue; |
3364 | struct completion *waiting; |
3365 | - unsigned long flags; |
3366 | |
3367 | /* Bounce SCSI-initiated commands to the SCSI EH */ |
3368 | if (qc->scsicmd) { |
3369 | + struct request_queue *q = qc->scsicmd->device->request_queue; |
3370 | + unsigned long flags; |
3371 | + |
3372 | spin_lock_irqsave(q->queue_lock, flags); |
3373 | blk_abort_request(qc->scsicmd->request); |
3374 | spin_unlock_irqrestore(q->queue_lock, flags); |
3375 | diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c |
3376 | index 8228350..53849f2 100644 |
3377 | --- a/drivers/scsi/libsas/sas_scsi_host.c |
3378 | +++ b/drivers/scsi/libsas/sas_scsi_host.c |
3379 | @@ -1030,8 +1030,6 @@ int __sas_task_abort(struct sas_task *task) |
3380 | void sas_task_abort(struct sas_task *task) |
3381 | { |
3382 | struct scsi_cmnd *sc = task->uldd_task; |
3383 | - struct request_queue *q = sc->device->request_queue; |
3384 | - unsigned long flags; |
3385 | |
3386 | /* Escape for libsas internal commands */ |
3387 | if (!sc) { |
3388 | @@ -1043,13 +1041,15 @@ void sas_task_abort(struct sas_task *task) |
3389 | |
3390 | if (dev_is_sata(task->dev)) { |
3391 | sas_ata_task_abort(task); |
3392 | - return; |
3393 | - } |
3394 | + } else { |
3395 | + struct request_queue *q = sc->device->request_queue; |
3396 | + unsigned long flags; |
3397 | |
3398 | - spin_lock_irqsave(q->queue_lock, flags); |
3399 | - blk_abort_request(sc->request); |
3400 | - spin_unlock_irqrestore(q->queue_lock, flags); |
3401 | - scsi_schedule_eh(sc->device->host); |
3402 | + spin_lock_irqsave(q->queue_lock, flags); |
3403 | + blk_abort_request(sc->request); |
3404 | + spin_unlock_irqrestore(q->queue_lock, flags); |
3405 | + scsi_schedule_eh(sc->device->host); |
3406 | + } |
3407 | } |
3408 | |
3409 | int sas_slave_alloc(struct scsi_device *scsi_dev) |
3410 | diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c |
3411 | index 78ed24b..3046386 100644 |
3412 | --- a/drivers/serial/68328serial.c |
3413 | +++ b/drivers/serial/68328serial.c |
3414 | @@ -1437,7 +1437,7 @@ int m68328_console_setup(struct console *cp, char *arg) |
3415 | for (i = 0; i < ARRAY_SIZE(baud_table); i++) |
3416 | if (baud_table[i] == n) |
3417 | break; |
3418 | - if (i < BAUD_TABLE_SIZE) { |
3419 | + if (i < ARRAY_SIZE(baud_table)) { |
3420 | m68328_console_baud = n; |
3421 | m68328_console_cbaud = 0; |
3422 | if (i > 15) { |
3423 | diff --git a/drivers/staging/batman-adv/proc.c b/drivers/staging/batman-adv/proc.c |
3424 | index 7de60e8..c9366bc 100644 |
3425 | --- a/drivers/staging/batman-adv/proc.c |
3426 | +++ b/drivers/staging/batman-adv/proc.c |
3427 | @@ -41,7 +41,7 @@ static int proc_interfaces_read(struct seq_file *seq, void *offset) |
3428 | |
3429 | rcu_read_lock(); |
3430 | list_for_each_entry_rcu(batman_if, &if_list, list) { |
3431 | - seq_printf(seq, "[%8s] %s %s \n", |
3432 | + seq_printf(seq, "[%8s] %s %s\n", |
3433 | (batman_if->if_active == IF_ACTIVE ? |
3434 | "active" : "inactive"), |
3435 | batman_if->dev, |
3436 | @@ -188,18 +188,18 @@ static int proc_originators_read(struct seq_file *seq, void *offset) |
3437 | rcu_read_lock(); |
3438 | if (list_empty(&if_list)) { |
3439 | rcu_read_unlock(); |
3440 | - seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n"); |
3441 | + seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n"); |
3442 | goto end; |
3443 | } |
3444 | |
3445 | if (((struct batman_if *)if_list.next)->if_active != IF_ACTIVE) { |
3446 | rcu_read_unlock(); |
3447 | - seq_printf(seq, "BATMAN disabled - primary interface not active \n"); |
3448 | + seq_printf(seq, "BATMAN disabled - primary interface not active\n"); |
3449 | goto end; |
3450 | } |
3451 | |
3452 | seq_printf(seq, |
3453 | - " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s] \n", |
3454 | + " %-14s (%s/%i) %17s [%10s]: %20s ... [B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%s]\n", |
3455 | "Originator", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF", |
3456 | "Potential nexthops", SOURCE_VERSION, REVISION_VERSION_STR, |
3457 | ((struct batman_if *)if_list.next)->dev, |
3458 | @@ -240,7 +240,7 @@ static int proc_originators_read(struct seq_file *seq, void *offset) |
3459 | spin_unlock_irqrestore(&orig_hash_lock, flags); |
3460 | |
3461 | if (batman_count == 0) |
3462 | - seq_printf(seq, "No batman nodes in range ... \n"); |
3463 | + seq_printf(seq, "No batman nodes in range ...\n"); |
3464 | |
3465 | end: |
3466 | return 0; |
3467 | @@ -262,7 +262,7 @@ static int proc_transt_local_read(struct seq_file *seq, void *offset) |
3468 | rcu_read_lock(); |
3469 | if (list_empty(&if_list)) { |
3470 | rcu_read_unlock(); |
3471 | - seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n"); |
3472 | + seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n"); |
3473 | goto end; |
3474 | } |
3475 | |
3476 | @@ -294,7 +294,7 @@ static int proc_transt_global_read(struct seq_file *seq, void *offset) |
3477 | rcu_read_lock(); |
3478 | if (list_empty(&if_list)) { |
3479 | rcu_read_unlock(); |
3480 | - seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it \n"); |
3481 | + seq_printf(seq, "BATMAN disabled - please specify interfaces to enable it\n"); |
3482 | goto end; |
3483 | } |
3484 | rcu_read_unlock(); |
3485 | @@ -350,9 +350,9 @@ static int proc_vis_srv_read(struct seq_file *seq, void *offset) |
3486 | { |
3487 | int vis_server = atomic_read(&vis_mode); |
3488 | |
3489 | - seq_printf(seq, "[%c] client mode (server disabled) \n", |
3490 | + seq_printf(seq, "[%c] client mode (server disabled)\n", |
3491 | (vis_server == VIS_TYPE_CLIENT_UPDATE) ? 'x' : ' '); |
3492 | - seq_printf(seq, "[%c] server mode (server enabled) \n", |
3493 | + seq_printf(seq, "[%c] server mode (server enabled)\n", |
3494 | (vis_server == VIS_TYPE_SERVER_SYNC) ? 'x' : ' '); |
3495 | |
3496 | return 0; |
3497 | @@ -369,6 +369,8 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset) |
3498 | struct vis_info *info; |
3499 | struct vis_info_entry *entries; |
3500 | HLIST_HEAD(vis_if_list); |
3501 | + struct if_list_entry *entry; |
3502 | + struct hlist_node *pos, *n; |
3503 | int i; |
3504 | char tmp_addr_str[ETH_STR_LEN]; |
3505 | unsigned long flags; |
3506 | @@ -387,17 +389,34 @@ static int proc_vis_data_read(struct seq_file *seq, void *offset) |
3507 | info = hashit.bucket->data; |
3508 | entries = (struct vis_info_entry *) |
3509 | ((char *)info + sizeof(struct vis_info)); |
3510 | - addr_to_string(tmp_addr_str, info->packet.vis_orig); |
3511 | - seq_printf(seq, "%s,", tmp_addr_str); |
3512 | |
3513 | for (i = 0; i < info->packet.entries; i++) { |
3514 | - proc_vis_read_entry(seq, &entries[i], &vis_if_list, |
3515 | - info->packet.vis_orig); |
3516 | + if (entries[i].quality == 0) |
3517 | + continue; |
3518 | + proc_vis_insert_interface(entries[i].src, &vis_if_list, |
3519 | + compare_orig(entries[i].src, |
3520 | + info->packet.vis_orig)); |
3521 | } |
3522 | |
3523 | - /* add primary/secondary records */ |
3524 | - proc_vis_read_prim_sec(seq, &vis_if_list); |
3525 | - seq_printf(seq, "\n"); |
3526 | + hlist_for_each_entry(entry, pos, &vis_if_list, list) { |
3527 | + addr_to_string(tmp_addr_str, entry->addr); |
3528 | + seq_printf(seq, "%s,", tmp_addr_str); |
3529 | + |
3530 | + for (i = 0; i < info->packet.entries; i++) |
3531 | + proc_vis_read_entry(seq, &entries[i], |
3532 | + entry->addr, entry->primary); |
3533 | + |
3534 | + /* add primary/secondary records */ |
3535 | + if (compare_orig(entry->addr, info->packet.vis_orig)) |
3536 | + proc_vis_read_prim_sec(seq, &vis_if_list); |
3537 | + |
3538 | + seq_printf(seq, "\n"); |
3539 | + } |
3540 | + |
3541 | + hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) { |
3542 | + hlist_del(&entry->list); |
3543 | + kfree(entry); |
3544 | + } |
3545 | } |
3546 | spin_unlock_irqrestore(&vis_hash_lock, flags); |
3547 | |
3548 | diff --git a/drivers/staging/batman-adv/vis.c b/drivers/staging/batman-adv/vis.c |
3549 | index fedec1b..28eac7e 100644 |
3550 | --- a/drivers/staging/batman-adv/vis.c |
3551 | +++ b/drivers/staging/batman-adv/vis.c |
3552 | @@ -27,24 +27,44 @@ |
3553 | #include "hard-interface.h" |
3554 | #include "hash.h" |
3555 | |
3556 | +/* Returns the smallest signed integer in two's complement with the sizeof x */ |
3557 | +#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u))) |
3558 | + |
3559 | +/* Checks if a sequence number x is a predecessor/successor of y. |
3560 | + they handle overflows/underflows and can correctly check for a |
3561 | + predecessor/successor unless the variable sequence number has grown by |
3562 | + more then 2**(bitwidth(x)-1)-1. |
3563 | + This means that for a uint8_t with the maximum value 255, it would think: |
3564 | + * when adding nothing - it is neither a predecessor nor a successor |
3565 | + * before adding more than 127 to the starting value - it is a predecessor, |
3566 | + * when adding 128 - it is neither a predecessor nor a successor, |
3567 | + * after adding more than 127 to the starting value - it is a successor */ |
3568 | +#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \ |
3569 | + _dummy > smallest_signed_int(_dummy); }) |
3570 | +#define seq_after(x, y) seq_before(y, x) |
3571 | + |
3572 | struct hashtable_t *vis_hash; |
3573 | DEFINE_SPINLOCK(vis_hash_lock); |
3574 | +static DEFINE_SPINLOCK(recv_list_lock); |
3575 | static struct vis_info *my_vis_info; |
3576 | static struct list_head send_list; /* always locked with vis_hash_lock */ |
3577 | |
3578 | static void start_vis_timer(void); |
3579 | |
3580 | /* free the info */ |
3581 | -static void free_info(void *data) |
3582 | +static void free_info(struct kref *ref) |
3583 | { |
3584 | - struct vis_info *info = data; |
3585 | + struct vis_info *info = container_of(ref, struct vis_info, refcount); |
3586 | struct recvlist_node *entry, *tmp; |
3587 | + unsigned long flags; |
3588 | |
3589 | list_del_init(&info->send_list); |
3590 | + spin_lock_irqsave(&recv_list_lock, flags); |
3591 | list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { |
3592 | list_del(&entry->list); |
3593 | kfree(entry); |
3594 | } |
3595 | + spin_unlock_irqrestore(&recv_list_lock, flags); |
3596 | kfree(info); |
3597 | } |
3598 | |
3599 | @@ -82,7 +102,7 @@ static int vis_info_choose(void *data, int size) |
3600 | |
3601 | /* insert interface to the list of interfaces of one originator, if it |
3602 | * does not already exist in the list */ |
3603 | -static void proc_vis_insert_interface(const uint8_t *interface, |
3604 | +void proc_vis_insert_interface(const uint8_t *interface, |
3605 | struct hlist_head *if_list, |
3606 | bool primary) |
3607 | { |
3608 | @@ -107,38 +127,51 @@ void proc_vis_read_prim_sec(struct seq_file *seq, |
3609 | struct hlist_head *if_list) |
3610 | { |
3611 | struct if_list_entry *entry; |
3612 | - struct hlist_node *pos, *n; |
3613 | + struct hlist_node *pos; |
3614 | char tmp_addr_str[ETH_STR_LEN]; |
3615 | |
3616 | - hlist_for_each_entry_safe(entry, pos, n, if_list, list) { |
3617 | - if (entry->primary) { |
3618 | + hlist_for_each_entry(entry, pos, if_list, list) { |
3619 | + if (entry->primary) |
3620 | seq_printf(seq, "PRIMARY, "); |
3621 | - } else { |
3622 | + else { |
3623 | addr_to_string(tmp_addr_str, entry->addr); |
3624 | seq_printf(seq, "SEC %s, ", tmp_addr_str); |
3625 | } |
3626 | - |
3627 | - hlist_del(&entry->list); |
3628 | - kfree(entry); |
3629 | } |
3630 | } |
3631 | |
3632 | /* read an entry */ |
3633 | void proc_vis_read_entry(struct seq_file *seq, |
3634 | struct vis_info_entry *entry, |
3635 | - struct hlist_head *if_list, |
3636 | - uint8_t *vis_orig) |
3637 | + uint8_t *src, |
3638 | + bool primary) |
3639 | { |
3640 | char to[40]; |
3641 | |
3642 | addr_to_string(to, entry->dest); |
3643 | - if (entry->quality == 0) { |
3644 | - proc_vis_insert_interface(vis_orig, if_list, true); |
3645 | + if (primary && entry->quality == 0) |
3646 | seq_printf(seq, "HNA %s, ", to); |
3647 | - } else { |
3648 | - proc_vis_insert_interface(entry->src, if_list, |
3649 | - compare_orig(entry->src, vis_orig)); |
3650 | + else if (compare_orig(entry->src, src)) |
3651 | seq_printf(seq, "TQ %s %d, ", to, entry->quality); |
3652 | +} |
3653 | + |
3654 | +/* add the info packet to the send list, if it was not |
3655 | + * already linked in. */ |
3656 | +static void send_list_add(struct vis_info *info) |
3657 | +{ |
3658 | + if (list_empty(&info->send_list)) { |
3659 | + kref_get(&info->refcount); |
3660 | + list_add_tail(&info->send_list, &send_list); |
3661 | + } |
3662 | +} |
3663 | + |
3664 | +/* delete the info packet from the send list, if it was |
3665 | + * linked in. */ |
3666 | +static void send_list_del(struct vis_info *info) |
3667 | +{ |
3668 | + if (!list_empty(&info->send_list)) { |
3669 | + list_del_init(&info->send_list); |
3670 | + kref_put(&info->refcount, free_info); |
3671 | } |
3672 | } |
3673 | |
3674 | @@ -146,32 +179,41 @@ void proc_vis_read_entry(struct seq_file *seq, |
3675 | static void recv_list_add(struct list_head *recv_list, char *mac) |
3676 | { |
3677 | struct recvlist_node *entry; |
3678 | + unsigned long flags; |
3679 | + |
3680 | entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC); |
3681 | if (!entry) |
3682 | return; |
3683 | |
3684 | memcpy(entry->mac, mac, ETH_ALEN); |
3685 | + spin_lock_irqsave(&recv_list_lock, flags); |
3686 | list_add_tail(&entry->list, recv_list); |
3687 | + spin_unlock_irqrestore(&recv_list_lock, flags); |
3688 | } |
3689 | |
3690 | /* returns 1 if this mac is in the recv_list */ |
3691 | static int recv_list_is_in(struct list_head *recv_list, char *mac) |
3692 | { |
3693 | struct recvlist_node *entry; |
3694 | + unsigned long flags; |
3695 | |
3696 | + spin_lock_irqsave(&recv_list_lock, flags); |
3697 | list_for_each_entry(entry, recv_list, list) { |
3698 | - if (memcmp(entry->mac, mac, ETH_ALEN) == 0) |
3699 | + if (memcmp(entry->mac, mac, ETH_ALEN) == 0) { |
3700 | + spin_unlock_irqrestore(&recv_list_lock, flags); |
3701 | return 1; |
3702 | + } |
3703 | } |
3704 | - |
3705 | + spin_unlock_irqrestore(&recv_list_lock, flags); |
3706 | return 0; |
3707 | } |
3708 | |
3709 | /* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old, |
3710 | - * broken.. ). vis hash must be locked outside. is_new is set when the packet |
3711 | + * broken.. ). vis hash must be locked outside. is_new is set when the packet |
3712 | * is newer than old entries in the hash. */ |
3713 | static struct vis_info *add_packet(struct vis_packet *vis_packet, |
3714 | - int vis_info_len, int *is_new) |
3715 | + int vis_info_len, int *is_new, |
3716 | + int make_broadcast) |
3717 | { |
3718 | struct vis_info *info, *old_info; |
3719 | struct vis_info search_elem; |
3720 | @@ -186,7 +228,7 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet, |
3721 | old_info = hash_find(vis_hash, &search_elem); |
3722 | |
3723 | if (old_info != NULL) { |
3724 | - if (vis_packet->seqno - old_info->packet.seqno <= 0) { |
3725 | + if (!seq_after(vis_packet->seqno, old_info->packet.seqno)) { |
3726 | if (old_info->packet.seqno == vis_packet->seqno) { |
3727 | recv_list_add(&old_info->recv_list, |
3728 | vis_packet->sender_orig); |
3729 | @@ -198,13 +240,15 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet, |
3730 | } |
3731 | /* remove old entry */ |
3732 | hash_remove(vis_hash, old_info); |
3733 | - free_info(old_info); |
3734 | + send_list_del(old_info); |
3735 | + kref_put(&old_info->refcount, free_info); |
3736 | } |
3737 | |
3738 | info = kmalloc(sizeof(struct vis_info) + vis_info_len, GFP_ATOMIC); |
3739 | if (info == NULL) |
3740 | return NULL; |
3741 | |
3742 | + kref_init(&info->refcount); |
3743 | INIT_LIST_HEAD(&info->send_list); |
3744 | INIT_LIST_HEAD(&info->recv_list); |
3745 | info->first_seen = jiffies; |
3746 | @@ -214,16 +258,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet, |
3747 | /* initialize and add new packet. */ |
3748 | *is_new = 1; |
3749 | |
3750 | + /* Make it a broadcast packet, if required */ |
3751 | + if (make_broadcast) |
3752 | + memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); |
3753 | + |
3754 | /* repair if entries is longer than packet. */ |
3755 | if (info->packet.entries * sizeof(struct vis_info_entry) > vis_info_len) |
3756 | - info->packet.entries = vis_info_len / sizeof(struct vis_info_entry); |
3757 | + info->packet.entries = vis_info_len / |
3758 | + sizeof(struct vis_info_entry); |
3759 | |
3760 | recv_list_add(&info->recv_list, info->packet.sender_orig); |
3761 | |
3762 | /* try to add it */ |
3763 | if (hash_add(vis_hash, info) < 0) { |
3764 | /* did not work (for some reason) */ |
3765 | - free_info(info); |
3766 | + kref_put(&old_info->refcount, free_info); |
3767 | info = NULL; |
3768 | } |
3769 | |
3770 | @@ -234,22 +283,21 @@ static struct vis_info *add_packet(struct vis_packet *vis_packet, |
3771 | void receive_server_sync_packet(struct vis_packet *vis_packet, int vis_info_len) |
3772 | { |
3773 | struct vis_info *info; |
3774 | - int is_new; |
3775 | + int is_new, make_broadcast; |
3776 | unsigned long flags; |
3777 | int vis_server = atomic_read(&vis_mode); |
3778 | |
3779 | + make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC); |
3780 | + |
3781 | spin_lock_irqsave(&vis_hash_lock, flags); |
3782 | - info = add_packet(vis_packet, vis_info_len, &is_new); |
3783 | + info = add_packet(vis_packet, vis_info_len, &is_new, make_broadcast); |
3784 | if (info == NULL) |
3785 | goto end; |
3786 | |
3787 | /* only if we are server ourselves and packet is newer than the one in |
3788 | * hash.*/ |
3789 | - if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) { |
3790 | - memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); |
3791 | - if (list_empty(&info->send_list)) |
3792 | - list_add_tail(&info->send_list, &send_list); |
3793 | - } |
3794 | + if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) |
3795 | + send_list_add(info); |
3796 | end: |
3797 | spin_unlock_irqrestore(&vis_hash_lock, flags); |
3798 | } |
3799 | @@ -262,31 +310,32 @@ void receive_client_update_packet(struct vis_packet *vis_packet, |
3800 | int is_new; |
3801 | unsigned long flags; |
3802 | int vis_server = atomic_read(&vis_mode); |
3803 | + int are_target = 0; |
3804 | |
3805 | /* clients shall not broadcast. */ |
3806 | if (is_bcast(vis_packet->target_orig)) |
3807 | return; |
3808 | |
3809 | + /* Are we the target for this VIS packet? */ |
3810 | + if (vis_server == VIS_TYPE_SERVER_SYNC && |
3811 | + is_my_mac(vis_packet->target_orig)) |
3812 | + are_target = 1; |
3813 | + |
3814 | spin_lock_irqsave(&vis_hash_lock, flags); |
3815 | - info = add_packet(vis_packet, vis_info_len, &is_new); |
3816 | + info = add_packet(vis_packet, vis_info_len, &is_new, are_target); |
3817 | if (info == NULL) |
3818 | goto end; |
3819 | /* note that outdated packets will be dropped at this point. */ |
3820 | |
3821 | |
3822 | /* send only if we're the target server or ... */ |
3823 | - if (vis_server == VIS_TYPE_SERVER_SYNC && |
3824 | - is_my_mac(info->packet.target_orig) && |
3825 | - is_new) { |
3826 | + if (are_target && is_new) { |
3827 | info->packet.vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */ |
3828 | - memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); |
3829 | - if (list_empty(&info->send_list)) |
3830 | - list_add_tail(&info->send_list, &send_list); |
3831 | + send_list_add(info); |
3832 | |
3833 | /* ... we're not the recipient (and thus need to forward). */ |
3834 | } else if (!is_my_mac(info->packet.target_orig)) { |
3835 | - if (list_empty(&info->send_list)) |
3836 | - list_add_tail(&info->send_list, &send_list); |
3837 | + send_list_add(info); |
3838 | } |
3839 | end: |
3840 | spin_unlock_irqrestore(&vis_hash_lock, flags); |
3841 | @@ -361,14 +410,17 @@ static int generate_vis_packet(void) |
3842 | while (hash_iterate(orig_hash, &hashit_global)) { |
3843 | orig_node = hashit_global.bucket->data; |
3844 | if (orig_node->router != NULL |
3845 | - && compare_orig(orig_node->router->addr, orig_node->orig) |
3846 | + && compare_orig(orig_node->router->addr, |
3847 | + orig_node->orig) |
3848 | && orig_node->batman_if |
3849 | && (orig_node->batman_if->if_active == IF_ACTIVE) |
3850 | && orig_node->router->tq_avg > 0) { |
3851 | |
3852 | /* fill one entry into buffer. */ |
3853 | entry = &entry_array[info->packet.entries]; |
3854 | - memcpy(entry->src, orig_node->batman_if->net_dev->dev_addr, ETH_ALEN); |
3855 | + memcpy(entry->src, |
3856 | + orig_node->batman_if->net_dev->dev_addr, |
3857 | + ETH_ALEN); |
3858 | memcpy(entry->dest, orig_node->orig, ETH_ALEN); |
3859 | entry->quality = orig_node->router->tq_avg; |
3860 | info->packet.entries++; |
3861 | @@ -400,6 +452,8 @@ static int generate_vis_packet(void) |
3862 | return 0; |
3863 | } |
3864 | |
3865 | +/* free old vis packets. Must be called with this vis_hash_lock |
3866 | + * held */ |
3867 | static void purge_vis_packets(void) |
3868 | { |
3869 | HASHIT(hashit); |
3870 | @@ -412,7 +466,8 @@ static void purge_vis_packets(void) |
3871 | if (time_after(jiffies, |
3872 | info->first_seen + (VIS_TIMEOUT*HZ)/1000)) { |
3873 | hash_remove_bucket(vis_hash, &hashit); |
3874 | - free_info(info); |
3875 | + send_list_del(info); |
3876 | + kref_put(&info->refcount, free_info); |
3877 | } |
3878 | } |
3879 | } |
3880 | @@ -422,6 +477,8 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length) |
3881 | HASHIT(hashit); |
3882 | struct orig_node *orig_node; |
3883 | unsigned long flags; |
3884 | + struct batman_if *batman_if; |
3885 | + uint8_t dstaddr[ETH_ALEN]; |
3886 | |
3887 | spin_lock_irqsave(&orig_hash_lock, flags); |
3888 | |
3889 | @@ -430,45 +487,56 @@ static void broadcast_vis_packet(struct vis_info *info, int packet_length) |
3890 | orig_node = hashit.bucket->data; |
3891 | |
3892 | /* if it's a vis server and reachable, send it. */ |
3893 | - if (orig_node && |
3894 | - (orig_node->flags & VIS_SERVER) && |
3895 | - orig_node->batman_if && |
3896 | - orig_node->router) { |
3897 | + if ((!orig_node) || (!orig_node->batman_if) || |
3898 | + (!orig_node->router)) |
3899 | + continue; |
3900 | + if (!(orig_node->flags & VIS_SERVER)) |
3901 | + continue; |
3902 | + /* don't send it if we already received the packet from |
3903 | + * this node. */ |
3904 | + if (recv_list_is_in(&info->recv_list, orig_node->orig)) |
3905 | + continue; |
3906 | |
3907 | - /* don't send it if we already received the packet from |
3908 | - * this node. */ |
3909 | - if (recv_list_is_in(&info->recv_list, orig_node->orig)) |
3910 | - continue; |
3911 | + memcpy(info->packet.target_orig, orig_node->orig, ETH_ALEN); |
3912 | + batman_if = orig_node->batman_if; |
3913 | + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); |
3914 | + spin_unlock_irqrestore(&orig_hash_lock, flags); |
3915 | |
3916 | - memcpy(info->packet.target_orig, |
3917 | - orig_node->orig, ETH_ALEN); |
3918 | + send_raw_packet((unsigned char *)&info->packet, |
3919 | + packet_length, batman_if, dstaddr); |
3920 | + |
3921 | + spin_lock_irqsave(&orig_hash_lock, flags); |
3922 | |
3923 | - send_raw_packet((unsigned char *) &info->packet, |
3924 | - packet_length, |
3925 | - orig_node->batman_if, |
3926 | - orig_node->router->addr); |
3927 | - } |
3928 | } |
3929 | - memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); |
3930 | spin_unlock_irqrestore(&orig_hash_lock, flags); |
3931 | + memcpy(info->packet.target_orig, broadcastAddr, ETH_ALEN); |
3932 | } |
3933 | |
3934 | static void unicast_vis_packet(struct vis_info *info, int packet_length) |
3935 | { |
3936 | struct orig_node *orig_node; |
3937 | unsigned long flags; |
3938 | + struct batman_if *batman_if; |
3939 | + uint8_t dstaddr[ETH_ALEN]; |
3940 | |
3941 | spin_lock_irqsave(&orig_hash_lock, flags); |
3942 | orig_node = ((struct orig_node *) |
3943 | hash_find(orig_hash, info->packet.target_orig)); |
3944 | |
3945 | - if ((orig_node != NULL) && |
3946 | - (orig_node->batman_if != NULL) && |
3947 | - (orig_node->router != NULL)) { |
3948 | - send_raw_packet((unsigned char *) &info->packet, packet_length, |
3949 | - orig_node->batman_if, |
3950 | - orig_node->router->addr); |
3951 | - } |
3952 | + if ((!orig_node) || (!orig_node->batman_if) || (!orig_node->router)) |
3953 | + goto out; |
3954 | + |
3955 | + /* don't lock while sending the packets ... we therefore |
3956 | + * copy the required data before sending */ |
3957 | + batman_if = orig_node->batman_if; |
3958 | + memcpy(dstaddr, orig_node->router->addr, ETH_ALEN); |
3959 | + spin_unlock_irqrestore(&orig_hash_lock, flags); |
3960 | + |
3961 | + send_raw_packet((unsigned char *)&info->packet, |
3962 | + packet_length, batman_if, dstaddr); |
3963 | + return; |
3964 | + |
3965 | +out: |
3966 | spin_unlock_irqrestore(&orig_hash_lock, flags); |
3967 | } |
3968 | |
3969 | @@ -502,15 +570,24 @@ static void send_vis_packets(struct work_struct *work) |
3970 | unsigned long flags; |
3971 | |
3972 | spin_lock_irqsave(&vis_hash_lock, flags); |
3973 | + |
3974 | purge_vis_packets(); |
3975 | |
3976 | - if (generate_vis_packet() == 0) |
3977 | + if (generate_vis_packet() == 0) { |
3978 | /* schedule if generation was successful */ |
3979 | - list_add_tail(&my_vis_info->send_list, &send_list); |
3980 | + send_list_add(my_vis_info); |
3981 | + } |
3982 | |
3983 | list_for_each_entry_safe(info, temp, &send_list, send_list) { |
3984 | - list_del_init(&info->send_list); |
3985 | + |
3986 | + kref_get(&info->refcount); |
3987 | + spin_unlock_irqrestore(&vis_hash_lock, flags); |
3988 | + |
3989 | send_vis_packet(info); |
3990 | + |
3991 | + spin_lock_irqsave(&vis_hash_lock, flags); |
3992 | + send_list_del(info); |
3993 | + kref_put(&info->refcount, free_info); |
3994 | } |
3995 | spin_unlock_irqrestore(&vis_hash_lock, flags); |
3996 | start_vis_timer(); |
3997 | @@ -543,6 +620,7 @@ int vis_init(void) |
3998 | my_vis_info->first_seen = jiffies - atomic_read(&vis_interval); |
3999 | INIT_LIST_HEAD(&my_vis_info->recv_list); |
4000 | INIT_LIST_HEAD(&my_vis_info->send_list); |
4001 | + kref_init(&my_vis_info->refcount); |
4002 | my_vis_info->packet.version = COMPAT_VERSION; |
4003 | my_vis_info->packet.packet_type = BAT_VIS; |
4004 | my_vis_info->packet.ttl = TTL; |
4005 | @@ -556,9 +634,9 @@ int vis_init(void) |
4006 | |
4007 | if (hash_add(vis_hash, my_vis_info) < 0) { |
4008 | printk(KERN_ERR |
4009 | - "batman-adv:Can't add own vis packet into hash\n"); |
4010 | - free_info(my_vis_info); /* not in hash, need to remove it |
4011 | - * manually. */ |
4012 | + "batman-adv:Can't add own vis packet into hash\n"); |
4013 | + /* not in hash, need to remove it manually. */ |
4014 | + kref_put(&my_vis_info->refcount, free_info); |
4015 | goto err; |
4016 | } |
4017 | |
4018 | @@ -572,6 +650,15 @@ err: |
4019 | return 0; |
4020 | } |
4021 | |
4022 | +/* Decrease the reference count on a hash item info */ |
4023 | +static void free_info_ref(void *data) |
4024 | +{ |
4025 | + struct vis_info *info = data; |
4026 | + |
4027 | + send_list_del(info); |
4028 | + kref_put(&info->refcount, free_info); |
4029 | +} |
4030 | + |
4031 | /* shutdown vis-server */ |
4032 | void vis_quit(void) |
4033 | { |
4034 | @@ -583,7 +670,7 @@ void vis_quit(void) |
4035 | |
4036 | spin_lock_irqsave(&vis_hash_lock, flags); |
4037 | /* properly remove, kill timers ... */ |
4038 | - hash_delete(vis_hash, free_info); |
4039 | + hash_delete(vis_hash, free_info_ref); |
4040 | vis_hash = NULL; |
4041 | my_vis_info = NULL; |
4042 | spin_unlock_irqrestore(&vis_hash_lock, flags); |
4043 | diff --git a/drivers/staging/batman-adv/vis.h b/drivers/staging/batman-adv/vis.h |
4044 | index 0cdafde..a1f92a4 100644 |
4045 | --- a/drivers/staging/batman-adv/vis.h |
4046 | +++ b/drivers/staging/batman-adv/vis.h |
4047 | @@ -29,6 +29,7 @@ struct vis_info { |
4048 | /* list of server-neighbors we received a vis-packet |
4049 | * from. we should not reply to them. */ |
4050 | struct list_head send_list; |
4051 | + struct kref refcount; |
4052 | /* this packet might be part of the vis send queue. */ |
4053 | struct vis_packet packet; |
4054 | /* vis_info may follow here*/ |
4055 | @@ -48,10 +49,13 @@ struct recvlist_node { |
4056 | extern struct hashtable_t *vis_hash; |
4057 | extern spinlock_t vis_hash_lock; |
4058 | |
4059 | +void proc_vis_insert_interface(const uint8_t *interface, |
4060 | + struct hlist_head *if_list, |
4061 | + bool primary); |
4062 | void proc_vis_read_entry(struct seq_file *seq, |
4063 | struct vis_info_entry *entry, |
4064 | - struct hlist_head *if_list, |
4065 | - uint8_t *vis_orig); |
4066 | + uint8_t *src, |
4067 | + bool primary); |
4068 | void proc_vis_read_prim_sec(struct seq_file *seq, |
4069 | struct hlist_head *if_list); |
4070 | void receive_server_sync_packet(struct vis_packet *vis_packet, |
4071 | diff --git a/drivers/staging/comedi/drivers/ni_mio_cs.c b/drivers/staging/comedi/drivers/ni_mio_cs.c |
4072 | index dc4849a..9855608 100644 |
4073 | --- a/drivers/staging/comedi/drivers/ni_mio_cs.c |
4074 | +++ b/drivers/staging/comedi/drivers/ni_mio_cs.c |
4075 | @@ -123,7 +123,7 @@ static const struct ni_board_struct ni_boards[] = { |
4076 | .adbits = 12, |
4077 | .ai_fifo_depth = 1024, |
4078 | .alwaysdither = 0, |
4079 | - .gainlkup = ai_gain_16, |
4080 | + .gainlkup = ai_gain_4, |
4081 | .ai_speed = 5000, |
4082 | .n_aochan = 2, |
4083 | .aobits = 12, |
4084 | diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c |
4085 | index 740db0c..2ffd0fe 100644 |
4086 | --- a/drivers/staging/rt2860/usb_main_dev.c |
4087 | +++ b/drivers/staging/rt2860/usb_main_dev.c |
4088 | @@ -98,6 +98,7 @@ struct usb_device_id rtusb_usb_id[] = { |
4089 | {USB_DEVICE(0x5A57, 0x0282)}, /* Zinwell */ |
4090 | {USB_DEVICE(0x7392, 0x7718)}, |
4091 | {USB_DEVICE(0x7392, 0x7717)}, |
4092 | + {USB_DEVICE(0x0411, 0x016f)}, /* MelCo.,Inc. WLI-UC-G301N */ |
4093 | {USB_DEVICE(0x1737, 0x0070)}, /* Linksys WUSB100 */ |
4094 | {USB_DEVICE(0x1737, 0x0071)}, /* Linksys WUSB600N */ |
4095 | {USB_DEVICE(0x0411, 0x00e8)}, /* Buffalo WLI-UC-G300N */ |
4096 | diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c |
4097 | index e40a2e9..fea0e99 100644 |
4098 | --- a/drivers/staging/vt6655/device_main.c |
4099 | +++ b/drivers/staging/vt6655/device_main.c |
4100 | @@ -1090,11 +1090,13 @@ device_found1(struct pci_dev *pcid, const struct pci_device_id *ent) |
4101 | } |
4102 | //2008-07-21-01<Add>by MikeLiu |
4103 | //register wpadev |
4104 | +#if 0 |
4105 | if(wpa_set_wpadev(pDevice, 1)!=0) { |
4106 | printk("Fail to Register WPADEV?\n"); |
4107 | unregister_netdev(pDevice->dev); |
4108 | free_netdev(dev); |
4109 | } |
4110 | +#endif |
4111 | device_print_info(pDevice); |
4112 | pci_set_drvdata(pcid, pDevice); |
4113 | return 0; |
4114 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
4115 | index 5e1a253..3c73add 100644 |
4116 | --- a/drivers/usb/class/cdc-acm.c |
4117 | +++ b/drivers/usb/class/cdc-acm.c |
4118 | @@ -1201,7 +1201,7 @@ made_compressed_probe: |
4119 | if (rcv->urb == NULL) { |
4120 | dev_dbg(&intf->dev, |
4121 | "out of memory (read urbs usb_alloc_urb)\n"); |
4122 | - goto alloc_fail7; |
4123 | + goto alloc_fail6; |
4124 | } |
4125 | |
4126 | rcv->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; |
4127 | @@ -1225,7 +1225,7 @@ made_compressed_probe: |
4128 | if (snd->urb == NULL) { |
4129 | dev_dbg(&intf->dev, |
4130 | "out of memory (write urbs usb_alloc_urb)"); |
4131 | - goto alloc_fail7; |
4132 | + goto alloc_fail8; |
4133 | } |
4134 | |
4135 | if (usb_endpoint_xfer_int(epwrite)) |
4136 | @@ -1264,6 +1264,7 @@ made_compressed_probe: |
4137 | i = device_create_file(&intf->dev, |
4138 | &dev_attr_iCountryCodeRelDate); |
4139 | if (i < 0) { |
4140 | + device_remove_file(&intf->dev, &dev_attr_wCountryCodes); |
4141 | kfree(acm->country_codes); |
4142 | goto skip_countries; |
4143 | } |
4144 | @@ -1300,6 +1301,7 @@ alloc_fail8: |
4145 | usb_free_urb(acm->wb[i].urb); |
4146 | alloc_fail7: |
4147 | acm_read_buffers_free(acm); |
4148 | +alloc_fail6: |
4149 | for (i = 0; i < num_rx_buf; i++) |
4150 | usb_free_urb(acm->ru[i].urb); |
4151 | usb_free_urb(acm->ctrlurb); |
4152 | diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c |
4153 | index 2f3dc4c..9d02dc6 100644 |
4154 | --- a/drivers/usb/core/driver.c |
4155 | +++ b/drivers/usb/core/driver.c |
4156 | @@ -1322,6 +1322,7 @@ int usb_resume(struct device *dev, pm_message_t msg) |
4157 | |
4158 | /* For all other calls, take the device back to full power and |
4159 | * tell the PM core in case it was autosuspended previously. |
4160 | + * Unbind the interfaces that will need rebinding later. |
4161 | */ |
4162 | } else { |
4163 | status = usb_resume_both(udev, msg); |
4164 | @@ -1330,6 +1331,7 @@ int usb_resume(struct device *dev, pm_message_t msg) |
4165 | pm_runtime_set_active(dev); |
4166 | pm_runtime_enable(dev); |
4167 | udev->last_busy = jiffies; |
4168 | + do_unbind_rebind(udev, DO_REBIND); |
4169 | } |
4170 | } |
4171 | |
4172 | diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c |
4173 | index 2f8cedd..b60a14d 100644 |
4174 | --- a/drivers/usb/core/hcd.c |
4175 | +++ b/drivers/usb/core/hcd.c |
4176 | @@ -1261,6 +1261,51 @@ static void hcd_free_coherent(struct usb_bus *bus, dma_addr_t *dma_handle, |
4177 | *dma_handle = 0; |
4178 | } |
4179 | |
4180 | +static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) |
4181 | +{ |
4182 | + enum dma_data_direction dir; |
4183 | + |
4184 | + if (urb->transfer_flags & URB_SETUP_MAP_SINGLE) |
4185 | + dma_unmap_single(hcd->self.controller, |
4186 | + urb->setup_dma, |
4187 | + sizeof(struct usb_ctrlrequest), |
4188 | + DMA_TO_DEVICE); |
4189 | + else if (urb->transfer_flags & URB_SETUP_MAP_LOCAL) |
4190 | + hcd_free_coherent(urb->dev->bus, |
4191 | + &urb->setup_dma, |
4192 | + (void **) &urb->setup_packet, |
4193 | + sizeof(struct usb_ctrlrequest), |
4194 | + DMA_TO_DEVICE); |
4195 | + |
4196 | + dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
4197 | + if (urb->transfer_flags & URB_DMA_MAP_SG) |
4198 | + dma_unmap_sg(hcd->self.controller, |
4199 | + urb->sg->sg, |
4200 | + urb->num_sgs, |
4201 | + dir); |
4202 | + else if (urb->transfer_flags & URB_DMA_MAP_PAGE) |
4203 | + dma_unmap_page(hcd->self.controller, |
4204 | + urb->transfer_dma, |
4205 | + urb->transfer_buffer_length, |
4206 | + dir); |
4207 | + else if (urb->transfer_flags & URB_DMA_MAP_SINGLE) |
4208 | + dma_unmap_single(hcd->self.controller, |
4209 | + urb->transfer_dma, |
4210 | + urb->transfer_buffer_length, |
4211 | + dir); |
4212 | + else if (urb->transfer_flags & URB_MAP_LOCAL) |
4213 | + hcd_free_coherent(urb->dev->bus, |
4214 | + &urb->transfer_dma, |
4215 | + &urb->transfer_buffer, |
4216 | + urb->transfer_buffer_length, |
4217 | + dir); |
4218 | + |
4219 | + /* Make it safe to call this routine more than once */ |
4220 | + urb->transfer_flags &= ~(URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL | |
4221 | + URB_DMA_MAP_SG | URB_DMA_MAP_PAGE | |
4222 | + URB_DMA_MAP_SINGLE | URB_MAP_LOCAL); |
4223 | +} |
4224 | + |
4225 | static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
4226 | gfp_t mem_flags) |
4227 | { |
4228 | @@ -1272,8 +1317,6 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
4229 | * unless it uses pio or talks to another transport, |
4230 | * or uses the provided scatter gather list for bulk. |
4231 | */ |
4232 | - if (is_root_hub(urb->dev)) |
4233 | - return 0; |
4234 | |
4235 | if (usb_endpoint_xfer_control(&urb->ep->desc) |
4236 | && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) { |
4237 | @@ -1286,6 +1329,7 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
4238 | if (dma_mapping_error(hcd->self.controller, |
4239 | urb->setup_dma)) |
4240 | return -EAGAIN; |
4241 | + urb->transfer_flags |= URB_SETUP_MAP_SINGLE; |
4242 | } else if (hcd->driver->flags & HCD_LOCAL_MEM) |
4243 | ret = hcd_alloc_coherent( |
4244 | urb->dev->bus, mem_flags, |
4245 | @@ -1293,20 +1337,57 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
4246 | (void **)&urb->setup_packet, |
4247 | sizeof(struct usb_ctrlrequest), |
4248 | DMA_TO_DEVICE); |
4249 | + if (ret) |
4250 | + return ret; |
4251 | + urb->transfer_flags |= URB_SETUP_MAP_LOCAL; |
4252 | } |
4253 | |
4254 | dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
4255 | - if (ret == 0 && urb->transfer_buffer_length != 0 |
4256 | + if (urb->transfer_buffer_length != 0 |
4257 | && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { |
4258 | if (hcd->self.uses_dma) { |
4259 | - urb->transfer_dma = dma_map_single ( |
4260 | - hcd->self.controller, |
4261 | - urb->transfer_buffer, |
4262 | - urb->transfer_buffer_length, |
4263 | - dir); |
4264 | - if (dma_mapping_error(hcd->self.controller, |
4265 | + if (urb->num_sgs) { |
4266 | + int n = dma_map_sg( |
4267 | + hcd->self.controller, |
4268 | + urb->sg->sg, |
4269 | + urb->num_sgs, |
4270 | + dir); |
4271 | + if (n <= 0) |
4272 | + ret = -EAGAIN; |
4273 | + else |
4274 | + urb->transfer_flags |= URB_DMA_MAP_SG; |
4275 | + if (n != urb->num_sgs) { |
4276 | + urb->num_sgs = n; |
4277 | + urb->transfer_flags |= |
4278 | + URB_DMA_SG_COMBINED; |
4279 | + } |
4280 | + } else if (urb->sg) { |
4281 | + struct scatterlist *sg; |
4282 | + |
4283 | + sg = (struct scatterlist *) urb->sg; |
4284 | + urb->transfer_dma = dma_map_page( |
4285 | + hcd->self.controller, |
4286 | + sg_page(sg), |
4287 | + sg->offset, |
4288 | + urb->transfer_buffer_length, |
4289 | + dir); |
4290 | + if (dma_mapping_error(hcd->self.controller, |
4291 | urb->transfer_dma)) |
4292 | - return -EAGAIN; |
4293 | + ret = -EAGAIN; |
4294 | + else |
4295 | + urb->transfer_flags |= URB_DMA_MAP_PAGE; |
4296 | + } else { |
4297 | + urb->transfer_dma = dma_map_single( |
4298 | + hcd->self.controller, |
4299 | + urb->transfer_buffer, |
4300 | + urb->transfer_buffer_length, |
4301 | + dir); |
4302 | + if (dma_mapping_error(hcd->self.controller, |
4303 | + urb->transfer_dma)) |
4304 | + ret = -EAGAIN; |
4305 | + else |
4306 | + urb->transfer_flags |= URB_DMA_MAP_SINGLE; |
4307 | + } |
4308 | } else if (hcd->driver->flags & HCD_LOCAL_MEM) { |
4309 | ret = hcd_alloc_coherent( |
4310 | urb->dev->bus, mem_flags, |
4311 | @@ -1314,55 +1395,16 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb, |
4312 | &urb->transfer_buffer, |
4313 | urb->transfer_buffer_length, |
4314 | dir); |
4315 | - |
4316 | - if (ret && usb_endpoint_xfer_control(&urb->ep->desc) |
4317 | - && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) |
4318 | - hcd_free_coherent(urb->dev->bus, |
4319 | - &urb->setup_dma, |
4320 | - (void **)&urb->setup_packet, |
4321 | - sizeof(struct usb_ctrlrequest), |
4322 | - DMA_TO_DEVICE); |
4323 | + if (ret == 0) |
4324 | + urb->transfer_flags |= URB_MAP_LOCAL; |
4325 | } |
4326 | + if (ret && (urb->transfer_flags & (URB_SETUP_MAP_SINGLE | |
4327 | + URB_SETUP_MAP_LOCAL))) |
4328 | + unmap_urb_for_dma(hcd, urb); |
4329 | } |
4330 | return ret; |
4331 | } |
4332 | |
4333 | -static void unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb) |
4334 | -{ |
4335 | - enum dma_data_direction dir; |
4336 | - |
4337 | - if (is_root_hub(urb->dev)) |
4338 | - return; |
4339 | - |
4340 | - if (usb_endpoint_xfer_control(&urb->ep->desc) |
4341 | - && !(urb->transfer_flags & URB_NO_SETUP_DMA_MAP)) { |
4342 | - if (hcd->self.uses_dma) |
4343 | - dma_unmap_single(hcd->self.controller, urb->setup_dma, |
4344 | - sizeof(struct usb_ctrlrequest), |
4345 | - DMA_TO_DEVICE); |
4346 | - else if (hcd->driver->flags & HCD_LOCAL_MEM) |
4347 | - hcd_free_coherent(urb->dev->bus, &urb->setup_dma, |
4348 | - (void **)&urb->setup_packet, |
4349 | - sizeof(struct usb_ctrlrequest), |
4350 | - DMA_TO_DEVICE); |
4351 | - } |
4352 | - |
4353 | - dir = usb_urb_dir_in(urb) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; |
4354 | - if (urb->transfer_buffer_length != 0 |
4355 | - && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) { |
4356 | - if (hcd->self.uses_dma) |
4357 | - dma_unmap_single(hcd->self.controller, |
4358 | - urb->transfer_dma, |
4359 | - urb->transfer_buffer_length, |
4360 | - dir); |
4361 | - else if (hcd->driver->flags & HCD_LOCAL_MEM) |
4362 | - hcd_free_coherent(urb->dev->bus, &urb->transfer_dma, |
4363 | - &urb->transfer_buffer, |
4364 | - urb->transfer_buffer_length, |
4365 | - dir); |
4366 | - } |
4367 | -} |
4368 | - |
4369 | /*-------------------------------------------------------------------------*/ |
4370 | |
4371 | /* may be called in any context with a valid urb->dev usecount |
4372 | @@ -1391,21 +1433,20 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags) |
4373 | * URBs must be submitted in process context with interrupts |
4374 | * enabled. |
4375 | */ |
4376 | - status = map_urb_for_dma(hcd, urb, mem_flags); |
4377 | - if (unlikely(status)) { |
4378 | - usbmon_urb_submit_error(&hcd->self, urb, status); |
4379 | - goto error; |
4380 | - } |
4381 | |
4382 | - if (is_root_hub(urb->dev)) |
4383 | + if (is_root_hub(urb->dev)) { |
4384 | status = rh_urb_enqueue(hcd, urb); |
4385 | - else |
4386 | - status = hcd->driver->urb_enqueue(hcd, urb, mem_flags); |
4387 | + } else { |
4388 | + status = map_urb_for_dma(hcd, urb, mem_flags); |
4389 | + if (likely(status == 0)) { |
4390 | + status = hcd->driver->urb_enqueue(hcd, urb, mem_flags); |
4391 | + if (unlikely(status)) |
4392 | + unmap_urb_for_dma(hcd, urb); |
4393 | + } |
4394 | + } |
4395 | |
4396 | if (unlikely(status)) { |
4397 | usbmon_urb_submit_error(&hcd->self, urb, status); |
4398 | - unmap_urb_for_dma(hcd, urb); |
4399 | - error: |
4400 | urb->hcpriv = NULL; |
4401 | INIT_LIST_HEAD(&urb->urb_list); |
4402 | atomic_dec(&urb->use_count); |
4403 | diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c |
4404 | index cd22027..794dca2 100644 |
4405 | --- a/drivers/usb/core/message.c |
4406 | +++ b/drivers/usb/core/message.c |
4407 | @@ -259,9 +259,6 @@ static void sg_clean(struct usb_sg_request *io) |
4408 | kfree(io->urbs); |
4409 | io->urbs = NULL; |
4410 | } |
4411 | - if (io->dev->dev.dma_mask != NULL) |
4412 | - usb_buffer_unmap_sg(io->dev, usb_pipein(io->pipe), |
4413 | - io->sg, io->nents); |
4414 | io->dev = NULL; |
4415 | } |
4416 | |
4417 | @@ -364,7 +361,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, |
4418 | { |
4419 | int i; |
4420 | int urb_flags; |
4421 | - int dma; |
4422 | int use_sg; |
4423 | |
4424 | if (!io || !dev || !sg |
4425 | @@ -378,21 +374,9 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, |
4426 | io->pipe = pipe; |
4427 | io->sg = sg; |
4428 | io->nents = nents; |
4429 | - |
4430 | - /* not all host controllers use DMA (like the mainstream pci ones); |
4431 | - * they can use PIO (sl811) or be software over another transport. |
4432 | - */ |
4433 | - dma = (dev->dev.dma_mask != NULL); |
4434 | - if (dma) |
4435 | - io->entries = usb_buffer_map_sg(dev, usb_pipein(pipe), |
4436 | - sg, nents); |
4437 | - else |
4438 | - io->entries = nents; |
4439 | + io->entries = nents; |
4440 | |
4441 | /* initialize all the urbs we'll use */ |
4442 | - if (io->entries <= 0) |
4443 | - return io->entries; |
4444 | - |
4445 | if (dev->bus->sg_tablesize > 0) { |
4446 | io->urbs = kmalloc(sizeof *io->urbs, mem_flags); |
4447 | use_sg = true; |
4448 | @@ -404,8 +388,6 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, |
4449 | goto nomem; |
4450 | |
4451 | urb_flags = 0; |
4452 | - if (dma) |
4453 | - urb_flags |= URB_NO_TRANSFER_DMA_MAP; |
4454 | if (usb_pipein(pipe)) |
4455 | urb_flags |= URB_SHORT_NOT_OK; |
4456 | |
4457 | @@ -423,12 +405,13 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, |
4458 | |
4459 | io->urbs[0]->complete = sg_complete; |
4460 | io->urbs[0]->context = io; |
4461 | + |
4462 | /* A length of zero means transfer the whole sg list */ |
4463 | io->urbs[0]->transfer_buffer_length = length; |
4464 | if (length == 0) { |
4465 | for_each_sg(sg, sg, io->entries, i) { |
4466 | io->urbs[0]->transfer_buffer_length += |
4467 | - sg_dma_len(sg); |
4468 | + sg->length; |
4469 | } |
4470 | } |
4471 | io->urbs[0]->sg = io; |
4472 | @@ -454,26 +437,16 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, |
4473 | io->urbs[i]->context = io; |
4474 | |
4475 | /* |
4476 | - * Some systems need to revert to PIO when DMA is temporarily |
4477 | - * unavailable. For their sakes, both transfer_buffer and |
4478 | - * transfer_dma are set when possible. |
4479 | - * |
4480 | - * Note that if IOMMU coalescing occurred, we cannot |
4481 | - * trust sg_page anymore, so check if S/G list shrunk. |
4482 | + * Some systems can't use DMA; they use PIO instead. |
4483 | + * For their sakes, transfer_buffer is set whenever |
4484 | + * possible. |
4485 | */ |
4486 | - if (io->nents == io->entries && !PageHighMem(sg_page(sg))) |
4487 | + if (!PageHighMem(sg_page(sg))) |
4488 | io->urbs[i]->transfer_buffer = sg_virt(sg); |
4489 | else |
4490 | io->urbs[i]->transfer_buffer = NULL; |
4491 | |
4492 | - if (dma) { |
4493 | - io->urbs[i]->transfer_dma = sg_dma_address(sg); |
4494 | - len = sg_dma_len(sg); |
4495 | - } else { |
4496 | - /* hc may use _only_ transfer_buffer */ |
4497 | - len = sg->length; |
4498 | - } |
4499 | - |
4500 | + len = sg->length; |
4501 | if (length) { |
4502 | len = min_t(unsigned, len, length); |
4503 | length -= len; |
4504 | @@ -481,6 +454,8 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev, |
4505 | io->entries = i + 1; |
4506 | } |
4507 | io->urbs[i]->transfer_buffer_length = len; |
4508 | + |
4509 | + io->urbs[i]->sg = (struct usb_sg_request *) sg; |
4510 | } |
4511 | io->urbs[--i]->transfer_flags &= ~URB_NO_INTERRUPT; |
4512 | } |
4513 | diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c |
4514 | index 45a32da..fec46d0 100644 |
4515 | --- a/drivers/usb/core/urb.c |
4516 | +++ b/drivers/usb/core/urb.c |
4517 | @@ -333,9 +333,12 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) |
4518 | is_out = usb_endpoint_dir_out(&ep->desc); |
4519 | } |
4520 | |
4521 | - /* Cache the direction for later use */ |
4522 | - urb->transfer_flags = (urb->transfer_flags & ~URB_DIR_MASK) | |
4523 | - (is_out ? URB_DIR_OUT : URB_DIR_IN); |
4524 | + /* Clear the internal flags and cache the direction for later use */ |
4525 | + urb->transfer_flags &= ~(URB_DIR_MASK | URB_DMA_MAP_SINGLE | |
4526 | + URB_DMA_MAP_PAGE | URB_DMA_MAP_SG | URB_MAP_LOCAL | |
4527 | + URB_SETUP_MAP_SINGLE | URB_SETUP_MAP_LOCAL | |
4528 | + URB_DMA_SG_COMBINED); |
4529 | + urb->transfer_flags |= (is_out ? URB_DIR_OUT : URB_DIR_IN); |
4530 | |
4531 | if (xfertype != USB_ENDPOINT_XFER_CONTROL && |
4532 | dev->state < USB_STATE_CONFIGURED) |
4533 | diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c |
4534 | index 0561430..956108e 100644 |
4535 | --- a/drivers/usb/core/usb.c |
4536 | +++ b/drivers/usb/core/usb.c |
4537 | @@ -893,6 +893,7 @@ void usb_buffer_unmap(struct urb *urb) |
4538 | EXPORT_SYMBOL_GPL(usb_buffer_unmap); |
4539 | #endif /* 0 */ |
4540 | |
4541 | +#if 0 |
4542 | /** |
4543 | * usb_buffer_map_sg - create scatterlist DMA mapping(s) for an endpoint |
4544 | * @dev: device to which the scatterlist will be mapped |
4545 | @@ -936,6 +937,7 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in, |
4546 | is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM; |
4547 | } |
4548 | EXPORT_SYMBOL_GPL(usb_buffer_map_sg); |
4549 | +#endif |
4550 | |
4551 | /* XXX DISABLED, no users currently. If you wish to re-enable this |
4552 | * XXX please determine whether the sync is to transfer ownership of |
4553 | @@ -972,6 +974,7 @@ void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in, |
4554 | EXPORT_SYMBOL_GPL(usb_buffer_dmasync_sg); |
4555 | #endif |
4556 | |
4557 | +#if 0 |
4558 | /** |
4559 | * usb_buffer_unmap_sg - free DMA mapping(s) for a scatterlist |
4560 | * @dev: device to which the scatterlist will be mapped |
4561 | @@ -997,6 +1000,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in, |
4562 | is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE); |
4563 | } |
4564 | EXPORT_SYMBOL_GPL(usb_buffer_unmap_sg); |
4565 | +#endif |
4566 | |
4567 | /* To disable USB, kernel command line is 'nousb' not 'usbcore.nousb' */ |
4568 | #ifdef MODULE |
4569 | diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c |
4570 | index fa3d142..08a9a62 100644 |
4571 | --- a/drivers/usb/gadget/fsl_udc_core.c |
4572 | +++ b/drivers/usb/gadget/fsl_udc_core.c |
4573 | @@ -489,7 +489,7 @@ static int fsl_ep_enable(struct usb_ep *_ep, |
4574 | case USB_ENDPOINT_XFER_ISOC: |
4575 | /* Calculate transactions needed for high bandwidth iso */ |
4576 | mult = (unsigned char)(1 + ((max >> 11) & 0x03)); |
4577 | - max = max & 0x8ff; /* bit 0~10 */ |
4578 | + max = max & 0x7ff; /* bit 0~10 */ |
4579 | /* 3 transactions at most */ |
4580 | if (mult > 3) |
4581 | goto en_done; |
4582 | diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c |
4583 | index e3a74e7..a422a1b 100644 |
4584 | --- a/drivers/usb/host/ehci-au1xxx.c |
4585 | +++ b/drivers/usb/host/ehci-au1xxx.c |
4586 | @@ -215,26 +215,17 @@ static int ehci_hcd_au1xxx_drv_suspend(struct device *dev) |
4587 | msleep(10); |
4588 | |
4589 | /* Root hub was already suspended. Disable irq emission and |
4590 | - * mark HW unaccessible, bail out if RH has been resumed. Use |
4591 | - * the spinlock to properly synchronize with possible pending |
4592 | - * RH suspend or resume activity. |
4593 | - * |
4594 | - * This is still racy as hcd->state is manipulated outside of |
4595 | - * any locks =P But that will be a different fix. |
4596 | + * mark HW unaccessible. The PM and USB cores make sure that |
4597 | + * the root hub is either suspended or stopped. |
4598 | */ |
4599 | spin_lock_irqsave(&ehci->lock, flags); |
4600 | - if (hcd->state != HC_STATE_SUSPENDED) { |
4601 | - rc = -EINVAL; |
4602 | - goto bail; |
4603 | - } |
4604 | + ehci_prepare_ports_for_controller_suspend(ehci); |
4605 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); |
4606 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); |
4607 | |
4608 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
4609 | |
4610 | au1xxx_stop_ehc(); |
4611 | - |
4612 | -bail: |
4613 | spin_unlock_irqrestore(&ehci->lock, flags); |
4614 | |
4615 | // could save FLADJ in case of Vaux power loss |
4616 | @@ -264,6 +255,7 @@ static int ehci_hcd_au1xxx_drv_resume(struct device *dev) |
4617 | if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) { |
4618 | int mask = INTR_MASK; |
4619 | |
4620 | + ehci_prepare_ports_for_controller_resume(ehci); |
4621 | if (!hcd->self.root_hub->do_remote_wakeup) |
4622 | mask &= ~STS_PCD; |
4623 | ehci_writel(ehci, mask, &ehci->regs->intr_enable); |
4624 | diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c |
4625 | index 0e26aa1..5cd967d 100644 |
4626 | --- a/drivers/usb/host/ehci-fsl.c |
4627 | +++ b/drivers/usb/host/ehci-fsl.c |
4628 | @@ -313,6 +313,7 @@ static int ehci_fsl_drv_suspend(struct device *dev) |
4629 | struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd); |
4630 | void __iomem *non_ehci = hcd->regs; |
4631 | |
4632 | + ehci_prepare_ports_for_controller_suspend(hcd_to_ehci(hcd)); |
4633 | if (!fsl_deep_sleep()) |
4634 | return 0; |
4635 | |
4636 | @@ -327,6 +328,7 @@ static int ehci_fsl_drv_resume(struct device *dev) |
4637 | struct ehci_hcd *ehci = hcd_to_ehci(hcd); |
4638 | void __iomem *non_ehci = hcd->regs; |
4639 | |
4640 | + ehci_prepare_ports_for_controller_resume(ehci); |
4641 | if (!fsl_deep_sleep()) |
4642 | return 0; |
4643 | |
4644 | diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c |
4645 | index c7178bc..1b2af4d 100644 |
4646 | --- a/drivers/usb/host/ehci-hub.c |
4647 | +++ b/drivers/usb/host/ehci-hub.c |
4648 | @@ -106,12 +106,75 @@ static void ehci_handover_companion_ports(struct ehci_hcd *ehci) |
4649 | ehci->owned_ports = 0; |
4650 | } |
4651 | |
4652 | +static void ehci_adjust_port_wakeup_flags(struct ehci_hcd *ehci, |
4653 | + bool suspending) |
4654 | +{ |
4655 | + int port; |
4656 | + u32 temp; |
4657 | + |
4658 | + /* If remote wakeup is enabled for the root hub but disabled |
4659 | + * for the controller, we must adjust all the port wakeup flags |
4660 | + * when the controller is suspended or resumed. In all other |
4661 | + * cases they don't need to be changed. |
4662 | + */ |
4663 | + if (!ehci_to_hcd(ehci)->self.root_hub->do_remote_wakeup || |
4664 | + device_may_wakeup(ehci_to_hcd(ehci)->self.controller)) |
4665 | + return; |
4666 | + |
4667 | + /* clear phy low-power mode before changing wakeup flags */ |
4668 | + if (ehci->has_hostpc) { |
4669 | + port = HCS_N_PORTS(ehci->hcs_params); |
4670 | + while (port--) { |
4671 | + u32 __iomem *hostpc_reg; |
4672 | + |
4673 | + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs |
4674 | + + HOSTPC0 + 4 * port); |
4675 | + temp = ehci_readl(ehci, hostpc_reg); |
4676 | + ehci_writel(ehci, temp & ~HOSTPC_PHCD, hostpc_reg); |
4677 | + } |
4678 | + msleep(5); |
4679 | + } |
4680 | + |
4681 | + port = HCS_N_PORTS(ehci->hcs_params); |
4682 | + while (port--) { |
4683 | + u32 __iomem *reg = &ehci->regs->port_status[port]; |
4684 | + u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; |
4685 | + u32 t2 = t1 & ~PORT_WAKE_BITS; |
4686 | + |
4687 | + /* If we are suspending the controller, clear the flags. |
4688 | + * If we are resuming the controller, set the wakeup flags. |
4689 | + */ |
4690 | + if (!suspending) { |
4691 | + if (t1 & PORT_CONNECT) |
4692 | + t2 |= PORT_WKOC_E | PORT_WKDISC_E; |
4693 | + else |
4694 | + t2 |= PORT_WKOC_E | PORT_WKCONN_E; |
4695 | + } |
4696 | + ehci_vdbg(ehci, "port %d, %08x -> %08x\n", |
4697 | + port + 1, t1, t2); |
4698 | + ehci_writel(ehci, t2, reg); |
4699 | + } |
4700 | + |
4701 | + /* enter phy low-power mode again */ |
4702 | + if (ehci->has_hostpc) { |
4703 | + port = HCS_N_PORTS(ehci->hcs_params); |
4704 | + while (port--) { |
4705 | + u32 __iomem *hostpc_reg; |
4706 | + |
4707 | + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs |
4708 | + + HOSTPC0 + 4 * port); |
4709 | + temp = ehci_readl(ehci, hostpc_reg); |
4710 | + ehci_writel(ehci, temp | HOSTPC_PHCD, hostpc_reg); |
4711 | + } |
4712 | + } |
4713 | +} |
4714 | + |
4715 | static int ehci_bus_suspend (struct usb_hcd *hcd) |
4716 | { |
4717 | struct ehci_hcd *ehci = hcd_to_ehci (hcd); |
4718 | int port; |
4719 | int mask; |
4720 | - u32 __iomem *hostpc_reg = NULL; |
4721 | + int changed; |
4722 | |
4723 | ehci_dbg(ehci, "suspend root hub\n"); |
4724 | |
4725 | @@ -155,15 +218,13 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) |
4726 | */ |
4727 | ehci->bus_suspended = 0; |
4728 | ehci->owned_ports = 0; |
4729 | + changed = 0; |
4730 | port = HCS_N_PORTS(ehci->hcs_params); |
4731 | while (port--) { |
4732 | u32 __iomem *reg = &ehci->regs->port_status [port]; |
4733 | u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS; |
4734 | - u32 t2 = t1; |
4735 | + u32 t2 = t1 & ~PORT_WAKE_BITS; |
4736 | |
4737 | - if (ehci->has_hostpc) |
4738 | - hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs |
4739 | - + HOSTPC0 + 4 * (port & 0xff)); |
4740 | /* keep track of which ports we suspend */ |
4741 | if (t1 & PORT_OWNER) |
4742 | set_bit(port, &ehci->owned_ports); |
4743 | @@ -172,40 +233,45 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) |
4744 | set_bit(port, &ehci->bus_suspended); |
4745 | } |
4746 | |
4747 | - /* enable remote wakeup on all ports */ |
4748 | + /* enable remote wakeup on all ports, if told to do so */ |
4749 | if (hcd->self.root_hub->do_remote_wakeup) { |
4750 | /* only enable appropriate wake bits, otherwise the |
4751 | * hardware can not go phy low power mode. If a race |
4752 | * condition happens here(connection change during bits |
4753 | * set), the port change detection will finally fix it. |
4754 | */ |
4755 | - if (t1 & PORT_CONNECT) { |
4756 | + if (t1 & PORT_CONNECT) |
4757 | t2 |= PORT_WKOC_E | PORT_WKDISC_E; |
4758 | - t2 &= ~PORT_WKCONN_E; |
4759 | - } else { |
4760 | + else |
4761 | t2 |= PORT_WKOC_E | PORT_WKCONN_E; |
4762 | - t2 &= ~PORT_WKDISC_E; |
4763 | - } |
4764 | - } else |
4765 | - t2 &= ~PORT_WAKE_BITS; |
4766 | + } |
4767 | |
4768 | if (t1 != t2) { |
4769 | ehci_vdbg (ehci, "port %d, %08x -> %08x\n", |
4770 | port + 1, t1, t2); |
4771 | ehci_writel(ehci, t2, reg); |
4772 | - if (hostpc_reg) { |
4773 | - u32 t3; |
4774 | + changed = 1; |
4775 | + } |
4776 | + } |
4777 | |
4778 | - spin_unlock_irq(&ehci->lock); |
4779 | - msleep(5);/* 5ms for HCD enter low pwr mode */ |
4780 | - spin_lock_irq(&ehci->lock); |
4781 | - t3 = ehci_readl(ehci, hostpc_reg); |
4782 | - ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); |
4783 | - t3 = ehci_readl(ehci, hostpc_reg); |
4784 | - ehci_dbg(ehci, "Port%d phy low pwr mode %s\n", |
4785 | + if (changed && ehci->has_hostpc) { |
4786 | + spin_unlock_irq(&ehci->lock); |
4787 | + msleep(5); /* 5 ms for HCD to enter low-power mode */ |
4788 | + spin_lock_irq(&ehci->lock); |
4789 | + |
4790 | + port = HCS_N_PORTS(ehci->hcs_params); |
4791 | + while (port--) { |
4792 | + u32 __iomem *hostpc_reg; |
4793 | + u32 t3; |
4794 | + |
4795 | + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs |
4796 | + + HOSTPC0 + 4 * port); |
4797 | + t3 = ehci_readl(ehci, hostpc_reg); |
4798 | + ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg); |
4799 | + t3 = ehci_readl(ehci, hostpc_reg); |
4800 | + ehci_dbg(ehci, "Port %d phy low-power mode %s\n", |
4801 | port, (t3 & HOSTPC_PHCD) ? |
4802 | "succeeded" : "failed"); |
4803 | - } |
4804 | } |
4805 | } |
4806 | |
4807 | @@ -291,6 +357,25 @@ static int ehci_bus_resume (struct usb_hcd *hcd) |
4808 | msleep(8); |
4809 | spin_lock_irq(&ehci->lock); |
4810 | |
4811 | + /* clear phy low-power mode before resume */ |
4812 | + if (ehci->bus_suspended && ehci->has_hostpc) { |
4813 | + i = HCS_N_PORTS(ehci->hcs_params); |
4814 | + while (i--) { |
4815 | + if (test_bit(i, &ehci->bus_suspended)) { |
4816 | + u32 __iomem *hostpc_reg; |
4817 | + |
4818 | + hostpc_reg = (u32 __iomem *)((u8 *) ehci->regs |
4819 | + + HOSTPC0 + 4 * i); |
4820 | + temp = ehci_readl(ehci, hostpc_reg); |
4821 | + ehci_writel(ehci, temp & ~HOSTPC_PHCD, |
4822 | + hostpc_reg); |
4823 | + } |
4824 | + } |
4825 | + spin_unlock_irq(&ehci->lock); |
4826 | + msleep(5); |
4827 | + spin_lock_irq(&ehci->lock); |
4828 | + } |
4829 | + |
4830 | /* manually resume the ports we suspended during bus_suspend() */ |
4831 | i = HCS_N_PORTS (ehci->hcs_params); |
4832 | while (i--) { |
4833 | @@ -675,16 +760,25 @@ static int ehci_hub_control ( |
4834 | goto error; |
4835 | if (ehci->no_selective_suspend) |
4836 | break; |
4837 | - if (temp & PORT_SUSPEND) { |
4838 | - if ((temp & PORT_PE) == 0) |
4839 | - goto error; |
4840 | - /* resume signaling for 20 msec */ |
4841 | - temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); |
4842 | - ehci_writel(ehci, temp | PORT_RESUME, |
4843 | - status_reg); |
4844 | - ehci->reset_done [wIndex] = jiffies |
4845 | - + msecs_to_jiffies (20); |
4846 | + if (!(temp & PORT_SUSPEND)) |
4847 | + break; |
4848 | + if ((temp & PORT_PE) == 0) |
4849 | + goto error; |
4850 | + |
4851 | + /* clear phy low-power mode before resume */ |
4852 | + if (hostpc_reg) { |
4853 | + temp1 = ehci_readl(ehci, hostpc_reg); |
4854 | + ehci_writel(ehci, temp1 & ~HOSTPC_PHCD, |
4855 | + hostpc_reg); |
4856 | + spin_unlock_irqrestore(&ehci->lock, flags); |
4857 | + msleep(5);/* wait to leave low-power mode */ |
4858 | + spin_lock_irqsave(&ehci->lock, flags); |
4859 | } |
4860 | + /* resume signaling for 20 msec */ |
4861 | + temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); |
4862 | + ehci_writel(ehci, temp | PORT_RESUME, status_reg); |
4863 | + ehci->reset_done[wIndex] = jiffies |
4864 | + + msecs_to_jiffies(20); |
4865 | break; |
4866 | case USB_PORT_FEAT_C_SUSPEND: |
4867 | clear_bit(wIndex, &ehci->port_c_suspend); |
4868 | diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c |
4869 | index ead5f4f..c5f662d 100644 |
4870 | --- a/drivers/usb/host/ehci-pci.c |
4871 | +++ b/drivers/usb/host/ehci-pci.c |
4872 | @@ -284,23 +284,15 @@ static int ehci_pci_suspend(struct usb_hcd *hcd) |
4873 | msleep(10); |
4874 | |
4875 | /* Root hub was already suspended. Disable irq emission and |
4876 | - * mark HW unaccessible, bail out if RH has been resumed. Use |
4877 | - * the spinlock to properly synchronize with possible pending |
4878 | - * RH suspend or resume activity. |
4879 | - * |
4880 | - * This is still racy as hcd->state is manipulated outside of |
4881 | - * any locks =P But that will be a different fix. |
4882 | + * mark HW unaccessible. The PM and USB cores make sure that |
4883 | + * the root hub is either suspended or stopped. |
4884 | */ |
4885 | spin_lock_irqsave (&ehci->lock, flags); |
4886 | - if (hcd->state != HC_STATE_SUSPENDED) { |
4887 | - rc = -EINVAL; |
4888 | - goto bail; |
4889 | - } |
4890 | + ehci_prepare_ports_for_controller_suspend(ehci); |
4891 | ehci_writel(ehci, 0, &ehci->regs->intr_enable); |
4892 | (void)ehci_readl(ehci, &ehci->regs->intr_enable); |
4893 | |
4894 | clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); |
4895 | - bail: |
4896 | spin_unlock_irqrestore (&ehci->lock, flags); |
4897 | |
4898 | // could save FLADJ in case of Vaux power loss |
4899 | @@ -330,6 +322,7 @@ static int ehci_pci_resume(struct usb_hcd *hcd, bool hibernated) |
4900 | !hibernated) { |
4901 | int mask = INTR_MASK; |
4902 | |
4903 | + ehci_prepare_ports_for_controller_resume(ehci); |
4904 | if (!hcd->self.root_hub->do_remote_wakeup) |
4905 | mask &= ~STS_PCD; |
4906 | ehci_writel(ehci, mask, &ehci->regs->intr_enable); |
4907 | diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h |
4908 | index 556c0b4..ddf61c3 100644 |
4909 | --- a/drivers/usb/host/ehci.h |
4910 | +++ b/drivers/usb/host/ehci.h |
4911 | @@ -536,6 +536,16 @@ struct ehci_fstn { |
4912 | |
4913 | /*-------------------------------------------------------------------------*/ |
4914 | |
4915 | +/* Prepare the PORTSC wakeup flags during controller suspend/resume */ |
4916 | + |
4917 | +#define ehci_prepare_ports_for_controller_suspend(ehci) \ |
4918 | + ehci_adjust_port_wakeup_flags(ehci, true); |
4919 | + |
4920 | +#define ehci_prepare_ports_for_controller_resume(ehci) \ |
4921 | + ehci_adjust_port_wakeup_flags(ehci, false); |
4922 | + |
4923 | +/*-------------------------------------------------------------------------*/ |
4924 | + |
4925 | #ifdef CONFIG_USB_EHCI_ROOT_HUB_TT |
4926 | |
4927 | /* |
4928 | diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h |
4929 | index 72dae1c..3b6e864 100644 |
4930 | --- a/drivers/usb/host/fhci.h |
4931 | +++ b/drivers/usb/host/fhci.h |
4932 | @@ -20,6 +20,7 @@ |
4933 | |
4934 | #include <linux/kernel.h> |
4935 | #include <linux/types.h> |
4936 | +#include <linux/bug.h> |
4937 | #include <linux/spinlock.h> |
4938 | #include <linux/interrupt.h> |
4939 | #include <linux/kfifo.h> |
4940 | @@ -515,9 +516,13 @@ static inline int cq_put(struct kfifo *kfifo, void *p) |
4941 | |
4942 | static inline void *cq_get(struct kfifo *kfifo) |
4943 | { |
4944 | - void *p = NULL; |
4945 | + unsigned int sz; |
4946 | + void *p; |
4947 | + |
4948 | + sz = kfifo_out(kfifo, (void *)&p, sizeof(p)); |
4949 | + if (sz != sizeof(p)) |
4950 | + return NULL; |
4951 | |
4952 | - kfifo_out(kfifo, (void *)&p, sizeof(p)); |
4953 | return p; |
4954 | } |
4955 | |
4956 | diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c |
4957 | index 141d049..b388dd1 100644 |
4958 | --- a/drivers/usb/host/whci/qset.c |
4959 | +++ b/drivers/usb/host/whci/qset.c |
4960 | @@ -646,7 +646,7 @@ int qset_add_urb(struct whc *whc, struct whc_qset *qset, struct urb *urb, |
4961 | wurb->urb = urb; |
4962 | INIT_WORK(&wurb->dequeue_work, urb_dequeue_work); |
4963 | |
4964 | - if (urb->sg) { |
4965 | + if (urb->num_sgs) { |
4966 | ret = qset_add_urb_sg(whc, qset, urb, mem_flags); |
4967 | if (ret == -EINVAL) { |
4968 | qset_free_stds(qset, urb); |
4969 | diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c |
4970 | index 417d37a..98a73cd 100644 |
4971 | --- a/drivers/usb/host/xhci-pci.c |
4972 | +++ b/drivers/usb/host/xhci-pci.c |
4973 | @@ -54,7 +54,7 @@ static int xhci_pci_setup(struct usb_hcd *hcd) |
4974 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); |
4975 | int retval; |
4976 | |
4977 | - hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 1; |
4978 | + hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2; |
4979 | |
4980 | xhci->cap_regs = hcd->regs; |
4981 | xhci->op_regs = hcd->regs + |
4982 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
4983 | index 85d7e8f..40cba25 100644 |
4984 | --- a/drivers/usb/host/xhci-ring.c |
4985 | +++ b/drivers/usb/host/xhci-ring.c |
4986 | @@ -242,10 +242,27 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, |
4987 | int i; |
4988 | union xhci_trb *enq = ring->enqueue; |
4989 | struct xhci_segment *enq_seg = ring->enq_seg; |
4990 | + struct xhci_segment *cur_seg; |
4991 | + unsigned int left_on_ring; |
4992 | |
4993 | /* Check if ring is empty */ |
4994 | - if (enq == ring->dequeue) |
4995 | + if (enq == ring->dequeue) { |
4996 | + /* Can't use link trbs */ |
4997 | + left_on_ring = TRBS_PER_SEGMENT - 1; |
4998 | + for (cur_seg = enq_seg->next; cur_seg != enq_seg; |
4999 | + cur_seg = cur_seg->next) |
5000 | + left_on_ring += TRBS_PER_SEGMENT - 1; |
5001 | + |
5002 | + /* Always need one TRB free in the ring. */ |
5003 | + left_on_ring -= 1; |
5004 | + if (num_trbs > left_on_ring) { |
5005 | + xhci_warn(xhci, "Not enough room on ring; " |
5006 | + "need %u TRBs, %u TRBs left\n", |
5007 | + num_trbs, left_on_ring); |
5008 | + return 0; |
5009 | + } |
5010 | return 1; |
5011 | + } |
5012 | /* Make sure there's an extra empty TRB available */ |
5013 | for (i = 0; i <= num_trbs; ++i) { |
5014 | if (enq == ring->dequeue) |
5015 | @@ -334,7 +351,8 @@ static struct xhci_segment *find_trb_seg( |
5016 | while (cur_seg->trbs > trb || |
5017 | &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { |
5018 | generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; |
5019 | - if (TRB_TYPE(generic_trb->field[3]) == TRB_LINK && |
5020 | + if ((generic_trb->field[3] & TRB_TYPE_BITMASK) == |
5021 | + TRB_TYPE(TRB_LINK) && |
5022 | (generic_trb->field[3] & LINK_TOGGLE)) |
5023 | *cycle_state = ~(*cycle_state) & 0x1; |
5024 | cur_seg = cur_seg->next; |
5025 | @@ -390,7 +408,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
5026 | BUG(); |
5027 | |
5028 | trb = &state->new_deq_ptr->generic; |
5029 | - if (TRB_TYPE(trb->field[3]) == TRB_LINK && |
5030 | + if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && |
5031 | (trb->field[3] & LINK_TOGGLE)) |
5032 | state->new_cycle_state = ~(state->new_cycle_state) & 0x1; |
5033 | next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
5034 | @@ -578,6 +596,8 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci, |
5035 | /* Otherwise just ring the doorbell to restart the ring */ |
5036 | ring_ep_doorbell(xhci, slot_id, ep_index); |
5037 | } |
5038 | + ep->stopped_td = NULL; |
5039 | + ep->stopped_trb = NULL; |
5040 | |
5041 | /* |
5042 | * Drop the lock and complete the URBs in the cancelled TD list. |
5043 | @@ -1061,8 +1081,13 @@ static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci, |
5044 | ep->ep_state |= EP_HALTED; |
5045 | ep->stopped_td = td; |
5046 | ep->stopped_trb = event_trb; |
5047 | + |
5048 | xhci_queue_reset_ep(xhci, slot_id, ep_index); |
5049 | xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index); |
5050 | + |
5051 | + ep->stopped_td = NULL; |
5052 | + ep->stopped_trb = NULL; |
5053 | + |
5054 | xhci_ring_cmd_db(xhci); |
5055 | } |
5056 | |
5057 | @@ -1390,8 +1415,10 @@ static int handle_tx_event(struct xhci_hcd *xhci, |
5058 | for (cur_trb = ep_ring->dequeue, cur_seg = ep_ring->deq_seg; |
5059 | cur_trb != event_trb; |
5060 | next_trb(xhci, ep_ring, &cur_seg, &cur_trb)) { |
5061 | - if (TRB_TYPE(cur_trb->generic.field[3]) != TRB_TR_NOOP && |
5062 | - TRB_TYPE(cur_trb->generic.field[3]) != TRB_LINK) |
5063 | + if ((cur_trb->generic.field[3] & |
5064 | + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_TR_NOOP) && |
5065 | + (cur_trb->generic.field[3] & |
5066 | + TRB_TYPE_BITMASK) != TRB_TYPE(TRB_LINK)) |
5067 | td->urb->actual_length += |
5068 | TRB_LEN(cur_trb->generic.field[2]); |
5069 | } |
5070 | @@ -1938,7 +1965,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, |
5071 | int running_total, trb_buff_len, ret; |
5072 | u64 addr; |
5073 | |
5074 | - if (urb->sg) |
5075 | + if (urb->num_sgs) |
5076 | return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index); |
5077 | |
5078 | ep_ring = xhci->devs[slot_id]->eps[ep_index].ring; |
5079 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
5080 | index 7e42772..5a752d6 100644 |
5081 | --- a/drivers/usb/host/xhci.c |
5082 | +++ b/drivers/usb/host/xhci.c |
5083 | @@ -105,6 +105,33 @@ int xhci_halt(struct xhci_hcd *xhci) |
5084 | } |
5085 | |
5086 | /* |
5087 | + * Set the run bit and wait for the host to be running. |
5088 | + */ |
5089 | +int xhci_start(struct xhci_hcd *xhci) |
5090 | +{ |
5091 | + u32 temp; |
5092 | + int ret; |
5093 | + |
5094 | + temp = xhci_readl(xhci, &xhci->op_regs->command); |
5095 | + temp |= (CMD_RUN); |
5096 | + xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", |
5097 | + temp); |
5098 | + xhci_writel(xhci, temp, &xhci->op_regs->command); |
5099 | + |
5100 | + /* |
5101 | + * Wait for the HCHalted Status bit to be 0 to indicate the host is |
5102 | + * running. |
5103 | + */ |
5104 | + ret = handshake(xhci, &xhci->op_regs->status, |
5105 | + STS_HALT, 0, XHCI_MAX_HALT_USEC); |
5106 | + if (ret == -ETIMEDOUT) |
5107 | + xhci_err(xhci, "Host took too long to start, " |
5108 | + "waited %u microseconds.\n", |
5109 | + XHCI_MAX_HALT_USEC); |
5110 | + return ret; |
5111 | +} |
5112 | + |
5113 | +/* |
5114 | * Reset a halted HC, and set the internal HC state to HC_STATE_HALT. |
5115 | * |
5116 | * This resets pipelines, timers, counters, state machines, etc. |
5117 | @@ -115,6 +142,7 @@ int xhci_reset(struct xhci_hcd *xhci) |
5118 | { |
5119 | u32 command; |
5120 | u32 state; |
5121 | + int ret; |
5122 | |
5123 | state = xhci_readl(xhci, &xhci->op_regs->status); |
5124 | if ((state & STS_HALT) == 0) { |
5125 | @@ -129,7 +157,17 @@ int xhci_reset(struct xhci_hcd *xhci) |
5126 | /* XXX: Why does EHCI set this here? Shouldn't other code do this? */ |
5127 | xhci_to_hcd(xhci)->state = HC_STATE_HALT; |
5128 | |
5129 | - return handshake(xhci, &xhci->op_regs->command, CMD_RESET, 0, 250 * 1000); |
5130 | + ret = handshake(xhci, &xhci->op_regs->command, |
5131 | + CMD_RESET, 0, 250 * 1000); |
5132 | + if (ret) |
5133 | + return ret; |
5134 | + |
5135 | + xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n"); |
5136 | + /* |
5137 | + * xHCI cannot write to any doorbells or operational registers other |
5138 | + * than status until the "Controller Not Ready" flag is cleared. |
5139 | + */ |
5140 | + return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); |
5141 | } |
5142 | |
5143 | |
5144 | @@ -452,13 +490,11 @@ int xhci_run(struct usb_hcd *hcd) |
5145 | if (NUM_TEST_NOOPS > 0) |
5146 | doorbell = xhci_setup_one_noop(xhci); |
5147 | |
5148 | - temp = xhci_readl(xhci, &xhci->op_regs->command); |
5149 | - temp |= (CMD_RUN); |
5150 | - xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n", |
5151 | - temp); |
5152 | - xhci_writel(xhci, temp, &xhci->op_regs->command); |
5153 | - /* Flush PCI posted writes */ |
5154 | - temp = xhci_readl(xhci, &xhci->op_regs->command); |
5155 | + if (xhci_start(xhci)) { |
5156 | + xhci_halt(xhci); |
5157 | + return -ENODEV; |
5158 | + } |
5159 | + |
5160 | xhci_dbg(xhci, "// @%p = 0x%x\n", &xhci->op_regs->command, temp); |
5161 | if (doorbell) |
5162 | (*doorbell)(xhci); |
5163 | @@ -1438,6 +1474,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd, |
5164 | kfree(virt_ep->stopped_td); |
5165 | xhci_ring_cmd_db(xhci); |
5166 | } |
5167 | + virt_ep->stopped_td = NULL; |
5168 | + virt_ep->stopped_trb = NULL; |
5169 | spin_unlock_irqrestore(&xhci->lock, flags); |
5170 | |
5171 | if (ret) |
5172 | diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c |
5173 | index ddf7f9a..8a7968d 100644 |
5174 | --- a/drivers/usb/mon/mon_bin.c |
5175 | +++ b/drivers/usb/mon/mon_bin.c |
5176 | @@ -416,7 +416,7 @@ static unsigned int mon_bin_get_data(const struct mon_reader_bin *rp, |
5177 | |
5178 | } else { |
5179 | /* If IOMMU coalescing occurred, we cannot trust sg_page */ |
5180 | - if (urb->sg->nents != urb->num_sgs) { |
5181 | + if (urb->transfer_flags & URB_DMA_SG_COMBINED) { |
5182 | *flag = 'D'; |
5183 | return length; |
5184 | } |
5185 | diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c |
5186 | index 4d0be13..d562602 100644 |
5187 | --- a/drivers/usb/mon/mon_text.c |
5188 | +++ b/drivers/usb/mon/mon_text.c |
5189 | @@ -161,9 +161,7 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb, |
5190 | } else { |
5191 | struct scatterlist *sg = urb->sg->sg; |
5192 | |
5193 | - /* If IOMMU coalescing occurred, we cannot trust sg_page */ |
5194 | - if (urb->sg->nents != urb->num_sgs || |
5195 | - PageHighMem(sg_page(sg))) |
5196 | + if (PageHighMem(sg_page(sg))) |
5197 | return 'D'; |
5198 | |
5199 | /* For the text interface we copy only the first sg buffer */ |
5200 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
5201 | index ec9b044..009e26c 100644 |
5202 | --- a/drivers/usb/serial/cp210x.c |
5203 | +++ b/drivers/usb/serial/cp210x.c |
5204 | @@ -61,6 +61,8 @@ static const struct usb_device_id id_table[] = { |
5205 | { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ |
5206 | { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ |
5207 | { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ |
5208 | + { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ |
5209 | + { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ |
5210 | { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ |
5211 | { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ |
5212 | { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ |
5213 | @@ -72,9 +74,12 @@ static const struct usb_device_id id_table[] = { |
5214 | { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */ |
5215 | { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ |
5216 | { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ |
5217 | + { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */ |
5218 | + { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ |
5219 | { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ |
5220 | { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ |
5221 | { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ |
5222 | + { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ |
5223 | { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ |
5224 | { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ |
5225 | { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ |
5226 | @@ -82,12 +87,15 @@ static const struct usb_device_id id_table[] = { |
5227 | { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon NFC/Mifare Reader */ |
5228 | { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ |
5229 | { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ |
5230 | + { USB_DEVICE(0x10C4, 0x8149) }, /* West Mountain Radio Computerized Battery Analyzer */ |
5231 | { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ |
5232 | { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ |
5233 | { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ |
5234 | + { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ |
5235 | { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ |
5236 | { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ |
5237 | { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ |
5238 | + { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ |
5239 | { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ |
5240 | { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ |
5241 | { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ |
5242 | @@ -105,6 +113,7 @@ static const struct usb_device_id id_table[] = { |
5243 | { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ |
5244 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
5245 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
5246 | + { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ |
5247 | { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ |
5248 | { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ |
5249 | { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ |
5250 | @@ -115,6 +124,8 @@ static const struct usb_device_id id_table[] = { |
5251 | { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ |
5252 | { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ |
5253 | { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ |
5254 | + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ |
5255 | + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
5256 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
5257 | { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ |
5258 | { } /* Terminating Entry */ |
5259 | diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c |
5260 | index e23c779..582e832 100644 |
5261 | --- a/drivers/usb/serial/cypress_m8.c |
5262 | +++ b/drivers/usb/serial/cypress_m8.c |
5263 | @@ -1309,7 +1309,7 @@ static void cypress_read_int_callback(struct urb *urb) |
5264 | /* process read if there is data other than line status */ |
5265 | if (tty && bytes > i) { |
5266 | tty_insert_flip_string_fixed_flag(tty, data + i, |
5267 | - bytes - i, tty_flag); |
5268 | + tty_flag, bytes - i); |
5269 | tty_flip_buffer_push(tty); |
5270 | } |
5271 | |
5272 | diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c |
5273 | index 68b0aa5..3edda3e 100644 |
5274 | --- a/drivers/usb/serial/digi_acceleport.c |
5275 | +++ b/drivers/usb/serial/digi_acceleport.c |
5276 | @@ -1703,8 +1703,8 @@ static int digi_read_inb_callback(struct urb *urb) |
5277 | /* data length is len-1 (one byte of len is port_status) */ |
5278 | --len; |
5279 | if (len > 0) { |
5280 | - tty_insert_flip_string_fixed_flag(tty, data, len, |
5281 | - flag); |
5282 | + tty_insert_flip_string_fixed_flag(tty, data, flag, |
5283 | + len); |
5284 | tty_flip_buffer_push(tty); |
5285 | } |
5286 | } |
5287 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
5288 | index 1d7c4fa..3f5676e 100644 |
5289 | --- a/drivers/usb/serial/ftdi_sio.c |
5290 | +++ b/drivers/usb/serial/ftdi_sio.c |
5291 | @@ -2289,6 +2289,8 @@ static void ftdi_set_termios(struct tty_struct *tty, |
5292 | "urb failed to set to rts/cts flow control\n"); |
5293 | } |
5294 | |
5295 | + /* raise DTR/RTS */ |
5296 | + set_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
5297 | } else { |
5298 | /* |
5299 | * Xon/Xoff code |
5300 | @@ -2336,6 +2338,8 @@ static void ftdi_set_termios(struct tty_struct *tty, |
5301 | } |
5302 | } |
5303 | |
5304 | + /* lower DTR/RTS */ |
5305 | + clear_mctrl(port, TIOCM_DTR | TIOCM_RTS); |
5306 | } |
5307 | return; |
5308 | } |
5309 | diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c |
5310 | index 4a0f519..71bdbe0 100644 |
5311 | --- a/drivers/usb/serial/ir-usb.c |
5312 | +++ b/drivers/usb/serial/ir-usb.c |
5313 | @@ -312,6 +312,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port) |
5314 | kfree(port->read_urb->transfer_buffer); |
5315 | port->read_urb->transfer_buffer = buffer; |
5316 | port->read_urb->transfer_buffer_length = buffer_size; |
5317 | + port->bulk_in_buffer = buffer; |
5318 | |
5319 | buffer = kmalloc(buffer_size, GFP_KERNEL); |
5320 | if (!buffer) { |
5321 | @@ -321,6 +322,7 @@ static int ir_open(struct tty_struct *tty, struct usb_serial_port *port) |
5322 | kfree(port->write_urb->transfer_buffer); |
5323 | port->write_urb->transfer_buffer = buffer; |
5324 | port->write_urb->transfer_buffer_length = buffer_size; |
5325 | + port->bulk_out_buffer = buffer; |
5326 | port->bulk_out_size = buffer_size; |
5327 | } |
5328 | |
5329 | diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c |
5330 | index 8eef91b..cc0ba38 100644 |
5331 | --- a/drivers/usb/serial/kl5kusb105.c |
5332 | +++ b/drivers/usb/serial/kl5kusb105.c |
5333 | @@ -321,6 +321,7 @@ err_cleanup: |
5334 | usb_free_urb(priv->write_urb_pool[j]); |
5335 | } |
5336 | } |
5337 | + kfree(priv); |
5338 | usb_set_serial_port_data(serial->port[i], NULL); |
5339 | } |
5340 | return -ENOMEM; |
5341 | diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c |
5342 | index c113a2a..bd5bd85 100644 |
5343 | --- a/drivers/usb/serial/kobil_sct.c |
5344 | +++ b/drivers/usb/serial/kobil_sct.c |
5345 | @@ -345,7 +345,8 @@ static void kobil_close(struct usb_serial_port *port) |
5346 | |
5347 | /* FIXME: Add rts/dtr methods */ |
5348 | if (port->write_urb) { |
5349 | - usb_kill_urb(port->write_urb); |
5350 | + usb_poison_urb(port->write_urb); |
5351 | + kfree(port->write_urb->transfer_buffer); |
5352 | usb_free_urb(port->write_urb); |
5353 | port->write_urb = NULL; |
5354 | } |
5355 | diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c |
5356 | index 2fda1c0..68f6a1d 100644 |
5357 | --- a/drivers/usb/serial/mos7840.c |
5358 | +++ b/drivers/usb/serial/mos7840.c |
5359 | @@ -731,7 +731,6 @@ static void mos7840_bulk_in_callback(struct urb *urb) |
5360 | mos7840_port = urb->context; |
5361 | if (!mos7840_port) { |
5362 | dbg("%s", "NULL mos7840_port pointer"); |
5363 | - mos7840_port->read_urb_busy = false; |
5364 | return; |
5365 | } |
5366 | |
5367 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
5368 | index 84d0eda..8b2e612 100644 |
5369 | --- a/drivers/usb/serial/option.c |
5370 | +++ b/drivers/usb/serial/option.c |
5371 | @@ -380,6 +380,10 @@ static int option_resume(struct usb_serial *serial); |
5372 | |
5373 | #define CINTERION_VENDOR_ID 0x0681 |
5374 | |
5375 | +/* Olivetti products */ |
5376 | +#define OLIVETTI_VENDOR_ID 0x0b3c |
5377 | +#define OLIVETTI_PRODUCT_OLICARD100 0xc000 |
5378 | + |
5379 | /* some devices interfaces need special handling due to a number of reasons */ |
5380 | enum option_blacklist_reason { |
5381 | OPTION_BLACKLIST_NONE = 0, |
5382 | @@ -675,6 +679,180 @@ static const struct usb_device_id option_ids[] = { |
5383 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, |
5384 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, |
5385 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, |
5386 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, |
5387 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, |
5388 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, |
5389 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, |
5390 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, |
5391 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, |
5392 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1060, 0xff, 0xff, 0xff) }, |
5393 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1061, 0xff, 0xff, 0xff) }, |
5394 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1062, 0xff, 0xff, 0xff) }, |
5395 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1063, 0xff, 0xff, 0xff) }, |
5396 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1064, 0xff, 0xff, 0xff) }, |
5397 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1065, 0xff, 0xff, 0xff) }, |
5398 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1066, 0xff, 0xff, 0xff) }, |
5399 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1067, 0xff, 0xff, 0xff) }, |
5400 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1068, 0xff, 0xff, 0xff) }, |
5401 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1069, 0xff, 0xff, 0xff) }, |
5402 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1070, 0xff, 0xff, 0xff) }, |
5403 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1071, 0xff, 0xff, 0xff) }, |
5404 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1072, 0xff, 0xff, 0xff) }, |
5405 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1073, 0xff, 0xff, 0xff) }, |
5406 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1074, 0xff, 0xff, 0xff) }, |
5407 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1075, 0xff, 0xff, 0xff) }, |
5408 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1076, 0xff, 0xff, 0xff) }, |
5409 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1077, 0xff, 0xff, 0xff) }, |
5410 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1078, 0xff, 0xff, 0xff) }, |
5411 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1079, 0xff, 0xff, 0xff) }, |
5412 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1080, 0xff, 0xff, 0xff) }, |
5413 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1081, 0xff, 0xff, 0xff) }, |
5414 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1082, 0xff, 0xff, 0xff) }, |
5415 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1083, 0xff, 0xff, 0xff) }, |
5416 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1084, 0xff, 0xff, 0xff) }, |
5417 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1085, 0xff, 0xff, 0xff) }, |
5418 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1086, 0xff, 0xff, 0xff) }, |
5419 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1087, 0xff, 0xff, 0xff) }, |
5420 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1088, 0xff, 0xff, 0xff) }, |
5421 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1089, 0xff, 0xff, 0xff) }, |
5422 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1090, 0xff, 0xff, 0xff) }, |
5423 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1091, 0xff, 0xff, 0xff) }, |
5424 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1092, 0xff, 0xff, 0xff) }, |
5425 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1093, 0xff, 0xff, 0xff) }, |
5426 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1094, 0xff, 0xff, 0xff) }, |
5427 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1095, 0xff, 0xff, 0xff) }, |
5428 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1096, 0xff, 0xff, 0xff) }, |
5429 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1097, 0xff, 0xff, 0xff) }, |
5430 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1098, 0xff, 0xff, 0xff) }, |
5431 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1099, 0xff, 0xff, 0xff) }, |
5432 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1100, 0xff, 0xff, 0xff) }, |
5433 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1101, 0xff, 0xff, 0xff) }, |
5434 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1102, 0xff, 0xff, 0xff) }, |
5435 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1103, 0xff, 0xff, 0xff) }, |
5436 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1104, 0xff, 0xff, 0xff) }, |
5437 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1105, 0xff, 0xff, 0xff) }, |
5438 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1106, 0xff, 0xff, 0xff) }, |
5439 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1107, 0xff, 0xff, 0xff) }, |
5440 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1108, 0xff, 0xff, 0xff) }, |
5441 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1109, 0xff, 0xff, 0xff) }, |
5442 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1110, 0xff, 0xff, 0xff) }, |
5443 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1111, 0xff, 0xff, 0xff) }, |
5444 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1112, 0xff, 0xff, 0xff) }, |
5445 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1113, 0xff, 0xff, 0xff) }, |
5446 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1114, 0xff, 0xff, 0xff) }, |
5447 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1115, 0xff, 0xff, 0xff) }, |
5448 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1116, 0xff, 0xff, 0xff) }, |
5449 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1117, 0xff, 0xff, 0xff) }, |
5450 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1118, 0xff, 0xff, 0xff) }, |
5451 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1119, 0xff, 0xff, 0xff) }, |
5452 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1120, 0xff, 0xff, 0xff) }, |
5453 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1121, 0xff, 0xff, 0xff) }, |
5454 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1122, 0xff, 0xff, 0xff) }, |
5455 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1123, 0xff, 0xff, 0xff) }, |
5456 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1124, 0xff, 0xff, 0xff) }, |
5457 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1125, 0xff, 0xff, 0xff) }, |
5458 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1126, 0xff, 0xff, 0xff) }, |
5459 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1127, 0xff, 0xff, 0xff) }, |
5460 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1128, 0xff, 0xff, 0xff) }, |
5461 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1129, 0xff, 0xff, 0xff) }, |
5462 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1130, 0xff, 0xff, 0xff) }, |
5463 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1131, 0xff, 0xff, 0xff) }, |
5464 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1132, 0xff, 0xff, 0xff) }, |
5465 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1133, 0xff, 0xff, 0xff) }, |
5466 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1134, 0xff, 0xff, 0xff) }, |
5467 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1135, 0xff, 0xff, 0xff) }, |
5468 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1136, 0xff, 0xff, 0xff) }, |
5469 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1137, 0xff, 0xff, 0xff) }, |
5470 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1138, 0xff, 0xff, 0xff) }, |
5471 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1139, 0xff, 0xff, 0xff) }, |
5472 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1140, 0xff, 0xff, 0xff) }, |
5473 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1141, 0xff, 0xff, 0xff) }, |
5474 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1142, 0xff, 0xff, 0xff) }, |
5475 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1143, 0xff, 0xff, 0xff) }, |
5476 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1144, 0xff, 0xff, 0xff) }, |
5477 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1145, 0xff, 0xff, 0xff) }, |
5478 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1146, 0xff, 0xff, 0xff) }, |
5479 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1147, 0xff, 0xff, 0xff) }, |
5480 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1148, 0xff, 0xff, 0xff) }, |
5481 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1149, 0xff, 0xff, 0xff) }, |
5482 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1150, 0xff, 0xff, 0xff) }, |
5483 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1151, 0xff, 0xff, 0xff) }, |
5484 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1152, 0xff, 0xff, 0xff) }, |
5485 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1153, 0xff, 0xff, 0xff) }, |
5486 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1154, 0xff, 0xff, 0xff) }, |
5487 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1155, 0xff, 0xff, 0xff) }, |
5488 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1156, 0xff, 0xff, 0xff) }, |
5489 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1157, 0xff, 0xff, 0xff) }, |
5490 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1158, 0xff, 0xff, 0xff) }, |
5491 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1159, 0xff, 0xff, 0xff) }, |
5492 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1160, 0xff, 0xff, 0xff) }, |
5493 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1161, 0xff, 0xff, 0xff) }, |
5494 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1162, 0xff, 0xff, 0xff) }, |
5495 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1163, 0xff, 0xff, 0xff) }, |
5496 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1164, 0xff, 0xff, 0xff) }, |
5497 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1165, 0xff, 0xff, 0xff) }, |
5498 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1166, 0xff, 0xff, 0xff) }, |
5499 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1167, 0xff, 0xff, 0xff) }, |
5500 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1168, 0xff, 0xff, 0xff) }, |
5501 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, |
5502 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, |
5503 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, |
5504 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, |
5505 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, |
5506 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, |
5507 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, |
5508 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, |
5509 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, |
5510 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, |
5511 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, |
5512 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, |
5513 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, |
5514 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, |
5515 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, |
5516 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, |
5517 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, |
5518 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, |
5519 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1260, 0xff, 0xff, 0xff) }, |
5520 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1261, 0xff, 0xff, 0xff) }, |
5521 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1262, 0xff, 0xff, 0xff) }, |
5522 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1263, 0xff, 0xff, 0xff) }, |
5523 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1264, 0xff, 0xff, 0xff) }, |
5524 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1265, 0xff, 0xff, 0xff) }, |
5525 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1266, 0xff, 0xff, 0xff) }, |
5526 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) }, |
5527 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) }, |
5528 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) }, |
5529 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) }, |
5530 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) }, |
5531 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) }, |
5532 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) }, |
5533 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1274, 0xff, 0xff, 0xff) }, |
5534 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1275, 0xff, 0xff, 0xff) }, |
5535 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1276, 0xff, 0xff, 0xff) }, |
5536 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1277, 0xff, 0xff, 0xff) }, |
5537 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1278, 0xff, 0xff, 0xff) }, |
5538 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1279, 0xff, 0xff, 0xff) }, |
5539 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1280, 0xff, 0xff, 0xff) }, |
5540 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1281, 0xff, 0xff, 0xff) }, |
5541 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1282, 0xff, 0xff, 0xff) }, |
5542 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1283, 0xff, 0xff, 0xff) }, |
5543 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1284, 0xff, 0xff, 0xff) }, |
5544 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1285, 0xff, 0xff, 0xff) }, |
5545 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1286, 0xff, 0xff, 0xff) }, |
5546 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1287, 0xff, 0xff, 0xff) }, |
5547 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1288, 0xff, 0xff, 0xff) }, |
5548 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1289, 0xff, 0xff, 0xff) }, |
5549 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1290, 0xff, 0xff, 0xff) }, |
5550 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1291, 0xff, 0xff, 0xff) }, |
5551 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1292, 0xff, 0xff, 0xff) }, |
5552 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1293, 0xff, 0xff, 0xff) }, |
5553 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1294, 0xff, 0xff, 0xff) }, |
5554 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1295, 0xff, 0xff, 0xff) }, |
5555 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1296, 0xff, 0xff, 0xff) }, |
5556 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1297, 0xff, 0xff, 0xff) }, |
5557 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, |
5558 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, |
5559 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, |
5560 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ |
5561 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, |
5562 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, |
5563 | @@ -726,6 +904,8 @@ static const struct usb_device_id option_ids[] = { |
5564 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, |
5565 | |
5566 | { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, |
5567 | + |
5568 | + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, |
5569 | { } /* Terminating entry */ |
5570 | }; |
5571 | MODULE_DEVICE_TABLE(usb, option_ids); |
5572 | diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c |
5573 | index 7e3bea2..214a3e5 100644 |
5574 | --- a/drivers/usb/serial/qcaux.c |
5575 | +++ b/drivers/usb/serial/qcaux.c |
5576 | @@ -50,6 +50,10 @@ |
5577 | #define SANYO_VENDOR_ID 0x0474 |
5578 | #define SANYO_PRODUCT_KATANA_LX 0x0754 /* SCP-3800 (Katana LX) */ |
5579 | |
5580 | +/* Samsung devices */ |
5581 | +#define SAMSUNG_VENDOR_ID 0x04e8 |
5582 | +#define SAMSUNG_PRODUCT_U520 0x6640 /* SCH-U520 */ |
5583 | + |
5584 | static struct usb_device_id id_table[] = { |
5585 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) }, |
5586 | { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) }, |
5587 | @@ -61,6 +65,7 @@ static struct usb_device_id id_table[] = { |
5588 | { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) }, |
5589 | { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) }, |
5590 | { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) }, |
5591 | + { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) }, |
5592 | { }, |
5593 | }; |
5594 | MODULE_DEVICE_TABLE(usb, id_table); |
5595 | diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c |
5596 | index 5d39191..2ea32c5 100644 |
5597 | --- a/drivers/usb/serial/spcp8x5.c |
5598 | +++ b/drivers/usb/serial/spcp8x5.c |
5599 | @@ -726,8 +726,8 @@ static void spcp8x5_read_bulk_callback(struct urb *urb) |
5600 | /* overrun is special, not associated with a char */ |
5601 | if (status & UART_OVERRUN_ERROR) |
5602 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); |
5603 | - tty_insert_flip_string_fixed_flag(tty, data, |
5604 | - urb->actual_length, tty_flag); |
5605 | + tty_insert_flip_string_fixed_flag(tty, data, tty_flag, |
5606 | + urb->actual_length); |
5607 | tty_flip_buffer_push(tty); |
5608 | } |
5609 | tty_kref_put(tty); |
5610 | diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c |
5611 | index 0949427..fb7fc40 100644 |
5612 | --- a/drivers/usb/serial/visor.c |
5613 | +++ b/drivers/usb/serial/visor.c |
5614 | @@ -249,6 +249,7 @@ static struct usb_serial_driver clie_3_5_device = { |
5615 | .throttle = visor_throttle, |
5616 | .unthrottle = visor_unthrottle, |
5617 | .attach = clie_3_5_startup, |
5618 | + .release = visor_release, |
5619 | .write = visor_write, |
5620 | .write_room = visor_write_room, |
5621 | .write_bulk_callback = visor_write_bulk_callback, |
5622 | diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h |
5623 | index ccf1dbb..55b3cd1 100644 |
5624 | --- a/drivers/usb/storage/unusual_devs.h |
5625 | +++ b/drivers/usb/storage/unusual_devs.h |
5626 | @@ -1853,6 +1853,21 @@ UNUSUAL_DEV( 0x1652, 0x6600, 0x0201, 0x0201, |
5627 | US_SC_DEVICE, US_PR_DEVICE, NULL, |
5628 | US_FL_IGNORE_RESIDUE ), |
5629 | |
5630 | +/* Reported by Hans de Goede <hdegoede@redhat.com> |
5631 | + * These Appotech controllers are found in Picture Frames, they provide a |
5632 | + * (buggy) emulation of a cdrom drive which contains the windows software |
5633 | + * Uploading of pictures happens over the corresponding /dev/sg device. */ |
5634 | +UNUSUAL_DEV( 0x1908, 0x1315, 0x0000, 0x0000, |
5635 | + "BUILDWIN", |
5636 | + "Photo Frame", |
5637 | + US_SC_DEVICE, US_PR_DEVICE, NULL, |
5638 | + US_FL_BAD_SENSE ), |
5639 | +UNUSUAL_DEV( 0x1908, 0x1320, 0x0000, 0x0000, |
5640 | + "BUILDWIN", |
5641 | + "Photo Frame", |
5642 | + US_SC_DEVICE, US_PR_DEVICE, NULL, |
5643 | + US_FL_BAD_SENSE ), |
5644 | + |
5645 | UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001, |
5646 | "ST", |
5647 | "2A", |
5648 | diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c |
5649 | index 9777583..c9d0c79 100644 |
5650 | --- a/drivers/vhost/net.c |
5651 | +++ b/drivers/vhost/net.c |
5652 | @@ -637,7 +637,7 @@ const static struct file_operations vhost_net_fops = { |
5653 | }; |
5654 | |
5655 | static struct miscdevice vhost_net_misc = { |
5656 | - VHOST_NET_MINOR, |
5657 | + MISC_DYNAMIC_MINOR, |
5658 | "vhost-net", |
5659 | &vhost_net_fops, |
5660 | }; |
5661 | diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c |
5662 | index 8d406fb..f3d7440 100644 |
5663 | --- a/drivers/video/arcfb.c |
5664 | +++ b/drivers/video/arcfb.c |
5665 | @@ -80,7 +80,7 @@ struct arcfb_par { |
5666 | spinlock_t lock; |
5667 | }; |
5668 | |
5669 | -static struct fb_fix_screeninfo arcfb_fix __initdata = { |
5670 | +static struct fb_fix_screeninfo arcfb_fix __devinitdata = { |
5671 | .id = "arcfb", |
5672 | .type = FB_TYPE_PACKED_PIXELS, |
5673 | .visual = FB_VISUAL_MONO01, |
5674 | @@ -90,7 +90,7 @@ static struct fb_fix_screeninfo arcfb_fix __initdata = { |
5675 | .accel = FB_ACCEL_NONE, |
5676 | }; |
5677 | |
5678 | -static struct fb_var_screeninfo arcfb_var __initdata = { |
5679 | +static struct fb_var_screeninfo arcfb_var __devinitdata = { |
5680 | .xres = 128, |
5681 | .yres = 64, |
5682 | .xres_virtual = 128, |
5683 | @@ -588,7 +588,7 @@ err: |
5684 | return retval; |
5685 | } |
5686 | |
5687 | -static int arcfb_remove(struct platform_device *dev) |
5688 | +static int __devexit arcfb_remove(struct platform_device *dev) |
5689 | { |
5690 | struct fb_info *info = platform_get_drvdata(dev); |
5691 | |
5692 | @@ -602,7 +602,7 @@ static int arcfb_remove(struct platform_device *dev) |
5693 | |
5694 | static struct platform_driver arcfb_driver = { |
5695 | .probe = arcfb_probe, |
5696 | - .remove = arcfb_remove, |
5697 | + .remove = __devexit_p(arcfb_remove), |
5698 | .driver = { |
5699 | .name = "arcfb", |
5700 | }, |
5701 | diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c |
5702 | index 8bbf251..af8f0f2 100644 |
5703 | --- a/drivers/video/hgafb.c |
5704 | +++ b/drivers/video/hgafb.c |
5705 | @@ -106,7 +106,7 @@ static DEFINE_SPINLOCK(hga_reg_lock); |
5706 | |
5707 | /* Framebuffer driver structures */ |
5708 | |
5709 | -static struct fb_var_screeninfo __initdata hga_default_var = { |
5710 | +static struct fb_var_screeninfo hga_default_var __devinitdata = { |
5711 | .xres = 720, |
5712 | .yres = 348, |
5713 | .xres_virtual = 720, |
5714 | @@ -120,7 +120,7 @@ static struct fb_var_screeninfo __initdata hga_default_var = { |
5715 | .width = -1, |
5716 | }; |
5717 | |
5718 | -static struct fb_fix_screeninfo __initdata hga_fix = { |
5719 | +static struct fb_fix_screeninfo hga_fix __devinitdata = { |
5720 | .id = "HGA", |
5721 | .type = FB_TYPE_PACKED_PIXELS, /* (not sure) */ |
5722 | .visual = FB_VISUAL_MONO10, |
5723 | @@ -276,7 +276,7 @@ static void hga_blank(int blank_mode) |
5724 | spin_unlock_irqrestore(&hga_reg_lock, flags); |
5725 | } |
5726 | |
5727 | -static int __init hga_card_detect(void) |
5728 | +static int __devinit hga_card_detect(void) |
5729 | { |
5730 | int count = 0; |
5731 | void __iomem *p, *q; |
5732 | @@ -596,7 +596,7 @@ static int __devinit hgafb_probe(struct platform_device *pdev) |
5733 | return 0; |
5734 | } |
5735 | |
5736 | -static int hgafb_remove(struct platform_device *pdev) |
5737 | +static int __devexit hgafb_remove(struct platform_device *pdev) |
5738 | { |
5739 | struct fb_info *info = platform_get_drvdata(pdev); |
5740 | |
5741 | @@ -621,7 +621,7 @@ static int hgafb_remove(struct platform_device *pdev) |
5742 | |
5743 | static struct platform_driver hgafb_driver = { |
5744 | .probe = hgafb_probe, |
5745 | - .remove = hgafb_remove, |
5746 | + .remove = __devexit_p(hgafb_remove), |
5747 | .driver = { |
5748 | .name = "hgafb", |
5749 | }, |
5750 | diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c |
5751 | index 9b5532b..bc67251 100644 |
5752 | --- a/drivers/video/vfb.c |
5753 | +++ b/drivers/video/vfb.c |
5754 | @@ -78,7 +78,7 @@ static void rvfree(void *mem, unsigned long size) |
5755 | vfree(mem); |
5756 | } |
5757 | |
5758 | -static struct fb_var_screeninfo vfb_default __initdata = { |
5759 | +static struct fb_var_screeninfo vfb_default __devinitdata = { |
5760 | .xres = 640, |
5761 | .yres = 480, |
5762 | .xres_virtual = 640, |
5763 | @@ -100,7 +100,7 @@ static struct fb_var_screeninfo vfb_default __initdata = { |
5764 | .vmode = FB_VMODE_NONINTERLACED, |
5765 | }; |
5766 | |
5767 | -static struct fb_fix_screeninfo vfb_fix __initdata = { |
5768 | +static struct fb_fix_screeninfo vfb_fix __devinitdata = { |
5769 | .id = "Virtual FB", |
5770 | .type = FB_TYPE_PACKED_PIXELS, |
5771 | .visual = FB_VISUAL_PSEUDOCOLOR, |
5772 | diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c |
5773 | index bf638a4..2ab3cc7 100644 |
5774 | --- a/drivers/video/vga16fb.c |
5775 | +++ b/drivers/video/vga16fb.c |
5776 | @@ -65,7 +65,7 @@ struct vga16fb_par { |
5777 | |
5778 | /* --------------------------------------------------------------------- */ |
5779 | |
5780 | -static struct fb_var_screeninfo vga16fb_defined __initdata = { |
5781 | +static struct fb_var_screeninfo vga16fb_defined __devinitdata = { |
5782 | .xres = 640, |
5783 | .yres = 480, |
5784 | .xres_virtual = 640, |
5785 | @@ -85,7 +85,7 @@ static struct fb_var_screeninfo vga16fb_defined __initdata = { |
5786 | }; |
5787 | |
5788 | /* name should not depend on EGA/VGA */ |
5789 | -static struct fb_fix_screeninfo vga16fb_fix __initdata = { |
5790 | +static struct fb_fix_screeninfo vga16fb_fix __devinitdata = { |
5791 | .id = "VGA16 VGA", |
5792 | .smem_start = VGA_FB_PHYS, |
5793 | .smem_len = VGA_FB_PHYS_LEN, |
5794 | @@ -1278,7 +1278,7 @@ static struct fb_ops vga16fb_ops = { |
5795 | }; |
5796 | |
5797 | #ifndef MODULE |
5798 | -static int vga16fb_setup(char *options) |
5799 | +static int __init vga16fb_setup(char *options) |
5800 | { |
5801 | char *this_opt; |
5802 | |
5803 | @@ -1376,7 +1376,7 @@ static int __devinit vga16fb_probe(struct platform_device *dev) |
5804 | return ret; |
5805 | } |
5806 | |
5807 | -static int vga16fb_remove(struct platform_device *dev) |
5808 | +static int __devexit vga16fb_remove(struct platform_device *dev) |
5809 | { |
5810 | struct fb_info *info = platform_get_drvdata(dev); |
5811 | |
5812 | @@ -1393,7 +1393,7 @@ static int vga16fb_remove(struct platform_device *dev) |
5813 | |
5814 | static struct platform_driver vga16fb_driver = { |
5815 | .probe = vga16fb_probe, |
5816 | - .remove = vga16fb_remove, |
5817 | + .remove = __devexit_p(vga16fb_remove), |
5818 | .driver = { |
5819 | .name = "vga16fb", |
5820 | }, |
5821 | diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c |
5822 | index 31b0e17..e66b8b1 100644 |
5823 | --- a/drivers/video/w100fb.c |
5824 | +++ b/drivers/video/w100fb.c |
5825 | @@ -53,7 +53,7 @@ static void w100_update_enable(void); |
5826 | static void w100_update_disable(void); |
5827 | static void calc_hsync(struct w100fb_par *par); |
5828 | static void w100_init_graphic_engine(struct w100fb_par *par); |
5829 | -struct w100_pll_info *w100_get_xtal_table(unsigned int freq); |
5830 | +struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit; |
5831 | |
5832 | /* Pseudo palette size */ |
5833 | #define MAX_PALETTES 16 |
5834 | @@ -782,7 +782,7 @@ out: |
5835 | } |
5836 | |
5837 | |
5838 | -static int w100fb_remove(struct platform_device *pdev) |
5839 | +static int __devexit w100fb_remove(struct platform_device *pdev) |
5840 | { |
5841 | struct fb_info *info = platform_get_drvdata(pdev); |
5842 | struct w100fb_par *par=info->par; |
5843 | @@ -1020,7 +1020,7 @@ static struct pll_entries { |
5844 | { 0 }, |
5845 | }; |
5846 | |
5847 | -struct w100_pll_info *w100_get_xtal_table(unsigned int freq) |
5848 | +struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq) |
5849 | { |
5850 | struct pll_entries *pll_entry = w100_pll_tables; |
5851 | |
5852 | @@ -1611,7 +1611,7 @@ static void w100_vsync(void) |
5853 | |
5854 | static struct platform_driver w100fb_driver = { |
5855 | .probe = w100fb_probe, |
5856 | - .remove = w100fb_remove, |
5857 | + .remove = __devexit_p(w100fb_remove), |
5858 | .suspend = w100fb_suspend, |
5859 | .resume = w100fb_resume, |
5860 | .driver = { |
5861 | @@ -1619,7 +1619,7 @@ static struct platform_driver w100fb_driver = { |
5862 | }, |
5863 | }; |
5864 | |
5865 | -int __devinit w100fb_init(void) |
5866 | +int __init w100fb_init(void) |
5867 | { |
5868 | return platform_driver_register(&w100fb_driver); |
5869 | } |
5870 | diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c |
5871 | index eab33f1..7b547f5 100644 |
5872 | --- a/drivers/xen/xenbus/xenbus_xs.c |
5873 | +++ b/drivers/xen/xenbus/xenbus_xs.c |
5874 | @@ -499,7 +499,7 @@ int xenbus_printf(struct xenbus_transaction t, |
5875 | #define PRINTF_BUFFER_SIZE 4096 |
5876 | char *printf_buffer; |
5877 | |
5878 | - printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); |
5879 | + printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH); |
5880 | if (printf_buffer == NULL) |
5881 | return -ENOMEM; |
5882 | |
5883 | diff --git a/fs/aio.c b/fs/aio.c |
5884 | index 1cf12b3..48fdeeb 100644 |
5885 | --- a/fs/aio.c |
5886 | +++ b/fs/aio.c |
5887 | @@ -36,6 +36,7 @@ |
5888 | #include <linux/blkdev.h> |
5889 | #include <linux/mempool.h> |
5890 | #include <linux/hash.h> |
5891 | +#include <linux/compat.h> |
5892 | |
5893 | #include <asm/kmap_types.h> |
5894 | #include <asm/uaccess.h> |
5895 | @@ -1384,13 +1385,22 @@ static ssize_t aio_fsync(struct kiocb *iocb) |
5896 | return ret; |
5897 | } |
5898 | |
5899 | -static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb) |
5900 | +static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat) |
5901 | { |
5902 | ssize_t ret; |
5903 | |
5904 | - ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf, |
5905 | - kiocb->ki_nbytes, 1, |
5906 | - &kiocb->ki_inline_vec, &kiocb->ki_iovec); |
5907 | +#ifdef CONFIG_COMPAT |
5908 | + if (compat) |
5909 | + ret = compat_rw_copy_check_uvector(type, |
5910 | + (struct compat_iovec __user *)kiocb->ki_buf, |
5911 | + kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, |
5912 | + &kiocb->ki_iovec); |
5913 | + else |
5914 | +#endif |
5915 | + ret = rw_copy_check_uvector(type, |
5916 | + (struct iovec __user *)kiocb->ki_buf, |
5917 | + kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, |
5918 | + &kiocb->ki_iovec); |
5919 | if (ret < 0) |
5920 | goto out; |
5921 | |
5922 | @@ -1420,7 +1430,7 @@ static ssize_t aio_setup_single_vector(struct kiocb *kiocb) |
5923 | * Performs the initial checks and aio retry method |
5924 | * setup for the kiocb at the time of io submission. |
5925 | */ |
5926 | -static ssize_t aio_setup_iocb(struct kiocb *kiocb) |
5927 | +static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat) |
5928 | { |
5929 | struct file *file = kiocb->ki_filp; |
5930 | ssize_t ret = 0; |
5931 | @@ -1469,7 +1479,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb) |
5932 | ret = security_file_permission(file, MAY_READ); |
5933 | if (unlikely(ret)) |
5934 | break; |
5935 | - ret = aio_setup_vectored_rw(READ, kiocb); |
5936 | + ret = aio_setup_vectored_rw(READ, kiocb, compat); |
5937 | if (ret) |
5938 | break; |
5939 | ret = -EINVAL; |
5940 | @@ -1483,7 +1493,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb) |
5941 | ret = security_file_permission(file, MAY_WRITE); |
5942 | if (unlikely(ret)) |
5943 | break; |
5944 | - ret = aio_setup_vectored_rw(WRITE, kiocb); |
5945 | + ret = aio_setup_vectored_rw(WRITE, kiocb, compat); |
5946 | if (ret) |
5947 | break; |
5948 | ret = -EINVAL; |
5949 | @@ -1548,7 +1558,8 @@ static void aio_batch_free(struct hlist_head *batch_hash) |
5950 | } |
5951 | |
5952 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
5953 | - struct iocb *iocb, struct hlist_head *batch_hash) |
5954 | + struct iocb *iocb, struct hlist_head *batch_hash, |
5955 | + bool compat) |
5956 | { |
5957 | struct kiocb *req; |
5958 | struct file *file; |
5959 | @@ -1609,7 +1620,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
5960 | req->ki_left = req->ki_nbytes = iocb->aio_nbytes; |
5961 | req->ki_opcode = iocb->aio_lio_opcode; |
5962 | |
5963 | - ret = aio_setup_iocb(req); |
5964 | + ret = aio_setup_iocb(req, compat); |
5965 | |
5966 | if (ret) |
5967 | goto out_put_req; |
5968 | @@ -1637,20 +1648,8 @@ out_put_req: |
5969 | return ret; |
5970 | } |
5971 | |
5972 | -/* sys_io_submit: |
5973 | - * Queue the nr iocbs pointed to by iocbpp for processing. Returns |
5974 | - * the number of iocbs queued. May return -EINVAL if the aio_context |
5975 | - * specified by ctx_id is invalid, if nr is < 0, if the iocb at |
5976 | - * *iocbpp[0] is not properly initialized, if the operation specified |
5977 | - * is invalid for the file descriptor in the iocb. May fail with |
5978 | - * -EFAULT if any of the data structures point to invalid data. May |
5979 | - * fail with -EBADF if the file descriptor specified in the first |
5980 | - * iocb is invalid. May fail with -EAGAIN if insufficient resources |
5981 | - * are available to queue any iocbs. Will return 0 if nr is 0. Will |
5982 | - * fail with -ENOSYS if not implemented. |
5983 | - */ |
5984 | -SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, |
5985 | - struct iocb __user * __user *, iocbpp) |
5986 | +long do_io_submit(aio_context_t ctx_id, long nr, |
5987 | + struct iocb __user *__user *iocbpp, bool compat) |
5988 | { |
5989 | struct kioctx *ctx; |
5990 | long ret = 0; |
5991 | @@ -1687,7 +1686,7 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, |
5992 | break; |
5993 | } |
5994 | |
5995 | - ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash); |
5996 | + ret = io_submit_one(ctx, user_iocb, &tmp, batch_hash, compat); |
5997 | if (ret) |
5998 | break; |
5999 | } |
6000 | @@ -1697,6 +1696,24 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, |
6001 | return i ? i : ret; |
6002 | } |
6003 | |
6004 | +/* sys_io_submit: |
6005 | + * Queue the nr iocbs pointed to by iocbpp for processing. Returns |
6006 | + * the number of iocbs queued. May return -EINVAL if the aio_context |
6007 | + * specified by ctx_id is invalid, if nr is < 0, if the iocb at |
6008 | + * *iocbpp[0] is not properly initialized, if the operation specified |
6009 | + * is invalid for the file descriptor in the iocb. May fail with |
6010 | + * -EFAULT if any of the data structures point to invalid data. May |
6011 | + * fail with -EBADF if the file descriptor specified in the first |
6012 | + * iocb is invalid. May fail with -EAGAIN if insufficient resources |
6013 | + * are available to queue any iocbs. Will return 0 if nr is 0. Will |
6014 | + * fail with -ENOSYS if not implemented. |
6015 | + */ |
6016 | +SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr, |
6017 | + struct iocb __user * __user *, iocbpp) |
6018 | +{ |
6019 | + return do_io_submit(ctx_id, nr, iocbpp, 0); |
6020 | +} |
6021 | + |
6022 | /* lookup_kiocb |
6023 | * Finds a given iocb for cancellation. |
6024 | */ |
6025 | diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c |
6026 | index 6ef7b26..6b4d0cc 100644 |
6027 | --- a/fs/btrfs/acl.c |
6028 | +++ b/fs/btrfs/acl.c |
6029 | @@ -160,6 +160,9 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name, |
6030 | int ret; |
6031 | struct posix_acl *acl = NULL; |
6032 | |
6033 | + if (!is_owner_or_cap(dentry->d_inode)) |
6034 | + return -EPERM; |
6035 | + |
6036 | if (value) { |
6037 | acl = posix_acl_from_xattr(value, size); |
6038 | if (acl == NULL) { |
6039 | diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h |
6040 | index 39e47f4..a6db615 100644 |
6041 | --- a/fs/cifs/cifsproto.h |
6042 | +++ b/fs/cifs/cifsproto.h |
6043 | @@ -95,8 +95,10 @@ extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode, |
6044 | __u16 fileHandle, struct file *file, |
6045 | struct vfsmount *mnt, unsigned int oflags); |
6046 | extern int cifs_posix_open(char *full_path, struct inode **pinode, |
6047 | - struct vfsmount *mnt, int mode, int oflags, |
6048 | - __u32 *poplock, __u16 *pnetfid, int xid); |
6049 | + struct vfsmount *mnt, |
6050 | + struct super_block *sb, |
6051 | + int mode, int oflags, |
6052 | + __u32 *poplock, __u16 *pnetfid, int xid); |
6053 | extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr, |
6054 | FILE_UNIX_BASIC_INFO *info, |
6055 | struct cifs_sb_info *cifs_sb); |
6056 | diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c |
6057 | index e9f7ecc..ff3d891 100644 |
6058 | --- a/fs/cifs/dir.c |
6059 | +++ b/fs/cifs/dir.c |
6060 | @@ -183,13 +183,14 @@ cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle, |
6061 | } |
6062 | |
6063 | int cifs_posix_open(char *full_path, struct inode **pinode, |
6064 | - struct vfsmount *mnt, int mode, int oflags, |
6065 | - __u32 *poplock, __u16 *pnetfid, int xid) |
6066 | + struct vfsmount *mnt, struct super_block *sb, |
6067 | + int mode, int oflags, |
6068 | + __u32 *poplock, __u16 *pnetfid, int xid) |
6069 | { |
6070 | int rc; |
6071 | FILE_UNIX_BASIC_INFO *presp_data; |
6072 | __u32 posix_flags = 0; |
6073 | - struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb); |
6074 | + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
6075 | struct cifs_fattr fattr; |
6076 | |
6077 | cFYI(1, ("posix open %s", full_path)); |
6078 | @@ -242,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, |
6079 | |
6080 | /* get new inode and set it up */ |
6081 | if (*pinode == NULL) { |
6082 | - *pinode = cifs_iget(mnt->mnt_sb, &fattr); |
6083 | + *pinode = cifs_iget(sb, &fattr); |
6084 | if (!*pinode) { |
6085 | rc = -ENOMEM; |
6086 | goto posix_open_ret; |
6087 | @@ -251,7 +252,8 @@ int cifs_posix_open(char *full_path, struct inode **pinode, |
6088 | cifs_fattr_to_inode(*pinode, &fattr); |
6089 | } |
6090 | |
6091 | - cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags); |
6092 | + if (mnt) |
6093 | + cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags); |
6094 | |
6095 | posix_open_ret: |
6096 | kfree(presp_data); |
6097 | @@ -315,13 +317,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, |
6098 | if (nd && (nd->flags & LOOKUP_OPEN)) |
6099 | oflags = nd->intent.open.flags; |
6100 | else |
6101 | - oflags = FMODE_READ; |
6102 | + oflags = FMODE_READ | SMB_O_CREAT; |
6103 | |
6104 | if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) && |
6105 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & |
6106 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { |
6107 | - rc = cifs_posix_open(full_path, &newinode, nd->path.mnt, |
6108 | - mode, oflags, &oplock, &fileHandle, xid); |
6109 | + rc = cifs_posix_open(full_path, &newinode, |
6110 | + nd ? nd->path.mnt : NULL, |
6111 | + inode->i_sb, mode, oflags, &oplock, &fileHandle, xid); |
6112 | /* EIO could indicate that (posix open) operation is not |
6113 | supported, despite what server claimed in capability |
6114 | negotation. EREMOTE indicates DFS junction, which is not |
6115 | @@ -678,6 +681,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, |
6116 | (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open && |
6117 | (nd->intent.open.flags & O_CREAT)) { |
6118 | rc = cifs_posix_open(full_path, &newInode, nd->path.mnt, |
6119 | + parent_dir_inode->i_sb, |
6120 | nd->intent.open.create_mode, |
6121 | nd->intent.open.flags, &oplock, |
6122 | &fileHandle, xid); |
6123 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
6124 | index 9b11a8f..4cbdb20 100644 |
6125 | --- a/fs/cifs/file.c |
6126 | +++ b/fs/cifs/file.c |
6127 | @@ -298,10 +298,12 @@ int cifs_open(struct inode *inode, struct file *file) |
6128 | (CIFS_UNIX_POSIX_PATH_OPS_CAP & |
6129 | le64_to_cpu(tcon->fsUnixInfo.Capability))) { |
6130 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); |
6131 | + oflags |= SMB_O_CREAT; |
6132 | /* can not refresh inode info since size could be stale */ |
6133 | rc = cifs_posix_open(full_path, &inode, file->f_path.mnt, |
6134 | - cifs_sb->mnt_file_mode /* ignored */, |
6135 | - oflags, &oplock, &netfid, xid); |
6136 | + inode->i_sb, |
6137 | + cifs_sb->mnt_file_mode /* ignored */, |
6138 | + oflags, &oplock, &netfid, xid); |
6139 | if (rc == 0) { |
6140 | cFYI(1, ("posix open succeeded")); |
6141 | /* no need for special case handling of setting mode |
6142 | @@ -513,8 +515,9 @@ reopen_error_exit: |
6143 | int oflags = (int) cifs_posix_convert_flags(file->f_flags); |
6144 | /* can not refresh inode info since size could be stale */ |
6145 | rc = cifs_posix_open(full_path, NULL, file->f_path.mnt, |
6146 | - cifs_sb->mnt_file_mode /* ignored */, |
6147 | - oflags, &oplock, &netfid, xid); |
6148 | + inode->i_sb, |
6149 | + cifs_sb->mnt_file_mode /* ignored */, |
6150 | + oflags, &oplock, &netfid, xid); |
6151 | if (rc == 0) { |
6152 | cFYI(1, ("posix reopen succeeded")); |
6153 | goto reopen_success; |
6154 | diff --git a/fs/compat.c b/fs/compat.c |
6155 | index 0544873..6490d21 100644 |
6156 | --- a/fs/compat.c |
6157 | +++ b/fs/compat.c |
6158 | @@ -568,6 +568,79 @@ out: |
6159 | return ret; |
6160 | } |
6161 | |
6162 | +/* A write operation does a read from user space and vice versa */ |
6163 | +#define vrfy_dir(type) ((type) == READ ? VERIFY_WRITE : VERIFY_READ) |
6164 | + |
6165 | +ssize_t compat_rw_copy_check_uvector(int type, |
6166 | + const struct compat_iovec __user *uvector, unsigned long nr_segs, |
6167 | + unsigned long fast_segs, struct iovec *fast_pointer, |
6168 | + struct iovec **ret_pointer) |
6169 | +{ |
6170 | + compat_ssize_t tot_len; |
6171 | + struct iovec *iov = *ret_pointer = fast_pointer; |
6172 | + ssize_t ret = 0; |
6173 | + int seg; |
6174 | + |
6175 | + /* |
6176 | + * SuS says "The readv() function *may* fail if the iovcnt argument |
6177 | + * was less than or equal to 0, or greater than {IOV_MAX}. Linux has |
6178 | + * traditionally returned zero for zero segments, so... |
6179 | + */ |
6180 | + if (nr_segs == 0) |
6181 | + goto out; |
6182 | + |
6183 | + ret = -EINVAL; |
6184 | + if (nr_segs > UIO_MAXIOV || nr_segs < 0) |
6185 | + goto out; |
6186 | + if (nr_segs > fast_segs) { |
6187 | + ret = -ENOMEM; |
6188 | + iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL); |
6189 | + if (iov == NULL) { |
6190 | + *ret_pointer = fast_pointer; |
6191 | + goto out; |
6192 | + } |
6193 | + } |
6194 | + *ret_pointer = iov; |
6195 | + |
6196 | + /* |
6197 | + * Single unix specification: |
6198 | + * We should -EINVAL if an element length is not >= 0 and fitting an |
6199 | + * ssize_t. The total length is fitting an ssize_t |
6200 | + * |
6201 | + * Be careful here because iov_len is a size_t not an ssize_t |
6202 | + */ |
6203 | + tot_len = 0; |
6204 | + ret = -EINVAL; |
6205 | + for (seg = 0; seg < nr_segs; seg++) { |
6206 | + compat_ssize_t tmp = tot_len; |
6207 | + compat_uptr_t buf; |
6208 | + compat_ssize_t len; |
6209 | + |
6210 | + if (__get_user(len, &uvector->iov_len) || |
6211 | + __get_user(buf, &uvector->iov_base)) { |
6212 | + ret = -EFAULT; |
6213 | + goto out; |
6214 | + } |
6215 | + if (len < 0) /* size_t not fitting in compat_ssize_t .. */ |
6216 | + goto out; |
6217 | + tot_len += len; |
6218 | + if (tot_len < tmp) /* maths overflow on the compat_ssize_t */ |
6219 | + goto out; |
6220 | + if (!access_ok(vrfy_dir(type), compat_ptr(buf), len)) { |
6221 | + ret = -EFAULT; |
6222 | + goto out; |
6223 | + } |
6224 | + iov->iov_base = compat_ptr(buf); |
6225 | + iov->iov_len = (compat_size_t) len; |
6226 | + uvector++; |
6227 | + iov++; |
6228 | + } |
6229 | + ret = tot_len; |
6230 | + |
6231 | +out: |
6232 | + return ret; |
6233 | +} |
6234 | + |
6235 | static inline long |
6236 | copy_iocb(long nr, u32 __user *ptr32, struct iocb __user * __user *ptr64) |
6237 | { |
6238 | @@ -600,7 +673,7 @@ compat_sys_io_submit(aio_context_t ctx_id, int nr, u32 __user *iocb) |
6239 | iocb64 = compat_alloc_user_space(nr * sizeof(*iocb64)); |
6240 | ret = copy_iocb(nr, iocb, iocb64); |
6241 | if (!ret) |
6242 | - ret = sys_io_submit(ctx_id, nr, iocb64); |
6243 | + ret = do_io_submit(ctx_id, nr, iocb64, 1); |
6244 | return ret; |
6245 | } |
6246 | |
6247 | @@ -1077,70 +1150,21 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, |
6248 | { |
6249 | compat_ssize_t tot_len; |
6250 | struct iovec iovstack[UIO_FASTIOV]; |
6251 | - struct iovec *iov=iovstack, *vector; |
6252 | + struct iovec *iov; |
6253 | ssize_t ret; |
6254 | - int seg; |
6255 | io_fn_t fn; |
6256 | iov_fn_t fnv; |
6257 | |
6258 | - /* |
6259 | - * SuS says "The readv() function *may* fail if the iovcnt argument |
6260 | - * was less than or equal to 0, or greater than {IOV_MAX}. Linux has |
6261 | - * traditionally returned zero for zero segments, so... |
6262 | - */ |
6263 | - ret = 0; |
6264 | - if (nr_segs == 0) |
6265 | - goto out; |
6266 | - |
6267 | - /* |
6268 | - * First get the "struct iovec" from user memory and |
6269 | - * verify all the pointers |
6270 | - */ |
6271 | ret = -EINVAL; |
6272 | - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0)) |
6273 | - goto out; |
6274 | if (!file->f_op) |
6275 | goto out; |
6276 | - if (nr_segs > UIO_FASTIOV) { |
6277 | - ret = -ENOMEM; |
6278 | - iov = kmalloc(nr_segs*sizeof(struct iovec), GFP_KERNEL); |
6279 | - if (!iov) |
6280 | - goto out; |
6281 | - } |
6282 | + |
6283 | ret = -EFAULT; |
6284 | if (!access_ok(VERIFY_READ, uvector, nr_segs*sizeof(*uvector))) |
6285 | goto out; |
6286 | |
6287 | - /* |
6288 | - * Single unix specification: |
6289 | - * We should -EINVAL if an element length is not >= 0 and fitting an |
6290 | - * ssize_t. The total length is fitting an ssize_t |
6291 | - * |
6292 | - * Be careful here because iov_len is a size_t not an ssize_t |
6293 | - */ |
6294 | - tot_len = 0; |
6295 | - vector = iov; |
6296 | - ret = -EINVAL; |
6297 | - for (seg = 0 ; seg < nr_segs; seg++) { |
6298 | - compat_ssize_t tmp = tot_len; |
6299 | - compat_ssize_t len; |
6300 | - compat_uptr_t buf; |
6301 | - |
6302 | - if (__get_user(len, &uvector->iov_len) || |
6303 | - __get_user(buf, &uvector->iov_base)) { |
6304 | - ret = -EFAULT; |
6305 | - goto out; |
6306 | - } |
6307 | - if (len < 0) /* size_t not fitting an compat_ssize_t .. */ |
6308 | - goto out; |
6309 | - tot_len += len; |
6310 | - if (tot_len < tmp) /* maths overflow on the compat_ssize_t */ |
6311 | - goto out; |
6312 | - vector->iov_base = compat_ptr(buf); |
6313 | - vector->iov_len = (compat_size_t) len; |
6314 | - uvector++; |
6315 | - vector++; |
6316 | - } |
6317 | + tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs, |
6318 | + UIO_FASTIOV, iovstack, &iov); |
6319 | if (tot_len == 0) { |
6320 | ret = 0; |
6321 | goto out; |
6322 | diff --git a/fs/dcache.c b/fs/dcache.c |
6323 | index f1358e5..2b6f09a 100644 |
6324 | --- a/fs/dcache.c |
6325 | +++ b/fs/dcache.c |
6326 | @@ -1529,6 +1529,7 @@ void d_delete(struct dentry * dentry) |
6327 | spin_lock(&dentry->d_lock); |
6328 | isdir = S_ISDIR(dentry->d_inode->i_mode); |
6329 | if (atomic_read(&dentry->d_count) == 1) { |
6330 | + dentry->d_flags &= ~DCACHE_CANT_MOUNT; |
6331 | dentry_iput(dentry); |
6332 | fsnotify_nameremove(dentry, isdir); |
6333 | return; |
6334 | diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c |
6335 | index 4cfab1c..d91e9d8 100644 |
6336 | --- a/fs/exofs/dir.c |
6337 | +++ b/fs/exofs/dir.c |
6338 | @@ -608,7 +608,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent) |
6339 | de->inode_no = cpu_to_le64(parent->i_ino); |
6340 | memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR)); |
6341 | exofs_set_de_type(de, inode); |
6342 | - kunmap_atomic(page, KM_USER0); |
6343 | + kunmap_atomic(kaddr, KM_USER0); |
6344 | err = exofs_commit_chunk(page, 0, chunk_size); |
6345 | fail: |
6346 | page_cache_release(page); |
6347 | diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c |
6348 | index d1fc662..d3d6a64 100644 |
6349 | --- a/fs/ext4/move_extent.c |
6350 | +++ b/fs/ext4/move_extent.c |
6351 | @@ -959,6 +959,9 @@ mext_check_arguments(struct inode *orig_inode, |
6352 | return -EINVAL; |
6353 | } |
6354 | |
6355 | + if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode)) |
6356 | + return -EPERM; |
6357 | + |
6358 | /* Ext4 move extent does not support swapfile */ |
6359 | if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) { |
6360 | ext4_debug("ext4 move extent: The argument files should " |
6361 | diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
6362 | index 5692c48..6df797e 100644 |
6363 | --- a/fs/ext4/resize.c |
6364 | +++ b/fs/ext4/resize.c |
6365 | @@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input) |
6366 | percpu_counter_add(&sbi->s_freeinodes_counter, |
6367 | EXT4_INODES_PER_GROUP(sb)); |
6368 | |
6369 | - if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { |
6370 | + if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) && |
6371 | + sbi->s_log_groups_per_flex) { |
6372 | ext4_group_t flex_group; |
6373 | flex_group = ext4_flex_group(sbi, input->group); |
6374 | atomic_add(input->free_blocks_count, |
6375 | diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c |
6376 | index 4b37f7c..760dc8d 100644 |
6377 | --- a/fs/fs-writeback.c |
6378 | +++ b/fs/fs-writeback.c |
6379 | @@ -852,6 +852,12 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb) |
6380 | unsigned long expired; |
6381 | long nr_pages; |
6382 | |
6383 | + /* |
6384 | + * When set to zero, disable periodic writeback |
6385 | + */ |
6386 | + if (!dirty_writeback_interval) |
6387 | + return 0; |
6388 | + |
6389 | expired = wb->last_old_flush + |
6390 | msecs_to_jiffies(dirty_writeback_interval * 10); |
6391 | if (time_before(jiffies, expired)) |
6392 | @@ -947,8 +953,12 @@ int bdi_writeback_task(struct bdi_writeback *wb) |
6393 | break; |
6394 | } |
6395 | |
6396 | - wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10); |
6397 | - schedule_timeout_interruptible(wait_jiffies); |
6398 | + if (dirty_writeback_interval) { |
6399 | + wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10); |
6400 | + schedule_timeout_interruptible(wait_jiffies); |
6401 | + } else |
6402 | + schedule(); |
6403 | + |
6404 | try_to_freeze(); |
6405 | } |
6406 | |
6407 | diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c |
6408 | index e6dd2ae..b20bfcc 100644 |
6409 | --- a/fs/gfs2/file.c |
6410 | +++ b/fs/gfs2/file.c |
6411 | @@ -218,6 +218,11 @@ static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask) |
6412 | if (error) |
6413 | goto out_drop_write; |
6414 | |
6415 | + error = -EACCES; |
6416 | + if (!is_owner_or_cap(inode)) |
6417 | + goto out; |
6418 | + |
6419 | + error = 0; |
6420 | flags = ip->i_diskflags; |
6421 | new_flags = (flags & ~mask) | (reqflags & mask); |
6422 | if ((new_flags ^ flags) == 0) |
6423 | @@ -275,8 +280,10 @@ static int gfs2_set_flags(struct file *filp, u32 __user *ptr) |
6424 | { |
6425 | struct inode *inode = filp->f_path.dentry->d_inode; |
6426 | u32 fsflags, gfsflags; |
6427 | + |
6428 | if (get_user(fsflags, ptr)) |
6429 | return -EFAULT; |
6430 | + |
6431 | gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags); |
6432 | if (!S_ISDIR(inode->i_mode)) { |
6433 | if (gfsflags & GFS2_DIF_INHERIT_JDATA) |
6434 | diff --git a/fs/libfs.c b/fs/libfs.c |
6435 | index ea9a6cc..b016af9 100644 |
6436 | --- a/fs/libfs.c |
6437 | +++ b/fs/libfs.c |
6438 | @@ -418,7 +418,8 @@ int simple_write_end(struct file *file, struct address_space *mapping, |
6439 | * unique inode values later for this filesystem, then you must take care |
6440 | * to pass it an appropriate max_reserved value to avoid collisions. |
6441 | */ |
6442 | -int simple_fill_super(struct super_block *s, int magic, struct tree_descr *files) |
6443 | +int simple_fill_super(struct super_block *s, unsigned long magic, |
6444 | + struct tree_descr *files) |
6445 | { |
6446 | struct inode *inode; |
6447 | struct dentry *root; |
6448 | diff --git a/fs/namei.c b/fs/namei.c |
6449 | index b86b96f..f6c7fcf 100644 |
6450 | --- a/fs/namei.c |
6451 | +++ b/fs/namei.c |
6452 | @@ -1620,6 +1620,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path, |
6453 | case LAST_DOTDOT: |
6454 | follow_dotdot(nd); |
6455 | dir = nd->path.dentry; |
6456 | + case LAST_DOT: |
6457 | if (nd->path.mnt->mnt_sb->s_type->fs_flags & FS_REVAL_DOT) { |
6458 | if (!dir->d_op->d_revalidate(dir, nd)) { |
6459 | error = -ESTALE; |
6460 | @@ -1627,7 +1628,6 @@ static struct file *do_last(struct nameidata *nd, struct path *path, |
6461 | } |
6462 | } |
6463 | /* fallthrough */ |
6464 | - case LAST_DOT: |
6465 | case LAST_ROOT: |
6466 | if (open_flag & O_CREAT) |
6467 | goto exit; |
6468 | diff --git a/fs/nfs/write.c b/fs/nfs/write.c |
6469 | index 3aea3ca..91679e2 100644 |
6470 | --- a/fs/nfs/write.c |
6471 | +++ b/fs/nfs/write.c |
6472 | @@ -1386,7 +1386,7 @@ static int nfs_commit_inode(struct inode *inode, int how) |
6473 | int res = 0; |
6474 | |
6475 | if (!nfs_commit_set_lock(NFS_I(inode), may_wait)) |
6476 | - goto out; |
6477 | + goto out_mark_dirty; |
6478 | spin_lock(&inode->i_lock); |
6479 | res = nfs_scan_commit(inode, &head, 0, 0); |
6480 | spin_unlock(&inode->i_lock); |
6481 | @@ -1398,9 +1398,18 @@ static int nfs_commit_inode(struct inode *inode, int how) |
6482 | wait_on_bit(&NFS_I(inode)->flags, NFS_INO_COMMIT, |
6483 | nfs_wait_bit_killable, |
6484 | TASK_KILLABLE); |
6485 | + else |
6486 | + goto out_mark_dirty; |
6487 | } else |
6488 | nfs_commit_clear_lock(NFS_I(inode)); |
6489 | -out: |
6490 | + return res; |
6491 | + /* Note: If we exit without ensuring that the commit is complete, |
6492 | + * we must mark the inode as dirty. Otherwise, future calls to |
6493 | + * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure |
6494 | + * that the data is on the disk. |
6495 | + */ |
6496 | +out_mark_dirty: |
6497 | + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); |
6498 | return res; |
6499 | } |
6500 | |
6501 | @@ -1509,14 +1518,17 @@ int nfs_wb_page(struct inode *inode, struct page *page) |
6502 | }; |
6503 | int ret; |
6504 | |
6505 | - while(PagePrivate(page)) { |
6506 | + for (;;) { |
6507 | wait_on_page_writeback(page); |
6508 | if (clear_page_dirty_for_io(page)) { |
6509 | ret = nfs_writepage_locked(page, &wbc); |
6510 | if (ret < 0) |
6511 | goto out_error; |
6512 | + continue; |
6513 | } |
6514 | - ret = sync_inode(inode, &wbc); |
6515 | + if (!PagePrivate(page)) |
6516 | + break; |
6517 | + ret = nfs_commit_inode(inode, FLUSH_SYNC); |
6518 | if (ret < 0) |
6519 | goto out_error; |
6520 | } |
6521 | diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c |
6522 | index 171699e..06b2a26 100644 |
6523 | --- a/fs/nfsd/nfssvc.c |
6524 | +++ b/fs/nfsd/nfssvc.c |
6525 | @@ -120,7 +120,7 @@ u32 nfsd_supported_minorversion; |
6526 | int nfsd_vers(int vers, enum vers_op change) |
6527 | { |
6528 | if (vers < NFSD_MINVERS || vers >= NFSD_NRVERS) |
6529 | - return -1; |
6530 | + return 0; |
6531 | switch(change) { |
6532 | case NFSD_SET: |
6533 | nfsd_versions[vers] = nfsd_version[vers]; |
6534 | diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c |
6535 | index 6dd5f19..4eb9baa 100644 |
6536 | --- a/fs/nfsd/vfs.c |
6537 | +++ b/fs/nfsd/vfs.c |
6538 | @@ -443,8 +443,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap, |
6539 | if (size_change) |
6540 | put_write_access(inode); |
6541 | if (!err) |
6542 | - if (EX_ISSYNC(fhp->fh_export)) |
6543 | - write_inode_now(inode, 1); |
6544 | + commit_metadata(fhp); |
6545 | out: |
6546 | return err; |
6547 | |
6548 | @@ -724,7 +723,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, |
6549 | struct inode *inode; |
6550 | int flags = O_RDONLY|O_LARGEFILE; |
6551 | __be32 err; |
6552 | - int host_err; |
6553 | + int host_err = 0; |
6554 | |
6555 | validate_process_creds(); |
6556 | |
6557 | @@ -761,7 +760,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type, |
6558 | * Check to see if there are any leases on this file. |
6559 | * This may block while leases are broken. |
6560 | */ |
6561 | - host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0)); |
6562 | + if (!(access & NFSD_MAY_NOT_BREAK_LEASE)) |
6563 | + host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0)); |
6564 | if (host_err == -EWOULDBLOCK) |
6565 | host_err = -ETIMEDOUT; |
6566 | if (host_err) /* NOMEM or WOULDBLOCK */ |
6567 | @@ -1169,7 +1169,8 @@ nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp, |
6568 | goto out; |
6569 | } |
6570 | |
6571 | - err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file); |
6572 | + err = nfsd_open(rqstp, fhp, S_IFREG, |
6573 | + NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file); |
6574 | if (err) |
6575 | goto out; |
6576 | if (EX_ISSYNC(fhp->fh_export)) { |
6577 | diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h |
6578 | index 4b1de0a..217a62c 100644 |
6579 | --- a/fs/nfsd/vfs.h |
6580 | +++ b/fs/nfsd/vfs.h |
6581 | @@ -20,6 +20,7 @@ |
6582 | #define NFSD_MAY_OWNER_OVERRIDE 64 |
6583 | #define NFSD_MAY_LOCAL_ACCESS 128 /* IRIX doing local access check on device special file*/ |
6584 | #define NFSD_MAY_BYPASS_GSS_ON_ROOT 256 |
6585 | +#define NFSD_MAY_NOT_BREAK_LEASE 512 |
6586 | |
6587 | #define NFSD_MAY_CREATE (NFSD_MAY_EXEC|NFSD_MAY_WRITE) |
6588 | #define NFSD_MAY_REMOVE (NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC) |
6589 | diff --git a/include/linux/aio.h b/include/linux/aio.h |
6590 | index 811dbb3..7a8db41 100644 |
6591 | --- a/include/linux/aio.h |
6592 | +++ b/include/linux/aio.h |
6593 | @@ -212,6 +212,8 @@ extern void kick_iocb(struct kiocb *iocb); |
6594 | extern int aio_complete(struct kiocb *iocb, long res, long res2); |
6595 | struct mm_struct; |
6596 | extern void exit_aio(struct mm_struct *mm); |
6597 | +extern long do_io_submit(aio_context_t ctx_id, long nr, |
6598 | + struct iocb __user *__user *iocbpp, bool compat); |
6599 | #else |
6600 | static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } |
6601 | static inline int aio_put_req(struct kiocb *iocb) { return 0; } |
6602 | @@ -219,6 +221,9 @@ static inline void kick_iocb(struct kiocb *iocb) { } |
6603 | static inline int aio_complete(struct kiocb *iocb, long res, long res2) { return 0; } |
6604 | struct mm_struct; |
6605 | static inline void exit_aio(struct mm_struct *mm) { } |
6606 | +static inline long do_io_submit(aio_context_t ctx_id, long nr, |
6607 | + struct iocb __user * __user *iocbpp, |
6608 | + bool compat) { return 0; } |
6609 | #endif /* CONFIG_AIO */ |
6610 | |
6611 | static inline struct kiocb *list_kiocb(struct list_head *h) |
6612 | diff --git a/include/linux/compat.h b/include/linux/compat.h |
6613 | index 717c691..168f7da 100644 |
6614 | --- a/include/linux/compat.h |
6615 | +++ b/include/linux/compat.h |
6616 | @@ -356,5 +356,9 @@ asmlinkage long compat_sys_newfstatat(unsigned int dfd, char __user * filename, |
6617 | asmlinkage long compat_sys_openat(unsigned int dfd, const char __user *filename, |
6618 | int flags, int mode); |
6619 | |
6620 | +extern ssize_t compat_rw_copy_check_uvector(int type, |
6621 | + const struct compat_iovec __user *uvector, unsigned long nr_segs, |
6622 | + unsigned long fast_segs, struct iovec *fast_pointer, |
6623 | + struct iovec **ret_pointer); |
6624 | #endif /* CONFIG_COMPAT */ |
6625 | #endif /* _LINUX_COMPAT_H */ |
6626 | diff --git a/include/linux/fs.h b/include/linux/fs.h |
6627 | index 44f35ae..801b398 100644 |
6628 | --- a/include/linux/fs.h |
6629 | +++ b/include/linux/fs.h |
6630 | @@ -2356,7 +2356,7 @@ extern const struct file_operations simple_dir_operations; |
6631 | extern const struct inode_operations simple_dir_inode_operations; |
6632 | struct tree_descr { char *name; const struct file_operations *ops; int mode; }; |
6633 | struct dentry *d_alloc_name(struct dentry *, const char *); |
6634 | -extern int simple_fill_super(struct super_block *, int, struct tree_descr *); |
6635 | +extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *); |
6636 | extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count); |
6637 | extern void simple_release_fs(struct vfsmount **mount, int *count); |
6638 | |
6639 | diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h |
6640 | index 8b5f7cc..ce9cd11 100644 |
6641 | --- a/include/linux/miscdevice.h |
6642 | +++ b/include/linux/miscdevice.h |
6643 | @@ -3,6 +3,12 @@ |
6644 | #include <linux/module.h> |
6645 | #include <linux/major.h> |
6646 | |
6647 | +/* |
6648 | + * These allocations are managed by device@lanana.org. If you use an |
6649 | + * entry that is not in assigned your entry may well be moved and |
6650 | + * reassigned, or set dynamic if a fixed value is not justified. |
6651 | + */ |
6652 | + |
6653 | #define PSMOUSE_MINOR 1 |
6654 | #define MS_BUSMOUSE_MINOR 2 |
6655 | #define ATIXL_BUSMOUSE_MINOR 3 |
6656 | @@ -30,7 +36,6 @@ |
6657 | #define HPET_MINOR 228 |
6658 | #define FUSE_MINOR 229 |
6659 | #define KVM_MINOR 232 |
6660 | -#define VHOST_NET_MINOR 233 |
6661 | #define MISC_DYNAMIC_MINOR 255 |
6662 | |
6663 | struct device; |
6664 | diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h |
6665 | index 9f688d2..c516a33 100644 |
6666 | --- a/include/linux/pci_ids.h |
6667 | +++ b/include/linux/pci_ids.h |
6668 | @@ -2321,6 +2321,7 @@ |
6669 | #define PCI_VENDOR_ID_JMICRON 0x197B |
6670 | #define PCI_DEVICE_ID_JMICRON_JMB360 0x2360 |
6671 | #define PCI_DEVICE_ID_JMICRON_JMB361 0x2361 |
6672 | +#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362 |
6673 | #define PCI_DEVICE_ID_JMICRON_JMB363 0x2363 |
6674 | #define PCI_DEVICE_ID_JMICRON_JMB365 0x2365 |
6675 | #define PCI_DEVICE_ID_JMICRON_JMB366 0x2366 |
6676 | diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h |
6677 | index c8e3754..eea9188 100644 |
6678 | --- a/include/linux/perf_event.h |
6679 | +++ b/include/linux/perf_event.h |
6680 | @@ -531,6 +531,7 @@ enum perf_event_active_state { |
6681 | struct file; |
6682 | |
6683 | struct perf_mmap_data { |
6684 | + atomic_t refcount; |
6685 | struct rcu_head rcu_head; |
6686 | #ifdef CONFIG_PERF_USE_VMALLOC |
6687 | struct work_struct work; |
6688 | @@ -538,7 +539,6 @@ struct perf_mmap_data { |
6689 | int data_order; |
6690 | int nr_pages; /* nr of data pages */ |
6691 | int writable; /* are we writable */ |
6692 | - int nr_locked; /* nr pages mlocked */ |
6693 | |
6694 | atomic_t poll; /* POLL_ for wakeups */ |
6695 | atomic_t events; /* event_id limit */ |
6696 | @@ -582,7 +582,6 @@ struct perf_event { |
6697 | int nr_siblings; |
6698 | int group_flags; |
6699 | struct perf_event *group_leader; |
6700 | - struct perf_event *output; |
6701 | const struct pmu *pmu; |
6702 | |
6703 | enum perf_event_active_state state; |
6704 | @@ -643,6 +642,8 @@ struct perf_event { |
6705 | /* mmap bits */ |
6706 | struct mutex mmap_mutex; |
6707 | atomic_t mmap_count; |
6708 | + int mmap_locked; |
6709 | + struct user_struct *mmap_user; |
6710 | struct perf_mmap_data *data; |
6711 | |
6712 | /* poll related */ |
6713 | diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h |
6714 | index 0249d41..c3dc7e1 100644 |
6715 | --- a/include/linux/slub_def.h |
6716 | +++ b/include/linux/slub_def.h |
6717 | @@ -75,12 +75,6 @@ struct kmem_cache { |
6718 | int offset; /* Free pointer offset. */ |
6719 | struct kmem_cache_order_objects oo; |
6720 | |
6721 | - /* |
6722 | - * Avoid an extra cache line for UP, SMP and for the node local to |
6723 | - * struct kmem_cache. |
6724 | - */ |
6725 | - struct kmem_cache_node local_node; |
6726 | - |
6727 | /* Allocation and freeing of slabs */ |
6728 | struct kmem_cache_order_objects max; |
6729 | struct kmem_cache_order_objects min; |
6730 | @@ -102,6 +96,9 @@ struct kmem_cache { |
6731 | */ |
6732 | int remote_node_defrag_ratio; |
6733 | struct kmem_cache_node *node[MAX_NUMNODES]; |
6734 | +#else |
6735 | + /* Avoid an extra cache line for UP */ |
6736 | + struct kmem_cache_node local_node; |
6737 | #endif |
6738 | }; |
6739 | |
6740 | @@ -132,7 +129,7 @@ struct kmem_cache { |
6741 | #ifdef CONFIG_ZONE_DMA |
6742 | #define SLUB_DMA __GFP_DMA |
6743 | /* Reserve extra caches for potential DMA use */ |
6744 | -#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6) |
6745 | +#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT) |
6746 | #else |
6747 | /* Disable DMA functionality */ |
6748 | #define SLUB_DMA (__force gfp_t)0 |
6749 | diff --git a/include/linux/swap.h b/include/linux/swap.h |
6750 | index 1f59d93..d70f424 100644 |
6751 | --- a/include/linux/swap.h |
6752 | +++ b/include/linux/swap.h |
6753 | @@ -223,21 +223,11 @@ static inline void lru_cache_add_anon(struct page *page) |
6754 | __lru_cache_add(page, LRU_INACTIVE_ANON); |
6755 | } |
6756 | |
6757 | -static inline void lru_cache_add_active_anon(struct page *page) |
6758 | -{ |
6759 | - __lru_cache_add(page, LRU_ACTIVE_ANON); |
6760 | -} |
6761 | - |
6762 | static inline void lru_cache_add_file(struct page *page) |
6763 | { |
6764 | __lru_cache_add(page, LRU_INACTIVE_FILE); |
6765 | } |
6766 | |
6767 | -static inline void lru_cache_add_active_file(struct page *page) |
6768 | -{ |
6769 | - __lru_cache_add(page, LRU_ACTIVE_FILE); |
6770 | -} |
6771 | - |
6772 | /* linux/mm/vmscan.c */ |
6773 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
6774 | gfp_t gfp_mask, nodemask_t *mask); |
6775 | diff --git a/include/linux/tboot.h b/include/linux/tboot.h |
6776 | index bf2a0c7..1dba6ee 100644 |
6777 | --- a/include/linux/tboot.h |
6778 | +++ b/include/linux/tboot.h |
6779 | @@ -150,6 +150,7 @@ extern int tboot_force_iommu(void); |
6780 | |
6781 | #else |
6782 | |
6783 | +#define tboot_enabled() 0 |
6784 | #define tboot_probe() do { } while (0) |
6785 | #define tboot_shutdown(shutdown_type) do { } while (0) |
6786 | #define tboot_sleep(sleep_state, pm1a_control, pm1b_control) \ |
6787 | diff --git a/include/linux/usb.h b/include/linux/usb.h |
6788 | index 739f1fd..9983302 100644 |
6789 | --- a/include/linux/usb.h |
6790 | +++ b/include/linux/usb.h |
6791 | @@ -965,10 +965,19 @@ extern int usb_disabled(void); |
6792 | * needed */ |
6793 | #define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */ |
6794 | |
6795 | +/* The following flags are used internally by usbcore and HCDs */ |
6796 | #define URB_DIR_IN 0x0200 /* Transfer from device to host */ |
6797 | #define URB_DIR_OUT 0 |
6798 | #define URB_DIR_MASK URB_DIR_IN |
6799 | |
6800 | +#define URB_DMA_MAP_SINGLE 0x00010000 /* Non-scatter-gather mapping */ |
6801 | +#define URB_DMA_MAP_PAGE 0x00020000 /* HCD-unsupported S-G */ |
6802 | +#define URB_DMA_MAP_SG 0x00040000 /* HCD-supported S-G */ |
6803 | +#define URB_MAP_LOCAL 0x00080000 /* HCD-local-memory mapping */ |
6804 | +#define URB_SETUP_MAP_SINGLE 0x00100000 /* Setup packet DMA mapped */ |
6805 | +#define URB_SETUP_MAP_LOCAL 0x00200000 /* HCD-local setup packet */ |
6806 | +#define URB_DMA_SG_COMBINED 0x00400000 /* S-G entries were combined */ |
6807 | + |
6808 | struct usb_iso_packet_descriptor { |
6809 | unsigned int offset; |
6810 | unsigned int length; /* expected length */ |
6811 | diff --git a/include/trace/events/signal.h b/include/trace/events/signal.h |
6812 | index a510b75..32c0697 100644 |
6813 | --- a/include/trace/events/signal.h |
6814 | +++ b/include/trace/events/signal.h |
6815 | @@ -10,7 +10,8 @@ |
6816 | |
6817 | #define TP_STORE_SIGINFO(__entry, info) \ |
6818 | do { \ |
6819 | - if (info == SEND_SIG_NOINFO) { \ |
6820 | + if (info == SEND_SIG_NOINFO || \ |
6821 | + info == SEND_SIG_FORCED) { \ |
6822 | __entry->errno = 0; \ |
6823 | __entry->code = SI_USER; \ |
6824 | } else if (info == SEND_SIG_PRIV) { \ |
6825 | diff --git a/kernel/cgroup.c b/kernel/cgroup.c |
6826 | index 6d870f2..bf4f78f 100644 |
6827 | --- a/kernel/cgroup.c |
6828 | +++ b/kernel/cgroup.c |
6829 | @@ -4599,7 +4599,7 @@ static int alloc_css_id(struct cgroup_subsys *ss, struct cgroup *parent, |
6830 | parent_css = parent->subsys[subsys_id]; |
6831 | child_css = child->subsys[subsys_id]; |
6832 | parent_id = parent_css->id; |
6833 | - depth = parent_id->depth; |
6834 | + depth = parent_id->depth + 1; |
6835 | |
6836 | child_id = get_new_cssid(ss, depth); |
6837 | if (IS_ERR(child_id)) |
6838 | diff --git a/kernel/compat.c b/kernel/compat.c |
6839 | index 7f40e92..5adab05 100644 |
6840 | --- a/kernel/compat.c |
6841 | +++ b/kernel/compat.c |
6842 | @@ -495,29 +495,26 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len, |
6843 | { |
6844 | int ret; |
6845 | cpumask_var_t mask; |
6846 | - unsigned long *k; |
6847 | - unsigned int min_length = cpumask_size(); |
6848 | - |
6849 | - if (nr_cpu_ids <= BITS_PER_COMPAT_LONG) |
6850 | - min_length = sizeof(compat_ulong_t); |
6851 | |
6852 | - if (len < min_length) |
6853 | + if ((len * BITS_PER_BYTE) < nr_cpu_ids) |
6854 | + return -EINVAL; |
6855 | + if (len & (sizeof(compat_ulong_t)-1)) |
6856 | return -EINVAL; |
6857 | |
6858 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
6859 | return -ENOMEM; |
6860 | |
6861 | ret = sched_getaffinity(pid, mask); |
6862 | - if (ret < 0) |
6863 | - goto out; |
6864 | + if (ret == 0) { |
6865 | + size_t retlen = min_t(size_t, len, cpumask_size()); |
6866 | |
6867 | - k = cpumask_bits(mask); |
6868 | - ret = compat_put_bitmap(user_mask_ptr, k, min_length * 8); |
6869 | - if (ret == 0) |
6870 | - ret = min_length; |
6871 | - |
6872 | -out: |
6873 | + if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8)) |
6874 | + ret = -EFAULT; |
6875 | + else |
6876 | + ret = retlen; |
6877 | + } |
6878 | free_cpumask_var(mask); |
6879 | + |
6880 | return ret; |
6881 | } |
6882 | |
6883 | diff --git a/kernel/mutex.c b/kernel/mutex.c |
6884 | index 632f04c..4c0b7b3 100644 |
6885 | --- a/kernel/mutex.c |
6886 | +++ b/kernel/mutex.c |
6887 | @@ -172,6 +172,13 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, |
6888 | struct thread_info *owner; |
6889 | |
6890 | /* |
6891 | + * If we own the BKL, then don't spin. The owner of |
6892 | + * the mutex might be waiting on us to release the BKL. |
6893 | + */ |
6894 | + if (unlikely(current->lock_depth >= 0)) |
6895 | + break; |
6896 | + |
6897 | + /* |
6898 | * If there's an owner, wait for it to either |
6899 | * release the lock or go to sleep. |
6900 | */ |
6901 | diff --git a/kernel/perf_event.c b/kernel/perf_event.c |
6902 | index 3d1552d..a244651 100644 |
6903 | --- a/kernel/perf_event.c |
6904 | +++ b/kernel/perf_event.c |
6905 | @@ -262,6 +262,18 @@ static void update_event_times(struct perf_event *event) |
6906 | event->total_time_running = run_end - event->tstamp_running; |
6907 | } |
6908 | |
6909 | +/* |
6910 | + * Update total_time_enabled and total_time_running for all events in a group. |
6911 | + */ |
6912 | +static void update_group_times(struct perf_event *leader) |
6913 | +{ |
6914 | + struct perf_event *event; |
6915 | + |
6916 | + update_event_times(leader); |
6917 | + list_for_each_entry(event, &leader->sibling_list, group_entry) |
6918 | + update_event_times(event); |
6919 | +} |
6920 | + |
6921 | static struct list_head * |
6922 | ctx_group_list(struct perf_event *event, struct perf_event_context *ctx) |
6923 | { |
6924 | @@ -315,8 +327,6 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) |
6925 | static void |
6926 | list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
6927 | { |
6928 | - struct perf_event *sibling, *tmp; |
6929 | - |
6930 | if (list_empty(&event->group_entry)) |
6931 | return; |
6932 | ctx->nr_events--; |
6933 | @@ -329,7 +339,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
6934 | if (event->group_leader != event) |
6935 | event->group_leader->nr_siblings--; |
6936 | |
6937 | - update_event_times(event); |
6938 | + update_group_times(event); |
6939 | |
6940 | /* |
6941 | * If event was in error state, then keep it |
6942 | @@ -340,6 +350,12 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx) |
6943 | */ |
6944 | if (event->state > PERF_EVENT_STATE_OFF) |
6945 | event->state = PERF_EVENT_STATE_OFF; |
6946 | +} |
6947 | + |
6948 | +static void |
6949 | +perf_destroy_group(struct perf_event *event, struct perf_event_context *ctx) |
6950 | +{ |
6951 | + struct perf_event *sibling, *tmp; |
6952 | |
6953 | /* |
6954 | * If this was a group event with sibling events then |
6955 | @@ -505,18 +521,6 @@ retry: |
6956 | } |
6957 | |
6958 | /* |
6959 | - * Update total_time_enabled and total_time_running for all events in a group. |
6960 | - */ |
6961 | -static void update_group_times(struct perf_event *leader) |
6962 | -{ |
6963 | - struct perf_event *event; |
6964 | - |
6965 | - update_event_times(leader); |
6966 | - list_for_each_entry(event, &leader->sibling_list, group_entry) |
6967 | - update_event_times(event); |
6968 | -} |
6969 | - |
6970 | -/* |
6971 | * Cross CPU call to disable a performance event |
6972 | */ |
6973 | static void __perf_event_disable(void *info) |
6974 | @@ -1452,6 +1456,9 @@ do { \ |
6975 | divisor = nsec * frequency; |
6976 | } |
6977 | |
6978 | + if (!divisor) |
6979 | + return dividend; |
6980 | + |
6981 | return div64_u64(dividend, divisor); |
6982 | } |
6983 | |
6984 | @@ -1474,7 +1481,7 @@ static int perf_event_start(struct perf_event *event) |
6985 | static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count) |
6986 | { |
6987 | struct hw_perf_event *hwc = &event->hw; |
6988 | - u64 period, sample_period; |
6989 | + s64 period, sample_period; |
6990 | s64 delta; |
6991 | |
6992 | period = perf_calculate_period(event, nsec, count); |
6993 | @@ -1825,6 +1832,7 @@ static void free_event_rcu(struct rcu_head *head) |
6994 | } |
6995 | |
6996 | static void perf_pending_sync(struct perf_event *event); |
6997 | +static void perf_mmap_data_put(struct perf_mmap_data *data); |
6998 | |
6999 | static void free_event(struct perf_event *event) |
7000 | { |
7001 | @@ -1840,9 +1848,9 @@ static void free_event(struct perf_event *event) |
7002 | atomic_dec(&nr_task_events); |
7003 | } |
7004 | |
7005 | - if (event->output) { |
7006 | - fput(event->output->filp); |
7007 | - event->output = NULL; |
7008 | + if (event->data) { |
7009 | + perf_mmap_data_put(event->data); |
7010 | + event->data = NULL; |
7011 | } |
7012 | |
7013 | if (event->destroy) |
7014 | @@ -1856,9 +1864,18 @@ int perf_event_release_kernel(struct perf_event *event) |
7015 | { |
7016 | struct perf_event_context *ctx = event->ctx; |
7017 | |
7018 | + /* |
7019 | + * Remove from the PMU, can't get re-enabled since we got |
7020 | + * here because the last ref went. |
7021 | + */ |
7022 | + perf_event_disable(event); |
7023 | + |
7024 | WARN_ON_ONCE(ctx->parent_ctx); |
7025 | mutex_lock(&ctx->mutex); |
7026 | - perf_event_remove_from_context(event); |
7027 | + raw_spin_lock_irq(&ctx->lock); |
7028 | + list_del_event(event, ctx); |
7029 | + perf_destroy_group(event, ctx); |
7030 | + raw_spin_unlock_irq(&ctx->lock); |
7031 | mutex_unlock(&ctx->mutex); |
7032 | |
7033 | mutex_lock(&event->owner->perf_event_mutex); |
7034 | @@ -2138,7 +2155,27 @@ unlock: |
7035 | return ret; |
7036 | } |
7037 | |
7038 | -static int perf_event_set_output(struct perf_event *event, int output_fd); |
7039 | +static const struct file_operations perf_fops; |
7040 | + |
7041 | +static struct perf_event *perf_fget_light(int fd, int *fput_needed) |
7042 | +{ |
7043 | + struct file *file; |
7044 | + |
7045 | + file = fget_light(fd, fput_needed); |
7046 | + if (!file) |
7047 | + return ERR_PTR(-EBADF); |
7048 | + |
7049 | + if (file->f_op != &perf_fops) { |
7050 | + fput_light(file, *fput_needed); |
7051 | + *fput_needed = 0; |
7052 | + return ERR_PTR(-EBADF); |
7053 | + } |
7054 | + |
7055 | + return file->private_data; |
7056 | +} |
7057 | + |
7058 | +static int perf_event_set_output(struct perf_event *event, |
7059 | + struct perf_event *output_event); |
7060 | static int perf_event_set_filter(struct perf_event *event, void __user *arg); |
7061 | |
7062 | static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
7063 | @@ -2165,7 +2202,23 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
7064 | return perf_event_period(event, (u64 __user *)arg); |
7065 | |
7066 | case PERF_EVENT_IOC_SET_OUTPUT: |
7067 | - return perf_event_set_output(event, arg); |
7068 | + { |
7069 | + struct perf_event *output_event = NULL; |
7070 | + int fput_needed = 0; |
7071 | + int ret; |
7072 | + |
7073 | + if (arg != -1) { |
7074 | + output_event = perf_fget_light(arg, &fput_needed); |
7075 | + if (IS_ERR(output_event)) |
7076 | + return PTR_ERR(output_event); |
7077 | + } |
7078 | + |
7079 | + ret = perf_event_set_output(event, output_event); |
7080 | + if (output_event) |
7081 | + fput_light(output_event->filp, fput_needed); |
7082 | + |
7083 | + return ret; |
7084 | + } |
7085 | |
7086 | case PERF_EVENT_IOC_SET_FILTER: |
7087 | return perf_event_set_filter(event, (void __user *)arg); |
7088 | @@ -2290,8 +2343,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages) |
7089 | unsigned long size; |
7090 | int i; |
7091 | |
7092 | - WARN_ON(atomic_read(&event->mmap_count)); |
7093 | - |
7094 | size = sizeof(struct perf_mmap_data); |
7095 | size += nr_pages * sizeof(void *); |
7096 | |
7097 | @@ -2398,8 +2449,6 @@ perf_mmap_data_alloc(struct perf_event *event, int nr_pages) |
7098 | unsigned long size; |
7099 | void *all_buf; |
7100 | |
7101 | - WARN_ON(atomic_read(&event->mmap_count)); |
7102 | - |
7103 | size = sizeof(struct perf_mmap_data); |
7104 | size += sizeof(void *); |
7105 | |
7106 | @@ -2479,7 +2528,7 @@ perf_mmap_data_init(struct perf_event *event, struct perf_mmap_data *data) |
7107 | if (!data->watermark) |
7108 | data->watermark = max_size / 2; |
7109 | |
7110 | - |
7111 | + atomic_set(&data->refcount, 1); |
7112 | rcu_assign_pointer(event->data, data); |
7113 | } |
7114 | |
7115 | @@ -2491,13 +2540,26 @@ static void perf_mmap_data_free_rcu(struct rcu_head *rcu_head) |
7116 | perf_mmap_data_free(data); |
7117 | } |
7118 | |
7119 | -static void perf_mmap_data_release(struct perf_event *event) |
7120 | +static struct perf_mmap_data *perf_mmap_data_get(struct perf_event *event) |
7121 | { |
7122 | - struct perf_mmap_data *data = event->data; |
7123 | + struct perf_mmap_data *data; |
7124 | + |
7125 | + rcu_read_lock(); |
7126 | + data = rcu_dereference(event->data); |
7127 | + if (data) { |
7128 | + if (!atomic_inc_not_zero(&data->refcount)) |
7129 | + data = NULL; |
7130 | + } |
7131 | + rcu_read_unlock(); |
7132 | |
7133 | - WARN_ON(atomic_read(&event->mmap_count)); |
7134 | + return data; |
7135 | +} |
7136 | + |
7137 | +static void perf_mmap_data_put(struct perf_mmap_data *data) |
7138 | +{ |
7139 | + if (!atomic_dec_and_test(&data->refcount)) |
7140 | + return; |
7141 | |
7142 | - rcu_assign_pointer(event->data, NULL); |
7143 | call_rcu(&data->rcu_head, perf_mmap_data_free_rcu); |
7144 | } |
7145 | |
7146 | @@ -2512,15 +2574,18 @@ static void perf_mmap_close(struct vm_area_struct *vma) |
7147 | { |
7148 | struct perf_event *event = vma->vm_file->private_data; |
7149 | |
7150 | - WARN_ON_ONCE(event->ctx->parent_ctx); |
7151 | if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) { |
7152 | unsigned long size = perf_data_size(event->data); |
7153 | - struct user_struct *user = current_user(); |
7154 | + struct user_struct *user = event->mmap_user; |
7155 | + struct perf_mmap_data *data = event->data; |
7156 | |
7157 | atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm); |
7158 | - vma->vm_mm->locked_vm -= event->data->nr_locked; |
7159 | - perf_mmap_data_release(event); |
7160 | + vma->vm_mm->locked_vm -= event->mmap_locked; |
7161 | + rcu_assign_pointer(event->data, NULL); |
7162 | mutex_unlock(&event->mmap_mutex); |
7163 | + |
7164 | + perf_mmap_data_put(data); |
7165 | + free_uid(user); |
7166 | } |
7167 | } |
7168 | |
7169 | @@ -2564,13 +2629,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) |
7170 | |
7171 | WARN_ON_ONCE(event->ctx->parent_ctx); |
7172 | mutex_lock(&event->mmap_mutex); |
7173 | - if (event->output) { |
7174 | - ret = -EINVAL; |
7175 | - goto unlock; |
7176 | - } |
7177 | - |
7178 | - if (atomic_inc_not_zero(&event->mmap_count)) { |
7179 | - if (nr_pages != event->data->nr_pages) |
7180 | + if (event->data) { |
7181 | + if (event->data->nr_pages == nr_pages) |
7182 | + atomic_inc(&event->data->refcount); |
7183 | + else |
7184 | ret = -EINVAL; |
7185 | goto unlock; |
7186 | } |
7187 | @@ -2602,21 +2664,23 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma) |
7188 | WARN_ON(event->data); |
7189 | |
7190 | data = perf_mmap_data_alloc(event, nr_pages); |
7191 | - ret = -ENOMEM; |
7192 | - if (!data) |
7193 | + if (!data) { |
7194 | + ret = -ENOMEM; |
7195 | goto unlock; |
7196 | + } |
7197 | |
7198 | - ret = 0; |
7199 | perf_mmap_data_init(event, data); |
7200 | - |
7201 | - atomic_set(&event->mmap_count, 1); |
7202 | - atomic_long_add(user_extra, &user->locked_vm); |
7203 | - vma->vm_mm->locked_vm += extra; |
7204 | - event->data->nr_locked = extra; |
7205 | if (vma->vm_flags & VM_WRITE) |
7206 | event->data->writable = 1; |
7207 | |
7208 | + atomic_long_add(user_extra, &user->locked_vm); |
7209 | + event->mmap_locked = extra; |
7210 | + event->mmap_user = get_current_user(); |
7211 | + vma->vm_mm->locked_vm += event->mmap_locked; |
7212 | + |
7213 | unlock: |
7214 | + if (!ret) |
7215 | + atomic_inc(&event->mmap_count); |
7216 | mutex_unlock(&event->mmap_mutex); |
7217 | |
7218 | vma->vm_flags |= VM_RESERVED; |
7219 | @@ -2946,7 +3010,6 @@ int perf_output_begin(struct perf_output_handle *handle, |
7220 | struct perf_event *event, unsigned int size, |
7221 | int nmi, int sample) |
7222 | { |
7223 | - struct perf_event *output_event; |
7224 | struct perf_mmap_data *data; |
7225 | unsigned long tail, offset, head; |
7226 | int have_lost; |
7227 | @@ -2963,10 +3026,6 @@ int perf_output_begin(struct perf_output_handle *handle, |
7228 | if (event->parent) |
7229 | event = event->parent; |
7230 | |
7231 | - output_event = rcu_dereference(event->output); |
7232 | - if (output_event) |
7233 | - event = output_event; |
7234 | - |
7235 | data = rcu_dereference(event->data); |
7236 | if (!data) |
7237 | goto out; |
7238 | @@ -4730,54 +4789,41 @@ err_size: |
7239 | goto out; |
7240 | } |
7241 | |
7242 | -static int perf_event_set_output(struct perf_event *event, int output_fd) |
7243 | +static int |
7244 | +perf_event_set_output(struct perf_event *event, struct perf_event *output_event) |
7245 | { |
7246 | - struct perf_event *output_event = NULL; |
7247 | - struct file *output_file = NULL; |
7248 | - struct perf_event *old_output; |
7249 | - int fput_needed = 0; |
7250 | + struct perf_mmap_data *data = NULL, *old_data = NULL; |
7251 | int ret = -EINVAL; |
7252 | |
7253 | - if (!output_fd) |
7254 | + if (!output_event) |
7255 | goto set; |
7256 | |
7257 | - output_file = fget_light(output_fd, &fput_needed); |
7258 | - if (!output_file) |
7259 | - return -EBADF; |
7260 | - |
7261 | - if (output_file->f_op != &perf_fops) |
7262 | + /* don't allow circular references */ |
7263 | + if (event == output_event) |
7264 | goto out; |
7265 | |
7266 | - output_event = output_file->private_data; |
7267 | - |
7268 | - /* Don't chain output fds */ |
7269 | - if (output_event->output) |
7270 | - goto out; |
7271 | - |
7272 | - /* Don't set an output fd when we already have an output channel */ |
7273 | - if (event->data) |
7274 | - goto out; |
7275 | - |
7276 | - atomic_long_inc(&output_file->f_count); |
7277 | - |
7278 | set: |
7279 | mutex_lock(&event->mmap_mutex); |
7280 | - old_output = event->output; |
7281 | - rcu_assign_pointer(event->output, output_event); |
7282 | - mutex_unlock(&event->mmap_mutex); |
7283 | + /* Can't redirect output if we've got an active mmap() */ |
7284 | + if (atomic_read(&event->mmap_count)) |
7285 | + goto unlock; |
7286 | |
7287 | - if (old_output) { |
7288 | - /* |
7289 | - * we need to make sure no existing perf_output_*() |
7290 | - * is still referencing this event. |
7291 | - */ |
7292 | - synchronize_rcu(); |
7293 | - fput(old_output->filp); |
7294 | + if (output_event) { |
7295 | + /* get the buffer we want to redirect to */ |
7296 | + data = perf_mmap_data_get(output_event); |
7297 | + if (!data) |
7298 | + goto unlock; |
7299 | } |
7300 | |
7301 | + old_data = event->data; |
7302 | + rcu_assign_pointer(event->data, data); |
7303 | ret = 0; |
7304 | +unlock: |
7305 | + mutex_unlock(&event->mmap_mutex); |
7306 | + |
7307 | + if (old_data) |
7308 | + perf_mmap_data_put(old_data); |
7309 | out: |
7310 | - fput_light(output_file, fput_needed); |
7311 | return ret; |
7312 | } |
7313 | |
7314 | @@ -4793,13 +4839,13 @@ SYSCALL_DEFINE5(perf_event_open, |
7315 | struct perf_event_attr __user *, attr_uptr, |
7316 | pid_t, pid, int, cpu, int, group_fd, unsigned long, flags) |
7317 | { |
7318 | - struct perf_event *event, *group_leader; |
7319 | + struct perf_event *event, *group_leader = NULL, *output_event = NULL; |
7320 | struct perf_event_attr attr; |
7321 | struct perf_event_context *ctx; |
7322 | struct file *event_file = NULL; |
7323 | struct file *group_file = NULL; |
7324 | + int event_fd; |
7325 | int fput_needed = 0; |
7326 | - int fput_needed2 = 0; |
7327 | int err; |
7328 | |
7329 | /* for future expandability... */ |
7330 | @@ -4820,26 +4866,38 @@ SYSCALL_DEFINE5(perf_event_open, |
7331 | return -EINVAL; |
7332 | } |
7333 | |
7334 | + event_fd = get_unused_fd_flags(O_RDWR); |
7335 | + if (event_fd < 0) |
7336 | + return event_fd; |
7337 | + |
7338 | + if (group_fd != -1) { |
7339 | + group_leader = perf_fget_light(group_fd, &fput_needed); |
7340 | + if (IS_ERR(group_leader)) { |
7341 | + err = PTR_ERR(group_leader); |
7342 | + goto err_put_context; |
7343 | + } |
7344 | + group_file = group_leader->filp; |
7345 | + if (flags & PERF_FLAG_FD_OUTPUT) |
7346 | + output_event = group_leader; |
7347 | + if (flags & PERF_FLAG_FD_NO_GROUP) |
7348 | + group_leader = NULL; |
7349 | + } |
7350 | + |
7351 | /* |
7352 | * Get the target context (task or percpu): |
7353 | */ |
7354 | ctx = find_get_context(pid, cpu); |
7355 | - if (IS_ERR(ctx)) |
7356 | - return PTR_ERR(ctx); |
7357 | + if (IS_ERR(ctx)) { |
7358 | + err = PTR_ERR(ctx); |
7359 | + goto err_fd; |
7360 | + } |
7361 | |
7362 | /* |
7363 | * Look up the group leader (we will attach this event to it): |
7364 | */ |
7365 | - group_leader = NULL; |
7366 | - if (group_fd != -1 && !(flags & PERF_FLAG_FD_NO_GROUP)) { |
7367 | + if (group_leader) { |
7368 | err = -EINVAL; |
7369 | - group_file = fget_light(group_fd, &fput_needed); |
7370 | - if (!group_file) |
7371 | - goto err_put_context; |
7372 | - if (group_file->f_op != &perf_fops) |
7373 | - goto err_put_context; |
7374 | |
7375 | - group_leader = group_file->private_data; |
7376 | /* |
7377 | * Do not allow a recursive hierarchy (this new sibling |
7378 | * becoming part of another group-sibling): |
7379 | @@ -4861,22 +4919,21 @@ SYSCALL_DEFINE5(perf_event_open, |
7380 | |
7381 | event = perf_event_alloc(&attr, cpu, ctx, group_leader, |
7382 | NULL, NULL, GFP_KERNEL); |
7383 | - err = PTR_ERR(event); |
7384 | - if (IS_ERR(event)) |
7385 | + if (IS_ERR(event)) { |
7386 | + err = PTR_ERR(event); |
7387 | goto err_put_context; |
7388 | + } |
7389 | |
7390 | - err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR); |
7391 | - if (err < 0) |
7392 | - goto err_free_put_context; |
7393 | + if (output_event) { |
7394 | + err = perf_event_set_output(event, output_event); |
7395 | + if (err) |
7396 | + goto err_free_put_context; |
7397 | + } |
7398 | |
7399 | - event_file = fget_light(err, &fput_needed2); |
7400 | - if (!event_file) |
7401 | + event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR); |
7402 | + if (IS_ERR(event_file)) { |
7403 | + err = PTR_ERR(event_file); |
7404 | goto err_free_put_context; |
7405 | - |
7406 | - if (flags & PERF_FLAG_FD_OUTPUT) { |
7407 | - err = perf_event_set_output(event, group_fd); |
7408 | - if (err) |
7409 | - goto err_fput_free_put_context; |
7410 | } |
7411 | |
7412 | event->filp = event_file; |
7413 | @@ -4892,19 +4949,17 @@ SYSCALL_DEFINE5(perf_event_open, |
7414 | list_add_tail(&event->owner_entry, ¤t->perf_event_list); |
7415 | mutex_unlock(¤t->perf_event_mutex); |
7416 | |
7417 | -err_fput_free_put_context: |
7418 | - fput_light(event_file, fput_needed2); |
7419 | + fput_light(group_file, fput_needed); |
7420 | + fd_install(event_fd, event_file); |
7421 | + return event_fd; |
7422 | |
7423 | err_free_put_context: |
7424 | - if (err < 0) |
7425 | - free_event(event); |
7426 | - |
7427 | + free_event(event); |
7428 | err_put_context: |
7429 | - if (err < 0) |
7430 | - put_ctx(ctx); |
7431 | - |
7432 | fput_light(group_file, fput_needed); |
7433 | - |
7434 | + put_ctx(ctx); |
7435 | +err_fd: |
7436 | + put_unused_fd(event_fd); |
7437 | return err; |
7438 | } |
7439 | |
7440 | diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c |
7441 | index 00d1fda..ad72342 100644 |
7442 | --- a/kernel/posix-timers.c |
7443 | +++ b/kernel/posix-timers.c |
7444 | @@ -559,14 +559,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, |
7445 | new_timer->it_id = (timer_t) new_timer_id; |
7446 | new_timer->it_clock = which_clock; |
7447 | new_timer->it_overrun = -1; |
7448 | - error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer)); |
7449 | - if (error) |
7450 | - goto out; |
7451 | |
7452 | - /* |
7453 | - * return the timer_id now. The next step is hard to |
7454 | - * back out if there is an error. |
7455 | - */ |
7456 | if (copy_to_user(created_timer_id, |
7457 | &new_timer_id, sizeof (new_timer_id))) { |
7458 | error = -EFAULT; |
7459 | @@ -597,6 +590,10 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock, |
7460 | new_timer->sigq->info.si_tid = new_timer->it_id; |
7461 | new_timer->sigq->info.si_code = SI_TIMER; |
7462 | |
7463 | + error = CLOCK_DISPATCH(which_clock, timer_create, (new_timer)); |
7464 | + if (error) |
7465 | + goto out; |
7466 | + |
7467 | spin_lock_irq(¤t->sighand->siglock); |
7468 | new_timer->it_signal = current->signal; |
7469 | list_add(&new_timer->list, ¤t->signal->posix_timers); |
7470 | diff --git a/kernel/signal.c b/kernel/signal.c |
7471 | index dbd7fe0..48f2130 100644 |
7472 | --- a/kernel/signal.c |
7473 | +++ b/kernel/signal.c |
7474 | @@ -642,7 +642,7 @@ static inline bool si_fromuser(const struct siginfo *info) |
7475 | static int check_kill_permission(int sig, struct siginfo *info, |
7476 | struct task_struct *t) |
7477 | { |
7478 | - const struct cred *cred = current_cred(), *tcred; |
7479 | + const struct cred *cred, *tcred; |
7480 | struct pid *sid; |
7481 | int error; |
7482 | |
7483 | @@ -656,8 +656,10 @@ static int check_kill_permission(int sig, struct siginfo *info, |
7484 | if (error) |
7485 | return error; |
7486 | |
7487 | + cred = current_cred(); |
7488 | tcred = __task_cred(t); |
7489 | - if ((cred->euid ^ tcred->suid) && |
7490 | + if (!same_thread_group(current, t) && |
7491 | + (cred->euid ^ tcred->suid) && |
7492 | (cred->euid ^ tcred->uid) && |
7493 | (cred->uid ^ tcred->suid) && |
7494 | (cred->uid ^ tcred->uid) && |
7495 | diff --git a/lib/idr.c b/lib/idr.c |
7496 | index 2eb1dca..0d74f6b 100644 |
7497 | --- a/lib/idr.c |
7498 | +++ b/lib/idr.c |
7499 | @@ -445,6 +445,7 @@ EXPORT_SYMBOL(idr_remove); |
7500 | void idr_remove_all(struct idr *idp) |
7501 | { |
7502 | int n, id, max; |
7503 | + int bt_mask; |
7504 | struct idr_layer *p; |
7505 | struct idr_layer *pa[MAX_LEVEL]; |
7506 | struct idr_layer **paa = &pa[0]; |
7507 | @@ -462,8 +463,10 @@ void idr_remove_all(struct idr *idp) |
7508 | p = p->ary[(id >> n) & IDR_MASK]; |
7509 | } |
7510 | |
7511 | + bt_mask = id; |
7512 | id += 1 << n; |
7513 | - while (n < fls(id)) { |
7514 | + /* Get the highest bit that the above add changed from 0->1. */ |
7515 | + while (n < fls(id ^ bt_mask)) { |
7516 | if (p) |
7517 | free_layer(p); |
7518 | n += IDR_BITS; |
7519 | diff --git a/mm/filemap.c b/mm/filemap.c |
7520 | index 140ebda..3760bdc 100644 |
7521 | --- a/mm/filemap.c |
7522 | +++ b/mm/filemap.c |
7523 | @@ -441,7 +441,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, |
7524 | /* |
7525 | * Splice_read and readahead add shmem/tmpfs pages into the page cache |
7526 | * before shmem_readpage has a chance to mark them as SwapBacked: they |
7527 | - * need to go on the active_anon lru below, and mem_cgroup_cache_charge |
7528 | + * need to go on the anon lru below, and mem_cgroup_cache_charge |
7529 | * (called in add_to_page_cache) needs to know where they're going too. |
7530 | */ |
7531 | if (mapping_cap_swap_backed(mapping)) |
7532 | @@ -452,7 +452,7 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping, |
7533 | if (page_is_file_cache(page)) |
7534 | lru_cache_add_file(page); |
7535 | else |
7536 | - lru_cache_add_active_anon(page); |
7537 | + lru_cache_add_anon(page); |
7538 | } |
7539 | return ret; |
7540 | } |
7541 | @@ -1099,6 +1099,12 @@ page_not_up_to_date_locked: |
7542 | } |
7543 | |
7544 | readpage: |
7545 | + /* |
7546 | + * A previous I/O error may have been due to temporary |
7547 | + * failures, eg. multipath errors. |
7548 | + * PG_error will be set again if readpage fails. |
7549 | + */ |
7550 | + ClearPageError(page); |
7551 | /* Start the actual read. The read will unlock the page. */ |
7552 | error = mapping->a_ops->readpage(filp, page); |
7553 | |
7554 | diff --git a/mm/slub.c b/mm/slub.c |
7555 | index d2a54fe..6cf6be7 100644 |
7556 | --- a/mm/slub.c |
7557 | +++ b/mm/slub.c |
7558 | @@ -2141,7 +2141,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s) |
7559 | |
7560 | for_each_node_state(node, N_NORMAL_MEMORY) { |
7561 | struct kmem_cache_node *n = s->node[node]; |
7562 | - if (n && n != &s->local_node) |
7563 | + if (n) |
7564 | kmem_cache_free(kmalloc_caches, n); |
7565 | s->node[node] = NULL; |
7566 | } |
7567 | @@ -2150,33 +2150,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s) |
7568 | static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags) |
7569 | { |
7570 | int node; |
7571 | - int local_node; |
7572 | - |
7573 | - if (slab_state >= UP && (s < kmalloc_caches || |
7574 | - s >= kmalloc_caches + KMALLOC_CACHES)) |
7575 | - local_node = page_to_nid(virt_to_page(s)); |
7576 | - else |
7577 | - local_node = 0; |
7578 | |
7579 | for_each_node_state(node, N_NORMAL_MEMORY) { |
7580 | struct kmem_cache_node *n; |
7581 | |
7582 | - if (local_node == node) |
7583 | - n = &s->local_node; |
7584 | - else { |
7585 | - if (slab_state == DOWN) { |
7586 | - early_kmem_cache_node_alloc(gfpflags, node); |
7587 | - continue; |
7588 | - } |
7589 | - n = kmem_cache_alloc_node(kmalloc_caches, |
7590 | - gfpflags, node); |
7591 | - |
7592 | - if (!n) { |
7593 | - free_kmem_cache_nodes(s); |
7594 | - return 0; |
7595 | - } |
7596 | + if (slab_state == DOWN) { |
7597 | + early_kmem_cache_node_alloc(gfpflags, node); |
7598 | + continue; |
7599 | + } |
7600 | + n = kmem_cache_alloc_node(kmalloc_caches, |
7601 | + gfpflags, node); |
7602 | |
7603 | + if (!n) { |
7604 | + free_kmem_cache_nodes(s); |
7605 | + return 0; |
7606 | } |
7607 | + |
7608 | s->node[node] = n; |
7609 | init_kmem_cache_node(n, s); |
7610 | } |
7611 | diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig |
7612 | index a952b7f..334c359 100644 |
7613 | --- a/net/mac80211/Kconfig |
7614 | +++ b/net/mac80211/Kconfig |
7615 | @@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211" |
7616 | |
7617 | if MAC80211 != n |
7618 | |
7619 | +config MAC80211_HAS_RC |
7620 | + def_bool n |
7621 | + |
7622 | config MAC80211_RC_PID |
7623 | bool "PID controller based rate control algorithm" if EMBEDDED |
7624 | + select MAC80211_HAS_RC |
7625 | ---help--- |
7626 | This option enables a TX rate control algorithm for |
7627 | mac80211 that uses a PID controller to select the TX |
7628 | @@ -24,12 +28,14 @@ config MAC80211_RC_PID |
7629 | |
7630 | config MAC80211_RC_MINSTREL |
7631 | bool "Minstrel" if EMBEDDED |
7632 | + select MAC80211_HAS_RC |
7633 | default y |
7634 | ---help--- |
7635 | This option enables the 'minstrel' TX rate control algorithm |
7636 | |
7637 | choice |
7638 | prompt "Default rate control algorithm" |
7639 | + depends on MAC80211_HAS_RC |
7640 | default MAC80211_RC_DEFAULT_MINSTREL |
7641 | ---help--- |
7642 | This option selects the default rate control algorithm |
7643 | @@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT |
7644 | |
7645 | endif |
7646 | |
7647 | +comment "Some wireless drivers require a rate control algorithm" |
7648 | + depends on MAC80211_HAS_RC=n |
7649 | + |
7650 | config MAC80211_MESH |
7651 | bool "Enable mac80211 mesh networking (pre-802.11s) support" |
7652 | depends on MAC80211 && EXPERIMENTAL |
7653 | diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c |
7654 | index edc872e..0d1811b 100644 |
7655 | --- a/net/mac80211/cfg.c |
7656 | +++ b/net/mac80211/cfg.c |
7657 | @@ -97,9 +97,6 @@ static int ieee80211_change_iface(struct wiphy *wiphy, |
7658 | params->mesh_id_len, |
7659 | params->mesh_id); |
7660 | |
7661 | - if (sdata->vif.type != NL80211_IFTYPE_MONITOR || !flags) |
7662 | - return 0; |
7663 | - |
7664 | if (type == NL80211_IFTYPE_AP_VLAN && |
7665 | params && params->use_4addr == 0) |
7666 | rcu_assign_pointer(sdata->u.vlan.sta, NULL); |
7667 | @@ -107,7 +104,9 @@ static int ieee80211_change_iface(struct wiphy *wiphy, |
7668 | params && params->use_4addr >= 0) |
7669 | sdata->u.mgd.use_4addr = params->use_4addr; |
7670 | |
7671 | - sdata->u.mntr_flags = *flags; |
7672 | + if (sdata->vif.type == NL80211_IFTYPE_MONITOR && flags) |
7673 | + sdata->u.mntr_flags = *flags; |
7674 | + |
7675 | return 0; |
7676 | } |
7677 | |
7678 | diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c |
7679 | index 875c8de..1349a09 100644 |
7680 | --- a/net/mac80211/mlme.c |
7681 | +++ b/net/mac80211/mlme.c |
7682 | @@ -1530,9 +1530,45 @@ static void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, |
7683 | mutex_unlock(&ifmgd->mtx); |
7684 | |
7685 | if (skb->len >= 24 + 2 /* mgmt + deauth reason */ && |
7686 | - (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) |
7687 | - cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
7688 | + (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DEAUTH) { |
7689 | + struct ieee80211_local *local = sdata->local; |
7690 | + struct ieee80211_work *wk; |
7691 | + |
7692 | + mutex_lock(&local->work_mtx); |
7693 | + list_for_each_entry(wk, &local->work_list, list) { |
7694 | + if (wk->sdata != sdata) |
7695 | + continue; |
7696 | + |
7697 | + if (wk->type != IEEE80211_WORK_ASSOC) |
7698 | + continue; |
7699 | + |
7700 | + if (memcmp(mgmt->bssid, wk->filter_ta, ETH_ALEN)) |
7701 | + continue; |
7702 | + if (memcmp(mgmt->sa, wk->filter_ta, ETH_ALEN)) |
7703 | + continue; |
7704 | |
7705 | + /* |
7706 | + * Printing the message only here means we can't |
7707 | + * spuriously print it, but it also means that it |
7708 | + * won't be printed when the frame comes in before |
7709 | + * we even tried to associate or in similar cases. |
7710 | + * |
7711 | + * Ultimately, I suspect cfg80211 should print the |
7712 | + * messages instead. |
7713 | + */ |
7714 | + printk(KERN_DEBUG |
7715 | + "%s: deauthenticated from %pM (Reason: %u)\n", |
7716 | + sdata->name, mgmt->bssid, |
7717 | + le16_to_cpu(mgmt->u.deauth.reason_code)); |
7718 | + |
7719 | + list_del_rcu(&wk->list); |
7720 | + free_work(wk); |
7721 | + break; |
7722 | + } |
7723 | + mutex_unlock(&local->work_mtx); |
7724 | + |
7725 | + cfg80211_send_deauth(sdata->dev, (u8 *)mgmt, skb->len); |
7726 | + } |
7727 | out: |
7728 | kfree_skb(skb); |
7729 | } |
7730 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
7731 | index 04ea07f..1946f6b 100644 |
7732 | --- a/net/mac80211/rx.c |
7733 | +++ b/net/mac80211/rx.c |
7734 | @@ -1414,7 +1414,8 @@ ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx) |
7735 | return res; |
7736 | |
7737 | if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) { |
7738 | - if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && |
7739 | + if (unlikely(!ieee80211_has_protected(fc) && |
7740 | + ieee80211_is_unicast_robust_mgmt_frame(rx->skb) && |
7741 | rx->key)) |
7742 | return -EACCES; |
7743 | /* BIP does not use Protected field, so need to check MMIE */ |
7744 | diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c |
7745 | index cfc473e..d0716b9 100644 |
7746 | --- a/net/mac80211/tx.c |
7747 | +++ b/net/mac80211/tx.c |
7748 | @@ -584,7 +584,8 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) |
7749 | struct ieee80211_hdr *hdr = (void *)tx->skb->data; |
7750 | struct ieee80211_supported_band *sband; |
7751 | struct ieee80211_rate *rate; |
7752 | - int i, len; |
7753 | + int i; |
7754 | + u32 len; |
7755 | bool inval = false, rts = false, short_preamble = false; |
7756 | struct ieee80211_tx_rate_control txrc; |
7757 | u32 sta_flags; |
7758 | @@ -593,7 +594,7 @@ ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) |
7759 | |
7760 | sband = tx->local->hw.wiphy->bands[tx->channel->band]; |
7761 | |
7762 | - len = min_t(int, tx->skb->len + FCS_LEN, |
7763 | + len = min_t(u32, tx->skb->len + FCS_LEN, |
7764 | tx->local->hw.wiphy->frag_threshold); |
7765 | |
7766 | /* set up the tx rate control struct we give the RC algo */ |
7767 | diff --git a/scripts/kconfig/Makefile b/scripts/kconfig/Makefile |
7768 | index 186c466..9842611 100644 |
7769 | --- a/scripts/kconfig/Makefile |
7770 | +++ b/scripts/kconfig/Makefile |
7771 | @@ -208,7 +208,7 @@ HOSTCFLAGS_zconf.tab.o := -I$(src) |
7772 | HOSTLOADLIBES_qconf = $(KC_QT_LIBS) -ldl |
7773 | HOSTCXXFLAGS_qconf.o = $(KC_QT_CFLAGS) -D LKC_DIRECT_LINK |
7774 | |
7775 | -HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` |
7776 | +HOSTLOADLIBES_gconf = `pkg-config --libs gtk+-2.0 gmodule-2.0 libglade-2.0` -ldl |
7777 | HOSTCFLAGS_gconf.o = `pkg-config --cflags gtk+-2.0 gmodule-2.0 libglade-2.0` \ |
7778 | -D LKC_DIRECT_LINK |
7779 | |
7780 | diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c |
7781 | index a2ff861..e9d98be 100644 |
7782 | --- a/sound/core/pcm_lib.c |
7783 | +++ b/sound/core/pcm_lib.c |
7784 | @@ -345,7 +345,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, |
7785 | new_hw_ptr = hw_base + pos; |
7786 | } |
7787 | __delta: |
7788 | - delta = (new_hw_ptr - old_hw_ptr) % runtime->boundary; |
7789 | + delta = new_hw_ptr - old_hw_ptr; |
7790 | + if (delta < 0) |
7791 | + delta += runtime->boundary; |
7792 | if (xrun_debug(substream, in_interrupt ? |
7793 | XRUN_DEBUG_PERIODUPDATE : XRUN_DEBUG_HWPTRUPDATE)) { |
7794 | char name[16]; |
7795 | @@ -439,8 +441,13 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream, |
7796 | snd_pcm_playback_silence(substream, new_hw_ptr); |
7797 | |
7798 | if (in_interrupt) { |
7799 | - runtime->hw_ptr_interrupt = new_hw_ptr - |
7800 | - (new_hw_ptr % runtime->period_size); |
7801 | + delta = new_hw_ptr - runtime->hw_ptr_interrupt; |
7802 | + if (delta < 0) |
7803 | + delta += runtime->boundary; |
7804 | + delta -= (snd_pcm_uframes_t)delta % runtime->period_size; |
7805 | + runtime->hw_ptr_interrupt += delta; |
7806 | + if (runtime->hw_ptr_interrupt >= runtime->boundary) |
7807 | + runtime->hw_ptr_interrupt -= runtime->boundary; |
7808 | } |
7809 | runtime->hw_ptr_base = hw_base; |
7810 | runtime->status->hw_ptr = new_hw_ptr; |
7811 | diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c |
7812 | index 20b5982..d124e95 100644 |
7813 | --- a/sound/core/pcm_native.c |
7814 | +++ b/sound/core/pcm_native.c |
7815 | @@ -27,7 +27,6 @@ |
7816 | #include <linux/pm_qos_params.h> |
7817 | #include <linux/uio.h> |
7818 | #include <linux/dma-mapping.h> |
7819 | -#include <linux/math64.h> |
7820 | #include <sound/core.h> |
7821 | #include <sound/control.h> |
7822 | #include <sound/info.h> |
7823 | @@ -370,38 +369,6 @@ static int period_to_usecs(struct snd_pcm_runtime *runtime) |
7824 | return usecs; |
7825 | } |
7826 | |
7827 | -static int calc_boundary(struct snd_pcm_runtime *runtime) |
7828 | -{ |
7829 | - u_int64_t boundary; |
7830 | - |
7831 | - boundary = (u_int64_t)runtime->buffer_size * |
7832 | - (u_int64_t)runtime->period_size; |
7833 | -#if BITS_PER_LONG < 64 |
7834 | - /* try to find lowest common multiple for buffer and period */ |
7835 | - if (boundary > LONG_MAX - runtime->buffer_size) { |
7836 | - u_int32_t remainder = -1; |
7837 | - u_int32_t divident = runtime->buffer_size; |
7838 | - u_int32_t divisor = runtime->period_size; |
7839 | - while (remainder) { |
7840 | - remainder = divident % divisor; |
7841 | - if (remainder) { |
7842 | - divident = divisor; |
7843 | - divisor = remainder; |
7844 | - } |
7845 | - } |
7846 | - boundary = div_u64(boundary, divisor); |
7847 | - if (boundary > LONG_MAX - runtime->buffer_size) |
7848 | - return -ERANGE; |
7849 | - } |
7850 | -#endif |
7851 | - if (boundary == 0) |
7852 | - return -ERANGE; |
7853 | - runtime->boundary = boundary; |
7854 | - while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size) |
7855 | - runtime->boundary *= 2; |
7856 | - return 0; |
7857 | -} |
7858 | - |
7859 | static int snd_pcm_hw_params(struct snd_pcm_substream *substream, |
7860 | struct snd_pcm_hw_params *params) |
7861 | { |
7862 | @@ -477,9 +444,9 @@ static int snd_pcm_hw_params(struct snd_pcm_substream *substream, |
7863 | runtime->stop_threshold = runtime->buffer_size; |
7864 | runtime->silence_threshold = 0; |
7865 | runtime->silence_size = 0; |
7866 | - err = calc_boundary(runtime); |
7867 | - if (err < 0) |
7868 | - goto _error; |
7869 | + runtime->boundary = runtime->buffer_size; |
7870 | + while (runtime->boundary * 2 <= LONG_MAX - runtime->buffer_size) |
7871 | + runtime->boundary *= 2; |
7872 | |
7873 | snd_pcm_timer_resolution_change(substream); |
7874 | runtime->status->state = SNDRV_PCM_STATE_SETUP; |
7875 | diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c |
7876 | index cec6815..0bc24bd 100644 |
7877 | --- a/sound/pci/hda/hda_intel.c |
7878 | +++ b/sound/pci/hda/hda_intel.c |
7879 | @@ -2263,16 +2263,23 @@ static int azx_dev_free(struct snd_device *device) |
7880 | * white/black-listing for position_fix |
7881 | */ |
7882 | static struct snd_pci_quirk position_fix_list[] __devinitdata = { |
7883 | + SND_PCI_QUIRK(0x1025, 0x009f, "Acer Aspire 5110", POS_FIX_LPIB), |
7884 | SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), |
7885 | SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), |
7886 | SND_PCI_QUIRK(0x1028, 0x01f6, "Dell Latitude 131L", POS_FIX_LPIB), |
7887 | SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), |
7888 | - SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), |
7889 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), |
7890 | + SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), |
7891 | + SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), |
7892 | + SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), |
7893 | + SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), |
7894 | + SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), |
7895 | + SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB), |
7896 | SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB), |
7897 | SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB), |
7898 | SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB), |
7899 | SND_PCI_QUIRK(0x1565, 0x8218, "Biostar Microtech", POS_FIX_LPIB), |
7900 | + SND_PCI_QUIRK(0x1849, 0x0888, "775Dual-VSTA", POS_FIX_LPIB), |
7901 | SND_PCI_QUIRK(0x8086, 0x2503, "DG965OT AAD63733-203", POS_FIX_LPIB), |
7902 | SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB), |
7903 | {} |
7904 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
7905 | index 886d8e4..3273765 100644 |
7906 | --- a/sound/pci/hda/patch_realtek.c |
7907 | +++ b/sound/pci/hda/patch_realtek.c |
7908 | @@ -9392,6 +9392,7 @@ static struct snd_pci_quirk alc882_ssid_cfg_tbl[] = { |
7909 | SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_IMAC24), |
7910 | SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_IMAC24), |
7911 | SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC885_MBP3), |
7912 | + SND_PCI_QUIRK(0x106b, 0x3000, "iMac", ALC889A_MB31), |
7913 | SND_PCI_QUIRK(0x106b, 0x3600, "Macbook 3,1", ALC889A_MB31), |
7914 | SND_PCI_QUIRK(0x106b, 0x3800, "MacbookPro 4,1", ALC885_MBP3), |
7915 | SND_PCI_QUIRK(0x106b, 0x3e00, "iMac 24 Aluminum", ALC885_IMAC24), |
7916 | diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c |
7917 | index a0e06d8..f1e7bab 100644 |
7918 | --- a/sound/pci/hda/patch_sigmatel.c |
7919 | +++ b/sound/pci/hda/patch_sigmatel.c |
7920 | @@ -2078,12 +2078,12 @@ static struct snd_pci_quirk stac927x_cfg_tbl[] = { |
7921 | SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_INTEL, 0xff00, 0x2000, |
7922 | "Intel D965", STAC_D965_3ST), |
7923 | /* Dell 3 stack systems */ |
7924 | - SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_3ST), |
7925 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01dd, "Dell Dimension E520", STAC_DELL_3ST), |
7926 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01ed, "Dell ", STAC_DELL_3ST), |
7927 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f4, "Dell ", STAC_DELL_3ST), |
7928 | /* Dell 3 stack systems with verb table in BIOS */ |
7929 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f3, "Dell Inspiron 1420", STAC_DELL_BIOS), |
7930 | + SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f7, "Dell XPS M1730", STAC_DELL_BIOS), |
7931 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0227, "Dell Vostro 1400 ", STAC_DELL_BIOS), |
7932 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022e, "Dell ", STAC_DELL_BIOS), |
7933 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x022f, "Dell Inspiron 1525", STAC_DELL_BIOS), |
7934 | diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c |
7935 | index 2e0772f..1e22141 100644 |
7936 | --- a/sound/soc/codecs/wm8350.c |
7937 | +++ b/sound/soc/codecs/wm8350.c |
7938 | @@ -424,8 +424,8 @@ static const struct soc_enum wm8350_enum[] = { |
7939 | SOC_ENUM_SINGLE(WM8350_INPUT_MIXER_VOLUME, 15, 2, wm8350_lr), |
7940 | }; |
7941 | |
7942 | -static DECLARE_TLV_DB_LINEAR(pre_amp_tlv, -1200, 3525); |
7943 | -static DECLARE_TLV_DB_LINEAR(out_pga_tlv, -5700, 600); |
7944 | +static DECLARE_TLV_DB_SCALE(pre_amp_tlv, -1200, 3525, 0); |
7945 | +static DECLARE_TLV_DB_SCALE(out_pga_tlv, -5700, 600, 0); |
7946 | static DECLARE_TLV_DB_SCALE(dac_pcm_tlv, -7163, 36, 1); |
7947 | static DECLARE_TLV_DB_SCALE(adc_pcm_tlv, -12700, 50, 1); |
7948 | static DECLARE_TLV_DB_SCALE(out_mix_tlv, -1500, 300, 1); |
7949 | diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c |
7950 | index 6acc885..a9fa46c 100644 |
7951 | --- a/sound/soc/codecs/wm8400.c |
7952 | +++ b/sound/soc/codecs/wm8400.c |
7953 | @@ -107,21 +107,21 @@ static void wm8400_codec_reset(struct snd_soc_codec *codec) |
7954 | wm8400_reset_codec_reg_cache(wm8400->wm8400); |
7955 | } |
7956 | |
7957 | -static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600); |
7958 | +static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0); |
7959 | |
7960 | -static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000); |
7961 | +static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0); |
7962 | |
7963 | -static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, -2100, 0); |
7964 | +static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0); |
7965 | |
7966 | -static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600); |
7967 | +static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0); |
7968 | |
7969 | -static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0); |
7970 | +static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0); |
7971 | |
7972 | -static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0); |
7973 | +static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0); |
7974 | |
7975 | -static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763); |
7976 | +static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0); |
7977 | |
7978 | -static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0); |
7979 | +static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0); |
7980 | |
7981 | static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol, |
7982 | struct snd_ctl_elem_value *ucontrol) |
7983 | @@ -440,7 +440,7 @@ static int outmixer_event (struct snd_soc_dapm_widget *w, |
7984 | /* INMIX dB values */ |
7985 | static const unsigned int in_mix_tlv[] = { |
7986 | TLV_DB_RANGE_HEAD(1), |
7987 | - 0,7, TLV_DB_LINEAR_ITEM(-1200, 600), |
7988 | + 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0), |
7989 | }; |
7990 | |
7991 | /* Left In PGA Connections */ |
7992 | diff --git a/sound/soc/codecs/wm8990.c b/sound/soc/codecs/wm8990.c |
7993 | index 831f473..7c6b00e 100644 |
7994 | --- a/sound/soc/codecs/wm8990.c |
7995 | +++ b/sound/soc/codecs/wm8990.c |
7996 | @@ -111,21 +111,21 @@ static const u16 wm8990_reg[] = { |
7997 | |
7998 | #define wm8990_reset(c) snd_soc_write(c, WM8990_RESET, 0) |
7999 | |
8000 | -static const DECLARE_TLV_DB_LINEAR(rec_mix_tlv, -1500, 600); |
8001 | +static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0); |
8002 | |
8003 | -static const DECLARE_TLV_DB_LINEAR(in_pga_tlv, -1650, 3000); |
8004 | +static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0); |
8005 | |
8006 | -static const DECLARE_TLV_DB_LINEAR(out_mix_tlv, 0, -2100); |
8007 | +static const DECLARE_TLV_DB_SCALE(out_mix_tlv, 0, -2100, 0); |
8008 | |
8009 | -static const DECLARE_TLV_DB_LINEAR(out_pga_tlv, -7300, 600); |
8010 | +static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0); |
8011 | |
8012 | -static const DECLARE_TLV_DB_LINEAR(out_omix_tlv, -600, 0); |
8013 | +static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0); |
8014 | |
8015 | -static const DECLARE_TLV_DB_LINEAR(out_dac_tlv, -7163, 0); |
8016 | +static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0); |
8017 | |
8018 | -static const DECLARE_TLV_DB_LINEAR(in_adc_tlv, -7163, 1763); |
8019 | +static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0); |
8020 | |
8021 | -static const DECLARE_TLV_DB_LINEAR(out_sidetone_tlv, -3600, 0); |
8022 | +static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0); |
8023 | |
8024 | static int wm899x_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol, |
8025 | struct snd_ctl_elem_value *ucontrol) |
8026 | @@ -451,7 +451,7 @@ static int outmixer_event(struct snd_soc_dapm_widget *w, |
8027 | /* INMIX dB values */ |
8028 | static const unsigned int in_mix_tlv[] = { |
8029 | TLV_DB_RANGE_HEAD(1), |
8030 | - 0, 7, TLV_DB_LINEAR_ITEM(-1200, 600), |
8031 | + 0, 7, TLV_DB_SCALE_ITEM(-1200, 600, 0), |
8032 | }; |
8033 | |
8034 | /* Left In PGA Connections */ |
8035 | diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c |
8036 | index 2b31ac6..803aeef 100644 |
8037 | --- a/sound/soc/imx/imx-pcm-dma-mx2.c |
8038 | +++ b/sound/soc/imx/imx-pcm-dma-mx2.c |
8039 | @@ -73,7 +73,8 @@ static void snd_imx_dma_err_callback(int channel, void *data, int err) |
8040 | { |
8041 | struct snd_pcm_substream *substream = data; |
8042 | struct snd_soc_pcm_runtime *rtd = substream->private_data; |
8043 | - struct imx_pcm_dma_params *dma_params = rtd->dai->cpu_dai->dma_data; |
8044 | + struct imx_pcm_dma_params *dma_params = |
8045 | + snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
8046 | struct snd_pcm_runtime *runtime = substream->runtime; |
8047 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
8048 | int ret; |
8049 | @@ -102,7 +103,7 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream) |
8050 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
8051 | int ret; |
8052 | |
8053 | - dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream); |
8054 | + dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
8055 | |
8056 | iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH); |
8057 | if (iprtd->dma < 0) { |
8058 | @@ -212,7 +213,7 @@ static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream) |
8059 | struct imx_pcm_runtime_data *iprtd = runtime->private_data; |
8060 | int err; |
8061 | |
8062 | - dma_params = snd_soc_get_dma_data(rtd->dai->cpu_dai, substream); |
8063 | + dma_params = snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream); |
8064 | |
8065 | iprtd->substream = substream; |
8066 | iprtd->buf = (unsigned int *)substream->dma_buffer.area; |
8067 | diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c |
8068 | index 8977317..5fd55cd 100644 |
8069 | --- a/tools/perf/bench/mem-memcpy.c |
8070 | +++ b/tools/perf/bench/mem-memcpy.c |
8071 | @@ -24,7 +24,7 @@ |
8072 | |
8073 | static const char *length_str = "1MB"; |
8074 | static const char *routine = "default"; |
8075 | -static int use_clock = 0; |
8076 | +static bool use_clock = false; |
8077 | static int clock_fd; |
8078 | |
8079 | static const struct option options[] = { |
8080 | diff --git a/tools/perf/bench/sched-messaging.c b/tools/perf/bench/sched-messaging.c |
8081 | index 81cee78..da1b2e9 100644 |
8082 | --- a/tools/perf/bench/sched-messaging.c |
8083 | +++ b/tools/perf/bench/sched-messaging.c |
8084 | @@ -31,9 +31,9 @@ |
8085 | |
8086 | #define DATASIZE 100 |
8087 | |
8088 | -static int use_pipes = 0; |
8089 | +static bool use_pipes = false; |
8090 | static unsigned int loops = 100; |
8091 | -static unsigned int thread_mode = 0; |
8092 | +static bool thread_mode = false; |
8093 | static unsigned int num_groups = 10; |
8094 | |
8095 | struct sender_context { |
8096 | diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c |
8097 | index 6ad7148..94a814c 100644 |
8098 | --- a/tools/perf/builtin-annotate.c |
8099 | +++ b/tools/perf/builtin-annotate.c |
8100 | @@ -29,11 +29,11 @@ |
8101 | |
8102 | static char const *input_name = "perf.data"; |
8103 | |
8104 | -static int force; |
8105 | +static bool force; |
8106 | |
8107 | -static int full_paths; |
8108 | +static bool full_paths; |
8109 | |
8110 | -static int print_line; |
8111 | +static bool print_line; |
8112 | |
8113 | struct sym_hist { |
8114 | u64 sum; |
8115 | @@ -584,7 +584,7 @@ static const struct option options[] = { |
8116 | OPT_STRING('s', "symbol", &sym_hist_filter, "symbol", |
8117 | "symbol to annotate"), |
8118 | OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), |
8119 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8120 | + OPT_INCR('v', "verbose", &verbose, |
8121 | "be more verbose (show symbol address, etc)"), |
8122 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
8123 | "dump raw trace in ASCII"), |
8124 | diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c |
8125 | index 30a05f5..f8e3d18 100644 |
8126 | --- a/tools/perf/builtin-buildid-cache.c |
8127 | +++ b/tools/perf/builtin-buildid-cache.c |
8128 | @@ -27,7 +27,7 @@ static const struct option buildid_cache_options[] = { |
8129 | "file list", "file(s) to add"), |
8130 | OPT_STRING('r', "remove", &remove_name_list_str, "file list", |
8131 | "file(s) to remove"), |
8132 | - OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose"), |
8133 | + OPT_INCR('v', "verbose", &verbose, "be more verbose"), |
8134 | OPT_END() |
8135 | }; |
8136 | |
8137 | diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c |
8138 | index d0675c0..af2ad8b 100644 |
8139 | --- a/tools/perf/builtin-buildid-list.c |
8140 | +++ b/tools/perf/builtin-buildid-list.c |
8141 | @@ -16,7 +16,7 @@ |
8142 | #include "util/symbol.h" |
8143 | |
8144 | static char const *input_name = "perf.data"; |
8145 | -static int force; |
8146 | +static bool force; |
8147 | static bool with_hits; |
8148 | |
8149 | static const char * const buildid_list_usage[] = { |
8150 | @@ -29,7 +29,7 @@ static const struct option options[] = { |
8151 | OPT_STRING('i', "input", &input_name, "file", |
8152 | "input file name"), |
8153 | OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), |
8154 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8155 | + OPT_INCR('v', "verbose", &verbose, |
8156 | "be more verbose"), |
8157 | OPT_END() |
8158 | }; |
8159 | diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c |
8160 | index 1ea15d8..3a1d94d 100644 |
8161 | --- a/tools/perf/builtin-diff.c |
8162 | +++ b/tools/perf/builtin-diff.c |
8163 | @@ -19,7 +19,7 @@ |
8164 | static char const *input_old = "perf.data.old", |
8165 | *input_new = "perf.data"; |
8166 | static char diff__default_sort_order[] = "dso,symbol"; |
8167 | -static int force; |
8168 | +static bool force; |
8169 | static bool show_displacement; |
8170 | |
8171 | static int perf_session__add_hist_entry(struct perf_session *self, |
8172 | @@ -188,7 +188,7 @@ static const char * const diff_usage[] = { |
8173 | }; |
8174 | |
8175 | static const struct option options[] = { |
8176 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8177 | + OPT_INCR('v', "verbose", &verbose, |
8178 | "be more verbose (show symbol address, etc)"), |
8179 | OPT_BOOLEAN('m', "displacement", &show_displacement, |
8180 | "Show position displacement relative to baseline"), |
8181 | diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c |
8182 | index 215b584..81e3ecc 100644 |
8183 | --- a/tools/perf/builtin-help.c |
8184 | +++ b/tools/perf/builtin-help.c |
8185 | @@ -29,7 +29,7 @@ enum help_format { |
8186 | HELP_FORMAT_WEB, |
8187 | }; |
8188 | |
8189 | -static int show_all = 0; |
8190 | +static bool show_all = false; |
8191 | static enum help_format help_format = HELP_FORMAT_MAN; |
8192 | static struct option builtin_help_options[] = { |
8193 | OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), |
8194 | diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c |
8195 | index e12c844..6c38e4f 100644 |
8196 | --- a/tools/perf/builtin-lock.c |
8197 | +++ b/tools/perf/builtin-lock.c |
8198 | @@ -744,7 +744,7 @@ static const char * const lock_usage[] = { |
8199 | |
8200 | static const struct option lock_options[] = { |
8201 | OPT_STRING('i', "input", &input_name, "file", "input file name"), |
8202 | - OPT_BOOLEAN('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), |
8203 | + OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"), |
8204 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"), |
8205 | OPT_END() |
8206 | }; |
8207 | diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c |
8208 | index 152d6c9..0562c50 100644 |
8209 | --- a/tools/perf/builtin-probe.c |
8210 | +++ b/tools/perf/builtin-probe.c |
8211 | @@ -162,7 +162,7 @@ static const char * const probe_usage[] = { |
8212 | }; |
8213 | |
8214 | static const struct option options[] = { |
8215 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8216 | + OPT_INCR('v', "verbose", &verbose, |
8217 | "be more verbose (show parsed arguments, etc)"), |
8218 | #ifndef NO_DWARF_SUPPORT |
8219 | OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, |
8220 | diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c |
8221 | index f1411e9..d2d1b02 100644 |
8222 | --- a/tools/perf/builtin-record.c |
8223 | +++ b/tools/perf/builtin-record.c |
8224 | @@ -39,19 +39,19 @@ static int output; |
8225 | static const char *output_name = "perf.data"; |
8226 | static int group = 0; |
8227 | static unsigned int realtime_prio = 0; |
8228 | -static int raw_samples = 0; |
8229 | -static int system_wide = 0; |
8230 | +static bool raw_samples = false; |
8231 | +static bool system_wide = false; |
8232 | static int profile_cpu = -1; |
8233 | static pid_t target_pid = -1; |
8234 | static pid_t child_pid = -1; |
8235 | -static int inherit = 1; |
8236 | -static int force = 0; |
8237 | -static int append_file = 0; |
8238 | -static int call_graph = 0; |
8239 | -static int inherit_stat = 0; |
8240 | -static int no_samples = 0; |
8241 | -static int sample_address = 0; |
8242 | -static int multiplex = 0; |
8243 | +static bool inherit = true; |
8244 | +static bool force = false; |
8245 | +static bool append_file = false; |
8246 | +static bool call_graph = false; |
8247 | +static bool inherit_stat = false; |
8248 | +static bool no_samples = false; |
8249 | +static bool sample_address = false; |
8250 | +static bool multiplex = false; |
8251 | static int multiplex_fd = -1; |
8252 | |
8253 | static long samples = 0; |
8254 | @@ -451,7 +451,7 @@ static int __cmd_record(int argc, const char **argv) |
8255 | rename(output_name, oldname); |
8256 | } |
8257 | } else { |
8258 | - append_file = 0; |
8259 | + append_file = false; |
8260 | } |
8261 | |
8262 | flags = O_CREAT|O_RDWR; |
8263 | @@ -676,7 +676,7 @@ static const struct option options[] = { |
8264 | "number of mmap data pages"), |
8265 | OPT_BOOLEAN('g', "call-graph", &call_graph, |
8266 | "do call-graph (stack chain/backtrace) recording"), |
8267 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8268 | + OPT_INCR('v', "verbose", &verbose, |
8269 | "be more verbose (show counter open errors, etc)"), |
8270 | OPT_BOOLEAN('s', "stat", &inherit_stat, |
8271 | "per thread counts"), |
8272 | diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c |
8273 | index f815de2..63fc64e 100644 |
8274 | --- a/tools/perf/builtin-report.c |
8275 | +++ b/tools/perf/builtin-report.c |
8276 | @@ -33,11 +33,11 @@ |
8277 | |
8278 | static char const *input_name = "perf.data"; |
8279 | |
8280 | -static int force; |
8281 | +static bool force; |
8282 | static bool hide_unresolved; |
8283 | static bool dont_use_callchains; |
8284 | |
8285 | -static int show_threads; |
8286 | +static bool show_threads; |
8287 | static struct perf_read_values show_threads_values; |
8288 | |
8289 | static char default_pretty_printing_style[] = "normal"; |
8290 | @@ -400,7 +400,7 @@ static const char * const report_usage[] = { |
8291 | static const struct option options[] = { |
8292 | OPT_STRING('i', "input", &input_name, "file", |
8293 | "input file name"), |
8294 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8295 | + OPT_INCR('v', "verbose", &verbose, |
8296 | "be more verbose (show symbol address, etc)"), |
8297 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
8298 | "dump raw trace in ASCII"), |
8299 | diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c |
8300 | index 4f5a03e..682783c 100644 |
8301 | --- a/tools/perf/builtin-sched.c |
8302 | +++ b/tools/perf/builtin-sched.c |
8303 | @@ -1790,7 +1790,7 @@ static const char * const sched_usage[] = { |
8304 | static const struct option sched_options[] = { |
8305 | OPT_STRING('i', "input", &input_name, "file", |
8306 | "input file name"), |
8307 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8308 | + OPT_INCR('v', "verbose", &verbose, |
8309 | "be more verbose (show symbol address, etc)"), |
8310 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
8311 | "dump raw trace in ASCII"), |
8312 | @@ -1805,7 +1805,7 @@ static const char * const latency_usage[] = { |
8313 | static const struct option latency_options[] = { |
8314 | OPT_STRING('s', "sort", &sort_order, "key[,key2...]", |
8315 | "sort by key(s): runtime, switch, avg, max"), |
8316 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8317 | + OPT_INCR('v', "verbose", &verbose, |
8318 | "be more verbose (show symbol address, etc)"), |
8319 | OPT_INTEGER('C', "CPU", &profile_cpu, |
8320 | "CPU to profile on"), |
8321 | @@ -1822,7 +1822,7 @@ static const char * const replay_usage[] = { |
8322 | static const struct option replay_options[] = { |
8323 | OPT_INTEGER('r', "repeat", &replay_repeat, |
8324 | "repeat the workload replay N times (-1: infinite)"), |
8325 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8326 | + OPT_INCR('v', "verbose", &verbose, |
8327 | "be more verbose (show symbol address, etc)"), |
8328 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
8329 | "dump raw trace in ASCII"), |
8330 | diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c |
8331 | index 95db31c..f7f4a88 100644 |
8332 | --- a/tools/perf/builtin-stat.c |
8333 | +++ b/tools/perf/builtin-stat.c |
8334 | @@ -66,16 +66,16 @@ static struct perf_event_attr default_attrs[] = { |
8335 | |
8336 | }; |
8337 | |
8338 | -static int system_wide = 0; |
8339 | +static bool system_wide = false; |
8340 | static unsigned int nr_cpus = 0; |
8341 | static int run_idx = 0; |
8342 | |
8343 | static int run_count = 1; |
8344 | -static int inherit = 1; |
8345 | -static int scale = 1; |
8346 | +static bool inherit = true; |
8347 | +static bool scale = true; |
8348 | static pid_t target_pid = -1; |
8349 | static pid_t child_pid = -1; |
8350 | -static int null_run = 0; |
8351 | +static bool null_run = false; |
8352 | |
8353 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; |
8354 | |
8355 | @@ -494,7 +494,7 @@ static const struct option options[] = { |
8356 | "system-wide collection from all CPUs"), |
8357 | OPT_BOOLEAN('c', "scale", &scale, |
8358 | "scale/normalize counters"), |
8359 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8360 | + OPT_INCR('v', "verbose", &verbose, |
8361 | "be more verbose (show counter open errors, etc)"), |
8362 | OPT_INTEGER('r', "repeat", &run_count, |
8363 | "repeat command and print average + stddev (max: 100)"), |
8364 | diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c |
8365 | index 0d4d8ff..1d7416d 100644 |
8366 | --- a/tools/perf/builtin-timechart.c |
8367 | +++ b/tools/perf/builtin-timechart.c |
8368 | @@ -43,7 +43,7 @@ static u64 turbo_frequency; |
8369 | |
8370 | static u64 first_time, last_time; |
8371 | |
8372 | -static int power_only; |
8373 | +static bool power_only; |
8374 | |
8375 | |
8376 | struct per_pid; |
8377 | diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c |
8378 | index 1f52932..4f94b10 100644 |
8379 | --- a/tools/perf/builtin-top.c |
8380 | +++ b/tools/perf/builtin-top.c |
8381 | @@ -57,7 +57,7 @@ |
8382 | |
8383 | static int fd[MAX_NR_CPUS][MAX_COUNTERS]; |
8384 | |
8385 | -static int system_wide = 0; |
8386 | +static bool system_wide = false; |
8387 | |
8388 | static int default_interval = 0; |
8389 | |
8390 | @@ -65,18 +65,18 @@ static int count_filter = 5; |
8391 | static int print_entries; |
8392 | |
8393 | static int target_pid = -1; |
8394 | -static int inherit = 0; |
8395 | +static bool inherit = false; |
8396 | static int profile_cpu = -1; |
8397 | static int nr_cpus = 0; |
8398 | static unsigned int realtime_prio = 0; |
8399 | -static int group = 0; |
8400 | +static bool group = false; |
8401 | static unsigned int page_size; |
8402 | static unsigned int mmap_pages = 16; |
8403 | static int freq = 1000; /* 1 KHz */ |
8404 | |
8405 | static int delay_secs = 2; |
8406 | -static int zero = 0; |
8407 | -static int dump_symtab = 0; |
8408 | +static bool zero = false; |
8409 | +static bool dump_symtab = false; |
8410 | |
8411 | static bool hide_kernel_symbols = false; |
8412 | static bool hide_user_symbols = false; |
8413 | @@ -169,7 +169,7 @@ static void sig_winch_handler(int sig __used) |
8414 | update_print_entries(&winsize); |
8415 | } |
8416 | |
8417 | -static void parse_source(struct sym_entry *syme) |
8418 | +static int parse_source(struct sym_entry *syme) |
8419 | { |
8420 | struct symbol *sym; |
8421 | struct sym_entry_source *source; |
8422 | @@ -180,12 +180,21 @@ static void parse_source(struct sym_entry *syme) |
8423 | u64 len; |
8424 | |
8425 | if (!syme) |
8426 | - return; |
8427 | + return -1; |
8428 | + |
8429 | + sym = sym_entry__symbol(syme); |
8430 | + map = syme->map; |
8431 | + |
8432 | + /* |
8433 | + * We can't annotate with just /proc/kallsyms |
8434 | + */ |
8435 | + if (map->dso->origin == DSO__ORIG_KERNEL) |
8436 | + return -1; |
8437 | |
8438 | if (syme->src == NULL) { |
8439 | syme->src = zalloc(sizeof(*source)); |
8440 | if (syme->src == NULL) |
8441 | - return; |
8442 | + return -1; |
8443 | pthread_mutex_init(&syme->src->lock, NULL); |
8444 | } |
8445 | |
8446 | @@ -195,9 +204,6 @@ static void parse_source(struct sym_entry *syme) |
8447 | pthread_mutex_lock(&source->lock); |
8448 | goto out_assign; |
8449 | } |
8450 | - |
8451 | - sym = sym_entry__symbol(syme); |
8452 | - map = syme->map; |
8453 | path = map->dso->long_name; |
8454 | |
8455 | len = sym->end - sym->start; |
8456 | @@ -209,7 +215,7 @@ static void parse_source(struct sym_entry *syme) |
8457 | |
8458 | file = popen(command, "r"); |
8459 | if (!file) |
8460 | - return; |
8461 | + return -1; |
8462 | |
8463 | pthread_mutex_lock(&source->lock); |
8464 | source->lines_tail = &source->lines; |
8465 | @@ -245,6 +251,7 @@ static void parse_source(struct sym_entry *syme) |
8466 | out_assign: |
8467 | sym_filter_entry = syme; |
8468 | pthread_mutex_unlock(&source->lock); |
8469 | + return 0; |
8470 | } |
8471 | |
8472 | static void __zero_source_counters(struct sym_entry *syme) |
8473 | @@ -839,7 +846,7 @@ static void handle_keypress(int c) |
8474 | display_weighted = ~display_weighted; |
8475 | break; |
8476 | case 'z': |
8477 | - zero = ~zero; |
8478 | + zero = !zero; |
8479 | break; |
8480 | default: |
8481 | break; |
8482 | @@ -990,7 +997,17 @@ static void event__process_sample(const event_t *self, |
8483 | if (sym_filter_entry_sched) { |
8484 | sym_filter_entry = sym_filter_entry_sched; |
8485 | sym_filter_entry_sched = NULL; |
8486 | - parse_source(sym_filter_entry); |
8487 | + if (parse_source(sym_filter_entry) < 0) { |
8488 | + struct symbol *sym = sym_entry__symbol(sym_filter_entry); |
8489 | + |
8490 | + pr_err("Can't annotate %s", sym->name); |
8491 | + if (sym_filter_entry->map->dso->origin == DSO__ORIG_KERNEL) { |
8492 | + pr_err(": No vmlinux file was found in the path:\n"); |
8493 | + vmlinux_path__fprintf(stderr); |
8494 | + } else |
8495 | + pr_err(".\n"); |
8496 | + exit(1); |
8497 | + } |
8498 | } |
8499 | |
8500 | syme = symbol__priv(al.sym); |
8501 | @@ -1296,7 +1313,7 @@ static const struct option options[] = { |
8502 | "display this many functions"), |
8503 | OPT_BOOLEAN('U', "hide_user_symbols", &hide_user_symbols, |
8504 | "hide user symbols"), |
8505 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8506 | + OPT_INCR('v', "verbose", &verbose, |
8507 | "be more verbose (show counter open errors, etc)"), |
8508 | OPT_END() |
8509 | }; |
8510 | diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c |
8511 | index 407041d..8fc50d8 100644 |
8512 | --- a/tools/perf/builtin-trace.c |
8513 | +++ b/tools/perf/builtin-trace.c |
8514 | @@ -505,7 +505,7 @@ static const char * const trace_usage[] = { |
8515 | static const struct option options[] = { |
8516 | OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, |
8517 | "dump raw trace in ASCII"), |
8518 | - OPT_BOOLEAN('v', "verbose", &verbose, |
8519 | + OPT_INCR('v', "verbose", &verbose, |
8520 | "be more verbose (show symbol address, etc)"), |
8521 | OPT_BOOLEAN('L', "Latency", &latency_format, |
8522 | "show latency attributes (irqs/preemption disabled, etc)"), |
8523 | diff --git a/tools/perf/util/debug.c b/tools/perf/util/debug.c |
8524 | index 0905600..666b1c2 100644 |
8525 | --- a/tools/perf/util/debug.c |
8526 | +++ b/tools/perf/util/debug.c |
8527 | @@ -12,7 +12,7 @@ |
8528 | #include "util.h" |
8529 | |
8530 | int verbose = 0; |
8531 | -int dump_trace = 0; |
8532 | +bool dump_trace = false; |
8533 | |
8534 | int eprintf(int level, const char *fmt, ...) |
8535 | { |
8536 | diff --git a/tools/perf/util/debug.h b/tools/perf/util/debug.h |
8537 | index c6c24c5..0f7af87 100644 |
8538 | --- a/tools/perf/util/debug.h |
8539 | +++ b/tools/perf/util/debug.h |
8540 | @@ -2,10 +2,11 @@ |
8541 | #ifndef __PERF_DEBUG_H |
8542 | #define __PERF_DEBUG_H |
8543 | |
8544 | +#include <stdbool.h> |
8545 | #include "event.h" |
8546 | |
8547 | extern int verbose; |
8548 | -extern int dump_trace; |
8549 | +extern bool dump_trace; |
8550 | |
8551 | int eprintf(int level, |
8552 | const char *fmt, ...) __attribute__((format(printf, 2, 3))); |
8553 | diff --git a/tools/perf/util/parse-options.c b/tools/perf/util/parse-options.c |
8554 | index efebd5b..bbe646b 100644 |
8555 | --- a/tools/perf/util/parse-options.c |
8556 | +++ b/tools/perf/util/parse-options.c |
8557 | @@ -49,6 +49,7 @@ static int get_value(struct parse_opt_ctx_t *p, |
8558 | break; |
8559 | /* FALLTHROUGH */ |
8560 | case OPTION_BOOLEAN: |
8561 | + case OPTION_INCR: |
8562 | case OPTION_BIT: |
8563 | case OPTION_SET_INT: |
8564 | case OPTION_SET_PTR: |
8565 | @@ -73,6 +74,10 @@ static int get_value(struct parse_opt_ctx_t *p, |
8566 | return 0; |
8567 | |
8568 | case OPTION_BOOLEAN: |
8569 | + *(bool *)opt->value = unset ? false : true; |
8570 | + return 0; |
8571 | + |
8572 | + case OPTION_INCR: |
8573 | *(int *)opt->value = unset ? 0 : *(int *)opt->value + 1; |
8574 | return 0; |
8575 | |
8576 | @@ -478,6 +483,7 @@ int usage_with_options_internal(const char * const *usagestr, |
8577 | case OPTION_GROUP: |
8578 | case OPTION_BIT: |
8579 | case OPTION_BOOLEAN: |
8580 | + case OPTION_INCR: |
8581 | case OPTION_SET_INT: |
8582 | case OPTION_SET_PTR: |
8583 | case OPTION_LONG: |
8584 | diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h |
8585 | index 948805a..b2da725 100644 |
8586 | --- a/tools/perf/util/parse-options.h |
8587 | +++ b/tools/perf/util/parse-options.h |
8588 | @@ -8,7 +8,8 @@ enum parse_opt_type { |
8589 | OPTION_GROUP, |
8590 | /* options with no arguments */ |
8591 | OPTION_BIT, |
8592 | - OPTION_BOOLEAN, /* _INCR would have been a better name */ |
8593 | + OPTION_BOOLEAN, |
8594 | + OPTION_INCR, |
8595 | OPTION_SET_INT, |
8596 | OPTION_SET_PTR, |
8597 | /* options with arguments (usually) */ |
8598 | @@ -95,6 +96,7 @@ struct option { |
8599 | #define OPT_GROUP(h) { .type = OPTION_GROUP, .help = (h) } |
8600 | #define OPT_BIT(s, l, v, h, b) { .type = OPTION_BIT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (b) } |
8601 | #define OPT_BOOLEAN(s, l, v, h) { .type = OPTION_BOOLEAN, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } |
8602 | +#define OPT_INCR(s, l, v, h) { .type = OPTION_INCR, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } |
8603 | #define OPT_SET_INT(s, l, v, h, i) { .type = OPTION_SET_INT, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (i) } |
8604 | #define OPT_SET_PTR(s, l, v, h, p) { .type = OPTION_SET_PTR, .short_name = (s), .long_name = (l), .value = (v), .help = (h), .defval = (p) } |
8605 | #define OPT_INTEGER(s, l, v, h) { .type = OPTION_INTEGER, .short_name = (s), .long_name = (l), .value = (v), .help = (h) } |
8606 | diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c |
8607 | index c458c4a..4acb5d7 100644 |
8608 | --- a/tools/perf/util/symbol.c |
8609 | +++ b/tools/perf/util/symbol.c |
8610 | @@ -18,18 +18,6 @@ |
8611 | #define NT_GNU_BUILD_ID 3 |
8612 | #endif |
8613 | |
8614 | -enum dso_origin { |
8615 | - DSO__ORIG_KERNEL = 0, |
8616 | - DSO__ORIG_JAVA_JIT, |
8617 | - DSO__ORIG_BUILD_ID_CACHE, |
8618 | - DSO__ORIG_FEDORA, |
8619 | - DSO__ORIG_UBUNTU, |
8620 | - DSO__ORIG_BUILDID, |
8621 | - DSO__ORIG_DSO, |
8622 | - DSO__ORIG_KMODULE, |
8623 | - DSO__ORIG_NOT_FOUND, |
8624 | -}; |
8625 | - |
8626 | static void dsos__add(struct list_head *head, struct dso *dso); |
8627 | static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); |
8628 | static int dso__load_kernel_sym(struct dso *self, struct map *map, |
8629 | @@ -1025,7 +1013,7 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name, |
8630 | } |
8631 | curr_map->map_ip = identity__map_ip; |
8632 | curr_map->unmap_ip = identity__map_ip; |
8633 | - curr_dso->origin = DSO__ORIG_KERNEL; |
8634 | + curr_dso->origin = self->origin; |
8635 | map_groups__insert(kmap->kmaps, curr_map); |
8636 | dsos__add(&dsos__kernel, curr_dso); |
8637 | dso__set_loaded(curr_dso, map->type); |
8638 | @@ -1895,6 +1883,17 @@ out_fail: |
8639 | return -1; |
8640 | } |
8641 | |
8642 | +size_t vmlinux_path__fprintf(FILE *fp) |
8643 | +{ |
8644 | + int i; |
8645 | + size_t printed = 0; |
8646 | + |
8647 | + for (i = 0; i < vmlinux_path__nr_entries; ++i) |
8648 | + printed += fprintf(fp, "[%d] %s\n", i, vmlinux_path[i]); |
8649 | + |
8650 | + return printed; |
8651 | +} |
8652 | + |
8653 | static int setup_list(struct strlist **list, const char *list_str, |
8654 | const char *list_name) |
8655 | { |
8656 | diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h |
8657 | index f30a374..044a4bc 100644 |
8658 | --- a/tools/perf/util/symbol.h |
8659 | +++ b/tools/perf/util/symbol.h |
8660 | @@ -150,6 +150,19 @@ size_t dsos__fprintf_buildid(FILE *fp, bool with_hits); |
8661 | |
8662 | size_t dso__fprintf_buildid(struct dso *self, FILE *fp); |
8663 | size_t dso__fprintf(struct dso *self, enum map_type type, FILE *fp); |
8664 | + |
8665 | +enum dso_origin { |
8666 | + DSO__ORIG_KERNEL = 0, |
8667 | + DSO__ORIG_JAVA_JIT, |
8668 | + DSO__ORIG_BUILD_ID_CACHE, |
8669 | + DSO__ORIG_FEDORA, |
8670 | + DSO__ORIG_UBUNTU, |
8671 | + DSO__ORIG_BUILDID, |
8672 | + DSO__ORIG_DSO, |
8673 | + DSO__ORIG_KMODULE, |
8674 | + DSO__ORIG_NOT_FOUND, |
8675 | +}; |
8676 | + |
8677 | char dso__symtab_origin(const struct dso *self); |
8678 | void dso__set_long_name(struct dso *self, char *name); |
8679 | void dso__set_build_id(struct dso *self, void *build_id); |
8680 | @@ -169,4 +182,6 @@ int kallsyms__parse(const char *filename, void *arg, |
8681 | int symbol__init(void); |
8682 | bool symbol_type__is_a(char symbol_type, enum map_type map_type); |
8683 | |
8684 | +size_t vmlinux_path__fprintf(FILE *fp); |
8685 | + |
8686 | #endif /* __PERF_SYMBOL */ |
8687 | diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c |
8688 | index 613c9cc..dc78bae 100644 |
8689 | --- a/tools/perf/util/trace-event-parse.c |
8690 | +++ b/tools/perf/util/trace-event-parse.c |
8691 | @@ -40,7 +40,7 @@ int header_page_size_size; |
8692 | int header_page_data_offset; |
8693 | int header_page_data_size; |
8694 | |
8695 | -int latency_format; |
8696 | +bool latency_format; |
8697 | |
8698 | static char *input_buf; |
8699 | static unsigned long long input_buf_ptr; |
8700 | diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h |
8701 | index c3269b9..81f2fd2 100644 |
8702 | --- a/tools/perf/util/trace-event.h |
8703 | +++ b/tools/perf/util/trace-event.h |
8704 | @@ -1,6 +1,7 @@ |
8705 | #ifndef __PERF_TRACE_EVENTS_H |
8706 | #define __PERF_TRACE_EVENTS_H |
8707 | |
8708 | +#include <stdbool.h> |
8709 | #include "parse-events.h" |
8710 | |
8711 | #define __unused __attribute__((unused)) |
8712 | @@ -241,7 +242,7 @@ extern int header_page_size_size; |
8713 | extern int header_page_data_offset; |
8714 | extern int header_page_data_size; |
8715 | |
8716 | -extern int latency_format; |
8717 | +extern bool latency_format; |
8718 | |
8719 | int parse_header_page(char *buf, unsigned long size); |
8720 | int trace_parse_common_type(void *data); |