Contents of /trunk/kernel-lts/patches-3.10/0101-3.10.2-all-fixes.patch
Revision 2395
Mon Feb 3 12:41:31 2014 UTC by niro
File size: 81302 byte(s)
-copied
1 | diff --git a/Documentation/parisc/registers b/Documentation/parisc/registers |
2 | index dd3cadd..10c7d17 100644 |
3 | --- a/Documentation/parisc/registers |
4 | +++ b/Documentation/parisc/registers |
5 | @@ -78,6 +78,14 @@ Shadow Registers used by interruption handler code |
6 | TOC enable bit 1 |
7 | |
8 | ========================================================================= |
9 | + |
10 | +The PA-RISC architecture defines 7 registers as "shadow registers". |
11 | +Those are used in RETURN FROM INTERRUPTION AND RESTORE instruction to reduce |
12 | +the state save and restore time by eliminating the need for general register |
13 | +(GR) saves and restores in interruption handlers. |
14 | +Shadow registers are the GRs 1, 8, 9, 16, 17, 24, and 25. |
15 | + |
16 | +========================================================================= |
17 | Register usage notes, originally from John Marvin, with some additional |
18 | notes from Randolph Chung. |
19 | |
20 | diff --git a/Makefile b/Makefile |
21 | index b75cc30..4336730 100644 |
22 | --- a/Makefile |
23 | +++ b/Makefile |
24 | @@ -1,6 +1,6 @@ |
25 | VERSION = 3 |
26 | PATCHLEVEL = 10 |
27 | -SUBLEVEL = 1 |
28 | +SUBLEVEL = 2 |
29 | EXTRAVERSION = |
30 | NAME = Unicycling Gorilla |
31 | |
32 | diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi |
33 | index 73fd7d0..587ceef 100644 |
34 | --- a/arch/arm/boot/dts/imx23.dtsi |
35 | +++ b/arch/arm/boot/dts/imx23.dtsi |
36 | @@ -23,8 +23,12 @@ |
37 | }; |
38 | |
39 | cpus { |
40 | - cpu@0 { |
41 | - compatible = "arm,arm926ejs"; |
42 | + #address-cells = <0>; |
43 | + #size-cells = <0>; |
44 | + |
45 | + cpu { |
46 | + compatible = "arm,arm926ej-s"; |
47 | + device_type = "cpu"; |
48 | }; |
49 | }; |
50 | |
51 | diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi |
52 | index 600f7cb..4c10a19 100644 |
53 | --- a/arch/arm/boot/dts/imx28.dtsi |
54 | +++ b/arch/arm/boot/dts/imx28.dtsi |
55 | @@ -32,8 +32,12 @@ |
56 | }; |
57 | |
58 | cpus { |
59 | - cpu@0 { |
60 | - compatible = "arm,arm926ejs"; |
61 | + #address-cells = <0>; |
62 | + #size-cells = <0>; |
63 | + |
64 | + cpu { |
65 | + compatible = "arm,arm926ej-s"; |
66 | + device_type = "cpu"; |
67 | }; |
68 | }; |
69 | |
70 | diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi |
71 | index 5bcdf3a..62dc781 100644 |
72 | --- a/arch/arm/boot/dts/imx6dl.dtsi |
73 | +++ b/arch/arm/boot/dts/imx6dl.dtsi |
74 | @@ -18,12 +18,14 @@ |
75 | |
76 | cpu@0 { |
77 | compatible = "arm,cortex-a9"; |
78 | + device_type = "cpu"; |
79 | reg = <0>; |
80 | next-level-cache = <&L2>; |
81 | }; |
82 | |
83 | cpu@1 { |
84 | compatible = "arm,cortex-a9"; |
85 | + device_type = "cpu"; |
86 | reg = <1>; |
87 | next-level-cache = <&L2>; |
88 | }; |
89 | diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi |
90 | index 21e6758..dc54a72 100644 |
91 | --- a/arch/arm/boot/dts/imx6q.dtsi |
92 | +++ b/arch/arm/boot/dts/imx6q.dtsi |
93 | @@ -18,6 +18,7 @@ |
94 | |
95 | cpu@0 { |
96 | compatible = "arm,cortex-a9"; |
97 | + device_type = "cpu"; |
98 | reg = <0>; |
99 | next-level-cache = <&L2>; |
100 | operating-points = < |
101 | @@ -39,18 +40,21 @@ |
102 | |
103 | cpu@1 { |
104 | compatible = "arm,cortex-a9"; |
105 | + device_type = "cpu"; |
106 | reg = <1>; |
107 | next-level-cache = <&L2>; |
108 | }; |
109 | |
110 | cpu@2 { |
111 | compatible = "arm,cortex-a9"; |
112 | + device_type = "cpu"; |
113 | reg = <2>; |
114 | next-level-cache = <&L2>; |
115 | }; |
116 | |
117 | cpu@3 { |
118 | compatible = "arm,cortex-a9"; |
119 | + device_type = "cpu"; |
120 | reg = <3>; |
121 | next-level-cache = <&L2>; |
122 | }; |
123 | diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h |
124 | index a7b85e0..dc90203 100644 |
125 | --- a/arch/arm/include/asm/mmu_context.h |
126 | +++ b/arch/arm/include/asm/mmu_context.h |
127 | @@ -27,7 +27,15 @@ void __check_vmalloc_seq(struct mm_struct *mm); |
128 | void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk); |
129 | #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; }) |
130 | |
131 | -DECLARE_PER_CPU(atomic64_t, active_asids); |
132 | +#ifdef CONFIG_ARM_ERRATA_798181 |
133 | +void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, |
134 | + cpumask_t *mask); |
135 | +#else /* !CONFIG_ARM_ERRATA_798181 */ |
136 | +static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, |
137 | + cpumask_t *mask) |
138 | +{ |
139 | +} |
140 | +#endif /* CONFIG_ARM_ERRATA_798181 */ |
141 | |
142 | #else /* !CONFIG_CPU_HAS_ASID */ |
143 | |
144 | diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c |
145 | index 8c3094d..d9f5cd4 100644 |
146 | --- a/arch/arm/kernel/perf_event.c |
147 | +++ b/arch/arm/kernel/perf_event.c |
148 | @@ -569,6 +569,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
149 | return; |
150 | } |
151 | |
152 | + perf_callchain_store(entry, regs->ARM_pc); |
153 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; |
154 | |
155 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && |
156 | diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c |
157 | index 9a52a07..a98b62d 100644 |
158 | --- a/arch/arm/kernel/smp_tlb.c |
159 | +++ b/arch/arm/kernel/smp_tlb.c |
160 | @@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void) |
161 | |
162 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) |
163 | { |
164 | - int cpu, this_cpu; |
165 | + int this_cpu; |
166 | cpumask_t mask = { CPU_BITS_NONE }; |
167 | |
168 | if (!erratum_a15_798181()) |
169 | @@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) |
170 | |
171 | dummy_flush_tlb_a15_erratum(); |
172 | this_cpu = get_cpu(); |
173 | - for_each_online_cpu(cpu) { |
174 | - if (cpu == this_cpu) |
175 | - continue; |
176 | - /* |
177 | - * We only need to send an IPI if the other CPUs are running |
178 | - * the same ASID as the one being invalidated. There is no |
179 | - * need for locking around the active_asids check since the |
180 | - * switch_mm() function has at least one dmb() (as required by |
181 | - * this workaround) in case a context switch happens on |
182 | - * another CPU after the condition below. |
183 | - */ |
184 | - if (atomic64_read(&mm->context.id) == |
185 | - atomic64_read(&per_cpu(active_asids, cpu))) |
186 | - cpumask_set_cpu(cpu, &mask); |
187 | - } |
188 | + a15_erratum_get_cpumask(this_cpu, mm, &mask); |
189 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); |
190 | put_cpu(); |
191 | } |
192 | diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c |
193 | index 90525d9..f6fd1d4 100644 |
194 | --- a/arch/arm/kernel/smp_twd.c |
195 | +++ b/arch/arm/kernel/smp_twd.c |
196 | @@ -120,7 +120,7 @@ static int twd_rate_change(struct notifier_block *nb, |
197 | * changing cpu. |
198 | */ |
199 | if (flags == POST_RATE_CHANGE) |
200 | - smp_call_function(twd_update_frequency, |
201 | + on_each_cpu(twd_update_frequency, |
202 | (void *)&cnd->new_rate, 1); |
203 | |
204 | return NOTIFY_OK; |
205 | diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c |
206 | index 899a86c..1ccddd2 100644 |
207 | --- a/arch/arm/mach-shmobile/setup-emev2.c |
208 | +++ b/arch/arm/mach-shmobile/setup-emev2.c |
209 | @@ -287,14 +287,14 @@ static struct gpio_em_config gio3_config = { |
210 | static struct resource gio3_resources[] = { |
211 | [0] = { |
212 | .name = "GIO_096", |
213 | - .start = 0xe0050100, |
214 | - .end = 0xe005012b, |
215 | + .start = 0xe0050180, |
216 | + .end = 0xe00501ab, |
217 | .flags = IORESOURCE_MEM, |
218 | }, |
219 | [1] = { |
220 | .name = "GIO_096", |
221 | - .start = 0xe0050140, |
222 | - .end = 0xe005015f, |
223 | + .start = 0xe00501c0, |
224 | + .end = 0xe00501df, |
225 | .flags = IORESOURCE_MEM, |
226 | }, |
227 | [2] = { |
228 | diff --git a/arch/arm/mach-shmobile/setup-r8a73a4.c b/arch/arm/mach-shmobile/setup-r8a73a4.c |
229 | index c5a75a7..7f45c2e 100644 |
230 | --- a/arch/arm/mach-shmobile/setup-r8a73a4.c |
231 | +++ b/arch/arm/mach-shmobile/setup-r8a73a4.c |
232 | @@ -62,7 +62,7 @@ enum { SCIFA0, SCIFA1, SCIFB0, SCIFB1, SCIFB2, SCIFB3 }; |
233 | static const struct plat_sci_port scif[] = { |
234 | SCIFA_DATA(SCIFA0, 0xe6c40000, gic_spi(144)), /* SCIFA0 */ |
235 | SCIFA_DATA(SCIFA1, 0xe6c50000, gic_spi(145)), /* SCIFA1 */ |
236 | - SCIFB_DATA(SCIFB0, 0xe6c50000, gic_spi(145)), /* SCIFB0 */ |
237 | + SCIFB_DATA(SCIFB0, 0xe6c20000, gic_spi(148)), /* SCIFB0 */ |
238 | SCIFB_DATA(SCIFB1, 0xe6c30000, gic_spi(149)), /* SCIFB1 */ |
239 | SCIFB_DATA(SCIFB2, 0xe6ce0000, gic_spi(150)), /* SCIFB2 */ |
240 | SCIFB_DATA(SCIFB3, 0xe6cf0000, gic_spi(151)), /* SCIFB3 */ |
241 | diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c |
242 | index 2ac3737..eeab06e 100644 |
243 | --- a/arch/arm/mm/context.c |
244 | +++ b/arch/arm/mm/context.c |
245 | @@ -39,19 +39,43 @@ |
246 | * non 64-bit operations. |
247 | */ |
248 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) |
249 | -#define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) |
250 | - |
251 | -#define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1) |
252 | -#define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK) |
253 | +#define NUM_USER_ASIDS ASID_FIRST_VERSION |
254 | |
255 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
256 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); |
257 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); |
258 | |
259 | -DEFINE_PER_CPU(atomic64_t, active_asids); |
260 | +static DEFINE_PER_CPU(atomic64_t, active_asids); |
261 | static DEFINE_PER_CPU(u64, reserved_asids); |
262 | static cpumask_t tlb_flush_pending; |
263 | |
264 | +#ifdef CONFIG_ARM_ERRATA_798181 |
265 | +void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, |
266 | + cpumask_t *mask) |
267 | +{ |
268 | + int cpu; |
269 | + unsigned long flags; |
270 | + u64 context_id, asid; |
271 | + |
272 | + raw_spin_lock_irqsave(&cpu_asid_lock, flags); |
273 | + context_id = mm->context.id.counter; |
274 | + for_each_online_cpu(cpu) { |
275 | + if (cpu == this_cpu) |
276 | + continue; |
277 | + /* |
278 | + * We only need to send an IPI if the other CPUs are |
279 | + * running the same ASID as the one being invalidated. |
280 | + */ |
281 | + asid = per_cpu(active_asids, cpu).counter; |
282 | + if (asid == 0) |
283 | + asid = per_cpu(reserved_asids, cpu); |
284 | + if (context_id == asid) |
285 | + cpumask_set_cpu(cpu, mask); |
286 | + } |
287 | + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); |
288 | +} |
289 | +#endif |
290 | + |
291 | #ifdef CONFIG_ARM_LPAE |
292 | static void cpu_set_reserved_ttbr0(void) |
293 | { |
294 | @@ -128,7 +152,16 @@ static void flush_context(unsigned int cpu) |
295 | asid = 0; |
296 | } else { |
297 | asid = atomic64_xchg(&per_cpu(active_asids, i), 0); |
298 | - __set_bit(ASID_TO_IDX(asid), asid_map); |
299 | + /* |
300 | + * If this CPU has already been through a |
301 | + * rollover, but hasn't run another task in |
302 | + * the meantime, we must preserve its reserved |
303 | + * ASID, as this is the only trace we have of |
304 | + * the process it is still running. |
305 | + */ |
306 | + if (asid == 0) |
307 | + asid = per_cpu(reserved_asids, i); |
308 | + __set_bit(asid & ~ASID_MASK, asid_map); |
309 | } |
310 | per_cpu(reserved_asids, i) = asid; |
311 | } |
312 | @@ -167,17 +200,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) |
313 | /* |
314 | * Allocate a free ASID. If we can't find one, take a |
315 | * note of the currently active ASIDs and mark the TLBs |
316 | - * as requiring flushes. |
317 | + * as requiring flushes. We always count from ASID #1, |
318 | + * as we reserve ASID #0 to switch via TTBR0 and indicate |
319 | + * rollover events. |
320 | */ |
321 | - asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); |
322 | + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); |
323 | if (asid == NUM_USER_ASIDS) { |
324 | generation = atomic64_add_return(ASID_FIRST_VERSION, |
325 | &asid_generation); |
326 | flush_context(cpu); |
327 | - asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); |
328 | + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); |
329 | } |
330 | __set_bit(asid, asid_map); |
331 | - asid = generation | IDX_TO_ASID(asid); |
332 | + asid |= generation; |
333 | cpumask_clear(mm_cpumask(mm)); |
334 | } |
335 | |
336 | diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c |
337 | index 9a5cdc0..0ecc43f 100644 |
338 | --- a/arch/arm/mm/init.c |
339 | +++ b/arch/arm/mm/init.c |
340 | @@ -600,7 +600,7 @@ void __init mem_init(void) |
341 | |
342 | #ifdef CONFIG_SA1111 |
343 | /* now that our DMA memory is actually so designated, we can free it */ |
344 | - free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL); |
345 | + free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, 0, NULL); |
346 | #endif |
347 | |
348 | free_highpages(); |
349 | diff --git a/arch/c6x/mm/init.c b/arch/c6x/mm/init.c |
350 | index a9fcd89..b74ccb5 100644 |
351 | --- a/arch/c6x/mm/init.c |
352 | +++ b/arch/c6x/mm/init.c |
353 | @@ -18,6 +18,7 @@ |
354 | #include <linux/initrd.h> |
355 | |
356 | #include <asm/sections.h> |
357 | +#include <asm/uaccess.h> |
358 | |
359 | /* |
360 | * ZERO_PAGE is a special page that is used for zero-initialized |
361 | diff --git a/arch/parisc/include/asm/special_insns.h b/arch/parisc/include/asm/special_insns.h |
362 | index d306b75..e150930 100644 |
363 | --- a/arch/parisc/include/asm/special_insns.h |
364 | +++ b/arch/parisc/include/asm/special_insns.h |
365 | @@ -32,9 +32,12 @@ static inline void set_eiem(unsigned long val) |
366 | cr; \ |
367 | }) |
368 | |
369 | -#define mtsp(gr, cr) \ |
370 | - __asm__ __volatile__("mtsp %0,%1" \ |
371 | +#define mtsp(val, cr) \ |
372 | + { if (__builtin_constant_p(val) && ((val) == 0)) \ |
373 | + __asm__ __volatile__("mtsp %%r0,%0" : : "i" (cr) : "memory"); \ |
374 | + else \ |
375 | + __asm__ __volatile__("mtsp %0,%1" \ |
376 | : /* no outputs */ \ |
377 | - : "r" (gr), "i" (cr) : "memory") |
378 | + : "r" (val), "i" (cr) : "memory"); } |
379 | |
380 | #endif /* __PARISC_SPECIAL_INSNS_H */ |
381 | diff --git a/arch/parisc/include/asm/tlbflush.h b/arch/parisc/include/asm/tlbflush.h |
382 | index 5273da9..9d086a5 100644 |
383 | --- a/arch/parisc/include/asm/tlbflush.h |
384 | +++ b/arch/parisc/include/asm/tlbflush.h |
385 | @@ -63,13 +63,14 @@ static inline void flush_tlb_mm(struct mm_struct *mm) |
386 | static inline void flush_tlb_page(struct vm_area_struct *vma, |
387 | unsigned long addr) |
388 | { |
389 | - unsigned long flags; |
390 | + unsigned long flags, sid; |
391 | |
392 | /* For one page, it's not worth testing the split_tlb variable */ |
393 | |
394 | mb(); |
395 | - mtsp(vma->vm_mm->context,1); |
396 | + sid = vma->vm_mm->context; |
397 | purge_tlb_start(flags); |
398 | + mtsp(sid, 1); |
399 | pdtlb(addr); |
400 | pitlb(addr); |
401 | purge_tlb_end(flags); |
402 | diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c |
403 | index 65fb4cb..2e65aa5 100644 |
404 | --- a/arch/parisc/kernel/cache.c |
405 | +++ b/arch/parisc/kernel/cache.c |
406 | @@ -440,8 +440,8 @@ void __flush_tlb_range(unsigned long sid, unsigned long start, |
407 | else { |
408 | unsigned long flags; |
409 | |
410 | - mtsp(sid, 1); |
411 | purge_tlb_start(flags); |
412 | + mtsp(sid, 1); |
413 | if (split_tlb) { |
414 | while (npages--) { |
415 | pdtlb(start); |
416 | diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c |
417 | index a49cc81..ac4370b 100644 |
418 | --- a/arch/parisc/lib/memcpy.c |
419 | +++ b/arch/parisc/lib/memcpy.c |
420 | @@ -2,6 +2,7 @@ |
421 | * Optimized memory copy routines. |
422 | * |
423 | * Copyright (C) 2004 Randolph Chung <tausq@debian.org> |
424 | + * Copyright (C) 2013 Helge Deller <deller@gmx.de> |
425 | * |
426 | * This program is free software; you can redistribute it and/or modify |
427 | * it under the terms of the GNU General Public License as published by |
428 | @@ -153,17 +154,21 @@ static inline void prefetch_dst(const void *addr) |
429 | #define prefetch_dst(addr) do { } while(0) |
430 | #endif |
431 | |
432 | +#define PA_MEMCPY_OK 0 |
433 | +#define PA_MEMCPY_LOAD_ERROR 1 |
434 | +#define PA_MEMCPY_STORE_ERROR 2 |
435 | + |
436 | /* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words |
437 | * per loop. This code is derived from glibc. |
438 | */ |
439 | -static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src, unsigned long len, unsigned long o_dst, unsigned long o_src, unsigned long o_len) |
440 | +static inline unsigned long copy_dstaligned(unsigned long dst, |
441 | + unsigned long src, unsigned long len) |
442 | { |
443 | /* gcc complains that a2 and a3 may be uninitialized, but actually |
444 | * they cannot be. Initialize a2/a3 to shut gcc up. |
445 | */ |
446 | register unsigned int a0, a1, a2 = 0, a3 = 0; |
447 | int sh_1, sh_2; |
448 | - struct exception_data *d; |
449 | |
450 | /* prefetch_src((const void *)src); */ |
451 | |
452 | @@ -197,7 +202,7 @@ static inline unsigned long copy_dstaligned(unsigned long dst, unsigned long src |
453 | goto do2; |
454 | case 0: |
455 | if (len == 0) |
456 | - return 0; |
457 | + return PA_MEMCPY_OK; |
458 | /* a3 = ((unsigned int *) src)[0]; |
459 | a0 = ((unsigned int *) src)[1]; */ |
460 | ldw(s_space, 0, src, a3, cda_ldw_exc); |
461 | @@ -256,42 +261,35 @@ do0: |
462 | preserve_branch(handle_load_error); |
463 | preserve_branch(handle_store_error); |
464 | |
465 | - return 0; |
466 | + return PA_MEMCPY_OK; |
467 | |
468 | handle_load_error: |
469 | __asm__ __volatile__ ("cda_ldw_exc:\n"); |
470 | - d = &__get_cpu_var(exception_data); |
471 | - DPRINTF("cda_ldw_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n", |
472 | - o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src); |
473 | - return o_len * 4 - d->fault_addr + o_src; |
474 | + return PA_MEMCPY_LOAD_ERROR; |
475 | |
476 | handle_store_error: |
477 | __asm__ __volatile__ ("cda_stw_exc:\n"); |
478 | - d = &__get_cpu_var(exception_data); |
479 | - DPRINTF("cda_stw_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n", |
480 | - o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst); |
481 | - return o_len * 4 - d->fault_addr + o_dst; |
482 | + return PA_MEMCPY_STORE_ERROR; |
483 | } |
484 | |
485 | |
486 | -/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ |
487 | -static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) |
488 | +/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. |
489 | + * In case of an access fault the faulty address can be read from the per_cpu |
490 | + * exception data struct. */ |
491 | +static unsigned long pa_memcpy_internal(void *dstp, const void *srcp, |
492 | + unsigned long len) |
493 | { |
494 | register unsigned long src, dst, t1, t2, t3; |
495 | register unsigned char *pcs, *pcd; |
496 | register unsigned int *pws, *pwd; |
497 | register double *pds, *pdd; |
498 | - unsigned long ret = 0; |
499 | - unsigned long o_dst, o_src, o_len; |
500 | - struct exception_data *d; |
501 | + unsigned long ret; |
502 | |
503 | src = (unsigned long)srcp; |
504 | dst = (unsigned long)dstp; |
505 | pcs = (unsigned char *)srcp; |
506 | pcd = (unsigned char *)dstp; |
507 | |
508 | - o_dst = dst; o_src = src; o_len = len; |
509 | - |
510 | /* prefetch_src((const void *)srcp); */ |
511 | |
512 | if (len < THRESHOLD) |
513 | @@ -401,7 +399,7 @@ byte_copy: |
514 | len--; |
515 | } |
516 | |
517 | - return 0; |
518 | + return PA_MEMCPY_OK; |
519 | |
520 | unaligned_copy: |
521 | /* possibly we are aligned on a word, but not on a double... */ |
522 | @@ -438,8 +436,7 @@ unaligned_copy: |
523 | src = (unsigned long)pcs; |
524 | } |
525 | |
526 | - ret = copy_dstaligned(dst, src, len / sizeof(unsigned int), |
527 | - o_dst, o_src, o_len); |
528 | + ret = copy_dstaligned(dst, src, len / sizeof(unsigned int)); |
529 | if (ret) |
530 | return ret; |
531 | |
532 | @@ -454,17 +451,41 @@ unaligned_copy: |
533 | |
534 | handle_load_error: |
535 | __asm__ __volatile__ ("pmc_load_exc:\n"); |
536 | - d = &__get_cpu_var(exception_data); |
537 | - DPRINTF("pmc_load_exc: o_len=%lu fault_addr=%lu o_src=%lu ret=%lu\n", |
538 | - o_len, d->fault_addr, o_src, o_len - d->fault_addr + o_src); |
539 | - return o_len - d->fault_addr + o_src; |
540 | + return PA_MEMCPY_LOAD_ERROR; |
541 | |
542 | handle_store_error: |
543 | __asm__ __volatile__ ("pmc_store_exc:\n"); |
544 | + return PA_MEMCPY_STORE_ERROR; |
545 | +} |
546 | + |
547 | + |
548 | +/* Returns 0 for success, otherwise, returns number of bytes not transferred. */ |
549 | +static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) |
550 | +{ |
551 | + unsigned long ret, fault_addr, reference; |
552 | + struct exception_data *d; |
553 | + |
554 | + ret = pa_memcpy_internal(dstp, srcp, len); |
555 | + if (likely(ret == PA_MEMCPY_OK)) |
556 | + return 0; |
557 | + |
558 | + /* if a load or store fault occured we can get the faulty addr */ |
559 | d = &__get_cpu_var(exception_data); |
560 | - DPRINTF("pmc_store_exc: o_len=%lu fault_addr=%lu o_dst=%lu ret=%lu\n", |
561 | - o_len, d->fault_addr, o_dst, o_len - d->fault_addr + o_dst); |
562 | - return o_len - d->fault_addr + o_dst; |
563 | + fault_addr = d->fault_addr; |
564 | + |
565 | + /* error in load or store? */ |
566 | + if (ret == PA_MEMCPY_LOAD_ERROR) |
567 | + reference = (unsigned long) srcp; |
568 | + else |
569 | + reference = (unsigned long) dstp; |
570 | + |
571 | + DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n", |
572 | + ret, len, fault_addr, reference); |
573 | + |
574 | + if (fault_addr >= reference) |
575 | + return len - (fault_addr - reference); |
576 | + else |
577 | + return len; |
578 | } |
579 | |
580 | #ifdef __KERNEL__ |
581 | diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c |
582 | index c205035..d606463 100644 |
583 | --- a/arch/x86/boot/compressed/eboot.c |
584 | +++ b/arch/x86/boot/compressed/eboot.c |
585 | @@ -992,18 +992,20 @@ static efi_status_t exit_boot(struct boot_params *boot_params, |
586 | efi_memory_desc_t *mem_map; |
587 | efi_status_t status; |
588 | __u32 desc_version; |
589 | + bool called_exit = false; |
590 | u8 nr_entries; |
591 | int i; |
592 | |
593 | size = sizeof(*mem_map) * 32; |
594 | |
595 | again: |
596 | - size += sizeof(*mem_map); |
597 | + size += sizeof(*mem_map) * 2; |
598 | _size = size; |
599 | status = low_alloc(size, 1, (unsigned long *)&mem_map); |
600 | if (status != EFI_SUCCESS) |
601 | return status; |
602 | |
603 | +get_map: |
604 | status = efi_call_phys5(sys_table->boottime->get_memory_map, &size, |
605 | mem_map, &key, &desc_size, &desc_version); |
606 | if (status == EFI_BUFFER_TOO_SMALL) { |
607 | @@ -1029,8 +1031,20 @@ again: |
608 | /* Might as well exit boot services now */ |
609 | status = efi_call_phys2(sys_table->boottime->exit_boot_services, |
610 | handle, key); |
611 | - if (status != EFI_SUCCESS) |
612 | - goto free_mem_map; |
613 | + if (status != EFI_SUCCESS) { |
614 | + /* |
615 | + * ExitBootServices() will fail if any of the event |
616 | + * handlers change the memory map. In which case, we |
617 | + * must be prepared to retry, but only once so that |
618 | + * we're guaranteed to exit on repeated failures instead |
619 | + * of spinning forever. |
620 | + */ |
621 | + if (called_exit) |
622 | + goto free_mem_map; |
623 | + |
624 | + called_exit = true; |
625 | + goto get_map; |
626 | + } |
627 | |
628 | /* Historic? */ |
629 | boot_params->alt_mem_k = 32 * 1024; |
630 | diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c |
631 | index 3d88bfd..13e8935 100644 |
632 | --- a/arch/x86/xen/time.c |
633 | +++ b/arch/x86/xen/time.c |
634 | @@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); |
635 | /* snapshots of runstate info */ |
636 | static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot); |
637 | |
638 | -/* unused ns of stolen and blocked time */ |
639 | +/* unused ns of stolen time */ |
640 | static DEFINE_PER_CPU(u64, xen_residual_stolen); |
641 | -static DEFINE_PER_CPU(u64, xen_residual_blocked); |
642 | |
643 | /* return an consistent snapshot of 64-bit time/counter value */ |
644 | static u64 get64(const u64 *p) |
645 | @@ -115,7 +114,7 @@ static void do_stolen_accounting(void) |
646 | { |
647 | struct vcpu_runstate_info state; |
648 | struct vcpu_runstate_info *snap; |
649 | - s64 blocked, runnable, offline, stolen; |
650 | + s64 runnable, offline, stolen; |
651 | cputime_t ticks; |
652 | |
653 | get_runstate_snapshot(&state); |
654 | @@ -125,7 +124,6 @@ static void do_stolen_accounting(void) |
655 | snap = &__get_cpu_var(xen_runstate_snapshot); |
656 | |
657 | /* work out how much time the VCPU has not been runn*ing* */ |
658 | - blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; |
659 | runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; |
660 | offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline]; |
661 | |
662 | @@ -141,17 +139,6 @@ static void do_stolen_accounting(void) |
663 | ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); |
664 | __this_cpu_write(xen_residual_stolen, stolen); |
665 | account_steal_ticks(ticks); |
666 | - |
667 | - /* Add the appropriate number of ticks of blocked time, |
668 | - including any left-overs from last time. */ |
669 | - blocked += __this_cpu_read(xen_residual_blocked); |
670 | - |
671 | - if (blocked < 0) |
672 | - blocked = 0; |
673 | - |
674 | - ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); |
675 | - __this_cpu_write(xen_residual_blocked, blocked); |
676 | - account_idle_ticks(ticks); |
677 | } |
678 | |
679 | /* Get the TSC speed from Xen */ |
680 | diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile |
681 | index 536562c..97c949a 100644 |
682 | --- a/drivers/acpi/Makefile |
683 | +++ b/drivers/acpi/Makefile |
684 | @@ -43,6 +43,7 @@ acpi-y += acpi_platform.o |
685 | acpi-y += power.o |
686 | acpi-y += event.o |
687 | acpi-y += sysfs.o |
688 | +acpi-$(CONFIG_X86) += acpi_cmos_rtc.o |
689 | acpi-$(CONFIG_DEBUG_FS) += debugfs.o |
690 | acpi-$(CONFIG_ACPI_NUMA) += numa.o |
691 | acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o |
692 | diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c |
693 | new file mode 100644 |
694 | index 0000000..84190ed |
695 | --- /dev/null |
696 | +++ b/drivers/acpi/acpi_cmos_rtc.c |
697 | @@ -0,0 +1,92 @@ |
698 | +/* |
699 | + * ACPI support for CMOS RTC Address Space access |
700 | + * |
701 | + * Copyright (C) 2013, Intel Corporation |
702 | + * Authors: Lan Tianyu <tianyu.lan@intel.com> |
703 | + * |
704 | + * This program is free software; you can redistribute it and/or modify |
705 | + * it under the terms of the GNU General Public License version 2 as |
706 | + * published by the Free Software Foundation. |
707 | + */ |
708 | + |
709 | +#include <linux/acpi.h> |
710 | +#include <linux/device.h> |
711 | +#include <linux/err.h> |
712 | +#include <linux/kernel.h> |
713 | +#include <linux/module.h> |
714 | +#include <asm-generic/rtc.h> |
715 | + |
716 | +#include "internal.h" |
717 | + |
718 | +#define PREFIX "ACPI: " |
719 | + |
720 | +ACPI_MODULE_NAME("cmos rtc"); |
721 | + |
722 | +static const struct acpi_device_id acpi_cmos_rtc_ids[] = { |
723 | + { "PNP0B00" }, |
724 | + { "PNP0B01" }, |
725 | + { "PNP0B02" }, |
726 | + {} |
727 | +}; |
728 | + |
729 | +static acpi_status |
730 | +acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address, |
731 | + u32 bits, u64 *value64, |
732 | + void *handler_context, void *region_context) |
733 | +{ |
734 | + int i; |
735 | + u8 *value = (u8 *)&value64; |
736 | + |
737 | + if (address > 0xff || !value64) |
738 | + return AE_BAD_PARAMETER; |
739 | + |
740 | + if (function != ACPI_WRITE && function != ACPI_READ) |
741 | + return AE_BAD_PARAMETER; |
742 | + |
743 | + spin_lock_irq(&rtc_lock); |
744 | + |
745 | + for (i = 0; i < DIV_ROUND_UP(bits, 8); ++i, ++address, ++value) |
746 | + if (function == ACPI_READ) |
747 | + *value = CMOS_READ(address); |
748 | + else |
749 | + CMOS_WRITE(*value, address); |
750 | + |
751 | + spin_unlock_irq(&rtc_lock); |
752 | + |
753 | + return AE_OK; |
754 | +} |
755 | + |
756 | +static int acpi_install_cmos_rtc_space_handler(struct acpi_device *adev, |
757 | + const struct acpi_device_id *id) |
758 | +{ |
759 | + acpi_status status; |
760 | + |
761 | + status = acpi_install_address_space_handler(adev->handle, |
762 | + ACPI_ADR_SPACE_CMOS, |
763 | + &acpi_cmos_rtc_space_handler, |
764 | + NULL, NULL); |
765 | + if (ACPI_FAILURE(status)) { |
766 | + pr_err(PREFIX "Error installing CMOS-RTC region handler\n"); |
767 | + return -ENODEV; |
768 | + } |
769 | + |
770 | + return 0; |
771 | +} |
772 | + |
773 | +static void acpi_remove_cmos_rtc_space_handler(struct acpi_device *adev) |
774 | +{ |
775 | + if (ACPI_FAILURE(acpi_remove_address_space_handler(adev->handle, |
776 | + ACPI_ADR_SPACE_CMOS, &acpi_cmos_rtc_space_handler))) |
777 | + pr_err(PREFIX "Error removing CMOS-RTC region handler\n"); |
778 | +} |
779 | + |
780 | +static struct acpi_scan_handler cmos_rtc_handler = { |
781 | + .ids = acpi_cmos_rtc_ids, |
782 | + .attach = acpi_install_cmos_rtc_space_handler, |
783 | + .detach = acpi_remove_cmos_rtc_space_handler, |
784 | +}; |
785 | + |
786 | +void __init acpi_cmos_rtc_init(void) |
787 | +{ |
788 | + acpi_scan_add_handler(&cmos_rtc_handler); |
789 | +} |
790 | diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c |
791 | index 35eebda..09b06e2 100644 |
792 | --- a/drivers/acpi/acpica/hwxfsleep.c |
793 | +++ b/drivers/acpi/acpica/hwxfsleep.c |
794 | @@ -240,12 +240,14 @@ static acpi_status acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) |
795 | &acpi_sleep_dispatch[function_id]; |
796 | |
797 | #if (!ACPI_REDUCED_HARDWARE) |
798 | - |
799 | /* |
800 | * If the Hardware Reduced flag is set (from the FADT), we must |
801 | - * use the extended sleep registers |
802 | + * use the extended sleep registers (FADT). Note: As per the ACPI |
803 | + * specification, these extended registers are to be used for HW-reduced |
804 | + * platforms only. They are not general-purpose replacements for the |
805 | + * legacy PM register sleep support. |
806 | */ |
807 | - if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) { |
808 | + if (acpi_gbl_reduced_hardware) { |
809 | status = sleep_functions->extended_function(sleep_state); |
810 | } else { |
811 | /* Legacy sleep */ |
812 | diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c |
813 | index 31c217a..553527c 100644 |
814 | --- a/drivers/acpi/device_pm.c |
815 | +++ b/drivers/acpi/device_pm.c |
816 | @@ -324,14 +324,27 @@ int acpi_bus_update_power(acpi_handle handle, int *state_p) |
817 | if (result) |
818 | return result; |
819 | |
820 | - if (state == ACPI_STATE_UNKNOWN) |
821 | + if (state == ACPI_STATE_UNKNOWN) { |
822 | state = ACPI_STATE_D0; |
823 | - |
824 | - result = acpi_device_set_power(device, state); |
825 | - if (!result && state_p) |
826 | + result = acpi_device_set_power(device, state); |
827 | + if (result) |
828 | + return result; |
829 | + } else { |
830 | + if (device->power.flags.power_resources) { |
831 | + /* |
832 | + * We don't need to really switch the state, bu we need |
833 | + * to update the power resources' reference counters. |
834 | + */ |
835 | + result = acpi_power_transition(device, state); |
836 | + if (result) |
837 | + return result; |
838 | + } |
839 | + device->power.state = state; |
840 | + } |
841 | + if (state_p) |
842 | *state_p = state; |
843 | |
844 | - return result; |
845 | + return 0; |
846 | } |
847 | EXPORT_SYMBOL_GPL(acpi_bus_update_power); |
848 | |
849 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
850 | index edc0081..80403c1 100644 |
851 | --- a/drivers/acpi/ec.c |
852 | +++ b/drivers/acpi/ec.c |
853 | @@ -983,6 +983,10 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { |
854 | ec_enlarge_storm_threshold, "CLEVO hardware", { |
855 | DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), |
856 | DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, |
857 | + { |
858 | + ec_skip_dsdt_scan, "HP Folio 13", { |
859 | + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), |
860 | + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL}, |
861 | {}, |
862 | }; |
863 | |
864 | diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h |
865 | index c610a76..63a0854 100644 |
866 | --- a/drivers/acpi/internal.h |
867 | +++ b/drivers/acpi/internal.h |
868 | @@ -50,6 +50,11 @@ void acpi_memory_hotplug_init(void); |
869 | #else |
870 | static inline void acpi_memory_hotplug_init(void) {} |
871 | #endif |
872 | +#ifdef CONFIG_X86 |
873 | +void acpi_cmos_rtc_init(void); |
874 | +#else |
875 | +static inline void acpi_cmos_rtc_init(void) {} |
876 | +#endif |
877 | |
878 | void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug, |
879 | const char *name); |
880 | diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c |
881 | index 27da630..14807e5 100644 |
882 | --- a/drivers/acpi/scan.c |
883 | +++ b/drivers/acpi/scan.c |
884 | @@ -2040,6 +2040,7 @@ int __init acpi_scan_init(void) |
885 | acpi_pci_link_init(); |
886 | acpi_platform_init(); |
887 | acpi_lpss_init(); |
888 | + acpi_cmos_rtc_init(); |
889 | acpi_container_init(); |
890 | acpi_memory_hotplug_init(); |
891 | acpi_dock_init(); |
892 | diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c |
893 | index 2b50dfd..b112625 100644 |
894 | --- a/drivers/ata/ahci.c |
895 | +++ b/drivers/ata/ahci.c |
896 | @@ -291,6 +291,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
897 | { PCI_VDEVICE(INTEL, 0x8d64), board_ahci }, /* Wellsburg RAID */ |
898 | { PCI_VDEVICE(INTEL, 0x8d66), board_ahci }, /* Wellsburg RAID */ |
899 | { PCI_VDEVICE(INTEL, 0x8d6e), board_ahci }, /* Wellsburg RAID */ |
900 | + { PCI_VDEVICE(INTEL, 0x23a3), board_ahci }, /* Coleto Creek AHCI */ |
901 | |
902 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
903 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
904 | @@ -310,6 +311,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { |
905 | |
906 | /* AMD */ |
907 | { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ |
908 | + { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ |
909 | /* AMD is using RAID class only for ahci controllers */ |
910 | { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
911 | PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, |
912 | diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c |
913 | index a70ff15..7b9bdd8 100644 |
914 | --- a/drivers/ata/libahci.c |
915 | +++ b/drivers/ata/libahci.c |
916 | @@ -1560,8 +1560,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) |
917 | u32 fbs = readl(port_mmio + PORT_FBS); |
918 | int pmp = fbs >> PORT_FBS_DWE_OFFSET; |
919 | |
920 | - if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) && |
921 | - ata_link_online(&ap->pmp_link[pmp])) { |
922 | + if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) { |
923 | link = &ap->pmp_link[pmp]; |
924 | fbs_need_dec = true; |
925 | } |
926 | diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c |
927 | index 46b35f7..cf1576d 100644 |
928 | --- a/drivers/block/nbd.c |
929 | +++ b/drivers/block/nbd.c |
930 | @@ -623,8 +623,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, |
931 | if (!nbd->sock) |
932 | return -EINVAL; |
933 | |
934 | + nbd->disconnect = 1; |
935 | + |
936 | nbd_send_req(nbd, &sreq); |
937 | - return 0; |
938 | + return 0; |
939 | } |
940 | |
941 | case NBD_CLEAR_SOCK: { |
942 | @@ -654,6 +656,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, |
943 | nbd->sock = SOCKET_I(inode); |
944 | if (max_part > 0) |
945 | bdev->bd_invalidated = 1; |
946 | + nbd->disconnect = 0; /* we're connected now */ |
947 | return 0; |
948 | } else { |
949 | fput(file); |
950 | @@ -743,6 +746,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, |
951 | set_capacity(nbd->disk, 0); |
952 | if (max_part > 0) |
953 | ioctl_by_bdev(bdev, BLKRRPART, 0); |
954 | + if (nbd->disconnect) /* user requested, ignore socket errors */ |
955 | + return 0; |
956 | return nbd->harderror; |
957 | } |
958 | |
959 | diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c |
960 | index a17553f..7ec82f0 100644 |
961 | --- a/drivers/dma/pl330.c |
962 | +++ b/drivers/dma/pl330.c |
963 | @@ -2485,10 +2485,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan) |
964 | struct dma_pl330_chan *pch = to_pchan(chan); |
965 | unsigned long flags; |
966 | |
967 | - spin_lock_irqsave(&pch->lock, flags); |
968 | - |
969 | tasklet_kill(&pch->task); |
970 | |
971 | + spin_lock_irqsave(&pch->lock, flags); |
972 | + |
973 | pl330_release_channel(pch->pl330_chid); |
974 | pch->pl330_chid = NULL; |
975 | |
976 | diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c |
977 | index feae88b..c7710b5 100644 |
978 | --- a/drivers/hid/hid-apple.c |
979 | +++ b/drivers/hid/hid-apple.c |
980 | @@ -524,6 +524,12 @@ static const struct hid_device_id apple_devices[] = { |
981 | .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, |
982 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), |
983 | .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, |
984 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), |
985 | + .driver_data = APPLE_HAS_FN }, |
986 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), |
987 | + .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, |
988 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), |
989 | + .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, |
990 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), |
991 | .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, |
992 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), |
993 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
994 | index 264f550..402f486 100644 |
995 | --- a/drivers/hid/hid-core.c |
996 | +++ b/drivers/hid/hid-core.c |
997 | @@ -1547,6 +1547,9 @@ static const struct hid_device_id hid_have_special_driver[] = { |
998 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) }, |
999 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) }, |
1000 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) }, |
1001 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, |
1002 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, |
1003 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, |
1004 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, |
1005 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, |
1006 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, |
1007 | @@ -2179,6 +2182,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = { |
1008 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI) }, |
1009 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO) }, |
1010 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS) }, |
1011 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, |
1012 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, |
1013 | + { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, |
1014 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, |
1015 | { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, |
1016 | { } |
1017 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
1018 | index 38535c9..2168885 100644 |
1019 | --- a/drivers/hid/hid-ids.h |
1020 | +++ b/drivers/hid/hid-ids.h |
1021 | @@ -135,6 +135,9 @@ |
1022 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b |
1023 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255 |
1024 | #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256 |
1025 | +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291 |
1026 | +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292 |
1027 | +#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293 |
1028 | #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a |
1029 | #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b |
1030 | #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 |
1031 | diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c |
1032 | index d6fbb577..791f45d 100644 |
1033 | --- a/drivers/hv/ring_buffer.c |
1034 | +++ b/drivers/hv/ring_buffer.c |
1035 | @@ -32,7 +32,7 @@ |
1036 | void hv_begin_read(struct hv_ring_buffer_info *rbi) |
1037 | { |
1038 | rbi->ring_buffer->interrupt_mask = 1; |
1039 | - smp_mb(); |
1040 | + mb(); |
1041 | } |
1042 | |
1043 | u32 hv_end_read(struct hv_ring_buffer_info *rbi) |
1044 | @@ -41,7 +41,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi) |
1045 | u32 write; |
1046 | |
1047 | rbi->ring_buffer->interrupt_mask = 0; |
1048 | - smp_mb(); |
1049 | + mb(); |
1050 | |
1051 | /* |
1052 | * Now check to see if the ring buffer is still empty. |
1053 | @@ -71,7 +71,7 @@ u32 hv_end_read(struct hv_ring_buffer_info *rbi) |
1054 | |
1055 | static bool hv_need_to_signal(u32 old_write, struct hv_ring_buffer_info *rbi) |
1056 | { |
1057 | - smp_mb(); |
1058 | + mb(); |
1059 | if (rbi->ring_buffer->interrupt_mask) |
1060 | return false; |
1061 | |
1062 | @@ -442,7 +442,7 @@ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, |
1063 | sizeof(u64)); |
1064 | |
1065 | /* Issue a full memory barrier before updating the write index */ |
1066 | - smp_mb(); |
1067 | + mb(); |
1068 | |
1069 | /* Now, update the write location */ |
1070 | hv_set_next_write_location(outring_info, next_write_location); |
1071 | @@ -549,7 +549,7 @@ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, |
1072 | /* Make sure all reads are done before we update the read index since */ |
1073 | /* the writer may start writing to the read area once the read index */ |
1074 | /*is updated */ |
1075 | - smp_mb(); |
1076 | + mb(); |
1077 | |
1078 | /* Update the read index */ |
1079 | hv_set_next_read_location(inring_info, next_read_location); |
1080 | diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c |
1081 | index bf421e0..4004e54 100644 |
1082 | --- a/drivers/hv/vmbus_drv.c |
1083 | +++ b/drivers/hv/vmbus_drv.c |
1084 | @@ -434,7 +434,7 @@ static void vmbus_on_msg_dpc(unsigned long data) |
1085 | * will not deliver any more messages since there is |
1086 | * no empty slot |
1087 | */ |
1088 | - smp_mb(); |
1089 | + mb(); |
1090 | |
1091 | if (msg->header.message_flags.msg_pending) { |
1092 | /* |
1093 | diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c |
1094 | index 2baff1b..4ef4d5e 100644 |
1095 | --- a/drivers/input/mouse/bcm5974.c |
1096 | +++ b/drivers/input/mouse/bcm5974.c |
1097 | @@ -88,6 +88,10 @@ |
1098 | #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI 0x0259 |
1099 | #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a |
1100 | #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b |
1101 | +/* MacbookAir6,2 (unibody, June 2013) */ |
1102 | +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291 |
1103 | +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292 |
1104 | +#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293 |
1105 | |
1106 | #define BCM5974_DEVICE(prod) { \ |
1107 | .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ |
1108 | @@ -145,6 +149,10 @@ static const struct usb_device_id bcm5974_table[] = { |
1109 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ANSI), |
1110 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO), |
1111 | BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS), |
1112 | + /* MacbookAir6,2 */ |
1113 | + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), |
1114 | + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), |
1115 | + BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), |
1116 | /* Terminating entry */ |
1117 | {} |
1118 | }; |
1119 | @@ -172,15 +180,18 @@ struct bt_data { |
1120 | /* trackpad header types */ |
1121 | enum tp_type { |
1122 | TYPE1, /* plain trackpad */ |
1123 | - TYPE2 /* button integrated in trackpad */ |
1124 | + TYPE2, /* button integrated in trackpad */ |
1125 | + TYPE3 /* additional header fields since June 2013 */ |
1126 | }; |
1127 | |
1128 | /* trackpad finger data offsets, le16-aligned */ |
1129 | #define FINGER_TYPE1 (13 * sizeof(__le16)) |
1130 | #define FINGER_TYPE2 (15 * sizeof(__le16)) |
1131 | +#define FINGER_TYPE3 (19 * sizeof(__le16)) |
1132 | |
1133 | /* trackpad button data offsets */ |
1134 | #define BUTTON_TYPE2 15 |
1135 | +#define BUTTON_TYPE3 23 |
1136 | |
1137 | /* list of device capability bits */ |
1138 | #define HAS_INTEGRATED_BUTTON 1 |
1139 | @@ -400,6 +411,19 @@ static const struct bcm5974_config bcm5974_config_table[] = { |
1140 | { SN_COORD, -150, 6730 }, |
1141 | { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } |
1142 | }, |
1143 | + { |
1144 | + USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI, |
1145 | + USB_DEVICE_ID_APPLE_WELLSPRING8_ISO, |
1146 | + USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, |
1147 | + HAS_INTEGRATED_BUTTON, |
1148 | + 0, sizeof(struct bt_data), |
1149 | + 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS, |
1150 | + { SN_PRESSURE, 0, 300 }, |
1151 | + { SN_WIDTH, 0, 2048 }, |
1152 | + { SN_COORD, -4620, 5140 }, |
1153 | + { SN_COORD, -150, 6600 }, |
1154 | + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } |
1155 | + }, |
1156 | {} |
1157 | }; |
1158 | |
1159 | @@ -557,6 +581,9 @@ static int report_tp_state(struct bcm5974 *dev, int size) |
1160 | input_report_key(input, BTN_LEFT, ibt); |
1161 | } |
1162 | |
1163 | + if (c->tp_type == TYPE3) |
1164 | + input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]); |
1165 | + |
1166 | input_sync(input); |
1167 | |
1168 | return 0; |
1169 | @@ -572,9 +599,14 @@ static int report_tp_state(struct bcm5974 *dev, int size) |
1170 | |
1171 | static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) |
1172 | { |
1173 | - char *data = kmalloc(8, GFP_KERNEL); |
1174 | int retval = 0, size; |
1175 | + char *data; |
1176 | + |
1177 | + /* Type 3 does not require a mode switch */ |
1178 | + if (dev->cfg.tp_type == TYPE3) |
1179 | + return 0; |
1180 | |
1181 | + data = kmalloc(8, GFP_KERNEL); |
1182 | if (!data) { |
1183 | dev_err(&dev->intf->dev, "out of memory\n"); |
1184 | retval = -ENOMEM; |
1185 | diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c |
1186 | index c5e3029..48acfc6 100644 |
1187 | --- a/drivers/net/wireless/iwlwifi/pcie/tx.c |
1188 | +++ b/drivers/net/wireless/iwlwifi/pcie/tx.c |
1189 | @@ -576,10 +576,16 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) |
1190 | |
1191 | spin_lock_bh(&txq->lock); |
1192 | while (q->write_ptr != q->read_ptr) { |
1193 | + IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", |
1194 | + txq_id, q->read_ptr); |
1195 | iwl_pcie_txq_free_tfd(trans, txq); |
1196 | q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); |
1197 | } |
1198 | + txq->active = false; |
1199 | spin_unlock_bh(&txq->lock); |
1200 | + |
1201 | + /* just in case - this queue may have been stopped */ |
1202 | + iwl_wake_queue(trans, txq); |
1203 | } |
1204 | |
1205 | /* |
1206 | @@ -927,6 +933,12 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, |
1207 | |
1208 | spin_lock_bh(&txq->lock); |
1209 | |
1210 | + if (!txq->active) { |
1211 | + IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", |
1212 | + txq_id, ssn); |
1213 | + goto out; |
1214 | + } |
1215 | + |
1216 | if (txq->q.read_ptr == tfd_num) |
1217 | goto out; |
1218 | |
1219 | @@ -1103,6 +1115,7 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, |
1220 | (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | |
1221 | (1 << SCD_QUEUE_STTS_REG_POS_WSL) | |
1222 | SCD_QUEUE_STTS_REG_MSK); |
1223 | + trans_pcie->txq[txq_id].active = true; |
1224 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", |
1225 | txq_id, fifo, ssn & 0xff); |
1226 | } |
1227 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c |
1228 | index 953f1a0..2119313 100644 |
1229 | --- a/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c |
1230 | +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/rf.c |
1231 | @@ -104,7 +104,7 @@ void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, |
1232 | tx_agc[RF90_PATH_A] = 0x10101010; |
1233 | tx_agc[RF90_PATH_B] = 0x10101010; |
1234 | } else if (rtlpriv->dm.dynamic_txhighpower_lvl == |
1235 | - TXHIGHPWRLEVEL_LEVEL1) { |
1236 | + TXHIGHPWRLEVEL_LEVEL2) { |
1237 | tx_agc[RF90_PATH_A] = 0x00000000; |
1238 | tx_agc[RF90_PATH_B] = 0x00000000; |
1239 | } else{ |
1240 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
1241 | index 826f085..2bd5985 100644 |
1242 | --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
1243 | +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c |
1244 | @@ -359,6 +359,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { |
1245 | {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/ |
1246 | {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/ |
1247 | {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/ |
1248 | + {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/ |
1249 | {RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/ |
1250 | {} |
1251 | }; |
1252 | diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c |
1253 | index e4c4cdc..d9ee2ef 100644 |
1254 | --- a/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c |
1255 | +++ b/drivers/net/wireless/rtlwifi/rtl8723ae/sw.c |
1256 | @@ -251,7 +251,7 @@ static struct rtl_hal_cfg rtl8723ae_hal_cfg = { |
1257 | .bar_id = 2, |
1258 | .write_readback = true, |
1259 | .name = "rtl8723ae_pci", |
1260 | - .fw_name = "rtlwifi/rtl8723aefw.bin", |
1261 | + .fw_name = "rtlwifi/rtl8723fw.bin", |
1262 | .ops = &rtl8723ae_hal_ops, |
1263 | .mod_params = &rtl8723ae_mod_params, |
1264 | .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, |
1265 | @@ -353,8 +353,8 @@ MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>"); |
1266 | MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>"); |
1267 | MODULE_LICENSE("GPL"); |
1268 | MODULE_DESCRIPTION("Realtek 8723E 802.11n PCI wireless"); |
1269 | -MODULE_FIRMWARE("rtlwifi/rtl8723aefw.bin"); |
1270 | -MODULE_FIRMWARE("rtlwifi/rtl8723aefw_B.bin"); |
1271 | +MODULE_FIRMWARE("rtlwifi/rtl8723fw.bin"); |
1272 | +MODULE_FIRMWARE("rtlwifi/rtl8723fw_B.bin"); |
1273 | |
1274 | module_param_named(swenc, rtl8723ae_mod_params.sw_crypto, bool, 0444); |
1275 | module_param_named(debug, rtl8723ae_mod_params.debug, int, 0444); |
1276 | diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c |
1277 | index 1f05913..19f6f70 100644 |
1278 | --- a/drivers/parisc/lba_pci.c |
1279 | +++ b/drivers/parisc/lba_pci.c |
1280 | @@ -613,6 +613,54 @@ truncate_pat_collision(struct resource *root, struct resource *new) |
1281 | return 0; /* truncation successful */ |
1282 | } |
1283 | |
1284 | +/* |
1285 | + * extend_lmmio_len: extend lmmio range to maximum length |
1286 | + * |
1287 | + * This is needed at least on C8000 systems to get the ATI FireGL card |
1288 | + * working. On other systems we will currently not extend the lmmio space. |
1289 | + */ |
1290 | +static unsigned long |
1291 | +extend_lmmio_len(unsigned long start, unsigned long end, unsigned long lba_len) |
1292 | +{ |
1293 | + struct resource *tmp; |
1294 | + |
1295 | + pr_debug("LMMIO mismatch: PAT length = 0x%lx, MASK register = 0x%lx\n", |
1296 | + end - start, lba_len); |
1297 | + |
1298 | + lba_len = min(lba_len+1, 256UL*1024*1024); /* limit to 256 MB */ |
1299 | + |
1300 | + pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - original\n", start, end); |
1301 | + |
1302 | + if (boot_cpu_data.cpu_type < mako) { |
1303 | + pr_info("LBA: Not a C8000 system - not extending LMMIO range.\n"); |
1304 | + return end; |
1305 | + } |
1306 | + |
1307 | + end += lba_len; |
1308 | + if (end < start) /* fix overflow */ |
1309 | + end = -1ULL; |
1310 | + |
1311 | + pr_debug("LBA: lmmio_space [0x%lx-0x%lx] - current\n", start, end); |
1312 | + |
1313 | + /* first overlap */ |
1314 | + for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { |
1315 | + pr_debug("LBA: testing %pR\n", tmp); |
1316 | + if (tmp->start == start) |
1317 | + continue; /* ignore ourself */ |
1318 | + if (tmp->end < start) |
1319 | + continue; |
1320 | + if (tmp->start > end) |
1321 | + continue; |
1322 | + if (end >= tmp->start) |
1323 | + end = tmp->start - 1; |
1324 | + } |
1325 | + |
1326 | + pr_info("LBA: lmmio_space [0x%lx-0x%lx] - new\n", start, end); |
1327 | + |
1328 | + /* return new end */ |
1329 | + return end; |
1330 | +} |
1331 | + |
1332 | #else |
1333 | #define truncate_pat_collision(r,n) (0) |
1334 | #endif |
1335 | @@ -994,6 +1042,14 @@ lba_pat_resources(struct parisc_device *pa_dev, struct lba_device *lba_dev) |
1336 | case PAT_LMMIO: |
1337 | /* used to fix up pre-initialized MEM BARs */ |
1338 | if (!lba_dev->hba.lmmio_space.flags) { |
1339 | + unsigned long lba_len; |
1340 | + |
1341 | + lba_len = ~READ_REG32(lba_dev->hba.base_addr |
1342 | + + LBA_LMMIO_MASK); |
1343 | + if ((p->end - p->start) != lba_len) |
1344 | + p->end = extend_lmmio_len(p->start, |
1345 | + p->end, lba_len); |
1346 | + |
1347 | sprintf(lba_dev->hba.lmmio_name, |
1348 | "PCI%02x LMMIO", |
1349 | (int)lba_dev->hba.bus_num.start); |
1350 | diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c |
1351 | index c93071d..a971a6f 100644 |
1352 | --- a/drivers/pci/iov.c |
1353 | +++ b/drivers/pci/iov.c |
1354 | @@ -92,6 +92,8 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) |
1355 | pci_read_config_word(dev, iov->pos + PCI_SRIOV_VF_DID, &virtfn->device); |
1356 | pci_setup_device(virtfn); |
1357 | virtfn->dev.parent = dev->dev.parent; |
1358 | + virtfn->physfn = pci_dev_get(dev); |
1359 | + virtfn->is_virtfn = 1; |
1360 | |
1361 | for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) { |
1362 | res = dev->resource + PCI_IOV_RESOURCES + i; |
1363 | @@ -113,9 +115,6 @@ static int virtfn_add(struct pci_dev *dev, int id, int reset) |
1364 | pci_device_add(virtfn, virtfn->bus); |
1365 | mutex_unlock(&iov->dev->sriov->lock); |
1366 | |
1367 | - virtfn->physfn = pci_dev_get(dev); |
1368 | - virtfn->is_virtfn = 1; |
1369 | - |
1370 | rc = pci_bus_add_device(virtfn); |
1371 | sprintf(buf, "virtfn%u", id); |
1372 | rc = sysfs_create_link(&dev->dev.kobj, &virtfn->dev.kobj, buf); |
1373 | diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
1374 | index 70f10fa..ea37072 100644 |
1375 | --- a/drivers/pci/probe.c |
1376 | +++ b/drivers/pci/probe.c |
1377 | @@ -1703,12 +1703,16 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, |
1378 | bridge->dev.release = pci_release_bus_bridge_dev; |
1379 | dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus); |
1380 | error = pcibios_root_bridge_prepare(bridge); |
1381 | - if (error) |
1382 | - goto bridge_dev_reg_err; |
1383 | + if (error) { |
1384 | + kfree(bridge); |
1385 | + goto err_out; |
1386 | + } |
1387 | |
1388 | error = device_register(&bridge->dev); |
1389 | - if (error) |
1390 | - goto bridge_dev_reg_err; |
1391 | + if (error) { |
1392 | + put_device(&bridge->dev); |
1393 | + goto err_out; |
1394 | + } |
1395 | b->bridge = get_device(&bridge->dev); |
1396 | device_enable_async_suspend(b->bridge); |
1397 | pci_set_bus_of_node(b); |
1398 | @@ -1764,8 +1768,6 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, |
1399 | class_dev_reg_err: |
1400 | put_device(&bridge->dev); |
1401 | device_unregister(&bridge->dev); |
1402 | -bridge_dev_reg_err: |
1403 | - kfree(bridge); |
1404 | err_out: |
1405 | kfree(b); |
1406 | return NULL; |
1407 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
1408 | index 7d68aee..df4655c 100644 |
1409 | --- a/drivers/pci/quirks.c |
1410 | +++ b/drivers/pci/quirks.c |
1411 | @@ -1022,6 +1022,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk |
1412 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); |
1413 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); |
1414 | DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); |
1415 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); |
1416 | +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); |
1417 | |
1418 | /* |
1419 | * Serverworks CSB5 IDE does not fully support native mode |
1420 | diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c |
1421 | index 966abc6..f7197a7 100644 |
1422 | --- a/drivers/pci/xen-pcifront.c |
1423 | +++ b/drivers/pci/xen-pcifront.c |
1424 | @@ -678,10 +678,9 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev) |
1425 | if (!pcifront_dev) { |
1426 | dev_info(&pdev->xdev->dev, "Installing PCI frontend\n"); |
1427 | pcifront_dev = pdev; |
1428 | - } else { |
1429 | - dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n"); |
1430 | + } else |
1431 | err = -EEXIST; |
1432 | - } |
1433 | + |
1434 | spin_unlock(&pcifront_dev_lock); |
1435 | |
1436 | if (!err && !swiotlb_nr_tbl()) { |
1437 | @@ -848,7 +847,7 @@ static int pcifront_try_connect(struct pcifront_device *pdev) |
1438 | goto out; |
1439 | |
1440 | err = pcifront_connect_and_init_dma(pdev); |
1441 | - if (err) { |
1442 | + if (err && err != -EEXIST) { |
1443 | xenbus_dev_fatal(pdev->xdev, err, |
1444 | "Error setting up PCI Frontend"); |
1445 | goto out; |
1446 | diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c |
1447 | index 01463c7..1b2c631 100644 |
1448 | --- a/drivers/pcmcia/at91_cf.c |
1449 | +++ b/drivers/pcmcia/at91_cf.c |
1450 | @@ -100,9 +100,9 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp) |
1451 | int vcc = gpio_is_valid(cf->board->vcc_pin); |
1452 | |
1453 | *sp = SS_DETECT | SS_3VCARD; |
1454 | - if (!rdy || gpio_get_value(rdy)) |
1455 | + if (!rdy || gpio_get_value(cf->board->irq_pin)) |
1456 | *sp |= SS_READY; |
1457 | - if (!vcc || gpio_get_value(vcc)) |
1458 | + if (!vcc || gpio_get_value(cf->board->vcc_pin)) |
1459 | *sp |= SS_POWERON; |
1460 | } else |
1461 | *sp = 0; |
1462 | diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c |
1463 | index 5032c24..9100a34 100644 |
1464 | --- a/drivers/rtc/rtc-rv3029c2.c |
1465 | +++ b/drivers/rtc/rtc-rv3029c2.c |
1466 | @@ -310,7 +310,7 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client, |
1467 | dev_dbg(&client->dev, "alarm IRQ armed\n"); |
1468 | } else { |
1469 | /* disable AIE irq */ |
1470 | - ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1); |
1471 | + ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0); |
1472 | if (ret) |
1473 | return ret; |
1474 | |
1475 | diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c |
1476 | index 21a7e17..572d481 100644 |
1477 | --- a/drivers/tty/serial/pch_uart.c |
1478 | +++ b/drivers/tty/serial/pch_uart.c |
1479 | @@ -217,6 +217,7 @@ enum { |
1480 | #define FRI2_64_UARTCLK 64000000 /* 64.0000 MHz */ |
1481 | #define FRI2_48_UARTCLK 48000000 /* 48.0000 MHz */ |
1482 | #define NTC1_UARTCLK 64000000 /* 64.0000 MHz */ |
1483 | +#define MINNOW_UARTCLK 50000000 /* 50.0000 MHz */ |
1484 | |
1485 | struct pch_uart_buffer { |
1486 | unsigned char *buf; |
1487 | @@ -398,6 +399,10 @@ static int pch_uart_get_uartclk(void) |
1488 | strstr(cmp, "nanoETXexpress-TT"))) |
1489 | return NTC1_UARTCLK; |
1490 | |
1491 | + cmp = dmi_get_system_info(DMI_BOARD_NAME); |
1492 | + if (cmp && strstr(cmp, "MinnowBoard")) |
1493 | + return MINNOW_UARTCLK; |
1494 | + |
1495 | return DEFAULT_UARTCLK; |
1496 | } |
1497 | |
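The new DMI match selects a MinnowBoard-specific UART clock the same way the existing entries do: compare a firmware-provided board string against known substrings and fall back to a default rate. A standalone sketch of that table-driven lookup (the 50 MHz value comes from the hunk; the default rate and the helper below are illustrative only):

#include <stdio.h>
#include <string.h>

#define DEFAULT_UARTCLK  1843200    /* illustrative fallback rate */
#define MINNOW_UARTCLK  50000000    /* 50.0000 MHz, as in the hunk */

static const struct {
    const char *substr;
    int clk;
} board_clk[] = {
    { "MinnowBoard", MINNOW_UARTCLK },
};

/* Return the UART clock for a DMI-style board name, or the default. */
static int uartclk_for_board(const char *board_name)
{
    size_t i;

    for (i = 0; i < sizeof(board_clk) / sizeof(board_clk[0]); i++)
        if (board_name && strstr(board_name, board_clk[i].substr))
            return board_clk[i].clk;
    return DEFAULT_UARTCLK;
}

int main(void)
{
    printf("%d\n", uartclk_for_board("MinnowBoard v1"));   /* 50000000 */
    printf("%d\n", uartclk_for_board("SomeOtherBoard"));   /* 1843200 */
    return 0;
}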
1498 | diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c |
1499 | index 97666e8..c35a9ec 100644 |
1500 | --- a/drivers/usb/gadget/f_mass_storage.c |
1501 | +++ b/drivers/usb/gadget/f_mass_storage.c |
1502 | @@ -413,6 +413,7 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) |
1503 | /* Caller must hold fsg->lock */ |
1504 | static void wakeup_thread(struct fsg_common *common) |
1505 | { |
1506 | + smp_wmb(); /* ensure the write of bh->state is complete */ |
1507 | /* Tell the main thread that something has happened */ |
1508 | common->thread_wakeup_needed = 1; |
1509 | if (common->thread_task) |
1510 | @@ -632,6 +633,7 @@ static int sleep_thread(struct fsg_common *common) |
1511 | } |
1512 | __set_current_state(TASK_RUNNING); |
1513 | common->thread_wakeup_needed = 0; |
1514 | + smp_rmb(); /* ensure the latest bh->state is visible */ |
1515 | return rc; |
1516 | } |
1517 | |
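The two barriers added to f_mass_storage pair with each other: wakeup_thread() must make its earlier stores (the bh->state update) visible before it sets thread_wakeup_needed, and sleep_thread() must not read stale state once it has seen the flag. A userspace analogue of that pairing, using C11 release/acquire atomics with pthreads rather than the kernel's smp_wmb()/smp_rmb():

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;                 /* stands in for bh->state */
static atomic_int wakeup_needed;    /* stands in for thread_wakeup_needed */

static void *producer(void *arg)
{
    (void)arg;
    payload = 42;                   /* write the data first... */
    /* ...then publish: release orders the payload store before the flag */
    atomic_store_explicit(&wakeup_needed, 1, memory_order_release);
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    /* acquire pairs with the release store above */
    while (!atomic_load_explicit(&wakeup_needed, memory_order_acquire))
        sched_yield();
    printf("payload = %d\n", payload);  /* guaranteed to observe 42 */
    return NULL;
}

int main(void)
{
    pthread_t p, c;

    pthread_create(&c, NULL, consumer, NULL);
    pthread_create(&p, NULL, producer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}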
1518 | diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c |
1519 | index 16d7150..dda408f 100644 |
1520 | --- a/drivers/usb/host/ehci-omap.c |
1521 | +++ b/drivers/usb/host/ehci-omap.c |
1522 | @@ -187,6 +187,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) |
1523 | } |
1524 | |
1525 | omap->phy[i] = phy; |
1526 | + |
1527 | + if (pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) { |
1528 | + usb_phy_init(omap->phy[i]); |
1529 | + /* bring PHY out of suspend */ |
1530 | + usb_phy_set_suspend(omap->phy[i], 0); |
1531 | + } |
1532 | } |
1533 | |
1534 | pm_runtime_enable(dev); |
1535 | @@ -211,13 +217,14 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) |
1536 | } |
1537 | |
1538 | /* |
1539 | - * Bring PHYs out of reset. |
1540 | + * Bring PHYs out of reset for non PHY modes. |
1541 | * Even though HSIC mode is a PHY-less mode, the reset |
1542 | * line exists between the chips and can be modelled |
1543 | * as a PHY device for reset control. |
1544 | */ |
1545 | for (i = 0; i < omap->nports; i++) { |
1546 | - if (!omap->phy[i]) |
1547 | + if (!omap->phy[i] || |
1548 | + pdata->port_mode[i] == OMAP_EHCI_PORT_MODE_PHY) |
1549 | continue; |
1550 | |
1551 | usb_phy_init(omap->phy[i]); |
1552 | diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c |
1553 | index fbf75e5..f2e57a1 100644 |
1554 | --- a/drivers/usb/host/xhci-mem.c |
1555 | +++ b/drivers/usb/host/xhci-mem.c |
1556 | @@ -369,6 +369,10 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci |
1557 | ctx->size += CTX_SIZE(xhci->hcc_params); |
1558 | |
1559 | ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); |
1560 | + if (!ctx->bytes) { |
1561 | + kfree(ctx); |
1562 | + return NULL; |
1563 | + } |
1564 | memset(ctx->bytes, 0, ctx->size); |
1565 | return ctx; |
1566 | } |
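The added check covers dma_pool_alloc() returning NULL: the container structure was already allocated, so it has to be freed before bailing out, and the subsequent memset() of a NULL pointer is avoided. The same shape in plain C:

#include <stdlib.h>
#include <string.h>

struct ctx {
    size_t size;
    void *bytes;
};

/* Two-stage allocation: undo the first stage if the second fails. */
static struct ctx *ctx_alloc(size_t size)
{
    struct ctx *ctx = malloc(sizeof(*ctx));

    if (!ctx)
        return NULL;
    ctx->size = size;
    ctx->bytes = malloc(size);
    if (!ctx->bytes) {
        free(ctx);          /* release the container, don't just return */
        return NULL;
    }
    memset(ctx->bytes, 0, size);
    return ctx;
}

int main(void)
{
    struct ctx *c = ctx_alloc(64);

    if (c) {
        free(c->bytes);
        free(c);
    }
    return c ? 0 : 1;
}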
1567 | diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c |
1568 | index df90fe5..93ad67e 100644 |
1569 | --- a/drivers/usb/host/xhci-plat.c |
1570 | +++ b/drivers/usb/host/xhci-plat.c |
1571 | @@ -179,6 +179,7 @@ static int xhci_plat_remove(struct platform_device *dev) |
1572 | |
1573 | usb_remove_hcd(hcd); |
1574 | iounmap(hcd->regs); |
1575 | + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); |
1576 | usb_put_hcd(hcd); |
1577 | kfree(xhci); |
1578 | |
1579 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
1580 | index bd4323d..5dd857d 100644 |
1581 | --- a/drivers/usb/serial/option.c |
1582 | +++ b/drivers/usb/serial/option.c |
1583 | @@ -159,8 +159,6 @@ static void option_instat_callback(struct urb *urb); |
1584 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 |
1585 | #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 |
1586 | #define NOVATELWIRELESS_PRODUCT_E362 0x9010 |
1587 | -#define NOVATELWIRELESS_PRODUCT_G1 0xA001 |
1588 | -#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 |
1589 | #define NOVATELWIRELESS_PRODUCT_G2 0xA010 |
1590 | #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 |
1591 | |
1592 | @@ -730,8 +728,6 @@ static const struct usb_device_id option_ids[] = { |
1593 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) }, |
1594 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, |
1595 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, |
1596 | - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) }, |
1597 | - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) }, |
1598 | { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, |
1599 | /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ |
1600 | { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, |
1601 | diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c |
1602 | index bd794b4..c65437c 100644 |
1603 | --- a/drivers/usb/serial/qcserial.c |
1604 | +++ b/drivers/usb/serial/qcserial.c |
1605 | @@ -35,7 +35,13 @@ static const struct usb_device_id id_table[] = { |
1606 | {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ |
1607 | {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */ |
1608 | {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */ |
1609 | - {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ |
1610 | + {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */ |
1611 | + {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */ |
1612 | + {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */ |
1613 | + {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */ |
1614 | + {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */ |
1615 | + {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */ |
1616 | + {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */ |
1617 | {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ |
1618 | {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ |
1619 | {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ |
1620 | diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c |
1621 | index 02fae7f..7fb054b 100644 |
1622 | --- a/fs/btrfs/ctree.c |
1623 | +++ b/fs/btrfs/ctree.c |
1624 | @@ -1089,7 +1089,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, |
1625 | btrfs_set_node_ptr_generation(parent, parent_slot, |
1626 | trans->transid); |
1627 | btrfs_mark_buffer_dirty(parent); |
1628 | - tree_mod_log_free_eb(root->fs_info, buf); |
1629 | + if (last_ref) |
1630 | + tree_mod_log_free_eb(root->fs_info, buf); |
1631 | btrfs_free_tree_block(trans, root, buf, parent_start, |
1632 | last_ref); |
1633 | } |
1634 | @@ -1161,8 +1162,8 @@ __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info, |
1635 | * time_seq). |
1636 | */ |
1637 | static void |
1638 | -__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq, |
1639 | - struct tree_mod_elem *first_tm) |
1640 | +__tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, |
1641 | + u64 time_seq, struct tree_mod_elem *first_tm) |
1642 | { |
1643 | u32 n; |
1644 | struct rb_node *next; |
1645 | @@ -1172,6 +1173,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq, |
1646 | unsigned long p_size = sizeof(struct btrfs_key_ptr); |
1647 | |
1648 | n = btrfs_header_nritems(eb); |
1649 | + tree_mod_log_read_lock(fs_info); |
1650 | while (tm && tm->seq >= time_seq) { |
1651 | /* |
1652 | * all the operations are recorded with the operator used for |
1653 | @@ -1226,6 +1228,7 @@ __tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq, |
1654 | if (tm->index != first_tm->index) |
1655 | break; |
1656 | } |
1657 | + tree_mod_log_read_unlock(fs_info); |
1658 | btrfs_set_header_nritems(eb, n); |
1659 | } |
1660 | |
1661 | @@ -1274,7 +1277,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, |
1662 | |
1663 | extent_buffer_get(eb_rewin); |
1664 | btrfs_tree_read_lock(eb_rewin); |
1665 | - __tree_mod_log_rewind(eb_rewin, time_seq, tm); |
1666 | + __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm); |
1667 | WARN_ON(btrfs_header_nritems(eb_rewin) > |
1668 | BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root)); |
1669 | |
1670 | @@ -1350,7 +1353,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) |
1671 | btrfs_set_header_generation(eb, old_generation); |
1672 | } |
1673 | if (tm) |
1674 | - __tree_mod_log_rewind(eb, time_seq, tm); |
1675 | + __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm); |
1676 | else |
1677 | WARN_ON(btrfs_header_level(eb) != 0); |
1678 | WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root)); |
1679 | diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c |
1680 | index ff40f1c..ba9690b 100644 |
1681 | --- a/fs/btrfs/send.c |
1682 | +++ b/fs/btrfs/send.c |
1683 | @@ -4579,6 +4579,41 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) |
1684 | send_root = BTRFS_I(file_inode(mnt_file))->root; |
1685 | fs_info = send_root->fs_info; |
1686 | |
1687 | + /* |
1688 | + * This is done when we lookup the root, it should already be complete |
1689 | + * by the time we get here. |
1690 | + */ |
1691 | + WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE); |
1692 | + |
1693 | + /* |
1694 | + * If we just created this root we need to make sure that the orphan |
1695 | + * cleanup has been done and committed since we search the commit root, |
1696 | + * so check its commit root transid with our otransid and if they match |
1697 | + * commit the transaction to make sure everything is updated. |
1698 | + */ |
1699 | + down_read(&send_root->fs_info->extent_commit_sem); |
1700 | + if (btrfs_header_generation(send_root->commit_root) == |
1701 | + btrfs_root_otransid(&send_root->root_item)) { |
1702 | + struct btrfs_trans_handle *trans; |
1703 | + |
1704 | + up_read(&send_root->fs_info->extent_commit_sem); |
1705 | + |
1706 | + trans = btrfs_attach_transaction_barrier(send_root); |
1707 | + if (IS_ERR(trans)) { |
1708 | + if (PTR_ERR(trans) != -ENOENT) { |
1709 | + ret = PTR_ERR(trans); |
1710 | + goto out; |
1711 | + } |
1712 | + /* ENOENT means theres no transaction */ |
1713 | + } else { |
1714 | + ret = btrfs_commit_transaction(trans, send_root); |
1715 | + if (ret) |
1716 | + goto out; |
1717 | + } |
1718 | + } else { |
1719 | + up_read(&send_root->fs_info->extent_commit_sem); |
1720 | + } |
1721 | + |
1722 | arg = memdup_user(arg_, sizeof(*arg)); |
1723 | if (IS_ERR(arg)) { |
1724 | ret = PTR_ERR(arg); |
1725 | diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h |
1726 | index 4fb0974..fe8d627 100644 |
1727 | --- a/fs/cifs/cifs_unicode.h |
1728 | +++ b/fs/cifs/cifs_unicode.h |
1729 | @@ -327,14 +327,14 @@ UniToupper(register wchar_t uc) |
1730 | /* |
1731 | * UniStrupr: Upper case a unicode string |
1732 | */ |
1733 | -static inline wchar_t * |
1734 | -UniStrupr(register wchar_t *upin) |
1735 | +static inline __le16 * |
1736 | +UniStrupr(register __le16 *upin) |
1737 | { |
1738 | - register wchar_t *up; |
1739 | + register __le16 *up; |
1740 | |
1741 | up = upin; |
1742 | while (*up) { /* For all characters */ |
1743 | - *up = UniToupper(*up); |
1744 | + *up = cpu_to_le16(UniToupper(le16_to_cpu(*up))); |
1745 | up++; |
1746 | } |
1747 | return upin; /* Return input pointer */ |
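UniStrupr() now takes __le16 code units and byte-swaps around UniToupper() instead of uppercasing the raw on-the-wire values, which only happened to work on little-endian CPUs. A standalone sketch of the same round-trip on a little-endian 16-bit buffer (ASCII only for brevity; the helper names are invented, and the real UniToupper() uses Unicode case tables):

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const unsigned char *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

static void put_le16(unsigned char *p, uint16_t v)
{
    p[0] = v & 0xff;
    p[1] = v >> 8;
}

/* Uppercase a NUL-terminated little-endian UTF-16 buffer in place. */
static void le16_strupr(unsigned char *buf)
{
    for (; get_le16(buf) != 0; buf += 2) {
        uint16_t c = get_le16(buf);     /* le16_to_cpu() equivalent */

        if (c < 0x80)
            c = (uint16_t)toupper(c);
        put_le16(buf, c);               /* cpu_to_le16() equivalent */
    }
}

int main(void)
{
    /* "ab" as little-endian UTF-16, NUL terminated */
    unsigned char s[] = { 'a', 0, 'b', 0, 0, 0 };

    le16_strupr(s);
    printf("%c%c\n", s[0], s[2]);       /* prints "AB" */
    return 0;
}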
1748 | diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c |
1749 | index 71436d1..f59d0d5 100644 |
1750 | --- a/fs/cifs/cifsencrypt.c |
1751 | +++ b/fs/cifs/cifsencrypt.c |
1752 | @@ -414,7 +414,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, |
1753 | int rc = 0; |
1754 | int len; |
1755 | char nt_hash[CIFS_NTHASH_SIZE]; |
1756 | - wchar_t *user; |
1757 | + __le16 *user; |
1758 | wchar_t *domain; |
1759 | wchar_t *server; |
1760 | |
1761 | @@ -439,7 +439,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, |
1762 | return rc; |
1763 | } |
1764 | |
1765 | - /* convert ses->user_name to unicode and uppercase */ |
1766 | + /* convert ses->user_name to unicode */ |
1767 | len = ses->user_name ? strlen(ses->user_name) : 0; |
1768 | user = kmalloc(2 + (len * 2), GFP_KERNEL); |
1769 | if (user == NULL) { |
1770 | @@ -448,7 +448,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, |
1771 | } |
1772 | |
1773 | if (len) { |
1774 | - len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp); |
1775 | + len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp); |
1776 | UniStrupr(user); |
1777 | } else { |
1778 | memset(user, '\0', 2); |
1779 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
1780 | index 48b29d2..c2934f8 100644 |
1781 | --- a/fs/cifs/file.c |
1782 | +++ b/fs/cifs/file.c |
1783 | @@ -553,11 +553,10 @@ cifs_relock_file(struct cifsFileInfo *cfile) |
1784 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
1785 | int rc = 0; |
1786 | |
1787 | - /* we are going to update can_cache_brlcks here - need a write access */ |
1788 | - down_write(&cinode->lock_sem); |
1789 | + down_read(&cinode->lock_sem); |
1790 | if (cinode->can_cache_brlcks) { |
1791 | - /* can cache locks - no need to push them */ |
1792 | - up_write(&cinode->lock_sem); |
1793 | + /* can cache locks - no need to relock */ |
1794 | + up_read(&cinode->lock_sem); |
1795 | return rc; |
1796 | } |
1797 | |
1798 | @@ -568,7 +567,7 @@ cifs_relock_file(struct cifsFileInfo *cfile) |
1799 | else |
1800 | rc = tcon->ses->server->ops->push_mand_locks(cfile); |
1801 | |
1802 | - up_write(&cinode->lock_sem); |
1803 | + up_read(&cinode->lock_sem); |
1804 | return rc; |
1805 | } |
1806 | |
1807 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c |
1808 | index 20efd81..449b6cf 100644 |
1809 | --- a/fs/cifs/inode.c |
1810 | +++ b/fs/cifs/inode.c |
1811 | @@ -558,6 +558,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, |
1812 | fattr->cf_mode &= ~(S_IWUGO); |
1813 | |
1814 | fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks); |
1815 | + if (fattr->cf_nlink < 1) { |
1816 | + cifs_dbg(1, "replacing bogus file nlink value %u\n", |
1817 | + fattr->cf_nlink); |
1818 | + fattr->cf_nlink = 1; |
1819 | + } |
1820 | } |
1821 | |
1822 | fattr->cf_uid = cifs_sb->mnt_uid; |
1823 | diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c |
1824 | index 692de13..cea8ecf 100644 |
1825 | --- a/fs/ext3/namei.c |
1826 | +++ b/fs/ext3/namei.c |
1827 | @@ -576,11 +576,8 @@ static int htree_dirblock_to_tree(struct file *dir_file, |
1828 | if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, |
1829 | (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb)) |
1830 | +((char *)de - bh->b_data))) { |
1831 | - /* On error, skip the f_pos to the next block. */ |
1832 | - dir_file->f_pos = (dir_file->f_pos | |
1833 | - (dir->i_sb->s_blocksize - 1)) + 1; |
1834 | - brelse (bh); |
1835 | - return count; |
1836 | + /* silently ignore the rest of the block */ |
1837 | + break; |
1838 | } |
1839 | ext3fs_dirhash(de->name, de->name_len, hinfo); |
1840 | if ((hinfo->hash < start_hash) || |
1841 | diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c |
1842 | index d0f13ea..3742e4c 100644 |
1843 | --- a/fs/ext4/balloc.c |
1844 | +++ b/fs/ext4/balloc.c |
1845 | @@ -38,8 +38,8 @@ ext4_group_t ext4_get_group_number(struct super_block *sb, |
1846 | ext4_group_t group; |
1847 | |
1848 | if (test_opt2(sb, STD_GROUP_SIZE)) |
1849 | - group = (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) + |
1850 | - block) >> |
1851 | + group = (block - |
1852 | + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >> |
1853 | (EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3); |
1854 | else |
1855 | ext4_get_group_no_and_offset(sb, block, &group, NULL); |
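The corrected expression subtracts s_first_data_block before shifting instead of adding it. The difference shows up for blocks near a group boundary; a small worked example, assuming a 1 KiB-block filesystem where s_first_data_block is 1 and each group holds 8192 blocks (so the shift is 10 + 0 + 3 = 13):

#include <stdio.h>

int main(void)
{
    unsigned long long first_data_block = 1, block = 8192;
    unsigned int shift = 13;    /* block bits + cluster bits + 3 */

    /* Block 8192 is the last block of group 0 (blocks 1..8192). */
    printf("old: group %llu\n", (first_data_block + block) >> shift);  /* 1, wrong */
    printf("new: group %llu\n", (block - first_data_block) >> shift);  /* 0, correct */
    return 0;
}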
1856 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
1857 | index bc0f191..e49da58 100644 |
1858 | --- a/fs/ext4/extents.c |
1859 | +++ b/fs/ext4/extents.c |
1860 | @@ -4659,7 +4659,7 @@ static int ext4_xattr_fiemap(struct inode *inode, |
1861 | error = ext4_get_inode_loc(inode, &iloc); |
1862 | if (error) |
1863 | return error; |
1864 | - physical = iloc.bh->b_blocknr << blockbits; |
1865 | + physical = (__u64)iloc.bh->b_blocknr << blockbits; |
1866 | offset = EXT4_GOOD_OLD_INODE_SIZE + |
1867 | EXT4_I(inode)->i_extra_isize; |
1868 | physical += offset; |
1869 | @@ -4667,7 +4667,7 @@ static int ext4_xattr_fiemap(struct inode *inode, |
1870 | flags |= FIEMAP_EXTENT_DATA_INLINE; |
1871 | brelse(iloc.bh); |
1872 | } else { /* external block */ |
1873 | - physical = EXT4_I(inode)->i_file_acl << blockbits; |
1874 | + physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; |
1875 | length = inode->i_sb->s_blocksize; |
1876 | } |
1877 | |
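Both casts here (and the similar __u64 and loff_t casts in the ext4 inline.c and file.c hunks that follow) fix the same class of bug: the left shift is evaluated in the 32-bit width of the block number, so the result wraps before it is ever assigned to the 64-bit physical offset. A small standalone demonstration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t blocknr = 0x00ffffff;      /* block ~16M: byte offset > 4 GiB */
    unsigned int blockbits = 12;        /* 4 KiB blocks */

    uint64_t wrong = (uint64_t)(blocknr << blockbits);  /* shift wraps in 32 bits */
    uint64_t right = (uint64_t)blocknr << blockbits;    /* widened before the shift */

    printf("wrong: 0x%" PRIx64 "\n", wrong);    /* 0xfffff000 */
    printf("right: 0x%" PRIx64 "\n", right);    /* 0xffffff000 */
    return 0;
}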
1878 | diff --git a/fs/ext4/file.c b/fs/ext4/file.c |
1879 | index b1b4d51..b19f0a4 100644 |
1880 | --- a/fs/ext4/file.c |
1881 | +++ b/fs/ext4/file.c |
1882 | @@ -312,7 +312,7 @@ static int ext4_find_unwritten_pgoff(struct inode *inode, |
1883 | blkbits = inode->i_sb->s_blocksize_bits; |
1884 | startoff = *offset; |
1885 | lastoff = startoff; |
1886 | - endoff = (map->m_lblk + map->m_len) << blkbits; |
1887 | + endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; |
1888 | |
1889 | index = startoff >> PAGE_CACHE_SHIFT; |
1890 | end = endoff >> PAGE_CACHE_SHIFT; |
1891 | @@ -457,7 +457,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) |
1892 | ret = ext4_map_blocks(NULL, inode, &map, 0); |
1893 | if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { |
1894 | if (last != start) |
1895 | - dataoff = last << blkbits; |
1896 | + dataoff = (loff_t)last << blkbits; |
1897 | break; |
1898 | } |
1899 | |
1900 | @@ -468,7 +468,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) |
1901 | ext4_es_find_delayed_extent_range(inode, last, last, &es); |
1902 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { |
1903 | if (last != start) |
1904 | - dataoff = last << blkbits; |
1905 | + dataoff = (loff_t)last << blkbits; |
1906 | break; |
1907 | } |
1908 | |
1909 | @@ -486,7 +486,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) |
1910 | } |
1911 | |
1912 | last++; |
1913 | - dataoff = last << blkbits; |
1914 | + dataoff = (loff_t)last << blkbits; |
1915 | } while (last <= end); |
1916 | |
1917 | mutex_unlock(&inode->i_mutex); |
1918 | @@ -540,7 +540,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) |
1919 | ret = ext4_map_blocks(NULL, inode, &map, 0); |
1920 | if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { |
1921 | last += ret; |
1922 | - holeoff = last << blkbits; |
1923 | + holeoff = (loff_t)last << blkbits; |
1924 | continue; |
1925 | } |
1926 | |
1927 | @@ -551,7 +551,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) |
1928 | ext4_es_find_delayed_extent_range(inode, last, last, &es); |
1929 | if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { |
1930 | last = es.es_lblk + es.es_len; |
1931 | - holeoff = last << blkbits; |
1932 | + holeoff = (loff_t)last << blkbits; |
1933 | continue; |
1934 | } |
1935 | |
1936 | @@ -566,7 +566,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) |
1937 | &map, &holeoff); |
1938 | if (!unwritten) { |
1939 | last += ret; |
1940 | - holeoff = last << blkbits; |
1941 | + holeoff = (loff_t)last << blkbits; |
1942 | continue; |
1943 | } |
1944 | } |
1945 | diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c |
1946 | index 3e2bf87..33331b4 100644 |
1947 | --- a/fs/ext4/inline.c |
1948 | +++ b/fs/ext4/inline.c |
1949 | @@ -1842,7 +1842,7 @@ int ext4_inline_data_fiemap(struct inode *inode, |
1950 | if (error) |
1951 | goto out; |
1952 | |
1953 | - physical = iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; |
1954 | + physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; |
1955 | physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; |
1956 | physical += offsetof(struct ext4_inode, i_block); |
1957 | length = i_size_read(inode); |
1958 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
1959 | index d6382b8..ccbfbbb 100644 |
1960 | --- a/fs/ext4/inode.c |
1961 | +++ b/fs/ext4/inode.c |
1962 | @@ -1118,10 +1118,13 @@ static int ext4_write_end(struct file *file, |
1963 | } |
1964 | } |
1965 | |
1966 | - if (ext4_has_inline_data(inode)) |
1967 | - copied = ext4_write_inline_data_end(inode, pos, len, |
1968 | - copied, page); |
1969 | - else |
1970 | + if (ext4_has_inline_data(inode)) { |
1971 | + ret = ext4_write_inline_data_end(inode, pos, len, |
1972 | + copied, page); |
1973 | + if (ret < 0) |
1974 | + goto errout; |
1975 | + copied = ret; |
1976 | + } else |
1977 | copied = block_write_end(file, mapping, pos, |
1978 | len, copied, page, fsdata); |
1979 | |
1980 | @@ -4805,7 +4808,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, |
1981 | struct kstat *stat) |
1982 | { |
1983 | struct inode *inode; |
1984 | - unsigned long delalloc_blocks; |
1985 | + unsigned long long delalloc_blocks; |
1986 | |
1987 | inode = dentry->d_inode; |
1988 | generic_fillattr(inode, stat); |
1989 | @@ -4823,7 +4826,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, |
1990 | delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb), |
1991 | EXT4_I(inode)->i_reserved_data_blocks); |
1992 | |
1993 | - stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; |
1994 | + stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9); |
1995 | return 0; |
1996 | } |
1997 | |
1998 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
1999 | index def8408..59c6750 100644 |
2000 | --- a/fs/ext4/mballoc.c |
2001 | +++ b/fs/ext4/mballoc.c |
2002 | @@ -4735,11 +4735,16 @@ do_more: |
2003 | * blocks being freed are metadata. these blocks shouldn't |
2004 | * be used until this transaction is committed |
2005 | */ |
2006 | + retry: |
2007 | new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); |
2008 | if (!new_entry) { |
2009 | - ext4_mb_unload_buddy(&e4b); |
2010 | - err = -ENOMEM; |
2011 | - goto error_return; |
2012 | + /* |
2013 | + * We use a retry loop because |
2014 | + * ext4_free_blocks() is not allowed to fail. |
2015 | + */ |
2016 | + cond_resched(); |
2017 | + congestion_wait(BLK_RW_ASYNC, HZ/50); |
2018 | + goto retry; |
2019 | } |
2020 | new_entry->efd_start_cluster = bit; |
2021 | new_entry->efd_group = block_group; |
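The new retry: loop reflects the constraint that this path runs on behalf of ext4_free_blocks(), which has no way to report failure, so a transient allocation failure is absorbed by yielding, waiting briefly, and trying again rather than returning -ENOMEM. A userspace sketch of the same "cannot fail" allocation wrapper, with usleep() standing in for cond_resched()/congestion_wait():

#include <stdlib.h>
#include <unistd.h>

/* Keep retrying with a short back-off until the allocation succeeds. */
static void *alloc_must_succeed(size_t size)
{
    void *p;

    while (!(p = malloc(size)))
        usleep(20000);      /* back off ~20 ms, then retry */
    return p;
}

int main(void)
{
    void *p = alloc_must_succeed(128);

    free(p);
    return 0;
}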
2022 | diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c |
2023 | index 6653fc3..ab2f6dc 100644 |
2024 | --- a/fs/ext4/namei.c |
2025 | +++ b/fs/ext4/namei.c |
2026 | @@ -918,11 +918,8 @@ static int htree_dirblock_to_tree(struct file *dir_file, |
2027 | bh->b_data, bh->b_size, |
2028 | (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) |
2029 | + ((char *)de - bh->b_data))) { |
2030 | - /* On error, skip the f_pos to the next block. */ |
2031 | - dir_file->f_pos = (dir_file->f_pos | |
2032 | - (dir->i_sb->s_blocksize - 1)) + 1; |
2033 | - brelse(bh); |
2034 | - return count; |
2035 | + /* silently ignore the rest of the block */ |
2036 | + break; |
2037 | } |
2038 | ext4fs_dirhash(de->name, de->name_len, hinfo); |
2039 | if ((hinfo->hash < start_hash) || |
2040 | diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
2041 | index b27c96d..49d3c01 100644 |
2042 | --- a/fs/ext4/resize.c |
2043 | +++ b/fs/ext4/resize.c |
2044 | @@ -1656,12 +1656,10 @@ errout: |
2045 | err = err2; |
2046 | |
2047 | if (!err) { |
2048 | - ext4_fsblk_t first_block; |
2049 | - first_block = ext4_group_first_block_no(sb, 0); |
2050 | if (test_opt(sb, DEBUG)) |
2051 | printk(KERN_DEBUG "EXT4-fs: extended group to %llu " |
2052 | "blocks\n", ext4_blocks_count(es)); |
2053 | - update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr - first_block, |
2054 | + update_backups(sb, EXT4_SB(sb)->s_sbh->b_blocknr, |
2055 | (char *)es, sizeof(struct ext4_super_block), 0); |
2056 | } |
2057 | return err; |
2058 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
2059 | index 94cc84d..6681c03 100644 |
2060 | --- a/fs/ext4/super.c |
2061 | +++ b/fs/ext4/super.c |
2062 | @@ -1684,12 +1684,6 @@ static inline void ext4_show_quota_options(struct seq_file *seq, |
2063 | |
2064 | if (sbi->s_qf_names[GRPQUOTA]) |
2065 | seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]); |
2066 | - |
2067 | - if (test_opt(sb, USRQUOTA)) |
2068 | - seq_puts(seq, ",usrquota"); |
2069 | - |
2070 | - if (test_opt(sb, GRPQUOTA)) |
2071 | - seq_puts(seq, ",grpquota"); |
2072 | #endif |
2073 | } |
2074 | |
2075 | @@ -3586,10 +3580,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
2076 | sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb)); |
2077 | sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb)); |
2078 | |
2079 | - /* Do we have standard group size of blocksize * 8 blocks ? */ |
2080 | - if (sbi->s_blocks_per_group == blocksize << 3) |
2081 | - set_opt2(sb, STD_GROUP_SIZE); |
2082 | - |
2083 | for (i = 0; i < 4; i++) |
2084 | sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]); |
2085 | sbi->s_def_hash_version = es->s_def_hash_version; |
2086 | @@ -3659,6 +3649,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) |
2087 | goto failed_mount; |
2088 | } |
2089 | |
2090 | + /* Do we have standard group size of clustersize * 8 blocks ? */ |
2091 | + if (sbi->s_blocks_per_group == clustersize << 3) |
2092 | + set_opt2(sb, STD_GROUP_SIZE); |
2093 | + |
2094 | /* |
2095 | * Test whether we have more sectors than will fit in sector_t, |
2096 | * and whether the max offset is addressable by the page cache. |
2097 | diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c |
2098 | index 9545757..aaa1a3f 100644 |
2099 | --- a/fs/jbd2/journal.c |
2100 | +++ b/fs/jbd2/journal.c |
2101 | @@ -1318,6 +1318,7 @@ static int journal_reset(journal_t *journal) |
2102 | static void jbd2_write_superblock(journal_t *journal, int write_op) |
2103 | { |
2104 | struct buffer_head *bh = journal->j_sb_buffer; |
2105 | + journal_superblock_t *sb = journal->j_superblock; |
2106 | int ret; |
2107 | |
2108 | trace_jbd2_write_superblock(journal, write_op); |
2109 | @@ -1339,6 +1340,7 @@ static void jbd2_write_superblock(journal_t *journal, int write_op) |
2110 | clear_buffer_write_io_error(bh); |
2111 | set_buffer_uptodate(bh); |
2112 | } |
2113 | + jbd2_superblock_csum_set(journal, sb); |
2114 | get_bh(bh); |
2115 | bh->b_end_io = end_buffer_write_sync; |
2116 | ret = submit_bh(write_op, bh); |
2117 | @@ -1435,7 +1437,6 @@ void jbd2_journal_update_sb_errno(journal_t *journal) |
2118 | jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", |
2119 | journal->j_errno); |
2120 | sb->s_errno = cpu_to_be32(journal->j_errno); |
2121 | - jbd2_superblock_csum_set(journal, sb); |
2122 | read_unlock(&journal->j_state_lock); |
2123 | |
2124 | jbd2_write_superblock(journal, WRITE_SYNC); |
2125 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
2126 | index 10f524c..e0c0bc2 100644 |
2127 | --- a/fs/jbd2/transaction.c |
2128 | +++ b/fs/jbd2/transaction.c |
2129 | @@ -517,10 +517,10 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) |
2130 | &transaction->t_outstanding_credits); |
2131 | if (atomic_dec_and_test(&transaction->t_updates)) |
2132 | wake_up(&journal->j_wait_updates); |
2133 | + tid = transaction->t_tid; |
2134 | spin_unlock(&transaction->t_handle_lock); |
2135 | |
2136 | jbd_debug(2, "restarting handle %p\n", handle); |
2137 | - tid = transaction->t_tid; |
2138 | need_to_start = !tid_geq(journal->j_commit_request, tid); |
2139 | read_unlock(&journal->j_state_lock); |
2140 | if (need_to_start) |
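Moving the tid assignment up means the field is sampled while t_handle_lock is still held; once the handle count has been dropped and the lock released, the transaction may be committed and is no longer safe to dereference, so the later unlocked read was a (theoretical) race. A small pthread sketch of the general rule, with illustrative names only:

#include <pthread.h>
#include <stdio.h>

struct transaction {
    pthread_mutex_t lock;
    unsigned int tid;
};

/* Snapshot any field still needed after unlock while the lock is held. */
static unsigned int finish_and_get_tid(struct transaction *t)
{
    unsigned int tid;

    pthread_mutex_lock(&t->lock);
    /* ... detach our updates from the transaction ... */
    tid = t->tid;                   /* copy while still protected */
    pthread_mutex_unlock(&t->lock);
    /* the structure may change (or go away) from here on; use the copy */
    return tid;
}

int main(void)
{
    struct transaction t = { PTHREAD_MUTEX_INITIALIZER, 7 };

    printf("tid %u\n", finish_and_get_tid(&t));
    return 0;
}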
2141 | diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c |
2142 | index 2e3ea30..5b8d944 100644 |
2143 | --- a/fs/ocfs2/xattr.c |
2144 | +++ b/fs/ocfs2/xattr.c |
2145 | @@ -6499,6 +6499,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args) |
2146 | } |
2147 | |
2148 | new_oi = OCFS2_I(args->new_inode); |
2149 | + /* |
2150 | + * Adjust extent record count to reserve space for extended attribute. |
2151 | + * Inline data count had been adjusted in ocfs2_duplicate_inline_data(). |
2152 | + */ |
2153 | + if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) && |
2154 | + !(ocfs2_inode_is_fast_symlink(args->new_inode))) { |
2155 | + struct ocfs2_extent_list *el = &new_di->id2.i_list; |
2156 | + le16_add_cpu(&el->l_count, -(inline_size / |
2157 | + sizeof(struct ocfs2_extent_rec))); |
2158 | + } |
2159 | spin_lock(&new_oi->ip_lock); |
2160 | new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL; |
2161 | new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); |
2162 | diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c |
2163 | index f21acf0..879b997 100644 |
2164 | --- a/fs/ubifs/super.c |
2165 | +++ b/fs/ubifs/super.c |
2166 | @@ -1412,7 +1412,7 @@ static int mount_ubifs(struct ubifs_info *c) |
2167 | |
2168 | ubifs_msg("mounted UBI device %d, volume %d, name \"%s\"%s", |
2169 | c->vi.ubi_num, c->vi.vol_id, c->vi.name, |
2170 | - c->ro_mount ? ", R/O mode" : NULL); |
2171 | + c->ro_mount ? ", R/O mode" : ""); |
2172 | x = (long long)c->main_lebs * c->leb_size; |
2173 | y = (long long)c->log_lebs * c->leb_size + c->max_bud_bytes; |
2174 | ubifs_msg("LEB size: %d bytes (%d KiB), min./max. I/O unit sizes: %d bytes/%d bytes", |
2175 | diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h |
2176 | index 8bda129..8852d37 100644 |
2177 | --- a/include/linux/cgroup.h |
2178 | +++ b/include/linux/cgroup.h |
2179 | @@ -646,22 +646,60 @@ static inline struct cgroup_subsys_state *cgroup_subsys_state( |
2180 | return cgrp->subsys[subsys_id]; |
2181 | } |
2182 | |
2183 | -/* |
2184 | - * function to get the cgroup_subsys_state which allows for extra |
2185 | - * rcu_dereference_check() conditions, such as locks used during the |
2186 | - * cgroup_subsys::attach() methods. |
2187 | +/** |
2188 | + * task_css_set_check - obtain a task's css_set with extra access conditions |
2189 | + * @task: the task to obtain css_set for |
2190 | + * @__c: extra condition expression to be passed to rcu_dereference_check() |
2191 | + * |
2192 | + * A task's css_set is RCU protected, initialized and exited while holding |
2193 | + * task_lock(), and can only be modified while holding both cgroup_mutex |
2194 | + * and task_lock() while the task is alive. This macro verifies that the |
2195 | + * caller is inside proper critical section and returns @task's css_set. |
2196 | + * |
2197 | + * The caller can also specify additional allowed conditions via @__c, such |
2198 | + * as locks used during the cgroup_subsys::attach() methods. |
2199 | */ |
2200 | #ifdef CONFIG_PROVE_RCU |
2201 | extern struct mutex cgroup_mutex; |
2202 | -#define task_subsys_state_check(task, subsys_id, __c) \ |
2203 | - rcu_dereference_check((task)->cgroups->subsys[(subsys_id)], \ |
2204 | - lockdep_is_held(&(task)->alloc_lock) || \ |
2205 | - lockdep_is_held(&cgroup_mutex) || (__c)) |
2206 | +#define task_css_set_check(task, __c) \ |
2207 | + rcu_dereference_check((task)->cgroups, \ |
2208 | + lockdep_is_held(&(task)->alloc_lock) || \ |
2209 | + lockdep_is_held(&cgroup_mutex) || (__c)) |
2210 | #else |
2211 | -#define task_subsys_state_check(task, subsys_id, __c) \ |
2212 | - rcu_dereference((task)->cgroups->subsys[(subsys_id)]) |
2213 | +#define task_css_set_check(task, __c) \ |
2214 | + rcu_dereference((task)->cgroups) |
2215 | #endif |
2216 | |
2217 | +/** |
2218 | + * task_subsys_state_check - obtain css for (task, subsys) w/ extra access conds |
2219 | + * @task: the target task |
2220 | + * @subsys_id: the target subsystem ID |
2221 | + * @__c: extra condition expression to be passed to rcu_dereference_check() |
2222 | + * |
2223 | + * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The |
2224 | + * synchronization rules are the same as task_css_set_check(). |
2225 | + */ |
2226 | +#define task_subsys_state_check(task, subsys_id, __c) \ |
2227 | + task_css_set_check((task), (__c))->subsys[(subsys_id)] |
2228 | + |
2229 | +/** |
2230 | + * task_css_set - obtain a task's css_set |
2231 | + * @task: the task to obtain css_set for |
2232 | + * |
2233 | + * See task_css_set_check(). |
2234 | + */ |
2235 | +static inline struct css_set *task_css_set(struct task_struct *task) |
2236 | +{ |
2237 | + return task_css_set_check(task, false); |
2238 | +} |
2239 | + |
2240 | +/** |
2241 | + * task_subsys_state - obtain css for (task, subsys) |
2242 | + * @task: the target task |
2243 | + * @subsys_id: the target subsystem ID |
2244 | + * |
2245 | + * See task_subsys_state_check(). |
2246 | + */ |
2247 | static inline struct cgroup_subsys_state * |
2248 | task_subsys_state(struct task_struct *task, int subsys_id) |
2249 | { |
2250 | diff --git a/include/linux/nbd.h b/include/linux/nbd.h |
2251 | index 4871170..ae4981e 100644 |
2252 | --- a/include/linux/nbd.h |
2253 | +++ b/include/linux/nbd.h |
2254 | @@ -41,6 +41,7 @@ struct nbd_device { |
2255 | u64 bytesize; |
2256 | pid_t pid; /* pid of nbd-client, if attached */ |
2257 | int xmit_timeout; |
2258 | + int disconnect; /* a disconnect has been requested by user */ |
2259 | }; |
2260 | |
2261 | #endif |
2262 | diff --git a/kernel/cgroup.c b/kernel/cgroup.c |
2263 | index a7c9e6d..c6e77ef 100644 |
2264 | --- a/kernel/cgroup.c |
2265 | +++ b/kernel/cgroup.c |
2266 | @@ -3727,6 +3727,23 @@ static int cgroup_write_notify_on_release(struct cgroup *cgrp, |
2267 | } |
2268 | |
2269 | /* |
2270 | + * When dput() is called asynchronously, if umount has been done and |
2271 | + * then deactivate_super() in cgroup_free_fn() kills the superblock, |
2272 | + * there's a small window that vfs will see the root dentry with non-zero |
2273 | + * refcnt and trigger BUG(). |
2274 | + * |
2275 | + * That's why we hold a reference before dput() and drop it right after. |
2276 | + */ |
2277 | +static void cgroup_dput(struct cgroup *cgrp) |
2278 | +{ |
2279 | + struct super_block *sb = cgrp->root->sb; |
2280 | + |
2281 | + atomic_inc(&sb->s_active); |
2282 | + dput(cgrp->dentry); |
2283 | + deactivate_super(sb); |
2284 | +} |
2285 | + |
2286 | +/* |
2287 | * Unregister event and free resources. |
2288 | * |
2289 | * Gets called from workqueue. |
2290 | @@ -3746,7 +3763,7 @@ static void cgroup_event_remove(struct work_struct *work) |
2291 | |
2292 | eventfd_ctx_put(event->eventfd); |
2293 | kfree(event); |
2294 | - dput(cgrp->dentry); |
2295 | + cgroup_dput(cgrp); |
2296 | } |
2297 | |
2298 | /* |
2299 | @@ -4031,12 +4048,8 @@ static void css_dput_fn(struct work_struct *work) |
2300 | { |
2301 | struct cgroup_subsys_state *css = |
2302 | container_of(work, struct cgroup_subsys_state, dput_work); |
2303 | - struct dentry *dentry = css->cgroup->dentry; |
2304 | - struct super_block *sb = dentry->d_sb; |
2305 | |
2306 | - atomic_inc(&sb->s_active); |
2307 | - dput(dentry); |
2308 | - deactivate_super(sb); |
2309 | + cgroup_dput(css->cgroup); |
2310 | } |
2311 | |
2312 | static void init_cgroup_css(struct cgroup_subsys_state *css, |
2313 | diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c |
2314 | index fa17855..dc4db32 100644 |
2315 | --- a/kernel/irq/manage.c |
2316 | +++ b/kernel/irq/manage.c |
2317 | @@ -555,9 +555,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags) |
2318 | return 0; |
2319 | |
2320 | if (irq_settings_can_request(desc)) { |
2321 | - if (desc->action) |
2322 | - if (irqflags & desc->action->flags & IRQF_SHARED) |
2323 | - canrequest =1; |
2324 | + if (!desc->action || |
2325 | + irqflags & desc->action->flags & IRQF_SHARED) |
2326 | + canrequest = 1; |
2327 | } |
2328 | irq_put_desc_unlock(desc, flags); |
2329 | return canrequest; |
2330 | diff --git a/kernel/timer.c b/kernel/timer.c |
2331 | index 15ffdb3..15bc1b4 100644 |
2332 | --- a/kernel/timer.c |
2333 | +++ b/kernel/timer.c |
2334 | @@ -149,9 +149,11 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu, |
2335 | /* now that we have rounded, subtract the extra skew again */ |
2336 | j -= cpu * 3; |
2337 | |
2338 | - if (j <= jiffies) /* rounding ate our timeout entirely; */ |
2339 | - return original; |
2340 | - return j; |
2341 | + /* |
2342 | + * Make sure j is still in the future. Otherwise return the |
2343 | + * unmodified value. |
2344 | + */ |
2345 | + return time_is_after_jiffies(j) ? j : original; |
2346 | } |
2347 | |
2348 | /** |
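round_jiffies_common() now falls back to the original value whenever the rounded one is no longer in the future, and it tests that with time_is_after_jiffies() rather than a plain "j <= jiffies" comparison. The plain comparison breaks once the jiffies counter wraps; the kernel's time_after() family compares through a signed difference, which survives the wrap, as this userspace illustration shows (the macro mirrors the kernel definition for a 32-bit counter):

#include <stdint.h>
#include <stdio.h>

/* True if a is after b, even across a counter wrap. */
#define time_after32(a, b)  ((int32_t)((b) - (a)) < 0)

int main(void)
{
    uint32_t jiffies = 0xfffffff0;      /* counter just before wrapping */
    uint32_t j = jiffies + 0x20;        /* wraps to 0x10, but is logically later */

    printf("naive j > jiffies: %d\n", j > jiffies);                     /* 0, wrong */
    printf("time_after32(j, jiffies): %d\n", time_after32(j, jiffies)); /* 1 */
    return 0;
}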
2349 | diff --git a/mm/memcontrol.c b/mm/memcontrol.c |
2350 | index fd79df5..15b0409 100644 |
2351 | --- a/mm/memcontrol.c |
2352 | +++ b/mm/memcontrol.c |
2353 | @@ -6296,14 +6296,6 @@ mem_cgroup_css_online(struct cgroup *cont) |
2354 | |
2355 | error = memcg_init_kmem(memcg, &mem_cgroup_subsys); |
2356 | mutex_unlock(&memcg_create_mutex); |
2357 | - if (error) { |
2358 | - /* |
2359 | - * We call put now because our (and parent's) refcnts |
2360 | - * are already in place. mem_cgroup_put() will internally |
2361 | - * call __mem_cgroup_free, so return directly |
2362 | - */ |
2363 | - mem_cgroup_put(memcg); |
2364 | - } |
2365 | return error; |
2366 | } |
2367 | |
2368 | diff --git a/mm/page_alloc.c b/mm/page_alloc.c |
2369 | index c3edb62..2ee0fd3 100644 |
2370 | --- a/mm/page_alloc.c |
2371 | +++ b/mm/page_alloc.c |
2372 | @@ -6142,6 +6142,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) |
2373 | list_del(&page->lru); |
2374 | rmv_page_order(page); |
2375 | zone->free_area[order].nr_free--; |
2376 | +#ifdef CONFIG_HIGHMEM |
2377 | + if (PageHighMem(page)) |
2378 | + totalhigh_pages -= 1 << order; |
2379 | +#endif |
2380 | for (i = 0; i < (1 << order); i++) |
2381 | SetPageReserved((page+i)); |
2382 | pfn += (1 << order); |
2383 | diff --git a/mm/slab.c b/mm/slab.c |
2384 | index 8ccd296..bd88411 100644 |
2385 | --- a/mm/slab.c |
2386 | +++ b/mm/slab.c |
2387 | @@ -565,7 +565,7 @@ static void init_node_lock_keys(int q) |
2388 | if (slab_state < UP) |
2389 | return; |
2390 | |
2391 | - for (i = 1; i < PAGE_SHIFT + MAX_ORDER; i++) { |
2392 | + for (i = 1; i <= KMALLOC_SHIFT_HIGH; i++) { |
2393 | struct kmem_cache_node *n; |
2394 | struct kmem_cache *cache = kmalloc_caches[i]; |
2395 |