Magellan Linux

Contents of /trunk/kernel-alx/patches-3.10/0132-3.10.33-all-fixes.patch

Revision 2471 - Wed Jul 2 10:42:37 2014 UTC by niro
File MIME type: application/x-xz
File size: 128935 bytes
Log message: -uncompressed
1 diff --git a/Makefile b/Makefile
2 index aab192446f50..1e602eb906fa 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 10
8 -SUBLEVEL = 32
9 +SUBLEVEL = 33
10 EXTRAVERSION =
11 NAME = TOSSUG Baby Fish
12
13 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
14 index 17d0ae8672fa..a25e62d2de6e 100644
15 --- a/arch/arm/include/asm/cacheflush.h
16 +++ b/arch/arm/include/asm/cacheflush.h
17 @@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
18 static inline void __flush_icache_all(void)
19 {
20 __flush_icache_preferred();
21 + dsb();
22 }
23
24 /*
25 diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
26 index 6220e9fdf4c7..dd64cc6f9cba 100644
27 --- a/arch/arm/include/asm/spinlock.h
28 +++ b/arch/arm/include/asm/spinlock.h
29 @@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
30
31 static inline int arch_spin_trylock(arch_spinlock_t *lock)
32 {
33 - unsigned long tmp;
34 + unsigned long contended, res;
35 u32 slock;
36
37 - __asm__ __volatile__(
38 -" ldrex %0, [%2]\n"
39 -" subs %1, %0, %0, ror #16\n"
40 -" addeq %0, %0, %3\n"
41 -" strexeq %1, %0, [%2]"
42 - : "=&r" (slock), "=&r" (tmp)
43 - : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
44 - : "cc");
45 -
46 - if (tmp == 0) {
47 + do {
48 + __asm__ __volatile__(
49 + " ldrex %0, [%3]\n"
50 + " mov %2, #0\n"
51 + " subs %1, %0, %0, ror #16\n"
52 + " addeq %0, %0, %4\n"
53 + " strexeq %2, %0, [%3]"
54 + : "=&r" (slock), "=&r" (contended), "=r" (res)
55 + : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
56 + : "cc");
57 + } while (res);
58 +
59 + if (!contended) {
60 smp_mb();
61 return 1;
62 } else {
63 @@ -165,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
64
65 static inline int arch_write_trylock(arch_rwlock_t *rw)
66 {
67 - unsigned long tmp;
68 -
69 - __asm__ __volatile__(
70 -" ldrex %0, [%1]\n"
71 -" teq %0, #0\n"
72 -" strexeq %0, %2, [%1]"
73 - : "=&r" (tmp)
74 - : "r" (&rw->lock), "r" (0x80000000)
75 - : "cc");
76 -
77 - if (tmp == 0) {
78 + unsigned long contended, res;
79 +
80 + do {
81 + __asm__ __volatile__(
82 + " ldrex %0, [%2]\n"
83 + " mov %1, #0\n"
84 + " teq %0, #0\n"
85 + " strexeq %1, %3, [%2]"
86 + : "=&r" (contended), "=&r" (res)
87 + : "r" (&rw->lock), "r" (0x80000000)
88 + : "cc");
89 + } while (res);
90 +
91 + if (!contended) {
92 smp_mb();
93 return 1;
94 } else {
95 @@ -251,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
96
97 static inline int arch_read_trylock(arch_rwlock_t *rw)
98 {
99 - unsigned long tmp, tmp2 = 1;
100 -
101 - __asm__ __volatile__(
102 -" ldrex %0, [%2]\n"
103 -" adds %0, %0, #1\n"
104 -" strexpl %1, %0, [%2]\n"
105 - : "=&r" (tmp), "+r" (tmp2)
106 - : "r" (&rw->lock)
107 - : "cc");
108 -
109 - smp_mb();
110 - return tmp2 == 0;
111 + unsigned long contended, res;
112 +
113 + do {
114 + __asm__ __volatile__(
115 + " ldrex %0, [%2]\n"
116 + " mov %1, #0\n"
117 + " adds %0, %0, #1\n"
118 + " strexpl %1, %0, [%2]"
119 + : "=&r" (contended), "=&r" (res)
120 + : "r" (&rw->lock)
121 + : "cc");
122 + } while (res);
123 +
124 + /* If the lock is negative, then it is already held for write. */
125 + if (contended < 0x80000000) {
126 + smp_mb();
127 + return 1;
128 + } else {
129 + return 0;
130 + }
131 }
132
133 /* read_can_lock - would read_trylock() succeed? */
134 diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
135 index 6c4da1254f53..55bcb77c623e 100644
136 --- a/arch/arm/mach-omap2/gpmc.c
137 +++ b/arch/arm/mach-omap2/gpmc.c
138 @@ -1335,7 +1335,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
139 of_property_read_bool(np, "gpmc,time-para-granularity");
140 }
141
142 -#ifdef CONFIG_MTD_NAND
143 +#if IS_ENABLED(CONFIG_MTD_NAND)
144
145 static const char * const nand_ecc_opts[] = {
146 [OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw",
147 @@ -1391,7 +1391,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
148 }
149 #endif
150
151 -#ifdef CONFIG_MTD_ONENAND
152 +#if IS_ENABLED(CONFIG_MTD_ONENAND)
153 static int gpmc_probe_onenand_child(struct platform_device *pdev,
154 struct device_node *child)
155 {
156 diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
157 index 9f852c6fe5b9..d5ebcd0bb622 100644
158 --- a/arch/arm/mach-tegra/common.c
159 +++ b/arch/arm/mach-tegra/common.c
160 @@ -22,6 +22,7 @@
161 #include <linux/io.h>
162 #include <linux/clk.h>
163 #include <linux/delay.h>
164 +#include <linux/of.h>
165 #include <linux/irqchip.h>
166 #include <linux/clk/tegra.h>
167
168 @@ -80,10 +81,20 @@ void tegra_assert_system_reset(char mode, const char *cmd)
169 static void __init tegra_init_cache(void)
170 {
171 #ifdef CONFIG_CACHE_L2X0
172 + static const struct of_device_id pl310_ids[] __initconst = {
173 + { .compatible = "arm,pl310-cache", },
174 + {}
175 + };
176 +
177 + struct device_node *np;
178 int ret;
179 void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
180 u32 aux_ctrl, cache_type;
181
182 + np = of_find_matching_node(NULL, pl310_ids);
183 + if (!np)
184 + return;
185 +
186 cache_type = readl(p + L2X0_CACHE_TYPE);
187 aux_ctrl = (cache_type & 0x700) << (17-8);
188 aux_ctrl |= 0x7C400001;
189 diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
190 index ef3e0f3aac96..6c9d7054d997 100644
191 --- a/arch/arm/mm/dma-mapping.c
192 +++ b/arch/arm/mm/dma-mapping.c
193 @@ -1311,7 +1311,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
194 *handle = DMA_ERROR_CODE;
195 size = PAGE_ALIGN(size);
196
197 - if (gfp & GFP_ATOMIC)
198 + if (!(gfp & __GFP_WAIT))
199 return __iommu_alloc_atomic(dev, size, handle);
200
201 pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
202 diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
203 index 919405e20b80..d07352819580 100644
204 --- a/arch/arm/mm/proc-v6.S
205 +++ b/arch/arm/mm/proc-v6.S
206 @@ -206,7 +206,6 @@ __v6_setup:
207 mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
208 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
209 mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
210 - mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
211 #ifdef CONFIG_MMU
212 mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
213 mcr p15, 0, r0, c2, c0, 2 @ TTB control register
214 @@ -216,6 +215,8 @@ __v6_setup:
215 ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
216 mcr p15, 0, r8, c2, c0, 1 @ load TTB1
217 #endif /* CONFIG_MMU */
218 + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
219 + @ complete invalidations
220 adr r5, v6_crval
221 ldmia r5, {r5, r6}
222 #ifdef CONFIG_CPU_ENDIAN_BE8
223 diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
224 index 5fbccee5f644..19da84172cc3 100644
225 --- a/arch/arm/mm/proc-v7.S
226 +++ b/arch/arm/mm/proc-v7.S
227 @@ -329,7 +329,6 @@ __v7_setup:
228
229 3: mov r10, #0
230 mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
231 - dsb
232 #ifdef CONFIG_MMU
233 mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
234 v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
235 @@ -338,6 +337,7 @@ __v7_setup:
236 mcr p15, 0, r5, c10, c2, 0 @ write PRRR
237 mcr p15, 0, r6, c10, c2, 1 @ write NMRR
238 #endif
239 + dsb @ Complete invalidations
240 #ifndef CONFIG_ARM_THUMBEE
241 mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
242 and r0, r0, #(0xf << 12) @ ThumbEE enabled field
243 diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
244 index d25459ff57fc..048334bb2651 100644
245 --- a/arch/arm64/kernel/stacktrace.c
246 +++ b/arch/arm64/kernel/stacktrace.c
247 @@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
248
249 frame->sp = fp + 0x10;
250 frame->fp = *(unsigned long *)(fp);
251 - frame->pc = *(unsigned long *)(fp + 8);
252 + /*
253 + * -4 here because we care about the PC at time of bl,
254 + * not where the return will go.
255 + */
256 + frame->pc = *(unsigned long *)(fp + 8) - 4;
257
258 return 0;
259 }
260 diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
261 index 22fb66590dcd..dba48a5d5bb9 100644
262 --- a/arch/avr32/Makefile
263 +++ b/arch/avr32/Makefile
264 @@ -11,7 +11,7 @@ all: uImage vmlinux.elf
265
266 KBUILD_DEFCONFIG := atstk1002_defconfig
267
268 -KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
269 +KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
270 KBUILD_AFLAGS += -mrelax -mno-pic
271 KBUILD_CFLAGS_MODULE += -mno-relax
272 LDFLAGS_vmlinux += --relax
273 diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
274 index 9764a1a1073e..c1466a872b9c 100644
275 --- a/arch/avr32/boards/mimc200/fram.c
276 +++ b/arch/avr32/boards/mimc200/fram.c
277 @@ -11,6 +11,7 @@
278 #define FRAM_VERSION "1.0"
279
280 #include <linux/miscdevice.h>
281 +#include <linux/module.h>
282 #include <linux/proc_fs.h>
283 #include <linux/mm.h>
284 #include <linux/io.h>
285 diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
286 index 9ec3fe174cba..555ae67e4086 100644
287 --- a/arch/powerpc/kernel/crash_dump.c
288 +++ b/arch/powerpc/kernel/crash_dump.c
289 @@ -108,17 +108,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
290 size_t csize, unsigned long offset, int userbuf)
291 {
292 void *vaddr;
293 + phys_addr_t paddr;
294
295 if (!csize)
296 return 0;
297
298 csize = min_t(size_t, csize, PAGE_SIZE);
299 + paddr = pfn << PAGE_SHIFT;
300
301 - if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
302 - vaddr = __va(pfn << PAGE_SHIFT);
303 + if (memblock_is_region_memory(paddr, csize)) {
304 + vaddr = __va(paddr);
305 csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
306 } else {
307 - vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
308 + vaddr = __ioremap(paddr, PAGE_SIZE, 0);
309 csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
310 iounmap(vaddr);
311 }
312 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
313 index 217ca5c75b20..2882d614221f 100644
314 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
315 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
316 @@ -34,12 +34,7 @@
317 #include "offline_states.h"
318
319 /* This version can't take the spinlock, because it never returns */
320 -static struct rtas_args rtas_stop_self_args = {
321 - .token = RTAS_UNKNOWN_SERVICE,
322 - .nargs = 0,
323 - .nret = 1,
324 - .rets = &rtas_stop_self_args.args[0],
325 -};
326 +static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
327
328 static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
329 CPU_STATE_OFFLINE;
330 @@ -92,15 +87,20 @@ void set_default_offline_state(int cpu)
331
332 static void rtas_stop_self(void)
333 {
334 - struct rtas_args *args = &rtas_stop_self_args;
335 + struct rtas_args args = {
336 + .token = cpu_to_be32(rtas_stop_self_token),
337 + .nargs = 0,
338 + .nret = 1,
339 + .rets = &args.args[0],
340 + };
341
342 local_irq_disable();
343
344 - BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
345 + BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
346
347 printk("cpu %u (hwid %u) Ready to die...\n",
348 smp_processor_id(), hard_smp_processor_id());
349 - enter_rtas(__pa(args));
350 + enter_rtas(__pa(&args));
351
352 panic("Alas, I survived.\n");
353 }
354 @@ -391,10 +391,10 @@ static int __init pseries_cpu_hotplug_init(void)
355 }
356 }
357
358 - rtas_stop_self_args.token = rtas_token("stop-self");
359 + rtas_stop_self_token = rtas_token("stop-self");
360 qcss_tok = rtas_token("query-cpu-stopped-state");
361
362 - if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
363 + if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
364 qcss_tok == RTAS_UNKNOWN_SERVICE) {
365 printk(KERN_INFO "CPU Hotplug not supported by firmware "
366 "- disabling.\n");
367 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
368 index 1025f3c99d20..a69b67d968d4 100644
369 --- a/arch/x86/kernel/cpu/perf_event.c
370 +++ b/arch/x86/kernel/cpu/perf_event.c
371 @@ -1165,6 +1165,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
372 for (i = 0; i < cpuc->n_events; i++) {
373 if (event == cpuc->event_list[i]) {
374
375 + if (i >= cpuc->n_events - cpuc->n_added)
376 + --cpuc->n_added;
377 +
378 if (x86_pmu.put_event_constraints)
379 x86_pmu.put_event_constraints(cpuc, event);
380
381 diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
382 index 872079a67e4d..f7d0672481fd 100644
383 --- a/arch/x86/kernel/pci-dma.c
384 +++ b/arch/x86/kernel/pci-dma.c
385 @@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
386 flag |= __GFP_ZERO;
387 again:
388 page = NULL;
389 - if (!(flag & GFP_ATOMIC))
390 + /* CMA can be used only in the context which permits sleeping */
391 + if (flag & __GFP_WAIT)
392 page = dma_alloc_from_contiguous(dev, count, get_order(size));
393 + /* fallback */
394 if (!page)
395 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
396 if (!page)
397 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
398 index 1bd6ea07d7d3..1be0a9e75d1f 100644
399 --- a/arch/x86/kvm/x86.c
400 +++ b/arch/x86/kvm/x86.c
401 @@ -5982,7 +5982,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
402 frag->len -= len;
403 }
404
405 - if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
406 + if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
407 vcpu->mmio_needed = 0;
408 if (vcpu->mmio_is_write)
409 return 1;
410 diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
411 index 917488a0ab00..f2faa58f9a43 100644
412 --- a/arch/xtensa/include/asm/traps.h
413 +++ b/arch/xtensa/include/asm/traps.h
414 @@ -22,25 +22,37 @@ extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
415
416 static inline void spill_registers(void)
417 {
418 -
419 +#if XCHAL_NUM_AREGS > 16
420 __asm__ __volatile__ (
421 - "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
422 - "mov a12, a0\n\t"
423 - "rsr a13, sar\n\t"
424 - "xsr a14, ps\n\t"
425 - "movi a0, _spill_registers\n\t"
426 - "rsync\n\t"
427 - "callx0 a0\n\t"
428 - "mov a0, a12\n\t"
429 - "wsr a13, sar\n\t"
430 - "wsr a14, ps\n\t"
431 - : :
432 -#if defined(CONFIG_FRAME_POINTER)
433 - : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
434 + " call12 1f\n"
435 + " _j 2f\n"
436 + " retw\n"
437 + " .align 4\n"
438 + "1:\n"
439 + " _entry a1, 48\n"
440 + " addi a12, a0, 3\n"
441 +#if XCHAL_NUM_AREGS > 32
442 + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
443 + " _entry a1, 48\n"
444 + " mov a12, a0\n"
445 + " .endr\n"
446 +#endif
447 + " _entry a1, 48\n"
448 +#if XCHAL_NUM_AREGS % 12 == 0
449 + " mov a8, a8\n"
450 +#elif XCHAL_NUM_AREGS % 12 == 4
451 + " mov a12, a12\n"
452 +#elif XCHAL_NUM_AREGS % 12 == 8
453 + " mov a4, a4\n"
454 +#endif
455 + " retw\n"
456 + "2:\n"
457 + : : : "a12", "a13", "memory");
458 #else
459 - : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
460 + __asm__ __volatile__ (
461 + " mov a12, a12\n"
462 + : : : "memory");
463 #endif
464 - "memory");
465 }
466
467 #endif /* _XTENSA_TRAPS_H */
468 diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
469 index 5082507d5631..aa7f9add7d77 100644
470 --- a/arch/xtensa/kernel/entry.S
471 +++ b/arch/xtensa/kernel/entry.S
472 @@ -1912,6 +1912,43 @@ ENTRY(system_call)
473
474 ENDPROC(system_call)
475
476 +/*
477 + * Spill live registers on the kernel stack macro.
478 + *
479 + * Entry condition: ps.woe is set, ps.excm is cleared
480 + * Exit condition: windowstart has single bit set
481 + * May clobber: a12, a13
482 + */
483 + .macro spill_registers_kernel
484 +
485 +#if XCHAL_NUM_AREGS > 16
486 + call12 1f
487 + _j 2f
488 + retw
489 + .align 4
490 +1:
491 + _entry a1, 48
492 + addi a12, a0, 3
493 +#if XCHAL_NUM_AREGS > 32
494 + .rept (XCHAL_NUM_AREGS - 32) / 12
495 + _entry a1, 48
496 + mov a12, a0
497 + .endr
498 +#endif
499 + _entry a1, 48
500 +#if XCHAL_NUM_AREGS % 12 == 0
501 + mov a8, a8
502 +#elif XCHAL_NUM_AREGS % 12 == 4
503 + mov a12, a12
504 +#elif XCHAL_NUM_AREGS % 12 == 8
505 + mov a4, a4
506 +#endif
507 + retw
508 +2:
509 +#else
510 + mov a12, a12
511 +#endif
512 + .endm
513
514 /*
515 * Task switch.
516 @@ -1924,21 +1961,20 @@ ENTRY(_switch_to)
517
518 entry a1, 16
519
520 - mov a12, a2 # preserve 'prev' (a2)
521 - mov a13, a3 # and 'next' (a3)
522 + mov a10, a2 # preserve 'prev' (a2)
523 + mov a11, a3 # and 'next' (a3)
524
525 l32i a4, a2, TASK_THREAD_INFO
526 l32i a5, a3, TASK_THREAD_INFO
527
528 - save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
529 + save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
530
531 - s32i a0, a12, THREAD_RA # save return address
532 - s32i a1, a12, THREAD_SP # save stack pointer
533 + s32i a0, a10, THREAD_RA # save return address
534 + s32i a1, a10, THREAD_SP # save stack pointer
535
536 /* Disable ints while we manipulate the stack pointer. */
537
538 - movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
539 - xsr a14, ps
540 + rsil a14, LOCKLEVEL
541 rsr a3, excsave1
542 rsync
543 s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
544 @@ -1953,7 +1989,7 @@ ENTRY(_switch_to)
545
546 /* Flush register file. */
547
548 - call0 _spill_registers # destroys a3, a4, and SAR
549 + spill_registers_kernel
550
551 /* Set kernel stack (and leave critical section)
552 * Note: It's save to set it here. The stack will not be overwritten
553 @@ -1969,13 +2005,13 @@ ENTRY(_switch_to)
554
555 /* restore context of the task 'next' */
556
557 - l32i a0, a13, THREAD_RA # restore return address
558 - l32i a1, a13, THREAD_SP # restore stack pointer
559 + l32i a0, a11, THREAD_RA # restore return address
560 + l32i a1, a11, THREAD_SP # restore stack pointer
561
562 - load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
563 + load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
564
565 wsr a14, ps
566 - mov a2, a12 # return 'prev'
567 + mov a2, a10 # return 'prev'
568 rsync
569
570 retw
571 diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
572 index 41c5e1b799ef..f658e0948703 100644
573 --- a/drivers/acpi/pci_irq.c
574 +++ b/drivers/acpi/pci_irq.c
575 @@ -432,6 +432,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
576 pin_name(pin));
577 }
578
579 + kfree(entry);
580 return 0;
581 }
582
583 diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
584 index e7dd2c1fee79..5e47d7bf4745 100644
585 --- a/drivers/acpi/processor_throttling.c
586 +++ b/drivers/acpi/processor_throttling.c
587 @@ -59,6 +59,12 @@ struct throttling_tstate {
588 int target_state; /* target T-state */
589 };
590
591 +struct acpi_processor_throttling_arg {
592 + struct acpi_processor *pr;
593 + int target_state;
594 + bool force;
595 +};
596 +
597 #define THROTTLING_PRECHANGE (1)
598 #define THROTTLING_POSTCHANGE (2)
599
600 @@ -1063,16 +1069,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
601 return 0;
602 }
603
604 +static long acpi_processor_throttling_fn(void *data)
605 +{
606 + struct acpi_processor_throttling_arg *arg = data;
607 + struct acpi_processor *pr = arg->pr;
608 +
609 + return pr->throttling.acpi_processor_set_throttling(pr,
610 + arg->target_state, arg->force);
611 +}
612 +
613 int acpi_processor_set_throttling(struct acpi_processor *pr,
614 int state, bool force)
615 {
616 - cpumask_var_t saved_mask;
617 int ret = 0;
618 unsigned int i;
619 struct acpi_processor *match_pr;
620 struct acpi_processor_throttling *p_throttling;
621 + struct acpi_processor_throttling_arg arg;
622 struct throttling_tstate t_state;
623 - cpumask_var_t online_throttling_cpus;
624
625 if (!pr)
626 return -EINVAL;
627 @@ -1083,14 +1097,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
628 if ((state < 0) || (state > (pr->throttling.state_count - 1)))
629 return -EINVAL;
630
631 - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
632 - return -ENOMEM;
633 -
634 - if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
635 - free_cpumask_var(saved_mask);
636 - return -ENOMEM;
637 - }
638 -
639 if (cpu_is_offline(pr->id)) {
640 /*
641 * the cpu pointed by pr->id is offline. Unnecessary to change
642 @@ -1099,17 +1105,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
643 return -ENODEV;
644 }
645
646 - cpumask_copy(saved_mask, &current->cpus_allowed);
647 t_state.target_state = state;
648 p_throttling = &(pr->throttling);
649 - cpumask_and(online_throttling_cpus, cpu_online_mask,
650 - p_throttling->shared_cpu_map);
651 +
652 /*
653 * The throttling notifier will be called for every
654 * affected cpu in order to get one proper T-state.
655 * The notifier event is THROTTLING_PRECHANGE.
656 */
657 - for_each_cpu(i, online_throttling_cpus) {
658 + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
659 t_state.cpu = i;
660 acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
661 &t_state);
662 @@ -1121,21 +1125,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
663 * it can be called only for the cpu pointed by pr.
664 */
665 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
666 - /* FIXME: use work_on_cpu() */
667 - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
668 - /* Can't migrate to the pr->id CPU. Exit */
669 - ret = -ENODEV;
670 - goto exit;
671 - }
672 - ret = p_throttling->acpi_processor_set_throttling(pr,
673 - t_state.target_state, force);
674 + arg.pr = pr;
675 + arg.target_state = state;
676 + arg.force = force;
677 + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
678 } else {
679 /*
680 * When the T-state coordination is SW_ALL or HW_ALL,
681 * it is necessary to set T-state for every affected
682 * cpus.
683 */
684 - for_each_cpu(i, online_throttling_cpus) {
685 + for_each_cpu_and(i, cpu_online_mask,
686 + p_throttling->shared_cpu_map) {
687 match_pr = per_cpu(processors, i);
688 /*
689 * If the pointer is invalid, we will report the
690 @@ -1156,13 +1157,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
691 "on CPU %d\n", i));
692 continue;
693 }
694 - t_state.cpu = i;
695 - /* FIXME: use work_on_cpu() */
696 - if (set_cpus_allowed_ptr(current, cpumask_of(i)))
697 - continue;
698 - ret = match_pr->throttling.
699 - acpi_processor_set_throttling(
700 - match_pr, t_state.target_state, force);
701 +
702 + arg.pr = match_pr;
703 + arg.target_state = state;
704 + arg.force = force;
705 + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
706 + &arg);
707 }
708 }
709 /*
710 @@ -1171,17 +1171,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
711 * affected cpu to update the T-states.
712 * The notifier event is THROTTLING_POSTCHANGE
713 */
714 - for_each_cpu(i, online_throttling_cpus) {
715 + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
716 t_state.cpu = i;
717 acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
718 &t_state);
719 }
720 - /* restore the previous state */
721 - /* FIXME: use work_on_cpu() */
722 - set_cpus_allowed_ptr(current, saved_mask);
723 -exit:
724 - free_cpumask_var(online_throttling_cpus);
725 - free_cpumask_var(saved_mask);
726 +
727 return ret;
728 }
729
730 diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
731 index 055dfdfd7348..82a01cc45f9c 100644
732 --- a/drivers/acpi/video.c
733 +++ b/drivers/acpi/video.c
734 @@ -733,6 +733,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
735 union acpi_object *o;
736 struct acpi_video_device_brightness *br = NULL;
737 int result = -EINVAL;
738 + u32 value;
739
740 if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
741 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
742 @@ -763,7 +764,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
743 printk(KERN_ERR PREFIX "Invalid data\n");
744 continue;
745 }
746 - br->levels[count] = (u32) o->integer.value;
747 + value = (u32) o->integer.value;
748 + /* Skip duplicate entries */
749 + if (count > 2 && br->levels[count - 1] == value)
750 + continue;
751 +
752 + br->levels[count] = value;
753
754 if (br->levels[count] > max_level)
755 max_level = br->levels[count];
756 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
757 index 3f1794f4a8bf..4942058402a4 100644
758 --- a/drivers/ata/ahci.c
759 +++ b/drivers/ata/ahci.c
760 @@ -61,6 +61,7 @@ enum board_ids {
761 /* board IDs by feature in alphabetical order */
762 board_ahci,
763 board_ahci_ign_iferr,
764 + board_ahci_noncq,
765 board_ahci_nosntf,
766 board_ahci_yes_fbs,
767
768 @@ -119,6 +120,13 @@ static const struct ata_port_info ahci_port_info[] = {
769 .udma_mask = ATA_UDMA6,
770 .port_ops = &ahci_ops,
771 },
772 + [board_ahci_noncq] = {
773 + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
774 + .flags = AHCI_FLAG_COMMON,
775 + .pio_mask = ATA_PIO4,
776 + .udma_mask = ATA_UDMA6,
777 + .port_ops = &ahci_ops,
778 + },
779 [board_ahci_nosntf] = {
780 AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
781 .flags = AHCI_FLAG_COMMON,
782 @@ -450,6 +458,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
783 { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
784 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
785
786 + /*
787 + * Samsung SSDs found on some macbooks. NCQ times out.
788 + * https://bugzilla.kernel.org/show_bug.cgi?id=60731
789 + */
790 + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
791 +
792 /* Enmotus */
793 { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
794
795 diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
796 index 20fd337a5731..7ccc084bf1df 100644
797 --- a/drivers/ata/libata-pmp.c
798 +++ b/drivers/ata/libata-pmp.c
799 @@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
800 * otherwise. Don't try hard to recover it.
801 */
802 ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
803 - } else if (vendor == 0x197b && devid == 0x2352) {
804 - /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
805 + } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
806 + /*
807 + * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
808 + * 0x0325: jmicron JMB394.
809 + */
810 ata_for_each_link(link, ap, EDGE) {
811 /* SRST breaks detection and disks get misclassified
812 * LPM disabled to avoid potential problems
813 diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
814 index 0ae3ca4bf5c0..dd1faa564eb2 100644
815 --- a/drivers/ata/sata_sil.c
816 +++ b/drivers/ata/sata_sil.c
817 @@ -157,6 +157,7 @@ static const struct sil_drivelist {
818 { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
819 { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
820 { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
821 + { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
822 { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
823 { }
824 };
825 diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
826 index b828efe4b2f8..9b963ceba5c4 100644
827 --- a/drivers/cpufreq/powernow-k8.c
828 +++ b/drivers/cpufreq/powernow-k8.c
829 @@ -1100,7 +1100,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
830 {
831 struct powernow_k8_data *data;
832 struct init_on_cpu init_on_cpu;
833 - int rc;
834 + int rc, cpu;
835
836 smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
837 if (rc)
838 @@ -1169,7 +1169,9 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
839 pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
840 data->currfid, data->currvid);
841
842 - per_cpu(powernow_data, pol->cpu) = data;
843 + /* Point all the CPUs in this policy to the same data */
844 + for_each_cpu(cpu, pol->cpus)
845 + per_cpu(powernow_data, cpu) = data;
846
847 return 0;
848
849 @@ -1184,6 +1186,7 @@ err_out:
850 static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
851 {
852 struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
853 + int cpu;
854
855 if (!data)
856 return -EINVAL;
857 @@ -1194,7 +1197,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
858
859 kfree(data->powernow_table);
860 kfree(data);
861 - per_cpu(powernow_data, pol->cpu) = NULL;
862 + for_each_cpu(cpu, pol->cpus)
863 + per_cpu(powernow_data, cpu) = NULL;
864
865 return 0;
866 }
867 diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
868 index 17a2393b3e25..533e1874e1d6 100644
869 --- a/drivers/dma/ioat/dma.c
870 +++ b/drivers/dma/ioat/dma.c
871 @@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
872 attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
873 for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
874 chan = ioat_chan_by_index(instance, bit);
875 - tasklet_schedule(&chan->cleanup_task);
876 + if (test_bit(IOAT_RUN, &chan->state))
877 + tasklet_schedule(&chan->cleanup_task);
878 }
879
880 writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
881 @@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
882 {
883 struct ioat_chan_common *chan = data;
884
885 - tasklet_schedule(&chan->cleanup_task);
886 + if (test_bit(IOAT_RUN, &chan->state))
887 + tasklet_schedule(&chan->cleanup_task);
888
889 return IRQ_HANDLED;
890 }
891 @@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
892 chan->timer.function = device->timer_fn;
893 chan->timer.data = data;
894 tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
895 - tasklet_disable(&chan->cleanup_task);
896 }
897
898 /**
899 @@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
900 writel(((u64) chan->completion_dma) >> 32,
901 chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
902
903 - tasklet_enable(&chan->cleanup_task);
904 + set_bit(IOAT_RUN, &chan->state);
905 ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
906 dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
907 __func__, ioat->desccount);
908 return ioat->desccount;
909 }
910
911 +void ioat_stop(struct ioat_chan_common *chan)
912 +{
913 + struct ioatdma_device *device = chan->device;
914 + struct pci_dev *pdev = device->pdev;
915 + int chan_id = chan_num(chan);
916 + struct msix_entry *msix;
917 +
918 + /* 1/ stop irq from firing tasklets
919 + * 2/ stop the tasklet from re-arming irqs
920 + */
921 + clear_bit(IOAT_RUN, &chan->state);
922 +
923 + /* flush inflight interrupts */
924 + switch (device->irq_mode) {
925 + case IOAT_MSIX:
926 + msix = &device->msix_entries[chan_id];
927 + synchronize_irq(msix->vector);
928 + break;
929 + case IOAT_MSI:
930 + case IOAT_INTX:
931 + synchronize_irq(pdev->irq);
932 + break;
933 + default:
934 + break;
935 + }
936 +
937 + /* flush inflight timers */
938 + del_timer_sync(&chan->timer);
939 +
940 + /* flush inflight tasklet runs */
941 + tasklet_kill(&chan->cleanup_task);
942 +
943 + /* final cleanup now that everything is quiesced and can't re-arm */
944 + device->cleanup_fn((unsigned long) &chan->common);
945 +}
946 +
947 /**
948 * ioat1_dma_free_chan_resources - release all the descriptors
949 * @chan: the channel to be cleaned
950 @@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
951 if (ioat->desccount == 0)
952 return;
953
954 - tasklet_disable(&chan->cleanup_task);
955 - del_timer_sync(&chan->timer);
956 - ioat1_cleanup(ioat);
957 + ioat_stop(chan);
958
959 /* Delay 100ms after reset to allow internal DMA logic to quiesce
960 * before removing DMA descriptor resources.
961 @@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
962 static void ioat1_cleanup_event(unsigned long data)
963 {
964 struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
965 + struct ioat_chan_common *chan = &ioat->base;
966
967 ioat1_cleanup(ioat);
968 + if (!test_bit(IOAT_RUN, &chan->state))
969 + return;
970 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
971 }
972
973 diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
974 index 54fb7b9ff9aa..a1d78847e5a5 100644
975 --- a/drivers/dma/ioat/dma.h
976 +++ b/drivers/dma/ioat/dma.h
977 @@ -370,6 +370,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
978 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
979 void ioat_kobject_del(struct ioatdma_device *device);
980 int ioat_dma_setup_interrupts(struct ioatdma_device *device);
981 +void ioat_stop(struct ioat_chan_common *chan);
982 extern const struct sysfs_ops ioat_sysfs_ops;
983 extern struct ioat_sysfs_entry ioat_version_attr;
984 extern struct ioat_sysfs_entry ioat_cap_attr;
985 diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
986 index b925e1b1d139..1cd761026d84 100644
987 --- a/drivers/dma/ioat/dma_v2.c
988 +++ b/drivers/dma/ioat/dma_v2.c
989 @@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
990 void ioat2_cleanup_event(unsigned long data)
991 {
992 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
993 + struct ioat_chan_common *chan = &ioat->base;
994
995 ioat2_cleanup(ioat);
996 + if (!test_bit(IOAT_RUN, &chan->state))
997 + return;
998 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
999 }
1000
1001 @@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1002 ioat->issued = 0;
1003 ioat->tail = 0;
1004 ioat->alloc_order = order;
1005 + set_bit(IOAT_RUN, &chan->state);
1006 spin_unlock_bh(&ioat->prep_lock);
1007 spin_unlock_bh(&chan->cleanup_lock);
1008
1009 - tasklet_enable(&chan->cleanup_task);
1010 ioat2_start_null_desc(ioat);
1011
1012 /* check that we got off the ground */
1013 @@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1014 } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
1015
1016 if (is_ioat_active(status) || is_ioat_idle(status)) {
1017 - set_bit(IOAT_RUN, &chan->state);
1018 return 1 << ioat->alloc_order;
1019 } else {
1020 u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1021 @@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
1022 if (!ioat->ring)
1023 return;
1024
1025 - tasklet_disable(&chan->cleanup_task);
1026 - del_timer_sync(&chan->timer);
1027 - device->cleanup_fn((unsigned long) c);
1028 + ioat_stop(chan);
1029 device->reset_hw(chan);
1030 - clear_bit(IOAT_RUN, &chan->state);
1031
1032 spin_lock_bh(&chan->cleanup_lock);
1033 spin_lock_bh(&ioat->prep_lock);
1034 diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
1035 index fa43a42ccc86..38b94b393c6c 100644
1036 --- a/drivers/dma/ioat/dma_v3.c
1037 +++ b/drivers/dma/ioat/dma_v3.c
1038 @@ -635,8 +635,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
1039 static void ioat3_cleanup_event(unsigned long data)
1040 {
1041 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
1042 + struct ioat_chan_common *chan = &ioat->base;
1043
1044 ioat3_cleanup(ioat);
1045 + if (!test_bit(IOAT_RUN, &chan->state))
1046 + return;
1047 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
1048 }
1049
1050 diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
1051 index 71bf4ec300ea..ca78044df4b5 100644
1052 --- a/drivers/dma/ste_dma40.c
1053 +++ b/drivers/dma/ste_dma40.c
1054 @@ -1587,6 +1587,7 @@ static void dma_tasklet(unsigned long data)
1055 struct d40_chan *d40c = (struct d40_chan *) data;
1056 struct d40_desc *d40d;
1057 unsigned long flags;
1058 + bool callback_active;
1059 dma_async_tx_callback callback;
1060 void *callback_param;
1061
1062 @@ -1614,6 +1615,7 @@ static void dma_tasklet(unsigned long data)
1063 }
1064
1065 /* Callback to client */
1066 + callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1067 callback = d40d->txd.callback;
1068 callback_param = d40d->txd.callback_param;
1069
1070 @@ -1636,7 +1638,7 @@ static void dma_tasklet(unsigned long data)
1071
1072 spin_unlock_irqrestore(&d40c->lock, flags);
1073
1074 - if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1075 + if (callback_active && callback)
1076 callback(callback_param);
1077
1078 return;
1079 diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
1080 index 9004c64b169e..841eee387478 100644
1081 --- a/drivers/edac/i7300_edac.c
1082 +++ b/drivers/edac/i7300_edac.c
1083 @@ -943,33 +943,35 @@ static int i7300_get_devices(struct mem_ctl_info *mci)
1084
1085 /* Attempt to 'get' the MCH register we want */
1086 pdev = NULL;
1087 - while (!pvt->pci_dev_16_1_fsb_addr_map ||
1088 - !pvt->pci_dev_16_2_fsb_err_regs) {
1089 - pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1090 - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
1091 - if (!pdev) {
1092 - /* End of list, leave */
1093 - i7300_printk(KERN_ERR,
1094 - "'system address,Process Bus' "
1095 - "device not found:"
1096 - "vendor 0x%x device 0x%x ERR funcs "
1097 - "(broken BIOS?)\n",
1098 - PCI_VENDOR_ID_INTEL,
1099 - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
1100 - goto error;
1101 - }
1102 -
1103 + while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1104 + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
1105 + pdev))) {
1106 /* Store device 16 funcs 1 and 2 */
1107 switch (PCI_FUNC(pdev->devfn)) {
1108 case 1:
1109 - pvt->pci_dev_16_1_fsb_addr_map = pdev;
1110 + if (!pvt->pci_dev_16_1_fsb_addr_map)
1111 + pvt->pci_dev_16_1_fsb_addr_map =
1112 + pci_dev_get(pdev);
1113 break;
1114 case 2:
1115 - pvt->pci_dev_16_2_fsb_err_regs = pdev;
1116 + if (!pvt->pci_dev_16_2_fsb_err_regs)
1117 + pvt->pci_dev_16_2_fsb_err_regs =
1118 + pci_dev_get(pdev);
1119 break;
1120 }
1121 }
1122
1123 + if (!pvt->pci_dev_16_1_fsb_addr_map ||
1124 + !pvt->pci_dev_16_2_fsb_err_regs) {
1125 + /* At least one device was not found */
1126 + i7300_printk(KERN_ERR,
1127 + "'system address,Process Bus' device not found:"
1128 + "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
1129 + PCI_VENDOR_ID_INTEL,
1130 + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
1131 + goto error;
1132 + }
1133 +
1134 edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
1135 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
1136 pvt->pci_dev_16_0_fsb_ctlr->vendor,
1137 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1138 index 0ec3e95a12cd..271818a5a33a 100644
1139 --- a/drivers/edac/i7core_edac.c
1140 +++ b/drivers/edac/i7core_edac.c
1141 @@ -1334,14 +1334,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
1142 * is at addr 8086:2c40, instead of 8086:2c41. So, we need
1143 * to probe for the alternate address in case of failure
1144 */
1145 - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
1146 + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
1147 + pci_dev_get(*prev); /* pci_get_device will put it */
1148 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1149 PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
1150 + }
1151
1152 - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
1153 + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
1154 + !pdev) {
1155 + pci_dev_get(*prev); /* pci_get_device will put it */
1156 pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1157 PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
1158 *prev);
1159 + }
1160
1161 if (!pdev) {
1162 if (*prev) {
1163 diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1164 index 5680d3eb11ca..4b7d4343f4ac 100644
1165 --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1166 +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1167 @@ -1112,7 +1112,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1168 if (conf != ~0) {
1169 if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
1170 u32 soff = (ffs(outp.or) - 1) * 0x08;
1171 - u32 ctrl = nv_rd32(priv, 0x610798 + soff);
1172 + u32 ctrl = nv_rd32(priv, 0x610794 + soff);
1173 u32 datarate;
1174
1175 switch ((ctrl & 0x000f0000) >> 16) {
1176 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
1177 index 383f4e6ea9d1..4598a6afea1e 100644
1178 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
1179 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
1180 @@ -339,6 +339,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
1181 if (ret)
1182 goto fail_device;
1183
1184 + dev->irq_enabled = true;
1185 +
1186 /* workaround an odd issue on nvc1 by disabling the device's
1187 * nosnoop capability. hopefully won't cause issues until a
1188 * better fix is found - assuming there is one...
1189 @@ -426,6 +428,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
1190 struct nouveau_drm *drm = nouveau_drm(dev);
1191 struct nouveau_object *device;
1192
1193 + dev->irq_enabled = false;
1194 device = drm->client.base.device;
1195 drm_put_dev(dev);
1196
1197 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1198 index 50684dc6ffdf..a56d0199e334 100644
1199 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1200 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1201 @@ -1661,6 +1661,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1202 return ATOM_PPLL1;
1203 DRM_ERROR("unable to allocate a PPLL\n");
1204 return ATOM_PPLL_INVALID;
1205 + } else if (ASIC_IS_DCE41(rdev)) {
1206 + /* Don't share PLLs on DCE4.1 chips */
1207 + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1208 + if (rdev->clock.dp_extclk)
1209 + /* skip PPLL programming if using ext clock */
1210 + return ATOM_PPLL_INVALID;
1211 + }
1212 + pll_in_use = radeon_get_pll_use_mask(crtc);
1213 + if (!(pll_in_use & (1 << ATOM_PPLL1)))
1214 + return ATOM_PPLL1;
1215 + if (!(pll_in_use & (1 << ATOM_PPLL2)))
1216 + return ATOM_PPLL2;
1217 + DRM_ERROR("unable to allocate a PPLL\n");
1218 + return ATOM_PPLL_INVALID;
1219 } else if (ASIC_IS_DCE4(rdev)) {
1220 /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
1221 * depending on the asic:
1222 @@ -1688,7 +1702,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1223 if (pll != ATOM_PPLL_INVALID)
1224 return pll;
1225 }
1226 - } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
1227 + } else {
1228 /* use the same PPLL for all monitors with the same clock */
1229 pll = radeon_get_shared_nondp_ppll(crtc);
1230 if (pll != ATOM_PPLL_INVALID)
1231 diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1232 index d96070bf8388..cbb06d7c89b5 100644
1233 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1234 +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1235 @@ -215,7 +215,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
1236 memcpy(&output, info->buffer.pointer, size);
1237
1238 /* TODO: check version? */
1239 - printk("ATPX version %u\n", output.version);
1240 + printk("ATPX version %u, functions 0x%08x\n",
1241 + output.version, output.function_bits);
1242
1243 radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
1244
1245 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
1246 index 7e292d899209..1113e8f69137 100644
1247 --- a/drivers/gpu/drm/radeon/radeon_kms.c
1248 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
1249 @@ -485,6 +485,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
1250
1251 radeon_vm_init(rdev, &fpriv->vm);
1252
1253 + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
1254 + if (r)
1255 + return r;
1256 +
1257 /* map the ib pool buffer read only into
1258 * virtual address space */
1259 bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
1260 @@ -492,6 +496,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
1261 r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
1262 RADEON_VM_PAGE_READABLE |
1263 RADEON_VM_PAGE_SNOOPED);
1264 +
1265 + radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
1266 if (r) {
1267 radeon_vm_fini(rdev, &fpriv->vm);
1268 kfree(fpriv);
1269 diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
1270 index a7626358c95d..029b65e6c589 100644
1271 --- a/drivers/hwmon/max1668.c
1272 +++ b/drivers/hwmon/max1668.c
1273 @@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
1274 data->temp_min[index] = clamp_val(temp/1000, -128, 127);
1275 if (i2c_smbus_write_byte_data(client,
1276 MAX1668_REG_LIML_WR(index),
1277 - data->temp_max[index]))
1278 + data->temp_min[index]))
1279 count = -EIO;
1280 mutex_unlock(&data->update_lock);
1281
1282 diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
1283 index 6be4628faffe..107cafcb89da 100644
1284 --- a/drivers/iio/gyro/Kconfig
1285 +++ b/drivers/iio/gyro/Kconfig
1286 @@ -50,7 +50,7 @@ config IIO_ST_GYRO_3AXIS
1287 select IIO_ST_GYRO_BUFFER if (IIO_TRIGGERED_BUFFER)
1288 help
1289 Say yes here to build support for STMicroelectronics gyroscopes:
1290 - L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
1291 + L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
1292
1293 This driver can also be built as a module. If so, will be created
1294 these modules:
1295 diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
1296 index 3ad9907bb154..25ee236b1bb2 100644
1297 --- a/drivers/iio/gyro/st_gyro.h
1298 +++ b/drivers/iio/gyro/st_gyro.h
1299 @@ -19,7 +19,6 @@
1300 #define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
1301 #define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
1302 #define L3GD20_GYRO_DEV_NAME "l3gd20"
1303 -#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
1304 #define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
1305 #define LSM330_GYRO_DEV_NAME "lsm330_gyro"
1306
1307 diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
1308 index fa9b24219987..1fabc3ffe7ee 100644
1309 --- a/drivers/iio/gyro/st_gyro_core.c
1310 +++ b/drivers/iio/gyro/st_gyro_core.c
1311 @@ -162,11 +162,10 @@ static const struct st_sensors st_gyro_sensors[] = {
1312 .wai = ST_GYRO_2_WAI_EXP,
1313 .sensors_supported = {
1314 [0] = L3GD20_GYRO_DEV_NAME,
1315 - [1] = L3GD20H_GYRO_DEV_NAME,
1316 - [2] = LSM330D_GYRO_DEV_NAME,
1317 - [3] = LSM330DLC_GYRO_DEV_NAME,
1318 - [4] = L3G4IS_GYRO_DEV_NAME,
1319 - [5] = LSM330_GYRO_DEV_NAME,
1320 + [1] = LSM330D_GYRO_DEV_NAME,
1321 + [2] = LSM330DLC_GYRO_DEV_NAME,
1322 + [3] = L3G4IS_GYRO_DEV_NAME,
1323 + [4] = LSM330_GYRO_DEV_NAME,
1324 },
1325 .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
1326 .odr = {
1327 diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
1328 index 8a310500573d..c1755ce2da30 100644
1329 --- a/drivers/iio/gyro/st_gyro_i2c.c
1330 +++ b/drivers/iio/gyro/st_gyro_i2c.c
1331 @@ -61,7 +61,6 @@ static const struct i2c_device_id st_gyro_id_table[] = {
1332 { LSM330DL_GYRO_DEV_NAME },
1333 { LSM330DLC_GYRO_DEV_NAME },
1334 { L3GD20_GYRO_DEV_NAME },
1335 - { L3GD20H_GYRO_DEV_NAME },
1336 { L3G4IS_GYRO_DEV_NAME },
1337 { LSM330_GYRO_DEV_NAME },
1338 {},
1339 diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
1340 index f3540390eb22..b37fc9e07706 100644
1341 --- a/drivers/iio/gyro/st_gyro_spi.c
1342 +++ b/drivers/iio/gyro/st_gyro_spi.c
1343 @@ -60,7 +60,6 @@ static const struct spi_device_id st_gyro_id_table[] = {
1344 { LSM330DL_GYRO_DEV_NAME },
1345 { LSM330DLC_GYRO_DEV_NAME },
1346 { L3GD20_GYRO_DEV_NAME },
1347 - { L3GD20H_GYRO_DEV_NAME },
1348 { L3G4IS_GYRO_DEV_NAME },
1349 { LSM330_GYRO_DEV_NAME },
1350 {},
1351 diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
1352 index 7a04f54ef961..e7e12a5f5c2d 100644
1353 --- a/drivers/input/misc/arizona-haptics.c
1354 +++ b/drivers/input/misc/arizona-haptics.c
1355 @@ -77,16 +77,14 @@ static void arizona_haptics_work(struct work_struct *work)
1356 return;
1357 }
1358
1359 + mutex_unlock(dapm_mutex);
1360 +
1361 ret = snd_soc_dapm_sync(arizona->dapm);
1362 if (ret != 0) {
1363 dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
1364 ret);
1365 - mutex_unlock(dapm_mutex);
1366 return;
1367 }
1368 -
1369 - mutex_unlock(dapm_mutex);
1370 -
1371 } else {
1372 /* This disable sequence will be a noop if already enabled */
1373 mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
1374 @@ -99,16 +97,15 @@ static void arizona_haptics_work(struct work_struct *work)
1375 return;
1376 }
1377
1378 + mutex_unlock(dapm_mutex);
1379 +
1380 ret = snd_soc_dapm_sync(arizona->dapm);
1381 if (ret != 0) {
1382 dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
1383 ret);
1384 - mutex_unlock(dapm_mutex);
1385 return;
1386 }
1387
1388 - mutex_unlock(dapm_mutex);
1389 -
1390 ret = regmap_update_bits(arizona->regmap,
1391 ARIZONA_HAPTICS_CONTROL_1,
1392 ARIZONA_HAP_CTRL_MASK,
1393 diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
1394 index 92c41ab4dbfd..2cb474ad8809 100644
1395 --- a/drivers/irqchip/irq-metag-ext.c
1396 +++ b/drivers/irqchip/irq-metag-ext.c
1397 @@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data,
1398 * one cpu (the interrupt code doesn't support it), so we just
1399 * pick the first cpu we find in 'cpumask'.
1400 */
1401 - cpu = cpumask_any(cpumask);
1402 + cpu = cpumask_any_and(cpumask, cpu_online_mask);
1403 thread = cpu_2_hwthread_id[cpu];
1404
1405 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
1406 diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
1407 index 8e94d7a3b20d..c16c186d97d3 100644
1408 --- a/drivers/irqchip/irq-metag.c
1409 +++ b/drivers/irqchip/irq-metag.c
1410 @@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
1411 * one cpu (the interrupt code doesn't support it), so we just
1412 * pick the first cpu we find in 'cpumask'.
1413 */
1414 - cpu = cpumask_any(cpumask);
1415 + cpu = cpumask_any_and(cpumask, cpu_online_mask);
1416 thread = cpu_2_hwthread_id[cpu];
1417
1418 metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
1419 diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1420 index eb7ddb20fd48..0ba21b0f3972 100644
1421 --- a/drivers/md/dm-mpath.c
1422 +++ b/drivers/md/dm-mpath.c
1423 @@ -1608,8 +1608,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1424 /*
1425 * Only pass ioctls through if the device sizes match exactly.
1426 */
1427 - if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1428 - r = scsi_verify_blk_ioctl(NULL, cmd);
1429 + if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
1430 + int err = scsi_verify_blk_ioctl(NULL, cmd);
1431 + if (err)
1432 + r = err;
1433 + }
1434
1435 if (r == -ENOTCONN && !fatal_signal_pending(current))
1436 queue_work(kmultipathd, &m->process_queued_ios);
1437 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1438 index 33ac3be2e836..5f49d704f275 100644
1439 --- a/drivers/md/dm-thin-metadata.c
1440 +++ b/drivers/md/dm-thin-metadata.c
1441 @@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
1442 return r;
1443 }
1444
1445 +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
1446 +{
1447 + bool r = false;
1448 + struct dm_thin_device *td, *tmp;
1449 +
1450 + down_read(&pmd->root_lock);
1451 + list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
1452 + if (td->changed) {
1453 + r = td->changed;
1454 + break;
1455 + }
1456 + }
1457 + up_read(&pmd->root_lock);
1458 +
1459 + return r;
1460 +}
1461 +
1462 bool dm_thin_aborted_changes(struct dm_thin_device *td)
1463 {
1464 bool r;
1465 diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
1466 index 125c09444019..8f4d62baf09b 100644
1467 --- a/drivers/md/dm-thin-metadata.h
1468 +++ b/drivers/md/dm-thin-metadata.h
1469 @@ -161,6 +161,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
1470 */
1471 bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
1472
1473 +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
1474 +
1475 bool dm_thin_aborted_changes(struct dm_thin_device *td);
1476
1477 int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
1478 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1479 index 73c76c565a4d..901aac27e522 100644
1480 --- a/drivers/md/dm-thin.c
1481 +++ b/drivers/md/dm-thin.c
1482 @@ -1344,7 +1344,8 @@ static void process_deferred_bios(struct pool *pool)
1483 bio_list_init(&pool->deferred_flush_bios);
1484 spin_unlock_irqrestore(&pool->lock, flags);
1485
1486 - if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1487 + if (bio_list_empty(&bios) &&
1488 + !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
1489 return;
1490
1491 if (commit_or_fallback(pool)) {
1492 @@ -2783,6 +2784,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
1493
1494 if (get_pool_mode(tc->pool) == PM_FAIL) {
1495 ti->error = "Couldn't open thin device, Pool is in fail mode";
1496 + r = -EINVAL;
1497 goto bad_thin_open;
1498 }
1499
1500 @@ -2794,7 +2796,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
1501
1502 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
1503 if (r)
1504 - goto bad_thin_open;
1505 + goto bad_target_max_io_len;
1506
1507 ti->num_flush_bios = 1;
1508 ti->flush_supported = true;
1509 @@ -2815,6 +2817,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
1510
1511 return 0;
1512
1513 +bad_target_max_io_len:
1514 + dm_pool_close_thin_device(tc->td);
1515 bad_thin_open:
1516 __pool_dec(tc->pool);
1517 bad_pool_lookup:
1518 diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
1519 index 957fb36c7eb3..0bb2aa2c6fb0 100644
1520 --- a/drivers/misc/mei/client.c
1521 +++ b/drivers/misc/mei/client.c
1522 @@ -664,7 +664,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
1523 goto err;
1524
1525 cb->fop_type = MEI_FOP_READ;
1526 - cl->read_cb = cb;
1527 if (dev->hbuf_is_ready) {
1528 dev->hbuf_is_ready = false;
1529 if (mei_hbm_cl_flow_control_req(dev, cl)) {
1530 @@ -675,6 +674,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
1531 } else {
1532 list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
1533 }
1534 +
1535 + cl->read_cb = cb;
1536 +
1537 return rets;
1538 err:
1539 mei_io_cb_free(cb);
1540 diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
1541 index 390061d09693..00c6c089b935 100644
1542 --- a/drivers/net/bonding/bond_3ad.c
1543 +++ b/drivers/net/bonding/bond_3ad.c
1544 @@ -1854,8 +1854,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
1545 BOND_AD_INFO(bond).agg_select_timer = timeout;
1546 }
1547
1548 -static u16 aggregator_identifier;
1549 -
1550 /**
1551 * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
1552 * @bond: bonding struct to work on
1553 @@ -1869,7 +1867,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
1554 if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
1555 bond->dev->dev_addr)) {
1556
1557 - aggregator_identifier = 0;
1558 + BOND_AD_INFO(bond).aggregator_identifier = 0;
1559
1560 BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
1561 BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
1562 @@ -1940,7 +1938,7 @@ int bond_3ad_bind_slave(struct slave *slave)
1563 ad_initialize_agg(aggregator);
1564
1565 aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
1566 - aggregator->aggregator_identifier = (++aggregator_identifier);
1567 + aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
1568 aggregator->slave = slave;
1569 aggregator->is_active = 0;
1570 aggregator->num_of_ports = 0;
1571 diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
1572 index 5d91ad0cc041..1f081c89753f 100644
1573 --- a/drivers/net/bonding/bond_3ad.h
1574 +++ b/drivers/net/bonding/bond_3ad.h
1575 @@ -253,6 +253,7 @@ struct ad_system {
1576 struct ad_bond_info {
1577 struct ad_system system; /* 802.3ad system structure */
1578 u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
1579 + u16 aggregator_identifier;
1580 };
1581
1582 struct ad_slave_info {
1583 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1584 index 1870c4731a57..539239d8e9ab 100644
1585 --- a/drivers/net/can/dev.c
1586 +++ b/drivers/net/can/dev.c
1587 @@ -324,19 +324,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
1588 }
1589
1590 if (!priv->echo_skb[idx]) {
1591 - struct sock *srcsk = skb->sk;
1592
1593 - if (atomic_read(&skb->users) != 1) {
1594 - struct sk_buff *old_skb = skb;
1595 -
1596 - skb = skb_clone(old_skb, GFP_ATOMIC);
1597 - kfree_skb(old_skb);
1598 - if (!skb)
1599 - return;
1600 - } else
1601 - skb_orphan(skb);
1602 -
1603 - skb->sk = srcsk;
1604 + skb = can_create_echo_skb(skb);
1605 + if (!skb)
1606 + return;
1607
1608 /* make settings for echo to reduce code in irq context */
1609 skb->protocol = htons(ETH_P_CAN);
1610 diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
1611 index c4bc1d2e2033..b08383f485a5 100644
1612 --- a/drivers/net/can/janz-ican3.c
1613 +++ b/drivers/net/can/janz-ican3.c
1614 @@ -19,6 +19,7 @@
1615 #include <linux/netdevice.h>
1616 #include <linux/can.h>
1617 #include <linux/can/dev.h>
1618 +#include <linux/can/skb.h>
1619 #include <linux/can/error.h>
1620
1621 #include <linux/mfd/janz.h>
1622 @@ -1134,20 +1135,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
1623 */
1624 static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
1625 {
1626 - struct sock *srcsk = skb->sk;
1627 -
1628 - if (atomic_read(&skb->users) != 1) {
1629 - struct sk_buff *old_skb = skb;
1630 -
1631 - skb = skb_clone(old_skb, GFP_ATOMIC);
1632 - kfree_skb(old_skb);
1633 - if (!skb)
1634 - return;
1635 - } else {
1636 - skb_orphan(skb);
1637 - }
1638 -
1639 - skb->sk = srcsk;
1640 + skb = can_create_echo_skb(skb);
1641 + if (!skb)
1642 + return;
1643
1644 /* save this skb for tx interrupt echo handling */
1645 skb_queue_tail(&mod->echoq, skb);
1646 diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
1647 index 4b2d5ed62b11..cc3df8aebb87 100644
1648 --- a/drivers/net/can/usb/kvaser_usb.c
1649 +++ b/drivers/net/can/usb/kvaser_usb.c
1650 @@ -474,6 +474,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
1651 return err;
1652
1653 dev->nchannels = msg.u.cardinfo.nchannels;
1654 + if (dev->nchannels > MAX_NET_DEVICES)
1655 + return -EINVAL;
1656
1657 return 0;
1658 }
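[Note on the kvaser_usb hunk above: nchannels is reported by the device itself and is later used to index fixed-size per-channel arrays, so it is treated as untrusted input and rejected before use if it exceeds MAX_NET_DEVICES. A generic sketch of the guard, with hypothetical names:

/* Hypothetical names; the point is validating device-reported counts. */
#include <errno.h>

#define MAX_CHANNELS 8          /* size of the per-channel arrays */

struct card {
        unsigned int nchannels;
        void *net[MAX_CHANNELS];
};

static int set_card_info(struct card *card, unsigned int reported)
{
        /* reject before the value can ever index net[] out of bounds */
        if (reported > MAX_CHANNELS)
                return -EINVAL;
        card->nchannels = reported;
        return 0;
}
]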
1659 diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
1660 index 0a2a5ee79a17..4e94057ef5cf 100644
1661 --- a/drivers/net/can/vcan.c
1662 +++ b/drivers/net/can/vcan.c
1663 @@ -46,6 +46,7 @@
1664 #include <linux/if_ether.h>
1665 #include <linux/can.h>
1666 #include <linux/can/dev.h>
1667 +#include <linux/can/skb.h>
1668 #include <linux/slab.h>
1669 #include <net/rtnetlink.h>
1670
1671 @@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
1672 stats->rx_packets++;
1673 stats->rx_bytes += cfd->len;
1674 }
1675 - kfree_skb(skb);
1676 + consume_skb(skb);
1677 return NETDEV_TX_OK;
1678 }
1679
1680 /* perform standard echo handling for CAN network interfaces */
1681
1682 if (loop) {
1683 - struct sock *srcsk = skb->sk;
1684
1685 - skb = skb_share_check(skb, GFP_ATOMIC);
1686 + skb = can_create_echo_skb(skb);
1687 if (!skb)
1688 return NETDEV_TX_OK;
1689
1690 /* receive with packet counting */
1691 - skb->sk = srcsk;
1692 vcan_rx(skb, dev);
1693 } else {
1694 /* no looped packets => no counting */
1695 - kfree_skb(skb);
1696 + consume_skb(skb);
1697 }
1698 return NETDEV_TX_OK;
1699 }
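[Note on the vcan hunk above: the switch from kfree_skb() to consume_skb() is semantic, not functional. Both free the skb, but kfree_skb() marks a drop and is counted by drop-monitor tooling, while consume_skb() signals normal completion. A short sketch of the convention:

/* Sketch of the kfree_skb()/consume_skb() convention (kernel context). */
#include <linux/skbuff.h>

static void tx_done(struct sk_buff *skb, bool delivered)
{
        if (delivered)
                consume_skb(skb);       /* normal completion, not a drop */
        else
                kfree_skb(skb);         /* real drop: visible to drop monitor */
}
]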
1700 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1701 index 11ae0811e4bf..68e9dc453e11 100644
1702 --- a/drivers/net/ethernet/broadcom/tg3.c
1703 +++ b/drivers/net/ethernet/broadcom/tg3.c
1704 @@ -13777,12 +13777,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
1705
1706 tg3_netif_stop(tp);
1707
1708 + tg3_set_mtu(dev, tp, new_mtu);
1709 +
1710 tg3_full_lock(tp, 1);
1711
1712 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
1713
1714 - tg3_set_mtu(dev, tp, new_mtu);
1715 -
1716 /* Reset PHY, otherwise the read DMA engine will be in a mode that
1717 * breaks all requests to 256 bytes.
1718 */
1719 diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
1720 index ad5d1e4384db..97b5de7aebdb 100644
1721 --- a/drivers/net/usb/asix_devices.c
1722 +++ b/drivers/net/usb/asix_devices.c
1723 @@ -915,7 +915,8 @@ static const struct driver_info ax88178_info = {
1724 .status = asix_status,
1725 .link_reset = ax88178_link_reset,
1726 .reset = ax88178_reset,
1727 - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
1728 + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
1729 + FLAG_MULTI_PACKET,
1730 .rx_fixup = asix_rx_fixup_common,
1731 .tx_fixup = asix_tx_fixup,
1732 };
1733 diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
1734 index cea1f3d0311b..d33c3ae2fcea 100644
1735 --- a/drivers/net/usb/ax88179_178a.c
1736 +++ b/drivers/net/usb/ax88179_178a.c
1737 @@ -1109,6 +1109,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1738 u16 hdr_off;
1739 u32 *pkt_hdr;
1740
1741 + /* This check is no longer done by usbnet */
1742 + if (skb->len < dev->net->hard_header_len)
1743 + return 0;
1744 +
1745 skb_trim(skb, skb->len - 4);
1746 memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
1747 le32_to_cpus(&rx_hdr);
1748 diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
1749 index a7e3f4e55bf3..82ab61d62804 100644
1750 --- a/drivers/net/usb/gl620a.c
1751 +++ b/drivers/net/usb/gl620a.c
1752 @@ -86,6 +86,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1753 u32 size;
1754 u32 count;
1755
1756 + /* This check is no longer done by usbnet */
1757 + if (skb->len < dev->net->hard_header_len)
1758 + return 0;
1759 +
1760 header = (struct gl_header *) skb->data;
1761
1762 // get the packet count of the received skb
1763 diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
1764 index 03832d3780aa..9237c45883cd 100644
1765 --- a/drivers/net/usb/mcs7830.c
1766 +++ b/drivers/net/usb/mcs7830.c
1767 @@ -529,8 +529,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1768 {
1769 u8 status;
1770
1771 - if (skb->len == 0) {
1772 - dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
1773 + /* This check is no longer done by usbnet */
1774 + if (skb->len < dev->net->hard_header_len) {
1775 + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
1776 return 0;
1777 }
1778
1779 diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
1780 index 93e0716a118c..7f4a3a41c4f8 100644
1781 --- a/drivers/net/usb/net1080.c
1782 +++ b/drivers/net/usb/net1080.c
1783 @@ -366,6 +366,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1784 struct nc_trailer *trailer;
1785 u16 hdr_len, packet_len;
1786
1787 + /* This check is no longer done by usbnet */
1788 + if (skb->len < dev->net->hard_header_len)
1789 + return 0;
1790 +
1791 if (!(skb->len & 0x01)) {
1792 netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
1793 skb->len, dev->net->hard_header_len, dev->hard_mtu,
1794 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1795 index 34a081f1527f..37d9785974fc 100644
1796 --- a/drivers/net/usb/qmi_wwan.c
1797 +++ b/drivers/net/usb/qmi_wwan.c
1798 @@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1799 {
1800 __be16 proto;
1801
1802 - /* usbnet rx_complete guarantees that skb->len is at least
1803 - * hard_header_len, so we can inspect the dest address without
1804 - * checking skb->len
1805 - */
1806 + /* This check is no longer done by usbnet */
1807 + if (skb->len < dev->net->hard_header_len)
1808 + return 0;
1809 +
1810 switch (skb->data[0] & 0xf0) {
1811 case 0x40:
1812 proto = htons(ETH_P_IP);
1813 @@ -709,6 +709,7 @@ static const struct usb_device_id products[] = {
1814 {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
1815 {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
1816 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
1817 + {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
1818 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
1819 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
1820 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
1821 diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
1822 index cc49aac70224..691fca4e4c2d 100644
1823 --- a/drivers/net/usb/rndis_host.c
1824 +++ b/drivers/net/usb/rndis_host.c
1825 @@ -494,6 +494,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
1826 */
1827 int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1828 {
1829 + /* This check is no longer done by usbnet */
1830 + if (skb->len < dev->net->hard_header_len)
1831 + return 0;
1832 +
1833 /* peripheral may have batched packets to us... */
1834 while (likely(skb->len)) {
1835 struct rndis_data_hdr *hdr = (void *)skb->data;
1836 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
1837 index 66ebbacf066f..12afae0451e6 100644
1838 --- a/drivers/net/usb/smsc75xx.c
1839 +++ b/drivers/net/usb/smsc75xx.c
1840 @@ -2108,6 +2108,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
1841
1842 static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1843 {
1844 + /* This check is no longer done by usbnet */
1845 + if (skb->len < dev->net->hard_header_len)
1846 + return 0;
1847 +
1848 while (skb->len > 0) {
1849 u32 rx_cmd_a, rx_cmd_b, align_count, size;
1850 struct sk_buff *ax_skb;
1851 diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
1852 index 3f38ba868f61..9375b8c6410b 100644
1853 --- a/drivers/net/usb/smsc95xx.c
1854 +++ b/drivers/net/usb/smsc95xx.c
1855 @@ -1725,6 +1725,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1856
1857 static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1858 {
1859 + /* This check is no longer done by usbnet */
1860 + if (skb->len < dev->net->hard_header_len)
1861 + return 0;
1862 +
1863 while (skb->len > 0) {
1864 u32 header, align_count;
1865 struct sk_buff *ax_skb;
1866 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
1867 index 28f16ed6422d..f6dce4765de4 100644
1868 --- a/drivers/net/usb/usbnet.c
1869 +++ b/drivers/net/usb/usbnet.c
1870 @@ -517,17 +517,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
1871 }
1872 // else network stack removes extra byte if we forced a short packet
1873
1874 - if (skb->len) {
1875 - /* all data was already cloned from skb inside the driver */
1876 - if (dev->driver_info->flags & FLAG_MULTI_PACKET)
1877 - dev_kfree_skb_any(skb);
1878 - else
1879 - usbnet_skb_return(dev, skb);
1880 + /* all data was already cloned from skb inside the driver */
1881 + if (dev->driver_info->flags & FLAG_MULTI_PACKET)
1882 + goto done;
1883 +
1884 + if (skb->len < ETH_HLEN) {
1885 + dev->net->stats.rx_errors++;
1886 + dev->net->stats.rx_length_errors++;
1887 + netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
1888 + } else {
1889 + usbnet_skb_return(dev, skb);
1890 return;
1891 }
1892
1893 - netif_dbg(dev, rx_err, dev->net, "drop\n");
1894 - dev->net->stats.rx_errors++;
1895 done:
1896 skb_queue_tail(&dev->done, skb);
1897 }
1898 @@ -549,13 +551,6 @@ static void rx_complete (struct urb *urb)
1899 switch (urb_status) {
1900 /* success */
1901 case 0:
1902 - if (skb->len < dev->net->hard_header_len) {
1903 - state = rx_cleanup;
1904 - dev->net->stats.rx_errors++;
1905 - dev->net->stats.rx_length_errors++;
1906 - netif_dbg(dev, rx_err, dev->net,
1907 - "rx length %d\n", skb->len);
1908 - }
1909 break;
1910
1911 /* stalls need manual reset. this is rare ... except that
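[Note on the usbnet hunks above: the minimum-length check moves out of rx_complete(), where it ran before rx_fixup() and wrongly rejected FLAG_MULTI_PACKET URBs whose fixup consumes the whole buffer, and into rx_process() plus each driver's rx_fixup(). Every fixup that parses a framing header therefore now opens with the same guard; a sketch of the pattern the driver hunks follow:

/* Sketch of the per-driver guard (kernel context, usbnet API as above). */
#include <linux/skbuff.h>
#include <linux/usb/usbnet.h>

static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
        /* usbnet no longer guarantees a minimum length at this point */
        if (skb->len < dev->net->hard_header_len)
                return 0;               /* error: usbnet counts and frees it */

        /* ... safe to parse the vendor framing header now ... */
        return 1;                       /* frame accepted */
}
]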
1912 diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
1913 index e19a20a8e955..ecd1ac424047 100644
1914 --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
1915 +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
1916 @@ -15,6 +15,8 @@
1917 #ifndef RTL8187_H
1918 #define RTL8187_H
1919
1920 +#include <linux/cache.h>
1921 +
1922 #include "rtl818x.h"
1923 #include "leds.h"
1924
1925 @@ -139,7 +141,10 @@ struct rtl8187_priv {
1926 u8 aifsn[4];
1927 u8 rfkill_mask;
1928 struct {
1929 - __le64 buf;
1930 + union {
1931 + __le64 buf;
1932 + u8 dummy1[L1_CACHE_BYTES];
1933 + } ____cacheline_aligned;
1934 struct sk_buff_head queue;
1935 } b_tx_status; /* This queue is used by both -b and non-b devices */
1936 struct mutex io_mutex;
1937 @@ -147,7 +152,8 @@ struct rtl8187_priv {
1938 u8 bits8;
1939 __le16 bits16;
1940 __le32 bits32;
1941 - } *io_dmabuf;
1942 + u8 dummy2[L1_CACHE_BYTES];
1943 + } *io_dmabuf ____cacheline_aligned;
1944 bool rfkill_off;
1945 u16 seqno;
1946 };
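[Note on the rtl8187 hunk above: this is a DMA-safety fix for non-cache-coherent platforms. buf and io_dmabuf are handed to the USB/DMA layer, and cache maintenance on them operates on whole cache lines, so a neighbouring structure member sharing the line could be corrupted. Padding each DMA-able field out to L1_CACHE_BYTES gives it a line of its own. The idiom in isolation:

/* Sketch of the cache-line padding idiom (kernel context). */
#include <linux/cache.h>
#include <linux/types.h>

struct dma_state {
        union {
                __le64 buf;                     /* the actual DMA target */
                u8 pad[L1_CACHE_BYTES];         /* pad to a full cache line */
        } ____cacheline_aligned;
        /* members below never share a cache line with buf */
        int other_state;
};
]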
1947 diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
1948 index 71e917db8338..ace1a087fe8a 100644
1949 --- a/drivers/net/wireless/rtlwifi/ps.c
1950 +++ b/drivers/net/wireless/rtlwifi/ps.c
1951 @@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
1952
1953 /*<2> Enable Adapter */
1954 if (rtlpriv->cfg->ops->hw_init(hw))
1955 - return 1;
1956 + return false;
1957 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1958
1959 /*<3> Enable Interrupt */
1960 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
1961 index a82b30a1996c..2eb0b38384dd 100644
1962 --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
1963 +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
1964 @@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
1965 bool is92c;
1966 int err;
1967 u8 tmp_u1b;
1968 + unsigned long flags;
1969
1970 rtlpci->being_init_adapter = true;
1971 +
1972 + /* Since this function can take a very long time (up to 350 ms)
1973 + * and can be called with irqs disabled, reenable the irqs
1974 + * to let the other devices continue being serviced.
1975 + *
1976 + * It is safe doing so since our own interrupts will only be enabled
1977 + * in a subsequent step.
1978 + */
1979 + local_save_flags(flags);
1980 + local_irq_enable();
1981 +
1982 rtlpriv->intf_ops->disable_aspm(hw);
1983 rtstatus = _rtl92ce_init_mac(hw);
1984 if (!rtstatus) {
1985 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
1986 err = 1;
1987 - return err;
1988 + goto exit;
1989 }
1990
1991 err = rtl92c_download_fw(hw);
1992 @@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
1993 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1994 "Failed to download FW. Init HW without FW now..\n");
1995 err = 1;
1996 - return err;
1997 + goto exit;
1998 }
1999
2000 rtlhal->last_hmeboxnum = 0;
2001 @@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
2002 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
2003 }
2004 rtl92c_dm_init(hw);
2005 +exit:
2006 + local_irq_restore(flags);
2007 rtlpci->being_init_adapter = false;
2008 return err;
2009 }
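[Note on the rtl8192ce hunk above: as its new comment says, the init path can run for roughly 350 ms and may be entered with interrupts disabled, so the fix saves the IRQ state, enables interrupts for the slow work, and converts every early return into goto exit so the restore is unconditional. The control-flow skeleton, with a hypothetical init_mac() stand-in:

/* Sketch of the save/enable/restore shape (kernel context). */
#include <linux/irqflags.h>

static int init_mac(void) { return 0; }         /* stand-in for slow setup */

static int slow_hw_init(void)
{
        unsigned long flags;
        int err;

        local_save_flags(flags);
        local_irq_enable();     /* let other devices keep being serviced */

        err = init_mac();       /* may fail */
        if (err)
                goto exit;      /* no direct returns past irq_enable() */

        /* ... hundreds of milliseconds more of register setup ... */
exit:
        local_irq_restore(flags);       /* every path restores the caller */
        return err;
}
]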
2010 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2011 index a899d8bb190d..0bb7bfd49bf6 100644
2012 --- a/drivers/pci/pci.c
2013 +++ b/drivers/pci/pci.c
2014 @@ -1119,6 +1119,8 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
2015 static int do_pci_enable_device(struct pci_dev *dev, int bars)
2016 {
2017 int err;
2018 + u16 cmd;
2019 + u8 pin;
2020
2021 err = pci_set_power_state(dev, PCI_D0);
2022 if (err < 0 && err != -EIO)
2023 @@ -1128,6 +1130,14 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
2024 return err;
2025 pci_fixup_device(pci_fixup_enable, dev);
2026
2027 + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2028 + if (pin) {
2029 + pci_read_config_word(dev, PCI_COMMAND, &cmd);
2030 + if (cmd & PCI_COMMAND_INTX_DISABLE)
2031 + pci_write_config_word(dev, PCI_COMMAND,
2032 + cmd & ~PCI_COMMAND_INTX_DISABLE);
2033 + }
2034 +
2035 return 0;
2036 }
2037
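[Note on the PCI hunk above: firmware or a previous owner may leave INTx masked via PCI_COMMAND_INTX_DISABLE; for a device that actually routes an interrupt pin, pci_enable_device() should hand the driver hardware that can signal legacy interrupts, so the bit is cleared at enable time. The same logic in isolation:

/* Sketch mirroring the hunk (kernel context, standard PCI accessors). */
#include <linux/pci.h>

static void enable_intx_if_needed(struct pci_dev *dev)
{
        u16 cmd;
        u8 pin;

        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
        if (!pin)
                return;         /* device does not use INTx at all */

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (cmd & PCI_COMMAND_INTX_DISABLE)     /* left masked by firmware */
                pci_write_config_word(dev, PCI_COMMAND,
                                      cmd & ~PCI_COMMAND_INTX_DISABLE);
}
]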
2038 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2039 index fcdc22306cab..f033b191a022 100644
2040 --- a/drivers/scsi/qla2xxx/qla_target.c
2041 +++ b/drivers/scsi/qla2xxx/qla_target.c
2042 @@ -3339,7 +3339,8 @@ restart:
2043 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
2044 "SRR cmd %p (se_cmd %p, tag %d, op %x), "
2045 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
2046 - se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
2047 + se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2048 + cmd->sg_cnt, cmd->offset);
2049
2050 qlt_handle_srr(vha, sctio, imm);
2051
2052 diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
2053 index 1567ac296b39..0fce5fc9923b 100644
2054 --- a/drivers/staging/android/binder.c
2055 +++ b/drivers/staging/android/binder.c
2056 @@ -2902,7 +2902,7 @@ static int binder_node_release(struct binder_node *node, int refs)
2057 refs++;
2058
2059 if (!ref->death)
2060 - goto out;
2061 + continue;
2062
2063 death++;
2064
2065 @@ -2915,7 +2915,6 @@ static int binder_node_release(struct binder_node *node, int refs)
2066 BUG();
2067 }
2068
2069 -out:
2070 binder_debug(BINDER_DEBUG_DEAD_BINDER,
2071 "node %d now dead, refs %d, death %d\n",
2072 node->debug_id, refs, death);
2073 diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
2074 index b501346484ae..f1cab425163f 100644
2075 --- a/drivers/usb/chipidea/udc.c
2076 +++ b/drivers/usb/chipidea/udc.c
2077 @@ -103,7 +103,7 @@ static int hw_ep_flush(struct ci13xxx *ci, int num, int dir)
2078
2079 do {
2080 /* flush any pending transfer */
2081 - hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
2082 + hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
2083 while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
2084 cpu_relax();
2085 } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
2086 @@ -203,7 +203,7 @@ static int hw_ep_prime(struct ci13xxx *ci, int num, int dir, int is_ctrl)
2087 if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
2088 return -EAGAIN;
2089
2090 - hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
2091 + hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
2092
2093 while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
2094 cpu_relax();
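[Note on the chipidea hunk above: hw_write(ci, reg, mask, data) is a read-modify-write, so with mask = BIT(n) it read ENDPTFLUSH/ENDPTPRIME and wrote the other bits back. Those are write-1-to-set trigger registers, so replaying a transient 1 re-flushes or re-primes another endpoint. Passing ~0 as the mask stores BIT(n) alone, and writing 0 to a W1S bit is a no-op. The general rule as a bare MMIO sketch, not the chipidea accessor itself:

/* Sketch: never read-modify-write a write-1-to-set register. */
#include <linux/bitops.h>
#include <linux/io.h>

static void prime_endpoint(void __iomem *endptprime, unsigned int n)
{
        /*
         * No readl()/modify/writel() round trip here: it could latch a
         * transient 1 from another endpoint and trigger it again.
         * Writing 0 to the other W1S bits is harmless by definition.
         */
        writel(BIT(n), endptprime);
}
]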
2095 diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
2096 index fd24cb4540a4..5396709cdc07 100644
2097 --- a/drivers/usb/gadget/bcm63xx_udc.c
2098 +++ b/drivers/usb/gadget/bcm63xx_udc.c
2099 @@ -361,24 +361,30 @@ static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
2100 bcm_writel(val, udc->iudma_regs + off);
2101 }
2102
2103 -static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
2104 +static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
2105 {
2106 - return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
2107 + return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
2108 + (ENETDMA_CHAN_WIDTH * chan));
2109 }
2110
2111 -static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
2112 +static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
2113 + int chan)
2114 {
2115 - bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
2116 + bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
2117 + (ENETDMA_CHAN_WIDTH * chan));
2118 }
2119
2120 -static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
2121 +static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
2122 {
2123 - return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
2124 + return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
2125 + (ENETDMA_CHAN_WIDTH * chan));
2126 }
2127
2128 -static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
2129 +static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
2130 + int chan)
2131 {
2132 - bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
2133 + bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
2134 + (ENETDMA_CHAN_WIDTH * chan));
2135 }
2136
2137 static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
2138 @@ -639,7 +645,7 @@ static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
2139 } while (!last_bd);
2140
2141 usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
2142 - ENETDMAC_CHANCFG_REG(iudma->ch_idx));
2143 + ENETDMAC_CHANCFG_REG, iudma->ch_idx);
2144 }
2145
2146 /**
2147 @@ -695,9 +701,9 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
2148 bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
2149
2150 /* stop DMA, then wait for the hardware to wrap up */
2151 - usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
2152 + usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
2153
2154 - while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
2155 + while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
2156 ENETDMAC_CHANCFG_EN_MASK) {
2157 udelay(1);
2158
2159 @@ -714,10 +720,10 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
2160 dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
2161 ch_idx);
2162 usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
2163 - ENETDMAC_CHANCFG_REG(ch_idx));
2164 + ENETDMAC_CHANCFG_REG, ch_idx);
2165 }
2166 }
2167 - usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
2168 + usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
2169
2170 /* don't leave "live" HW-owned entries for the next guy to step on */
2171 for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
2172 @@ -729,11 +735,11 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
2173
2174 /* set up IRQs, UBUS burst size, and BD base for this channel */
2175 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2176 - ENETDMAC_IRMASK_REG(ch_idx));
2177 - usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
2178 + ENETDMAC_IRMASK_REG, ch_idx);
2179 + usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
2180
2181 - usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
2182 - usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
2183 + usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
2184 + usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
2185 }
2186
2187 /**
2188 @@ -2036,7 +2042,7 @@ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2189 spin_lock(&udc->lock);
2190
2191 usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2192 - ENETDMAC_IR_REG(iudma->ch_idx));
2193 + ENETDMAC_IR_REG, iudma->ch_idx);
2194 bep = iudma->bep;
2195 rc = iudma_read(udc, iudma);
2196
2197 @@ -2176,18 +2182,18 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2198 seq_printf(s, " [ep%d]:\n",
2199 max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2200 seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2201 - usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
2202 - usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
2203 - usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
2204 - usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
2205 + usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2206 + usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2207 + usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2208 + usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2209
2210 - sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
2211 - sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
2212 + sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2213 + sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2214 seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2215 - usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
2216 + usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2217 sram2 >> 16, sram2 & 0xffff,
2218 sram3 >> 16, sram3 & 0xffff,
2219 - usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
2220 + usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2221 seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2222 iudma->n_bds);
2223
2224 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2225 index 246e124e6ac5..4518b8189a9e 100644
2226 --- a/drivers/usb/host/ehci-hcd.c
2227 +++ b/drivers/usb/host/ehci-hcd.c
2228 @@ -686,8 +686,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
2229 struct ehci_hcd *ehci = hcd_to_ehci (hcd);
2230 u32 status, masked_status, pcd_status = 0, cmd;
2231 int bh;
2232 + unsigned long flags;
2233
2234 - spin_lock (&ehci->lock);
2235 + /*
2236 + * For threadirqs option we use spin_lock_irqsave() variant to prevent
2237 + * deadlock with ehci hrtimer callback, because hrtimer callbacks run
2238 + * in interrupt context even when threadirqs is specified. We can go
2239 + * back to spin_lock() variant when hrtimer callbacks become threaded.
2240 + */
2241 + spin_lock_irqsave(&ehci->lock, flags);
2242
2243 status = ehci_readl(ehci, &ehci->regs->status);
2244
2245 @@ -705,7 +712,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
2246
2247 /* Shared IRQ? */
2248 if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
2249 - spin_unlock(&ehci->lock);
2250 + spin_unlock_irqrestore(&ehci->lock, flags);
2251 return IRQ_NONE;
2252 }
2253
2254 @@ -823,7 +830,7 @@ dead:
2255
2256 if (bh)
2257 ehci_work (ehci);
2258 - spin_unlock (&ehci->lock);
2259 + spin_unlock_irqrestore(&ehci->lock, flags);
2260 if (pcd_status)
2261 usb_hcd_poll_rh_status(hcd);
2262 return IRQ_HANDLED;
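[Note on the ehci hunk above: its comment spells the deadlock out. With the threadirqs boot option the handler runs in a thread with interrupts enabled, while the driver's hrtimer callback still runs in hard-IRQ context and takes the same lock. The skeleton of the safe handler:

/* Sketch of why the handler needs the irqsave variant (kernel context). */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_lock);

static irqreturn_t example_irq(int irq, void *dev_id)
{
        unsigned long flags;

        /*
         * Under "threadirqs" this may run with IRQs on; a hard-IRQ hrtimer
         * callback that takes state_lock could interrupt the critical
         * section. The irqsave variant closes that window.
         */
        spin_lock_irqsave(&state_lock, flags);
        /* ... read status, schedule bottom-half work ... */
        spin_unlock_irqrestore(&state_lock, flags);

        return IRQ_HANDLED;
}
]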
2263 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2264 index b7446a69d2a9..b83da38bc915 100644
2265 --- a/drivers/usb/serial/ftdi_sio.c
2266 +++ b/drivers/usb/serial/ftdi_sio.c
2267 @@ -910,6 +910,8 @@ static struct usb_device_id id_table_combined [] = {
2268 /* Crucible Devices */
2269 { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
2270 { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
2271 + /* Cressi Devices */
2272 + { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
2273 { }, /* Optional parameter entry */
2274 { } /* Terminating entry */
2275 };
2276 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2277 index 1e2d369df86e..e599fbfcde5f 100644
2278 --- a/drivers/usb/serial/ftdi_sio_ids.h
2279 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2280 @@ -1320,3 +1320,9 @@
2281 * Manufacturer: Smart GSM Team
2282 */
2283 #define FTDI_Z3X_PID 0x0011
2284 +
2285 +/*
2286 + * Product: Cressi PC Interface
2287 + * Manufacturer: Cressi
2288 + */
2289 +#define FTDI_CRESSI_PID 0x87d0
2290 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2291 index 216d20affba8..68fc9fe65936 100644
2292 --- a/drivers/usb/serial/option.c
2293 +++ b/drivers/usb/serial/option.c
2294 @@ -1526,7 +1526,8 @@ static const struct usb_device_id option_ids[] = {
2295 /* Cinterion */
2296 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
2297 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
2298 - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
2299 + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
2300 + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2301 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
2302 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
2303 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2304 diff --git a/fs/attr.c b/fs/attr.c
2305 index 1449adb14ef6..8dd5825ec708 100644
2306 --- a/fs/attr.c
2307 +++ b/fs/attr.c
2308 @@ -182,11 +182,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
2309 return -EPERM;
2310 }
2311
2312 - if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
2313 - if (attr->ia_size != inode->i_size)
2314 - inode_inc_iversion(inode);
2315 - }
2316 -
2317 if ((ia_valid & ATTR_MODE)) {
2318 umode_t amode = attr->ia_mode;
2319 /* Flag setting protected by i_mutex */
2320 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2321 index 25e6a8e1014e..8fcd2424e7f9 100644
2322 --- a/fs/btrfs/inode.c
2323 +++ b/fs/btrfs/inode.c
2324 @@ -4527,8 +4527,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
2325 * these flags set. For all other operations the VFS set these flags
2326 * explicitly if it wants a timestamp update.
2327 */
2328 - if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
2329 - inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
2330 + if (newsize != oldsize) {
2331 + inode_inc_iversion(inode);
2332 + if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
2333 + inode->i_ctime = inode->i_mtime =
2334 + current_fs_time(inode->i_sb);
2335 + }
2336
2337 if (newsize > oldsize) {
2338 truncate_pagecache(inode, oldsize, newsize);
2339 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2340 index c2934f8701da..8b0c656f2ab2 100644
2341 --- a/fs/cifs/file.c
2342 +++ b/fs/cifs/file.c
2343 @@ -2353,7 +2353,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2344 unsigned long nr_segs, loff_t *poffset)
2345 {
2346 unsigned long nr_pages, i;
2347 - size_t copied, len, cur_len;
2348 + size_t bytes, copied, len, cur_len;
2349 ssize_t total_written = 0;
2350 loff_t offset;
2351 struct iov_iter it;
2352 @@ -2408,14 +2408,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
2353
2354 save_len = cur_len;
2355 for (i = 0; i < nr_pages; i++) {
2356 - copied = min_t(const size_t, cur_len, PAGE_SIZE);
2357 + bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2358 copied = iov_iter_copy_from_user(wdata->pages[i], &it,
2359 - 0, copied);
2360 + 0, bytes);
2361 cur_len -= copied;
2362 iov_iter_advance(&it, copied);
2363 + /*
2364 + * If we didn't copy as much as we expected, then that
2365 + * may mean we trod into an unmapped area. Stop copying
2366 + * at that point. On the next pass through the big
2367 + * loop, we'll likely end up getting a zero-length
2368 + * write and bailing out of it.
2369 + */
2370 + if (copied < bytes)
2371 + break;
2372 }
2373 cur_len = save_len - cur_len;
2374
2375 + /*
2376 + * If we have no data to send, then that probably means that
2377 + * the copy above failed altogether. That's most likely because
2378 + * the address in the iovec was bogus. Set the rc to -EFAULT,
2379 + * free anything we allocated and bail out.
2380 + */
2381 + if (!cur_len) {
2382 + for (i = 0; i < nr_pages; i++)
2383 + put_page(wdata->pages[i]);
2384 + kfree(wdata);
2385 + rc = -EFAULT;
2386 + break;
2387 + }
2388 +
2389 + /*
2390 + * i + 1 now represents the number of pages we actually used in
2391 + * the copy phase above. Bring nr_pages down to that, and free
2392 + * any pages that we didn't use.
2393 + */
2394 + for ( ; nr_pages > i + 1; nr_pages--)
2395 + put_page(wdata->pages[nr_pages - 1]);
2396 +
2397 wdata->sync_mode = WB_SYNC_ALL;
2398 wdata->nr_pages = nr_pages;
2399 wdata->offset = (__u64)offset;
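[Note on the cifs hunk above: the rewrite separates bytes requested ("bytes") from bytes that actually landed ("copied"), so a short copy, meaning the iovec ran into an unmapped address, stops the loop; a wholly failed copy becomes -EFAULT; and pages the copy never reached are released rather than sent. A condensed sketch of that copy phase with the cifs plumbing stripped away:

/* Sketch of the short-copy handling (kernel context). */
#include <linux/errno.h>
#include <linux/fs.h>           /* iov_iter_copy_from_user() */
#include <linux/kernel.h>       /* min_t() */
#include <linux/mm.h>           /* put_page(), PAGE_SIZE */

static long fill_pages(struct page **pages, unsigned long nr_pages,
                       struct iov_iter *it, size_t len)
{
        size_t bytes, copied, remaining = len;
        unsigned long i;

        for (i = 0; i < nr_pages; i++) {
                bytes = min_t(size_t, remaining, PAGE_SIZE);
                copied = iov_iter_copy_from_user(pages[i], it, 0, bytes);
                remaining -= copied;
                if (copied < bytes)
                        break;  /* hit an unmapped part of the user buffer */
        }

        if (remaining == len) { /* nothing copied at all: bogus iovec */
                for (i = 0; i < nr_pages; i++)
                        put_page(pages[i]);
                return -EFAULT;
        }

        /* i + 1 pages were used; release the ones the short copy skipped */
        for ( ; nr_pages > i + 1; nr_pages--)
                put_page(pages[nr_pages - 1]);

        return len - remaining;
}
]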
2400 diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
2401 index 7c0e2143e775..cc592ef6584a 100644
2402 --- a/fs/cifs/smb2glob.h
2403 +++ b/fs/cifs/smb2glob.h
2404 @@ -55,4 +55,7 @@
2405 #define SMB2_NTLMV2_SESSKEY_SIZE (16)
2406 #define SMB2_HMACSHA256_SIZE (32)
2407
2408 +/* Maximum buffer size value we can send with 1 credit */
2409 +#define SMB2_MAX_BUFFER_SIZE 65536
2410 +
2411 #endif /* _SMB2_GLOB_H */
2412 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2413 index f2e76f3b0c61..e2756bb40b4d 100644
2414 --- a/fs/cifs/smb2ops.c
2415 +++ b/fs/cifs/smb2ops.c
2416 @@ -181,11 +181,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
2417 /* start with specified wsize, or default */
2418 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
2419 wsize = min_t(unsigned int, wsize, server->max_write);
2420 - /*
2421 - * limit write size to 2 ** 16, because we don't support multicredit
2422 - * requests now.
2423 - */
2424 - wsize = min_t(unsigned int, wsize, 2 << 15);
2425 + /* set it to the maximum buffer size value we can send with 1 credit */
2426 + wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
2427
2428 return wsize;
2429 }
2430 @@ -199,11 +196,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
2431 /* start with specified rsize, or default */
2432 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
2433 rsize = min_t(unsigned int, rsize, server->max_read);
2434 - /*
2435 - * limit write size to 2 ** 16, because we don't support multicredit
2436 - * requests now.
2437 - */
2438 - rsize = min_t(unsigned int, rsize, 2 << 15);
2439 + /* set it to the maximum buffer size value we can send with 1 credit */
2440 + rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
2441
2442 return rsize;
2443 }
2444 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2445 index 2b95ce2b54e8..c7a6fd87bb6e 100644
2446 --- a/fs/cifs/smb2pdu.c
2447 +++ b/fs/cifs/smb2pdu.c
2448 @@ -408,6 +408,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
2449 server->dialect = le16_to_cpu(rsp->DialectRevision);
2450
2451 server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
2452 + /* set it to the maximum buffer size value we can send with 1 credit */
2453 + server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
2454 + SMB2_MAX_BUFFER_SIZE);
2455 server->max_read = le32_to_cpu(rsp->MaxReadSize);
2456 server->max_write = le32_to_cpu(rsp->MaxWriteSize);
2457 /* BB Do we need to validate the SecurityMode? */
2458 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
2459 index 7bb2e2e55123..790b14c5f262 100644
2460 --- a/fs/ext4/ext4.h
2461 +++ b/fs/ext4/ext4.h
2462 @@ -774,6 +774,8 @@ do { \
2463 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
2464 (einode)->xtime.tv_sec = \
2465 (signed)le32_to_cpu((raw_inode)->xtime); \
2466 + else \
2467 + (einode)->xtime.tv_sec = 0; \
2468 if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
2469 ext4_decode_extra_time(&(einode)->xtime, \
2470 raw_inode->xtime ## _extra); \
2471 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2472 index cb2bdc7ccb05..21dff8f236f6 100644
2473 --- a/fs/ext4/inode.c
2474 +++ b/fs/ext4/inode.c
2475 @@ -4704,6 +4704,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
2476 if (attr->ia_size > sbi->s_bitmap_maxbytes)
2477 return -EFBIG;
2478 }
2479 +
2480 + if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
2481 + inode_inc_iversion(inode);
2482 +
2483 if (S_ISREG(inode->i_mode) &&
2484 (attr->ia_size < inode->i_size)) {
2485 if (ext4_should_order_data(inode)) {
2486 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
2487 index c0427e2f6648..42624a995b00 100644
2488 --- a/fs/ext4/ioctl.c
2489 +++ b/fs/ext4/ioctl.c
2490 @@ -145,7 +145,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
2491 handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
2492 if (IS_ERR(handle)) {
2493 err = -EINVAL;
2494 - goto swap_boot_out;
2495 + goto journal_err_out;
2496 }
2497
2498 /* Protect extent tree against block allocations via delalloc */
2499 @@ -203,6 +203,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
2500
2501 ext4_double_up_write_data_sem(inode, inode_bl);
2502
2503 +journal_err_out:
2504 ext4_inode_resume_unlocked_dio(inode);
2505 ext4_inode_resume_unlocked_dio(inode_bl);
2506
2507 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2508 index 49d3c01eabf8..c503850a61a8 100644
2509 --- a/fs/ext4/resize.c
2510 +++ b/fs/ext4/resize.c
2511 @@ -238,6 +238,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
2512 ext4_group_t group;
2513 ext4_group_t last_group;
2514 unsigned overhead;
2515 + __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
2516
2517 BUG_ON(flex_gd->count == 0 || group_data == NULL);
2518
2519 @@ -261,7 +262,7 @@ next_group:
2520 src_group++;
2521 for (; src_group <= last_group; src_group++) {
2522 overhead = ext4_group_overhead_blocks(sb, src_group);
2523 - if (overhead != 0)
2524 + if (overhead == 0)
2525 last_blk += group_data[src_group - group].blocks_count;
2526 else
2527 break;
2528 @@ -275,8 +276,7 @@ next_group:
2529 group = ext4_get_group_number(sb, start_blk - 1);
2530 group -= group_data[0].group;
2531 group_data[group].free_blocks_count--;
2532 - if (flexbg_size > 1)
2533 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
2534 + flex_gd->bg_flags[group] &= uninit_mask;
2535 }
2536
2537 /* Allocate inode bitmaps */
2538 @@ -287,22 +287,30 @@ next_group:
2539 group = ext4_get_group_number(sb, start_blk - 1);
2540 group -= group_data[0].group;
2541 group_data[group].free_blocks_count--;
2542 - if (flexbg_size > 1)
2543 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
2544 + flex_gd->bg_flags[group] &= uninit_mask;
2545 }
2546
2547 /* Allocate inode tables */
2548 for (; it_index < flex_gd->count; it_index++) {
2549 - if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
2550 + unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
2551 + ext4_fsblk_t next_group_start;
2552 +
2553 + if (start_blk + itb > last_blk)
2554 goto next_group;
2555 group_data[it_index].inode_table = start_blk;
2556 - group = ext4_get_group_number(sb, start_blk - 1);
2557 + group = ext4_get_group_number(sb, start_blk);
2558 + next_group_start = ext4_group_first_block_no(sb, group + 1);
2559 group -= group_data[0].group;
2560 - group_data[group].free_blocks_count -=
2561 - EXT4_SB(sb)->s_itb_per_group;
2562 - if (flexbg_size > 1)
2563 - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
2564
2565 + if (start_blk + itb > next_group_start) {
2566 + flex_gd->bg_flags[group + 1] &= uninit_mask;
2567 + overhead = start_blk + itb - next_group_start;
2568 + group_data[group + 1].free_blocks_count -= overhead;
2569 + itb -= overhead;
2570 + }
2571 +
2572 + group_data[group].free_blocks_count -= itb;
2573 + flex_gd->bg_flags[group] &= uninit_mask;
2574 start_blk += EXT4_SB(sb)->s_itb_per_group;
2575 }
2576
2577 @@ -396,7 +404,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
2578 start = ext4_group_first_block_no(sb, group);
2579 group -= flex_gd->groups[0].group;
2580
2581 - count2 = sb->s_blocksize * 8 - (block - start);
2582 + count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
2583 if (count2 > count)
2584 count2 = count;
2585
2586 @@ -615,7 +623,7 @@ handle_ib:
2587 if (err)
2588 goto out;
2589 count = group_table_count[j];
2590 - start = group_data[i].block_bitmap;
2591 + start = (&group_data[i].block_bitmap)[j];
2592 block = start;
2593 }
2594
2595 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
2596 index e4923b6a9e39..a7a5f7ea74db 100644
2597 --- a/fs/ext4/super.c
2598 +++ b/fs/ext4/super.c
2599 @@ -3592,16 +3592,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2600 for (i = 0; i < 4; i++)
2601 sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
2602 sbi->s_def_hash_version = es->s_def_hash_version;
2603 - i = le32_to_cpu(es->s_flags);
2604 - if (i & EXT2_FLAGS_UNSIGNED_HASH)
2605 - sbi->s_hash_unsigned = 3;
2606 - else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
2607 + if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
2608 + i = le32_to_cpu(es->s_flags);
2609 + if (i & EXT2_FLAGS_UNSIGNED_HASH)
2610 + sbi->s_hash_unsigned = 3;
2611 + else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
2612 #ifdef __CHAR_UNSIGNED__
2613 - es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
2614 - sbi->s_hash_unsigned = 3;
2615 + if (!(sb->s_flags & MS_RDONLY))
2616 + es->s_flags |=
2617 + cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
2618 + sbi->s_hash_unsigned = 3;
2619 #else
2620 - es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
2621 + if (!(sb->s_flags & MS_RDONLY))
2622 + es->s_flags |=
2623 + cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
2624 #endif
2625 + }
2626 }
2627
2628 /* Handle clustersize */
2629 diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
2630 index 3e64169ef527..38802d683969 100644
2631 --- a/fs/quota/dquot.c
2632 +++ b/fs/quota/dquot.c
2633 @@ -581,9 +581,17 @@ int dquot_scan_active(struct super_block *sb,
2634 dqstats_inc(DQST_LOOKUPS);
2635 dqput(old_dquot);
2636 old_dquot = dquot;
2637 - ret = fn(dquot, priv);
2638 - if (ret < 0)
2639 - goto out;
2640 + /*
2641 + * ->release_dquot() can be racing with us. Our reference
2642 + * protects us from new calls to it so just wait for any
2643 + * outstanding call and recheck the DQ_ACTIVE_B after that.
2644 + */
2645 + wait_on_dquot(dquot);
2646 + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
2647 + ret = fn(dquot, priv);
2648 + if (ret < 0)
2649 + goto out;
2650 + }
2651 spin_lock(&dq_list_lock);
2652 /* We are safe to continue now because our dquot could not
2653 * be moved out of the inuse list while we hold the reference */
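[Note on the quota hunk above: its comment describes a classic teardown race. Holding a reference stops new ->release_dquot() calls, but one already in flight can still clear DQ_ACTIVE_B, so the scan waits for it to finish and rechecks the flag before invoking the callback. The "reference, wait, recheck" shape in the abstract, with hypothetical obj_* helpers:

/* Hypothetical obj_* helpers; only the ordering matters. */
struct obj { int active; };

static void obj_get(struct obj *o)       { (void)o; }  /* take a ref   */
static void obj_put(struct obj *o)       { (void)o; }  /* drop the ref */
static void obj_wait_idle(struct obj *o) { (void)o; }  /* flush racer  */

static void scan_one(struct obj *o, int (*fn)(struct obj *))
{
        obj_get(o);             /* blocks new teardown attempts...        */
        obj_wait_idle(o);       /* ...and this drains one already running */

        if (o->active)          /* recheck: did teardown win the race?    */
                fn(o);

        obj_put(o);
}
]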
2654 diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
2655 index 2f0543f7510c..f9bbbb472663 100644
2656 --- a/include/linux/can/skb.h
2657 +++ b/include/linux/can/skb.h
2658 @@ -11,7 +11,9 @@
2659 #define CAN_SKB_H
2660
2661 #include <linux/types.h>
2662 +#include <linux/skbuff.h>
2663 #include <linux/can.h>
2664 +#include <net/sock.h>
2665
2666 /*
2667 * The struct can_skb_priv is used to transport additional information along
2668 @@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
2669 skb_reserve(skb, sizeof(struct can_skb_priv));
2670 }
2671
2672 +static inline void can_skb_destructor(struct sk_buff *skb)
2673 +{
2674 + sock_put(skb->sk);
2675 +}
2676 +
2677 +static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
2678 +{
2679 + if (sk) {
2680 + sock_hold(sk);
2681 + skb->destructor = can_skb_destructor;
2682 + skb->sk = sk;
2683 + }
2684 +}
2685 +
2686 +/*
2687 + * returns an unshared skb owned by the original sock to be echo'ed back
2688 + */
2689 +static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
2690 +{
2691 + if (skb_shared(skb)) {
2692 + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2693 +
2694 + if (likely(nskb)) {
2695 + can_skb_set_owner(nskb, skb->sk);
2696 + consume_skb(skb);
2697 + return nskb;
2698 + } else {
2699 + kfree_skb(skb);
2700 + return NULL;
2701 + }
2702 + }
2703 +
2704 + /* we can assume to have an unshared skb with proper owner */
2705 + return skb;
2706 +}
2707 +
2708 #endif /* CAN_SKB_H */
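[Note on the can/skb.h hunk above: these helpers replace the clone-or-orphan blocks deleted from can/dev.c, janz-ican3 and vcan earlier in this patch, and fix a second bug while at it. The old code set skb->sk without taking a reference, so the socket could be freed while its echo skb was still queued; can_skb_set_owner() pins it with sock_hold() and releases it from the destructor. Typical driver usage:

/* Sketch of driver usage of the new helper (kernel context). */
#include <linux/can/skb.h>
#include <linux/skbuff.h>

static void queue_for_echo(struct sk_buff_head *echoq, struct sk_buff *skb)
{
        /* get an unshared skb that holds a proper reference on skb->sk */
        skb = can_create_echo_skb(skb);
        if (!skb)
                return;         /* the helper already freed the original */

        skb_queue_tail(echoq, skb);     /* echo it back on TX completion */
}
]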
2709 diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
2710 index f6c82de12541..d6ad91f26038 100644
2711 --- a/include/linux/ipc_namespace.h
2712 +++ b/include/linux/ipc_namespace.h
2713 @@ -119,9 +119,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
2714 * the new maximum will handle anyone else. I may have to revisit this
2715 * in the future.
2716 */
2717 -#define MIN_QUEUESMAX 1
2718 #define DFLT_QUEUESMAX 256
2719 -#define HARD_QUEUESMAX 1024
2720 #define MIN_MSGMAX 1
2721 #define DFLT_MSG 10U
2722 #define DFLT_MSGMAX 10
2723 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
2724 index abf7756eaf9e..4d2e0418ab5a 100644
2725 --- a/include/linux/netdevice.h
2726 +++ b/include/linux/netdevice.h
2727 @@ -2761,7 +2761,12 @@ void netdev_change_features(struct net_device *dev);
2728 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
2729 struct net_device *dev);
2730
2731 -netdev_features_t netif_skb_features(struct sk_buff *skb);
2732 +netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
2733 + const struct net_device *dev);
2734 +static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
2735 +{
2736 + return netif_skb_dev_features(skb, skb->dev);
2737 +}
2738
2739 static inline bool net_gso_ok(netdev_features_t features, int gso_type)
2740 {
2741 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2742 index ded45ec6b22b..478120ae34e5 100644
2743 --- a/include/linux/skbuff.h
2744 +++ b/include/linux/skbuff.h
2745 @@ -2488,6 +2488,8 @@ extern int skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
2746 extern struct sk_buff *skb_segment(struct sk_buff *skb,
2747 netdev_features_t features);
2748
2749 +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
2750 +
2751 static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
2752 int len, void *buffer)
2753 {
2754 @@ -2911,5 +2913,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
2755 {
2756 return !skb->head_frag || skb_cloned(skb);
2757 }
2758 +
2759 +/**
2760 + * skb_gso_network_seglen - Return length of individual segments of a gso packet
2761 + *
2762 + * @skb: GSO skb
2763 + *
2764 + * skb_gso_network_seglen is used to determine the real size of the
2765 + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
2766 + *
2767 + * The MAC/L2 header is not accounted for.
2768 + */
2769 +static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
2770 +{
2771 + unsigned int hdr_len = skb_transport_header(skb) -
2772 + skb_network_header(skb);
2773 + return hdr_len + skb_gso_transport_seglen(skb);
2774 +}
2775 #endif /* __KERNEL__ */
2776 #endif /* _LINUX_SKBUFF_H */
2777 diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
2778 index 383d638340b8..5bb8bfe67149 100644
2779 --- a/ipc/mq_sysctl.c
2780 +++ b/ipc/mq_sysctl.c
2781 @@ -22,6 +22,16 @@ static void *get_mq(ctl_table *table)
2782 return which;
2783 }
2784
2785 +static int proc_mq_dointvec(ctl_table *table, int write,
2786 + void __user *buffer, size_t *lenp, loff_t *ppos)
2787 +{
2788 + struct ctl_table mq_table;
2789 + memcpy(&mq_table, table, sizeof(mq_table));
2790 + mq_table.data = get_mq(table);
2791 +
2792 + return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
2793 +}
2794 +
2795 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
2796 void __user *buffer, size_t *lenp, loff_t *ppos)
2797 {
2798 @@ -33,12 +43,10 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
2799 lenp, ppos);
2800 }
2801 #else
2802 +#define proc_mq_dointvec NULL
2803 #define proc_mq_dointvec_minmax NULL
2804 #endif
2805
2806 -static int msg_queues_limit_min = MIN_QUEUESMAX;
2807 -static int msg_queues_limit_max = HARD_QUEUESMAX;
2808 -
2809 static int msg_max_limit_min = MIN_MSGMAX;
2810 static int msg_max_limit_max = HARD_MSGMAX;
2811
2812 @@ -51,9 +59,7 @@ static ctl_table mq_sysctls[] = {
2813 .data = &init_ipc_ns.mq_queues_max,
2814 .maxlen = sizeof(int),
2815 .mode = 0644,
2816 - .proc_handler = proc_mq_dointvec_minmax,
2817 - .extra1 = &msg_queues_limit_min,
2818 - .extra2 = &msg_queues_limit_max,
2819 + .proc_handler = proc_mq_dointvec,
2820 },
2821 {
2822 .procname = "msg_max",
2823 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
2824 index ae1996d3c539..bb0248fc5187 100644
2825 --- a/ipc/mqueue.c
2826 +++ b/ipc/mqueue.c
2827 @@ -433,9 +433,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
2828 error = -EACCES;
2829 goto out_unlock;
2830 }
2831 - if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
2832 - (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
2833 - !capable(CAP_SYS_RESOURCE))) {
2834 +
2835 + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
2836 + !capable(CAP_SYS_RESOURCE)) {
2837 error = -ENOSPC;
2838 goto out_unlock;
2839 }
2840 diff --git a/kernel/events/core.c b/kernel/events/core.c
2841 index e76e4959908c..f8eb2b154bdb 100644
2842 --- a/kernel/events/core.c
2843 +++ b/kernel/events/core.c
2844 @@ -7421,14 +7421,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
2845 static void __perf_event_exit_context(void *__info)
2846 {
2847 struct perf_event_context *ctx = __info;
2848 - struct perf_event *event, *tmp;
2849 + struct perf_event *event;
2850
2851 perf_pmu_rotate_stop(ctx->pmu);
2852
2853 - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
2854 - __perf_remove_from_context(event);
2855 - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
2856 + rcu_read_lock();
2857 + list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
2858 __perf_remove_from_context(event);
2859 + rcu_read_unlock();
2860 }
2861
2862 static void perf_event_exit_cpu_context(int cpu)
2863 @@ -7452,11 +7452,11 @@ static void perf_event_exit_cpu(int cpu)
2864 {
2865 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
2866
2867 + perf_event_exit_cpu_context(cpu);
2868 +
2869 mutex_lock(&swhash->hlist_mutex);
2870 swevent_hlist_release(swhash);
2871 mutex_unlock(&swhash->hlist_mutex);
2872 -
2873 - perf_event_exit_cpu_context(cpu);
2874 }
2875 #else
2876 static inline void perf_event_exit_cpu(int cpu) { }
2877 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2878 index 68086a34b8ef..db7a6ac7c0a8 100644
2879 --- a/kernel/workqueue.c
2880 +++ b/kernel/workqueue.c
2881 @@ -1823,6 +1823,12 @@ static void destroy_worker(struct worker *worker)
2882 if (worker->flags & WORKER_IDLE)
2883 pool->nr_idle--;
2884
2885 + /*
2886 + * Once WORKER_DIE is set, the kworker may destroy itself at any
2887 + * point. Pin to ensure the task stays until we're done with it.
2888 + */
2889 + get_task_struct(worker->task);
2890 +
2891 list_del_init(&worker->entry);
2892 worker->flags |= WORKER_DIE;
2893
2894 @@ -1831,6 +1837,7 @@ static void destroy_worker(struct worker *worker)
2895 spin_unlock_irq(&pool->lock);
2896
2897 kthread_stop(worker->task);
2898 + put_task_struct(worker->task);
2899 kfree(worker);
2900
2901 spin_lock_irq(&pool->lock);
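[Note on the workqueue hunk above: its comment is the whole story. Once WORKER_DIE is set the kworker may exit and drop the final reference to its own task_struct, yet kthread_stop() must still be able to dereference it, so the task is pinned across the stop. The pattern in isolation:

/* Sketch of pinning a task across kthread_stop() (kernel context). */
#include <linux/kthread.h>
#include <linux/sched.h>

static void stop_dying_thread(struct task_struct *task)
{
        get_task_struct(task);  /* thread may exit as soon as we signal it */

        /* ... mark the thread for death (set a DIE flag, wake it) ... */

        kthread_stop(task);     /* safe: our reference keeps *task alive */
        put_task_struct(task);
}
]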
2902 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2903 index 134e2106f467..6115b2bbd6ea 100644
2904 --- a/mm/memcontrol.c
2905 +++ b/mm/memcontrol.c
2906 @@ -1220,7 +1220,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
2907 if (dead_count == iter->last_dead_count) {
2908 smp_rmb();
2909 last_visited = iter->last_visited;
2910 - if (last_visited &&
2911 + if (last_visited && last_visited != root &&
2912 !css_tryget(&last_visited->css))
2913 last_visited = NULL;
2914 }
2915 @@ -1229,7 +1229,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root,
2916 memcg = __mem_cgroup_iter_next(root, last_visited);
2917
2918 if (reclaim) {
2919 - if (last_visited)
2920 + if (last_visited && last_visited != root)
2921 css_put(&last_visited->css);
2922
2923 iter->last_visited = memcg;
2924 diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
2925 index 990afab2be1b..c76a4388a5d7 100644
2926 --- a/net/9p/trans_virtio.c
2927 +++ b/net/9p/trans_virtio.c
2928 @@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
2929 int count = nr_pages;
2930 while (nr_pages) {
2931 s = rest_of_page(data);
2932 - pages[index++] = kmap_to_page(data);
2933 + if (is_vmalloc_addr(data))
2934 + pages[index++] = vmalloc_to_page(data);
2935 + else
2936 + pages[index++] = kmap_to_page(data);
2937 data += s;
2938 nr_pages--;
2939 }
2940 diff --git a/net/can/af_can.c b/net/can/af_can.c
2941 index c4e50852c9f4..f59859a3f562 100644
2942 --- a/net/can/af_can.c
2943 +++ b/net/can/af_can.c
2944 @@ -57,6 +57,7 @@
2945 #include <linux/skbuff.h>
2946 #include <linux/can.h>
2947 #include <linux/can/core.h>
2948 +#include <linux/can/skb.h>
2949 #include <linux/ratelimit.h>
2950 #include <net/net_namespace.h>
2951 #include <net/sock.h>
2952 @@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
2953 return -ENOMEM;
2954 }
2955
2956 - newskb->sk = skb->sk;
2957 + can_skb_set_owner(newskb, skb->sk);
2958 newskb->ip_summed = CHECKSUM_UNNECESSARY;
2959 newskb->pkt_type = PACKET_BROADCAST;
2960 }
2961 diff --git a/net/can/bcm.c b/net/can/bcm.c
2962 index 8f113e6ff327..35cf02d92766 100644
2963 --- a/net/can/bcm.c
2964 +++ b/net/can/bcm.c
2965 @@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
2966
2967 /* send with loopback */
2968 skb->dev = dev;
2969 - skb->sk = op->sk;
2970 + can_skb_set_owner(skb, op->sk);
2971 can_send(skb, 1);
2972
2973 /* update statistics */
2974 @@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
2975
2976 can_skb_prv(skb)->ifindex = dev->ifindex;
2977 skb->dev = dev;
2978 - skb->sk = sk;
2979 + can_skb_set_owner(skb, sk);
2980 err = can_send(skb, 1); /* send with loopback */
2981 dev_put(dev);
2982
2983 diff --git a/net/core/dev.c b/net/core/dev.c
2984 index 1283c8442e99..a0e55ffc03c9 100644
2985 --- a/net/core/dev.c
2986 +++ b/net/core/dev.c
2987 @@ -2374,7 +2374,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
2988 * 2. No high memory really exists on this machine.
2989 */
2990
2991 -static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2992 +static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
2993 {
2994 #ifdef CONFIG_HIGHMEM
2995 int i;
2996 @@ -2454,46 +2454,51 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
2997 }
2998
2999 static netdev_features_t harmonize_features(struct sk_buff *skb,
3000 - __be16 protocol, netdev_features_t features)
3001 + __be16 protocol,
3002 + const struct net_device *dev,
3003 + netdev_features_t features)
3004 {
3005 if (skb->ip_summed != CHECKSUM_NONE &&
3006 !can_checksum_protocol(features, protocol)) {
3007 features &= ~NETIF_F_ALL_CSUM;
3008 - } else if (illegal_highdma(skb->dev, skb)) {
3009 + } else if (illegal_highdma(dev, skb)) {
3010 features &= ~NETIF_F_SG;
3011 }
3012
3013 return features;
3014 }
3015
3016 -netdev_features_t netif_skb_features(struct sk_buff *skb)
3017 +netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
3018 + const struct net_device *dev)
3019 {
3020 __be16 protocol = skb->protocol;
3021 - netdev_features_t features = skb->dev->features;
3022 + netdev_features_t features = dev->features;
3023
3024 - if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
3025 + if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
3026 features &= ~NETIF_F_GSO_MASK;
3027
3028 if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
3029 struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
3030 protocol = veh->h_vlan_encapsulated_proto;
3031 } else if (!vlan_tx_tag_present(skb)) {
3032 - return harmonize_features(skb, protocol, features);
3033 + return harmonize_features(skb, protocol, dev, features);
3034 }
3035
3036 - features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
3037 + features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
3038 NETIF_F_HW_VLAN_STAG_TX);
3039
3040 if (protocol != htons(ETH_P_8021Q) && protocol != htons(ETH_P_8021AD)) {
3041 - return harmonize_features(skb, protocol, features);
3042 + return harmonize_features(skb, protocol, dev, features);
3043 } else {
3044 features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
3045 NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
3046 NETIF_F_HW_VLAN_STAG_TX;
3047 - return harmonize_features(skb, protocol, features);
3048 + return harmonize_features(skb, protocol, dev, features);
3049 }
3050 +
3051 + return harmonize_features(skb, protocol, dev, features);
3052 }
3053 -EXPORT_SYMBOL(netif_skb_features);
3054 +EXPORT_SYMBOL(netif_skb_dev_features);
3055
3056 /*
3057 * Returns true if either:
3058 diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
3059 index 0e9131195eb0..55e08e2de3a1 100644
3060 --- a/net/core/fib_rules.c
3061 +++ b/net/core/fib_rules.c
3062 @@ -720,6 +720,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
3063 attach_rules(&ops->rules_list, dev);
3064 break;
3065
3066 + case NETDEV_CHANGENAME:
3067 + list_for_each_entry(ops, &net->rules_ops, list) {
3068 + detach_rules(&ops->rules_list, dev);
3069 + attach_rules(&ops->rules_list, dev);
3070 + }
3071 + break;
3072 +
3073 case NETDEV_UNREGISTER:
3074 list_for_each_entry(ops, &net->rules_ops, list)
3075 detach_rules(&ops->rules_list, dev);
3076 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
3077 index 27f33f25cda8..433a1051d323 100644
3078 --- a/net/core/netpoll.c
3079 +++ b/net/core/netpoll.c
3080 @@ -948,6 +948,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
3081 {
3082 char *cur=opt, *delim;
3083 int ipv6;
3084 + bool ipversion_set = false;
3085
3086 if (*cur != '@') {
3087 if ((delim = strchr(cur, '@')) == NULL)
3088 @@ -960,6 +961,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
3089 cur++;
3090
3091 if (*cur != '/') {
3092 + ipversion_set = true;
3093 if ((delim = strchr(cur, '/')) == NULL)
3094 goto parse_failed;
3095 *delim = 0;
3096 @@ -1002,7 +1004,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
3097 ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
3098 if (ipv6 < 0)
3099 goto parse_failed;
3100 - else if (np->ipv6 != (bool)ipv6)
3101 + else if (ipversion_set && np->ipv6 != (bool)ipv6)
3102 goto parse_failed;
3103 else
3104 np->ipv6 = (bool)ipv6;
3105 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
3106 index 20ee14d0a8a9..79143b7af7e5 100644
3107 --- a/net/core/skbuff.c
3108 +++ b/net/core/skbuff.c
3109 @@ -47,6 +47,8 @@
3110 #include <linux/in.h>
3111 #include <linux/inet.h>
3112 #include <linux/slab.h>
3113 +#include <linux/tcp.h>
3114 +#include <linux/udp.h>
3115 #include <linux/netdevice.h>
3116 #ifdef CONFIG_NET_CLS_ACT
3117 #include <net/pkt_sched.h>
3118 @@ -3471,3 +3473,26 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3119 return true;
3120 }
3121 EXPORT_SYMBOL(skb_try_coalesce);
3122 +
3123 +/**
3124 + * skb_gso_transport_seglen - Return length of individual segments of a gso packet
3125 + *
3126 + * @skb: GSO skb
3127 + *
3128 + * skb_gso_transport_seglen is used to determine the real size of the
3129 + * individual segments, including Layer4 headers (TCP/UDP).
3130 + *
3131 + * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
3132 + */
3133 +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
3134 +{
3135 + const struct skb_shared_info *shinfo = skb_shinfo(skb);
3136 + unsigned int hdr_len;
3137 +
3138 + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3139 + hdr_len = tcp_hdrlen(skb);
3140 + else
3141 + hdr_len = sizeof(struct udphdr);
3142 + return hdr_len + shinfo->gso_size;
3143 +}
3144 +EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
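
The helper's arithmetic is just transport header plus gso_size. A minimal userspace sketch of the same computation — the struct below is a stand-in for the skb fields it reads, not kernel API:

    #include <stdio.h>

    /* Stand-ins for the fields skb_gso_transport_seglen() reads. */
    struct gso_info {
    	unsigned int gso_size;   /* payload bytes per segment (MSS for TCP) */
    	int          is_tcp;     /* SKB_GSO_TCPV4 | SKB_GSO_TCPV6 analogue */
    	unsigned int tcp_hdrlen; /* TCP header length, options included */
    };

    static unsigned int gso_transport_seglen(const struct gso_info *i)
    {
    	/* UDP headers are a fixed 8 bytes; TCP must use the real header
    	 * length because options (e.g. timestamps) vary per connection. */
    	unsigned int hdr_len = i->is_tcp ? i->tcp_hdrlen : 8;

    	return hdr_len + i->gso_size;
    }

    int main(void)
    {
    	struct gso_info tcp = { .gso_size = 1448, .is_tcp = 1, .tcp_hdrlen = 32 };

    	/* 32-byte TCP header + 1448-byte MSS = 1480 bytes per segment */
    	printf("seglen = %u\n", gso_transport_seglen(&tcp));
    	return 0;
    }
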
3145 diff --git a/net/core/sock.c b/net/core/sock.c
3146 index 50a345e5a26f..3ba527074f7f 100644
3147 --- a/net/core/sock.c
3148 +++ b/net/core/sock.c
3149 @@ -1814,7 +1814,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
3150 gfp_t gfp = sk->sk_allocation;
3151
3152 if (order)
3153 - gfp |= __GFP_COMP | __GFP_NOWARN;
3154 + gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
3155 pfrag->page = alloc_pages(gfp, order);
3156 if (likely(pfrag->page)) {
3157 pfrag->offset = 0;
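
Adding __GFP_NORETRY makes the high-order attempt fail fast instead of stalling in reclaim; the surrounding do/while in sk_page_frag_refill() (not visible in this hunk) then retries at smaller orders. A userspace sketch of that fallback pattern, with try_alloc() standing in for alloc_pages():

    #include <stdlib.h>

    /* Sketch of the fallback __GFP_NORETRY enables: attempt a large
     * contiguous allocation, and on failure drop to a smaller order
     * rather than letting the allocator retry aggressively. */
    static void *try_alloc(size_t bytes)
    {
    	return malloc(bytes); /* stand-in; may return NULL */
    }

    static void *refill(size_t *got, int max_order)
    {
    	for (int order = max_order; order >= 0; order--) {
    		void *p = try_alloc((size_t)4096 << order);

    		if (p) {
    			*got = (size_t)4096 << order;
    			return p; /* succeeded at this order */
    		}
    		/* fail fast, retry one order smaller */
    	}
    	return NULL;
    }

    int main(void)
    {
    	size_t got = 0;
    	void *p = refill(&got, 3); /* try a 32 KiB block first */

    	free(p);
    	return 0;
    }
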
3158 diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
3159 index 31b127e8086b..ca118e8cb141 100644
3160 --- a/net/ieee802154/6lowpan.c
3161 +++ b/net/ieee802154/6lowpan.c
3162 @@ -1173,7 +1173,27 @@ static struct header_ops lowpan_header_ops = {
3163 .create = lowpan_header_create,
3164 };
3165
3166 +static struct lock_class_key lowpan_tx_busylock;
3167 +static struct lock_class_key lowpan_netdev_xmit_lock_key;
3168 +
3169 +static void lowpan_set_lockdep_class_one(struct net_device *dev,
3170 + struct netdev_queue *txq,
3171 + void *_unused)
3172 +{
3173 + lockdep_set_class(&txq->_xmit_lock,
3174 + &lowpan_netdev_xmit_lock_key);
3175 +}
3176 +
3177 +
3178 +static int lowpan_dev_init(struct net_device *dev)
3179 +{
3180 + netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
3181 + dev->qdisc_tx_busylock = &lowpan_tx_busylock;
3182 + return 0;
3183 +}
3184 +
3185 static const struct net_device_ops lowpan_netdev_ops = {
3186 + .ndo_init = lowpan_dev_init,
3187 .ndo_start_xmit = lowpan_xmit,
3188 .ndo_set_mac_address = lowpan_set_address,
3189 };
3190 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
3191 index 9e38217c3931..e40eef4ac697 100644
3192 --- a/net/ipv4/devinet.c
3193 +++ b/net/ipv4/devinet.c
3194 @@ -1433,7 +1433,8 @@ static size_t inet_nlmsg_size(void)
3195 + nla_total_size(4) /* IFA_ADDRESS */
3196 + nla_total_size(4) /* IFA_LOCAL */
3197 + nla_total_size(4) /* IFA_BROADCAST */
3198 - + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
3199 + + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
3200 + + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
3201 }
3202
3203 static inline u32 cstamp_delta(unsigned long cstamp)
3204 diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
3205 index 694de3b7aebf..98d7e53d2afd 100644
3206 --- a/net/ipv4/ip_forward.c
3207 +++ b/net/ipv4/ip_forward.c
3208 @@ -39,6 +39,71 @@
3209 #include <net/route.h>
3210 #include <net/xfrm.h>
3211
3212 +static bool ip_may_fragment(const struct sk_buff *skb)
3213 +{
3214 + return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
3215 + !skb->local_df;
3216 +}
3217 +
3218 +static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
3219 +{
3220 + if (skb->len <= mtu || skb->local_df)
3221 + return false;
3222 +
3223 + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
3224 + return false;
3225 +
3226 + return true;
3227 +}
3228 +
3229 +static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
3230 +{
3231 + unsigned int mtu;
3232 +
3233 + if (skb->local_df || !skb_is_gso(skb))
3234 + return false;
3235 +
3236 + mtu = dst_mtu(skb_dst(skb));
3237 +
3238 + /* if seglen > mtu, do software segmentation for IP fragmentation on
3239 + * output. DF bit cannot be set since ip_forward would have sent
3240 + * icmp error.
3241 + */
3242 + return skb_gso_network_seglen(skb) > mtu;
3243 +}
3244 +
3245 +/* called if GSO skb needs to be fragmented on forward */
3246 +static int ip_forward_finish_gso(struct sk_buff *skb)
3247 +{
3248 + struct dst_entry *dst = skb_dst(skb);
3249 + netdev_features_t features;
3250 + struct sk_buff *segs;
3251 + int ret = 0;
3252 +
3253 + features = netif_skb_dev_features(skb, dst->dev);
3254 + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
3255 + if (IS_ERR(segs)) {
3256 + kfree_skb(skb);
3257 + return -ENOMEM;
3258 + }
3259 +
3260 + consume_skb(skb);
3261 +
3262 + do {
3263 + struct sk_buff *nskb = segs->next;
3264 + int err;
3265 +
3266 + segs->next = NULL;
3267 + err = dst_output(segs);
3268 +
3269 + if (err && ret == 0)
3270 + ret = err;
3271 + segs = nskb;
3272 + } while (segs);
3273 +
3274 + return ret;
3275 +}
3276 +
3277 static int ip_forward_finish(struct sk_buff *skb)
3278 {
3279 struct ip_options *opt = &(IPCB(skb)->opt);
3280 @@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
3281 if (unlikely(opt->optlen))
3282 ip_forward_options(skb);
3283
3284 + if (ip_gso_exceeds_dst_mtu(skb))
3285 + return ip_forward_finish_gso(skb);
3286 +
3287 return dst_output(skb);
3288 }
3289
3290 @@ -88,8 +156,7 @@ int ip_forward(struct sk_buff *skb)
3291 if (opt->is_strictroute && rt->rt_uses_gateway)
3292 goto sr_failed;
3293
3294 - if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
3295 - (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
3296 + if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, dst_mtu(&rt->dst))) {
3297 IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
3298 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
3299 htonl(dst_mtu(&rt->dst)));
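
The new path above software-segments a too-big GSO skb and pushes each piece through dst_output(), so ordinary IP fragmentation can then happen per segment instead of the packet being dropped. A userspace analogue of that control flow, with send_one() standing in for dst_output():

    #include <stdio.h>
    #include <string.h>

    /* Analogue of ip_forward_finish_gso(): chop an oversized payload
     * into mtu-sized pieces, emit each one, and keep the first error. */
    static int send_one(const char *buf, size_t len)
    {
    	printf("segment of %zu bytes\n", len);
    	return 0;
    }

    static int forward_segmented(const char *payload, size_t len, size_t mtu)
    {
    	int ret = 0;

    	for (size_t off = 0; off < len; off += mtu) {
    		size_t chunk = len - off < mtu ? len - off : mtu;
    		int err = send_one(payload + off, chunk);

    		if (err && ret == 0)
    			ret = err; /* first failure wins, as in the patch */
    	}
    	return ret;
    }

    int main(void)
    {
    	char payload[4000];

    	memset(payload, 0, sizeof(payload));
    	return forward_segmented(payload, sizeof(payload), 1500);
    }
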
3300 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3301 index f6c6ab14da41..1a362f375e67 100644
3302 --- a/net/ipv4/route.c
3303 +++ b/net/ipv4/route.c
3304 @@ -1544,6 +1544,7 @@ static int __mkroute_input(struct sk_buff *skb,
3305 rth->rt_gateway = 0;
3306 rth->rt_uses_gateway = 0;
3307 INIT_LIST_HEAD(&rth->rt_uncached);
3308 + RT_CACHE_STAT_INC(in_slow_tot);
3309
3310 rth->dst.input = ip_forward;
3311 rth->dst.output = ip_output;
3312 @@ -1645,8 +1646,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
3313 if (err != 0)
3314 goto no_route;
3315
3316 - RT_CACHE_STAT_INC(in_slow_tot);
3317 -
3318 if (res.type == RTN_BROADCAST)
3319 goto brd_input;
3320
3321 @@ -1715,6 +1714,7 @@ local_input:
3322 rth->rt_gateway = 0;
3323 rth->rt_uses_gateway = 0;
3324 INIT_LIST_HEAD(&rth->rt_uncached);
3325 + RT_CACHE_STAT_INC(in_slow_tot);
3326 if (res.type == RTN_UNREACHABLE) {
3327 rth->dst.input= ip_error;
3328 rth->dst.error= -err;
3329 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
3330 index 5560abfe6d30..d2df17940e07 100644
3331 --- a/net/ipv4/tcp_output.c
3332 +++ b/net/ipv4/tcp_output.c
3333 @@ -686,7 +686,8 @@ static void tcp_tsq_handler(struct sock *sk)
3334 if ((1 << sk->sk_state) &
3335 (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
3336 TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
3337 - tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
3338 + tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
3339 + 0, GFP_ATOMIC);
3340 }
3341 /*
3342 * One tasklet per cpu tries to send more skbs.
3343 @@ -1875,7 +1876,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
3344
3345 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
3346 set_bit(TSQ_THROTTLED, &tp->tsq_flags);
3347 - break;
3348 + /* It is possible TX completion already happened
3349 + * before we set TSQ_THROTTLED, so we must
3350 + * test again the condition.
3351 + * We abuse smp_mb__after_clear_bit() because
3352 + * there is no smp_mb__after_set_bit() yet
3353 + */
3354 + smp_mb__after_clear_bit();
3355 + if (atomic_read(&sk->sk_wmem_alloc) > limit)
3356 + break;
3357 }
3358
3359 limit = mss_now;
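
The re-check exists because set_bit() does not imply a full memory barrier: a TX completion could free wmem and test TSQ_THROTTLED just before the bit became visible, leaving the queue throttled with nobody left to unthrottle it. A compile-clean C11 sketch of the publish-then-recheck pattern (names are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_ulong wmem_alloc; /* bytes queued, decremented on completion */
    static atomic_ulong tsq_flags;

    #define TSQ_THROTTLED 1UL

    /* Publish the throttled flag, then re-read the counter across a full
     * barrier (the smp_mb__after_clear_bit() analogue in the patch). */
    static bool should_throttle(unsigned long limit)
    {
    	if (atomic_load(&wmem_alloc) > limit) {
    		atomic_fetch_or(&tsq_flags, TSQ_THROTTLED);
    		atomic_thread_fence(memory_order_seq_cst);
    		if (atomic_load(&wmem_alloc) > limit)
    			return true; /* still over limit: safe to stop */
    		/* a completion slipped in between: keep transmitting */
    	}
    	return false;
    }

    int main(void)
    {
    	atomic_store(&wmem_alloc, 200000);
    	return should_throttle(128 * 1024) ? 0 : 1;
    }
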
3360 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3361 index b98b8e06739e..98a262b759ae 100644
3362 --- a/net/ipv6/ip6_output.c
3363 +++ b/net/ipv6/ip6_output.c
3364 @@ -345,6 +345,20 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
3365 return dst_output(skb);
3366 }
3367
3368 +static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
3369 +{
3370 + if (skb->len <= mtu || skb->local_df)
3371 + return false;
3372 +
3373 + if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
3374 + return true;
3375 +
3376 + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
3377 + return false;
3378 +
3379 + return true;
3380 +}
3381 +
3382 int ip6_forward(struct sk_buff *skb)
3383 {
3384 struct dst_entry *dst = skb_dst(skb);
3385 @@ -467,8 +481,7 @@ int ip6_forward(struct sk_buff *skb)
3386 if (mtu < IPV6_MIN_MTU)
3387 mtu = IPV6_MIN_MTU;
3388
3389 - if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
3390 - (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
3391 + if (ip6_pkt_too_big(skb, mtu)) {
3392 /* Again, force OUTPUT device used as source address */
3393 skb->dev = dst->dev;
3394 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
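
Consolidating the check into ip6_pkt_too_big() makes the three cases explicit: small or local_df packets always pass, a reassembled packet whose largest fragment exceeded the MTU is rejected, and a GSO packet passes whenever its individual segments fit. A userspace model with stub fields (not the kernel's sk_buff):

    #include <assert.h>
    #include <stdbool.h>

    struct pkt {
    	unsigned int len;
    	bool local_df;
    	unsigned int frag_max_size;        /* from reassembly, 0 if none */
    	bool gso;
    	unsigned int gso_network_seglen;
    };

    static bool pkt_too_big(const struct pkt *p, unsigned int mtu)
    {
    	if (p->len <= mtu || p->local_df)
    		return false;
    	if (p->frag_max_size && p->frag_max_size > mtu)
    		return true;
    	if (p->gso && p->gso_network_seglen <= mtu)
    		return false;
    	return true;
    }

    int main(void)
    {
    	/* A GSO super-packet whose segments fit the egress MTU is forwarded... */
    	struct pkt gso_ok = { .len = 64000, .gso = true, .gso_network_seglen = 1480 };
    	assert(!pkt_too_big(&gso_ok, 1500));

    	/* ...but a packet reassembled from an oversized fragment is not. */
    	struct pkt frag_bad = { .len = 2000, .frag_max_size = 1700 };
    	assert(pkt_too_big(&frag_bad, 1500));
    	return 0;
    }
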
3395 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3396 index 79bc251042ba..8554e5eebaeb 100644
3397 --- a/net/sctp/socket.c
3398 +++ b/net/sctp/socket.c
3399 @@ -71,6 +71,7 @@
3400 #include <linux/crypto.h>
3401 #include <linux/slab.h>
3402 #include <linux/file.h>
3403 +#include <linux/compat.h>
3404
3405 #include <net/ip.h>
3406 #include <net/icmp.h>
3407 @@ -1384,11 +1385,19 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
3408 /*
3409 * New (hopefully final) interface for the API.
3410 * We use the sctp_getaddrs_old structure so that use-space library
3411 - * can avoid any unnecessary allocations. The only defferent part
3412 + * can avoid any unnecessary allocations. The only different part
3413 * is that we store the actual length of the address buffer into the
3414 - * addrs_num structure member. That way we can re-use the existing
3415 + * addrs_num structure member. That way we can re-use the existing
3416 * code.
3417 */
3418 +#ifdef CONFIG_COMPAT
3419 +struct compat_sctp_getaddrs_old {
3420 + sctp_assoc_t assoc_id;
3421 + s32 addr_num;
3422 + compat_uptr_t addrs; /* struct sockaddr * */
3423 +};
3424 +#endif
3425 +
3426 SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
3427 char __user *optval,
3428 int __user *optlen)
3429 @@ -1397,16 +1406,30 @@ SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
3430 sctp_assoc_t assoc_id = 0;
3431 int err = 0;
3432
3433 - if (len < sizeof(param))
3434 - return -EINVAL;
3435 +#ifdef CONFIG_COMPAT
3436 + if (is_compat_task()) {
3437 + struct compat_sctp_getaddrs_old param32;
3438
3439 - if (copy_from_user(&param, optval, sizeof(param)))
3440 - return -EFAULT;
3441 + if (len < sizeof(param32))
3442 + return -EINVAL;
3443 + if (copy_from_user(&param32, optval, sizeof(param32)))
3444 + return -EFAULT;
3445
3446 - err = __sctp_setsockopt_connectx(sk,
3447 - (struct sockaddr __user *)param.addrs,
3448 - param.addr_num, &assoc_id);
3449 + param.assoc_id = param32.assoc_id;
3450 + param.addr_num = param32.addr_num;
3451 + param.addrs = compat_ptr(param32.addrs);
3452 + } else
3453 +#endif
3454 + {
3455 + if (len < sizeof(param))
3456 + return -EINVAL;
3457 + if (copy_from_user(&param, optval, sizeof(param)))
3458 + return -EFAULT;
3459 + }
3460
3461 + err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
3462 + param.addrs, param.addr_num,
3463 + &assoc_id);
3464 if (err == 0 || err == -EINPROGRESS) {
3465 if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
3466 return -EFAULT;
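
The compat branch exists because the old struct carries a raw pointer, so its size and layout differ between a 32-bit caller and a 64-bit kernel; without translation, both the optlen check and the copy_from_user() size are wrong. A sketch of the mismatch, with layouts illustrative of a typical LP64 build:

    #include <stdint.h>
    #include <stdio.h>

    struct getaddrs_old64 {        /* what a 64-bit kernel expects */
    	int32_t assoc_id;
    	int32_t addr_num;
    	void   *addrs;             /* 8-byte pointer */
    };

    struct getaddrs_old32 {        /* what a 32-bit caller passes */
    	int32_t  assoc_id;
    	int32_t  addr_num;
    	uint32_t addrs;            /* compat_uptr_t */
    };

    int main(void)
    {
    	/* Typically 16 vs 12 bytes: a compat caller's optlen fails the
    	 * sizeof(param) check unless translated, as the patch now does. */
    	printf("%zu %zu\n", sizeof(struct getaddrs_old64),
    	       sizeof(struct getaddrs_old32));
    	return 0;
    }
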
3467 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
3468 index 8a0e04d0928a..fc47165dc254 100644
3469 --- a/net/sunrpc/xprtsock.c
3470 +++ b/net/sunrpc/xprtsock.c
3471 @@ -502,6 +502,7 @@ static int xs_nospace(struct rpc_task *task)
3472 struct rpc_rqst *req = task->tk_rqstp;
3473 struct rpc_xprt *xprt = req->rq_xprt;
3474 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
3475 + struct sock *sk = transport->inet;
3476 int ret = -EAGAIN;
3477
3478 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
3479 @@ -519,7 +520,7 @@ static int xs_nospace(struct rpc_task *task)
3480 * window size
3481 */
3482 set_bit(SOCK_NOSPACE, &transport->sock->flags);
3483 - transport->inet->sk_write_pending++;
3484 + sk->sk_write_pending++;
3485 /* ...and wait for more buffer space */
3486 xprt_wait_for_buffer_space(task, xs_nospace_callback);
3487 }
3488 @@ -529,6 +530,9 @@ static int xs_nospace(struct rpc_task *task)
3489 }
3490
3491 spin_unlock_bh(&xprt->transport_lock);
3492 +
3493 + /* Race breaker in case memory is freed before above code is called */
3494 + sk->sk_write_space(sk);
3495 return ret;
3496 }
3497
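
The unconditional sk_write_space(sk) after dropping transport_lock is a race breaker: memory may have been freed between the buffer-full test and registering as a waiter, in which case the wakeup already fired and found nobody waiting. A minimal sketch of the register-then-repoke pattern (names illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool have_space;
    static atomic_bool waiting;

    static void write_space(void)   /* stands in for sk->sk_write_space */
    {
    	if (atomic_load(&waiting) && atomic_load(&have_space))
    		atomic_store(&waiting, false); /* wake the sender */
    }

    static void sender_blocks(void)
    {
    	atomic_store(&waiting, true); /* register as a waiter... */
    	write_space();                /* ...then re-run the wakeup check,
    	                               * as xs_nospace() now does */
    }

    int main(void)
    {
    	atomic_store(&have_space, true); /* completion already freed memory */
    	sender_blocks();                 /* the repoke prevents a lost wakeup */
    	return atomic_load(&waiting);    /* 0: the sender was woken */
    }
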
3498 diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
3499 index 142a59f39796..bcdca73033f3 100644
3500 --- a/security/selinux/ss/policydb.c
3501 +++ b/security/selinux/ss/policydb.c
3502 @@ -3258,10 +3258,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
3503 if (rc)
3504 return rc;
3505
3506 - buf[0] = ft->stype;
3507 - buf[1] = ft->ttype;
3508 - buf[2] = ft->tclass;
3509 - buf[3] = otype->otype;
3510 + buf[0] = cpu_to_le32(ft->stype);
3511 + buf[1] = cpu_to_le32(ft->ttype);
3512 + buf[2] = cpu_to_le32(ft->tclass);
3513 + buf[3] = cpu_to_le32(otype->otype);
3514
3515 rc = put_entry(buf, sizeof(u32), 4, fp);
3516 if (rc)
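
This fix only changes behavior on big-endian hosts: the binary policy format stores u32 values little-endian on disk, so each one must pass through cpu_to_le32() before put_entry() writes it. A portable userspace equivalent of the conversion:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Produce the on-disk little-endian representation of a host u32;
     * a no-op on little-endian hosts, a byte swap on big-endian ones. */
    static uint32_t to_le32(uint32_t v)
    {
    	uint8_t b[4] = {
    		(uint8_t)v, (uint8_t)(v >> 8),
    		(uint8_t)(v >> 16), (uint8_t)(v >> 24),
    	};
    	uint32_t out;

    	memcpy(&out, b, sizeof(out));
    	return out;
    }

    int main(void)
    {
    	/* mirrors filling buf[0..3] before put_entry(buf, sizeof(u32), 4, fp) */
    	uint32_t buf[4] = { to_le32(1), to_le32(2), to_le32(3), to_le32(4) };

    	fwrite(buf, sizeof(buf[0]), 4, stdout);
    	return 0;
    }
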
3517 diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
3518 index 90ff7a3f72df..01fefbe29e4a 100644
3519 --- a/sound/pci/hda/patch_ca0132.c
3520 +++ b/sound/pci/hda/patch_ca0132.c
3521 @@ -2662,60 +2662,6 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
3522 }
3523
3524 /*
3525 - * PCM stuffs
3526 - */
3527 -static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
3528 - u32 stream_tag,
3529 - int channel_id, int format)
3530 -{
3531 - unsigned int oldval, newval;
3532 -
3533 - if (!nid)
3534 - return;
3535 -
3536 - snd_printdd(
3537 - "ca0132_setup_stream: NID=0x%x, stream=0x%x, "
3538 - "channel=%d, format=0x%x\n",
3539 - nid, stream_tag, channel_id, format);
3540 -
3541 - /* update the format-id if changed */
3542 - oldval = snd_hda_codec_read(codec, nid, 0,
3543 - AC_VERB_GET_STREAM_FORMAT,
3544 - 0);
3545 - if (oldval != format) {
3546 - msleep(20);
3547 - snd_hda_codec_write(codec, nid, 0,
3548 - AC_VERB_SET_STREAM_FORMAT,
3549 - format);
3550 - }
3551 -
3552 - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
3553 - newval = (stream_tag << 4) | channel_id;
3554 - if (oldval != newval) {
3555 - snd_hda_codec_write(codec, nid, 0,
3556 - AC_VERB_SET_CHANNEL_STREAMID,
3557 - newval);
3558 - }
3559 -}
3560 -
3561 -static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
3562 -{
3563 - unsigned int val;
3564 -
3565 - if (!nid)
3566 - return;
3567 -
3568 - snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid);
3569 -
3570 - val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
3571 - if (!val)
3572 - return;
3573 -
3574 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
3575 - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
3576 -}
3577 -
3578 -/*
3579 * PCM callbacks
3580 */
3581 static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
3582 @@ -2726,7 +2672,7 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
3583 {
3584 struct ca0132_spec *spec = codec->spec;
3585
3586 - ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
3587 + snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
3588
3589 return 0;
3590 }
3591 @@ -2745,7 +2691,7 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
3592 if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
3593 msleep(50);
3594
3595 - ca0132_cleanup_stream(codec, spec->dacs[0]);
3596 + snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
3597
3598 return 0;
3599 }
3600 @@ -2822,10 +2768,8 @@ static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
3601 unsigned int format,
3602 struct snd_pcm_substream *substream)
3603 {
3604 - struct ca0132_spec *spec = codec->spec;
3605 -
3606 - ca0132_setup_stream(codec, spec->adcs[substream->number],
3607 - stream_tag, 0, format);
3608 + snd_hda_codec_setup_stream(codec, hinfo->nid,
3609 + stream_tag, 0, format);
3610
3611 return 0;
3612 }
3613 @@ -2839,7 +2783,7 @@ static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
3614 if (spec->dsp_state == DSP_DOWNLOADING)
3615 return 0;
3616
3617 - ca0132_cleanup_stream(codec, hinfo->nid);
3618 + snd_hda_codec_cleanup_stream(codec, hinfo->nid);
3619 return 0;
3620 }
3621
3622 @@ -4742,6 +4686,8 @@ static int patch_ca0132(struct hda_codec *codec)
3623 return err;
3624
3625 codec->patch_ops = ca0132_patch_ops;
3626 + codec->pcm_format_first = 1;
3627 + codec->no_sticky_stream = 1;
3628
3629 return 0;
3630 }
3631 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
3632 index dc4833f47a2b..0c521b7752b2 100644
3633 --- a/sound/pci/hda/patch_sigmatel.c
3634 +++ b/sound/pci/hda/patch_sigmatel.c
3635 @@ -83,6 +83,7 @@ enum {
3636 STAC_DELL_M6_BOTH,
3637 STAC_DELL_EQ,
3638 STAC_ALIENWARE_M17X,
3639 + STAC_92HD89XX_HP_FRONT_JACK,
3640 STAC_92HD73XX_MODELS
3641 };
3642
3643 @@ -97,6 +98,7 @@ enum {
3644 STAC_92HD83XXX_HP_LED,
3645 STAC_92HD83XXX_HP_INV_LED,
3646 STAC_92HD83XXX_HP_MIC_LED,
3647 + STAC_HP_LED_GPIO10,
3648 STAC_92HD83XXX_HEADSET_JACK,
3649 STAC_92HD83XXX_HP,
3650 STAC_HP_ENVY_BASS,
3651 @@ -1775,6 +1777,12 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
3652 {}
3653 };
3654
3655 +static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
3656 + { 0x0a, 0x02214030 },
3657 + { 0x0b, 0x02A19010 },
3658 + {}
3659 +};
3660 +
3661 static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
3662 const struct hda_fixup *fix, int action)
3663 {
3664 @@ -1893,6 +1901,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
3665 [STAC_92HD73XX_NO_JD] = {
3666 .type = HDA_FIXUP_FUNC,
3667 .v.func = stac92hd73xx_fixup_no_jd,
3668 + },
3669 + [STAC_92HD89XX_HP_FRONT_JACK] = {
3670 + .type = HDA_FIXUP_PINS,
3671 + .v.pins = stac92hd89xx_hp_front_jack_pin_configs,
3672 }
3673 };
3674
3675 @@ -1953,6 +1965,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
3676 "Alienware M17x", STAC_ALIENWARE_M17X),
3677 SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
3678 "Alienware M17x R3", STAC_DELL_EQ),
3679 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
3680 + "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
3681 {} /* terminator */
3682 };
3683
3684 @@ -2094,6 +2108,17 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
3685 spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
3686 }
3687
3688 +static void stac92hd83xxx_fixup_hp_led_gpio10(struct hda_codec *codec,
3689 + const struct hda_fixup *fix, int action)
3690 +{
3691 + struct sigmatel_spec *spec = codec->spec;
3692 +
3693 + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
3694 + spec->gpio_led = 0x10; /* GPIO4 */
3695 + spec->default_polarity = 0;
3696 + }
3697 +}
3698 +
3699 static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
3700 const struct hda_fixup *fix, int action)
3701 {
3702 @@ -2160,6 +2185,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
3703 .chained = true,
3704 .chain_id = STAC_92HD83XXX_HP,
3705 },
3706 + [STAC_HP_LED_GPIO10] = {
3707 + .type = HDA_FIXUP_FUNC,
3708 + .v.func = stac92hd83xxx_fixup_hp_led_gpio10,
3709 + .chained = true,
3710 + .chain_id = STAC_92HD83XXX_HP,
3711 + },
3712 [STAC_92HD83XXX_HEADSET_JACK] = {
3713 .type = HDA_FIXUP_FUNC,
3714 .v.func = stac92hd83xxx_fixup_headset_jack,
3715 @@ -2231,6 +2262,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
3716 "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
3717 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1888,
3718 "HP Envy Spectre", STAC_HP_ENVY_BASS),
3719 + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1899,
3720 + "HP Folio 13", STAC_HP_LED_GPIO10),
3721 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18df,
3722 "HP Folio", STAC_92HD83XXX_HP_MIC_LED),
3723 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x1900,
3724 diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
3725 index dc0284dc9e6f..76fdf0a598bc 100644
3726 --- a/sound/soc/codecs/da732x.c
3727 +++ b/sound/soc/codecs/da732x.c
3728 @@ -1268,11 +1268,23 @@ static struct snd_soc_dai_driver da732x_dai[] = {
3729 },
3730 };
3731
3732 +static bool da732x_volatile(struct device *dev, unsigned int reg)
3733 +{
3734 + switch (reg) {
3735 + case DA732X_REG_HPL_DAC_OFF_CNTL:
3736 + case DA732X_REG_HPR_DAC_OFF_CNTL:
3737 + return true;
3738 + default:
3739 + return false;
3740 + }
3741 +}
3742 +
3743 static const struct regmap_config da732x_regmap = {
3744 .reg_bits = 8,
3745 .val_bits = 8,
3746
3747 .max_register = DA732X_MAX_REG,
3748 + .volatile_reg = da732x_volatile,
3749 .reg_defaults = da732x_reg_cache,
3750 .num_reg_defaults = ARRAY_SIZE(da732x_reg_cache),
3751 .cache_type = REGCACHE_RBTREE,
3752 diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
3753 index 8d14a76c7249..819c90fe021f 100644
3754 --- a/sound/soc/codecs/max98090.c
3755 +++ b/sound/soc/codecs/max98090.c
3756 @@ -1755,16 +1755,6 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
3757
3758 switch (level) {
3759 case SND_SOC_BIAS_ON:
3760 - if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
3761 - ret = regcache_sync(max98090->regmap);
3762 -
3763 - if (ret != 0) {
3764 - dev_err(codec->dev,
3765 - "Failed to sync cache: %d\n", ret);
3766 - return ret;
3767 - }
3768 - }
3769 -
3770 if (max98090->jack_state == M98090_JACK_STATE_HEADSET) {
3771 /*
3772 * Set to normal bias level.
3773 @@ -1778,6 +1768,16 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
3774 break;
3775
3776 case SND_SOC_BIAS_STANDBY:
3777 + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
3778 + ret = regcache_sync(max98090->regmap);
3779 + if (ret != 0) {
3780 + dev_err(codec->dev,
3781 + "Failed to sync cache: %d\n", ret);
3782 + return ret;
3783 + }
3784 + }
3785 + break;
3786 +
3787 case SND_SOC_BIAS_OFF:
3788 /* Set internal pull-up to lowest power mode */
3789 snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
3790 diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
3791 index cfb55fe35e98..8517e70bc24b 100644
3792 --- a/sound/soc/codecs/sta32x.c
3793 +++ b/sound/soc/codecs/sta32x.c
3794 @@ -187,42 +187,42 @@ static const unsigned int sta32x_limiter_drc_release_tlv[] = {
3795 13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
3796 };
3797
3798 -static const struct soc_enum sta32x_drc_ac_enum =
3799 - SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
3800 - 2, sta32x_drc_ac);
3801 -static const struct soc_enum sta32x_auto_eq_enum =
3802 - SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
3803 - 3, sta32x_auto_eq_mode);
3804 -static const struct soc_enum sta32x_auto_gc_enum =
3805 - SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
3806 - 4, sta32x_auto_gc_mode);
3807 -static const struct soc_enum sta32x_auto_xo_enum =
3808 - SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
3809 - 16, sta32x_auto_xo_mode);
3810 -static const struct soc_enum sta32x_preset_eq_enum =
3811 - SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
3812 - 32, sta32x_preset_eq_mode);
3813 -static const struct soc_enum sta32x_limiter_ch1_enum =
3814 - SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
3815 - 3, sta32x_limiter_select);
3816 -static const struct soc_enum sta32x_limiter_ch2_enum =
3817 - SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
3818 - 3, sta32x_limiter_select);
3819 -static const struct soc_enum sta32x_limiter_ch3_enum =
3820 - SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
3821 - 3, sta32x_limiter_select);
3822 -static const struct soc_enum sta32x_limiter1_attack_rate_enum =
3823 - SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT,
3824 - 16, sta32x_limiter_attack_rate);
3825 -static const struct soc_enum sta32x_limiter2_attack_rate_enum =
3826 - SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT,
3827 - 16, sta32x_limiter_attack_rate);
3828 -static const struct soc_enum sta32x_limiter1_release_rate_enum =
3829 - SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT,
3830 - 16, sta32x_limiter_release_rate);
3831 -static const struct soc_enum sta32x_limiter2_release_rate_enum =
3832 - SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT,
3833 - 16, sta32x_limiter_release_rate);
3834 +static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
3835 + STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
3836 + sta32x_drc_ac);
3837 +static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum,
3838 + STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
3839 + sta32x_auto_eq_mode);
3840 +static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum,
3841 + STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
3842 + sta32x_auto_gc_mode);
3843 +static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum,
3844 + STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
3845 + sta32x_auto_xo_mode);
3846 +static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum,
3847 + STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
3848 + sta32x_preset_eq_mode);
3849 +static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum,
3850 + STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
3851 + sta32x_limiter_select);
3852 +static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum,
3853 + STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
3854 + sta32x_limiter_select);
3855 +static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum,
3856 + STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
3857 + sta32x_limiter_select);
3858 +static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum,
3859 + STA32X_L1AR, STA32X_LxA_SHIFT,
3860 + sta32x_limiter_attack_rate);
3861 +static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum,
3862 + STA32X_L2AR, STA32X_LxA_SHIFT,
3863 + sta32x_limiter_attack_rate);
3864 +static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum,
3865 + STA32X_L1AR, STA32X_LxR_SHIFT,
3866 + sta32x_limiter_release_rate);
3867 +static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum,
3868 + STA32X_L2AR, STA32X_LxR_SHIFT,
3869 + sta32x_limiter_release_rate);
3870
3871 /* byte array controls for setting biquad, mixer, scaling coefficients;
3872 * for biquads all five coefficients need to be set in one go,
3873 @@ -331,7 +331,7 @@ static int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
3874
3875 static int sta32x_cache_sync(struct snd_soc_codec *codec)
3876 {
3877 - struct sta32x_priv *sta32x = codec->control_data;
3878 + struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
3879 unsigned int mute;
3880 int rc;
3881
3882 @@ -432,7 +432,7 @@ SOC_SINGLE_TLV("Treble Tone Control", STA32X_TONE, STA32X_TONE_TTC_SHIFT, 15, 0,
3883 SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta32x_limiter1_attack_rate_enum),
3884 SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum),
3885 SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
3886 -SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
3887 +SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum),
3888
3889 /* depending on mode, the attack/release thresholds have
3890 * two different enum definitions; provide both
3891 diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
3892 index 89a18d82f303..5bce21013485 100644
3893 --- a/sound/soc/codecs/wm8770.c
3894 +++ b/sound/soc/codecs/wm8770.c
3895 @@ -196,8 +196,8 @@ static const char *ain_text[] = {
3896 "AIN5", "AIN6", "AIN7", "AIN8"
3897 };
3898
3899 -static const struct soc_enum ain_enum =
3900 - SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
3901 +static SOC_ENUM_DOUBLE_DECL(ain_enum,
3902 + WM8770_ADCMUX, 0, 4, ain_text);
3903
3904 static const struct snd_kcontrol_new ain_mux =
3905 SOC_DAPM_ENUM("Capture Mux", ain_enum);
3906 diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
3907 index b0710d817a65..754f88e1fdab 100644
3908 --- a/sound/soc/codecs/wm8958-dsp2.c
3909 +++ b/sound/soc/codecs/wm8958-dsp2.c
3910 @@ -153,7 +153,7 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
3911
3912 data32 &= 0xffffff;
3913
3914 - wm8994_bulk_write(codec->control_data,
3915 + wm8994_bulk_write(wm8994->wm8994,
3916 data32 & 0xffffff,
3917 block_len / 2,
3918 (void *)(data + 8));
3919 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
3920 index cc2dd1f0decb..0339d464791a 100644
3921 --- a/sound/usb/mixer_maps.c
3922 +++ b/sound/usb/mixer_maps.c
3923 @@ -322,6 +322,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
3924 { 0 } /* terminator */
3925 };
3926
3927 +static const struct usbmix_name_map kef_x300a_map[] = {
3928 + { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
3929 + { 0 }
3930 +};
3931 +
3932 /*
3933 * Control map entries
3934 */
3935 @@ -409,6 +414,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
3936 .id = USB_ID(0x200c, 0x1018),
3937 .map = ebox44_map,
3938 },
3939 + {
3940 + .id = USB_ID(0x27ac, 0x1000),
3941 + .map = kef_x300a_map,
3942 + },
3943 { 0 } /* terminator */
3944 };
3945
