Magellan Linux

Annotation of /trunk/kernel-alx/patches-3.12/0113-3.12.14-all-fixes.patch

Revision 2423
Tue Mar 25 12:29:50 2014 UTC by niro
File size: 192737 bytes
-added 3.12 branch
1 niro 2423 diff --git a/Makefile b/Makefile
2     index 0a89e7d84a2d..5d38a5a79b3a 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 12
8     -SUBLEVEL = 13
9     +SUBLEVEL = 14
10     EXTRAVERSION =
11     NAME = One Giant Leap for Frogkind
12    
13     diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
14     index 15f2d5bf8875..cc8a2ca1d80a 100644
15     --- a/arch/arm/include/asm/cacheflush.h
16     +++ b/arch/arm/include/asm/cacheflush.h
17     @@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
18     static inline void __flush_icache_all(void)
19     {
20     __flush_icache_preferred();
21     + dsb();
22     }
23    
24     /*
25     diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
26     index 4f2c28060c9a..05f8066255d0 100644
27     --- a/arch/arm/include/asm/spinlock.h
28     +++ b/arch/arm/include/asm/spinlock.h
29     @@ -44,18 +44,9 @@
30    
31     static inline void dsb_sev(void)
32     {
33     -#if __LINUX_ARM_ARCH__ >= 7
34     - __asm__ __volatile__ (
35     - "dsb ishst\n"
36     - SEV
37     - );
38     -#else
39     - __asm__ __volatile__ (
40     - "mcr p15, 0, %0, c7, c10, 4\n"
41     - SEV
42     - : : "r" (0)
43     - );
44     -#endif
45     +
46     + dsb(ishst);
47     + __asm__(SEV);
48     }
49    
50     /*
51     diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
52     index 579697adaae7..a60a97898090 100644
53     --- a/arch/arm/mach-omap2/gpmc.c
54     +++ b/arch/arm/mach-omap2/gpmc.c
55     @@ -1339,7 +1339,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
56     of_property_read_bool(np, "gpmc,time-para-granularity");
57     }
58    
59     -#ifdef CONFIG_MTD_NAND
60     +#if IS_ENABLED(CONFIG_MTD_NAND)
61    
62     static const char * const nand_ecc_opts[] = {
63     [OMAP_ECC_HAMMING_CODE_DEFAULT] = "sw",
64     @@ -1409,7 +1409,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
65     }
66     #endif
67    
68     -#ifdef CONFIG_MTD_ONENAND
69     +#if IS_ENABLED(CONFIG_MTD_ONENAND)
70     static int gpmc_probe_onenand_child(struct platform_device *pdev,
71     struct device_node *child)
72     {
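
[note] The two gpmc.c hunks above swap bare #ifdef tests for IS_ENABLED(), so the child-probe code is also built when the MTD drivers are modular. A minimal sketch of the difference, using a placeholder option CONFIG_FOO rather than anything from this patch:

    /* <linux/kconfig.h> semantics:
     *   CONFIG_FOO=y  ->  #ifdef CONFIG_FOO true,   IS_ENABLED(CONFIG_FOO) true
     *   CONFIG_FOO=m  ->  #ifdef CONFIG_FOO false,  IS_ENABLED(CONFIG_FOO) true
     *   unset         ->  both false
     */
    #if IS_ENABLED(CONFIG_FOO)
    static int foo_probe(void) { return 0; }   /* built for =y and =m alike */
    #endif
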
73     diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
74     index 1272ed202dde..a2d0f91c5bd7 100644
75     --- a/arch/arm/mm/dma-mapping.c
76     +++ b/arch/arm/mm/dma-mapping.c
77     @@ -1325,7 +1325,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
78     *handle = DMA_ERROR_CODE;
79     size = PAGE_ALIGN(size);
80    
81     - if (gfp & GFP_ATOMIC)
82     + if (!(gfp & __GFP_WAIT))
83     return __iommu_alloc_atomic(dev, size, handle);
84    
85     /*
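
[note] The dma-mapping.c test above (and the matching x86 pci-dma.c hunk later in this patch) turns on how the GFP flags are composed in kernels of this vintage: GFP_ATOMIC is just __GFP_HIGH and carries no sleep bit, so "gfp & GFP_ATOMIC" does not actually detect atomic callers. A sketch of the corrected test, with do_atomic_alloc() as a stand-in:

    /* GFP_ATOMIC == __GFP_HIGH; GFP_KERNEL == __GFP_WAIT|__GFP_IO|__GFP_FS.
     * Testing __GFP_HIGH misclassifies atomic callers that omit it; the
     * reliable question is "may this allocation sleep?", i.e. __GFP_WAIT. */
    if (!(gfp & __GFP_WAIT))
            return do_atomic_alloc();   /* no sleeping allocator allowed */
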
86     diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
87     index 1128064fddcb..c0b44fbaad98 100644
88     --- a/arch/arm/mm/proc-v6.S
89     +++ b/arch/arm/mm/proc-v6.S
90     @@ -208,7 +208,6 @@ __v6_setup:
91     mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
92     mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
93     mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
94     - mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
95     #ifdef CONFIG_MMU
96     mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
97     mcr p15, 0, r0, c2, c0, 2 @ TTB control register
98     @@ -218,6 +217,8 @@ __v6_setup:
99     ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
100     mcr p15, 0, r8, c2, c0, 1 @ load TTB1
101     #endif /* CONFIG_MMU */
102     + mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
103     + @ complete invalidations
104     adr r5, v6_crval
105     ldmia r5, {r5, r6}
106     #ifdef CONFIG_CPU_ENDIAN_BE8
107     diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
108     index c63d9bdee51e..ced046d9f825 100644
109     --- a/arch/arm/mm/proc-v7.S
110     +++ b/arch/arm/mm/proc-v7.S
111     @@ -344,7 +344,6 @@ __v7_setup:
112    
113     4: mov r10, #0
114     mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
115     - dsb
116     #ifdef CONFIG_MMU
117     mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
118     v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
119     @@ -353,6 +352,7 @@ __v7_setup:
120     mcr p15, 0, r5, c10, c2, 0 @ write PRRR
121     mcr p15, 0, r6, c10, c2, 1 @ write NMRR
122     #endif
123     + dsb @ Complete invalidations
124     #ifndef CONFIG_ARM_THUMBEE
125     mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
126     and r0, r0, #(0xf << 12) @ ThumbEE enabled field
127     diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
128     index d25459ff57fc..048334bb2651 100644
129     --- a/arch/arm64/kernel/stacktrace.c
130     +++ b/arch/arm64/kernel/stacktrace.c
131     @@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
132    
133     frame->sp = fp + 0x10;
134     frame->fp = *(unsigned long *)(fp);
135     - frame->pc = *(unsigned long *)(fp + 8);
136     + /*
137     + * -4 here because we care about the PC at time of bl,
138     + * not where the return will go.
139     + */
140     + frame->pc = *(unsigned long *)(fp + 8) - 4;
141    
142     return 0;
143     }
144     diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
145     index 22fb66590dcd..dba48a5d5bb9 100644
146     --- a/arch/avr32/Makefile
147     +++ b/arch/avr32/Makefile
148     @@ -11,7 +11,7 @@ all: uImage vmlinux.elf
149    
150     KBUILD_DEFCONFIG := atstk1002_defconfig
151    
152     -KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
153     +KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
154     KBUILD_AFLAGS += -mrelax -mno-pic
155     KBUILD_CFLAGS_MODULE += -mno-relax
156     LDFLAGS_vmlinux += --relax
157     diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
158     index 9764a1a1073e..c1466a872b9c 100644
159     --- a/arch/avr32/boards/mimc200/fram.c
160     +++ b/arch/avr32/boards/mimc200/fram.c
161     @@ -11,6 +11,7 @@
162     #define FRAM_VERSION "1.0"
163    
164     #include <linux/miscdevice.h>
165     +#include <linux/module.h>
166     #include <linux/proc_fs.h>
167     #include <linux/mm.h>
168     #include <linux/io.h>
169     diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
170     index 84fdf6857c31..a613d2c82fd9 100644
171     --- a/arch/powerpc/include/asm/compat.h
172     +++ b/arch/powerpc/include/asm/compat.h
173     @@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len)
174    
175     /*
176     * We can't access below the stack pointer in the 32bit ABI and
177     - * can access 288 bytes in the 64bit ABI
178     + * can access 288 bytes in the 64bit big-endian ABI,
179     + * or 512 bytes with the new ELFv2 little-endian ABI.
180     */
181     if (!is_32bit_task())
182     - usp -= 288;
183     + usp -= USER_REDZONE_SIZE;
184    
185     return (void __user *) (usp - len);
186     }
187     diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
188     index becc08e6a65c..279b80f3bb29 100644
189     --- a/arch/powerpc/include/asm/ptrace.h
190     +++ b/arch/powerpc/include/asm/ptrace.h
191     @@ -28,11 +28,23 @@
192    
193     #ifdef __powerpc64__
194    
195     +/*
196     + * Size of redzone that userspace is allowed to use below the stack
197     + * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
198     + * the new ELFv2 little-endian ABI, so we allow the larger amount.
199     + *
200     + * For kernel code we allow a 288-byte redzone, in order to conserve
201     + * kernel stack space; gcc currently only uses 288 bytes, and will
202     + * hopefully allow explicit control of the redzone size in future.
203     + */
204     +#define USER_REDZONE_SIZE 512
205     +#define KERNEL_REDZONE_SIZE 288
206     +
207     #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
208     #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
209     #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
210     #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
211     - STACK_FRAME_OVERHEAD + 288)
212     + STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
213     #define STACK_FRAME_MARKER 12
214    
215     /* Size of dummy stack frame allocated when calling signal handler. */
216     @@ -41,6 +53,8 @@
217    
218     #else /* __powerpc64__ */
219    
220     +#define USER_REDZONE_SIZE 0
221     +#define KERNEL_REDZONE_SIZE 0
222     #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
223     #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
224     #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
225     diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
226     index 779a78c26435..c16ceb1c8328 100644
227     --- a/arch/powerpc/kernel/crash_dump.c
228     +++ b/arch/powerpc/kernel/crash_dump.c
229     @@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
230     size_t csize, unsigned long offset, int userbuf)
231     {
232     void *vaddr;
233     + phys_addr_t paddr;
234    
235     if (!csize)
236     return 0;
237    
238     csize = min_t(size_t, csize, PAGE_SIZE);
239     + paddr = pfn << PAGE_SHIFT;
240    
241     - if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
242     - vaddr = __va(pfn << PAGE_SHIFT);
243     + if (memblock_is_region_memory(paddr, csize)) {
244     + vaddr = __va(paddr);
245     csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
246     } else {
247     - vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
248     + vaddr = __ioremap(paddr, PAGE_SIZE, 0);
249     csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
250     iounmap(vaddr);
251     }
252     diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
253     index 2b0ad9845363..ace34137a501 100644
254     --- a/arch/powerpc/kernel/misc_32.S
255     +++ b/arch/powerpc/kernel/misc_32.S
256     @@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
257     mtlr r0
258     blr
259    
260     +/*
261     + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
262     + */
263     _GLOBAL(call_do_irq)
264     mflr r0
265     stw r0,4(r1)
266     lwz r10,THREAD+KSP_LIMIT(r2)
267     - addi r11,r3,THREAD_INFO_GAP
268     + addi r11,r4,THREAD_INFO_GAP
269     stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
270     mr r1,r4
271     stw r10,8(r1)
272     diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
273     index 1e7ba8817106..36b1d1daa236 100644
274     --- a/arch/powerpc/kernel/signal_64.c
275     +++ b/arch/powerpc/kernel/signal_64.c
276     @@ -65,8 +65,8 @@ struct rt_sigframe {
277     struct siginfo __user *pinfo;
278     void __user *puc;
279     struct siginfo info;
280     - /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
281     - char abigap[288];
282     + /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
283     + char abigap[USER_REDZONE_SIZE];
284     } __attribute__ ((aligned (16)));
285    
286     static const char fmt32[] = KERN_INFO \
287     diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
288     index cf42e74514fa..b7eb5d4f4c89 100644
289     --- a/arch/powerpc/platforms/powernv/eeh-ioda.c
290     +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
291     @@ -463,8 +463,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
292     static int ioda_eeh_reset(struct eeh_pe *pe, int option)
293     {
294     struct pci_controller *hose = pe->phb;
295     - struct eeh_dev *edev;
296     - struct pci_dev *dev;
297     + struct pci_bus *bus;
298     int ret;
299    
300     /*
301     @@ -493,31 +492,11 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
302     if (pe->type & EEH_PE_PHB) {
303     ret = ioda_eeh_phb_reset(hose, option);
304     } else {
305     - if (pe->type & EEH_PE_DEVICE) {
306     - /*
307     - * If it's device PE, we didn't refer to the parent
308     - * PCI bus yet. So we have to figure it out indirectly.
309     - */
310     - edev = list_first_entry(&pe->edevs,
311     - struct eeh_dev, list);
312     - dev = eeh_dev_to_pci_dev(edev);
313     - dev = dev->bus->self;
314     - } else {
315     - /*
316     - * If it's bus PE, the parent PCI bus is already there
317     - * and just pick it up.
318     - */
319     - dev = pe->bus->self;
320     - }
321     -
322     - /*
323     - * Do reset based on the fact that the direct upstream bridge
324     - * is root bridge (port) or not.
325     - */
326     - if (dev->bus->number == 0)
327     + bus = eeh_pe_bus_get(pe);
328     + if (pci_is_root_bus(bus))
329     ret = ioda_eeh_root_reset(hose, option);
330     else
331     - ret = ioda_eeh_bridge_reset(hose, dev, option);
332     + ret = ioda_eeh_bridge_reset(hose, bus->self, option);
333     }
334    
335     return ret;
336     diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
337     index 82789e79e539..0ea99e3d4815 100644
338     --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
339     +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
340     @@ -35,12 +35,7 @@
341     #include "offline_states.h"
342    
343     /* This version can't take the spinlock, because it never returns */
344     -static struct rtas_args rtas_stop_self_args = {
345     - .token = RTAS_UNKNOWN_SERVICE,
346     - .nargs = 0,
347     - .nret = 1,
348     - .rets = &rtas_stop_self_args.args[0],
349     -};
350     +static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
351    
352     static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
353     CPU_STATE_OFFLINE;
354     @@ -93,15 +88,20 @@ void set_default_offline_state(int cpu)
355    
356     static void rtas_stop_self(void)
357     {
358     - struct rtas_args *args = &rtas_stop_self_args;
359     + struct rtas_args args = {
360     + .token = cpu_to_be32(rtas_stop_self_token),
361     + .nargs = 0,
362     + .nret = 1,
363     + .rets = &args.args[0],
364     + };
365    
366     local_irq_disable();
367    
368     - BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
369     + BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
370    
371     printk("cpu %u (hwid %u) Ready to die...\n",
372     smp_processor_id(), hard_smp_processor_id());
373     - enter_rtas(__pa(args));
374     + enter_rtas(__pa(&args));
375    
376     panic("Alas, I survived.\n");
377     }
378     @@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void)
379     }
380     }
381    
382     - rtas_stop_self_args.token = rtas_token("stop-self");
383     + rtas_stop_self_token = rtas_token("stop-self");
384     qcss_tok = rtas_token("query-cpu-stopped-state");
385    
386     - if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
387     + if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
388     qcss_tok == RTAS_UNKNOWN_SERVICE) {
389     printk(KERN_INFO "CPU Hotplug not supported by firmware "
390     "- disabling.\n");
391     diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
392     index 8a87a3224121..5edd3c0b437a 100644
393     --- a/arch/x86/kernel/cpu/perf_event.c
394     +++ b/arch/x86/kernel/cpu/perf_event.c
395     @@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
396     for (i = 0; i < cpuc->n_events; i++) {
397     if (event == cpuc->event_list[i]) {
398    
399     + if (i >= cpuc->n_events - cpuc->n_added)
400     + --cpuc->n_added;
401     +
402     if (x86_pmu.put_event_constraints)
403     x86_pmu.put_event_constraints(cpuc, event);
404    
405     diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
406     index 872079a67e4d..f7d0672481fd 100644
407     --- a/arch/x86/kernel/pci-dma.c
408     +++ b/arch/x86/kernel/pci-dma.c
409     @@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
410     flag |= __GFP_ZERO;
411     again:
412     page = NULL;
413     - if (!(flag & GFP_ATOMIC))
414     + /* CMA can be used only in the context which permits sleeping */
415     + if (flag & __GFP_WAIT)
416     page = dma_alloc_from_contiguous(dev, count, get_order(size));
417     + /* fallback */
418     if (!page)
419     page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
420     if (!page)
421     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
422     index 16dc55a39fa3..92af83d79c97 100644
423     --- a/arch/x86/kvm/x86.c
424     +++ b/arch/x86/kvm/x86.c
425     @@ -6141,7 +6141,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
426     frag->len -= len;
427     }
428    
429     - if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
430     + if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
431     vcpu->mmio_needed = 0;
432    
433     /* FIXME: return into emulator if single-stepping. */
434     diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
435     index 917488a0ab00..f2faa58f9a43 100644
436     --- a/arch/xtensa/include/asm/traps.h
437     +++ b/arch/xtensa/include/asm/traps.h
438     @@ -22,25 +22,37 @@ extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
439    
440     static inline void spill_registers(void)
441     {
442     -
443     +#if XCHAL_NUM_AREGS > 16
444     __asm__ __volatile__ (
445     - "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
446     - "mov a12, a0\n\t"
447     - "rsr a13, sar\n\t"
448     - "xsr a14, ps\n\t"
449     - "movi a0, _spill_registers\n\t"
450     - "rsync\n\t"
451     - "callx0 a0\n\t"
452     - "mov a0, a12\n\t"
453     - "wsr a13, sar\n\t"
454     - "wsr a14, ps\n\t"
455     - : :
456     -#if defined(CONFIG_FRAME_POINTER)
457     - : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
458     + " call12 1f\n"
459     + " _j 2f\n"
460     + " retw\n"
461     + " .align 4\n"
462     + "1:\n"
463     + " _entry a1, 48\n"
464     + " addi a12, a0, 3\n"
465     +#if XCHAL_NUM_AREGS > 32
466     + " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
467     + " _entry a1, 48\n"
468     + " mov a12, a0\n"
469     + " .endr\n"
470     +#endif
471     + " _entry a1, 48\n"
472     +#if XCHAL_NUM_AREGS % 12 == 0
473     + " mov a8, a8\n"
474     +#elif XCHAL_NUM_AREGS % 12 == 4
475     + " mov a12, a12\n"
476     +#elif XCHAL_NUM_AREGS % 12 == 8
477     + " mov a4, a4\n"
478     +#endif
479     + " retw\n"
480     + "2:\n"
481     + : : : "a12", "a13", "memory");
482     #else
483     - : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
484     + __asm__ __volatile__ (
485     + " mov a12, a12\n"
486     + : : : "memory");
487     #endif
488     - "memory");
489     }
490    
491     #endif /* _XTENSA_TRAPS_H */
492     diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
493     index 21dbe6bdb8ed..b61e25146a2f 100644
494     --- a/arch/xtensa/kernel/entry.S
495     +++ b/arch/xtensa/kernel/entry.S
496     @@ -1117,6 +1117,13 @@ ENDPROC(fast_syscall_spill_registers)
497     * We basically restore WINDOWBASE and WINDOWSTART to the condition when
498     * we entered the spill routine and jump to the user exception handler.
499     *
500     + * Note that we only need to restore the bits in windowstart that have not
501     + * been spilled yet by the _spill_register routine. Luckily, a3 contains a
502     + * rotated windowstart with only those bits set for frames that haven't been
503     + * spilled yet. Because a3 is rotated such that bit 0 represents the register
504     + * frame for the current windowbase - 1, we need to rotate a3 left by the
505     + * value of the current windowbase + 1 and move it to windowstart.
506     + *
507     * a0: value of depc, original value in depc
508     * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
509     * a3: exctable, original value in excsave1
510     @@ -1131,10 +1138,15 @@ ENTRY(fast_syscall_spill_registers_fixup)
511     /* We need to make sure the current registers (a0-a3) are preserved.
512     * To do this, we simply set the bit for the current window frame
513     * in WS, so that the exception handlers save them to the task stack.
514     + *
515     + * Note: we use a3 to set the windowbase, so we take a special care
516     + * of it, saving it in the original _spill_registers frame across
517     + * the exception handler call.
518     */
519    
520     xsr a3, excsave1 # get spill-mask
521     slli a3, a3, 1 # shift left by one
522     + addi a3, a3, 1 # set the bit for the current window frame
523    
524     slli a2, a3, 32-WSBITS
525     src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
526     @@ -1794,6 +1806,43 @@ ENTRY(system_call)
527    
528     ENDPROC(system_call)
529    
530     +/*
531     + * Spill live registers on the kernel stack macro.
532     + *
533     + * Entry condition: ps.woe is set, ps.excm is cleared
534     + * Exit condition: windowstart has single bit set
535     + * May clobber: a12, a13
536     + */
537     + .macro spill_registers_kernel
538     +
539     +#if XCHAL_NUM_AREGS > 16
540     + call12 1f
541     + _j 2f
542     + retw
543     + .align 4
544     +1:
545     + _entry a1, 48
546     + addi a12, a0, 3
547     +#if XCHAL_NUM_AREGS > 32
548     + .rept (XCHAL_NUM_AREGS - 32) / 12
549     + _entry a1, 48
550     + mov a12, a0
551     + .endr
552     +#endif
553     + _entry a1, 48
554     +#if XCHAL_NUM_AREGS % 12 == 0
555     + mov a8, a8
556     +#elif XCHAL_NUM_AREGS % 12 == 4
557     + mov a12, a12
558     +#elif XCHAL_NUM_AREGS % 12 == 8
559     + mov a4, a4
560     +#endif
561     + retw
562     +2:
563     +#else
564     + mov a12, a12
565     +#endif
566     + .endm
567    
568     /*
569     * Task switch.
570     @@ -1806,21 +1855,20 @@ ENTRY(_switch_to)
571    
572     entry a1, 16
573    
574     - mov a12, a2 # preserve 'prev' (a2)
575     - mov a13, a3 # and 'next' (a3)
576     + mov a10, a2 # preserve 'prev' (a2)
577     + mov a11, a3 # and 'next' (a3)
578    
579     l32i a4, a2, TASK_THREAD_INFO
580     l32i a5, a3, TASK_THREAD_INFO
581    
582     - save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
583     + save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
584    
585     - s32i a0, a12, THREAD_RA # save return address
586     - s32i a1, a12, THREAD_SP # save stack pointer
587     + s32i a0, a10, THREAD_RA # save return address
588     + s32i a1, a10, THREAD_SP # save stack pointer
589    
590     /* Disable ints while we manipulate the stack pointer. */
591    
592     - movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
593     - xsr a14, ps
594     + rsil a14, LOCKLEVEL
595     rsr a3, excsave1
596     rsync
597     s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
598     @@ -1835,7 +1883,7 @@ ENTRY(_switch_to)
599    
600     /* Flush register file. */
601    
602     - call0 _spill_registers # destroys a3, a4, and SAR
603     + spill_registers_kernel
604    
605     /* Set kernel stack (and leave critical section)
606     * Note: It's save to set it here. The stack will not be overwritten
607     @@ -1851,13 +1899,13 @@ ENTRY(_switch_to)
608    
609     /* restore context of the task 'next' */
610    
611     - l32i a0, a13, THREAD_RA # restore return address
612     - l32i a1, a13, THREAD_SP # restore stack pointer
613     + l32i a0, a11, THREAD_RA # restore return address
614     + l32i a1, a11, THREAD_SP # restore stack pointer
615    
616     - load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
617     + load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
618    
619     wsr a14, ps
620     - mov a2, a12 # return 'prev'
621     + mov a2, a10 # return 'prev'
622     rsync
623    
624     retw
625     diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
626     index 41c5e1b799ef..f658e0948703 100644
627     --- a/drivers/acpi/pci_irq.c
628     +++ b/drivers/acpi/pci_irq.c
629     @@ -432,6 +432,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
630     pin_name(pin));
631     }
632    
633     + kfree(entry);
634     return 0;
635     }
636    
637     diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
638     index e7dd2c1fee79..5e47d7bf4745 100644
639     --- a/drivers/acpi/processor_throttling.c
640     +++ b/drivers/acpi/processor_throttling.c
641     @@ -59,6 +59,12 @@ struct throttling_tstate {
642     int target_state; /* target T-state */
643     };
644    
645     +struct acpi_processor_throttling_arg {
646     + struct acpi_processor *pr;
647     + int target_state;
648     + bool force;
649     +};
650     +
651     #define THROTTLING_PRECHANGE (1)
652     #define THROTTLING_POSTCHANGE (2)
653    
654     @@ -1063,16 +1069,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
655     return 0;
656     }
657    
658     +static long acpi_processor_throttling_fn(void *data)
659     +{
660     + struct acpi_processor_throttling_arg *arg = data;
661     + struct acpi_processor *pr = arg->pr;
662     +
663     + return pr->throttling.acpi_processor_set_throttling(pr,
664     + arg->target_state, arg->force);
665     +}
666     +
667     int acpi_processor_set_throttling(struct acpi_processor *pr,
668     int state, bool force)
669     {
670     - cpumask_var_t saved_mask;
671     int ret = 0;
672     unsigned int i;
673     struct acpi_processor *match_pr;
674     struct acpi_processor_throttling *p_throttling;
675     + struct acpi_processor_throttling_arg arg;
676     struct throttling_tstate t_state;
677     - cpumask_var_t online_throttling_cpus;
678    
679     if (!pr)
680     return -EINVAL;
681     @@ -1083,14 +1097,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
682     if ((state < 0) || (state > (pr->throttling.state_count - 1)))
683     return -EINVAL;
684    
685     - if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
686     - return -ENOMEM;
687     -
688     - if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
689     - free_cpumask_var(saved_mask);
690     - return -ENOMEM;
691     - }
692     -
693     if (cpu_is_offline(pr->id)) {
694     /*
695     * the cpu pointed by pr->id is offline. Unnecessary to change
696     @@ -1099,17 +1105,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
697     return -ENODEV;
698     }
699    
700     - cpumask_copy(saved_mask, &current->cpus_allowed);
701     t_state.target_state = state;
702     p_throttling = &(pr->throttling);
703     - cpumask_and(online_throttling_cpus, cpu_online_mask,
704     - p_throttling->shared_cpu_map);
705     +
706     /*
707     * The throttling notifier will be called for every
708     * affected cpu in order to get one proper T-state.
709     * The notifier event is THROTTLING_PRECHANGE.
710     */
711     - for_each_cpu(i, online_throttling_cpus) {
712     + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
713     t_state.cpu = i;
714     acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
715     &t_state);
716     @@ -1121,21 +1125,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
717     * it can be called only for the cpu pointed by pr.
718     */
719     if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
720     - /* FIXME: use work_on_cpu() */
721     - if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
722     - /* Can't migrate to the pr->id CPU. Exit */
723     - ret = -ENODEV;
724     - goto exit;
725     - }
726     - ret = p_throttling->acpi_processor_set_throttling(pr,
727     - t_state.target_state, force);
728     + arg.pr = pr;
729     + arg.target_state = state;
730     + arg.force = force;
731     + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
732     } else {
733     /*
734     * When the T-state coordination is SW_ALL or HW_ALL,
735     * it is necessary to set T-state for every affected
736     * cpus.
737     */
738     - for_each_cpu(i, online_throttling_cpus) {
739     + for_each_cpu_and(i, cpu_online_mask,
740     + p_throttling->shared_cpu_map) {
741     match_pr = per_cpu(processors, i);
742     /*
743     * If the pointer is invalid, we will report the
744     @@ -1156,13 +1157,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
745     "on CPU %d\n", i));
746     continue;
747     }
748     - t_state.cpu = i;
749     - /* FIXME: use work_on_cpu() */
750     - if (set_cpus_allowed_ptr(current, cpumask_of(i)))
751     - continue;
752     - ret = match_pr->throttling.
753     - acpi_processor_set_throttling(
754     - match_pr, t_state.target_state, force);
755     +
756     + arg.pr = match_pr;
757     + arg.target_state = state;
758     + arg.force = force;
759     + ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
760     + &arg);
761     }
762     }
763     /*
764     @@ -1171,17 +1171,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
765     * affected cpu to update the T-states.
766     * The notifier event is THROTTLING_POSTCHANGE
767     */
768     - for_each_cpu(i, online_throttling_cpus) {
769     + for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
770     t_state.cpu = i;
771     acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
772     &t_state);
773     }
774     - /* restore the previous state */
775     - /* FIXME: use work_on_cpu() */
776     - set_cpus_allowed_ptr(current, saved_mask);
777     -exit:
778     - free_cpumask_var(online_throttling_cpus);
779     - free_cpumask_var(saved_mask);
780     +
781     return ret;
782     }
783    
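
[note] The throttling rework above replaces the set_cpus_allowed_ptr() migration trick with work_on_cpu(), which queues a function on the target CPU's bound workqueue and sleeps until it returns. A minimal usage sketch (the callback name is illustrative, not from the patch):

    #include <linux/workqueue.h>

    static long run_on_target(void *data)
    {
            /* executes on the CPU handed to work_on_cpu(), in process context */
            return 0;
    }

    /* from a context that may sleep: */
    ret = work_on_cpu(cpu, run_on_target, NULL);
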
784     diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
785     index f193285968f8..5708e44376fe 100644
786     --- a/drivers/acpi/video.c
787     +++ b/drivers/acpi/video.c
788     @@ -729,6 +729,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
789     union acpi_object *o;
790     struct acpi_video_device_brightness *br = NULL;
791     int result = -EINVAL;
792     + u32 value;
793    
794     if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
795     ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
796     @@ -759,7 +760,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
797     printk(KERN_ERR PREFIX "Invalid data\n");
798     continue;
799     }
800     - br->levels[count] = (u32) o->integer.value;
801     + value = (u32) o->integer.value;
802     + /* Skip duplicate entries */
803     + if (count > 2 && br->levels[count - 1] == value)
804     + continue;
805     +
806     + br->levels[count] = value;
807    
808     if (br->levels[count] > max_level)
809     max_level = br->levels[count];
810     diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
811     index cfc6073c0487..dc11b7a64376 100644
812     --- a/drivers/ata/ahci.c
813     +++ b/drivers/ata/ahci.c
814     @@ -61,6 +61,7 @@ enum board_ids {
815     /* board IDs by feature in alphabetical order */
816     board_ahci,
817     board_ahci_ign_iferr,
818     + board_ahci_noncq,
819     board_ahci_nosntf,
820     board_ahci_yes_fbs,
821    
822     @@ -119,6 +120,13 @@ static const struct ata_port_info ahci_port_info[] = {
823     .udma_mask = ATA_UDMA6,
824     .port_ops = &ahci_ops,
825     },
826     + [board_ahci_noncq] = {
827     + AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
828     + .flags = AHCI_FLAG_COMMON,
829     + .pio_mask = ATA_PIO4,
830     + .udma_mask = ATA_UDMA6,
831     + .port_ops = &ahci_ops,
832     + },
833     [board_ahci_nosntf] = {
834     AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
835     .flags = AHCI_FLAG_COMMON,
836     @@ -450,6 +458,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
837     { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
838     { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
839    
840     + /*
841     + * Samsung SSDs found on some macbooks. NCQ times out.
842     + * https://bugzilla.kernel.org/show_bug.cgi?id=60731
843     + */
844     + { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
845     +
846     /* Enmotus */
847     { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
848    
849     diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
850     index 20fd337a5731..7ccc084bf1df 100644
851     --- a/drivers/ata/libata-pmp.c
852     +++ b/drivers/ata/libata-pmp.c
853     @@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
854     * otherwise. Don't try hard to recover it.
855     */
856     ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
857     - } else if (vendor == 0x197b && devid == 0x2352) {
858     - /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
859     + } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
860     + /*
861     + * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
862     + * 0x0325: jmicron JMB394.
863     + */
864     ata_for_each_link(link, ap, EDGE) {
865     /* SRST breaks detection and disks get misclassified
866     * LPM disabled to avoid potential problems
867     diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
868     index d67fc351343c..b7695e804635 100644
869     --- a/drivers/ata/sata_sil.c
870     +++ b/drivers/ata/sata_sil.c
871     @@ -157,6 +157,7 @@ static const struct sil_drivelist {
872     { "ST380011ASL", SIL_QUIRK_MOD15WRITE },
873     { "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
874     { "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
875     + { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
876     { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
877     { }
878     };
879     diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
880     index 10a4467c63f1..701212ba38b7 100644
881     --- a/drivers/base/firmware_class.c
882     +++ b/drivers/base/firmware_class.c
883     @@ -1529,6 +1529,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
884     switch (mode) {
885     case PM_HIBERNATION_PREPARE:
886     case PM_SUSPEND_PREPARE:
887     + case PM_RESTORE_PREPARE:
888     kill_requests_without_uevent();
889     device_cache_fw_images();
890     break;
891     diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
892     index 2344a9ed17f3..47b6931fd42f 100644
893     --- a/drivers/cpufreq/powernow-k8.c
894     +++ b/drivers/cpufreq/powernow-k8.c
895     @@ -1100,7 +1100,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
896     {
897     struct powernow_k8_data *data;
898     struct init_on_cpu init_on_cpu;
899     - int rc;
900     + int rc, cpu;
901    
902     smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
903     if (rc)
904     @@ -1169,7 +1169,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
905     pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
906     data->currfid, data->currvid);
907    
908     - per_cpu(powernow_data, pol->cpu) = data;
909     + /* Point all the CPUs in this policy to the same data */
910     + for_each_cpu(cpu, pol->cpus)
911     + per_cpu(powernow_data, cpu) = data;
912    
913     return 0;
914    
915     @@ -1184,6 +1186,7 @@ err_out:
916     static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
917     {
918     struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
919     + int cpu;
920    
921     if (!data)
922     return -EINVAL;
923     @@ -1194,7 +1197,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
924    
925     kfree(data->powernow_table);
926     kfree(data);
927     - per_cpu(powernow_data, pol->cpu) = NULL;
928     + for_each_cpu(cpu, pol->cpus)
929     + per_cpu(powernow_data, cpu) = NULL;
930    
931     return 0;
932     }
933     diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
934     index 5ff6fc1819dc..a6d117728224 100644
935     --- a/drivers/dma/ioat/dma.c
936     +++ b/drivers/dma/ioat/dma.c
937     @@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
938     attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
939     for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
940     chan = ioat_chan_by_index(instance, bit);
941     - tasklet_schedule(&chan->cleanup_task);
942     + if (test_bit(IOAT_RUN, &chan->state))
943     + tasklet_schedule(&chan->cleanup_task);
944     }
945    
946     writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
947     @@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
948     {
949     struct ioat_chan_common *chan = data;
950    
951     - tasklet_schedule(&chan->cleanup_task);
952     + if (test_bit(IOAT_RUN, &chan->state))
953     + tasklet_schedule(&chan->cleanup_task);
954    
955     return IRQ_HANDLED;
956     }
957     @@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
958     chan->timer.function = device->timer_fn;
959     chan->timer.data = data;
960     tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
961     - tasklet_disable(&chan->cleanup_task);
962     }
963    
964     /**
965     @@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
966     writel(((u64) chan->completion_dma) >> 32,
967     chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
968    
969     - tasklet_enable(&chan->cleanup_task);
970     + set_bit(IOAT_RUN, &chan->state);
971     ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
972     dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
973     __func__, ioat->desccount);
974     return ioat->desccount;
975     }
976    
977     +void ioat_stop(struct ioat_chan_common *chan)
978     +{
979     + struct ioatdma_device *device = chan->device;
980     + struct pci_dev *pdev = device->pdev;
981     + int chan_id = chan_num(chan);
982     + struct msix_entry *msix;
983     +
984     + /* 1/ stop irq from firing tasklets
985     + * 2/ stop the tasklet from re-arming irqs
986     + */
987     + clear_bit(IOAT_RUN, &chan->state);
988     +
989     + /* flush inflight interrupts */
990     + switch (device->irq_mode) {
991     + case IOAT_MSIX:
992     + msix = &device->msix_entries[chan_id];
993     + synchronize_irq(msix->vector);
994     + break;
995     + case IOAT_MSI:
996     + case IOAT_INTX:
997     + synchronize_irq(pdev->irq);
998     + break;
999     + default:
1000     + break;
1001     + }
1002     +
1003     + /* flush inflight timers */
1004     + del_timer_sync(&chan->timer);
1005     +
1006     + /* flush inflight tasklet runs */
1007     + tasklet_kill(&chan->cleanup_task);
1008     +
1009     + /* final cleanup now that everything is quiesced and can't re-arm */
1010     + device->cleanup_fn((unsigned long) &chan->common);
1011     +}
1012     +
1013     /**
1014     * ioat1_dma_free_chan_resources - release all the descriptors
1015     * @chan: the channel to be cleaned
1016     @@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
1017     if (ioat->desccount == 0)
1018     return;
1019    
1020     - tasklet_disable(&chan->cleanup_task);
1021     - del_timer_sync(&chan->timer);
1022     - ioat1_cleanup(ioat);
1023     + ioat_stop(chan);
1024    
1025     /* Delay 100ms after reset to allow internal DMA logic to quiesce
1026     * before removing DMA descriptor resources.
1027     @@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
1028     static void ioat1_cleanup_event(unsigned long data)
1029     {
1030     struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
1031     + struct ioat_chan_common *chan = &ioat->base;
1032    
1033     ioat1_cleanup(ioat);
1034     + if (!test_bit(IOAT_RUN, &chan->state))
1035     + return;
1036     writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
1037     }
1038    
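
[note] ioat_stop() above fixes the free-vs-interrupt race by enforcing a strict quiesce order before final cleanup; the dma_v2.c hunk below reuses it. The general shape, with generic names rather than driver API:

    clear_bit(RUN, &state);        /* 1. irq/tasklet paths stop re-arming    */
    synchronize_irq(irq);          /* 2. wait out a handler already running  */
    del_timer_sync(&timer);        /* 3. flush a pending timer               */
    tasklet_kill(&tasklet);        /* 4. flush a scheduled tasklet           */
    /* only now is a final cleanup pass race-free */
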
1039     diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
1040     index 54fb7b9ff9aa..a1d78847e5a5 100644
1041     --- a/drivers/dma/ioat/dma.h
1042     +++ b/drivers/dma/ioat/dma.h
1043     @@ -370,6 +370,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
1044     void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
1045     void ioat_kobject_del(struct ioatdma_device *device);
1046     int ioat_dma_setup_interrupts(struct ioatdma_device *device);
1047     +void ioat_stop(struct ioat_chan_common *chan);
1048     extern const struct sysfs_ops ioat_sysfs_ops;
1049     extern struct ioat_sysfs_entry ioat_version_attr;
1050     extern struct ioat_sysfs_entry ioat_cap_attr;
1051     diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
1052     index b925e1b1d139..1cd761026d84 100644
1053     --- a/drivers/dma/ioat/dma_v2.c
1054     +++ b/drivers/dma/ioat/dma_v2.c
1055     @@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
1056     void ioat2_cleanup_event(unsigned long data)
1057     {
1058     struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
1059     + struct ioat_chan_common *chan = &ioat->base;
1060    
1061     ioat2_cleanup(ioat);
1062     + if (!test_bit(IOAT_RUN, &chan->state))
1063     + return;
1064     writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
1065     }
1066    
1067     @@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1068     ioat->issued = 0;
1069     ioat->tail = 0;
1070     ioat->alloc_order = order;
1071     + set_bit(IOAT_RUN, &chan->state);
1072     spin_unlock_bh(&ioat->prep_lock);
1073     spin_unlock_bh(&chan->cleanup_lock);
1074    
1075     - tasklet_enable(&chan->cleanup_task);
1076     ioat2_start_null_desc(ioat);
1077    
1078     /* check that we got off the ground */
1079     @@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
1080     } while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
1081    
1082     if (is_ioat_active(status) || is_ioat_idle(status)) {
1083     - set_bit(IOAT_RUN, &chan->state);
1084     return 1 << ioat->alloc_order;
1085     } else {
1086     u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
1087     @@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
1088     if (!ioat->ring)
1089     return;
1090    
1091     - tasklet_disable(&chan->cleanup_task);
1092     - del_timer_sync(&chan->timer);
1093     - device->cleanup_fn((unsigned long) c);
1094     + ioat_stop(chan);
1095     device->reset_hw(chan);
1096     - clear_bit(IOAT_RUN, &chan->state);
1097    
1098     spin_lock_bh(&chan->cleanup_lock);
1099     spin_lock_bh(&ioat->prep_lock);
1100     diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
1101     index 3efc4dcf2095..476017f7ea02 100644
1102     --- a/drivers/dma/ioat/dma_v3.c
1103     +++ b/drivers/dma/ioat/dma_v3.c
1104     @@ -627,8 +627,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
1105     static void ioat3_cleanup_event(unsigned long data)
1106     {
1107     struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
1108     + struct ioat_chan_common *chan = &ioat->base;
1109    
1110     ioat3_cleanup(ioat);
1111     + if (!test_bit(IOAT_RUN, &chan->state))
1112     + return;
1113     writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
1114     }
1115    
1116     diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
1117     index 82d2b97ad942..20f22ecb712e 100644
1118     --- a/drivers/dma/ste_dma40.c
1119     +++ b/drivers/dma/ste_dma40.c
1120     @@ -1640,6 +1640,7 @@ static void dma_tasklet(unsigned long data)
1121     struct d40_chan *d40c = (struct d40_chan *) data;
1122     struct d40_desc *d40d;
1123     unsigned long flags;
1124     + bool callback_active;
1125     dma_async_tx_callback callback;
1126     void *callback_param;
1127    
1128     @@ -1667,6 +1668,7 @@ static void dma_tasklet(unsigned long data)
1129     }
1130    
1131     /* Callback to client */
1132     + callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
1133     callback = d40d->txd.callback;
1134     callback_param = d40d->txd.callback_param;
1135    
1136     @@ -1689,7 +1691,7 @@ static void dma_tasklet(unsigned long data)
1137    
1138     spin_unlock_irqrestore(&d40c->lock, flags);
1139    
1140     - if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
1141     + if (callback_active && callback)
1142     callback(callback_param);
1143    
1144     return;
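
[note] The ste_dma40 hunk above snapshots the DMA_PREP_INTERRUPT test into callback_active while the descriptor is still valid and the channel lock is held; the descriptor can be recycled before the callback runs after unlock, so re-reading d40d->txd.flags at that point would be a use-after-free. The pattern in the abstract (names generic):

    spin_lock_irqsave(&chan->lock, flags);
    active   = !!(desc->txd.flags & DMA_PREP_INTERRUPT);  /* snapshot now */
    callback = desc->txd.callback;
    param    = desc->txd.callback_param;
    /* ... desc may be freed or recycled from here on ... */
    spin_unlock_irqrestore(&chan->lock, flags);

    if (active && callback)
            callback(param);      /* never dereference desc after unlock */
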
1145     diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
1146     index 9004c64b169e..841eee387478 100644
1147     --- a/drivers/edac/i7300_edac.c
1148     +++ b/drivers/edac/i7300_edac.c
1149     @@ -943,33 +943,35 @@ static int i7300_get_devices(struct mem_ctl_info *mci)
1150    
1151     /* Attempt to 'get' the MCH register we want */
1152     pdev = NULL;
1153     - while (!pvt->pci_dev_16_1_fsb_addr_map ||
1154     - !pvt->pci_dev_16_2_fsb_err_regs) {
1155     - pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1156     - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
1157     - if (!pdev) {
1158     - /* End of list, leave */
1159     - i7300_printk(KERN_ERR,
1160     - "'system address,Process Bus' "
1161     - "device not found:"
1162     - "vendor 0x%x device 0x%x ERR funcs "
1163     - "(broken BIOS?)\n",
1164     - PCI_VENDOR_ID_INTEL,
1165     - PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
1166     - goto error;
1167     - }
1168     -
1169     + while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1170     + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
1171     + pdev))) {
1172     /* Store device 16 funcs 1 and 2 */
1173     switch (PCI_FUNC(pdev->devfn)) {
1174     case 1:
1175     - pvt->pci_dev_16_1_fsb_addr_map = pdev;
1176     + if (!pvt->pci_dev_16_1_fsb_addr_map)
1177     + pvt->pci_dev_16_1_fsb_addr_map =
1178     + pci_dev_get(pdev);
1179     break;
1180     case 2:
1181     - pvt->pci_dev_16_2_fsb_err_regs = pdev;
1182     + if (!pvt->pci_dev_16_2_fsb_err_regs)
1183     + pvt->pci_dev_16_2_fsb_err_regs =
1184     + pci_dev_get(pdev);
1185     break;
1186     }
1187     }
1188    
1189     + if (!pvt->pci_dev_16_1_fsb_addr_map ||
1190     + !pvt->pci_dev_16_2_fsb_err_regs) {
1191     + /* At least one device was not found */
1192     + i7300_printk(KERN_ERR,
1193     + "'system address,Process Bus' device not found:"
1194     + "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
1195     + PCI_VENDOR_ID_INTEL,
1196     + PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
1197     + goto error;
1198     + }
1199     +
1200     edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
1201     pci_name(pvt->pci_dev_16_0_fsb_ctlr),
1202     pvt->pci_dev_16_0_fsb_ctlr->vendor,
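
[note] Both EDAC fixes, above and below, rely on pci_get_device() reference semantics: each call puts the reference on the device passed in as the "from" cursor and returns the next match with a fresh reference, so a pointer cached from inside the loop needs its own pci_dev_get(). Sketch with placeholder helpers:

    struct pci_dev *pdev = NULL, *saved = NULL;

    while ((pdev = pci_get_device(vendor, device, pdev))) {
            if (is_wanted(pdev))                 /* placeholder predicate */
                    saved = pci_dev_get(pdev);   /* own ref; the next loop
                                                    iteration puts pdev   */
    }
    /* loop exits with pdev == NULL; no stray reference held here */
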
1203     diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1204     index 80a963d64e58..c67fb4d707d3 100644
1205     --- a/drivers/edac/i7core_edac.c
1206     +++ b/drivers/edac/i7core_edac.c
1207     @@ -1334,14 +1334,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
1208     * is at addr 8086:2c40, instead of 8086:2c41. So, we need
1209     * to probe for the alternate address in case of failure
1210     */
1211     - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
1212     + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
1213     + pci_dev_get(*prev); /* pci_get_device will put it */
1214     pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1215     PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
1216     + }
1217    
1218     - if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
1219     + if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
1220     + !pdev) {
1221     + pci_dev_get(*prev); /* pci_get_device will put it */
1222     pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
1223     PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
1224     *prev);
1225     + }
1226    
1227     if (!pdev) {
1228     if (*prev) {
1229     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1230     index dd2d542e4651..9dcf34f9a22d 100644
1231     --- a/drivers/gpu/drm/i915/intel_display.c
1232     +++ b/drivers/gpu/drm/i915/intel_display.c
1233     @@ -7946,6 +7946,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
1234     if (ring->id == RCS)
1235     len += 6;
1236    
1237     + /*
1238     + * BSpec MI_DISPLAY_FLIP for IVB:
1239     + * "The full packet must be contained within the same cache line."
1240     + *
1241     + * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
1242     + * cacheline, if we ever start emitting more commands before
1243     + * the MI_DISPLAY_FLIP we may need to first emit everything else,
1244     + * then do the cacheline alignment, and finally emit the
1245     + * MI_DISPLAY_FLIP.
1246     + */
1247     + ret = intel_ring_cacheline_align(ring);
1248     + if (ret)
1249     + goto err_unpin;
1250     +
1251     ret = intel_ring_begin(ring, len);
1252     if (ret)
1253     goto err_unpin;
1254     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1255     index 5a97f7356843..b0191f25cd55 100644
1256     --- a/drivers/gpu/drm/i915/intel_dp.c
1257     +++ b/drivers/gpu/drm/i915/intel_dp.c
1258     @@ -450,6 +450,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
1259     uint8_t msg[20];
1260     int msg_bytes;
1261     uint8_t ack;
1262     + int retry;
1263    
1264     intel_dp_check_edp(intel_dp);
1265     if (send_bytes > 16)
1266     @@ -460,18 +461,20 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
1267     msg[3] = send_bytes - 1;
1268     memcpy(&msg[4], send, send_bytes);
1269     msg_bytes = send_bytes + 4;
1270     - for (;;) {
1271     + for (retry = 0; retry < 7; retry++) {
1272     ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
1273     if (ret < 0)
1274     return ret;
1275     if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
1276     - break;
1277     + return send_bytes;
1278     else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
1279     - udelay(100);
1280     + usleep_range(400, 500);
1281     else
1282     return -EIO;
1283     }
1284     - return send_bytes;
1285     +
1286     + DRM_ERROR("too many retries, giving up\n");
1287     + return -EIO;
1288     }
1289    
1290     /* Write a single byte to the aux channel in native mode */
1291     @@ -493,6 +496,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
1292     int reply_bytes;
1293     uint8_t ack;
1294     int ret;
1295     + int retry;
1296    
1297     intel_dp_check_edp(intel_dp);
1298     msg[0] = AUX_NATIVE_READ << 4;
1299     @@ -503,7 +507,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
1300     msg_bytes = 4;
1301     reply_bytes = recv_bytes + 1;
1302    
1303     - for (;;) {
1304     + for (retry = 0; retry < 7; retry++) {
1305     ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
1306     reply, reply_bytes);
1307     if (ret == 0)
1308     @@ -516,10 +520,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
1309     return ret - 1;
1310     }
1311     else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
1312     - udelay(100);
1313     + usleep_range(400, 500);
1314     else
1315     return -EIO;
1316     }
1317     +
1318     + DRM_ERROR("too many retries, giving up\n");
1319     + return -EIO;
1320     }
1321    
1322     static int
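
[note] The intel_dp.c hunks above bound the native AUX loops: a DEFER reply now backs off for 400-500 us via usleep_range() instead of busy-waiting in udelay(), and the transfer fails with -EIO after 7 attempts rather than spinning forever against a sink that keeps deferring. The shape of the pattern (the transfer and reply helpers are placeholders):

    for (retry = 0; retry < 7; retry++) {
            ret = send_aux_msg(msg, msg_bytes, &ack);
            if (reply_is_ack(ack))
                    return bytes_done;
            if (reply_is_defer(ack))
                    usleep_range(400, 500);  /* sink busy: back off, retry */
            else
                    return -EIO;             /* NAK or garbage: hard fail  */
    }
    return -EIO;                             /* too many defers */
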
1323     diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
1324     index 43719bbb2595..7507fe036b6e 100644
1325     --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
1326     +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
1327     @@ -1545,6 +1545,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
1328     return 0;
1329     }
1330    
1331     +/* Align the ring tail to a cacheline boundary */
1332     +int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
1333     +{
1334     + int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
1335     + int ret;
1336     +
1337     + if (num_dwords == 0)
1338     + return 0;
1339     +
1340     + ret = intel_ring_begin(ring, num_dwords);
1341     + if (ret)
1342     + return ret;
1343     +
1344     + while (num_dwords--)
1345     + intel_ring_emit(ring, MI_NOOP);
1346     +
1347     + intel_ring_advance(ring);
1348     +
1349     + return 0;
1350     +}
1351     +
1352     void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
1353     {
1354     struct drm_i915_private *dev_priv = ring->dev->dev_private;
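
[note] intel_ring_cacheline_align() above pads the ring tail out to the next 64-byte boundary with MI_NOOPs so that the LRI+SRM+MI_DISPLAY_FLIP packet emitted afterwards cannot straddle a cache line. Worked example of the arithmetic: for ring->tail == 0x1234, tail & 63 == 52, so (64 - 52) / sizeof(uint32_t) == 3 NOOP dwords are emitted and the tail advances to 0x1240, which is 64-byte aligned.
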
1355     diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
1356     index 68b1ca974d59..8eecbd28074a 100644
1357     --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
1358     +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
1359     @@ -231,6 +231,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
1360     void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
1361    
1362     int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
1363     +int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
1364     static inline void intel_ring_emit(struct intel_ring_buffer *ring,
1365     u32 data)
1366     {
1367     diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1368     index c168ae3eaa97..355e9fdedc5f 100644
1369     --- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1370     +++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
1371     @@ -1112,7 +1112,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1372     if (conf != ~0) {
1373     if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
1374     u32 soff = (ffs(outp.or) - 1) * 0x08;
1375     - u32 ctrl = nv_rd32(priv, 0x610798 + soff);
1376     + u32 ctrl = nv_rd32(priv, 0x610794 + soff);
1377     u32 datarate;
1378    
1379     switch ((ctrl & 0x000f0000) >> 16) {
1380     diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
1381     index 32c6b0a60fb3..95aae9110405 100644
1382     --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
1383     +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
1384     @@ -382,6 +382,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
1385     if (ret)
1386     goto fail_device;
1387    
1388     + dev->irq_enabled = true;
1389     +
1390     /* workaround an odd issue on nvc1 by disabling the device's
1391     * nosnoop capability. hopefully won't cause issues until a
1392     * better fix is found - assuming there is one...
1393     @@ -481,6 +483,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
1394     struct nouveau_drm *drm = nouveau_drm(dev);
1395     struct nouveau_object *device;
1396    
1397     + dev->irq_enabled = false;
1398     device = drm->client.base.device;
1399     drm_put_dev(dev);
1400    
1401     diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1402     index 0ee2cf5cf76e..dcb652a6f924 100644
1403     --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1404     +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1405     @@ -1744,6 +1744,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1406     return ATOM_PPLL1;
1407     DRM_ERROR("unable to allocate a PPLL\n");
1408     return ATOM_PPLL_INVALID;
1409     + } else if (ASIC_IS_DCE41(rdev)) {
1410     + /* Don't share PLLs on DCE4.1 chips */
1411     + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
1412     + if (rdev->clock.dp_extclk)
1413     + /* skip PPLL programming if using ext clock */
1414     + return ATOM_PPLL_INVALID;
1415     + }
1416     + pll_in_use = radeon_get_pll_use_mask(crtc);
1417     + if (!(pll_in_use & (1 << ATOM_PPLL1)))
1418     + return ATOM_PPLL1;
1419     + if (!(pll_in_use & (1 << ATOM_PPLL2)))
1420     + return ATOM_PPLL2;
1421     + DRM_ERROR("unable to allocate a PPLL\n");
1422     + return ATOM_PPLL_INVALID;
1423     } else if (ASIC_IS_DCE4(rdev)) {
1424     /* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
1425     * depending on the asic:
1426     @@ -1771,7 +1785,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
1427     if (pll != ATOM_PPLL_INVALID)
1428     return pll;
1429     }
1430     - } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
1431     + } else {
1432     /* use the same PPLL for all monitors with the same clock */
1433     pll = radeon_get_shared_nondp_ppll(crtc);
1434     if (pll != ATOM_PPLL_INVALID)
1435     diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
1436     index 86ee09783925..2a2879e53bd5 100644
1437     --- a/drivers/gpu/drm/radeon/dce6_afmt.c
1438     +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
1439     @@ -231,7 +231,7 @@ static void dce6_audio_enable(struct radeon_device *rdev,
1440     bool enable)
1441     {
1442     WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
1443     - AUDIO_ENABLED);
1444     + enable ? AUDIO_ENABLED : 0);
1445     DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
1446     }
1447    
1448     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1449     index ffb36c1ee005..5f07d1bfbd76 100644
1450     --- a/drivers/gpu/drm/radeon/evergreen.c
1451     +++ b/drivers/gpu/drm/radeon/evergreen.c
1452     @@ -5369,9 +5369,9 @@ void evergreen_fini(struct radeon_device *rdev)
1453     radeon_wb_fini(rdev);
1454     radeon_ib_pool_fini(rdev);
1455     radeon_irq_kms_fini(rdev);
1456     - evergreen_pcie_gart_fini(rdev);
1457     uvd_v1_0_fini(rdev);
1458     radeon_uvd_fini(rdev);
1459     + evergreen_pcie_gart_fini(rdev);
1460     r600_vram_scratch_fini(rdev);
1461     radeon_gem_fini(rdev);
1462     radeon_fence_driver_fini(rdev);
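The reorder makes teardown mirror bring-up: UVD is shut down first, and only then the PCIe GART it may still be using; rv770_fini() below receives the identical fix. Schematically (example_fini is hypothetical; the exact failure the old order caused is not spelled out in the patch):

	static void example_fini(struct radeon_device *rdev)
	{
		uvd_v1_0_fini(rdev);		/* users of GART-backed memory first */
		radeon_uvd_fini(rdev);
		evergreen_pcie_gart_fini(rdev);	/* then the translation they relied on */
	}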
1463     diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
1464     index f26339028154..db0fa617e2f5 100644
1465     --- a/drivers/gpu/drm/radeon/ni_dpm.c
1466     +++ b/drivers/gpu/drm/radeon/ni_dpm.c
1467     @@ -2592,7 +2592,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
1468     if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
1469     enable_sq_ramping = false;
1470    
1471     - if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
1472     + if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
1473     enable_sq_ramping = false;
1474    
1475     for (i = 0; i < state->performance_level_count; i++) {
1476     diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1477     index d7e7c25feaaf..b8db0d7b5089 100644
1478     --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1479     +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
1480     @@ -216,7 +216,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
1481     memcpy(&output, info->buffer.pointer, size);
1482    
1483     /* TODO: check version? */
1484     - printk("ATPX version %u\n", output.version);
1485     + printk("ATPX version %u, functions 0x%08x\n",
1486     + output.version, output.function_bits);
1487    
1488     radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
1489    
1490     diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
1491     index 4a3b3c55a568..7456ce186f29 100644
1492     --- a/drivers/gpu/drm/radeon/radeon_kms.c
1493     +++ b/drivers/gpu/drm/radeon/radeon_kms.c
1494     @@ -500,6 +500,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
1495    
1496     radeon_vm_init(rdev, &fpriv->vm);
1497    
1498     + r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
1499     + if (r)
1500     + return r;
1501     +
1502     /* map the ib pool buffer read only into
1503     * virtual address space */
1504     bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
1505     @@ -507,6 +511,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
1506     r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
1507     RADEON_VM_PAGE_READABLE |
1508     RADEON_VM_PAGE_SNOOPED);
1509     +
1510     + radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
1511     if (r) {
1512     radeon_vm_fini(rdev, &fpriv->vm);
1513     kfree(fpriv);
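The added reserve/unreserve pair enforces TTM's locking rule that a buffer object must be reserved while its per-VM mappings are created or re-addressed. Condensed from the two hunks above (error unwinding trimmed):

	r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
	if (r)
		return r;
	bo_va = radeon_vm_bo_add(rdev, &fpriv->vm, rdev->ring_tmp_bo.bo);
	r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
				  RADEON_VM_PAGE_READABLE |
				  RADEON_VM_PAGE_SNOOPED);
	radeon_bo_unreserve(rdev->ring_tmp_bo.bo);	/* drop the lock either way */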
1514     diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
1515     index 6d916fc93116..83936473f8e4 100644
1516     --- a/drivers/gpu/drm/radeon/radeon_uvd.c
1517     +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
1518     @@ -170,6 +170,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)
1519    
1520     radeon_bo_unref(&rdev->uvd.vcpu_bo);
1521    
1522     + radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
1523     +
1524     release_firmware(rdev->uvd_fw);
1525     }
1526    
1527     diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
1528     index 9f5846743c9e..99dd9d8fcf72 100644
1529     --- a/drivers/gpu/drm/radeon/rv770.c
1530     +++ b/drivers/gpu/drm/radeon/rv770.c
1531     @@ -1921,9 +1921,9 @@ void rv770_fini(struct radeon_device *rdev)
1532     radeon_wb_fini(rdev);
1533     radeon_ib_pool_fini(rdev);
1534     radeon_irq_kms_fini(rdev);
1535     - rv770_pcie_gart_fini(rdev);
1536     uvd_v1_0_fini(rdev);
1537     radeon_uvd_fini(rdev);
1538     + rv770_pcie_gart_fini(rdev);
1539     r600_vram_scratch_fini(rdev);
1540     radeon_gem_fini(rdev);
1541     radeon_fence_driver_fini(rdev);
1542     diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
1543     index a7626358c95d..029b65e6c589 100644
1544     --- a/drivers/hwmon/max1668.c
1545     +++ b/drivers/hwmon/max1668.c
1546     @@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
1547     data->temp_min[index] = clamp_val(temp/1000, -128, 127);
1548     if (i2c_smbus_write_byte_data(client,
1549     MAX1668_REG_LIML_WR(index),
1550     - data->temp_max[index]))
1551     + data->temp_min[index]))
1552     count = -EIO;
1553     mutex_unlock(&data->update_lock);
1554    
1555     diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
1556     index 41c64a43bcab..ac2d69e34c8c 100644
1557     --- a/drivers/iio/gyro/Kconfig
1558     +++ b/drivers/iio/gyro/Kconfig
1559     @@ -70,7 +70,7 @@ config IIO_ST_GYRO_3AXIS
1560     select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
1561     help
1562     Say yes here to build support for STMicroelectronics gyroscopes:
1563     - L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
1564     + L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
1565    
1566     This driver can also be built as a module. If so, these modules
1567     will be created:
1568     diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
1569     index f8f2bf84a5a2..c197360c450b 100644
1570     --- a/drivers/iio/gyro/st_gyro.h
1571     +++ b/drivers/iio/gyro/st_gyro.h
1572     @@ -19,7 +19,6 @@
1573     #define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
1574     #define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
1575     #define L3GD20_GYRO_DEV_NAME "l3gd20"
1576     -#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
1577     #define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
1578     #define LSM330_GYRO_DEV_NAME "lsm330_gyro"
1579    
1580     diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
1581     index e13c2b0bf3d1..46cb5ec2b066 100644
1582     --- a/drivers/iio/gyro/st_gyro_core.c
1583     +++ b/drivers/iio/gyro/st_gyro_core.c
1584     @@ -167,11 +167,10 @@ static const struct st_sensors st_gyro_sensors[] = {
1585     .wai = ST_GYRO_2_WAI_EXP,
1586     .sensors_supported = {
1587     [0] = L3GD20_GYRO_DEV_NAME,
1588     - [1] = L3GD20H_GYRO_DEV_NAME,
1589     - [2] = LSM330D_GYRO_DEV_NAME,
1590     - [3] = LSM330DLC_GYRO_DEV_NAME,
1591     - [4] = L3G4IS_GYRO_DEV_NAME,
1592     - [5] = LSM330_GYRO_DEV_NAME,
1593     + [1] = LSM330D_GYRO_DEV_NAME,
1594     + [2] = LSM330DLC_GYRO_DEV_NAME,
1595     + [3] = L3G4IS_GYRO_DEV_NAME,
1596     + [4] = LSM330_GYRO_DEV_NAME,
1597     },
1598     .ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
1599     .odr = {
1600     diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
1601     index 16b8b8d70bf1..23c12f361b05 100644
1602     --- a/drivers/iio/gyro/st_gyro_i2c.c
1603     +++ b/drivers/iio/gyro/st_gyro_i2c.c
1604     @@ -55,7 +55,6 @@ static const struct i2c_device_id st_gyro_id_table[] = {
1605     { LSM330DL_GYRO_DEV_NAME },
1606     { LSM330DLC_GYRO_DEV_NAME },
1607     { L3GD20_GYRO_DEV_NAME },
1608     - { L3GD20H_GYRO_DEV_NAME },
1609     { L3G4IS_GYRO_DEV_NAME },
1610     { LSM330_GYRO_DEV_NAME },
1611     {},
1612     diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
1613     index 94763e25caf9..b4ad3be26687 100644
1614     --- a/drivers/iio/gyro/st_gyro_spi.c
1615     +++ b/drivers/iio/gyro/st_gyro_spi.c
1616     @@ -54,7 +54,6 @@ static const struct spi_device_id st_gyro_id_table[] = {
1617     { LSM330DL_GYRO_DEV_NAME },
1618     { LSM330DLC_GYRO_DEV_NAME },
1619     { L3GD20_GYRO_DEV_NAME },
1620     - { L3GD20H_GYRO_DEV_NAME },
1621     { L3G4IS_GYRO_DEV_NAME },
1622     { LSM330_GYRO_DEV_NAME },
1623     {},
1624     diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
1625     index 7a04f54ef961..e7e12a5f5c2d 100644
1626     --- a/drivers/input/misc/arizona-haptics.c
1627     +++ b/drivers/input/misc/arizona-haptics.c
1628     @@ -77,16 +77,14 @@ static void arizona_haptics_work(struct work_struct *work)
1629     return;
1630     }
1631    
1632     + mutex_unlock(dapm_mutex);
1633     +
1634     ret = snd_soc_dapm_sync(arizona->dapm);
1635     if (ret != 0) {
1636     dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
1637     ret);
1638     - mutex_unlock(dapm_mutex);
1639     return;
1640     }
1641     -
1642     - mutex_unlock(dapm_mutex);
1643     -
1644     } else {
1645     /* This disable sequence will be a noop if already enabled */
1646     mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
1647     @@ -99,16 +97,15 @@ static void arizona_haptics_work(struct work_struct *work)
1648     return;
1649     }
1650    
1651     + mutex_unlock(dapm_mutex);
1652     +
1653     ret = snd_soc_dapm_sync(arizona->dapm);
1654     if (ret != 0) {
1655     dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
1656     ret);
1657     - mutex_unlock(dapm_mutex);
1658     return;
1659     }
1660    
1661     - mutex_unlock(dapm_mutex);
1662     -
1663     ret = regmap_update_bits(arizona->regmap,
1664     ARIZONA_HAPTICS_CONTROL_1,
1665     ARIZONA_HAP_CTRL_MASK,
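Both hunks make the same move: release the DAPM mutex before calling snd_soc_dapm_sync(), which acquires that mutex internally and would deadlock against a caller still holding it. The resulting shape of each branch (condensed sketch):

	mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
	/* ... update the haptics widget ... */
	mutex_unlock(dapm_mutex);		/* must drop before the sync */

	ret = snd_soc_dapm_sync(arizona->dapm);	/* takes dapm_mutex itself */
	if (ret != 0)
		dev_err(arizona->dev, "Failed to sync DAPM: %d\n", ret);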
1666     diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
1667     index 0046a619527d..24a60b9979ca 100644
1668     --- a/drivers/iommu/arm-smmu.c
1669     +++ b/drivers/iommu/arm-smmu.c
1670     @@ -190,6 +190,9 @@
1671     #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
1672     #define CBAR_VMID_SHIFT 0
1673     #define CBAR_VMID_MASK 0xff
1674     +#define CBAR_S1_BPSHCFG_SHIFT 8
1675     +#define CBAR_S1_BPSHCFG_MASK 3
1676     +#define CBAR_S1_BPSHCFG_NSH 3
1677     #define CBAR_S1_MEMATTR_SHIFT 12
1678     #define CBAR_S1_MEMATTR_MASK 0xf
1679     #define CBAR_S1_MEMATTR_WB 0xf
1680     @@ -392,7 +395,7 @@ struct arm_smmu_domain {
1681     struct arm_smmu_cfg root_cfg;
1682     phys_addr_t output_mask;
1683    
1684     - struct mutex lock;
1685     + spinlock_t lock;
1686     };
1687    
1688     static DEFINE_SPINLOCK(arm_smmu_devices_lock);
1689     @@ -646,11 +649,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
1690     if (smmu->version == 1)
1691     reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
1692    
1693     - /* Use the weakest memory type, so it is overridden by the pte */
1694     - if (stage1)
1695     - reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
1696     - else
1697     + /*
1698     + * Use the weakest shareability/memory types, so they are
1699     + * overridden by the ttbcr/pte.
1700     + */
1701     + if (stage1) {
1702     + reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
1703     + (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
1704     + } else {
1705     reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
1706     + }
1707     writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
1708    
1709     if (smmu->version > 1) {
1710     @@ -897,7 +905,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
1711     goto out_free_domain;
1712     smmu_domain->root_cfg.pgd = pgd;
1713    
1714     - mutex_init(&smmu_domain->lock);
1715     + spin_lock_init(&smmu_domain->lock);
1716     domain->priv = smmu_domain;
1717     return 0;
1718    
1719     @@ -1134,7 +1142,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1720     * Sanity check the domain. We don't currently support domains
1721     * that cross between different SMMU chains.
1722     */
1723     - mutex_lock(&smmu_domain->lock);
1724     + spin_lock(&smmu_domain->lock);
1725     if (!smmu_domain->leaf_smmu) {
1726     /* Now that we have a master, we can finalise the domain */
1727     ret = arm_smmu_init_domain_context(domain, dev);
1728     @@ -1149,7 +1157,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1729     dev_name(device_smmu->dev));
1730     goto err_unlock;
1731     }
1732     - mutex_unlock(&smmu_domain->lock);
1733     + spin_unlock(&smmu_domain->lock);
1734    
1735     /* Looks ok, so add the device to the domain */
1736     master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
1737     @@ -1159,7 +1167,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1738     return arm_smmu_domain_add_master(smmu_domain, master);
1739    
1740     err_unlock:
1741     - mutex_unlock(&smmu_domain->lock);
1742     + spin_unlock(&smmu_domain->lock);
1743     return ret;
1744     }
1745    
1746     @@ -1206,7 +1214,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
1747    
1748     if (pmd_none(*pmd)) {
1749     /* Allocate a new set of tables */
1750     - pgtable_t table = alloc_page(PGALLOC_GFP);
1751     + pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
1752     if (!table)
1753     return -ENOMEM;
1754    
1755     @@ -1308,9 +1316,14 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
1756    
1757     #ifndef __PAGETABLE_PMD_FOLDED
1758     if (pud_none(*pud)) {
1759     - pmd = pmd_alloc_one(NULL, addr);
1760     + pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
1761     if (!pmd)
1762     return -ENOMEM;
1763     +
1764     + pud_populate(NULL, pud, pmd);
1765     + arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
1766     +
1767     + pmd += pmd_index(addr);
1768     } else
1769     #endif
1770     pmd = pmd_offset(pud, addr);
1771     @@ -1319,8 +1332,6 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
1772     next = pmd_addr_end(addr, end);
1773     ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
1774     flags, stage);
1775     - pud_populate(NULL, pud, pmd);
1776     - arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
1777     phys += next - addr;
1778     } while (pmd++, addr = next, addr < end);
1779    
1780     @@ -1337,9 +1348,14 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
1781    
1782     #ifndef __PAGETABLE_PUD_FOLDED
1783     if (pgd_none(*pgd)) {
1784     - pud = pud_alloc_one(NULL, addr);
1785     + pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
1786     if (!pud)
1787     return -ENOMEM;
1788     +
1789     + pgd_populate(NULL, pgd, pud);
1790     + arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
1791     +
1792     + pud += pud_index(addr);
1793     } else
1794     #endif
1795     pud = pud_offset(pgd, addr);
1796     @@ -1348,8 +1364,6 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
1797     next = pud_addr_end(addr, end);
1798     ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
1799     flags, stage);
1800     - pgd_populate(NULL, pud, pgd);
1801     - arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
1802     phys += next - addr;
1803     } while (pud++, addr = next, addr < end);
1804    
1805     @@ -1388,7 +1402,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1806     if (paddr & ~output_mask)
1807     return -ERANGE;
1808    
1809     - mutex_lock(&smmu_domain->lock);
1810     + spin_lock(&smmu_domain->lock);
1811     pgd += pgd_index(iova);
1812     end = iova + size;
1813     do {
1814     @@ -1404,7 +1418,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
1815     } while (pgd++, iova != end);
1816    
1817     out_unlock:
1818     - mutex_unlock(&smmu_domain->lock);
1819     + spin_unlock(&smmu_domain->lock);
1820    
1821     /* Ensure new page tables are visible to the hardware walker */
1822     if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
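The common thread through these hunks: map/unmap can be reached from atomic context, so the per-domain mutex becomes a spinlock, every page-table allocation made under it switches to a non-sleeping GFP_ATOMIC variant, and each new table is populated and flushed before the walk descends into it rather than after. A minimal sketch of the discipline (example_map is hypothetical):

	static int example_map(struct arm_smmu_domain *smmu_domain)
	{
		pud_t *pud;

		spin_lock(&smmu_domain->lock);
		pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);	/* must not sleep */
		if (!pud) {
			spin_unlock(&smmu_domain->lock);
			return -ENOMEM;
		}
		/* populate + flush before descending, so the hardware
		 * walker never sees a half-built table */
		spin_unlock(&smmu_domain->lock);
		return 0;
	}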
1823     diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
1824     index 92c41ab4dbfd..2cb474ad8809 100644
1825     --- a/drivers/irqchip/irq-metag-ext.c
1826     +++ b/drivers/irqchip/irq-metag-ext.c
1827     @@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data,
1828     * one cpu (the interrupt code doesn't support it), so we just
1829     * pick the first cpu we find in 'cpumask'.
1830     */
1831     - cpu = cpumask_any(cpumask);
1832     + cpu = cpumask_any_and(cpumask, cpu_online_mask);
1833     thread = cpu_2_hwthread_id[cpu];
1834    
1835     metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
1836     diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
1837     index 8e94d7a3b20d..c16c186d97d3 100644
1838     --- a/drivers/irqchip/irq-metag.c
1839     +++ b/drivers/irqchip/irq-metag.c
1840     @@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
1841     * one cpu (the interrupt code doesn't support it), so we just
1842     * pick the first cpu we find in 'cpumask'.
1843     */
1844     - cpu = cpumask_any(cpumask);
1845     + cpu = cpumask_any_and(cpumask, cpu_online_mask);
1846     thread = cpu_2_hwthread_id[cpu];
1847    
1848     metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
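Both metag irqchips pick up the same fix: an affinity mask supplied by userspace may contain only offline CPUs, and feeding the result of a plain cpumask_any() on such a mask into cpu_2_hwthread_id[] indexes the array with a bogus CPU number. Restricting the pick to online CPUs, plus an explicit bound check that the hunks themselves leave to the irq core:

	static int example_set_affinity(const struct cpumask *cpumask)
	{
		unsigned int cpu, thread;

		cpu = cpumask_any_and(cpumask, cpu_online_mask);
		if (cpu >= nr_cpu_ids)	/* no online CPU in the mask */
			return -EINVAL;

		thread = cpu_2_hwthread_id[cpu];
		/* ... retarget the trigger vector at this hw thread ... */
		return 0;
	}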
1849     diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
1850     index e51d40031884..8e41be62812e 100644
1851     --- a/drivers/irqchip/irq-orion.c
1852     +++ b/drivers/irqchip/irq-orion.c
1853     @@ -111,7 +111,8 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
1854     static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
1855     {
1856     struct irq_domain *d = irq_get_handler_data(irq);
1857     - struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq);
1858     +
1859     + struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
1860     u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
1861     gc->mask_cache;
1862    
1863     @@ -123,6 +124,19 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
1864     }
1865     }
1866    
1867     +/*
1868     + * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
1869     + * To avoid interrupt events on stale irqs, we clear them before unmask.
1870     + */
1871     +static unsigned int orion_bridge_irq_startup(struct irq_data *d)
1872     +{
1873     + struct irq_chip_type *ct = irq_data_get_chip_type(d);
1874     +
1875     + ct->chip.irq_ack(d);
1876     + ct->chip.irq_unmask(d);
1877     + return 0;
1878     +}
1879     +
1880     static int __init orion_bridge_irq_init(struct device_node *np,
1881     struct device_node *parent)
1882     {
1883     @@ -143,7 +157,7 @@ static int __init orion_bridge_irq_init(struct device_node *np,
1884     }
1885    
1886     ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
1887     - handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
1888     + handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
1889     if (ret) {
1890     pr_err("%s: unable to alloc irq domain gc\n", np->name);
1891     return ret;
1892     @@ -176,12 +190,14 @@ static int __init orion_bridge_irq_init(struct device_node *np,
1893    
1894     gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
1895     gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
1896     + gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
1897     gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
1898     gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
1899     gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
1900    
1901     - /* mask all interrupts */
1902     + /* mask and clear all interrupts */
1903     writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
1904     + writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
1905    
1906     irq_set_handler_data(irq, domain);
1907     irq_set_chained_handler(irq, orion_bridge_irq_handler);
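Three coordinated changes: the flow handler becomes handle_edge_irq, init masks and clears all interrupts, and a startup hook acks before unmasking, because IRQ_CAUSE latches events even while the source is masked. The hook restated with the ordering spelled out:

	static unsigned int example_irq_startup(struct irq_data *d)
	{
		struct irq_chip_type *ct = irq_data_get_chip_type(d);

		ct->chip.irq_ack(d);	/* clear a cause bit latched while masked */
		ct->chip.irq_unmask(d);	/* only now let new edges through */
		return 0;
	}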
1908     diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
1909     index 799e479db93b..709ce1b2582e 100644
1910     --- a/drivers/md/dm-mpath.c
1911     +++ b/drivers/md/dm-mpath.c
1912     @@ -1623,8 +1623,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1913     /*
1914     * Only pass ioctls through if the device sizes match exactly.
1915     */
1916     - if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1917     - r = scsi_verify_blk_ioctl(NULL, cmd);
1918     + if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
1919     + int err = scsi_verify_blk_ioctl(NULL, cmd);
1920     + if (err)
1921     + r = err;
1922     + }
1923    
1924     if (r == -ENOTCONN && !fatal_signal_pending(current))
1925     queue_work(kmultipathd, &m->process_queued_ios);
1926     diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
1927     index 7da347665552..3bb4506582a9 100644
1928     --- a/drivers/md/dm-thin-metadata.c
1929     +++ b/drivers/md/dm-thin-metadata.c
1930     @@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
1931     return r;
1932     }
1933    
1934     +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
1935     +{
1936     + bool r = false;
1937     + struct dm_thin_device *td, *tmp;
1938     +
1939     + down_read(&pmd->root_lock);
1940     + list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
1941     + if (td->changed) {
1942     + r = td->changed;
1943     + break;
1944     + }
1945     + }
1946     + up_read(&pmd->root_lock);
1947     +
1948     + return r;
1949     +}
1950     +
1951     bool dm_thin_aborted_changes(struct dm_thin_device *td)
1952     {
1953     bool r;
1954     diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
1955     index 2edf5dbac76a..c6d123bb768a 100644
1956     --- a/drivers/md/dm-thin-metadata.h
1957     +++ b/drivers/md/dm-thin-metadata.h
1958     @@ -161,6 +161,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
1959     */
1960     bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
1961    
1962     +bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
1963     +
1964     bool dm_thin_aborted_changes(struct dm_thin_device *td);
1965    
1966     int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
1967     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1968     index bc0c97d7921e..e9587101b04f 100644
1969     --- a/drivers/md/dm-thin.c
1970     +++ b/drivers/md/dm-thin.c
1971     @@ -1354,7 +1354,8 @@ static void process_deferred_bios(struct pool *pool)
1972     bio_list_init(&pool->deferred_flush_bios);
1973     spin_unlock_irqrestore(&pool->lock, flags);
1974    
1975     - if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1976     + if (bio_list_empty(&bios) &&
1977     + !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
1978     return;
1979    
1980     if (commit(pool)) {
1981     @@ -2847,6 +2848,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
1982    
1983     if (get_pool_mode(tc->pool) == PM_FAIL) {
1984     ti->error = "Couldn't open thin device, Pool is in fail mode";
1985     + r = -EINVAL;
1986     goto bad_thin_open;
1987     }
1988    
1989     @@ -2858,7 +2860,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
1990    
1991     r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
1992     if (r)
1993     - goto bad_thin_open;
1994     + goto bad_target_max_io_len;
1995    
1996     ti->num_flush_bios = 1;
1997     ti->flush_supported = true;
1998     @@ -2879,6 +2881,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
1999    
2000     return 0;
2001    
2002     +bad_target_max_io_len:
2003     + dm_pool_close_thin_device(tc->td);
2004     bad_thin_open:
2005     __pool_dec(tc->pool);
2006     bad_pool_lookup:
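Two independent dm-thin fixes land here: the periodic commit is skipped when the transaction is clean, saving a metadata write per commit interval on idle pools, and the constructor error paths gain a real errno plus a matching close of the thin device. The new commit decision as a predicate (example_should_commit is hypothetical):

	static bool example_should_commit(struct pool *pool, struct bio_list *bios)
	{
		if (!bio_list_empty(bios))
			return true;	/* deferred flush bios need the commit */

		/* the timer alone no longer forces a commit */
		return dm_pool_changed_this_transaction(pool->pmd) &&
		       need_commit_due_to_time(pool);
	}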
2007     diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
2008     index 13af7e50021e..8103e4362132 100644
2009     --- a/drivers/mfd/da9055-i2c.c
2010     +++ b/drivers/mfd/da9055-i2c.c
2011     @@ -53,17 +53,25 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
2012     return 0;
2013     }
2014    
2015     +/*
2016     + * DO NOT change the device Ids. The naming is intentionally specific as both
2017     + * the PMIC and CODEC parts of this chip are instantiated separately as I2C
2018     + * devices (both have configurable I2C addresses, and are to all intents and
2019     + * purposes separate). As a result there are specific DA9055 ids for PMIC
2020     + * and CODEC, which must be different to operate together.
2021     + */
2022     static struct i2c_device_id da9055_i2c_id[] = {
2023     - {"da9055", 0},
2024     + {"da9055-pmic", 0},
2025     { }
2026     };
2027     +MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
2028    
2029     static struct i2c_driver da9055_i2c_driver = {
2030     .probe = da9055_i2c_probe,
2031     .remove = da9055_i2c_remove,
2032     .id_table = da9055_i2c_id,
2033     .driver = {
2034     - .name = "da9055",
2035     + .name = "da9055-pmic",
2036     .owner = THIS_MODULE,
2037     },
2038     };
2039     diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
2040     index 8c33f943abbf..b66cec93ebb3 100644
2041     --- a/drivers/misc/mei/client.c
2042     +++ b/drivers/misc/mei/client.c
2043     @@ -660,7 +660,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
2044     goto err;
2045    
2046     cb->fop_type = MEI_FOP_READ;
2047     - cl->read_cb = cb;
2048     if (dev->hbuf_is_ready) {
2049     dev->hbuf_is_ready = false;
2050     if (mei_hbm_cl_flow_control_req(dev, cl)) {
2051     @@ -671,6 +670,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
2052     } else {
2053     list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
2054     }
2055     +
2056     + cl->read_cb = cb;
2057     +
2058     return rets;
2059     err:
2060     mei_io_cb_free(cb);
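Moving the cl->read_cb assignment applies a publish-last rule: the callback block becomes visible on the client only after the request has been queued successfully, so the error path, which frees cb, cannot leave a dangling pointer behind. Schematically (queue_request is a hypothetical stand-in for the flow-control/queueing logic above):

	cb->fop_type = MEI_FOP_READ;
	if (queue_request(cb) < 0) {	/* hypothetical stand-in */
		mei_io_cb_free(cb);	/* cb was never published: safe to free */
		return rets;
	}
	cl->read_cb = cb;		/* publish only after success */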
2061     diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
2062     index 0d8f427ade93..b3c22527b938 100644
2063     --- a/drivers/net/bonding/bond_3ad.c
2064     +++ b/drivers/net/bonding/bond_3ad.c
2065     @@ -1861,8 +1861,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
2066     BOND_AD_INFO(bond).agg_select_timer = timeout;
2067     }
2068    
2069     -static u16 aggregator_identifier;
2070     -
2071     /**
2072     * bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
2073     * @bond: bonding struct to work on
2074     @@ -1876,7 +1874,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
2075     if (MAC_ADDRESS_COMPARE(&(BOND_AD_INFO(bond).system.sys_mac_addr),
2076     bond->dev->dev_addr)) {
2077    
2078     - aggregator_identifier = 0;
2079     + BOND_AD_INFO(bond).aggregator_identifier = 0;
2080    
2081     BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
2082     BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
2083     @@ -1947,7 +1945,7 @@ int bond_3ad_bind_slave(struct slave *slave)
2084     ad_initialize_agg(aggregator);
2085    
2086     aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
2087     - aggregator->aggregator_identifier = (++aggregator_identifier);
2088     + aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
2089     aggregator->slave = slave;
2090     aggregator->is_active = 0;
2091     aggregator->num_of_ports = 0;
2092     diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
2093     index 5d91ad0cc041..1f081c89753f 100644
2094     --- a/drivers/net/bonding/bond_3ad.h
2095     +++ b/drivers/net/bonding/bond_3ad.h
2096     @@ -253,6 +253,7 @@ struct ad_system {
2097     struct ad_bond_info {
2098     struct ad_system system; /* 802.3ad system structure */
2099     u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
2100     + u16 aggregator_identifier;
2101     };
2102    
2103     struct ad_slave_info {
2104     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
2105     index 1870c4731a57..539239d8e9ab 100644
2106     --- a/drivers/net/can/dev.c
2107     +++ b/drivers/net/can/dev.c
2108     @@ -324,19 +324,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
2109     }
2110    
2111     if (!priv->echo_skb[idx]) {
2112     - struct sock *srcsk = skb->sk;
2113    
2114     - if (atomic_read(&skb->users) != 1) {
2115     - struct sk_buff *old_skb = skb;
2116     -
2117     - skb = skb_clone(old_skb, GFP_ATOMIC);
2118     - kfree_skb(old_skb);
2119     - if (!skb)
2120     - return;
2121     - } else
2122     - skb_orphan(skb);
2123     -
2124     - skb->sk = srcsk;
2125     + skb = can_create_echo_skb(skb);
2126     + if (!skb)
2127     + return;
2128    
2129     /* make settings for echo to reduce code in irq context */
2130     skb->protocol = htons(ETH_P_CAN);
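This hunk, and the janz-ican3 and vcan hunks that follow, replace one open-coded clone-or-orphan sequence with the shared can_create_echo_skb() helper from <linux/can/skb.h>. Reconstructed from the deleted lines, its effect is roughly this (a sketch, not the helper's exact body):

	static struct sk_buff *example_create_echo_skb(struct sk_buff *skb)
	{
		struct sock *srcsk = skb->sk;

		if (atomic_read(&skb->users) != 1) {
			struct sk_buff *old_skb = skb;

			skb = skb_clone(old_skb, GFP_ATOMIC);	/* private copy */
			kfree_skb(old_skb);
			if (!skb)
				return NULL;
		} else {
			skb_orphan(skb);	/* sole owner: detach and reuse */
		}

		skb->sk = srcsk;	/* keep the socket for echo delivery */
		return skb;
	}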
2131     diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
2132     index 36bd6fa1c7f3..db2ff0340388 100644
2133     --- a/drivers/net/can/janz-ican3.c
2134     +++ b/drivers/net/can/janz-ican3.c
2135     @@ -19,6 +19,7 @@
2136     #include <linux/netdevice.h>
2137     #include <linux/can.h>
2138     #include <linux/can/dev.h>
2139     +#include <linux/can/skb.h>
2140     #include <linux/can/error.h>
2141    
2142     #include <linux/mfd/janz.h>
2143     @@ -1134,20 +1135,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
2144     */
2145     static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
2146     {
2147     - struct sock *srcsk = skb->sk;
2148     -
2149     - if (atomic_read(&skb->users) != 1) {
2150     - struct sk_buff *old_skb = skb;
2151     -
2152     - skb = skb_clone(old_skb, GFP_ATOMIC);
2153     - kfree_skb(old_skb);
2154     - if (!skb)
2155     - return;
2156     - } else {
2157     - skb_orphan(skb);
2158     - }
2159     -
2160     - skb->sk = srcsk;
2161     + skb = can_create_echo_skb(skb);
2162     + if (!skb)
2163     + return;
2164    
2165     /* save this skb for tx interrupt echo handling */
2166     skb_queue_tail(&mod->echoq, skb);
2167     diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
2168     index 4b2d5ed62b11..cc3df8aebb87 100644
2169     --- a/drivers/net/can/usb/kvaser_usb.c
2170     +++ b/drivers/net/can/usb/kvaser_usb.c
2171     @@ -474,6 +474,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
2172     return err;
2173    
2174     dev->nchannels = msg.u.cardinfo.nchannels;
2175     + if (dev->nchannels > MAX_NET_DEVICES)
2176     + return -EINVAL;
2177    
2178     return 0;
2179     }
2180     diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
2181     index 0a2a5ee79a17..4e94057ef5cf 100644
2182     --- a/drivers/net/can/vcan.c
2183     +++ b/drivers/net/can/vcan.c
2184     @@ -46,6 +46,7 @@
2185     #include <linux/if_ether.h>
2186     #include <linux/can.h>
2187     #include <linux/can/dev.h>
2188     +#include <linux/can/skb.h>
2189     #include <linux/slab.h>
2190     #include <net/rtnetlink.h>
2191    
2192     @@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
2193     stats->rx_packets++;
2194     stats->rx_bytes += cfd->len;
2195     }
2196     - kfree_skb(skb);
2197     + consume_skb(skb);
2198     return NETDEV_TX_OK;
2199     }
2200    
2201     /* perform standard echo handling for CAN network interfaces */
2202    
2203     if (loop) {
2204     - struct sock *srcsk = skb->sk;
2205    
2206     - skb = skb_share_check(skb, GFP_ATOMIC);
2207     + skb = can_create_echo_skb(skb);
2208     if (!skb)
2209     return NETDEV_TX_OK;
2210    
2211     /* receive with packet counting */
2212     - skb->sk = srcsk;
2213     vcan_rx(skb, dev);
2214     } else {
2215     /* no looped packets => no counting */
2216     - kfree_skb(skb);
2217     + consume_skb(skb);
2218     }
2219     return NETDEV_TX_OK;
2220     }
2221     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
2222     index 3ff1f272c6c8..c0acf98d1ea5 100644
2223     --- a/drivers/net/ethernet/broadcom/tg3.c
2224     +++ b/drivers/net/ethernet/broadcom/tg3.c
2225     @@ -13956,12 +13956,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
2226    
2227     tg3_netif_stop(tp);
2228    
2229     + tg3_set_mtu(dev, tp, new_mtu);
2230     +
2231     tg3_full_lock(tp, 1);
2232    
2233     tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
2234    
2235     - tg3_set_mtu(dev, tp, new_mtu);
2236     -
2237     /* Reset PHY, otherwise the read DMA engine will be in a mode that
2238     * breaks all requests to 256 bytes.
2239     */
2240     diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
2241     index 386a3df53678..20643833f0e6 100644
2242     --- a/drivers/net/usb/asix_devices.c
2243     +++ b/drivers/net/usb/asix_devices.c
2244     @@ -918,7 +918,8 @@ static const struct driver_info ax88178_info = {
2245     .status = asix_status,
2246     .link_reset = ax88178_link_reset,
2247     .reset = ax88178_reset,
2248     - .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
2249     + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
2250     + FLAG_MULTI_PACKET,
2251     .rx_fixup = asix_rx_fixup_common,
2252     .tx_fixup = asix_tx_fixup,
2253     };
2254     diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
2255     index 846cc19c04f2..5e2bac650bd8 100644
2256     --- a/drivers/net/usb/ax88179_178a.c
2257     +++ b/drivers/net/usb/ax88179_178a.c
2258     @@ -1120,6 +1120,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2259     u16 hdr_off;
2260     u32 *pkt_hdr;
2261    
2262     + /* This check is no longer done by usbnet */
2263     + if (skb->len < dev->net->hard_header_len)
2264     + return 0;
2265     +
2266     skb_trim(skb, skb->len - 4);
2267     memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
2268     le32_to_cpus(&rx_hdr);
2269     diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
2270     index a7e3f4e55bf3..82ab61d62804 100644
2271     --- a/drivers/net/usb/gl620a.c
2272     +++ b/drivers/net/usb/gl620a.c
2273     @@ -86,6 +86,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2274     u32 size;
2275     u32 count;
2276    
2277     + /* This check is no longer done by usbnet */
2278     + if (skb->len < dev->net->hard_header_len)
2279     + return 0;
2280     +
2281     header = (struct gl_header *) skb->data;
2282    
2283     // get the packet count of the received skb
2284     diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
2285     index 03832d3780aa..9237c45883cd 100644
2286     --- a/drivers/net/usb/mcs7830.c
2287     +++ b/drivers/net/usb/mcs7830.c
2288     @@ -529,8 +529,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2289     {
2290     u8 status;
2291    
2292     - if (skb->len == 0) {
2293     - dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
2294     + /* This check is no longer done by usbnet */
2295     + if (skb->len < dev->net->hard_header_len) {
2296     + dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
2297     return 0;
2298     }
2299    
2300     diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
2301     index 93e0716a118c..7f4a3a41c4f8 100644
2302     --- a/drivers/net/usb/net1080.c
2303     +++ b/drivers/net/usb/net1080.c
2304     @@ -366,6 +366,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2305     struct nc_trailer *trailer;
2306     u16 hdr_len, packet_len;
2307    
2308     + /* This check is no longer done by usbnet */
2309     + if (skb->len < dev->net->hard_header_len)
2310     + return 0;
2311     +
2312     if (!(skb->len & 0x01)) {
2313     netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
2314     skb->len, dev->net->hard_header_len, dev->hard_mtu,
2315     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2316     index 818ce90185b5..558469fda3b7 100644
2317     --- a/drivers/net/usb/qmi_wwan.c
2318     +++ b/drivers/net/usb/qmi_wwan.c
2319     @@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2320     {
2321     __be16 proto;
2322    
2323     - /* usbnet rx_complete guarantees that skb->len is at least
2324     - * hard_header_len, so we can inspect the dest address without
2325     - * checking skb->len
2326     - */
2327     + /* This check is no longer done by usbnet */
2328     + if (skb->len < dev->net->hard_header_len)
2329     + return 0;
2330     +
2331     switch (skb->data[0] & 0xf0) {
2332     case 0x40:
2333     proto = htons(ETH_P_IP);
2334     @@ -710,6 +710,7 @@ static const struct usb_device_id products[] = {
2335     {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
2336     {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
2337     {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
2338     + {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
2339     {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
2340     {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
2341     {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
2342     diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
2343     index cc49aac70224..691fca4e4c2d 100644
2344     --- a/drivers/net/usb/rndis_host.c
2345     +++ b/drivers/net/usb/rndis_host.c
2346     @@ -494,6 +494,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
2347     */
2348     int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2349     {
2350     + /* This check is no longer done by usbnet */
2351     + if (skb->len < dev->net->hard_header_len)
2352     + return 0;
2353     +
2354     /* peripheral may have batched packets to us... */
2355     while (likely(skb->len)) {
2356     struct rndis_data_hdr *hdr = (void *)skb->data;
2357     diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
2358     index 66ebbacf066f..12afae0451e6 100644
2359     --- a/drivers/net/usb/smsc75xx.c
2360     +++ b/drivers/net/usb/smsc75xx.c
2361     @@ -2108,6 +2108,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
2362    
2363     static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2364     {
2365     + /* This check is no longer done by usbnet */
2366     + if (skb->len < dev->net->hard_header_len)
2367     + return 0;
2368     +
2369     while (skb->len > 0) {
2370     u32 rx_cmd_a, rx_cmd_b, align_count, size;
2371     struct sk_buff *ax_skb;
2372     diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
2373     index 3f38ba868f61..9375b8c6410b 100644
2374     --- a/drivers/net/usb/smsc95xx.c
2375     +++ b/drivers/net/usb/smsc95xx.c
2376     @@ -1725,6 +1725,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
2377    
2378     static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
2379     {
2380     + /* This check is no longer done by usbnet */
2381     + if (skb->len < dev->net->hard_header_len)
2382     + return 0;
2383     +
2384     while (skb->len > 0) {
2385     u32 header, align_count;
2386     struct sk_buff *ax_skb;
2387     diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
2388     index aba04f561760..a91fa49b81c3 100644
2389     --- a/drivers/net/usb/usbnet.c
2390     +++ b/drivers/net/usb/usbnet.c
2391     @@ -543,17 +543,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
2392     }
2393     // else network stack removes extra byte if we forced a short packet
2394    
2395     - if (skb->len) {
2396     - /* all data was already cloned from skb inside the driver */
2397     - if (dev->driver_info->flags & FLAG_MULTI_PACKET)
2398     - dev_kfree_skb_any(skb);
2399     - else
2400     - usbnet_skb_return(dev, skb);
2401     + /* all data was already cloned from skb inside the driver */
2402     + if (dev->driver_info->flags & FLAG_MULTI_PACKET)
2403     + goto done;
2404     +
2405     + if (skb->len < ETH_HLEN) {
2406     + dev->net->stats.rx_errors++;
2407     + dev->net->stats.rx_length_errors++;
2408     + netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
2409     + } else {
2410     + usbnet_skb_return(dev, skb);
2411     return;
2412     }
2413    
2414     - netif_dbg(dev, rx_err, dev->net, "drop\n");
2415     - dev->net->stats.rx_errors++;
2416     done:
2417     skb_queue_tail(&dev->done, skb);
2418     }
2419     @@ -575,13 +577,6 @@ static void rx_complete (struct urb *urb)
2420     switch (urb_status) {
2421     /* success */
2422     case 0:
2423     - if (skb->len < dev->net->hard_header_len) {
2424     - state = rx_cleanup;
2425     - dev->net->stats.rx_errors++;
2426     - dev->net->stats.rx_length_errors++;
2427     - netif_dbg(dev, rx_err, dev->net,
2428     - "rx length %d\n", skb->len);
2429     - }
2430     break;
2431    
2432     /* stalls need manual reset. this is rare ... except that
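The usbnet core change explains the wave of identical guards above: the blanket hard_header_len check in rx_complete() was wrong for FLAG_MULTI_PACKET minidrivers, whose URBs carry framing rather than a bare Ethernet frame, so the check moves out of the core (which keeps only an ETH_HLEN sanity check for plain drivers) and into each rx_fixup() that still needs it. The per-driver pattern:

	static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
	{
		/* This check is no longer done by usbnet */
		if (skb->len < dev->net->hard_header_len)
			return 0;	/* 0 tells usbnet to drop the skb */

		/* ... strip framing, pass packets to usbnet_skb_return() ... */
		return 1;	/* nonzero: skb handled fine */
	}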
2433     diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
2434     index 56aee067f324..a6ad79f61bf9 100644
2435     --- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
2436     +++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
2437     @@ -15,6 +15,8 @@
2438     #ifndef RTL8187_H
2439     #define RTL8187_H
2440    
2441     +#include <linux/cache.h>
2442     +
2443     #include "rtl818x.h"
2444     #include "leds.h"
2445    
2446     @@ -139,7 +141,10 @@ struct rtl8187_priv {
2447     u8 aifsn[4];
2448     u8 rfkill_mask;
2449     struct {
2450     - __le64 buf;
2451     + union {
2452     + __le64 buf;
2453     + u8 dummy1[L1_CACHE_BYTES];
2454     + } ____cacheline_aligned;
2455     struct sk_buff_head queue;
2456     } b_tx_status; /* This queue is used by both -b and non-b devices */
2457     struct mutex io_mutex;
2458     @@ -147,7 +152,8 @@ struct rtl8187_priv {
2459     u8 bits8;
2460     __le16 bits16;
2461     __le32 bits32;
2462     - } *io_dmabuf;
2463     + u8 dummy2[L1_CACHE_BYTES];
2464     + } *io_dmabuf ____cacheline_aligned;
2465     bool rfkill_off;
2466     u16 seqno;
2467     };
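Both paddings serve USB DMA on non-cache-coherent platforms: a buffer handed to the controller must own its cache lines outright, or cache maintenance on the transfer can corrupt neighbouring structure fields. The shape of the fix, using L1_CACHE_BYTES from the newly included <linux/cache.h>:

	struct example_priv {
		union {
			__le64 buf;		/* what the hardware DMAs */
			u8 pad[L1_CACHE_BYTES];	/* round up to a whole line */
		} ____cacheline_aligned;	/* start on a line boundary too */
	};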
2468     diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
2469     index 0d81f766fd0f..a56e9b3c96c2 100644
2470     --- a/drivers/net/wireless/rtlwifi/ps.c
2471     +++ b/drivers/net/wireless/rtlwifi/ps.c
2472     @@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
2473    
2474     /*<2> Enable Adapter */
2475     if (rtlpriv->cfg->ops->hw_init(hw))
2476     - return 1;
2477     + return false;
2478     RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2479    
2480     /*<3> Enable Interrupt */
2481     diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
2482     index a82b30a1996c..2eb0b38384dd 100644
2483     --- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
2484     +++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
2485     @@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
2486     bool is92c;
2487     int err;
2488     u8 tmp_u1b;
2489     + unsigned long flags;
2490    
2491     rtlpci->being_init_adapter = true;
2492     +
2493     + /* Since this function can take a very long time (up to 350 ms)
2494     + * and can be called with irqs disabled, reenable the irqs
2495     + * to let the other devices continue being serviced.
2496     + *
2497     + * It is safe doing so since our own interrupts will only be enabled
2498     + * in a subsequent step.
2499     + */
2500     + local_save_flags(flags);
2501     + local_irq_enable();
2502     +
2503     rtlpriv->intf_ops->disable_aspm(hw);
2504     rtstatus = _rtl92ce_init_mac(hw);
2505     if (!rtstatus) {
2506     RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
2507     err = 1;
2508     - return err;
2509     + goto exit;
2510     }
2511    
2512     err = rtl92c_download_fw(hw);
2513     @@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
2514     RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2515     "Failed to download FW. Init HW without FW now..\n");
2516     err = 1;
2517     - return err;
2518     + goto exit;
2519     }
2520    
2521     rtlhal->last_hmeboxnum = 0;
2522     @@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
2523     RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
2524     }
2525     rtl92c_dm_init(hw);
2526     +exit:
2527     + local_irq_restore(flags);
2528     rtlpci->being_init_adapter = false;
2529     return err;
2530     }
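The pattern here generalizes: an init that can take hundreds of milliseconds and may be entered with interrupts disabled re-enables them for its duration, then restores the caller's exact state, which is safe only because the device's own interrupts stay masked until a later step. A minimal sketch (example_long_init is hypothetical):

	static int example_long_init(void)
	{
		unsigned long flags;
		int err = 0;

		local_save_flags(flags);	/* remember caller's irq state */
		local_irq_enable();		/* let other devices be serviced */

		/* ... lengthy MAC/firmware bring-up, errors goto exit ... */

		local_irq_restore(flags);	/* put it back exactly as found */
		return err;
	}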
2531     diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
2532     index 729d5a101d62..1953c1680986 100644
2533     --- a/drivers/pci/host/pci-mvebu.c
2534     +++ b/drivers/pci/host/pci-mvebu.c
2535     @@ -56,14 +56,6 @@
2536     #define PCIE_DEBUG_CTRL 0x1a60
2537     #define PCIE_DEBUG_SOFT_RESET BIT(20)
2538    
2539     -/*
2540     - * This product ID is registered by Marvell, and used when the Marvell
2541     - * SoC is not the root complex, but an endpoint on the PCIe bus. It is
2542     - * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
2543     - * bridge.
2544     - */
2545     -#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
2546     -
2547     /* PCI configuration space of a PCI-to-PCI bridge */
2548     struct mvebu_sw_pci_bridge {
2549     u16 vendor;
2550     @@ -357,7 +349,8 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
2551    
2552     bridge->class = PCI_CLASS_BRIDGE_PCI;
2553     bridge->vendor = PCI_VENDOR_ID_MARVELL;
2554     - bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID;
2555     + bridge->device = readl(port->base + PCIE_DEV_ID_OFF) >> 16;
2556     + bridge->revision = readl(port->base + PCIE_DEV_REV_OFF) & 0xff;
2557     bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
2558     bridge->cache_line_size = 0x10;
2559    
2560     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
2561     index bdd64b1b4817..863bc4bb4806 100644
2562     --- a/drivers/pci/pci.c
2563     +++ b/drivers/pci/pci.c
2564     @@ -1120,6 +1120,8 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
2565     static int do_pci_enable_device(struct pci_dev *dev, int bars)
2566     {
2567     int err;
2568     + u16 cmd;
2569     + u8 pin;
2570    
2571     err = pci_set_power_state(dev, PCI_D0);
2572     if (err < 0 && err != -EIO)
2573     @@ -1129,6 +1131,14 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
2574     return err;
2575     pci_fixup_device(pci_fixup_enable, dev);
2576    
2577     + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
2578     + if (pin) {
2579     + pci_read_config_word(dev, PCI_COMMAND, &cmd);
2580     + if (cmd & PCI_COMMAND_INTX_DISABLE)
2581     + pci_write_config_word(dev, PCI_COMMAND,
2582     + cmd & ~PCI_COMMAND_INTX_DISABLE);
2583     + }
2584     +
2585     return 0;
2586     }
2587    
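The new block un-breaks devices whose firmware left legacy interrupts masked: if the function reports an interrupt pin, enable-time now clears PCI_COMMAND_INTX_DISABLE. The same logic pulled out as a hypothetical helper:

	static void example_enable_intx(struct pci_dev *dev)
	{
		u16 cmd;
		u8 pin;

		pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
		if (!pin)
			return;		/* no legacy interrupt routed */

		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}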
2588     diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
2589     index b9f2653e4ef9..b5b11a545e14 100644
2590     --- a/drivers/regulator/da9063-regulator.c
2591     +++ b/drivers/regulator/da9063-regulator.c
2592     @@ -1,3 +1,4 @@
2593     +
2594     /*
2595     * Regulator driver for DA9063 PMIC series
2596     *
2597     @@ -60,7 +61,8 @@ struct da9063_regulator_info {
2598     .desc.ops = &da9063_ldo_ops, \
2599     .desc.min_uV = (min_mV) * 1000, \
2600     .desc.uV_step = (step_mV) * 1000, \
2601     - .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \
2602     + .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
2603     + + (DA9063_V##regl_name##_BIAS)), \
2604     .desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
2605     .desc.enable_mask = DA9063_LDO_EN, \
2606     .desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
2607     diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
2608     index 3bb0a1d1622a..e4fa6fb7e72a 100644
2609     --- a/drivers/scsi/qla2xxx/qla_target.c
2610     +++ b/drivers/scsi/qla2xxx/qla_target.c
2611     @@ -3186,7 +3186,8 @@ restart:
2612     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
2613     "SRR cmd %p (se_cmd %p, tag %d, op %x), "
2614     "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
2615     - se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
2616     + se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
2617     + cmd->sg_cnt, cmd->offset);
2618    
2619     qlt_handle_srr(vha, sctio, imm);
2620    
2621     diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
2622     index 98ac020bf912..69fd236345cb 100644
2623     --- a/drivers/staging/android/binder.c
2624     +++ b/drivers/staging/android/binder.c
2625     @@ -2903,7 +2903,7 @@ static int binder_node_release(struct binder_node *node, int refs)
2626     refs++;
2627    
2628     if (!ref->death)
2629     - goto out;
2630     + continue;
2631    
2632     death++;
2633    
2634     @@ -2916,7 +2916,6 @@ static int binder_node_release(struct binder_node *node, int refs)
2635     BUG();
2636     }
2637    
2638     -out:
2639     binder_debug(BINDER_DEBUG_DEAD_BINDER,
2640     "node %d now dead, refs %d, death %d\n",
2641     node->debug_id, refs, death);
2642     diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2643     index 606d6f059972..85f692ddd992 100644
2644     --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2645     +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
2646     @@ -55,6 +55,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
2647     /****** 8188EUS ********/
2648     {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
2649     {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
2650     + {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
2651     {} /* Terminating entry */
2652     };
2653    
2654     diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
2655     index 94c26acfd5a4..938426ae30de 100644
2656     --- a/drivers/usb/chipidea/udc.c
2657     +++ b/drivers/usb/chipidea/udc.c
2658     @@ -106,7 +106,7 @@ static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
2659    
2660     do {
2661     /* flush any pending transfer */
2662     - hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
2663     + hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
2664     while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
2665     cpu_relax();
2666     } while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
2667     @@ -206,7 +206,7 @@ static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
2668     if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
2669     return -EAGAIN;
2670    
2671     - hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
2672     + hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
2673    
2674     while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
2675     cpu_relax();
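hw_write() in this driver is a read-modify-write: bits outside the mask are read back and written again. ENDPTPRIME and ENDPTFLUSH are write-one-to-trigger registers, so rewriting another endpoint's still-pending bit re-primes or re-flushes that endpoint; with the mask widened to ~0 the write becomes BIT(n) with zeros elsewhere, and written zeros are ignored by the hardware. Worked example (register values assumed for illustration):

	static inline u32 rmw(u32 old, u32 mask, u32 data)
	{
		return (old & ~mask) | (data & mask);	/* what hw_write() computes */
	}
	/* old = 0x5 (endpoints 0 and 2 pending), n = 1:
	 *	rmw(0x5, BIT(1), BIT(1)) == 0x7	: re-triggers endpoints 0 and 2
	 *	rmw(0x5, ~0u,    BIT(1)) == 0x2	: triggers only endpoint 1
	 */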
2676     diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
2677     index c58fcf1ebe41..95d163cfb626 100644
2678     --- a/drivers/usb/gadget/bcm63xx_udc.c
2679     +++ b/drivers/usb/gadget/bcm63xx_udc.c
2680     @@ -361,24 +361,30 @@ static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
2681     bcm_writel(val, udc->iudma_regs + off);
2682     }
2683    
2684     -static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
2685     +static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
2686     {
2687     - return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
2688     + return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
2689     + (ENETDMA_CHAN_WIDTH * chan));
2690     }
2691    
2692     -static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
2693     +static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
2694     + int chan)
2695     {
2696     - bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
2697     + bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
2698     + (ENETDMA_CHAN_WIDTH * chan));
2699     }
2700    
2701     -static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
2702     +static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
2703     {
2704     - return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
2705     + return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
2706     + (ENETDMA_CHAN_WIDTH * chan));
2707     }
2708    
2709     -static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
2710     +static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
2711     + int chan)
2712     {
2713     - bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
2714     + bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
2715     + (ENETDMA_CHAN_WIDTH * chan));
2716     }
2717    
2718     static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
2719     @@ -639,7 +645,7 @@ static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
2720     } while (!last_bd);
2721    
2722     usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
2723     - ENETDMAC_CHANCFG_REG(iudma->ch_idx));
2724     + ENETDMAC_CHANCFG_REG, iudma->ch_idx);
2725     }
2726    
2727     /**
2728     @@ -695,9 +701,9 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
2729     bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
2730    
2731     /* stop DMA, then wait for the hardware to wrap up */
2732     - usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
2733     + usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
2734    
2735     - while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
2736     + while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
2737     ENETDMAC_CHANCFG_EN_MASK) {
2738     udelay(1);
2739    
2740     @@ -714,10 +720,10 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
2741     dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
2742     ch_idx);
2743     usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
2744     - ENETDMAC_CHANCFG_REG(ch_idx));
2745     + ENETDMAC_CHANCFG_REG, ch_idx);
2746     }
2747     }
2748     - usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
2749     + usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
2750    
2751     /* don't leave "live" HW-owned entries for the next guy to step on */
2752     for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
2753     @@ -729,11 +735,11 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
2754    
2755     /* set up IRQs, UBUS burst size, and BD base for this channel */
2756     usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2757     - ENETDMAC_IRMASK_REG(ch_idx));
2758     - usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
2759     + ENETDMAC_IRMASK_REG, ch_idx);
2760     + usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
2761    
2762     - usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
2763     - usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
2764     + usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
2765     + usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
2766     }
2767    
2768     /**
2769     @@ -2036,7 +2042,7 @@ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
2770     spin_lock(&udc->lock);
2771    
2772     usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
2773     - ENETDMAC_IR_REG(iudma->ch_idx));
2774     + ENETDMAC_IR_REG, iudma->ch_idx);
2775     bep = iudma->bep;
2776     rc = iudma_read(udc, iudma);
2777    
2778     @@ -2176,18 +2182,18 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
2779     seq_printf(s, " [ep%d]:\n",
2780     max_t(int, iudma_defaults[ch_idx].ep_num, 0));
2781     seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
2782     - usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
2783     - usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
2784     - usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
2785     - usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
2786     + usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
2787     + usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
2788     + usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
2789     + usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
2790    
2791     - sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
2792     - sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
2793     + sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
2794     + sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
2795     seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
2796     - usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
2797     + usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
2798     sram2 >> 16, sram2 & 0xffff,
2799     sram3 >> 16, sram3 & 0xffff,
2800     - usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
2801     + usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
2802     seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
2803     iudma->n_bds);
2804    
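The hunks above rework the bcm63xx_udc register helpers so the channel index is passed as a separate argument and the per-channel address is computed as base + register offset + ENETDMA_CHAN_WIDTH * chan. A minimal userspace sketch of that addressing scheme (the 0x10 stride and the base/offset values here are illustrative assumptions, not the driver's real constants):

    #include <stdio.h>
    #include <stdint.h>

    #define CHAN_WIDTH 0x10  /* assumed per-channel register stride */

    /* Compute a per-channel register address the same way the patched
     * helpers do: shared base, fixed register offset, channel stride. */
    static uint32_t chan_reg(uint32_t base, uint32_t off, int chan)
    {
        return base + off + CHAN_WIDTH * chan;
    }

    int main(void)
    {
        /* e.g. a CHANCFG-style register (offset 0x0) of channels 0..3 */
        for (int chan = 0; chan < 4; chan++)
            printf("chan %d -> 0x%08x\n", chan,
                   (unsigned)chan_reg(0x1000, 0x0, chan));
        return 0;
    }

The same stride computation is what replaces every former REG(ch_idx) macro call in the rest of this diff.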
2805     diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
2806     index 86ab9fd9fe9e..784f6242b70e 100644
2807     --- a/drivers/usb/host/ehci-hcd.c
2808     +++ b/drivers/usb/host/ehci-hcd.c
2809     @@ -682,8 +682,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
2810     struct ehci_hcd *ehci = hcd_to_ehci (hcd);
2811     u32 status, masked_status, pcd_status = 0, cmd;
2812     int bh;
2813     + unsigned long flags;
2814    
2815     - spin_lock (&ehci->lock);
2816     + /*
2817     + * For the threadirqs option we use the spin_lock_irqsave() variant to
2818     + * prevent a deadlock with the ehci hrtimer callback, since hrtimer
2819     + * callbacks run in interrupt context even with threadirqs. We can go
2820     + * back to the spin_lock() variant once hrtimer callbacks are threaded.
2821     + */
2822     + spin_lock_irqsave(&ehci->lock, flags);
2823    
2824     status = ehci_readl(ehci, &ehci->regs->status);
2825    
2826     @@ -701,7 +708,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
2827    
2828     /* Shared IRQ? */
2829     if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
2830     - spin_unlock(&ehci->lock);
2831     + spin_unlock_irqrestore(&ehci->lock, flags);
2832     return IRQ_NONE;
2833     }
2834    
2835     @@ -819,7 +826,7 @@ dead:
2836    
2837     if (bh)
2838     ehci_work (ehci);
2839     - spin_unlock (&ehci->lock);
2840     + spin_unlock_irqrestore(&ehci->lock, flags);
2841     if (pcd_status)
2842     usb_hcd_poll_rh_status(hcd);
2843     return IRQ_HANDLED;
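The comment in this hunk explains the locking change: with the threadirqs boot option the ISR runs in a kernel thread, but hrtimer callbacks still run in hard-interrupt context, so the handler must disable interrupts around ehci->lock. A runnable userspace analogue of the spin_lock_irqsave() idea, using a blocked signal to stand in for the interrupt (the signal choice and names are illustrative, not kernel API):

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Stands in for the hrtimer callback: may fire in "interrupt"
     * context whenever the signal is unmasked. */
    static void fake_hrtimer(int sig)
    {
        (void)sig;
    }

    int main(void)
    {
        struct sigaction sa = { .sa_handler = fake_hrtimer };
        sigaction(SIGALRM, &sa, NULL);
        alarm(1);                       /* schedule the "interrupt" */

        sigset_t blocked, saved;
        sigemptyset(&blocked);
        sigaddset(&blocked, SIGALRM);

        /* "spin_lock_irqsave": mask the interrupt before locking, so
         * the handler cannot preempt us while we hold the lock */
        pthread_sigmask(SIG_BLOCK, &blocked, &saved);
        pthread_mutex_lock(&lock);
        /* ... critical section shared with the "interrupt" ... */
        pthread_mutex_unlock(&lock);
        pthread_sigmask(SIG_SETMASK, &saved, NULL);  /* "irqrestore" */

        sleep(2);                       /* let the signal fire unmasked */
        puts("done");
        return 0;
    }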
2844     diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
2845     index 835fc0844a66..1bb85bee2625 100644
2846     --- a/drivers/usb/host/ehci-hub.c
2847     +++ b/drivers/usb/host/ehci-hub.c
2848     @@ -238,6 +238,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
2849     int port;
2850     int mask;
2851     int changed;
2852     + bool fs_idle_delay;
2853    
2854     ehci_dbg(ehci, "suspend root hub\n");
2855    
2856     @@ -272,6 +273,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
2857     ehci->bus_suspended = 0;
2858     ehci->owned_ports = 0;
2859     changed = 0;
2860     + fs_idle_delay = false;
2861     port = HCS_N_PORTS(ehci->hcs_params);
2862     while (port--) {
2863     u32 __iomem *reg = &ehci->regs->port_status [port];
2864     @@ -300,16 +302,32 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
2865     }
2866    
2867     if (t1 != t2) {
2868     + /*
2869     + * On some controllers, Wake-On-Disconnect will
2870     + * generate false wakeup signals until the bus
2871     + * switches over to full-speed idle. For their
2872     + * sake, add a delay if we need one.
2873     + */
2874     + if ((t2 & PORT_WKDISC_E) &&
2875     + ehci_port_speed(ehci, t2) ==
2876     + USB_PORT_STAT_HIGH_SPEED)
2877     + fs_idle_delay = true;
2878     ehci_writel(ehci, t2, reg);
2879     changed = 1;
2880     }
2881     }
2882     + spin_unlock_irq(&ehci->lock);
2883     +
2884     + if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
2885     + /*
2886     + * Wait for HCD to enter low-power mode or for the bus
2887     + * to switch to full-speed idle.
2888     + */
2889     + usleep_range(5000, 5500);
2890     + }
2891    
2892     if (changed && ehci->has_tdi_phy_lpm) {
2893     - spin_unlock_irq(&ehci->lock);
2894     - msleep(5); /* 5 ms for HCD to enter low-power mode */
2895     spin_lock_irq(&ehci->lock);
2896     -
2897     port = HCS_N_PORTS(ehci->hcs_params);
2898     while (port--) {
2899     u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
2900     @@ -322,8 +340,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
2901     port, (t3 & HOSTPC_PHCD) ?
2902     "succeeded" : "failed");
2903     }
2904     + spin_unlock_irq(&ehci->lock);
2905     }
2906     - spin_unlock_irq(&ehci->lock);
2907    
2908     /* Apparently some devices need a >= 1-uframe delay here */
2909     if (ehci->bus_suspended)
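This hunk drops the lock once and performs a single 5-5.5 ms wait that covers both cases: the PHY entering low-power mode and, on controllers with the Wake-On-Disconnect quirk, the bus settling into full-speed idle. A small sketch of the merged condition and delay (the flag values are made up for illustration):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* One 5-5.5 ms wait replaces the old unconditional msleep(5) and
     * also covers the full-speed-idle settling case. */
    static void wait_5ms(void)
    {
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 5 * 1000 * 1000 };
        nanosleep(&ts, NULL);
    }

    int main(void)
    {
        bool changed = true;            /* some port status was rewritten */
        bool has_tdi_phy_lpm = false;   /* controller capability */
        bool fs_idle_delay = true;      /* WKDISC set on a high-speed port */

        if ((changed && has_tdi_phy_lpm) || fs_idle_delay)
            wait_5ms();                 /* merged condition from the patch */
        puts("root hub suspend continues");
        return 0;
    }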
2910     diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2911     index eea9e7b6af4c..b7f715fead15 100644
2912     --- a/drivers/usb/serial/ftdi_sio.c
2913     +++ b/drivers/usb/serial/ftdi_sio.c
2914     @@ -908,6 +908,8 @@ static struct usb_device_id id_table_combined [] = {
2915     /* Crucible Devices */
2916     { USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
2917     { USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
2918     + /* Cressi Devices */
2919     + { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
2920     { } /* Terminating entry */
2921     };
2922    
2923     diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2924     index 1e2d369df86e..e599fbfcde5f 100644
2925     --- a/drivers/usb/serial/ftdi_sio_ids.h
2926     +++ b/drivers/usb/serial/ftdi_sio_ids.h
2927     @@ -1320,3 +1320,9 @@
2928     * Manufacturer: Smart GSM Team
2929     */
2930     #define FTDI_Z3X_PID 0x0011
2931     +
2932     +/*
2933     + * Product: Cressi PC Interface
2934     + * Manufacturer: Cressi
2935     + */
2936     +#define FTDI_CRESSI_PID 0x87d0
2937     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2938     index 216d20affba8..68fc9fe65936 100644
2939     --- a/drivers/usb/serial/option.c
2940     +++ b/drivers/usb/serial/option.c
2941     @@ -1526,7 +1526,8 @@ static const struct usb_device_id option_ids[] = {
2942     /* Cinterion */
2943     { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
2944     { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
2945     - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
2946     + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
2947     + .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2948     { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
2949     { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
2950     .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
2951     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
2952     index 831eb4fd197d..b12176f2013c 100644
2953     --- a/drivers/vhost/net.c
2954     +++ b/drivers/vhost/net.c
2955     @@ -70,7 +70,12 @@ enum {
2956     };
2957    
2958     struct vhost_net_ubuf_ref {
2959     - struct kref kref;
2960     + /* refcount follows semantics similar to kref:
2961     + * 0: object is released
2962     + * 1: no outstanding ubufs
2963     + * >1: outstanding ubufs
2964     + */
2965     + atomic_t refcount;
2966     wait_queue_head_t wait;
2967     struct vhost_virtqueue *vq;
2968     };
2969     @@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
2970     vhost_net_zcopy_mask |= 0x1 << vq;
2971     }
2972    
2973     -static void vhost_net_zerocopy_done_signal(struct kref *kref)
2974     -{
2975     - struct vhost_net_ubuf_ref *ubufs;
2976     -
2977     - ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
2978     - wake_up(&ubufs->wait);
2979     -}
2980     -
2981     static struct vhost_net_ubuf_ref *
2982     vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
2983     {
2984     @@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
2985     ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
2986     if (!ubufs)
2987     return ERR_PTR(-ENOMEM);
2988     - kref_init(&ubufs->kref);
2989     + atomic_set(&ubufs->refcount, 1);
2990     init_waitqueue_head(&ubufs->wait);
2991     ubufs->vq = vq;
2992     return ubufs;
2993     }
2994    
2995     -static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
2996     +static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
2997     {
2998     - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
2999     + int r = atomic_sub_return(1, &ubufs->refcount);
3000     + if (unlikely(!r))
3001     + wake_up(&ubufs->wait);
3002     + return r;
3003     }
3004    
3005     static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
3006     {
3007     - kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
3008     - wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
3009     + vhost_net_ubuf_put(ubufs);
3010     + wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
3011     }
3012    
3013     static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
3014     @@ -306,22 +306,21 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
3015     {
3016     struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
3017     struct vhost_virtqueue *vq = ubufs->vq;
3018     - int cnt = atomic_read(&ubufs->kref.refcount);
3019     + int cnt;
3020    
3021     /* set len to mark this desc's buffers as done with DMA */
3022     vq->heads[ubuf->desc].len = success ?
3023     VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
3024     - vhost_net_ubuf_put(ubufs);
3025     + cnt = vhost_net_ubuf_put(ubufs);
3026    
3027     /*
3028     * Trigger polling thread if guest stopped submitting new buffers:
3029     - * in this case, the refcount after decrement will eventually reach 1
3030     - * so here it is 2.
3031     + * in this case, the refcount after decrement will eventually reach 1.
3032     * We also trigger polling periodically after every 16 packets
3033     * (the value 16 here is more or less arbitrary; it's tuned to trigger
3034     * less than 10% of the time).
3035     */
3036     - if (cnt <= 2 || !(cnt % 16))
3037     + if (cnt <= 1 || !(cnt % 16))
3038     vhost_poll_queue(&vq->poll);
3039     }
3040    
3041     @@ -420,7 +419,7 @@ static void handle_tx(struct vhost_net *net)
3042     msg.msg_control = ubuf;
3043     msg.msg_controllen = sizeof(ubuf);
3044     ubufs = nvq->ubufs;
3045     - kref_get(&ubufs->kref);
3046     + atomic_inc(&ubufs->refcount);
3047     nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
3048     } else {
3049     msg.msg_control = NULL;
3050     @@ -785,7 +784,7 @@ static void vhost_net_flush(struct vhost_net *n)
3051     vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
3052     mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
3053     n->tx_flush = false;
3054     - kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
3055     + atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
3056     mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
3057     }
3058     }
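The vhost change replaces the kref with an open-coded atomic counter whose value encodes state (0 released, 1 no outstanding ubufs, >1 outstanding), letting the put path return the new count so the completion callback can decide when to kick the virtqueue poller. A runnable C11 model of those semantics (simplified: the real code wakes a waitqueue rather than printing):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Counter states, as documented in the patched struct:
     *   0  -> object released
     *   1  -> no outstanding ubufs
     *   >1 -> outstanding ubufs
     */
    static atomic_int refcount;

    /* Like the new vhost_net_ubuf_put(): returns the post-decrement
     * count; a result of 0 means the last reference is gone. */
    static int put(void)
    {
        int r = atomic_fetch_sub(&refcount, 1) - 1;
        if (r == 0)
            puts("wake_up(&ubufs->wait)");   /* placeholder wakeup */
        return r;
    }

    int main(void)
    {
        atomic_init(&refcount, 1);       /* alloc: no outstanding ubufs */
        atomic_fetch_add(&refcount, 1);  /* handle_tx: zerocopy submitted */

        int cnt = put();                 /* zerocopy completion callback */
        if (cnt <= 1 || !(cnt % 16))     /* threshold from the patch */
            puts("vhost_poll_queue(&vq->poll)");

        put();                           /* final put: count drops to 0 */
        return 0;
    }

Returning the count from put() is also why the comparison in vhost_zerocopy_callback() changes from "cnt <= 2" to "cnt <= 1": the value is now read after the decrement instead of before it.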
3059     diff --git a/fs/attr.c b/fs/attr.c
3060     index 1449adb14ef6..8dd5825ec708 100644
3061     --- a/fs/attr.c
3062     +++ b/fs/attr.c
3063     @@ -182,11 +182,6 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
3064     return -EPERM;
3065     }
3066    
3067     - if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
3068     - if (attr->ia_size != inode->i_size)
3069     - inode_inc_iversion(inode);
3070     - }
3071     -
3072     if ((ia_valid & ATTR_MODE)) {
3073     umode_t amode = attr->ia_mode;
3074     /* Flag setting protected by i_mutex */
3075     diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
3076     index 6dea2b90b4d5..76273c1d26a6 100644
3077     --- a/fs/bio-integrity.c
3078     +++ b/fs/bio-integrity.c
3079     @@ -458,7 +458,7 @@ static int bio_integrity_verify(struct bio *bio)
3080     bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
3081     bix.sector_size = bi->sector_size;
3082    
3083     - bio_for_each_segment(bv, bio, i) {
3084     + bio_for_each_segment_all(bv, bio, i) {
3085     void *kaddr = kmap_atomic(bv->bv_page);
3086     bix.data_buf = kaddr + bv->bv_offset;
3087     bix.data_size = bv->bv_len;
3088     diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
3089     index 6aad98cb343f..6e9ff8fac75a 100644
3090     --- a/fs/btrfs/compression.c
3091     +++ b/fs/btrfs/compression.c
3092     @@ -1012,6 +1012,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
3093     bytes = min(bytes, working_bytes);
3094     kaddr = kmap_atomic(page_out);
3095     memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
3096     + if (*pg_index == (vcnt - 1) && *pg_offset == 0)
3097     + memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
3098     kunmap_atomic(kaddr);
3099     flush_dcache_page(page_out);
3100    
3101     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3102     index b544a44d696e..c1123ecde6c9 100644
3103     --- a/fs/btrfs/ctree.c
3104     +++ b/fs/btrfs/ctree.c
3105     @@ -39,7 +39,7 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
3106     struct extent_buffer *src_buf);
3107     static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
3108     int level, int slot);
3109     -static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
3110     +static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
3111     struct extent_buffer *eb);
3112     static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
3113    
3114     @@ -475,6 +475,8 @@ void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
3115     * the index is the shifted logical of the *new* root node for root replace
3116     * operations, or the shifted logical of the affected block for all other
3117     * operations.
3118     + *
3119     + * Note: must be called with write lock (tree_mod_log_write_lock).
3120     */
3121     static noinline int
3122     __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
3123     @@ -483,24 +485,9 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
3124     struct rb_node **new;
3125     struct rb_node *parent = NULL;
3126     struct tree_mod_elem *cur;
3127     - int ret = 0;
3128    
3129     BUG_ON(!tm);
3130    
3131     - tree_mod_log_write_lock(fs_info);
3132     - if (list_empty(&fs_info->tree_mod_seq_list)) {
3133     - tree_mod_log_write_unlock(fs_info);
3134     - /*
3135     - * Ok we no longer care about logging modifications, free up tm
3136     - * and return 0. Any callers shouldn't be using tm after
3137     - * calling tree_mod_log_insert, but if they do we can just
3138     - * change this to return a special error code to let the callers
3139     - * do their own thing.
3140     - */
3141     - kfree(tm);
3142     - return 0;
3143     - }
3144     -
3145     spin_lock(&fs_info->tree_mod_seq_lock);
3146     tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
3147     spin_unlock(&fs_info->tree_mod_seq_lock);
3148     @@ -518,18 +505,13 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
3149     new = &((*new)->rb_left);
3150     else if (cur->seq > tm->seq)
3151     new = &((*new)->rb_right);
3152     - else {
3153     - ret = -EEXIST;
3154     - kfree(tm);
3155     - goto out;
3156     - }
3157     + else
3158     + return -EEXIST;
3159     }
3160    
3161     rb_link_node(&tm->node, parent, new);
3162     rb_insert_color(&tm->node, tm_root);
3163     -out:
3164     - tree_mod_log_write_unlock(fs_info);
3165     - return ret;
3166     + return 0;
3167     }
3168    
3169     /*
3170     @@ -545,19 +527,38 @@ static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
3171     return 1;
3172     if (eb && btrfs_header_level(eb) == 0)
3173     return 1;
3174     +
3175     + tree_mod_log_write_lock(fs_info);
3176     + if (list_empty(&(fs_info)->tree_mod_seq_list)) {
3177     + tree_mod_log_write_unlock(fs_info);
3178     + return 1;
3179     + }
3180     +
3181     return 0;
3182     }
3183    
3184     -static inline int
3185     -__tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
3186     - struct extent_buffer *eb, int slot,
3187     - enum mod_log_op op, gfp_t flags)
3188     +/* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
3189     +static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
3190     + struct extent_buffer *eb)
3191     +{
3192     + smp_mb();
3193     + if (list_empty(&(fs_info)->tree_mod_seq_list))
3194     + return 0;
3195     + if (eb && btrfs_header_level(eb) == 0)
3196     + return 0;
3197     +
3198     + return 1;
3199     +}
3200     +
3201     +static struct tree_mod_elem *
3202     +alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
3203     + enum mod_log_op op, gfp_t flags)
3204     {
3205     struct tree_mod_elem *tm;
3206    
3207     tm = kzalloc(sizeof(*tm), flags);
3208     if (!tm)
3209     - return -ENOMEM;
3210     + return NULL;
3211    
3212     tm->index = eb->start >> PAGE_CACHE_SHIFT;
3213     if (op != MOD_LOG_KEY_ADD) {
3214     @@ -567,8 +568,9 @@ __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
3215     tm->op = op;
3216     tm->slot = slot;
3217     tm->generation = btrfs_node_ptr_generation(eb, slot);
3218     + RB_CLEAR_NODE(&tm->node);
3219    
3220     - return __tree_mod_log_insert(fs_info, tm);
3221     + return tm;
3222     }
3223    
3224     static noinline int
3225     @@ -576,10 +578,27 @@ tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
3226     struct extent_buffer *eb, int slot,
3227     enum mod_log_op op, gfp_t flags)
3228     {
3229     - if (tree_mod_dont_log(fs_info, eb))
3230     + struct tree_mod_elem *tm;
3231     + int ret;
3232     +
3233     + if (!tree_mod_need_log(fs_info, eb))
3234     + return 0;
3235     +
3236     + tm = alloc_tree_mod_elem(eb, slot, op, flags);
3237     + if (!tm)
3238     + return -ENOMEM;
3239     +
3240     + if (tree_mod_dont_log(fs_info, eb)) {
3241     + kfree(tm);
3242     return 0;
3243     + }
3244     +
3245     + ret = __tree_mod_log_insert(fs_info, tm);
3246     + tree_mod_log_write_unlock(fs_info);
3247     + if (ret)
3248     + kfree(tm);
3249    
3250     - return __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
3251     + return ret;
3252     }
3253    
3254     static noinline int
3255     @@ -587,53 +606,95 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
3256     struct extent_buffer *eb, int dst_slot, int src_slot,
3257     int nr_items, gfp_t flags)
3258     {
3259     - struct tree_mod_elem *tm;
3260     - int ret;
3261     + struct tree_mod_elem *tm = NULL;
3262     + struct tree_mod_elem **tm_list = NULL;
3263     + int ret = 0;
3264     int i;
3265     + int locked = 0;
3266    
3267     - if (tree_mod_dont_log(fs_info, eb))
3268     + if (!tree_mod_need_log(fs_info, eb))
3269     return 0;
3270    
3271     + tm_list = kzalloc(nr_items * sizeof(struct tree_mod_elem *), flags);
3272     + if (!tm_list)
3273     + return -ENOMEM;
3274     +
3275     + tm = kzalloc(sizeof(*tm), flags);
3276     + if (!tm) {
3277     + ret = -ENOMEM;
3278     + goto free_tms;
3279     + }
3280     +
3281     + tm->index = eb->start >> PAGE_CACHE_SHIFT;
3282     + tm->slot = src_slot;
3283     + tm->move.dst_slot = dst_slot;
3284     + tm->move.nr_items = nr_items;
3285     + tm->op = MOD_LOG_MOVE_KEYS;
3286     +
3287     + for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
3288     + tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
3289     + MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
3290     + if (!tm_list[i]) {
3291     + ret = -ENOMEM;
3292     + goto free_tms;
3293     + }
3294     + }
3295     +
3296     + if (tree_mod_dont_log(fs_info, eb))
3297     + goto free_tms;
3298     + locked = 1;
3299     +
3300     /*
3301     * When we override something during the move, we log these removals.
3302     * This can only happen when we move towards the beginning of the
3303     * buffer, i.e. dst_slot < src_slot.
3304     */
3305     for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
3306     - ret = __tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
3307     - MOD_LOG_KEY_REMOVE_WHILE_MOVING, GFP_NOFS);
3308     - BUG_ON(ret < 0);
3309     + ret = __tree_mod_log_insert(fs_info, tm_list[i]);
3310     + if (ret)
3311     + goto free_tms;
3312     }
3313    
3314     - tm = kzalloc(sizeof(*tm), flags);
3315     - if (!tm)
3316     - return -ENOMEM;
3317     + ret = __tree_mod_log_insert(fs_info, tm);
3318     + if (ret)
3319     + goto free_tms;
3320     + tree_mod_log_write_unlock(fs_info);
3321     + kfree(tm_list);
3322    
3323     - tm->index = eb->start >> PAGE_CACHE_SHIFT;
3324     - tm->slot = src_slot;
3325     - tm->move.dst_slot = dst_slot;
3326     - tm->move.nr_items = nr_items;
3327     - tm->op = MOD_LOG_MOVE_KEYS;
3328     + return 0;
3329     +free_tms:
3330     + for (i = 0; i < nr_items; i++) {
3331     + if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
3332     + rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
3333     + kfree(tm_list[i]);
3334     + }
3335     + if (locked)
3336     + tree_mod_log_write_unlock(fs_info);
3337     + kfree(tm_list);
3338     + kfree(tm);
3339    
3340     - return __tree_mod_log_insert(fs_info, tm);
3341     + return ret;
3342     }
3343    
3344     -static inline void
3345     -__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
3346     +static inline int
3347     +__tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
3348     + struct tree_mod_elem **tm_list,
3349     + int nritems)
3350     {
3351     - int i;
3352     - u32 nritems;
3353     + int i, j;
3354     int ret;
3355    
3356     - if (btrfs_header_level(eb) == 0)
3357     - return;
3358     -
3359     - nritems = btrfs_header_nritems(eb);
3360     for (i = nritems - 1; i >= 0; i--) {
3361     - ret = __tree_mod_log_insert_key(fs_info, eb, i,
3362     - MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
3363     - BUG_ON(ret < 0);
3364     + ret = __tree_mod_log_insert(fs_info, tm_list[i]);
3365     + if (ret) {
3366     + for (j = nritems - 1; j > i; j--)
3367     + rb_erase(&tm_list[j]->node,
3368     + &fs_info->tree_mod_log);
3369     + return ret;
3370     + }
3371     }
3372     +
3373     + return 0;
3374     }
3375    
3376     static noinline int
3377     @@ -642,17 +703,38 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
3378     struct extent_buffer *new_root, gfp_t flags,
3379     int log_removal)
3380     {
3381     - struct tree_mod_elem *tm;
3382     + struct tree_mod_elem *tm = NULL;
3383     + struct tree_mod_elem **tm_list = NULL;
3384     + int nritems = 0;
3385     + int ret = 0;
3386     + int i;
3387    
3388     - if (tree_mod_dont_log(fs_info, NULL))
3389     + if (!tree_mod_need_log(fs_info, NULL))
3390     return 0;
3391    
3392     - if (log_removal)
3393     - __tree_mod_log_free_eb(fs_info, old_root);
3394     + if (log_removal && btrfs_header_level(old_root) > 0) {
3395     + nritems = btrfs_header_nritems(old_root);
3396     + tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
3397     + flags);
3398     + if (!tm_list) {
3399     + ret = -ENOMEM;
3400     + goto free_tms;
3401     + }
3402     + for (i = 0; i < nritems; i++) {
3403     + tm_list[i] = alloc_tree_mod_elem(old_root, i,
3404     + MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
3405     + if (!tm_list[i]) {
3406     + ret = -ENOMEM;
3407     + goto free_tms;
3408     + }
3409     + }
3410     + }
3411    
3412     tm = kzalloc(sizeof(*tm), flags);
3413     - if (!tm)
3414     - return -ENOMEM;
3415     + if (!tm) {
3416     + ret = -ENOMEM;
3417     + goto free_tms;
3418     + }
3419    
3420     tm->index = new_root->start >> PAGE_CACHE_SHIFT;
3421     tm->old_root.logical = old_root->start;
3422     @@ -660,7 +742,30 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
3423     tm->generation = btrfs_header_generation(old_root);
3424     tm->op = MOD_LOG_ROOT_REPLACE;
3425    
3426     - return __tree_mod_log_insert(fs_info, tm);
3427     + if (tree_mod_dont_log(fs_info, NULL))
3428     + goto free_tms;
3429     +
3430     + if (tm_list)
3431     + ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
3432     + if (!ret)
3433     + ret = __tree_mod_log_insert(fs_info, tm);
3434     +
3435     + tree_mod_log_write_unlock(fs_info);
3436     + if (ret)
3437     + goto free_tms;
3438     + kfree(tm_list);
3439     +
3440     + return ret;
3441     +
3442     +free_tms:
3443     + if (tm_list) {
3444     + for (i = 0; i < nritems; i++)
3445     + kfree(tm_list[i]);
3446     + kfree(tm_list);
3447     + }
3448     + kfree(tm);
3449     +
3450     + return ret;
3451     }
3452    
3453     static struct tree_mod_elem *
3454     @@ -729,31 +834,75 @@ tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
3455     return __tree_mod_log_search(fs_info, start, min_seq, 0);
3456     }
3457    
3458     -static noinline void
3459     +static noinline int
3460     tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
3461     struct extent_buffer *src, unsigned long dst_offset,
3462     unsigned long src_offset, int nr_items)
3463     {
3464     - int ret;
3465     + int ret = 0;
3466     + struct tree_mod_elem **tm_list = NULL;
3467     + struct tree_mod_elem **tm_list_add, **tm_list_rem;
3468     int i;
3469     + int locked = 0;
3470    
3471     - if (tree_mod_dont_log(fs_info, NULL))
3472     - return;
3473     + if (!tree_mod_need_log(fs_info, NULL))
3474     + return 0;
3475    
3476     if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
3477     - return;
3478     + return 0;
3479    
3480     + tm_list = kzalloc(nr_items * 2 * sizeof(struct tree_mod_elem *),
3481     + GFP_NOFS);
3482     + if (!tm_list)
3483     + return -ENOMEM;
3484     +
3485     + tm_list_add = tm_list;
3486     + tm_list_rem = tm_list + nr_items;
3487     for (i = 0; i < nr_items; i++) {
3488     - ret = __tree_mod_log_insert_key(fs_info, src,
3489     - i + src_offset,
3490     - MOD_LOG_KEY_REMOVE, GFP_NOFS);
3491     - BUG_ON(ret < 0);
3492     - ret = __tree_mod_log_insert_key(fs_info, dst,
3493     - i + dst_offset,
3494     - MOD_LOG_KEY_ADD,
3495     - GFP_NOFS);
3496     - BUG_ON(ret < 0);
3497     + tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
3498     + MOD_LOG_KEY_REMOVE, GFP_NOFS);
3499     + if (!tm_list_rem[i]) {
3500     + ret = -ENOMEM;
3501     + goto free_tms;
3502     + }
3503     +
3504     + tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
3505     + MOD_LOG_KEY_ADD, GFP_NOFS);
3506     + if (!tm_list_add[i]) {
3507     + ret = -ENOMEM;
3508     + goto free_tms;
3509     + }
3510     + }
3511     +
3512     + if (tree_mod_dont_log(fs_info, NULL))
3513     + goto free_tms;
3514     + locked = 1;
3515     +
3516     + for (i = 0; i < nr_items; i++) {
3517     + ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
3518     + if (ret)
3519     + goto free_tms;
3520     + ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
3521     + if (ret)
3522     + goto free_tms;
3523     + }
3524     +
3525     + tree_mod_log_write_unlock(fs_info);
3526     + kfree(tm_list);
3527     +
3528     + return 0;
3529     +
3530     +free_tms:
3531     + for (i = 0; i < nr_items * 2; i++) {
3532     + if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
3533     + rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
3534     + kfree(tm_list[i]);
3535     }
3536     + if (locked)
3537     + tree_mod_log_write_unlock(fs_info);
3538     + kfree(tm_list);
3539     +
3540     + return ret;
3541     }
3542    
3543     static inline void
3544     @@ -772,18 +921,58 @@ tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
3545     {
3546     int ret;
3547    
3548     - ret = __tree_mod_log_insert_key(fs_info, eb, slot,
3549     + ret = tree_mod_log_insert_key(fs_info, eb, slot,
3550     MOD_LOG_KEY_REPLACE,
3551     atomic ? GFP_ATOMIC : GFP_NOFS);
3552     BUG_ON(ret < 0);
3553     }
3554    
3555     -static noinline void
3556     +static noinline int
3557     tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
3558     {
3559     + struct tree_mod_elem **tm_list = NULL;
3560     + int nritems = 0;
3561     + int i;
3562     + int ret = 0;
3563     +
3564     + if (btrfs_header_level(eb) == 0)
3565     + return 0;
3566     +
3567     + if (!tree_mod_need_log(fs_info, NULL))
3568     + return 0;
3569     +
3570     + nritems = btrfs_header_nritems(eb);
3571     + tm_list = kzalloc(nritems * sizeof(struct tree_mod_elem *),
3572     + GFP_NOFS);
3573     + if (!tm_list)
3574     + return -ENOMEM;
3575     +
3576     + for (i = 0; i < nritems; i++) {
3577     + tm_list[i] = alloc_tree_mod_elem(eb, i,
3578     + MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
3579     + if (!tm_list[i]) {
3580     + ret = -ENOMEM;
3581     + goto free_tms;
3582     + }
3583     + }
3584     +
3585     if (tree_mod_dont_log(fs_info, eb))
3586     - return;
3587     - __tree_mod_log_free_eb(fs_info, eb);
3588     + goto free_tms;
3589     +
3590     + ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
3591     + tree_mod_log_write_unlock(fs_info);
3592     + if (ret)
3593     + goto free_tms;
3594     + kfree(tm_list);
3595     +
3596     + return 0;
3597     +
3598     +free_tms:
3599     + for (i = 0; i < nritems; i++)
3600     + kfree(tm_list[i]);
3601     + kfree(tm_list);
3602     +
3603     + return ret;
3604     }
3605    
3606     static noinline void
3607     @@ -1041,8 +1230,13 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
3608     btrfs_set_node_ptr_generation(parent, parent_slot,
3609     trans->transid);
3610     btrfs_mark_buffer_dirty(parent);
3611     - if (last_ref)
3612     - tree_mod_log_free_eb(root->fs_info, buf);
3613     + if (last_ref) {
3614     + ret = tree_mod_log_free_eb(root->fs_info, buf);
3615     + if (ret) {
3616     + btrfs_abort_transaction(trans, root, ret);
3617     + return ret;
3618     + }
3619     + }
3620     btrfs_free_tree_block(trans, root, buf, parent_start,
3621     last_ref);
3622     }
3623     @@ -3023,8 +3217,12 @@ static int push_node_left(struct btrfs_trans_handle *trans,
3624     } else
3625     push_items = min(src_nritems - 8, push_items);
3626    
3627     - tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3628     - push_items);
3629     + ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3630     + push_items);
3631     + if (ret) {
3632     + btrfs_abort_transaction(trans, root, ret);
3633     + return ret;
3634     + }
3635     copy_extent_buffer(dst, src,
3636     btrfs_node_key_ptr_offset(dst_nritems),
3637     btrfs_node_key_ptr_offset(0),
3638     @@ -3094,8 +3292,12 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
3639     (dst_nritems) *
3640     sizeof(struct btrfs_key_ptr));
3641    
3642     - tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3643     - src_nritems - push_items, push_items);
3644     + ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3645     + src_nritems - push_items, push_items);
3646     + if (ret) {
3647     + btrfs_abort_transaction(trans, root, ret);
3648     + return ret;
3649     + }
3650     copy_extent_buffer(dst, src,
3651     btrfs_node_key_ptr_offset(0),
3652     btrfs_node_key_ptr_offset(src_nritems - push_items),
3653     @@ -3296,7 +3498,12 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
3654     btrfs_header_chunk_tree_uuid(split),
3655     BTRFS_UUID_SIZE);
3656    
3657     - tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3658     + ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3659     + mid, c_nritems - mid);
3660     + if (ret) {
3661     + btrfs_abort_transaction(trans, root, ret);
3662     + return ret;
3663     + }
3664     copy_extent_buffer(split, c,
3665     btrfs_node_key_ptr_offset(0),
3666     btrfs_node_key_ptr_offset(mid),
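The ctree.c rework follows one pattern throughout: check tree_mod_need_log() without locks, allocate every tree_mod_elem up front, then hold tree_mod_log_write_lock only around the re-check and the rb-tree insertions, unwinding all allocations if anything fails. A compact userspace sketch of that allocate-outside/insert-under-lock shape (error handling simplified; the kernel additionally rb_erases partially inserted nodes):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool logging_enabled = true;

    /* Must be called with log_lock held (cf. the new comment on
     * __tree_mod_log_insert); nonzero means e.g. a duplicate key. */
    static int insert_locked(int *elem)
    {
        printf("logged item %d\n", *elem);
        return 0;
    }

    static int log_items(int nr)
    {
        int **list = calloc(nr, sizeof(*list));
        int ret = 0, i;

        if (!list)
            return -1;
        for (i = 0; i < nr; i++) {      /* allocate with no lock held */
            list[i] = malloc(sizeof(int));
            if (!list[i]) {
                ret = -1;
                goto free_all;
            }
            *list[i] = i;
        }

        pthread_mutex_lock(&log_lock);
        if (!logging_enabled) {         /* re-check under the lock */
            pthread_mutex_unlock(&log_lock);
            goto free_all;              /* nothing to log: ret stays 0 */
        }
        for (i = 0; i < nr; i++) {
            ret = insert_locked(list[i]);
            if (ret)
                break;                  /* kernel also unlinks the rest */
        }
        pthread_mutex_unlock(&log_lock);
        if (ret)
            goto free_all;

        free(list);                     /* elements now owned by the log */
        return 0;

    free_all:
        for (i = 0; i < nr; i++)
            free(list[i]);
        free(list);
        return ret;
    }

    int main(void)
    {
        return log_items(3) ? 1 : 0;
    }

The payoff is that the GFP_NOFS allocations can no longer fail while the write lock is held, which is what lets the callers replace the old BUG_ON(ret < 0) with real error returns and btrfs_abort_transaction().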
3667     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3668     index 1b63d29e44b7..3d03d2e0849c 100644
3669     --- a/fs/btrfs/inode.c
3670     +++ b/fs/btrfs/inode.c
3671     @@ -4354,8 +4354,12 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
3672     * these flags set. For all other operations the VFS sets these flags
3673     * explicitly if it wants a timestamp update.
3674     */
3675     - if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME))))
3676     - inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
3677     + if (newsize != oldsize) {
3678     + inode_inc_iversion(inode);
3679     + if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
3680     + inode->i_ctime = inode->i_mtime =
3681     + current_fs_time(inode->i_sb);
3682     + }
3683    
3684     if (newsize > oldsize) {
3685     truncate_pagecache(inode, newsize);
3686     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3687     index 7ddddf2e2504..81476e18a789 100644
3688     --- a/fs/cifs/file.c
3689     +++ b/fs/cifs/file.c
3690     @@ -2381,7 +2381,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
3691     unsigned long nr_segs, loff_t *poffset)
3692     {
3693     unsigned long nr_pages, i;
3694     - size_t copied, len, cur_len;
3695     + size_t bytes, copied, len, cur_len;
3696     ssize_t total_written = 0;
3697     loff_t offset;
3698     struct iov_iter it;
3699     @@ -2436,14 +2436,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
3700    
3701     save_len = cur_len;
3702     for (i = 0; i < nr_pages; i++) {
3703     - copied = min_t(const size_t, cur_len, PAGE_SIZE);
3704     + bytes = min_t(const size_t, cur_len, PAGE_SIZE);
3705     copied = iov_iter_copy_from_user(wdata->pages[i], &it,
3706     - 0, copied);
3707     + 0, bytes);
3708     cur_len -= copied;
3709     iov_iter_advance(&it, copied);
3710     + /*
3711     + * If we didn't copy as much as we expected, then that
3712     + * may mean we trod into an unmapped area. Stop copying
3713     + * at that point. On the next pass through the big
3714     + * loop, we'll likely end up getting a zero-length
3715     + * write and bailing out of it.
3716     + */
3717     + if (copied < bytes)
3718     + break;
3719     }
3720     cur_len = save_len - cur_len;
3721    
3722     + /*
3723     + * If we have no data to send, then that probably means that
3724     + * the copy above failed altogether. That's most likely because
3725     + * the address in the iovec was bogus. Set the rc to -EFAULT,
3726     + * free anything we allocated and bail out.
3727     + */
3728     + if (!cur_len) {
3729     + for (i = 0; i < nr_pages; i++)
3730     + put_page(wdata->pages[i]);
3731     + kfree(wdata);
3732     + rc = -EFAULT;
3733     + break;
3734     + }
3735     +
3736     + /*
3737     + * i + 1 now represents the number of pages we actually used in
3738     + * the copy phase above. Bring nr_pages down to that, and free
3739     + * any pages that we didn't use.
3740     + */
3741     + for ( ; nr_pages > i + 1; nr_pages--)
3742     + put_page(wdata->pages[nr_pages - 1]);
3743     +
3744     wdata->sync_mode = WB_SYNC_ALL;
3745     wdata->nr_pages = nr_pages;
3746     wdata->offset = (__u64)offset;
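The cifs fix distinguishes the number of bytes it asked to copy (bytes) from the number actually copied, breaks out on the first short copy into an unmapped region, and trims nr_pages to the pages that were really filled. A runnable simulation of that loop (PAGE_SIZE and the "mapped" cutoff are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Stand-in for iov_iter_copy_from_user(): copies stop short once
     * the "mapped" region of the source is exhausted. */
    static size_t copy_from_user_sim(char *dst, const char *src, size_t off,
                                     size_t bytes, size_t mapped)
    {
        size_t n = off >= mapped ? 0
                 : (bytes < mapped - off ? bytes : mapped - off);
        memcpy(dst, src + off, n);
        return n;
    }

    int main(void)
    {
        static char src[3 * PAGE_SIZE];
        static char pages[4][PAGE_SIZE];
        size_t mapped = 2 * PAGE_SIZE + 100;   /* rest is "unmapped" */
        size_t cur_len = 4 * PAGE_SIZE, save_len = cur_len;
        unsigned long nr_pages = 4, i;

        for (i = 0; i < nr_pages; i++) {
            size_t bytes = cur_len < PAGE_SIZE ? cur_len : PAGE_SIZE;
            size_t copied = copy_from_user_sim(pages[i], src,
                                               save_len - cur_len,
                                               bytes, mapped);
            cur_len -= copied;
            if (copied < bytes)                /* short copy: stop here */
                break;
        }
        cur_len = save_len - cur_len;          /* bytes actually gathered */
        if (i < nr_pages)
            nr_pages = i + 1;                  /* trim unused pages */
        printf("writing %zu bytes from %lu pages\n", cur_len, nr_pages);
        return 0;
    }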
3747     diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
3748     index c38350851b08..bc0bb9c34f72 100644
3749     --- a/fs/cifs/smb2glob.h
3750     +++ b/fs/cifs/smb2glob.h
3751     @@ -57,4 +57,7 @@
3752     #define SMB2_CMACAES_SIZE (16)
3753     #define SMB3_SIGNKEY_SIZE (16)
3754    
3755     +/* Maximum buffer size value we can send with 1 credit */
3756     +#define SMB2_MAX_BUFFER_SIZE 65536
3757     +
3758     #endif /* _SMB2_GLOB_H */
3759     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3760     index 861b33214144..027a0c6f7ca0 100644
3761     --- a/fs/cifs/smb2ops.c
3762     +++ b/fs/cifs/smb2ops.c
3763     @@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
3764     /* start with specified wsize, or default */
3765     wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
3766     wsize = min_t(unsigned int, wsize, server->max_write);
3767     - /*
3768     - * limit write size to 2 ** 16, because we don't support multicredit
3769     - * requests now.
3770     - */
3771     - wsize = min_t(unsigned int, wsize, 2 << 15);
3772     + /* set it to the maximum buffer size value we can send with 1 credit */
3773     + wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
3774    
3775     return wsize;
3776     }
3777     @@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
3778     /* start with specified rsize, or default */
3779     rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
3780     rsize = min_t(unsigned int, rsize, server->max_read);
3781     - /*
3782     - * limit write size to 2 ** 16, because we don't support multicredit
3783     - * requests now.
3784     - */
3785     - rsize = min_t(unsigned int, rsize, 2 << 15);
3786     + /* set it to the maximum buffer size value we can send with 1 credit */
3787     + rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
3788    
3789     return rsize;
3790     }
3791     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3792     index edccb5252462..06d29e3f5d10 100644
3793     --- a/fs/cifs/smb2pdu.c
3794     +++ b/fs/cifs/smb2pdu.c
3795     @@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
3796    
3797     /* SMB2 only has an extended negflavor */
3798     server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
3799     - server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
3800     + /* set it to the maximum buffer size value we can send with 1 credit */
3801     + server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
3802     + SMB2_MAX_BUFFER_SIZE);
3803     server->max_read = le32_to_cpu(rsp->MaxReadSize);
3804     server->max_write = le32_to_cpu(rsp->MaxWriteSize);
3805     /* BB Do we need to validate the SecurityMode? */
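All three SMB2 hunks apply the same clamp: with only one credit available per request, wsize, rsize, and maxBuf may not exceed SMB2_MAX_BUFFER_SIZE. A tiny sketch of the negotiation clamp with assumed server and mount values:

    #include <stdio.h>

    #define SMB2_MAX_BUFFER_SIZE 65536   /* 1-credit payload ceiling */

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int server_max_write = 1048576;  /* assumed server offer */
        unsigned int requested_wsize  = 131072;   /* assumed mount option */

        unsigned int wsize = requested_wsize;
        wsize = min_u(wsize, server_max_write);       /* server cap */
        wsize = min_u(wsize, SMB2_MAX_BUFFER_SIZE);   /* credit cap */
        printf("negotiated wsize = %u\n", wsize);     /* prints 65536 */
        return 0;
    }

The old "2 << 15" magic evaluates to the same 65536; the gain here is the named constant and the fact that SMB2_negotiate() now applies it to server->maxBuf as well.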
3806     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3807     index 745faaa7ef95..a9d2bf941066 100644
3808     --- a/fs/ext4/ext4.h
3809     +++ b/fs/ext4/ext4.h
3810     @@ -770,6 +770,8 @@ do { \
3811     if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
3812     (einode)->xtime.tv_sec = \
3813     (signed)le32_to_cpu((raw_inode)->xtime); \
3814     + else \
3815     + (einode)->xtime.tv_sec = 0; \
3816     if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
3817     ext4_decode_extra_time(&(einode)->xtime, \
3818     raw_inode->xtime ## _extra); \
3819     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3820     index f76027fe58ae..e678549ec994 100644
3821     --- a/fs/ext4/extents.c
3822     +++ b/fs/ext4/extents.c
3823     @@ -3937,6 +3937,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
3824     } else
3825     err = ret;
3826     map->m_flags |= EXT4_MAP_MAPPED;
3827     + map->m_pblk = newblock;
3828     if (allocated > map->m_len)
3829     allocated = map->m_len;
3830     map->m_len = allocated;
3831     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3832     index 1ddee3dfabe3..55fe412b2410 100644
3833     --- a/fs/ext4/inode.c
3834     +++ b/fs/ext4/inode.c
3835     @@ -4582,6 +4582,10 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3836     if (attr->ia_size > sbi->s_bitmap_maxbytes)
3837     return -EFBIG;
3838     }
3839     +
3840     + if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
3841     + inode_inc_iversion(inode);
3842     +
3843     if (S_ISREG(inode->i_mode) &&
3844     (attr->ia_size < inode->i_size)) {
3845     if (ext4_should_order_data(inode)) {
3846     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
3847     index a569d335f804..d011b69ae8ae 100644
3848     --- a/fs/ext4/ioctl.c
3849     +++ b/fs/ext4/ioctl.c
3850     @@ -144,7 +144,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
3851     handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
3852     if (IS_ERR(handle)) {
3853     err = -EINVAL;
3854     - goto swap_boot_out;
3855     + goto journal_err_out;
3856     }
3857    
3858     /* Protect extent tree against block allocations via delalloc */
3859     @@ -202,6 +202,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
3860    
3861     ext4_double_up_write_data_sem(inode, inode_bl);
3862    
3863     +journal_err_out:
3864     ext4_inode_resume_unlocked_dio(inode);
3865     ext4_inode_resume_unlocked_dio(inode_bl);
3866    
3867     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
3868     index c5adbb318a90..f3b84cd9de56 100644
3869     --- a/fs/ext4/resize.c
3870     +++ b/fs/ext4/resize.c
3871     @@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
3872     ext4_group_t group;
3873     ext4_group_t last_group;
3874     unsigned overhead;
3875     + __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
3876    
3877     BUG_ON(flex_gd->count == 0 || group_data == NULL);
3878    
3879     @@ -266,7 +267,7 @@ next_group:
3880     src_group++;
3881     for (; src_group <= last_group; src_group++) {
3882     overhead = ext4_group_overhead_blocks(sb, src_group);
3883     - if (overhead != 0)
3884     + if (overhead == 0)
3885     last_blk += group_data[src_group - group].blocks_count;
3886     else
3887     break;
3888     @@ -280,8 +281,7 @@ next_group:
3889     group = ext4_get_group_number(sb, start_blk - 1);
3890     group -= group_data[0].group;
3891     group_data[group].free_blocks_count--;
3892     - if (flexbg_size > 1)
3893     - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
3894     + flex_gd->bg_flags[group] &= uninit_mask;
3895     }
3896    
3897     /* Allocate inode bitmaps */
3898     @@ -292,22 +292,30 @@ next_group:
3899     group = ext4_get_group_number(sb, start_blk - 1);
3900     group -= group_data[0].group;
3901     group_data[group].free_blocks_count--;
3902     - if (flexbg_size > 1)
3903     - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
3904     + flex_gd->bg_flags[group] &= uninit_mask;
3905     }
3906    
3907     /* Allocate inode tables */
3908     for (; it_index < flex_gd->count; it_index++) {
3909     - if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
3910     + unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
3911     + ext4_fsblk_t next_group_start;
3912     +
3913     + if (start_blk + itb > last_blk)
3914     goto next_group;
3915     group_data[it_index].inode_table = start_blk;
3916     - group = ext4_get_group_number(sb, start_blk - 1);
3917     + group = ext4_get_group_number(sb, start_blk);
3918     + next_group_start = ext4_group_first_block_no(sb, group + 1);
3919     group -= group_data[0].group;
3920     - group_data[group].free_blocks_count -=
3921     - EXT4_SB(sb)->s_itb_per_group;
3922     - if (flexbg_size > 1)
3923     - flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
3924    
3925     + if (start_blk + itb > next_group_start) {
3926     + flex_gd->bg_flags[group + 1] &= uninit_mask;
3927     + overhead = start_blk + itb - next_group_start;
3928     + group_data[group + 1].free_blocks_count -= overhead;
3929     + itb -= overhead;
3930     + }
3931     +
3932     + group_data[group].free_blocks_count -= itb;
3933     + flex_gd->bg_flags[group] &= uninit_mask;
3934     start_blk += EXT4_SB(sb)->s_itb_per_group;
3935     }
3936    
3937     @@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
3938     start = ext4_group_first_block_no(sb, group);
3939     group -= flex_gd->groups[0].group;
3940    
3941     - count2 = sb->s_blocksize * 8 - (block - start);
3942     + count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
3943     if (count2 > count)
3944     count2 = count;
3945    
3946     @@ -620,7 +628,7 @@ handle_ib:
3947     if (err)
3948     goto out;
3949     count = group_table_count[j];
3950     - start = group_data[i].block_bitmap;
3951     + start = (&group_data[i].block_bitmap)[j];
3952     block = start;
3953     }
3954    
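The resize.c cleanup precomputes uninit_mask so that clearing EXT4_BG_BLOCK_UNINIT becomes an unconditional "&=": when flexbg_size <= 1 the mask is all ones and the operation is a no-op. A small demonstration of the trick (treat the 0x0002 flag value as illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define BG_BLOCK_UNINIT 0x0002   /* flag value, illustrative here */

    int main(void)
    {
        int flexbg_size = 4;

        /* An all-ones mask turns the "&=" into a no-op when
         * flexbg_size <= 1, so every
         *   if (flexbg_size > 1) flags &= ~FLAG;
         * collapses into one unconditional statement. */
        uint16_t uninit_mask = (flexbg_size > 1)
                               ? (uint16_t)~BG_BLOCK_UNINIT
                               : (uint16_t)~0;
        uint16_t bg_flags = 0x0003;

        bg_flags &= uninit_mask;
        printf("bg_flags = 0x%04x\n", bg_flags);   /* 0x0001 */
        return 0;
    }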
3955     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3956     index b947e0af9956..d9711dc42164 100644
3957     --- a/fs/ext4/super.c
3958     +++ b/fs/ext4/super.c
3959     @@ -3667,16 +3667,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3960     for (i = 0; i < 4; i++)
3961     sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
3962     sbi->s_def_hash_version = es->s_def_hash_version;
3963     - i = le32_to_cpu(es->s_flags);
3964     - if (i & EXT2_FLAGS_UNSIGNED_HASH)
3965     - sbi->s_hash_unsigned = 3;
3966     - else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
3967     + if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
3968     + i = le32_to_cpu(es->s_flags);
3969     + if (i & EXT2_FLAGS_UNSIGNED_HASH)
3970     + sbi->s_hash_unsigned = 3;
3971     + else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
3972     #ifdef __CHAR_UNSIGNED__
3973     - es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
3974     - sbi->s_hash_unsigned = 3;
3975     + if (!(sb->s_flags & MS_RDONLY))
3976     + es->s_flags |=
3977     + cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
3978     + sbi->s_hash_unsigned = 3;
3979     #else
3980     - es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
3981     + if (!(sb->s_flags & MS_RDONLY))
3982     + es->s_flags |=
3983     + cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
3984     #endif
3985     + }
3986     }
3987    
3988     /* Handle clustersize */
3989     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3990     index b0b74e58697b..7272cc6977ec 100644
3991     --- a/fs/jbd2/transaction.c
3992     +++ b/fs/jbd2/transaction.c
3993     @@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
3994     * similarly constrained call sites
3995     */
3996     ret = start_this_handle(journal, handle, GFP_NOFS);
3997     - if (ret < 0)
3998     + if (ret < 0) {
3999     jbd2_journal_free_reserved(handle);
4000     + return ret;
4001     + }
4002     handle->h_type = type;
4003     handle->h_line_no = line_no;
4004     - return ret;
4005     + return 0;
4006     }
4007     EXPORT_SYMBOL(jbd2_journal_start_reserved);
4008    
4009     diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
4010     index eda8879171c4..0ee22ab9ef97 100644
4011     --- a/fs/nfs/inode.c
4012     +++ b/fs/nfs/inode.c
4013     @@ -164,17 +164,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
4014     if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
4015     nfs_fscache_invalidate(inode);
4016     nfsi->cache_validity |= NFS_INO_INVALID_ATTR
4017     - | NFS_INO_INVALID_LABEL
4018     | NFS_INO_INVALID_DATA
4019     | NFS_INO_INVALID_ACCESS
4020     | NFS_INO_INVALID_ACL
4021     | NFS_INO_REVAL_PAGECACHE;
4022     } else
4023     nfsi->cache_validity |= NFS_INO_INVALID_ATTR
4024     - | NFS_INO_INVALID_LABEL
4025     | NFS_INO_INVALID_ACCESS
4026     | NFS_INO_INVALID_ACL
4027     | NFS_INO_REVAL_PAGECACHE;
4028     + nfs_zap_label_cache_locked(nfsi);
4029     }
4030    
4031     void nfs_zap_caches(struct inode *inode)
4032     @@ -266,6 +265,13 @@ nfs_init_locked(struct inode *inode, void *opaque)
4033     }
4034    
4035     #ifdef CONFIG_NFS_V4_SECURITY_LABEL
4036     +static void nfs_clear_label_invalid(struct inode *inode)
4037     +{
4038     + spin_lock(&inode->i_lock);
4039     + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
4040     + spin_unlock(&inode->i_lock);
4041     +}
4042     +
4043     void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
4044     struct nfs4_label *label)
4045     {
4046     @@ -289,6 +295,7 @@ void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
4047     __func__,
4048     (char *)label->label,
4049     label->len, error);
4050     + nfs_clear_label_invalid(inode);
4051     }
4052     }
4053    
4054     @@ -1599,7 +1606,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
4055     inode->i_blocks = fattr->du.nfs2.blocks;
4056    
4057     /* Update attrtimeo value if we're out of the unstable period */
4058     - if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) {
4059     + if (invalid & NFS_INO_INVALID_ATTR) {
4060     nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
4061     nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
4062     nfsi->attrtimeo_timestamp = now;
4063     @@ -1612,7 +1619,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
4064     }
4065     }
4066     invalid &= ~NFS_INO_INVALID_ATTR;
4067     - invalid &= ~NFS_INO_INVALID_LABEL;
4068     /* Don't invalidate the data if we were to blame */
4069     if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
4070     || S_ISLNK(inode->i_mode)))
4071     diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
4072     index 38da8c2b81ac..a84dbf238512 100644
4073     --- a/fs/nfs/internal.h
4074     +++ b/fs/nfs/internal.h
4075     @@ -266,6 +266,18 @@ extern const u32 nfs41_maxgetdevinfo_overhead;
4076     extern struct rpc_procinfo nfs4_procedures[];
4077     #endif
4078    
4079     +#ifdef CONFIG_NFS_V4_SECURITY_LABEL
4080     +static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
4081     +{
4082     + if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL))
4083     + nfsi->cache_validity |= NFS_INO_INVALID_LABEL;
4084     +}
4085     +#else
4086     +static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
4087     +{
4088     +}
4089     +#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
4090     +
4091     /* proc.c */
4092     void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
4093     extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
4094     diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
4095     index ebced8d71157..26c07f9efdb3 100644
4096     --- a/fs/nfs/nfs4state.c
4097     +++ b/fs/nfs/nfs4state.c
4098     @@ -1015,8 +1015,11 @@ int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
4099     if (ret == -EIO)
4100     /* A lost lock - don't even consider delegations */
4101     goto out;
4102     - if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
4103     + /* returns true if delegation stateid found and copied */
4104     + if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
4105     + ret = 0;
4106     goto out;
4107     + }
4108     if (ret != -ENOENT)
4109     /* nfs4_copy_delegation_stateid() didn't over-write
4110     * dst, so it still has the lock stateid which we now
4111     diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
4112     index 831d49a4111f..cfc8dcc16043 100644
4113     --- a/fs/quota/dquot.c
4114     +++ b/fs/quota/dquot.c
4115     @@ -581,9 +581,17 @@ int dquot_scan_active(struct super_block *sb,
4116     dqstats_inc(DQST_LOOKUPS);
4117     dqput(old_dquot);
4118     old_dquot = dquot;
4119     - ret = fn(dquot, priv);
4120     - if (ret < 0)
4121     - goto out;
4122     + /*
4123     + * ->release_dquot() can be racing with us. Our reference
4124     + * protects us from new calls to it so just wait for any
4125     + * outstanding call and recheck the DQ_ACTIVE_B bit after that.
4126     + */
4127     + wait_on_dquot(dquot);
4128     + if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
4129     + ret = fn(dquot, priv);
4130     + if (ret < 0)
4131     + goto out;
4132     + }
4133     spin_lock(&dq_list_lock);
4134     /* We are safe to continue now because our dquot could not
4135     * be moved out of the inuse list while we hold the reference */
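The quota fix relies on the wait-then-recheck idiom spelled out in its comment: holding a reference prevents new release calls, but one may already be in flight, so wait for it to finish and test DQ_ACTIVE_B again before invoking the callback. A userspace model of the idiom (a mutex lock/unlock pair stands in for wait_on_dquot()):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool active = true;

    /* Stand-in for wait_on_dquot(): entering and leaving the lock
     * guarantees any release that was already running has finished. */
    static void wait_on_object(void)
    {
        pthread_mutex_lock(&obj_lock);
        pthread_mutex_unlock(&obj_lock);
    }

    int main(void)
    {
        /* our reference blocks *new* releases, but not one in flight */
        wait_on_object();
        if (active)                 /* recheck, as the patch does */
            puts("fn(dquot, priv) runs");
        else
            puts("skipped: object was released under us");
        return 0;
    }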
4136     diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
4137     index 2f0543f7510c..f9bbbb472663 100644
4138     --- a/include/linux/can/skb.h
4139     +++ b/include/linux/can/skb.h
4140     @@ -11,7 +11,9 @@
4141     #define CAN_SKB_H
4142    
4143     #include <linux/types.h>
4144     +#include <linux/skbuff.h>
4145     #include <linux/can.h>
4146     +#include <net/sock.h>
4147    
4148     /*
4149     * The struct can_skb_priv is used to transport additional information along
4150     @@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
4151     skb_reserve(skb, sizeof(struct can_skb_priv));
4152     }
4153    
4154     +static inline void can_skb_destructor(struct sk_buff *skb)
4155     +{
4156     + sock_put(skb->sk);
4157     +}
4158     +
4159     +static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
4160     +{
4161     + if (sk) {
4162     + sock_hold(sk);
4163     + skb->destructor = can_skb_destructor;
4164     + skb->sk = sk;
4165     + }
4166     +}
4167     +
4168     +/*
4169     + * returns an unshared skb owned by the original sock to be echo'ed back
4170     + */
4171     +static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
4172     +{
4173     + if (skb_shared(skb)) {
4174     + struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
4175     +
4176     + if (likely(nskb)) {
4177     + can_skb_set_owner(nskb, skb->sk);
4178     + consume_skb(skb);
4179     + return nskb;
4180     + } else {
4181     + kfree_skb(skb);
4182     + return NULL;
4183     + }
4184     + }
4185     +
4186     + /* we can assume we have an unshared skb with a proper owner */
4187     + return skb;
4188     +}
4189     +
4190     #endif /* CAN_SKB_H */
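can_create_echo_skb() gives echo/loopback paths an skb that is guaranteed unshared and owned by the originating socket, cloning only when necessary. A userspace model of the clone-if-shared decision using a plain refcounted buffer (struct buf and its fields are invented for the illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct buf {
        int users;                  /* share count, like skb users */
        char data[8];
    };

    /* Model of can_create_echo_skb(): shared buffers are replaced by
     * a private copy; unshared ones are returned untouched. */
    static struct buf *create_echo_buf(struct buf *b)
    {
        if (b->users > 1) {         /* "skb_shared()" */
            struct buf *nb = malloc(sizeof(*nb));

            if (!nb) {
                b->users--;         /* "kfree_skb(skb)" */
                return NULL;
            }
            *nb = *b;
            nb->users = 1;          /* new private reference */
            b->users--;             /* "consume_skb(skb)": drop ours */
            return nb;
        }
        return b;                   /* already unshared, proper owner */
    }

    int main(void)
    {
        struct buf *b = malloc(sizeof(*b));

        b->users = 2;               /* someone else still holds a ref */
        strcpy(b->data, "echo");

        struct buf *e = create_echo_buf(b);
        printf("echo from %s buffer (users=%d)\n",
               e == b ? "the same" : "a cloned", e->users);
        free(e);
        free(b);                    /* the other holder's reference */
        return 0;
    }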
4191     diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
4192     index f6c82de12541..d6ad91f26038 100644
4193     --- a/include/linux/ipc_namespace.h
4194     +++ b/include/linux/ipc_namespace.h
4195     @@ -119,9 +119,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
4196     * the new maximum will handle anyone else. I may have to revisit this
4197     * in the future.
4198     */
4199     -#define MIN_QUEUESMAX 1
4200     #define DFLT_QUEUESMAX 256
4201     -#define HARD_QUEUESMAX 1024
4202     #define MIN_MSGMAX 1
4203     #define DFLT_MSG 10U
4204     #define DFLT_MSGMAX 10
4205     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
4206     index 21eae43348fb..9f2a0cbc7d06 100644
4207     --- a/include/linux/netdevice.h
4208     +++ b/include/linux/netdevice.h
4209     @@ -2917,7 +2917,12 @@ void netdev_change_features(struct net_device *dev);
4210     void netif_stacked_transfer_operstate(const struct net_device *rootdev,
4211     struct net_device *dev);
4212    
4213     -netdev_features_t netif_skb_features(struct sk_buff *skb);
4214     +netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
4215     + const struct net_device *dev);
4216     +static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
4217     +{
4218     + return netif_skb_dev_features(skb, skb->dev);
4219     +}
4220    
4221     static inline bool net_gso_ok(netdev_features_t features, int gso_type)
4222     {
4223     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4224     index efa1649a822a..9995165ff3d0 100644
4225     --- a/include/linux/skbuff.h
4226     +++ b/include/linux/skbuff.h
4227     @@ -2393,6 +2393,8 @@ extern void skb_scrub_packet(struct sk_buff *skb, bool xnet);
4228     extern struct sk_buff *skb_segment(struct sk_buff *skb,
4229     netdev_features_t features);
4230    
4231     +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb);
4232     +
4233     static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
4234     int len, void *buffer)
4235     {
4236     @@ -2816,5 +2818,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
4237     {
4238     return !skb->head_frag || skb_cloned(skb);
4239     }
4240     +
4241     +/**
4242     + * skb_gso_network_seglen - Return length of individual segments of a gso packet
4243     + *
4244     + * @skb: GSO skb
4245     + *
4246     + * skb_gso_network_seglen is used to determine the real size of the
4247     + * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4248     + *
4249     + * The MAC/L2 header is not accounted for.
4250     + */
4251     +static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4252     +{
4253     + unsigned int hdr_len = skb_transport_header(skb) -
4254     + skb_network_header(skb);
4255     + return hdr_len + skb_gso_transport_seglen(skb);
4256     +}
4257     #endif /* __KERNEL__ */
4258     #endif /* _LINUX_SKBUFF_H */
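
skb_gso_network_seglen() above (together with skb_gso_transport_seglen(), implemented in net/core/skbuff.c below) answers "how big will each segment be once this GSO skb is resegmented": the transport header plus gso_size, plus the L3 header for the network flavor. The aggregate skb->len of a GSO skb can approach 64KB, so comparing skb->len against an MTU would wrongly flag packets whose individual segments fit; the forwarding hunks later in this patch rely on this distinction. A worked example, assuming illustrative header sizes (IPv4 20 bytes, TCP 32 bytes with timestamps):

    #include <stdio.h>

    int main(void)
    {
        unsigned int network_hdr = 20;   /* transport - network header offset */
        unsigned int transport_hdr = 32; /* tcp_hdrlen() */
        unsigned int gso_size = 1448;    /* payload bytes per segment */

        /* skb_gso_transport_seglen(): L4 header + payload */
        unsigned int transport_seglen = transport_hdr + gso_size;
        /* skb_gso_network_seglen(): add the L3 header on top */
        unsigned int network_seglen = network_hdr + transport_seglen;

        printf("per-segment size incl. L3: %u\n", network_seglen); /* 1500 */
        return 0;
    }

sch_tbf below renames its local helper to skb_gso_mac_seglen(), which stacks the L2 header on top of the same transport figure.
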
4259     diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
4260     index 383d638340b8..5bb8bfe67149 100644
4261     --- a/ipc/mq_sysctl.c
4262     +++ b/ipc/mq_sysctl.c
4263     @@ -22,6 +22,16 @@ static void *get_mq(ctl_table *table)
4264     return which;
4265     }
4266    
4267     +static int proc_mq_dointvec(ctl_table *table, int write,
4268     + void __user *buffer, size_t *lenp, loff_t *ppos)
4269     +{
4270     + struct ctl_table mq_table;
4271     + memcpy(&mq_table, table, sizeof(mq_table));
4272     + mq_table.data = get_mq(table);
4273     +
4274     + return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
4275     +}
4276     +
4277     static int proc_mq_dointvec_minmax(ctl_table *table, int write,
4278     void __user *buffer, size_t *lenp, loff_t *ppos)
4279     {
4280     @@ -33,12 +43,10 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
4281     lenp, ppos);
4282     }
4283     #else
4284     +#define proc_mq_dointvec NULL
4285     #define proc_mq_dointvec_minmax NULL
4286     #endif
4287    
4288     -static int msg_queues_limit_min = MIN_QUEUESMAX;
4289     -static int msg_queues_limit_max = HARD_QUEUESMAX;
4290     -
4291     static int msg_max_limit_min = MIN_MSGMAX;
4292     static int msg_max_limit_max = HARD_MSGMAX;
4293    
4294     @@ -51,9 +59,7 @@ static ctl_table mq_sysctls[] = {
4295     .data = &init_ipc_ns.mq_queues_max,
4296     .maxlen = sizeof(int),
4297     .mode = 0644,
4298     - .proc_handler = proc_mq_dointvec_minmax,
4299     - .extra1 = &msg_queues_limit_min,
4300     - .extra2 = &msg_queues_limit_max,
4301     + .proc_handler = proc_mq_dointvec,
4302     },
4303     {
4304     .procname = "msg_max",
4305     diff --git a/ipc/mqueue.c b/ipc/mqueue.c
4306     index ae1996d3c539..bb0248fc5187 100644
4307     --- a/ipc/mqueue.c
4308     +++ b/ipc/mqueue.c
4309     @@ -433,9 +433,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
4310     error = -EACCES;
4311     goto out_unlock;
4312     }
4313     - if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
4314     - (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
4315     - !capable(CAP_SYS_RESOURCE))) {
4316     +
4317     + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
4318     + !capable(CAP_SYS_RESOURCE)) {
4319     error = -ENOSPC;
4320     goto out_unlock;
4321     }
4322     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
4323     index b6fd78344c53..c4f8bc79d075 100644
4324     --- a/kernel/cgroup.c
4325     +++ b/kernel/cgroup.c
4326     @@ -1612,10 +1612,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
4327     mutex_lock(&cgroup_mutex);
4328     mutex_lock(&cgroup_root_mutex);
4329    
4330     - root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
4331     - 0, 1, GFP_KERNEL);
4332     - if (root_cgrp->id < 0)
4333     + ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
4334     + if (ret < 0)
4335     goto unlock_drop;
4336     + root_cgrp->id = ret;
4337    
4338     /* Check for name clashes with existing mounts */
4339     ret = -EBUSY;
4340     @@ -2877,10 +2877,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
4341     */
4342     update_before = cgroup_serial_nr_next;
4343    
4344     - mutex_unlock(&cgroup_mutex);
4345     -
4346     /* add/rm files for all cgroups created before */
4347     - rcu_read_lock();
4348     css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
4349     struct cgroup *cgrp = css->cgroup;
4350    
4351     @@ -2889,23 +2886,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
4352    
4353     inode = cgrp->dentry->d_inode;
4354     dget(cgrp->dentry);
4355     - rcu_read_unlock();
4356     -
4357     dput(prev);
4358     prev = cgrp->dentry;
4359    
4360     + mutex_unlock(&cgroup_mutex);
4361     mutex_lock(&inode->i_mutex);
4362     mutex_lock(&cgroup_mutex);
4363     if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
4364     ret = cgroup_addrm_files(cgrp, cfts, is_add);
4365     - mutex_unlock(&cgroup_mutex);
4366     mutex_unlock(&inode->i_mutex);
4367     -
4368     - rcu_read_lock();
4369     if (ret)
4370     break;
4371     }
4372     - rcu_read_unlock();
4373     + mutex_unlock(&cgroup_mutex);
4374     dput(prev);
4375     deactivate_super(sb);
4376     return ret;
4377     @@ -3024,9 +3017,14 @@ static void cgroup_enable_task_cg_lists(void)
4378     * We should check if the process is exiting, otherwise
4379     * it will race with cgroup_exit() in that the list
4380     * entry won't be deleted though the process has exited.
4381     + * Do it while holding siglock so that we don't end up
4382     + * racing against cgroup_exit().
4383     */
4384     + spin_lock_irq(&p->sighand->siglock);
4385     if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
4386     list_add(&p->cg_list, &task_css_set(p)->tasks);
4387     + spin_unlock_irq(&p->sighand->siglock);
4388     +
4389     task_unlock(p);
4390     } while_each_thread(g, p);
4391     read_unlock(&tasklist_lock);
4392     @@ -4395,7 +4393,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4393     struct cgroup *cgrp;
4394     struct cgroup_name *name;
4395     struct cgroupfs_root *root = parent->root;
4396     - int err = 0;
4397     + int err;
4398     struct cgroup_subsys *ss;
4399     struct super_block *sb = root->sb;
4400    
4401     @@ -4405,8 +4403,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4402     return -ENOMEM;
4403    
4404     name = cgroup_alloc_name(dentry);
4405     - if (!name)
4406     + if (!name) {
4407     + err = -ENOMEM;
4408     goto err_free_cgrp;
4409     + }
4410     rcu_assign_pointer(cgrp->name, name);
4411    
4412     /*
4413     @@ -4414,8 +4414,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
4414     * a half-baked cgroup.
4415     */
4416     cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
4417     - if (cgrp->id < 0)
4418     + if (cgrp->id < 0) {
4419     + err = -ENOMEM;
4420     goto err_free_name;
4421     + }
4422    
4423     /*
4424     * Only live parents can have children. Note that the liveliness
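
The cgroup hunks fix two error-path oversights: cgroup_create() previously left err at 0 when cgroup_alloc_name() or idr_alloc() failed, so a failed mkdir could report success, and cgroup_mount() stored idr_alloc()'s return straight into root_cgrp->id before checking it. The cgroup_enable_task_cg_lists() hunk separately takes siglock around the PF_EXITING test so the list update cannot race cgroup_exit(). The id-allocation idiom being adopted, as a minimal compilable sketch with hypothetical names:

    /* check an id-or-errno return in a local before publishing it */
    struct obj { int id; };

    static int setup(struct obj *o, int (*alloc_id)(void))
    {
        int ret = alloc_id();   /* >= 0 on success, -errno on failure */

        if (ret < 0)
            return ret;         /* o->id never holds a stray errno */
        o->id = ret;
        return 0;
    }
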
4425     diff --git a/kernel/events/core.c b/kernel/events/core.c
4426     index 953c14348375..fea4f6cf7e90 100644
4427     --- a/kernel/events/core.c
4428     +++ b/kernel/events/core.c
4429     @@ -193,7 +193,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
4430     void __user *buffer, size_t *lenp,
4431     loff_t *ppos)
4432     {
4433     - int ret = proc_dointvec(table, write, buffer, lenp, ppos);
4434     + int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
4435    
4436     if (ret || !write)
4437     return ret;
4438     @@ -7788,14 +7788,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
4439     static void __perf_event_exit_context(void *__info)
4440     {
4441     struct perf_event_context *ctx = __info;
4442     - struct perf_event *event, *tmp;
4443     + struct perf_event *event;
4444    
4445     perf_pmu_rotate_stop(ctx->pmu);
4446    
4447     - list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
4448     - __perf_remove_from_context(event);
4449     - list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
4450     + rcu_read_lock();
4451     + list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
4452     __perf_remove_from_context(event);
4453     + rcu_read_unlock();
4454     }
4455    
4456     static void perf_event_exit_cpu_context(int cpu)
4457     @@ -7819,11 +7819,11 @@ static void perf_event_exit_cpu(int cpu)
4458     {
4459     struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
4460    
4461     + perf_event_exit_cpu_context(cpu);
4462     +
4463     mutex_lock(&swhash->hlist_mutex);
4464     swevent_hlist_release(swhash);
4465     mutex_unlock(&swhash->hlist_mutex);
4466     -
4467     - perf_event_exit_cpu_context(cpu);
4468     }
4469     #else
4470     static inline void perf_event_exit_cpu(int cpu) { }
4471     diff --git a/kernel/sysctl.c b/kernel/sysctl.c
4472     index b2f06f3c6a3f..2a9db916c3f5 100644
4473     --- a/kernel/sysctl.c
4474     +++ b/kernel/sysctl.c
4475     @@ -1049,6 +1049,7 @@ static struct ctl_table kern_table[] = {
4476     .maxlen = sizeof(sysctl_perf_event_sample_rate),
4477     .mode = 0644,
4478     .proc_handler = perf_proc_update_handler,
4479     + .extra1 = &one,
4480     },
4481     {
4482     .procname = "perf_cpu_time_max_percent",
4483     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
4484     index 93c265286e8c..60fee69c37be 100644
4485     --- a/kernel/workqueue.c
4486     +++ b/kernel/workqueue.c
4487     @@ -1843,6 +1843,12 @@ static void destroy_worker(struct worker *worker)
4488     if (worker->flags & WORKER_IDLE)
4489     pool->nr_idle--;
4490    
4491     + /*
4492     + * Once WORKER_DIE is set, the kworker may destroy itself at any
4493     + * point. Pin to ensure the task stays until we're done with it.
4494     + */
4495     + get_task_struct(worker->task);
4496     +
4497     list_del_init(&worker->entry);
4498     worker->flags |= WORKER_DIE;
4499    
4500     @@ -1851,6 +1857,7 @@ static void destroy_worker(struct worker *worker)
4501     spin_unlock_irq(&pool->lock);
4502    
4503     kthread_stop(worker->task);
4504     + put_task_struct(worker->task);
4505     kfree(worker);
4506    
4507     spin_lock_irq(&pool->lock);
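
Once WORKER_DIE is set and pool->lock is dropped, the dying kworker may finish and free itself before destroy_worker() reaches kthread_stop(), leaving a dangling task pointer. Pinning the task_struct first keeps the handle valid across the unlocked window. The pin/unpin shape as a compilable toy, not kernel API:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct task { atomic_int refs; };

    static void get_task(struct task *t)
    {
        atomic_fetch_add(&t->refs, 1);      /* get_task_struct() */
    }

    static void put_task(struct task *t)
    {
        if (atomic_fetch_sub(&t->refs, 1) == 1)
            free(t);                        /* last ref frees the object */
    }

    /* destroy_worker() ordering: pin, drop the lock, block, unpin */
    static void stop_worker(struct task *t)
    {
        get_task(t);    /* t stays valid even if it exits concurrently */
        /* ... spin_unlock_irq(&pool->lock); kthread_stop(t) sleeps ... */
        put_task(t);    /* put_task_struct() */
    }
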
4508     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4509     index 292a266e0d42..dd7789ce7572 100644
4510     --- a/mm/huge_memory.c
4511     +++ b/mm/huge_memory.c
4512     @@ -1160,8 +1160,10 @@ alloc:
4513     } else {
4514     ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
4515     pmd, orig_pmd, page, haddr);
4516     - if (ret & VM_FAULT_OOM)
4517     + if (ret & VM_FAULT_OOM) {
4518     split_huge_page(page);
4519     + ret |= VM_FAULT_FALLBACK;
4520     + }
4521     put_page(page);
4522     }
4523     count_vm_event(THP_FAULT_FALLBACK);
4524     @@ -1173,9 +1175,10 @@ alloc:
4525     if (page) {
4526     split_huge_page(page);
4527     put_page(page);
4528     - }
4529     + } else
4530     + split_huge_page_pmd(vma, address, pmd);
4531     + ret |= VM_FAULT_FALLBACK;
4532     count_vm_event(THP_FAULT_FALLBACK);
4533     - ret |= VM_FAULT_OOM;
4534     goto out;
4535     }
4536    
4537     diff --git a/mm/memory.c b/mm/memory.c
4538     index d176154c243f..22e67a2c955b 100644
4539     --- a/mm/memory.c
4540     +++ b/mm/memory.c
4541     @@ -3756,7 +3756,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
4542     if (unlikely(is_vm_hugetlb_page(vma)))
4543     return hugetlb_fault(mm, vma, address, flags);
4544    
4545     -retry:
4546     pgd = pgd_offset(mm, address);
4547     pud = pud_alloc(mm, pgd, address);
4548     if (!pud)
4549     @@ -3794,20 +3793,13 @@ retry:
4550     if (dirty && !pmd_write(orig_pmd)) {
4551     ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
4552     orig_pmd);
4553     - /*
4554     - * If COW results in an oom, the huge pmd will
4555     - * have been split, so retry the fault on the
4556     - * pte for a smaller charge.
4557     - */
4558     - if (unlikely(ret & VM_FAULT_OOM))
4559     - goto retry;
4560     - return ret;
4561     + if (!(ret & VM_FAULT_FALLBACK))
4562     + return ret;
4563     } else {
4564     huge_pmd_set_accessed(mm, vma, address, pmd,
4565     orig_pmd, dirty);
4566     + return 0;
4567     }
4568     -
4569     - return 0;
4570     }
4571     }
4572    
4573     diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
4574     index 990afab2be1b..c76a4388a5d7 100644
4575     --- a/net/9p/trans_virtio.c
4576     +++ b/net/9p/trans_virtio.c
4577     @@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
4578     int count = nr_pages;
4579     while (nr_pages) {
4580     s = rest_of_page(data);
4581     - pages[index++] = kmap_to_page(data);
4582     + if (is_vmalloc_addr(data))
4583     + pages[index++] = vmalloc_to_page(data);
4584     + else
4585     + pages[index++] = kmap_to_page(data);
4586     data += s;
4587     nr_pages--;
4588     }
4589     diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
4590     index ca04163635da..0d3400167ef3 100644
4591     --- a/net/bridge/br_device.c
4592     +++ b/net/bridge/br_device.c
4593     @@ -219,6 +219,33 @@ static void br_netpoll_cleanup(struct net_device *dev)
4594     br_netpoll_disable(p);
4595     }
4596    
4597     +static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
4598     +{
4599     + struct netpoll *np;
4600     + int err;
4601     +
4602     + np = kzalloc(sizeof(*p->np), gfp);
4603     + if (!np)
4604     + return -ENOMEM;
4605     +
4606     + err = __netpoll_setup(np, p->dev, gfp);
4607     + if (err) {
4608     + kfree(np);
4609     + return err;
4610     + }
4611     +
4612     + p->np = np;
4613     + return err;
4614     +}
4615     +
4616     +int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
4617     +{
4618     + if (!p->br->dev->npinfo)
4619     + return 0;
4620     +
4621     + return __br_netpoll_enable(p, gfp);
4622     +}
4623     +
4624     static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
4625     gfp_t gfp)
4626     {
4627     @@ -229,7 +256,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
4628     list_for_each_entry(p, &br->port_list, list) {
4629     if (!p->dev)
4630     continue;
4631     - err = br_netpoll_enable(p, gfp);
4632     + err = __br_netpoll_enable(p, gfp);
4633     if (err)
4634     goto fail;
4635     }
4636     @@ -242,28 +269,6 @@ fail:
4637     goto out;
4638     }
4639    
4640     -int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
4641     -{
4642     - struct netpoll *np;
4643     - int err;
4644     -
4645     - if (!p->br->dev->npinfo)
4646     - return 0;
4647     -
4648     - np = kzalloc(sizeof(*p->np), gfp);
4649     - if (!np)
4650     - return -ENOMEM;
4651     -
4652     - err = __netpoll_setup(np, p->dev, gfp);
4653     - if (err) {
4654     - kfree(np);
4655     - return err;
4656     - }
4657     -
4658     - p->np = np;
4659     - return err;
4660     -}
4661     -
4662     void br_netpoll_disable(struct net_bridge_port *p)
4663     {
4664     struct netpoll *np = p->np;
4665     diff --git a/net/can/af_can.c b/net/can/af_can.c
4666     index 3ab8dd2e1282..ae3f07eb6cd7 100644
4667     --- a/net/can/af_can.c
4668     +++ b/net/can/af_can.c
4669     @@ -57,6 +57,7 @@
4670     #include <linux/skbuff.h>
4671     #include <linux/can.h>
4672     #include <linux/can/core.h>
4673     +#include <linux/can/skb.h>
4674     #include <linux/ratelimit.h>
4675     #include <net/net_namespace.h>
4676     #include <net/sock.h>
4677     @@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
4678     return -ENOMEM;
4679     }
4680    
4681     - newskb->sk = skb->sk;
4682     + can_skb_set_owner(newskb, skb->sk);
4683     newskb->ip_summed = CHECKSUM_UNNECESSARY;
4684     newskb->pkt_type = PACKET_BROADCAST;
4685     }
4686     diff --git a/net/can/bcm.c b/net/can/bcm.c
4687     index 46f20bfafc0e..b57452a65fb9 100644
4688     --- a/net/can/bcm.c
4689     +++ b/net/can/bcm.c
4690     @@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
4691    
4692     /* send with loopback */
4693     skb->dev = dev;
4694     - skb->sk = op->sk;
4695     + can_skb_set_owner(skb, op->sk);
4696     can_send(skb, 1);
4697    
4698     /* update statistics */
4699     @@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
4700    
4701     can_skb_prv(skb)->ifindex = dev->ifindex;
4702     skb->dev = dev;
4703     - skb->sk = sk;
4704     + can_skb_set_owner(skb, sk);
4705     err = can_send(skb, 1); /* send with loopback */
4706     dev_put(dev);
4707    
4708     diff --git a/net/core/dev.c b/net/core/dev.c
4709     index 3d1387461279..b32797590b40 100644
4710     --- a/net/core/dev.c
4711     +++ b/net/core/dev.c
4712     @@ -2398,7 +2398,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
4713     * 2. No high memory really exists on this machine.
4714     */
4715    
4716     -static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
4717     +static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
4718     {
4719     #ifdef CONFIG_HIGHMEM
4720     int i;
4721     @@ -2478,34 +2478,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
4722     }
4723    
4724     static netdev_features_t harmonize_features(struct sk_buff *skb,
4725     - netdev_features_t features)
4726     + const struct net_device *dev,
4727     + netdev_features_t features)
4728     {
4729     if (skb->ip_summed != CHECKSUM_NONE &&
4730     !can_checksum_protocol(features, skb_network_protocol(skb))) {
4731     features &= ~NETIF_F_ALL_CSUM;
4732     - } else if (illegal_highdma(skb->dev, skb)) {
4733     + } else if (illegal_highdma(dev, skb)) {
4734     features &= ~NETIF_F_SG;
4735     }
4736    
4737     return features;
4738     }
4739    
4740     -netdev_features_t netif_skb_features(struct sk_buff *skb)
4741     +netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
4742     + const struct net_device *dev)
4743     {
4744     __be16 protocol = skb->protocol;
4745     - netdev_features_t features = skb->dev->features;
4746     + netdev_features_t features = dev->features;
4747    
4748     - if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
4749     + if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
4750     features &= ~NETIF_F_GSO_MASK;
4751    
4752     if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
4753     struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
4754     protocol = veh->h_vlan_encapsulated_proto;
4755     } else if (!vlan_tx_tag_present(skb)) {
4756     - return harmonize_features(skb, features);
4757     + return harmonize_features(skb, dev, features);
4758     }
4759    
4760     - features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
4761     + features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
4762     NETIF_F_HW_VLAN_STAG_TX);
4763    
4764     if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
4765     @@ -2513,9 +2515,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
4766     NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
4767     NETIF_F_HW_VLAN_STAG_TX;
4768    
4769     - return harmonize_features(skb, features);
4770     + return harmonize_features(skb, dev, features);
4771     }
4772     -EXPORT_SYMBOL(netif_skb_features);
4773     +EXPORT_SYMBOL(netif_skb_dev_features);
4774    
4775     /*
4776     * Returns true if either:
4777     diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
4778     index f409e0bd35c0..185c341fafbd 100644
4779     --- a/net/core/fib_rules.c
4780     +++ b/net/core/fib_rules.c
4781     @@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
4782     attach_rules(&ops->rules_list, dev);
4783     break;
4784    
4785     + case NETDEV_CHANGENAME:
4786     + list_for_each_entry(ops, &net->rules_ops, list) {
4787     + detach_rules(&ops->rules_list, dev);
4788     + attach_rules(&ops->rules_list, dev);
4789     + }
4790     + break;
4791     +
4792     case NETDEV_UNREGISTER:
4793     list_for_each_entry(ops, &net->rules_ops, list)
4794     detach_rules(&ops->rules_list, dev);
4795     diff --git a/net/core/netpoll.c b/net/core/netpoll.c
4796     index 0c1482c6ff98..462cdc97fad8 100644
4797     --- a/net/core/netpoll.c
4798     +++ b/net/core/netpoll.c
4799     @@ -943,6 +943,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
4800     {
4801     char *cur=opt, *delim;
4802     int ipv6;
4803     + bool ipversion_set = false;
4804    
4805     if (*cur != '@') {
4806     if ((delim = strchr(cur, '@')) == NULL)
4807     @@ -955,6 +956,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
4808     cur++;
4809    
4810     if (*cur != '/') {
4811     + ipversion_set = true;
4812     if ((delim = strchr(cur, '/')) == NULL)
4813     goto parse_failed;
4814     *delim = 0;
4815     @@ -997,7 +999,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
4816     ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
4817     if (ipv6 < 0)
4818     goto parse_failed;
4819     - else if (np->ipv6 != (bool)ipv6)
4820     + else if (ipversion_set && np->ipv6 != (bool)ipv6)
4821     goto parse_failed;
4822     else
4823     np->ipv6 = (bool)ipv6;
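
The netpoll_parse_options() fix: np->ipv6 starts out unset, so the old code rejected any configuration whose remote address family differed from the default rather than from an explicitly given local address. The new ipversion_set flag records whether a local IP was actually parsed; only then is a family mismatch an error. The rule in isolation, as a sketch with illustrative names:

    #include <stdbool.h>

    /* a family conflict exists only if a local address was given and
     * its family disagrees with the remote one; otherwise the remote
     * address alone decides np->ipv6 */
    static bool family_conflict(bool local_given, bool local_is_v6,
                                bool remote_is_v6)
    {
        return local_given && local_is_v6 != remote_is_v6;
    }
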
4824     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
4825     index 2c7baa809913..21571dc4f2df 100644
4826     --- a/net/core/skbuff.c
4827     +++ b/net/core/skbuff.c
4828     @@ -47,6 +47,8 @@
4829     #include <linux/in.h>
4830     #include <linux/inet.h>
4831     #include <linux/slab.h>
4832     +#include <linux/tcp.h>
4833     +#include <linux/udp.h>
4834     #include <linux/netdevice.h>
4835     #ifdef CONFIG_NET_CLS_ACT
4836     #include <net/pkt_sched.h>
4837     @@ -3519,3 +3521,26 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4838     nf_reset_trace(skb);
4839     }
4840     EXPORT_SYMBOL_GPL(skb_scrub_packet);
4841     +
4842     +/**
4843     + * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4844     + *
4845     + * @skb: GSO skb
4846     + *
4847     + * skb_gso_transport_seglen is used to determine the real size of the
4848     + * individual segments, including Layer4 headers (TCP/UDP).
4849     + *
4850     + * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4851     + */
4852     +unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4853     +{
4854     + const struct skb_shared_info *shinfo = skb_shinfo(skb);
4855     + unsigned int hdr_len;
4856     +
4857     + if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4858     + hdr_len = tcp_hdrlen(skb);
4859     + else
4860     + hdr_len = sizeof(struct udphdr);
4861     + return hdr_len + shinfo->gso_size;
4862     +}
4863     +EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
4864     diff --git a/net/core/sock.c b/net/core/sock.c
4865     index 5cec994ee2f3..831a0d0af49f 100644
4866     --- a/net/core/sock.c
4867     +++ b/net/core/sock.c
4868     @@ -1795,7 +1795,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
4869     while (order) {
4870     if (npages >= 1 << order) {
4871     page = alloc_pages(sk->sk_allocation |
4872     - __GFP_COMP | __GFP_NOWARN,
4873     + __GFP_COMP |
4874     + __GFP_NOWARN |
4875     + __GFP_NORETRY,
4876     order);
4877     if (page)
4878     goto fill_page;
4879     @@ -1857,7 +1859,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
4880     gfp_t gfp = sk->sk_allocation;
4881    
4882     if (order)
4883     - gfp |= __GFP_COMP | __GFP_NOWARN;
4884     + gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
4885     pfrag->page = alloc_pages(gfp, order);
4886     if (likely(pfrag->page)) {
4887     pfrag->offset = 0;
4888     diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
4889     index 008f33703a33..ceabe6f13216 100644
4890     --- a/net/ieee802154/6lowpan.c
4891     +++ b/net/ieee802154/6lowpan.c
4892     @@ -1261,7 +1261,27 @@ static struct header_ops lowpan_header_ops = {
4893     .create = lowpan_header_create,
4894     };
4895    
4896     +static struct lock_class_key lowpan_tx_busylock;
4897     +static struct lock_class_key lowpan_netdev_xmit_lock_key;
4898     +
4899     +static void lowpan_set_lockdep_class_one(struct net_device *dev,
4900     + struct netdev_queue *txq,
4901     + void *_unused)
4902     +{
4903     + lockdep_set_class(&txq->_xmit_lock,
4904     + &lowpan_netdev_xmit_lock_key);
4905     +}
4906     +
4907     +
4908     +static int lowpan_dev_init(struct net_device *dev)
4909     +{
4910     + netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
4911     + dev->qdisc_tx_busylock = &lowpan_tx_busylock;
4912     + return 0;
4913     +}
4914     +
4915     static const struct net_device_ops lowpan_netdev_ops = {
4916     + .ndo_init = lowpan_dev_init,
4917     .ndo_start_xmit = lowpan_xmit,
4918     .ndo_set_mac_address = lowpan_set_address,
4919     };
4920     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
4921     index a1b5bcbd04ae..f4b34d8f92fe 100644
4922     --- a/net/ipv4/devinet.c
4923     +++ b/net/ipv4/devinet.c
4924     @@ -1435,7 +1435,8 @@ static size_t inet_nlmsg_size(void)
4925     + nla_total_size(4) /* IFA_ADDRESS */
4926     + nla_total_size(4) /* IFA_LOCAL */
4927     + nla_total_size(4) /* IFA_BROADCAST */
4928     - + nla_total_size(IFNAMSIZ); /* IFA_LABEL */
4929     + + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
4930     + + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
4931     }
4932    
4933     static inline u32 cstamp_delta(unsigned long cstamp)
4934     diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
4935     index 694de3b7aebf..98d7e53d2afd 100644
4936     --- a/net/ipv4/ip_forward.c
4937     +++ b/net/ipv4/ip_forward.c
4938     @@ -39,6 +39,71 @@
4939     #include <net/route.h>
4940     #include <net/xfrm.h>
4941    
4942     +static bool ip_may_fragment(const struct sk_buff *skb)
4943     +{
4944     + return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
4945     + !skb->local_df;
4946     +}
4947     +
4948     +static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
4949     +{
4950     + if (skb->len <= mtu || skb->local_df)
4951     + return false;
4952     +
4953     + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
4954     + return false;
4955     +
4956     + return true;
4957     +}
4958     +
4959     +static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
4960     +{
4961     + unsigned int mtu;
4962     +
4963     + if (skb->local_df || !skb_is_gso(skb))
4964     + return false;
4965     +
4966     + mtu = dst_mtu(skb_dst(skb));
4967     +
4968     + /* if seglen > mtu, do software segmentation for IP fragmentation on
4969     + * output. DF bit cannot be set since ip_forward would have sent
4970     + * icmp error.
4971     + */
4972     + return skb_gso_network_seglen(skb) > mtu;
4973     +}
4974     +
4975     +/* called if GSO skb needs to be fragmented on forward */
4976     +static int ip_forward_finish_gso(struct sk_buff *skb)
4977     +{
4978     + struct dst_entry *dst = skb_dst(skb);
4979     + netdev_features_t features;
4980     + struct sk_buff *segs;
4981     + int ret = 0;
4982     +
4983     + features = netif_skb_dev_features(skb, dst->dev);
4984     + segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
4985     + if (IS_ERR(segs)) {
4986     + kfree_skb(skb);
4987     + return -ENOMEM;
4988     + }
4989     +
4990     + consume_skb(skb);
4991     +
4992     + do {
4993     + struct sk_buff *nskb = segs->next;
4994     + int err;
4995     +
4996     + segs->next = NULL;
4997     + err = dst_output(segs);
4998     +
4999     + if (err && ret == 0)
5000     + ret = err;
5001     + segs = nskb;
5002     + } while (segs);
5003     +
5004     + return ret;
5005     +}
5006     +
5007     static int ip_forward_finish(struct sk_buff *skb)
5008     {
5009     struct ip_options *opt = &(IPCB(skb)->opt);
5010     @@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
5011     if (unlikely(opt->optlen))
5012     ip_forward_options(skb);
5013    
5014     + if (ip_gso_exceeds_dst_mtu(skb))
5015     + return ip_forward_finish_gso(skb);
5016     +
5017     return dst_output(skb);
5018     }
5019    
5020     @@ -88,8 +156,7 @@ int ip_forward(struct sk_buff *skb)
5021     if (opt->is_strictroute && rt->rt_uses_gateway)
5022     goto sr_failed;
5023    
5024     - if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
5025     - (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
5026     + if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, dst_mtu(&rt->dst))) {
5027     IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
5028     icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
5029     htonl(dst_mtu(&rt->dst)));
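
Forwarding used to fire ICMP_FRAG_NEEDED or drop whenever skb->len exceeded the route MTU, which is the wrong test for a GSO skb. ip_exceeds_mtu() now consults skb_gso_network_seglen(), and when the segments really are larger than the egress MTU with DF clear (otherwise ip_forward() already sent the ICMP error), ip_forward_finish_gso() software-segments against the output device's features via netif_skb_dev_features(skb, dst->dev) and transmits each piece, preserving the first error. The detach-and-send walk over the singly linked list that skb_gso_segment() returns, as a compilable sketch (send_one() is a hypothetical stand-in for dst_output()):

    struct seg { struct seg *next; };

    /* hypothetical per-segment transmit, stubbed for the sketch */
    static int send_one(struct seg *s) { (void)s; return 0; }

    /* detach each node before handing it off; remember the first
     * error but still try to send the remaining segments */
    static int send_all(struct seg *segs)
    {
        int ret = 0;

        while (segs) {
            struct seg *next = segs->next;
            int err;

            segs->next = NULL;
            err = send_one(segs);
            if (err && ret == 0)
                ret = err;
            segs = next;
        }
        return ret;
    }
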
5030     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
5031     index 62290b5124c8..2557b9a52373 100644
5032     --- a/net/ipv4/route.c
5033     +++ b/net/ipv4/route.c
5034     @@ -1596,6 +1596,7 @@ static int __mkroute_input(struct sk_buff *skb,
5035     rth->rt_gateway = 0;
5036     rth->rt_uses_gateway = 0;
5037     INIT_LIST_HEAD(&rth->rt_uncached);
5038     + RT_CACHE_STAT_INC(in_slow_tot);
5039    
5040     rth->dst.input = ip_forward;
5041     rth->dst.output = ip_output;
5042     @@ -1697,8 +1698,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
5043     if (err != 0)
5044     goto no_route;
5045    
5046     - RT_CACHE_STAT_INC(in_slow_tot);
5047     -
5048     if (res.type == RTN_BROADCAST)
5049     goto brd_input;
5050    
5051     @@ -1767,6 +1766,7 @@ local_input:
5052     rth->rt_gateway = 0;
5053     rth->rt_uses_gateway = 0;
5054     INIT_LIST_HEAD(&rth->rt_uncached);
5055     + RT_CACHE_STAT_INC(in_slow_tot);
5056     if (res.type == RTN_UNREACHABLE) {
5057     rth->dst.input= ip_error;
5058     rth->dst.error= -err;
5059     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5060     index e912634b2f05..fb8227a8c004 100644
5061     --- a/net/ipv4/tcp_output.c
5062     +++ b/net/ipv4/tcp_output.c
5063     @@ -696,7 +696,8 @@ static void tcp_tsq_handler(struct sock *sk)
5064     if ((1 << sk->sk_state) &
5065     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
5066     TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
5067     - tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
5068     + tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
5069     + 0, GFP_ATOMIC);
5070     }
5071     /*
5072     * One tasklet per cpu tries to send more skbs.
5073     @@ -1884,7 +1885,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
5074    
5075     if (atomic_read(&sk->sk_wmem_alloc) > limit) {
5076     set_bit(TSQ_THROTTLED, &tp->tsq_flags);
5077     - break;
5078     + /* It is possible TX completion already happened
5079     + * before we set TSQ_THROTTLED, so we must
5080     + * test again the condition.
5081     + * We abuse smp_mb__after_clear_bit() because
5082     + * there is no smp_mb__after_set_bit() yet
5083     + */
5084     + smp_mb__after_clear_bit();
5085     + if (atomic_read(&sk->sk_wmem_alloc) > limit)
5086     + break;
5087     }
5088    
5089     limit = mss_now;
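
The tcp_write_xmit() change closes a lost-wakeup window: a TX completion can run between the wmem_alloc test and the TSQ_THROTTLED set, after which nobody would reschedule the queue. The fix sets the bit, issues a full barrier (reusing smp_mb__after_clear_bit(), since as the comment notes 3.12 has no smp_mb__after_set_bit()), and re-reads the counter before giving up. The tcp_tsq_handler() hunk separately passes the socket's real nonagle setting instead of 0 so small queued segments are not stranded by the tasklet path. The same shape in portable C11, with illustrative names:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_uint wmem_alloc;  /* queued bytes, dropped by completions */
    static atomic_uint flags;       /* bit 0 plays TSQ_THROTTLED */

    static bool should_stop_sending(unsigned int limit)
    {
        if (atomic_load(&wmem_alloc) <= limit)
            return false;

        atomic_fetch_or(&flags, 1u);        /* set_bit(TSQ_THROTTLED) */
        /* the store above must be visible before the re-read below,
         * pairing with the completion side that frees memory and
         * then tests the flag */
        atomic_thread_fence(memory_order_seq_cst);

        return atomic_load(&wmem_alloc) > limit;  /* re-check once */
    }
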
5090     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
5091     index b6fa35e7425c..68fd4918315c 100644
5092     --- a/net/ipv6/ip6_output.c
5093     +++ b/net/ipv6/ip6_output.c
5094     @@ -321,6 +321,20 @@ static inline int ip6_forward_finish(struct sk_buff *skb)
5095     return dst_output(skb);
5096     }
5097    
5098     +static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
5099     +{
5100     + if (skb->len <= mtu || skb->local_df)
5101     + return false;
5102     +
5103     + if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
5104     + return true;
5105     +
5106     + if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
5107     + return false;
5108     +
5109     + return true;
5110     +}
5111     +
5112     int ip6_forward(struct sk_buff *skb)
5113     {
5114     struct dst_entry *dst = skb_dst(skb);
5115     @@ -443,8 +457,7 @@ int ip6_forward(struct sk_buff *skb)
5116     if (mtu < IPV6_MIN_MTU)
5117     mtu = IPV6_MIN_MTU;
5118    
5119     - if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
5120     - (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
5121     + if (ip6_pkt_too_big(skb, mtu)) {
5122     /* Again, force OUTPUT device used as source address */
5123     skb->dev = dst->dev;
5124     icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
5125     diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
5126     index 6ddda282f9c7..fecd35af1935 100644
5127     --- a/net/sched/sch_tbf.c
5128     +++ b/net/sched/sch_tbf.c
5129     @@ -21,7 +21,6 @@
5130     #include <net/netlink.h>
5131     #include <net/sch_generic.h>
5132     #include <net/pkt_sched.h>
5133     -#include <net/tcp.h>
5134    
5135    
5136     /* Simple Token Bucket Filter.
5137     @@ -122,16 +121,10 @@ struct tbf_sched_data {
5138     * Return length of individual segments of a gso packet,
5139     * including all headers (MAC, IP, TCP/UDP)
5140     */
5141     -static unsigned int skb_gso_seglen(const struct sk_buff *skb)
5142     +static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5143     {
5144     unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5145     - const struct skb_shared_info *shinfo = skb_shinfo(skb);
5146     -
5147     - if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5148     - hdr_len += tcp_hdrlen(skb);
5149     - else
5150     - hdr_len += sizeof(struct udphdr);
5151     - return hdr_len + shinfo->gso_size;
5152     + return hdr_len + skb_gso_transport_seglen(skb);
5153     }
5154    
5155     /* GSO packet is too big, segment it so that tbf can transmit
5156     @@ -176,7 +169,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
5157     int ret;
5158    
5159     if (qdisc_pkt_len(skb) > q->max_size) {
5160     - if (skb_is_gso(skb) && skb_gso_seglen(skb) <= q->max_size)
5161     + if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size)
5162     return tbf_segment(skb, sch);
5163     return qdisc_reshape_fail(skb, sch);
5164     }
5165     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
5166     index 911b71b26b0e..14c801528da8 100644
5167     --- a/net/sctp/socket.c
5168     +++ b/net/sctp/socket.c
5169     @@ -65,6 +65,7 @@
5170     #include <linux/crypto.h>
5171     #include <linux/slab.h>
5172     #include <linux/file.h>
5173     +#include <linux/compat.h>
5174    
5175     #include <net/ip.h>
5176     #include <net/icmp.h>
5177     @@ -1369,11 +1370,19 @@ static int sctp_setsockopt_connectx(struct sock* sk,
5178     /*
5179     * New (hopefully final) interface for the API.
5180     * We use the sctp_getaddrs_old structure so that user-space library
5181     - * can avoid any unnecessary allocations. The only defferent part
5182     + * can avoid any unnecessary allocations. The only different part
5183     * is that we store the actual length of the address buffer into the
5184     - * addrs_num structure member. That way we can re-use the existing
5185     + * addrs_num structure member. That way we can re-use the existing
5186     * code.
5187     */
5188     +#ifdef CONFIG_COMPAT
5189     +struct compat_sctp_getaddrs_old {
5190     + sctp_assoc_t assoc_id;
5191     + s32 addr_num;
5192     + compat_uptr_t addrs; /* struct sockaddr * */
5193     +};
5194     +#endif
5195     +
5196     static int sctp_getsockopt_connectx3(struct sock* sk, int len,
5197     char __user *optval,
5198     int __user *optlen)
5199     @@ -1382,16 +1391,30 @@ static int sctp_getsockopt_connectx3(struct sock* sk, int len,
5200     sctp_assoc_t assoc_id = 0;
5201     int err = 0;
5202    
5203     - if (len < sizeof(param))
5204     - return -EINVAL;
5205     +#ifdef CONFIG_COMPAT
5206     + if (is_compat_task()) {
5207     + struct compat_sctp_getaddrs_old param32;
5208    
5209     - if (copy_from_user(&param, optval, sizeof(param)))
5210     - return -EFAULT;
5211     + if (len < sizeof(param32))
5212     + return -EINVAL;
5213     + if (copy_from_user(&param32, optval, sizeof(param32)))
5214     + return -EFAULT;
5215    
5216     - err = __sctp_setsockopt_connectx(sk,
5217     - (struct sockaddr __user *)param.addrs,
5218     - param.addr_num, &assoc_id);
5219     + param.assoc_id = param32.assoc_id;
5220     + param.addr_num = param32.addr_num;
5221     + param.addrs = compat_ptr(param32.addrs);
5222     + } else
5223     +#endif
5224     + {
5225     + if (len < sizeof(param))
5226     + return -EINVAL;
5227     + if (copy_from_user(&param, optval, sizeof(param)))
5228     + return -EFAULT;
5229     + }
5230    
5231     + err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
5232     + param.addrs, param.addr_num,
5233     + &assoc_id);
5234     if (err == 0 || err == -EINPROGRESS) {
5235     if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
5236     return -EFAULT;
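
struct sctp_getaddrs_old ends in a user pointer, so its layout differs between a 32-bit task and a 64-bit kernel: a compat caller's 12-byte structure either fails the native length check or is misparsed, with the pointer read from the wrong offset. The patch therefore decodes a compat mirror of the struct (compat_uptr_t for the pointer) under is_compat_task() and widens it with compat_ptr(). The size mismatch, demonstrated in plain C:

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t sctp_assoc_t;

    struct native_getaddrs_old {     /* what a 64-bit kernel sees */
        sctp_assoc_t assoc_id;
        int32_t      addr_num;
        void        *addrs;          /* 8 bytes plus padding on LP64 */
    };

    struct compat_getaddrs_old {     /* what a 32-bit task passed */
        sctp_assoc_t assoc_id;
        int32_t      addr_num;
        uint32_t     addrs;          /* compat_uptr_t: always 4 bytes */
    };

    int main(void)
    {
        /* typically 16 vs 12 on x86-64: copying sizeof(native) from
         * a 32-bit caller reads past its buffer */
        printf("native %zu, compat %zu\n",
               sizeof(struct native_getaddrs_old),
               sizeof(struct compat_getaddrs_old));
        return 0;
    }
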
5237     diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
5238     index cc24323d3045..a7f9821d1760 100644
5239     --- a/net/sunrpc/auth_gss/auth_gss.c
5240     +++ b/net/sunrpc/auth_gss/auth_gss.c
5241     @@ -108,6 +108,7 @@ struct gss_auth {
5242     static DEFINE_SPINLOCK(pipe_version_lock);
5243     static struct rpc_wait_queue pipe_version_rpc_waitqueue;
5244     static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
5245     +static void gss_put_auth(struct gss_auth *gss_auth);
5246    
5247     static void gss_free_ctx(struct gss_cl_ctx *);
5248     static const struct rpc_pipe_ops gss_upcall_ops_v0;
5249     @@ -320,6 +321,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
5250     if (gss_msg->ctx != NULL)
5251     gss_put_ctx(gss_msg->ctx);
5252     rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
5253     + gss_put_auth(gss_msg->auth);
5254     kfree(gss_msg);
5255     }
5256    
5257     @@ -486,6 +488,7 @@ gss_alloc_msg(struct gss_auth *gss_auth,
5258     default:
5259     gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
5260     };
5261     + kref_get(&gss_auth->kref);
5262     return gss_msg;
5263     }
5264    
5265     @@ -1053,6 +1056,12 @@ gss_free_callback(struct kref *kref)
5266     }
5267    
5268     static void
5269     +gss_put_auth(struct gss_auth *gss_auth)
5270     +{
5271     + kref_put(&gss_auth->kref, gss_free_callback);
5272     +}
5273     +
5274     +static void
5275     gss_destroy(struct rpc_auth *auth)
5276     {
5277     struct gss_auth *gss_auth = container_of(auth,
5278     @@ -1073,7 +1082,7 @@ gss_destroy(struct rpc_auth *auth)
5279     gss_auth->gss_pipe[1] = NULL;
5280     rpcauth_destroy_credcache(auth);
5281    
5282     - kref_put(&gss_auth->kref, gss_free_callback);
5283     + gss_put_auth(gss_auth);
5284     }
5285    
5286     /*
5287     @@ -1244,7 +1253,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
5288     call_rcu(&cred->cr_rcu, gss_free_cred_callback);
5289     if (ctx)
5290     gss_put_ctx(ctx);
5291     - kref_put(&gss_auth->kref, gss_free_callback);
5292     + gss_put_auth(gss_auth);
5293     }
5294    
5295     static void
5296     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
5297     index b752e1de2e7d..83a1daa642bb 100644
5298     --- a/net/sunrpc/xprtsock.c
5299     +++ b/net/sunrpc/xprtsock.c
5300     @@ -504,6 +504,7 @@ static int xs_nospace(struct rpc_task *task)
5301     struct rpc_rqst *req = task->tk_rqstp;
5302     struct rpc_xprt *xprt = req->rq_xprt;
5303     struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
5304     + struct sock *sk = transport->inet;
5305     int ret = -EAGAIN;
5306    
5307     dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
5308     @@ -521,7 +522,7 @@ static int xs_nospace(struct rpc_task *task)
5309     * window size
5310     */
5311     set_bit(SOCK_NOSPACE, &transport->sock->flags);
5312     - transport->inet->sk_write_pending++;
5313     + sk->sk_write_pending++;
5314     /* ...and wait for more buffer space */
5315     xprt_wait_for_buffer_space(task, xs_nospace_callback);
5316     }
5317     @@ -531,6 +532,9 @@ static int xs_nospace(struct rpc_task *task)
5318     }
5319    
5320     spin_unlock_bh(&xprt->transport_lock);
5321     +
5322     + /* Race breaker in case memory is freed before above code is called */
5323     + sk->sk_write_space(sk);
5324     return ret;
5325     }
5326    
5327     diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
5328     index 7e5bceddc36f..4d35eb75f129 100644
5329     --- a/security/selinux/ss/policydb.c
5330     +++ b/security/selinux/ss/policydb.c
5331     @@ -3261,10 +3261,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
5332     if (rc)
5333     return rc;
5334    
5335     - buf[0] = ft->stype;
5336     - buf[1] = ft->ttype;
5337     - buf[2] = ft->tclass;
5338     - buf[3] = otype->otype;
5339     + buf[0] = cpu_to_le32(ft->stype);
5340     + buf[1] = cpu_to_le32(ft->ttype);
5341     + buf[2] = cpu_to_le32(ft->tclass);
5342     + buf[3] = cpu_to_le32(otype->otype);
5343    
5344     rc = put_entry(buf, sizeof(u32), 4, fp);
5345     if (rc)
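
The SELinux policydb stores its on-disk words little-endian, and the surrounding writers in policydb.c funnel values through cpu_to_le32(); filename_write_helper() wrote raw host-order u32s, which only coincidentally worked on little-endian machines and emitted corrupt filename-transition records on big-endian ones. cpu_to_le32() is a no-op on LE and a byte swap on BE. A userspace equivalent using glibc's htole32() (assumption: <endian.h> is available):

    #include <endian.h>   /* htole32(); glibc extension, not kernel API */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t host = 0x01020304;
        uint32_t wire = htole32(host);   /* cpu_to_le32() analogue */
        unsigned char *p = (unsigned char *)&wire;

        /* on little-endian both orders match; on big-endian only the
         * converted value has the on-disk byte order 04 03 02 01 */
        printf("%02x %02x %02x %02x\n", p[0], p[1], p[2], p[3]);
        return 0;
    }
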
5346     diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
5347     index 6e9876f27d95..a91ad743fca4 100644
5348     --- a/sound/pci/hda/patch_ca0132.c
5349     +++ b/sound/pci/hda/patch_ca0132.c
5350     @@ -2662,60 +2662,6 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
5351     }
5352    
5353     /*
5354     - * PCM stuffs
5355     - */
5356     -static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
5357     - u32 stream_tag,
5358     - int channel_id, int format)
5359     -{
5360     - unsigned int oldval, newval;
5361     -
5362     - if (!nid)
5363     - return;
5364     -
5365     - snd_printdd(
5366     - "ca0132_setup_stream: NID=0x%x, stream=0x%x, "
5367     - "channel=%d, format=0x%x\n",
5368     - nid, stream_tag, channel_id, format);
5369     -
5370     - /* update the format-id if changed */
5371     - oldval = snd_hda_codec_read(codec, nid, 0,
5372     - AC_VERB_GET_STREAM_FORMAT,
5373     - 0);
5374     - if (oldval != format) {
5375     - msleep(20);
5376     - snd_hda_codec_write(codec, nid, 0,
5377     - AC_VERB_SET_STREAM_FORMAT,
5378     - format);
5379     - }
5380     -
5381     - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
5382     - newval = (stream_tag << 4) | channel_id;
5383     - if (oldval != newval) {
5384     - snd_hda_codec_write(codec, nid, 0,
5385     - AC_VERB_SET_CHANNEL_STREAMID,
5386     - newval);
5387     - }
5388     -}
5389     -
5390     -static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
5391     -{
5392     - unsigned int val;
5393     -
5394     - if (!nid)
5395     - return;
5396     -
5397     - snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid);
5398     -
5399     - val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
5400     - if (!val)
5401     - return;
5402     -
5403     - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
5404     - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
5405     -}
5406     -
5407     -/*
5408     * PCM callbacks
5409     */
5410     static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
5411     @@ -2726,7 +2672,7 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
5412     {
5413     struct ca0132_spec *spec = codec->spec;
5414    
5415     - ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
5416     + snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
5417    
5418     return 0;
5419     }
5420     @@ -2745,7 +2691,7 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
5421     if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
5422     msleep(50);
5423    
5424     - ca0132_cleanup_stream(codec, spec->dacs[0]);
5425     + snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
5426    
5427     return 0;
5428     }
5429     @@ -2822,10 +2768,8 @@ static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
5430     unsigned int format,
5431     struct snd_pcm_substream *substream)
5432     {
5433     - struct ca0132_spec *spec = codec->spec;
5434     -
5435     - ca0132_setup_stream(codec, spec->adcs[substream->number],
5436     - stream_tag, 0, format);
5437     + snd_hda_codec_setup_stream(codec, hinfo->nid,
5438     + stream_tag, 0, format);
5439    
5440     return 0;
5441     }
5442     @@ -2839,7 +2783,7 @@ static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
5443     if (spec->dsp_state == DSP_DOWNLOADING)
5444     return 0;
5445    
5446     - ca0132_cleanup_stream(codec, hinfo->nid);
5447     + snd_hda_codec_cleanup_stream(codec, hinfo->nid);
5448     return 0;
5449     }
5450    
5451     @@ -4742,6 +4686,8 @@ static int patch_ca0132(struct hda_codec *codec)
5452     return err;
5453    
5454     codec->patch_ops = ca0132_patch_ops;
5455     + codec->pcm_format_first = 1;
5456     + codec->no_sticky_stream = 1;
5457    
5458     return 0;
5459     }
5460     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5461     index 003a7ce5791c..1be437f533a6 100644
5462     --- a/sound/pci/hda/patch_realtek.c
5463     +++ b/sound/pci/hda/patch_realtek.c
5464     @@ -3578,6 +3578,8 @@ static void alc283_fixup_chromebook(struct hda_codec *codec,
5465     case HDA_FIXUP_ACT_PRE_PROBE:
5466     alc283_chromebook_caps(codec);
5467     spec->gen.hp_automute_hook = alc283_hp_automute_hook;
5468     + break;
5469     + case HDA_FIXUP_ACT_INIT:
5470     /* MIC2-VREF control */
5471     /* Set to manual mode */
5472     val = alc_read_coef_idx(codec, 0x06);
5473     @@ -3686,6 +3688,7 @@ enum {
5474     ALC271_FIXUP_HP_GATE_MIC_JACK,
5475     ALC269_FIXUP_ACER_AC700,
5476     ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
5477     + ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED,
5478     ALC269VB_FIXUP_ORDISSIMO_EVE2,
5479     ALC283_FIXUP_CHROME_BOOK,
5480     ALC282_FIXUP_ASUS_TX300,
5481     @@ -3955,6 +3958,12 @@ static const struct hda_fixup alc269_fixups[] = {
5482     .type = HDA_FIXUP_FUNC,
5483     .v.func = alc269_fixup_limit_int_mic_boost,
5484     },
5485     + [ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED] = {
5486     + .type = HDA_FIXUP_FUNC,
5487     + .v.func = alc269_fixup_limit_int_mic_boost,
5488     + .chained = true,
5489     + .chain_id = ALC269_FIXUP_HP_MUTE_LED_MIC1,
5490     + },
5491     [ALC269VB_FIXUP_ORDISSIMO_EVE2] = {
5492     .type = HDA_FIXUP_PINS,
5493     .v.pins = (const struct hda_pintbl[]) {
5494     @@ -4041,6 +4050,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5495     SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
5496     SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5497     SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
5498     + SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
5499     SND_PCI_QUIRK(0x103c, 0x21ed, "HP Falco Chromebook", ALC283_FIXUP_CHROME_BOOK),
5500     SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
5501     SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
5502     @@ -4571,6 +4581,25 @@ static void alc272_fixup_mario(struct hda_codec *codec,
5503     "hda_codec: failed to override amp caps for NID 0x2\n");
5504     }
5505    
5506     +static const struct snd_pcm_chmap_elem asus_pcm_2_1_chmaps[] = {
5507     + { .channels = 2,
5508     + .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
5509     + { .channels = 4,
5510     + .map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR,
5511     + SNDRV_CHMAP_NA, SNDRV_CHMAP_LFE } }, /* LFE only on right */
5512     + { }
5513     +};
5514     +
5515     +/* override the 2.1 chmap */
5516     +static void alc662_fixup_bass_chmap(struct hda_codec *codec,
5517     + const struct hda_fixup *fix, int action)
5518     +{
5519     + if (action == HDA_FIXUP_ACT_BUILD) {
5520     + struct alc_spec *spec = codec->spec;
5521     + spec->gen.pcm_rec[0].stream[0].chmap = asus_pcm_2_1_chmaps;
5522     + }
5523     +}
5524     +
5525     enum {
5526     ALC662_FIXUP_ASPIRE,
5527     ALC662_FIXUP_IDEAPAD,
5528     @@ -4591,6 +4620,9 @@ enum {
5529     ALC662_FIXUP_INV_DMIC,
5530     ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
5531     ALC668_FIXUP_HEADSET_MODE,
5532     + ALC662_FIXUP_BASS_CHMAP,
5533     + ALC662_FIXUP_BASS_1A,
5534     + ALC662_FIXUP_BASS_1A_CHMAP,
5535     };
5536    
5537     static const struct hda_fixup alc662_fixups[] = {
5538     @@ -4765,6 +4797,25 @@ static const struct hda_fixup alc662_fixups[] = {
5539     .type = HDA_FIXUP_FUNC,
5540     .v.func = alc_fixup_headset_mode_alc668,
5541     },
5542     + [ALC662_FIXUP_BASS_CHMAP] = {
5543     + .type = HDA_FIXUP_FUNC,
5544     + .v.func = alc662_fixup_bass_chmap,
5545     + .chained = true,
5546     + .chain_id = ALC662_FIXUP_ASUS_MODE4
5547     + },
5548     + [ALC662_FIXUP_BASS_1A] = {
5549     + .type = HDA_FIXUP_PINS,
5550     + .v.pins = (const struct hda_pintbl[]) {
5551     + {0x1a, 0x80106111}, /* bass speaker */
5552     + {}
5553     + },
5554     + },
5555     + [ALC662_FIXUP_BASS_1A_CHMAP] = {
5556     + .type = HDA_FIXUP_FUNC,
5557     + .v.func = alc662_fixup_bass_chmap,
5558     + .chained = true,
5559     + .chain_id = ALC662_FIXUP_BASS_1A,
5560     + },
5561     };
5562    
5563     static const struct snd_pci_quirk alc662_fixup_tbl[] = {
5564     @@ -4777,9 +4828,15 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
5565     SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
5566     SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5567     SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5568     + SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5569     + SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5570     + SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5571     + SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5572     + SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5573     SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
5574     - SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_ASUS_MODE4),
5575     - SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_ASUS_MODE4),
5576     + SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
5577     + SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
5578     + SND_PCI_QUIRK(0x1043, 0x1bf3, "ASUS N76VZ", ALC662_FIXUP_BASS_CHMAP),
5579     SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
5580     SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
5581     SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
5582     diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
5583     index fba0cef1c47f..6133423821d1 100644
5584     --- a/sound/pci/hda/patch_sigmatel.c
5585     +++ b/sound/pci/hda/patch_sigmatel.c
5586     @@ -83,6 +83,7 @@ enum {
5587     STAC_DELL_M6_BOTH,
5588     STAC_DELL_EQ,
5589     STAC_ALIENWARE_M17X,
5590     + STAC_92HD89XX_HP_FRONT_JACK,
5591     STAC_92HD73XX_MODELS
5592     };
5593    
5594     @@ -97,6 +98,7 @@ enum {
5595     STAC_92HD83XXX_HP_LED,
5596     STAC_92HD83XXX_HP_INV_LED,
5597     STAC_92HD83XXX_HP_MIC_LED,
5598     + STAC_HP_LED_GPIO10,
5599     STAC_92HD83XXX_HEADSET_JACK,
5600     STAC_92HD83XXX_HP,
5601     STAC_HP_ENVY_BASS,
5602     @@ -1776,6 +1778,12 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
5603     {}
5604     };
5605    
5606     +static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
5607     + { 0x0a, 0x02214030 },
5608     + { 0x0b, 0x02A19010 },
5609     + {}
5610     +};
5611     +
5612     static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
5613     const struct hda_fixup *fix, int action)
5614     {
5615     @@ -1894,6 +1902,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
5616     [STAC_92HD73XX_NO_JD] = {
5617     .type = HDA_FIXUP_FUNC,
5618     .v.func = stac92hd73xx_fixup_no_jd,
5619     + },
5620     + [STAC_92HD89XX_HP_FRONT_JACK] = {
5621     + .type = HDA_FIXUP_PINS,
5622     + .v.pins = stac92hd89xx_hp_front_jack_pin_configs,
5623     }
5624     };
5625    
5626     @@ -1954,6 +1966,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
5627     "Alienware M17x", STAC_ALIENWARE_M17X),
5628     SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
5629     "Alienware M17x R3", STAC_DELL_EQ),
5630     + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
5631     + "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
5632     {} /* terminator */
5633     };
5634    
5635     @@ -2095,6 +2109,17 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
5636     spec->mic_mute_led_gpio = 0x08; /* GPIO3 */
5637     }
5638    
5639     +static void stac92hd83xxx_fixup_hp_led_gpio10(struct hda_codec *codec,
5640     + const struct hda_fixup *fix, int action)
5641     +{
5642     + struct sigmatel_spec *spec = codec->spec;
5643     +
5644     + if (action == HDA_FIXUP_ACT_PRE_PROBE) {
5645     + spec->gpio_led = 0x10; /* GPIO4 */
5646     + spec->default_polarity = 0;
5647     + }
5648     +}
5649     +
5650     static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
5651     const struct hda_fixup *fix, int action)
5652     {
5653     @@ -2161,6 +2186,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
5654     .chained = true,
5655     .chain_id = STAC_92HD83XXX_HP,
5656     },
5657     + [STAC_HP_LED_GPIO10] = {
5658     + .type = HDA_FIXUP_FUNC,
5659     + .v.func = stac92hd83xxx_fixup_hp_led_gpio10,
5660     + .chained = true,
5661     + .chain_id = STAC_92HD83XXX_HP,
5662     + },
5663     [STAC_92HD83XXX_HEADSET_JACK] = {
5664     .type = HDA_FIXUP_FUNC,
5665     .v.func = stac92hd83xxx_fixup_headset_jack,
5666     @@ -2232,6 +2263,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
5667     "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
5668     SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1888,
5669     "HP Envy Spectre", STAC_HP_ENVY_BASS),
5670     + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1899,
5671     + "HP Folio 13", STAC_HP_LED_GPIO10),
5672     SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18df,
5673     "HP Folio", STAC_92HD83XXX_HP_MIC_LED),
5674     SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_HP, 0xff00, 0x1900,
5675     diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
5676     index dc0284dc9e6f..76fdf0a598bc 100644
5677     --- a/sound/soc/codecs/da732x.c
5678     +++ b/sound/soc/codecs/da732x.c
5679     @@ -1268,11 +1268,23 @@ static struct snd_soc_dai_driver da732x_dai[] = {
5680     },
5681     };
5682    
5683     +static bool da732x_volatile(struct device *dev, unsigned int reg)
5684     +{
5685     + switch (reg) {
5686     + case DA732X_REG_HPL_DAC_OFF_CNTL:
5687     + case DA732X_REG_HPR_DAC_OFF_CNTL:
5688     + return true;
5689     + default:
5690     + return false;
5691     + }
5692     +}
5693     +
5694     static const struct regmap_config da732x_regmap = {
5695     .reg_bits = 8,
5696     .val_bits = 8,
5697    
5698     .max_register = DA732X_MAX_REG,
5699     + .volatile_reg = da732x_volatile,
5700     .reg_defaults = da732x_reg_cache,
5701     .num_reg_defaults = ARRAY_SIZE(da732x_reg_cache),
5702     .cache_type = REGCACHE_RBTREE,
5703     diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
5704     index fc9802d1281d..620f80467bc8 100644
5705     --- a/sound/soc/codecs/da9055.c
5706     +++ b/sound/soc/codecs/da9055.c
5707     @@ -1523,8 +1523,15 @@ static int da9055_remove(struct i2c_client *client)
5708     return 0;
5709     }
5710    
5711     +/*
5712     + * DO NOT change the device Ids. The naming is intentionally specific as both
5713     + * the CODEC and PMIC parts of this chip are instantiated separately as I2C
5714     + * devices (both have configurable I2C addresses, and are to all intents and
5715     + * purposes separate). As a result there are specific DA9055 Ids for CODEC
5716     + * and PMIC, which must be different to operate together.
5717     + */
5718     static const struct i2c_device_id da9055_i2c_id[] = {
5719     - { "da9055", 0 },
5720     + { "da9055-codec", 0 },
5721     { }
5722     };
5723     MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
5724     @@ -1532,7 +1539,7 @@ MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
5725     /* I2C codec control layer */
5726     static struct i2c_driver da9055_i2c_driver = {
5727     .driver = {
5728     - .name = "da9055",
5729     + .name = "da9055-codec",
5730     .owner = THIS_MODULE,
5731     },
5732     .probe = da9055_i2c_probe,
5733     diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
5734     index 0569a4c3ae00..5f728808eed4 100644
5735     --- a/sound/soc/codecs/max98090.c
5736     +++ b/sound/soc/codecs/max98090.c
5737     @@ -1769,16 +1769,6 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
5738    
5739     switch (level) {
5740     case SND_SOC_BIAS_ON:
5741     - if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
5742     - ret = regcache_sync(max98090->regmap);
5743     -
5744     - if (ret != 0) {
5745     - dev_err(codec->dev,
5746     - "Failed to sync cache: %d\n", ret);
5747     - return ret;
5748     - }
5749     - }
5750     -
5751     if (max98090->jack_state == M98090_JACK_STATE_HEADSET) {
5752     /*
5753     * Set to normal bias level.
5754     @@ -1792,6 +1782,16 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
5755     break;
5756    
5757     case SND_SOC_BIAS_STANDBY:
5758     + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
5759     + ret = regcache_sync(max98090->regmap);
5760     + if (ret != 0) {
5761     + dev_err(codec->dev,
5762     + "Failed to sync cache: %d\n", ret);
5763     + return ret;
5764     + }
5765     + }
5766     + break;
5767     +
5768     case SND_SOC_BIAS_OFF:
5769     /* Set internal pull-up to lowest power mode */
5770     snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
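
The point of moving the regcache_sync() block: DAPM steps the bias level through OFF -> STANDBY -> PREPARE -> ON, so by the time SND_SOC_BIAS_ON is entered the previous level is never OFF and the old check was dead code. The cache restore has to happen on the OFF -> STANDBY edge. A trimmed sketch of the resulting shape (3.12-era codec API; the private struct is hypothetical):

    static int my_set_bias_level(struct snd_soc_codec *codec,
                                 enum snd_soc_bias_level level)
    {
        struct my_priv *priv = snd_soc_codec_get_drvdata(codec);
        int ret;

        switch (level) {
        case SND_SOC_BIAS_STANDBY:
            /* leaving OFF: the chip was unpowered and its registers
             * are at reset defaults, so replay the cached values */
            if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
                ret = regcache_sync(priv->regmap);
                if (ret != 0)
                    return ret;
            }
            break;
        default:
            break;
        }

        codec->dapm.bias_level = level;
        return 0;
    }
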
5771     diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
5772     index 06edb396e733..2735361a4c3c 100644
5773     --- a/sound/soc/codecs/sta32x.c
5774     +++ b/sound/soc/codecs/sta32x.c
5775     @@ -187,42 +187,42 @@ static const unsigned int sta32x_limiter_drc_release_tlv[] = {
5776     13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
5777     };
5778    
5779     -static const struct soc_enum sta32x_drc_ac_enum =
5780     - SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
5781     - 2, sta32x_drc_ac);
5782     -static const struct soc_enum sta32x_auto_eq_enum =
5783     - SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
5784     - 3, sta32x_auto_eq_mode);
5785     -static const struct soc_enum sta32x_auto_gc_enum =
5786     - SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
5787     - 4, sta32x_auto_gc_mode);
5788     -static const struct soc_enum sta32x_auto_xo_enum =
5789     - SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
5790     - 16, sta32x_auto_xo_mode);
5791     -static const struct soc_enum sta32x_preset_eq_enum =
5792     - SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
5793     - 32, sta32x_preset_eq_mode);
5794     -static const struct soc_enum sta32x_limiter_ch1_enum =
5795     - SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
5796     - 3, sta32x_limiter_select);
5797     -static const struct soc_enum sta32x_limiter_ch2_enum =
5798     - SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
5799     - 3, sta32x_limiter_select);
5800     -static const struct soc_enum sta32x_limiter_ch3_enum =
5801     - SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
5802     - 3, sta32x_limiter_select);
5803     -static const struct soc_enum sta32x_limiter1_attack_rate_enum =
5804     - SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT,
5805     - 16, sta32x_limiter_attack_rate);
5806     -static const struct soc_enum sta32x_limiter2_attack_rate_enum =
5807     - SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT,
5808     - 16, sta32x_limiter_attack_rate);
5809     -static const struct soc_enum sta32x_limiter1_release_rate_enum =
5810     - SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT,
5811     - 16, sta32x_limiter_release_rate);
5812     -static const struct soc_enum sta32x_limiter2_release_rate_enum =
5813     - SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT,
5814     - 16, sta32x_limiter_release_rate);
5815     +static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
5816     + STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
5817     + sta32x_drc_ac);
5818     +static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum,
5819     + STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
5820     + sta32x_auto_eq_mode);
5821     +static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum,
5822     + STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
5823     + sta32x_auto_gc_mode);
5824     +static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum,
5825     + STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
5826     + sta32x_auto_xo_mode);
5827     +static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum,
5828     + STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
5829     + sta32x_preset_eq_mode);
5830     +static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum,
5831     + STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
5832     + sta32x_limiter_select);
5833     +static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum,
5834     + STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
5835     + sta32x_limiter_select);
5836     +static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum,
5837     + STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
5838     + sta32x_limiter_select);
5839     +static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum,
5840     + STA32X_L1AR, STA32X_LxA_SHIFT,
5841     + sta32x_limiter_attack_rate);
5842     +static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum,
5843     + STA32X_L2AR, STA32X_LxA_SHIFT,
5844     + sta32x_limiter_attack_rate);
5845     +static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum,
5846     + STA32X_L1AR, STA32X_LxR_SHIFT,
5847     + sta32x_limiter_release_rate);
5848     +static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum,
5849     + STA32X_L2AR, STA32X_LxR_SHIFT,
5850     + sta32x_limiter_release_rate);
5851    
5852     /* byte array controls for setting biquad, mixer, scaling coefficients;
5853     * for biquads all five coefficients need to be set in one go,
5854     @@ -331,7 +331,7 @@ static int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
5855    
5856     static int sta32x_cache_sync(struct snd_soc_codec *codec)
5857     {
5858     - struct sta32x_priv *sta32x = codec->control_data;
5859     + struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
5860     unsigned int mute;
5861     int rc;
5862    
5863     @@ -434,7 +434,7 @@ SOC_SINGLE_TLV("Treble Tone Control", STA32X_TONE, STA32X_TONE_TTC_SHIFT, 15, 0,
5864     SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta32x_limiter1_attack_rate_enum),
5865     SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum),
5866     SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
5867     -SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
5868     +SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum),
5869    
5870     /* depending on mode, the attack/release thresholds have
5871     * two different enum definitions; provide both
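
The sta32x diff is a mechanical conversion plus one real fix: "Limiter2 Release Rate" had been wired to the limiter1 enum (last hunk above). The SOC_ENUM_SINGLE_DECL form also removes the hand-counted item numbers (2, 3, 4, 16, 32) by taking ARRAY_SIZE of the text array itself. Before/after in sketch form, with hypothetical register and shift names:

    static const char * const my_filter_mode[] = { "Off", "Low", "High" };

    /* old style: item count (3) maintained by hand, can drift from the array */
    static const struct soc_enum my_filter_enum_old =
        SOC_ENUM_SINGLE(MY_REG_CONF, MY_FILT_SHIFT, 3, my_filter_mode);

    /* new style: count derived from the array, one less thing to desync */
    static SOC_ENUM_SINGLE_DECL(my_filter_enum,
                                MY_REG_CONF, MY_FILT_SHIFT,
                                my_filter_mode);
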
5872     diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
5873     index 89a18d82f303..5bce21013485 100644
5874     --- a/sound/soc/codecs/wm8770.c
5875     +++ b/sound/soc/codecs/wm8770.c
5876     @@ -196,8 +196,8 @@ static const char *ain_text[] = {
5877     "AIN5", "AIN6", "AIN7", "AIN8"
5878     };
5879    
5880     -static const struct soc_enum ain_enum =
5881     - SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
5882     +static SOC_ENUM_DOUBLE_DECL(ain_enum,
5883     + WM8770_ADCMUX, 0, 4, ain_text);
5884    
5885     static const struct snd_kcontrol_new ain_mux =
5886     SOC_DAPM_ENUM("Capture Mux", ain_enum);
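
Same conversion for a stereo mux: SOC_ENUM_DOUBLE_DECL decodes separate left/right bitfields of one register (shifts 0 and 4 above) and likewise sizes the enum from the text array instead of the literal 8. Sketch, with hypothetical names:

    static const char * const my_input_text[] = {
        "IN1", "IN2", "IN3", "IN4",
    };

    /* left channel field at bit 0, right channel field at bit 4 */
    static SOC_ENUM_DOUBLE_DECL(my_input_enum,
                                MY_REG_ADCMUX, 0, 4, my_input_text);

    static const struct snd_kcontrol_new my_input_mux =
        SOC_DAPM_ENUM("Capture Mux", my_input_enum);
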
5887     diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
5888     index b0710d817a65..754f88e1fdab 100644
5889     --- a/sound/soc/codecs/wm8958-dsp2.c
5890     +++ b/sound/soc/codecs/wm8958-dsp2.c
5891     @@ -153,7 +153,7 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
5892    
5893     data32 &= 0xffffff;
5894    
5895     - wm8994_bulk_write(codec->control_data,
5896     + wm8994_bulk_write(wm8994->wm8994,
5897     data32 & 0xffffff,
5898     block_len / 2,
5899     (void *)(data + 8));
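
codec->control_data is a bus-specific cookie and, after the wm8994 family moved to regmap, no longer points at the struct wm8994 core device that wm8994_bulk_write() expects; the reliable handle lives in the codec's private data, as the surrounding function already does via snd_soc_codec_get_drvdata(). The access pattern in miniature (per the 3.12-era wm8994 driver layout):

    static int my_load_block(struct snd_soc_codec *codec,
                             unsigned int reg, const u16 *buf, int words)
    {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);

        /* talk to the core device, not the stale control_data cookie */
        return wm8994_bulk_write(wm8994->wm8994, reg, words, buf);
    }
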
5900     diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
5901     index e0305a148568..9edd68db9f48 100644
5902     --- a/sound/soc/txx9/txx9aclc-ac97.c
5903     +++ b/sound/soc/txx9/txx9aclc-ac97.c
5904     @@ -183,14 +183,16 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
5905     irq = platform_get_irq(pdev, 0);
5906     if (irq < 0)
5907     return irq;
5908     +
5909     + drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
5910     + if (!drvdata)
5911     + return -ENOMEM;
5912     +
5913     r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5914     drvdata->base = devm_ioremap_resource(&pdev->dev, r);
5915     if (IS_ERR(drvdata->base))
5916     return PTR_ERR(drvdata->base);
5917    
5918     - drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
5919     - if (!drvdata)
5920     - return -ENOMEM;
5921     platform_set_drvdata(pdev, drvdata);
5922     drvdata->physbase = r->start;
5923     if (sizeof(drvdata->physbase) > sizeof(r->start) &&
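
The txx9aclc hunk cures a plain use-before-allocation bug: the old probe stored into drvdata->base before the devm_kzalloc() that creates drvdata, dereferencing an uninitialized pointer. The ordering the fix restores, in sketch form (names hypothetical):

    struct my_drvdata {
        void __iomem *base;
    };

    static int my_probe(struct platform_device *pdev)
    {
        struct my_drvdata *drvdata;
        struct resource *r;

        /* allocate driver state before anything writes into it */
        drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
        if (!drvdata)
            return -ENOMEM;

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        drvdata->base = devm_ioremap_resource(&pdev->dev, r);
        if (IS_ERR(drvdata->base))
            return PTR_ERR(drvdata->base);

        platform_set_drvdata(pdev, drvdata);
        return 0;
    }
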
5924     diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
5925     index cc2dd1f0decb..0339d464791a 100644
5926     --- a/sound/usb/mixer_maps.c
5927     +++ b/sound/usb/mixer_maps.c
5928     @@ -322,6 +322,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
5929     { 0 } /* terminator */
5930     };
5931    
5932     +static const struct usbmix_name_map kef_x300a_map[] = {
5933     + { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
5934     + { 0 }
5935     +};
5936     +
5937     /*
5938     * Control map entries
5939     */
5940     @@ -409,6 +414,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
5941     .id = USB_ID(0x200c, 0x1018),
5942     .map = ebox44_map,
5943     },
5944     + {
5945     + .id = USB_ID(0x27ac, 0x1000),
5946     + .map = kef_x300a_map,
5947     + },
5948     { 0 } /* terminator */
5949     };
5950
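
For the KEF X300A entry: a usbmix_name_map row whose name is NULL tells the mixer builder to skip that feature-unit control entirely rather than rename it, and the enclosing usbmix_ctl_map is selected by USB VID:PID. The shape of such a quirk, with a made-up device ID:

    static const struct usbmix_name_map my_device_map[] = {
        { 10, NULL },  /* NULL name = don't expose feature unit 10 at all */
        { 0 }          /* terminator */
    };

    static struct usbmix_ctl_map my_ctl_maps[] = {
        { .id = USB_ID(0x1234, 0x5678), .map = my_device_map },
        { 0 }  /* terminator */
    };
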