Magellan Linux

Annotation of /trunk/kernel-magellan/patches-3.11/0101-3.11.2-all-fixes.patch

Revision 2294
Mon Oct 7 12:06:56 2013 UTC by niro
File size: 145284 bytes
-linux-3.11.2
1 niro 2294 diff --git a/Makefile b/Makefile
2     index efd23961..aede3194 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 3
7     PATCHLEVEL = 11
8     -SUBLEVEL = 1
9     +SUBLEVEL = 2
10     EXTRAVERSION =
11     NAME = Linux for Workgroups
12    
13     diff --git a/arch/arc/include/asm/sections.h b/arch/arc/include/asm/sections.h
14     index 6fc1159d..764f1e3b 100644
15     --- a/arch/arc/include/asm/sections.h
16     +++ b/arch/arc/include/asm/sections.h
17     @@ -11,7 +11,6 @@
18    
19     #include <asm-generic/sections.h>
20    
21     -extern char _int_vec_base_lds[];
22     extern char __arc_dccm_base[];
23     extern char __dtb_start[];
24    
25     diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
26     index 2a913f85..0f944f02 100644
27     --- a/arch/arc/kernel/head.S
28     +++ b/arch/arc/kernel/head.S
29     @@ -34,6 +34,9 @@ stext:
30     ; IDENTITY Reg [ 3 2 1 0 ]
31     ; (cpu-id) ^^^ => Zero for UP ARC700
32     ; => #Core-ID if SMP (Master 0)
33     + ; Note that non-boot CPUs might not land here if halt-on-reset and
34     + ; instead breath life from @first_lines_of_secondary, but we still
35     + ; need to make sure only boot cpu takes this path.
36     GET_CPU_ID r5
37     cmp r5, 0
38     jnz arc_platform_smp_wait_to_boot
39     @@ -98,6 +101,8 @@ stext:
40    
41     first_lines_of_secondary:
42    
43     + sr @_int_vec_base_lds, [AUX_INTR_VEC_BASE]
44     +
45     ; setup per-cpu idle task as "current" on this CPU
46     ld r0, [@secondary_idle_tsk]
47     SET_CURR_TASK_ON_CPU r0, r1
48     diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
49     index 305b3f86..5fc92455 100644
50     --- a/arch/arc/kernel/irq.c
51     +++ b/arch/arc/kernel/irq.c
52     @@ -24,7 +24,6 @@
53     * -Needed for each CPU (hence not foldable into init_IRQ)
54     *
55     * what it does ?
56     - * -setup Vector Table Base Reg - in case Linux not linked at 0x8000_0000
57     * -Disable all IRQs (on CPU side)
58     * -Optionally, setup the High priority Interrupts as Level 2 IRQs
59     */
60     diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
61     index 6b083454..e8185631 100644
62     --- a/arch/arc/kernel/setup.c
63     +++ b/arch/arc/kernel/setup.c
64     @@ -47,10 +47,7 @@ void read_arc_build_cfg_regs(void)
65     READ_BCR(AUX_IDENTITY, cpu->core);
66    
67     cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR);
68     -
69     cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
70     - if (cpu->vec_base == 0)
71     - cpu->vec_base = (unsigned int)_int_vec_base_lds;
72    
73     READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
74     cpu->uncached_base = uncached_space.start << 24;
75     diff --git a/arch/arm/mach-versatile/include/mach/platform.h b/arch/arm/mach-versatile/include/mach/platform.h
76     index ec087407..6f938ccb 100644
77     --- a/arch/arm/mach-versatile/include/mach/platform.h
78     +++ b/arch/arm/mach-versatile/include/mach/platform.h
79     @@ -231,12 +231,14 @@
80     /* PCI space */
81     #define VERSATILE_PCI_BASE 0x41000000 /* PCI Interface */
82     #define VERSATILE_PCI_CFG_BASE 0x42000000
83     +#define VERSATILE_PCI_IO_BASE 0x43000000
84     #define VERSATILE_PCI_MEM_BASE0 0x44000000
85     #define VERSATILE_PCI_MEM_BASE1 0x50000000
86     #define VERSATILE_PCI_MEM_BASE2 0x60000000
87     /* Sizes of above maps */
88     #define VERSATILE_PCI_BASE_SIZE 0x01000000
89     #define VERSATILE_PCI_CFG_BASE_SIZE 0x02000000
90     +#define VERSATILE_PCI_IO_BASE_SIZE 0x01000000
91     #define VERSATILE_PCI_MEM_BASE0_SIZE 0x0c000000 /* 32Mb */
92     #define VERSATILE_PCI_MEM_BASE1_SIZE 0x10000000 /* 256Mb */
93     #define VERSATILE_PCI_MEM_BASE2_SIZE 0x10000000 /* 256Mb */
94     diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
95     index e92e5e07..c97be4ea 100644
96     --- a/arch/arm/mach-versatile/pci.c
97     +++ b/arch/arm/mach-versatile/pci.c
98     @@ -43,9 +43,9 @@
99     #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0)
100     #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4)
101     #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8)
102     -#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10)
103     -#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
104     -#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
105     +#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14)
106     +#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18)
107     +#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c)
108     #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc)
109    
110     #define DEVICE_ID_OFFSET 0x00
111     @@ -170,8 +170,8 @@ static struct pci_ops pci_versatile_ops = {
112     .write = versatile_write_config,
113     };
114    
115     -static struct resource io_mem = {
116     - .name = "PCI I/O space",
117     +static struct resource unused_mem = {
118     + .name = "PCI unused",
119     .start = VERSATILE_PCI_MEM_BASE0,
120     .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
121     .flags = IORESOURCE_MEM,
122     @@ -195,9 +195,9 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
123     {
124     int ret = 0;
125    
126     - ret = request_resource(&iomem_resource, &io_mem);
127     + ret = request_resource(&iomem_resource, &unused_mem);
128     if (ret) {
129     - printk(KERN_ERR "PCI: unable to allocate I/O "
130     + printk(KERN_ERR "PCI: unable to allocate unused "
131     "memory region (%d)\n", ret);
132     goto out;
133     }
134     @@ -205,7 +205,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
135     if (ret) {
136     printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
137     "memory region (%d)\n", ret);
138     - goto release_io_mem;
139     + goto release_unused_mem;
140     }
141     ret = request_resource(&iomem_resource, &pre_mem);
142     if (ret) {
143     @@ -225,8 +225,8 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
144    
145     release_non_mem:
146     release_resource(&non_mem);
147     - release_io_mem:
148     - release_resource(&io_mem);
149     + release_unused_mem:
150     + release_resource(&unused_mem);
151     out:
152     return ret;
153     }
154     @@ -246,7 +246,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
155     goto out;
156     }
157    
158     - ret = pci_ioremap_io(0, VERSATILE_PCI_MEM_BASE0);
159     + ret = pci_ioremap_io(0, VERSATILE_PCI_IO_BASE);
160     if (ret)
161     goto out;
162    
163     @@ -295,6 +295,19 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
164     __raw_writel(PHYS_OFFSET, local_pci_cfg_base + PCI_BASE_ADDRESS_2);
165    
166     /*
167     + * For many years the kernel and QEMU were symbiotically buggy
168     + * in that they both assumed the same broken IRQ mapping.
169     + * QEMU therefore attempts to auto-detect old broken kernels
170     + * so that they still work on newer QEMU as they did on old
171     + * QEMU. Since we now use the correct (ie matching-hardware)
172     + * IRQ mapping we write a definitely different value to a
173     + * PCI_INTERRUPT_LINE register to tell QEMU that we expect
174     + * real hardware behaviour and it need not be backwards
175     + * compatible for us. This write is harmless on real hardware.
176     + */
177     + __raw_writel(0, VERSATILE_PCI_VIRT_BASE+PCI_INTERRUPT_LINE);
178     +
179     + /*
180     * Do not to map Versatile FPGA PCI device into memory space
181     */
182     pci_slot_ignore |= (1 << myslot);
183     @@ -327,13 +340,13 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
184     {
185     int irq;
186    
187     - /* slot, pin, irq
188     - * 24 1 IRQ_SIC_PCI0
189     - * 25 1 IRQ_SIC_PCI1
190     - * 26 1 IRQ_SIC_PCI2
191     - * 27 1 IRQ_SIC_PCI3
192     + /*
193     + * Slot INTA INTB INTC INTD
194     + * 31 PCI1 PCI2 PCI3 PCI0
195     + * 30 PCI0 PCI1 PCI2 PCI3
196     + * 29 PCI3 PCI0 PCI1 PCI2
197     */
198     - irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
199     + irq = IRQ_SIC_PCI0 + ((slot + 2 + pin - 1) & 3);
200    
201     return irq;
202     }
203     diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
204     index 8a6295c8..7071fcac 100644
205     --- a/arch/arm/xen/enlighten.c
206     +++ b/arch/arm/xen/enlighten.c
207     @@ -273,12 +273,15 @@ core_initcall(xen_guest_init);
208    
209     static int __init xen_pm_init(void)
210     {
211     + if (!xen_domain())
212     + return -ENODEV;
213     +
214     pm_power_off = xen_power_off;
215     arm_pm_restart = xen_restart;
216    
217     return 0;
218     }
219     -subsys_initcall(xen_pm_init);
220     +late_initcall(xen_pm_init);
221    
222     static irqreturn_t xen_arm_callback(int irq, void *arg)
223     {
224     diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
225     index 12e6ccb8..cea1594f 100644
226     --- a/arch/arm64/kernel/perf_event.c
227     +++ b/arch/arm64/kernel/perf_event.c
228     @@ -325,7 +325,10 @@ validate_event(struct pmu_hw_events *hw_events,
229     if (is_software_event(event))
230     return 1;
231    
232     - if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
233     + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
234     + return 1;
235     +
236     + if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
237     return 1;
238    
239     return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
240     @@ -781,7 +784,7 @@ static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
241     /*
242     * PMXEVTYPER: Event selection reg
243     */
244     -#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
245     +#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
246     #define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
247    
248     /*
249     diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
250     index 765ef30e..733017b3 100644
251     --- a/arch/mips/ath79/clock.c
252     +++ b/arch/mips/ath79/clock.c
253     @@ -164,7 +164,7 @@ static void __init ar933x_clocks_init(void)
254     ath79_ahb_clk.rate = freq / t;
255     }
256    
257     - ath79_wdt_clk.rate = ath79_ref_clk.rate;
258     + ath79_wdt_clk.rate = ath79_ahb_clk.rate;
259     ath79_uart_clk.rate = ath79_ref_clk.rate;
260     }
261    
262     diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
263     index ee5b690a..52e5758e 100644
264     --- a/arch/powerpc/kernel/align.c
265     +++ b/arch/powerpc/kernel/align.c
266     @@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs)
267     nb = aligninfo[instr].len;
268     flags = aligninfo[instr].flags;
269    
270     + /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
271     + if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
272     + nb = 8;
273     + flags = LD+SW;
274     + } else if (IS_XFORM(instruction) &&
275     + ((instruction >> 1) & 0x3ff) == 660) {
276     + nb = 8;
277     + flags = ST+SW;
278     + }
279     +
280     /* Byteswap little endian loads and stores */
281     swiz = 0;
282     if (regs->msr & MSR_LE) {
283     diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
284     index 94c1dd46..a3a5cb8e 100644
285     --- a/arch/powerpc/kvm/book3s_xics.c
286     +++ b/arch/powerpc/kvm/book3s_xics.c
287     @@ -19,6 +19,7 @@
288     #include <asm/hvcall.h>
289     #include <asm/xics.h>
290     #include <asm/debug.h>
291     +#include <asm/time.h>
292    
293     #include <linux/debugfs.h>
294     #include <linux/seq_file.h>
295     diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
296     index c11c8238..54b998f2 100644
297     --- a/arch/powerpc/platforms/pseries/setup.c
298     +++ b/arch/powerpc/platforms/pseries/setup.c
299     @@ -354,7 +354,7 @@ static int alloc_dispatch_log_kmem_cache(void)
300     }
301     early_initcall(alloc_dispatch_log_kmem_cache);
302    
303     -static void pSeries_idle(void)
304     +static void pseries_lpar_idle(void)
305     {
306     /* This would call on the cpuidle framework, and the back-end pseries
307     * driver to go to idle states
308     @@ -362,10 +362,22 @@ static void pSeries_idle(void)
309     if (cpuidle_idle_call()) {
310     /* On error, execute default handler
311     * to go into low thread priority and possibly
312     - * low power mode.
313     + * low power mode by cedeing processor to hypervisor
314     */
315     - HMT_low();
316     - HMT_very_low();
317     +
318     + /* Indicate to hypervisor that we are idle. */
319     + get_lppaca()->idle = 1;
320     +
321     + /*
322     + * Yield the processor to the hypervisor. We return if
323     + * an external interrupt occurs (which are driven prior
324     + * to returning here) or if a prod occurs from another
325     + * processor. When returning here, external interrupts
326     + * are enabled.
327     + */
328     + cede_processor();
329     +
330     + get_lppaca()->idle = 0;
331     }
332     }
333    
334     @@ -456,15 +468,14 @@ static void __init pSeries_setup_arch(void)
335    
336     pSeries_nvram_init();
337    
338     - if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
339     + if (firmware_has_feature(FW_FEATURE_LPAR)) {
340     vpa_init(boot_cpuid);
341     - ppc_md.power_save = pSeries_idle;
342     - }
343     -
344     - if (firmware_has_feature(FW_FEATURE_LPAR))
345     + ppc_md.power_save = pseries_lpar_idle;
346     ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
347     - else
348     + } else {
349     + /* No special idle routine */
350     ppc_md.enable_pmcs = power4_enable_pmcs;
351     + }
352    
353     ppc_md.pcibios_root_bridge_prepare = pseries_root_bridge_prepare;
354    
355     diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
356     index d5f10a43..70923928 100644
357     --- a/arch/s390/net/bpf_jit_comp.c
358     +++ b/arch/s390/net/bpf_jit_comp.c
359     @@ -805,7 +805,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
360     return NULL;
361     memset(header, 0, sz);
362     header->pages = sz / PAGE_SIZE;
363     - hole = sz - bpfsize + sizeof(*header);
364     + hole = sz - (bpfsize + sizeof(*header));
365     /* Insert random number of illegal instructions before BPF code
366     * and make sure the first instruction starts at an even address.
367     */
368     diff --git a/arch/um/include/shared/os.h b/arch/um/include/shared/os.h
369     index 95feaa47..c70a234a 100644
370     --- a/arch/um/include/shared/os.h
371     +++ b/arch/um/include/shared/os.h
372     @@ -200,6 +200,7 @@ extern int os_unmap_memory(void *addr, int len);
373     extern int os_drop_memory(void *addr, int length);
374     extern int can_drop_memory(void);
375     extern void os_flush_stdout(void);
376     +extern int os_mincore(void *addr, unsigned long len);
377    
378     /* execvp.c */
379     extern int execvp_noalloc(char *buf, const char *file, char *const argv[]);
380     diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
381     index babe2182..d8b78a03 100644
382     --- a/arch/um/kernel/Makefile
383     +++ b/arch/um/kernel/Makefile
384     @@ -13,7 +13,7 @@ clean-files :=
385     obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \
386     physmem.o process.o ptrace.o reboot.o sigio.o \
387     signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \
388     - um_arch.o umid.o skas/
389     + um_arch.o umid.o maccess.o skas/
390    
391     obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
392     obj-$(CONFIG_GPROF) += gprof_syms.o
393     diff --git a/arch/um/kernel/maccess.c b/arch/um/kernel/maccess.c
394     new file mode 100644
395     index 00000000..1f3d5c49
396     --- /dev/null
397     +++ b/arch/um/kernel/maccess.c
398     @@ -0,0 +1,24 @@
399     +/*
400     + * Copyright (C) 2013 Richard Weinberger <richrd@nod.at>
401     + *
402     + * This program is free software; you can redistribute it and/or modify
403     + * it under the terms of the GNU General Public License version 2 as
404     + * published by the Free Software Foundation.
405     + */
406     +
407     +#include <linux/uaccess.h>
408     +#include <linux/kernel.h>
409     +#include <os.h>
410     +
411     +long probe_kernel_read(void *dst, const void *src, size_t size)
412     +{
413     + void *psrc = (void *)rounddown((unsigned long)src, PAGE_SIZE);
414     +
415     + if ((unsigned long)src < PAGE_SIZE || size <= 0)
416     + return -EFAULT;
417     +
418     + if (os_mincore(psrc, size + src - psrc) <= 0)
419     + return -EFAULT;
420     +
421     + return __probe_kernel_read(dst, src, size);
422     +}
423     diff --git a/arch/um/os-Linux/process.c b/arch/um/os-Linux/process.c
424     index b8f34c9e..67b9c8f5 100644
425     --- a/arch/um/os-Linux/process.c
426     +++ b/arch/um/os-Linux/process.c
427     @@ -4,6 +4,7 @@
428     */
429    
430     #include <stdio.h>
431     +#include <stdlib.h>
432     #include <unistd.h>
433     #include <errno.h>
434     #include <signal.h>
435     @@ -232,6 +233,57 @@ out:
436     return ok;
437     }
438    
439     +static int os_page_mincore(void *addr)
440     +{
441     + char vec[2];
442     + int ret;
443     +
444     + ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
445     + if (ret < 0) {
446     + if (errno == ENOMEM || errno == EINVAL)
447     + return 0;
448     + else
449     + return -errno;
450     + }
451     +
452     + return vec[0] & 1;
453     +}
454     +
455     +int os_mincore(void *addr, unsigned long len)
456     +{
457     + char *vec;
458     + int ret, i;
459     +
460     + if (len <= UM_KERN_PAGE_SIZE)
461     + return os_page_mincore(addr);
462     +
463     + vec = calloc(1, (len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE);
464     + if (!vec)
465     + return -ENOMEM;
466     +
467     + ret = mincore(addr, UM_KERN_PAGE_SIZE, vec);
468     + if (ret < 0) {
469     + if (errno == ENOMEM || errno == EINVAL)
470     + ret = 0;
471     + else
472     + ret = -errno;
473     +
474     + goto out;
475     + }
476     +
477     + for (i = 0; i < ((len + UM_KERN_PAGE_SIZE - 1) / UM_KERN_PAGE_SIZE); i++) {
478     + if (!(vec[i] & 1)) {
479     + ret = 0;
480     + goto out;
481     + }
482     + }
483     +
484     + ret = 1;
485     +out:
486     + free(vec);
487     + return ret;
488     +}
489     +
490     void init_new_thread_signals(void)
491     {
492     set_handler(SIGSEGV);
493     diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
494     index bccfca68..665a7303 100644
495     --- a/arch/x86/ia32/ia32_signal.c
496     +++ b/arch/x86/ia32/ia32_signal.c
497     @@ -457,7 +457,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
498     else
499     put_user_ex(0, &frame->uc.uc_flags);
500     put_user_ex(0, &frame->uc.uc_link);
501     - err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
502     + compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
503    
504     if (ksig->ka.sa.sa_flags & SA_RESTORER)
505     restorer = ksig->ka.sa.sa_restorer;
506     diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
507     index 46fc474f..f50de695 100644
508     --- a/arch/x86/include/asm/checksum_32.h
509     +++ b/arch/x86/include/asm/checksum_32.h
510     @@ -49,9 +49,15 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
511     int len, __wsum sum,
512     int *err_ptr)
513     {
514     + __wsum ret;
515     +
516     might_sleep();
517     - return csum_partial_copy_generic((__force void *)src, dst,
518     - len, sum, err_ptr, NULL);
519     + stac();
520     + ret = csum_partial_copy_generic((__force void *)src, dst,
521     + len, sum, err_ptr, NULL);
522     + clac();
523     +
524     + return ret;
525     }
526    
527     /*
528     @@ -176,10 +182,16 @@ static inline __wsum csum_and_copy_to_user(const void *src,
529     int len, __wsum sum,
530     int *err_ptr)
531     {
532     + __wsum ret;
533     +
534     might_sleep();
535     - if (access_ok(VERIFY_WRITE, dst, len))
536     - return csum_partial_copy_generic(src, (__force void *)dst,
537     - len, sum, NULL, err_ptr);
538     + if (access_ok(VERIFY_WRITE, dst, len)) {
539     + stac();
540     + ret = csum_partial_copy_generic(src, (__force void *)dst,
541     + len, sum, NULL, err_ptr);
542     + clac();
543     + return ret;
544     + }
545    
546     if (len)
547     *err_ptr = -EFAULT;
548     diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
549     index 29e3093b..aa97342e 100644
550     --- a/arch/x86/include/asm/mce.h
551     +++ b/arch/x86/include/asm/mce.h
552     @@ -32,11 +32,20 @@
553     #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */
554     #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */
555     #define MCI_STATUS_AR (1ULL<<55) /* Action required */
556     -#define MCACOD 0xffff /* MCA Error Code */
557     +
558     +/*
559     + * Note that the full MCACOD field of IA32_MCi_STATUS MSR is
560     + * bits 15:0. But bit 12 is the 'F' bit, defined for corrected
561     + * errors to indicate that errors are being filtered by hardware.
562     + * We should mask out bit 12 when looking for specific signatures
563     + * of uncorrected errors - so the F bit is deliberately skipped
564     + * in this #define.
565     + */
566     +#define MCACOD 0xefff /* MCA Error Code */
567    
568     /* Architecturally defined codes from SDM Vol. 3B Chapter 15 */
569     #define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */
570     -#define MCACOD_SCRUBMSK 0xfff0
571     +#define MCACOD_SCRUBMSK 0xeff0 /* Skip bit 12 ('F' bit) */
572     #define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */
573     #define MCACOD_DATA 0x0134 /* Data Load */
574     #define MCACOD_INSTR 0x0150 /* Instruction Fetch */
575     diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
576     index cdbf3677..be12c534 100644
577     --- a/arch/x86/include/asm/mmu_context.h
578     +++ b/arch/x86/include/asm/mmu_context.h
579     @@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
580     /* Re-load page tables */
581     load_cr3(next->pgd);
582    
583     - /* stop flush ipis for the previous mm */
584     + /* Stop flush ipis for the previous mm */
585     cpumask_clear_cpu(cpu, mm_cpumask(prev));
586    
587     - /*
588     - * load the LDT, if the LDT is different:
589     - */
590     + /* Load the LDT, if the LDT is different: */
591     if (unlikely(prev->context.ldt != next->context.ldt))
592     load_LDT_nolock(&next->context);
593     }
594     #ifdef CONFIG_SMP
595     - else {
596     + else {
597     this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
598     BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
599    
600     - if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) {
601     - /* We were in lazy tlb mode and leave_mm disabled
602     + if (!cpumask_test_cpu(cpu, mm_cpumask(next))) {
603     + /*
604     + * On established mms, the mm_cpumask is only changed
605     + * from irq context, from ptep_clear_flush() while in
606     + * lazy tlb mode, and here. Irqs are blocked during
607     + * schedule, protecting us from simultaneous changes.
608     + */
609     + cpumask_set_cpu(cpu, mm_cpumask(next));
610     + /*
611     + * We were in lazy tlb mode and leave_mm disabled
612     * tlb flush IPI delivery. We must reload CR3
613     * to make sure to use no freed page tables.
614     */
615     diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
616     index 3048ded1..59554dca 100644
617     --- a/arch/x86/kernel/amd_nb.c
618     +++ b/arch/x86/kernel/amd_nb.c
619     @@ -20,6 +20,7 @@ const struct pci_device_id amd_nb_misc_ids[] = {
620     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
621     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
622     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
623     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
624     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
625     {}
626     };
627     @@ -27,6 +28,7 @@ EXPORT_SYMBOL(amd_nb_misc_ids);
628    
629     static const struct pci_device_id amd_nb_link_ids[] = {
630     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
631     + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
632     { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
633     {}
634     };
635     @@ -81,13 +83,20 @@ int amd_cache_northbridges(void)
636     next_northbridge(misc, amd_nb_misc_ids);
637     node_to_amd_nb(i)->link = link =
638     next_northbridge(link, amd_nb_link_ids);
639     - }
640     + }
641    
642     + /* GART present only on Fam15h upto model 0fh */
643     if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
644     - boot_cpu_data.x86 == 0x15)
645     + (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
646     amd_northbridges.flags |= AMD_NB_GART;
647    
648     /*
649     + * Check for L3 cache presence.
650     + */
651     + if (!cpuid_edx(0x80000006))
652     + return 0;
653     +
654     + /*
655     * Some CPU families support L3 Cache Index Disable. There are some
656     * limitations because of E382 and E388 on family 0x10.
657     */
658     diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
659     index cf913587..d859eea0 100644
660     --- a/arch/x86/kernel/signal.c
661     +++ b/arch/x86/kernel/signal.c
662     @@ -358,7 +358,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
663     else
664     put_user_ex(0, &frame->uc.uc_flags);
665     put_user_ex(0, &frame->uc.uc_link);
666     - err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
667     + save_altstack_ex(&frame->uc.uc_stack, regs->sp);
668    
669     /* Set up to return from userspace. */
670     restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
671     @@ -423,7 +423,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
672     else
673     put_user_ex(0, &frame->uc.uc_flags);
674     put_user_ex(0, &frame->uc.uc_link);
675     - err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
676     + save_altstack_ex(&frame->uc.uc_stack, regs->sp);
677    
678     /* Set up to return from userspace. If provided, use a stub
679     already in userspace. */
680     @@ -490,7 +490,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
681     else
682     put_user_ex(0, &frame->uc.uc_flags);
683     put_user_ex(0, &frame->uc.uc_link);
684     - err |= __compat_save_altstack(&frame->uc.uc_stack, regs->sp);
685     + compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
686     put_user_ex(0, &frame->uc.uc__pad0);
687    
688     if (ksig->ka.sa.sa_flags & SA_RESTORER) {
689     diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
690     index 25b7ae8d..7609e0e4 100644
691     --- a/arch/x86/lib/csum-wrappers_64.c
692     +++ b/arch/x86/lib/csum-wrappers_64.c
693     @@ -6,6 +6,7 @@
694     */
695     #include <asm/checksum.h>
696     #include <linux/module.h>
697     +#include <asm/smap.h>
698    
699     /**
700     * csum_partial_copy_from_user - Copy and checksum from user space.
701     @@ -52,8 +53,10 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
702     len -= 2;
703     }
704     }
705     + stac();
706     isum = csum_partial_copy_generic((__force const void *)src,
707     dst, len, isum, errp, NULL);
708     + clac();
709     if (unlikely(*errp))
710     goto out_err;
711    
712     @@ -82,6 +85,8 @@ __wsum
713     csum_partial_copy_to_user(const void *src, void __user *dst,
714     int len, __wsum isum, int *errp)
715     {
716     + __wsum ret;
717     +
718     might_sleep();
719    
720     if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
721     @@ -105,8 +110,11 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
722     }
723    
724     *errp = 0;
725     - return csum_partial_copy_generic(src, (void __force *)dst,
726     - len, isum, NULL, errp);
727     + stac();
728     + ret = csum_partial_copy_generic(src, (void __force *)dst,
729     + len, isum, NULL, errp);
730     + clac();
731     + return ret;
732     }
733     EXPORT_SYMBOL(csum_partial_copy_to_user);
734    
735     diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
736     index d8507f81..74a60c7e 100644
737     --- a/arch/xtensa/kernel/xtensa_ksyms.c
738     +++ b/arch/xtensa/kernel/xtensa_ksyms.c
739     @@ -25,6 +25,7 @@
740     #include <asm/io.h>
741     #include <asm/page.h>
742     #include <asm/pgalloc.h>
743     +#include <asm/ftrace.h>
744     #ifdef CONFIG_BLK_DEV_FD
745     #include <asm/floppy.h>
746     #endif
747     diff --git a/crypto/api.c b/crypto/api.c
748     index 3b618033..37c4c721 100644
749     --- a/crypto/api.c
750     +++ b/crypto/api.c
751     @@ -34,6 +34,8 @@ EXPORT_SYMBOL_GPL(crypto_alg_sem);
752     BLOCKING_NOTIFIER_HEAD(crypto_chain);
753     EXPORT_SYMBOL_GPL(crypto_chain);
754    
755     +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
756     +
757     struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
758     {
759     return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
760     @@ -144,8 +146,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type,
761     }
762     up_write(&crypto_alg_sem);
763    
764     - if (alg != &larval->alg)
765     + if (alg != &larval->alg) {
766     kfree(larval);
767     + if (crypto_is_larval(alg))
768     + alg = crypto_larval_wait(alg);
769     + }
770    
771     return alg;
772     }
773     diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
774     index 6a382188..fb78bb9a 100644
775     --- a/drivers/acpi/acpi_lpss.c
776     +++ b/drivers/acpi/acpi_lpss.c
777     @@ -257,12 +257,13 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
778     pdata->mmio_size = resource_size(&rentry->res);
779     pdata->mmio_base = ioremap(rentry->res.start,
780     pdata->mmio_size);
781     - pdata->dev_desc = dev_desc;
782     break;
783     }
784    
785     acpi_dev_free_resource_list(&resource_list);
786    
787     + pdata->dev_desc = dev_desc;
788     +
789     if (dev_desc->clk_required) {
790     ret = register_device_clock(adev, pdata);
791     if (ret) {
792     diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
793     index 59178393..a67853e3 100644
794     --- a/drivers/acpi/pci_root.c
795     +++ b/drivers/acpi/pci_root.c
796     @@ -378,6 +378,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
797     struct acpi_pci_root *root;
798     u32 flags, base_flags;
799     acpi_handle handle = device->handle;
800     + bool no_aspm = false, clear_aspm = false;
801    
802     root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
803     if (!root)
804     @@ -437,27 +438,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
805     flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
806     acpi_pci_osc_support(root, flags);
807    
808     - /*
809     - * TBD: Need PCI interface for enumeration/configuration of roots.
810     - */
811     -
812     - /*
813     - * Scan the Root Bridge
814     - * --------------------
815     - * Must do this prior to any attempt to bind the root device, as the
816     - * PCI namespace does not get created until this call is made (and
817     - * thus the root bridge's pci_dev does not exist).
818     - */
819     - root->bus = pci_acpi_scan_root(root);
820     - if (!root->bus) {
821     - dev_err(&device->dev,
822     - "Bus %04x:%02x not present in PCI namespace\n",
823     - root->segment, (unsigned int)root->secondary.start);
824     - result = -ENODEV;
825     - goto end;
826     - }
827     -
828     - /* Indicate support for various _OSC capabilities. */
829     if (pci_ext_cfg_avail())
830     flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
831     if (pcie_aspm_support_enabled()) {
832     @@ -471,7 +451,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
833     if (ACPI_FAILURE(status)) {
834     dev_info(&device->dev, "ACPI _OSC support "
835     "notification failed, disabling PCIe ASPM\n");
836     - pcie_no_aspm();
837     + no_aspm = true;
838     flags = base_flags;
839     }
840     }
841     @@ -503,7 +483,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
842     * We have ASPM control, but the FADT indicates
843     * that it's unsupported. Clear it.
844     */
845     - pcie_clear_aspm(root->bus);
846     + clear_aspm = true;
847     }
848     } else {
849     dev_info(&device->dev,
850     @@ -512,7 +492,14 @@ static int acpi_pci_root_add(struct acpi_device *device,
851     acpi_format_exception(status), flags);
852     dev_info(&device->dev,
853     "ACPI _OSC control for PCIe not granted, disabling ASPM\n");
854     - pcie_no_aspm();
855     + /*
856     + * We want to disable ASPM here, but aspm_disabled
857     + * needs to remain in its state from boot so that we
858     + * properly handle PCIe 1.1 devices. So we set this
859     + * flag here, to defer the action until after the ACPI
860     + * root scan.
861     + */
862     + no_aspm = true;
863     }
864     } else {
865     dev_info(&device->dev,
866     @@ -520,6 +507,33 @@ static int acpi_pci_root_add(struct acpi_device *device,
867     "(_OSC support mask: 0x%02x)\n", flags);
868     }
869    
870     + /*
871     + * TBD: Need PCI interface for enumeration/configuration of roots.
872     + */
873     +
874     + /*
875     + * Scan the Root Bridge
876     + * --------------------
877     + * Must do this prior to any attempt to bind the root device, as the
878     + * PCI namespace does not get created until this call is made (and
879     + * thus the root bridge's pci_dev does not exist).
880     + */
881     + root->bus = pci_acpi_scan_root(root);
882     + if (!root->bus) {
883     + dev_err(&device->dev,
884     + "Bus %04x:%02x not present in PCI namespace\n",
885     + root->segment, (unsigned int)root->secondary.start);
886     + result = -ENODEV;
887     + goto end;
888     + }
889     +
890     + if (clear_aspm) {
891     + dev_info(&device->dev, "Disabling ASPM (FADT indicates it is unsupported)\n");
892     + pcie_clear_aspm(root->bus);
893     + }
894     + if (no_aspm)
895     + pcie_no_aspm();
896     +
897     pci_acpi_add_bus_pm_notifier(device, root->bus);
898     if (device->wakeup.flags.run_wake)
899     device_set_run_wake(root->bus->bridge, true);
900     diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
901     index a439602e..c8dac745 100644
902     --- a/drivers/base/firmware_class.c
903     +++ b/drivers/base/firmware_class.c
904     @@ -868,8 +868,15 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
905     goto err_del_dev;
906     }
907    
908     + mutex_lock(&fw_lock);
909     + list_add(&buf->pending_list, &pending_fw_head);
910     + mutex_unlock(&fw_lock);
911     +
912     retval = device_create_file(f_dev, &dev_attr_loading);
913     if (retval) {
914     + mutex_lock(&fw_lock);
915     + list_del_init(&buf->pending_list);
916     + mutex_unlock(&fw_lock);
917     dev_err(f_dev, "%s: device_create_file failed\n", __func__);
918     goto err_del_bin_attr;
919     }
920     @@ -884,10 +891,6 @@ static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
921     kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
922     }
923    
924     - mutex_lock(&fw_lock);
925     - list_add(&buf->pending_list, &pending_fw_head);
926     - mutex_unlock(&fw_lock);
927     -
928     wait_for_completion(&buf->completion);
929    
930     cancel_delayed_work_sync(&fw_priv->timeout_work);
931     diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c
932     index 53495753..6c2652a8 100644
933     --- a/drivers/base/regmap/regmap-debugfs.c
934     +++ b/drivers/base/regmap/regmap-debugfs.c
935     @@ -85,8 +85,8 @@ static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
936     unsigned int reg_offset;
937    
938     /* Suppress the cache if we're using a subrange */
939     - if (from)
940     - return from;
941     + if (base)
942     + return base;
943    
944     /*
945     * If we don't have a cache build one so we don't have to do a
946     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
947     index 4ad2ad9a..45aa20aa 100644
948     --- a/drivers/block/rbd.c
949     +++ b/drivers/block/rbd.c
950     @@ -1557,11 +1557,12 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
951     obj_request, obj_request->img_request, obj_request->result,
952     xferred, length);
953     /*
954     - * ENOENT means a hole in the image. We zero-fill the
955     - * entire length of the request. A short read also implies
956     - * zero-fill to the end of the request. Either way we
957     - * update the xferred count to indicate the whole request
958     - * was satisfied.
959     + * ENOENT means a hole in the image. We zero-fill the entire
960     + * length of the request. A short read also implies zero-fill
961     + * to the end of the request. An error requires the whole
962     + * length of the request to be reported finished with an error
963     + * to the block layer. In each case we update the xferred
964     + * count to indicate the whole request was satisfied.
965     */
966     rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
967     if (obj_request->result == -ENOENT) {
968     @@ -1570,14 +1571,13 @@ rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
969     else
970     zero_pages(obj_request->pages, 0, length);
971     obj_request->result = 0;
972     - obj_request->xferred = length;
973     } else if (xferred < length && !obj_request->result) {
974     if (obj_request->type == OBJ_REQUEST_BIO)
975     zero_bio_chain(obj_request->bio_list, xferred);
976     else
977     zero_pages(obj_request->pages, xferred, length);
978     - obj_request->xferred = length;
979     }
980     + obj_request->xferred = length;
981     obj_request_done_set(obj_request);
982     }
983    
984     diff --git a/drivers/clk/clk-wm831x.c b/drivers/clk/clk-wm831x.c
985     index 1b3f8c9b..1d5af3f7 100644
986     --- a/drivers/clk/clk-wm831x.c
987     +++ b/drivers/clk/clk-wm831x.c
988     @@ -360,6 +360,8 @@ static int wm831x_clk_probe(struct platform_device *pdev)
989     if (!clkdata)
990     return -ENOMEM;
991    
992     + clkdata->wm831x = wm831x;
993     +
994     /* XTAL_ENA can only be set via OTP/InstantConfig so just read once */
995     ret = wm831x_reg_read(wm831x, WM831X_CLOCK_CONTROL_2);
996     if (ret < 0) {
997     diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
998     index 2a297f86..fe853903 100644
999     --- a/drivers/cpuidle/coupled.c
1000     +++ b/drivers/cpuidle/coupled.c
1001     @@ -106,6 +106,7 @@ struct cpuidle_coupled {
1002     cpumask_t coupled_cpus;
1003     int requested_state[NR_CPUS];
1004     atomic_t ready_waiting_counts;
1005     + atomic_t abort_barrier;
1006     int online_count;
1007     int refcnt;
1008     int prevent;
1009     @@ -122,12 +123,19 @@ static DEFINE_MUTEX(cpuidle_coupled_lock);
1010     static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
1011    
1012     /*
1013     - * The cpuidle_coupled_poked_mask mask is used to avoid calling
1014     + * The cpuidle_coupled_poke_pending mask is used to avoid calling
1015     * __smp_call_function_single with the per cpu call_single_data struct already
1016     * in use. This prevents a deadlock where two cpus are waiting for each others
1017     * call_single_data struct to be available
1018     */
1019     -static cpumask_t cpuidle_coupled_poked_mask;
1020     +static cpumask_t cpuidle_coupled_poke_pending;
1021     +
1022     +/*
1023     + * The cpuidle_coupled_poked mask is used to ensure that each cpu has been poked
1024     + * once to minimize entering the ready loop with a poke pending, which would
1025     + * require aborting and retrying.
1026     + */
1027     +static cpumask_t cpuidle_coupled_poked;
1028    
1029     /**
1030     * cpuidle_coupled_parallel_barrier - synchronize all online coupled cpus
1031     @@ -291,10 +299,11 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
1032     return state;
1033     }
1034    
1035     -static void cpuidle_coupled_poked(void *info)
1036     +static void cpuidle_coupled_handle_poke(void *info)
1037     {
1038     int cpu = (unsigned long)info;
1039     - cpumask_clear_cpu(cpu, &cpuidle_coupled_poked_mask);
1040     + cpumask_set_cpu(cpu, &cpuidle_coupled_poked);
1041     + cpumask_clear_cpu(cpu, &cpuidle_coupled_poke_pending);
1042     }
1043    
1044     /**
1045     @@ -313,7 +322,7 @@ static void cpuidle_coupled_poke(int cpu)
1046     {
1047     struct call_single_data *csd = &per_cpu(cpuidle_coupled_poke_cb, cpu);
1048    
1049     - if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poked_mask))
1050     + if (!cpumask_test_and_set_cpu(cpu, &cpuidle_coupled_poke_pending))
1051     __smp_call_function_single(cpu, csd, 0);
1052     }
1053    
1054     @@ -340,30 +349,19 @@ static void cpuidle_coupled_poke_others(int this_cpu,
1055     * @coupled: the struct coupled that contains the current cpu
1056     * @next_state: the index in drv->states of the requested state for this cpu
1057     *
1058     - * Updates the requested idle state for the specified cpuidle device,
1059     - * poking all coupled cpus out of idle if necessary to let them see the new
1060     - * state.
1061     + * Updates the requested idle state for the specified cpuidle device.
1062     + * Returns the number of waiting cpus.
1063     */
1064     -static void cpuidle_coupled_set_waiting(int cpu,
1065     +static int cpuidle_coupled_set_waiting(int cpu,
1066     struct cpuidle_coupled *coupled, int next_state)
1067     {
1068     - int w;
1069     -
1070     coupled->requested_state[cpu] = next_state;
1071    
1072     /*
1073     - * If this is the last cpu to enter the waiting state, poke
1074     - * all the other cpus out of their waiting state so they can
1075     - * enter a deeper state. This can race with one of the cpus
1076     - * exiting the waiting state due to an interrupt and
1077     - * decrementing waiting_count, see comment below.
1078     - *
1079     * The atomic_inc_return provides a write barrier to order the write
1080     * to requested_state with the later write that increments ready_count.
1081     */
1082     - w = atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
1083     - if (w == coupled->online_count)
1084     - cpuidle_coupled_poke_others(cpu, coupled);
1085     + return atomic_inc_return(&coupled->ready_waiting_counts) & WAITING_MASK;
1086     }
1087    
1088     /**
1089     @@ -410,19 +408,33 @@ static void cpuidle_coupled_set_done(int cpu, struct cpuidle_coupled *coupled)
1090     * been processed and the poke bit has been cleared.
1091     *
1092     * Other interrupts may also be processed while interrupts are enabled, so
1093     - * need_resched() must be tested after turning interrupts off again to make sure
1094     + * need_resched() must be tested after this function returns to make sure
1095     * the interrupt didn't schedule work that should take the cpu out of idle.
1096     *
1097     - * Returns 0 if need_resched was false, -EINTR if need_resched was true.
1098     + * Returns 0 if no poke was pending, 1 if a poke was cleared.
1099     */
1100     static int cpuidle_coupled_clear_pokes(int cpu)
1101     {
1102     + if (!cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
1103     + return 0;
1104     +
1105     local_irq_enable();
1106     - while (cpumask_test_cpu(cpu, &cpuidle_coupled_poked_mask))
1107     + while (cpumask_test_cpu(cpu, &cpuidle_coupled_poke_pending))
1108     cpu_relax();
1109     local_irq_disable();
1110    
1111     - return need_resched() ? -EINTR : 0;
1112     + return 1;
1113     +}
1114     +
1115     +static bool cpuidle_coupled_any_pokes_pending(struct cpuidle_coupled *coupled)
1116     +{
1117     + cpumask_t cpus;
1118     + int ret;
1119     +
1120     + cpumask_and(&cpus, cpu_online_mask, &coupled->coupled_cpus);
1121     + ret = cpumask_and(&cpus, &cpuidle_coupled_poke_pending, &cpus);
1122     +
1123     + return ret;
1124     }
1125    
1126     /**
1127     @@ -449,12 +461,14 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
1128     {
1129     int entered_state = -1;
1130     struct cpuidle_coupled *coupled = dev->coupled;
1131     + int w;
1132    
1133     if (!coupled)
1134     return -EINVAL;
1135    
1136     while (coupled->prevent) {
1137     - if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1138     + cpuidle_coupled_clear_pokes(dev->cpu);
1139     + if (need_resched()) {
1140     local_irq_enable();
1141     return entered_state;
1142     }
1143     @@ -465,15 +479,37 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev,
1144     /* Read barrier ensures online_count is read after prevent is cleared */
1145     smp_rmb();
1146    
1147     - cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
1148     +reset:
1149     + cpumask_clear_cpu(dev->cpu, &cpuidle_coupled_poked);
1150     +
1151     + w = cpuidle_coupled_set_waiting(dev->cpu, coupled, next_state);
1152     + /*
1153     + * If this is the last cpu to enter the waiting state, poke
1154     + * all the other cpus out of their waiting state so they can
1155     + * enter a deeper state. This can race with one of the cpus
1156     + * exiting the waiting state due to an interrupt and
1157     + * decrementing waiting_count, see comment below.
1158     + */
1159     + if (w == coupled->online_count) {
1160     + cpumask_set_cpu(dev->cpu, &cpuidle_coupled_poked);
1161     + cpuidle_coupled_poke_others(dev->cpu, coupled);
1162     + }
1163    
1164     retry:
1165     /*
1166     * Wait for all coupled cpus to be idle, using the deepest state
1167     - * allowed for a single cpu.
1168     + * allowed for a single cpu. If this was not the poking cpu, wait
1169     + * for at least one poke before leaving to avoid a race where
1170     + * two cpus could arrive at the waiting loop at the same time,
1171     + * but the first of the two to arrive could skip the loop without
1172     + * processing the pokes from the last to arrive.
1173     */
1174     - while (!cpuidle_coupled_cpus_waiting(coupled)) {
1175     - if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1176     + while (!cpuidle_coupled_cpus_waiting(coupled) ||
1177     + !cpumask_test_cpu(dev->cpu, &cpuidle_coupled_poked)) {
1178     + if (cpuidle_coupled_clear_pokes(dev->cpu))
1179     + continue;
1180     +
1181     + if (need_resched()) {
1182     cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
1183     goto out;
1184     }
1185     @@ -487,12 +523,19 @@ retry:
1186     dev->safe_state_index);
1187     }
1188    
1189     - if (cpuidle_coupled_clear_pokes(dev->cpu)) {
1190     + cpuidle_coupled_clear_pokes(dev->cpu);
1191     + if (need_resched()) {
1192     cpuidle_coupled_set_not_waiting(dev->cpu, coupled);
1193     goto out;
1194     }
1195    
1196     /*
1197     + * Make sure final poke status for this cpu is visible before setting
1198     + * cpu as ready.
1199     + */
1200     + smp_wmb();
1201     +
1202     + /*
1203     * All coupled cpus are probably idle. There is a small chance that
1204     * one of the other cpus just became active. Increment the ready count,
1205     * and spin until all coupled cpus have incremented the counter. Once a
1206     @@ -511,6 +554,28 @@ retry:
1207     cpu_relax();
1208     }
1209    
1210     + /*
1211     + * Make sure read of all cpus ready is done before reading pending pokes
1212     + */
1213     + smp_rmb();
1214     +
1215     + /*
1216     + * There is a small chance that a cpu left and reentered idle after this
1217     + * cpu saw that all cpus were waiting. The cpu that reentered idle will
1218     + * have sent this cpu a poke, which will still be pending after the
1219     + * ready loop. The pending interrupt may be lost by the interrupt
1220     + * controller when entering the deep idle state. It's not possible to
1221     + * clear a pending interrupt without turning interrupts on and handling
1222     + * it, and it's too late to turn on interrupts here, so reset the
1223     + * coupled idle state of all cpus and retry.
1224     + */
1225     + if (cpuidle_coupled_any_pokes_pending(coupled)) {
1226     + cpuidle_coupled_set_done(dev->cpu, coupled);
1227     + /* Wait for all cpus to see the pending pokes */
1228     + cpuidle_coupled_parallel_barrier(dev, &coupled->abort_barrier);
1229     + goto reset;
1230     + }
1231     +
1232     /* all cpus have acked the coupled state */
1233     next_state = cpuidle_coupled_get_state(dev, coupled);
1234    
1235     @@ -596,7 +661,7 @@ have_coupled:
1236     coupled->refcnt++;
1237    
1238     csd = &per_cpu(cpuidle_coupled_poke_cb, dev->cpu);
1239     - csd->func = cpuidle_coupled_poked;
1240     + csd->func = cpuidle_coupled_handle_poke;
1241     csd->info = (void *)(unsigned long)dev->cpu;
1242    
1243     return 0;
1244     diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1245     index 8b6a0343..8b3d9014 100644
1246     --- a/drivers/edac/amd64_edac.c
1247     +++ b/drivers/edac/amd64_edac.c
1248     @@ -2470,8 +2470,15 @@ static int amd64_init_one_instance(struct pci_dev *F2)
1249     layers[0].size = pvt->csels[0].b_cnt;
1250     layers[0].is_virt_csrow = true;
1251     layers[1].type = EDAC_MC_LAYER_CHANNEL;
1252     - layers[1].size = pvt->channel_count;
1253     +
1254     + /*
1255     + * Always allocate two channels since we can have setups with DIMMs on
1256     + * only one channel. Also, this simplifies handling later for the price
1257     + * of a couple of KBs tops.
1258     + */
1259     + layers[1].size = 2;
1260     layers[1].is_virt_csrow = false;
1261     +
1262     mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
1263     if (!mci)
1264     goto err_siblings;
1265     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1266     index 95d6f4b6..70fc1335 100644
1267     --- a/drivers/gpu/drm/drm_edid.c
1268     +++ b/drivers/gpu/drm/drm_edid.c
1269     @@ -125,6 +125,9 @@ static struct edid_quirk {
1270    
1271     /* ViewSonic VA2026w */
1272     { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
1273     +
1274     + /* Medion MD 30217 PG */
1275     + { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
1276     };
1277    
1278     /*
1279     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1280     index be79f477..ca40d1b1 100644
1281     --- a/drivers/gpu/drm/i915/intel_display.c
1282     +++ b/drivers/gpu/drm/i915/intel_display.c
1283     @@ -7809,6 +7809,19 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
1284     pipe_config->cpu_transcoder = to_intel_crtc(crtc)->pipe;
1285     pipe_config->shared_dpll = DPLL_ID_PRIVATE;
1286    
1287     + /*
1288     + * Sanitize sync polarity flags based on requested ones. If neither
1289     + * positive or negative polarity is requested, treat this as meaning
1290     + * negative polarity.
1291     + */
1292     + if (!(pipe_config->adjusted_mode.flags &
1293     + (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
1294     + pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
1295     +
1296     + if (!(pipe_config->adjusted_mode.flags &
1297     + (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
1298     + pipe_config->adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
1299     +
1300     /* Compute a starting value for pipe_config->pipe_bpp taking the source
1301     * plane pixel format and any sink constraints into account. Returns the
1302     * source plane bpp so that dithering can be selected on mismatches
1303     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1304     index 36668d1a..5956445d 100644
1305     --- a/drivers/hid/hid-core.c
1306     +++ b/drivers/hid/hid-core.c
1307     @@ -63,6 +63,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type,
1308     struct hid_report_enum *report_enum = device->report_enum + type;
1309     struct hid_report *report;
1310    
1311     + if (id >= HID_MAX_IDS)
1312     + return NULL;
1313     if (report_enum->report_id_hash[id])
1314     return report_enum->report_id_hash[id];
1315    
1316     @@ -404,8 +406,10 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
1317    
1318     case HID_GLOBAL_ITEM_TAG_REPORT_ID:
1319     parser->global.report_id = item_udata(item);
1320     - if (parser->global.report_id == 0) {
1321     - hid_err(parser->device, "report_id 0 is invalid\n");
1322     + if (parser->global.report_id == 0 ||
1323     + parser->global.report_id >= HID_MAX_IDS) {
1324     + hid_err(parser->device, "report_id %u is invalid\n",
1325     + parser->global.report_id);
1326     return -1;
1327     }
1328     return 0;
1329     @@ -575,7 +579,7 @@ static void hid_close_report(struct hid_device *device)
1330     for (i = 0; i < HID_REPORT_TYPES; i++) {
1331     struct hid_report_enum *report_enum = device->report_enum + i;
1332    
1333     - for (j = 0; j < 256; j++) {
1334     + for (j = 0; j < HID_MAX_IDS; j++) {
1335     struct hid_report *report = report_enum->report_id_hash[j];
1336     if (report)
1337     hid_free_report(report);
1338     @@ -1152,7 +1156,12 @@ EXPORT_SYMBOL_GPL(hid_output_report);
1339    
1340     int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1341     {
1342     - unsigned size = field->report_size;
1343     + unsigned size;
1344     +
1345     + if (!field)
1346     + return -1;
1347     +
1348     + size = field->report_size;
1349    
1350     hid_dump_input(field->report->device, field->usage + offset, value);
1351    
1352     @@ -1597,6 +1606,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1353     { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1354     { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
1355     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1356     + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1357     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
1358     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
1359     { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
1360     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1361     index ffe4c7ae..22134d4b 100644
1362     --- a/drivers/hid/hid-ids.h
1363     +++ b/drivers/hid/hid-ids.h
1364     @@ -135,9 +135,9 @@
1365     #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
1366     #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
1367     #define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
1368     -#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
1369     -#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
1370     -#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
1371     +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
1372     +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
1373     +#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
1374     #define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
1375     #define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
1376     #define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
1377     @@ -482,6 +482,7 @@
1378     #define USB_VENDOR_ID_KYE 0x0458
1379     #define USB_DEVICE_ID_KYE_ERGO_525V 0x0087
1380     #define USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE 0x0138
1381     +#define USB_DEVICE_ID_GENIUS_GX_IMPERATOR 0x4018
1382     #define USB_DEVICE_ID_KYE_GPEN_560 0x5003
1383     #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
1384     #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
1385     @@ -658,6 +659,7 @@
1386     #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16 0x0012
1387     #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17 0x0013
1388     #define USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18 0x0014
1389     +#define USB_DEVICE_ID_NTRIG_DUOSENSE 0x1500
1390    
1391     #define USB_VENDOR_ID_ONTRAK 0x0a07
1392     #define USB_DEVICE_ID_ONTRAK_ADU100 0x0064
1393     diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1394     index 7480799e..3fc4034a 100644
1395     --- a/drivers/hid/hid-input.c
1396     +++ b/drivers/hid/hid-input.c
1397     @@ -340,7 +340,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1398     {
1399     struct hid_device *dev = container_of(psy, struct hid_device, battery);
1400     int ret = 0;
1401     - __u8 buf[2] = {};
1402     + __u8 *buf;
1403    
1404     switch (prop) {
1405     case POWER_SUPPLY_PROP_PRESENT:
1406     @@ -349,12 +349,19 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1407     break;
1408    
1409     case POWER_SUPPLY_PROP_CAPACITY:
1410     +
1411     + buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL);
1412     + if (!buf) {
1413     + ret = -ENOMEM;
1414     + break;
1415     + }
1416     ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
1417     - buf, sizeof(buf),
1418     + buf, 2,
1419     dev->battery_report_type);
1420    
1421     if (ret != 2) {
1422     ret = -ENODATA;
1423     + kfree(buf);
1424     break;
1425     }
1426     ret = 0;
1427     @@ -364,6 +371,7 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1428     buf[1] <= dev->battery_max)
1429     val->intval = (100 * (buf[1] - dev->battery_min)) /
1430     (dev->battery_max - dev->battery_min);
1431     + kfree(buf);
1432     break;
1433    
1434     case POWER_SUPPLY_PROP_MODEL_NAME:
1435     diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
1436     index 1e2ee2aa..73845120 100644
1437     --- a/drivers/hid/hid-kye.c
1438     +++ b/drivers/hid/hid-kye.c
1439     @@ -268,6 +268,26 @@ static __u8 easypen_m610x_rdesc_fixed[] = {
1440     0xC0 /* End Collection */
1441     };
1442    
1443     +static __u8 *kye_consumer_control_fixup(struct hid_device *hdev, __u8 *rdesc,
1444     + unsigned int *rsize, int offset, const char *device_name) {
1445     + /*
1446     + * the fixup that need to be done:
1447     + * the fixup that needs to be done:
1448     + * - change Usage Maximum in the Consumer Control
1449     + */
1450     + if (*rsize >= offset + 31 &&
1451     + /* Usage Page (Consumer Devices) */
1452     + rdesc[offset] == 0x05 && rdesc[offset + 1] == 0x0c &&
1453     + /* Usage (Consumer Control) */
1454     + rdesc[offset + 2] == 0x09 && rdesc[offset + 3] == 0x01 &&
1455     + /* Usage Maximum > 12287 */
1456     + rdesc[offset + 10] == 0x2a && rdesc[offset + 12] > 0x2f) {
1457     + hid_info(hdev, "fixing up %s report descriptor\n", device_name);
1458     + rdesc[offset + 12] = 0x2f;
1459     + }
1460     + return rdesc;
1461     +}
1462     +
1463     static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1464     unsigned int *rsize)
1465     {
1466     @@ -315,23 +335,12 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
1467     }
1468     break;
1469     case USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE:
1470     - /*
1471     - * the fixup that need to be done:
1472     - * - change Usage Maximum in the Comsumer Control
1473     - * (report ID 3) to a reasonable value
1474     - */
1475     - if (*rsize >= 135 &&
1476     - /* Usage Page (Consumer Devices) */
1477     - rdesc[104] == 0x05 && rdesc[105] == 0x0c &&
1478     - /* Usage (Consumer Control) */
1479     - rdesc[106] == 0x09 && rdesc[107] == 0x01 &&
1480     - /* Usage Maximum > 12287 */
1481     - rdesc[114] == 0x2a && rdesc[116] > 0x2f) {
1482     - hid_info(hdev,
1483     - "fixing up Genius Gila Gaming Mouse "
1484     - "report descriptor\n");
1485     - rdesc[116] = 0x2f;
1486     - }
1487     + rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
1488     + "Genius Gila Gaming Mouse");
1489     + break;
1490     + case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
1491     + rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
1492     + "Genius Gx Imperator Keyboard");
1493     break;
1494     }
1495     return rdesc;
1496     @@ -428,6 +437,8 @@ static const struct hid_device_id kye_devices[] = {
1497     USB_DEVICE_ID_KYE_EASYPEN_M610X) },
1498     { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
1499     USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
1500     + { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
1501     + USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
1502     { }
1503     };
1504     MODULE_DEVICE_TABLE(hid, kye_devices);
1505     diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
1506     index ef951025..5482156a 100644
1507     --- a/drivers/hid/hid-ntrig.c
1508     +++ b/drivers/hid/hid-ntrig.c
1509     @@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev)
1510     struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT].
1511     report_id_hash[0x0d];
1512    
1513     - if (!report)
1514     + if (!report || report->maxfield < 1 ||
1515     + report->field[0]->report_count < 1)
1516     return -EINVAL;
1517    
1518     hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
1519     diff --git a/drivers/hid/hid-picolcd_cir.c b/drivers/hid/hid-picolcd_cir.c
1520     index e346038f..59d5eb1e 100644
1521     --- a/drivers/hid/hid-picolcd_cir.c
1522     +++ b/drivers/hid/hid-picolcd_cir.c
1523     @@ -145,6 +145,7 @@ void picolcd_exit_cir(struct picolcd_data *data)
1524     struct rc_dev *rdev = data->rc_dev;
1525    
1526     data->rc_dev = NULL;
1527     - rc_unregister_device(rdev);
1528     + if (rdev)
1529     + rc_unregister_device(rdev);
1530     }
1531    
1532     diff --git a/drivers/hid/hid-picolcd_core.c b/drivers/hid/hid-picolcd_core.c
1533     index b48092d0..acbb0210 100644
1534     --- a/drivers/hid/hid-picolcd_core.c
1535     +++ b/drivers/hid/hid-picolcd_core.c
1536     @@ -290,7 +290,7 @@ static ssize_t picolcd_operation_mode_store(struct device *dev,
1537     buf += 10;
1538     cnt -= 10;
1539     }
1540     - if (!report)
1541     + if (!report || report->maxfield != 1)
1542     return -EINVAL;
1543    
1544     while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r'))
1545     diff --git a/drivers/hid/hid-picolcd_fb.c b/drivers/hid/hid-picolcd_fb.c
1546     index 591f6b22..c930ab85 100644
1547     --- a/drivers/hid/hid-picolcd_fb.c
1548     +++ b/drivers/hid/hid-picolcd_fb.c
1549     @@ -593,10 +593,14 @@ err_nomem:
1550     void picolcd_exit_framebuffer(struct picolcd_data *data)
1551     {
1552     struct fb_info *info = data->fb_info;
1553     - struct picolcd_fb_data *fbdata = info->par;
1554     + struct picolcd_fb_data *fbdata;
1555     unsigned long flags;
1556    
1557     + if (!info)
1558     + return;
1559     +
1560     device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate);
1561     + fbdata = info->par;
1562    
1563     /* disconnect framebuffer from HID dev */
1564     spin_lock_irqsave(&fbdata->lock, flags);
1565     diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
1566     index d29112fa..2dcd7d98 100644
1567     --- a/drivers/hid/hid-pl.c
1568     +++ b/drivers/hid/hid-pl.c
1569     @@ -132,8 +132,14 @@ static int plff_init(struct hid_device *hid)
1570     strong = &report->field[0]->value[2];
1571     weak = &report->field[0]->value[3];
1572     debug("detected single-field device");
1573     - } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 &&
1574     - report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) {
1575     + } else if (report->field[0]->maxusage == 1 &&
1576     + report->field[0]->usage[0].hid ==
1577     + (HID_UP_LED | 0x43) &&
1578     + report->maxfield >= 4 &&
1579     + report->field[0]->report_count >= 1 &&
1580     + report->field[1]->report_count >= 1 &&
1581     + report->field[2]->report_count >= 1 &&
1582     + report->field[3]->report_count >= 1) {
1583     report->field[0]->value[0] = 0x00;
1584     report->field[1]->value[0] = 0x00;
1585     strong = &report->field[2]->value[0];
1586     diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
1587     index ca749810..aa34755c 100644
1588     --- a/drivers/hid/hid-sensor-hub.c
1589     +++ b/drivers/hid/hid-sensor-hub.c
1590     @@ -221,7 +221,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
1591    
1592     mutex_lock(&data->mutex);
1593     report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
1594     - if (!report || (field_index >= report->maxfield)) {
1595     + if (!report || (field_index >= report->maxfield) ||
1596     + report->field[field_index]->report_count < 1) {
1597     ret = -EINVAL;
1598     goto done_proc;
1599     }
1600     diff --git a/drivers/hid/hid-speedlink.c b/drivers/hid/hid-speedlink.c
1601     index a2f587d0..7112f3e8 100644
1602     --- a/drivers/hid/hid-speedlink.c
1603     +++ b/drivers/hid/hid-speedlink.c
1604     @@ -3,7 +3,7 @@
1605     * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
1606     * the HID descriptor.
1607     *
1608     - * Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
1609     + * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@stefankriwanek.de>
1610     */
1611    
1612     /*
1613     @@ -46,8 +46,13 @@ static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
1614     struct hid_usage *usage, __s32 value)
1615     {
1616     /* No other conditions due to usage_table. */
1617     - /* Fix "jumpy" cursor (invalid events sent by device). */
1618     - if (value == 256)
1619     +
1620     + /* This fixes the "jumpy" cursor occurring due to invalid events sent
1621     + * by the device. Some devices only send them with value==+256, others
1622     + * don't. However, catching abs(value)>=256 is restrictive enough not
1623     + * to interfere with devices that were bug-free (has been tested).
1624     + */
1625     + if (abs(value) >= 256)
1626     return 1;
1627     /* Drop useless distance 0 events (on button clicks etc.) as well */
1628     if (value == 0)
1629     diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c
1630     index 0c06054c..66020982 100644
1631     --- a/drivers/hid/hid-wiimote-core.c
1632     +++ b/drivers/hid/hid-wiimote-core.c
1633     @@ -212,10 +212,12 @@ static __u8 select_drm(struct wiimote_data *wdata)
1634    
1635     if (ir == WIIPROTO_FLAG_IR_BASIC) {
1636     if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
1637     - if (ext)
1638     - return WIIPROTO_REQ_DRM_KAIE;
1639     - else
1640     - return WIIPROTO_REQ_DRM_KAI;
1641     + /* GEN10 and earlier devices bind IR formats to DRMs.
1642     + * Hence, we cannot use DRM_KAI here as it might be
1643     + * bound to IR_EXT. Use DRM_KAIE unconditionally so we
1644     + * work with all devices and our parsers can use the
1645     + * fixed formats, too. */
1646     + return WIIPROTO_REQ_DRM_KAIE;
1647     } else {
1648     return WIIPROTO_REQ_DRM_KIE;
1649     }
1650     diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
1651     index 6f1feb2c..dbfe3007 100644
1652     --- a/drivers/hid/hidraw.c
1653     +++ b/drivers/hid/hidraw.c
1654     @@ -113,7 +113,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer,
1655     __u8 *buf;
1656     int ret = 0;
1657    
1658     - if (!hidraw_table[minor]) {
1659     + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
1660     ret = -ENODEV;
1661     goto out;
1662     }
1663     @@ -261,7 +261,7 @@ static int hidraw_open(struct inode *inode, struct file *file)
1664     }
1665    
1666     mutex_lock(&minors_lock);
1667     - if (!hidraw_table[minor]) {
1668     + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
1669     err = -ENODEV;
1670     goto out_unlock;
1671     }
1672     @@ -302,39 +302,38 @@ static int hidraw_fasync(int fd, struct file *file, int on)
1673     return fasync_helper(fd, file, on, &list->fasync);
1674     }
1675    
1676     +static void drop_ref(struct hidraw *hidraw, int exists_bit)
1677     +{
1678     + if (exists_bit) {
1679     + hid_hw_close(hidraw->hid);
1680     + hidraw->exist = 0;
1681     + if (hidraw->open)
1682     + wake_up_interruptible(&hidraw->wait);
1683     + } else {
1684     + --hidraw->open;
1685     + }
1686     +
1687     + if (!hidraw->open && !hidraw->exist) {
1688     + device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
1689     + hidraw_table[hidraw->minor] = NULL;
1690     + kfree(hidraw);
1691     + }
1692     +}
1693     +
1694     static int hidraw_release(struct inode * inode, struct file * file)
1695     {
1696     unsigned int minor = iminor(inode);
1697     - struct hidraw *dev;
1698     struct hidraw_list *list = file->private_data;
1699     - int ret;
1700     - int i;
1701    
1702     mutex_lock(&minors_lock);
1703     - if (!hidraw_table[minor]) {
1704     - ret = -ENODEV;
1705     - goto unlock;
1706     - }
1707    
1708     list_del(&list->node);
1709     - dev = hidraw_table[minor];
1710     - if (!--dev->open) {
1711     - if (list->hidraw->exist) {
1712     - hid_hw_power(dev->hid, PM_HINT_NORMAL);
1713     - hid_hw_close(dev->hid);
1714     - } else {
1715     - kfree(list->hidraw);
1716     - }
1717     - }
1718     -
1719     - for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i)
1720     - kfree(list->buffer[i].value);
1721     kfree(list);
1722     - ret = 0;
1723     -unlock:
1724     - mutex_unlock(&minors_lock);
1725    
1726     - return ret;
1727     + drop_ref(hidraw_table[minor], 0);
1728     +
1729     + mutex_unlock(&minors_lock);
1730     + return 0;
1731     }
1732    
1733     static long hidraw_ioctl(struct file *file, unsigned int cmd,
1734     @@ -539,18 +538,9 @@ void hidraw_disconnect(struct hid_device *hid)
1735     struct hidraw *hidraw = hid->hidraw;
1736    
1737     mutex_lock(&minors_lock);
1738     - hidraw->exist = 0;
1739     -
1740     - device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor));
1741    
1742     - hidraw_table[hidraw->minor] = NULL;
1743     + drop_ref(hidraw, 1);
1744    
1745     - if (hidraw->open) {
1746     - hid_hw_close(hid);
1747     - wake_up_interruptible(&hidraw->wait);
1748     - } else {
1749     - kfree(hidraw);
1750     - }
1751     mutex_unlock(&minors_lock);
1752     }
1753     EXPORT_SYMBOL_GPL(hidraw_disconnect);
1754     diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1755     index 19b8360f..07345521 100644
1756     --- a/drivers/hid/usbhid/hid-quirks.c
1757     +++ b/drivers/hid/usbhid/hid-quirks.c
1758     @@ -109,6 +109,8 @@ static const struct hid_blacklist {
1759     { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
1760     { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
1761     { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
1762     + { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
1763     +
1764     { 0, 0 }
1765     };
1766    
1767     diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
1768     index 4ef4d5e1..a73f9618 100644
1769     --- a/drivers/input/mouse/bcm5974.c
1770     +++ b/drivers/input/mouse/bcm5974.c
1771     @@ -89,9 +89,9 @@
1772     #define USB_DEVICE_ID_APPLE_WELLSPRING7A_ISO 0x025a
1773     #define USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS 0x025b
1774     /* MacbookAir6,2 (unibody, June 2013) */
1775     -#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0291
1776     -#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0292
1777     -#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0293
1778     +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
1779     +#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
1780     +#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
1781    
1782     #define BCM5974_DEVICE(prod) { \
1783     .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
1784     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1785     index eec0d3e0..15e9b57e 100644
1786     --- a/drivers/iommu/intel-iommu.c
1787     +++ b/drivers/iommu/intel-iommu.c
1788     @@ -890,56 +890,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain,
1789     return order;
1790     }
1791    
1792     +static void dma_pte_free_level(struct dmar_domain *domain, int level,
1793     + struct dma_pte *pte, unsigned long pfn,
1794     + unsigned long start_pfn, unsigned long last_pfn)
1795     +{
1796     + pfn = max(start_pfn, pfn);
1797     + pte = &pte[pfn_level_offset(pfn, level)];
1798     +
1799     + do {
1800     + unsigned long level_pfn;
1801     + struct dma_pte *level_pte;
1802     +
1803     + if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1804     + goto next;
1805     +
1806     + level_pfn = pfn & level_mask(level - 1);
1807     + level_pte = phys_to_virt(dma_pte_addr(pte));
1808     +
1809     + if (level > 2)
1810     + dma_pte_free_level(domain, level - 1, level_pte,
1811     + level_pfn, start_pfn, last_pfn);
1812     +
1813     + /* If range covers entire pagetable, free it */
1814     + if (!(start_pfn > level_pfn ||
1815     + last_pfn < level_pfn + level_size(level))) {
1816     + dma_clear_pte(pte);
1817     + domain_flush_cache(domain, pte, sizeof(*pte));
1818     + free_pgtable_page(level_pte);
1819     + }
1820     +next:
1821     + pfn += level_size(level);
1822     + } while (!first_pte_in_page(++pte) && pfn <= last_pfn);
1823     +}
1824     +
1825     /* free page table pages. last level pte should already be cleared */
1826     static void dma_pte_free_pagetable(struct dmar_domain *domain,
1827     unsigned long start_pfn,
1828     unsigned long last_pfn)
1829     {
1830     int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
1831     - struct dma_pte *first_pte, *pte;
1832     - int total = agaw_to_level(domain->agaw);
1833     - int level;
1834     - unsigned long tmp;
1835     - int large_page = 2;
1836    
1837     BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
1838     BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
1839     BUG_ON(start_pfn > last_pfn);
1840    
1841     /* We don't need lock here; nobody else touches the iova range */
1842     - level = 2;
1843     - while (level <= total) {
1844     - tmp = align_to_level(start_pfn, level);
1845     -
1846     - /* If we can't even clear one PTE at this level, we're done */
1847     - if (tmp + level_size(level) - 1 > last_pfn)
1848     - return;
1849     -
1850     - do {
1851     - large_page = level;
1852     - first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
1853     - if (large_page > level)
1854     - level = large_page + 1;
1855     - if (!pte) {
1856     - tmp = align_to_level(tmp + 1, level + 1);
1857     - continue;
1858     - }
1859     - do {
1860     - if (dma_pte_present(pte)) {
1861     - free_pgtable_page(phys_to_virt(dma_pte_addr(pte)));
1862     - dma_clear_pte(pte);
1863     - }
1864     - pte++;
1865     - tmp += level_size(level);
1866     - } while (!first_pte_in_page(pte) &&
1867     - tmp + level_size(level) - 1 <= last_pfn);
1868     + dma_pte_free_level(domain, agaw_to_level(domain->agaw),
1869     + domain->pgd, 0, start_pfn, last_pfn);
1870    
1871     - domain_flush_cache(domain, first_pte,
1872     - (void *)pte - (void *)first_pte);
1873     -
1874     - } while (tmp && tmp + level_size(level) - 1 <= last_pfn);
1875     - level++;
1876     - }
1877     /* free pgd */
1878     if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) {
1879     free_pgtable_page(domain->pgd);
1880     diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
1881     index 120815a4..5a19abde 100644
1882     --- a/drivers/leds/leds-wm831x-status.c
1883     +++ b/drivers/leds/leds-wm831x-status.c
1884     @@ -230,9 +230,9 @@ static int wm831x_status_probe(struct platform_device *pdev)
1885     int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
1886     int ret;
1887    
1888     - res = platform_get_resource(pdev, IORESOURCE_IO, 0);
1889     + res = platform_get_resource(pdev, IORESOURCE_REG, 0);
1890     if (res == NULL) {
1891     - dev_err(&pdev->dev, "No I/O resource\n");
1892     + dev_err(&pdev->dev, "No register resource\n");
1893     ret = -EINVAL;
1894     goto err;
1895     }
1896     diff --git a/drivers/media/common/siano/smsdvb-main.c b/drivers/media/common/siano/smsdvb-main.c
1897     index 08626225..63676a8b 100644
1898     --- a/drivers/media/common/siano/smsdvb-main.c
1899     +++ b/drivers/media/common/siano/smsdvb-main.c
1900     @@ -276,7 +276,8 @@ static void smsdvb_update_per_slices(struct smsdvb_client_t *client,
1901    
1902     /* Legacy PER/BER */
1903     tmp = p->ets_packets * 65535;
1904     - do_div(tmp, p->ts_packets + p->ets_packets);
1905     + if (p->ts_packets + p->ets_packets)
1906     + do_div(tmp, p->ts_packets + p->ets_packets);
1907     client->legacy_per = tmp;
1908     }
1909    
1910     diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
1911     index 856374bd..2c7217fb 100644
1912     --- a/drivers/media/dvb-frontends/mb86a20s.c
1913     +++ b/drivers/media/dvb-frontends/mb86a20s.c
1914     @@ -157,7 +157,6 @@ static struct regdata mb86a20s_init2[] = {
1915     { 0x45, 0x04 }, /* CN symbol 4 */
1916     { 0x48, 0x04 }, /* CN manual mode */
1917    
1918     - { 0x50, 0xd5 }, { 0x51, 0x01 }, /* Serial */
1919     { 0x50, 0xd6 }, { 0x51, 0x1f },
1920     { 0x50, 0xd2 }, { 0x51, 0x03 },
1921     { 0x50, 0xd7 }, { 0x51, 0xbf },
1922     @@ -1860,16 +1859,15 @@ static int mb86a20s_initfe(struct dvb_frontend *fe)
1923     dev_dbg(&state->i2c->dev, "%s: IF=%d, IF reg=0x%06llx\n",
1924     __func__, state->if_freq, (long long)pll);
1925    
1926     - if (!state->config->is_serial) {
1927     + if (!state->config->is_serial)
1928     regD5 &= ~1;
1929    
1930     - rc = mb86a20s_writereg(state, 0x50, 0xd5);
1931     - if (rc < 0)
1932     - goto err;
1933     - rc = mb86a20s_writereg(state, 0x51, regD5);
1934     - if (rc < 0)
1935     - goto err;
1936     - }
1937     + rc = mb86a20s_writereg(state, 0x50, 0xd5);
1938     + if (rc < 0)
1939     + goto err;
1940     + rc = mb86a20s_writereg(state, 0x51, regD5);
1941     + if (rc < 0)
1942     + goto err;
1943    
1944     rc = mb86a20s_writeregdata(state, mb86a20s_init2);
1945     if (rc < 0)
1946     diff --git a/drivers/media/pci/cx88/cx88.h b/drivers/media/pci/cx88/cx88.h
1947     index afe0eaea..28893a6b 100644
1948     --- a/drivers/media/pci/cx88/cx88.h
1949     +++ b/drivers/media/pci/cx88/cx88.h
1950     @@ -259,7 +259,7 @@ struct cx88_input {
1951     };
1952    
1953     enum cx88_audio_chip {
1954     - CX88_AUDIO_WM8775,
1955     + CX88_AUDIO_WM8775 = 1,
1956     CX88_AUDIO_TVAUDIO,
1957     };
1958    
1959     diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
1960     index 559fab2a..1ec60264 100644
1961     --- a/drivers/media/platform/exynos-gsc/gsc-core.c
1962     +++ b/drivers/media/platform/exynos-gsc/gsc-core.c
1963     @@ -1122,10 +1122,14 @@ static int gsc_probe(struct platform_device *pdev)
1964     goto err_clk;
1965     }
1966    
1967     - ret = gsc_register_m2m_device(gsc);
1968     + ret = v4l2_device_register(dev, &gsc->v4l2_dev);
1969     if (ret)
1970     goto err_clk;
1971    
1972     + ret = gsc_register_m2m_device(gsc);
1973     + if (ret)
1974     + goto err_v4l2;
1975     +
1976     platform_set_drvdata(pdev, gsc);
1977     pm_runtime_enable(dev);
1978     ret = pm_runtime_get_sync(&pdev->dev);
1979     @@ -1147,6 +1151,8 @@ err_pm:
1980     pm_runtime_put(dev);
1981     err_m2m:
1982     gsc_unregister_m2m_device(gsc);
1983     +err_v4l2:
1984     + v4l2_device_unregister(&gsc->v4l2_dev);
1985     err_clk:
1986     gsc_clk_put(gsc);
1987     return ret;
1988     @@ -1157,6 +1163,7 @@ static int gsc_remove(struct platform_device *pdev)
1989     struct gsc_dev *gsc = platform_get_drvdata(pdev);
1990    
1991     gsc_unregister_m2m_device(gsc);
1992     + v4l2_device_unregister(&gsc->v4l2_dev);
1993    
1994     vb2_dma_contig_cleanup_ctx(gsc->alloc_ctx);
1995     pm_runtime_disable(&pdev->dev);
1996     diff --git a/drivers/media/platform/exynos-gsc/gsc-core.h b/drivers/media/platform/exynos-gsc/gsc-core.h
1997     index cc19bba0..76435d3b 100644
1998     --- a/drivers/media/platform/exynos-gsc/gsc-core.h
1999     +++ b/drivers/media/platform/exynos-gsc/gsc-core.h
2000     @@ -343,6 +343,7 @@ struct gsc_dev {
2001     unsigned long state;
2002     struct vb2_alloc_ctx *alloc_ctx;
2003     struct video_device vdev;
2004     + struct v4l2_device v4l2_dev;
2005     };
2006    
2007     /**
2008     diff --git a/drivers/media/platform/exynos-gsc/gsc-m2m.c b/drivers/media/platform/exynos-gsc/gsc-m2m.c
2009     index 40a73f7d..e576ff2d 100644
2010     --- a/drivers/media/platform/exynos-gsc/gsc-m2m.c
2011     +++ b/drivers/media/platform/exynos-gsc/gsc-m2m.c
2012     @@ -751,6 +751,7 @@ int gsc_register_m2m_device(struct gsc_dev *gsc)
2013     gsc->vdev.release = video_device_release_empty;
2014     gsc->vdev.lock = &gsc->lock;
2015     gsc->vdev.vfl_dir = VFL_DIR_M2M;
2016     + gsc->vdev.v4l2_dev = &gsc->v4l2_dev;
2017     snprintf(gsc->vdev.name, sizeof(gsc->vdev.name), "%s.%d:m2m",
2018     GSC_MODULE_NAME, gsc->id);
2019    
2020     diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
2021     index 08fbfede..e85dc4f2 100644
2022     --- a/drivers/media/platform/exynos4-is/fimc-lite.c
2023     +++ b/drivers/media/platform/exynos4-is/fimc-lite.c
2024     @@ -90,7 +90,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
2025     .name = "RAW10 (GRBG)",
2026     .fourcc = V4L2_PIX_FMT_SGRBG10,
2027     .colorspace = V4L2_COLORSPACE_SRGB,
2028     - .depth = { 10 },
2029     + .depth = { 16 },
2030     .color = FIMC_FMT_RAW10,
2031     .memplanes = 1,
2032     .mbus_code = V4L2_MBUS_FMT_SGRBG10_1X10,
2033     @@ -99,7 +99,7 @@ static const struct fimc_fmt fimc_lite_formats[] = {
2034     .name = "RAW12 (GRBG)",
2035     .fourcc = V4L2_PIX_FMT_SGRBG12,
2036     .colorspace = V4L2_COLORSPACE_SRGB,
2037     - .depth = { 12 },
2038     + .depth = { 16 },
2039     .color = FIMC_FMT_RAW12,
2040     .memplanes = 1,
2041     .mbus_code = V4L2_MBUS_FMT_SGRBG12_1X12,
2042     diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
2043     index 19f556c5..91f21e27 100644
2044     --- a/drivers/media/platform/exynos4-is/media-dev.c
2045     +++ b/drivers/media/platform/exynos4-is/media-dev.c
2046     @@ -1530,9 +1530,9 @@ static int fimc_md_probe(struct platform_device *pdev)
2047     err_unlock:
2048     mutex_unlock(&fmd->media_dev.graph_mutex);
2049     err_clk:
2050     - media_device_unregister(&fmd->media_dev);
2051     fimc_md_put_clocks(fmd);
2052     fimc_md_unregister_entities(fmd);
2053     + media_device_unregister(&fmd->media_dev);
2054     err_md:
2055     v4l2_device_unregister(&fmd->v4l2_dev);
2056     return ret;
2057     diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
2058     index 47bdb8fa..65edb4a6 100644
2059     --- a/drivers/mmc/host/tmio_mmc_dma.c
2060     +++ b/drivers/mmc/host/tmio_mmc_dma.c
2061     @@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
2062     pio:
2063     if (!desc) {
2064     /* DMA failed, fall back to PIO */
2065     + tmio_mmc_enable_dma(host, false);
2066     if (ret >= 0)
2067     ret = -EIO;
2068     host->chan_rx = NULL;
2069     @@ -116,7 +117,6 @@ pio:
2070     }
2071     dev_warn(&host->pdev->dev,
2072     "DMA failed: %d, falling back to PIO\n", ret);
2073     - tmio_mmc_enable_dma(host, false);
2074     }
2075    
2076     dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
2077     @@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
2078     pio:
2079     if (!desc) {
2080     /* DMA failed, fall back to PIO */
2081     + tmio_mmc_enable_dma(host, false);
2082     if (ret >= 0)
2083     ret = -EIO;
2084     host->chan_tx = NULL;
2085     @@ -197,7 +198,6 @@ pio:
2086     }
2087     dev_warn(&host->pdev->dev,
2088     "DMA failed: %d, falling back to PIO\n", ret);
2089     - tmio_mmc_enable_dma(host, false);
2090     }
2091    
2092     dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__,
2093     diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
2094     index dfcd0a56..fb8c4dea 100644
2095     --- a/drivers/mtd/nand/nand_base.c
2096     +++ b/drivers/mtd/nand/nand_base.c
2097     @@ -2793,7 +2793,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2098    
2099     if (!chip->select_chip)
2100     chip->select_chip = nand_select_chip;
2101     - if (!chip->read_byte)
2102     +
2103     + /* If called twice, pointers that depend on busw may need to be reset */
2104     + if (!chip->read_byte || chip->read_byte == nand_read_byte)
2105     chip->read_byte = busw ? nand_read_byte16 : nand_read_byte;
2106     if (!chip->read_word)
2107     chip->read_word = nand_read_word;
2108     @@ -2801,9 +2803,9 @@ static void nand_set_defaults(struct nand_chip *chip, int busw)
2109     chip->block_bad = nand_block_bad;
2110     if (!chip->block_markbad)
2111     chip->block_markbad = nand_default_block_markbad;
2112     - if (!chip->write_buf)
2113     + if (!chip->write_buf || chip->write_buf == nand_write_buf)
2114     chip->write_buf = busw ? nand_write_buf16 : nand_write_buf;
2115     - if (!chip->read_buf)
2116     + if (!chip->read_buf || chip->read_buf == nand_read_buf)
2117     chip->read_buf = busw ? nand_read_buf16 : nand_read_buf;
2118     if (!chip->scan_bbt)
2119     chip->scan_bbt = nand_default_bbt;
2120     diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
2121     index 5df49d3c..c95bfb18 100644
2122     --- a/drivers/mtd/ubi/wl.c
2123     +++ b/drivers/mtd/ubi/wl.c
2124     @@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
2125     if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
2126     dbg_wl("no WL needed: min used EC %d, max free EC %d",
2127     e1->ec, e2->ec);
2128     +
2129     + /* Give the unused PEB back */
2130     + wl_tree_add(e2, &ubi->free);
2131     goto out_cancel;
2132     }
2133     self_check_in_wl_tree(ubi, e1, &ubi->used);
2134     diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2135     index b017818b..90ab2928 100644
2136     --- a/drivers/net/ethernet/marvell/mvneta.c
2137     +++ b/drivers/net/ethernet/marvell/mvneta.c
2138     @@ -138,7 +138,9 @@
2139     #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1)
2140     #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5)
2141     #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6)
2142     +#define MVNETA_GMAC_AN_SPEED_EN BIT(7)
2143     #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12)
2144     +#define MVNETA_GMAC_AN_DUPLEX_EN BIT(13)
2145     #define MVNETA_MIB_COUNTERS_BASE 0x3080
2146     #define MVNETA_MIB_LATE_COLLISION 0x7c
2147     #define MVNETA_DA_FILT_SPEC_MCAST 0x3400
2148     @@ -915,6 +917,13 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
2149     /* Assign port SDMA configuration */
2150     mvreg_write(pp, MVNETA_SDMA_CONFIG, val);
2151    
2152     + /* Disable PHY polling in hardware, since we're using the
2153     + * kernel phylib to do this.
2154     + */
2155     + val = mvreg_read(pp, MVNETA_UNIT_CONTROL);
2156     + val &= ~MVNETA_PHY_POLLING_ENABLE;
2157     + mvreg_write(pp, MVNETA_UNIT_CONTROL, val);
2158     +
2159     mvneta_set_ucast_table(pp, -1);
2160     mvneta_set_special_mcast_table(pp, -1);
2161     mvneta_set_other_mcast_table(pp, -1);
2162     @@ -2307,7 +2316,9 @@ static void mvneta_adjust_link(struct net_device *ndev)
2163     val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG);
2164     val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED |
2165     MVNETA_GMAC_CONFIG_GMII_SPEED |
2166     - MVNETA_GMAC_CONFIG_FULL_DUPLEX);
2167     + MVNETA_GMAC_CONFIG_FULL_DUPLEX |
2168     + MVNETA_GMAC_AN_SPEED_EN |
2169     + MVNETA_GMAC_AN_DUPLEX_EN);
2170    
2171     if (phydev->duplex)
2172     val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX;
2173     diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2174     index 1f694ab3..77d3a705 100644
2175     --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2176     +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
2177     @@ -1173,6 +1173,10 @@ skip_ws_det:
2178     * is_on == 0 means MRC CCK is OFF (more noise imm)
2179     */
2180     bool is_on = param ? 1 : 0;
2181     +
2182     + if (ah->caps.rx_chainmask == 1)
2183     + break;
2184     +
2185     REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
2186     AR_PHY_MRC_CCK_ENABLE, is_on);
2187     REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
2188     diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
2189     index c1224b5a..020b9b37 100644
2190     --- a/drivers/net/wireless/ath/ath9k/ath9k.h
2191     +++ b/drivers/net/wireless/ath/ath9k/ath9k.h
2192     @@ -79,10 +79,6 @@ struct ath_config {
2193     sizeof(struct ath_buf_state)); \
2194     } while (0)
2195    
2196     -#define ATH_RXBUF_RESET(_bf) do { \
2197     - (_bf)->bf_stale = false; \
2198     - } while (0)
2199     -
2200     /**
2201     * enum buffer_type - Buffer type flags
2202     *
2203     @@ -317,6 +313,7 @@ struct ath_rx {
2204     struct ath_descdma rxdma;
2205     struct ath_rx_edma rx_edma[ATH9K_RX_QUEUE_MAX];
2206    
2207     + struct ath_buf *buf_hold;
2208     struct sk_buff *frag;
2209    
2210     u32 ampdu_ref;
2211     diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
2212     index 865e043e..b4902b34 100644
2213     --- a/drivers/net/wireless/ath/ath9k/recv.c
2214     +++ b/drivers/net/wireless/ath/ath9k/recv.c
2215     @@ -42,8 +42,6 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
2216     struct ath_desc *ds;
2217     struct sk_buff *skb;
2218    
2219     - ATH_RXBUF_RESET(bf);
2220     -
2221     ds = bf->bf_desc;
2222     ds->ds_link = 0; /* link to null */
2223     ds->ds_data = bf->bf_buf_addr;
2224     @@ -70,6 +68,14 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
2225     sc->rx.rxlink = &ds->ds_link;
2226     }
2227    
2228     +static void ath_rx_buf_relink(struct ath_softc *sc, struct ath_buf *bf)
2229     +{
2230     + if (sc->rx.buf_hold)
2231     + ath_rx_buf_link(sc, sc->rx.buf_hold);
2232     +
2233     + sc->rx.buf_hold = bf;
2234     +}
2235     +
2236     static void ath_setdefantenna(struct ath_softc *sc, u32 antenna)
2237     {
2238     /* XXX block beacon interrupts */
2239     @@ -117,7 +123,6 @@ static bool ath_rx_edma_buf_link(struct ath_softc *sc,
2240    
2241     skb = bf->bf_mpdu;
2242    
2243     - ATH_RXBUF_RESET(bf);
2244     memset(skb->data, 0, ah->caps.rx_status_len);
2245     dma_sync_single_for_device(sc->dev, bf->bf_buf_addr,
2246     ah->caps.rx_status_len, DMA_TO_DEVICE);
2247     @@ -432,6 +437,7 @@ int ath_startrecv(struct ath_softc *sc)
2248     if (list_empty(&sc->rx.rxbuf))
2249     goto start_recv;
2250    
2251     + sc->rx.buf_hold = NULL;
2252     sc->rx.rxlink = NULL;
2253     list_for_each_entry_safe(bf, tbf, &sc->rx.rxbuf, list) {
2254     ath_rx_buf_link(sc, bf);
2255     @@ -677,6 +683,9 @@ static struct ath_buf *ath_get_next_rx_buf(struct ath_softc *sc,
2256     }
2257    
2258     bf = list_first_entry(&sc->rx.rxbuf, struct ath_buf, list);
2259     + if (bf == sc->rx.buf_hold)
2260     + return NULL;
2261     +
2262     ds = bf->bf_desc;
2263    
2264     /*
2265     @@ -1375,7 +1384,7 @@ requeue:
2266     if (edma) {
2267     ath_rx_edma_buf_link(sc, qtype);
2268     } else {
2269     - ath_rx_buf_link(sc, bf);
2270     + ath_rx_buf_relink(sc, bf);
2271     ath9k_hw_rxena(ah);
2272     }
2273     } while (1);
2274     diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
2275     index 92799273..ab646838 100644
2276     --- a/drivers/net/wireless/ath/ath9k/xmit.c
2277     +++ b/drivers/net/wireless/ath/ath9k/xmit.c
2278     @@ -2602,6 +2602,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2279     for (acno = 0, ac = &an->ac[acno];
2280     acno < IEEE80211_NUM_ACS; acno++, ac++) {
2281     ac->sched = false;
2282     + ac->clear_ps_filter = true;
2283     ac->txq = sc->tx.txq_map[acno];
2284     INIT_LIST_HEAD(&ac->tid_q);
2285     }
2286     diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2287     index 1860c572..4fb9635d 100644
2288     --- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2289     +++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
2290     @@ -1015,9 +1015,10 @@ static bool dma64_txidle(struct dma_info *di)
2291    
2292     /*
2293     * post receive buffers
2294     - * return false is refill failed completely and ring is empty this will stall
2295     - * the rx dma and user might want to call rxfill again asap. This unlikely
2296     - * happens on memory-rich NIC, but often on memory-constrained dongle
2297     + * Return false if refill failed completely or dma mapping failed. The ring
2298     + * is empty, which will stall the rx dma and user might want to call rxfill
2299     + * again asap. This is unlikely to happen on a memory-rich NIC, but often on
2300     + * memory-constrained dongle.
2301     */
2302     bool dma_rxfill(struct dma_pub *pub)
2303     {
2304     @@ -1078,6 +1079,8 @@ bool dma_rxfill(struct dma_pub *pub)
2305    
2306     pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
2307     DMA_FROM_DEVICE);
2308     + if (dma_mapping_error(di->dmadev, pa))
2309     + return false;
2310    
2311     /* save the free packet pointer */
2312     di->rxp[rxout] = p;
2313     @@ -1284,7 +1287,11 @@ static void dma_txenq(struct dma_info *di, struct sk_buff *p)
2314    
2315     /* get physical address of buffer start */
2316     pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
2317     -
2318     + /* if mapping failed, free skb */
2319     + if (dma_mapping_error(di->dmadev, pa)) {
2320     + brcmu_pkt_buf_free_skb(p);
2321     + return;
2322     + }
2323     /* With a DMA segment list, Descriptor table is filled
2324     * using the segment list instead of looping over
2325     * buffers in multi-chain DMA. Therefore, EOF for SGLIST
2326     diff --git a/drivers/of/base.c b/drivers/of/base.c
2327     index 5c542791..bf8432f5 100644
2328     --- a/drivers/of/base.c
2329     +++ b/drivers/of/base.c
2330     @@ -1629,6 +1629,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
2331     ap = dt_alloc(sizeof(*ap) + len + 1, 4);
2332     if (!ap)
2333     continue;
2334     + memset(ap, 0, sizeof(*ap) + len + 1);
2335     ap->alias = start;
2336     of_alias_add(ap, np, id, start, len);
2337     }
2338     diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
2339     index b90a3a0a..19afb9a7 100644
2340     --- a/drivers/pinctrl/pinctrl-at91.c
2341     +++ b/drivers/pinctrl/pinctrl-at91.c
2342     @@ -325,7 +325,7 @@ static void at91_mux_disable_interrupt(void __iomem *pio, unsigned mask)
2343    
2344     static unsigned at91_mux_get_pullup(void __iomem *pio, unsigned pin)
2345     {
2346     - return (readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1;
2347     + return !((readl_relaxed(pio + PIO_PUSR) >> pin) & 0x1);
2348     }
2349    
2350     static void at91_mux_set_pullup(void __iomem *pio, unsigned mask, bool on)
2351     @@ -445,7 +445,7 @@ static void at91_mux_pio3_set_debounce(void __iomem *pio, unsigned mask,
2352    
2353     static bool at91_mux_pio3_get_pulldown(void __iomem *pio, unsigned pin)
2354     {
2355     - return (__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1;
2356     + return !((__raw_readl(pio + PIO_PPDSR) >> pin) & 0x1);
2357     }
2358    
2359     static void at91_mux_pio3_set_pulldown(void __iomem *pio, unsigned mask, bool is_on)
2360     diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
2361     index 4c1d2e7a..efb0c4c2 100644
2362     --- a/drivers/scsi/mpt3sas/Makefile
2363     +++ b/drivers/scsi/mpt3sas/Makefile
2364     @@ -1,5 +1,5 @@
2365     # mpt3sas makefile
2366     -obj-m += mpt3sas.o
2367     +obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o
2368     mpt3sas-y += mpt3sas_base.o \
2369     mpt3sas_config.o \
2370     mpt3sas_scsih.o \
2371     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2372     index 86fcf2c3..2783dd70 100644
2373     --- a/drivers/scsi/sd.c
2374     +++ b/drivers/scsi/sd.c
2375     @@ -2419,14 +2419,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2376     }
2377     }
2378    
2379     - if (modepage == 0x3F) {
2380     - sd_printk(KERN_ERR, sdkp, "No Caching mode page "
2381     - "present\n");
2382     - goto defaults;
2383     - } else if ((buffer[offset] & 0x3f) != modepage) {
2384     - sd_printk(KERN_ERR, sdkp, "Got wrong page\n");
2385     - goto defaults;
2386     - }
2387     + sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n");
2388     + goto defaults;
2389     +
2390     Page_found:
2391     if (modepage == 8) {
2392     sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0);
2393     diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
2394     index c1950e3b..674b236f 100644
2395     --- a/drivers/staging/comedi/drivers/dt282x.c
2396     +++ b/drivers/staging/comedi/drivers/dt282x.c
2397     @@ -264,8 +264,9 @@ struct dt282x_private {
2398     } \
2399     udelay(5); \
2400     } \
2401     - if (_i) \
2402     + if (_i) { \
2403     b \
2404     + } \
2405     } while (0)
2406    
2407     static int prep_ai_dma(struct comedi_device *dev, int chan, int size);
2408     diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
2409     index e77fb6ea..8f54c503 100644
2410     --- a/drivers/staging/zram/zram_drv.c
2411     +++ b/drivers/staging/zram/zram_drv.c
2412     @@ -445,6 +445,14 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
2413     goto out;
2414     }
2415    
2416     + /*
2417     + * zram_slot_free_notify could miss a free, so let's
2418     + * double check.
2419     + */
2420     + if (unlikely(meta->table[index].handle ||
2421     + zram_test_flag(meta, index, ZRAM_ZERO)))
2422     + zram_free_page(zram, index);
2423     +
2424     ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
2425     meta->compress_workmem);
2426    
2427     @@ -504,6 +512,20 @@ out:
2428     return ret;
2429     }
2430    
2431     +static void handle_pending_slot_free(struct zram *zram)
2432     +{
2433     + struct zram_slot_free *free_rq;
2434     +
2435     + spin_lock(&zram->slot_free_lock);
2436     + while (zram->slot_free_rq) {
2437     + free_rq = zram->slot_free_rq;
2438     + zram->slot_free_rq = free_rq->next;
2439     + zram_free_page(zram, free_rq->index);
2440     + kfree(free_rq);
2441     + }
2442     + spin_unlock(&zram->slot_free_lock);
2443     +}
2444     +
2445     static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
2446     int offset, struct bio *bio, int rw)
2447     {
2448     @@ -511,10 +533,12 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
2449    
2450     if (rw == READ) {
2451     down_read(&zram->lock);
2452     + handle_pending_slot_free(zram);
2453     ret = zram_bvec_read(zram, bvec, index, offset, bio);
2454     up_read(&zram->lock);
2455     } else {
2456     down_write(&zram->lock);
2457     + handle_pending_slot_free(zram);
2458     ret = zram_bvec_write(zram, bvec, index, offset);
2459     up_write(&zram->lock);
2460     }
2461     @@ -522,11 +546,13 @@ static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
2462     return ret;
2463     }
2464    
2465     -static void zram_reset_device(struct zram *zram)
2466     +static void zram_reset_device(struct zram *zram, bool reset_capacity)
2467     {
2468     size_t index;
2469     struct zram_meta *meta;
2470    
2471     + flush_work(&zram->free_work);
2472     +
2473     down_write(&zram->init_lock);
2474     if (!zram->init_done) {
2475     up_write(&zram->init_lock);
2476     @@ -551,7 +577,8 @@ static void zram_reset_device(struct zram *zram)
2477     memset(&zram->stats, 0, sizeof(zram->stats));
2478    
2479     zram->disksize = 0;
2480     - set_capacity(zram->disk, 0);
2481     + if (reset_capacity)
2482     + set_capacity(zram->disk, 0);
2483     up_write(&zram->init_lock);
2484     }
2485    
2486     @@ -635,7 +662,7 @@ static ssize_t reset_store(struct device *dev,
2487     if (bdev)
2488     fsync_bdev(bdev);
2489    
2490     - zram_reset_device(zram);
2491     + zram_reset_device(zram, true);
2492     return len;
2493     }
2494    
2495     @@ -720,16 +747,40 @@ error:
2496     bio_io_error(bio);
2497     }
2498    
2499     +static void zram_slot_free(struct work_struct *work)
2500     +{
2501     + struct zram *zram;
2502     +
2503     + zram = container_of(work, struct zram, free_work);
2504     + down_write(&zram->lock);
2505     + handle_pending_slot_free(zram);
2506     + up_write(&zram->lock);
2507     +}
2508     +
2509     +static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
2510     +{
2511     + spin_lock(&zram->slot_free_lock);
2512     + free_rq->next = zram->slot_free_rq;
2513     + zram->slot_free_rq = free_rq;
2514     + spin_unlock(&zram->slot_free_lock);
2515     +}
2516     +
2517     static void zram_slot_free_notify(struct block_device *bdev,
2518     unsigned long index)
2519     {
2520     struct zram *zram;
2521     + struct zram_slot_free *free_rq;
2522    
2523     zram = bdev->bd_disk->private_data;
2524     - down_write(&zram->lock);
2525     - zram_free_page(zram, index);
2526     - up_write(&zram->lock);
2527     atomic64_inc(&zram->stats.notify_free);
2528     +
2529     + free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
2530     + if (!free_rq)
2531     + return;
2532     +
2533     + free_rq->index = index;
2534     + add_slot_free(zram, free_rq);
2535     + schedule_work(&zram->free_work);
2536     }
2537    
2538     static const struct block_device_operations zram_devops = {
2539     @@ -776,6 +827,10 @@ static int create_device(struct zram *zram, int device_id)
2540     init_rwsem(&zram->lock);
2541     init_rwsem(&zram->init_lock);
2542    
2543     + INIT_WORK(&zram->free_work, zram_slot_free);
2544     + spin_lock_init(&zram->slot_free_lock);
2545     + zram->slot_free_rq = NULL;
2546     +
2547     zram->queue = blk_alloc_queue(GFP_KERNEL);
2548     if (!zram->queue) {
2549     pr_err("Error allocating disk queue for device %d\n",
2550     @@ -902,10 +957,12 @@ static void __exit zram_exit(void)
2551     for (i = 0; i < num_devices; i++) {
2552     zram = &zram_devices[i];
2553    
2554     - get_disk(zram->disk);
2555     destroy_device(zram);
2556     - zram_reset_device(zram);
2557     - put_disk(zram->disk);
2558     + /*
2559     + * Shouldn't access zram->disk after destroy_device
2560     + * because destroy_device already released zram->disk.
2561     + */
2562     + zram_reset_device(zram, false);
2563     }
2564    
2565     unregister_blkdev(zram_major, "zram");
2566     diff --git a/drivers/staging/zram/zram_drv.h b/drivers/staging/zram/zram_drv.h
2567     index 9e57bfb2..97a3acf6 100644
2568     --- a/drivers/staging/zram/zram_drv.h
2569     +++ b/drivers/staging/zram/zram_drv.h
2570     @@ -94,11 +94,20 @@ struct zram_meta {
2571     struct zs_pool *mem_pool;
2572     };
2573    
2574     +struct zram_slot_free {
2575     + unsigned long index;
2576     + struct zram_slot_free *next;
2577     +};
2578     +
2579     struct zram {
2580     struct zram_meta *meta;
2581     struct rw_semaphore lock; /* protect compression buffers, table,
2582     * 32bit stat counters against concurrent
2583     * notifications, reads and writes */
2584     +
2585     + struct work_struct free_work; /* handle pending free request */
2586     + struct zram_slot_free *slot_free_rq; /* list head of free request */
2587     +
2588     struct request_queue *queue;
2589     struct gendisk *disk;
2590     int init_done;
2591     @@ -109,6 +118,7 @@ struct zram {
2592     * we can store in a disk.
2593     */
2594     u64 disksize; /* bytes */
2595     + spinlock_t slot_free_lock;
2596    
2597     struct zram_stats stats;
2598     };
2599     diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
2600     index cbe48ab4..f608fbc1 100644
2601     --- a/drivers/target/target_core_alua.c
2602     +++ b/drivers/target/target_core_alua.c
2603     @@ -730,7 +730,7 @@ static int core_alua_write_tpg_metadata(
2604     if (ret < 0)
2605     pr_err("Error writing ALUA metadata file: %s\n", path);
2606     fput(file);
2607     - return ret ? -EIO : 0;
2608     + return (ret < 0) ? -EIO : 0;
2609     }
2610    
2611     /*
2612     diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
2613     index bd78faf6..adec5a82 100644
2614     --- a/drivers/target/target_core_pr.c
2615     +++ b/drivers/target/target_core_pr.c
2616     @@ -1949,7 +1949,7 @@ static int __core_scsi3_write_aptpl_to_file(
2617     pr_debug("Error writing APTPL metadata file: %s\n", path);
2618     fput(file);
2619    
2620     - return ret ? -EIO : 0;
2621     + return (ret < 0) ? -EIO : 0;
2622     }
2623    
2624     /*
2625     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2626     index 366af832..20689b95 100644
2627     --- a/drivers/tty/tty_io.c
2628     +++ b/drivers/tty/tty_io.c
2629     @@ -850,7 +850,8 @@ void disassociate_ctty(int on_exit)
2630     struct pid *tty_pgrp = tty_get_pgrp(tty);
2631     if (tty_pgrp) {
2632     kill_pgrp(tty_pgrp, SIGHUP, on_exit);
2633     - kill_pgrp(tty_pgrp, SIGCONT, on_exit);
2634     + if (!on_exit)
2635     + kill_pgrp(tty_pgrp, SIGCONT, on_exit);
2636     put_pid(tty_pgrp);
2637     }
2638     }
2639     diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
2640     index 8a230f0e..d3318a0d 100644
2641     --- a/drivers/usb/class/cdc-wdm.c
2642     +++ b/drivers/usb/class/cdc-wdm.c
2643     @@ -209,6 +209,7 @@ skip_error:
2644     static void wdm_int_callback(struct urb *urb)
2645     {
2646     int rv = 0;
2647     + int responding;
2648     int status = urb->status;
2649     struct wdm_device *desc;
2650     struct usb_cdc_notification *dr;
2651     @@ -262,8 +263,8 @@ static void wdm_int_callback(struct urb *urb)
2652    
2653     spin_lock(&desc->iuspin);
2654     clear_bit(WDM_READ, &desc->flags);
2655     - set_bit(WDM_RESPONDING, &desc->flags);
2656     - if (!test_bit(WDM_DISCONNECTING, &desc->flags)
2657     + responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
2658     + if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags)
2659     && !test_bit(WDM_SUSPENDING, &desc->flags)) {
2660     rv = usb_submit_urb(desc->response, GFP_ATOMIC);
2661     dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
2662     @@ -685,16 +686,20 @@ static void wdm_rxwork(struct work_struct *work)
2663     {
2664     struct wdm_device *desc = container_of(work, struct wdm_device, rxwork);
2665     unsigned long flags;
2666     - int rv;
2667     + int rv = 0;
2668     + int responding;
2669    
2670     spin_lock_irqsave(&desc->iuspin, flags);
2671     if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
2672     spin_unlock_irqrestore(&desc->iuspin, flags);
2673     } else {
2674     + responding = test_and_set_bit(WDM_RESPONDING, &desc->flags);
2675     spin_unlock_irqrestore(&desc->iuspin, flags);
2676     - rv = usb_submit_urb(desc->response, GFP_KERNEL);
2677     + if (!responding)
2678     + rv = usb_submit_urb(desc->response, GFP_KERNEL);
2679     if (rv < 0 && rv != -EPERM) {
2680     spin_lock_irqsave(&desc->iuspin, flags);
2681     + clear_bit(WDM_RESPONDING, &desc->flags);
2682     if (!test_bit(WDM_DISCONNECTING, &desc->flags))
2683     schedule_work(&desc->rxwork);
2684     spin_unlock_irqrestore(&desc->iuspin, flags);
2685     diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
2686     index 7199adcc..a6b2cabe 100644
2687     --- a/drivers/usb/core/config.c
2688     +++ b/drivers/usb/core/config.c
2689     @@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx,
2690    
2691     memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE);
2692     if (config->desc.bDescriptorType != USB_DT_CONFIG ||
2693     - config->desc.bLength < USB_DT_CONFIG_SIZE) {
2694     + config->desc.bLength < USB_DT_CONFIG_SIZE ||
2695     + config->desc.bLength > size) {
2696     dev_err(ddev, "invalid descriptor for config index %d: "
2697     "type = 0x%X, length = %d\n", cfgidx,
2698     config->desc.bDescriptorType, config->desc.bLength);
2699     diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2700     index 558313de..17c37852 100644
2701     --- a/drivers/usb/core/hub.c
2702     +++ b/drivers/usb/core/hub.c
2703     @@ -2918,7 +2918,6 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2704     {
2705     struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
2706     struct usb_port *port_dev = hub->ports[udev->portnum - 1];
2707     - enum pm_qos_flags_status pm_qos_stat;
2708     int port1 = udev->portnum;
2709     int status;
2710     bool really_suspend = true;
2711     @@ -2956,7 +2955,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2712     status);
2713     /* bail if autosuspend is requested */
2714     if (PMSG_IS_AUTO(msg))
2715     - return status;
2716     + goto err_wakeup;
2717     }
2718     }
2719    
2720     @@ -2965,14 +2964,16 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2721     usb_set_usb2_hardware_lpm(udev, 0);
2722    
2723     if (usb_disable_ltm(udev)) {
2724     - dev_err(&udev->dev, "%s Failed to disable LTM before suspend\n.",
2725     - __func__);
2726     - return -ENOMEM;
2727     + dev_err(&udev->dev, "Failed to disable LTM before suspend.\n");
2728     + status = -ENOMEM;
2729     + if (PMSG_IS_AUTO(msg))
2730     + goto err_ltm;
2731     }
2732     if (usb_unlocked_disable_lpm(udev)) {
2733     - dev_err(&udev->dev, "%s Failed to disable LPM before suspend\n.",
2734     - __func__);
2735     - return -ENOMEM;
2736     + dev_err(&udev->dev, "Failed to disable LPM before suspend.\n");
2737     + status = -ENOMEM;
2738     + if (PMSG_IS_AUTO(msg))
2739     + goto err_lpm3;
2740     }
2741    
2742     /* see 7.1.7.6 */
2743     @@ -3000,28 +3001,31 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2744     if (status) {
2745     dev_dbg(hub->intfdev, "can't suspend port %d, status %d\n",
2746     port1, status);
2747     - /* paranoia: "should not happen" */
2748     - if (udev->do_remote_wakeup) {
2749     - if (!hub_is_superspeed(hub->hdev)) {
2750     - (void) usb_control_msg(udev,
2751     - usb_sndctrlpipe(udev, 0),
2752     - USB_REQ_CLEAR_FEATURE,
2753     - USB_RECIP_DEVICE,
2754     - USB_DEVICE_REMOTE_WAKEUP, 0,
2755     - NULL, 0,
2756     - USB_CTRL_SET_TIMEOUT);
2757     - } else
2758     - (void) usb_disable_function_remotewakeup(udev);
2759     -
2760     - }
2761    
2762     + /* Try to enable USB3 LPM and LTM again */
2763     + usb_unlocked_enable_lpm(udev);
2764     + err_lpm3:
2765     + usb_enable_ltm(udev);
2766     + err_ltm:
2767     /* Try to enable USB2 hardware LPM again */
2768     if (udev->usb2_hw_lpm_capable == 1)
2769     usb_set_usb2_hardware_lpm(udev, 1);
2770    
2771     - /* Try to enable USB3 LTM and LPM again */
2772     - usb_enable_ltm(udev);
2773     - usb_unlocked_enable_lpm(udev);
2774     + if (udev->do_remote_wakeup) {
2775     + if (udev->speed < USB_SPEED_SUPER)
2776     + usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
2777     + USB_REQ_CLEAR_FEATURE,
2778     + USB_RECIP_DEVICE,
2779     + USB_DEVICE_REMOTE_WAKEUP, 0,
2780     + NULL, 0, USB_CTRL_SET_TIMEOUT);
2781     + else
2782     + usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
2783     + USB_REQ_CLEAR_FEATURE,
2784     + USB_RECIP_INTERFACE,
2785     + USB_INTRF_FUNC_SUSPEND, 0,
2786     + NULL, 0, USB_CTRL_SET_TIMEOUT);
2787     + }
2788     + err_wakeup:
2789    
2790     /* System sleep transitions should never fail */
2791     if (!PMSG_IS_AUTO(msg))
2792     @@ -3039,16 +3043,7 @@ int usb_port_suspend(struct usb_device *udev, pm_message_t msg)
2793     usb_set_device_state(udev, USB_STATE_SUSPENDED);
2794     }
2795    
2796     - /*
2797     - * Check whether current status meets the requirement of
2798     - * usb port power off mechanism
2799     - */
2800     - pm_qos_stat = dev_pm_qos_flags(&port_dev->dev,
2801     - PM_QOS_FLAG_NO_POWER_OFF);
2802     - if (!udev->do_remote_wakeup
2803     - && pm_qos_stat != PM_QOS_FLAGS_ALL
2804     - && udev->persist_enabled
2805     - && !status) {
2806     + if (status == 0 && !udev->do_remote_wakeup && udev->persist_enabled) {
2807     pm_runtime_put_sync(&port_dev->dev);
2808     port_dev->did_runtime_put = true;
2809     }
2810     diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
2811     index d6b0fadf..99099116 100644
2812     --- a/drivers/usb/core/port.c
2813     +++ b/drivers/usb/core/port.c
2814     @@ -89,22 +89,19 @@ static int usb_port_runtime_resume(struct device *dev)
2815     retval = usb_hub_set_port_power(hdev, hub, port1, true);
2816     if (port_dev->child && !retval) {
2817     /*
2818     - * Wait for usb hub port to be reconnected in order to make
2819     - * the resume procedure successful.
2820     + * Attempt to wait for usb hub port to be reconnected in order
2821     + * to make the resume procedure successful. The device may have
2822     + * disconnected while the port was powered off, so ignore the
2823     + * return status.
2824     */
2825     retval = hub_port_debounce_be_connected(hub, port1);
2826     - if (retval < 0) {
2827     + if (retval < 0)
2828     dev_dbg(&port_dev->dev, "can't get reconnection after setting port power on, status %d\n",
2829     retval);
2830     - goto out;
2831     - }
2832     usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_ENABLE);
2833     -
2834     - /* Set return value to 0 if debounce successful */
2835     retval = 0;
2836     }
2837    
2838     -out:
2839     clear_bit(port1, hub->busy_bits);
2840     usb_autopm_put_interface(intf);
2841     return retval;
2842     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2843     index f77083fe..14d28d61 100644
2844     --- a/drivers/usb/dwc3/gadget.c
2845     +++ b/drivers/usb/dwc3/gadget.c
2846     @@ -1508,6 +1508,15 @@ static int dwc3_gadget_start(struct usb_gadget *g,
2847     int irq;
2848     u32 reg;
2849    
2850     + irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2851     + ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
2852     + IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
2853     + if (ret) {
2854     + dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2855     + irq, ret);
2856     + goto err0;
2857     + }
2858     +
2859     spin_lock_irqsave(&dwc->lock, flags);
2860    
2861     if (dwc->gadget_driver) {
2862     @@ -1515,7 +1524,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
2863     dwc->gadget.name,
2864     dwc->gadget_driver->driver.name);
2865     ret = -EBUSY;
2866     - goto err0;
2867     + goto err1;
2868     }
2869    
2870     dwc->gadget_driver = driver;
2871     @@ -1551,42 +1560,38 @@ static int dwc3_gadget_start(struct usb_gadget *g,
2872     ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2873     if (ret) {
2874     dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2875     - goto err0;
2876     + goto err2;
2877     }
2878    
2879     dep = dwc->eps[1];
2880     ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false);
2881     if (ret) {
2882     dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2883     - goto err1;
2884     + goto err3;
2885     }
2886    
2887     /* begin to receive SETUP packets */
2888     dwc->ep0state = EP0_SETUP_PHASE;
2889     dwc3_ep0_out_start(dwc);
2890    
2891     - irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2892     - ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
2893     - IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
2894     - if (ret) {
2895     - dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
2896     - irq, ret);
2897     - goto err1;
2898     - }
2899     -
2900     dwc3_gadget_enable_irq(dwc);
2901    
2902     spin_unlock_irqrestore(&dwc->lock, flags);
2903    
2904     return 0;
2905    
2906     -err1:
2907     +err3:
2908     __dwc3_gadget_ep_disable(dwc->eps[0]);
2909    
2910     -err0:
2911     +err2:
2912     dwc->gadget_driver = NULL;
2913     +
2914     +err1:
2915     spin_unlock_irqrestore(&dwc->lock, flags);
2916    
2917     + free_irq(irq, dwc);
2918     +
2919     +err0:
2920     return ret;
2921     }
2922    
2923     @@ -1600,9 +1605,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
2924     spin_lock_irqsave(&dwc->lock, flags);
2925    
2926     dwc3_gadget_disable_irq(dwc);
2927     - irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2928     - free_irq(irq, dwc);
2929     -
2930     __dwc3_gadget_ep_disable(dwc->eps[0]);
2931     __dwc3_gadget_ep_disable(dwc->eps[1]);
2932    
2933     @@ -1610,6 +1612,9 @@ static int dwc3_gadget_stop(struct usb_gadget *g,
2934    
2935     spin_unlock_irqrestore(&dwc->lock, flags);
2936    
2937     + irq = platform_get_irq(to_platform_device(dwc->dev), 0);
2938     + free_irq(irq, dwc);
2939     +
2940     return 0;
2941     }
2942    
2943     diff --git a/drivers/usb/gadget/uvc_queue.c b/drivers/usb/gadget/uvc_queue.c
2944     index e6170478..0bb5d500 100644
2945     --- a/drivers/usb/gadget/uvc_queue.c
2946     +++ b/drivers/usb/gadget/uvc_queue.c
2947     @@ -193,12 +193,16 @@ static int uvc_queue_buffer(struct uvc_video_queue *queue,
2948    
2949     mutex_lock(&queue->mutex);
2950     ret = vb2_qbuf(&queue->queue, buf);
2951     + if (ret < 0)
2952     + goto done;
2953     +
2954     spin_lock_irqsave(&queue->irqlock, flags);
2955     ret = (queue->flags & UVC_QUEUE_PAUSED) != 0;
2956     queue->flags &= ~UVC_QUEUE_PAUSED;
2957     spin_unlock_irqrestore(&queue->irqlock, flags);
2958     - mutex_unlock(&queue->mutex);
2959    
2960     +done:
2961     + mutex_unlock(&queue->mutex);
2962     return ret;
2963     }
2964    
2965     diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
2966     index e4c34ac3..4c166e1e 100644
2967     --- a/drivers/usb/host/ehci-mxc.c
2968     +++ b/drivers/usb/host/ehci-mxc.c
2969     @@ -184,7 +184,7 @@ static int ehci_mxc_drv_remove(struct platform_device *pdev)
2970     if (pdata && pdata->exit)
2971     pdata->exit(pdev);
2972    
2973     - if (pdata->otg)
2974     + if (pdata && pdata->otg)
2975     usb_phy_shutdown(pdata->otg);
2976    
2977     clk_disable_unprepare(priv->usbclk);
2978     diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
2979     index 279b0491..ec337c2b 100644
2980     --- a/drivers/usb/host/ohci-pci.c
2981     +++ b/drivers/usb/host/ohci-pci.c
2982     @@ -289,7 +289,7 @@ static struct pci_driver ohci_pci_driver = {
2983     .remove = usb_hcd_pci_remove,
2984     .shutdown = usb_hcd_pci_shutdown,
2985    
2986     -#ifdef CONFIG_PM_SLEEP
2987     +#ifdef CONFIG_PM
2988     .driver = {
2989     .pm = &usb_hcd_pci_pm_ops
2990     },
2991     diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
2992     index 8d7a1324..9fe3225e 100644
2993     --- a/drivers/usb/host/xhci-ext-caps.h
2994     +++ b/drivers/usb/host/xhci-ext-caps.h
2995     @@ -71,7 +71,7 @@
2996    
2997     /* USB 2.0 xHCI 1.0 hardware LMP capability - section 7.2.2.1.3.2 */
2998     #define XHCI_HLC (1 << 19)
2999     -#define XHCI_BLC (1 << 19)
3000     +#define XHCI_BLC (1 << 20)
3001    
3002     /* command register values to disable interrupts and halt the HC */
3003     /* start/stop HC execution - do not write unless HC is halted*/
3004     diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
3005     index 51e22bf8..6eca5a53 100644
3006     --- a/drivers/usb/host/xhci-plat.c
3007     +++ b/drivers/usb/host/xhci-plat.c
3008     @@ -24,7 +24,7 @@ static void xhci_plat_quirks(struct device *dev, struct xhci_hcd *xhci)
3009     * here that the generic code does not try to make a pci_dev from our
3010     * dev struct in order to setup MSI
3011     */
3012     - xhci->quirks |= XHCI_BROKEN_MSI;
3013     + xhci->quirks |= XHCI_PLAT;
3014     }
3015    
3016     /* called during probe() after chip reset completes */
3017     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3018     index 9478caa2..b3c4162c 100644
3019     --- a/drivers/usb/host/xhci.c
3020     +++ b/drivers/usb/host/xhci.c
3021     @@ -343,9 +343,14 @@ static void __maybe_unused xhci_msix_sync_irqs(struct xhci_hcd *xhci)
3022     static int xhci_try_enable_msi(struct usb_hcd *hcd)
3023     {
3024     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3025     - struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
3026     + struct pci_dev *pdev;
3027     int ret;
3028    
3029     + /* The xhci platform device has set up IRQs through usb_add_hcd. */
3030     + if (xhci->quirks & XHCI_PLAT)
3031     + return 0;
3032     +
3033     + pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
3034     /*
3035     * Some Fresco Logic host controllers advertise MSI, but fail to
3036     * generate interrupts. Don't even try to enable MSI.
3037     @@ -3581,10 +3586,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
3038     {
3039     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3040     struct xhci_virt_device *virt_dev;
3041     + struct device *dev = hcd->self.controller;
3042     unsigned long flags;
3043     u32 state;
3044     int i, ret;
3045    
3046     +#ifndef CONFIG_USB_DEFAULT_PERSIST
3047     + /*
3048     + * We called pm_runtime_get_noresume when the device was attached.
3049     + * Decrement the counter here to allow controller to runtime suspend
3050     + * if no devices remain.
3051     + */
3052     + if (xhci->quirks & XHCI_RESET_ON_RESUME)
3053     + pm_runtime_put_noidle(dev);
3054     +#endif
3055     +
3056     ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
3057     /* If the host is halted due to driver unload, we still need to free the
3058     * device.
3059     @@ -3656,6 +3672,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci)
3060     int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3061     {
3062     struct xhci_hcd *xhci = hcd_to_xhci(hcd);
3063     + struct device *dev = hcd->self.controller;
3064     unsigned long flags;
3065     int timeleft;
3066     int ret;
3067     @@ -3708,6 +3725,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
3068     goto disable_slot;
3069     }
3070     udev->slot_id = xhci->slot_id;
3071     +
3072     +#ifndef CONFIG_USB_DEFAULT_PERSIST
3073     + /*
3074     + * If resetting upon resume, we can't put the controller into runtime
3075     + * suspend if there is a device attached.
3076     + */
3077     + if (xhci->quirks & XHCI_RESET_ON_RESUME)
3078     + pm_runtime_get_noresume(dev);
3079     +#endif
3080     +
3081     /* Is this a LS or FS device under a HS hub? */
3082     /* Hub or peripherial? */
3083     return 1;
3084     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3085     index c338741a..6ab1e600 100644
3086     --- a/drivers/usb/host/xhci.h
3087     +++ b/drivers/usb/host/xhci.h
3088     @@ -1542,6 +1542,7 @@ struct xhci_hcd {
3089     #define XHCI_SPURIOUS_REBOOT (1 << 13)
3090     #define XHCI_COMP_MODE_QUIRK (1 << 14)
3091     #define XHCI_AVOID_BEI (1 << 15)
3092     +#define XHCI_PLAT (1 << 16)
3093     unsigned int num_active_eps;
3094     unsigned int limit_active_eps;
3095     /* There are two roothubs to keep track of bus suspend info for */
3096     diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
3097     index b0130016..84657e07 100644
3098     --- a/drivers/usb/serial/mos7720.c
3099     +++ b/drivers/usb/serial/mos7720.c
3100     @@ -374,7 +374,7 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
3101     kfree(urbtrack);
3102     return -ENOMEM;
3103     }
3104     - urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL);
3105     + urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC);
3106     if (!urbtrack->setup) {
3107     usb_free_urb(urbtrack->urb);
3108     kfree(urbtrack);
3109     @@ -382,8 +382,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
3110     }
3111     urbtrack->setup->bRequestType = (__u8)0x40;
3112     urbtrack->setup->bRequest = (__u8)0x0e;
3113     - urbtrack->setup->wValue = get_reg_value(reg, dummy);
3114     - urbtrack->setup->wIndex = get_reg_index(reg);
3115     + urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy));
3116     + urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg));
3117     urbtrack->setup->wLength = 0;
3118     usb_fill_control_urb(urbtrack->urb, usbdev,
3119     usb_sndctrlpipe(usbdev, 0),
3120     diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
3121     index 04cdeb8e..c4d22988 100644
3122     --- a/drivers/xen/grant-table.c
3123     +++ b/drivers/xen/grant-table.c
3124     @@ -730,9 +730,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback,
3125     void (*fn)(void *), void *arg, u16 count)
3126     {
3127     unsigned long flags;
3128     + struct gnttab_free_callback *cb;
3129     +
3130     spin_lock_irqsave(&gnttab_list_lock, flags);
3131     - if (callback->next)
3132     - goto out;
3133     +
3134     + /* Check if the callback is already on the list */
3135     + cb = gnttab_free_callback_list;
3136     + while (cb) {
3137     + if (cb == callback)
3138     + goto out;
3139     + cb = cb->next;
3140     + }
3141     +
3142     callback->fn = fn;
3143     callback->arg = arg;
3144     callback->count = count;
3145     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
3146     index 238a0554..9877a2a2 100644
3147     --- a/fs/btrfs/ioctl.c
3148     +++ b/fs/btrfs/ioctl.c
3149     @@ -3312,6 +3312,9 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg)
3150    
3151     switch (p->cmd) {
3152     case BTRFS_IOCTL_DEV_REPLACE_CMD_START:
3153     + if (root->fs_info->sb->s_flags & MS_RDONLY)
3154     + return -EROFS;
3155     +
3156     if (atomic_xchg(
3157     &root->fs_info->mutually_exclusive_operation_running,
3158     1)) {
3159     diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
3160     index e0b4ef31..a5ce62eb 100644
3161     --- a/fs/ceph/ioctl.c
3162     +++ b/fs/ceph/ioctl.c
3163     @@ -196,8 +196,10 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
3164     r = ceph_calc_file_object_mapping(&ci->i_layout, dl.file_offset, len,
3165     &dl.object_no, &dl.object_offset,
3166     &olen);
3167     - if (r < 0)
3168     + if (r < 0) {
3169     + up_read(&osdc->map_sem);
3170     return -EIO;
3171     + }
3172     dl.file_offset -= dl.object_offset;
3173     dl.object_size = ceph_file_layout_object_size(ci->i_layout);
3174     dl.block_size = ceph_file_layout_su(ci->i_layout);
3175     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3176     index d67c550c..37950c65 100644
3177     --- a/fs/cifs/connect.c
3178     +++ b/fs/cifs/connect.c
3179     @@ -379,6 +379,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
3180     try_to_freeze();
3181    
3182     /* we should try only the port we connected to before */
3183     + mutex_lock(&server->srv_mutex);
3184     rc = generic_ip_connect(server);
3185     if (rc) {
3186     cifs_dbg(FYI, "reconnect error %d\n", rc);
3187     @@ -390,6 +391,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
3188     server->tcpStatus = CifsNeedNegotiate;
3189     spin_unlock(&GlobalMid_Lock);
3190     }
3191     + mutex_unlock(&server->srv_mutex);
3192     } while (server->tcpStatus == CifsNeedReconnect);
3193    
3194     return rc;
3195     diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
3196     index b0c43345..f851d03f 100644
3197     --- a/fs/cifs/smb2misc.c
3198     +++ b/fs/cifs/smb2misc.c
3199     @@ -417,96 +417,108 @@ cifs_ses_oplock_break(struct work_struct *work)
3200     }
3201    
3202     static bool
3203     -smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
3204     +smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
3205     + struct smb2_lease_break_work *lw)
3206     {
3207     - struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
3208     - struct list_head *tmp, *tmp1, *tmp2;
3209     - struct cifs_ses *ses;
3210     - struct cifs_tcon *tcon;
3211     - struct cifsInodeInfo *cinode;
3212     + bool found;
3213     + __u8 lease_state;
3214     + struct list_head *tmp;
3215     struct cifsFileInfo *cfile;
3216     struct cifs_pending_open *open;
3217     - struct smb2_lease_break_work *lw;
3218     - bool found;
3219     + struct cifsInodeInfo *cinode;
3220     int ack_req = le32_to_cpu(rsp->Flags &
3221     SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
3222    
3223     - lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3224     - if (!lw)
3225     - return false;
3226     + lease_state = smb2_map_lease_to_oplock(rsp->NewLeaseState);
3227    
3228     - INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3229     - lw->lease_state = rsp->NewLeaseState;
3230     + list_for_each(tmp, &tcon->openFileList) {
3231     + cfile = list_entry(tmp, struct cifsFileInfo, tlist);
3232     + cinode = CIFS_I(cfile->dentry->d_inode);
3233    
3234     - cifs_dbg(FYI, "Checking for lease break\n");
3235     + if (memcmp(cinode->lease_key, rsp->LeaseKey,
3236     + SMB2_LEASE_KEY_SIZE))
3237     + continue;
3238    
3239     - /* look up tcon based on tid & uid */
3240     - spin_lock(&cifs_tcp_ses_lock);
3241     - list_for_each(tmp, &server->smb_ses_list) {
3242     - ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
3243     + cifs_dbg(FYI, "found in the open list\n");
3244     + cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3245     + le32_to_cpu(rsp->NewLeaseState));
3246    
3247     - spin_lock(&cifs_file_list_lock);
3248     - list_for_each(tmp1, &ses->tcon_list) {
3249     - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
3250     + smb2_set_oplock_level(cinode, lease_state);
3251    
3252     - cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
3253     - list_for_each(tmp2, &tcon->openFileList) {
3254     - cfile = list_entry(tmp2, struct cifsFileInfo,
3255     - tlist);
3256     - cinode = CIFS_I(cfile->dentry->d_inode);
3257     + if (ack_req)
3258     + cfile->oplock_break_cancelled = false;
3259     + else
3260     + cfile->oplock_break_cancelled = true;
3261    
3262     - if (memcmp(cinode->lease_key, rsp->LeaseKey,
3263     - SMB2_LEASE_KEY_SIZE))
3264     - continue;
3265     + queue_work(cifsiod_wq, &cfile->oplock_break);
3266     + kfree(lw);
3267     + return true;
3268     + }
3269    
3270     - cifs_dbg(FYI, "found in the open list\n");
3271     - cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3272     - le32_to_cpu(rsp->NewLeaseState));
3273     + found = false;
3274     + list_for_each_entry(open, &tcon->pending_opens, olist) {
3275     + if (memcmp(open->lease_key, rsp->LeaseKey,
3276     + SMB2_LEASE_KEY_SIZE))
3277     + continue;
3278     +
3279     + if (!found && ack_req) {
3280     + found = true;
3281     + memcpy(lw->lease_key, open->lease_key,
3282     + SMB2_LEASE_KEY_SIZE);
3283     + lw->tlink = cifs_get_tlink(open->tlink);
3284     + queue_work(cifsiod_wq, &lw->lease_break);
3285     + }
3286    
3287     - smb2_set_oplock_level(cinode,
3288     - smb2_map_lease_to_oplock(rsp->NewLeaseState));
3289     + cifs_dbg(FYI, "found in the pending open list\n");
3290     + cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3291     + le32_to_cpu(rsp->NewLeaseState));
3292    
3293     - if (ack_req)
3294     - cfile->oplock_break_cancelled = false;
3295     - else
3296     - cfile->oplock_break_cancelled = true;
3297     + open->oplock = lease_state;
3298     + }
3299     + return found;
3300     +}
3301    
3302     - queue_work(cifsiod_wq, &cfile->oplock_break);
3303     +static bool
3304     +smb2_is_valid_lease_break(char *buffer)
3305     +{
3306     + struct smb2_lease_break *rsp = (struct smb2_lease_break *)buffer;
3307     + struct list_head *tmp, *tmp1, *tmp2;
3308     + struct TCP_Server_Info *server;
3309     + struct cifs_ses *ses;
3310     + struct cifs_tcon *tcon;
3311     + struct smb2_lease_break_work *lw;
3312    
3313     - spin_unlock(&cifs_file_list_lock);
3314     - spin_unlock(&cifs_tcp_ses_lock);
3315     - return true;
3316     - }
3317     + lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
3318     + if (!lw)
3319     + return false;
3320    
3321     - found = false;
3322     - list_for_each_entry(open, &tcon->pending_opens, olist) {
3323     - if (memcmp(open->lease_key, rsp->LeaseKey,
3324     - SMB2_LEASE_KEY_SIZE))
3325     - continue;
3326     + INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
3327     + lw->lease_state = rsp->NewLeaseState;
3328    
3329     - if (!found && ack_req) {
3330     - found = true;
3331     - memcpy(lw->lease_key, open->lease_key,
3332     - SMB2_LEASE_KEY_SIZE);
3333     - lw->tlink = cifs_get_tlink(open->tlink);
3334     - queue_work(cifsiod_wq,
3335     - &lw->lease_break);
3336     - }
3337     + cifs_dbg(FYI, "Checking for lease break\n");
3338     +
3339     + /* look up tcon based on tid & uid */
3340     + spin_lock(&cifs_tcp_ses_lock);
3341     + list_for_each(tmp, &cifs_tcp_ses_list) {
3342     + server = list_entry(tmp, struct TCP_Server_Info, tcp_ses_list);
3343    
3344     - cifs_dbg(FYI, "found in the pending open list\n");
3345     - cifs_dbg(FYI, "lease key match, lease break 0x%d\n",
3346     - le32_to_cpu(rsp->NewLeaseState));
3347     + list_for_each(tmp1, &server->smb_ses_list) {
3348     + ses = list_entry(tmp1, struct cifs_ses, smb_ses_list);
3349    
3350     - open->oplock =
3351     - smb2_map_lease_to_oplock(rsp->NewLeaseState);
3352     - }
3353     - if (found) {
3354     - spin_unlock(&cifs_file_list_lock);
3355     - spin_unlock(&cifs_tcp_ses_lock);
3356     - return true;
3357     + spin_lock(&cifs_file_list_lock);
3358     + list_for_each(tmp2, &ses->tcon_list) {
3359     + tcon = list_entry(tmp2, struct cifs_tcon,
3360     + tcon_list);
3361     + cifs_stats_inc(
3362     + &tcon->stats.cifs_stats.num_oplock_brks);
3363     + if (smb2_tcon_has_lease(tcon, rsp, lw)) {
3364     + spin_unlock(&cifs_file_list_lock);
3365     + spin_unlock(&cifs_tcp_ses_lock);
3366     + return true;
3367     + }
3368     }
3369     + spin_unlock(&cifs_file_list_lock);
3370     }
3371     - spin_unlock(&cifs_file_list_lock);
3372     }
3373     spin_unlock(&cifs_tcp_ses_lock);
3374     kfree(lw);
3375     @@ -532,7 +544,7 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
3376     if (rsp->StructureSize !=
3377     smb2_rsp_struct_sizes[SMB2_OPLOCK_BREAK_HE]) {
3378     if (le16_to_cpu(rsp->StructureSize) == 44)
3379     - return smb2_is_valid_lease_break(buffer, server);
3380     + return smb2_is_valid_lease_break(buffer);
3381     else
3382     return false;
3383     }
3384     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
3385     index c2ca04e6..ea4d1884 100644
3386     --- a/fs/ext4/inode.c
3387     +++ b/fs/ext4/inode.c
3388     @@ -1890,6 +1890,26 @@ static int ext4_writepage(struct page *page,
3389     return ret;
3390     }
3391    
3392     +static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
3393     +{
3394     + int len;
3395     + loff_t size = i_size_read(mpd->inode);
3396     + int err;
3397     +
3398     + BUG_ON(page->index != mpd->first_page);
3399     + if (page->index == size >> PAGE_CACHE_SHIFT)
3400     + len = size & ~PAGE_CACHE_MASK;
3401     + else
3402     + len = PAGE_CACHE_SIZE;
3403     + clear_page_dirty_for_io(page);
3404     + err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
3405     + if (!err)
3406     + mpd->wbc->nr_to_write--;
3407     + mpd->first_page++;
3408     +
3409     + return err;
3410     +}
3411     +
3412     #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
3413    
3414     /*
3415     @@ -1904,82 +1924,94 @@ static int ext4_writepage(struct page *page,
3416     *
3417     * @mpd - extent of blocks
3418     * @lblk - logical number of the block in the file
3419     - * @b_state - b_state of the buffer head added
3420     + * @bh - buffer head we want to add to the extent
3421     *
3422     - * the function is used to collect contig. blocks in same state
3423     + * The function is used to collect contig. blocks in the same state. If the
3424     + * buffer doesn't require mapping for writeback and we haven't started the
3425     + * extent of buffers to map yet, the function returns 'true' immediately - the
3426     + * caller can write the buffer right away. Otherwise the function returns true
3427     + * if the block has been added to the extent, false if the block couldn't be
3428     + * added.
3429     */
3430     -static int mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
3431     - unsigned long b_state)
3432     +static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
3433     + struct buffer_head *bh)
3434     {
3435     struct ext4_map_blocks *map = &mpd->map;
3436    
3437     - /* Don't go larger than mballoc is willing to allocate */
3438     - if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
3439     - return 0;
3440     + /* Buffer that doesn't need mapping for writeback? */
3441     + if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
3442     + (!buffer_delay(bh) && !buffer_unwritten(bh))) {
3443     + /* So far no extent to map => we write the buffer right away */
3444     + if (map->m_len == 0)
3445     + return true;
3446     + return false;
3447     + }
3448    
3449     /* First block in the extent? */
3450     if (map->m_len == 0) {
3451     map->m_lblk = lblk;
3452     map->m_len = 1;
3453     - map->m_flags = b_state & BH_FLAGS;
3454     - return 1;
3455     + map->m_flags = bh->b_state & BH_FLAGS;
3456     + return true;
3457     }
3458    
3459     + /* Don't go larger than mballoc is willing to allocate */
3460     + if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
3461     + return false;
3462     +
3463     /* Can we merge the block to our big extent? */
3464     if (lblk == map->m_lblk + map->m_len &&
3465     - (b_state & BH_FLAGS) == map->m_flags) {
3466     + (bh->b_state & BH_FLAGS) == map->m_flags) {
3467     map->m_len++;
3468     - return 1;
3469     + return true;
3470     }
3471     - return 0;
3472     + return false;
3473     }
3474    
3475     -static bool add_page_bufs_to_extent(struct mpage_da_data *mpd,
3476     - struct buffer_head *head,
3477     - struct buffer_head *bh,
3478     - ext4_lblk_t lblk)
3479     +/*
3480     + * mpage_process_page_bufs - submit page buffers for IO or add them to extent
3481     + *
3482     + * @mpd - extent of blocks for mapping
3483     + * @head - the first buffer in the page
3484     + * @bh - buffer we should start processing from
3485     + * @lblk - logical number of the block in the file corresponding to @bh
3486     + *
3487     + * Walk through page buffers from @bh upto @head (exclusive) and either submit
3488     + * the page for IO if all buffers in this page were mapped and there's no
3489     + * accumulated extent of buffers to map or add buffers in the page to the
3490     + * extent of buffers to map. The function returns 1 if the caller can continue
3491     + * by processing the next page, 0 if it should stop adding buffers to the
3492     + * extent to map because we cannot extend it anymore. It can also return value
3493     + * < 0 in case of error during IO submission.
3494     + */
3495     +static int mpage_process_page_bufs(struct mpage_da_data *mpd,
3496     + struct buffer_head *head,
3497     + struct buffer_head *bh,
3498     + ext4_lblk_t lblk)
3499     {
3500     struct inode *inode = mpd->inode;
3501     + int err;
3502     ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
3503     >> inode->i_blkbits;
3504    
3505     do {
3506     BUG_ON(buffer_locked(bh));
3507    
3508     - if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
3509     - (!buffer_delay(bh) && !buffer_unwritten(bh)) ||
3510     - lblk >= blocks) {
3511     + if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
3512     /* Found extent to map? */
3513     if (mpd->map.m_len)
3514     - return false;
3515     - if (lblk >= blocks)
3516     - return true;
3517     - continue;
3518     + return 0;
3519     + /* Everything mapped so far and we hit EOF */
3520     + break;
3521     }
3522     - if (!mpage_add_bh_to_extent(mpd, lblk, bh->b_state))
3523     - return false;
3524     } while (lblk++, (bh = bh->b_this_page) != head);
3525     - return true;
3526     -}
3527     -
3528     -static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
3529     -{
3530     - int len;
3531     - loff_t size = i_size_read(mpd->inode);
3532     - int err;
3533     -
3534     - BUG_ON(page->index != mpd->first_page);
3535     - if (page->index == size >> PAGE_CACHE_SHIFT)
3536     - len = size & ~PAGE_CACHE_MASK;
3537     - else
3538     - len = PAGE_CACHE_SIZE;
3539     - clear_page_dirty_for_io(page);
3540     - err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
3541     - if (!err)
3542     - mpd->wbc->nr_to_write--;
3543     - mpd->first_page++;
3544     -
3545     - return err;
3546     + /* So far everything mapped? Submit the page for IO. */
3547     + if (mpd->map.m_len == 0) {
3548     + err = mpage_submit_page(mpd, head->b_page);
3549     + if (err < 0)
3550     + return err;
3551     + }
3552     + return lblk < blocks;
3553     }
3554    
3555     /*
3556     @@ -2003,8 +2035,6 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
3557     struct inode *inode = mpd->inode;
3558     struct buffer_head *head, *bh;
3559     int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
3560     - ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
3561     - >> inode->i_blkbits;
3562     pgoff_t start, end;
3563     ext4_lblk_t lblk;
3564     sector_t pblock;
3565     @@ -2039,18 +2069,26 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
3566     */
3567     mpd->map.m_len = 0;
3568     mpd->map.m_flags = 0;
3569     - add_page_bufs_to_extent(mpd, head, bh,
3570     - lblk);
3571     + /*
3572     + * FIXME: If dioread_nolock supports
3573     + * blocksize < pagesize, we need to make
3574     + * sure we add size mapped so far to
3575     + * io_end->size as the following call
3576     + * can submit the page for IO.
3577     + */
3578     + err = mpage_process_page_bufs(mpd, head,
3579     + bh, lblk);
3580     pagevec_release(&pvec);
3581     - return 0;
3582     + if (err > 0)
3583     + err = 0;
3584     + return err;
3585     }
3586     if (buffer_delay(bh)) {
3587     clear_buffer_delay(bh);
3588     bh->b_blocknr = pblock++;
3589     }
3590     clear_buffer_unwritten(bh);
3591     - } while (++lblk < blocks &&
3592     - (bh = bh->b_this_page) != head);
3593     + } while (lblk++, (bh = bh->b_this_page) != head);
3594    
3595     /*
3596     * FIXME: This is going to break if dioread_nolock
3597     @@ -2319,14 +2357,10 @@ static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
3598     lblk = ((ext4_lblk_t)page->index) <<
3599     (PAGE_CACHE_SHIFT - blkbits);
3600     head = page_buffers(page);
3601     - if (!add_page_bufs_to_extent(mpd, head, head, lblk))
3602     + err = mpage_process_page_bufs(mpd, head, head, lblk);
3603     + if (err <= 0)
3604     goto out;
3605     - /* So far everything mapped? Submit the page for IO. */
3606     - if (mpd->map.m_len == 0) {
3607     - err = mpage_submit_page(mpd, page);
3608     - if (err < 0)
3609     - goto out;
3610     - }
3611     + err = 0;
3612    
3613     /*
3614     * Accumulated enough dirty pages? This doesn't apply
3615     @@ -4566,7 +4600,9 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3616     ext4_journal_stop(handle);
3617     }
3618    
3619     - if (attr->ia_valid & ATTR_SIZE) {
3620     + if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
3621     + handle_t *handle;
3622     + loff_t oldsize = inode->i_size;
3623    
3624     if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
3625     struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3626     @@ -4574,73 +4610,60 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
3627     if (attr->ia_size > sbi->s_bitmap_maxbytes)
3628     return -EFBIG;
3629     }
3630     - }
3631     -
3632     - if (S_ISREG(inode->i_mode) &&
3633     - attr->ia_valid & ATTR_SIZE &&
3634     - (attr->ia_size < inode->i_size)) {
3635     - handle_t *handle;
3636     -
3637     - handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
3638     - if (IS_ERR(handle)) {
3639     - error = PTR_ERR(handle);
3640     - goto err_out;
3641     - }
3642     - if (ext4_handle_valid(handle)) {
3643     - error = ext4_orphan_add(handle, inode);
3644     - orphan = 1;
3645     - }
3646     - EXT4_I(inode)->i_disksize = attr->ia_size;
3647     - rc = ext4_mark_inode_dirty(handle, inode);
3648     - if (!error)
3649     - error = rc;
3650     - ext4_journal_stop(handle);
3651     -
3652     - if (ext4_should_order_data(inode)) {
3653     - error = ext4_begin_ordered_truncate(inode,
3654     + if (S_ISREG(inode->i_mode) &&
3655     + (attr->ia_size < inode->i_size)) {
3656     + if (ext4_should_order_data(inode)) {
3657     + error = ext4_begin_ordered_truncate(inode,
3658     attr->ia_size);
3659     - if (error) {
3660     - /* Do as much error cleanup as possible */
3661     - handle = ext4_journal_start(inode,
3662     - EXT4_HT_INODE, 3);
3663     - if (IS_ERR(handle)) {
3664     - ext4_orphan_del(NULL, inode);
3665     + if (error)
3666     goto err_out;
3667     - }
3668     - ext4_orphan_del(handle, inode);
3669     - orphan = 0;
3670     - ext4_journal_stop(handle);
3671     + }
3672     + handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
3673     + if (IS_ERR(handle)) {
3674     + error = PTR_ERR(handle);
3675     + goto err_out;
3676     + }
3677     + if (ext4_handle_valid(handle)) {
3678     + error = ext4_orphan_add(handle, inode);
3679     + orphan = 1;
3680     + }
3681     + EXT4_I(inode)->i_disksize = attr->ia_size;
3682     + rc = ext4_mark_inode_dirty(handle, inode);
3683     + if (!error)
3684     + error = rc;
3685     + ext4_journal_stop(handle);
3686     + if (error) {
3687     + ext4_orphan_del(NULL, inode);
3688     goto err_out;
3689     }
3690     }
3691     - }
3692     -
3693     - if (attr->ia_valid & ATTR_SIZE) {
3694     - if (attr->ia_size != inode->i_size) {
3695     - loff_t oldsize = inode->i_size;
3696    
3697     - i_size_write(inode, attr->ia_size);
3698     - /*
3699     - * Blocks are going to be removed from the inode. Wait
3700     - * for dio in flight. Temporarily disable
3701     - * dioread_nolock to prevent livelock.
3702     - */
3703     - if (orphan) {
3704     - if (!ext4_should_journal_data(inode)) {
3705     - ext4_inode_block_unlocked_dio(inode);
3706     - inode_dio_wait(inode);
3707     - ext4_inode_resume_unlocked_dio(inode);
3708     - } else
3709     - ext4_wait_for_tail_page_commit(inode);
3710     - }
3711     - /*
3712     - * Truncate pagecache after we've waited for commit
3713     - * in data=journal mode to make pages freeable.
3714     - */
3715     - truncate_pagecache(inode, oldsize, inode->i_size);
3716     + i_size_write(inode, attr->ia_size);
3717     + /*
3718     + * Blocks are going to be removed from the inode. Wait
3719     + * for dio in flight. Temporarily disable
3720     + * dioread_nolock to prevent livelock.
3721     + */
3722     + if (orphan) {
3723     + if (!ext4_should_journal_data(inode)) {
3724     + ext4_inode_block_unlocked_dio(inode);
3725     + inode_dio_wait(inode);
3726     + ext4_inode_resume_unlocked_dio(inode);
3727     + } else
3728     + ext4_wait_for_tail_page_commit(inode);
3729     }
3730     - ext4_truncate(inode);
3731     + /*
3732     + * Truncate pagecache after we've waited for commit
3733     + * in data=journal mode to make pages freeable.
3734     + */
3735     + truncate_pagecache(inode, oldsize, inode->i_size);
3736     }
3737     + /*
3738     + * We want to call ext4_truncate() even if attr->ia_size ==
3739     + * inode->i_size for cases like truncation of fallocated space
3740     + */
3741     + if (attr->ia_valid & ATTR_SIZE)
3742     + ext4_truncate(inode);
3743    
3744     if (!rc) {
3745     setattr_copy(inode, attr);
3746     diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
3747     index 72a5d5b0..8fec28ff 100644
3748     --- a/fs/fuse/dir.c
3749     +++ b/fs/fuse/dir.c
3750     @@ -1174,6 +1174,8 @@ static int parse_dirfile(char *buf, size_t nbytes, struct file *file,
3751     return -EIO;
3752     if (reclen > nbytes)
3753     break;
3754     + if (memchr(dirent->name, '/', dirent->namelen) != NULL)
3755     + return -EIO;
3756    
3757     if (!dir_emit(ctx, dirent->name, dirent->namelen,
3758     dirent->ino, dirent->type))
3759     @@ -1320,6 +1322,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file,
3760     return -EIO;
3761     if (reclen > nbytes)
3762     break;
3763     + if (memchr(dirent->name, '/', dirent->namelen) != NULL)
3764     + return -EIO;
3765    
3766     if (!over) {
3767     /* We fill entries into dstbuf only as much as
3768     @@ -1590,6 +1594,7 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
3769     struct file *file)
3770     {
3771     struct fuse_conn *fc = get_fuse_conn(inode);
3772     + struct fuse_inode *fi = get_fuse_inode(inode);
3773     struct fuse_req *req;
3774     struct fuse_setattr_in inarg;
3775     struct fuse_attr_out outarg;
3776     @@ -1617,8 +1622,10 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
3777     if (IS_ERR(req))
3778     return PTR_ERR(req);
3779    
3780     - if (is_truncate)
3781     + if (is_truncate) {
3782     fuse_set_nowrite(inode);
3783     + set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3784     + }
3785    
3786     memset(&inarg, 0, sizeof(inarg));
3787     memset(&outarg, 0, sizeof(outarg));
3788     @@ -1680,12 +1687,14 @@ int fuse_do_setattr(struct inode *inode, struct iattr *attr,
3789     invalidate_inode_pages2(inode->i_mapping);
3790     }
3791    
3792     + clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3793     return 0;
3794    
3795     error:
3796     if (is_truncate)
3797     fuse_release_nowrite(inode);
3798    
3799     + clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3800     return err;
3801     }
3802    
3803     @@ -1749,6 +1758,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name,
3804     fc->no_setxattr = 1;
3805     err = -EOPNOTSUPP;
3806     }
3807     + if (!err)
3808     + fuse_invalidate_attr(inode);
3809     return err;
3810     }
3811    
3812     @@ -1878,6 +1889,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name)
3813     fc->no_removexattr = 1;
3814     err = -EOPNOTSUPP;
3815     }
3816     + if (!err)
3817     + fuse_invalidate_attr(inode);
3818     return err;
3819     }
3820    
3821     diff --git a/fs/fuse/file.c b/fs/fuse/file.c
3822     index 5c121fe1..d409deaf 100644
3823     --- a/fs/fuse/file.c
3824     +++ b/fs/fuse/file.c
3825     @@ -629,7 +629,8 @@ static void fuse_read_update_size(struct inode *inode, loff_t size,
3826     struct fuse_inode *fi = get_fuse_inode(inode);
3827    
3828     spin_lock(&fc->lock);
3829     - if (attr_ver == fi->attr_version && size < inode->i_size) {
3830     + if (attr_ver == fi->attr_version && size < inode->i_size &&
3831     + !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
3832     fi->attr_version = ++fc->attr_version;
3833     i_size_write(inode, size);
3834     }
3835     @@ -1032,12 +1033,16 @@ static ssize_t fuse_perform_write(struct file *file,
3836     {
3837     struct inode *inode = mapping->host;
3838     struct fuse_conn *fc = get_fuse_conn(inode);
3839     + struct fuse_inode *fi = get_fuse_inode(inode);
3840     int err = 0;
3841     ssize_t res = 0;
3842    
3843     if (is_bad_inode(inode))
3844     return -EIO;
3845    
3846     + if (inode->i_size < pos + iov_iter_count(ii))
3847     + set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3848     +
3849     do {
3850     struct fuse_req *req;
3851     ssize_t count;
3852     @@ -1073,6 +1078,7 @@ static ssize_t fuse_perform_write(struct file *file,
3853     if (res > 0)
3854     fuse_write_update_size(inode, pos);
3855    
3856     + clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state);
3857     fuse_invalidate_attr(inode);
3858    
3859     return res > 0 ? res : err;
3860     @@ -1529,7 +1535,6 @@ static int fuse_writepage_locked(struct page *page)
3861    
3862     inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK);
3863     inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
3864     - end_page_writeback(page);
3865    
3866     spin_lock(&fc->lock);
3867     list_add(&req->writepages_entry, &fi->writepages);
3868     @@ -1537,6 +1542,8 @@ static int fuse_writepage_locked(struct page *page)
3869     fuse_flush_writepages(inode);
3870     spin_unlock(&fc->lock);
3871    
3872     + end_page_writeback(page);
3873     +
3874     return 0;
3875    
3876     err_free:
3877     diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
3878     index fde7249a..5ced199b 100644
3879     --- a/fs/fuse/fuse_i.h
3880     +++ b/fs/fuse/fuse_i.h
3881     @@ -115,6 +115,8 @@ struct fuse_inode {
3882     enum {
3883     /** Advise readdirplus */
3884     FUSE_I_ADVISE_RDPLUS,
3885     + /** An operation changing file size is in progress */
3886     + FUSE_I_SIZE_UNSTABLE,
3887     };
3888    
3889     struct fuse_conn;
3890     diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
3891     index 0b578598..e0fe703e 100644
3892     --- a/fs/fuse/inode.c
3893     +++ b/fs/fuse/inode.c
3894     @@ -201,7 +201,8 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
3895     struct timespec old_mtime;
3896    
3897     spin_lock(&fc->lock);
3898     - if (attr_version != 0 && fi->attr_version > attr_version) {
3899     + if ((attr_version != 0 && fi->attr_version > attr_version) ||
3900     + test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) {
3901     spin_unlock(&fc->lock);
3902     return;
3903     }
3904     diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
3905     index c348d6d8..e5d408a7 100644
3906     --- a/fs/isofs/inode.c
3907     +++ b/fs/isofs/inode.c
3908     @@ -117,8 +117,8 @@ static void destroy_inodecache(void)
3909    
3910     static int isofs_remount(struct super_block *sb, int *flags, char *data)
3911     {
3912     - /* we probably want a lot more here */
3913     - *flags |= MS_RDONLY;
3914     + if (!(*flags & MS_RDONLY))
3915     + return -EROFS;
3916     return 0;
3917     }
3918    
3919     @@ -763,15 +763,6 @@ root_found:
3920     */
3921     s->s_maxbytes = 0x80000000000LL;
3922    
3923     - /*
3924     - * The CDROM is read-only, has no nodes (devices) on it, and since
3925     - * all of the files appear to be owned by root, we really do not want
3926     - * to allow suid. (suid or devices will not show up unless we have
3927     - * Rock Ridge extensions)
3928     - */
3929     -
3930     - s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */;
3931     -
3932     /* Set this for reference. Its not currently used except on write
3933     which we don't have .. */
3934    
3935     @@ -1530,6 +1521,9 @@ struct inode *isofs_iget(struct super_block *sb,
3936     static struct dentry *isofs_mount(struct file_system_type *fs_type,
3937     int flags, const char *dev_name, void *data)
3938     {
3939     + /* We don't support read-write mounts */
3940     + if (!(flags & MS_RDONLY))
3941     + return ERR_PTR(-EACCES);
3942     return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super);
3943     }
3944    
3945     diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
3946     index 2487116d..84606472 100644
3947     --- a/fs/ocfs2/extent_map.c
3948     +++ b/fs/ocfs2/extent_map.c
3949     @@ -781,7 +781,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3950     cpos = map_start >> osb->s_clustersize_bits;
3951     mapping_end = ocfs2_clusters_for_bytes(inode->i_sb,
3952     map_start + map_len);
3953     - mapping_end -= cpos;
3954     is_last = 0;
3955     while (cpos < mapping_end && !is_last) {
3956     u32 fe_flags;
3957     diff --git a/fs/proc/root.c b/fs/proc/root.c
3958     index e0a790da..0e0e83c4 100644
3959     --- a/fs/proc/root.c
3960     +++ b/fs/proc/root.c
3961     @@ -110,7 +110,8 @@ static struct dentry *proc_mount(struct file_system_type *fs_type,
3962     ns = task_active_pid_ns(current);
3963     options = data;
3964    
3965     - if (!current_user_ns()->may_mount_proc)
3966     + if (!current_user_ns()->may_mount_proc ||
3967     + !ns_capable(ns->user_ns, CAP_SYS_ADMIN))
3968     return ERR_PTR(-EPERM);
3969     }
3970    
3971     diff --git a/include/linux/compat.h b/include/linux/compat.h
3972     index 7f0c1dd0..ec1aee4a 100644
3973     --- a/include/linux/compat.h
3974     +++ b/include/linux/compat.h
3975     @@ -669,6 +669,13 @@ asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr,
3976    
3977     int compat_restore_altstack(const compat_stack_t __user *uss);
3978     int __compat_save_altstack(compat_stack_t __user *, unsigned long);
3979     +#define compat_save_altstack_ex(uss, sp) do { \
3980     + compat_stack_t __user *__uss = uss; \
3981     + struct task_struct *t = current; \
3982     + put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
3983     + put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
3984     + put_user_ex(t->sas_ss_size, &__uss->ss_size); \
3985     +} while (0);
3986    
3987     asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
3988     struct compat_timespec __user *interval);
3989     diff --git a/include/linux/hid.h b/include/linux/hid.h
3990     index 0c48991b..ff545cc3 100644
3991     --- a/include/linux/hid.h
3992     +++ b/include/linux/hid.h
3993     @@ -393,10 +393,12 @@ struct hid_report {
3994     struct hid_device *device; /* associated device */
3995     };
3996    
3997     +#define HID_MAX_IDS 256
3998     +
3999     struct hid_report_enum {
4000     unsigned numbered;
4001     struct list_head report_list;
4002     - struct hid_report *report_id_hash[256];
4003     + struct hid_report *report_id_hash[HID_MAX_IDS];
4004     };
4005    
4006     #define HID_REPORT_TYPES 3
4007     diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
4008     index 3bed2e89..d1fe5d00 100644
4009     --- a/include/linux/pci_ids.h
4010     +++ b/include/linux/pci_ids.h
4011     @@ -518,6 +518,8 @@
4012     #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
4013     #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
4014     #define PCI_DEVICE_ID_AMD_15H_M10H_F3 0x1403
4015     +#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F3 0x141d
4016     +#define PCI_DEVICE_ID_AMD_15H_M30H_NB_F4 0x141e
4017     #define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
4018     #define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
4019     #define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
4020     diff --git a/include/linux/rculist.h b/include/linux/rculist.h
4021     index f4b1001a..4106721c 100644
4022     --- a/include/linux/rculist.h
4023     +++ b/include/linux/rculist.h
4024     @@ -267,8 +267,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
4025     */
4026     #define list_first_or_null_rcu(ptr, type, member) \
4027     ({struct list_head *__ptr = (ptr); \
4028     - struct list_head __rcu *__next = list_next_rcu(__ptr); \
4029     - likely(__ptr != __next) ? container_of(__next, type, member) : NULL; \
4030     + struct list_head *__next = ACCESS_ONCE(__ptr->next); \
4031     + likely(__ptr != __next) ? \
4032     + list_entry_rcu(__next, type, member) : NULL; \
4033     })
4034    
4035     /**
4036     diff --git a/include/linux/signal.h b/include/linux/signal.h
4037     index d8974847..2ac423bd 100644
4038     --- a/include/linux/signal.h
4039     +++ b/include/linux/signal.h
4040     @@ -434,6 +434,14 @@ void signals_init(void);
4041     int restore_altstack(const stack_t __user *);
4042     int __save_altstack(stack_t __user *, unsigned long);
4043    
4044     +#define save_altstack_ex(uss, sp) do { \
4045     + stack_t __user *__uss = uss; \
4046     + struct task_struct *t = current; \
4047     + put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
4048     + put_user_ex(sas_ss_flags(sp), &__uss->ss_flags); \
4049     + put_user_ex(t->sas_ss_size, &__uss->ss_size); \
4050     +} while (0);
4051     +
4052     #ifdef CONFIG_PROC_FS
4053     struct seq_file;
4054     extern void render_sigset_t(struct seq_file *, const char *, sigset_t *);
4055     diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
4056     index 1e88377e..3e541e63 100644
4057     --- a/include/linux/usb/hcd.h
4058     +++ b/include/linux/usb/hcd.h
4059     @@ -411,7 +411,7 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
4060     extern void usb_hcd_pci_remove(struct pci_dev *dev);
4061     extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
4062    
4063     -#ifdef CONFIG_PM_SLEEP
4064     +#ifdef CONFIG_PM
4065     extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
4066     #endif
4067     #endif /* CONFIG_PCI */
4068     diff --git a/ipc/msg.c b/ipc/msg.c
4069     index 9f29d9e8..b65fdf1a 100644
4070     --- a/ipc/msg.c
4071     +++ b/ipc/msg.c
4072     @@ -680,16 +680,18 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
4073     goto out_unlock1;
4074     }
4075    
4076     + ipc_lock_object(&msq->q_perm);
4077     +
4078     for (;;) {
4079     struct msg_sender s;
4080    
4081     err = -EACCES;
4082     if (ipcperms(ns, &msq->q_perm, S_IWUGO))
4083     - goto out_unlock1;
4084     + goto out_unlock0;
4085    
4086     err = security_msg_queue_msgsnd(msq, msg, msgflg);
4087     if (err)
4088     - goto out_unlock1;
4089     + goto out_unlock0;
4090    
4091     if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
4092     1 + msq->q_qnum <= msq->q_qbytes) {
4093     @@ -699,10 +701,9 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
4094     /* queue full, wait: */
4095     if (msgflg & IPC_NOWAIT) {
4096     err = -EAGAIN;
4097     - goto out_unlock1;
4098     + goto out_unlock0;
4099     }
4100    
4101     - ipc_lock_object(&msq->q_perm);
4102     ss_add(msq, &s);
4103    
4104     if (!ipc_rcu_getref(msq)) {
4105     @@ -730,10 +731,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
4106     goto out_unlock0;
4107     }
4108    
4109     - ipc_unlock_object(&msq->q_perm);
4110     }
4111     -
4112     - ipc_lock_object(&msq->q_perm);
4113     msq->q_lspid = task_tgid_vnr(current);
4114     msq->q_stime = get_seconds();
4115    
4116     diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
4117     index f3569747..ad8e1bdc 100644
4118     --- a/kernel/events/uprobes.c
4119     +++ b/kernel/events/uprobes.c
4120     @@ -1682,12 +1682,10 @@ static bool handle_trampoline(struct pt_regs *regs)
4121     tmp = ri;
4122     ri = ri->next;
4123     kfree(tmp);
4124     + utask->depth--;
4125    
4126     if (!chained)
4127     break;
4128     -
4129     - utask->depth--;
4130     -
4131     BUG_ON(!ri);
4132     }
4133    
4134     diff --git a/kernel/fork.c b/kernel/fork.c
4135     index bf46287c..200a7a29 100644
4136     --- a/kernel/fork.c
4137     +++ b/kernel/fork.c
4138     @@ -1173,10 +1173,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
4139     return ERR_PTR(-EINVAL);
4140    
4141     /*
4142     - * If the new process will be in a different pid namespace
4143     - * don't allow the creation of threads.
4144     + * If the new process will be in a different pid namespace don't
4145     + * allow it to share a thread group or signal handlers with the
4146     + * forking task.
4147     */
4148     - if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
4149     + if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
4150     (task_active_pid_ns(current) !=
4151     current->nsproxy->pid_ns_for_children))
4152     return ERR_PTR(-EINVAL);
4153     diff --git a/kernel/pid.c b/kernel/pid.c
4154     index 66505c1d..ebe5e80b 100644
4155     --- a/kernel/pid.c
4156     +++ b/kernel/pid.c
4157     @@ -265,6 +265,7 @@ void free_pid(struct pid *pid)
4158     struct pid_namespace *ns = upid->ns;
4159     hlist_del_rcu(&upid->pid_chain);
4160     switch(--ns->nr_hashed) {
4161     + case 2:
4162     case 1:
4163     /* When all that is left in the pid namespace
4164     * is the reaper wake up the reaper. The reaper
4165     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4166     index a92012a7..f2820fbf 100644
4167     --- a/mm/huge_memory.c
4168     +++ b/mm/huge_memory.c
4169     @@ -2296,6 +2296,8 @@ static void collapse_huge_page(struct mm_struct *mm,
4170     goto out;
4171    
4172     vma = find_vma(mm, address);
4173     + if (!vma)
4174     + goto out;
4175     hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
4176     hend = vma->vm_end & HPAGE_PMD_MASK;
4177     if (address < hstart || address + HPAGE_PMD_SIZE > hend)
4178     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4179     index 0878ff7c..aa44621e 100644
4180     --- a/mm/memcontrol.c
4181     +++ b/mm/memcontrol.c
4182     @@ -5616,7 +5616,13 @@ static int compare_thresholds(const void *a, const void *b)
4183     const struct mem_cgroup_threshold *_a = a;
4184     const struct mem_cgroup_threshold *_b = b;
4185    
4186     - return _a->threshold - _b->threshold;
4187     + if (_a->threshold > _b->threshold)
4188     + return 1;
4189     +
4190     + if (_a->threshold < _b->threshold)
4191     + return -1;
4192     +
4193     + return 0;
4194     }
4195    
4196     static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
4197     diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
4198     index dd47889a..dbc0a739 100644
4199     --- a/net/ceph/osd_client.c
4200     +++ b/net/ceph/osd_client.c
4201     @@ -2129,6 +2129,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4202     dout("osdc_start_request failed map, "
4203     " will retry %lld\n", req->r_tid);
4204     rc = 0;
4205     + } else {
4206     + __unregister_request(osdc, req);
4207     }
4208     goto out_unlock;
4209     }
4210     diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
4211     index 603ddd92..dbd9a479 100644
4212     --- a/net/ceph/osdmap.c
4213     +++ b/net/ceph/osdmap.c
4214     @@ -1129,7 +1129,7 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid,
4215    
4216     /* pg_temp? */
4217     pgid.seed = ceph_stable_mod(pgid.seed, pool->pg_num,
4218     - pool->pgp_num_mask);
4219     + pool->pg_num_mask);
4220     pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
4221     if (pg) {
4222     *num = pg->len;
4223     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
4224     index cc9e02d7..7a98d524 100644
4225     --- a/net/mac80211/mlme.c
4226     +++ b/net/mac80211/mlme.c
4227     @@ -2851,14 +2851,6 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
4228     ieee80211_rx_bss_put(local, bss);
4229     sdata->vif.bss_conf.beacon_rate = bss->beacon_rate;
4230     }
4231     -
4232     - if (!sdata->u.mgd.associated ||
4233     - !ether_addr_equal(mgmt->bssid, sdata->u.mgd.associated->bssid))
4234     - return;
4235     -
4236     - ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
4237     - elems, true);
4238     -
4239     }
4240    
4241    
4242     @@ -3147,6 +3139,9 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
4243    
4244     ieee80211_rx_bss_info(sdata, mgmt, len, rx_status, &elems);
4245    
4246     + ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
4247     + &elems, true);
4248     +
4249     if (ieee80211_sta_wmm_params(local, sdata, elems.wmm_param,
4250     elems.wmm_param_len))
4251     changed |= BSS_CHANGED_QOS;
4252     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4253     index 8860dd52..9552da23 100644
4254     --- a/sound/pci/hda/hda_intel.c
4255     +++ b/sound/pci/hda/hda_intel.c
4256     @@ -3376,6 +3376,7 @@ static struct snd_pci_quirk msi_black_list[] = {
4257     SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
4258     SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
4259     SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
4260     + SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */
4261     SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */
4262     SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */
4263     {}
4264     diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4265     index 9f358627..45850f67 100644
4266     --- a/sound/pci/hda/patch_hdmi.c
4267     +++ b/sound/pci/hda/patch_hdmi.c
4268     @@ -67,6 +67,8 @@ struct hdmi_spec_per_pin {
4269     struct delayed_work work;
4270     struct snd_kcontrol *eld_ctl;
4271     int repoll_count;
4272     + bool setup; /* the stream has been set up by prepare callback */
4273     + int channels; /* current number of channels */
4274     bool non_pcm;
4275     bool chmap_set; /* channel-map override by ALSA API? */
4276     unsigned char chmap[8]; /* ALSA API channel-map */
4277     @@ -551,6 +553,17 @@ static int hdmi_channel_allocation(struct hdmi_eld *eld, int channels)
4278     }
4279     }
4280    
4281     + if (!ca) {
4282     + /* if there was no match, select the regular ALSA channel
4283     + * allocation with the matching number of channels */
4284     + for (i = 0; i < ARRAY_SIZE(channel_allocations); i++) {
4285     + if (channels == channel_allocations[i].channels) {
4286     + ca = channel_allocations[i].ca_index;
4287     + break;
4288     + }
4289     + }
4290     + }
4291     +
4292     snd_print_channel_allocation(eld->info.spk_alloc, buf, sizeof(buf));
4293     snd_printdd("HDMI: select CA 0x%x for %d-channel allocation: %s\n",
4294     ca, channels, buf);
4295     @@ -868,18 +881,19 @@ static bool hdmi_infoframe_uptodate(struct hda_codec *codec, hda_nid_t pin_nid,
4296     return true;
4297     }
4298    
4299     -static void hdmi_setup_audio_infoframe(struct hda_codec *codec, int pin_idx,
4300     - bool non_pcm,
4301     - struct snd_pcm_substream *substream)
4302     +static void hdmi_setup_audio_infoframe(struct hda_codec *codec,
4303     + struct hdmi_spec_per_pin *per_pin,
4304     + bool non_pcm)
4305     {
4306     - struct hdmi_spec *spec = codec->spec;
4307     - struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
4308     hda_nid_t pin_nid = per_pin->pin_nid;
4309     - int channels = substream->runtime->channels;
4310     + int channels = per_pin->channels;
4311     struct hdmi_eld *eld;
4312     int ca;
4313     union audio_infoframe ai;
4314    
4315     + if (!channels)
4316     + return;
4317     +
4318     eld = &per_pin->sink_eld;
4319     if (!eld->monitor_present)
4320     return;
4321     @@ -1329,6 +1343,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
4322     eld_changed = true;
4323     }
4324     if (update_eld) {
4325     + bool old_eld_valid = pin_eld->eld_valid;
4326     pin_eld->eld_valid = eld->eld_valid;
4327     eld_changed = pin_eld->eld_size != eld->eld_size ||
4328     memcmp(pin_eld->eld_buffer, eld->eld_buffer,
4329     @@ -1338,6 +1353,18 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
4330     eld->eld_size);
4331     pin_eld->eld_size = eld->eld_size;
4332     pin_eld->info = eld->info;
4333     +
4334     + /* Haswell-specific workaround: re-setup when the transcoder is
4335     + * changed during the stream playback
4336     + */
4337     + if (codec->vendor_id == 0x80862807 &&
4338     + eld->eld_valid && !old_eld_valid && per_pin->setup) {
4339     + snd_hda_codec_write(codec, pin_nid, 0,
4340     + AC_VERB_SET_AMP_GAIN_MUTE,
4341     + AMP_OUT_UNMUTE);
4342     + hdmi_setup_audio_infoframe(codec, per_pin,
4343     + per_pin->non_pcm);
4344     + }
4345     }
4346     mutex_unlock(&pin_eld->lock);
4347    
4348     @@ -1510,14 +1537,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
4349     hda_nid_t cvt_nid = hinfo->nid;
4350     struct hdmi_spec *spec = codec->spec;
4351     int pin_idx = hinfo_to_pin_index(spec, hinfo);
4352     - hda_nid_t pin_nid = get_pin(spec, pin_idx)->pin_nid;
4353     + struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
4354     + hda_nid_t pin_nid = per_pin->pin_nid;
4355     bool non_pcm;
4356    
4357     non_pcm = check_non_pcm_per_cvt(codec, cvt_nid);
4358     + per_pin->channels = substream->runtime->channels;
4359     + per_pin->setup = true;
4360    
4361     hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
4362    
4363     - hdmi_setup_audio_infoframe(codec, pin_idx, non_pcm, substream);
4364     + hdmi_setup_audio_infoframe(codec, per_pin, non_pcm);
4365    
4366     return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
4367     }
4368     @@ -1557,6 +1587,9 @@ static int hdmi_pcm_close(struct hda_pcm_stream *hinfo,
4369     snd_hda_spdif_ctls_unassign(codec, pin_idx);
4370     per_pin->chmap_set = false;
4371     memset(per_pin->chmap, 0, sizeof(per_pin->chmap));
4372     +
4373     + per_pin->setup = false;
4374     + per_pin->channels = 0;
4375     }
4376    
4377     return 0;
4378     @@ -1692,8 +1725,7 @@ static int hdmi_chmap_ctl_put(struct snd_kcontrol *kcontrol,
4379     per_pin->chmap_set = true;
4380     memcpy(per_pin->chmap, chmap, sizeof(chmap));
4381     if (prepared)
4382     - hdmi_setup_audio_infoframe(codec, pin_idx, per_pin->non_pcm,
4383     - substream);
4384     + hdmi_setup_audio_infoframe(codec, per_pin, per_pin->non_pcm);
4385    
4386     return 0;
4387     }
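The patch_hdmi.c changes above do two related things: hdmi_channel_allocation() gains a fallback that picks the regular ALSA allocation with a matching channel count when the ELD-driven search finds nothing, and hdmi_setup_audio_infoframe() is reworked to take the per-pin state instead of the substream, with the prepare callback caching channels/setup in hdmi_spec_per_pin. That caching is what allows hdmi_present_sense() to replay the infoframe setup when the ELD turns valid while a stream is already prepared, the Haswell (vendor id 0x80862807) transcoder-change case noted in the hunk. A rough sketch of the caching pattern, with illustrative structure and function names rather than the driver's:

    /*
     * Remember the stream parameters at prepare time so a later
     * hotplug/ELD event can replay the sink setup without having the
     * substream in hand.
     */
    #include <stdio.h>
    #include <stdbool.h>

    struct per_pin_state {
            int  channels;          /* last prepared channel count, 0 = none */
            bool setup;             /* a stream is currently prepared */
    };

    static void setup_infoframe(struct per_pin_state *pp)
    {
            if (!pp->channels)
                    return;         /* nothing prepared yet, nothing to send */
            printf("send audio infoframe for %d channels\n", pp->channels);
    }

    static void pcm_prepare(struct per_pin_state *pp, int channels)
    {
            pp->channels = channels;        /* cached for later re-setup */
            pp->setup = true;
            setup_infoframe(pp);
    }

    static void eld_became_valid(struct per_pin_state *pp)
    {
            /* e.g. a transcoder switch mid-stream: replay the setup */
            if (pp->setup)
                    setup_infoframe(pp);
    }

    static void pcm_close(struct per_pin_state *pp)
    {
            pp->setup = false;
            pp->channels = 0;
    }

    int main(void)
    {
            struct per_pin_state pin = { 0, false };

            pcm_prepare(&pin, 2);      /* playback starts: infoframe sent */
            eld_became_valid(&pin);    /* ELD turns valid mid-stream: re-sent */
            pcm_close(&pin);
            eld_became_valid(&pin);    /* after close: nothing is sent */
            return 0;
    }

Clearing setup/channels in the close callback, as the hunk does, is what keeps the hotplug path from re-sending an infoframe for a stream that no longer exists.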
4388     diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
4389     index 5402dfbb..8a8d9364 100644
4390     --- a/sound/soc/codecs/mc13783.c
4391     +++ b/sound/soc/codecs/mc13783.c
4392     @@ -126,6 +126,10 @@ static int mc13783_write(struct snd_soc_codec *codec,
4393    
4394     ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
4395    
4396     + /* include errata fix for spi audio problems */
4397     + if (reg == MC13783_AUDIO_CODEC || reg == MC13783_AUDIO_DAC)
4398     + ret = mc13xxx_reg_write(priv->mc13xxx, reg, value);
4399     +
4400     mc13xxx_unlock(priv->mc13xxx);
4401    
4402     return ret;
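The mc13783 hunk above applies its errata workaround by simply issuing the register write a second time for the audio codec and DAC registers. The shape of that pattern, with hypothetical register numbers and a stubbed bus write, not the driver's code:

    #include <stdio.h>
    #include <stdbool.h>

    #define REG_AUDIO_CODEC 40
    #define REG_AUDIO_DAC   41

    static int bus_write(unsigned int reg, unsigned int val)
    {
            printf("write reg %u = 0x%06x\n", reg, val);
            return 0;
    }

    static bool needs_double_write(unsigned int reg)
    {
            return reg == REG_AUDIO_CODEC || reg == REG_AUDIO_DAC;
    }

    static int codec_write(unsigned int reg, unsigned int val)
    {
            int ret = bus_write(reg, val);

            /* errata workaround: repeat the write for the audio registers */
            if (needs_double_write(reg))
                    ret = bus_write(reg, val);

            return ret;
    }

    int main(void)
    {
            codec_write(REG_AUDIO_CODEC, 0x123456);  /* written twice */
            codec_write(7, 0xabcdef);                /* written once */
            return 0;
    }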
4403     diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
4404     index 0a4ffdd1..5e5af898 100644
4405     --- a/sound/soc/codecs/wm8960.c
4406     +++ b/sound/soc/codecs/wm8960.c
4407     @@ -857,9 +857,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
4408     if (pll_div.k) {
4409     reg |= 0x20;
4410    
4411     - snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f);
4412     - snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff);
4413     - snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff);
4414     + snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff);
4415     + snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff);
4416     + snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff);
4417     }
4418     snd_soc_write(codec, WM8960_PLL1, reg);
4419
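The wm8960 hunk above changes how the 24-bit fractional PLL coefficient K is distributed over the PLL2/PLL3/PLL4 writes: judging by the replacement masks, each register takes one byte of K (bits 23..16, 15..8 and 7..0), whereas the removed lines split it 6/9/9, which evidently did not match the hardware's register layout. A tiny stand-alone sketch of the byte split (the example K value is arbitrary, not a real PLL configuration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t k = 0x3126E9;              /* some 24-bit fraction */

            uint8_t pll2 = (k >> 16) & 0xff;    /* K[23:16] */
            uint8_t pll3 = (k >> 8)  & 0xff;    /* K[15:8]  */
            uint8_t pll4 = k & 0xff;            /* K[7:0]   */

            printf("PLL2=0x%02x PLL3=0x%02x PLL4=0x%02x\n", pll2, pll3, pll4);
            return 0;
    }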