Magellan Linux

/trunk/kernel-magellan/patches-4.4/0103-4.4.4-all-fixes.patch

Revision 2771
Thu Mar 10 14:12:39 2016 UTC by niro
File size: 442880 bytes
-linux-4.4.4
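This file is the upstream 4.4.3 -> 4.4.4 stable update as a single unified diff. As a sketch of typical usage (assuming a source tree currently at 4.4.3; the a/ and b/ path prefixes below imply strip level 1):

    cd linux-4.4                              # tree with SUBLEVEL = 3
    patch -p1 < 0103-4.4.4-all-fixes.patch    # or: git apply 0103-4.4.4-all-fixes.patch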
1 niro 2771 diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
2     index c477af086e65..686a64bba775 100644
3     --- a/Documentation/filesystems/efivarfs.txt
4     +++ b/Documentation/filesystems/efivarfs.txt
5     @@ -14,3 +14,10 @@ filesystem.
6     efivarfs is typically mounted like this,
7    
8     mount -t efivarfs none /sys/firmware/efi/efivars
9     +
10     +Due to the presence of numerous firmware bugs where removing non-standard
11     +UEFI variables causes the system firmware to fail to POST, efivarfs
12     +files that are not well-known standardized variables are created
13     +as immutable files. This doesn't prevent removal - "chattr -i" will work -
14     +but it does prevent this kind of failure from being accomplished
15     +accidentally.
16     diff --git a/Makefile b/Makefile
17     index 802be10c40c5..344bc6f27ea1 100644
18     --- a/Makefile
19     +++ b/Makefile
20     @@ -1,6 +1,6 @@
21     VERSION = 4
22     PATCHLEVEL = 4
23     -SUBLEVEL = 3
24     +SUBLEVEL = 4
25     EXTRAVERSION =
26     NAME = Blurry Fish Butt
27    
28     diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
29     index 258b0e5ad332..68b6092349d7 100644
30     --- a/arch/arc/include/asm/irqflags-arcv2.h
31     +++ b/arch/arc/include/asm/irqflags-arcv2.h
32     @@ -22,6 +22,7 @@
33     #define AUX_IRQ_CTRL 0x00E
34     #define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
35     #define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
36     +#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
37     #define AUX_IRQ_PRIORITY 0x206
38     #define ICAUSE 0x40a
39     #define AUX_IRQ_SELECT 0x40b
40     @@ -112,6 +113,16 @@ static inline int arch_irqs_disabled(void)
41     return arch_irqs_disabled_flags(arch_local_save_flags());
42     }
43    
44     +static inline void arc_softirq_trigger(int irq)
45     +{
46     + write_aux_reg(AUX_IRQ_HINT, irq);
47     +}
48     +
49     +static inline void arc_softirq_clear(int irq)
50     +{
51     + write_aux_reg(AUX_IRQ_HINT, 0);
52     +}
53     +
54     #else
55    
56     .macro IRQ_DISABLE scratch
57     diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
58     index cbfec79137bf..c1264607bbff 100644
59     --- a/arch/arc/kernel/entry-arcv2.S
60     +++ b/arch/arc/kernel/entry-arcv2.S
61     @@ -45,11 +45,12 @@ VECTOR reserved ; Reserved slots
62     VECTOR handle_interrupt ; (16) Timer0
63     VECTOR handle_interrupt ; unused (Timer1)
64     VECTOR handle_interrupt ; unused (WDT)
65     -VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
66     -VECTOR handle_interrupt
67     -VECTOR handle_interrupt
68     -VECTOR handle_interrupt
69     -VECTOR handle_interrupt ; (23) End of fixed IRQs
70     +VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
71     +VECTOR handle_interrupt ; (20) perf Interrupt
72     +VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
73     +VECTOR handle_interrupt ; unused
74     +VECTOR handle_interrupt ; (23) unused
75     +# End of fixed IRQs
76    
77     .rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
78     VECTOR handle_interrupt
79     @@ -211,7 +212,11 @@ debug_marker_syscall:
80     ; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
81     ; entry was via Exception in DS which got preempted in kernel).
82     ;
83     -; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
84     +; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
85     +;
86     +; Solution is return from Intr w/o any delay slot quirks into a kernel trampoline
87     +; and from pure kernel mode return to delay slot which handles DS bit/BTA correctly
88     +
89     .Lintr_ret_to_delay_slot:
90     debug_marker_ds:
91    
92     @@ -222,18 +227,23 @@ debug_marker_ds:
93     ld r2, [sp, PT_ret]
94     ld r3, [sp, PT_status32]
95    
96     + ; STAT32 for Int return created from scratch
97     + ; (No delay dlot, disable Further intr in trampoline)
98     +
99     bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
100     st r0, [sp, PT_status32]
101    
102     mov r1, .Lintr_ret_to_delay_slot_2
103     st r1, [sp, PT_ret]
104    
105     + ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
106     st r2, [sp, 0]
107     st r3, [sp, 4]
108    
109     b .Lisr_ret_fast_path
110    
111     .Lintr_ret_to_delay_slot_2:
112     + ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
113     sub sp, sp, SZ_PT_REGS
114     st r9, [sp, -4]
115    
116     @@ -243,11 +253,19 @@ debug_marker_ds:
117     ld r9, [sp, 4]
118     sr r9, [erstatus]
119    
120     + ; restore AUX_USER_SP if returning to U mode
121     + bbit0 r9, STATUS_U_BIT, 1f
122     + ld r9, [sp, PT_sp]
123     + sr r9, [AUX_USER_SP]
124     +
125     +1:
126     ld r9, [sp, 8]
127     sr r9, [erbta]
128    
129     ld r9, [sp, -4]
130     add sp, sp, SZ_PT_REGS
131     +
132     + ; return from pure kernel mode to delay slot
133     rtie
134    
135     END(ret_from_exception)
136     diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
137     index bd237acdf4f2..30d806ce0c78 100644
138     --- a/arch/arc/kernel/mcip.c
139     +++ b/arch/arc/kernel/mcip.c
140     @@ -11,9 +11,12 @@
141     #include <linux/smp.h>
142     #include <linux/irq.h>
143     #include <linux/spinlock.h>
144     +#include <asm/irqflags-arcv2.h>
145     #include <asm/mcip.h>
146     #include <asm/setup.h>
147    
148     +#define SOFTIRQ_IRQ 21
149     +
150     static char smp_cpuinfo_buf[128];
151     static int idu_detected;
152    
153     @@ -22,6 +25,7 @@ static DEFINE_RAW_SPINLOCK(mcip_lock);
154     static void mcip_setup_per_cpu(int cpu)
155     {
156     smp_ipi_irq_setup(cpu, IPI_IRQ);
157     + smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
158     }
159    
160     static void mcip_ipi_send(int cpu)
161     @@ -29,6 +33,12 @@ static void mcip_ipi_send(int cpu)
162     unsigned long flags;
163     int ipi_was_pending;
164    
165     + /* ARConnect can only send IPI to others */
166     + if (unlikely(cpu == raw_smp_processor_id())) {
167     + arc_softirq_trigger(SOFTIRQ_IRQ);
168     + return;
169     + }
170     +
171     /*
172     * NOTE: We must spin here if the other cpu hasn't yet
173     * serviced a previous message. This can burn lots
174     @@ -63,6 +73,11 @@ static void mcip_ipi_clear(int irq)
175     unsigned long flags;
176     unsigned int __maybe_unused copy;
177    
178     + if (unlikely(irq == SOFTIRQ_IRQ)) {
179     + arc_softirq_clear(irq);
180     + return;
181     + }
182     +
183     raw_spin_lock_irqsave(&mcip_lock, flags);
184    
185     /* Who sent the IPI */
186     diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
187     index 259c0ca9c99a..ddbb361267d8 100644
188     --- a/arch/arm/Kconfig.debug
189     +++ b/arch/arm/Kconfig.debug
190     @@ -162,10 +162,9 @@ choice
191     mobile SoCs in the Kona family of chips (e.g. bcm28155,
192     bcm11351, etc...)
193    
194     - config DEBUG_BCM63XX
195     + config DEBUG_BCM63XX_UART
196     bool "Kernel low-level debugging on BCM63XX UART"
197     depends on ARCH_BCM_63XX
198     - select DEBUG_UART_BCM63XX
199    
200     config DEBUG_BERLIN_UART
201     bool "Marvell Berlin SoC Debug UART"
202     @@ -1348,7 +1347,7 @@ config DEBUG_LL_INCLUDE
203     default "debug/vf.S" if DEBUG_VF_UART
204     default "debug/vt8500.S" if DEBUG_VT8500_UART0
205     default "debug/zynq.S" if DEBUG_ZYNQ_UART0 || DEBUG_ZYNQ_UART1
206     - default "debug/bcm63xx.S" if DEBUG_UART_BCM63XX
207     + default "debug/bcm63xx.S" if DEBUG_BCM63XX_UART
208     default "debug/digicolor.S" if DEBUG_DIGICOLOR_UA0
209     default "mach/debug-macro.S"
210    
211     @@ -1364,10 +1363,6 @@ config DEBUG_UART_8250
212     ARCH_IOP33X || ARCH_IXP4XX || \
213     ARCH_LPC32XX || ARCH_MV78XX0 || ARCH_ORION5X || ARCH_RPC
214    
215     -# Compatibility options for BCM63xx
216     -config DEBUG_UART_BCM63XX
217     - def_bool ARCH_BCM_63XX
218     -
219     config DEBUG_UART_PHYS
220     hex "Physical base address of debug UART"
221     default 0x00100a00 if DEBUG_NETX_UART
222     @@ -1462,7 +1457,7 @@ config DEBUG_UART_PHYS
223     default 0xfffb0000 if DEBUG_OMAP1UART1 || DEBUG_OMAP7XXUART1
224     default 0xfffb0800 if DEBUG_OMAP1UART2 || DEBUG_OMAP7XXUART2
225     default 0xfffb9800 if DEBUG_OMAP1UART3 || DEBUG_OMAP7XXUART3
226     - default 0xfffe8600 if DEBUG_UART_BCM63XX
227     + default 0xfffe8600 if DEBUG_BCM63XX_UART
228     default 0xfffff700 if ARCH_IOP33X
229     depends on ARCH_EP93XX || \
230     DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
231     @@ -1474,7 +1469,7 @@ config DEBUG_UART_PHYS
232     DEBUG_RCAR_GEN2_SCIF0 || DEBUG_RCAR_GEN2_SCIF2 || \
233     DEBUG_RMOBILE_SCIFA0 || DEBUG_RMOBILE_SCIFA1 || \
234     DEBUG_RMOBILE_SCIFA4 || DEBUG_S3C24XX_UART || \
235     - DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
236     + DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
237     DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0 || \
238     DEBUG_AT91_UART
239    
240     @@ -1515,7 +1510,7 @@ config DEBUG_UART_VIRT
241     default 0xfb10c000 if DEBUG_REALVIEW_PB1176_PORT
242     default 0xfc40ab00 if DEBUG_BRCMSTB_UART
243     default 0xfc705000 if DEBUG_ZTE_ZX
244     - default 0xfcfe8600 if DEBUG_UART_BCM63XX
245     + default 0xfcfe8600 if DEBUG_BCM63XX_UART
246     default 0xfd000000 if ARCH_SPEAR3XX || ARCH_SPEAR6XX
247     default 0xfd000000 if ARCH_SPEAR13XX
248     default 0xfd012000 if ARCH_MV78XX0
249     @@ -1566,7 +1561,7 @@ config DEBUG_UART_VIRT
250     DEBUG_UART_8250 || DEBUG_UART_PL01X || DEBUG_MESON_UARTAO || \
251     DEBUG_NETX_UART || \
252     DEBUG_QCOM_UARTDM || DEBUG_S3C24XX_UART || \
253     - DEBUG_UART_BCM63XX || DEBUG_ASM9260_UART || \
254     + DEBUG_BCM63XX_UART || DEBUG_ASM9260_UART || \
255     DEBUG_SIRFSOC_UART || DEBUG_DIGICOLOR_UA0
256    
257     config DEBUG_UART_8250_SHIFT
258     diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
259     index 1afe24629d1f..b0c912feaa2f 100644
260     --- a/arch/arm/boot/dts/sama5d2-pinfunc.h
261     +++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
262     @@ -90,7 +90,7 @@
263     #define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
264     #define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
265     #define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
266     -#define PIN_PA15 14
267     +#define PIN_PA15 15
268     #define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
269     #define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
270     #define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
271     diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
272     index 68ee3ce17b82..b4c6d99364f1 100644
273     --- a/arch/arm/include/asm/psci.h
274     +++ b/arch/arm/include/asm/psci.h
275     @@ -16,7 +16,7 @@
276    
277     extern struct smp_operations psci_smp_ops;
278    
279     -#ifdef CONFIG_ARM_PSCI
280     +#if defined(CONFIG_SMP) && defined(CONFIG_ARM_PSCI)
281     bool psci_smp_available(void);
282     #else
283     static inline bool psci_smp_available(void) { return false; }
284     diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
285     index 0375c8caa061..9408a994cc91 100644
286     --- a/arch/arm/include/asm/xen/page-coherent.h
287     +++ b/arch/arm/include/asm/xen/page-coherent.h
288     @@ -35,14 +35,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
289     dma_addr_t dev_addr, unsigned long offset, size_t size,
290     enum dma_data_direction dir, struct dma_attrs *attrs)
291     {
292     - bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
293     + unsigned long page_pfn = page_to_xen_pfn(page);
294     + unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
295     + unsigned long compound_pages =
296     + (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
297     + bool local = (page_pfn <= dev_pfn) &&
298     + (dev_pfn - page_pfn < compound_pages);
299     +
300     /*
301     - * Dom0 is mapped 1:1, while the Linux page can be spanned accross
302     - * multiple Xen page, it's not possible to have a mix of local and
303     - * foreign Xen page. So if the first xen_pfn == mfn the page is local
304     - * otherwise it's a foreign page grant-mapped in dom0. If the page is
305     - * local we can safely call the native dma_ops function, otherwise we
306     - * call the xen specific function.
307     + * Dom0 is mapped 1:1, while the Linux page can span across
308     + * multiple Xen pages, it's not possible for it to contain a
309     + * mix of local and foreign Xen pages. So if the first xen_pfn
310     + * == mfn the page is local otherwise it's a foreign page
311     + * grant-mapped in dom0. If the page is local we can safely
312     + * call the native dma_ops function, otherwise we call the xen
313     + * specific function.
314     */
315     if (local)
316     __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
317     diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
318     index 7b76ce01c21d..8633c703546a 100644
319     --- a/arch/arm/mach-omap2/gpmc-onenand.c
320     +++ b/arch/arm/mach-omap2/gpmc-onenand.c
321     @@ -101,10 +101,8 @@ static void omap2_onenand_set_async_mode(void __iomem *onenand_base)
322    
323     static void set_onenand_cfg(void __iomem *onenand_base)
324     {
325     - u32 reg;
326     + u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
327    
328     - reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
329     - reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
330     reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
331     ONENAND_SYS_CFG1_BL_16;
332     if (onenand_flags & ONENAND_FLAG_SYNCREAD)
333     @@ -123,6 +121,7 @@ static void set_onenand_cfg(void __iomem *onenand_base)
334     reg |= ONENAND_SYS_CFG1_VHF;
335     else
336     reg &= ~ONENAND_SYS_CFG1_VHF;
337     +
338     writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
339     }
340    
341     @@ -289,6 +288,7 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
342     }
343     }
344    
345     + onenand_async.sync_write = true;
346     omap2_onenand_calc_async_timings(&t);
347    
348     ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
349     diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
350     index cd822d8454c0..b6c90e5006e4 100644
351     --- a/arch/arm64/Makefile
352     +++ b/arch/arm64/Makefile
353     @@ -27,6 +27,7 @@ $(warning LSE atomics not supported by binutils)
354     endif
355    
356     KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr)
357     +KBUILD_CFLAGS += $(call cc-option, -mpc-relative-literal-loads)
358     KBUILD_AFLAGS += $(lseinstr)
359    
360     ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
361     diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
362     index 2046c0230224..21ed7150fec3 100644
363     --- a/arch/mips/include/asm/page.h
364     +++ b/arch/mips/include/asm/page.h
365     @@ -33,7 +33,7 @@
366     #define PAGE_SHIFT 16
367     #endif
368     #define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
369     -#define PAGE_MASK (~(PAGE_SIZE - 1))
370     +#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
371    
372     /*
373     * This is used for calculating the real page sizes
374     diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
375     index 8957f15e21ec..18826aa15a7c 100644
376     --- a/arch/mips/include/asm/pgtable.h
377     +++ b/arch/mips/include/asm/pgtable.h
378     @@ -353,7 +353,7 @@ static inline pte_t pte_mkdirty(pte_t pte)
379     static inline pte_t pte_mkyoung(pte_t pte)
380     {
381     pte_val(pte) |= _PAGE_ACCESSED;
382     -#ifdef CONFIG_CPU_MIPSR2
383     +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
384     if (!(pte_val(pte) & _PAGE_NO_READ))
385     pte_val(pte) |= _PAGE_SILENT_READ;
386     else
387     @@ -560,7 +560,7 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
388     {
389     pmd_val(pmd) |= _PAGE_ACCESSED;
390    
391     -#ifdef CONFIG_CPU_MIPSR2
392     +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
393     if (!(pmd_val(pmd) & _PAGE_NO_READ))
394     pmd_val(pmd) |= _PAGE_SILENT_READ;
395     else
396     diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
397     index 6499d93ae68d..47bc45a67e9b 100644
398     --- a/arch/mips/include/asm/syscall.h
399     +++ b/arch/mips/include/asm/syscall.h
400     @@ -101,10 +101,8 @@ static inline void syscall_get_arguments(struct task_struct *task,
401     /* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
402     if ((config_enabled(CONFIG_32BIT) ||
403     test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
404     - (regs->regs[2] == __NR_syscall)) {
405     + (regs->regs[2] == __NR_syscall))
406     i++;
407     - n++;
408     - }
409    
410     while (n--)
411     ret |= mips_get_syscall_arg(args++, task, regs, i++);
412     diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
413     index bf9f1a77f0e5..a2631a52ca99 100644
414     --- a/arch/mips/loongson64/loongson-3/hpet.c
415     +++ b/arch/mips/loongson64/loongson-3/hpet.c
416     @@ -13,6 +13,9 @@
417     #define SMBUS_PCI_REG64 0x64
418     #define SMBUS_PCI_REGB4 0xb4
419    
420     +#define HPET_MIN_CYCLES 64
421     +#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
422     +
423     static DEFINE_SPINLOCK(hpet_lock);
424     DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
425    
426     @@ -161,8 +164,9 @@ static int hpet_next_event(unsigned long delta,
427     cnt += delta;
428     hpet_write(HPET_T0_CMP, cnt);
429    
430     - res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
431     - return res;
432     + res = (int)(cnt - hpet_read(HPET_COUNTER));
433     +
434     + return res < HPET_MIN_CYCLES ? -ETIME : 0;
435     }
436    
437     static irqreturn_t hpet_irq_handler(int irq, void *data)
438     @@ -237,7 +241,7 @@ void __init setup_hpet_timer(void)
439     cd->cpumask = cpumask_of(cpu);
440     clockevent_set_clock(cd, HPET_FREQ);
441     cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
442     - cd->min_delta_ns = 5000;
443     + cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
444    
445     clockevents_register_device(cd);
446     setup_irq(HPET_T0_IRQ, &hpet_irq);
447     diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
448     index 1a4738a8f2d3..509832a9836c 100644
449     --- a/arch/mips/loongson64/loongson-3/smp.c
450     +++ b/arch/mips/loongson64/loongson-3/smp.c
451     @@ -30,13 +30,13 @@
452     #include "smp.h"
453    
454     DEFINE_PER_CPU(int, cpu_state);
455     -DEFINE_PER_CPU(uint32_t, core0_c0count);
456    
457     static void *ipi_set0_regs[16];
458     static void *ipi_clear0_regs[16];
459     static void *ipi_status0_regs[16];
460     static void *ipi_en0_regs[16];
461     static void *ipi_mailbox_buf[16];
462     +static uint32_t core0_c0count[NR_CPUS];
463    
464     /* read a 32bit value from ipi register */
465     #define loongson3_ipi_read32(addr) readl(addr)
466     @@ -275,12 +275,14 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
467     if (action & SMP_ASK_C0COUNT) {
468     BUG_ON(cpu != 0);
469     c0count = read_c0_count();
470     - for (i = 1; i < num_possible_cpus(); i++)
471     - per_cpu(core0_c0count, i) = c0count;
472     + c0count = c0count ? c0count : 1;
473     + for (i = 1; i < nr_cpu_ids; i++)
474     + core0_c0count[i] = c0count;
475     + __wbflush(); /* Let others see the result ASAP */
476     }
477     }
478    
479     -#define MAX_LOOPS 1111
480     +#define MAX_LOOPS 800
481     /*
482     * SMP init and finish on secondary CPUs
483     */
484     @@ -305,16 +307,20 @@ static void loongson3_init_secondary(void)
485     cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
486    
487     i = 0;
488     - __this_cpu_write(core0_c0count, 0);
489     + core0_c0count[cpu] = 0;
490     loongson3_send_ipi_single(0, SMP_ASK_C0COUNT);
491     - while (!__this_cpu_read(core0_c0count)) {
492     + while (!core0_c0count[cpu]) {
493     i++;
494     cpu_relax();
495     }
496    
497     if (i > MAX_LOOPS)
498     i = MAX_LOOPS;
499     - initcount = __this_cpu_read(core0_c0count) + i;
500     + if (cpu_data[cpu].package)
501     + initcount = core0_c0count[cpu] + i;
502     + else /* Local access is faster for loops */
503     + initcount = core0_c0count[cpu] + i/2;
504     +
505     write_c0_count(initcount);
506     }
507    
508     diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
509     index 32e0be27673f..29f73e00253d 100644
510     --- a/arch/mips/mm/tlbex.c
511     +++ b/arch/mips/mm/tlbex.c
512     @@ -242,7 +242,7 @@ static void output_pgtable_bits_defines(void)
513     pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
514     pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
515     #endif
516     -#ifdef CONFIG_CPU_MIPSR2
517     +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
518     if (cpu_has_rixi) {
519     #ifdef _PAGE_NO_EXEC_SHIFT
520     pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
521     diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
522     index f69ecaa7ce33..52c1e273f8cd 100644
523     --- a/arch/powerpc/kernel/eeh_driver.c
524     +++ b/arch/powerpc/kernel/eeh_driver.c
525     @@ -418,8 +418,7 @@ static void *eeh_rmv_device(void *data, void *userdata)
526     eeh_pcid_put(dev);
527     if (driver->err_handler &&
528     driver->err_handler->error_detected &&
529     - driver->err_handler->slot_reset &&
530     - driver->err_handler->resume)
531     + driver->err_handler->slot_reset)
532     return NULL;
533     }
534    
535     diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
536     index 2559b16da525..17d9dcd29d45 100644
537     --- a/arch/s390/include/asm/fpu/internal.h
538     +++ b/arch/s390/include/asm/fpu/internal.h
539     @@ -48,6 +48,7 @@ static inline void convert_fp_to_vx(__vector128 *vxrs, freg_t *fprs)
540     static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
541     {
542     fpregs->pad = 0;
543     + fpregs->fpc = fpu->fpc;
544     if (MACHINE_HAS_VX)
545     convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
546     else
547     @@ -57,6 +58,7 @@ static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
548    
549     static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
550     {
551     + fpu->fpc = fpregs->fpc;
552     if (MACHINE_HAS_VX)
553     convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
554     else
555     diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
556     index efaac2c3bb77..e9a983f40a24 100644
557     --- a/arch/s390/include/asm/kvm_host.h
558     +++ b/arch/s390/include/asm/kvm_host.h
559     @@ -506,7 +506,6 @@ struct kvm_vcpu_arch {
560     struct kvm_s390_sie_block *sie_block;
561     unsigned int host_acrs[NUM_ACRS];
562     struct fpu host_fpregs;
563     - struct fpu guest_fpregs;
564     struct kvm_s390_local_interrupt local_int;
565     struct hrtimer ckc_timer;
566     struct kvm_s390_pgm_info pgm;
567     diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
568     index 9cd248f637c7..dc6c9c604543 100644
569     --- a/arch/s390/kernel/asm-offsets.c
570     +++ b/arch/s390/kernel/asm-offsets.c
571     @@ -181,6 +181,7 @@ int main(void)
572     OFFSET(__LC_PSW_SAVE_AREA, _lowcore, psw_save_area);
573     OFFSET(__LC_PREFIX_SAVE_AREA, _lowcore, prefixreg_save_area);
574     OFFSET(__LC_FP_CREG_SAVE_AREA, _lowcore, fpt_creg_save_area);
575     + OFFSET(__LC_TOD_PROGREG_SAVE_AREA, _lowcore, tod_progreg_save_area);
576     OFFSET(__LC_CPU_TIMER_SAVE_AREA, _lowcore, cpu_timer_save_area);
577     OFFSET(__LC_CLOCK_COMP_SAVE_AREA, _lowcore, clock_comp_save_area);
578     OFFSET(__LC_AREGS_SAVE_AREA, _lowcore, access_regs_save_area);
579     diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
580     index 66c94417c0ba..4af60374eba0 100644
581     --- a/arch/s390/kernel/compat_signal.c
582     +++ b/arch/s390/kernel/compat_signal.c
583     @@ -271,7 +271,7 @@ static int restore_sigregs_ext32(struct pt_regs *regs,
584    
585     /* Restore high gprs from signal stack */
586     if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
587     - sizeof(&sregs_ext->gprs_high)))
588     + sizeof(sregs_ext->gprs_high)))
589     return -EFAULT;
590     for (i = 0; i < NUM_GPRS; i++)
591     *(__u32 *)&regs->gprs[i] = gprs_high[i];
592     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
593     index 846589281b04..a08d0afd5ff6 100644
594     --- a/arch/s390/kvm/kvm-s390.c
595     +++ b/arch/s390/kvm/kvm-s390.c
596     @@ -1268,44 +1268,18 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
597     return 0;
598     }
599    
600     -/*
601     - * Backs up the current FP/VX register save area on a particular
602     - * destination. Used to switch between different register save
603     - * areas.
604     - */
605     -static inline void save_fpu_to(struct fpu *dst)
606     -{
607     - dst->fpc = current->thread.fpu.fpc;
608     - dst->regs = current->thread.fpu.regs;
609     -}
610     -
611     -/*
612     - * Switches the FP/VX register save area from which to lazy
613     - * restore register contents.
614     - */
615     -static inline void load_fpu_from(struct fpu *from)
616     -{
617     - current->thread.fpu.fpc = from->fpc;
618     - current->thread.fpu.regs = from->regs;
619     -}
620     -
621     void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
622     {
623     /* Save host register state */
624     save_fpu_regs();
625     - save_fpu_to(&vcpu->arch.host_fpregs);
626     -
627     - if (test_kvm_facility(vcpu->kvm, 129)) {
628     - current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
629     - /*
630     - * Use the register save area in the SIE-control block
631     - * for register restore and save in kvm_arch_vcpu_put()
632     - */
633     - current->thread.fpu.vxrs =
634     - (__vector128 *)&vcpu->run->s.regs.vrs;
635     - } else
636     - load_fpu_from(&vcpu->arch.guest_fpregs);
637     + vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
638     + vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
639    
640     + /* Depending on MACHINE_HAS_VX, data stored to vrs either
641     + * has vector register or floating point register format.
642     + */
643     + current->thread.fpu.regs = vcpu->run->s.regs.vrs;
644     + current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
645     if (test_fp_ctl(current->thread.fpu.fpc))
646     /* User space provided an invalid FPC, let's clear it */
647     current->thread.fpu.fpc = 0;
648     @@ -1321,19 +1295,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
649     atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
650     gmap_disable(vcpu->arch.gmap);
651    
652     + /* Save guest register state */
653     save_fpu_regs();
654     + vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
655    
656     - if (test_kvm_facility(vcpu->kvm, 129))
657     - /*
658     - * kvm_arch_vcpu_load() set up the register save area to
659     - * the &vcpu->run->s.regs.vrs and, thus, the vector registers
660     - * are already saved. Only the floating-point control must be
661     - * copied.
662     - */
663     - vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
664     - else
665     - save_fpu_to(&vcpu->arch.guest_fpregs);
666     - load_fpu_from(&vcpu->arch.host_fpregs);
667     + /* Restore host register state */
668     + current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
669     + current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
670    
671     save_access_regs(vcpu->run->s.regs.acrs);
672     restore_access_regs(vcpu->arch.host_acrs);
673     @@ -1351,8 +1319,9 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
674     memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
675     vcpu->arch.sie_block->gcr[0] = 0xE0UL;
676     vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
677     - vcpu->arch.guest_fpregs.fpc = 0;
678     - asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
679     + /* make sure the new fpc will be lazily loaded */
680     + save_fpu_regs();
681     + current->thread.fpu.fpc = 0;
682     vcpu->arch.sie_block->gbea = 1;
683     vcpu->arch.sie_block->pp = 0;
684     vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
685     @@ -1501,19 +1470,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
686     vcpu->arch.local_int.wq = &vcpu->wq;
687     vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
688    
689     - /*
690     - * Allocate a save area for floating-point registers. If the vector
691     - * extension is available, register contents are saved in the SIE
692     - * control block. The allocated save area is still required in
693     - * particular places, for example, in kvm_s390_vcpu_store_status().
694     - */
695     - vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
696     - GFP_KERNEL);
697     - if (!vcpu->arch.guest_fpregs.fprs) {
698     - rc = -ENOMEM;
699     - goto out_free_sie_block;
700     - }
701     -
702     rc = kvm_vcpu_init(vcpu, kvm, id);
703     if (rc)
704     goto out_free_sie_block;
705     @@ -1734,19 +1690,27 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
706    
707     int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
708     {
709     + /* make sure the new values will be lazily loaded */
710     + save_fpu_regs();
711     if (test_fp_ctl(fpu->fpc))
712     return -EINVAL;
713     - memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
714     - vcpu->arch.guest_fpregs.fpc = fpu->fpc;
715     - save_fpu_regs();
716     - load_fpu_from(&vcpu->arch.guest_fpregs);
717     + current->thread.fpu.fpc = fpu->fpc;
718     + if (MACHINE_HAS_VX)
719     + convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
720     + else
721     + memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
722     return 0;
723     }
724    
725     int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
726     {
727     - memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
728     - fpu->fpc = vcpu->arch.guest_fpregs.fpc;
729     + /* make sure we have the latest values */
730     + save_fpu_regs();
731     + if (MACHINE_HAS_VX)
732     + convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
733     + else
734     + memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
735     + fpu->fpc = current->thread.fpu.fpc;
736     return 0;
737     }
738    
739     @@ -2266,41 +2230,50 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
740     int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
741     {
742     unsigned char archmode = 1;
743     + freg_t fprs[NUM_FPRS];
744     unsigned int px;
745     u64 clkcomp;
746     int rc;
747    
748     + px = kvm_s390_get_prefix(vcpu);
749     if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
750     if (write_guest_abs(vcpu, 163, &archmode, 1))
751     return -EFAULT;
752     - gpa = SAVE_AREA_BASE;
753     + gpa = 0;
754     } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
755     if (write_guest_real(vcpu, 163, &archmode, 1))
756     return -EFAULT;
757     - gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
758     + gpa = px;
759     + } else
760     + gpa -= __LC_FPREGS_SAVE_AREA;
761     +
762     + /* manually convert vector registers if necessary */
763     + if (MACHINE_HAS_VX) {
764     + convert_vx_to_fp(fprs, current->thread.fpu.vxrs);
765     + rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
766     + fprs, 128);
767     + } else {
768     + rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
769     + vcpu->run->s.regs.vrs, 128);
770     }
771     - rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
772     - vcpu->arch.guest_fpregs.fprs, 128);
773     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
774     + rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
775     vcpu->run->s.regs.gprs, 128);
776     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
777     + rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
778     &vcpu->arch.sie_block->gpsw, 16);
779     - px = kvm_s390_get_prefix(vcpu);
780     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
781     + rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
782     &px, 4);
783     - rc |= write_guest_abs(vcpu,
784     - gpa + offsetof(struct save_area, fp_ctrl_reg),
785     - &vcpu->arch.guest_fpregs.fpc, 4);
786     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
787     + rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
788     + &vcpu->run->s.regs.fpc, 4);
789     + rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
790     &vcpu->arch.sie_block->todpr, 4);
791     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
792     + rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
793     &vcpu->arch.sie_block->cputm, 8);
794     clkcomp = vcpu->arch.sie_block->ckc >> 8;
795     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
796     + rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
797     &clkcomp, 8);
798     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
799     + rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
800     &vcpu->run->s.regs.acrs, 64);
801     - rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
802     + rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
803     &vcpu->arch.sie_block->gcr, 128);
804     return rc ? -EFAULT : 0;
805     }
806     @@ -2313,19 +2286,7 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
807     * it into the save area
808     */
809     save_fpu_regs();
810     - if (test_kvm_facility(vcpu->kvm, 129)) {
811     - /*
812     - * If the vector extension is available, the vector registers
813     - * which overlaps with floating-point registers are saved in
814     - * the SIE-control block. Hence, extract the floating-point
815     - * registers and the FPC value and store them in the
816     - * guest_fpregs structure.
817     - */
818     - vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
819     - convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
820     - current->thread.fpu.vxrs);
821     - } else
822     - save_fpu_to(&vcpu->arch.guest_fpregs);
823     + vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
824     save_access_regs(vcpu->run->s.regs.acrs);
825    
826     return kvm_s390_store_status_unloaded(vcpu, addr);
827     diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
828     index 4d1ee88864e8..18c8b819b0aa 100644
829     --- a/arch/s390/mm/extable.c
830     +++ b/arch/s390/mm/extable.c
831     @@ -52,12 +52,16 @@ void sort_extable(struct exception_table_entry *start,
832     int i;
833    
834     /* Normalize entries to being relative to the start of the section */
835     - for (p = start, i = 0; p < finish; p++, i += 8)
836     + for (p = start, i = 0; p < finish; p++, i += 8) {
837     p->insn += i;
838     + p->fixup += i + 4;
839     + }
840     sort(start, finish - start, sizeof(*start), cmp_ex, NULL);
841     /* Denormalize all entries */
842     - for (p = start, i = 0; p < finish; p++, i += 8)
843     + for (p = start, i = 0; p < finish; p++, i += 8) {
844     p->insn -= i;
845     + p->fixup -= i + 4;
846     + }
847     }
848    
849     #ifdef CONFIG_MODULES
850     diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
851     index 30e7ddb27a3a..c690c8e16a96 100644
852     --- a/arch/sparc/kernel/sys_sparc_64.c
853     +++ b/arch/sparc/kernel/sys_sparc_64.c
854     @@ -413,7 +413,7 @@ out:
855    
856     SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
857     {
858     - int ret;
859     + long ret;
860    
861     if (personality(current->personality) == PER_LINUX32 &&
862     personality(personality) == PER_LINUX)
863     diff --git a/arch/um/os-Linux/start_up.c b/arch/um/os-Linux/start_up.c
864     index 47f1ff056a54..22a358ef1b0c 100644
865     --- a/arch/um/os-Linux/start_up.c
866     +++ b/arch/um/os-Linux/start_up.c
867     @@ -94,6 +94,8 @@ static int start_ptraced_child(void)
868     {
869     int pid, n, status;
870    
871     + fflush(stdout);
872     +
873     pid = fork();
874     if (pid == 0)
875     ptrace_child();
876     diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
877     index 6a1ae3751e82..15cfebaa7688 100644
878     --- a/arch/x86/entry/entry_64_compat.S
879     +++ b/arch/x86/entry/entry_64_compat.S
880     @@ -267,6 +267,7 @@ ENTRY(entry_INT80_compat)
881     * Interrupts are off on entry.
882     */
883     PARAVIRT_ADJUST_EXCEPTION_FRAME
884     + ASM_CLAC /* Do this early to minimize exposure */
885     SWAPGS
886    
887     /*
888     diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
889     index 881b4768644a..e7de5c9a4fbd 100644
890     --- a/arch/x86/include/asm/irq.h
891     +++ b/arch/x86/include/asm/irq.h
892     @@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
893    
894     #define __ARCH_HAS_DO_SOFTIRQ
895    
896     +struct irq_desc;
897     +
898     #ifdef CONFIG_HOTPLUG_CPU
899     #include <linux/cpumask.h>
900     extern int check_irq_vectors_for_cpu_disable(void);
901     extern void fixup_irqs(void);
902     -extern void irq_force_complete_move(int);
903     +extern void irq_force_complete_move(struct irq_desc *desc);
904     #endif
905    
906     #ifdef CONFIG_HAVE_KVM
907     @@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
908     extern void (*x86_platform_ipi_callback)(void);
909     extern void native_init_IRQ(void);
910    
911     -struct irq_desc;
912     extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
913    
914     extern __visible unsigned int do_IRQ(struct pt_regs *regs);
915     diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
916     index f25321894ad2..fdb0fbfb1197 100644
917     --- a/arch/x86/kernel/apic/io_apic.c
918     +++ b/arch/x86/kernel/apic/io_apic.c
919     @@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
920     {
921     int pin, ioapic, irq, irq_entry;
922     const struct cpumask *mask;
923     + struct irq_desc *desc;
924     struct irq_data *idata;
925     struct irq_chip *chip;
926    
927     @@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
928     if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
929     continue;
930    
931     - idata = irq_get_irq_data(irq);
932     + desc = irq_to_desc(irq);
933     + raw_spin_lock_irq(&desc->lock);
934     + idata = irq_desc_get_irq_data(desc);
935    
936     /*
937     * Honour affinities which have been set in early boot
938     @@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
939     /* Might be lapic_chip for irq 0 */
940     if (chip->irq_set_affinity)
941     chip->irq_set_affinity(idata, mask, false);
942     + raw_spin_unlock_irq(&desc->lock);
943     }
944     }
945     #endif
946     diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
947     index 861bc59c8f25..a35f6b5473f4 100644
948     --- a/arch/x86/kernel/apic/vector.c
949     +++ b/arch/x86/kernel/apic/vector.c
950     @@ -30,7 +30,7 @@ struct apic_chip_data {
951    
952     struct irq_domain *x86_vector_domain;
953     static DEFINE_RAW_SPINLOCK(vector_lock);
954     -static cpumask_var_t vector_cpumask;
955     +static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
956     static struct irq_chip lapic_controller;
957     #ifdef CONFIG_X86_IO_APIC
958     static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
959     @@ -116,35 +116,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
960     */
961     static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
962     static int current_offset = VECTOR_OFFSET_START % 16;
963     - int cpu, err;
964     + int cpu, vector;
965    
966     - if (d->move_in_progress)
967     + /*
968     + * If there is still a move in progress or the previous move has not
969     + * been cleaned up completely, tell the caller to come back later.
970     + */
971     + if (d->move_in_progress ||
972     + cpumask_intersects(d->old_domain, cpu_online_mask))
973     return -EBUSY;
974    
975     /* Only try and allocate irqs on cpus that are present */
976     - err = -ENOSPC;
977     cpumask_clear(d->old_domain);
978     + cpumask_clear(searched_cpumask);
979     cpu = cpumask_first_and(mask, cpu_online_mask);
980     while (cpu < nr_cpu_ids) {
981     - int new_cpu, vector, offset;
982     + int new_cpu, offset;
983    
984     + /* Get the possible target cpus for @mask/@cpu from the apic */
985     apic->vector_allocation_domain(cpu, vector_cpumask, mask);
986    
987     + /*
988     + * Clear the offline cpus from @vector_cpumask for searching
989     + * and verify whether the result overlaps with @mask. If true,
990     + * then the call to apic->cpu_mask_to_apicid_and() will
991     + * succeed as well. If not, no point in trying to find a
992     + * vector in this mask.
993     + */
994     + cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
995     + if (!cpumask_intersects(vector_searchmask, mask))
996     + goto next_cpu;
997     +
998     if (cpumask_subset(vector_cpumask, d->domain)) {
999     - err = 0;
1000     if (cpumask_equal(vector_cpumask, d->domain))
1001     - break;
1002     + goto success;
1003     /*
1004     - * New cpumask using the vector is a proper subset of
1005     - * the current in use mask. So cleanup the vector
1006     - * allocation for the members that are not used anymore.
1007     + * Mark the cpus which are not longer in the mask for
1008     + * cleanup.
1009     */
1010     - cpumask_andnot(d->old_domain, d->domain,
1011     - vector_cpumask);
1012     - d->move_in_progress =
1013     - cpumask_intersects(d->old_domain, cpu_online_mask);
1014     - cpumask_and(d->domain, d->domain, vector_cpumask);
1015     - break;
1016     + cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
1017     + vector = d->cfg.vector;
1018     + goto update;
1019     }
1020    
1021     vector = current_vector;
1022     @@ -156,45 +168,60 @@ next:
1023     vector = FIRST_EXTERNAL_VECTOR + offset;
1024     }
1025    
1026     - if (unlikely(current_vector == vector)) {
1027     - cpumask_or(d->old_domain, d->old_domain,
1028     - vector_cpumask);
1029     - cpumask_andnot(vector_cpumask, mask, d->old_domain);
1030     - cpu = cpumask_first_and(vector_cpumask,
1031     - cpu_online_mask);
1032     - continue;
1033     - }
1034     + /* If the search wrapped around, try the next cpu */
1035     + if (unlikely(current_vector == vector))
1036     + goto next_cpu;
1037    
1038     if (test_bit(vector, used_vectors))
1039     goto next;
1040    
1041     - for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
1042     + for_each_cpu(new_cpu, vector_searchmask) {
1043     if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
1044     goto next;
1045     }
1046     /* Found one! */
1047     current_vector = vector;
1048     current_offset = offset;
1049     - if (d->cfg.vector) {
1050     + /* Schedule the old vector for cleanup on all cpus */
1051     + if (d->cfg.vector)
1052     cpumask_copy(d->old_domain, d->domain);
1053     - d->move_in_progress =
1054     - cpumask_intersects(d->old_domain, cpu_online_mask);
1055     - }
1056     - for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
1057     + for_each_cpu(new_cpu, vector_searchmask)
1058     per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
1059     - d->cfg.vector = vector;
1060     - cpumask_copy(d->domain, vector_cpumask);
1061     - err = 0;
1062     - break;
1063     - }
1064     + goto update;
1065    
1066     - if (!err) {
1067     - /* cache destination APIC IDs into cfg->dest_apicid */
1068     - err = apic->cpu_mask_to_apicid_and(mask, d->domain,
1069     - &d->cfg.dest_apicid);
1070     +next_cpu:
1071     + /*
1072     + * We exclude the current @vector_cpumask from the requested
1073     + * @mask and try again with the next online cpu in the
1074     + * result. We cannot modify @mask, so we use @vector_cpumask
1075     + * as a temporary buffer here as it will be reassigned when
1076     + * calling apic->vector_allocation_domain() above.
1077     + */
1078     + cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
1079     + cpumask_andnot(vector_cpumask, mask, searched_cpumask);
1080     + cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
1081     + continue;
1082     }
1083     + return -ENOSPC;
1084    
1085     - return err;
1086     +update:
1087     + /*
1088     + * Exclude offline cpus from the cleanup mask and set the
1089     + * move_in_progress flag when the result is not empty.
1090     + */
1091     + cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
1092     + d->move_in_progress = !cpumask_empty(d->old_domain);
1093     + d->cfg.vector = vector;
1094     + cpumask_copy(d->domain, vector_cpumask);
1095     +success:
1096     + /*
1097     + * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
1098     + * as we already established, that mask & d->domain & cpu_online_mask
1099     + * is not empty.
1100     + */
1101     + BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
1102     + &d->cfg.dest_apicid));
1103     + return 0;
1104     }
1105    
1106     static int assign_irq_vector(int irq, struct apic_chip_data *data,
1107     @@ -224,10 +251,8 @@ static int assign_irq_vector_policy(int irq, int node,
1108     static void clear_irq_vector(int irq, struct apic_chip_data *data)
1109     {
1110     struct irq_desc *desc;
1111     - unsigned long flags;
1112     int cpu, vector;
1113    
1114     - raw_spin_lock_irqsave(&vector_lock, flags);
1115     BUG_ON(!data->cfg.vector);
1116    
1117     vector = data->cfg.vector;
1118     @@ -237,10 +262,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
1119     data->cfg.vector = 0;
1120     cpumask_clear(data->domain);
1121    
1122     - if (likely(!data->move_in_progress)) {
1123     - raw_spin_unlock_irqrestore(&vector_lock, flags);
1124     + /*
1125     + * If move is in progress or the old_domain mask is not empty,
1126     + * i.e. the cleanup IPI has not been processed yet, we need to remove
1127     + * the old references to desc from all cpus vector tables.
1128     + */
1129     + if (!data->move_in_progress && cpumask_empty(data->old_domain))
1130     return;
1131     - }
1132    
1133     desc = irq_to_desc(irq);
1134     for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
1135     @@ -253,7 +281,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
1136     }
1137     }
1138     data->move_in_progress = 0;
1139     - raw_spin_unlock_irqrestore(&vector_lock, flags);
1140     }
1141    
1142     void init_irq_alloc_info(struct irq_alloc_info *info,
1143     @@ -274,19 +301,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
1144     static void x86_vector_free_irqs(struct irq_domain *domain,
1145     unsigned int virq, unsigned int nr_irqs)
1146     {
1147     + struct apic_chip_data *apic_data;
1148     struct irq_data *irq_data;
1149     + unsigned long flags;
1150     int i;
1151    
1152     for (i = 0; i < nr_irqs; i++) {
1153     irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
1154     if (irq_data && irq_data->chip_data) {
1155     + raw_spin_lock_irqsave(&vector_lock, flags);
1156     clear_irq_vector(virq + i, irq_data->chip_data);
1157     - free_apic_chip_data(irq_data->chip_data);
1158     + apic_data = irq_data->chip_data;
1159     + irq_domain_reset_irq_data(irq_data);
1160     + raw_spin_unlock_irqrestore(&vector_lock, flags);
1161     + free_apic_chip_data(apic_data);
1162     #ifdef CONFIG_X86_IO_APIC
1163     if (virq + i < nr_legacy_irqs())
1164     legacy_irq_data[virq + i] = NULL;
1165     #endif
1166     - irq_domain_reset_irq_data(irq_data);
1167     }
1168     }
1169     }
1170     @@ -404,6 +436,8 @@ int __init arch_early_irq_init(void)
1171     arch_init_htirq_domain(x86_vector_domain);
1172    
1173     BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
1174     + BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
1175     + BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
1176    
1177     return arch_early_ioapic_init();
1178     }
1179     @@ -492,14 +526,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
1180     return -EINVAL;
1181    
1182     err = assign_irq_vector(irq, data, dest);
1183     - if (err) {
1184     - if (assign_irq_vector(irq, data,
1185     - irq_data_get_affinity_mask(irq_data)))
1186     - pr_err("Failed to recover vector for irq %d\n", irq);
1187     - return err;
1188     - }
1189     -
1190     - return IRQ_SET_MASK_OK;
1191     + return err ? err : IRQ_SET_MASK_OK;
1192     }
1193    
1194     static struct irq_chip lapic_controller = {
1195     @@ -511,20 +538,12 @@ static struct irq_chip lapic_controller = {
1196     #ifdef CONFIG_SMP
1197     static void __send_cleanup_vector(struct apic_chip_data *data)
1198     {
1199     - cpumask_var_t cleanup_mask;
1200     -
1201     - if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
1202     - unsigned int i;
1203     -
1204     - for_each_cpu_and(i, data->old_domain, cpu_online_mask)
1205     - apic->send_IPI_mask(cpumask_of(i),
1206     - IRQ_MOVE_CLEANUP_VECTOR);
1207     - } else {
1208     - cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
1209     - apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
1210     - free_cpumask_var(cleanup_mask);
1211     - }
1212     + raw_spin_lock(&vector_lock);
1213     + cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
1214     data->move_in_progress = 0;
1215     + if (!cpumask_empty(data->old_domain))
1216     + apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
1217     + raw_spin_unlock(&vector_lock);
1218     }
1219    
1220     void send_cleanup_vector(struct irq_cfg *cfg)
1221     @@ -568,12 +587,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
1222     goto unlock;
1223    
1224     /*
1225     - * Check if the irq migration is in progress. If so, we
1226     - * haven't received the cleanup request yet for this irq.
1227     + * Nothing to cleanup if irq migration is in progress
1228     + * or this cpu is not set in the cleanup mask.
1229     */
1230     - if (data->move_in_progress)
1231     + if (data->move_in_progress ||
1232     + !cpumask_test_cpu(me, data->old_domain))
1233     goto unlock;
1234    
1235     + /*
1236     + * We have two cases to handle here:
1237     + * 1) vector is unchanged but the target mask got reduced
1238     + * 2) vector and the target mask has changed
1239     + *
1240     + * #1 is obvious, but in #2 we have two vectors with the same
1241     + * irq descriptor: the old and the new vector. So we need to
1242     + * make sure that we only cleanup the old vector. The new
1243     + * vector has the current @vector number in the config and
1244     + * this cpu is part of the target mask. We better leave that
1245     + * one alone.
1246     + */
1247     if (vector == data->cfg.vector &&
1248     cpumask_test_cpu(me, data->domain))
1249     goto unlock;
1250     @@ -591,6 +623,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
1251     goto unlock;
1252     }
1253     __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
1254     + cpumask_clear_cpu(me, data->old_domain);
1255     unlock:
1256     raw_spin_unlock(&desc->lock);
1257     }
1258     @@ -619,12 +652,48 @@ void irq_complete_move(struct irq_cfg *cfg)
1259     __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
1260     }
1261    
1262     -void irq_force_complete_move(int irq)
1263     +/*
1264     + * Called with @desc->lock held and interrupts disabled.
1265     + */
1266     +void irq_force_complete_move(struct irq_desc *desc)
1267     {
1268     - struct irq_cfg *cfg = irq_cfg(irq);
1269     + struct irq_data *irqdata = irq_desc_get_irq_data(desc);
1270     + struct apic_chip_data *data = apic_chip_data(irqdata);
1271     + struct irq_cfg *cfg = data ? &data->cfg : NULL;
1272    
1273     - if (cfg)
1274     - __irq_complete_move(cfg, cfg->vector);
1275     + if (!cfg)
1276     + return;
1277     +
1278     + __irq_complete_move(cfg, cfg->vector);
1279     +
1280     + /*
1281     + * This is tricky. If the cleanup of @data->old_domain has not been
1282     + * done yet, then the following setaffinity call will fail with
1283     + * -EBUSY. This can leave the interrupt in a stale state.
1284     + *
1285     + * The cleanup cannot make progress because we hold @desc->lock. So in
1286     + * case @data->old_domain is not yet cleaned up, we need to drop the
1287     + * lock and acquire it again. @desc cannot go away, because the
1288     + * hotplug code holds the sparse irq lock.
1289     + */
1290     + raw_spin_lock(&vector_lock);
1291     + /* Clean out all offline cpus (including ourself) first. */
1292     + cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
1293     + while (!cpumask_empty(data->old_domain)) {
1294     + raw_spin_unlock(&vector_lock);
1295     + raw_spin_unlock(&desc->lock);
1296     + cpu_relax();
1297     + raw_spin_lock(&desc->lock);
1298     + /*
1299     + * Reevaluate apic_chip_data. It might have been cleared after
1300     + * we dropped @desc->lock.
1301     + */
1302     + data = apic_chip_data(irqdata);
1303     + if (!data)
1304     + return;
1305     + raw_spin_lock(&vector_lock);
1306     + }
1307     + raw_spin_unlock(&vector_lock);
1308     }
1309     #endif
1310    
1311     diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
1312     index f8062aaf5df9..61521dc19c10 100644
1313     --- a/arch/x86/kernel/irq.c
1314     +++ b/arch/x86/kernel/irq.c
1315     @@ -462,7 +462,7 @@ void fixup_irqs(void)
1316     * non intr-remapping case, we can't wait till this interrupt
1317     * arrives at this cpu before completing the irq move.
1318     */
1319     - irq_force_complete_move(irq);
1320     + irq_force_complete_move(desc);
1321    
1322     if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
1323     break_affinity = 1;
1324     @@ -470,6 +470,15 @@ void fixup_irqs(void)
1325     }
1326    
1327     chip = irq_data_get_irq_chip(data);
1328     + /*
1329     + * The interrupt descriptor might have been cleaned up
1330     + * already, but it is not yet removed from the radix tree
1331     + */
1332     + if (!chip) {
1333     + raw_spin_unlock(&desc->lock);
1334     + continue;
1335     + }
1336     +
1337     if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
1338     chip->irq_mask(data);
1339    
1340     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
1341     index 1505587d06e9..b9b09fec173b 100644
1342     --- a/arch/x86/kvm/emulate.c
1343     +++ b/arch/x86/kvm/emulate.c
1344     @@ -650,10 +650,10 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
1345     u16 sel;
1346    
1347     la = seg_base(ctxt, addr.seg) + addr.ea;
1348     - *linear = la;
1349     *max_size = 0;
1350     switch (mode) {
1351     case X86EMUL_MODE_PROT64:
1352     + *linear = la;
1353     if (is_noncanonical_address(la))
1354     goto bad;
1355    
1356     @@ -662,6 +662,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
1357     goto bad;
1358     break;
1359     default:
1360     + *linear = la = (u32)la;
1361     usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
1362     addr.seg);
1363     if (!usable)
1364     @@ -689,7 +690,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
1365     if (size > *max_size)
1366     goto bad;
1367     }
1368     - la &= (u32)-1;
1369     break;
1370     }
1371     if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
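
The __linearize() reordering above moves the 32-bit truncation to the point where the linear address is computed, so both the limit checks and the value handed back through *linear see the wrapped address; previously *linear was stored before the mask was applied. A standalone illustration of the wrap (made-up numbers, not the emulator's code):

    #include <stdint.h>
    #include <stdio.h>

    /* In 16/32-bit modes, base + offset wraps at 32 bits, and the
     * wrapped value is the one that must be used afterwards. */
    static uint32_t linearize(uint32_t base, uint32_t ea)
    {
        uint64_t la = (uint64_t)base + ea;   /* can carry past bit 31 */
        return (uint32_t)la;                 /* wrap, as the fix does */
    }

    int main(void)
    {
        /* Without the truncation a caller would see 0x100010000 and
         * touch the wrong memory; wrapped, it is 0x10000. */
        printf("linear = %#x\n", linearize(0xffff0000u, 0x00020000u));
        return 0;
    }
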
1372     diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
1373     index 3058a22a658d..7be8a251363e 100644
1374     --- a/arch/x86/kvm/paging_tmpl.h
1375     +++ b/arch/x86/kvm/paging_tmpl.h
1376     @@ -249,7 +249,7 @@ static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu,
1377     return ret;
1378    
1379     kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
1380     - walker->ptes[level] = pte;
1381     + walker->ptes[level - 1] = pte;
1382     }
1383     return 0;
1384     }
1385     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1386     index 9a2ed8904513..6ef3856aab4b 100644
1387     --- a/arch/x86/kvm/x86.c
1388     +++ b/arch/x86/kvm/x86.c
1389     @@ -2736,6 +2736,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1390     }
1391    
1392     kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
1393     + vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
1394     }
1395    
1396     void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1397     diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
1398     index b2fd67da1701..ef05755a1900 100644
1399     --- a/arch/x86/mm/mpx.c
1400     +++ b/arch/x86/mm/mpx.c
1401     @@ -123,7 +123,7 @@ static int get_reg_offset(struct insn *insn, struct pt_regs *regs,
1402     break;
1403     }
1404    
1405     - if (regno > nr_registers) {
1406     + if (regno >= nr_registers) {
1407     WARN_ONCE(1, "decoded an instruction with an invalid register");
1408     return -EINVAL;
1409     }
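
The one-character mpx change above is the classic fencepost: an array with nr_registers entries has valid indices 0 .. nr_registers - 1, so "regno > nr_registers" still lets the out-of-bounds value regno == nr_registers through. A tiny demonstration (constants invented for the example):

    #include <stdio.h>

    #define NR_REGISTERS 16

    int main(void)
    {
        int regs[NR_REGISTERS] = { 0 };
        int regno = NR_REGISTERS;          /* one past the end */

        if (regno > NR_REGISTERS)          /* buggy check: does not trip */
            return 1;

        if (regno >= NR_REGISTERS) {       /* fixed check: rejects it */
            fprintf(stderr, "invalid register %d\n", regno);
            return 1;
        }
        return regs[regno];                /* not reached */
    }
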
1410     diff --git a/block/bio.c b/block/bio.c
1411     index 4f184d938942..d4d144363250 100644
1412     --- a/block/bio.c
1413     +++ b/block/bio.c
1414     @@ -1090,9 +1090,12 @@ int bio_uncopy_user(struct bio *bio)
1415     if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1416     /*
1417     * if we're in a workqueue, the request is orphaned, so
1418     - * don't copy into a random user address space, just free.
1419     + * don't copy into a random user address space, just free
1420     + * and return -EINTR so user space doesn't expect any data.
1421     */
1422     - if (current->mm && bio_data_dir(bio) == READ)
1423     + if (!current->mm)
1424     + ret = -EINTR;
1425     + else if (bio_data_dir(bio) == READ)
1426     ret = bio_copy_to_iter(bio, bmd->iter);
1427     if (bmd->is_our_pages)
1428     bio_free_pages(bio);
1429     diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
1430     index 3405f7a41e25..5fdac394207a 100644
1431     --- a/drivers/acpi/acpi_video.c
1432     +++ b/drivers/acpi/acpi_video.c
1433     @@ -465,6 +465,15 @@ static struct dmi_system_id video_dmi_table[] = {
1434     * as brightness control does not work.
1435     */
1436     {
1437     + /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
1438     + .callback = video_disable_backlight_sysfs_if,
1439     + .ident = "Toshiba Portege R700",
1440     + .matches = {
1441     + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1442     + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R700"),
1443     + },
1444     + },
1445     + {
1446     /* https://bugs.freedesktop.org/show_bug.cgi?id=82634 */
1447     .callback = video_disable_backlight_sysfs_if,
1448     .ident = "Toshiba Portege R830",
1449     @@ -473,6 +482,15 @@ static struct dmi_system_id video_dmi_table[] = {
1450     DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE R830"),
1451     },
1452     },
1453     + {
1454     + /* https://bugzilla.kernel.org/show_bug.cgi?id=21012 */
1455     + .callback = video_disable_backlight_sysfs_if,
1456     + .ident = "Toshiba Satellite R830",
1457     + .matches = {
1458     + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
1459     + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE R830"),
1460     + },
1461     + },
1462     /*
1463     * Some machine's _DOD IDs don't have bit 31(Device ID Scheme) set
1464     * but the IDs actually follow the Device ID Scheme.
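
The acpi_video hunks above just add rows to a DMI quirk table: each entry pairs a callback with DMI_MATCH criteria, and the callback fires on machines whose DMI strings match every criterion. A reduced sketch of that table-driven shape, using plain strcmp() in place of the real dmi_check_system() machinery (all names invented):

    #include <stdio.h>
    #include <string.h>

    struct quirk {
        const char *vendor;
        const char *product;
        void (*callback)(const struct quirk *q);
    };

    static void disable_backlight_sysfs(const struct quirk *q)
    {
        printf("applying quirk for %s %s\n", q->vendor, q->product);
    }

    static const struct quirk quirks[] = {
        { "TOSHIBA", "PORTEGE R700",   disable_backlight_sysfs },
        { "TOSHIBA", "SATELLITE R830", disable_backlight_sysfs },
        { NULL, NULL, NULL },              /* table terminator */
    };

    static void apply_quirks(const char *vendor, const char *product)
    {
        for (const struct quirk *q = quirks; q->vendor; q++)
            if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                q->callback(q);
    }

    int main(void)
    {
        apply_quirks("TOSHIBA", "PORTEGE R700");
        return 0;
    }
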
1465     diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
1466     index aa45d4802707..11d8209e6e5d 100644
1467     --- a/drivers/acpi/nfit.c
1468     +++ b/drivers/acpi/nfit.c
1469     @@ -468,37 +468,16 @@ static void nfit_mem_find_spa_bdw(struct acpi_nfit_desc *acpi_desc,
1470     nfit_mem->bdw = NULL;
1471     }
1472    
1473     -static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1474     +static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
1475     struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
1476     {
1477     u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
1478     struct nfit_memdev *nfit_memdev;
1479     struct nfit_flush *nfit_flush;
1480     - struct nfit_dcr *nfit_dcr;
1481     struct nfit_bdw *nfit_bdw;
1482     struct nfit_idt *nfit_idt;
1483     u16 idt_idx, range_index;
1484    
1485     - list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1486     - if (nfit_dcr->dcr->region_index != dcr)
1487     - continue;
1488     - nfit_mem->dcr = nfit_dcr->dcr;
1489     - break;
1490     - }
1491     -
1492     - if (!nfit_mem->dcr) {
1493     - dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
1494     - spa->range_index, __to_nfit_memdev(nfit_mem)
1495     - ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
1496     - return -ENODEV;
1497     - }
1498     -
1499     - /*
1500     - * We've found enough to create an nvdimm, optionally
1501     - * find an associated BDW
1502     - */
1503     - list_add(&nfit_mem->list, &acpi_desc->dimms);
1504     -
1505     list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
1506     if (nfit_bdw->bdw->region_index != dcr)
1507     continue;
1508     @@ -507,12 +486,12 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1509     }
1510    
1511     if (!nfit_mem->bdw)
1512     - return 0;
1513     + return;
1514    
1515     nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
1516    
1517     if (!nfit_mem->spa_bdw)
1518     - return 0;
1519     + return;
1520    
1521     range_index = nfit_mem->spa_bdw->range_index;
1522     list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1523     @@ -537,8 +516,6 @@ static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
1524     }
1525     break;
1526     }
1527     -
1528     - return 0;
1529     }
1530    
1531     static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1532     @@ -547,7 +524,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1533     struct nfit_mem *nfit_mem, *found;
1534     struct nfit_memdev *nfit_memdev;
1535     int type = nfit_spa_type(spa);
1536     - u16 dcr;
1537    
1538     switch (type) {
1539     case NFIT_SPA_DCR:
1540     @@ -558,14 +534,18 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1541     }
1542    
1543     list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
1544     - int rc;
1545     + struct nfit_dcr *nfit_dcr;
1546     + u32 device_handle;
1547     + u16 dcr;
1548    
1549     if (nfit_memdev->memdev->range_index != spa->range_index)
1550     continue;
1551     found = NULL;
1552     dcr = nfit_memdev->memdev->region_index;
1553     + device_handle = nfit_memdev->memdev->device_handle;
1554     list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
1555     - if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
1556     + if (__to_nfit_memdev(nfit_mem)->device_handle
1557     + == device_handle) {
1558     found = nfit_mem;
1559     break;
1560     }
1561     @@ -578,6 +558,31 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1562     if (!nfit_mem)
1563     return -ENOMEM;
1564     INIT_LIST_HEAD(&nfit_mem->list);
1565     + list_add(&nfit_mem->list, &acpi_desc->dimms);
1566     + }
1567     +
1568     + list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
1569     + if (nfit_dcr->dcr->region_index != dcr)
1570     + continue;
1571     + /*
1572     + * Record the control region for the dimm. For
1573     + * the ACPI 6.1 case, where there are separate
1574     + * control regions for the pmem vs blk
1575     + * interfaces, be sure to record the extended
1576     + * blk details.
1577     + */
1578     + if (!nfit_mem->dcr)
1579     + nfit_mem->dcr = nfit_dcr->dcr;
1580     + else if (nfit_mem->dcr->windows == 0
1581     + && nfit_dcr->dcr->windows)
1582     + nfit_mem->dcr = nfit_dcr->dcr;
1583     + break;
1584     + }
1585     +
1586     + if (dcr && !nfit_mem->dcr) {
1587     + dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
1588     + spa->range_index, dcr);
1589     + return -ENODEV;
1590     }
1591    
1592     if (type == NFIT_SPA_DCR) {
1593     @@ -594,6 +599,7 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1594     nfit_mem->idt_dcr = nfit_idt->idt;
1595     break;
1596     }
1597     + nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
1598     } else {
1599     /*
1600     * A single dimm may belong to multiple SPA-PM
1601     @@ -602,13 +608,6 @@ static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
1602     */
1603     nfit_mem->memdev_pmem = nfit_memdev->memdev;
1604     }
1605     -
1606     - if (found)
1607     - continue;
1608     -
1609     - rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
1610     - if (rc)
1611     - return rc;
1612     }
1613    
1614     return 0;
1615     diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
1616     index daaf1c4e1e0f..80e55cb0827b 100644
1617     --- a/drivers/acpi/video_detect.c
1618     +++ b/drivers/acpi/video_detect.c
1619     @@ -135,14 +135,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
1620     DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
1621     },
1622     },
1623     - {
1624     - .callback = video_detect_force_vendor,
1625     - .ident = "Dell Inspiron 5737",
1626     - .matches = {
1627     - DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1628     - DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
1629     - },
1630     - },
1631    
1632     /*
1633     * These models have a working acpi_video backlight control, and using
1634     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1635     index a39e85f9efa9..7d00b7a015ea 100644
1636     --- a/drivers/android/binder.c
1637     +++ b/drivers/android/binder.c
1638     @@ -2074,7 +2074,7 @@ static int binder_thread_write(struct binder_proc *proc,
1639     if (get_user(cookie, (binder_uintptr_t __user *)ptr))
1640     return -EFAULT;
1641    
1642     - ptr += sizeof(void *);
1643     + ptr += sizeof(cookie);
1644     list_for_each_entry(w, &proc->delivered_death, entry) {
1645     struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
1646    
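
The binder fix above only matters where sizeof(void *) differs from the wire format: the cookie is a binder_uintptr_t (64-bit in the default ABI), so on a 32-bit kernel advancing the user-buffer cursor by sizeof(void *) would leave it in the middle of the field. The general rule, sketched standalone: step by the size of the field you read, never by the host pointer size.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Two 64-bit cookies laid out as a wire buffer. */
        uint64_t wire[2] = { 0x1111222233334444ull, 0x5555666677778888ull };
        const uint8_t *ptr = (const uint8_t *)wire;
        uint64_t cookie;

        memcpy(&cookie, ptr, sizeof(cookie));
        ptr += sizeof(cookie);      /* right: width of the wire field (8) */
        /* ptr += sizeof(void *);      wrong: 4 on a 32-bit host */
        memcpy(&cookie, ptr, sizeof(cookie));
        printf("second cookie: %#llx\n", (unsigned long long)cookie);
        return 0;
    }
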
1647     diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
1648     index cdf6215a9a22..7dbba387d12a 100644
1649     --- a/drivers/ata/libata-sff.c
1650     +++ b/drivers/ata/libata-sff.c
1651     @@ -997,12 +997,9 @@ static inline int ata_hsm_ok_in_wq(struct ata_port *ap,
1652     static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1653     {
1654     struct ata_port *ap = qc->ap;
1655     - unsigned long flags;
1656    
1657     if (ap->ops->error_handler) {
1658     if (in_wq) {
1659     - spin_lock_irqsave(ap->lock, flags);
1660     -
1661     /* EH might have kicked in while host lock is
1662     * released.
1663     */
1664     @@ -1014,8 +1011,6 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1665     } else
1666     ata_port_freeze(ap);
1667     }
1668     -
1669     - spin_unlock_irqrestore(ap->lock, flags);
1670     } else {
1671     if (likely(!(qc->err_mask & AC_ERR_HSM)))
1672     ata_qc_complete(qc);
1673     @@ -1024,10 +1019,8 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
1674     }
1675     } else {
1676     if (in_wq) {
1677     - spin_lock_irqsave(ap->lock, flags);
1678     ata_sff_irq_on(ap);
1679     ata_qc_complete(qc);
1680     - spin_unlock_irqrestore(ap->lock, flags);
1681     } else
1682     ata_qc_complete(qc);
1683     }
1684     @@ -1048,9 +1041,10 @@ int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
1685     {
1686     struct ata_link *link = qc->dev->link;
1687     struct ata_eh_info *ehi = &link->eh_info;
1688     - unsigned long flags = 0;
1689     int poll_next;
1690    
1691     + lockdep_assert_held(ap->lock);
1692     +
1693     WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
1694    
1695     /* Make sure ata_sff_qc_issue() does not throw things
1696     @@ -1112,14 +1106,6 @@ fsm_start:
1697     }
1698     }
1699    
1700     - /* Send the CDB (atapi) or the first data block (ata pio out).
1701     - * During the state transition, interrupt handler shouldn't
1702     - * be invoked before the data transfer is complete and
1703     - * hsm_task_state is changed. Hence, the following locking.
1704     - */
1705     - if (in_wq)
1706     - spin_lock_irqsave(ap->lock, flags);
1707     -
1708     if (qc->tf.protocol == ATA_PROT_PIO) {
1709     /* PIO data out protocol.
1710     * send first data block.
1711     @@ -1135,9 +1121,6 @@ fsm_start:
1712     /* send CDB */
1713     atapi_send_cdb(ap, qc);
1714    
1715     - if (in_wq)
1716     - spin_unlock_irqrestore(ap->lock, flags);
1717     -
1718     /* if polling, ata_sff_pio_task() handles the rest.
1719     * otherwise, interrupt handler takes over from here.
1720     */
1721     @@ -1361,12 +1344,14 @@ static void ata_sff_pio_task(struct work_struct *work)
1722     u8 status;
1723     int poll_next;
1724    
1725     + spin_lock_irq(ap->lock);
1726     +
1727     BUG_ON(ap->sff_pio_task_link == NULL);
1728     /* qc can be NULL if timeout occurred */
1729     qc = ata_qc_from_tag(ap, link->active_tag);
1730     if (!qc) {
1731     ap->sff_pio_task_link = NULL;
1732     - return;
1733     + goto out_unlock;
1734     }
1735    
1736     fsm_start:
1737     @@ -1381,11 +1366,14 @@ fsm_start:
1738     */
1739     status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
1740     if (status & ATA_BUSY) {
1741     + spin_unlock_irq(ap->lock);
1742     ata_msleep(ap, 2);
1743     + spin_lock_irq(ap->lock);
1744     +
1745     status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
1746     if (status & ATA_BUSY) {
1747     ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
1748     - return;
1749     + goto out_unlock;
1750     }
1751     }
1752    
1753     @@ -1402,6 +1390,8 @@ fsm_start:
1754     */
1755     if (poll_next)
1756     goto fsm_start;
1757     +out_unlock:
1758     + spin_unlock_irq(ap->lock);
1759     }
1760    
1761     /**
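
The libata-sff rework above moves all ap->lock handling into ata_sff_pio_task(), documents the new "caller holds the lock" contract with lockdep_assert_held(), and drops the lock only around the sleep, after which hardware state is re-read. A single-threaded userspace sketch of that contract (an explicit flag plays the role of lockdep; nothing here is libata API):

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <unistd.h>

    static pthread_mutex_t ap_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool ap_lock_held;    /* poor man's lockdep for this sketch */

    static void lock_ap(void)
    {
        pthread_mutex_lock(&ap_lock);
        ap_lock_held = true;
    }

    static void unlock_ap(void)
    {
        ap_lock_held = false;
        pthread_mutex_unlock(&ap_lock);
    }

    /* May only be called with ap_lock held -- the new contract. */
    static void hsm_move(void)
    {
        assert(ap_lock_held);    /* lockdep_assert_held() analogue */
        /* ... advance the state machine under the lock ... */
    }

    static void pio_task(void)
    {
        lock_ap();
        hsm_move();
        unlock_ap();             /* never sleep while holding the lock */
        usleep(2000);            /* ata_msleep(ap, 2) analogue */
        lock_ap();               /* state may have changed: re-check */
        hsm_move();
        unlock_ap();
    }

    int main(void) { pio_task(); return 0; }
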
1762     diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
1763     index 92f0ee388f9e..968897108c76 100644
1764     --- a/drivers/bluetooth/btusb.c
1765     +++ b/drivers/bluetooth/btusb.c
1766     @@ -153,6 +153,10 @@ static const struct usb_device_id btusb_table[] = {
1767     { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01),
1768     .driver_info = BTUSB_BCM_PATCHRAM },
1769    
1770     + /* Toshiba Corp - Broadcom based */
1771     + { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01),
1772     + .driver_info = BTUSB_BCM_PATCHRAM },
1773     +
1774     /* Intel Bluetooth USB Bootloader (RAM module) */
1775     { USB_DEVICE(0x8087, 0x0a5a),
1776     .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC },
1777     diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
1778     index 2fe37f708dc7..813003d6ce09 100644
1779     --- a/drivers/clk/samsung/clk-cpu.c
1780     +++ b/drivers/clk/samsung/clk-cpu.c
1781     @@ -148,6 +148,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1782     unsigned long alt_prate = clk_get_rate(cpuclk->alt_parent);
1783     unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
1784     unsigned long div0, div1 = 0, mux_reg;
1785     + unsigned long flags;
1786    
1787     /* find out the divider values to use for clock data */
1788     while ((cfg_data->prate * 1000) != ndata->new_rate) {
1789     @@ -156,7 +157,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1790     cfg_data++;
1791     }
1792    
1793     - spin_lock(cpuclk->lock);
1794     + spin_lock_irqsave(cpuclk->lock, flags);
1795    
1796     /*
1797     * For the selected PLL clock frequency, get the pre-defined divider
1798     @@ -212,7 +213,7 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
1799     DIV_MASK_ALL);
1800     }
1801    
1802     - spin_unlock(cpuclk->lock);
1803     + spin_unlock_irqrestore(cpuclk->lock, flags);
1804     return 0;
1805     }
1806    
1807     @@ -223,6 +224,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1808     const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
1809     unsigned long div = 0, div_mask = DIV_MASK;
1810     unsigned long mux_reg;
1811     + unsigned long flags;
1812    
1813     /* find out the divider values to use for clock data */
1814     if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) {
1815     @@ -233,7 +235,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1816     }
1817     }
1818    
1819     - spin_lock(cpuclk->lock);
1820     + spin_lock_irqsave(cpuclk->lock, flags);
1821    
1822     /* select mout_apll as the alternate parent */
1823     mux_reg = readl(base + E4210_SRC_CPU);
1824     @@ -246,7 +248,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
1825     }
1826    
1827     exynos_set_safe_div(base, div, div_mask);
1828     - spin_unlock(cpuclk->lock);
1829     + spin_unlock_irqrestore(cpuclk->lock, flags);
1830     return 0;
1831     }
1832    
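
The clk-cpu change above matters because these notifier callbacks may run with interrupts in any state, and a lock that can also be taken from interrupt context must disable local interrupts while held, saving and restoring the previous state rather than blindly re-enabling. A userspace analogue using the thread's signal mask as the "interrupt enable" state (the lock name is borrowed from the driver, the rest is illustrative):

    #include <pthread.h>
    #include <signal.h>

    static pthread_mutex_t cpuclk_lock = PTHREAD_MUTEX_INITIALIZER;

    static void reprogram_dividers(void)
    {
        sigset_t block, saved;

        /* "irqsave": block the async signal, remembering the old mask. */
        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        pthread_sigmask(SIG_BLOCK, &block, &saved);
        pthread_mutex_lock(&cpuclk_lock);

        /* ... touch the divider/mux registers ... */

        pthread_mutex_unlock(&cpuclk_lock);
        /* "irqrestore": put back whatever the caller had, which may
         * already have had SIGALRM blocked -- never just unblock. */
        pthread_sigmask(SIG_SETMASK, &saved, NULL);
    }

    int main(void) { reprogram_dividers(); return 0; }
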
1833     diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
1834     index 6ee91401918e..4da2af9694a2 100644
1835     --- a/drivers/clocksource/tcb_clksrc.c
1836     +++ b/drivers/clocksource/tcb_clksrc.c
1837     @@ -98,7 +98,8 @@ static int tc_shutdown(struct clock_event_device *d)
1838    
1839     __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR));
1840     __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
1841     - clk_disable(tcd->clk);
1842     + if (!clockevent_state_detached(d))
1843     + clk_disable(tcd->clk);
1844    
1845     return 0;
1846     }
1847     diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c
1848     index a92e94b40b5b..dfc3bb410b00 100644
1849     --- a/drivers/clocksource/vt8500_timer.c
1850     +++ b/drivers/clocksource/vt8500_timer.c
1851     @@ -50,6 +50,8 @@
1852    
1853     #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
1854    
1855     +#define MIN_OSCR_DELTA 16
1856     +
1857     static void __iomem *regbase;
1858    
1859     static cycle_t vt8500_timer_read(struct clocksource *cs)
1860     @@ -80,7 +82,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles,
1861     cpu_relax();
1862     writel((unsigned long)alarm, regbase + TIMER_MATCH_VAL);
1863    
1864     - if ((signed)(alarm - clocksource.read(&clocksource)) <= 16)
1865     + if ((signed)(alarm - clocksource.read(&clocksource)) <= MIN_OSCR_DELTA)
1866     return -ETIME;
1867    
1868     writel(1, regbase + TIMER_IER_VAL);
1869     @@ -151,7 +153,7 @@ static void __init vt8500_timer_init(struct device_node *np)
1870     pr_err("%s: setup_irq failed for %s\n", __func__,
1871     clockevent.name);
1872     clockevents_config_and_register(&clockevent, VT8500_TIMER_HZ,
1873     - 4, 0xf0000000);
1874     + MIN_OSCR_DELTA * 2, 0xf0000000);
1875     }
1876    
1877     CLOCKSOURCE_OF_DECLARE(vt8500, "via,vt8500-timer", vt8500_timer_init);
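
Both vt8500 hunks rest on the same hardware property: the timer is a free-running 32-bit counter, so "is the match still ahead of us" must be a signed difference that survives wraparound, and that difference has to exceed a minimum delta (now named MIN_OSCR_DELTA) or the match register is written after the counter has already passed it. The idiom, standalone:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_OSCR_DELTA 16

    /* Nonzero if 'alarm' is usefully ahead of 'now', even when the
     * 32-bit counter wrapped between the two readings. */
    static int alarm_in_future(uint32_t alarm, uint32_t now)
    {
        return (int32_t)(alarm - now) > MIN_OSCR_DELTA;
    }

    int main(void)
    {
        uint32_t now = 0xffffffd0u;
        /* alarm is numerically smaller than now, yet 100 ticks ahead */
        printf("wrapped, in future: %d\n", alarm_in_future(now + 100, now));
        /* too close: programming this match would be missed (-ETIME) */
        printf("delta 8, in future: %d\n", alarm_in_future(now + 8, now));
        return 0;
    }
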
1878     diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
1879     index b260576ddb12..d994b0f652d3 100644
1880     --- a/drivers/cpufreq/cpufreq_governor.c
1881     +++ b/drivers/cpufreq/cpufreq_governor.c
1882     @@ -356,16 +356,18 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy,
1883     if (!have_governor_per_policy())
1884     cdata->gdbs_data = dbs_data;
1885    
1886     + policy->governor_data = dbs_data;
1887     +
1888     ret = sysfs_create_group(get_governor_parent_kobj(policy),
1889     get_sysfs_attr(dbs_data));
1890     if (ret)
1891     goto reset_gdbs_data;
1892    
1893     - policy->governor_data = dbs_data;
1894     -
1895     return 0;
1896    
1897     reset_gdbs_data:
1898     + policy->governor_data = NULL;
1899     +
1900     if (!have_governor_per_policy())
1901     cdata->gdbs_data = NULL;
1902     cdata->exit(dbs_data, !policy->governor->initialized);
1903     @@ -386,16 +388,19 @@ static int cpufreq_governor_exit(struct cpufreq_policy *policy,
1904     if (!cdbs->shared || cdbs->shared->policy)
1905     return -EBUSY;
1906    
1907     - policy->governor_data = NULL;
1908     if (!--dbs_data->usage_count) {
1909     sysfs_remove_group(get_governor_parent_kobj(policy),
1910     get_sysfs_attr(dbs_data));
1911    
1912     + policy->governor_data = NULL;
1913     +
1914     if (!have_governor_per_policy())
1915     cdata->gdbs_data = NULL;
1916    
1917     cdata->exit(dbs_data, policy->governor->initialized == 1);
1918     kfree(dbs_data);
1919     + } else {
1920     + policy->governor_data = NULL;
1921     }
1922    
1923     free_common_dbs_info(policy, cdata);
1924     diff --git a/drivers/cpufreq/pxa2xx-cpufreq.c b/drivers/cpufreq/pxa2xx-cpufreq.c
1925     index 1d99c97defa9..096377232747 100644
1926     --- a/drivers/cpufreq/pxa2xx-cpufreq.c
1927     +++ b/drivers/cpufreq/pxa2xx-cpufreq.c
1928     @@ -202,7 +202,7 @@ static void __init pxa_cpufreq_init_voltages(void)
1929     }
1930     }
1931     #else
1932     -static int pxa_cpufreq_change_voltage(struct pxa_freqs *pxa_freq)
1933     +static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq)
1934     {
1935     return 0;
1936     }
1937     diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1938     index 370c661c7d7b..fa00f3a186da 100644
1939     --- a/drivers/dma/at_xdmac.c
1940     +++ b/drivers/dma/at_xdmac.c
1941     @@ -1688,6 +1688,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
1942     list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1943     at_xdmac_remove_xfer(atchan, desc);
1944    
1945     + clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1946     clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1947     spin_unlock_irqrestore(&atchan->lock, flags);
1948    
1949     @@ -1820,6 +1821,8 @@ static int atmel_xdmac_resume(struct device *dev)
1950     atchan = to_at_xdmac_chan(chan);
1951     at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
1952     if (at_xdmac_chan_is_cyclic(atchan)) {
1953     + if (at_xdmac_chan_is_paused(atchan))
1954     + at_xdmac_device_resume(chan);
1955     at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1956     at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
1957     at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
1958     diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
1959     index 7067b6ddc1db..4f099ea29f83 100644
1960     --- a/drivers/dma/dw/core.c
1961     +++ b/drivers/dma/dw/core.c
1962     @@ -536,16 +536,17 @@ EXPORT_SYMBOL(dw_dma_get_dst_addr);
1963    
1964     /* Called with dwc->lock held and all DMAC interrupts disabled */
1965     static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
1966     - u32 status_err, u32 status_xfer)
1967     + u32 status_block, u32 status_err, u32 status_xfer)
1968     {
1969     unsigned long flags;
1970    
1971     - if (dwc->mask) {
1972     + if (status_block & dwc->mask) {
1973     void (*callback)(void *param);
1974     void *callback_param;
1975    
1976     dev_vdbg(chan2dev(&dwc->chan), "new cyclic period llp 0x%08x\n",
1977     channel_readl(dwc, LLP));
1978     + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1979    
1980     callback = dwc->cdesc->period_callback;
1981     callback_param = dwc->cdesc->period_callback_param;
1982     @@ -577,6 +578,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
1983     channel_writel(dwc, CTL_LO, 0);
1984     channel_writel(dwc, CTL_HI, 0);
1985    
1986     + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
1987     dma_writel(dw, CLEAR.ERROR, dwc->mask);
1988     dma_writel(dw, CLEAR.XFER, dwc->mask);
1989    
1990     @@ -585,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
1991    
1992     spin_unlock_irqrestore(&dwc->lock, flags);
1993     }
1994     +
1995     + /* Re-enable interrupts */
1996     + channel_set_bit(dw, MASK.BLOCK, dwc->mask);
1997     }
1998    
1999     /* ------------------------------------------------------------------------- */
2000     @@ -593,10 +598,12 @@ static void dw_dma_tasklet(unsigned long data)
2001     {
2002     struct dw_dma *dw = (struct dw_dma *)data;
2003     struct dw_dma_chan *dwc;
2004     + u32 status_block;
2005     u32 status_xfer;
2006     u32 status_err;
2007     int i;
2008    
2009     + status_block = dma_readl(dw, RAW.BLOCK);
2010     status_xfer = dma_readl(dw, RAW.XFER);
2011     status_err = dma_readl(dw, RAW.ERROR);
2012    
2013     @@ -605,16 +612,15 @@ static void dw_dma_tasklet(unsigned long data)
2014     for (i = 0; i < dw->dma.chancnt; i++) {
2015     dwc = &dw->chan[i];
2016     if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
2017     - dwc_handle_cyclic(dw, dwc, status_err, status_xfer);
2018     + dwc_handle_cyclic(dw, dwc, status_block, status_err,
2019     + status_xfer);
2020     else if (status_err & (1 << i))
2021     dwc_handle_error(dw, dwc);
2022     else if (status_xfer & (1 << i))
2023     dwc_scan_descriptors(dw, dwc);
2024     }
2025    
2026     - /*
2027     - * Re-enable interrupts.
2028     - */
2029     + /* Re-enable interrupts */
2030     channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
2031     channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
2032     }
2033     @@ -635,6 +641,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
2034     * softirq handler.
2035     */
2036     channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
2037     + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2038     channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
2039    
2040     status = dma_readl(dw, STATUS_INT);
2041     @@ -645,6 +652,7 @@ static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
2042    
2043     /* Try to recover */
2044     channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
2045     + channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
2046     channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
2047     channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
2048     channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
2049     @@ -1111,6 +1119,7 @@ static void dw_dma_off(struct dw_dma *dw)
2050     dma_writel(dw, CFG, 0);
2051    
2052     channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
2053     + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2054     channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
2055     channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
2056     channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);
2057     @@ -1216,6 +1225,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
2058    
2059     /* Disable interrupts */
2060     channel_clear_bit(dw, MASK.XFER, dwc->mask);
2061     + channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
2062     channel_clear_bit(dw, MASK.ERROR, dwc->mask);
2063    
2064     spin_unlock_irqrestore(&dwc->lock, flags);
2065     @@ -1245,7 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
2066     int dw_dma_cyclic_start(struct dma_chan *chan)
2067     {
2068     struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
2069     - struct dw_dma *dw = to_dw_dma(dwc->chan.device);
2070     + struct dw_dma *dw = to_dw_dma(chan->device);
2071     unsigned long flags;
2072    
2073     if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
2074     @@ -1255,25 +1265,10 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
2075    
2076     spin_lock_irqsave(&dwc->lock, flags);
2077    
2078     - /* Assert channel is idle */
2079     - if (dma_readl(dw, CH_EN) & dwc->mask) {
2080     - dev_err(chan2dev(&dwc->chan),
2081     - "%s: BUG: Attempted to start non-idle channel\n",
2082     - __func__);
2083     - dwc_dump_chan_regs(dwc);
2084     - spin_unlock_irqrestore(&dwc->lock, flags);
2085     - return -EBUSY;
2086     - }
2087     -
2088     - dma_writel(dw, CLEAR.ERROR, dwc->mask);
2089     - dma_writel(dw, CLEAR.XFER, dwc->mask);
2090     + /* Enable interrupts to perform cyclic transfer */
2091     + channel_set_bit(dw, MASK.BLOCK, dwc->mask);
2092    
2093     - /* Setup DMAC channel registers */
2094     - channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys);
2095     - channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
2096     - channel_writel(dwc, CTL_HI, 0);
2097     -
2098     - channel_set_bit(dw, CH_EN, dwc->mask);
2099     + dwc_dostart(dwc, dwc->cdesc->desc[0]);
2100    
2101     spin_unlock_irqrestore(&dwc->lock, flags);
2102    
2103     @@ -1479,6 +1474,7 @@ void dw_dma_cyclic_free(struct dma_chan *chan)
2104    
2105     dwc_chan_disable(dw, dwc);
2106    
2107     + dma_writel(dw, CLEAR.BLOCK, dwc->mask);
2108     dma_writel(dw, CLEAR.ERROR, dwc->mask);
2109     dma_writel(dw, CLEAR.XFER, dwc->mask);
2110    
2111     @@ -1567,9 +1563,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
2112     /* Force dma off, just in case */
2113     dw_dma_off(dw);
2114    
2115     - /* Disable BLOCK interrupts as well */
2116     - channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
2117     -
2118     /* Create a pool of consistent memory blocks for hardware descriptors */
2119     dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", chip->dev,
2120     sizeof(struct dw_desc), 4, 0);
2121     diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
2122     index 592af5f0cf39..53587377e672 100644
2123     --- a/drivers/edac/edac_device.c
2124     +++ b/drivers/edac/edac_device.c
2125     @@ -435,16 +435,13 @@ void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
2126     */
2127     void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
2128     {
2129     - int status;
2130     -
2131     if (!edac_dev->edac_check)
2132     return;
2133    
2134     - status = cancel_delayed_work(&edac_dev->work);
2135     - if (status == 0) {
2136     - /* workq instance might be running, wait for it */
2137     - flush_workqueue(edac_workqueue);
2138     - }
2139     + edac_dev->op_state = OP_OFFLINE;
2140     +
2141     + cancel_delayed_work_sync(&edac_dev->work);
2142     + flush_workqueue(edac_workqueue);
2143     }
2144    
2145     /*
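
The teardown fix above (the same pattern repeats for the MC and PCI paths in the next two files) closes a race with self-rearming delayed work: cancel_delayed_work() only removes a pending timer and can miss a callback that is already running and about to requeue itself. Setting op_state to OP_OFFLINE first stops the rearming; cancel_delayed_work_sync() then waits out any in-flight callback. The same shutdown ordering with a periodic worker thread (userspace sketch, not EDAC code):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_int op_state = 1;      /* 1 = running, 0 = offline */

    static void *edac_check_worker(void *arg)
    {
        (void)arg;
        /* Self-rearming: keeps polling until marked offline. */
        while (atomic_load(&op_state)) {
            /* ... poll the hardware for errors ... */
            usleep(10000);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t worker;
        pthread_create(&worker, NULL, edac_check_worker, NULL);
        usleep(50000);

        /* Teardown: first stop the rearming, then wait synchronously. */
        atomic_store(&op_state, 0);      /* op_state = OP_OFFLINE */
        pthread_join(worker, NULL);      /* cancel_..._sync() analogue */
        puts("worker quiesced");
        return 0;
    }
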
2146     diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
2147     index 77ecd6a4179a..1b2c2187b347 100644
2148     --- a/drivers/edac/edac_mc.c
2149     +++ b/drivers/edac/edac_mc.c
2150     @@ -586,18 +586,10 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
2151     */
2152     static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
2153     {
2154     - int status;
2155     -
2156     - if (mci->op_state != OP_RUNNING_POLL)
2157     - return;
2158     -
2159     - status = cancel_delayed_work(&mci->work);
2160     - if (status == 0) {
2161     - edac_dbg(0, "not canceled, flush the queue\n");
2162     + mci->op_state = OP_OFFLINE;
2163    
2164     - /* workq instance might be running, wait for it */
2165     - flush_workqueue(edac_workqueue);
2166     - }
2167     + cancel_delayed_work_sync(&mci->work);
2168     + flush_workqueue(edac_workqueue);
2169     }
2170    
2171     /*
2172     diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
2173     index a75acea0f674..58aed67b7eba 100644
2174     --- a/drivers/edac/edac_mc_sysfs.c
2175     +++ b/drivers/edac/edac_mc_sysfs.c
2176     @@ -880,21 +880,26 @@ static struct device_type mci_attr_type = {
2177     int edac_create_sysfs_mci_device(struct mem_ctl_info *mci,
2178     const struct attribute_group **groups)
2179     {
2180     + char *name;
2181     int i, err;
2182    
2183     /*
2184     * The memory controller needs its own bus, in order to avoid
2185     * namespace conflicts at /sys/bus/edac.
2186     */
2187     - mci->bus->name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
2188     - if (!mci->bus->name)
2189     + name = kasprintf(GFP_KERNEL, "mc%d", mci->mc_idx);
2190     + if (!name)
2191     return -ENOMEM;
2192    
2193     + mci->bus->name = name;
2194     +
2195     edac_dbg(0, "creating bus %s\n", mci->bus->name);
2196    
2197     err = bus_register(mci->bus);
2198     - if (err < 0)
2199     - goto fail_free_name;
2200     + if (err < 0) {
2201     + kfree(name);
2202     + return err;
2203     + }
2204    
2205     /* get the /sys/devices/system/edac subsys reference */
2206     mci->dev.type = &mci_attr_type;
2207     @@ -961,8 +966,8 @@ fail_unregister_dimm:
2208     device_unregister(&mci->dev);
2209     fail_unregister_bus:
2210     bus_unregister(mci->bus);
2211     -fail_free_name:
2212     - kfree(mci->bus->name);
2213     + kfree(name);
2214     +
2215     return err;
2216     }
2217    
2218     @@ -993,10 +998,12 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
2219    
2220     void edac_unregister_sysfs(struct mem_ctl_info *mci)
2221     {
2222     + const char *name = mci->bus->name;
2223     +
2224     edac_dbg(1, "Unregistering device %s\n", dev_name(&mci->dev));
2225     device_unregister(&mci->dev);
2226     bus_unregister(mci->bus);
2227     - kfree(mci->bus->name);
2228     + kfree(name);
2229     }
2230    
2231     static void mc_attr_release(struct device *dev)
2232     diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
2233     index 2cf44b4db80c..b4b38603b804 100644
2234     --- a/drivers/edac/edac_pci.c
2235     +++ b/drivers/edac/edac_pci.c
2236     @@ -274,13 +274,12 @@ static void edac_pci_workq_setup(struct edac_pci_ctl_info *pci,
2237     */
2238     static void edac_pci_workq_teardown(struct edac_pci_ctl_info *pci)
2239     {
2240     - int status;
2241     -
2242     edac_dbg(0, "\n");
2243    
2244     - status = cancel_delayed_work(&pci->work);
2245     - if (status == 0)
2246     - flush_workqueue(edac_workqueue);
2247     + pci->op_state = OP_OFFLINE;
2248     +
2249     + cancel_delayed_work_sync(&pci->work);
2250     + flush_workqueue(edac_workqueue);
2251     }
2252    
2253     /*
2254     diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
2255     index 756eca8c4cf8..10e6774ab2a2 100644
2256     --- a/drivers/firmware/efi/efivars.c
2257     +++ b/drivers/firmware/efi/efivars.c
2258     @@ -221,7 +221,7 @@ sanity_check(struct efi_variable *var, efi_char16_t *name, efi_guid_t vendor,
2259     }
2260    
2261     if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
2262     - efivar_validate(name, data, size) == false) {
2263     + efivar_validate(vendor, name, data, size) == false) {
2264     printk(KERN_ERR "efivars: Malformed variable content\n");
2265     return -EINVAL;
2266     }
2267     @@ -447,7 +447,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
2268     }
2269    
2270     if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
2271     - efivar_validate(name, data, size) == false) {
2272     + efivar_validate(new_var->VendorGuid, name, data,
2273     + size) == false) {
2274     printk(KERN_ERR "efivars: Malformed variable content\n");
2275     return -EINVAL;
2276     }
2277     @@ -540,38 +541,30 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
2278     static int
2279     efivar_create_sysfs_entry(struct efivar_entry *new_var)
2280     {
2281     - int i, short_name_size;
2282     + int short_name_size;
2283     char *short_name;
2284     - unsigned long variable_name_size;
2285     - efi_char16_t *variable_name;
2286     + unsigned long utf8_name_size;
2287     + efi_char16_t *variable_name = new_var->var.VariableName;
2288     int ret;
2289    
2290     - variable_name = new_var->var.VariableName;
2291     - variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
2292     -
2293     /*
2294     - * Length of the variable bytes in ASCII, plus the '-' separator,
2295     + * Length of the variable bytes in UTF8, plus the '-' separator,
2296     * plus the GUID, plus trailing NUL
2297     */
2298     - short_name_size = variable_name_size / sizeof(efi_char16_t)
2299     - + 1 + EFI_VARIABLE_GUID_LEN + 1;
2300     -
2301     - short_name = kzalloc(short_name_size, GFP_KERNEL);
2302     + utf8_name_size = ucs2_utf8size(variable_name);
2303     + short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
2304    
2305     + short_name = kmalloc(short_name_size, GFP_KERNEL);
2306     if (!short_name)
2307     return -ENOMEM;
2308    
2309     - /* Convert Unicode to normal chars (assume top bits are 0),
2310     - ala UTF-8 */
2311     - for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
2312     - short_name[i] = variable_name[i] & 0xFF;
2313     - }
2314     + ucs2_as_utf8(short_name, variable_name, short_name_size);
2315     +
2316     /* This is ugly, but necessary to separate one vendor's
2317     private variables from another's. */
2318     -
2319     - *(short_name + strlen(short_name)) = '-';
2320     + short_name[utf8_name_size] = '-';
2321     efi_guid_to_str(&new_var->var.VendorGuid,
2322     - short_name + strlen(short_name));
2323     + short_name + utf8_name_size + 1);
2324    
2325     new_var->kobj.kset = efivars_kset;
2326    
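
The efivars rework above replaces the old "chop UCS-2 to low bytes" loop with a real UCS-2-to-UTF-8 conversion, which changes the sizing arithmetic: the sysfs name buffer must hold the UTF-8 form of the name (ucs2_utf8size(), up to 3 bytes per UCS-2 code unit), plus one byte for the '-' separator, the textual GUID, and the trailing NUL. The same layout arithmetic, sketched with an already-converted ASCII name (EFI_VARIABLE_GUID_LEN is the 36-character formatted GUID length):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define EFI_VARIABLE_GUID_LEN 36   /* strlen of "xxxxxxxx-xxxx-..." */

    int main(void)
    {
        const char *utf8_name = "BootOrder";   /* post-conversion name */
        const char *guid = "8be4df61-93ca-11d2-aa0d-00e098032b8c";
        size_t utf8_name_size = strlen(utf8_name);

        /* name + '-' + GUID + NUL, mirroring short_name_size above */
        size_t short_name_size = utf8_name_size + 1
                                 + EFI_VARIABLE_GUID_LEN + 1;
        char *short_name = malloc(short_name_size);
        if (!short_name)
            return 1;

        memcpy(short_name, utf8_name, utf8_name_size);
        short_name[utf8_name_size] = '-';
        memcpy(short_name + utf8_name_size + 1, guid,
               EFI_VARIABLE_GUID_LEN + 1);     /* includes the NUL */

        printf("%s\n", short_name);  /* BootOrder-8be4df61-... */
        free(short_name);
        return 0;
    }
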
2327     diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
2328     index 70a0fb10517f..7f2ea21c730d 100644
2329     --- a/drivers/firmware/efi/vars.c
2330     +++ b/drivers/firmware/efi/vars.c
2331     @@ -165,67 +165,133 @@ validate_ascii_string(efi_char16_t *var_name, int match, u8 *buffer,
2332     }
2333    
2334     struct variable_validate {
2335     + efi_guid_t vendor;
2336     char *name;
2337     bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
2338     unsigned long len);
2339     };
2340    
2341     +/*
2342     + * This is the list of variables we need to validate, as well as the
2343     + * whitelist for what we think is safe not to default to immutable.
2344     + *
2345     + * If it has a validate() method that's not NULL, it'll go into the
2346     + * validation routine. If not, it is assumed valid, but still used for
2347     + * whitelisting.
2348     + *
2349     + * Note that it's sorted by {vendor,name}, but globbed names must come after
2350     + * any other name with the same prefix.
2351     + */
2352     static const struct variable_validate variable_validate[] = {
2353     - { "BootNext", validate_uint16 },
2354     - { "BootOrder", validate_boot_order },
2355     - { "DriverOrder", validate_boot_order },
2356     - { "Boot*", validate_load_option },
2357     - { "Driver*", validate_load_option },
2358     - { "ConIn", validate_device_path },
2359     - { "ConInDev", validate_device_path },
2360     - { "ConOut", validate_device_path },
2361     - { "ConOutDev", validate_device_path },
2362     - { "ErrOut", validate_device_path },
2363     - { "ErrOutDev", validate_device_path },
2364     - { "Timeout", validate_uint16 },
2365     - { "Lang", validate_ascii_string },
2366     - { "PlatformLang", validate_ascii_string },
2367     - { "", NULL },
2368     + { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
2369     + { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
2370     + { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
2371     + { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
2372     + { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
2373     + { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
2374     + { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
2375     + { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
2376     + { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
2377     + { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
2378     + { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
2379     + { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
2380     + { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
2381     + { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
2382     + { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
2383     + { LINUX_EFI_CRASH_GUID, "*", NULL },
2384     + { NULL_GUID, "", NULL },
2385     };
2386    
2387     +static bool
2388     +variable_matches(const char *var_name, size_t len, const char *match_name,
2389     + int *match)
2390     +{
2391     + for (*match = 0; ; (*match)++) {
2392     + char c = match_name[*match];
2393     + char u = var_name[*match];
2394     +
2395     + /* Wildcard in the matching name means we've matched */
2396     + if (c == '*')
2397     + return true;
2398     +
2399     + /* Case sensitive match */
2400     + if (!c && *match == len)
2401     + return true;
2402     +
2403     + if (c != u)
2404     + return false;
2405     +
2406     + if (!c)
2407     + return true;
2408     + }
2409     + return true;
2410     +}
2411     +
2412     bool
2413     -efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
2414     +efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
2415     + unsigned long data_size)
2416     {
2417     int i;
2418     - u16 *unicode_name = var_name;
2419     + unsigned long utf8_size;
2420     + u8 *utf8_name;
2421    
2422     - for (i = 0; variable_validate[i].validate != NULL; i++) {
2423     - const char *name = variable_validate[i].name;
2424     - int match;
2425     + utf8_size = ucs2_utf8size(var_name);
2426     + utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
2427     + if (!utf8_name)
2428     + return false;
2429    
2430     - for (match = 0; ; match++) {
2431     - char c = name[match];
2432     - u16 u = unicode_name[match];
2433     + ucs2_as_utf8(utf8_name, var_name, utf8_size);
2434     + utf8_name[utf8_size] = '\0';
2435    
2436     - /* All special variables are plain ascii */
2437     - if (u > 127)
2438     - return true;
2439     + for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
2440     + const char *name = variable_validate[i].name;
2441     + int match = 0;
2442    
2443     - /* Wildcard in the matching name means we've matched */
2444     - if (c == '*')
2445     - return variable_validate[i].validate(var_name,
2446     - match, data, len);
2447     + if (efi_guidcmp(vendor, variable_validate[i].vendor))
2448     + continue;
2449    
2450     - /* Case sensitive match */
2451     - if (c != u)
2452     + if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
2453     + if (variable_validate[i].validate == NULL)
2454     break;
2455     -
2456     - /* Reached the end of the string while matching */
2457     - if (!c)
2458     - return variable_validate[i].validate(var_name,
2459     - match, data, len);
2460     + kfree(utf8_name);
2461     + return variable_validate[i].validate(var_name, match,
2462     + data, data_size);
2463     }
2464     }
2465     -
2466     + kfree(utf8_name);
2467     return true;
2468     }
2469     EXPORT_SYMBOL_GPL(efivar_validate);
2470    
2471     +bool
2472     +efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
2473     + size_t len)
2474     +{
2475     + int i;
2476     + bool found = false;
2477     + int match = 0;
2478     +
2479     + /*
2480     + * Check if our variable is in the validated variables list
2481     + */
2482     + for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
2483     + if (efi_guidcmp(variable_validate[i].vendor, vendor))
2484     + continue;
2485     +
2486     + if (variable_matches(var_name, len,
2487     + variable_validate[i].name, &match)) {
2488     + found = true;
2489     + break;
2490     + }
2491     + }
2492     +
2493     + /*
2494     + * If it's in our list, it is removable.
2495     + */
2496     + return found;
2497     +}
2498     +EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
2499     +
2500     static efi_status_t
2501     check_var_size(u32 attributes, unsigned long size)
2502     {
2503     @@ -852,7 +918,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
2504    
2505     *set = false;
2506    
2507     - if (efivar_validate(name, data, *size) == false)
2508     + if (efivar_validate(*vendor, name, data, *size) == false)
2509     return -EINVAL;
2510    
2511     /*
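
variable_matches(), added above, implements just enough globbing for the whitelist: a case-sensitive literal compare that succeeds early on '*'. That is also why the table's comment insists globbed names come after literal names with the same prefix: "Boot*" matches "BootOrder" too, so "BootOrder" must be hit first to get its own validator. A standalone copy of the core logic (the kernel version additionally returns the match offset for the validators; that bookkeeping is dropped here):

    #include <stdbool.h>
    #include <stdio.h>

    /* Whitelist glob: literal match, or prefix match up to '*'. */
    static bool variable_matches(const char *var_name, const char *match_name)
    {
        for (int i = 0; ; i++) {
            char c = match_name[i];
            char u = var_name[i];

            if (c == '*')        /* wildcard: prefix matched, done */
                return true;
            if (c != u)          /* case-sensitive mismatch */
                return false;
            if (!c)              /* both ended together */
                return true;
        }
    }

    int main(void)
    {
        printf("%d\n", variable_matches("Boot0000", "Boot*"));      /* 1 */
        printf("%d\n", variable_matches("BootOrder", "Boot*"));     /* 1 */
        printf("%d\n", variable_matches("BootOrder", "BootOrder")); /* 1 */
        printf("%d\n", variable_matches("Timeout", "Boot*"));       /* 0 */
        return 0;
    }
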
2512     diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
2513     index 04c270757030..ca066018ea34 100644
2514     --- a/drivers/gpu/drm/amd/amdgpu/Makefile
2515     +++ b/drivers/gpu/drm/amd/amdgpu/Makefile
2516     @@ -22,7 +22,7 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
2517     amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o
2518    
2519     # add asic specific block
2520     -amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o gmc_v7_0.o cik_ih.o kv_smc.o kv_dpm.o \
2521     +amdgpu-$(CONFIG_DRM_AMDGPU_CIK)+= cik.o cik_ih.o kv_smc.o kv_dpm.o \
2522     ci_smc.o ci_dpm.o dce_v8_0.o gfx_v7_0.o cik_sdma.o uvd_v4_2.o vce_v2_0.o \
2523     amdgpu_amdkfd_gfx_v7.o
2524    
2525     @@ -31,6 +31,7 @@ amdgpu-y += \
2526    
2527     # add GMC block
2528     amdgpu-y += \
2529     + gmc_v7_0.o \
2530     gmc_v8_0.o
2531    
2532     # add IH block
2533     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2534     index 048cfe073dae..bb1099c549df 100644
2535     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2536     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
2537     @@ -604,8 +604,6 @@ struct amdgpu_sa_manager {
2538     uint32_t align;
2539     };
2540    
2541     -struct amdgpu_sa_bo;
2542     -
2543     /* sub-allocation buffer */
2544     struct amdgpu_sa_bo {
2545     struct list_head olist;
2546     @@ -2314,6 +2312,8 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
2547     int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
2548     uint32_t flags);
2549     bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
2550     +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2551     + unsigned long end);
2552     bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
2553     uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2554     struct ttm_mem_reg *mem);
2555     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2556     index d5b421330145..c961fe093e12 100644
2557     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2558     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
2559     @@ -1744,15 +1744,20 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2560     }
2561    
2562     /* post card */
2563     - amdgpu_atom_asic_init(adev->mode_info.atom_context);
2564     + if (!amdgpu_card_posted(adev))
2565     + amdgpu_atom_asic_init(adev->mode_info.atom_context);
2566    
2567     r = amdgpu_resume(adev);
2568     + if (r)
2569     + DRM_ERROR("amdgpu_resume failed (%d).\n", r);
2570    
2571     amdgpu_fence_driver_resume(adev);
2572    
2573     - r = amdgpu_ib_ring_tests(adev);
2574     - if (r)
2575     - DRM_ERROR("ib ring test failed (%d).\n", r);
2576     + if (resume) {
2577     + r = amdgpu_ib_ring_tests(adev);
2578     + if (r)
2579     + DRM_ERROR("ib ring test failed (%d).\n", r);
2580     + }
2581    
2582     r = amdgpu_late_init(adev);
2583     if (r)
2584     @@ -1788,6 +1793,7 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
2585     }
2586    
2587     drm_kms_helper_poll_enable(dev);
2588     + drm_helper_hpd_irq_event(dev);
2589    
2590     if (fbcon) {
2591     amdgpu_fbdev_set_suspend(adev, 0);
2592     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2593     index 5580d3420c3a..0c713a908304 100644
2594     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2595     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
2596     @@ -72,8 +72,8 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2597    
2598     struct drm_crtc *crtc = &amdgpuCrtc->base;
2599     unsigned long flags;
2600     - unsigned i;
2601     - int vpos, hpos, stat, min_udelay;
2602     + unsigned i, repcnt = 4;
2603     + int vpos, hpos, stat, min_udelay = 0;
2604     struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
2605    
2606     amdgpu_flip_wait_fence(adev, &work->excl);
2607     @@ -96,7 +96,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2608     * In practice this won't execute very often unless on very fast
2609     * machines because the time window for this to happen is very small.
2610     */
2611     - for (;;) {
2612     + while (amdgpuCrtc->enabled && repcnt--) {
2613     /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
2614     * start in hpos, and to the "fudged earlier" vblank start in
2615     * vpos.
2616     @@ -114,10 +114,22 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
2617     /* Sleep at least until estimated real start of hw vblank */
2618     spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
2619     min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
2620     + if (min_udelay > vblank->framedur_ns / 2000) {
2621     + /* Don't wait ridiculously long - something is wrong */
2622     + repcnt = 0;
2623     + break;
2624     + }
2625     usleep_range(min_udelay, 2 * min_udelay);
2626     spin_lock_irqsave(&crtc->dev->event_lock, flags);
2627     };
2628    
2629     + if (!repcnt)
2630     + DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
2631     + "framedur %d, linedur %d, stat %d, vpos %d, "
2632     + "hpos %d\n", work->crtc_id, min_udelay,
2633     + vblank->framedur_ns / 1000,
2634     + vblank->linedur_ns / 1000, stat, vpos, hpos);
2635     +
2636     /* do the flip (mmio) */
2637     adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
2638     /* set the flip status */
2639     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2640     index 0508c5cd103a..8d6668cedf6d 100644
2641     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2642     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
2643     @@ -250,11 +250,11 @@ static struct pci_device_id pciidlist[] = {
2644     {0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
2645     #endif
2646     /* topaz */
2647     - {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2648     - {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2649     - {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2650     - {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2651     - {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ|AMD_EXP_HW_SUPPORT},
2652     + {0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2653     + {0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2654     + {0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2655     + {0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2656     + {0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
2657     /* tonga */
2658     {0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
2659     {0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
2660     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2661     index b1969f2b2038..d4e2780c0796 100644
2662     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2663     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
2664     @@ -142,7 +142,8 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
2665    
2666     list_for_each_entry(bo, &node->bos, mn_list) {
2667    
2668     - if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
2669     + if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
2670     + end))
2671     continue;
2672    
2673     r = amdgpu_bo_reserve(bo, true);
2674     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2675     index c3ce103b6a33..a2a16acee34d 100644
2676     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2677     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
2678     @@ -399,7 +399,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
2679     }
2680     if (fpfn > bo->placements[i].fpfn)
2681     bo->placements[i].fpfn = fpfn;
2682     - if (lpfn && lpfn < bo->placements[i].lpfn)
2683     + if (!bo->placements[i].lpfn ||
2684     + (lpfn && lpfn < bo->placements[i].lpfn))
2685     bo->placements[i].lpfn = lpfn;
2686     bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
2687     }
2688     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2689     index 22a8c7d3a3ab..03fe25142b78 100644
2690     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2691     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
2692     @@ -595,8 +595,6 @@ force:
2693    
2694     /* update display watermarks based on new power state */
2695     amdgpu_display_bandwidth_update(adev);
2696     - /* update displays */
2697     - amdgpu_dpm_display_configuration_changed(adev);
2698    
2699     adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
2700     adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
2701     @@ -616,6 +614,9 @@ force:
2702    
2703     amdgpu_dpm_post_set_power_state(adev);
2704    
2705     + /* update displays */
2706     + amdgpu_dpm_display_configuration_changed(adev);
2707     +
2708     if (adev->pm.funcs->force_performance_level) {
2709     if (adev->pm.dpm.thermal_active) {
2710     enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
2711     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2712     index 8b88edb0434b..ca72a2e487b9 100644
2713     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2714     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
2715     @@ -354,12 +354,15 @@ int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
2716    
2717     for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
2718     if (fences[i])
2719     - fences[count++] = fences[i];
2720     + fences[count++] = fence_get(fences[i]);
2721    
2722     if (count) {
2723     spin_unlock(&sa_manager->wq.lock);
2724     t = fence_wait_any_timeout(fences, count, false,
2725     MAX_SCHEDULE_TIMEOUT);
2726     + for (i = 0; i < count; ++i)
2727     + fence_put(fences[i]);
2728     +
2729     r = (t > 0) ? 0 : t;
2730     spin_lock(&sa_manager->wq.lock);
2731     } else {
2732     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2733     index dd005c336c97..181ce39ef5e5 100644
2734     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2735     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
2736     @@ -293,7 +293,8 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
2737     fence = to_amdgpu_fence(sync->sync_to[i]);
2738    
2739     /* check if we really need to sync */
2740     - if (!amdgpu_fence_need_sync(fence, ring))
2741     + if (!amdgpu_enable_scheduler &&
2742     + !amdgpu_fence_need_sync(fence, ring))
2743     continue;
2744    
2745     /* prevent GPU deadlocks */
2746     @@ -303,7 +304,7 @@ int amdgpu_sync_rings(struct amdgpu_sync *sync,
2747     }
2748    
2749     if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
2750     - r = fence_wait(&fence->base, true);
2751     + r = fence_wait(sync->sync_to[i], true);
2752     if (r)
2753     return r;
2754     continue;
2755     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2756     index 8a1752ff3d8e..1cbb16e15307 100644
2757     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2758     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
2759     @@ -712,7 +712,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
2760     0, PAGE_SIZE,
2761     PCI_DMA_BIDIRECTIONAL);
2762     if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
2763     - while (--i) {
2764     + while (i--) {
2765     pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
2766     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
2767     gtt->ttm.dma_address[i] = 0;
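
The while (--i) to while (i--) change above fixes the unwind after a mapping failure at index i: entries 0 .. i-1 are the ones already mapped, and the pre-decrement form never visits index 0 (and would wrap badly if the very first mapping failed). A quick demonstration of which indices each form visits:

    #include <stdio.h>

    static void unwind_fixed(int fail_at)
    {
        int i = fail_at;
        printf("while (i--), fail at %d:", fail_at);
        while (i--)                 /* visits fail_at-1 ... 0 */
            printf(" %d", i);
        printf("\n");
    }

    static void unwind_buggy(int fail_at)
    {
        int i = fail_at;
        printf("while (--i), fail at %d:", fail_at);
        while (--i)                 /* misses 0; wraps if fail_at == 0 */
            printf(" %d", i);
        printf("\n");
    }

    int main(void)
    {
        unwind_fixed(3);            /* 2 1 0          */
        unwind_buggy(3);            /* 2 1 -- leaks 0 */
        unwind_fixed(0);            /* nothing to undo */
        return 0;
    }
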
2768     @@ -783,6 +783,25 @@ bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
2769     return !!gtt->userptr;
2770     }
2771    
2772     +bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
2773     + unsigned long end)
2774     +{
2775     + struct amdgpu_ttm_tt *gtt = (void *)ttm;
2776     + unsigned long size;
2777     +
2778     + if (gtt == NULL)
2779     + return false;
2780     +
2781     + if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
2782     + return false;
2783     +
2784     + size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
2785     + if (gtt->userptr > end || gtt->userptr + size <= start)
2786     + return false;
2787     +
2788     + return true;
2789     +}
2790     +
2791     bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
2792     {
2793     struct amdgpu_ttm_tt *gtt = (void *)ttm;
2794     @@ -808,7 +827,7 @@ uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
2795     flags |= AMDGPU_PTE_SNOOPED;
2796     }
2797    
2798     - if (adev->asic_type >= CHIP_TOPAZ)
2799     + if (adev->asic_type >= CHIP_TONGA)
2800     flags |= AMDGPU_PTE_EXECUTABLE;
2801    
2802     flags |= AMDGPU_PTE_READABLE;
2803     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2804     index b53d273eb7a1..39adbb6470d1 100644
2805     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2806     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
2807     @@ -1010,13 +1010,13 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2808     return -EINVAL;
2809    
2810     /* make sure object fit at this offset */
2811     - eaddr = saddr + size;
2812     + eaddr = saddr + size - 1;
2813     if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
2814     return -EINVAL;
2815    
2816     last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
2817     - if (last_pfn > adev->vm_manager.max_pfn) {
2818     - dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
2819     + if (last_pfn >= adev->vm_manager.max_pfn) {
2820     + dev_err(adev->dev, "va above limit (0x%08X >= 0x%08X)\n",
2821     last_pfn, adev->vm_manager.max_pfn);
2822     return -EINVAL;
2823     }
2824     @@ -1025,7 +1025,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2825     eaddr /= AMDGPU_GPU_PAGE_SIZE;
2826    
2827     spin_lock(&vm->it_lock);
2828     - it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
2829     + it = interval_tree_iter_first(&vm->va, saddr, eaddr);
2830     spin_unlock(&vm->it_lock);
2831     if (it) {
2832     struct amdgpu_bo_va_mapping *tmp;
2833     @@ -1046,7 +1046,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
2834    
2835     INIT_LIST_HEAD(&mapping->list);
2836     mapping->it.start = saddr;
2837     - mapping->it.last = eaddr - 1;
2838     + mapping->it.last = eaddr;
2839     mapping->offset = offset;
2840     mapping->flags = flags;
2841    
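
The changes above switch amdgpu_vm_bo_map() to inclusive end addresses: eaddr now names the last valid byte rather than one past the end, the interval tree is queried with [saddr, eaddr] directly, and the limit check correspondingly becomes >= max_pfn. A small standalone illustration of why the inclusive form matters at range boundaries:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t saddr = 0, size = 4096;
        uint64_t end_excl = saddr + size;       /* old: one past the end */
        uint64_t end_incl = saddr + size - 1;   /* new: last valid byte  */
        uint64_t next = 4096;                   /* mapping right after ours */

        /* An adjacent mapping starting at 4096 must not be treated as
         * overlapping [0, 4095]. */
        printf("exclusive end %llu: overlap=%d\n",
               (unsigned long long)end_excl, next <= end_excl);
        printf("inclusive end %llu: overlap=%d\n",
               (unsigned long long)end_incl, next <= end_incl);
        return 0;
}
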
2842     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2843     index e1dcab98e249..4cb45f4602aa 100644
2844     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2845     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
2846     @@ -90,7 +90,6 @@ MODULE_FIRMWARE("amdgpu/topaz_ce.bin");
2847     MODULE_FIRMWARE("amdgpu/topaz_pfp.bin");
2848     MODULE_FIRMWARE("amdgpu/topaz_me.bin");
2849     MODULE_FIRMWARE("amdgpu/topaz_mec.bin");
2850     -MODULE_FIRMWARE("amdgpu/topaz_mec2.bin");
2851     MODULE_FIRMWARE("amdgpu/topaz_rlc.bin");
2852    
2853     MODULE_FIRMWARE("amdgpu/fiji_ce.bin");
2854     @@ -807,7 +806,8 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
2855     adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
2856     adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
2857    
2858     - if (adev->asic_type != CHIP_STONEY) {
2859     + if ((adev->asic_type != CHIP_STONEY) &&
2860     + (adev->asic_type != CHIP_TOPAZ)) {
2861     snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
2862     err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
2863     if (!err) {
2864     diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2865     index ed8abb58a785..272110cc18c2 100644
2866     --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2867     +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
2868     @@ -42,9 +42,39 @@ static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
2869    
2870     MODULE_FIRMWARE("radeon/bonaire_mc.bin");
2871     MODULE_FIRMWARE("radeon/hawaii_mc.bin");
2872     +MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
2873     +
2874     +static const u32 golden_settings_iceland_a11[] =
2875     +{
2876     + mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2877     + mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2878     + mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2879     + mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
2880     +};
2881     +
2882     +static const u32 iceland_mgcg_cgcg_init[] =
2883     +{
2884     + mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
2885     +};
2886     +
2887     +static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
2888     +{
2889     + switch (adev->asic_type) {
2890     + case CHIP_TOPAZ:
2891     + amdgpu_program_register_sequence(adev,
2892     + iceland_mgcg_cgcg_init,
2893     + (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
2894     + amdgpu_program_register_sequence(adev,
2895     + golden_settings_iceland_a11,
2896     + (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
2897     + break;
2898     + default:
2899     + break;
2900     + }
2901     +}
2902    
2903     /**
2904     - * gmc8_mc_wait_for_idle - wait for MC idle callback.
2905     + * gmc7_mc_wait_for_idle - wait for MC idle callback.
2906     *
2907     * @adev: amdgpu_device pointer
2908     *
2909     @@ -132,13 +162,20 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
2910     case CHIP_HAWAII:
2911     chip_name = "hawaii";
2912     break;
2913     + case CHIP_TOPAZ:
2914     + chip_name = "topaz";
2915     + break;
2916     case CHIP_KAVERI:
2917     case CHIP_KABINI:
2918     return 0;
2919     default: BUG();
2920     }
2921    
2922     - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2923     + if (adev->asic_type == CHIP_TOPAZ)
2924     + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
2925     + else
2926     + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
2927     +
2928     err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
2929     if (err)
2930     goto out;
2931     @@ -980,6 +1017,8 @@ static int gmc_v7_0_hw_init(void *handle)
2932     int r;
2933     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2934    
2935     + gmc_v7_0_init_golden_registers(adev);
2936     +
2937     gmc_v7_0_mc_program(adev);
2938    
2939     if (!(adev->flags & AMD_IS_APU)) {
2940     diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2941     index d39028440814..ba4ad00ba8b4 100644
2942     --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2943     +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
2944     @@ -42,9 +42,7 @@
2945     static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
2946     static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
2947    
2948     -MODULE_FIRMWARE("amdgpu/topaz_mc.bin");
2949     MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
2950     -MODULE_FIRMWARE("amdgpu/fiji_mc.bin");
2951    
2952     static const u32 golden_settings_tonga_a11[] =
2953     {
2954     @@ -75,19 +73,6 @@ static const u32 fiji_mgcg_cgcg_init[] =
2955     mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
2956     };
2957    
2958     -static const u32 golden_settings_iceland_a11[] =
2959     -{
2960     - mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2961     - mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2962     - mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
2963     - mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
2964     -};
2965     -
2966     -static const u32 iceland_mgcg_cgcg_init[] =
2967     -{
2968     - mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
2969     -};
2970     -
2971     static const u32 cz_mgcg_cgcg_init[] =
2972     {
2973     mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
2974     @@ -102,14 +87,6 @@ static const u32 stoney_mgcg_cgcg_init[] =
2975     static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
2976     {
2977     switch (adev->asic_type) {
2978     - case CHIP_TOPAZ:
2979     - amdgpu_program_register_sequence(adev,
2980     - iceland_mgcg_cgcg_init,
2981     - (const u32)ARRAY_SIZE(iceland_mgcg_cgcg_init));
2982     - amdgpu_program_register_sequence(adev,
2983     - golden_settings_iceland_a11,
2984     - (const u32)ARRAY_SIZE(golden_settings_iceland_a11));
2985     - break;
2986     case CHIP_FIJI:
2987     amdgpu_program_register_sequence(adev,
2988     fiji_mgcg_cgcg_init,
2989     @@ -229,15 +206,10 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
2990     DRM_DEBUG("\n");
2991    
2992     switch (adev->asic_type) {
2993     - case CHIP_TOPAZ:
2994     - chip_name = "topaz";
2995     - break;
2996     case CHIP_TONGA:
2997     chip_name = "tonga";
2998     break;
2999     case CHIP_FIJI:
3000     - chip_name = "fiji";
3001     - break;
3002     case CHIP_CARRIZO:
3003     case CHIP_STONEY:
3004     return 0;
3005     @@ -1003,7 +975,7 @@ static int gmc_v8_0_hw_init(void *handle)
3006    
3007     gmc_v8_0_mc_program(adev);
3008    
3009     - if (!(adev->flags & AMD_IS_APU)) {
3010     + if (adev->asic_type == CHIP_TONGA) {
3011     r = gmc_v8_0_mc_load_microcode(adev);
3012     if (r) {
3013     DRM_ERROR("Failed to load MC firmware!\n");
3014     diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3015     index 966d4b2ed9da..090486c18249 100644
3016     --- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3017     +++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
3018     @@ -432,7 +432,7 @@ static uint32_t iceland_smu_get_mask_for_fw_type(uint32_t fw_type)
3019     case AMDGPU_UCODE_ID_CP_ME:
3020     return UCODE_ID_CP_ME_MASK;
3021     case AMDGPU_UCODE_ID_CP_MEC1:
3022     - return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK | UCODE_ID_CP_MEC_JT2_MASK;
3023     + return UCODE_ID_CP_MEC_MASK | UCODE_ID_CP_MEC_JT1_MASK;
3024     case AMDGPU_UCODE_ID_CP_MEC2:
3025     return UCODE_ID_CP_MEC_MASK;
3026     case AMDGPU_UCODE_ID_RLC_G:
3027     @@ -522,12 +522,6 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
3028     return -EINVAL;
3029     }
3030    
3031     - if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_CP_MEC_JT2,
3032     - &toc->entry[toc->num_entries++])) {
3033     - DRM_ERROR("Failed to get firmware entry for MEC_JT2\n");
3034     - return -EINVAL;
3035     - }
3036     -
3037     if (iceland_smu_populate_single_firmware_entry(adev, UCODE_ID_SDMA0,
3038     &toc->entry[toc->num_entries++])) {
3039     DRM_ERROR("Failed to get firmware entry for SDMA0\n");
3040     @@ -550,8 +544,8 @@ static int iceland_smu_request_load_fw(struct amdgpu_device *adev)
3041     UCODE_ID_CP_ME_MASK |
3042     UCODE_ID_CP_PFP_MASK |
3043     UCODE_ID_CP_MEC_MASK |
3044     - UCODE_ID_CP_MEC_JT1_MASK |
3045     - UCODE_ID_CP_MEC_JT2_MASK;
3046     + UCODE_ID_CP_MEC_JT1_MASK;
3047     +
3048    
3049     if (iceland_send_msg_to_smc_with_parameter_without_waiting(adev, PPSMC_MSG_LoadUcodes, fw_to_load)) {
3050     DRM_ERROR("Fail to request SMU load ucode\n");
3051     diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3052     index 204903897b4f..63d6cb3c1110 100644
3053     --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3054     +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c
3055     @@ -122,25 +122,12 @@ static int tonga_dpm_hw_fini(void *handle)
3056    
3057     static int tonga_dpm_suspend(void *handle)
3058     {
3059     - return 0;
3060     + return tonga_dpm_hw_fini(handle);
3061     }
3062    
3063     static int tonga_dpm_resume(void *handle)
3064     {
3065     - int ret;
3066     - struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3067     -
3068     - mutex_lock(&adev->pm.mutex);
3069     -
3070     - ret = tonga_smu_start(adev);
3071     - if (ret) {
3072     - DRM_ERROR("SMU start failed\n");
3073     - goto fail;
3074     - }
3075     -
3076     -fail:
3077     - mutex_unlock(&adev->pm.mutex);
3078     - return ret;
3079     + return tonga_dpm_hw_init(handle);
3080     }
3081    
3082     static int tonga_dpm_set_clockgating_state(void *handle,
3083     diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
3084     index 2adc1c855e85..7628eb44cce2 100644
3085     --- a/drivers/gpu/drm/amd/amdgpu/vi.c
3086     +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
3087     @@ -60,6 +60,7 @@
3088     #include "vi.h"
3089     #include "vi_dpm.h"
3090     #include "gmc_v8_0.h"
3091     +#include "gmc_v7_0.h"
3092     #include "gfx_v8_0.h"
3093     #include "sdma_v2_4.h"
3094     #include "sdma_v3_0.h"
3095     @@ -1128,10 +1129,10 @@ static const struct amdgpu_ip_block_version tonga_ip_blocks[] =
3096     },
3097     {
3098     .type = AMD_IP_BLOCK_TYPE_GMC,
3099     - .major = 8,
3100     - .minor = 0,
3101     + .major = 7,
3102     + .minor = 4,
3103     .rev = 0,
3104     - .funcs = &gmc_v8_0_ip_funcs,
3105     + .funcs = &gmc_v7_0_ip_funcs,
3106     },
3107     {
3108     .type = AMD_IP_BLOCK_TYPE_IH,
3109     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
3110     index 809959d56d78..39d7e2e15c11 100644
3111     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
3112     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
3113     @@ -798,6 +798,18 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
3114     return mstb;
3115     }
3116    
3117     +static void drm_dp_free_mst_port(struct kref *kref);
3118     +
3119     +static void drm_dp_free_mst_branch_device(struct kref *kref)
3120     +{
3121     + struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
3122     + if (mstb->port_parent) {
3123     + if (list_empty(&mstb->port_parent->next))
3124     + kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
3125     + }
3126     + kfree(mstb);
3127     +}
3128     +
3129     static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3130     {
3131     struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
3132     @@ -805,6 +817,15 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3133     bool wake_tx = false;
3134    
3135     /*
3136     + * init kref again to be used by ports to remove mst branch when it is
3137     + * not needed anymore
3138     + */
3139     + kref_init(kref);
3140     +
3141     + if (mstb->port_parent && list_empty(&mstb->port_parent->next))
3142     + kref_get(&mstb->port_parent->kref);
3143     +
3144     + /*
3145     * destroy all ports - don't need lock
3146     * as there are no more references to the mst branch
3147     * device at this point.
3148     @@ -830,7 +851,8 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
3149    
3150     if (wake_tx)
3151     wake_up(&mstb->mgr->tx_waitq);
3152     - kfree(mstb);
3153     +
3154     + kref_put(kref, drm_dp_free_mst_branch_device);
3155     }
3156    
3157     static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
3158     @@ -878,6 +900,7 @@ static void drm_dp_destroy_port(struct kref *kref)
3159     * from an EDID retrieval */
3160    
3161     mutex_lock(&mgr->destroy_connector_lock);
3162     + kref_get(&port->parent->kref);
3163     list_add(&port->next, &mgr->destroy_connector_list);
3164     mutex_unlock(&mgr->destroy_connector_lock);
3165     schedule_work(&mgr->destroy_connector_work);
3166     @@ -973,17 +996,17 @@ static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u
3167     static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
3168     u8 *rad)
3169     {
3170     - int lct = port->parent->lct;
3171     + int parent_lct = port->parent->lct;
3172     int shift = 4;
3173     - int idx = lct / 2;
3174     - if (lct > 1) {
3175     - memcpy(rad, port->parent->rad, idx);
3176     - shift = (lct % 2) ? 4 : 0;
3177     + int idx = (parent_lct - 1) / 2;
3178     + if (parent_lct > 1) {
3179     + memcpy(rad, port->parent->rad, idx + 1);
3180     + shift = (parent_lct % 2) ? 4 : 0;
3181     } else
3182     rad[0] = 0;
3183    
3184     rad[idx] |= port->port_num << shift;
3185     - return lct + 1;
3186     + return parent_lct + 1;
3187     }
3188    
3189     /*
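
The rad[] array packs one 4-bit port number per hop, two hops per byte, high nibble first; the fixes above correct the byte index for odd link counts and mask reads with "& 0xf" so a neighbouring hop's nibble cannot leak into the port number. An illustrative standalone model (pack_hop/unpack_hop are invented names mirroring the corrected arithmetic):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static int pack_hop(uint8_t *rad, int parent_lct, int port_num)
{
        int idx = (parent_lct - 1) / 2;
        int shift = (parent_lct % 2) ? 4 : 0;

        rad[idx] |= (uint8_t)(port_num << shift);
        return parent_lct + 1;                  /* child's link count */
}

static int unpack_hop(const uint8_t *rad, int i)
{
        int shift = (i % 2) ? 0 : 4;
        return (rad[i / 2] >> shift) & 0xf;     /* mask off the other nibble */
}

int main(void)
{
        uint8_t rad[8];
        int lct = 1;                            /* primary branch */

        memset(rad, 0, sizeof(rad));
        lct = pack_hop(rad, lct, 2);            /* hop 1: port 2 */
        lct = pack_hop(rad, lct, 5);            /* hop 2: port 5 */

        for (int i = 0; i < lct - 1; i++)
                printf("hop %d -> port %d\n", i + 1, unpack_hop(rad, i));
        return 0;
}
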
3190     @@ -1013,18 +1036,27 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
3191     return send_link;
3192     }
3193    
3194     -static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
3195     - struct drm_dp_mst_port *port)
3196     +static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
3197     {
3198     int ret;
3199     - if (port->dpcd_rev >= 0x12) {
3200     - port->guid_valid = drm_dp_validate_guid(mstb->mgr, port->guid);
3201     - if (!port->guid_valid) {
3202     - ret = drm_dp_send_dpcd_write(mstb->mgr,
3203     - port,
3204     - DP_GUID,
3205     - 16, port->guid);
3206     - port->guid_valid = true;
3207     +
3208     + memcpy(mstb->guid, guid, 16);
3209     +
3210     + if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
3211     + if (mstb->port_parent) {
3212     + ret = drm_dp_send_dpcd_write(
3213     + mstb->mgr,
3214     + mstb->port_parent,
3215     + DP_GUID,
3216     + 16,
3217     + mstb->guid);
3218     + } else {
3219     +
3220     + ret = drm_dp_dpcd_write(
3221     + mstb->mgr->aux,
3222     + DP_GUID,
3223     + mstb->guid,
3224     + 16);
3225     }
3226     }
3227     }
3228     @@ -1039,7 +1071,7 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
3229     snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
3230     for (i = 0; i < (mstb->lct - 1); i++) {
3231     int shift = (i % 2) ? 0 : 4;
3232     - int port_num = mstb->rad[i / 2] >> shift;
3233     + int port_num = (mstb->rad[i / 2] >> shift) & 0xf;
3234     snprintf(temp, sizeof(temp), "-%d", port_num);
3235     strlcat(proppath, temp, proppath_size);
3236     }
3237     @@ -1081,7 +1113,6 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
3238     port->dpcd_rev = port_msg->dpcd_revision;
3239     port->num_sdp_streams = port_msg->num_sdp_streams;
3240     port->num_sdp_stream_sinks = port_msg->num_sdp_stream_sinks;
3241     - memcpy(port->guid, port_msg->peer_guid, 16);
3242    
3243     /* manage mstb port lists with mgr lock - take a reference
3244     for this list */
3245     @@ -1094,11 +1125,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
3246    
3247     if (old_ddps != port->ddps) {
3248     if (port->ddps) {
3249     - drm_dp_check_port_guid(mstb, port);
3250     if (!port->input)
3251     drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
3252     } else {
3253     - port->guid_valid = false;
3254     port->available_pbn = 0;
3255     }
3256     }
3257     @@ -1157,10 +1186,8 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
3258    
3259     if (old_ddps != port->ddps) {
3260     if (port->ddps) {
3261     - drm_dp_check_port_guid(mstb, port);
3262     dowork = true;
3263     } else {
3264     - port->guid_valid = false;
3265     port->available_pbn = 0;
3266     }
3267     }
3268     @@ -1190,7 +1217,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
3269    
3270     for (i = 0; i < lct - 1; i++) {
3271     int shift = (i % 2) ? 0 : 4;
3272     - int port_num = rad[i / 2] >> shift;
3273     + int port_num = (rad[i / 2] >> shift) & 0xf;
3274    
3275     list_for_each_entry(port, &mstb->ports, next) {
3276     if (port->port_num == port_num) {
3277     @@ -1210,6 +1237,48 @@ out:
3278     return mstb;
3279     }
3280    
3281     +static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
3282     + struct drm_dp_mst_branch *mstb,
3283     + uint8_t *guid)
3284     +{
3285     + struct drm_dp_mst_branch *found_mstb;
3286     + struct drm_dp_mst_port *port;
3287     +
3288     + if (memcmp(mstb->guid, guid, 16) == 0)
3289     + return mstb;
3290     +
3291     +
3292     + list_for_each_entry(port, &mstb->ports, next) {
3293     + if (!port->mstb)
3294     + continue;
3295     +
3296     + found_mstb = get_mst_branch_device_by_guid_helper(port->mstb, guid);
3297     +
3298     + if (found_mstb)
3299     + return found_mstb;
3300     + }
3301     +
3302     + return NULL;
3303     +}
3304     +
3305     +static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
3306     + struct drm_dp_mst_topology_mgr *mgr,
3307     + uint8_t *guid)
3308     +{
3309     + struct drm_dp_mst_branch *mstb;
3310     +
3311     + /* find the port by iterating down */
3312     + mutex_lock(&mgr->lock);
3313     +
3314     + mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
3315     +
3316     + if (mstb)
3317     + kref_get(&mstb->kref);
3318     +
3319     + mutex_unlock(&mgr->lock);
3320     + return mstb;
3321     +}
3322     +
3323     static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
3324     struct drm_dp_mst_branch *mstb)
3325     {
3326     @@ -1320,6 +1389,7 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
3327     struct drm_dp_sideband_msg_tx *txmsg)
3328     {
3329     struct drm_dp_mst_branch *mstb = txmsg->dst;
3330     + u8 req_type;
3331    
3332     /* both msg slots are full */
3333     if (txmsg->seqno == -1) {
3334     @@ -1336,7 +1406,13 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
3335     txmsg->seqno = 1;
3336     mstb->tx_slots[txmsg->seqno] = txmsg;
3337     }
3338     - hdr->broadcast = 0;
3339     +
3340     + req_type = txmsg->msg[0] & 0x7f;
3341     + if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
3342     + req_type == DP_RESOURCE_STATUS_NOTIFY)
3343     + hdr->broadcast = 1;
3344     + else
3345     + hdr->broadcast = 0;
3346     hdr->path_msg = txmsg->path_msg;
3347     hdr->lct = mstb->lct;
3348     hdr->lcr = mstb->lct - 1;
3349     @@ -1438,26 +1514,18 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
3350     }
3351    
3352     /* called holding qlock */
3353     -static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
3354     +static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr,
3355     + struct drm_dp_sideband_msg_tx *txmsg)
3356     {
3357     - struct drm_dp_sideband_msg_tx *txmsg;
3358     int ret;
3359    
3360     /* construct a chunk from the first msg in the tx_msg queue */
3361     - if (list_empty(&mgr->tx_msg_upq)) {
3362     - mgr->tx_up_in_progress = false;
3363     - return;
3364     - }
3365     -
3366     - txmsg = list_first_entry(&mgr->tx_msg_upq, struct drm_dp_sideband_msg_tx, next);
3367     ret = process_single_tx_qlock(mgr, txmsg, true);
3368     - if (ret == 1) {
3369     - /* up txmsgs aren't put in slots - so free after we send it */
3370     - list_del(&txmsg->next);
3371     - kfree(txmsg);
3372     - } else if (ret)
3373     +
3374     + if (ret != 1)
3375     DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
3376     - mgr->tx_up_in_progress = true;
3377     +
3378     + txmsg->dst->tx_slots[txmsg->seqno] = NULL;
3379     }
3380    
3381     static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
3382     @@ -1507,6 +1575,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
3383     txmsg->reply.u.link_addr.ports[i].num_sdp_streams,
3384     txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks);
3385     }
3386     +
3387     + drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid);
3388     +
3389     for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
3390     drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
3391     }
3392     @@ -1554,6 +1625,37 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
3393     return 0;
3394     }
3395    
3396     +static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
3397     +{
3398     + if (!mstb->port_parent)
3399     + return NULL;
3400     +
3401     + if (mstb->port_parent->mstb != mstb)
3402     + return mstb->port_parent;
3403     +
3404     + return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
3405     +}
3406     +
3407     +static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
3408     + struct drm_dp_mst_branch *mstb,
3409     + int *port_num)
3410     +{
3411     + struct drm_dp_mst_branch *rmstb = NULL;
3412     + struct drm_dp_mst_port *found_port;
3413     + mutex_lock(&mgr->lock);
3414     + if (mgr->mst_primary) {
3415     + found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
3416     +
3417     + if (found_port) {
3418     + rmstb = found_port->parent;
3419     + kref_get(&rmstb->kref);
3420     + *port_num = found_port->port_num;
3421     + }
3422     + }
3423     + mutex_unlock(&mgr->lock);
3424     + return rmstb;
3425     +}
3426     +
3427     static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3428     struct drm_dp_mst_port *port,
3429     int id,
3430     @@ -1561,11 +1663,16 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3431     {
3432     struct drm_dp_sideband_msg_tx *txmsg;
3433     struct drm_dp_mst_branch *mstb;
3434     - int len, ret;
3435     + int len, ret, port_num;
3436    
3437     + port_num = port->port_num;
3438     mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
3439     - if (!mstb)
3440     - return -EINVAL;
3441     + if (!mstb) {
3442     + mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
3443     +
3444     + if (!mstb)
3445     + return -EINVAL;
3446     + }
3447    
3448     txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
3449     if (!txmsg) {
3450     @@ -1574,7 +1681,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
3451     }
3452    
3453     txmsg->dst = mstb;
3454     - len = build_allocate_payload(txmsg, port->port_num,
3455     + len = build_allocate_payload(txmsg, port_num,
3456     id,
3457     pbn);
3458    
3459     @@ -1844,11 +1951,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
3460     drm_dp_encode_up_ack_reply(txmsg, req_type);
3461    
3462     mutex_lock(&mgr->qlock);
3463     - list_add_tail(&txmsg->next, &mgr->tx_msg_upq);
3464     - if (!mgr->tx_up_in_progress) {
3465     - process_single_up_tx_qlock(mgr);
3466     - }
3467     +
3468     + process_single_up_tx_qlock(mgr, txmsg);
3469     +
3470     mutex_unlock(&mgr->qlock);
3471     +
3472     + kfree(txmsg);
3473     return 0;
3474     }
3475    
3476     @@ -1927,31 +2035,17 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
3477     mgr->mst_primary = mstb;
3478     kref_get(&mgr->mst_primary->kref);
3479    
3480     - {
3481     - struct drm_dp_payload reset_pay;
3482     - reset_pay.start_slot = 0;
3483     - reset_pay.num_slots = 0x3f;
3484     - drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3485     - }
3486     -
3487     ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
3488     - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3489     + DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
3490     if (ret < 0) {
3491     goto out_unlock;
3492     }
3493    
3494     -
3495     - /* sort out guid */
3496     - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, mgr->guid, 16);
3497     - if (ret != 16) {
3498     - DRM_DEBUG_KMS("failed to read DP GUID %d\n", ret);
3499     - goto out_unlock;
3500     - }
3501     -
3502     - mgr->guid_valid = drm_dp_validate_guid(mgr, mgr->guid);
3503     - if (!mgr->guid_valid) {
3504     - ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, mgr->guid, 16);
3505     - mgr->guid_valid = true;
3506     + {
3507     + struct drm_dp_payload reset_pay;
3508     + reset_pay.start_slot = 0;
3509     + reset_pay.num_slots = 0x3f;
3510     + drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
3511     }
3512    
3513     queue_work(system_long_wq, &mgr->work);
3514     @@ -2145,28 +2239,51 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
3515    
3516     if (mgr->up_req_recv.have_eomt) {
3517     struct drm_dp_sideband_msg_req_body msg;
3518     - struct drm_dp_mst_branch *mstb;
3519     + struct drm_dp_mst_branch *mstb = NULL;
3520     bool seqno;
3521     - mstb = drm_dp_get_mst_branch_device(mgr,
3522     - mgr->up_req_recv.initial_hdr.lct,
3523     - mgr->up_req_recv.initial_hdr.rad);
3524     - if (!mstb) {
3525     - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3526     - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3527     - return 0;
3528     +
3529     + if (!mgr->up_req_recv.initial_hdr.broadcast) {
3530     + mstb = drm_dp_get_mst_branch_device(mgr,
3531     + mgr->up_req_recv.initial_hdr.lct,
3532     + mgr->up_req_recv.initial_hdr.rad);
3533     + if (!mstb) {
3534     + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3535     + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3536     + return 0;
3537     + }
3538     }
3539    
3540     seqno = mgr->up_req_recv.initial_hdr.seqno;
3541     drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg);
3542    
3543     if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
3544     - drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
3545     + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3546     +
3547     + if (!mstb)
3548     + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid);
3549     +
3550     + if (!mstb) {
3551     + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3552     + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3553     + return 0;
3554     + }
3555     +
3556     drm_dp_update_port(mstb, &msg.u.conn_stat);
3557     +
3558     DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
3559     (*mgr->cbs->hotplug)(mgr);
3560    
3561     } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
3562     - drm_dp_send_up_ack_reply(mgr, mstb, msg.req_type, seqno, false);
3563     + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
3564     + if (!mstb)
3565     + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid);
3566     +
3567     + if (!mstb) {
3568     + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct);
3569     + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
3570     + return 0;
3571     + }
3572     +
3573     DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
3574     }
3575    
3576     @@ -2346,6 +2463,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp
3577     DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
3578     if (pbn == port->vcpi.pbn) {
3579     *slots = port->vcpi.num_slots;
3580     + drm_dp_put_port(port);
3581     return true;
3582     }
3583     }
3584     @@ -2505,32 +2623,31 @@ EXPORT_SYMBOL(drm_dp_check_act_status);
3585     */
3586     int drm_dp_calc_pbn_mode(int clock, int bpp)
3587     {
3588     - fixed20_12 pix_bw;
3589     - fixed20_12 fbpp;
3590     - fixed20_12 result;
3591     - fixed20_12 margin, tmp;
3592     - u32 res;
3593     -
3594     - pix_bw.full = dfixed_const(clock);
3595     - fbpp.full = dfixed_const(bpp);
3596     - tmp.full = dfixed_const(8);
3597     - fbpp.full = dfixed_div(fbpp, tmp);
3598     -
3599     - result.full = dfixed_mul(pix_bw, fbpp);
3600     - margin.full = dfixed_const(54);
3601     - tmp.full = dfixed_const(64);
3602     - margin.full = dfixed_div(margin, tmp);
3603     - result.full = dfixed_div(result, margin);
3604     -
3605     - margin.full = dfixed_const(1006);
3606     - tmp.full = dfixed_const(1000);
3607     - margin.full = dfixed_div(margin, tmp);
3608     - result.full = dfixed_mul(result, margin);
3609     -
3610     - result.full = dfixed_div(result, tmp);
3611     - result.full = dfixed_ceil(result);
3612     - res = dfixed_trunc(result);
3613     - return res;
3614     + u64 kbps;
3615     + s64 peak_kbps;
3616     + u32 numerator;
3617     + u32 denominator;
3618     +
3619     + kbps = clock * bpp;
3620     +
3621     + /*
3622     + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006
3623     + * The unit of 54/64 Mbytes/sec is an arbitrary unit chosen as a
3624     + * common multiplier to render an integer PBN for all link rate/lane
3625     + * count combinations.
3626     + * Calculate:
3627     + * peak_kbps *= (1006/1000)
3628     + * peak_kbps *= (64/54)
3629     + * peak_kbps *= 8 convert to bytes
3630     + */
3631     +
3632     + numerator = 64 * 1006;
3633     + denominator = 54 * 8 * 1000 * 1000;
3634     +
3635     + kbps *= numerator;
3636     + peak_kbps = drm_fixp_from_fraction(kbps, denominator);
3637     +
3638     + return drm_fixp2int_ceil(peak_kbps);
3639     }
3640     EXPORT_SYMBOL(drm_dp_calc_pbn_mode);
3641    
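
drm_dp_calc_pbn_mode() now computes PBN with plain 64-bit fixed-point math instead of the fixed20_12 helpers. As a worked check, the same results can be reproduced with ordinary integer ceiling division (a re-derivation for illustration, not the kernel's drm_fixp code); the three cases match the expected values in test_calc_pbn_mode() in the next hunk:

#include <stdio.h>
#include <stdint.h>

/* PBN = ceil(clock_kHz * bpp * (64 * 1006) / (54 * 8 * 1000 * 1000)) */
static int calc_pbn(uint64_t clock_khz, uint64_t bpp)
{
        uint64_t num = clock_khz * bpp * 64ULL * 1006ULL;
        uint64_t den = 54ULL * 8 * 1000 * 1000;

        return (int)((num + den - 1) / den);    /* ceiling division */
}

int main(void)
{
        printf("154000 kHz @ 30 bpp -> %d PBN (expect 689)\n",  calc_pbn(154000, 30));
        printf("234000 kHz @ 30 bpp -> %d PBN (expect 1047)\n", calc_pbn(234000, 30));
        printf("297000 kHz @ 24 bpp -> %d PBN (expect 1063)\n", calc_pbn(297000, 24));
        return 0;
}
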
3642     @@ -2538,11 +2655,23 @@ static int test_calc_pbn_mode(void)
3643     {
3644     int ret;
3645     ret = drm_dp_calc_pbn_mode(154000, 30);
3646     - if (ret != 689)
3647     + if (ret != 689) {
3648     + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3649     + 154000, 30, 689, ret);
3650     return -EINVAL;
3651     + }
3652     ret = drm_dp_calc_pbn_mode(234000, 30);
3653     - if (ret != 1047)
3654     + if (ret != 1047) {
3655     + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3656     + 234000, 30, 1047, ret);
3657     + return -EINVAL;
3658     + }
3659     + ret = drm_dp_calc_pbn_mode(297000, 24);
3660     + if (ret != 1063) {
3661     + DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n",
3662     + 297000, 24, 1063, ret);
3663     return -EINVAL;
3664     + }
3665     return 0;
3666     }
3667    
3668     @@ -2683,6 +2812,13 @@ static void drm_dp_tx_work(struct work_struct *work)
3669     mutex_unlock(&mgr->qlock);
3670     }
3671    
3672     +static void drm_dp_free_mst_port(struct kref *kref)
3673     +{
3674     + struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
3675     + kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
3676     + kfree(port);
3677     +}
3678     +
3679     static void drm_dp_destroy_connector_work(struct work_struct *work)
3680     {
3681     struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
3682     @@ -2703,13 +2839,22 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
3683     list_del(&port->next);
3684     mutex_unlock(&mgr->destroy_connector_lock);
3685    
3686     + kref_init(&port->kref);
3687     + INIT_LIST_HEAD(&port->next);
3688     +
3689     mgr->cbs->destroy_connector(mgr, port->connector);
3690    
3691     drm_dp_port_teardown_pdt(port, port->pdt);
3692    
3693     - if (!port->input && port->vcpi.vcpi > 0)
3694     - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3695     - kfree(port);
3696     + if (!port->input && port->vcpi.vcpi > 0) {
3697     + if (mgr->mst_state) {
3698     + drm_dp_mst_reset_vcpi_slots(mgr, port);
3699     + drm_dp_update_payload_part1(mgr);
3700     + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
3701     + }
3702     + }
3703     +
3704     + kref_put(&port->kref, drm_dp_free_mst_port);
3705     send_hotplug = true;
3706     }
3707     if (send_hotplug)
3708     @@ -2736,7 +2881,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
3709     mutex_init(&mgr->qlock);
3710     mutex_init(&mgr->payload_lock);
3711     mutex_init(&mgr->destroy_connector_lock);
3712     - INIT_LIST_HEAD(&mgr->tx_msg_upq);
3713     INIT_LIST_HEAD(&mgr->tx_msg_downq);
3714     INIT_LIST_HEAD(&mgr->destroy_connector_list);
3715     INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
3716     diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
3717     index 607f493ae801..8090989185b2 100644
3718     --- a/drivers/gpu/drm/drm_irq.c
3719     +++ b/drivers/gpu/drm/drm_irq.c
3720     @@ -221,6 +221,64 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
3721     diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
3722     }
3723    
3724     + /*
3725     + * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
3726     + * interval? If so then vblank irqs keep running and it will likely
3727     + * happen that the hardware vblank counter is not trustworthy as it
3728     + * might reset at some point in that interval and vblank timestamps
3729     + * are not trustworthy either in that interval. IOW, this can result
3730     + * in a bogus diff >> 1 which must be avoided as it would cause
3731     + * random large forward jumps of the software vblank counter.
3732     + */
3733     + if (diff > 1 && (vblank->inmodeset & 0x2)) {
3734     + DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u"
3735     + " due to pre-modeset.\n", pipe, diff);
3736     + diff = 1;
3737     + }
3738     +
3739     + /*
3740     + * FIXME: Need to replace this hack with proper seqlocks.
3741     + *
3742     + * Restrict the bump of the software vblank counter to a safe maximum
3743     + * value of +1 whenever there is the possibility that concurrent readers
3744     + * of vblank timestamps could be active at the moment, as the current
3745     + * implementation of the timestamp caching and updating is not safe
3746     + * against concurrent readers for calls to store_vblank() with a bump
3747     + * of anything but +1. A bump != 1 would very likely return corrupted
3748     + * timestamps to userspace, because the same slot in the cache could
3749     + * be concurrently written by store_vblank() and read by one of those
3750     + * readers without the read-retry logic detecting the collision.
3751     + *
3752     + * Concurrent readers can exist when we are called from the
3753     + * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
3754     + * irq callers. However, all those calls to us are happening with the
3755     + * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
3756     + * can't increase while we are executing. Therefore a zero refcount at
3757     + * this point is safe for arbitrary counter bumps if we are called
3758     + * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
3759     + * we must also accept a refcount of 1, as whenever we are called from
3760     + * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
3761     + * we must let that one pass through in order to not lose vblank counts
3762     + * during vblank irq off - which would completely defeat the whole
3763     + * point of this routine.
3764     + *
3765     + * Whenever we are called from vblank irq, we have to assume concurrent
3766     + * readers exist or can show up any time during our execution, even if
3767     + * the refcount is currently zero, as vblank irqs are usually only
3768     + * enabled due to the presence of readers, and because when we are called
3769     + * from vblank irq we can't hold the vbl_lock to protect us from sudden
3770     + * bumps in vblank refcount. Therefore also restrict bumps to +1 when
3771     + * called from vblank irq.
3772     + */
3773     + if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
3774     + (flags & DRM_CALLED_FROM_VBLIRQ))) {
3775     + DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diff=%u "
3776     + "refcount %u, vblirq %u\n", pipe, diff,
3777     + atomic_read(&vblank->refcount),
3778     + (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
3779     + diff = 1;
3780     + }
3781     +
3782     DRM_DEBUG_VBL("updating vblank count on crtc %u:"
3783     " current=%u, diff=%u, hw=%u hw_last=%u\n",
3784     pipe, vblank->count, diff, cur_vblank, vblank->last);
3785     @@ -1313,7 +1371,13 @@ void drm_vblank_off(struct drm_device *dev, unsigned int pipe)
3786     spin_lock_irqsave(&dev->event_lock, irqflags);
3787    
3788     spin_lock(&dev->vbl_lock);
3789     - vblank_disable_and_save(dev, pipe);
3790     + DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
3791     + pipe, vblank->enabled, vblank->inmodeset);
3792     +
3793     + /* Avoid redundant vblank disables without previous drm_vblank_on(). */
3794     + if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
3795     + vblank_disable_and_save(dev, pipe);
3796     +
3797     wake_up(&vblank->queue);
3798    
3799     /*
3800     @@ -1415,6 +1479,9 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
3801     return;
3802    
3803     spin_lock_irqsave(&dev->vbl_lock, irqflags);
3804     + DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
3805     + pipe, vblank->enabled, vblank->inmodeset);
3806     +
3807     /* Drop our private "prevent drm_vblank_get" refcount */
3808     if (vblank->inmodeset) {
3809     atomic_dec(&vblank->refcount);
3810     @@ -1427,8 +1494,7 @@ void drm_vblank_on(struct drm_device *dev, unsigned int pipe)
3811     * re-enable interrupts if there are users left, or the
3812     * user wishes vblank interrupts to be enabled all the time.
3813     */
3814     - if (atomic_read(&vblank->refcount) != 0 ||
3815     - (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
3816     + if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
3817     WARN_ON(drm_vblank_enable(dev, pipe));
3818     spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
3819     }
3820     @@ -1523,6 +1589,7 @@ void drm_vblank_post_modeset(struct drm_device *dev, unsigned int pipe)
3821     if (vblank->inmodeset) {
3822     spin_lock_irqsave(&dev->vbl_lock, irqflags);
3823     dev->vblank_disable_allowed = true;
3824     + drm_reset_vblank_timestamp(dev, pipe);
3825     spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
3826    
3827     if (vblank->inmodeset & 0x2)
3828     diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
3829     index c707fa6fca85..e3bdc8b1c32c 100644
3830     --- a/drivers/gpu/drm/gma500/gem.c
3831     +++ b/drivers/gpu/drm/gma500/gem.c
3832     @@ -130,7 +130,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size,
3833     return ret;
3834     }
3835     /* We have the initial and handle reference but need only one now */
3836     - drm_gem_object_unreference(&r->gem);
3837     + drm_gem_object_unreference_unlocked(&r->gem);
3838     *handlep = handle;
3839     return 0;
3840     }
3841     diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
3842     index b4741d121a74..61fcb3b22297 100644
3843     --- a/drivers/gpu/drm/i915/i915_dma.c
3844     +++ b/drivers/gpu/drm/i915/i915_dma.c
3845     @@ -402,6 +402,8 @@ static int i915_load_modeset_init(struct drm_device *dev)
3846     if (ret)
3847     goto cleanup_gem_stolen;
3848    
3849     + intel_setup_gmbus(dev);
3850     +
3851     /* Important: The output setup functions called by modeset_init need
3852     * working irqs for e.g. gmbus and dp aux transfers. */
3853     intel_modeset_init(dev);
3854     @@ -451,6 +453,7 @@ cleanup_gem:
3855     cleanup_irq:
3856     intel_guc_ucode_fini(dev);
3857     drm_irq_uninstall(dev);
3858     + intel_teardown_gmbus(dev);
3859     cleanup_gem_stolen:
3860     i915_gem_cleanup_stolen(dev);
3861     cleanup_vga_switcheroo:
3862     @@ -1028,7 +1031,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
3863    
3864     /* Try to make sure MCHBAR is enabled before poking at it */
3865     intel_setup_mchbar(dev);
3866     - intel_setup_gmbus(dev);
3867     intel_opregion_setup(dev);
3868    
3869     i915_gem_load(dev);
3870     @@ -1099,7 +1101,6 @@ out_gem_unload:
3871     if (dev->pdev->msi_enabled)
3872     pci_disable_msi(dev->pdev);
3873    
3874     - intel_teardown_gmbus(dev);
3875     intel_teardown_mchbar(dev);
3876     pm_qos_remove_request(&dev_priv->pm_qos);
3877     destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
3878     @@ -1198,7 +1199,6 @@ int i915_driver_unload(struct drm_device *dev)
3879    
3880     intel_csr_ucode_fini(dev);
3881    
3882     - intel_teardown_gmbus(dev);
3883     intel_teardown_mchbar(dev);
3884    
3885     destroy_workqueue(dev_priv->hotplug.dp_wq);
3886     diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
3887     index 02ceb7a4b481..0433d25f9d23 100644
3888     --- a/drivers/gpu/drm/i915/i915_gem_context.c
3889     +++ b/drivers/gpu/drm/i915/i915_gem_context.c
3890     @@ -340,6 +340,10 @@ void i915_gem_context_reset(struct drm_device *dev)
3891     i915_gem_context_unreference(lctx);
3892     ring->last_context = NULL;
3893     }
3894     +
3895     + /* Force the GPU state to be reinitialised on enabling */
3896     + if (ring->default_context)
3897     + ring->default_context->legacy_hw_ctx.initialized = false;
3898     }
3899     }
3900    
3901     @@ -708,7 +712,7 @@ static int do_switch(struct drm_i915_gem_request *req)
3902     if (ret)
3903     goto unpin_out;
3904    
3905     - if (!to->legacy_hw_ctx.initialized) {
3906     + if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
3907     hw_flags |= MI_RESTORE_INHIBIT;
3908     /* NB: If we inhibit the restore, the context is not allowed to
3909     * die because future work may end up depending on valid address
3910     diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
3911     index 0d228f909dcb..0f42a2782afc 100644
3912     --- a/drivers/gpu/drm/i915/i915_irq.c
3913     +++ b/drivers/gpu/drm/i915/i915_irq.c
3914     @@ -2354,9 +2354,13 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
3915     spt_irq_handler(dev, pch_iir);
3916     else
3917     cpt_irq_handler(dev, pch_iir);
3918     - } else
3919     - DRM_ERROR("The master control interrupt lied (SDE)!\n");
3920     -
3921     + } else {
3922     + /*
3923     + * Like on previous PCH there seems to be something
3924     + * fishy going on with forwarding PCH interrupts.
3925     + */
3926     + DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
3927     + }
3928     }
3929    
3930     I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3931     diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
3932     index a6752a61d99f..7e6158b889da 100644
3933     --- a/drivers/gpu/drm/i915/intel_ddi.c
3934     +++ b/drivers/gpu/drm/i915/intel_ddi.c
3935     @@ -1582,7 +1582,8 @@ skl_ddi_pll_select(struct intel_crtc *intel_crtc,
3936     DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
3937     DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
3938     wrpll_params.central_freq;
3939     - } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
3940     + } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3941     + intel_encoder->type == INTEL_OUTPUT_DP_MST) {
3942     switch (crtc_state->port_clock / 2) {
3943     case 81000:
3944     ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
3945     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
3946     index 32cf97346978..f859a5b87ed4 100644
3947     --- a/drivers/gpu/drm/i915/intel_display.c
3948     +++ b/drivers/gpu/drm/i915/intel_display.c
3949     @@ -11930,11 +11930,21 @@ connected_sink_compute_bpp(struct intel_connector *connector,
3950     pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
3951     }
3952    
3953     - /* Clamp bpp to 8 on screens without EDID 1.4 */
3954     - if (connector->base.display_info.bpc == 0 && bpp > 24) {
3955     - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n",
3956     - bpp);
3957     - pipe_config->pipe_bpp = 24;
3958     + /* Clamp bpp to default limit on screens without EDID 1.4 */
3959     + if (connector->base.display_info.bpc == 0) {
3960     + int type = connector->base.connector_type;
3961     + int clamp_bpp = 24;
3962     +
3963     + /* Fall back to 18 bpp when DP sink capability is unknown. */
3964     + if (type == DRM_MODE_CONNECTOR_DisplayPort ||
3965     + type == DRM_MODE_CONNECTOR_eDP)
3966     + clamp_bpp = 18;
3967     +
3968     + if (bpp > clamp_bpp) {
3969     + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
3970     + bpp, clamp_bpp);
3971     + pipe_config->pipe_bpp = clamp_bpp;
3972     + }
3973     }
3974     }
3975    
3976     @@ -13537,11 +13547,12 @@ intel_check_primary_plane(struct drm_plane *plane,
3977     int max_scale = DRM_PLANE_HELPER_NO_SCALING;
3978     bool can_position = false;
3979    
3980     - /* use scaler when colorkey is not required */
3981     - if (INTEL_INFO(plane->dev)->gen >= 9 &&
3982     - state->ckey.flags == I915_SET_COLORKEY_NONE) {
3983     - min_scale = 1;
3984     - max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
3985     + if (INTEL_INFO(plane->dev)->gen >= 9) {
3986     + /* use scaler when colorkey is not required */
3987     + if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
3988     + min_scale = 1;
3989     + max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
3990     + }
3991     can_position = true;
3992     }
3993    
3994     @@ -15565,6 +15576,8 @@ void intel_modeset_cleanup(struct drm_device *dev)
3995     mutex_lock(&dev->struct_mutex);
3996     intel_cleanup_gt_powersave(dev);
3997     mutex_unlock(&dev->struct_mutex);
3998     +
3999     + intel_teardown_gmbus(dev);
4000     }
4001    
4002     /*
4003     diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4004     index a5e99ac305da..a8912aecc31f 100644
4005     --- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4006     +++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
4007     @@ -207,7 +207,12 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
4008     gpio = *data++;
4009    
4010     /* pull up/down */
4011     - action = *data++;
4012     + action = *data++ & 1;
4013     +
4014     + if (gpio >= ARRAY_SIZE(gtable)) {
4015     + DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
4016     + goto out;
4017     + }
4018    
4019     function = gtable[gpio].function_reg;
4020     pad = gtable[gpio].pad_reg;
4021     @@ -226,6 +231,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
4022     vlv_gpio_nc_write(dev_priv, pad, val);
4023     mutex_unlock(&dev_priv->sb_lock);
4024    
4025     +out:
4026     return data;
4027     }
4028    
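
mipi_exec_gpio() now masks the action byte down to its single valid bit and rejects gpio indices beyond gtable before using them as a table subscript. A standalone sketch of that validate-before-index pattern (the table contents here are invented):

#include <stdio.h>
#include <stdint.h>

struct gpio_entry { uint32_t function_reg, pad_reg; };

static const struct gpio_entry gtable[] = {
        { 0x4000, 0x4008 },
        { 0x4010, 0x4018 },
};

static const struct gpio_entry *lookup(uint8_t gpio)
{
        if (gpio >= sizeof(gtable) / sizeof(gtable[0])) {
                fprintf(stderr, "unknown gpio %u\n", gpio);
                return NULL;            /* reject out-of-range index */
        }
        return &gtable[gpio];
}

int main(void)
{
        const struct gpio_entry *e = lookup(1);
        if (e)
                printf("gpio 1 -> function_reg 0x%x\n", e->function_reg);
        lookup(7);                      /* out of range: rejected */
        return 0;
}
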
4029     diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
4030     index b17785719598..d7a6437d9da2 100644
4031     --- a/drivers/gpu/drm/i915/intel_hotplug.c
4032     +++ b/drivers/gpu/drm/i915/intel_hotplug.c
4033     @@ -468,9 +468,14 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
4034     list_for_each_entry(connector, &mode_config->connector_list, head) {
4035     struct intel_connector *intel_connector = to_intel_connector(connector);
4036     connector->polled = intel_connector->polled;
4037     - if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4038     - connector->polled = DRM_CONNECTOR_POLL_HPD;
4039     +
4040     + /* MST has a dynamic intel_connector->encoder and its reprobing
4041     + * is all handled by the MST helpers. */
4042     if (intel_connector->mst_port)
4043     + continue;
4044     +
4045     + if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
4046     + intel_connector->encoder->hpd_pin > HPD_NONE)
4047     connector->polled = DRM_CONNECTOR_POLL_HPD;
4048     }
4049    
4050     diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
4051     index 8324654037b6..f3bee54c414f 100644
4052     --- a/drivers/gpu/drm/i915/intel_i2c.c
4053     +++ b/drivers/gpu/drm/i915/intel_i2c.c
4054     @@ -675,7 +675,7 @@ int intel_setup_gmbus(struct drm_device *dev)
4055     return 0;
4056    
4057     err:
4058     - while (--pin) {
4059     + while (pin--) {
4060     if (!intel_gmbus_is_valid_pin(dev_priv, pin))
4061     continue;
4062    
4063     diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
4064     index 88e12bdf79e2..d69547a65dbb 100644
4065     --- a/drivers/gpu/drm/i915/intel_lrc.c
4066     +++ b/drivers/gpu/drm/i915/intel_lrc.c
4067     @@ -1706,6 +1706,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
4068     if (flush_domains) {
4069     flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4070     flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4071     + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4072     flags |= PIPE_CONTROL_FLUSH_ENABLE;
4073     }
4074    
4075     diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
4076     index 9461a238f5d5..f6b2a814e629 100644
4077     --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
4078     +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
4079     @@ -347,6 +347,7 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
4080     if (flush_domains) {
4081     flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4082     flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4083     + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4084     flags |= PIPE_CONTROL_FLUSH_ENABLE;
4085     }
4086     if (invalidate_domains) {
4087     @@ -419,6 +420,7 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
4088     if (flush_domains) {
4089     flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
4090     flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
4091     + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
4092     flags |= PIPE_CONTROL_FLUSH_ENABLE;
4093     }
4094     if (invalidate_domains) {
4095     diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
4096     index 2e7cbe933533..2a5ed7460354 100644
4097     --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
4098     +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
4099     @@ -969,10 +969,13 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
4100    
4101     NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
4102    
4103     + mutex_lock(&drm->dev->mode_config.mutex);
4104     if (plugged)
4105     drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
4106     else
4107     drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
4108     + mutex_unlock(&drm->dev->mode_config.mutex);
4109     +
4110     drm_helper_hpd_irq_event(connector->dev);
4111     }
4112    
4113     diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
4114     index 64c8d932d5f1..58a3f7cf2fb3 100644
4115     --- a/drivers/gpu/drm/nouveau/nouveau_display.c
4116     +++ b/drivers/gpu/drm/nouveau/nouveau_display.c
4117     @@ -634,10 +634,6 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
4118     nv_crtc->lut.depth = 0;
4119     }
4120    
4121     - /* Make sure that drm and hw vblank irqs get resumed if needed. */
4122     - for (head = 0; head < dev->mode_config.num_crtc; head++)
4123     - drm_vblank_on(dev, head);
4124     -
4125     /* This should ensure we don't hit a locking problem when someone
4126     * wakes us up via a connector. We should never go into suspend
4127     * while the display is on anyways.
4128     @@ -647,6 +643,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
4129    
4130     drm_helper_resume_force_mode(dev);
4131    
4132     + /* Make sure that drm and hw vblank irqs get resumed if needed. */
4133     + for (head = 0; head < dev->mode_config.num_crtc; head++)
4134     + drm_vblank_on(dev, head);
4135     +
4136     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4137     struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
4138    
4139     diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
4140     index 60e32c4e4e49..35ecc0d0458f 100644
4141     --- a/drivers/gpu/drm/nouveau/nouveau_platform.c
4142     +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
4143     @@ -24,7 +24,7 @@
4144     static int nouveau_platform_probe(struct platform_device *pdev)
4145     {
4146     const struct nvkm_device_tegra_func *func;
4147     - struct nvkm_device *device;
4148     + struct nvkm_device *device = NULL;
4149     struct drm_device *drm;
4150     int ret;
4151    
4152     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4153     index 7f8a42721eb2..e7e581d6a8ff 100644
4154     --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4155     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
4156     @@ -252,32 +252,40 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
4157    
4158     if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
4159     return -ENOMEM;
4160     - *pdevice = &tdev->device;
4161     +
4162     tdev->func = func;
4163     tdev->pdev = pdev;
4164     tdev->irq = -1;
4165    
4166     tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
4167     - if (IS_ERR(tdev->vdd))
4168     - return PTR_ERR(tdev->vdd);
4169     + if (IS_ERR(tdev->vdd)) {
4170     + ret = PTR_ERR(tdev->vdd);
4171     + goto free;
4172     + }
4173    
4174     tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
4175     - if (IS_ERR(tdev->rst))
4176     - return PTR_ERR(tdev->rst);
4177     + if (IS_ERR(tdev->rst)) {
4178     + ret = PTR_ERR(tdev->rst);
4179     + goto free;
4180     + }
4181    
4182     tdev->clk = devm_clk_get(&pdev->dev, "gpu");
4183     - if (IS_ERR(tdev->clk))
4184     - return PTR_ERR(tdev->clk);
4185     + if (IS_ERR(tdev->clk)) {
4186     + ret = PTR_ERR(tdev->clk);
4187     + goto free;
4188     + }
4189    
4190     tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
4191     - if (IS_ERR(tdev->clk_pwr))
4192     - return PTR_ERR(tdev->clk_pwr);
4193     + if (IS_ERR(tdev->clk_pwr)) {
4194     + ret = PTR_ERR(tdev->clk_pwr);
4195     + goto free;
4196     + }
4197    
4198     nvkm_device_tegra_probe_iommu(tdev);
4199    
4200     ret = nvkm_device_tegra_power_up(tdev);
4201     if (ret)
4202     - return ret;
4203     + goto remove;
4204    
4205     tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
4206     ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
4207     @@ -285,9 +293,19 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
4208     cfg, dbg, detect, mmio, subdev_mask,
4209     &tdev->device);
4210     if (ret)
4211     - return ret;
4212     + goto powerdown;
4213     +
4214     + *pdevice = &tdev->device;
4215    
4216     return 0;
4217     +
4218     +powerdown:
4219     + nvkm_device_tegra_power_down(tdev);
4220     +remove:
4221     + nvkm_device_tegra_remove_iommu(tdev);
4222     +free:
4223     + kfree(tdev);
4224     + return ret;
4225     }
4226     #else
4227     int
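The nvkm_device_tegra_new() rework above is the standard kernel unwind idiom: acquire resources in order and, on failure, jump to a label that releases everything acquired so far in reverse order. It also defers the *pdevice assignment until every step has succeeded, so callers never see a half-constructed object. A minimal userspace sketch of the same pattern (all names here are illustrative, not the driver's):

    #include <stdlib.h>

    struct dev { int iommu; int powered; };

    static int probe_iommu(struct dev *d)   { d->iommu = 1; return 0; }
    static void remove_iommu(struct dev *d) { d->iommu = 0; }
    static int power_up(struct dev *d)      { d->powered = 1; return 0; }

    static int device_new(struct dev **out)
    {
        struct dev *d = calloc(1, sizeof(*d));
        int ret;

        if (!d)
            return -1;

        ret = probe_iommu(d);
        if (ret)
            goto free;

        ret = power_up(d);
        if (ret)
            goto remove;

        *out = d;            /* publish only on full success */
        return 0;

    remove:
        remove_iommu(d);
    free:
        free(d);
        return ret;
    }

    int main(void)
    {
        struct dev *d = NULL;
        int ret = device_new(&d);

        free(d);
        return ret;
    }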
4228     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4229     index 74e2f7c6c07e..9688970eca47 100644
4230     --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4231     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
4232     @@ -328,6 +328,7 @@ nvkm_dp_train(struct work_struct *w)
4233     .outp = outp,
4234     }, *dp = &_dp;
4235     u32 datarate = 0;
4236     + u8 pwr;
4237     int ret;
4238    
4239     if (!outp->base.info.location && disp->func->sor.magic)
4240     @@ -355,6 +356,15 @@ nvkm_dp_train(struct work_struct *w)
4241     /* disable link interrupt handling during link training */
4242     nvkm_notify_put(&outp->irq);
4243    
4244     + /* ensure sink is not in a low-power state */
4245     + if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
4246     + if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
4247     + pwr &= ~DPCD_SC00_SET_POWER;
4248     + pwr |= DPCD_SC00_SET_POWER_D0;
4249     + nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
4250     + }
4251     + }
4252     +
4253     /* enable down-spreading and execute pre-train script from vbios */
4254     dp_link_train_init(dp, outp->dpcd[3] & 0x01);
4255    
4256     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4257     index 9596290329c7..6e10c5e0ef11 100644
4258     --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4259     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
4260     @@ -71,5 +71,11 @@
4261     #define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
4262     #define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
4263    
4264     +/* DPCD Sink Control */
4265     +#define DPCD_SC00 0x00600
4266     +#define DPCD_SC00_SET_POWER 0x03
4267     +#define DPCD_SC00_SET_POWER_D0 0x01
4268     +#define DPCD_SC00_SET_POWER_D3 0x03
4269     +
4270     void nvkm_dp_train(struct work_struct *);
4271     #endif
4272     diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
4273     index 2ae8577497ca..7c2e78201ead 100644
4274     --- a/drivers/gpu/drm/qxl/qxl_ioctl.c
4275     +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
4276     @@ -168,7 +168,8 @@ static int qxl_process_single_command(struct qxl_device *qdev,
4277     cmd->command_size))
4278     return -EFAULT;
4279    
4280     - reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
4281     + reloc_info = kmalloc_array(cmd->relocs_num,
4282     + sizeof(struct qxl_reloc_info), GFP_KERNEL);
4283     if (!reloc_info)
4284     return -ENOMEM;
4285    
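The qxl change swaps an open-coded kmalloc(size * count) for kmalloc_array(), which fails the allocation instead of wrapping when the userspace-supplied relocs_num would overflow the multiplication. A rough userspace analogue of the overflow check (illustrative only):

    #include <stdint.h>
    #include <stdlib.h>

    /* refuse the allocation if n * size would wrap around SIZE_MAX */
    static void *alloc_array(size_t n, size_t size)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL;
        return malloc(n * size);
    }

    int main(void)
    {
        void *p = alloc_array((size_t)-1, 16);  /* overflows: returns NULL */

        free(p);
        return 0;
    }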
4286     diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
4287     index 752072771388..367a916f364e 100644
4288     --- a/drivers/gpu/drm/radeon/dce6_afmt.c
4289     +++ b/drivers/gpu/drm/radeon/dce6_afmt.c
4290     @@ -301,6 +301,14 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
4291     * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
4292     */
4293     if (ASIC_IS_DCE8(rdev)) {
4294     + unsigned int div = (RREG32(DENTIST_DISPCLK_CNTL) &
4295     + DENTIST_DPREFCLK_WDIVIDER_MASK) >>
4296     + DENTIST_DPREFCLK_WDIVIDER_SHIFT;
4297     + div = radeon_audio_decode_dfs_div(div);
4298     +
4299     + if (div)
4300     + clock = clock * 100 / div;
4301     +
4302     WREG32(DCE8_DCCG_AUDIO_DTO1_PHASE, 24000);
4303     WREG32(DCE8_DCCG_AUDIO_DTO1_MODULE, clock);
4304     } else {
4305     diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
4306     index 9953356fe263..3cf04a2f44bb 100644
4307     --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
4308     +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
4309     @@ -289,6 +289,16 @@ void dce4_dp_audio_set_dto(struct radeon_device *rdev,
4310     * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
4311     * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
4312     */
4313     + if (ASIC_IS_DCE41(rdev)) {
4314     + unsigned int div = (RREG32(DCE41_DENTIST_DISPCLK_CNTL) &
4315     + DENTIST_DPREFCLK_WDIVIDER_MASK) >>
4316     + DENTIST_DPREFCLK_WDIVIDER_SHIFT;
4317     + div = radeon_audio_decode_dfs_div(div);
4318     +
4319     + if (div)
4320     + clock = 100 * clock / div;
4321     + }
4322     +
4323     WREG32(DCCG_AUDIO_DTO1_PHASE, 24000);
4324     WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
4325     }
4326     diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
4327     index 4aa5f755572b..13b6029d65cc 100644
4328     --- a/drivers/gpu/drm/radeon/evergreend.h
4329     +++ b/drivers/gpu/drm/radeon/evergreend.h
4330     @@ -511,6 +511,11 @@
4331     #define DCCG_AUDIO_DTO1_CNTL 0x05cc
4332     # define DCCG_AUDIO_DTO1_USE_512FBR_DTO (1 << 3)
4333    
4334     +#define DCE41_DENTIST_DISPCLK_CNTL 0x049c
4335     +# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
4336     +# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
4337     +# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
4338     +
4339     /* DCE 4.0 AFMT */
4340     #define HDMI_CONTROL 0x7030
4341     # define HDMI_KEEPOUT_MODE (1 << 0)
4342     diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
4343     index 87db64983ea8..5580568088bb 100644
4344     --- a/drivers/gpu/drm/radeon/radeon.h
4345     +++ b/drivers/gpu/drm/radeon/radeon.h
4346     @@ -268,6 +268,7 @@ struct radeon_clock {
4347     uint32_t current_dispclk;
4348     uint32_t dp_extclk;
4349     uint32_t max_pixel_clock;
4350     + uint32_t vco_freq;
4351     };
4352    
4353     /*
4354     diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
4355     index 8f285244c839..de9a2ffcf5f7 100644
4356     --- a/drivers/gpu/drm/radeon/radeon_atombios.c
4357     +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
4358     @@ -437,7 +437,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
4359     }
4360    
4361     /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
4362     - if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
4363     + if (((dev->pdev->device == 0x9802) ||
4364     + (dev->pdev->device == 0x9805) ||
4365     + (dev->pdev->device == 0x9806)) &&
4366     (dev->pdev->subsystem_vendor == 0x1734) &&
4367     (dev->pdev->subsystem_device == 0x11bd)) {
4368     if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
4369     @@ -448,14 +450,6 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
4370     }
4371     }
4372    
4373     - /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */
4374     - if ((dev->pdev->device == 0x9805) &&
4375     - (dev->pdev->subsystem_vendor == 0x1734) &&
4376     - (dev->pdev->subsystem_device == 0x11bd)) {
4377     - if (*connector_type == DRM_MODE_CONNECTOR_VGA)
4378     - return false;
4379     - }
4380     -
4381     return true;
4382     }
4383    
4384     @@ -1112,6 +1106,31 @@ union firmware_info {
4385     ATOM_FIRMWARE_INFO_V2_2 info_22;
4386     };
4387    
4388     +union igp_info {
4389     + struct _ATOM_INTEGRATED_SYSTEM_INFO info;
4390     + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
4391     + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
4392     + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
4393     + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
4394     +};
4395     +
4396     +static void radeon_atombios_get_dentist_vco_freq(struct radeon_device *rdev)
4397     +{
4398     + struct radeon_mode_info *mode_info = &rdev->mode_info;
4399     + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
4400     + union igp_info *igp_info;
4401     + u8 frev, crev;
4402     + u16 data_offset;
4403     +
4404     + if (atom_parse_data_header(mode_info->atom_context, index, NULL,
4405     + &frev, &crev, &data_offset)) {
4406     + igp_info = (union igp_info *)(mode_info->atom_context->bios +
4407     + data_offset);
4408     + rdev->clock.vco_freq =
4409     + le32_to_cpu(igp_info->info_6.ulDentistVCOFreq);
4410     + }
4411     +}
4412     +
4413     bool radeon_atom_get_clock_info(struct drm_device *dev)
4414     {
4415     struct radeon_device *rdev = dev->dev_private;
4416     @@ -1263,20 +1282,25 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
4417     rdev->mode_info.firmware_flags =
4418     le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
4419    
4420     + if (ASIC_IS_DCE8(rdev))
4421     + rdev->clock.vco_freq =
4422     + le32_to_cpu(firmware_info->info_22.ulGPUPLL_OutputFreq);
4423     + else if (ASIC_IS_DCE5(rdev))
4424     + rdev->clock.vco_freq = rdev->clock.current_dispclk;
4425     + else if (ASIC_IS_DCE41(rdev))
4426     + radeon_atombios_get_dentist_vco_freq(rdev);
4427     + else
4428     + rdev->clock.vco_freq = rdev->clock.current_dispclk;
4429     +
4430     + if (rdev->clock.vco_freq == 0)
4431     + rdev->clock.vco_freq = 360000; /* 3.6 GHz */
4432     +
4433     return true;
4434     }
4435    
4436     return false;
4437     }
4438    
4439     -union igp_info {
4440     - struct _ATOM_INTEGRATED_SYSTEM_INFO info;
4441     - struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
4442     - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
4443     - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
4444     - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
4445     -};
4446     -
4447     bool radeon_atombios_sideport_present(struct radeon_device *rdev)
4448     {
4449     struct radeon_mode_info *mode_info = &rdev->mode_info;
4450     diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
4451     index 2c02e99b5f95..b214663b370d 100644
4452     --- a/drivers/gpu/drm/radeon/radeon_audio.c
4453     +++ b/drivers/gpu/drm/radeon/radeon_audio.c
4454     @@ -739,9 +739,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
4455     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
4456     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
4457     struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
4458     - struct radeon_connector *radeon_connector = to_radeon_connector(connector);
4459     - struct radeon_connector_atom_dig *dig_connector =
4460     - radeon_connector->con_priv;
4461    
4462     if (!dig || !dig->afmt)
4463     return;
4464     @@ -753,10 +750,7 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
4465     radeon_audio_write_speaker_allocation(encoder);
4466     radeon_audio_write_sad_regs(encoder);
4467     radeon_audio_write_latency_fields(encoder, mode);
4468     - if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
4469     - radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
4470     - else
4471     - radeon_audio_set_dto(encoder, dig_connector->dp_clock);
4472     + radeon_audio_set_dto(encoder, rdev->clock.vco_freq * 10);
4473     radeon_audio_set_audio_packet(encoder);
4474     radeon_audio_select_pin(encoder);
4475    
4476     @@ -781,3 +775,15 @@ void radeon_audio_dpms(struct drm_encoder *encoder, int mode)
4477     if (radeon_encoder->audio && radeon_encoder->audio->dpms)
4478     radeon_encoder->audio->dpms(encoder, mode == DRM_MODE_DPMS_ON);
4479     }
4480     +
4481     +unsigned int radeon_audio_decode_dfs_div(unsigned int div)
4482     +{
4483     + if (div >= 8 && div < 64)
4484     + return (div - 8) * 25 + 200;
4485     + else if (div >= 64 && div < 96)
4486     + return (div - 64) * 50 + 1600;
4487     + else if (div >= 96 && div < 128)
4488     + return (div - 96) * 100 + 3200;
4489     + else
4490     + return 0;
4491     +}
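radeon_audio_decode_dfs_div() maps the 7-bit DENTIST divider encoding onto a divider value scaled by 100, so a return of 1000 means a divider of 10.00. Worked example from the table above: an encoding of 40 falls in the first range, giving (40 - 8) * 25 + 200 = 1000, and the DTO callers then rescale as clock * 100 / 1000. A standalone check (the clock value below is hypothetical):

    #include <stdio.h>

    /* same piecewise decode as radeon_audio_decode_dfs_div() */
    static unsigned int decode_dfs_div(unsigned int div)
    {
        if (div >= 8 && div < 64)
            return (div - 8) * 25 + 200;     /* dividers 2.00 .. 15.75 */
        else if (div >= 64 && div < 96)
            return (div - 64) * 50 + 1600;   /* dividers 16.0 .. 31.5 */
        else if (div >= 96 && div < 128)
            return (div - 96) * 100 + 3200;  /* dividers 32 .. 63 */
        else
            return 0;                        /* reserved encoding */
    }

    int main(void)
    {
        unsigned int clock = 27000;          /* hypothetical DTO clock */
        unsigned int div = decode_dfs_div(40);

        if (div)
            clock = clock * 100 / div;
        printf("div %u -> clock %u\n", div, clock);  /* 1000 -> 2700 */
        return 0;
    }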
4492     diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
4493     index 059cc3012062..5c70cceaa4a6 100644
4494     --- a/drivers/gpu/drm/radeon/radeon_audio.h
4495     +++ b/drivers/gpu/drm/radeon/radeon_audio.h
4496     @@ -79,5 +79,6 @@ void radeon_audio_fini(struct radeon_device *rdev);
4497     void radeon_audio_mode_set(struct drm_encoder *encoder,
4498     struct drm_display_mode *mode);
4499     void radeon_audio_dpms(struct drm_encoder *encoder, int mode);
4500     +unsigned int radeon_audio_decode_dfs_div(unsigned int div);
4501    
4502     #endif
4503     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
4504     index c566993a2ec3..d690df545b4d 100644
4505     --- a/drivers/gpu/drm/radeon/radeon_device.c
4506     +++ b/drivers/gpu/drm/radeon/radeon_device.c
4507     @@ -1744,6 +1744,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
4508     }
4509    
4510     drm_kms_helper_poll_enable(dev);
4511     + drm_helper_hpd_irq_event(dev);
4512    
4513     /* set the power state here in case we are a PX system or headless */
4514     if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
4515     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
4516     index 1eca0acac016..13767d21835f 100644
4517     --- a/drivers/gpu/drm/radeon/radeon_display.c
4518     +++ b/drivers/gpu/drm/radeon/radeon_display.c
4519     @@ -403,7 +403,8 @@ static void radeon_flip_work_func(struct work_struct *__work)
4520     struct drm_crtc *crtc = &radeon_crtc->base;
4521     unsigned long flags;
4522     int r;
4523     - int vpos, hpos, stat, min_udelay;
4524     + int vpos, hpos, stat, min_udelay = 0;
4525     + unsigned repcnt = 4;
4526     struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
4527    
4528     down_read(&rdev->exclusive_lock);
4529     @@ -454,7 +455,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
4530     * In practice this won't execute very often unless on very fast
4531     * machines because the time window for this to happen is very small.
4532     */
4533     - for (;;) {
4534     + while (radeon_crtc->enabled && repcnt--) {
4535     /* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
4536     * start in hpos, and to the "fudged earlier" vblank start in
4537     * vpos.
4538     @@ -472,10 +473,22 @@ static void radeon_flip_work_func(struct work_struct *__work)
4539     /* Sleep at least until estimated real start of hw vblank */
4540     spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
4541     min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
4542     + if (min_udelay > vblank->framedur_ns / 2000) {
4543     + /* Don't wait ridiculously long - something is wrong */
4544     + repcnt = 0;
4545     + break;
4546     + }
4547     usleep_range(min_udelay, 2 * min_udelay);
4548     spin_lock_irqsave(&crtc->dev->event_lock, flags);
4549     };
4550    
4551     + if (!repcnt)
4552     + DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
4553     + "framedur %d, linedur %d, stat %d, vpos %d, "
4554     + "hpos %d\n", work->crtc_id, min_udelay,
4555     + vblank->framedur_ns / 1000,
4556     + vblank->linedur_ns / 1000, stat, vpos, hpos);
4557     +
4558     /* do the flip (mmio) */
4559     radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
4560    
4561     diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
4562     index 84d45633d28c..fb6ad143873f 100644
4563     --- a/drivers/gpu/drm/radeon/radeon_object.c
4564     +++ b/drivers/gpu/drm/radeon/radeon_object.c
4565     @@ -33,6 +33,7 @@
4566     #include <linux/slab.h>
4567     #include <drm/drmP.h>
4568     #include <drm/radeon_drm.h>
4569     +#include <drm/drm_cache.h>
4570     #include "radeon.h"
4571     #include "radeon_trace.h"
4572    
4573     @@ -245,6 +246,12 @@ int radeon_bo_create(struct radeon_device *rdev,
4574     DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
4575     "better performance thanks to write-combining\n");
4576     bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
4577     +#else
4578     + /* For architectures that don't support WC memory,
4579     + * mask out the WC flag from the BO
4580     + */
4581     + if (!drm_arch_can_wc_memory())
4582     + bo->flags &= ~RADEON_GEM_GTT_WC;
4583     #endif
4584    
4585     radeon_ttm_placement_from_domain(bo, domain);
4586     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
4587     index 59abebd6b5dc..2081a60d08fb 100644
4588     --- a/drivers/gpu/drm/radeon/radeon_pm.c
4589     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
4590     @@ -1075,8 +1075,6 @@ force:
4591    
4592     /* update display watermarks based on new power state */
4593     radeon_bandwidth_update(rdev);
4594     - /* update displays */
4595     - radeon_dpm_display_configuration_changed(rdev);
4596    
4597     rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
4598     rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
4599     @@ -1097,6 +1095,9 @@ force:
4600    
4601     radeon_dpm_post_set_power_state(rdev);
4602    
4603     + /* update displays */
4604     + radeon_dpm_display_configuration_changed(rdev);
4605     +
4606     if (rdev->asic->dpm.force_performance_level) {
4607     if (rdev->pm.dpm.thermal_active) {
4608     enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
4609     diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
4610     index c507896aca45..197b157b73d0 100644
4611     --- a/drivers/gpu/drm/radeon/radeon_sa.c
4612     +++ b/drivers/gpu/drm/radeon/radeon_sa.c
4613     @@ -349,8 +349,13 @@ int radeon_sa_bo_new(struct radeon_device *rdev,
4614     /* see if we can skip over some allocations */
4615     } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
4616    
4617     + for (i = 0; i < RADEON_NUM_RINGS; ++i)
4618     + radeon_fence_ref(fences[i]);
4619     +
4620     spin_unlock(&sa_manager->wq.lock);
4621     r = radeon_fence_wait_any(rdev, fences, false);
4622     + for (i = 0; i < RADEON_NUM_RINGS; ++i)
4623     + radeon_fence_unref(&fences[i]);
4624     spin_lock(&sa_manager->wq.lock);
4625     /* if we have nothing to wait for block */
4626     if (r == -ENOENT) {
4627     diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
4628     index e34307459e50..e06ac546a90f 100644
4629     --- a/drivers/gpu/drm/radeon/radeon_ttm.c
4630     +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
4631     @@ -758,7 +758,7 @@ static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
4632     0, PAGE_SIZE,
4633     PCI_DMA_BIDIRECTIONAL);
4634     if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
4635     - while (--i) {
4636     + while (i--) {
4637     pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
4638     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
4639     gtt->ttm.dma_address[i] = 0;
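The one-character radeon_ttm fix is the classic unmap-on-failure off-by-one: when mapping page i fails, pages 0..i-1 must be released. "while (--i)" stops as soon as i reaches 0, so page 0 is never unmapped (and with i == 0 an unsigned counter would wrap), whereas "while (i--)" visits i-1 down to 0. A compilable illustration:

    #include <stdio.h>

    int main(void)
    {
        int mapped[4] = { 1, 1, 1, 0 };  /* mapping of entry 3 just failed */
        int i = 3;

        /* "while (--i)" would release 2 and 1 but leak entry 0 */
        while (i--)
            mapped[i] = 0;               /* stand-in for pci_unmap_page() */

        for (i = 0; i < 3; i++)
            printf("entry %d: %s\n", i, mapped[i] ? "leaked" : "released");
        return 0;
    }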
4640     diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
4641     index 48d97c040f49..3979632b9225 100644
4642     --- a/drivers/gpu/drm/radeon/radeon_vm.c
4643     +++ b/drivers/gpu/drm/radeon/radeon_vm.c
4644     @@ -455,15 +455,15 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4645    
4646     if (soffset) {
4647     /* make sure object fit at this offset */
4648     - eoffset = soffset + size;
4649     + eoffset = soffset + size - 1;
4650     if (soffset >= eoffset) {
4651     r = -EINVAL;
4652     goto error_unreserve;
4653     }
4654    
4655     last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
4656     - if (last_pfn > rdev->vm_manager.max_pfn) {
4657     - dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
4658     + if (last_pfn >= rdev->vm_manager.max_pfn) {
4659     + dev_err(rdev->dev, "va above limit (0x%08X >= 0x%08X)\n",
4660     last_pfn, rdev->vm_manager.max_pfn);
4661     r = -EINVAL;
4662     goto error_unreserve;
4663     @@ -478,7 +478,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4664     eoffset /= RADEON_GPU_PAGE_SIZE;
4665     if (soffset || eoffset) {
4666     struct interval_tree_node *it;
4667     - it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
4668     + it = interval_tree_iter_first(&vm->va, soffset, eoffset);
4669     if (it && it != &bo_va->it) {
4670     struct radeon_bo_va *tmp;
4671     tmp = container_of(it, struct radeon_bo_va, it);
4672     @@ -518,7 +518,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
4673     if (soffset || eoffset) {
4674     spin_lock(&vm->status_lock);
4675     bo_va->it.start = soffset;
4676     - bo_va->it.last = eoffset - 1;
4677     + bo_va->it.last = eoffset;
4678     list_add(&bo_va->vm_status, &vm->cleared);
4679     spin_unlock(&vm->status_lock);
4680     interval_tree_insert(&bo_va->it, &vm->va);
4681     @@ -888,7 +888,7 @@ static void radeon_vm_fence_pts(struct radeon_vm *vm,
4682     unsigned i;
4683    
4684     start >>= radeon_vm_block_size;
4685     - end >>= radeon_vm_block_size;
4686     + end = (end - 1) >> radeon_vm_block_size;
4687    
4688     for (i = start; i <= end; ++i)
4689     radeon_bo_fence(vm->page_tables[i].bo, fence, true);
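The radeon_vm hunks switch the mapping bookkeeping from an exclusive end offset to an inclusive one, which is what the interval tree expects and which stops the very last page of the address space from being rejected. Worked numbers (hypothetical): soffset = 0x1000 and size = 0x2000 give eoffset = 0x2fff, and after dividing by the GPU page size the tree stores the inclusive page range [1, 2] with no "eoffset - 1" adjustments at the query sites. A small check of the arithmetic:

    #include <stdio.h>

    #define GPU_PAGE_SIZE 4096

    int main(void)
    {
        unsigned long soffset = 0x1000, size = 0x2000;
        unsigned long eoffset = soffset + size - 1;   /* inclusive end */
        unsigned long first = soffset / GPU_PAGE_SIZE;
        unsigned long last = eoffset / GPU_PAGE_SIZE;

        /* two pages, indices 1 and 2, exactly covering [0x1000, 0x2fff] */
        printf("pages %lu..%lu (%lu total)\n", first, last, last - first + 1);
        return 0;
    }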
4690     diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
4691     index 4c4a7218a3bd..d1a7b58dd291 100644
4692     --- a/drivers/gpu/drm/radeon/sid.h
4693     +++ b/drivers/gpu/drm/radeon/sid.h
4694     @@ -915,6 +915,11 @@
4695     #define DCCG_AUDIO_DTO1_PHASE 0x05c0
4696     #define DCCG_AUDIO_DTO1_MODULE 0x05c4
4697    
4698     +#define DENTIST_DISPCLK_CNTL 0x0490
4699     +# define DENTIST_DPREFCLK_WDIVIDER(x) (((x) & 0x7f) << 24)
4700     +# define DENTIST_DPREFCLK_WDIVIDER_MASK (0x7f << 24)
4701     +# define DENTIST_DPREFCLK_WDIVIDER_SHIFT 24
4702     +
4703     #define AFMT_AUDIO_SRC_CONTROL 0x713c
4704     #define AFMT_AUDIO_SRC_SELECT(x) (((x) & 7) << 0)
4705     /* AFMT_AUDIO_SRC_SELECT
4706     diff --git a/drivers/gpu/drm/radeon/vce_v1_0.c b/drivers/gpu/drm/radeon/vce_v1_0.c
4707     index 07a0d378e122..a01efe39a820 100644
4708     --- a/drivers/gpu/drm/radeon/vce_v1_0.c
4709     +++ b/drivers/gpu/drm/radeon/vce_v1_0.c
4710     @@ -178,12 +178,12 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
4711     return -EINVAL;
4712     }
4713    
4714     - for (i = 0; i < sign->num; ++i) {
4715     - if (sign->val[i].chip_id == chip_id)
4716     + for (i = 0; i < le32_to_cpu(sign->num); ++i) {
4717     + if (le32_to_cpu(sign->val[i].chip_id) == chip_id)
4718     break;
4719     }
4720    
4721     - if (i == sign->num)
4722     + if (i == le32_to_cpu(sign->num))
4723     return -EINVAL;
4724    
4725     data += (256 - 64) / 4;
4726     @@ -191,18 +191,18 @@ int vce_v1_0_load_fw(struct radeon_device *rdev, uint32_t *data)
4727     data[1] = sign->val[i].nonce[1];
4728     data[2] = sign->val[i].nonce[2];
4729     data[3] = sign->val[i].nonce[3];
4730     - data[4] = sign->len + 64;
4731     + data[4] = cpu_to_le32(le32_to_cpu(sign->len) + 64);
4732    
4733     memset(&data[5], 0, 44);
4734     memcpy(&data[16], &sign[1], rdev->vce_fw->size - sizeof(*sign));
4735    
4736     - data += data[4] / 4;
4737     + data += le32_to_cpu(data[4]) / 4;
4738     data[0] = sign->val[i].sigval[0];
4739     data[1] = sign->val[i].sigval[1];
4740     data[2] = sign->val[i].sigval[2];
4741     data[3] = sign->val[i].sigval[3];
4742    
4743     - rdev->vce.keyselect = sign->val[i].keyselect;
4744     + rdev->vce.keyselect = le32_to_cpu(sign->val[i].keyselect);
4745    
4746     return 0;
4747     }
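The vce_v1_0 fix treats every field of the firmware signature as little-endian on-disk data: values are converted with le32_to_cpu() before being compared or used for arithmetic, and converted back with cpu_to_le32() before being stored into the buffer the hardware reads. On little-endian machines both conversions are no-ops, which is why the bug only affected big-endian hosts. A userspace analogue using the glibc endian.h helpers:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t le_len = htole32(4096);      /* field as stored in the image */
        uint32_t len = le32toh(le_len) + 64;  /* arithmetic in CPU byte order */
        uint32_t out = htole32(len);          /* store back little-endian */

        printf("len %u, stored 0x%08x\n", len, out);
        return 0;
    }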
4748     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4749     index 6377e8151000..67cebb23c940 100644
4750     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4751     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
4752     @@ -247,7 +247,7 @@ static void __vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
4753     {
4754     struct vmw_cmdbuf_man *man = header->man;
4755    
4756     - BUG_ON(!spin_is_locked(&man->lock));
4757     + lockdep_assert_held_once(&man->lock);
4758    
4759     if (header->inline_space) {
4760     vmw_cmdbuf_header_inline_free(header);
4761     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4762     index c49812b80dd0..24fb348a44e1 100644
4763     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4764     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
4765     @@ -25,6 +25,7 @@
4766     *
4767     **************************************************************************/
4768     #include <linux/module.h>
4769     +#include <linux/console.h>
4770    
4771     #include <drm/drmP.h>
4772     #include "vmwgfx_drv.h"
4773     @@ -1538,6 +1539,12 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4774     static int __init vmwgfx_init(void)
4775     {
4776     int ret;
4777     +
4778     +#ifdef CONFIG_VGA_CONSOLE
4779     + if (vgacon_text_force())
4780     + return -EINVAL;
4781     +#endif
4782     +
4783     ret = drm_pci_init(&driver, &vmw_pci_driver);
4784     if (ret)
4785     DRM_ERROR("Failed initializing DRM.\n");
4786     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4787     index 9b4bb9e74d73..7c2e118a77b0 100644
4788     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4789     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
4790     @@ -763,21 +763,25 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
4791     uint32_t format;
4792     struct drm_vmw_size content_base_size;
4793     struct vmw_resource *res;
4794     + unsigned int bytes_pp;
4795     int ret;
4796    
4797     switch (mode_cmd->depth) {
4798     case 32:
4799     case 24:
4800     format = SVGA3D_X8R8G8B8;
4801     + bytes_pp = 4;
4802     break;
4803    
4804     case 16:
4805     case 15:
4806     format = SVGA3D_R5G6B5;
4807     + bytes_pp = 2;
4808     break;
4809    
4810     case 8:
4811     format = SVGA3D_P8;
4812     + bytes_pp = 1;
4813     break;
4814    
4815     default:
4816     @@ -785,7 +789,7 @@ static int vmw_create_dmabuf_proxy(struct drm_device *dev,
4817     return -EINVAL;
4818     }
4819    
4820     - content_base_size.width = mode_cmd->width;
4821     + content_base_size.width = mode_cmd->pitch / bytes_pp;
4822     content_base_size.height = mode_cmd->height;
4823     content_base_size.depth = 1;
4824    
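The vmwgfx proxy-surface fix sizes the surface from the framebuffer pitch rather than the visible width, since the backing buffer may contain padding pixels at the end of each scanline. With hypothetical numbers: a 1366-pixel-wide 32-bit mode allocated with a pitch of 5504 bytes really holds 5504 / 4 = 1376 pixels per line, and a surface created only 1366 wide would misinterpret every row after the first. The arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned int pitch = 5504;   /* hypothetical: bytes per scanline */
        unsigned int bytes_pp = 4;   /* depth 24/32 -> 4 bytes per pixel */

        /* surface width must cover the whole row, padding included */
        printf("surface width = %u pixels\n", pitch / bytes_pp);
        return 0;
    }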
4825     diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
4826     index c4dcab048cb8..9098f13f2f44 100644
4827     --- a/drivers/hv/channel.c
4828     +++ b/drivers/hv/channel.c
4829     @@ -630,10 +630,19 @@ int vmbus_sendpacket_ctl(struct vmbus_channel *channel, void *buffer,
4830     * on the ring. We will not signal if more data is
4831     * to be placed.
4832     *
4833     + * Based on the channel signal state, we will decide
4834     + * which signaling policy will be applied.
4835     + *
4836     * If we cannot write to the ring-buffer; signal the host
4837     * even if we may not have written anything. This is a rare
4838     * enough condition that it should not matter.
4839     */
4840     +
4841     + if (channel->signal_policy)
4842     + signal = true;
4843     + else
4844     + kick_q = true;
4845     +
4846     if (((ret == 0) && kick_q && signal) || (ret))
4847     vmbus_setevent(channel);
4848    
4849     @@ -733,10 +742,19 @@ int vmbus_sendpacket_pagebuffer_ctl(struct vmbus_channel *channel,
4850     * on the ring. We will not signal if more data is
4851     * to be placed.
4852     *
4853     + * Based on the channel signal state, we will decide
4854     + * which signaling policy will be applied.
4855     + *
4856     * If we cannot write to the ring-buffer; signal the host
4857     * even if we may not have written anything. This is a rare
4858     * enough condition that it should not matter.
4859     */
4860     +
4861     + if (channel->signal_policy)
4862     + signal = true;
4863     + else
4864     + kick_q = true;
4865     +
4866     if (((ret == 0) && kick_q && signal) || (ret))
4867     vmbus_setevent(channel);
4868    
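The hv hunks force one of the two signaling inputs true according to the channel's policy before the existing combined test runs. A small sketch of that decision logic, with the surrounding code reduced to a pure function (illustrative, not the driver's API):

    #include <stdbool.h>
    #include <stdio.h>

    /* signal the host when the write failed, or when both the caller
     * (kick_q) and the ring state (signal) ask for it; the policy
     * forces one of the two inputs true. */
    static bool should_signal(int ret, bool kick_q, bool signal, bool policy)
    {
        if (policy)
            signal = true;
        else
            kick_q = true;
        return (ret == 0 && kick_q && signal) || ret != 0;
    }

    int main(void)
    {
        printf("%d\n", should_signal(0, false, true, false));  /* 1 */
        printf("%d\n", should_signal(0, false, false, true));  /* 0 */
        return 0;
    }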
4869     diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
4870     index f155b8380481..2b3105c8aed3 100644
4871     --- a/drivers/hwmon/ads1015.c
4872     +++ b/drivers/hwmon/ads1015.c
4873     @@ -126,7 +126,7 @@ static int ads1015_reg_to_mv(struct i2c_client *client, unsigned int channel,
4874     struct ads1015_data *data = i2c_get_clientdata(client);
4875     unsigned int pga = data->channel_data[channel].pga;
4876     int fullscale = fullscale_table[pga];
4877     - const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
4878     + const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
4879    
4880     return DIV_ROUND_CLOSEST(reg * fullscale, mask);
4881     }
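The one-word ads1015 change matters because the ADC reading reg can be negative (the ADS1115 reports signed 16-bit differential values), and dividing a signed value by an unsigned divisor first converts the dividend to unsigned, turning small negative readings into huge positive ones. A minimal demonstration of the promotion rule (plain division stands in for DIV_ROUND_CLOSEST):

    #include <stdio.h>

    int main(void)
    {
        int reg = -2048, fullscale = 2048;
        const unsigned int umask = 0x7fff;   /* old, buggy type */
        const int smask = 0x7fff;            /* fixed type */

        /* int / unsigned: the left side is converted to unsigned first */
        printf("unsigned divisor: %u\n", reg * fullscale / umask);
        printf("signed divisor:   %d\n", reg * fullscale / smask);
        return 0;
    }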
4882     diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
4883     index c8487894b312..c43318d3416e 100644
4884     --- a/drivers/hwmon/dell-smm-hwmon.c
4885     +++ b/drivers/hwmon/dell-smm-hwmon.c
4886     @@ -932,6 +932,17 @@ MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
4887     static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
4888     {
4889     /*
4890     + * CPU fan speed going up and down on Dell Studio XPS 8000
4891     + * for unknown reasons.
4892     + */
4893     + .ident = "Dell Studio XPS 8000",
4894     + .matches = {
4895     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
4896     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8000"),
4897     + },
4898     + },
4899     + {
4900     + /*
4901     * CPU fan speed going up and down on Dell Studio XPS 8100
4902     * for unknown reasons.
4903     */
4904     diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
4905     index 82de3deeb18a..685568b1236d 100644
4906     --- a/drivers/hwmon/gpio-fan.c
4907     +++ b/drivers/hwmon/gpio-fan.c
4908     @@ -406,16 +406,11 @@ static int gpio_fan_get_cur_state(struct thermal_cooling_device *cdev,
4909     unsigned long *state)
4910     {
4911     struct gpio_fan_data *fan_data = cdev->devdata;
4912     - int r;
4913    
4914     if (!fan_data)
4915     return -EINVAL;
4916    
4917     - r = get_fan_speed_index(fan_data);
4918     - if (r < 0)
4919     - return r;
4920     -
4921     - *state = r;
4922     + *state = fan_data->speed_index;
4923     return 0;
4924     }
4925    
4926     diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
4927     index e25492137d8b..93738dfbf631 100644
4928     --- a/drivers/hwtracing/coresight/coresight.c
4929     +++ b/drivers/hwtracing/coresight/coresight.c
4930     @@ -548,7 +548,7 @@ static int coresight_name_match(struct device *dev, void *data)
4931     to_match = data;
4932     i_csdev = to_coresight_device(dev);
4933    
4934     - if (!strcmp(to_match, dev_name(&i_csdev->dev)))
4935     + if (to_match && !strcmp(to_match, dev_name(&i_csdev->dev)))
4936     return 1;
4937    
4938     return 0;
4939     diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
4940     index f62d69799a9c..27fa0cb09538 100644
4941     --- a/drivers/i2c/busses/i2c-i801.c
4942     +++ b/drivers/i2c/busses/i2c-i801.c
4943     @@ -1271,6 +1271,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
4944     switch (dev->device) {
4945     case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
4946     case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
4947     + case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
4948     + case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
4949     case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
4950     priv->features |= FEATURE_I2C_BLOCK_READ;
4951     priv->features |= FEATURE_IRQ;
4952     diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
4953     index 0a26dd6d9b19..d6d2b3582910 100644
4954     --- a/drivers/infiniband/core/cm.c
4955     +++ b/drivers/infiniband/core/cm.c
4956     @@ -782,11 +782,11 @@ static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
4957     wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
4958    
4959     /* Check if the device started its remove_one */
4960     - spin_lock_irq(&cm.lock);
4961     + spin_lock_irqsave(&cm.lock, flags);
4962     if (!cm_dev->going_down)
4963     queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
4964     msecs_to_jiffies(wait_time));
4965     - spin_unlock_irq(&cm.lock);
4966     + spin_unlock_irqrestore(&cm.lock, flags);
4967    
4968     cm_id_priv->timewait_info = NULL;
4969     }
4970     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
4971     index 2d762a2ecd81..17a15c56028c 100644
4972     --- a/drivers/infiniband/core/cma.c
4973     +++ b/drivers/infiniband/core/cma.c
4974     @@ -453,7 +453,7 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
4975     if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
4976     return ret;
4977    
4978     - if (dev_type == ARPHRD_ETHER)
4979     + if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
4980     ndev = dev_get_by_index(&init_net, bound_if_index);
4981    
4982     ret = ib_find_cached_gid_by_port(device, gid, port, ndev, NULL);
4983     diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
4984     index cb78b1e9bcd9..f504ba73e5dc 100644
4985     --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
4986     +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
4987     @@ -149,7 +149,7 @@ static int iwch_l2t_send(struct t3cdev *tdev, struct sk_buff *skb, struct l2t_en
4988     error = l2t_send(tdev, skb, l2e);
4989     if (error < 0)
4990     kfree_skb(skb);
4991     - return error;
4992     + return error < 0 ? error : 0;
4993     }
4994    
4995     int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
4996     @@ -165,7 +165,7 @@ int iwch_cxgb3_ofld_send(struct t3cdev *tdev, struct sk_buff *skb)
4997     error = cxgb3_ofld_send(tdev, skb);
4998     if (error < 0)
4999     kfree_skb(skb);
5000     - return error;
5001     + return error < 0 ? error : 0;
5002     }
5003    
5004     static void release_tid(struct t3cdev *tdev, u32 hwtid, struct sk_buff *skb)
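The cxgb3 change accounts for the fact that l2t_send() and cxgb3_ofld_send() can return positive values (queueing feedback) that are not errors, while the iwch callers treat any nonzero return as failure. Clamping positives to zero preserves the real error codes. The pattern, as a sketch:

    #include <stdio.h>

    /* stand-in for l2t_send(): >0 is congestion feedback, <0 an error */
    static int lowlevel_send(int simulate)
    {
        return simulate;
    }

    static int send_frame(int simulate)
    {
        int error = lowlevel_send(simulate);

        return error < 0 ? error : 0;   /* only real errors propagate */
    }

    int main(void)
    {
        printf("%d %d %d\n", send_frame(-5), send_frame(0), send_frame(2));
        return 0;
    }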
5005     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
5006     index 7e97cb55a6bf..c4e091528390 100644
5007     --- a/drivers/infiniband/hw/mlx5/main.c
5008     +++ b/drivers/infiniband/hw/mlx5/main.c
5009     @@ -275,7 +275,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
5010     props->max_sge = min(max_rq_sg, max_sq_sg);
5011     props->max_sge_rd = props->max_sge;
5012     props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
5013     - props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_eq_sz)) - 1;
5014     + props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
5015     props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
5016     props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
5017     props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
5018     diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
5019     index 40f85bb3e0d3..3eff35c2d453 100644
5020     --- a/drivers/infiniband/hw/qib/qib_qp.c
5021     +++ b/drivers/infiniband/hw/qib/qib_qp.c
5022     @@ -100,9 +100,10 @@ static u32 credit_table[31] = {
5023     32768 /* 1E */
5024     };
5025    
5026     -static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
5027     +static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map,
5028     + gfp_t gfp)
5029     {
5030     - unsigned long page = get_zeroed_page(GFP_KERNEL);
5031     + unsigned long page = get_zeroed_page(gfp);
5032    
5033     /*
5034     * Free the page if someone raced with us installing it.
5035     @@ -121,7 +122,7 @@ static void get_map_page(struct qib_qpn_table *qpt, struct qpn_map *map)
5036     * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
5037     */
5038     static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
5039     - enum ib_qp_type type, u8 port)
5040     + enum ib_qp_type type, u8 port, gfp_t gfp)
5041     {
5042     u32 i, offset, max_scan, qpn;
5043     struct qpn_map *map;
5044     @@ -151,7 +152,7 @@ static int alloc_qpn(struct qib_devdata *dd, struct qib_qpn_table *qpt,
5045     max_scan = qpt->nmaps - !offset;
5046     for (i = 0;;) {
5047     if (unlikely(!map->page)) {
5048     - get_map_page(qpt, map);
5049     + get_map_page(qpt, map, gfp);
5050     if (unlikely(!map->page))
5051     break;
5052     }
5053     @@ -983,13 +984,21 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5054     size_t sz;
5055     size_t sg_list_sz;
5056     struct ib_qp *ret;
5057     + gfp_t gfp;
5058     +
5059    
5060     if (init_attr->cap.max_send_sge > ib_qib_max_sges ||
5061     init_attr->cap.max_send_wr > ib_qib_max_qp_wrs ||
5062     - init_attr->create_flags) {
5063     - ret = ERR_PTR(-EINVAL);
5064     - goto bail;
5065     - }
5066     + init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO))
5067     + return ERR_PTR(-EINVAL);
5068     +
5069     + /* GFP_NOIO is applicable in RC QPs only */
5070     + if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO &&
5071     + init_attr->qp_type != IB_QPT_RC)
5072     + return ERR_PTR(-EINVAL);
5073     +
5074     + gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ?
5075     + GFP_NOIO : GFP_KERNEL;
5076    
5077     /* Check receive queue parameters if no SRQ is specified. */
5078     if (!init_attr->srq) {
5079     @@ -1021,7 +1030,8 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5080     sz = sizeof(struct qib_sge) *
5081     init_attr->cap.max_send_sge +
5082     sizeof(struct qib_swqe);
5083     - swq = vmalloc((init_attr->cap.max_send_wr + 1) * sz);
5084     + swq = __vmalloc((init_attr->cap.max_send_wr + 1) * sz,
5085     + gfp, PAGE_KERNEL);
5086     if (swq == NULL) {
5087     ret = ERR_PTR(-ENOMEM);
5088     goto bail;
5089     @@ -1037,13 +1047,13 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5090     } else if (init_attr->cap.max_recv_sge > 1)
5091     sg_list_sz = sizeof(*qp->r_sg_list) *
5092     (init_attr->cap.max_recv_sge - 1);
5093     - qp = kzalloc(sz + sg_list_sz, GFP_KERNEL);
5094     + qp = kzalloc(sz + sg_list_sz, gfp);
5095     if (!qp) {
5096     ret = ERR_PTR(-ENOMEM);
5097     goto bail_swq;
5098     }
5099     RCU_INIT_POINTER(qp->next, NULL);
5100     - qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), GFP_KERNEL);
5101     + qp->s_hdr = kzalloc(sizeof(*qp->s_hdr), gfp);
5102     if (!qp->s_hdr) {
5103     ret = ERR_PTR(-ENOMEM);
5104     goto bail_qp;
5105     @@ -1058,8 +1068,16 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5106     qp->r_rq.max_sge = init_attr->cap.max_recv_sge;
5107     sz = (sizeof(struct ib_sge) * qp->r_rq.max_sge) +
5108     sizeof(struct qib_rwqe);
5109     - qp->r_rq.wq = vmalloc_user(sizeof(struct qib_rwq) +
5110     - qp->r_rq.size * sz);
5111     + if (gfp != GFP_NOIO)
5112     + qp->r_rq.wq = vmalloc_user(
5113     + sizeof(struct qib_rwq) +
5114     + qp->r_rq.size * sz);
5115     + else
5116     + qp->r_rq.wq = __vmalloc(
5117     + sizeof(struct qib_rwq) +
5118     + qp->r_rq.size * sz,
5119     + gfp, PAGE_KERNEL);
5120     +
5121     if (!qp->r_rq.wq) {
5122     ret = ERR_PTR(-ENOMEM);
5123     goto bail_qp;
5124     @@ -1090,7 +1108,7 @@ struct ib_qp *qib_create_qp(struct ib_pd *ibpd,
5125     dev = to_idev(ibpd->device);
5126     dd = dd_from_dev(dev);
5127     err = alloc_qpn(dd, &dev->qpn_table, init_attr->qp_type,
5128     - init_attr->port_num);
5129     + init_attr->port_num, gfp);
5130     if (err < 0) {
5131     ret = ERR_PTR(err);
5132     vfree(qp->r_rq.wq);
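The qib_qp rework accepts exactly one create flag, IB_QP_CREATE_USE_GFP_NOIO, restricts it to RC QPs, and threads the chosen gfp mask through every allocation on the QP setup path. A stripped-down sketch of the flag validation (constants here are illustrative placeholders, not the IB verbs values):

    #include <stdio.h>

    #define CREATE_USE_GFP_NOIO 0x1
    #define QPT_RC              2

    /* reject unknown flags and NOIO on non-RC QPs, as the hunk does */
    static int pick_gfp(unsigned int flags, int qp_type, int *noio)
    {
        if (flags & ~CREATE_USE_GFP_NOIO)
            return -1;                      /* unknown flag */
        if ((flags & CREATE_USE_GFP_NOIO) && qp_type != QPT_RC)
            return -1;                      /* NOIO is RC-only */
        *noio = !!(flags & CREATE_USE_GFP_NOIO);
        return 0;
    }

    int main(void)
    {
        int noio, ok = pick_gfp(CREATE_USE_GFP_NOIO, QPT_RC, &noio);

        printf("%d %d\n", ok, noio);
        return 0;
    }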
5133     diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5134     index f8ea069a3eaf..b2fb5286dbd9 100644
5135     --- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5136     +++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c
5137     @@ -286,15 +286,13 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5138     struct qib_ibdev *dev = to_idev(ibqp->device);
5139     struct qib_ibport *ibp = to_iport(ibqp->device, qp->port_num);
5140     struct qib_mcast *mcast = NULL;
5141     - struct qib_mcast_qp *p, *tmp;
5142     + struct qib_mcast_qp *p, *tmp, *delp = NULL;
5143     struct rb_node *n;
5144     int last = 0;
5145     int ret;
5146    
5147     - if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET) {
5148     - ret = -EINVAL;
5149     - goto bail;
5150     - }
5151     + if (ibqp->qp_num <= 1 || qp->state == IB_QPS_RESET)
5152     + return -EINVAL;
5153    
5154     spin_lock_irq(&ibp->lock);
5155    
5156     @@ -303,8 +301,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5157     while (1) {
5158     if (n == NULL) {
5159     spin_unlock_irq(&ibp->lock);
5160     - ret = -EINVAL;
5161     - goto bail;
5162     + return -EINVAL;
5163     }
5164    
5165     mcast = rb_entry(n, struct qib_mcast, rb_node);
5166     @@ -328,6 +325,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5167     */
5168     list_del_rcu(&p->list);
5169     mcast->n_attached--;
5170     + delp = p;
5171    
5172     /* If this was the last attached QP, remove the GID too. */
5173     if (list_empty(&mcast->qp_list)) {
5174     @@ -338,15 +336,16 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5175     }
5176    
5177     spin_unlock_irq(&ibp->lock);
5178     + /* QP not attached */
5179     + if (!delp)
5180     + return -EINVAL;
5181     + /*
5182     + * Wait for any list walkers to finish before freeing the
5183     + * list element.
5184     + */
5185     + wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
5186     + qib_mcast_qp_free(delp);
5187    
5188     - if (p) {
5189     - /*
5190     - * Wait for any list walkers to finish before freeing the
5191     - * list element.
5192     - */
5193     - wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
5194     - qib_mcast_qp_free(p);
5195     - }
5196     if (last) {
5197     atomic_dec(&mcast->refcount);
5198     wait_event(mcast->wait, !atomic_read(&mcast->refcount));
5199     @@ -355,11 +354,7 @@ int qib_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
5200     dev->n_mcast_grps_allocated--;
5201     spin_unlock_irq(&dev->n_mcast_grps_lock);
5202     }
5203     -
5204     - ret = 0;
5205     -
5206     -bail:
5207     - return ret;
5208     + return 0;
5209     }
5210    
5211     int qib_mcast_tree_empty(struct qib_ibport *ibp)
5212     diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
5213     index b12a5d58546f..37199b9b2cfa 100644
5214     --- a/drivers/irqchip/irq-atmel-aic-common.c
5215     +++ b/drivers/irqchip/irq-atmel-aic-common.c
5216     @@ -86,7 +86,7 @@ int aic_common_set_priority(int priority, unsigned *val)
5217     priority > AT91_AIC_IRQ_MAX_PRIORITY)
5218     return -EINVAL;
5219    
5220     - *val &= AT91_AIC_PRIOR;
5221     + *val &= ~AT91_AIC_PRIOR;
5222     *val |= priority;
5223    
5224     return 0;
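The one-character atmel-aic fix is a read-modify-write bug: "*val &= AT91_AIC_PRIOR" keeps only the priority field, discarding every other bit of the register, whereas the intent is "*val &= ~AT91_AIC_PRIOR" to clear just that field before OR-ing in the new priority. With an illustrative 3-bit field:

    #include <stdio.h>

    #define PRIOR_MASK 0x7u

    int main(void)
    {
        unsigned int val = 0xff5;   /* other config bits set, priority 5 */

        printf("buggy:  0x%x\n", val & PRIOR_MASK);  /* 0x5: bits lost */

        val &= ~PRIOR_MASK;         /* clear just the priority field */
        val |= 2;                   /* insert the new priority */
        printf("fixed:  0x%x\n", val);               /* 0xff2 */
        return 0;
    }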
5225     diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
5226     index e23d1d18f9d6..a159529f9d53 100644
5227     --- a/drivers/irqchip/irq-gic-v3-its.c
5228     +++ b/drivers/irqchip/irq-gic-v3-its.c
5229     @@ -597,11 +597,6 @@ static void its_unmask_irq(struct irq_data *d)
5230     lpi_set_config(d, true);
5231     }
5232    
5233     -static void its_eoi_irq(struct irq_data *d)
5234     -{
5235     - gic_write_eoir(d->hwirq);
5236     -}
5237     -
5238     static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
5239     bool force)
5240     {
5241     @@ -638,7 +633,7 @@ static struct irq_chip its_irq_chip = {
5242     .name = "ITS",
5243     .irq_mask = its_mask_irq,
5244     .irq_unmask = its_unmask_irq,
5245     - .irq_eoi = its_eoi_irq,
5246     + .irq_eoi = irq_chip_eoi_parent,
5247     .irq_set_affinity = its_set_affinity,
5248     .irq_compose_msi_msg = its_irq_compose_msi_msg,
5249     };
5250     diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
5251     index c22e2d40cb30..efe50845939d 100644
5252     --- a/drivers/irqchip/irq-mxs.c
5253     +++ b/drivers/irqchip/irq-mxs.c
5254     @@ -241,6 +241,7 @@ static int __init asm9260_of_init(struct device_node *np,
5255     writel(0, icoll_priv.intr + i);
5256    
5257     icoll_add_domain(np, ASM9260_NUM_IRQS);
5258     + set_handle_irq(icoll_handle_irq);
5259    
5260     return 0;
5261     }
5262     diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
5263     index 8587d0f8d8c0..f6cb1b8bb981 100644
5264     --- a/drivers/irqchip/irq-omap-intc.c
5265     +++ b/drivers/irqchip/irq-omap-intc.c
5266     @@ -47,6 +47,7 @@
5267     #define INTC_ILR0 0x0100
5268    
5269     #define ACTIVEIRQ_MASK 0x7f /* omap2/3 active interrupt bits */
5270     +#define SPURIOUSIRQ_MASK (0x1ffffff << 7)
5271     #define INTCPS_NR_ILR_REGS 128
5272     #define INTCPS_NR_MIR_REGS 4
5273    
5274     @@ -330,11 +331,35 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
5275     static asmlinkage void __exception_irq_entry
5276     omap_intc_handle_irq(struct pt_regs *regs)
5277     {
5278     + extern unsigned long irq_err_count;
5279     u32 irqnr;
5280    
5281     irqnr = intc_readl(INTC_SIR);
5282     +
5283     + /*
5284     + * A spurious IRQ can result if the interrupt that triggered the
5285     + * sorting is no longer active during the sorting (10 INTC
5286     + * functional clock cycles after interrupt assertion), or if a
5287     + * change in the interrupt mask affected the result during
5288     + * sorting. There is no special handling required except ignoring
5289     + * the SIR register value just read and retrying.
5290     + * See section 6.2.5 of AM335x TRM Literature Number: SPRUH73K
5291     + *
5292     + * Many times, a spurious interrupt situation has been fixed
5293     + * by adding a flush for the posted write acking the IRQ in
5294     + * the device driver. Typically, this is going to be the device
5295     + * driver whose interrupt was handled just before the spurious
5296     + * IRQ occurred. Pay attention to those device drivers if you
5297     + * run into the spurious IRQ condition below.
5298     + */
5299     + if (unlikely((irqnr & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)) {
5300     + pr_err_once("%s: spurious irq!\n", __func__);
5301     + irq_err_count++;
5302     + omap_ack_irq(NULL);
5303     + return;
5304     + }
5305     +
5306     irqnr &= ACTIVEIRQ_MASK;
5307     - WARN_ONCE(!irqnr, "Spurious IRQ ?\n");
5308     handle_domain_irq(domain, irqnr, regs);
5309     }
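INTC_SIR holds the sorted active-IRQ number in bits 6:0 and, per the TRM text quoted above, reads back with bits 31:7 all set when the sort result is spurious; the new SPURIOUSIRQ_MASK test detects exactly that pattern before the active bits are masked off. A standalone check of the bit arithmetic (the unsigned suffix keeps the shift well-defined in a plain C program):

    #include <stdio.h>

    #define ACTIVEIRQ_MASK   0x7f
    #define SPURIOUSIRQ_MASK (0x1ffffffu << 7)   /* bits 31:7, 0xffffff80 */

    static void dispatch(unsigned int sir)
    {
        if ((sir & SPURIOUSIRQ_MASK) == SPURIOUSIRQ_MASK)
            printf("spurious: ack and return\n");
        else
            printf("irq %u\n", sir & ACTIVEIRQ_MASK);
    }

    int main(void)
    {
        dispatch(0x00000041);   /* normal: IRQ 65 */
        dispatch(0xffffffbf);   /* spurious sort result */
        return 0;
    }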
5310    
5311     diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
5312     index 83392f856dfd..22b9e34ceb75 100644
5313     --- a/drivers/md/bcache/btree.c
5314     +++ b/drivers/md/bcache/btree.c
5315     @@ -1741,6 +1741,7 @@ static void bch_btree_gc(struct cache_set *c)
5316     do {
5317     ret = btree_root(gc_root, c, &op, &writes, &stats);
5318     closure_sync(&writes);
5319     + cond_resched();
5320    
5321     if (ret && ret != -EAGAIN)
5322     pr_warn("gc failed!");
5323     @@ -2162,8 +2163,10 @@ int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
5324     rw_lock(true, b, b->level);
5325    
5326     if (b->key.ptr[0] != btree_ptr ||
5327     - b->seq != seq + 1)
5328     + b->seq != seq + 1) {
5329     + op->lock = b->level;
5330     goto out;
5331     + }
5332     }
5333    
5334     SET_KEY_PTRS(check_key, 1);
5335     diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
5336     index 679a093a3bf6..8d0ead98eb6e 100644
5337     --- a/drivers/md/bcache/super.c
5338     +++ b/drivers/md/bcache/super.c
5339     @@ -685,6 +685,8 @@ static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
5340     WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
5341     sysfs_create_link(&c->kobj, &d->kobj, d->name),
5342     "Couldn't create device <-> cache set symlinks");
5343     +
5344     + clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
5345     }
5346    
5347     static void bcache_device_detach(struct bcache_device *d)
5348     @@ -847,8 +849,11 @@ void bch_cached_dev_run(struct cached_dev *dc)
5349     buf[SB_LABEL_SIZE] = '\0';
5350     env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
5351    
5352     - if (atomic_xchg(&dc->running, 1))
5353     + if (atomic_xchg(&dc->running, 1)) {
5354     + kfree(env[1]);
5355     + kfree(env[2]);
5356     return;
5357     + }
5358    
5359     if (!d->c &&
5360     BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
5361     @@ -1933,6 +1938,8 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
5362     else
5363     err = "device busy";
5364     mutex_unlock(&bch_register_lock);
5365     + if (attr == &ksysfs_register_quiet)
5366     + goto out;
5367     }
5368     goto err;
5369     }
5370     @@ -1971,8 +1978,7 @@ out:
5371     err_close:
5372     blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
5373     err:
5374     - if (attr != &ksysfs_register_quiet)
5375     - pr_info("error opening %s: %s", path, err);
5376     + pr_info("error opening %s: %s", path, err);
5377     ret = -EINVAL;
5378     goto out;
5379     }
5380     @@ -2066,8 +2072,10 @@ static int __init bcache_init(void)
5381     closure_debug_init();
5382    
5383     bcache_major = register_blkdev(0, "bcache");
5384     - if (bcache_major < 0)
5385     + if (bcache_major < 0) {
5386     + unregister_reboot_notifier(&reboot);
5387     return bcache_major;
5388     + }
5389    
5390     if (!(bcache_wq = create_workqueue("bcache")) ||
5391     !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) ||
5392     diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
5393     index b23f88d9f18c..b9346cd9cda1 100644
5394     --- a/drivers/md/bcache/writeback.c
5395     +++ b/drivers/md/bcache/writeback.c
5396     @@ -323,6 +323,10 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned inode,
5397    
5398     static bool dirty_pred(struct keybuf *buf, struct bkey *k)
5399     {
5400     + struct cached_dev *dc = container_of(buf, struct cached_dev, writeback_keys);
5401     +
5402     + BUG_ON(KEY_INODE(k) != dc->disk.id);
5403     +
5404     return KEY_DIRTY(k);
5405     }
5406    
5407     @@ -372,11 +376,24 @@ next:
5408     }
5409     }
5410    
5411     +/*
5412     + * Returns true if we scanned the entire disk
5413     + */
5414     static bool refill_dirty(struct cached_dev *dc)
5415     {
5416     struct keybuf *buf = &dc->writeback_keys;
5417     + struct bkey start = KEY(dc->disk.id, 0, 0);
5418     struct bkey end = KEY(dc->disk.id, MAX_KEY_OFFSET, 0);
5419     - bool searched_from_start = false;
5420     + struct bkey start_pos;
5421     +
5422     + /*
5423     + * make sure the keybuf position is inside the range for this
5424     + * disk - at bringup we might not be attached yet, so this
5425     + * disk's inode number isn't initialized at that point
5426     + */
5427     + if (bkey_cmp(&buf->last_scanned, &start) < 0 ||
5428     + bkey_cmp(&buf->last_scanned, &end) > 0)
5429     + buf->last_scanned = start;
5430    
5431     if (dc->partial_stripes_expensive) {
5432     refill_full_stripes(dc);
5433     @@ -384,14 +401,20 @@ static bool refill_dirty(struct cached_dev *dc)
5434     return false;
5435     }
5436    
5437     - if (bkey_cmp(&buf->last_scanned, &end) >= 0) {
5438     - buf->last_scanned = KEY(dc->disk.id, 0, 0);
5439     - searched_from_start = true;
5440     - }
5441     -
5442     + start_pos = buf->last_scanned;
5443     bch_refill_keybuf(dc->disk.c, buf, &end, dirty_pred);
5444    
5445     - return bkey_cmp(&buf->last_scanned, &end) >= 0 && searched_from_start;
5446     + if (bkey_cmp(&buf->last_scanned, &end) < 0)
5447     + return false;
5448     +
5449     + /*
5450     + * If we get to the end, start scanning again from the beginning,
5451     + * and only scan up to where we initially started scanning from:
5452     + */
5453     + buf->last_scanned = start;
5454     + bch_refill_keybuf(dc->disk.c, buf, &start_pos, dirty_pred);
5455     +
5456     + return bkey_cmp(&buf->last_scanned, &start_pos) >= 0;
5457     }
5458    
5459     static int bch_writeback_thread(void *arg)
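The refill_dirty() rewrite turns a single forward scan into a wrap-around scan: search from last_scanned to the end of this disk's key range, and only if the end was reached, wrap to the start and continue up to where this pass began, reporting a full sweep only when the circle closes. A control-flow sketch with the keybuf reduced to a single position (purely illustrative; the real refill can stop early when the buffer fills):

    #include <stdbool.h>
    #include <stdio.h>

    static int pos;                    /* stand-in for buf->last_scanned */

    static void refill_up_to(int end)  /* stand-in for bch_refill_keybuf() */
    {
        if (pos < end)
            pos = end;                 /* pretend the buffer never fills */
    }

    static bool refill_dirty(int disk_start, int disk_end)
    {
        int start_pos = pos;

        refill_up_to(disk_end);
        if (pos < disk_end)
            return false;              /* keybuf filled up mid-scan */

        pos = disk_start;              /* wrap around */
        refill_up_to(start_pos);
        return pos >= start_pos;       /* scanned the entire disk */
    }

    int main(void)
    {
        pos = 40;
        printf("full scan: %d\n", refill_dirty(0, 100));
        return 0;
    }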
5460     diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
5461     index 0a9dab187b79..073a042aed24 100644
5462     --- a/drivers/md/bcache/writeback.h
5463     +++ b/drivers/md/bcache/writeback.h
5464     @@ -63,7 +63,8 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
5465    
5466     static inline void bch_writeback_queue(struct cached_dev *dc)
5467     {
5468     - wake_up_process(dc->writeback_thread);
5469     + if (!IS_ERR_OR_NULL(dc->writeback_thread))
5470     + wake_up_process(dc->writeback_thread);
5471     }
5472    
5473     static inline void bch_writeback_add(struct cached_dev *dc)
5474     diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
5475     index fae34e7a0b1e..12b5216c2cfe 100644
5476     --- a/drivers/md/dm-exception-store.h
5477     +++ b/drivers/md/dm-exception-store.h
5478     @@ -69,7 +69,7 @@ struct dm_exception_store_type {
5479     * Update the metadata with this exception.
5480     */
5481     void (*commit_exception) (struct dm_exception_store *store,
5482     - struct dm_exception *e,
5483     + struct dm_exception *e, int valid,
5484     void (*callback) (void *, int success),
5485     void *callback_context);
5486    
5487     diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
5488     index 3164b8bce294..4d3909393f2c 100644
5489     --- a/drivers/md/dm-snap-persistent.c
5490     +++ b/drivers/md/dm-snap-persistent.c
5491     @@ -695,7 +695,7 @@ static int persistent_prepare_exception(struct dm_exception_store *store,
5492     }
5493    
5494     static void persistent_commit_exception(struct dm_exception_store *store,
5495     - struct dm_exception *e,
5496     + struct dm_exception *e, int valid,
5497     void (*callback) (void *, int success),
5498     void *callback_context)
5499     {
5500     @@ -704,6 +704,9 @@ static void persistent_commit_exception(struct dm_exception_store *store,
5501     struct core_exception ce;
5502     struct commit_callback *cb;
5503    
5504     + if (!valid)
5505     + ps->valid = 0;
5506     +
5507     ce.old_chunk = e->old_chunk;
5508     ce.new_chunk = e->new_chunk;
5509     write_exception(ps, ps->current_committed++, &ce);
5510     diff --git a/drivers/md/dm-snap-transient.c b/drivers/md/dm-snap-transient.c
5511     index 9b7c8c8049d6..4d50a12cf00c 100644
5512     --- a/drivers/md/dm-snap-transient.c
5513     +++ b/drivers/md/dm-snap-transient.c
5514     @@ -52,12 +52,12 @@ static int transient_prepare_exception(struct dm_exception_store *store,
5515     }
5516    
5517     static void transient_commit_exception(struct dm_exception_store *store,
5518     - struct dm_exception *e,
5519     + struct dm_exception *e, int valid,
5520     void (*callback) (void *, int success),
5521     void *callback_context)
5522     {
5523     /* Just succeed */
5524     - callback(callback_context, 1);
5525     + callback(callback_context, valid);
5526     }
5527    
5528     static void transient_usage(struct dm_exception_store *store,
5529     diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
5530     index c06b74e91cd6..61f184ad081c 100644
5531     --- a/drivers/md/dm-snap.c
5532     +++ b/drivers/md/dm-snap.c
5533     @@ -1438,8 +1438,9 @@ static void __invalidate_snapshot(struct dm_snapshot *s, int err)
5534     dm_table_event(s->ti->table);
5535     }
5536    
5537     -static void pending_complete(struct dm_snap_pending_exception *pe, int success)
5538     +static void pending_complete(void *context, int success)
5539     {
5540     + struct dm_snap_pending_exception *pe = context;
5541     struct dm_exception *e;
5542     struct dm_snapshot *s = pe->snap;
5543     struct bio *origin_bios = NULL;
5544     @@ -1509,24 +1510,13 @@ out:
5545     free_pending_exception(pe);
5546     }
5547    
5548     -static void commit_callback(void *context, int success)
5549     -{
5550     - struct dm_snap_pending_exception *pe = context;
5551     -
5552     - pending_complete(pe, success);
5553     -}
5554     -
5555     static void complete_exception(struct dm_snap_pending_exception *pe)
5556     {
5557     struct dm_snapshot *s = pe->snap;
5558    
5559     - if (unlikely(pe->copy_error))
5560     - pending_complete(pe, 0);
5561     -
5562     - else
5563     - /* Update the metadata if we are persistent */
5564     - s->store->type->commit_exception(s->store, &pe->e,
5565     - commit_callback, pe);
5566     + /* Update the metadata if we are persistent */
5567     + s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error,
5568     + pending_complete, pe);
5569     }
5570    
5571     /*
5572     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
5573     index 63903a5a5d9e..a1cc797fe88f 100644
5574     --- a/drivers/md/dm-thin.c
5575     +++ b/drivers/md/dm-thin.c
5576     @@ -3453,8 +3453,8 @@ static void pool_postsuspend(struct dm_target *ti)
5577     struct pool_c *pt = ti->private;
5578     struct pool *pool = pt->pool;
5579    
5580     - cancel_delayed_work(&pool->waker);
5581     - cancel_delayed_work(&pool->no_space_timeout);
5582     + cancel_delayed_work_sync(&pool->waker);
5583     + cancel_delayed_work_sync(&pool->no_space_timeout);
5584     flush_workqueue(pool->wq);
5585     (void) commit(pool);
5586     }
5587     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
5588     index 5df40480228b..dd834927bc66 100644
5589     --- a/drivers/md/dm.c
5590     +++ b/drivers/md/dm.c
5591     @@ -1191,6 +1191,8 @@ static void dm_unprep_request(struct request *rq)
5592    
5593     if (clone)
5594     free_rq_clone(clone);
5595     + else if (!tio->md->queue->mq_ops)
5596     + free_rq_tio(tio);
5597     }
5598    
5599     /*
5600     diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
5601     index fca6dbcf9a47..7e44005595c1 100644
5602     --- a/drivers/md/persistent-data/dm-space-map-metadata.c
5603     +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
5604     @@ -152,12 +152,9 @@ static int brb_peek(struct bop_ring_buffer *brb, struct block_op *result)
5605    
5606     static int brb_pop(struct bop_ring_buffer *brb)
5607     {
5608     - struct block_op *bop;
5609     -
5610     if (brb_empty(brb))
5611     return -ENODATA;
5612    
5613     - bop = brb->bops + brb->begin;
5614     brb->begin = brb_next(brb, brb->begin);
5615    
5616     return 0;
5617     diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
5618     index c38ef1a72b4a..e2a3833170e3 100644
5619     --- a/drivers/media/dvb-core/dvb_frontend.c
5620     +++ b/drivers/media/dvb-core/dvb_frontend.c
5621     @@ -2313,9 +2313,9 @@ static int dvb_frontend_ioctl_legacy(struct file *file,
5622     dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n",
5623     __func__, c->delivery_system, fe->ops.info.type);
5624    
5625     - /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't
5626     - * do it, it is done for it. */
5627     - info->caps |= FE_CAN_INVERSION_AUTO;
5628     + /* Set the CAN_INVERSION_AUTO bit except in oneshot mode */
5629     + if (!(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT))
5630     + info->caps |= FE_CAN_INVERSION_AUTO;
5631     err = 0;
5632     break;
5633     }
5634     diff --git a/drivers/media/dvb-frontends/tda1004x.c b/drivers/media/dvb-frontends/tda1004x.c
5635     index 0e209b56c76c..c6abeb4fba9d 100644
5636     --- a/drivers/media/dvb-frontends/tda1004x.c
5637     +++ b/drivers/media/dvb-frontends/tda1004x.c
5638     @@ -903,9 +903,18 @@ static int tda1004x_get_fe(struct dvb_frontend *fe)
5639     {
5640     struct dtv_frontend_properties *fe_params = &fe->dtv_property_cache;
5641     struct tda1004x_state* state = fe->demodulator_priv;
5642     + int status;
5643    
5644     dprintk("%s\n", __func__);
5645    
5646     + status = tda1004x_read_byte(state, TDA1004X_STATUS_CD);
5647     + if (status == -1)
5648     + return -EIO;
5649     +
5650     + /* Only update the properties cache if device is locked */
5651     + if (!(status & 8))
5652     + return 0;
5653     +
5654     // inversion status
5655     fe_params->inversion = INVERSION_OFF;
5656     if (tda1004x_read_byte(state, TDA1004X_CONFC1) & 0x20)
5657     diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
5658     index 7830aef3db45..40f77685cc4a 100644
5659     --- a/drivers/media/rc/sunxi-cir.c
5660     +++ b/drivers/media/rc/sunxi-cir.c
5661     @@ -153,6 +153,8 @@ static int sunxi_ir_probe(struct platform_device *pdev)
5662     if (!ir)
5663     return -ENOMEM;
5664    
5665     + spin_lock_init(&ir->ir_lock);
5666     +
5667     if (of_device_is_compatible(dn, "allwinner,sun5i-a13-ir"))
5668     ir->fifo_size = 64;
5669     else
5670     diff --git a/drivers/media/tuners/si2157.c b/drivers/media/tuners/si2157.c
5671     index ce157edd45fa..0e1ca2b00e61 100644
5672     --- a/drivers/media/tuners/si2157.c
5673     +++ b/drivers/media/tuners/si2157.c
5674     @@ -168,6 +168,7 @@ static int si2157_init(struct dvb_frontend *fe)
5675     len = fw->data[fw->size - remaining];
5676     if (len > SI2157_ARGLEN) {
5677     dev_err(&client->dev, "Bad firmware length\n");
5678     + ret = -EINVAL;
5679     goto err_release_firmware;
5680     }
5681     memcpy(cmd.args, &fw->data[(fw->size - remaining) + 1], len);
5682     diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
5683     index 146071b8e116..bfff1d1c70ab 100644
5684     --- a/drivers/media/usb/gspca/ov534.c
5685     +++ b/drivers/media/usb/gspca/ov534.c
5686     @@ -1491,8 +1491,13 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
5687     struct v4l2_fract *tpf = &cp->timeperframe;
5688     struct sd *sd = (struct sd *) gspca_dev;
5689    
5690     - /* Set requested framerate */
5691     - sd->frame_rate = tpf->denominator / tpf->numerator;
5692     + if (tpf->numerator == 0 || tpf->denominator == 0)
5693     + /* Set default framerate */
5694     + sd->frame_rate = 30;
5695     + else
5696     + /* Set requested framerate */
5697     + sd->frame_rate = tpf->denominator / tpf->numerator;
5698     +
5699     if (gspca_dev->streaming)
5700     set_frame_rate(gspca_dev);
5701    
5702     diff --git a/drivers/media/usb/gspca/topro.c b/drivers/media/usb/gspca/topro.c
5703     index c70ff406b07a..c028a5c2438e 100644
5704     --- a/drivers/media/usb/gspca/topro.c
5705     +++ b/drivers/media/usb/gspca/topro.c
5706     @@ -4802,7 +4802,11 @@ static void sd_set_streamparm(struct gspca_dev *gspca_dev,
5707     struct v4l2_fract *tpf = &cp->timeperframe;
5708     int fr, i;
5709    
5710     - sd->framerate = tpf->denominator / tpf->numerator;
5711     + if (tpf->numerator == 0 || tpf->denominator == 0)
5712     + sd->framerate = 30;
5713     + else
5714     + sd->framerate = tpf->denominator / tpf->numerator;
5715     +
5716     if (gspca_dev->streaming)
5717     setframerate(gspca_dev, v4l2_ctrl_g_ctrl(gspca_dev->exposure));
5718    
5719     diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
5720     index 27b4b9e7c0c2..502984c724ff 100644
5721     --- a/drivers/media/v4l2-core/videobuf2-v4l2.c
5722     +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
5723     @@ -822,10 +822,10 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
5724     return res | POLLERR;
5725    
5726     /*
5727     - * For output streams you can write as long as there are fewer buffers
5728     - * queued than there are buffers available.
5729     + * For output streams you can call write() as long as there are fewer
5730     + * buffers queued than there are buffers available.
5731     */
5732     - if (q->is_output && q->queued_count < q->num_buffers)
5733     + if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
5734     return res | POLLOUT | POLLWRNORM;
5735    
5736     if (list_empty(&q->done_list)) {
5737     diff --git a/drivers/misc/cxl/vphb.c b/drivers/misc/cxl/vphb.c
5738     index c241e15cacb1..cbd4331fb45c 100644
5739     --- a/drivers/misc/cxl/vphb.c
5740     +++ b/drivers/misc/cxl/vphb.c
5741     @@ -203,7 +203,7 @@ static int cxl_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
5742     mask <<= shift;
5743     val <<= shift;
5744    
5745     - v = (in_le32(ioaddr) & ~mask) || (val & mask);
5746     + v = (in_le32(ioaddr) & ~mask) | (val & mask);
5747    
5748     out_le32(ioaddr, v);
5749     return PCIBIOS_SUCCESSFUL;
5750     diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
5751     index b2f2486b3d75..80f9afcb1382 100644
5752     --- a/drivers/misc/mei/main.c
5753     +++ b/drivers/misc/mei/main.c
5754     @@ -458,7 +458,11 @@ static int mei_ioctl_client_notify_request(struct file *file, u32 request)
5755     {
5756     struct mei_cl *cl = file->private_data;
5757    
5758     - return mei_cl_notify_request(cl, file, request);
5759     + if (request != MEI_HBM_NOTIFICATION_START &&
5760     + request != MEI_HBM_NOTIFICATION_STOP)
5761     + return -EINVAL;
5762     +
5763     + return mei_cl_notify_request(cl, file, (u8)request);
5764     }
5765    
5766     /**
5767     @@ -657,7 +661,9 @@ out:
5768     * @file: pointer to file structure
5769     * @band: band bitmap
5770     *
5771     - * Return: poll mask
5772     + * Return: negative on error,
5773     + * 0 if it made no changes,
5774     + * and positive if a process was added or deleted
5775     */
5776     static int mei_fasync(int fd, struct file *file, int band)
5777     {
5778     @@ -665,7 +671,7 @@ static int mei_fasync(int fd, struct file *file, int band)
5779     struct mei_cl *cl = file->private_data;
5780    
5781     if (!mei_cl_is_connected(cl))
5782     - return POLLERR;
5783     + return -ENODEV;
5784    
5785     return fasync_helper(fd, file, band, &cl->ev_async);
5786     }
5787     diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
5788     index 3a9a79ec4343..3d5087b03999 100644
5789     --- a/drivers/mmc/core/mmc.c
5790     +++ b/drivers/mmc/core/mmc.c
5791     @@ -1076,8 +1076,7 @@ static int mmc_select_hs400(struct mmc_card *card)
5792     mmc_set_clock(host, max_dtr);
5793    
5794     /* Switch card to HS mode */
5795     - val = EXT_CSD_TIMING_HS |
5796     - card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
5797     + val = EXT_CSD_TIMING_HS;
5798     err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
5799     EXT_CSD_HS_TIMING, val,
5800     card->ext_csd.generic_cmd6_time,
5801     @@ -1160,8 +1159,7 @@ int mmc_hs400_to_hs200(struct mmc_card *card)
5802     mmc_set_clock(host, max_dtr);
5803    
5804     /* Switch HS400 to HS DDR */
5805     - val = EXT_CSD_TIMING_HS |
5806     - card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
5807     + val = EXT_CSD_TIMING_HS;
5808     err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING,
5809     val, card->ext_csd.generic_cmd6_time,
5810     true, send_status, true);
5811     diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
5812     index 141eaa923e18..967535d76e34 100644
5813     --- a/drivers/mmc/core/sd.c
5814     +++ b/drivers/mmc/core/sd.c
5815     @@ -626,9 +626,9 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
5816     * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
5817     */
5818     if (!mmc_host_is_spi(card->host) &&
5819     - (card->sd_bus_speed == UHS_SDR50_BUS_SPEED ||
5820     - card->sd_bus_speed == UHS_DDR50_BUS_SPEED ||
5821     - card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) {
5822     + (card->host->ios.timing == MMC_TIMING_UHS_SDR50 ||
5823     + card->host->ios.timing == MMC_TIMING_UHS_DDR50 ||
5824     + card->host->ios.timing == MMC_TIMING_UHS_SDR104)) {
5825     err = mmc_execute_tuning(card);
5826    
5827     /*
5828     @@ -638,7 +638,7 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
5829     * difference between v3.00 and 3.01 spec means that CMD19
5830     * tuning is also available for DDR50 mode.
5831     */
5832     - if (err && card->sd_bus_speed == UHS_DDR50_BUS_SPEED) {
5833     + if (err && card->host->ios.timing == MMC_TIMING_UHS_DDR50) {
5834     pr_warn("%s: ddr50 tuning failed\n",
5835     mmc_hostname(card->host));
5836     err = 0;
5837     diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
5838     index 16d838e6d623..467b3cf80c44 100644
5839     --- a/drivers/mmc/core/sdio.c
5840     +++ b/drivers/mmc/core/sdio.c
5841     @@ -535,8 +535,8 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card)
5842     * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104.
5843     */
5844     if (!mmc_host_is_spi(card->host) &&
5845     - ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) ||
5846     - (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)))
5847     + ((card->host->ios.timing == MMC_TIMING_UHS_SDR50) ||
5848     + (card->host->ios.timing == MMC_TIMING_UHS_SDR104)))
5849     err = mmc_execute_tuning(card);
5850     out:
5851     return err;
5852     @@ -630,7 +630,7 @@ try_again:
5853     */
5854     if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) {
5855     err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
5856     - ocr);
5857     + ocr_card);
5858     if (err == -EAGAIN) {
5859     sdio_reset(host);
5860     mmc_go_idle(host);
5861     diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
5862     index fb266745f824..acece3299756 100644
5863     --- a/drivers/mmc/host/mmci.c
5864     +++ b/drivers/mmc/host/mmci.c
5865     @@ -1886,7 +1886,7 @@ static struct amba_id mmci_ids[] = {
5866     {
5867     .id = 0x00280180,
5868     .mask = 0x00ffffff,
5869     - .data = &variant_u300,
5870     + .data = &variant_nomadik,
5871     },
5872     {
5873     .id = 0x00480180,
5874     diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
5875     index ce08896b9d69..28a057fae0a1 100644
5876     --- a/drivers/mmc/host/pxamci.c
5877     +++ b/drivers/mmc/host/pxamci.c
5878     @@ -804,7 +804,7 @@ static int pxamci_probe(struct platform_device *pdev)
5879     dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
5880     goto out;
5881     } else {
5882     - mmc->caps |= host->pdata->gpio_card_ro_invert ?
5883     + mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
5884     0 : MMC_CAP2_RO_ACTIVE_HIGH;
5885     }
5886    
5887     diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
5888     index f6047fc94062..a5cda926d38e 100644
5889     --- a/drivers/mmc/host/sdhci-acpi.c
5890     +++ b/drivers/mmc/host/sdhci-acpi.c
5891     @@ -146,6 +146,33 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
5892     .ops = &sdhci_acpi_ops_int,
5893     };
5894    
5895     +static int bxt_get_cd(struct mmc_host *mmc)
5896     +{
5897     + int gpio_cd = mmc_gpio_get_cd(mmc);
5898     + struct sdhci_host *host = mmc_priv(mmc);
5899     + unsigned long flags;
5900     + int ret = 0;
5901     +
5902     + if (!gpio_cd)
5903     + return 0;
5904     +
5905     + pm_runtime_get_sync(mmc->parent);
5906     +
5907     + spin_lock_irqsave(&host->lock, flags);
5908     +
5909     + if (host->flags & SDHCI_DEVICE_DEAD)
5910     + goto out;
5911     +
5912     + ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
5913     +out:
5914     + spin_unlock_irqrestore(&host->lock, flags);
5915     +
5916     + pm_runtime_mark_last_busy(mmc->parent);
5917     + pm_runtime_put_autosuspend(mmc->parent);
5918     +
5919     + return ret;
5920     +}
5921     +
5922     static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
5923     const char *hid, const char *uid)
5924     {
5925     @@ -196,6 +223,9 @@ static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev,
5926    
5927     /* Platform specific code during sd probe slot goes here */
5928    
5929     + if (hid && !strcmp(hid, "80865ACA"))
5930     + host->mmc_host_ops.get_cd = bxt_get_cd;
5931     +
5932     return 0;
5933     }
5934    
5935     diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
5936     index cf7ad458b4f4..45ee07d3a761 100644
5937     --- a/drivers/mmc/host/sdhci-pci-core.c
5938     +++ b/drivers/mmc/host/sdhci-pci-core.c
5939     @@ -277,7 +277,7 @@ static int spt_select_drive_strength(struct sdhci_host *host,
5940     if (sdhci_pci_spt_drive_strength > 0)
5941     drive_strength = sdhci_pci_spt_drive_strength & 0xf;
5942     else
5943     - drive_strength = 1; /* 33-ohm */
5944     + drive_strength = 0; /* Default 50-ohm */
5945    
5946     if ((mmc_driver_type_mask(drive_strength) & card_drv) == 0)
5947     drive_strength = 0; /* Default 50-ohm */
5948     @@ -330,6 +330,33 @@ static void spt_read_drive_strength(struct sdhci_host *host)
5949     sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
5950     }
5951    
5952     +static int bxt_get_cd(struct mmc_host *mmc)
5953     +{
5954     + int gpio_cd = mmc_gpio_get_cd(mmc);
5955     + struct sdhci_host *host = mmc_priv(mmc);
5956     + unsigned long flags;
5957     + int ret = 0;
5958     +
5959     + if (!gpio_cd)
5960     + return 0;
5961     +
5962     + pm_runtime_get_sync(mmc->parent);
5963     +
5964     + spin_lock_irqsave(&host->lock, flags);
5965     +
5966     + if (host->flags & SDHCI_DEVICE_DEAD)
5967     + goto out;
5968     +
5969     + ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
5970     +out:
5971     + spin_unlock_irqrestore(&host->lock, flags);
5972     +
5973     + pm_runtime_mark_last_busy(mmc->parent);
5974     + pm_runtime_put_autosuspend(mmc->parent);
5975     +
5976     + return ret;
5977     +}
5978     +
5979     static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
5980     {
5981     slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
5982     @@ -362,6 +389,10 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
5983     slot->cd_con_id = NULL;
5984     slot->cd_idx = 0;
5985     slot->cd_override_level = true;
5986     + if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
5987     + slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
5988     + slot->host->mmc_host_ops.get_cd = bxt_get_cd;
5989     +
5990     return 0;
5991     }
5992    
5993     diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
5994     index b48565ed5616..8814eb6b83bf 100644
5995     --- a/drivers/mmc/host/sdhci.c
5996     +++ b/drivers/mmc/host/sdhci.c
5997     @@ -540,9 +540,12 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
5998    
5999     BUG_ON(len > 65536);
6000    
6001     - /* tran, valid */
6002     - sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID);
6003     - desc += host->desc_sz;
6004     + if (len) {
6005     + /* tran, valid */
6006     + sdhci_adma_write_desc(host, desc, addr, len,
6007     + ADMA2_TRAN_VALID);
6008     + desc += host->desc_sz;
6009     + }
6010    
6011     /*
6012     * If this triggers then we have a calculation bug
6013     @@ -1364,7 +1367,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
6014     sdhci_runtime_pm_get(host);
6015    
6016     /* Firstly check card presence */
6017     - present = sdhci_do_get_cd(host);
6018     + present = mmc->ops->get_cd(mmc);
6019    
6020     spin_lock_irqsave(&host->lock, flags);
6021    
6022     @@ -2760,7 +2763,7 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host)
6023    
6024     static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
6025     {
6026     - if (host->runtime_suspended || host->bus_on)
6027     + if (host->bus_on)
6028     return;
6029     host->bus_on = true;
6030     pm_runtime_get_noresume(host->mmc->parent);
6031     @@ -2768,7 +2771,7 @@ static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
6032    
6033     static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
6034     {
6035     - if (host->runtime_suspended || !host->bus_on)
6036     + if (!host->bus_on)
6037     return;
6038     host->bus_on = false;
6039     pm_runtime_put_noidle(host->mmc->parent);
6040     @@ -2861,6 +2864,8 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
6041    
6042     host = mmc_priv(mmc);
6043     host->mmc = mmc;
6044     + host->mmc_host_ops = sdhci_ops;
6045     + mmc->ops = &host->mmc_host_ops;
6046    
6047     return host;
6048     }
6049     @@ -3057,7 +3062,6 @@ int sdhci_add_host(struct sdhci_host *host)
6050     /*
6051     * Set host parameters.
6052     */
6053     - mmc->ops = &sdhci_ops;
6054     max_clk = host->max_clk;
6055    
6056     if (host->ops->get_min_clock)
6057     diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
6058     index 9d4aa31b683a..9c331ac5ad6b 100644
6059     --- a/drivers/mmc/host/sdhci.h
6060     +++ b/drivers/mmc/host/sdhci.h
6061     @@ -425,6 +425,7 @@ struct sdhci_host {
6062    
6063     /* Internal data */
6064     struct mmc_host *mmc; /* MMC structure */
6065     + struct mmc_host_ops mmc_host_ops; /* MMC host ops */
6066     u64 dma_mask; /* custom DMA mask */
6067    
6068     #if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
6069     diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c
6070     index 4498e92116b8..b47122d3e8d8 100644
6071     --- a/drivers/mmc/host/usdhi6rol0.c
6072     +++ b/drivers/mmc/host/usdhi6rol0.c
6073     @@ -1634,7 +1634,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
6074     struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work);
6075     struct mmc_request *mrq = host->mrq;
6076     struct mmc_data *data = mrq ? mrq->data : NULL;
6077     - struct scatterlist *sg = host->sg ?: data->sg;
6078     + struct scatterlist *sg;
6079    
6080     dev_warn(mmc_dev(host->mmc),
6081     "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n",
6082     @@ -1666,6 +1666,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
6083     case USDHI6_WAIT_FOR_MWRITE:
6084     case USDHI6_WAIT_FOR_READ:
6085     case USDHI6_WAIT_FOR_WRITE:
6086     + sg = host->sg ?: data->sg;
6087     dev_dbg(mmc_dev(host->mmc),
6088     "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n",
6089     data->flags & MMC_DATA_READ ? 'R' : 'W', host->page_idx,
6090     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
6091     index f1692e418fe4..28bbca0af238 100644
6092     --- a/drivers/net/bonding/bond_main.c
6093     +++ b/drivers/net/bonding/bond_main.c
6094     @@ -214,6 +214,8 @@ static void bond_uninit(struct net_device *bond_dev);
6095     static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
6096     struct rtnl_link_stats64 *stats);
6097     static void bond_slave_arr_handler(struct work_struct *work);
6098     +static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
6099     + int mod);
6100    
6101     /*---------------------------- General routines -----------------------------*/
6102    
6103     @@ -2418,7 +2420,7 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
6104     struct slave *slave)
6105     {
6106     struct arphdr *arp = (struct arphdr *)skb->data;
6107     - struct slave *curr_active_slave;
6108     + struct slave *curr_active_slave, *curr_arp_slave;
6109     unsigned char *arp_ptr;
6110     __be32 sip, tip;
6111     int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
6112     @@ -2465,26 +2467,41 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
6113     &sip, &tip);
6114    
6115     curr_active_slave = rcu_dereference(bond->curr_active_slave);
6116     + curr_arp_slave = rcu_dereference(bond->current_arp_slave);
6117    
6118     - /* Backup slaves won't see the ARP reply, but do come through
6119     - * here for each ARP probe (so we swap the sip/tip to validate
6120     - * the probe). In a "redundant switch, common router" type of
6121     - * configuration, the ARP probe will (hopefully) travel from
6122     - * the active, through one switch, the router, then the other
6123     - * switch before reaching the backup.
6124     + /* We 'trust' the received ARP enough to validate it if:
6125     + *
6126     + * (a) the slave receiving the ARP is active (which includes the
6127     + * current ARP slave, if any), or
6128     + *
6129     + * (b) the receiving slave isn't active, but there is a currently
6130     + * active slave and it received valid arp reply(s) after it became
6131     + * the currently active slave, or
6132     + *
6133     + * (c) there is an ARP slave that sent an ARP during the prior ARP
6134     + * interval, and we receive an ARP reply on any slave. We accept
6135     + * these because switch FDB update delays may deliver the ARP
6136     + * reply to a slave other than the sender of the ARP request.
6137     *
6138     - * We 'trust' the arp requests if there is an active slave and
6139     - * it received valid arp reply(s) after it became active. This
6140     - * is done to avoid endless looping when we can't reach the
6141     + * Note: for (b), backup slaves are receiving the broadcast ARP
6142     + * request, not a reply. This request passes from the sending
6143     + * slave through the L2 switch(es) to the receiving slave. Since
6144     + * this is checking the request, sip/tip are swapped for
6145     + * validation.
6146     + *
6147     + * This is done to avoid endless looping when we can't reach the
6148     * arp_ip_target and fool ourselves with our own arp requests.
6149     */
6150     -
6151     if (bond_is_active_slave(slave))
6152     bond_validate_arp(bond, slave, sip, tip);
6153     else if (curr_active_slave &&
6154     time_after(slave_last_rx(bond, curr_active_slave),
6155     curr_active_slave->last_link_up))
6156     bond_validate_arp(bond, slave, tip, sip);
6157     + else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
6158     + bond_time_in_interval(bond,
6159     + dev_trans_start(curr_arp_slave->dev), 1))
6160     + bond_validate_arp(bond, slave, sip, tip);
6161    
6162     out_unlock:
6163     if (arp != (struct arphdr *)skb->data)
6164     diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
6165     index fc5b75675cd8..eb7192fab593 100644
6166     --- a/drivers/net/can/usb/ems_usb.c
6167     +++ b/drivers/net/can/usb/ems_usb.c
6168     @@ -117,6 +117,9 @@ MODULE_LICENSE("GPL v2");
6169     */
6170     #define EMS_USB_ARM7_CLOCK 8000000
6171    
6172     +#define CPC_TX_QUEUE_TRIGGER_LOW 25
6173     +#define CPC_TX_QUEUE_TRIGGER_HIGH 35
6174     +
6175     /*
6176     * CAN-Message representation in a CPC_MSG. Message object type is
6177     * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
6178     @@ -278,6 +281,11 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
6179     switch (urb->status) {
6180     case 0:
6181     dev->free_slots = dev->intr_in_buffer[1];
6182     + if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
6183     + if (netif_queue_stopped(netdev)) {
6184     + netif_wake_queue(netdev);
6185     + }
6186     + }
6187     break;
6188    
6189     case -ECONNRESET: /* unlink */
6190     @@ -526,8 +534,6 @@ static void ems_usb_write_bulk_callback(struct urb *urb)
6191     /* Release context */
6192     context->echo_index = MAX_TX_URBS;
6193    
6194     - if (netif_queue_stopped(netdev))
6195     - netif_wake_queue(netdev);
6196     }
6197    
6198     /*
6199     @@ -587,7 +593,7 @@ static int ems_usb_start(struct ems_usb *dev)
6200     int err, i;
6201    
6202     dev->intr_in_buffer[0] = 0;
6203     - dev->free_slots = 15; /* initial size */
6204     + dev->free_slots = 50; /* initial size */
6205    
6206     for (i = 0; i < MAX_RX_URBS; i++) {
6207     struct urb *urb = NULL;
6208     @@ -835,7 +841,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
6209    
6210     /* Slow down tx path */
6211     if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
6212     - dev->free_slots < 5) {
6213     + dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
6214     netif_stop_queue(netdev);
6215     }
6216     }
6217     diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
6218     index b06dba05594a..2dea39b5cb0b 100644
6219     --- a/drivers/net/dsa/mv88e6xxx.c
6220     +++ b/drivers/net/dsa/mv88e6xxx.c
6221     @@ -1519,7 +1519,7 @@ int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port,
6222    
6223     /* no PVID with ranges, otherwise it's a bug */
6224     if (pvid)
6225     - err = _mv88e6xxx_port_pvid_set(ds, port, vid);
6226     + err = _mv88e6xxx_port_pvid_set(ds, port, vlan->vid_end);
6227     unlock:
6228     mutex_unlock(&ps->smi_mutex);
6229    
6230     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
6231     index 79789d8e52da..ca5ac5d6f4e6 100644
6232     --- a/drivers/net/ethernet/broadcom/tg3.c
6233     +++ b/drivers/net/ethernet/broadcom/tg3.c
6234     @@ -7833,6 +7833,14 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6235     return ret;
6236     }
6237    
6238     +static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
6239     +{
6240     + /* Check if we will never have enough descriptors,
6241     + * as gso_segs can be more than current ring size
6242     + */
6243     + return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
6244     +}
6245     +
6246     static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6247    
6248     /* Use GSO to workaround all TSO packets that meet HW bug conditions
6249     @@ -7936,14 +7944,19 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6250     * vlan encapsulated.
6251     */
6252     if (skb->protocol == htons(ETH_P_8021Q) ||
6253     - skb->protocol == htons(ETH_P_8021AD))
6254     - return tg3_tso_bug(tp, tnapi, txq, skb);
6255     + skb->protocol == htons(ETH_P_8021AD)) {
6256     + if (tg3_tso_bug_gso_check(tnapi, skb))
6257     + return tg3_tso_bug(tp, tnapi, txq, skb);
6258     + goto drop;
6259     + }
6260    
6261     if (!skb_is_gso_v6(skb)) {
6262     if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6263     - tg3_flag(tp, TSO_BUG))
6264     - return tg3_tso_bug(tp, tnapi, txq, skb);
6265     -
6266     + tg3_flag(tp, TSO_BUG)) {
6267     + if (tg3_tso_bug_gso_check(tnapi, skb))
6268     + return tg3_tso_bug(tp, tnapi, txq, skb);
6269     + goto drop;
6270     + }
6271     ip_csum = iph->check;
6272     ip_tot_len = iph->tot_len;
6273     iph->check = 0;
6274     @@ -8075,7 +8088,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6275     if (would_hit_hwbug) {
6276     tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6277    
6278     - if (mss) {
6279     + if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
6280     /* If it's a TSO packet, do GSO instead of
6281     * allocating and copying to a large linear SKB
6282     */
6283     diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
6284     index 1671fa3332c2..7ba6d530b0c0 100644
6285     --- a/drivers/net/ethernet/cisco/enic/enic.h
6286     +++ b/drivers/net/ethernet/cisco/enic/enic.h
6287     @@ -33,7 +33,7 @@
6288    
6289     #define DRV_NAME "enic"
6290     #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
6291     -#define DRV_VERSION "2.3.0.12"
6292     +#define DRV_VERSION "2.3.0.20"
6293     #define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
6294    
6295     #define ENIC_BARS_MAX 6
6296     diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
6297     index 1ffd1050860b..1fdf5fe12a95 100644
6298     --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
6299     +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
6300     @@ -298,7 +298,8 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
6301     int wait)
6302     {
6303     struct devcmd2_controller *dc2c = vdev->devcmd2;
6304     - struct devcmd2_result *result = dc2c->result + dc2c->next_result;
6305     + struct devcmd2_result *result;
6306     + u8 color;
6307     unsigned int i;
6308     int delay, err;
6309     u32 fetch_index, new_posted;
6310     @@ -336,13 +337,17 @@ static int _vnic_dev_cmd2(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
6311     if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
6312     return 0;
6313    
6314     + result = dc2c->result + dc2c->next_result;
6315     + color = dc2c->color;
6316     +
6317     + dc2c->next_result++;
6318     + if (dc2c->next_result == dc2c->result_size) {
6319     + dc2c->next_result = 0;
6320     + dc2c->color = dc2c->color ? 0 : 1;
6321     + }
6322     +
6323     for (delay = 0; delay < wait; delay++) {
6324     - if (result->color == dc2c->color) {
6325     - dc2c->next_result++;
6326     - if (dc2c->next_result == dc2c->result_size) {
6327     - dc2c->next_result = 0;
6328     - dc2c->color = dc2c->color ? 0 : 1;
6329     - }
6330     + if (result->color == color) {
6331     if (result->error) {
6332     err = result->error;
6333     if (err != ERR_ECMDUNKNOWN ||
6334     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6335     index 038f9ce391e6..1494997c4f7e 100644
6336     --- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6337     +++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
6338     @@ -236,6 +236,24 @@ static const struct ptp_clock_info mlx4_en_ptp_clock_info = {
6339     .enable = mlx4_en_phc_enable,
6340     };
6341    
6342     +#define MLX4_EN_WRAP_AROUND_SEC 10ULL
6343     +
6344     +/* This function calculates the max shift that enables the user range
6345     + * of MLX4_EN_WRAP_AROUND_SEC values in the cycles register.
6346     + */
6347     +static u32 freq_to_shift(u16 freq)
6348     +{
6349     + u32 freq_khz = freq * 1000;
6350     + u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
6351     + u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
6352     + max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
6353     + /* calculate max possible multiplier in order to fit in 64bit */
6354     + u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
6355     +
6356     + /* This comes from the reverse of clocksource_khz2mult */
6357     + return ilog2(div_u64(max_mul * freq_khz, 1000000));
6358     +}
6359     +
6360     void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
6361     {
6362     struct mlx4_dev *dev = mdev->dev;
6363     @@ -254,12 +272,7 @@ void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
6364     memset(&mdev->cycles, 0, sizeof(mdev->cycles));
6365     mdev->cycles.read = mlx4_en_read_clock;
6366     mdev->cycles.mask = CLOCKSOURCE_MASK(48);
6367     - /* Using shift to make calculation more accurate. Since current HW
6368     - * clock frequency is 427 MHz, and cycles are given using a 48 bits
6369     - * register, the biggest shift when calculating using u64, is 14
6370     - * (max_cycles * multiplier < 2^64)
6371     - */
6372     - mdev->cycles.shift = 14;
6373     + mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
6374     mdev->cycles.mult =
6375     clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
6376     mdev->nominal_c_mult = mdev->cycles.mult;
6377     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6378     index 7869f97de5da..67e9633ea9c7 100644
6379     --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6380     +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
6381     @@ -2381,8 +2381,6 @@ out:
6382     /* set offloads */
6383     priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
6384     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
6385     - priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6386     - priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
6387     }
6388    
6389     static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
6390     @@ -2393,8 +2391,6 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
6391     /* unset offloads */
6392     priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
6393     NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
6394     - priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
6395     - priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
6396    
6397     ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
6398     VXLAN_STEER_BY_OUTER_MAC, 0);
6399     @@ -3020,6 +3016,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
6400     priv->rss_hash_fn = ETH_RSS_HASH_TOP;
6401     }
6402    
6403     + if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
6404     + dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
6405     + dev->features |= NETIF_F_GSO_UDP_TUNNEL;
6406     + }
6407     +
6408     mdev->pndev[port] = dev;
6409     mdev->upper[port] = NULL;
6410    
6411     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
6412     index ee99e67187f5..3904b5fc0b7c 100644
6413     --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
6414     +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
6415     @@ -238,11 +238,11 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
6416     stats->collisions = 0;
6417     stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
6418     stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
6419     - stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6420     + stats->rx_over_errors = 0;
6421     stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
6422     stats->rx_frame_errors = 0;
6423     stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6424     - stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
6425     + stats->rx_missed_errors = 0;
6426     stats->tx_aborted_errors = 0;
6427     stats->tx_carrier_errors = 0;
6428     stats->tx_fifo_errors = 0;
6429     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6430     index 617fb22b5d81..7dbeafa65934 100644
6431     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6432     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
6433     @@ -45,6 +45,7 @@
6434     #include <linux/if_bridge.h>
6435     #include <linux/workqueue.h>
6436     #include <linux/jiffies.h>
6437     +#include <linux/rtnetlink.h>
6438     #include <net/switchdev.h>
6439    
6440     #include "spectrum.h"
6441     @@ -812,6 +813,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
6442    
6443     mlxsw_sp = container_of(work, struct mlxsw_sp, fdb_notify.dw.work);
6444    
6445     + rtnl_lock();
6446     do {
6447     mlxsw_reg_sfn_pack(sfn_pl);
6448     err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
6449     @@ -824,6 +826,7 @@ static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
6450     mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
6451    
6452     } while (num_rec);
6453     + rtnl_unlock();
6454    
6455     kfree(sfn_pl);
6456     mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
6457     diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
6458     index e9f2349e98bc..52ec3d6e056a 100644
6459     --- a/drivers/net/ethernet/rocker/rocker.c
6460     +++ b/drivers/net/ethernet/rocker/rocker.c
6461     @@ -3531,12 +3531,14 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
6462     info.addr = lw->addr;
6463     info.vid = lw->vid;
6464    
6465     + rtnl_lock();
6466     if (learned && removing)
6467     call_switchdev_notifiers(SWITCHDEV_FDB_DEL,
6468     lw->rocker_port->dev, &info.info);
6469     else if (learned && !removing)
6470     call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
6471     lw->rocker_port->dev, &info.info);
6472     + rtnl_unlock();
6473    
6474     rocker_port_kfree(lw->trans, work);
6475     }
6476     diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
6477     index 47b711739ba9..e6cefd0e3262 100644
6478     --- a/drivers/net/phy/dp83640.c
6479     +++ b/drivers/net/phy/dp83640.c
6480     @@ -845,6 +845,11 @@ static void decode_rxts(struct dp83640_private *dp83640,
6481     struct skb_shared_hwtstamps *shhwtstamps = NULL;
6482     struct sk_buff *skb;
6483     unsigned long flags;
6484     + u8 overflow;
6485     +
6486     + overflow = (phy_rxts->ns_hi >> 14) & 0x3;
6487     + if (overflow)
6488     + pr_debug("rx timestamp queue overflow, count %d\n", overflow);
6489    
6490     spin_lock_irqsave(&dp83640->rx_lock, flags);
6491    
6492     @@ -887,6 +892,7 @@ static void decode_txts(struct dp83640_private *dp83640,
6493     struct skb_shared_hwtstamps shhwtstamps;
6494     struct sk_buff *skb;
6495     u64 ns;
6496     + u8 overflow;
6497    
6498     /* We must already have the skb that triggered this. */
6499    
6500     @@ -896,6 +902,17 @@ static void decode_txts(struct dp83640_private *dp83640,
6501     pr_debug("have timestamp but tx_queue empty\n");
6502     return;
6503     }
6504     +
6505     + overflow = (phy_txts->ns_hi >> 14) & 0x3;
6506     + if (overflow) {
6507     + pr_debug("tx timestamp queue overflow, count %d\n", overflow);
6508     + while (skb) {
6509     + skb_complete_tx_timestamp(skb, NULL);
6510     + skb = skb_dequeue(&dp83640->tx_queue);
6511     + }
6512     + return;
6513     + }
6514     +
6515     ns = phy2txts(phy_txts);
6516     memset(&shhwtstamps, 0, sizeof(shhwtstamps));
6517     shhwtstamps.hwtstamp = ns_to_ktime(ns);
6518     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
6519     index 0a37f840fcc5..4e0068e775f9 100644
6520     --- a/drivers/net/ppp/pppoe.c
6521     +++ b/drivers/net/ppp/pppoe.c
6522     @@ -395,6 +395,8 @@ static int pppoe_rcv_core(struct sock *sk, struct sk_buff *skb)
6523    
6524     if (!__pppoe_xmit(sk_pppox(relay_po), skb))
6525     goto abort_put;
6526     +
6527     + sock_put(sk_pppox(relay_po));
6528     } else {
6529     if (sock_queue_rcv_skb(sk, skb))
6530     goto abort_kfree;
6531     diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
6532     index 597c53e0a2ec..f7e8c79349ad 100644
6533     --- a/drivers/net/ppp/pptp.c
6534     +++ b/drivers/net/ppp/pptp.c
6535     @@ -129,24 +129,27 @@ static int lookup_chan_dst(u16 call_id, __be32 d_addr)
6536     return i < MAX_CALLID;
6537     }
6538    
6539     -static int add_chan(struct pppox_sock *sock)
6540     +static int add_chan(struct pppox_sock *sock,
6541     + struct pptp_addr *sa)
6542     {
6543     static int call_id;
6544    
6545     spin_lock(&chan_lock);
6546     - if (!sock->proto.pptp.src_addr.call_id) {
6547     + if (!sa->call_id) {
6548     call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, call_id + 1);
6549     if (call_id == MAX_CALLID) {
6550     call_id = find_next_zero_bit(callid_bitmap, MAX_CALLID, 1);
6551     if (call_id == MAX_CALLID)
6552     goto out_err;
6553     }
6554     - sock->proto.pptp.src_addr.call_id = call_id;
6555     - } else if (test_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap))
6556     + sa->call_id = call_id;
6557     + } else if (test_bit(sa->call_id, callid_bitmap)) {
6558     goto out_err;
6559     + }
6560    
6561     - set_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
6562     - rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], sock);
6563     + sock->proto.pptp.src_addr = *sa;
6564     + set_bit(sa->call_id, callid_bitmap);
6565     + rcu_assign_pointer(callid_sock[sa->call_id], sock);
6566     spin_unlock(&chan_lock);
6567    
6568     return 0;
6569     @@ -416,7 +419,6 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
6570     struct sock *sk = sock->sk;
6571     struct sockaddr_pppox *sp = (struct sockaddr_pppox *) uservaddr;
6572     struct pppox_sock *po = pppox_sk(sk);
6573     - struct pptp_opt *opt = &po->proto.pptp;
6574     int error = 0;
6575    
6576     if (sockaddr_len < sizeof(struct sockaddr_pppox))
6577     @@ -424,10 +426,22 @@ static int pptp_bind(struct socket *sock, struct sockaddr *uservaddr,
6578    
6579     lock_sock(sk);
6580    
6581     - opt->src_addr = sp->sa_addr.pptp;
6582     - if (add_chan(po))
6583     + if (sk->sk_state & PPPOX_DEAD) {
6584     + error = -EALREADY;
6585     + goto out;
6586     + }
6587     +
6588     + if (sk->sk_state & PPPOX_BOUND) {
6589     error = -EBUSY;
6590     + goto out;
6591     + }
6592     +
6593     + if (add_chan(po, &sp->sa_addr.pptp))
6594     + error = -EBUSY;
6595     + else
6596     + sk->sk_state |= PPPOX_BOUND;
6597    
6598     +out:
6599     release_sock(sk);
6600     return error;
6601     }
6602     @@ -498,7 +512,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
6603     }
6604    
6605     opt->dst_addr = sp->sa_addr.pptp;
6606     - sk->sk_state = PPPOX_CONNECTED;
6607     + sk->sk_state |= PPPOX_CONNECTED;
6608    
6609     end:
6610     release_sock(sk);
6611     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
6612     index 5fccc5a8153f..982e0acd1a36 100644
6613     --- a/drivers/net/usb/qmi_wwan.c
6614     +++ b/drivers/net/usb/qmi_wwan.c
6615     @@ -492,6 +492,7 @@ static const struct usb_device_id products[] = {
6616    
6617     /* 3. Combined interface devices matching on interface number */
6618     {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
6619     + {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
6620     {QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
6621     {QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
6622     {QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
6623     diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
6624     index 405a7b6cca25..e0fcda4ddd55 100644
6625     --- a/drivers/net/vxlan.c
6626     +++ b/drivers/net/vxlan.c
6627     @@ -1984,11 +1984,6 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6628     vxlan->cfg.port_max, true);
6629    
6630     if (info) {
6631     - if (info->key.tun_flags & TUNNEL_CSUM)
6632     - flags |= VXLAN_F_UDP_CSUM;
6633     - else
6634     - flags &= ~VXLAN_F_UDP_CSUM;
6635     -
6636     ttl = info->key.ttl;
6637     tos = info->key.tos;
6638    
6639     @@ -2003,8 +1998,15 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6640     goto drop;
6641     sk = vxlan->vn4_sock->sock->sk;
6642    
6643     - if (info && (info->key.tun_flags & TUNNEL_DONT_FRAGMENT))
6644     - df = htons(IP_DF);
6645     + if (info) {
6646     + if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT)
6647     + df = htons(IP_DF);
6648     +
6649     + if (info->key.tun_flags & TUNNEL_CSUM)
6650     + flags |= VXLAN_F_UDP_CSUM;
6651     + else
6652     + flags &= ~VXLAN_F_UDP_CSUM;
6653     + }
6654    
6655     memset(&fl4, 0, sizeof(fl4));
6656     fl4.flowi4_oif = rdst ? rdst->remote_ifindex : 0;
6657     @@ -2102,6 +2104,13 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
6658     return;
6659     }
6660    
6661     + if (info) {
6662     + if (info->key.tun_flags & TUNNEL_CSUM)
6663     + flags &= ~VXLAN_F_UDP_ZERO_CSUM6_TX;
6664     + else
6665     + flags |= VXLAN_F_UDP_ZERO_CSUM6_TX;
6666     + }
6667     +
6668     ttl = ttl ? : ip6_dst_hoplimit(ndst);
6669     err = vxlan6_xmit_skb(ndst, sk, skb, dev, &saddr, &dst->sin6.sin6_addr,
6670     0, ttl, src_port, dst_port, htonl(vni << 8), md,
6671     diff --git a/drivers/net/wireless/iwlwifi/dvm/lib.c b/drivers/net/wireless/iwlwifi/dvm/lib.c
6672     index e18629a16fb0..0961f33de05e 100644
6673     --- a/drivers/net/wireless/iwlwifi/dvm/lib.c
6674     +++ b/drivers/net/wireless/iwlwifi/dvm/lib.c
6675     @@ -1154,6 +1154,9 @@ int iwlagn_suspend(struct iwl_priv *priv, struct cfg80211_wowlan *wowlan)
6676    
6677     priv->ucode_loaded = false;
6678     iwl_trans_stop_device(priv->trans);
6679     + ret = iwl_trans_start_hw(priv->trans);
6680     + if (ret)
6681     + goto out;
6682    
6683     priv->wowlan = true;
6684    
6685     diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
6686     index d6e0c1b5c20c..8215d7405f64 100644
6687     --- a/drivers/net/wireless/iwlwifi/mvm/scan.c
6688     +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
6689     @@ -1267,6 +1267,10 @@ int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
6690     return -EBUSY;
6691     }
6692    
6693     + /* we don't support "match all" in the firmware */
6694     + if (!req->n_match_sets)
6695     + return -EOPNOTSUPP;
6696     +
6697     ret = iwl_mvm_check_running_scans(mvm, type);
6698     if (ret)
6699     return ret;
6700     diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
6701     index 639761fb2bfb..d58c094f2f04 100644
6702     --- a/drivers/net/wireless/iwlwifi/pcie/drv.c
6703     +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
6704     @@ -384,6 +384,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6705     {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
6706     {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
6707     {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
6708     + {IWL_PCI_DEVICE(0x095A, 0x5C10, iwl7265_2ac_cfg)},
6709     {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
6710     {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
6711     {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
6712     @@ -401,10 +402,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
6713     {IWL_PCI_DEVICE(0x095A, 0x900A, iwl7265_2ac_cfg)},
6714     {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
6715     {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
6716     - {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
6717     + {IWL_PCI_DEVICE(0x095B, 0x9210, iwl7265_2ac_cfg)},
6718     {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
6719     {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
6720     - {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
6721     + {IWL_PCI_DEVICE(0x095B, 0x9310, iwl7265_2ac_cfg)},
6722     {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
6723     {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
6724     {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
6725     diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
6726     index 90283453073c..8c7204738aa3 100644
6727     --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
6728     +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
6729     @@ -7,6 +7,7 @@
6730     *
6731     * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
6732     * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6733     + * Copyright(c) 2016 Intel Deutschland GmbH
6734     *
6735     * This program is free software; you can redistribute it and/or modify
6736     * it under the terms of version 2 of the GNU General Public License as
6737     @@ -33,6 +34,7 @@
6738     *
6739     * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
6740     * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
6741     + * Copyright(c) 2016 Intel Deutschland GmbH
6742     * All rights reserved.
6743     *
6744     * Redistribution and use in source and binary forms, with or without
6745     @@ -924,9 +926,16 @@ monitor:
6746     if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
6747     iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
6748     trans_pcie->fw_mon_phys >> dest->base_shift);
6749     - iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6750     - (trans_pcie->fw_mon_phys +
6751     - trans_pcie->fw_mon_size) >> dest->end_shift);
6752     + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
6753     + iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6754     + (trans_pcie->fw_mon_phys +
6755     + trans_pcie->fw_mon_size - 256) >>
6756     + dest->end_shift);
6757     + else
6758     + iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
6759     + (trans_pcie->fw_mon_phys +
6760     + trans_pcie->fw_mon_size) >>
6761     + dest->end_shift);
6762     }
6763     }
6764    
6765     diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
6766     index f46c9d7f6528..7f471bff435c 100644
6767     --- a/drivers/net/wireless/realtek/rtlwifi/pci.c
6768     +++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
6769     @@ -801,7 +801,9 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6770     hw_queue);
6771     if (rx_remained_cnt == 0)
6772     return;
6773     -
6774     + buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc[
6775     + rtlpci->rx_ring[rxring_idx].idx];
6776     + pdesc = (struct rtl_rx_desc *)skb->data;
6777     } else { /* rx descriptor */
6778     pdesc = &rtlpci->rx_ring[rxring_idx].desc[
6779     rtlpci->rx_ring[rxring_idx].idx];
6780     @@ -824,13 +826,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
6781     new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
6782     if (unlikely(!new_skb))
6783     goto no_new;
6784     - if (rtlpriv->use_new_trx_flow) {
6785     - buffer_desc =
6786     - &rtlpci->rx_ring[rxring_idx].buffer_desc
6787     - [rtlpci->rx_ring[rxring_idx].idx];
6788     - /*means rx wifi info*/
6789     - pdesc = (struct rtl_rx_desc *)skb->data;
6790     - }
6791     memset(&rx_status , 0 , sizeof(rx_status));
6792     rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
6793     &rx_status, (u8 *)pdesc, skb);
6794     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6795     index 11344121c55e..47e32cb0ec1a 100644
6796     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6797     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c
6798     @@ -88,8 +88,6 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6799     u8 tid;
6800    
6801     rtl8188ee_bt_reg_init(hw);
6802     - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6803     -
6804     rtlpriv->dm.dm_initialgain_enable = 1;
6805     rtlpriv->dm.dm_flag = 0;
6806     rtlpriv->dm.disable_framebursting = 0;
6807     @@ -138,6 +136,11 @@ int rtl88e_init_sw_vars(struct ieee80211_hw *hw)
6808     rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6809     rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6810     rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6811     + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6812     + rtlpriv->cfg->mod_params->sw_crypto =
6813     + rtlpriv->cfg->mod_params->sw_crypto;
6814     + rtlpriv->cfg->mod_params->disable_watchdog =
6815     + rtlpriv->cfg->mod_params->disable_watchdog;
6816     if (rtlpriv->cfg->mod_params->disable_watchdog)
6817     pr_info("watchdog disabled\n");
6818     if (!rtlpriv->psc.inactiveps)
6819     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6820     index de6cb6c3a48c..4780bdc63b2b 100644
6821     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6822     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c
6823     @@ -139,6 +139,8 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
6824     rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6825     rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6826     rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6827     + rtlpriv->cfg->mod_params->sw_crypto =
6828     + rtlpriv->cfg->mod_params->sw_crypto;
6829     if (!rtlpriv->psc.inactiveps)
6830     pr_info("rtl8192ce: Power Save off (module option)\n");
6831     if (!rtlpriv->psc.fwctrl_lps)
6832     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6833     index fd4a5353d216..7c6f7f0d18c6 100644
6834     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6835     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c
6836     @@ -65,6 +65,8 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
6837     rtlpriv->dm.disable_framebursting = false;
6838     rtlpriv->dm.thermalvalue = 0;
6839     rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
6840     + rtlpriv->cfg->mod_params->sw_crypto =
6841     + rtlpriv->cfg->mod_params->sw_crypto;
6842    
6843     /* for firmware buf */
6844     rtlpriv->rtlhal.pfirmware = vzalloc(0x4000);
6845     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6846     index b19d0398215f..c6e09a19de1a 100644
6847     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6848     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c
6849     @@ -376,8 +376,8 @@ module_param_named(swlps, rtl92de_mod_params.swctrl_lps, bool, 0444);
6850     module_param_named(fwlps, rtl92de_mod_params.fwctrl_lps, bool, 0444);
6851     MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6852     MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6853     -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6854     -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6855     +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6856     +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6857     MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6858    
6859     static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6860     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6861     index e1fd27c888bf..31baca41ac2f 100644
6862     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6863     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c
6864     @@ -187,6 +187,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
6865     rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6866     rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6867     rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6868     + rtlpriv->cfg->mod_params->sw_crypto =
6869     + rtlpriv->cfg->mod_params->sw_crypto;
6870     if (!rtlpriv->psc.inactiveps)
6871     pr_info("Power Save off (module option)\n");
6872     if (!rtlpriv->psc.fwctrl_lps)
6873     @@ -425,8 +427,8 @@ module_param_named(swlps, rtl92se_mod_params.swctrl_lps, bool, 0444);
6874     module_param_named(fwlps, rtl92se_mod_params.fwctrl_lps, bool, 0444);
6875     MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6876     MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6877     -MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6878     -MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6879     +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 1)\n");
6880     +MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 0)\n");
6881     MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6882    
6883     static SIMPLE_DEV_PM_OPS(rtlwifi_pm_ops, rtl_pci_suspend, rtl_pci_resume);
6884     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6885     index 3859b3e3d158..ff49a8c0ff61 100644
6886     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6887     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c
6888     @@ -150,6 +150,11 @@ int rtl8723e_init_sw_vars(struct ieee80211_hw *hw)
6889     rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps;
6890     rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6891     rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6892     + rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6893     + rtlpriv->cfg->mod_params->sw_crypto =
6894     + rtlpriv->cfg->mod_params->sw_crypto;
6895     + rtlpriv->cfg->mod_params->disable_watchdog =
6896     + rtlpriv->cfg->mod_params->disable_watchdog;
6897     if (rtlpriv->cfg->mod_params->disable_watchdog)
6898     pr_info("watchdog disabled\n");
6899     rtlpriv->psc.reg_fwctrl_lps = 3;
6900     @@ -267,6 +272,8 @@ static struct rtl_mod_params rtl8723e_mod_params = {
6901     .swctrl_lps = false,
6902     .fwctrl_lps = true,
6903     .debug = DBG_EMERG,
6904     + .msi_support = false,
6905     + .disable_watchdog = false,
6906     };
6907    
6908     static struct rtl_hal_cfg rtl8723e_hal_cfg = {
6909     @@ -383,12 +390,14 @@ module_param_named(debug, rtl8723e_mod_params.debug, int, 0444);
6910     module_param_named(ips, rtl8723e_mod_params.inactiveps, bool, 0444);
6911     module_param_named(swlps, rtl8723e_mod_params.swctrl_lps, bool, 0444);
6912     module_param_named(fwlps, rtl8723e_mod_params.fwctrl_lps, bool, 0444);
6913     +module_param_named(msi, rtl8723e_mod_params.msi_support, bool, 0444);
6914     module_param_named(disable_watchdog, rtl8723e_mod_params.disable_watchdog,
6915     bool, 0444);
6916     MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
6917     MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 1)\n");
6918     MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n");
6919     MODULE_PARM_DESC(fwlps, "Set to 1 to use FW control power save (default 1)\n");
6920     +MODULE_PARM_DESC(msi, "Set to 1 to use MSI interrupts mode (default 0)\n");
6921     MODULE_PARM_DESC(debug, "Set debug level (0-5) (default 0)");
6922     MODULE_PARM_DESC(disable_watchdog, "Set to 1 to disable the watchdog (default 0)\n");
6923    
6924     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6925     index d091f1d5f91e..a78eaeda0008 100644
6926     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6927     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c
6928     @@ -93,7 +93,6 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
6929     struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
6930    
6931     rtl8723be_bt_reg_init(hw);
6932     - rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6933     rtlpriv->btcoexist.btc_ops = rtl_btc_get_ops_pointer();
6934    
6935     rtlpriv->dm.dm_initialgain_enable = 1;
6936     @@ -151,6 +150,10 @@ int rtl8723be_init_sw_vars(struct ieee80211_hw *hw)
6937     rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps;
6938     rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps;
6939     rtlpci->msi_support = rtlpriv->cfg->mod_params->msi_support;
6940     + rtlpriv->cfg->mod_params->sw_crypto =
6941     + rtlpriv->cfg->mod_params->sw_crypto;
6942     + rtlpriv->cfg->mod_params->disable_watchdog =
6943     + rtlpriv->cfg->mod_params->disable_watchdog;
6944     if (rtlpriv->cfg->mod_params->disable_watchdog)
6945     pr_info("watchdog disabled\n");
6946     rtlpriv->psc.reg_fwctrl_lps = 3;
6947     @@ -267,6 +270,9 @@ static struct rtl_mod_params rtl8723be_mod_params = {
6948     .inactiveps = true,
6949     .swctrl_lps = false,
6950     .fwctrl_lps = true,
6951     + .msi_support = false,
6952     + .disable_watchdog = false,
6953     + .debug = DBG_EMERG,
6954     };
6955    
6956     static struct rtl_hal_cfg rtl8723be_hal_cfg = {
6957     diff --git a/drivers/of/irq.c b/drivers/of/irq.c
6958     index 4fa916dffc91..72a2c1969646 100644
6959     --- a/drivers/of/irq.c
6960     +++ b/drivers/of/irq.c
6961     @@ -636,6 +636,13 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
6962     msi_base = be32_to_cpup(msi_map + 2);
6963     rid_len = be32_to_cpup(msi_map + 3);
6964    
6965     + if (rid_base & ~map_mask) {
6966     + dev_err(parent_dev,
6967     + "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
6968     + map_mask, rid_base);
6969     + return rid_out;
6970     + }
6971     +
6972     msi_controller_node = of_find_node_by_phandle(phandle);
6973    
6974     matched = (masked_rid >= rid_base &&
6975     @@ -655,7 +662,7 @@ static u32 __of_msi_map_rid(struct device *dev, struct device_node **np,
6976     if (!matched)
6977     return rid_out;
6978    
6979     - rid_out = masked_rid + msi_base;
6980     + rid_out = masked_rid - rid_base + msi_base;
6981     dev_dbg(dev,
6982     "msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
6983     dev_name(parent_dev), map_mask, rid_base, msi_base,
6984     diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
6985     index ff538568a617..0b3e0bfa7be5 100644
6986     --- a/drivers/pci/hotplug/acpiphp_glue.c
6987     +++ b/drivers/pci/hotplug/acpiphp_glue.c
6988     @@ -953,8 +953,10 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
6989     {
6990     pci_lock_rescan_remove();
6991    
6992     - if (slot->flags & SLOT_IS_GOING_AWAY)
6993     + if (slot->flags & SLOT_IS_GOING_AWAY) {
6994     + pci_unlock_rescan_remove();
6995     return -ENODEV;
6996     + }
6997    
6998     /* configure all functions */
6999     if (!(slot->flags & SLOT_ENABLED))
7000     diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
7001     index 0bf82a20a0fb..48d21e0edd56 100644
7002     --- a/drivers/pci/pcie/aer/aerdrv.c
7003     +++ b/drivers/pci/pcie/aer/aerdrv.c
7004     @@ -262,7 +262,6 @@ static struct aer_rpc *aer_alloc_rpc(struct pcie_device *dev)
7005     rpc->rpd = dev;
7006     INIT_WORK(&rpc->dpc_handler, aer_isr);
7007     mutex_init(&rpc->rpc_mutex);
7008     - init_waitqueue_head(&rpc->wait_release);
7009    
7010     /* Use PCIe bus function to store rpc into PCIe device */
7011     set_service_data(dev, rpc);
7012     @@ -285,8 +284,7 @@ static void aer_remove(struct pcie_device *dev)
7013     if (rpc->isr)
7014     free_irq(dev->irq, dev);
7015    
7016     - wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
7017     -
7018     + flush_work(&rpc->dpc_handler);
7019     aer_disable_rootport(rpc);
7020     kfree(rpc);
7021     set_service_data(dev, NULL);
7022     diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
7023     index 84420b7c9456..945c939a86c5 100644
7024     --- a/drivers/pci/pcie/aer/aerdrv.h
7025     +++ b/drivers/pci/pcie/aer/aerdrv.h
7026     @@ -72,7 +72,6 @@ struct aer_rpc {
7027     * recovery on the same
7028     * root port hierarchy
7029     */
7030     - wait_queue_head_t wait_release;
7031     };
7032    
7033     struct aer_broadcast_data {
7034     diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
7035     index fba785e9df75..4e14de0f0f98 100644
7036     --- a/drivers/pci/pcie/aer/aerdrv_core.c
7037     +++ b/drivers/pci/pcie/aer/aerdrv_core.c
7038     @@ -811,8 +811,6 @@ void aer_isr(struct work_struct *work)
7039     while (get_e_source(rpc, &e_src))
7040     aer_isr_one_error(p_device, &e_src);
7041     mutex_unlock(&rpc->rpc_mutex);
7042     -
7043     - wake_up(&rpc->wait_release);
7044     }
7045    
7046     /**
7047     diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
7048     index c777b97207d5..5f70fee59a94 100644
7049     --- a/drivers/pci/xen-pcifront.c
7050     +++ b/drivers/pci/xen-pcifront.c
7051     @@ -53,7 +53,7 @@ struct pcifront_device {
7052     };
7053    
7054     struct pcifront_sd {
7055     - int domain;
7056     + struct pci_sysdata sd;
7057     struct pcifront_device *pdev;
7058     };
7059    
7060     @@ -67,7 +67,9 @@ static inline void pcifront_init_sd(struct pcifront_sd *sd,
7061     unsigned int domain, unsigned int bus,
7062     struct pcifront_device *pdev)
7063     {
7064     - sd->domain = domain;
7065     + /* Because we do not expose that information via XenBus. */
7066     + sd->sd.node = first_online_node;
7067     + sd->sd.domain = domain;
7068     sd->pdev = pdev;
7069     }
7070    
7071     @@ -468,8 +470,8 @@ static int pcifront_scan_root(struct pcifront_device *pdev,
7072     dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
7073     domain, bus);
7074    
7075     - bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
7076     - sd = kmalloc(sizeof(*sd), GFP_KERNEL);
7077     + bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
7078     + sd = kzalloc(sizeof(*sd), GFP_KERNEL);
7079     if (!bus_entry || !sd) {
7080     err = -ENOMEM;
7081     goto err_out;
7082     diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
7083     index 8c7f27db6ad3..e7e574dc667a 100644
7084     --- a/drivers/phy/phy-core.c
7085     +++ b/drivers/phy/phy-core.c
7086     @@ -275,20 +275,21 @@ EXPORT_SYMBOL_GPL(phy_exit);
7087    
7088     int phy_power_on(struct phy *phy)
7089     {
7090     - int ret;
7091     + int ret = 0;
7092    
7093     if (!phy)
7094     - return 0;
7095     + goto out;
7096    
7097     if (phy->pwr) {
7098     ret = regulator_enable(phy->pwr);
7099     if (ret)
7100     - return ret;
7101     + goto out;
7102     }
7103    
7104     ret = phy_pm_runtime_get_sync(phy);
7105     if (ret < 0 && ret != -ENOTSUPP)
7106     - return ret;
7107     + goto err_pm_sync;
7108     +
7109     ret = 0; /* Override possible ret == -ENOTSUPP */
7110    
7111     mutex_lock(&phy->mutex);
7112     @@ -296,19 +297,20 @@ int phy_power_on(struct phy *phy)
7113     ret = phy->ops->power_on(phy);
7114     if (ret < 0) {
7115     dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
7116     - goto out;
7117     + goto err_pwr_on;
7118     }
7119     }
7120     ++phy->power_count;
7121     mutex_unlock(&phy->mutex);
7122     return 0;
7123    
7124     -out:
7125     +err_pwr_on:
7126     mutex_unlock(&phy->mutex);
7127     phy_pm_runtime_put_sync(phy);
7128     +err_pm_sync:
7129     if (phy->pwr)
7130     regulator_disable(phy->pwr);
7131     -
7132     +out:
7133     return ret;
7134     }
7135     EXPORT_SYMBOL_GPL(phy_power_on);
7136     diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
7137     index a313dfc0245f..d78ee151c9e4 100644
7138     --- a/drivers/platform/x86/ideapad-laptop.c
7139     +++ b/drivers/platform/x86/ideapad-laptop.c
7140     @@ -865,6 +865,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
7141     },
7142     },
7143     {
7144     + .ident = "Lenovo ideapad Y700-17ISK",
7145     + .matches = {
7146     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7147     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-17ISK"),
7148     + },
7149     + },
7150     + {
7151     .ident = "Lenovo Yoga 2 11 / 13 / Pro",
7152     .matches = {
7153     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7154     @@ -893,6 +900,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
7155     },
7156     },
7157     {
7158     + .ident = "Lenovo Yoga 700",
7159     + .matches = {
7160     + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7161     + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 700"),
7162     + },
7163     + },
7164     + {
7165     .ident = "Lenovo Yoga 900",
7166     .matches = {
7167     DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
7168     diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
7169     index c01302989ee4..b0f62141ea4d 100644
7170     --- a/drivers/platform/x86/toshiba_acpi.c
7171     +++ b/drivers/platform/x86/toshiba_acpi.c
7172     @@ -2484,6 +2484,14 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
7173     brightness = __get_lcd_brightness(dev);
7174     if (brightness < 0)
7175     return 0;
7176     + /*
7177     + * If transflective backlight is supported and the brightness is zero
7178     + * (lowest brightness level), the set_lcd_brightness function will
7179     + * activate the transflective backlight, making the LCD appear to be
7180     + * turned off; simply increment the brightness level to avoid that.
7181     + */
7182     + if (dev->tr_backlight_supported && brightness == 0)
7183     + brightness++;
7184     ret = set_lcd_brightness(dev, brightness);
7185     if (ret) {
7186     pr_debug("Backlight method is read-only, disabling backlight support\n");
7187     diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
7188     index 8df0b0e62976..00676208080e 100644
7189     --- a/drivers/regulator/Kconfig
7190     +++ b/drivers/regulator/Kconfig
7191     @@ -446,6 +446,7 @@ config REGULATOR_MC13892
7192     config REGULATOR_MT6311
7193     tristate "MediaTek MT6311 PMIC"
7194     depends on I2C
7195     + select REGMAP_I2C
7196     help
7197     Say y here to select this option to enable the power regulator of
7198     MediaTek MT6311 PMIC.
7199     diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c
7200     index 35de22fdb7a0..f2e1a39ce0f3 100644
7201     --- a/drivers/regulator/axp20x-regulator.c
7202     +++ b/drivers/regulator/axp20x-regulator.c
7203     @@ -27,8 +27,8 @@
7204     #define AXP20X_IO_ENABLED 0x03
7205     #define AXP20X_IO_DISABLED 0x07
7206    
7207     -#define AXP22X_IO_ENABLED 0x04
7208     -#define AXP22X_IO_DISABLED 0x03
7209     +#define AXP22X_IO_ENABLED 0x03
7210     +#define AXP22X_IO_DISABLED 0x04
7211    
7212     #define AXP20X_WORKMODE_DCDC2_MASK BIT(2)
7213     #define AXP20X_WORKMODE_DCDC3_MASK BIT(1)
7214     diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
7215     index a263c10359e1..4abfbdb285ec 100644
7216     --- a/drivers/s390/block/dasd.c
7217     +++ b/drivers/s390/block/dasd.c
7218     @@ -3031,6 +3031,7 @@ static void dasd_setup_queue(struct dasd_block *block)
7219     max = block->base->discipline->max_blocks << block->s2b_shift;
7220     }
7221     queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
7222     + block->request_queue->limits.max_dev_sectors = max;
7223     blk_queue_logical_block_size(block->request_queue,
7224     block->bp_block);
7225     blk_queue_max_hw_sectors(block->request_queue, max);
7226     diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
7227     index 184b1dbeb554..286782c60da4 100644
7228     --- a/drivers/s390/block/dasd_alias.c
7229     +++ b/drivers/s390/block/dasd_alias.c
7230     @@ -264,8 +264,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7231     spin_unlock_irqrestore(&lcu->lock, flags);
7232     cancel_work_sync(&lcu->suc_data.worker);
7233     spin_lock_irqsave(&lcu->lock, flags);
7234     - if (device == lcu->suc_data.device)
7235     + if (device == lcu->suc_data.device) {
7236     + dasd_put_device(device);
7237     lcu->suc_data.device = NULL;
7238     + }
7239     }
7240     was_pending = 0;
7241     if (device == lcu->ruac_data.device) {
7242     @@ -273,8 +275,10 @@ void dasd_alias_disconnect_device_from_lcu(struct dasd_device *device)
7243     was_pending = 1;
7244     cancel_delayed_work_sync(&lcu->ruac_data.dwork);
7245     spin_lock_irqsave(&lcu->lock, flags);
7246     - if (device == lcu->ruac_data.device)
7247     + if (device == lcu->ruac_data.device) {
7248     + dasd_put_device(device);
7249     lcu->ruac_data.device = NULL;
7250     + }
7251     }
7252     private->lcu = NULL;
7253     spin_unlock_irqrestore(&lcu->lock, flags);
7254     @@ -549,8 +553,10 @@ static void lcu_update_work(struct work_struct *work)
7255     if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
7256     DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
7257     " alias data in lcu (rc = %d), retry later", rc);
7258     - schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
7259     + if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
7260     + dasd_put_device(device);
7261     } else {
7262     + dasd_put_device(device);
7263     lcu->ruac_data.device = NULL;
7264     lcu->flags &= ~UPDATE_PENDING;
7265     }
7266     @@ -593,8 +599,10 @@ static int _schedule_lcu_update(struct alias_lcu *lcu,
7267     */
7268     if (!usedev)
7269     return -EINVAL;
7270     + dasd_get_device(usedev);
7271     lcu->ruac_data.device = usedev;
7272     - schedule_delayed_work(&lcu->ruac_data.dwork, 0);
7273     + if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
7274     + dasd_put_device(usedev);
7275     return 0;
7276     }
7277    
7278     @@ -723,7 +731,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
7279     ASCEBC((char *) &cqr->magic, 4);
7280     ccw = cqr->cpaddr;
7281     ccw->cmd_code = DASD_ECKD_CCW_RSCK;
7282     - ccw->flags = 0 ;
7283     + ccw->flags = CCW_FLAG_SLI;
7284     ccw->count = 16;
7285     ccw->cda = (__u32)(addr_t) cqr->data;
7286     ((char *)cqr->data)[0] = reason;
7287     @@ -930,6 +938,7 @@ static void summary_unit_check_handling_work(struct work_struct *work)
7288     /* 3. read new alias configuration */
7289     _schedule_lcu_update(lcu, device);
7290     lcu->suc_data.device = NULL;
7291     + dasd_put_device(device);
7292     spin_unlock_irqrestore(&lcu->lock, flags);
7293     }
7294    
7295     @@ -989,6 +998,8 @@ void dasd_alias_handle_summary_unit_check(struct dasd_device *device,
7296     }
7297     lcu->suc_data.reason = reason;
7298     lcu->suc_data.device = device;
7299     + dasd_get_device(device);
7300     spin_unlock(&lcu->lock);
7301     - schedule_work(&lcu->suc_data.worker);
7302     + if (!schedule_work(&lcu->suc_data.worker))
7303     + dasd_put_device(device);
7304     };
7305     diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
7306     index 16a1935cc9c1..e197c6f39de2 100644
7307     --- a/drivers/scsi/qla2xxx/qla_init.c
7308     +++ b/drivers/scsi/qla2xxx/qla_init.c
7309     @@ -2192,7 +2192,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
7310     /* Clear outstanding commands array. */
7311     for (que = 0; que < ha->max_req_queues; que++) {
7312     req = ha->req_q_map[que];
7313     - if (!req)
7314     + if (!req || !test_bit(que, ha->req_qid_map))
7315     continue;
7316     req->out_ptr = (void *)(req->ring + req->length);
7317     *req->out_ptr = 0;
7318     @@ -2209,7 +2209,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
7319    
7320     for (que = 0; que < ha->max_rsp_queues; que++) {
7321     rsp = ha->rsp_q_map[que];
7322     - if (!rsp)
7323     + if (!rsp || !test_bit(que, ha->rsp_qid_map))
7324     continue;
7325     rsp->in_ptr = (void *)(rsp->ring + rsp->length);
7326     *rsp->in_ptr = 0;
7327     @@ -4961,7 +4961,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
7328    
7329     for (i = 1; i < ha->max_rsp_queues; i++) {
7330     rsp = ha->rsp_q_map[i];
7331     - if (rsp) {
7332     + if (rsp && test_bit(i, ha->rsp_qid_map)) {
7333     rsp->options &= ~BIT_0;
7334     ret = qla25xx_init_rsp_que(base_vha, rsp);
7335     if (ret != QLA_SUCCESS)
7336     @@ -4976,8 +4976,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
7337     }
7338     for (i = 1; i < ha->max_req_queues; i++) {
7339     req = ha->req_q_map[i];
7340     - if (req) {
7341     - /* Clear outstanding commands array. */
7342     + if (req && test_bit(i, ha->req_qid_map)) {
7343     + /* Clear outstanding commands array. */
7344     req->options &= ~BIT_0;
7345     ret = qla25xx_init_req_que(base_vha, req);
7346     if (ret != QLA_SUCCESS)
7347     diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
7348     index ccf6a7f99024..0e59731f95ad 100644
7349     --- a/drivers/scsi/qla2xxx/qla_isr.c
7350     +++ b/drivers/scsi/qla2xxx/qla_isr.c
7351     @@ -3018,9 +3018,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
7352     "MSI-X: Failed to enable support "
7353     "-- %d/%d\n Retry with %d vectors.\n",
7354     ha->msix_count, ret, ret);
7355     + ha->msix_count = ret;
7356     + ha->max_rsp_queues = ha->msix_count - 1;
7357     }
7358     - ha->msix_count = ret;
7359     - ha->max_rsp_queues = ha->msix_count - 1;
7360     ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
7361     ha->msix_count, GFP_KERNEL);
7362     if (!ha->msix_entries) {
7363     diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
7364     index c5dd594f6c31..cf7ba52bae66 100644
7365     --- a/drivers/scsi/qla2xxx/qla_mid.c
7366     +++ b/drivers/scsi/qla2xxx/qla_mid.c
7367     @@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
7368     /* Delete request queues */
7369     for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
7370     req = ha->req_q_map[cnt];
7371     - if (req) {
7372     + if (req && test_bit(cnt, ha->req_qid_map)) {
7373     ret = qla25xx_delete_req_que(vha, req);
7374     if (ret != QLA_SUCCESS) {
7375     ql_log(ql_log_warn, vha, 0x00ea,
7376     @@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
7377     /* Delete response queues */
7378     for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
7379     rsp = ha->rsp_q_map[cnt];
7380     - if (rsp) {
7381     + if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
7382     ret = qla25xx_delete_rsp_que(vha, rsp);
7383     if (ret != QLA_SUCCESS) {
7384     ql_log(ql_log_warn, vha, 0x00eb,
7385     diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
7386     index bfa9a64c316b..fc6674db4f2d 100644
7387     --- a/drivers/scsi/qla2xxx/qla_os.c
7388     +++ b/drivers/scsi/qla2xxx/qla_os.c
7389     @@ -397,6 +397,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
7390     int cnt;
7391    
7392     for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
7393     + if (!test_bit(cnt, ha->req_qid_map))
7394     + continue;
7395     +
7396     req = ha->req_q_map[cnt];
7397     qla2x00_free_req_que(ha, req);
7398     }
7399     @@ -404,6 +407,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
7400     ha->req_q_map = NULL;
7401    
7402     for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
7403     + if (!test_bit(cnt, ha->rsp_qid_map))
7404     + continue;
7405     +
7406     rsp = ha->rsp_q_map[cnt];
7407     qla2x00_free_rsp_que(ha, rsp);
7408     }
7409     diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
7410     index ddbe2e7ac14d..c3e622524604 100644
7411     --- a/drivers/scsi/qla2xxx/qla_tmpl.c
7412     +++ b/drivers/scsi/qla2xxx/qla_tmpl.c
7413     @@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
7414     if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
7415     for (i = 0; i < vha->hw->max_req_queues; i++) {
7416     struct req_que *req = vha->hw->req_q_map[i];
7417     +
7418     + if (!test_bit(i, vha->hw->req_qid_map))
7419     + continue;
7420     +
7421     if (req || !buf) {
7422     length = req ?
7423     req->length : REQUEST_ENTRY_CNT_24XX;
7424     @@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
7425     } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
7426     for (i = 0; i < vha->hw->max_rsp_queues; i++) {
7427     struct rsp_que *rsp = vha->hw->rsp_q_map[i];
7428     +
7429     + if (!test_bit(i, vha->hw->rsp_qid_map))
7430     + continue;
7431     +
7432     if (rsp || !buf) {
7433     length = rsp ?
7434     rsp->length : RESPONSE_ENTRY_CNT_MQ;
7435     @@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
7436     if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
7437     for (i = 0; i < vha->hw->max_req_queues; i++) {
7438     struct req_que *req = vha->hw->req_q_map[i];
7439     +
7440     + if (!test_bit(i, vha->hw->req_qid_map))
7441     + continue;
7442     +
7443     if (req || !buf) {
7444     qla27xx_insert16(i, buf, len);
7445     qla27xx_insert16(1, buf, len);
7446     @@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
7447     } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
7448     for (i = 0; i < vha->hw->max_rsp_queues; i++) {
7449     struct rsp_que *rsp = vha->hw->rsp_q_map[i];
7450     +
7451     + if (!test_bit(i, vha->hw->rsp_qid_map))
7452     + continue;
7453     +
7454     if (rsp || !buf) {
7455     qla27xx_insert16(i, buf, len);
7456     qla27xx_insert16(1, buf, len);
7457     diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
7458     index 84fa4c46eaa6..bb669d32ccd0 100644
7459     --- a/drivers/scsi/sd.c
7460     +++ b/drivers/scsi/sd.c
7461     @@ -2893,7 +2893,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
7462     sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
7463     sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_CACHE_SIZE)
7464     rw_max = q->limits.io_opt =
7465     - logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
7466     + sdkp->opt_xfer_blocks * sdp->sector_size;
7467     else
7468     rw_max = BLK_DEF_MAX_SECTORS;
7469    
7470     diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
7471     index aebad36391c9..8feac599e9ab 100644
7472     --- a/drivers/spi/spi-atmel.c
7473     +++ b/drivers/spi/spi-atmel.c
7474     @@ -1571,6 +1571,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
7475    
7476     as->use_cs_gpios = true;
7477     if (atmel_spi_is_v2(as) &&
7478     + pdev->dev.of_node &&
7479     !of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
7480     as->use_cs_gpios = false;
7481     master->num_chipselect = 4;
7482     diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
7483     index 1f8903d356e5..ed8283e7397a 100644
7484     --- a/drivers/spi/spi-omap2-mcspi.c
7485     +++ b/drivers/spi/spi-omap2-mcspi.c
7486     @@ -1024,6 +1024,16 @@ static int omap2_mcspi_setup(struct spi_device *spi)
7487     spi->controller_state = cs;
7488     /* Link this to context save list */
7489     list_add_tail(&cs->node, &ctx->cs);
7490     +
7491     + if (gpio_is_valid(spi->cs_gpio)) {
7492     + ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
7493     + if (ret) {
7494     + dev_err(&spi->dev, "failed to request gpio\n");
7495     + return ret;
7496     + }
7497     + gpio_direction_output(spi->cs_gpio,
7498     + !(spi->mode & SPI_CS_HIGH));
7499     + }
7500     }
7501    
7502     if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
7503     @@ -1032,15 +1042,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
7504     return ret;
7505     }
7506    
7507     - if (gpio_is_valid(spi->cs_gpio)) {
7508     - ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
7509     - if (ret) {
7510     - dev_err(&spi->dev, "failed to request gpio\n");
7511     - return ret;
7512     - }
7513     - gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
7514     - }
7515     -
7516     ret = pm_runtime_get_sync(mcspi->dev);
7517     if (ret < 0)
7518     return ret;
7519     diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
7520     index 79ac19246548..70b8f4fabfad 100644
7521     --- a/drivers/staging/panel/panel.c
7522     +++ b/drivers/staging/panel/panel.c
7523     @@ -825,8 +825,7 @@ static void lcd_write_cmd_s(int cmd)
7524     lcd_send_serial(0x1F); /* R/W=W, RS=0 */
7525     lcd_send_serial(cmd & 0x0F);
7526     lcd_send_serial((cmd >> 4) & 0x0F);
7527     - /* the shortest command takes at least 40 us */
7528     - usleep_range(40, 100);
7529     + udelay(40); /* the shortest command takes at least 40 us */
7530     spin_unlock_irq(&pprt_lock);
7531     }
7532    
7533     @@ -837,8 +836,7 @@ static void lcd_write_data_s(int data)
7534     lcd_send_serial(0x5F); /* R/W=W, RS=1 */
7535     lcd_send_serial(data & 0x0F);
7536     lcd_send_serial((data >> 4) & 0x0F);
7537     - /* the shortest data takes at least 40 us */
7538     - usleep_range(40, 100);
7539     + udelay(40); /* the shortest data takes at least 40 us */
7540     spin_unlock_irq(&pprt_lock);
7541     }
7542    
7543     @@ -848,20 +846,19 @@ static void lcd_write_cmd_p8(int cmd)
7544     spin_lock_irq(&pprt_lock);
7545     /* present the data to the data port */
7546     w_dtr(pprt, cmd);
7547     - /* maintain the data during 20 us before the strobe */
7548     - usleep_range(20, 100);
7549     + udelay(20); /* maintain the data during 20 us before the strobe */
7550    
7551     bits.e = BIT_SET;
7552     bits.rs = BIT_CLR;
7553     bits.rw = BIT_CLR;
7554     set_ctrl_bits();
7555    
7556     - usleep_range(40, 100); /* maintain the strobe during 40 us */
7557     + udelay(40); /* maintain the strobe during 40 us */
7558    
7559     bits.e = BIT_CLR;
7560     set_ctrl_bits();
7561    
7562     - usleep_range(120, 500); /* the shortest command takes at least 120 us */
7563     + udelay(120); /* the shortest command takes at least 120 us */
7564     spin_unlock_irq(&pprt_lock);
7565     }
7566    
7567     @@ -871,20 +868,19 @@ static void lcd_write_data_p8(int data)
7568     spin_lock_irq(&pprt_lock);
7569     /* present the data to the data port */
7570     w_dtr(pprt, data);
7571     - /* maintain the data during 20 us before the strobe */
7572     - usleep_range(20, 100);
7573     + udelay(20); /* maintain the data during 20 us before the strobe */
7574    
7575     bits.e = BIT_SET;
7576     bits.rs = BIT_SET;
7577     bits.rw = BIT_CLR;
7578     set_ctrl_bits();
7579    
7580     - usleep_range(40, 100); /* maintain the strobe during 40 us */
7581     + udelay(40); /* maintain the strobe during 40 us */
7582    
7583     bits.e = BIT_CLR;
7584     set_ctrl_bits();
7585    
7586     - usleep_range(45, 100); /* the shortest data takes at least 45 us */
7587     + udelay(45); /* the shortest data takes at least 45 us */
7588     spin_unlock_irq(&pprt_lock);
7589     }
7590    
7591     @@ -894,7 +890,7 @@ static void lcd_write_cmd_tilcd(int cmd)
7592     spin_lock_irq(&pprt_lock);
7593     /* present the data to the control port */
7594     w_ctr(pprt, cmd);
7595     - usleep_range(60, 120);
7596     + udelay(60);
7597     spin_unlock_irq(&pprt_lock);
7598     }
7599    
7600     @@ -904,7 +900,7 @@ static void lcd_write_data_tilcd(int data)
7601     spin_lock_irq(&pprt_lock);
7602     /* present the data to the data port */
7603     w_dtr(pprt, data);
7604     - usleep_range(60, 120);
7605     + udelay(60);
7606     spin_unlock_irq(&pprt_lock);
7607     }
7608    
7609     @@ -947,7 +943,7 @@ static void lcd_clear_fast_s(void)
7610     lcd_send_serial(0x5F); /* R/W=W, RS=1 */
7611     lcd_send_serial(' ' & 0x0F);
7612     lcd_send_serial((' ' >> 4) & 0x0F);
7613     - usleep_range(40, 100); /* the shortest data takes at least 40 us */
7614     + udelay(40); /* the shortest data takes at least 40 us */
7615     }
7616     spin_unlock_irq(&pprt_lock);
7617    
7618     @@ -971,7 +967,7 @@ static void lcd_clear_fast_p8(void)
7619     w_dtr(pprt, ' ');
7620    
7621     /* maintain the data during 20 us before the strobe */
7622     - usleep_range(20, 100);
7623     + udelay(20);
7624    
7625     bits.e = BIT_SET;
7626     bits.rs = BIT_SET;
7627     @@ -979,13 +975,13 @@ static void lcd_clear_fast_p8(void)
7628     set_ctrl_bits();
7629    
7630     /* maintain the strobe during 40 us */
7631     - usleep_range(40, 100);
7632     + udelay(40);
7633    
7634     bits.e = BIT_CLR;
7635     set_ctrl_bits();
7636    
7637     /* the shortest data takes at least 45 us */
7638     - usleep_range(45, 100);
7639     + udelay(45);
7640     }
7641     spin_unlock_irq(&pprt_lock);
7642    
7643     @@ -1007,7 +1003,7 @@ static void lcd_clear_fast_tilcd(void)
7644     for (pos = 0; pos < lcd.height * lcd.hwidth; pos++) {
7645     /* present the data to the data port */
7646     w_dtr(pprt, ' ');
7647     - usleep_range(60, 120);
7648     + udelay(60);
7649     }
7650    
7651     spin_unlock_irq(&pprt_lock);
7652     diff --git a/drivers/staging/speakup/serialio.c b/drivers/staging/speakup/serialio.c
7653     index 3b5835b28128..a5bbb338f275 100644
7654     --- a/drivers/staging/speakup/serialio.c
7655     +++ b/drivers/staging/speakup/serialio.c
7656     @@ -6,6 +6,11 @@
7657     #include "spk_priv.h"
7658     #include "serialio.h"
7659    
7660     +#include <linux/serial_core.h>
7661     +/* WARNING: Do not change this to <linux/serial.h> without testing that
7662     + * SERIAL_PORT_DFNS does get defined to the appropriate value. */
7663     +#include <asm/serial.h>
7664     +
7665     #ifndef SERIAL_PORT_DFNS
7666     #define SERIAL_PORT_DFNS
7667     #endif
7668     @@ -23,9 +28,15 @@ const struct old_serial_port *spk_serial_init(int index)
7669     int baud = 9600, quot = 0;
7670     unsigned int cval = 0;
7671     int cflag = CREAD | HUPCL | CLOCAL | B9600 | CS8;
7672     - const struct old_serial_port *ser = rs_table + index;
7673     + const struct old_serial_port *ser;
7674     int err;
7675    
7676     + if (index >= ARRAY_SIZE(rs_table)) {
7677     + pr_info("no port info for ttyS%d\n", index);
7678     + return NULL;
7679     + }
7680     + ser = rs_table + index;
7681     +
7682     /* Divisor, bytesize and parity */
7683     quot = ser->baud_base / baud;
7684     cval = cflag & (CSIZE | CSTOPB);
7685     diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
7686     index 28fb3016370f..88029cc6de5e 100644
7687     --- a/drivers/target/target_core_tmr.c
7688     +++ b/drivers/target/target_core_tmr.c
7689     @@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
7690    
7691     if (dev) {
7692     spin_lock_irqsave(&dev->se_tmr_lock, flags);
7693     - list_del(&tmr->tmr_list);
7694     + list_del_init(&tmr->tmr_list);
7695     spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
7696     }
7697    
7698     kfree(tmr);
7699     }
7700    
7701     -static void core_tmr_handle_tas_abort(
7702     - struct se_node_acl *tmr_nacl,
7703     - struct se_cmd *cmd,
7704     - int tas)
7705     +static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
7706     {
7707     - bool remove = true;
7708     + unsigned long flags;
7709     + bool remove = true, send_tas;
7710     /*
7711     * TASK ABORTED status (TAS) bit support
7712     */
7713     - if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
7714     + spin_lock_irqsave(&cmd->t_state_lock, flags);
7715     + send_tas = (cmd->transport_state & CMD_T_TAS);
7716     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
7717     +
7718     + if (send_tas) {
7719     remove = false;
7720     transport_send_task_abort(cmd);
7721     }
7722     @@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
7723     return 1;
7724     }
7725    
7726     +static bool __target_check_io_state(struct se_cmd *se_cmd,
7727     + struct se_session *tmr_sess, int tas)
7728     +{
7729     + struct se_session *sess = se_cmd->se_sess;
7730     +
7731     + assert_spin_locked(&sess->sess_cmd_lock);
7732     + WARN_ON_ONCE(!irqs_disabled());
7733     + /*
7734     + * If command already reached CMD_T_COMPLETE state within
7735     + * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
7736     + * this se_cmd has been passed to fabric driver and will
7737     + * not be aborted.
7738     + *
7739     + * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
7740     + * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing,
7741     + * provided se_cmd->cmd_kref is still non-zero.
7742     + */
7743     + spin_lock(&se_cmd->t_state_lock);
7744     + if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
7745     + pr_debug("Attempted to abort io tag: %llu already complete or"
7746     + " fabric stop, skipping\n", se_cmd->tag);
7747     + spin_unlock(&se_cmd->t_state_lock);
7748     + return false;
7749     + }
7750     + if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
7751     + pr_debug("Attempted to abort io tag: %llu already shutdown,"
7752     + " skipping\n", se_cmd->tag);
7753     + spin_unlock(&se_cmd->t_state_lock);
7754     + return false;
7755     + }
7756     + se_cmd->transport_state |= CMD_T_ABORTED;
7757     +
7758     + if ((tmr_sess != se_cmd->se_sess) && tas)
7759     + se_cmd->transport_state |= CMD_T_TAS;
7760     +
7761     + spin_unlock(&se_cmd->t_state_lock);
7762     +
7763     + return kref_get_unless_zero(&se_cmd->cmd_kref);
7764     +}
7765     +
7766     void core_tmr_abort_task(
7767     struct se_device *dev,
7768     struct se_tmr_req *tmr,
7769     @@ -130,34 +172,22 @@ void core_tmr_abort_task(
7770     if (tmr->ref_task_tag != ref_tag)
7771     continue;
7772    
7773     - if (!kref_get_unless_zero(&se_cmd->cmd_kref))
7774     - continue;
7775     -
7776     printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
7777     se_cmd->se_tfo->get_fabric_name(), ref_tag);
7778    
7779     - spin_lock(&se_cmd->t_state_lock);
7780     - if (se_cmd->transport_state & CMD_T_COMPLETE) {
7781     - printk("ABORT_TASK: ref_tag: %llu already complete,"
7782     - " skipping\n", ref_tag);
7783     - spin_unlock(&se_cmd->t_state_lock);
7784     + if (!__target_check_io_state(se_cmd, se_sess, 0)) {
7785     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
7786     -
7787     target_put_sess_cmd(se_cmd);
7788     -
7789     goto out;
7790     }
7791     - se_cmd->transport_state |= CMD_T_ABORTED;
7792     - spin_unlock(&se_cmd->t_state_lock);
7793     -
7794     list_del_init(&se_cmd->se_cmd_list);
7795     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
7796    
7797     cancel_work_sync(&se_cmd->work);
7798     transport_wait_for_tasks(se_cmd);
7799    
7800     - target_put_sess_cmd(se_cmd);
7801     transport_cmd_finish_abort(se_cmd, true);
7802     + target_put_sess_cmd(se_cmd);
7803    
7804     printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
7805     " ref_tag: %llu\n", ref_tag);
7806     @@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
7807     struct list_head *preempt_and_abort_list)
7808     {
7809     LIST_HEAD(drain_tmr_list);
7810     + struct se_session *sess;
7811     struct se_tmr_req *tmr_p, *tmr_pp;
7812     struct se_cmd *cmd;
7813     unsigned long flags;
7814     + bool rc;
7815     /*
7816     * Release all pending and outgoing TMRs aside from the received
7817     * LUN_RESET tmr..
7818     @@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
7819     if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
7820     continue;
7821    
7822     + sess = cmd->se_sess;
7823     + if (WARN_ON_ONCE(!sess))
7824     + continue;
7825     +
7826     + spin_lock(&sess->sess_cmd_lock);
7827     spin_lock(&cmd->t_state_lock);
7828     - if (!(cmd->transport_state & CMD_T_ACTIVE)) {
7829     + if (!(cmd->transport_state & CMD_T_ACTIVE) ||
7830     + (cmd->transport_state & CMD_T_FABRIC_STOP)) {
7831     spin_unlock(&cmd->t_state_lock);
7832     + spin_unlock(&sess->sess_cmd_lock);
7833     continue;
7834     }
7835     if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
7836     spin_unlock(&cmd->t_state_lock);
7837     + spin_unlock(&sess->sess_cmd_lock);
7838     continue;
7839     }
7840     + if (sess->sess_tearing_down || cmd->cmd_wait_set) {
7841     + spin_unlock(&cmd->t_state_lock);
7842     + spin_unlock(&sess->sess_cmd_lock);
7843     + continue;
7844     + }
7845     + cmd->transport_state |= CMD_T_ABORTED;
7846     spin_unlock(&cmd->t_state_lock);
7847    
7848     + rc = kref_get_unless_zero(&cmd->cmd_kref);
7849     + if (!rc) {
7850     + printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
7851     + spin_unlock(&sess->sess_cmd_lock);
7852     + continue;
7853     + }
7854     + spin_unlock(&sess->sess_cmd_lock);
7855     +
7856     list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
7857     }
7858     spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
7859     @@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
7860     (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
7861     tmr_p->function, tmr_p->response, cmd->t_state);
7862    
7863     + cancel_work_sync(&cmd->work);
7864     + transport_wait_for_tasks(cmd);
7865     +
7866     transport_cmd_finish_abort(cmd, 1);
7867     + target_put_sess_cmd(cmd);
7868     }
7869     }
7870    
7871     static void core_tmr_drain_state_list(
7872     struct se_device *dev,
7873     struct se_cmd *prout_cmd,
7874     - struct se_node_acl *tmr_nacl,
7875     + struct se_session *tmr_sess,
7876     int tas,
7877     struct list_head *preempt_and_abort_list)
7878     {
7879     LIST_HEAD(drain_task_list);
7880     + struct se_session *sess;
7881     struct se_cmd *cmd, *next;
7882     unsigned long flags;
7883     + int rc;
7884    
7885     /*
7886     * Complete outstanding commands with TASK_ABORTED SAM status.
7887     @@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
7888     if (prout_cmd == cmd)
7889     continue;
7890    
7891     + sess = cmd->se_sess;
7892     + if (WARN_ON_ONCE(!sess))
7893     + continue;
7894     +
7895     + spin_lock(&sess->sess_cmd_lock);
7896     + rc = __target_check_io_state(cmd, tmr_sess, tas);
7897     + spin_unlock(&sess->sess_cmd_lock);
7898     + if (!rc)
7899     + continue;
7900     +
7901     list_move_tail(&cmd->state_list, &drain_task_list);
7902     cmd->state_active = false;
7903     }
7904     @@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
7905    
7906     while (!list_empty(&drain_task_list)) {
7907     cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
7908     - list_del(&cmd->state_list);
7909     + list_del_init(&cmd->state_list);
7910    
7911     pr_debug("LUN_RESET: %s cmd: %p"
7912     " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
7913     @@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
7914     * loop above, but we do it down here given that
7915     * cancel_work_sync may block.
7916     */
7917     - if (cmd->t_state == TRANSPORT_COMPLETE)
7918     - cancel_work_sync(&cmd->work);
7919     -
7920     - spin_lock_irqsave(&cmd->t_state_lock, flags);
7921     - target_stop_cmd(cmd, &flags);
7922     -
7923     - cmd->transport_state |= CMD_T_ABORTED;
7924     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
7925     + cancel_work_sync(&cmd->work);
7926     + transport_wait_for_tasks(cmd);
7927    
7928     - core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
7929     + core_tmr_handle_tas_abort(cmd, tas);
7930     + target_put_sess_cmd(cmd);
7931     }
7932     }
7933    
7934     @@ -334,6 +399,7 @@ int core_tmr_lun_reset(
7935     {
7936     struct se_node_acl *tmr_nacl = NULL;
7937     struct se_portal_group *tmr_tpg = NULL;
7938     + struct se_session *tmr_sess = NULL;
7939     int tas;
7940     /*
7941     * TASK_ABORTED status bit, this is configurable via ConfigFS
7942     @@ -352,8 +418,9 @@ int core_tmr_lun_reset(
7943     * or struct se_device passthrough..
7944     */
7945     if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
7946     - tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
7947     - tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
7948     + tmr_sess = tmr->task_cmd->se_sess;
7949     + tmr_nacl = tmr_sess->se_node_acl;
7950     + tmr_tpg = tmr_sess->se_tpg;
7951     if (tmr_nacl && tmr_tpg) {
7952     pr_debug("LUN_RESET: TMR caller fabric: %s"
7953     " initiator port %s\n",
7954     @@ -366,7 +433,7 @@ int core_tmr_lun_reset(
7955     dev->transport->name, tas);
7956    
7957     core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
7958     - core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
7959     + core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
7960     preempt_and_abort_list);
7961    
7962     /*
7963     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
7964     index 4fdcee2006d1..94f4ffac723f 100644
7965     --- a/drivers/target/target_core_transport.c
7966     +++ b/drivers/target/target_core_transport.c
7967     @@ -528,9 +528,6 @@ void transport_deregister_session(struct se_session *se_sess)
7968     }
7969     EXPORT_SYMBOL(transport_deregister_session);
7970    
7971     -/*
7972     - * Called with cmd->t_state_lock held.
7973     - */
7974     static void target_remove_from_state_list(struct se_cmd *cmd)
7975     {
7976     struct se_device *dev = cmd->se_dev;
7977     @@ -555,10 +552,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
7978     {
7979     unsigned long flags;
7980    
7981     - spin_lock_irqsave(&cmd->t_state_lock, flags);
7982     - if (write_pending)
7983     - cmd->t_state = TRANSPORT_WRITE_PENDING;
7984     -
7985     if (remove_from_lists) {
7986     target_remove_from_state_list(cmd);
7987    
7988     @@ -568,6 +561,10 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
7989     cmd->se_lun = NULL;
7990     }
7991    
7992     + spin_lock_irqsave(&cmd->t_state_lock, flags);
7993     + if (write_pending)
7994     + cmd->t_state = TRANSPORT_WRITE_PENDING;
7995     +
7996     /*
7997     * Determine if frontend context caller is requesting the stopping of
7998     * this command for frontend exceptions.
7999     @@ -621,6 +618,8 @@ static void transport_lun_remove_cmd(struct se_cmd *cmd)
8000    
8001     void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
8002     {
8003     + bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
8004     +
8005     if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
8006     transport_lun_remove_cmd(cmd);
8007     /*
8008     @@ -632,7 +631,7 @@ void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
8009    
8010     if (transport_cmd_check_stop_to_fabric(cmd))
8011     return;
8012     - if (remove)
8013     + if (remove && ack_kref)
8014     transport_put_cmd(cmd);
8015     }
8016    
8017     @@ -700,7 +699,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
8018     * Check for case where an explicit ABORT_TASK has been received
8019     * and transport_wait_for_tasks() will be waiting for completion..
8020     */
8021     - if (cmd->transport_state & CMD_T_ABORTED &&
8022     + if (cmd->transport_state & CMD_T_ABORTED ||
8023     cmd->transport_state & CMD_T_STOP) {
8024     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8025     complete_all(&cmd->t_transport_stop_comp);
8026     @@ -1850,19 +1849,21 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
8027     return true;
8028     }
8029    
8030     +static int __transport_check_aborted_status(struct se_cmd *, int);
8031     +
8032     void target_execute_cmd(struct se_cmd *cmd)
8033     {
8034     /*
8035     - * If the received CDB has aleady been aborted stop processing it here.
8036     - */
8037     - if (transport_check_aborted_status(cmd, 1))
8038     - return;
8039     -
8040     - /*
8041     * Determine if frontend context caller is requesting the stopping of
8042     * this command for frontend exceptions.
8043     + *
8044     + * If the received CDB has already been aborted, stop processing it here.
8045     */
8046     spin_lock_irq(&cmd->t_state_lock);
8047     + if (__transport_check_aborted_status(cmd, 1)) {
8048     + spin_unlock_irq(&cmd->t_state_lock);
8049     + return;
8050     + }
8051     if (cmd->transport_state & CMD_T_STOP) {
8052     pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
8053     __func__, __LINE__, cmd->tag);
8054     @@ -2213,20 +2214,14 @@ static inline void transport_free_pages(struct se_cmd *cmd)
8055     }
8056    
8057     /**
8058     - * transport_release_cmd - free a command
8059     - * @cmd: command to free
8060     + * transport_put_cmd - release a reference to a command
8061     + * @cmd: command to release
8062     *
8063     - * This routine unconditionally frees a command, and reference counting
8064     - * or list removal must be done in the caller.
8065     + * This routine releases our reference to the command and frees it if possible.
8066     */
8067     -static int transport_release_cmd(struct se_cmd *cmd)
8068     +static int transport_put_cmd(struct se_cmd *cmd)
8069     {
8070     BUG_ON(!cmd->se_tfo);
8071     -
8072     - if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
8073     - core_tmr_release_req(cmd->se_tmr_req);
8074     - if (cmd->t_task_cdb != cmd->__t_task_cdb)
8075     - kfree(cmd->t_task_cdb);
8076     /*
8077     * If this cmd has been setup with target_get_sess_cmd(), drop
8078     * the kref and call ->release_cmd() in kref callback.
8079     @@ -2234,18 +2229,6 @@ static int transport_release_cmd(struct se_cmd *cmd)
8080     return target_put_sess_cmd(cmd);
8081     }
8082    
8083     -/**
8084     - * transport_put_cmd - release a reference to a command
8085     - * @cmd: command to release
8086     - *
8087     - * This routine releases our reference to the command and frees it if possible.
8088     - */
8089     -static int transport_put_cmd(struct se_cmd *cmd)
8090     -{
8091     - transport_free_pages(cmd);
8092     - return transport_release_cmd(cmd);
8093     -}
8094     -
8095     void *transport_kmap_data_sg(struct se_cmd *cmd)
8096     {
8097     struct scatterlist *sg = cmd->t_data_sg;
8098     @@ -2441,34 +2424,58 @@ static void transport_write_pending_qf(struct se_cmd *cmd)
8099     }
8100     }
8101    
8102     -int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
8103     +static bool
8104     +__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
8105     + unsigned long *flags);
8106     +
8107     +static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
8108     {
8109     unsigned long flags;
8110     +
8111     + spin_lock_irqsave(&cmd->t_state_lock, flags);
8112     + __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
8113     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8114     +}
8115     +
8116     +int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
8117     +{
8118     int ret = 0;
8119     + bool aborted = false, tas = false;
8120    
8121     if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
8122     if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8123     - transport_wait_for_tasks(cmd);
8124     + target_wait_free_cmd(cmd, &aborted, &tas);
8125    
8126     - ret = transport_release_cmd(cmd);
8127     + if (!aborted || tas)
8128     + ret = transport_put_cmd(cmd);
8129     } else {
8130     if (wait_for_tasks)
8131     - transport_wait_for_tasks(cmd);
8132     + target_wait_free_cmd(cmd, &aborted, &tas);
8133     /*
8134     * Handle WRITE failure case where transport_generic_new_cmd()
8135     * has already added se_cmd to state_list, but fabric has
8136     * failed command before I/O submission.
8137     */
8138     - if (cmd->state_active) {
8139     - spin_lock_irqsave(&cmd->t_state_lock, flags);
8140     + if (cmd->state_active)
8141     target_remove_from_state_list(cmd);
8142     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8143     - }
8144    
8145     if (cmd->se_lun)
8146     transport_lun_remove_cmd(cmd);
8147    
8148     - ret = transport_put_cmd(cmd);
8149     + if (!aborted || tas)
8150     + ret = transport_put_cmd(cmd);
8151     + }
8152     + /*
8153     + * If the task has been internally aborted due to TMR ABORT_TASK
8154     + * or LUN_RESET, target_core_tmr.c is responsible for performing
8155     + * the remaining calls to target_put_sess_cmd(), and not the
8156     + * callers of this function.
8157     + */
8158     + if (aborted) {
8159     + pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
8160     + wait_for_completion(&cmd->cmd_wait_comp);
8161     + cmd->se_tfo->release_cmd(cmd);
8162     + ret = 1;
8163     }
8164     return ret;
8165     }
8166     @@ -2508,26 +2515,46 @@ out:
8167     }
8168     EXPORT_SYMBOL(target_get_sess_cmd);
8169    
8170     +static void target_free_cmd_mem(struct se_cmd *cmd)
8171     +{
8172     + transport_free_pages(cmd);
8173     +
8174     + if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
8175     + core_tmr_release_req(cmd->se_tmr_req);
8176     + if (cmd->t_task_cdb != cmd->__t_task_cdb)
8177     + kfree(cmd->t_task_cdb);
8178     +}
8179     +
8180     static void target_release_cmd_kref(struct kref *kref)
8181     {
8182     struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
8183     struct se_session *se_sess = se_cmd->se_sess;
8184     unsigned long flags;
8185     + bool fabric_stop;
8186    
8187     spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
8188     if (list_empty(&se_cmd->se_cmd_list)) {
8189     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8190     + target_free_cmd_mem(se_cmd);
8191     se_cmd->se_tfo->release_cmd(se_cmd);
8192     return;
8193     }
8194     - if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
8195     +
8196     + spin_lock(&se_cmd->t_state_lock);
8197     + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
8198     + spin_unlock(&se_cmd->t_state_lock);
8199     +
8200     + if (se_cmd->cmd_wait_set || fabric_stop) {
8201     + list_del_init(&se_cmd->se_cmd_list);
8202     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8203     + target_free_cmd_mem(se_cmd);
8204     complete(&se_cmd->cmd_wait_comp);
8205     return;
8206     }
8207     - list_del(&se_cmd->se_cmd_list);
8208     + list_del_init(&se_cmd->se_cmd_list);
8209     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8210    
8211     + target_free_cmd_mem(se_cmd);
8212     se_cmd->se_tfo->release_cmd(se_cmd);
8213     }
8214    
8215     @@ -2539,6 +2566,7 @@ int target_put_sess_cmd(struct se_cmd *se_cmd)
8216     struct se_session *se_sess = se_cmd->se_sess;
8217    
8218     if (!se_sess) {
8219     + target_free_cmd_mem(se_cmd);
8220     se_cmd->se_tfo->release_cmd(se_cmd);
8221     return 1;
8222     }
8223     @@ -2555,6 +2583,7 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
8224     {
8225     struct se_cmd *se_cmd;
8226     unsigned long flags;
8227     + int rc;
8228    
8229     spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
8230     if (se_sess->sess_tearing_down) {
8231     @@ -2564,8 +2593,15 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
8232     se_sess->sess_tearing_down = 1;
8233     list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
8234    
8235     - list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
8236     - se_cmd->cmd_wait_set = 1;
8237     + list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
8238     + rc = kref_get_unless_zero(&se_cmd->cmd_kref);
8239     + if (rc) {
8240     + se_cmd->cmd_wait_set = 1;
8241     + spin_lock(&se_cmd->t_state_lock);
8242     + se_cmd->transport_state |= CMD_T_FABRIC_STOP;
8243     + spin_unlock(&se_cmd->t_state_lock);
8244     + }
8245     + }
8246    
8247     spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
8248     }
8249     @@ -2578,15 +2614,25 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
8250     {
8251     struct se_cmd *se_cmd, *tmp_cmd;
8252     unsigned long flags;
8253     + bool tas;
8254    
8255     list_for_each_entry_safe(se_cmd, tmp_cmd,
8256     &se_sess->sess_wait_list, se_cmd_list) {
8257     - list_del(&se_cmd->se_cmd_list);
8258     + list_del_init(&se_cmd->se_cmd_list);
8259    
8260     pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
8261     " %d\n", se_cmd, se_cmd->t_state,
8262     se_cmd->se_tfo->get_cmd_state(se_cmd));
8263    
8264     + spin_lock_irqsave(&se_cmd->t_state_lock, flags);
8265     + tas = (se_cmd->transport_state & CMD_T_TAS);
8266     + spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
8267     +
8268     + if (!target_put_sess_cmd(se_cmd)) {
8269     + if (tas)
8270     + target_put_sess_cmd(se_cmd);
8271     + }
8272     +
8273     wait_for_completion(&se_cmd->cmd_wait_comp);
8274     pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
8275     " fabric state: %d\n", se_cmd, se_cmd->t_state,
8276     @@ -2608,53 +2654,75 @@ void transport_clear_lun_ref(struct se_lun *lun)
8277     wait_for_completion(&lun->lun_ref_comp);
8278     }
8279    
8280     -/**
8281     - * transport_wait_for_tasks - wait for completion to occur
8282     - * @cmd: command to wait
8283     - *
8284     - * Called from frontend fabric context to wait for storage engine
8285     - * to pause and/or release frontend generated struct se_cmd.
8286     - */
8287     -bool transport_wait_for_tasks(struct se_cmd *cmd)
8288     +static bool
8289     +__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
8290     + bool *aborted, bool *tas, unsigned long *flags)
8291     + __releases(&cmd->t_state_lock)
8292     + __acquires(&cmd->t_state_lock)
8293     {
8294     - unsigned long flags;
8295    
8296     - spin_lock_irqsave(&cmd->t_state_lock, flags);
8297     + assert_spin_locked(&cmd->t_state_lock);
8298     + WARN_ON_ONCE(!irqs_disabled());
8299     +
8300     + if (fabric_stop)
8301     + cmd->transport_state |= CMD_T_FABRIC_STOP;
8302     +
8303     + if (cmd->transport_state & CMD_T_ABORTED)
8304     + *aborted = true;
8305     +
8306     + if (cmd->transport_state & CMD_T_TAS)
8307     + *tas = true;
8308     +
8309     if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
8310     - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
8311     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8312     + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8313     return false;
8314     - }
8315    
8316     if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
8317     - !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
8318     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8319     + !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
8320     return false;
8321     - }
8322    
8323     - if (!(cmd->transport_state & CMD_T_ACTIVE)) {
8324     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8325     + if (!(cmd->transport_state & CMD_T_ACTIVE))
8326     + return false;
8327     +
8328     + if (fabric_stop && *aborted)
8329     return false;
8330     - }
8331    
8332     cmd->transport_state |= CMD_T_STOP;
8333    
8334     - pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
8335     - cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
8336     + pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
8337     + " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
8338     + cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
8339    
8340     - spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8341     + spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
8342    
8343     wait_for_completion(&cmd->t_transport_stop_comp);
8344    
8345     - spin_lock_irqsave(&cmd->t_state_lock, flags);
8346     + spin_lock_irqsave(&cmd->t_state_lock, *flags);
8347     cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
8348    
8349     - pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
8350     - cmd->tag);
8351     + pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
8352     + "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
8353     +
8354     + return true;
8355     +}
8356    
8357     +/**
8358     + * transport_wait_for_tasks - wait for completion to occur
8359     + * @cmd: command to wait
8360     + *
8361     + * Called from frontend fabric context to wait for storage engine
8362     + * to pause and/or release frontend generated struct se_cmd.
8363     + */
8364     +bool transport_wait_for_tasks(struct se_cmd *cmd)
8365     +{
8366     + unsigned long flags;
8367     + bool ret, aborted = false, tas = false;
8368     +
8369     + spin_lock_irqsave(&cmd->t_state_lock, flags);
8370     + ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
8371     spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8372    
8373     - return true;
8374     + return ret;
8375     }
8376     EXPORT_SYMBOL(transport_wait_for_tasks);
8377    
8378     @@ -2836,28 +2904,49 @@ transport_send_check_condition_and_sense(struct se_cmd *cmd,
8379     }
8380     EXPORT_SYMBOL(transport_send_check_condition_and_sense);
8381    
8382     -int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8383     +static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8384     + __releases(&cmd->t_state_lock)
8385     + __acquires(&cmd->t_state_lock)
8386     {
8387     + assert_spin_locked(&cmd->t_state_lock);
8388     + WARN_ON_ONCE(!irqs_disabled());
8389     +
8390     if (!(cmd->transport_state & CMD_T_ABORTED))
8391     return 0;
8392     -
8393     /*
8394     * If cmd has been aborted but either no status is to be sent or it has
8395     * already been sent, just return
8396     */
8397     - if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
8398     + if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
8399     + if (send_status)
8400     + cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
8401     return 1;
8402     + }
8403    
8404     - pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
8405     - cmd->t_task_cdb[0], cmd->tag);
8406     + pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
8407     + " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
8408    
8409     cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
8410     cmd->scsi_status = SAM_STAT_TASK_ABORTED;
8411     trace_target_cmd_complete(cmd);
8412     +
8413     + spin_unlock_irq(&cmd->t_state_lock);
8414     cmd->se_tfo->queue_status(cmd);
8415     + spin_lock_irq(&cmd->t_state_lock);
8416    
8417     return 1;
8418     }
8419     +
8420     +int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
8421     +{
8422     + int ret;
8423     +
8424     + spin_lock_irq(&cmd->t_state_lock);
8425     + ret = __transport_check_aborted_status(cmd, send_status);
8426     + spin_unlock_irq(&cmd->t_state_lock);
8427     +
8428     + return ret;
8429     +}
8430     EXPORT_SYMBOL(transport_check_aborted_status);
8431    
8432     void transport_send_task_abort(struct se_cmd *cmd)
8433     @@ -2879,11 +2968,17 @@ void transport_send_task_abort(struct se_cmd *cmd)
8434     */
8435     if (cmd->data_direction == DMA_TO_DEVICE) {
8436     if (cmd->se_tfo->write_pending_status(cmd) != 0) {
8437     - cmd->transport_state |= CMD_T_ABORTED;
8438     + spin_lock_irqsave(&cmd->t_state_lock, flags);
8439     + if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
8440     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8441     + goto send_abort;
8442     + }
8443     cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
8444     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8445     return;
8446     }
8447     }
8448     +send_abort:
8449     cmd->scsi_status = SAM_STAT_TASK_ABORTED;
8450    
8451     transport_lun_remove_cmd(cmd);
8452     @@ -2900,8 +2995,17 @@ static void target_tmr_work(struct work_struct *work)
8453     struct se_cmd *cmd = container_of(work, struct se_cmd, work);
8454     struct se_device *dev = cmd->se_dev;
8455     struct se_tmr_req *tmr = cmd->se_tmr_req;
8456     + unsigned long flags;
8457     int ret;
8458    
8459     + spin_lock_irqsave(&cmd->t_state_lock, flags);
8460     + if (cmd->transport_state & CMD_T_ABORTED) {
8461     + tmr->response = TMR_FUNCTION_REJECTED;
8462     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8463     + goto check_stop;
8464     + }
8465     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8466     +
8467     switch (tmr->function) {
8468     case TMR_ABORT_TASK:
8469     core_tmr_abort_task(dev, tmr, cmd->se_sess);
8470     @@ -2934,9 +3038,17 @@ static void target_tmr_work(struct work_struct *work)
8471     break;
8472     }
8473    
8474     + spin_lock_irqsave(&cmd->t_state_lock, flags);
8475     + if (cmd->transport_state & CMD_T_ABORTED) {
8476     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8477     + goto check_stop;
8478     + }
8479     cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
8480     + spin_unlock_irqrestore(&cmd->t_state_lock, flags);
8481     +
8482     cmd->se_tfo->queue_tm_rsp(cmd);
8483    
8484     +check_stop:
8485     transport_cmd_check_stop_to_fabric(cmd);
8486     }
8487    
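Note: the session-teardown hunk above hinges on kref_get_unless_zero(): a command is pinned for waiting only while its refcount is still non-zero, so shutdown can never resurrect a command whose release has already begun. A minimal sketch of the same idiom with an illustrative refcounted object (not the LIO API):

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj {
            struct kref ref;
            /* ... payload ... */
    };

    static void obj_release(struct kref *ref)
    {
            kfree(container_of(ref, struct obj, ref));
    }

    /* Pin @o only if it is still alive; false once release has begun. */
    static bool obj_get_unless_zero(struct obj *o)
    {
            return kref_get_unless_zero(&o->ref);
    }

    static void obj_put(struct obj *o)
    {
            kref_put(&o->ref, obj_release);
    }
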
8488     diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
8489     index 2f9f7086ac3d..ea9366ad3e6b 100644
8490     --- a/drivers/thermal/step_wise.c
8491     +++ b/drivers/thermal/step_wise.c
8492     @@ -63,6 +63,19 @@ static unsigned long get_target_state(struct thermal_instance *instance,
8493     next_target = instance->target;
8494     dev_dbg(&cdev->device, "cur_state=%ld\n", cur_state);
8495    
8496     + if (!instance->initialized) {
8497     + if (throttle) {
8498     + next_target = (cur_state + 1) >= instance->upper ?
8499     + instance->upper :
8500     + ((cur_state + 1) < instance->lower ?
8501     + instance->lower : (cur_state + 1));
8502     + } else {
8503     + next_target = THERMAL_NO_TARGET;
8504     + }
8505     +
8506     + return next_target;
8507     + }
8508     +
8509     switch (trend) {
8510     case THERMAL_TREND_RAISING:
8511     if (throttle) {
8512     @@ -149,7 +162,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
8513     dev_dbg(&instance->cdev->device, "old_target=%d, target=%d\n",
8514     old_target, (int)instance->target);
8515    
8516     - if (old_target == instance->target)
8517     + if (instance->initialized && old_target == instance->target)
8518     continue;
8519    
8520     /* Activate a passive thermal instance */
8521     @@ -161,7 +174,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
8522     instance->target == THERMAL_NO_TARGET)
8523     update_passive_instance(tz, trip_type, -1);
8524    
8525     -
8526     + instance->initialized = true;
8527     instance->cdev->updated = false; /* cdev needs update */
8528     }
8529    
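Note: on the first evaluation of a thermal instance the trend is meaningless, so the new !instance->initialized branch above simply steps one cooling state up and bounds the result. The nested ternary is equivalent to clamping cur_state + 1 into [instance->lower, instance->upper]; an explicit version of the same computation (a sketch, not the driver's code):

    static unsigned long first_target(unsigned long cur_state,
                                      unsigned long lower,
                                      unsigned long upper)
    {
            unsigned long next = cur_state + 1;

            if (next >= upper)
                    return upper;
            if (next < lower)
                    return lower;
            return next;
    }

In kernel code this could also be spelled with the clamp() macro from <linux/kernel.h>.
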
8530     diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
8531     index d9e525cc9c1c..ba08b5521382 100644
8532     --- a/drivers/thermal/thermal_core.c
8533     +++ b/drivers/thermal/thermal_core.c
8534     @@ -37,6 +37,7 @@
8535     #include <linux/of.h>
8536     #include <net/netlink.h>
8537     #include <net/genetlink.h>
8538     +#include <linux/suspend.h>
8539    
8540     #define CREATE_TRACE_POINTS
8541     #include <trace/events/thermal.h>
8542     @@ -59,6 +60,8 @@ static LIST_HEAD(thermal_governor_list);
8543     static DEFINE_MUTEX(thermal_list_lock);
8544     static DEFINE_MUTEX(thermal_governor_lock);
8545    
8546     +static atomic_t in_suspend;
8547     +
8548     static struct thermal_governor *def_governor;
8549    
8550     static struct thermal_governor *__find_governor(const char *name)
8551     @@ -532,14 +535,31 @@ static void update_temperature(struct thermal_zone_device *tz)
8552     mutex_unlock(&tz->lock);
8553    
8554     trace_thermal_temperature(tz);
8555     - dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
8556     - tz->last_temperature, tz->temperature);
8557     + if (tz->last_temperature == THERMAL_TEMP_INVALID)
8558     + dev_dbg(&tz->device, "last_temperature N/A, current_temperature=%d\n",
8559     + tz->temperature);
8560     + else
8561     + dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
8562     + tz->last_temperature, tz->temperature);
8563     +}
8564     +
8565     +static void thermal_zone_device_reset(struct thermal_zone_device *tz)
8566     +{
8567     + struct thermal_instance *pos;
8568     +
8569     + tz->temperature = THERMAL_TEMP_INVALID;
8570     + tz->passive = 0;
8571     + list_for_each_entry(pos, &tz->thermal_instances, tz_node)
8572     + pos->initialized = false;
8573     }
8574    
8575     void thermal_zone_device_update(struct thermal_zone_device *tz)
8576     {
8577     int count;
8578    
8579     + if (atomic_read(&in_suspend))
8580     + return;
8581     +
8582     if (!tz->ops->get_temp)
8583     return;
8584    
8585     @@ -1321,6 +1341,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
8586     if (!result) {
8587     list_add_tail(&dev->tz_node, &tz->thermal_instances);
8588     list_add_tail(&dev->cdev_node, &cdev->thermal_instances);
8589     + atomic_set(&tz->need_update, 1);
8590     }
8591     mutex_unlock(&cdev->lock);
8592     mutex_unlock(&tz->lock);
8593     @@ -1430,6 +1451,7 @@ __thermal_cooling_device_register(struct device_node *np,
8594     const struct thermal_cooling_device_ops *ops)
8595     {
8596     struct thermal_cooling_device *cdev;
8597     + struct thermal_zone_device *pos = NULL;
8598     int result;
8599    
8600     if (type && strlen(type) >= THERMAL_NAME_LENGTH)
8601     @@ -1474,6 +1496,12 @@ __thermal_cooling_device_register(struct device_node *np,
8602     /* Update binding information for 'this' new cdev */
8603     bind_cdev(cdev);
8604    
8605     + mutex_lock(&thermal_list_lock);
8606     + list_for_each_entry(pos, &thermal_tz_list, node)
8607     + if (atomic_cmpxchg(&pos->need_update, 1, 0))
8608     + thermal_zone_device_update(pos);
8609     + mutex_unlock(&thermal_list_lock);
8610     +
8611     return cdev;
8612     }
8613    
8614     @@ -1806,6 +1834,8 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
8615     tz->trips = trips;
8616     tz->passive_delay = passive_delay;
8617     tz->polling_delay = polling_delay;
8618     + /* A new thermal zone needs to be updated anyway. */
8619     + atomic_set(&tz->need_update, 1);
8620    
8621     dev_set_name(&tz->device, "thermal_zone%d", tz->id);
8622     result = device_register(&tz->device);
8623     @@ -1900,7 +1930,10 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
8624    
8625     INIT_DELAYED_WORK(&(tz->poll_queue), thermal_zone_device_check);
8626    
8627     - thermal_zone_device_update(tz);
8628     + thermal_zone_device_reset(tz);
8629     + /* Update the new thermal zone and mark it as already updated. */
8630     + if (atomic_cmpxchg(&tz->need_update, 1, 0))
8631     + thermal_zone_device_update(tz);
8632    
8633     return tz;
8634    
8635     @@ -2140,6 +2173,36 @@ static void thermal_unregister_governors(void)
8636     thermal_gov_power_allocator_unregister();
8637     }
8638    
8639     +static int thermal_pm_notify(struct notifier_block *nb,
8640     + unsigned long mode, void *_unused)
8641     +{
8642     + struct thermal_zone_device *tz;
8643     +
8644     + switch (mode) {
8645     + case PM_HIBERNATION_PREPARE:
8646     + case PM_RESTORE_PREPARE:
8647     + case PM_SUSPEND_PREPARE:
8648     + atomic_set(&in_suspend, 1);
8649     + break;
8650     + case PM_POST_HIBERNATION:
8651     + case PM_POST_RESTORE:
8652     + case PM_POST_SUSPEND:
8653     + atomic_set(&in_suspend, 0);
8654     + list_for_each_entry(tz, &thermal_tz_list, node) {
8655     + thermal_zone_device_reset(tz);
8656     + thermal_zone_device_update(tz);
8657     + }
8658     + break;
8659     + default:
8660     + break;
8661     + }
8662     + return 0;
8663     +}
8664     +
8665     +static struct notifier_block thermal_pm_nb = {
8666     + .notifier_call = thermal_pm_notify,
8667     +};
8668     +
8669     static int __init thermal_init(void)
8670     {
8671     int result;
8672     @@ -2160,6 +2223,11 @@ static int __init thermal_init(void)
8673     if (result)
8674     goto exit_netlink;
8675    
8676     + result = register_pm_notifier(&thermal_pm_nb);
8677     + if (result)
8678     + pr_warn("Thermal: Can not register suspend notifier, return %d\n",
8679     + result);
8680     +
8681     return 0;
8682    
8683     exit_netlink:
8684     @@ -2179,6 +2247,7 @@ error:
8685    
8686     static void __exit thermal_exit(void)
8687     {
8688     + unregister_pm_notifier(&thermal_pm_nb);
8689     of_thermal_destroy_zones();
8690     genetlink_exit();
8691     class_unregister(&thermal_class);
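Note: need_update is consumed with atomic_cmpxchg(&tz->need_update, 1, 0), so when zone registration and cooling-device registration race, exactly one path claims the pending update and the other sees it already taken. The claim idiom in isolation (illustrative names):

    #include <linux/atomic.h>

    static atomic_t need_work = ATOMIC_INIT(0);

    static void mark_work_pending(void)
    {
            atomic_set(&need_work, 1);
    }

    /* Returns true for exactly one claimant per mark_work_pending(). */
    static bool claim_work(void)
    {
            return atomic_cmpxchg(&need_work, 1, 0) == 1;
    }
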
8692     diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
8693     index d7ac1fccd659..749d41abfbab 100644
8694     --- a/drivers/thermal/thermal_core.h
8695     +++ b/drivers/thermal/thermal_core.h
8696     @@ -41,6 +41,7 @@ struct thermal_instance {
8697     struct thermal_zone_device *tz;
8698     struct thermal_cooling_device *cdev;
8699     int trip;
8700     + bool initialized;
8701     unsigned long upper; /* Highest cooling state for this trip point */
8702     unsigned long lower; /* Lowest cooling state for this trip point */
8703     unsigned long target; /* expected cooling state */
8704     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
8705     index e4c70dce3e7c..fa4e23930614 100644
8706     --- a/drivers/usb/class/cdc-acm.c
8707     +++ b/drivers/usb/class/cdc-acm.c
8708     @@ -1841,6 +1841,11 @@ static const struct usb_device_id acm_ids[] = {
8709     },
8710     #endif
8711    
8712     + /* Samsung phone in firmware update mode */
8713     + { USB_DEVICE(0x04e8, 0x685d),
8714     + .driver_info = IGNORE_DEVICE,
8715     + },
8716     +
8717     /* Exclude Infineon Flash Loader utility */
8718     { USB_DEVICE(0x058b, 0x0041),
8719     .driver_info = IGNORE_DEVICE,
8720     diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
8721     index 36f1cb74588c..78be201d81f4 100644
8722     --- a/drivers/usb/dwc3/core.h
8723     +++ b/drivers/usb/dwc3/core.h
8724     @@ -853,7 +853,6 @@ struct dwc3 {
8725     unsigned pullups_connected:1;
8726     unsigned resize_fifos:1;
8727     unsigned setup_packet_pending:1;
8728     - unsigned start_config_issued:1;
8729     unsigned three_stage_setup:1;
8730     unsigned usb3_lpm_capable:1;
8731    
8732     diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
8733     index 5320e939e090..b13912d5fa99 100644
8734     --- a/drivers/usb/dwc3/ep0.c
8735     +++ b/drivers/usb/dwc3/ep0.c
8736     @@ -555,7 +555,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
8737     int ret;
8738     u32 reg;
8739    
8740     - dwc->start_config_issued = false;
8741     cfg = le16_to_cpu(ctrl->wValue);
8742    
8743     switch (state) {
8744     @@ -737,10 +736,6 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
8745     dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
8746     ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
8747     break;
8748     - case USB_REQ_SET_INTERFACE:
8749     - dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
8750     - dwc->start_config_issued = false;
8751     - /* Fall through */
8752     default:
8753     dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
8754     ret = dwc3_ep0_delegate_req(dwc, ctrl);
8755     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
8756     index a58376fd65fe..69ffe6e8d77f 100644
8757     --- a/drivers/usb/dwc3/gadget.c
8758     +++ b/drivers/usb/dwc3/gadget.c
8759     @@ -388,24 +388,66 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
8760     dep->trb_pool_dma = 0;
8761     }
8762    
8763     +static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
8764     +
8765     +/**
8766     + * dwc3_gadget_start_config - Configure EP resources
8767     + * @dwc: pointer to our controller context structure
8768     + * @dep: endpoint that is being enabled
8769     + *
8770     + * The assignment of transfer resources cannot perfectly follow the
8771     + * data book due to the fact that the controller driver does not have
8772     + * all knowledge of the configuration in advance. It is given this
8773     + * information piecemeal by the composite gadget framework after every
8774     + * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
8775     + * programming model in this scenario can cause errors. For two
8776     + * programming model in this scenario can cause errors, for two
8777     + * reasons:
8778     + * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
8779     + * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
8780     + * multiple interfaces.
8781     + *
8782     + * 2) The databook does not mention doing more DEPXFERCFG for new
8783     + * endpoint on alt setting (8.1.6).
8784     + *
8785     + * The following simplified method is used instead:
8786     + *
8787     + * All hardware endpoints can be assigned a transfer resource and this
8788     + * setting will stay persistent until either a core reset or
8789     + * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
8790     + * do DEPXFERCFG for every hardware endpoint as well. We are
8791     + * guaranteed that there are as many transfer resources as endpoints.
8792     + *
8793     + * This function is called for each endpoint when it is being enabled
8794     + * but is triggered only when called for EP0-out, which always happens
8795     + * first, and which should only happen in one of the above conditions.
8796     + */
8797     static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
8798     {
8799     struct dwc3_gadget_ep_cmd_params params;
8800     u32 cmd;
8801     + int i;
8802     + int ret;
8803     +
8804     + if (dep->number)
8805     + return 0;
8806    
8807     memset(&params, 0x00, sizeof(params));
8808     + cmd = DWC3_DEPCMD_DEPSTARTCFG;
8809    
8810     - if (dep->number != 1) {
8811     - cmd = DWC3_DEPCMD_DEPSTARTCFG;
8812     - /* XferRscIdx == 0 for ep0 and 2 for the remaining */
8813     - if (dep->number > 1) {
8814     - if (dwc->start_config_issued)
8815     - return 0;
8816     - dwc->start_config_issued = true;
8817     - cmd |= DWC3_DEPCMD_PARAM(2);
8818     - }
8819     + ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
8820     + if (ret)
8821     + return ret;
8822    
8823     - return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
8824     + for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
8825     + struct dwc3_ep *dep = dwc->eps[i];
8826     +
8827     + if (!dep)
8828     + continue;
8829     +
8830     + ret = dwc3_gadget_set_xfer_resource(dwc, dep);
8831     + if (ret)
8832     + return ret;
8833     }
8834    
8835     return 0;
8836     @@ -519,10 +561,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
8837     struct dwc3_trb *trb_st_hw;
8838     struct dwc3_trb *trb_link;
8839    
8840     - ret = dwc3_gadget_set_xfer_resource(dwc, dep);
8841     - if (ret)
8842     - return ret;
8843     -
8844     dep->endpoint.desc = desc;
8845     dep->comp_desc = comp_desc;
8846     dep->type = usb_endpoint_type(desc);
8847     @@ -1604,8 +1642,6 @@ static int dwc3_gadget_start(struct usb_gadget *g,
8848     }
8849     dwc3_writel(dwc->regs, DWC3_DCFG, reg);
8850    
8851     - dwc->start_config_issued = false;
8852     -
8853     /* Start with SuperSpeed Default */
8854     dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
8855    
8856     @@ -2202,7 +2238,6 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
8857     dwc3_writel(dwc->regs, DWC3_DCTL, reg);
8858    
8859     dwc3_disconnect_gadget(dwc);
8860     - dwc->start_config_issued = false;
8861    
8862     dwc->gadget.speed = USB_SPEED_UNKNOWN;
8863     dwc->setup_packet_pending = false;
8864     @@ -2253,7 +2288,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
8865    
8866     dwc3_stop_active_transfers(dwc);
8867     dwc3_clear_stall_all_ep(dwc);
8868     - dwc->start_config_issued = false;
8869    
8870     /* Reset device address to zero */
8871     reg = dwc3_readl(dwc->regs, DWC3_DCFG);
8872     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
8873     index 1dd9919081f8..a7caf53d8b5e 100644
8874     --- a/drivers/usb/serial/cp210x.c
8875     +++ b/drivers/usb/serial/cp210x.c
8876     @@ -162,6 +162,8 @@ static const struct usb_device_id id_table[] = {
8877     { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
8878     { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
8879     { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
8880     + { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
8881     + { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
8882     { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
8883     { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
8884     { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
8885     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
8886     index db86e512e0fc..8849439a8f18 100644
8887     --- a/drivers/usb/serial/option.c
8888     +++ b/drivers/usb/serial/option.c
8889     @@ -315,6 +315,7 @@ static void option_instat_callback(struct urb *urb);
8890     #define TOSHIBA_PRODUCT_G450 0x0d45
8891    
8892     #define ALINK_VENDOR_ID 0x1e0e
8893     +#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
8894     #define ALINK_PRODUCT_PH300 0x9100
8895     #define ALINK_PRODUCT_3GU 0x9200
8896    
8897     @@ -607,6 +608,10 @@ static const struct option_blacklist_info zte_1255_blacklist = {
8898     .reserved = BIT(3) | BIT(4),
8899     };
8900    
8901     +static const struct option_blacklist_info simcom_sim7100e_blacklist = {
8902     + .reserved = BIT(5) | BIT(6),
8903     +};
8904     +
8905     static const struct option_blacklist_info telit_le910_blacklist = {
8906     .sendsetup = BIT(0),
8907     .reserved = BIT(1) | BIT(2),
8908     @@ -1122,6 +1127,8 @@ static const struct usb_device_id option_ids[] = {
8909     { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
8910     { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
8911     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
8912     + { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
8913     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
8914     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
8915     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
8916     { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
8917     @@ -1645,6 +1652,8 @@ static const struct usb_device_id option_ids[] = {
8918     { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
8919     { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
8920     { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
8921     + { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
8922     + .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
8923     { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
8924     .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
8925     },
8926     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
8927     index 7efc32945810..7d3e5d0e9aa4 100644
8928     --- a/drivers/virtio/virtio_balloon.c
8929     +++ b/drivers/virtio/virtio_balloon.c
8930     @@ -209,8 +209,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
8931     */
8932     if (vb->num_pfns != 0)
8933     tell_host(vb, vb->deflate_vq);
8934     - mutex_unlock(&vb->balloon_lock);
8935     release_pages_balloon(vb);
8936     + mutex_unlock(&vb->balloon_lock);
8937     return num_freed_pages;
8938     }
8939    
8940     diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
8941     index 78f804af6c20..2046a68ad0ba 100644
8942     --- a/drivers/virtio/virtio_pci_common.c
8943     +++ b/drivers/virtio/virtio_pci_common.c
8944     @@ -545,6 +545,7 @@ err_enable_device:
8945     static void virtio_pci_remove(struct pci_dev *pci_dev)
8946     {
8947     struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
8948     + struct device *dev = get_device(&vp_dev->vdev.dev);
8949    
8950     unregister_virtio_device(&vp_dev->vdev);
8951    
8952     @@ -554,6 +555,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
8953     virtio_pci_modern_remove(vp_dev);
8954    
8955     pci_disable_device(pci_dev);
8956     + put_device(dev);
8957     }
8958    
8959     static struct pci_driver virtio_pci_driver = {
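Note: the virtio-pci hunk takes a reference on the embedded struct device before unregistering, because unregister_virtio_device() may drop the last reference and free vp_dev while the remove path still has cleanup to do on it. The bracket reduced to its shape (struct thing, unregister_foo() and cleanup() are stand-ins, not real APIs):

    static void remove_thing(struct thing *obj)
    {
            struct device *dev = get_device(&obj->vdev.dev); /* pin memory */

            unregister_foo(obj);    /* may drop the last external ref */
            cleanup(obj);           /* obj still valid: we hold dev */
            put_device(dev);        /* last put: ->release() may free obj */
    }
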
8960     diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
8961     index 73dafdc494aa..fb0221434f81 100644
8962     --- a/drivers/xen/xen-pciback/pciback_ops.c
8963     +++ b/drivers/xen/xen-pciback/pciback_ops.c
8964     @@ -227,8 +227,9 @@ int xen_pcibk_enable_msix(struct xen_pcibk_device *pdev,
8965     /*
8966     * PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
8967     * to access the BARs where the MSI-X entries reside.
8968     + * But VF devices are unique in that the PF, not the VF, needs to be checked.
8969     */
8970     - pci_read_config_word(dev, PCI_COMMAND, &cmd);
8971     + pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
8972     if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
8973     return -ENXIO;
8974    
8975     @@ -332,6 +333,9 @@ void xen_pcibk_do_op(struct work_struct *data)
8976     struct xen_pcibk_dev_data *dev_data = NULL;
8977     struct xen_pci_op *op = &pdev->op;
8978     int test_intx = 0;
8979     +#ifdef CONFIG_PCI_MSI
8980     + unsigned int nr = 0;
8981     +#endif
8982    
8983     *op = pdev->sh_info->op;
8984     barrier();
8985     @@ -360,6 +364,7 @@ void xen_pcibk_do_op(struct work_struct *data)
8986     op->err = xen_pcibk_disable_msi(pdev, dev, op);
8987     break;
8988     case XEN_PCI_OP_enable_msix:
8989     + nr = op->value;
8990     op->err = xen_pcibk_enable_msix(pdev, dev, op);
8991     break;
8992     case XEN_PCI_OP_disable_msix:
8993     @@ -382,7 +387,7 @@ void xen_pcibk_do_op(struct work_struct *data)
8994     if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
8995     unsigned int i;
8996    
8997     - for (i = 0; i < op->value; i++)
8998     + for (i = 0; i < nr; i++)
8999     pdev->sh_info->op.msix_entries[i].vector =
9000     op->msix_entries[i].vector;
9001     }
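Note: caching op->value in the local nr before the enable call is a snapshot fix: the copy-back loop must bound on the count as originally requested, not on a field that may have been rewritten in the meantime. The general defensive shape when a count can change under you (shared layout and helpers are illustrative):

    /* 'shared' lives in memory an untrusted peer can write. */
    unsigned int nr = READ_ONCE(shared->count);     /* one snapshot */

    if (nr > MAX_ENTRIES)
            return -EINVAL;

    for (i = 0; i < nr; i++)        /* never re-read shared->count */
            copy_back(shared, i);
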
9002     diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
9003     index ad4eb1024d1f..51387d75c7bf 100644
9004     --- a/drivers/xen/xen-scsiback.c
9005     +++ b/drivers/xen/xen-scsiback.c
9006     @@ -939,12 +939,12 @@ out:
9007     spin_unlock_irqrestore(&info->v2p_lock, flags);
9008    
9009     out_free:
9010     - mutex_lock(&tpg->tv_tpg_mutex);
9011     - tpg->tv_tpg_fe_count--;
9012     - mutex_unlock(&tpg->tv_tpg_mutex);
9013     -
9014     - if (err)
9015     + if (err) {
9016     + mutex_lock(&tpg->tv_tpg_mutex);
9017     + tpg->tv_tpg_fe_count--;
9018     + mutex_unlock(&tpg->tv_tpg_mutex);
9019     kfree(new);
9020     + }
9021    
9022     return err;
9023     }
9024     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
9025     index 0ddca6734494..4958360a44f7 100644
9026     --- a/fs/btrfs/disk-io.c
9027     +++ b/fs/btrfs/disk-io.c
9028     @@ -1582,8 +1582,23 @@ int btrfs_init_fs_root(struct btrfs_root *root)
9029     ret = get_anon_bdev(&root->anon_dev);
9030     if (ret)
9031     goto free_writers;
9032     +
9033     + mutex_lock(&root->objectid_mutex);
9034     + ret = btrfs_find_highest_objectid(root,
9035     + &root->highest_objectid);
9036     + if (ret) {
9037     + mutex_unlock(&root->objectid_mutex);
9038     + goto free_root_dev;
9039     + }
9040     +
9041     + ASSERT(root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
9042     +
9043     + mutex_unlock(&root->objectid_mutex);
9044     +
9045     return 0;
9046    
9047     +free_root_dev:
9048     + free_anon_bdev(root->anon_dev);
9049     free_writers:
9050     btrfs_free_subvolume_writers(root->subv_writers);
9051     fail:
9052     @@ -2667,6 +2682,7 @@ int open_ctree(struct super_block *sb,
9053     if (btrfs_check_super_csum(bh->b_data)) {
9054     printk(KERN_ERR "BTRFS: superblock checksum mismatch\n");
9055     err = -EINVAL;
9056     + brelse(bh);
9057     goto fail_alloc;
9058     }
9059    
9060     @@ -2899,6 +2915,18 @@ retry_root_backup:
9061     tree_root->commit_root = btrfs_root_node(tree_root);
9062     btrfs_set_root_refs(&tree_root->root_item, 1);
9063    
9064     + mutex_lock(&tree_root->objectid_mutex);
9065     + ret = btrfs_find_highest_objectid(tree_root,
9066     + &tree_root->highest_objectid);
9067     + if (ret) {
9068     + mutex_unlock(&tree_root->objectid_mutex);
9069     + goto recovery_tree_root;
9070     + }
9071     +
9072     + ASSERT(tree_root->highest_objectid <= BTRFS_LAST_FREE_OBJECTID);
9073     +
9074     + mutex_unlock(&tree_root->objectid_mutex);
9075     +
9076     ret = btrfs_read_roots(fs_info, tree_root);
9077     if (ret)
9078     goto recovery_tree_root;
9079     diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
9080     index 767a6056ac45..07573dc1614a 100644
9081     --- a/fs/btrfs/inode-map.c
9082     +++ b/fs/btrfs/inode-map.c
9083     @@ -515,7 +515,7 @@ out:
9084     return ret;
9085     }
9086    
9087     -static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
9088     +int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid)
9089     {
9090     struct btrfs_path *path;
9091     int ret;
9092     @@ -555,13 +555,6 @@ int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid)
9093     int ret;
9094     mutex_lock(&root->objectid_mutex);
9095    
9096     - if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) {
9097     - ret = btrfs_find_highest_objectid(root,
9098     - &root->highest_objectid);
9099     - if (ret)
9100     - goto out;
9101     - }
9102     -
9103     if (unlikely(root->highest_objectid >= BTRFS_LAST_FREE_OBJECTID)) {
9104     ret = -ENOSPC;
9105     goto out;
9106     diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h
9107     index ddb347bfee23..c8e864b2d530 100644
9108     --- a/fs/btrfs/inode-map.h
9109     +++ b/fs/btrfs/inode-map.h
9110     @@ -9,5 +9,6 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
9111     struct btrfs_trans_handle *trans);
9112    
9113     int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid);
9114     +int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid);
9115    
9116     #endif
9117     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
9118     index 54b5f0de623b..52fc1b5e9f03 100644
9119     --- a/fs/btrfs/inode.c
9120     +++ b/fs/btrfs/inode.c
9121     @@ -6493,7 +6493,7 @@ out_unlock_inode:
9122     static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9123     struct dentry *dentry)
9124     {
9125     - struct btrfs_trans_handle *trans;
9126     + struct btrfs_trans_handle *trans = NULL;
9127     struct btrfs_root *root = BTRFS_I(dir)->root;
9128     struct inode *inode = d_inode(old_dentry);
9129     u64 index;
9130     @@ -6519,6 +6519,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9131     trans = btrfs_start_transaction(root, 5);
9132     if (IS_ERR(trans)) {
9133     err = PTR_ERR(trans);
9134     + trans = NULL;
9135     goto fail;
9136     }
9137    
9138     @@ -6552,9 +6553,10 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
9139     btrfs_log_new_name(trans, inode, NULL, parent);
9140     }
9141    
9142     - btrfs_end_transaction(trans, root);
9143     btrfs_balance_delayed_items(root);
9144     fail:
9145     + if (trans)
9146     + btrfs_end_transaction(trans, root);
9147     if (drop_inode) {
9148     inode_dec_link_count(inode);
9149     iput(inode);
9150     @@ -8548,15 +8550,28 @@ int btrfs_readpage(struct file *file, struct page *page)
9151     static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
9152     {
9153     struct extent_io_tree *tree;
9154     -
9155     + struct inode *inode = page->mapping->host;
9156     + int ret;
9157    
9158     if (current->flags & PF_MEMALLOC) {
9159     redirty_page_for_writepage(wbc, page);
9160     unlock_page(page);
9161     return 0;
9162     }
9163     +
9164     + /*
9165     + * If we are under memory pressure, this can be called directly from the
9166     + * VM, so we need to make sure we have the inode referenced for the ordered
9167     + * extent. If not, just return as if we didn't do anything.
9168     + */
9169     + if (!igrab(inode)) {
9170     + redirty_page_for_writepage(wbc, page);
9171     + return AOP_WRITEPAGE_ACTIVATE;
9172     + }
9173     tree = &BTRFS_I(page->mapping->host)->io_tree;
9174     - return extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9175     + ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
9176     + btrfs_add_delayed_iput(inode);
9177     + return ret;
9178     }
9179    
9180     static int btrfs_writepages(struct address_space *mapping,
9181     @@ -9650,9 +9665,11 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
9182     /*
9183     * 2 items for inode item and ref
9184     * 2 items for dir items
9185     + * 1 item for updating parent inode item
9186     + * 1 item for the inline extent item
9187     * 1 item for xattr if selinux is on
9188     */
9189     - trans = btrfs_start_transaction(root, 5);
9190     + trans = btrfs_start_transaction(root, 7);
9191     if (IS_ERR(trans))
9192     return PTR_ERR(trans);
9193    
9194     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
9195     index 08fd3f0f34fd..f07d01bc4875 100644
9196     --- a/fs/btrfs/ioctl.c
9197     +++ b/fs/btrfs/ioctl.c
9198     @@ -568,6 +568,10 @@ static noinline int create_subvol(struct inode *dir,
9199     goto fail;
9200     }
9201    
9202     + mutex_lock(&new_root->objectid_mutex);
9203     + new_root->highest_objectid = new_dirid;
9204     + mutex_unlock(&new_root->objectid_mutex);
9205     +
9206     /*
9207     * insert the directory item
9208     */
9209     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
9210     index 355a458cba1a..63a6152be04b 100644
9211     --- a/fs/btrfs/send.c
9212     +++ b/fs/btrfs/send.c
9213     @@ -1469,7 +1469,21 @@ static int read_symlink(struct btrfs_root *root,
9214     ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
9215     if (ret < 0)
9216     goto out;
9217     - BUG_ON(ret);
9218     + if (ret) {
9219     + /*
9220     + * An empty symlink inode. Can happen in rare error paths when
9221     + * creating a symlink (transaction committed before the inode
9222     + * eviction handler removed the symlink inode items and a crash
9223     + * happened in between, or the subvol was snapshotted in the meantime).
9224     + * Print an informative message to dmesg/syslog so that the user
9225     + * can delete the symlink.
9226     + */
9227     + btrfs_err(root->fs_info,
9228     + "Found empty symlink inode %llu at root %llu",
9229     + ino, root->root_key.objectid);
9230     + ret = -EIO;
9231     + goto out;
9232     + }
9233    
9234     ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
9235     struct btrfs_file_extent_item);
9236     diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
9237     index 24154e422945..fe609b81dd1b 100644
9238     --- a/fs/btrfs/super.c
9239     +++ b/fs/btrfs/super.c
9240     @@ -1956,6 +1956,8 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
9241     * there are other factors that may change the result (like a new metadata
9242     * chunk).
9243     *
9244     + * If metadata is exhausted, f_bavail will be 0.
9245     + *
9246     * FIXME: not accurate for mixed block groups, total and free/used are ok,
9247     * available appears slightly larger.
9248     */
9249     @@ -1967,11 +1969,13 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9250     struct btrfs_space_info *found;
9251     u64 total_used = 0;
9252     u64 total_free_data = 0;
9253     + u64 total_free_meta = 0;
9254     int bits = dentry->d_sb->s_blocksize_bits;
9255     __be32 *fsid = (__be32 *)fs_info->fsid;
9256     unsigned factor = 1;
9257     struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
9258     int ret;
9259     + u64 thresh = 0;
9260    
9261     /*
9262     * holding chunk_mutex to avoid allocating new chunks, holding
9263     @@ -1997,6 +2001,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9264     }
9265     }
9266     }
9267     + if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
9268     + total_free_meta += found->disk_total - found->disk_used;
9269    
9270     total_used += found->disk_used;
9271     }
9272     @@ -2019,6 +2025,24 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
9273     buf->f_bavail += div_u64(total_free_data, factor);
9274     buf->f_bavail = buf->f_bavail >> bits;
9275    
9276     + /*
9277     + * We calculate the remaining metadata space minus global reserve. If
9278     + * this is (supposedly) smaller than zero, there's no space. But this
9279     + * does not hold in practice; the exhausted state happens while there's still
9280     + * some positive delta. So we apply some guesswork and compare the
9281     + * delta to a 4M threshold. (Practically observed delta was ~2M.)
9282     + *
9283     + * We probably cannot calculate the exact threshold value because this
9284     + * depends on the internal reservations requested by various
9285     + * operations, so some operations that consume a little metadata will
9286     + * succeed even if the Avail is zero. But this is better than the other
9287     + * way around.
9288     + */
9289     + thresh = 4 * 1024 * 1024;
9290     +
9291     + if (total_free_meta - thresh < block_rsv->size)
9292     + buf->f_bavail = 0;
9293     +
9294     buf->f_type = BTRFS_SUPER_MAGIC;
9295     buf->f_bsize = dentry->d_sb->s_blocksize;
9296     buf->f_namelen = BTRFS_NAME_LEN;
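Note: to make the guesswork threshold concrete with assumed numbers: if total_free_meta is 10 MiB and the global reserve block_rsv->size is 8 MiB, then 10 MiB - 4 MiB = 6 MiB, which falls below the reserve, so f_bavail is reported as 0 even though some raw metadata space remains. With 16 MiB free, 16 - 4 = 12 MiB clears the 8 MiB reserve and f_bavail is left untouched.
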
9297     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
9298     index 9e084477d320..9c62a6f9757a 100644
9299     --- a/fs/btrfs/volumes.c
9300     +++ b/fs/btrfs/volumes.c
9301     @@ -232,6 +232,7 @@ static struct btrfs_device *__alloc_device(void)
9302     spin_lock_init(&dev->reada_lock);
9303     atomic_set(&dev->reada_in_flight, 0);
9304     atomic_set(&dev->dev_stats_ccnt, 0);
9305     + btrfs_device_data_ordered_init(dev);
9306     INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
9307     INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
9308    
9309     diff --git a/fs/direct-io.c b/fs/direct-io.c
9310     index 602e8441bc0f..01171d8a6ee9 100644
9311     --- a/fs/direct-io.c
9312     +++ b/fs/direct-io.c
9313     @@ -472,8 +472,8 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
9314     dio->io_error = -EIO;
9315    
9316     if (dio->is_async && dio->rw == READ && dio->should_dirty) {
9317     - bio_check_pages_dirty(bio); /* transfers ownership */
9318     err = bio->bi_error;
9319     + bio_check_pages_dirty(bio); /* transfers ownership */
9320     } else {
9321     bio_for_each_segment_all(bvec, bio, i) {
9322     struct page *page = bvec->bv_page;
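Note: the two-line swap in the dio hunk is a use-after-free fix in ordering terms: bio_check_pages_dirty() can drop the last reference to the bio, so bi_error has to be read before ownership is handed over. The shape of the bug and the fix (illustrative names):

    /* Buggy: the object may already be freed when status is read. */
    hand_off(obj);              /* may free obj */
    err = obj->status;          /* use-after-free */

    /* Fixed: capture what you need, then hand the object off. */
    err = obj->status;
    hand_off(obj);
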
9323     diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
9324     index 90001da9abfd..66842e55c48c 100644
9325     --- a/fs/efivarfs/file.c
9326     +++ b/fs/efivarfs/file.c
9327     @@ -10,6 +10,7 @@
9328     #include <linux/efi.h>
9329     #include <linux/fs.h>
9330     #include <linux/slab.h>
9331     +#include <linux/mount.h>
9332    
9333     #include "internal.h"
9334    
9335     @@ -103,9 +104,78 @@ out_free:
9336     return size;
9337     }
9338    
9339     +static int
9340     +efivarfs_ioc_getxflags(struct file *file, void __user *arg)
9341     +{
9342     + struct inode *inode = file->f_mapping->host;
9343     + unsigned int i_flags;
9344     + unsigned int flags = 0;
9345     +
9346     + i_flags = inode->i_flags;
9347     + if (i_flags & S_IMMUTABLE)
9348     + flags |= FS_IMMUTABLE_FL;
9349     +
9350     + if (copy_to_user(arg, &flags, sizeof(flags)))
9351     + return -EFAULT;
9352     + return 0;
9353     +}
9354     +
9355     +static int
9356     +efivarfs_ioc_setxflags(struct file *file, void __user *arg)
9357     +{
9358     + struct inode *inode = file->f_mapping->host;
9359     + unsigned int flags;
9360     + unsigned int i_flags = 0;
9361     + int error;
9362     +
9363     + if (!inode_owner_or_capable(inode))
9364     + return -EACCES;
9365     +
9366     + if (copy_from_user(&flags, arg, sizeof(flags)))
9367     + return -EFAULT;
9368     +
9369     + if (flags & ~FS_IMMUTABLE_FL)
9370     + return -EOPNOTSUPP;
9371     +
9372     + if (!capable(CAP_LINUX_IMMUTABLE))
9373     + return -EPERM;
9374     +
9375     + if (flags & FS_IMMUTABLE_FL)
9376     + i_flags |= S_IMMUTABLE;
9377     +
9378     +
9379     + error = mnt_want_write_file(file);
9380     + if (error)
9381     + return error;
9382     +
9383     + mutex_lock(&inode->i_mutex);
9384     + inode_set_flags(inode, i_flags, S_IMMUTABLE);
9385     + mutex_unlock(&inode->i_mutex);
9386     +
9387     + mnt_drop_write_file(file);
9388     +
9389     + return 0;
9390     +}
9391     +
9392     +long
9393     +efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
9394     +{
9395     + void __user *arg = (void __user *)p;
9396     +
9397     + switch (cmd) {
9398     + case FS_IOC_GETFLAGS:
9399     + return efivarfs_ioc_getxflags(file, arg);
9400     + case FS_IOC_SETFLAGS:
9401     + return efivarfs_ioc_setxflags(file, arg);
9402     + }
9403     +
9404     + return -ENOTTY;
9405     +}
9406     +
9407     const struct file_operations efivarfs_file_operations = {
9408     .open = simple_open,
9409     .read = efivarfs_file_read,
9410     .write = efivarfs_file_write,
9411     .llseek = no_llseek,
9412     + .unlocked_ioctl = efivarfs_file_ioctl,
9413     };
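Note: with this handler wired into efivarfs_file_operations, the immutable bit can be read and cleared from userspace through the standard flags ioctls, which is the interface chattr(1) drives. A minimal sketch; the variable name in the path is only an example:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
            unsigned int flags;
            int fd = open("/sys/firmware/efi/efivars/Boot0000-8be4df61-93ca-11d2-aa0d-00e098032b8c",
                          O_RDONLY);

            if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0)
                    return 1;

            flags &= ~FS_IMMUTABLE_FL;      /* equivalent of "chattr -i" */
            return ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0;
    }

Clearing the flag requires CAP_LINUX_IMMUTABLE, per the capable() check in efivarfs_ioc_setxflags().
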
9414     diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
9415     index 3381b9da9ee6..e2ab6d0497f2 100644
9416     --- a/fs/efivarfs/inode.c
9417     +++ b/fs/efivarfs/inode.c
9418     @@ -15,7 +15,8 @@
9419     #include "internal.h"
9420    
9421     struct inode *efivarfs_get_inode(struct super_block *sb,
9422     - const struct inode *dir, int mode, dev_t dev)
9423     + const struct inode *dir, int mode,
9424     + dev_t dev, bool is_removable)
9425     {
9426     struct inode *inode = new_inode(sb);
9427    
9428     @@ -23,6 +24,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb,
9429     inode->i_ino = get_next_ino();
9430     inode->i_mode = mode;
9431     inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
9432     + inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
9433     switch (mode & S_IFMT) {
9434     case S_IFREG:
9435     inode->i_fop = &efivarfs_file_operations;
9436     @@ -102,22 +104,17 @@ static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid)
9437     static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9438     umode_t mode, bool excl)
9439     {
9440     - struct inode *inode;
9441     + struct inode *inode = NULL;
9442     struct efivar_entry *var;
9443     int namelen, i = 0, err = 0;
9444     + bool is_removable = false;
9445    
9446     if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
9447     return -EINVAL;
9448    
9449     - inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
9450     - if (!inode)
9451     - return -ENOMEM;
9452     -
9453     var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
9454     - if (!var) {
9455     - err = -ENOMEM;
9456     - goto out;
9457     - }
9458     + if (!var)
9459     + return -ENOMEM;
9460    
9461     /* length of the variable name itself: remove GUID and separator */
9462     namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
9463     @@ -125,6 +122,16 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9464     efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
9465     &var->var.VendorGuid);
9466    
9467     + if (efivar_variable_is_removable(var->var.VendorGuid,
9468     + dentry->d_name.name, namelen))
9469     + is_removable = true;
9470     +
9471     + inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
9472     + if (!inode) {
9473     + err = -ENOMEM;
9474     + goto out;
9475     + }
9476     +
9477     for (i = 0; i < namelen; i++)
9478     var->var.VariableName[i] = dentry->d_name.name[i];
9479    
9480     @@ -138,7 +145,8 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry,
9481     out:
9482     if (err) {
9483     kfree(var);
9484     - iput(inode);
9485     + if (inode)
9486     + iput(inode);
9487     }
9488     return err;
9489     }
9490     diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
9491     index b5ff16addb7c..b4505188e799 100644
9492     --- a/fs/efivarfs/internal.h
9493     +++ b/fs/efivarfs/internal.h
9494     @@ -15,7 +15,8 @@ extern const struct file_operations efivarfs_file_operations;
9495     extern const struct inode_operations efivarfs_dir_inode_operations;
9496     extern bool efivarfs_valid_name(const char *str, int len);
9497     extern struct inode *efivarfs_get_inode(struct super_block *sb,
9498     - const struct inode *dir, int mode, dev_t dev);
9499     + const struct inode *dir, int mode, dev_t dev,
9500     + bool is_removable);
9501    
9502     extern struct list_head efivarfs_list;
9503    
9504     diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
9505     index 86a2121828c3..abb244b06024 100644
9506     --- a/fs/efivarfs/super.c
9507     +++ b/fs/efivarfs/super.c
9508     @@ -118,8 +118,9 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9509     struct dentry *dentry, *root = sb->s_root;
9510     unsigned long size = 0;
9511     char *name;
9512     - int len, i;
9513     + int len;
9514     int err = -ENOMEM;
9515     + bool is_removable = false;
9516    
9517     entry = kzalloc(sizeof(*entry), GFP_KERNEL);
9518     if (!entry)
9519     @@ -128,15 +129,17 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9520     memcpy(entry->var.VariableName, name16, name_size);
9521     memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
9522    
9523     - len = ucs2_strlen(entry->var.VariableName);
9524     + len = ucs2_utf8size(entry->var.VariableName);
9525    
9526     /* name, plus '-', plus GUID, plus NUL*/
9527     name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
9528     if (!name)
9529     goto fail;
9530    
9531     - for (i = 0; i < len; i++)
9532     - name[i] = entry->var.VariableName[i] & 0xFF;
9533     + ucs2_as_utf8(name, entry->var.VariableName, len);
9534     +
9535     + if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
9536     + is_removable = true;
9537    
9538     name[len] = '-';
9539    
9540     @@ -144,7 +147,8 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
9541    
9542     name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
9543    
9544     - inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
9545     + inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
9546     + is_removable);
9547     if (!inode)
9548     goto fail_name;
9549    
9550     @@ -200,7 +204,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
9551     sb->s_d_op = &efivarfs_d_ops;
9552     sb->s_time_gran = 1;
9553    
9554     - inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
9555     + inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
9556     if (!inode)
9557     return -ENOMEM;
9558     inode->i_op = &efivarfs_dir_inode_operations;
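Note: the switch from ucs2_strlen() to ucs2_utf8size()/ucs2_as_utf8() matters because a UCS-2 code unit can expand to up to three UTF-8 bytes, so character count is not buffer size. A sketch of the sizing rule (the kernel helpers in lib/ucs2_string.c implement the real conversion):

    #include <stddef.h>
    #include <stdint.h>

    /* Bytes needed to encode a NUL-terminated UCS-2 string as UTF-8. */
    static size_t ucs2_utf8_bytes(const uint16_t *s)
    {
            size_t n = 0;

            for (; *s; s++) {
                    if (*s < 0x80)
                            n += 1;     /* ASCII */
                    else if (*s < 0x800)
                            n += 2;
                    else
                            n += 3;     /* rest of the BMP */
            }
            return n;
    }
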
9559     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
9560     index ea433a7f4bca..06bda0361e7c 100644
9561     --- a/fs/ext4/inode.c
9562     +++ b/fs/ext4/inode.c
9563     @@ -657,6 +657,34 @@ has_zeroout:
9564     return retval;
9565     }
9566    
9567     +/*
9568     + * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
9569     + * we have to be careful as someone else may be manipulating b_state as well.
9570     + */
9571     +static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
9572     +{
9573     + unsigned long old_state;
9574     + unsigned long new_state;
9575     +
9576     + flags &= EXT4_MAP_FLAGS;
9577     +
9578     + /* Dummy buffer_head? Set non-atomically. */
9579     + if (!bh->b_page) {
9580     + bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
9581     + return;
9582     + }
9583     + /*
9584     + * Someone else may be modifying b_state. Be careful! This is ugly but
9585     + * once we get rid of using bh as a container for mapping information
9586     + * to pass to / from get_block functions, this can go away.
9587     + */
9588     + do {
9589     + old_state = READ_ONCE(bh->b_state);
9590     + new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
9591     + } while (unlikely(
9592     + cmpxchg(&bh->b_state, old_state, new_state) != old_state));
9593     +}
9594     +
9595     /* Maximum number of blocks we map for direct IO at once. */
9596     #define DIO_MAX_BLOCKS 4096
9597    
9598     @@ -693,7 +721,7 @@ static int _ext4_get_block(struct inode *inode, sector_t iblock,
9599     ext4_io_end_t *io_end = ext4_inode_aio(inode);
9600    
9601     map_bh(bh, inode->i_sb, map.m_pblk);
9602     - bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
9603     + ext4_update_bh_state(bh, map.m_flags);
9604     if (IS_DAX(inode) && buffer_unwritten(bh)) {
9605     /*
9606     * dgc: I suspect unwritten conversion on ext4+DAX is
9607     @@ -1669,7 +1697,7 @@ int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
9608     return ret;
9609    
9610     map_bh(bh, inode->i_sb, map.m_pblk);
9611     - bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
9612     + ext4_update_bh_state(bh, map.m_flags);
9613    
9614     if (buffer_unwritten(bh)) {
9615     /* A delayed write to unwritten bh should be marked
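
The new ext4_update_bh_state() above replaces a plain read-modify-write of bh->b_state with a cmpxchg loop, so concurrent updates to unrelated bits of b_state are not lost. A minimal userspace sketch of the same lock-free pattern, using C11 atomics rather than the kernel's READ_ONCE()/cmpxchg() (the MAP_FLAGS mask here is illustrative, standing in for EXT4_MAP_FLAGS):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAP_FLAGS 0x0fUL /* hypothetical flag mask in the low bits */

    static void update_state(_Atomic unsigned long *state, unsigned long flags)
    {
        unsigned long old_state, new_state;

        flags &= MAP_FLAGS;
        old_state = atomic_load(state);
        do {
            /* recompute from the freshest value; CAS failure refreshes old_state */
            new_state = (old_state & ~MAP_FLAGS) | flags;
        } while (!atomic_compare_exchange_weak(state, &old_state, new_state));
    }

    int main(void)
    {
        _Atomic unsigned long state = 0xf0UL;

        update_state(&state, 0x05UL);
        printf("state = 0x%lx\n", (unsigned long)atomic_load(&state)); /* 0xf5 */
        return 0;
    }

The loop only replaces the masked bits; any bit another thread flips outside MAP_FLAGS between the load and the CAS simply causes one retry instead of being overwritten.
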
9616     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
9617     index 023f6a1f23cd..e5232bbcbe3d 100644
9618     --- a/fs/fs-writeback.c
9619     +++ b/fs/fs-writeback.c
9620     @@ -317,6 +317,7 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
9621     struct inode_switch_wbs_context *isw =
9622     container_of(work, struct inode_switch_wbs_context, work);
9623     struct inode *inode = isw->inode;
9624     + struct super_block *sb = inode->i_sb;
9625     struct address_space *mapping = inode->i_mapping;
9626     struct bdi_writeback *old_wb = inode->i_wb;
9627     struct bdi_writeback *new_wb = isw->new_wb;
9628     @@ -423,6 +424,7 @@ skip_switch:
9629     wb_put(new_wb);
9630    
9631     iput(inode);
9632     + deactivate_super(sb);
9633     kfree(isw);
9634     }
9635    
9636     @@ -469,11 +471,14 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
9637    
9638     /* while holding I_WB_SWITCH, no one else can update the association */
9639     spin_lock(&inode->i_lock);
9640     +
9641     if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
9642     - inode_to_wb(inode) == isw->new_wb) {
9643     - spin_unlock(&inode->i_lock);
9644     - goto out_free;
9645     - }
9646     + inode_to_wb(inode) == isw->new_wb)
9647     + goto out_unlock;
9648     +
9649     + if (!atomic_inc_not_zero(&inode->i_sb->s_active))
9650     + goto out_unlock;
9651     +
9652     inode->i_state |= I_WB_SWITCH;
9653     spin_unlock(&inode->i_lock);
9654    
9655     @@ -489,6 +494,8 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
9656     call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
9657     return;
9658    
9659     +out_unlock:
9660     + spin_unlock(&inode->i_lock);
9661     out_free:
9662     if (isw->new_wb)
9663     wb_put(isw->new_wb);
9664     diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
9665     index 2ac99db3750e..5a7b3229b956 100644
9666     --- a/fs/hostfs/hostfs_kern.c
9667     +++ b/fs/hostfs/hostfs_kern.c
9668     @@ -730,15 +730,13 @@ static int hostfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
9669    
9670     init_special_inode(inode, mode, dev);
9671     err = do_mknod(name, mode, MAJOR(dev), MINOR(dev));
9672     - if (!err)
9673     + if (err)
9674     goto out_free;
9675    
9676     err = read_name(inode, name);
9677     __putname(name);
9678     if (err)
9679     goto out_put;
9680     - if (err)
9681     - goto out_put;
9682    
9683     d_instantiate(dentry, inode);
9684     return 0;
9685     diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
9686     index ae4d5a1fa4c9..bffb908acbd4 100644
9687     --- a/fs/hpfs/namei.c
9688     +++ b/fs/hpfs/namei.c
9689     @@ -375,12 +375,11 @@ static int hpfs_unlink(struct inode *dir, struct dentry *dentry)
9690     struct inode *inode = d_inode(dentry);
9691     dnode_secno dno;
9692     int r;
9693     - int rep = 0;
9694     int err;
9695    
9696     hpfs_lock(dir->i_sb);
9697     hpfs_adjust_length(name, &len);
9698     -again:
9699     +
9700     err = -ENOENT;
9701     de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
9702     if (!de)
9703     @@ -400,33 +399,9 @@ again:
9704     hpfs_error(dir->i_sb, "there was error when removing dirent");
9705     err = -EFSERROR;
9706     break;
9707     - case 2: /* no space for deleting, try to truncate file */
9708     -
9709     + case 2: /* no space for deleting */
9710     err = -ENOSPC;
9711     - if (rep++)
9712     - break;
9713     -
9714     - dentry_unhash(dentry);
9715     - if (!d_unhashed(dentry)) {
9716     - hpfs_unlock(dir->i_sb);
9717     - return -ENOSPC;
9718     - }
9719     - if (generic_permission(inode, MAY_WRITE) ||
9720     - !S_ISREG(inode->i_mode) ||
9721     - get_write_access(inode)) {
9722     - d_rehash(dentry);
9723     - } else {
9724     - struct iattr newattrs;
9725     - /*pr_info("truncating file before delete.\n");*/
9726     - newattrs.ia_size = 0;
9727     - newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
9728     - err = notify_change(dentry, &newattrs, NULL);
9729     - put_write_access(inode);
9730     - if (!err)
9731     - goto again;
9732     - }
9733     - hpfs_unlock(dir->i_sb);
9734     - return -ENOSPC;
9735     + break;
9736     default:
9737     drop_nlink(inode);
9738     err = 0;
9739     diff --git a/fs/locks.c b/fs/locks.c
9740     index 0d2b3267e2a3..6333263b7bc8 100644
9741     --- a/fs/locks.c
9742     +++ b/fs/locks.c
9743     @@ -2182,7 +2182,6 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd,
9744     goto out;
9745     }
9746    
9747     -again:
9748     error = flock_to_posix_lock(filp, file_lock, &flock);
9749     if (error)
9750     goto out;
9751     @@ -2224,19 +2223,22 @@ again:
9752     * Attempt to detect a close/fcntl race and recover by
9753     * releasing the lock that was just acquired.
9754     */
9755     - /*
9756     - * we need that spin_lock here - it prevents reordering between
9757     - * update of i_flctx->flc_posix and check for it done in close().
9758     - * rcu_read_lock() wouldn't do.
9759     - */
9760     - spin_lock(&current->files->file_lock);
9761     - f = fcheck(fd);
9762     - spin_unlock(&current->files->file_lock);
9763     - if (!error && f != filp && flock.l_type != F_UNLCK) {
9764     - flock.l_type = F_UNLCK;
9765     - goto again;
9766     + if (!error && file_lock->fl_type != F_UNLCK) {
9767     + /*
9768     + * We need that spin_lock here - it prevents reordering between
9769     + * update of i_flctx->flc_posix and check for it done in
9770     + * close(). rcu_read_lock() wouldn't do.
9771     + */
9772     + spin_lock(&current->files->file_lock);
9773     + f = fcheck(fd);
9774     + spin_unlock(&current->files->file_lock);
9775     + if (f != filp) {
9776     + file_lock->fl_type = F_UNLCK;
9777     + error = do_lock_file_wait(filp, cmd, file_lock);
9778     + WARN_ON_ONCE(error);
9779     + error = -EBADF;
9780     + }
9781     }
9782     -
9783     out:
9784     locks_free_lock(file_lock);
9785     return error;
9786     @@ -2322,7 +2324,6 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
9787     goto out;
9788     }
9789    
9790     -again:
9791     error = flock64_to_posix_lock(filp, file_lock, &flock);
9792     if (error)
9793     goto out;
9794     @@ -2364,14 +2365,22 @@ again:
9795     * Attempt to detect a close/fcntl race and recover by
9796     * releasing the lock that was just acquired.
9797     */
9798     - spin_lock(&current->files->file_lock);
9799     - f = fcheck(fd);
9800     - spin_unlock(&current->files->file_lock);
9801     - if (!error && f != filp && flock.l_type != F_UNLCK) {
9802     - flock.l_type = F_UNLCK;
9803     - goto again;
9804     + if (!error && file_lock->fl_type != F_UNLCK) {
9805     + /*
9806     + * We need that spin_lock here - it prevents reordering between
9807     + * update of i_flctx->flc_posix and check for it done in
9808     + * close(). rcu_read_lock() wouldn't do.
9809     + */
9810     + spin_lock(&current->files->file_lock);
9811     + f = fcheck(fd);
9812     + spin_unlock(&current->files->file_lock);
9813     + if (f != filp) {
9814     + file_lock->fl_type = F_UNLCK;
9815     + error = do_lock_file_wait(filp, cmd, file_lock);
9816     + WARN_ON_ONCE(error);
9817     + error = -EBADF;
9818     + }
9819     }
9820     -
9821     out:
9822     locks_free_lock(file_lock);
9823     return error;
9824     diff --git a/fs/namei.c b/fs/namei.c
9825     index 0c3974cd3ecd..d8ee4da93650 100644
9826     --- a/fs/namei.c
9827     +++ b/fs/namei.c
9828     @@ -1711,6 +1711,11 @@ static inline int should_follow_link(struct nameidata *nd, struct path *link,
9829     return 0;
9830     if (!follow)
9831     return 0;
9832     + /* make sure that d_is_symlink above matches inode */
9833     + if (nd->flags & LOOKUP_RCU) {
9834     + if (read_seqcount_retry(&link->dentry->d_seq, seq))
9835     + return -ECHILD;
9836     + }
9837     return pick_link(nd, link, inode, seq);
9838     }
9839    
9840     @@ -1742,11 +1747,11 @@ static int walk_component(struct nameidata *nd, int flags)
9841     if (err < 0)
9842     return err;
9843    
9844     - inode = d_backing_inode(path.dentry);
9845     seq = 0; /* we are already out of RCU mode */
9846     err = -ENOENT;
9847     if (d_is_negative(path.dentry))
9848     goto out_path_put;
9849     + inode = d_backing_inode(path.dentry);
9850     }
9851    
9852     if (flags & WALK_PUT)
9853     @@ -3130,12 +3135,12 @@ retry_lookup:
9854     return error;
9855    
9856     BUG_ON(nd->flags & LOOKUP_RCU);
9857     - inode = d_backing_inode(path.dentry);
9858     seq = 0; /* out of RCU mode, so the value doesn't matter */
9859     if (unlikely(d_is_negative(path.dentry))) {
9860     path_to_nameidata(&path, nd);
9861     return -ENOENT;
9862     }
9863     + inode = d_backing_inode(path.dentry);
9864     finish_lookup:
9865     if (nd->depth)
9866     put_link(nd);
9867     @@ -3144,11 +3149,6 @@ finish_lookup:
9868     if (unlikely(error))
9869     return error;
9870    
9871     - if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
9872     - path_to_nameidata(&path, nd);
9873     - return -ELOOP;
9874     - }
9875     -
9876     if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
9877     path_to_nameidata(&path, nd);
9878     } else {
9879     @@ -3167,6 +3167,10 @@ finish_open:
9880     return error;
9881     }
9882     audit_inode(nd->name, nd->path.dentry, 0);
9883     + if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
9884     + error = -ELOOP;
9885     + goto out;
9886     + }
9887     error = -EISDIR;
9888     if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
9889     goto out;
9890     @@ -3210,6 +3214,10 @@ opened:
9891     goto exit_fput;
9892     }
9893     out:
9894     + if (unlikely(error > 0)) {
9895     + WARN_ON(1);
9896     + error = -EINVAL;
9897     + }
9898     if (got_write)
9899     mnt_drop_write(nd->path.mnt);
9900     path_put(&save_parent);
9901     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
9902     index f496ed721d27..98a44157353a 100644
9903     --- a/fs/nfs/nfs4proc.c
9904     +++ b/fs/nfs/nfs4proc.c
9905     @@ -2461,9 +2461,9 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
9906     dentry = d_add_unique(dentry, igrab(state->inode));
9907     if (dentry == NULL) {
9908     dentry = opendata->dentry;
9909     - } else if (dentry != ctx->dentry) {
9910     + } else {
9911     dput(ctx->dentry);
9912     - ctx->dentry = dget(dentry);
9913     + ctx->dentry = dentry;
9914     }
9915     nfs_set_verifier(dentry,
9916     nfs_save_change_attribute(d_inode(opendata->dir)));
9917     diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
9918     index 7f604727f487..e6795c7c76a8 100644
9919     --- a/fs/ocfs2/aops.c
9920     +++ b/fs/ocfs2/aops.c
9921     @@ -956,6 +956,7 @@ clean_orphan:
9922     tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
9923     update_isize, end);
9924     if (tmp_ret < 0) {
9925     + ocfs2_inode_unlock(inode, 1);
9926     ret = tmp_ret;
9927     mlog_errno(ret);
9928     brelse(di_bh);
9929     diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
9930     index 0419485891f2..0f1c6f315cdc 100644
9931     --- a/include/asm-generic/cputime_nsecs.h
9932     +++ b/include/asm-generic/cputime_nsecs.h
9933     @@ -75,7 +75,7 @@ typedef u64 __nocast cputime64_t;
9934     */
9935     static inline cputime_t timespec_to_cputime(const struct timespec *val)
9936     {
9937     - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
9938     + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
9939     return (__force cputime_t) ret;
9940     }
9941     static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
9942     @@ -91,7 +91,8 @@ static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
9943     */
9944     static inline cputime_t timeval_to_cputime(const struct timeval *val)
9945     {
9946     - u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
9947     + u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
9948     + val->tv_usec * NSEC_PER_USEC;
9949     return (__force cputime_t) ret;
9950     }
9951     static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
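
The (u64) casts added in timespec_to_cputime() and timeval_to_cputime() matter on 32-bit targets, where tv_sec is a 32-bit long and the multiply would otherwise be performed in 32 bits before widening. A standalone demonstration, using uint32_t to model the 32-bit arithmetic (and to keep the wraparound well-defined):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000UL

    int main(void)
    {
        uint32_t tv_sec = 5; /* models a 32-bit time_t */

        /* multiply done in 32 bits wraps: 5e9 mod 2^32 */
        uint64_t wrong = (uint64_t)(tv_sec * (uint32_t)NSEC_PER_SEC);
        /* widen first, as the patched code does with the (u64) cast */
        uint64_t right = (uint64_t)tv_sec * NSEC_PER_SEC;

        printf("wrong = %llu\n", (unsigned long long)wrong); /* 705032704 */
        printf("right = %llu\n", (unsigned long long)right); /* 5000000000 */
        return 0;
    }
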
9952     diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
9953     index 7bfb063029d8..461a0558bca4 100644
9954     --- a/include/drm/drm_cache.h
9955     +++ b/include/drm/drm_cache.h
9956     @@ -35,4 +35,13 @@
9957    
9958     void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
9959    
9960     +static inline bool drm_arch_can_wc_memory(void)
9961     +{
9962     +#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
9963     + return false;
9964     +#else
9965     + return true;
9966     +#endif
9967     +}
9968     +
9969     #endif
9970     diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
9971     index 5340099741ae..f356f9716474 100644
9972     --- a/include/drm/drm_dp_mst_helper.h
9973     +++ b/include/drm/drm_dp_mst_helper.h
9974     @@ -44,8 +44,6 @@ struct drm_dp_vcpi {
9975     /**
9976     * struct drm_dp_mst_port - MST port
9977     * @kref: reference count for this port.
9978     - * @guid_valid: for DP 1.2 devices if we have validated the GUID.
9979     - * @guid: guid for DP 1.2 device on this port.
9980     * @port_num: port number
9981     * @input: if this port is an input port.
9982     * @mcs: message capability status - DP 1.2 spec.
9983     @@ -70,10 +68,6 @@ struct drm_dp_vcpi {
9984     struct drm_dp_mst_port {
9985     struct kref kref;
9986    
9987     - /* if dpcd 1.2 device is on this port - its GUID info */
9988     - bool guid_valid;
9989     - u8 guid[16];
9990     -
9991     u8 port_num;
9992     bool input;
9993     bool mcs;
9994     @@ -109,10 +103,12 @@ struct drm_dp_mst_port {
9995     * @tx_slots: transmission slots for this device.
9996     * @last_seqno: last sequence number used to talk to this.
9997     * @link_address_sent: if a link address message has been sent to this device yet.
9998     + * @guid: GUID for the DP 1.2 branch device. Ports under this branch can
9999     + * be identified by port number.
10000     *
10001     * This structure represents an MST branch device, there is one
10002     - * primary branch device at the root, along with any others connected
10003     - * to downstream ports
10004     + * primary branch device at the root, along with any other branches connected
10005     + * to downstream ports of parent branches.
10006     */
10007     struct drm_dp_mst_branch {
10008     struct kref kref;
10009     @@ -131,6 +127,9 @@ struct drm_dp_mst_branch {
10010     struct drm_dp_sideband_msg_tx *tx_slots[2];
10011     int last_seqno;
10012     bool link_address_sent;
10013     +
10014     + /* global unique identifier to identify branch devices */
10015     + u8 guid[16];
10016     };
10017    
10018    
10019     @@ -405,11 +404,9 @@ struct drm_dp_payload {
10020     * @conn_base_id: DRM connector ID this mgr is connected to.
10021     * @down_rep_recv: msg receiver state for down replies.
10022     * @up_req_recv: msg receiver state for up requests.
10023     - * @lock: protects mst state, primary, guid, dpcd.
10024     + * @lock: protects mst state, primary, dpcd.
10025     * @mst_state: if this manager is enabled for an MST capable port.
10026     * @mst_primary: pointer to the primary branch device.
10027     - * @guid_valid: GUID valid for the primary branch device.
10028     - * @guid: GUID for primary port.
10029     * @dpcd: cache of DPCD for primary port.
10030     * @pbn_div: PBN to slots divisor.
10031     *
10032     @@ -431,13 +428,11 @@ struct drm_dp_mst_topology_mgr {
10033     struct drm_dp_sideband_msg_rx up_req_recv;
10034    
10035     /* pointer to info about the initial MST device */
10036     - struct mutex lock; /* protects mst_state + primary + guid + dpcd */
10037     + struct mutex lock; /* protects mst_state + primary + dpcd */
10038    
10039     bool mst_state;
10040     struct drm_dp_mst_branch *mst_primary;
10041     - /* primary MST device GUID */
10042     - bool guid_valid;
10043     - u8 guid[16];
10044     +
10045     u8 dpcd[DP_RECEIVER_CAP_SIZE];
10046     u8 sink_count;
10047     int pbn_div;
10048     @@ -450,9 +445,7 @@ struct drm_dp_mst_topology_mgr {
10049     the mstb tx_slots and txmsg->state once they are queued */
10050     struct mutex qlock;
10051     struct list_head tx_msg_downq;
10052     - struct list_head tx_msg_upq;
10053     bool tx_down_in_progress;
10054     - bool tx_up_in_progress;
10055    
10056     /* payload info + lock for it */
10057     struct mutex payload_lock;
10058     diff --git a/include/drm/drm_fixed.h b/include/drm/drm_fixed.h
10059     index d639049a613d..553210c02ee0 100644
10060     --- a/include/drm/drm_fixed.h
10061     +++ b/include/drm/drm_fixed.h
10062     @@ -73,18 +73,28 @@ static inline u32 dfixed_div(fixed20_12 A, fixed20_12 B)
10063     #define DRM_FIXED_ONE (1ULL << DRM_FIXED_POINT)
10064     #define DRM_FIXED_DECIMAL_MASK (DRM_FIXED_ONE - 1)
10065     #define DRM_FIXED_DIGITS_MASK (~DRM_FIXED_DECIMAL_MASK)
10066     +#define DRM_FIXED_EPSILON 1LL
10067     +#define DRM_FIXED_ALMOST_ONE (DRM_FIXED_ONE - DRM_FIXED_EPSILON)
10068    
10069     static inline s64 drm_int2fixp(int a)
10070     {
10071     return ((s64)a) << DRM_FIXED_POINT;
10072     }
10073    
10074     -static inline int drm_fixp2int(int64_t a)
10075     +static inline int drm_fixp2int(s64 a)
10076     {
10077     return ((s64)a) >> DRM_FIXED_POINT;
10078     }
10079    
10080     -static inline unsigned drm_fixp_msbset(int64_t a)
10081     +static inline int drm_fixp2int_ceil(s64 a)
10082     +{
10083     + if (a > 0)
10084     + return drm_fixp2int(a + DRM_FIXED_ALMOST_ONE);
10085     + else
10086     + return drm_fixp2int(a - DRM_FIXED_ALMOST_ONE);
10087     +}
10088     +
10089     +static inline unsigned drm_fixp_msbset(s64 a)
10090     {
10091     unsigned shift, sign = (a >> 63) & 1;
10092    
10093     @@ -136,6 +146,45 @@ static inline s64 drm_fixp_div(s64 a, s64 b)
10094     return result;
10095     }
10096    
10097     +static inline s64 drm_fixp_from_fraction(s64 a, s64 b)
10098     +{
10099     + s64 res;
10100     + bool a_neg = a < 0;
10101     + bool b_neg = b < 0;
10102     + u64 a_abs = a_neg ? -a : a;
10103     + u64 b_abs = b_neg ? -b : b;
10104     + u64 rem;
10105     +
10106     + /* determine integer part */
10107     + u64 res_abs = div64_u64_rem(a_abs, b_abs, &rem);
10108     +
10109     + /* determine fractional part */
10110     + {
10111     + u32 i = DRM_FIXED_POINT;
10112     +
10113     + do {
10114     + rem <<= 1;
10115     + res_abs <<= 1;
10116     + if (rem >= b_abs) {
10117     + res_abs |= 1;
10118     + rem -= b_abs;
10119     + }
10120     + } while (--i != 0);
10121     + }
10122     +
10123     + /* round up LSB */
10124     + {
10125     + u64 summand = (rem << 1) >= b_abs;
10126     +
10127     + res_abs += summand;
10128     + }
10129     +
10130     + res = (s64) res_abs;
10131     + if (a_neg ^ b_neg)
10132     + res = -res;
10133     + return res;
10134     +}
10135     +
10136     static inline s64 drm_fixp_exp(s64 x)
10137     {
10138     s64 tolerance = div64_s64(DRM_FIXED_ONE, 1000000);
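
drm_fixp2int_ceil() uses a classic fixed-point idiom: add DRM_FIXED_ALMOST_ONE (one LSB short of 1.0) before the truncating shift, so fractional values round up to the next integer while exact integers pass through unchanged. A standalone sketch of the positive-value case in the same 32.32 format (the names are local stand-ins, not the DRM API):

    #include <stdint.h>
    #include <stdio.h>

    #define FIXED_POINT      32
    #define FIXED_ONE        (1ULL << FIXED_POINT)
    #define FIXED_ALMOST_ONE ((int64_t)FIXED_ONE - 1)

    static int fixp2int(int64_t a)
    {
        return (int)(a >> FIXED_POINT); /* truncating shift, as in drm_fixp2int() */
    }

    static int fixp2int_ceil_pos(int64_t a)
    {
        /* "almost one" carries into the integer part only when a has a
         * nonzero fractional part */
        return fixp2int(a + FIXED_ALMOST_ONE);
    }

    int main(void)
    {
        int64_t one = (int64_t)FIXED_ONE;

        printf("ceil(0.5)  = %d\n", fixp2int_ceil_pos(one / 2));           /* 1 */
        printf("ceil(2.0)  = %d\n", fixp2int_ceil_pos(2 * one));           /* 2 */
        printf("ceil(2.25) = %d\n", fixp2int_ceil_pos(2 * one + one / 4)); /* 3 */
        return 0;
    }
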
10139     diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
10140     index 71b1d6cdcb5d..8dbd7879fdc6 100644
10141     --- a/include/linux/ceph/messenger.h
10142     +++ b/include/linux/ceph/messenger.h
10143     @@ -220,6 +220,7 @@ struct ceph_connection {
10144     struct ceph_entity_addr actual_peer_addr;
10145    
10146     /* message out temps */
10147     + struct ceph_msg_header out_hdr;
10148     struct ceph_msg *out_msg; /* sending message (== tail of
10149     out_sent) */
10150     bool out_msg_done;
10151     @@ -229,7 +230,6 @@ struct ceph_connection {
10152     int out_kvec_left; /* kvec's left in out_kvec */
10153     int out_skip; /* skip this many bytes */
10154     int out_kvec_bytes; /* total bytes left */
10155     - bool out_kvec_is_msg; /* kvec refers to out_msg */
10156     int out_more; /* there is more data after the kvecs */
10157     __le64 out_temp_ack; /* for writing an ack */
10158     struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
10159     diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
10160     index 06b77f9dd3f2..8e30faeab183 100644
10161     --- a/include/linux/cgroup-defs.h
10162     +++ b/include/linux/cgroup-defs.h
10163     @@ -133,6 +133,12 @@ struct cgroup_subsys_state {
10164     */
10165     u64 serial_nr;
10166    
10167     + /*
10168     + * Incremented by online self and children. Used to guarantee that
10169     + * parents are not offlined before their children.
10170     + */
10171     + atomic_t online_cnt;
10172     +
10173     /* percpu_ref killing and RCU release */
10174     struct rcu_head rcu_head;
10175     struct work_struct destroy_work;
10176     diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
10177     index 85a868ccb493..fea160ee5803 100644
10178     --- a/include/linux/cpuset.h
10179     +++ b/include/linux/cpuset.h
10180     @@ -137,6 +137,8 @@ static inline void set_mems_allowed(nodemask_t nodemask)
10181     task_unlock(current);
10182     }
10183    
10184     +extern void cpuset_post_attach_flush(void);
10185     +
10186     #else /* !CONFIG_CPUSETS */
10187    
10188     static inline bool cpusets_enabled(void) { return false; }
10189     @@ -243,6 +245,10 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
10190     return false;
10191     }
10192    
10193     +static inline void cpuset_post_attach_flush(void)
10194     +{
10195     +}
10196     +
10197     #endif /* !CONFIG_CPUSETS */
10198    
10199     #endif /* _LINUX_CPUSET_H */
10200     diff --git a/include/linux/efi.h b/include/linux/efi.h
10201     index 569b5a866bb1..47be3ad7d3e5 100644
10202     --- a/include/linux/efi.h
10203     +++ b/include/linux/efi.h
10204     @@ -1199,7 +1199,10 @@ int efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
10205     struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
10206     struct list_head *head, bool remove);
10207    
10208     -bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
10209     +bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
10210     + unsigned long data_size);
10211     +bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
10212     + size_t len);
10213    
10214     extern struct work_struct efivar_work;
10215     void efivar_run_worker(void);
10216     diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
10217     index 8fdc17b84739..ae6a711dcd1d 100644
10218     --- a/include/linux/hyperv.h
10219     +++ b/include/linux/hyperv.h
10220     @@ -630,6 +630,11 @@ struct hv_input_signal_event_buffer {
10221     struct hv_input_signal_event event;
10222     };
10223    
10224     +enum hv_signal_policy {
10225     + HV_SIGNAL_POLICY_DEFAULT = 0,
10226     + HV_SIGNAL_POLICY_EXPLICIT,
10227     +};
10228     +
10229     struct vmbus_channel {
10230     /* Unique channel id */
10231     int id;
10232     @@ -757,8 +762,21 @@ struct vmbus_channel {
10233     * link up channels based on their CPU affinity.
10234     */
10235     struct list_head percpu_list;
10236     + /*
10237     + * Host signaling policy: The default policy will be
10238     + * based on the ring buffer state. We will also support
10239     + * a policy where the client driver can have explicit
10240     + * signaling control.
10241     + */
10242     + enum hv_signal_policy signal_policy;
10243     };
10244    
10245     +static inline void set_channel_signal_state(struct vmbus_channel *c,
10246     + enum hv_signal_policy policy)
10247     +{
10248     + c->signal_policy = policy;
10249     +}
10250     +
10251     static inline void set_channel_read_state(struct vmbus_channel *c, bool state)
10252     {
10253     c->batched_reading = state;
10254     diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
10255     index c0e961474a52..5455b660bd88 100644
10256     --- a/include/linux/nfs_fs.h
10257     +++ b/include/linux/nfs_fs.h
10258     @@ -544,9 +544,7 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
10259    
10260     static inline loff_t nfs_size_to_loff_t(__u64 size)
10261     {
10262     - if (size > (__u64) OFFSET_MAX - 1)
10263     - return OFFSET_MAX - 1;
10264     - return (loff_t) size;
10265     + return min_t(u64, size, OFFSET_MAX);
10266     }
10267    
10268     static inline ino_t
10269     diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
10270     index 50777b5b1e4c..92d112aeec68 100644
10271     --- a/include/linux/shmem_fs.h
10272     +++ b/include/linux/shmem_fs.h
10273     @@ -15,10 +15,7 @@ struct shmem_inode_info {
10274     unsigned int seals; /* shmem seals */
10275     unsigned long flags;
10276     unsigned long alloced; /* data pages alloced to file */
10277     - union {
10278     - unsigned long swapped; /* subtotal assigned to swap */
10279     - char *symlink; /* unswappable short symlink */
10280     - };
10281     + unsigned long swapped; /* subtotal assigned to swap */
10282     struct shared_policy policy; /* NUMA memory alloc policy */
10283     struct list_head swaplist; /* chain of maybes on swap */
10284     struct simple_xattrs xattrs; /* list of xattrs */
10285     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
10286     index 9147f9f34cbe..75f136a22a5e 100644
10287     --- a/include/linux/skbuff.h
10288     +++ b/include/linux/skbuff.h
10289     @@ -219,6 +219,7 @@ struct sk_buff;
10290     #else
10291     #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
10292     #endif
10293     +extern int sysctl_max_skb_frags;
10294    
10295     typedef struct skb_frag_struct skb_frag_t;
10296    
10297     diff --git a/include/linux/thermal.h b/include/linux/thermal.h
10298     index 613c29bd6baf..e13a1ace50e9 100644
10299     --- a/include/linux/thermal.h
10300     +++ b/include/linux/thermal.h
10301     @@ -43,6 +43,9 @@
10302     /* Default weight of a bound cooling device */
10303     #define THERMAL_WEIGHT_DEFAULT 0
10304    
10305     +/* use a value below 0 K to indicate an invalid/uninitialized temperature */
10306     +#define THERMAL_TEMP_INVALID -274000
10307     +
10308     /* Unit conversion macros */
10309     #define DECI_KELVIN_TO_CELSIUS(t) ({ \
10310     long _t = (t); \
10311     @@ -167,6 +170,7 @@ struct thermal_attr {
10312     * @forced_passive: If > 0, temperature at which to switch on all ACPI
10313     * processor cooling devices. Currently only used by the
10314     * step-wise governor.
10315     + * @need_update: if equal to 1, thermal_zone_device_update needs to be invoked.
10316     * @ops: operations this &thermal_zone_device supports
10317     * @tzp: thermal zone parameters
10318     * @governor: pointer to the governor for this thermal zone
10319     @@ -194,6 +198,7 @@ struct thermal_zone_device {
10320     int emul_temperature;
10321     int passive;
10322     unsigned int forced_passive;
10323     + atomic_t need_update;
10324     struct thermal_zone_device_ops *ops;
10325     struct thermal_zone_params *tzp;
10326     struct thermal_governor *governor;
10327     diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
10328     index cbb20afdbc01..bb679b48f408 100644
10329     --- a/include/linux/ucs2_string.h
10330     +++ b/include/linux/ucs2_string.h
10331     @@ -11,4 +11,8 @@ unsigned long ucs2_strlen(const ucs2_char_t *s);
10332     unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
10333     int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
10334    
10335     +unsigned long ucs2_utf8size(const ucs2_char_t *src);
10336     +unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
10337     + unsigned long maxlength);
10338     +
10339     #endif /* _LINUX_UCS2_STRING_H_ */
10340     diff --git a/include/net/af_unix.h b/include/net/af_unix.h
10341     index 2a91a0561a47..9b4c418bebd8 100644
10342     --- a/include/net/af_unix.h
10343     +++ b/include/net/af_unix.h
10344     @@ -6,8 +6,8 @@
10345     #include <linux/mutex.h>
10346     #include <net/sock.h>
10347    
10348     -void unix_inflight(struct file *fp);
10349     -void unix_notinflight(struct file *fp);
10350     +void unix_inflight(struct user_struct *user, struct file *fp);
10351     +void unix_notinflight(struct user_struct *user, struct file *fp);
10352     void unix_gc(void);
10353     void wait_for_unix_gc(void);
10354     struct sock *unix_get_socket(struct file *filp);
10355     diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
10356     index 6816f0fa5693..30a56ab2ccfb 100644
10357     --- a/include/net/dst_metadata.h
10358     +++ b/include/net/dst_metadata.h
10359     @@ -44,6 +44,24 @@ static inline bool skb_valid_dst(const struct sk_buff *skb)
10360     return dst && !(dst->flags & DST_METADATA);
10361     }
10362    
10363     +static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
10364     + const struct sk_buff *skb_b)
10365     +{
10366     + const struct metadata_dst *a, *b;
10367     +
10368     + if (!(skb_a->_skb_refdst | skb_b->_skb_refdst))
10369     + return 0;
10370     +
10371     + a = (const struct metadata_dst *) skb_dst(skb_a);
10372     + b = (const struct metadata_dst *) skb_dst(skb_b);
10373     +
10374     + if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
10375     + return 1;
10376     +
10377     + return memcmp(&a->u.tun_info, &b->u.tun_info,
10378     + sizeof(a->u.tun_info) + a->u.tun_info.options_len);
10379     +}
10380     +
10381     struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
10382     struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
10383    
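
skb_metadata_dst_cmp() above leans on two compact pointer-presence idioms: OR-ing the two refdst words is zero only when both are unset (the cheap "both equal, nothing to compare" fast path), and `!a != !b` is true exactly when one of the two is NULL. A tiny truth-table demo, with plain pointers standing in for the skb fields:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int x = 1, y = 2;
        const int *cases[4][2] = {
            { NULL, NULL }, { &x, NULL }, { NULL, &y }, { &x, &y }
        };

        for (int i = 0; i < 4; i++) {
            const int *a = cases[i][0], *b = cases[i][1];

            printf("a=%-4s b=%-4s  both_null=%d  exactly_one_null=%d\n",
                   a ? "set" : "NULL", b ? "set" : "NULL",
                   !((uintptr_t)a | (uintptr_t)b), !a != !b);
        }
        return 0;
    }
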
10384     diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
10385     index 481fe1c9044c..49dcad4fe99e 100644
10386     --- a/include/net/inet_connection_sock.h
10387     +++ b/include/net/inet_connection_sock.h
10388     @@ -270,8 +270,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
10389     struct sock *newsk,
10390     const struct request_sock *req);
10391    
10392     -void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
10393     - struct sock *child);
10394     +struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
10395     + struct request_sock *req,
10396     + struct sock *child);
10397     void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
10398     unsigned long timeout);
10399     struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
10400     diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
10401     index 877f682989b8..295d291269e2 100644
10402     --- a/include/net/ip6_route.h
10403     +++ b/include/net/ip6_route.h
10404     @@ -64,8 +64,16 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
10405    
10406     void ip6_route_input(struct sk_buff *skb);
10407    
10408     -struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
10409     - struct flowi6 *fl6);
10410     +struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
10411     + struct flowi6 *fl6, int flags);
10412     +
10413     +static inline struct dst_entry *ip6_route_output(struct net *net,
10414     + const struct sock *sk,
10415     + struct flowi6 *fl6)
10416     +{
10417     + return ip6_route_output_flags(net, sk, fl6, 0);
10418     +}
10419     +
10420     struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
10421     int flags);
10422    
10423     diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
10424     index 9f4df68105ab..3f98233388fb 100644
10425     --- a/include/net/ip_fib.h
10426     +++ b/include/net/ip_fib.h
10427     @@ -61,6 +61,7 @@ struct fib_nh_exception {
10428     struct rtable __rcu *fnhe_rth_input;
10429     struct rtable __rcu *fnhe_rth_output;
10430     unsigned long fnhe_stamp;
10431     + struct rcu_head rcu;
10432     };
10433    
10434     struct fnhe_hash_bucket {
10435     diff --git a/include/net/scm.h b/include/net/scm.h
10436     index 262532d111f5..59fa93c01d2a 100644
10437     --- a/include/net/scm.h
10438     +++ b/include/net/scm.h
10439     @@ -21,6 +21,7 @@ struct scm_creds {
10440     struct scm_fp_list {
10441     short count;
10442     short max;
10443     + struct user_struct *user;
10444     struct file *fp[SCM_MAX_FD];
10445     };
10446    
10447     diff --git a/include/net/tcp.h b/include/net/tcp.h
10448     index f80e74c5ad18..414d822bc1db 100644
10449     --- a/include/net/tcp.h
10450     +++ b/include/net/tcp.h
10451     @@ -449,7 +449,7 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);
10452    
10453     void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
10454     void tcp_v4_mtu_reduced(struct sock *sk);
10455     -void tcp_req_err(struct sock *sk, u32 seq);
10456     +void tcp_req_err(struct sock *sk, u32 seq, bool abort);
10457     int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
10458     struct sock *tcp_create_openreq_child(const struct sock *sk,
10459     struct request_sock *req,
10460     diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
10461     index aabf0aca0171..689f4d207122 100644
10462     --- a/include/target/target_core_base.h
10463     +++ b/include/target/target_core_base.h
10464     @@ -138,6 +138,7 @@ enum se_cmd_flags_table {
10465     SCF_COMPARE_AND_WRITE = 0x00080000,
10466     SCF_COMPARE_AND_WRITE_POST = 0x00100000,
10467     SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
10468     + SCF_ACK_KREF = 0x00400000,
10469     };
10470    
10471     /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
10472     @@ -490,6 +491,8 @@ struct se_cmd {
10473     #define CMD_T_DEV_ACTIVE (1 << 7)
10474     #define CMD_T_REQUEST_STOP (1 << 8)
10475     #define CMD_T_BUSY (1 << 9)
10476     +#define CMD_T_TAS (1 << 10)
10477     +#define CMD_T_FABRIC_STOP (1 << 11)
10478     spinlock_t t_state_lock;
10479     struct kref cmd_kref;
10480     struct completion t_transport_stop_comp;
10481     diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
10482     index c2e5d6cb34e3..ebd10e624598 100644
10483     --- a/include/uapi/linux/Kbuild
10484     +++ b/include/uapi/linux/Kbuild
10485     @@ -307,7 +307,7 @@ header-y += nfs_mount.h
10486     header-y += nl80211.h
10487     header-y += n_r3964.h
10488     header-y += nubus.h
10489     -header-y += nvme.h
10490     +header-y += nvme_ioctl.h
10491     header-y += nvram.h
10492     header-y += omap3isp.h
10493     header-y += omapfb.h
10494     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
10495     index d1d3e8f57de9..2e7f7ab739e4 100644
10496     --- a/kernel/bpf/verifier.c
10497     +++ b/kernel/bpf/verifier.c
10498     @@ -2082,7 +2082,7 @@ static void adjust_branches(struct bpf_prog *prog, int pos, int delta)
10499     /* adjust offset of jmps if necessary */
10500     if (i < pos && i + insn->off + 1 > pos)
10501     insn->off += delta;
10502     - else if (i > pos && i + insn->off + 1 < pos)
10503     + else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
10504     insn->off -= delta;
10505     }
10506     }
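
The corrected condition in adjust_branches() covers jumps that sit after an inserted patch and target an instruction at or before the end of the patched region: such a jump moved forward by delta while its target did not, so its offset must shrink by delta (and the `<= pos + delta` comparison keeps jumps into the patched region landing on its first instruction). A toy model of this bookkeeping with the same relative-offset convention (target = index + off + 1), and none of the verifier's real data structures:

    #include <stdio.h>

    struct insn {
        int is_jmp;
        int off; /* relative jump: target = index + off + 1 */
    };

    static void adjust_branches(struct insn *prog, int len, int pos, int delta)
    {
        for (int i = 0; i < len; i++) {
            if (!prog[i].is_jmp)
                continue;
            if (i < pos && i + prog[i].off + 1 > pos)
                prog[i].off += delta;  /* jumps forward across the insertion */
            else if (i > pos + delta && i + prog[i].off + 1 <= pos + delta)
                prog[i].off -= delta;  /* jumps backward across the insertion */
        }
    }

    int main(void)
    {
        /* old program: 0:jmp->3  1:op  2:jmp->1  3:op
         * insn 1 is patched into three insns (pos = 1, delta = 2) */
        struct insn prog[6] = {
            { 1,  2 },                     /* old 0, targeted old insn 3 */
            { 0, 0 }, { 0, 0 }, { 0, 0 },  /* patched region replacing old 1 */
            { 1, -2 },                     /* old 2, targeted old insn 1 */
            { 0, 0 },                      /* old 3 */
        };

        adjust_branches(prog, 6, 1, 2);
        for (int i = 0; i < 6; i++)
            if (prog[i].is_jmp)
                printf("insn %d jumps to insn %d\n", i, i + prog[i].off + 1);
        return 0; /* prints: insn 0 -> 5, insn 4 -> 1 */
    }
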
10507     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
10508     index 470f6536b9e8..fb1ecfd2decd 100644
10509     --- a/kernel/cgroup.c
10510     +++ b/kernel/cgroup.c
10511     @@ -57,7 +57,7 @@
10512     #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */
10513     #include <linux/kthread.h>
10514     #include <linux/delay.h>
10515     -
10516     +#include <linux/cpuset.h>
10517     #include <linux/atomic.h>
10518    
10519     /*
10520     @@ -2764,6 +2764,7 @@ out_unlock_rcu:
10521     out_unlock_threadgroup:
10522     percpu_up_write(&cgroup_threadgroup_rwsem);
10523     cgroup_kn_unlock(of->kn);
10524     + cpuset_post_attach_flush();
10525     return ret ?: nbytes;
10526     }
10527    
10528     @@ -4783,6 +4784,7 @@ static void init_and_link_css(struct cgroup_subsys_state *css,
10529     INIT_LIST_HEAD(&css->sibling);
10530     INIT_LIST_HEAD(&css->children);
10531     css->serial_nr = css_serial_nr_next++;
10532     + atomic_set(&css->online_cnt, 0);
10533    
10534     if (cgroup_parent(cgrp)) {
10535     css->parent = cgroup_css(cgroup_parent(cgrp), ss);
10536     @@ -4805,6 +4807,10 @@ static int online_css(struct cgroup_subsys_state *css)
10537     if (!ret) {
10538     css->flags |= CSS_ONLINE;
10539     rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
10540     +
10541     + atomic_inc(&css->online_cnt);
10542     + if (css->parent)
10543     + atomic_inc(&css->parent->online_cnt);
10544     }
10545     return ret;
10546     }
10547     @@ -5036,10 +5042,15 @@ static void css_killed_work_fn(struct work_struct *work)
10548     container_of(work, struct cgroup_subsys_state, destroy_work);
10549    
10550     mutex_lock(&cgroup_mutex);
10551     - offline_css(css);
10552     - mutex_unlock(&cgroup_mutex);
10553    
10554     - css_put(css);
10555     + do {
10556     + offline_css(css);
10557     + css_put(css);
10558     + /* @css can't go away while we're holding cgroup_mutex */
10559     + css = css->parent;
10560     + } while (css && atomic_dec_and_test(&css->online_cnt));
10561     +
10562     + mutex_unlock(&cgroup_mutex);
10563     }
10564    
10565     /* css kill confirmation processing requires process context, bounce */
10566     @@ -5048,8 +5059,10 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
10567     struct cgroup_subsys_state *css =
10568     container_of(ref, struct cgroup_subsys_state, refcnt);
10569    
10570     - INIT_WORK(&css->destroy_work, css_killed_work_fn);
10571     - queue_work(cgroup_destroy_wq, &css->destroy_work);
10572     + if (atomic_dec_and_test(&css->online_cnt)) {
10573     + INIT_WORK(&css->destroy_work, css_killed_work_fn);
10574     + queue_work(cgroup_destroy_wq, &css->destroy_work);
10575     + }
10576     }
10577    
10578     /**
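
The online_cnt scheme above makes each css count itself plus its online children, so the kill ref callback only queues offline work once every child is gone, and css_killed_work_fn() then walks upward, offlining each ancestor whose count drops to zero. A single-threaded userspace sketch of just the counting logic (C11 atomics; names are stand-ins for the cgroup structures):

    #include <stdatomic.h>
    #include <stdio.h>

    struct css {
        const char *name;
        struct css *parent;
        atomic_int online_cnt; /* self + online children */
    };

    static void online(struct css *c)
    {
        atomic_fetch_add(&c->online_cnt, 1);
        if (c->parent)
            atomic_fetch_add(&c->parent->online_cnt, 1);
    }

    /* mirrors css_killed_ref_fn/css_killed_work_fn: offlining propagates
     * upward only when a node's count reaches zero */
    static void kill(struct css *c)
    {
        while (c && atomic_fetch_sub(&c->online_cnt, 1) == 1) {
            printf("offlining %s\n", c->name);
            c = c->parent;
        }
    }

    int main(void)
    {
        struct css root  = { "root",  NULL  };
        struct css child = { "child", &root };

        online(&root);   /* root: 1 */
        online(&child);  /* child: 1, root: 2 */

        kill(&root);     /* root: 2 -> 1; child still online, nothing happens */
        kill(&child);    /* child -> 0: offline child, then root -> 0: offline root */
        return 0;
    }

Killing the root first only drops its count; the actual offline of both nodes happens bottom-up once the child dies, which is exactly the ordering guarantee the patch adds.
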
10579     diff --git a/kernel/cpuset.c b/kernel/cpuset.c
10580     index 02a8ea5c9963..2ade632197d5 100644
10581     --- a/kernel/cpuset.c
10582     +++ b/kernel/cpuset.c
10583     @@ -286,6 +286,8 @@ static struct cpuset top_cpuset = {
10584     static DEFINE_MUTEX(cpuset_mutex);
10585     static DEFINE_SPINLOCK(callback_lock);
10586    
10587     +static struct workqueue_struct *cpuset_migrate_mm_wq;
10588     +
10589     /*
10590     * CPU / memory hotplug is handled asynchronously.
10591     */
10592     @@ -971,31 +973,51 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
10593     }
10594    
10595     /*
10596     - * cpuset_migrate_mm
10597     - *
10598     - * Migrate memory region from one set of nodes to another.
10599     - *
10600     - * Temporarilly set tasks mems_allowed to target nodes of migration,
10601     - * so that the migration code can allocate pages on these nodes.
10602     - *
10603     - * While the mm_struct we are migrating is typically from some
10604     - * other task, the task_struct mems_allowed that we are hacking
10605     - * is for our current task, which must allocate new pages for that
10606     - * migrating memory region.
10607     + * Migrate memory region from one set of nodes to another. This is
10608     + * performed asynchronously as it can be called from process migration path
10609     + * holding locks involved in process management. All mm migrations are
10610     + * performed in the queued order and can be waited for by flushing
10611     + * cpuset_migrate_mm_wq.
10612     */
10613    
10614     +struct cpuset_migrate_mm_work {
10615     + struct work_struct work;
10616     + struct mm_struct *mm;
10617     + nodemask_t from;
10618     + nodemask_t to;
10619     +};
10620     +
10621     +static void cpuset_migrate_mm_workfn(struct work_struct *work)
10622     +{
10623     + struct cpuset_migrate_mm_work *mwork =
10624     + container_of(work, struct cpuset_migrate_mm_work, work);
10625     +
10626     + /* on a wq worker, no need to worry about %current's mems_allowed */
10627     + do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
10628     + mmput(mwork->mm);
10629     + kfree(mwork);
10630     +}
10631     +
10632     static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
10633     const nodemask_t *to)
10634     {
10635     - struct task_struct *tsk = current;
10636     -
10637     - tsk->mems_allowed = *to;
10638     + struct cpuset_migrate_mm_work *mwork;
10639    
10640     - do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
10641     + mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
10642     + if (mwork) {
10643     + mwork->mm = mm;
10644     + mwork->from = *from;
10645     + mwork->to = *to;
10646     + INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
10647     + queue_work(cpuset_migrate_mm_wq, &mwork->work);
10648     + } else {
10649     + mmput(mm);
10650     + }
10651     +}
10652    
10653     - rcu_read_lock();
10654     - guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
10655     - rcu_read_unlock();
10656     +void cpuset_post_attach_flush(void)
10657     +{
10658     + flush_workqueue(cpuset_migrate_mm_wq);
10659     }
10660    
10661     /*
10662     @@ -1096,7 +1118,8 @@ static void update_tasks_nodemask(struct cpuset *cs)
10663     mpol_rebind_mm(mm, &cs->mems_allowed);
10664     if (migrate)
10665     cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
10666     - mmput(mm);
10667     + else
10668     + mmput(mm);
10669     }
10670     css_task_iter_end(&it);
10671    
10672     @@ -1541,11 +1564,11 @@ static void cpuset_attach(struct cgroup_taskset *tset)
10673     * @old_mems_allowed is the right nodesets that we
10674     * migrate mm from.
10675     */
10676     - if (is_memory_migrate(cs)) {
10677     + if (is_memory_migrate(cs))
10678     cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
10679     &cpuset_attach_nodemask_to);
10680     - }
10681     - mmput(mm);
10682     + else
10683     + mmput(mm);
10684     }
10685     }
10686    
10687     @@ -1710,6 +1733,7 @@ out_unlock:
10688     mutex_unlock(&cpuset_mutex);
10689     kernfs_unbreak_active_protection(of->kn);
10690     css_put(&cs->css);
10691     + flush_workqueue(cpuset_migrate_mm_wq);
10692     return retval ?: nbytes;
10693     }
10694    
10695     @@ -2355,6 +2379,9 @@ void __init cpuset_init_smp(void)
10696     top_cpuset.effective_mems = node_states[N_MEMORY];
10697    
10698     register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
10699     +
10700     + cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
10701     + BUG_ON(!cpuset_migrate_mm_wq);
10702     }
10703    
10704     /**
10705     diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
10706     index a302cf9a2126..57bff7857e87 100644
10707     --- a/kernel/irq/handle.c
10708     +++ b/kernel/irq/handle.c
10709     @@ -138,7 +138,8 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
10710     unsigned int flags = 0, irq = desc->irq_data.irq;
10711     struct irqaction *action = desc->action;
10712    
10713     - do {
10714     + /* action might have become NULL since we dropped the lock */
10715     + while (action) {
10716     irqreturn_t res;
10717    
10718     trace_irq_handler_entry(irq, action);
10719     @@ -173,7 +174,7 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
10720    
10721     retval |= res;
10722     action = action->next;
10723     - } while (action);
10724     + }
10725    
10726     add_interrupt_randomness(irq, flags);
10727    
10728     diff --git a/kernel/memremap.c b/kernel/memremap.c
10729     index 7a4e473cea4d..25ced161ebeb 100644
10730     --- a/kernel/memremap.c
10731     +++ b/kernel/memremap.c
10732     @@ -133,8 +133,10 @@ void *devm_memremap(struct device *dev, resource_size_t offset,
10733     if (addr) {
10734     *ptr = addr;
10735     devres_add(dev, ptr);
10736     - } else
10737     + } else {
10738     devres_free(ptr);
10739     + return ERR_PTR(-ENXIO);
10740     + }
10741    
10742     return addr;
10743     }
10744     diff --git a/kernel/resource.c b/kernel/resource.c
10745     index f150dbbe6f62..249b1eb1e6e1 100644
10746     --- a/kernel/resource.c
10747     +++ b/kernel/resource.c
10748     @@ -1083,9 +1083,10 @@ struct resource * __request_region(struct resource *parent,
10749     if (!conflict)
10750     break;
10751     if (conflict != parent) {
10752     - parent = conflict;
10753     - if (!(conflict->flags & IORESOURCE_BUSY))
10754     + if (!(conflict->flags & IORESOURCE_BUSY)) {
10755     + parent = conflict;
10756     continue;
10757     + }
10758     }
10759     if (conflict->flags & flags & IORESOURCE_MUXED) {
10760     add_wait_queue(&muxed_resource_wait, &wait);
10761     diff --git a/kernel/seccomp.c b/kernel/seccomp.c
10762     index 580ac2d4024f..15a1795bbba1 100644
10763     --- a/kernel/seccomp.c
10764     +++ b/kernel/seccomp.c
10765     @@ -316,24 +316,24 @@ static inline void seccomp_sync_threads(void)
10766     put_seccomp_filter(thread);
10767     smp_store_release(&thread->seccomp.filter,
10768     caller->seccomp.filter);
10769     +
10770     + /*
10771     + * Don't let an unprivileged task work around
10772     + * the no_new_privs restriction by creating
10773     + * a thread that sets it up, enters seccomp,
10774     + * then dies.
10775     + */
10776     + if (task_no_new_privs(caller))
10777     + task_set_no_new_privs(thread);
10778     +
10779     /*
10780     * Opt the other thread into seccomp if needed.
10781     * As threads are considered to be trust-realm
10782     * equivalent (see ptrace_may_access), it is safe to
10783     * allow one thread to transition the other.
10784     */
10785     - if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
10786     - /*
10787     - * Don't let an unprivileged task work around
10788     - * the no_new_privs restriction by creating
10789     - * a thread that sets it up, enters seccomp,
10790     - * then dies.
10791     - */
10792     - if (task_no_new_privs(caller))
10793     - task_set_no_new_privs(thread);
10794     -
10795     + if (thread->seccomp.mode == SECCOMP_MODE_DISABLED)
10796     seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
10797     - }
10798     }
10799     }
10800    
10801     diff --git a/kernel/time/posix-clock.c b/kernel/time/posix-clock.c
10802     index ce033c7aa2e8..9cff0ab82b63 100644
10803     --- a/kernel/time/posix-clock.c
10804     +++ b/kernel/time/posix-clock.c
10805     @@ -69,10 +69,10 @@ static ssize_t posix_clock_read(struct file *fp, char __user *buf,
10806     static unsigned int posix_clock_poll(struct file *fp, poll_table *wait)
10807     {
10808     struct posix_clock *clk = get_posix_clock(fp);
10809     - int result = 0;
10810     + unsigned int result = 0;
10811    
10812     if (!clk)
10813     - return -ENODEV;
10814     + return POLLERR;
10815    
10816     if (clk->ops.poll)
10817     result = clk->ops.poll(clk, fp, wait);
10818     diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
10819     index 7c7ec4515983..22c57e191a23 100644
10820     --- a/kernel/time/tick-sched.c
10821     +++ b/kernel/time/tick-sched.c
10822     @@ -977,9 +977,9 @@ static void tick_nohz_switch_to_nohz(void)
10823     /* Get the next period */
10824     next = tick_init_jiffy_update();
10825    
10826     - hrtimer_forward_now(&ts->sched_timer, tick_period);
10827     hrtimer_set_expires(&ts->sched_timer, next);
10828     - tick_program_event(next, 1);
10829     + hrtimer_forward_now(&ts->sched_timer, tick_period);
10830     + tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
10831     tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
10832     }
10833    
10834     diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
10835     index d563c1960302..99188ee5d9d0 100644
10836     --- a/kernel/time/timekeeping.c
10837     +++ b/kernel/time/timekeeping.c
10838     @@ -305,8 +305,7 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
10839    
10840     delta = timekeeping_get_delta(tkr);
10841    
10842     - nsec = delta * tkr->mult + tkr->xtime_nsec;
10843     - nsec >>= tkr->shift;
10844     + nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
10845    
10846     /* If arch requires, add in get_arch_timeoffset() */
10847     return nsec + arch_gettimeoffset();
10848     diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
10849     index 4f6ef6912e00..debf6e878076 100644
10850     --- a/kernel/trace/trace_events.c
10851     +++ b/kernel/trace/trace_events.c
10852     @@ -869,7 +869,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
10853     * The ftrace subsystem is for showing formats only.
10854     * They can not be enabled or disabled via the event files.
10855     */
10856     - if (call->class && call->class->reg)
10857     + if (call->class && call->class->reg &&
10858     + !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
10859     return file;
10860     }
10861    
10862     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
10863     index c579dbab2e36..450c21fd0e6e 100644
10864     --- a/kernel/workqueue.c
10865     +++ b/kernel/workqueue.c
10866     @@ -568,6 +568,16 @@ static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
10867     int node)
10868     {
10869     assert_rcu_or_wq_mutex_or_pool_mutex(wq);
10870     +
10871     + /*
10872     + * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
10873     + * delayed item is pending. The plan is to keep CPU -> NODE
10874     + * mapping valid and stable across CPU on/offlines. Once that
10875     + * happens, this workaround can be removed.
10876     + */
10877     + if (unlikely(node == NUMA_NO_NODE))
10878     + return wq->dfl_pwq;
10879     +
10880     return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
10881     }
10882    
10883     @@ -1458,13 +1468,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
10884     timer_stats_timer_set_start_info(&dwork->timer);
10885    
10886     dwork->wq = wq;
10887     - /* timer isn't guaranteed to run in this cpu, record earlier */
10888     - if (cpu == WORK_CPU_UNBOUND)
10889     - cpu = raw_smp_processor_id();
10890     dwork->cpu = cpu;
10891     timer->expires = jiffies + delay;
10892    
10893     - add_timer_on(timer, cpu);
10894     + if (unlikely(cpu != WORK_CPU_UNBOUND))
10895     + add_timer_on(timer, cpu);
10896     + else
10897     + add_timer(timer);
10898     }
10899    
10900     /**
10901     diff --git a/lib/Kconfig b/lib/Kconfig
10902     index f0df318104e7..1a48744253d7 100644
10903     --- a/lib/Kconfig
10904     +++ b/lib/Kconfig
10905     @@ -210,9 +210,11 @@ config RANDOM32_SELFTEST
10906     # compression support is select'ed if needed
10907     #
10908     config 842_COMPRESS
10909     + select CRC32
10910     tristate
10911    
10912     config 842_DECOMPRESS
10913     + select CRC32
10914     tristate
10915    
10916     config ZLIB_INFLATE
10917     diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
10918     index 6f500ef2301d..f0b323abb4c6 100644
10919     --- a/lib/ucs2_string.c
10920     +++ b/lib/ucs2_string.c
10921     @@ -49,3 +49,65 @@ ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len)
10922     }
10923     }
10924     EXPORT_SYMBOL(ucs2_strncmp);
10925     +
10926     +unsigned long
10927     +ucs2_utf8size(const ucs2_char_t *src)
10928     +{
10929     + unsigned long i;
10930     + unsigned long j = 0;
10931     +
10932     + for (i = 0; i < ucs2_strlen(src); i++) {
10933     + u16 c = src[i];
10934     +
10935     + if (c >= 0x800)
10936     + j += 3;
10937     + else if (c >= 0x80)
10938     + j += 2;
10939     + else
10940     + j += 1;
10941     + }
10942     +
10943     + return j;
10944     +}
10945     +EXPORT_SYMBOL(ucs2_utf8size);
10946     +
10947     +/*
10948     + * Copy at most maxlength bytes, as whole UTF-8 characters, from the UCS-2
10949     + * string src to dest.
10950     + *
10951     + * The return value is the number of bytes written to dest, not including
10952     + * the final NUL byte.
10953     + */
10954     +unsigned long
10955     +ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
10956     +{
10957     + unsigned int i;
10958     + unsigned long j = 0;
10959     + unsigned long limit = ucs2_strnlen(src, maxlength);
10960     +
10961     + for (i = 0; maxlength && i < limit; i++) {
10962     + u16 c = src[i];
10963     +
10964     + if (c >= 0x800) {
10965     + if (maxlength < 3)
10966     + break;
10967     + maxlength -= 3;
10968     + dest[j++] = 0xe0 | (c & 0xf000) >> 12;
10969     + dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
10970     + dest[j++] = 0x80 | (c & 0x003f);
10971     + } else if (c >= 0x80) {
10972     + if (maxlength < 2)
10973     + break;
10974     + maxlength -= 2;
10975     + dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
10976     + dest[j++] = 0x80 | (c & 0x03f);
10977     + } else {
10978     + maxlength -= 1;
10979     + dest[j++] = c & 0x7f;
10980     + }
10981     + }
10982     + if (maxlength)
10983     + dest[j] = '\0';
10984     + return j;
10985     +}
10986     +EXPORT_SYMBOL(ucs2_as_utf8);
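
The bit layout used by ucs2_as_utf8() is plain UTF-8 for the Basic Multilingual Plane: one byte below U+0080, two bytes below U+0800, three bytes otherwise. A standalone encoder for a single UCS-2 code unit, mirroring the masks above but without the maxlength truncation handling:

    #include <stdint.h>
    #include <stdio.h>

    static size_t ucs2_to_utf8(uint16_t c, uint8_t out[3])
    {
        if (c >= 0x800) {                  /* 1110xxxx 10xxxxxx 10xxxxxx */
            out[0] = 0xe0 | (c >> 12);
            out[1] = 0x80 | ((c >> 6) & 0x3f);
            out[2] = 0x80 | (c & 0x3f);
            return 3;
        }
        if (c >= 0x80) {                   /* 110xxxxx 10xxxxxx */
            out[0] = 0xc0 | (c >> 6);
            out[1] = 0x80 | (c & 0x3f);
            return 2;
        }
        out[0] = (uint8_t)c;               /* 0xxxxxxx */
        return 1;
    }

    int main(void)
    {
        uint16_t src[] = { 0x0042, 0x00e4, 0x2603, 0 }; /* 'B', a-umlaut, snowman */
        uint8_t buf[16];
        size_t n = 0;

        for (size_t i = 0; src[i]; i++)
            n += ucs2_to_utf8(src[i], buf + n);
        buf[n] = '\0';
        printf("%s (%zu bytes)\n", (char *)buf, n); /* 6 bytes: 1 + 2 + 3 */
        return 0;
    }
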
10987     diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
10988     index d3116be5a00f..300117f1a08f 100644
10989     --- a/mm/balloon_compaction.c
10990     +++ b/mm/balloon_compaction.c
10991     @@ -61,6 +61,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
10992     bool dequeued_page;
10993    
10994     dequeued_page = false;
10995     + spin_lock_irqsave(&b_dev_info->pages_lock, flags);
10996     list_for_each_entry_safe(page, tmp, &b_dev_info->pages, lru) {
10997     /*
10998     * Block others from accessing the 'page' while we get around
10999     @@ -75,15 +76,14 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
11000     continue;
11001     }
11002     #endif
11003     - spin_lock_irqsave(&b_dev_info->pages_lock, flags);
11004     balloon_page_delete(page);
11005     __count_vm_event(BALLOON_DEFLATE);
11006     - spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
11007     unlock_page(page);
11008     dequeued_page = true;
11009     break;
11010     }
11011     }
11012     + spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
11013    
11014     if (!dequeued_page) {
11015     /*
11016     diff --git a/mm/memory.c b/mm/memory.c
11017     index c387430f06c3..b80bf4746b67 100644
11018     --- a/mm/memory.c
11019     +++ b/mm/memory.c
11020     @@ -3399,8 +3399,18 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
11021     if (unlikely(pmd_none(*pmd)) &&
11022     unlikely(__pte_alloc(mm, vma, pmd, address)))
11023     return VM_FAULT_OOM;
11024     - /* if an huge pmd materialized from under us just retry later */
11025     - if (unlikely(pmd_trans_huge(*pmd)))
11026     + /*
11027     + * If a huge pmd materialized under us just retry later. Use
11028     + * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
11029     + * didn't become pmd_trans_huge under us and then back to pmd_none, as
11030     + * a result of MADV_DONTNEED running immediately after a huge pmd fault
11031     + * in a different thread of this mm, in turn leading to a misleading
11032     + * pmd_trans_huge() retval. All we have to ensure is that it is a
11033     + * regular pmd that we can walk with pte_offset_map() and we can do that
11034     + * through an atomic read in C, which is what pmd_trans_unstable()
11035     + * provides.
11036     + */
11037     + if (unlikely(pmd_trans_unstable(pmd)))
11038     return 0;
11039     /*
11040     * A regular pmd is established and it can't morph into a huge pmd
11041     diff --git a/mm/migrate.c b/mm/migrate.c
11042     index 7890d0bb5e23..6d17e0ab42d4 100644
11043     --- a/mm/migrate.c
11044     +++ b/mm/migrate.c
11045     @@ -1578,7 +1578,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
11046     (GFP_HIGHUSER_MOVABLE |
11047     __GFP_THISNODE | __GFP_NOMEMALLOC |
11048     __GFP_NORETRY | __GFP_NOWARN) &
11049     - ~(__GFP_IO | __GFP_FS), 0);
11050     + ~__GFP_RECLAIM, 0);
11051    
11052     return newpage;
11053     }
11054     diff --git a/mm/shmem.c b/mm/shmem.c
11055     index 2afcdbbdb685..ea5a70cfc1d8 100644
11056     --- a/mm/shmem.c
11057     +++ b/mm/shmem.c
11058     @@ -620,8 +620,7 @@ static void shmem_evict_inode(struct inode *inode)
11059     list_del_init(&info->swaplist);
11060     mutex_unlock(&shmem_swaplist_mutex);
11061     }
11062     - } else
11063     - kfree(info->symlink);
11064     + }
11065    
11066     simple_xattrs_free(&info->xattrs);
11067     WARN_ON(inode->i_blocks);
11068     @@ -2462,13 +2461,12 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
11069     info = SHMEM_I(inode);
11070     inode->i_size = len-1;
11071     if (len <= SHORT_SYMLINK_LEN) {
11072     - info->symlink = kmemdup(symname, len, GFP_KERNEL);
11073     - if (!info->symlink) {
11074     + inode->i_link = kmemdup(symname, len, GFP_KERNEL);
11075     + if (!inode->i_link) {
11076     iput(inode);
11077     return -ENOMEM;
11078     }
11079     inode->i_op = &shmem_short_symlink_operations;
11080     - inode->i_link = info->symlink;
11081     } else {
11082     error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
11083     if (error) {
11084     @@ -3083,6 +3081,7 @@ static struct inode *shmem_alloc_inode(struct super_block *sb)
11085     static void shmem_destroy_callback(struct rcu_head *head)
11086     {
11087     struct inode *inode = container_of(head, struct inode, i_rcu);
11088     + kfree(inode->i_link);
11089     kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
11090     }
11091    
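The shmem change hands the short-symlink buffer to inode->i_link and frees it from shmem_destroy_callback(), which runs only after an RCU grace period, so lockless path walkers can never dereference a freed link body. The idiom in isolation, as a kernel-style sketch (not a standalone program; struct and function names are ours):

    struct thing {
        char *payload;
        struct rcu_head rcu;
    };

    static void thing_destroy_rcu(struct rcu_head *head)
    {
        struct thing *t = container_of(head, struct thing, rcu);

        /* Runs after a grace period: no RCU reader can still see t,
         * so freeing the payload here cannot race a lockless lookup. */
        kfree(t->payload);
        kfree(t);
    }

    static void thing_release(struct thing *t)
    {
        call_rcu(&t->rcu, thing_destroy_rcu);
    }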
11092     diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
11093     index 9e9cca3689a0..795ddd8b2f77 100644
11094     --- a/net/bluetooth/6lowpan.c
11095     +++ b/net/bluetooth/6lowpan.c
11096     @@ -307,6 +307,9 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11097    
11098     /* check that it's our buffer */
11099     if (lowpan_is_ipv6(*skb_network_header(skb))) {
11100     + /* Pull off the 1-byte 6lowpan header. */
11101     + skb_pull(skb, 1);
11102     +
11103     /* Copy the packet so that the IPv6 header is
11104     * properly aligned.
11105     */
11106     @@ -317,6 +320,7 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11107    
11108     local_skb->protocol = htons(ETH_P_IPV6);
11109     local_skb->pkt_type = PACKET_HOST;
11110     + local_skb->dev = dev;
11111    
11112     skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));
11113    
11114     @@ -335,6 +339,8 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11115     if (!local_skb)
11116     goto drop;
11117    
11118     + local_skb->dev = dev;
11119     +
11120     ret = iphc_decompress(local_skb, dev, chan);
11121     if (ret < 0) {
11122     kfree_skb(local_skb);
11123     @@ -343,7 +349,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
11124    
11125     local_skb->protocol = htons(ETH_P_IPV6);
11126     local_skb->pkt_type = PACKET_HOST;
11127     - local_skb->dev = dev;
11128    
11129     if (give_skb_to_upper(local_skb, dev)
11130     != NET_RX_SUCCESS) {
11131     diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
11132     index 85b82f7adbd2..24e9410923d0 100644
11133     --- a/net/bluetooth/hci_conn.c
11134     +++ b/net/bluetooth/hci_conn.c
11135     @@ -722,8 +722,12 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
11136     if (hci_update_random_address(req, false, &own_addr_type))
11137     return;
11138    
11139     + /* Set window to be the same value as the interval to enable
11140     + * continuous scanning.
11141     + */
11142     cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
11143     - cp.scan_window = cpu_to_le16(hdev->le_scan_window);
11144     + cp.scan_window = cp.scan_interval;
11145     +
11146     bacpy(&cp.peer_addr, &conn->dst);
11147     cp.peer_addr_type = conn->dst_type;
11148     cp.own_address_type = own_addr_type;
11149     diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
11150     index 981f8a202c27..02778c5bc149 100644
11151     --- a/net/bluetooth/hci_request.c
11152     +++ b/net/bluetooth/hci_request.c
11153     @@ -175,21 +175,29 @@ static u8 update_white_list(struct hci_request *req)
11154     * command to remove it from the controller.
11155     */
11156     list_for_each_entry(b, &hdev->le_white_list, list) {
11157     - struct hci_cp_le_del_from_white_list cp;
11158     + /* If the device is neither in pend_le_conns nor
11159     + * pend_le_reports then remove it from the whitelist.
11160     + */
11161     + if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
11162     + &b->bdaddr, b->bdaddr_type) &&
11163     + !hci_pend_le_action_lookup(&hdev->pend_le_reports,
11164     + &b->bdaddr, b->bdaddr_type)) {
11165     + struct hci_cp_le_del_from_white_list cp;
11166     +
11167     + cp.bdaddr_type = b->bdaddr_type;
11168     + bacpy(&cp.bdaddr, &b->bdaddr);
11169    
11170     - if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
11171     - &b->bdaddr, b->bdaddr_type) ||
11172     - hci_pend_le_action_lookup(&hdev->pend_le_reports,
11173     - &b->bdaddr, b->bdaddr_type)) {
11174     - white_list_entries++;
11175     + hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
11176     + sizeof(cp), &cp);
11177     continue;
11178     }
11179    
11180     - cp.bdaddr_type = b->bdaddr_type;
11181     - bacpy(&cp.bdaddr, &b->bdaddr);
11182     + if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
11183     + /* White list cannot be used with RPAs */
11184     + return 0x00;
11185     + }
11186    
11187     - hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
11188     - sizeof(cp), &cp);
11189     + white_list_entries++;
11190     }
11191    
11192     /* Since all no longer valid white list entries have been
11193     diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
11194     index ffed8a1d4f27..4b175df35184 100644
11195     --- a/net/bluetooth/smp.c
11196     +++ b/net/bluetooth/smp.c
11197     @@ -1072,22 +1072,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
11198     hcon->dst_type = smp->remote_irk->addr_type;
11199     queue_work(hdev->workqueue, &conn->id_addr_update_work);
11200     }
11201     -
11202     - /* When receiving an indentity resolving key for
11203     - * a remote device that does not use a resolvable
11204     - * private address, just remove the key so that
11205     - * it is possible to use the controller white
11206     - * list for scanning.
11207     - *
11208     - * Userspace will have been told to not store
11209     - * this key at this point. So it is safe to
11210     - * just remove it.
11211     - */
11212     - if (!bacmp(&smp->remote_irk->rpa, BDADDR_ANY)) {
11213     - list_del_rcu(&smp->remote_irk->list);
11214     - kfree_rcu(smp->remote_irk, rcu);
11215     - smp->remote_irk = NULL;
11216     - }
11217     }
11218    
11219     if (smp->csrk) {
11220     diff --git a/net/bridge/br.c b/net/bridge/br.c
11221     index a1abe4936fe1..3addc05b9a16 100644
11222     --- a/net/bridge/br.c
11223     +++ b/net/bridge/br.c
11224     @@ -121,6 +121,7 @@ static struct notifier_block br_device_notifier = {
11225     .notifier_call = br_device_event
11226     };
11227    
11228     +/* called with RTNL */
11229     static int br_switchdev_event(struct notifier_block *unused,
11230     unsigned long event, void *ptr)
11231     {
11232     @@ -130,7 +131,6 @@ static int br_switchdev_event(struct notifier_block *unused,
11233     struct switchdev_notifier_fdb_info *fdb_info;
11234     int err = NOTIFY_DONE;
11235    
11236     - rtnl_lock();
11237     p = br_port_get_rtnl(dev);
11238     if (!p)
11239     goto out;
11240     @@ -155,7 +155,6 @@ static int br_switchdev_event(struct notifier_block *unused,
11241     }
11242    
11243     out:
11244     - rtnl_unlock();
11245     return err;
11246     }
11247    
11248     diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
11249     index 9981039ef4ff..63ae5dd24fc5 100644
11250     --- a/net/ceph/messenger.c
11251     +++ b/net/ceph/messenger.c
11252     @@ -672,6 +672,8 @@ static void reset_connection(struct ceph_connection *con)
11253     }
11254     con->in_seq = 0;
11255     con->in_seq_acked = 0;
11256     +
11257     + con->out_skip = 0;
11258     }
11259    
11260     /*
11261     @@ -771,6 +773,8 @@ static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
11262    
11263     static void con_out_kvec_reset(struct ceph_connection *con)
11264     {
11265     + BUG_ON(con->out_skip);
11266     +
11267     con->out_kvec_left = 0;
11268     con->out_kvec_bytes = 0;
11269     con->out_kvec_cur = &con->out_kvec[0];
11270     @@ -779,9 +783,9 @@ static void con_out_kvec_reset(struct ceph_connection *con)
11271     static void con_out_kvec_add(struct ceph_connection *con,
11272     size_t size, void *data)
11273     {
11274     - int index;
11275     + int index = con->out_kvec_left;
11276    
11277     - index = con->out_kvec_left;
11278     + BUG_ON(con->out_skip);
11279     BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
11280    
11281     con->out_kvec[index].iov_len = size;
11282     @@ -790,6 +794,27 @@ static void con_out_kvec_add(struct ceph_connection *con,
11283     con->out_kvec_bytes += size;
11284     }
11285    
11286     +/*
11287     + * Chop off a kvec from the end. Return residual number of bytes for
11288     + * that kvec, i.e. how many bytes would have been written if the kvec
11289     + * hadn't been nuked.
11290     + */
11291     +static int con_out_kvec_skip(struct ceph_connection *con)
11292     +{
11293     + int off = con->out_kvec_cur - con->out_kvec;
11294     + int skip = 0;
11295     +
11296     + if (con->out_kvec_bytes > 0) {
11297     + skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
11298     + BUG_ON(con->out_kvec_bytes < skip);
11299     + BUG_ON(!con->out_kvec_left);
11300     + con->out_kvec_bytes -= skip;
11301     + con->out_kvec_left--;
11302     + }
11303     +
11304     + return skip;
11305     +}
11306     +
11307     #ifdef CONFIG_BLOCK
11308    
11309     /*
11310     @@ -1175,6 +1200,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
11311     return new_piece;
11312     }
11313    
11314     +static size_t sizeof_footer(struct ceph_connection *con)
11315     +{
11316     + return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
11317     + sizeof(struct ceph_msg_footer) :
11318     + sizeof(struct ceph_msg_footer_old);
11319     +}
11320     +
11321     static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
11322     {
11323     BUG_ON(!msg);
11324     @@ -1197,7 +1229,6 @@ static void prepare_write_message_footer(struct ceph_connection *con)
11325     m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
11326    
11327     dout("prepare_write_message_footer %p\n", con);
11328     - con->out_kvec_is_msg = true;
11329     con->out_kvec[v].iov_base = &m->footer;
11330     if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
11331     if (con->ops->sign_message)
11332     @@ -1225,7 +1256,6 @@ static void prepare_write_message(struct ceph_connection *con)
11333     u32 crc;
11334    
11335     con_out_kvec_reset(con);
11336     - con->out_kvec_is_msg = true;
11337     con->out_msg_done = false;
11338    
11339     /* Sneak an ack in there first? If we can get it into the same
11340     @@ -1265,18 +1295,19 @@ static void prepare_write_message(struct ceph_connection *con)
11341    
11342     /* tag + hdr + front + middle */
11343     con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
11344     - con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
11345     + con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
11346     con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
11347    
11348     if (m->middle)
11349     con_out_kvec_add(con, m->middle->vec.iov_len,
11350     m->middle->vec.iov_base);
11351    
11352     - /* fill in crc (except data pages), footer */
11353     + /* fill in hdr crc and finalize hdr */
11354     crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
11355     con->out_msg->hdr.crc = cpu_to_le32(crc);
11356     - con->out_msg->footer.flags = 0;
11357     + memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
11358    
11359     + /* fill in front and middle crc, footer */
11360     crc = crc32c(0, m->front.iov_base, m->front.iov_len);
11361     con->out_msg->footer.front_crc = cpu_to_le32(crc);
11362     if (m->middle) {
11363     @@ -1288,6 +1319,7 @@ static void prepare_write_message(struct ceph_connection *con)
11364     dout("%s front_crc %u middle_crc %u\n", __func__,
11365     le32_to_cpu(con->out_msg->footer.front_crc),
11366     le32_to_cpu(con->out_msg->footer.middle_crc));
11367     + con->out_msg->footer.flags = 0;
11368    
11369     /* is there a data payload? */
11370     con->out_msg->footer.data_crc = 0;
11371     @@ -1492,7 +1524,6 @@ static int write_partial_kvec(struct ceph_connection *con)
11372     }
11373     }
11374     con->out_kvec_left = 0;
11375     - con->out_kvec_is_msg = false;
11376     ret = 1;
11377     out:
11378     dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
11379     @@ -1584,6 +1615,7 @@ static int write_partial_skip(struct ceph_connection *con)
11380     {
11381     int ret;
11382    
11383     + dout("%s %p %d left\n", __func__, con, con->out_skip);
11384     while (con->out_skip > 0) {
11385     size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
11386    
11387     @@ -2313,9 +2345,9 @@ static int read_partial_message(struct ceph_connection *con)
11388     ceph_pr_addr(&con->peer_addr.in_addr),
11389     seq, con->in_seq + 1);
11390     con->in_base_pos = -front_len - middle_len - data_len -
11391     - sizeof(m->footer);
11392     + sizeof_footer(con);
11393     con->in_tag = CEPH_MSGR_TAG_READY;
11394     - return 0;
11395     + return 1;
11396     } else if ((s64)seq - (s64)con->in_seq > 1) {
11397     pr_err("read_partial_message bad seq %lld expected %lld\n",
11398     seq, con->in_seq + 1);
11399     @@ -2338,10 +2370,10 @@ static int read_partial_message(struct ceph_connection *con)
11400     /* skip this message */
11401     dout("alloc_msg said skip message\n");
11402     con->in_base_pos = -front_len - middle_len - data_len -
11403     - sizeof(m->footer);
11404     + sizeof_footer(con);
11405     con->in_tag = CEPH_MSGR_TAG_READY;
11406     con->in_seq++;
11407     - return 0;
11408     + return 1;
11409     }
11410    
11411     BUG_ON(!con->in_msg);
11412     @@ -2506,13 +2538,13 @@ more:
11413    
11414     more_kvec:
11415     /* kvec data queued? */
11416     - if (con->out_skip) {
11417     - ret = write_partial_skip(con);
11418     + if (con->out_kvec_left) {
11419     + ret = write_partial_kvec(con);
11420     if (ret <= 0)
11421     goto out;
11422     }
11423     - if (con->out_kvec_left) {
11424     - ret = write_partial_kvec(con);
11425     + if (con->out_skip) {
11426     + ret = write_partial_skip(con);
11427     if (ret <= 0)
11428     goto out;
11429     }
11430     @@ -3050,16 +3082,31 @@ void ceph_msg_revoke(struct ceph_msg *msg)
11431     ceph_msg_put(msg);
11432     }
11433     if (con->out_msg == msg) {
11434     - dout("%s %p msg %p - was sending\n", __func__, con, msg);
11435     - con->out_msg = NULL;
11436     - if (con->out_kvec_is_msg) {
11437     - con->out_skip = con->out_kvec_bytes;
11438     - con->out_kvec_is_msg = false;
11439     + BUG_ON(con->out_skip);
11440     + /* footer */
11441     + if (con->out_msg_done) {
11442     + con->out_skip += con_out_kvec_skip(con);
11443     + } else {
11444     + BUG_ON(!msg->data_length);
11445     + if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
11446     + con->out_skip += sizeof(msg->footer);
11447     + else
11448     + con->out_skip += sizeof(msg->old_footer);
11449     }
11450     + /* data, middle, front */
11451     + if (msg->data_length)
11452     + con->out_skip += msg->cursor.total_resid;
11453     + if (msg->middle)
11454     + con->out_skip += con_out_kvec_skip(con);
11455     + con->out_skip += con_out_kvec_skip(con);
11456     +
11457     + dout("%s %p msg %p - was sending, will write %d skip %d\n",
11458     + __func__, con, msg, con->out_kvec_bytes, con->out_skip);
11459     msg->hdr.seq = 0;
11460     -
11461     + con->out_msg = NULL;
11462     ceph_msg_put(msg);
11463     }
11464     +
11465     mutex_unlock(&con->mutex);
11466     }
11467    
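The messenger rework replaces the out_kvec_is_msg flag with explicit byte accounting: con_out_kvec_skip() pops the most recently queued kvec and reports how many of its bytes were never written, ceph_msg_revoke() adds those residuals (plus unsent data and footer bytes) to out_skip, and write_partial_skip() later pays the debt off by transmitting zeros. A simplified userspace model of the pop-and-account step over a struct iovec array (it ignores the partially-written-kvec bookkeeping of the real code):

    #include <stdio.h>
    #include <sys/uio.h>

    /* Drop the last queued iovec and return how many queued bytes
     * it accounted for, in the spirit of con_out_kvec_skip(). */
    static size_t kvec_skip(struct iovec *vec, int *left, size_t *bytes)
    {
        size_t skip = 0;

        if (*bytes > 0 && *left > 0) {
            skip = vec[*left - 1].iov_len;
            *bytes -= skip;
            (*left)--;
        }
        return skip;
    }

    int main(void)
    {
        char a[16], b[8];
        struct iovec vec[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
        int left = 2;
        size_t queued = 24, to_skip = 0;

        to_skip += kvec_skip(vec, &left, &queued);  /* pops b: 8 bytes */
        printf("left=%d queued=%zu skip=%zu\n", left, queued, to_skip);
        return 0;
    }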
11468     diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
11469     index f8f235930d88..a28e47ff1b1b 100644
11470     --- a/net/ceph/osd_client.c
11471     +++ b/net/ceph/osd_client.c
11472     @@ -2843,8 +2843,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
11473     mutex_lock(&osdc->request_mutex);
11474     req = __lookup_request(osdc, tid);
11475     if (!req) {
11476     - pr_warn("%s osd%d tid %llu unknown, skipping\n",
11477     - __func__, osd->o_osd, tid);
11478     + dout("%s osd%d tid %llu unknown, skipping\n", __func__,
11479     + osd->o_osd, tid);
11480     m = NULL;
11481     *skip = 1;
11482     goto out;
11483     diff --git a/net/core/dev.c b/net/core/dev.c
11484     index 7f00f2439770..9efbdb3ff78a 100644
11485     --- a/net/core/dev.c
11486     +++ b/net/core/dev.c
11487     @@ -4145,6 +4145,7 @@ static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
11488    
11489     diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
11490     diffs |= p->vlan_tci ^ skb->vlan_tci;
11491     + diffs |= skb_metadata_dst_cmp(p, skb);
11492     if (maclen == ETH_HLEN)
11493     diffs |= compare_ether_header(skb_mac_header(p),
11494     skb_mac_header(skb));
11495     @@ -4342,10 +4343,12 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
11496     break;
11497    
11498     case GRO_MERGED_FREE:
11499     - if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
11500     + if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
11501     + skb_dst_drop(skb);
11502     kmem_cache_free(skbuff_head_cache, skb);
11503     - else
11504     + } else {
11505     __kfree_skb(skb);
11506     + }
11507     break;
11508    
11509     case GRO_HELD:
11510     @@ -7125,8 +7128,10 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
11511     dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
11512     setup(dev);
11513    
11514     - if (!dev->tx_queue_len)
11515     + if (!dev->tx_queue_len) {
11516     dev->priv_flags |= IFF_NO_QUEUE;
11517     + dev->tx_queue_len = 1;
11518     + }
11519    
11520     dev->num_tx_queues = txqs;
11521     dev->real_num_tx_queues = txqs;
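The gro_list_prepare() hunk adds skb_metadata_dst_cmp() to the "diffs" accumulator so packets carrying different tunnel metadata are never merged into one GRO super-packet. The accumulator itself is a branch-free comparison idiom worth noting: XOR each pair of fields, OR the results together, and branch once at the end. A standalone illustration with made-up fields:

    #include <stdio.h>

    struct flow { const void *dev; unsigned short vlan_tci; };

    /* Any differing field leaves a non-zero bit in 'diffs';
     * only one branch is needed for the combined comparison. */
    static int same_flow(const struct flow *a, const struct flow *b)
    {
        unsigned long diffs = 0;

        diffs |= (unsigned long)a->dev ^ (unsigned long)b->dev;
        diffs |= a->vlan_tci ^ b->vlan_tci;
        return diffs == 0;
    }

    int main(void)
    {
        struct flow x = { (void *)0x1000, 5 }, y = { (void *)0x1000, 7 };
        printf("same: %d\n", same_flow(&x, &y));   /* prints "same: 0" */
        return 0;
    }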
11522     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
11523     index d79699c9d1b9..12e700332010 100644
11524     --- a/net/core/flow_dissector.c
11525     +++ b/net/core/flow_dissector.c
11526     @@ -208,7 +208,6 @@ ip:
11527     case htons(ETH_P_IPV6): {
11528     const struct ipv6hdr *iph;
11529     struct ipv6hdr _iph;
11530     - __be32 flow_label;
11531    
11532     ipv6:
11533     iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
11534     @@ -230,8 +229,12 @@ ipv6:
11535     key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
11536     }
11537    
11538     - flow_label = ip6_flowlabel(iph);
11539     - if (flow_label) {
11540     + if ((dissector_uses_key(flow_dissector,
11541     + FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
11542     + (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
11543     + ip6_flowlabel(iph)) {
11544     + __be32 flow_label = ip6_flowlabel(iph);
11545     +
11546     if (dissector_uses_key(flow_dissector,
11547     FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
11548     key_tags = skb_flow_dissector_target(flow_dissector,
11549     @@ -396,6 +399,13 @@ ip_proto_again:
11550     goto out_bad;
11551     proto = eth->h_proto;
11552     nhoff += sizeof(*eth);
11553     +
11554     + /* Cap headers that we access via pointers at the
11555     + * end of the Ethernet header as our maximum alignment
11556     + * at that point is only 2 bytes.
11557     + */
11558     + if (NET_IP_ALIGN)
11559     + hlen = nhoff;
11560     }
11561    
11562     key_control->flags |= FLOW_DIS_ENCAPSULATION;
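Two independent hardenings here: the flow label is now read only when a consumer actually asked for it (a flow-label key or the STOP_AT_FLOW_LABEL flag), and on NET_IP_ALIGN platforms hlen is capped at the inner Ethernet header so later header reads cannot stray past the 2-byte-aligned region. For reference, the 20-bit IPv6 flow label sits in the low bits of the header's first 32-bit word; a minimal extractor (assuming that word in network byte order):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* First 32 bits of an IPv6 header:
     * version(4) | traffic class(8) | flow label(20) */
    static uint32_t ip6_flow_label(uint32_t first_word_be)
    {
        return ntohl(first_word_be) & 0xFFFFFu;
    }

    int main(void)
    {
        /* version 6, traffic class 0, flow label 0x12345 */
        uint32_t word = htonl((6u << 28) | 0x12345u);
        printf("flow label: 0x%05X\n", ip6_flow_label(word));
        return 0;
    }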
11563     diff --git a/net/core/scm.c b/net/core/scm.c
11564     index 8a1741b14302..dce0acb929f1 100644
11565     --- a/net/core/scm.c
11566     +++ b/net/core/scm.c
11567     @@ -87,6 +87,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
11568     *fplp = fpl;
11569     fpl->count = 0;
11570     fpl->max = SCM_MAX_FD;
11571     + fpl->user = NULL;
11572     }
11573     fpp = &fpl->fp[fpl->count];
11574    
11575     @@ -107,6 +108,10 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
11576     *fpp++ = file;
11577     fpl->count++;
11578     }
11579     +
11580     + if (!fpl->user)
11581     + fpl->user = get_uid(current_user());
11582     +
11583     return num;
11584     }
11585    
11586     @@ -119,6 +124,7 @@ void __scm_destroy(struct scm_cookie *scm)
11587     scm->fp = NULL;
11588     for (i=fpl->count-1; i>=0; i--)
11589     fput(fpl->fp[i]);
11590     + free_uid(fpl->user);
11591     kfree(fpl);
11592     }
11593     }
11594     @@ -336,6 +342,7 @@ struct scm_fp_list *scm_fp_dup(struct scm_fp_list *fpl)
11595     for (i = 0; i < fpl->count; i++)
11596     get_file(fpl->fp[i]);
11597     new_fpl->max = new_fpl->count;
11598     + new_fpl->user = get_uid(fpl->user);
11599     }
11600     return new_fpl;
11601     }
11602     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
11603     index b2df375ec9c2..5bf88f58bee7 100644
11604     --- a/net/core/skbuff.c
11605     +++ b/net/core/skbuff.c
11606     @@ -79,6 +79,8 @@
11607    
11608     struct kmem_cache *skbuff_head_cache __read_mostly;
11609     static struct kmem_cache *skbuff_fclone_cache __read_mostly;
11610     +int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
11611     +EXPORT_SYMBOL(sysctl_max_skb_frags);
11612    
11613     /**
11614     * skb_panic - private function for out-of-line support
11615     diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
11616     index 95b6139d710c..a6beb7b6ae55 100644
11617     --- a/net/core/sysctl_net_core.c
11618     +++ b/net/core/sysctl_net_core.c
11619     @@ -26,6 +26,7 @@ static int zero = 0;
11620     static int one = 1;
11621     static int min_sndbuf = SOCK_MIN_SNDBUF;
11622     static int min_rcvbuf = SOCK_MIN_RCVBUF;
11623     +static int max_skb_frags = MAX_SKB_FRAGS;
11624    
11625     static int net_msg_warn; /* Unused, but still a sysctl */
11626    
11627     @@ -392,6 +393,15 @@ static struct ctl_table net_core_table[] = {
11628     .mode = 0644,
11629     .proc_handler = proc_dointvec
11630     },
11631     + {
11632     + .procname = "max_skb_frags",
11633     + .data = &sysctl_max_skb_frags,
11634     + .maxlen = sizeof(int),
11635     + .mode = 0644,
11636     + .proc_handler = proc_dointvec_minmax,
11637     + .extra1 = &one,
11638     + .extra2 = &max_skb_frags,
11639     + },
11640     { }
11641     };
11642    
11643     diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
11644     index 5684e14932bd..902d606324a0 100644
11645     --- a/net/dccp/ipv4.c
11646     +++ b/net/dccp/ipv4.c
11647     @@ -824,26 +824,26 @@ lookup:
11648    
11649     if (sk->sk_state == DCCP_NEW_SYN_RECV) {
11650     struct request_sock *req = inet_reqsk(sk);
11651     - struct sock *nsk = NULL;
11652     + struct sock *nsk;
11653    
11654     sk = req->rsk_listener;
11655     - if (likely(sk->sk_state == DCCP_LISTEN)) {
11656     - nsk = dccp_check_req(sk, skb, req);
11657     - } else {
11658     + if (unlikely(sk->sk_state != DCCP_LISTEN)) {
11659     inet_csk_reqsk_queue_drop_and_put(sk, req);
11660     goto lookup;
11661     }
11662     + sock_hold(sk);
11663     + nsk = dccp_check_req(sk, skb, req);
11664     if (!nsk) {
11665     reqsk_put(req);
11666     - goto discard_it;
11667     + goto discard_and_relse;
11668     }
11669     if (nsk == sk) {
11670     - sock_hold(sk);
11671     reqsk_put(req);
11672     } else if (dccp_child_process(sk, nsk, skb)) {
11673     dccp_v4_ctl_send_reset(sk, skb);
11674     - goto discard_it;
11675     + goto discard_and_relse;
11676     } else {
11677     + sock_put(sk);
11678     return 0;
11679     }
11680     }
11681     diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
11682     index 9c6d0508e63a..b8608b71a66d 100644
11683     --- a/net/dccp/ipv6.c
11684     +++ b/net/dccp/ipv6.c
11685     @@ -691,26 +691,26 @@ lookup:
11686    
11687     if (sk->sk_state == DCCP_NEW_SYN_RECV) {
11688     struct request_sock *req = inet_reqsk(sk);
11689     - struct sock *nsk = NULL;
11690     + struct sock *nsk;
11691    
11692     sk = req->rsk_listener;
11693     - if (likely(sk->sk_state == DCCP_LISTEN)) {
11694     - nsk = dccp_check_req(sk, skb, req);
11695     - } else {
11696     + if (unlikely(sk->sk_state != DCCP_LISTEN)) {
11697     inet_csk_reqsk_queue_drop_and_put(sk, req);
11698     goto lookup;
11699     }
11700     + sock_hold(sk);
11701     + nsk = dccp_check_req(sk, skb, req);
11702     if (!nsk) {
11703     reqsk_put(req);
11704     - goto discard_it;
11705     + goto discard_and_relse;
11706     }
11707     if (nsk == sk) {
11708     - sock_hold(sk);
11709     reqsk_put(req);
11710     } else if (dccp_child_process(sk, nsk, skb)) {
11711     dccp_v6_ctl_send_reset(sk, skb);
11712     - goto discard_it;
11713     + goto discard_and_relse;
11714     } else {
11715     + sock_put(sk);
11716     return 0;
11717     }
11718     }
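Both DCCP hunks (and the matching TCP ones later in this patch) close a listener refcount race: the listener is pinned with sock_hold() before dccp_check_req() can drop the request's last reference to it, and every failure path is funneled through discard_and_relse so the pin is always released. The shape of the fix as a generic userspace sketch (names and the failure stand-in are ours):

    #include <stdio.h>

    struct obj { int refs; };

    static void get_obj(struct obj *o) { o->refs++; }
    static void put_obj(struct obj *o) { if (--o->refs == 0) printf("freed\n"); }

    static int process(struct obj *listener, int input)
    {
        int err;

        get_obj(listener);            /* pin before anything can free it */

        err = (input < 0) ? -1 : 0;   /* stand-in for dccp_check_req() */
        if (err)
            goto discard_and_relse;

        put_obj(listener);            /* success path drops the pin too */
        return 0;

    discard_and_relse:                /* single exit for all error paths */
        put_obj(listener);
        return err;
    }

    int main(void)
    {
        struct obj listener = { .refs = 1 };

        process(&listener, -1);
        put_obj(&listener);           /* caller's reference: prints "freed" */
        return 0;
    }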
11719     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
11720     index cebd9d31e65a..f6303b17546b 100644
11721     --- a/net/ipv4/devinet.c
11722     +++ b/net/ipv4/devinet.c
11723     @@ -1847,7 +1847,7 @@ static int inet_netconf_get_devconf(struct sk_buff *in_skb,
11724     if (err < 0)
11725     goto errout;
11726    
11727     - err = EINVAL;
11728     + err = -EINVAL;
11729     if (!tb[NETCONFA_IFINDEX])
11730     goto errout;
11731    
11732     diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
11733     index 46b9c887bede..64148914803a 100644
11734     --- a/net/ipv4/inet_connection_sock.c
11735     +++ b/net/ipv4/inet_connection_sock.c
11736     @@ -789,14 +789,16 @@ static void inet_child_forget(struct sock *sk, struct request_sock *req,
11737     reqsk_put(req);
11738     }
11739    
11740     -void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
11741     - struct sock *child)
11742     +struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
11743     + struct request_sock *req,
11744     + struct sock *child)
11745     {
11746     struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
11747    
11748     spin_lock(&queue->rskq_lock);
11749     if (unlikely(sk->sk_state != TCP_LISTEN)) {
11750     inet_child_forget(sk, req, child);
11751     + child = NULL;
11752     } else {
11753     req->sk = child;
11754     req->dl_next = NULL;
11755     @@ -808,6 +810,7 @@ void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
11756     sk_acceptq_added(sk);
11757     }
11758     spin_unlock(&queue->rskq_lock);
11759     + return child;
11760     }
11761     EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
11762    
11763     @@ -817,11 +820,8 @@ struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
11764     if (own_req) {
11765     inet_csk_reqsk_queue_drop(sk, req);
11766     reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
11767     - inet_csk_reqsk_queue_add(sk, req, child);
11768     - /* Warning: caller must not call reqsk_put(req);
11769     - * child stole last reference on it.
11770     - */
11771     - return child;
11772     + if (inet_csk_reqsk_queue_add(sk, req, child))
11773     + return child;
11774     }
11775     /* Too bad, another child took ownership of the request, undo. */
11776     bh_unlock_sock(child);
11777     diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
11778     index 1fe55ae81781..b8a0607dab96 100644
11779     --- a/net/ipv4/ip_fragment.c
11780     +++ b/net/ipv4/ip_fragment.c
11781     @@ -661,6 +661,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
11782     struct ipq *qp;
11783    
11784     IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
11785     + skb_orphan(skb);
11786    
11787     /* Lookup (or create) queue header */
11788     qp = ip_find(net, ip_hdr(skb), user, vif);
11789     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
11790     index 5f73a7c03e27..a50124260f5a 100644
11791     --- a/net/ipv4/ip_sockglue.c
11792     +++ b/net/ipv4/ip_sockglue.c
11793     @@ -249,6 +249,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
11794     switch (cmsg->cmsg_type) {
11795     case IP_RETOPTS:
11796     err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
11797     +
11798     + /* Our caller is responsible for freeing ipc->opt */
11799     err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
11800     err < 40 ? err : 40);
11801     if (err)
11802     diff --git a/net/ipv4/netfilter/nf_defrag_ipv4.c b/net/ipv4/netfilter/nf_defrag_ipv4.c
11803     index 6fb869f646bf..a04dee536b8e 100644
11804     --- a/net/ipv4/netfilter/nf_defrag_ipv4.c
11805     +++ b/net/ipv4/netfilter/nf_defrag_ipv4.c
11806     @@ -27,8 +27,6 @@ static int nf_ct_ipv4_gather_frags(struct net *net, struct sk_buff *skb,
11807     {
11808     int err;
11809    
11810     - skb_orphan(skb);
11811     -
11812     local_bh_disable();
11813     err = ip_defrag(net, skb, user);
11814     local_bh_enable();
11815     diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
11816     index e89094ab5ddb..aa67e0e64b69 100644
11817     --- a/net/ipv4/ping.c
11818     +++ b/net/ipv4/ping.c
11819     @@ -746,8 +746,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
11820    
11821     if (msg->msg_controllen) {
11822     err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
11823     - if (err)
11824     + if (unlikely(err)) {
11825     + kfree(ipc.opt);
11826     return err;
11827     + }
11828     if (ipc.opt)
11829     free = 1;
11830     }
11831     diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
11832     index bc35f1842512..7113bae4e6a0 100644
11833     --- a/net/ipv4/raw.c
11834     +++ b/net/ipv4/raw.c
11835     @@ -547,8 +547,10 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
11836    
11837     if (msg->msg_controllen) {
11838     err = ip_cmsg_send(net, msg, &ipc, false);
11839     - if (err)
11840     + if (unlikely(err)) {
11841     + kfree(ipc.opt);
11842     goto out;
11843     + }
11844     if (ipc.opt)
11845     free = 1;
11846     }
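This group of hunks (ip_sockglue, ping, raw, and udp below) pins down an ownership contract: ip_cmsg_send() may have allocated options into ipc->opt before failing, so the caller must kfree() it on the error path as well as on success. The same contract in a userspace sketch (function names are illustrative):

    #include <stdlib.h>
    #include <string.h>

    /* May allocate into *opt and still fail afterwards; the caller
     * owns *opt on success AND on failure. */
    static int parse_options(const char *src, char **opt)
    {
        *opt = strdup(src);
        if (!*opt)
            return -1;
        if (strlen(src) > 40)    /* stand-in for a later validation error */
            return -2;
        return 0;
    }

    int main(void)
    {
        char *opt = NULL;

        if (parse_options("some,option,string", &opt) < 0) {
            free(opt);           /* mirrors the added kfree(ipc.opt) */
            return 1;
        }
        /* ... use opt ... */
        free(opt);
        return 0;
    }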
11847     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
11848     index 85f184e429c6..02c62299d717 100644
11849     --- a/net/ipv4/route.c
11850     +++ b/net/ipv4/route.c
11851     @@ -129,6 +129,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
11852     static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
11853     static int ip_rt_min_advmss __read_mostly = 256;
11854    
11855     +static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
11856     /*
11857     * Interface to generic destination cache.
11858     */
11859     @@ -755,7 +756,7 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow
11860     struct fib_nh *nh = &FIB_RES_NH(res);
11861    
11862     update_or_create_fnhe(nh, fl4->daddr, new_gw,
11863     - 0, 0);
11864     + 0, jiffies + ip_rt_gc_timeout);
11865     }
11866     if (kill_route)
11867     rt->dst.obsolete = DST_OBSOLETE_KILL;
11868     @@ -1556,6 +1557,36 @@ static void ip_handle_martian_source(struct net_device *dev,
11869     #endif
11870     }
11871    
11872     +static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
11873     +{
11874     + struct fnhe_hash_bucket *hash;
11875     + struct fib_nh_exception *fnhe, __rcu **fnhe_p;
11876     + u32 hval = fnhe_hashfun(daddr);
11877     +
11878     + spin_lock_bh(&fnhe_lock);
11879     +
11880     + hash = rcu_dereference_protected(nh->nh_exceptions,
11881     + lockdep_is_held(&fnhe_lock));
11882     + hash += hval;
11883     +
11884     + fnhe_p = &hash->chain;
11885     + fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
11886     + while (fnhe) {
11887     + if (fnhe->fnhe_daddr == daddr) {
11888     + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
11889     + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
11890     + fnhe_flush_routes(fnhe);
11891     + kfree_rcu(fnhe, rcu);
11892     + break;
11893     + }
11894     + fnhe_p = &fnhe->fnhe_next;
11895     + fnhe = rcu_dereference_protected(fnhe->fnhe_next,
11896     + lockdep_is_held(&fnhe_lock));
11897     + }
11898     +
11899     + spin_unlock_bh(&fnhe_lock);
11900     +}
11901     +
11902     /* called in rcu_read_lock() section */
11903     static int __mkroute_input(struct sk_buff *skb,
11904     const struct fib_result *res,
11905     @@ -1609,11 +1640,20 @@ static int __mkroute_input(struct sk_buff *skb,
11906    
11907     fnhe = find_exception(&FIB_RES_NH(*res), daddr);
11908     if (do_cache) {
11909     - if (fnhe)
11910     + if (fnhe) {
11911     rth = rcu_dereference(fnhe->fnhe_rth_input);
11912     - else
11913     - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
11914     + if (rth && rth->dst.expires &&
11915     + time_after(jiffies, rth->dst.expires)) {
11916     + ip_del_fnhe(&FIB_RES_NH(*res), daddr);
11917     + fnhe = NULL;
11918     + } else {
11919     + goto rt_cache;
11920     + }
11921     + }
11922     +
11923     + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
11924    
11925     +rt_cache:
11926     if (rt_cache_valid(rth)) {
11927     skb_dst_set_noref(skb, &rth->dst);
11928     goto out;
11929     @@ -2014,19 +2054,29 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
11930     struct fib_nh *nh = &FIB_RES_NH(*res);
11931    
11932     fnhe = find_exception(nh, fl4->daddr);
11933     - if (fnhe)
11934     + if (fnhe) {
11935     prth = &fnhe->fnhe_rth_output;
11936     - else {
11937     - if (unlikely(fl4->flowi4_flags &
11938     - FLOWI_FLAG_KNOWN_NH &&
11939     - !(nh->nh_gw &&
11940     - nh->nh_scope == RT_SCOPE_LINK))) {
11941     - do_cache = false;
11942     - goto add;
11943     + rth = rcu_dereference(*prth);
11944     + if (rth && rth->dst.expires &&
11945     + time_after(jiffies, rth->dst.expires)) {
11946     + ip_del_fnhe(nh, fl4->daddr);
11947     + fnhe = NULL;
11948     + } else {
11949     + goto rt_cache;
11950     }
11951     - prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
11952     }
11953     +
11954     + if (unlikely(fl4->flowi4_flags &
11955     + FLOWI_FLAG_KNOWN_NH &&
11956     + !(nh->nh_gw &&
11957     + nh->nh_scope == RT_SCOPE_LINK))) {
11958     + do_cache = false;
11959     + goto add;
11960     + }
11961     + prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
11962     rth = rcu_dereference(*prth);
11963     +
11964     +rt_cache:
11965     if (rt_cache_valid(rth)) {
11966     dst_hold(&rth->dst);
11967     return rth;
11968     @@ -2569,7 +2619,6 @@ void ip_rt_multicast_event(struct in_device *in_dev)
11969     }
11970    
11971     #ifdef CONFIG_SYSCTL
11972     -static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
11973     static int ip_rt_gc_interval __read_mostly = 60 * HZ;
11974     static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
11975     static int ip_rt_gc_elasticity __read_mostly = 8;
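The route changes expire stale redirect/PMTU exceptions: both the input and output lookups now check rth->dst.expires and call the new ip_del_fnhe() to drop the exception once its cached route has timed out. ip_del_fnhe() itself walks the chain through an indirect pointer (fnhe_p always addresses the link that reaches the current node), so the match is unlinked with a single store and the head needs no special case; rcu_assign_pointer() and kfree_rcu() keep concurrent readers safe. The same walk in plain userspace C, minus the RCU machinery:

    #include <stdio.h>
    #include <stdlib.h>

    struct fnhe { unsigned int daddr; struct fnhe *next; };

    /* 'pp' tracks the pointer that points at the current node, so
     * removing any node, including the head, is one pointer store. */
    static void del_entry(struct fnhe **pp, unsigned int daddr)
    {
        struct fnhe *f;

        while ((f = *pp) != NULL) {
            if (f->daddr == daddr) {
                *pp = f->next;   /* the single unlinking store */
                free(f);         /* kfree_rcu() in the kernel version */
                return;
            }
            pp = &f->next;
        }
    }

    int main(void)
    {
        struct fnhe *b = calloc(1, sizeof(*b)), *a = calloc(1, sizeof(*a));
        struct fnhe *head = a;

        a->daddr = 1; a->next = b;
        b->daddr = 2;
        del_entry(&head, 2);
        printf("head=%u next=%p\n", head->daddr, (void *)head->next);
        free(head);
        return 0;
    }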
11976     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
11977     index c82cca18c90f..036a76ba2ac2 100644
11978     --- a/net/ipv4/tcp.c
11979     +++ b/net/ipv4/tcp.c
11980     @@ -279,6 +279,7 @@
11981    
11982     #include <asm/uaccess.h>
11983     #include <asm/ioctls.h>
11984     +#include <asm/unaligned.h>
11985     #include <net/busy_poll.h>
11986    
11987     int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
11988     @@ -938,7 +939,7 @@ new_segment:
11989    
11990     i = skb_shinfo(skb)->nr_frags;
11991     can_coalesce = skb_can_coalesce(skb, i, page, offset);
11992     - if (!can_coalesce && i >= MAX_SKB_FRAGS) {
11993     + if (!can_coalesce && i >= sysctl_max_skb_frags) {
11994     tcp_mark_push(tp, skb);
11995     goto new_segment;
11996     }
11997     @@ -1211,7 +1212,7 @@ new_segment:
11998    
11999     if (!skb_can_coalesce(skb, i, pfrag->page,
12000     pfrag->offset)) {
12001     - if (i == MAX_SKB_FRAGS || !sg) {
12002     + if (i == sysctl_max_skb_frags || !sg) {
12003     tcp_mark_push(tp, skb);
12004     goto new_segment;
12005     }
12006     @@ -2637,6 +2638,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
12007     const struct inet_connection_sock *icsk = inet_csk(sk);
12008     u32 now = tcp_time_stamp;
12009     unsigned int start;
12010     + u64 rate64;
12011     u32 rate;
12012    
12013     memset(info, 0, sizeof(*info));
12014     @@ -2702,15 +2704,17 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
12015     info->tcpi_total_retrans = tp->total_retrans;
12016    
12017     rate = READ_ONCE(sk->sk_pacing_rate);
12018     - info->tcpi_pacing_rate = rate != ~0U ? rate : ~0ULL;
12019     + rate64 = rate != ~0U ? rate : ~0ULL;
12020     + put_unaligned(rate64, &info->tcpi_pacing_rate);
12021    
12022     rate = READ_ONCE(sk->sk_max_pacing_rate);
12023     - info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
12024     + rate64 = rate != ~0U ? rate : ~0ULL;
12025     + put_unaligned(rate64, &info->tcpi_max_pacing_rate);
12026    
12027     do {
12028     start = u64_stats_fetch_begin_irq(&tp->syncp);
12029     - info->tcpi_bytes_acked = tp->bytes_acked;
12030     - info->tcpi_bytes_received = tp->bytes_received;
12031     + put_unaligned(tp->bytes_acked, &info->tcpi_bytes_acked);
12032     + put_unaligned(tp->bytes_received, &info->tcpi_bytes_received);
12033     } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
12034     info->tcpi_segs_out = tp->segs_out;
12035     info->tcpi_segs_in = tp->segs_in;
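tcp_get_info() switches the 64-bit pacing-rate and byte counters over to put_unaligned() because struct tcp_info can place those u64 fields at offsets that are not naturally aligned for every ABI (32-bit userspace talking to a 64-bit kernel, for instance), and a plain 64-bit store to such an address can fault on strict-alignment architectures. In portable C the equivalent is a memcpy-based store:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Store a u64 at a possibly unaligned address. memcpy compiles
     * to a single store where unaligned access is legal and to byte
     * accesses where it is not; same intent as put_unaligned(). */
    static void put_unaligned_u64(uint64_t val, void *p)
    {
        memcpy(p, &val, sizeof(val));
    }

    int main(void)
    {
        unsigned char buf[16] = { 0 };

        put_unaligned_u64(0x1122334455667788ULL, buf + 3);  /* offset 3 */
        printf("%02x %02x\n", buf[3], buf[4]);              /* 88 77 on LE */
        return 0;
    }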
12036     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
12037     index d8841a2f1569..8c7e63163e92 100644
12038     --- a/net/ipv4/tcp_ipv4.c
12039     +++ b/net/ipv4/tcp_ipv4.c
12040     @@ -312,7 +312,7 @@ static void do_redirect(struct sk_buff *skb, struct sock *sk)
12041    
12042    
12043     /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
12044     -void tcp_req_err(struct sock *sk, u32 seq)
12045     +void tcp_req_err(struct sock *sk, u32 seq, bool abort)
12046     {
12047     struct request_sock *req = inet_reqsk(sk);
12048     struct net *net = sock_net(sk);
12049     @@ -324,7 +324,7 @@ void tcp_req_err(struct sock *sk, u32 seq)
12050    
12051     if (seq != tcp_rsk(req)->snt_isn) {
12052     NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
12053     - } else {
12054     + } else if (abort) {
12055     /*
12056     * Still in SYN_RECV, just remove it silently.
12057     * There is no good way to pass the error to the newly
12058     @@ -384,7 +384,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
12059     }
12060     seq = ntohl(th->seq);
12061     if (sk->sk_state == TCP_NEW_SYN_RECV)
12062     - return tcp_req_err(sk, seq);
12063     + return tcp_req_err(sk, seq,
12064     + type == ICMP_PARAMETERPROB ||
12065     + type == ICMP_TIME_EXCEEDED ||
12066     + (type == ICMP_DEST_UNREACH &&
12067     + (code == ICMP_NET_UNREACH ||
12068     + code == ICMP_HOST_UNREACH)));
12069    
12070     bh_lock_sock(sk);
12071     /* If too many ICMPs get dropped on busy
12072     @@ -705,7 +710,8 @@ release_sk1:
12073     outside socket context is ugly, certainly. What can I do?
12074     */
12075    
12076     -static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
12077     +static void tcp_v4_send_ack(struct net *net,
12078     + struct sk_buff *skb, u32 seq, u32 ack,
12079     u32 win, u32 tsval, u32 tsecr, int oif,
12080     struct tcp_md5sig_key *key,
12081     int reply_flags, u8 tos)
12082     @@ -720,7 +726,6 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
12083     ];
12084     } rep;
12085     struct ip_reply_arg arg;
12086     - struct net *net = dev_net(skb_dst(skb)->dev);
12087    
12088     memset(&rep.th, 0, sizeof(struct tcphdr));
12089     memset(&arg, 0, sizeof(arg));
12090     @@ -782,7 +787,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
12091     struct inet_timewait_sock *tw = inet_twsk(sk);
12092     struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
12093    
12094     - tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
12095     + tcp_v4_send_ack(sock_net(sk), skb,
12096     + tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
12097     tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
12098     tcp_time_stamp + tcptw->tw_ts_offset,
12099     tcptw->tw_ts_recent,
12100     @@ -801,8 +807,10 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
12101     /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
12102     * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
12103     */
12104     - tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
12105     - tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
12106     + u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
12107     + tcp_sk(sk)->snd_nxt;
12108     +
12109     + tcp_v4_send_ack(sock_net(sk), skb, seq,
12110     tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
12111     tcp_time_stamp,
12112     req->ts_recent,
12113     @@ -1586,28 +1594,30 @@ process:
12114    
12115     if (sk->sk_state == TCP_NEW_SYN_RECV) {
12116     struct request_sock *req = inet_reqsk(sk);
12117     - struct sock *nsk = NULL;
12118     + struct sock *nsk;
12119    
12120     sk = req->rsk_listener;
12121     - if (tcp_v4_inbound_md5_hash(sk, skb))
12122     - goto discard_and_relse;
12123     - if (likely(sk->sk_state == TCP_LISTEN)) {
12124     - nsk = tcp_check_req(sk, skb, req, false);
12125     - } else {
12126     + if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
12127     + reqsk_put(req);
12128     + goto discard_it;
12129     + }
12130     + if (unlikely(sk->sk_state != TCP_LISTEN)) {
12131     inet_csk_reqsk_queue_drop_and_put(sk, req);
12132     goto lookup;
12133     }
12134     + sock_hold(sk);
12135     + nsk = tcp_check_req(sk, skb, req, false);
12136     if (!nsk) {
12137     reqsk_put(req);
12138     - goto discard_it;
12139     + goto discard_and_relse;
12140     }
12141     if (nsk == sk) {
12142     - sock_hold(sk);
12143     reqsk_put(req);
12144     } else if (tcp_child_process(sk, nsk, skb)) {
12145     tcp_v4_send_reset(nsk, skb);
12146     - goto discard_it;
12147     + goto discard_and_relse;
12148     } else {
12149     + sock_put(sk);
12150     return 0;
12151     }
12152     }
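tcp_v4_err() now tells tcp_req_err() whether the ICMP report is fatal for a TCP_NEW_SYN_RECV socket; only the listed hard errors abort the embryonic connection, while soft errors leave the request in place to retransmit the SYN-ACK. The condition from the hunk, lifted into a self-contained predicate (constants from the standard ICMP header; the helper name is ours):

    #include <stdio.h>
    #include <stdbool.h>
    #include <netinet/ip_icmp.h>

    /* Mirrors the condition added in tcp_v4_err(): these ICMP errors
     * abort a request socket, anything else is treated as soft. */
    static bool icmp_aborts_syn_recv(int type, int code)
    {
        return type == ICMP_PARAMETERPROB ||
               type == ICMP_TIME_EXCEEDED ||
               (type == ICMP_DEST_UNREACH &&
                (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH));
    }

    int main(void)
    {
        printf("port unreach: %d\n",
               icmp_aborts_syn_recv(ICMP_DEST_UNREACH, ICMP_PORT_UNREACH)); /* 0 */
        printf("host unreach: %d\n",
               icmp_aborts_syn_recv(ICMP_DEST_UNREACH, ICMP_HOST_UNREACH)); /* 1 */
        return 0;
    }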
12153     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
12154     index c43890848641..7f8ab46adf61 100644
12155     --- a/net/ipv4/udp.c
12156     +++ b/net/ipv4/udp.c
12157     @@ -966,8 +966,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
12158     if (msg->msg_controllen) {
12159     err = ip_cmsg_send(sock_net(sk), msg, &ipc,
12160     sk->sk_family == AF_INET6);
12161     - if (err)
12162     + if (unlikely(err)) {
12163     + kfree(ipc.opt);
12164     return err;
12165     + }
12166     if (ipc.opt)
12167     free = 1;
12168     connected = 0;
12169     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
12170     index 1f21087accab..e8d3da0817d3 100644
12171     --- a/net/ipv6/addrconf.c
12172     +++ b/net/ipv6/addrconf.c
12173     @@ -583,7 +583,7 @@ static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
12174     if (err < 0)
12175     goto errout;
12176    
12177     - err = EINVAL;
12178     + err = -EINVAL;
12179     if (!tb[NETCONFA_IFINDEX])
12180     goto errout;
12181    
12182     @@ -3506,6 +3506,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12183     {
12184     struct inet6_dev *idev = ifp->idev;
12185     struct net_device *dev = idev->dev;
12186     + bool notify = false;
12187    
12188     addrconf_join_solict(dev, &ifp->addr);
12189    
12190     @@ -3551,7 +3552,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12191     /* Because optimistic nodes can use this address,
12192     * notify listeners. If DAD fails, RTM_DELADDR is sent.
12193     */
12194     - ipv6_ifa_notify(RTM_NEWADDR, ifp);
12195     + notify = true;
12196     }
12197     }
12198    
12199     @@ -3559,6 +3560,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
12200     out:
12201     spin_unlock(&ifp->lock);
12202     read_unlock_bh(&idev->lock);
12203     + if (notify)
12204     + ipv6_ifa_notify(RTM_NEWADDR, ifp);
12205     }
12206    
12207     static void addrconf_dad_start(struct inet6_ifaddr *ifp)
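The addrconf fix records the pending RTM_NEWADDR notification in a local flag and fires it only after ifp->lock and idev->lock are released, keeping the notifier call (which can take further locks) out of the spinlocked section. The record-under-lock, act-after-unlock pattern in a userspace sketch:

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
    static int optimistic = 1;

    static void notify_listeners(void)
    {
        /* May block or take other locks; must not run under state_lock. */
        printf("notified\n");
    }

    static void dad_begin(void)
    {
        int notify = 0;

        pthread_mutex_lock(&state_lock);
        if (optimistic)
            notify = 1;          /* record the decision under the lock... */
        pthread_mutex_unlock(&state_lock);

        if (notify)
            notify_listeners();  /* ...act on it after dropping the lock */
    }

    int main(void)
    {
        dad_begin();
        return 0;
    }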
12208     diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
12209     index 517c55b01ba8..428162155280 100644
12210     --- a/net/ipv6/datagram.c
12211     +++ b/net/ipv6/datagram.c
12212     @@ -162,6 +162,9 @@ ipv4_connected:
12213     fl6.fl6_dport = inet->inet_dport;
12214     fl6.fl6_sport = inet->inet_sport;
12215    
12216     + if (!fl6.flowi6_oif)
12217     + fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
12218     +
12219     if (!fl6.flowi6_oif && (addr_type&IPV6_ADDR_MULTICAST))
12220     fl6.flowi6_oif = np->mcast_oif;
12221    
12222     diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
12223     index 1f9ebe3cbb4a..dc2db4f7b182 100644
12224     --- a/net/ipv6/ip6_flowlabel.c
12225     +++ b/net/ipv6/ip6_flowlabel.c
12226     @@ -540,12 +540,13 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
12227     }
12228     spin_lock_bh(&ip6_sk_fl_lock);
12229     for (sflp = &np->ipv6_fl_list;
12230     - (sfl = rcu_dereference(*sflp)) != NULL;
12231     + (sfl = rcu_dereference_protected(*sflp,
12232     + lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
12233     sflp = &sfl->next) {
12234     if (sfl->fl->label == freq.flr_label) {
12235     if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
12236     np->flow_label &= ~IPV6_FLOWLABEL_MASK;
12237     - *sflp = rcu_dereference(sfl->next);
12238     + *sflp = sfl->next;
12239     spin_unlock_bh(&ip6_sk_fl_lock);
12240     fl_release(sfl->fl);
12241     kfree_rcu(sfl, rcu);
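The flowlabel hunk is an RCU-correctness fix: this is the update side, already serialized by ip6_sk_fl_lock, so rcu_dereference() (which expects an RCU read-side critical section) is the wrong primitive. rcu_dereference_protected() documents the real protection, lets lockdep verify it, and makes the plain next-pointer store legal. The two sides of the API, as a kernel-style sketch using the names from this hunk:

    /* Reader: inside rcu_read_lock(), may run concurrently with updates. */
    rcu_read_lock();
    sfl = rcu_dereference(np->ipv6_fl_list);
    /* ... read-only traversal ... */
    rcu_read_unlock();

    /* Updater: protected by the spinlock, not by RCU. The second
     * argument lets lockdep check the claimed protection at runtime. */
    spin_lock_bh(&ip6_sk_fl_lock);
    sfl = rcu_dereference_protected(np->ipv6_fl_list,
                                    lockdep_is_held(&ip6_sk_fl_lock));
    /* ... unlink entries, then kfree_rcu() them as above ... */
    spin_unlock_bh(&ip6_sk_fl_lock);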
12242     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
12243     index 6473889f1736..31144c486c52 100644
12244     --- a/net/ipv6/ip6_output.c
12245     +++ b/net/ipv6/ip6_output.c
12246     @@ -909,6 +909,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
12247     struct rt6_info *rt;
12248     #endif
12249     int err;
12250     + int flags = 0;
12251    
12252     /* The correct way to handle this would be to do
12253     * ip6_route_get_saddr, and then ip6_route_output; however,
12254     @@ -940,10 +941,13 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
12255     dst_release(*dst);
12256     *dst = NULL;
12257     }
12258     +
12259     + if (fl6->flowi6_oif)
12260     + flags |= RT6_LOOKUP_F_IFACE;
12261     }
12262    
12263     if (!*dst)
12264     - *dst = ip6_route_output(net, sk, fl6);
12265     + *dst = ip6_route_output_flags(net, sk, fl6, flags);
12266    
12267     err = (*dst)->error;
12268     if (err)
12269     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
12270     index 826e6aa44f8d..3f164d3aaee2 100644
12271     --- a/net/ipv6/route.c
12272     +++ b/net/ipv6/route.c
12273     @@ -1174,11 +1174,10 @@ static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table
12274     return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
12275     }
12276    
12277     -struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
12278     - struct flowi6 *fl6)
12279     +struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
12280     + struct flowi6 *fl6, int flags)
12281     {
12282     struct dst_entry *dst;
12283     - int flags = 0;
12284     bool any_src;
12285    
12286     dst = l3mdev_rt6_dst_by_oif(net, fl6);
12287     @@ -1199,7 +1198,7 @@ struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
12288    
12289     return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
12290     }
12291     -EXPORT_SYMBOL(ip6_route_output);
12292     +EXPORT_SYMBOL_GPL(ip6_route_output_flags);
12293    
12294     struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
12295     {
12296     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
12297     index bd100b47c717..b8d405623f4f 100644
12298     --- a/net/ipv6/tcp_ipv6.c
12299     +++ b/net/ipv6/tcp_ipv6.c
12300     @@ -328,6 +328,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12301     struct tcp_sock *tp;
12302     __u32 seq, snd_una;
12303     struct sock *sk;
12304     + bool fatal;
12305     int err;
12306    
12307     sk = __inet6_lookup_established(net, &tcp_hashinfo,
12308     @@ -346,8 +347,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12309     return;
12310     }
12311     seq = ntohl(th->seq);
12312     + fatal = icmpv6_err_convert(type, code, &err);
12313     if (sk->sk_state == TCP_NEW_SYN_RECV)
12314     - return tcp_req_err(sk, seq);
12315     + return tcp_req_err(sk, seq, fatal);
12316    
12317     bh_lock_sock(sk);
12318     if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
12319     @@ -401,7 +403,6 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
12320     goto out;
12321     }
12322    
12323     - icmpv6_err_convert(type, code, &err);
12324    
12325     /* Might be for an request_sock */
12326     switch (sk->sk_state) {
12327     @@ -1387,7 +1388,7 @@ process:
12328    
12329     if (sk->sk_state == TCP_NEW_SYN_RECV) {
12330     struct request_sock *req = inet_reqsk(sk);
12331     - struct sock *nsk = NULL;
12332     + struct sock *nsk;
12333    
12334     sk = req->rsk_listener;
12335     tcp_v6_fill_cb(skb, hdr, th);
12336     @@ -1395,24 +1396,24 @@ process:
12337     reqsk_put(req);
12338     goto discard_it;
12339     }
12340     - if (likely(sk->sk_state == TCP_LISTEN)) {
12341     - nsk = tcp_check_req(sk, skb, req, false);
12342     - } else {
12343     + if (unlikely(sk->sk_state != TCP_LISTEN)) {
12344     inet_csk_reqsk_queue_drop_and_put(sk, req);
12345     goto lookup;
12346     }
12347     + sock_hold(sk);
12348     + nsk = tcp_check_req(sk, skb, req, false);
12349     if (!nsk) {
12350     reqsk_put(req);
12351     - goto discard_it;
12352     + goto discard_and_relse;
12353     }
12354     if (nsk == sk) {
12355     - sock_hold(sk);
12356     reqsk_put(req);
12357     tcp_v6_restore_cb(skb);
12358     } else if (tcp_child_process(sk, nsk, skb)) {
12359     tcp_v6_send_reset(nsk, skb);
12360     - goto discard_it;
12361     + goto discard_and_relse;
12362     } else {
12363     + sock_put(sk);
12364     return 0;
12365     }
12366     }
12367     diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
12368     index 435608c4306d..20ab7b2ec463 100644
12369     --- a/net/iucv/af_iucv.c
12370     +++ b/net/iucv/af_iucv.c
12371     @@ -708,6 +708,9 @@ static int iucv_sock_bind(struct socket *sock, struct sockaddr *addr,
12372     if (!addr || addr->sa_family != AF_IUCV)
12373     return -EINVAL;
12374    
12375     + if (addr_len < sizeof(struct sockaddr_iucv))
12376     + return -EINVAL;
12377     +
12378     lock_sock(sk);
12379     if (sk->sk_state != IUCV_OPEN) {
12380     err = -EBADFD;
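The af_iucv check is a classic bind() hardening: addr_len comes straight from userspace, and without the length test the code would read sockaddr_iucv fields beyond the buffer the caller actually supplied. The same defensive shape in a generic handler (userspace sketch; AF_INET is used only for illustration):

    #include <errno.h>
    #include <sys/socket.h>
    #include <netinet/in.h>

    static int my_bind(const struct sockaddr *addr, socklen_t addr_len)
    {
        const struct sockaddr_in *sin;

        if (!addr || addr->sa_family != AF_INET)
            return -EINVAL;

        /* Reject short addresses before touching family-specific
         * fields that may lie past the caller's buffer. */
        if (addr_len < sizeof(struct sockaddr_in))
            return -EINVAL;

        sin = (const struct sockaddr_in *)addr;
        (void)sin->sin_port;   /* now known to be within bounds */
        return 0;
    }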
12381     diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
12382     index f93c5be612a7..2caaa84ce92d 100644
12383     --- a/net/l2tp/l2tp_netlink.c
12384     +++ b/net/l2tp/l2tp_netlink.c
12385     @@ -124,8 +124,13 @@ static int l2tp_tunnel_notify(struct genl_family *family,
12386     ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
12387     NLM_F_ACK, tunnel, cmd);
12388    
12389     - if (ret >= 0)
12390     - return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12391     + if (ret >= 0) {
12392     + ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12393     + /* We don't care if no one is listening */
12394     + if (ret == -ESRCH)
12395     + ret = 0;
12396     + return ret;
12397     + }
12398    
12399     nlmsg_free(msg);
12400    
12401     @@ -147,8 +152,13 @@ static int l2tp_session_notify(struct genl_family *family,
12402     ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
12403     NLM_F_ACK, session, cmd);
12404    
12405     - if (ret >= 0)
12406     - return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12407     + if (ret >= 0) {
12408     + ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
12409     + /* We don't care if no one is listening */
12410     + if (ret == -ESRCH)
12411     + ret = 0;
12412     + return ret;
12413     + }
12414    
12415     nlmsg_free(msg);
12416    
12417     diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
12418     index 337bb5d78003..6a12b0f5cac8 100644
12419     --- a/net/mac80211/ibss.c
12420     +++ b/net/mac80211/ibss.c
12421     @@ -1732,7 +1732,6 @@ void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local)
12422     if (sdata->vif.type != NL80211_IFTYPE_ADHOC)
12423     continue;
12424     sdata->u.ibss.last_scan_completed = jiffies;
12425     - ieee80211_queue_work(&local->hw, &sdata->work);
12426     }
12427     mutex_unlock(&local->iflist_mtx);
12428     }
12429     diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
12430     index fa28500f28fd..6f85b6ab8e51 100644
12431     --- a/net/mac80211/mesh.c
12432     +++ b/net/mac80211/mesh.c
12433     @@ -1370,17 +1370,6 @@ out:
12434     sdata_unlock(sdata);
12435     }
12436    
12437     -void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
12438     -{
12439     - struct ieee80211_sub_if_data *sdata;
12440     -
12441     - rcu_read_lock();
12442     - list_for_each_entry_rcu(sdata, &local->interfaces, list)
12443     - if (ieee80211_vif_is_mesh(&sdata->vif) &&
12444     - ieee80211_sdata_running(sdata))
12445     - ieee80211_queue_work(&local->hw, &sdata->work);
12446     - rcu_read_unlock();
12447     -}
12448    
12449     void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
12450     {
12451     diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
12452     index a1596344c3ba..4a8019f79fb2 100644
12453     --- a/net/mac80211/mesh.h
12454     +++ b/net/mac80211/mesh.h
12455     @@ -362,14 +362,10 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
12456     return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP;
12457     }
12458    
12459     -void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local);
12460     -
12461     void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
12462     void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
12463     void ieee80211s_stop(void);
12464     #else
12465     -static inline void
12466     -ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local) {}
12467     static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
12468     { return false; }
12469     static inline void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
12470     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
12471     index 3aa04344942b..83097c3832d1 100644
12472     --- a/net/mac80211/mlme.c
12473     +++ b/net/mac80211/mlme.c
12474     @@ -4003,8 +4003,6 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
12475     if (!ieee80211_hw_check(&sdata->local->hw, CONNECTION_MONITOR))
12476     ieee80211_queue_work(&sdata->local->hw,
12477     &sdata->u.mgd.monitor_work);
12478     - /* and do all the other regular work too */
12479     - ieee80211_queue_work(&sdata->local->hw, &sdata->work);
12480     }
12481     }
12482    
12483     diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
12484     index a413e52f7691..acbe182b75d1 100644
12485     --- a/net/mac80211/scan.c
12486     +++ b/net/mac80211/scan.c
12487     @@ -314,6 +314,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
12488     bool was_scanning = local->scanning;
12489     struct cfg80211_scan_request *scan_req;
12490     struct ieee80211_sub_if_data *scan_sdata;
12491     + struct ieee80211_sub_if_data *sdata;
12492    
12493     lockdep_assert_held(&local->mtx);
12494    
12495     @@ -373,7 +374,16 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
12496    
12497     ieee80211_mlme_notify_scan_completed(local);
12498     ieee80211_ibss_notify_scan_completed(local);
12499     - ieee80211_mesh_notify_scan_completed(local);
12500     +
12501     + /* Requeue all the work that might have been ignored while
12502     + * the scan was in progress; if there was none, this will
12503     + * just be a no-op for the particular interface.
12504     + */
12505     + list_for_each_entry_rcu(sdata, &local->interfaces, list) {
12506     + if (ieee80211_sdata_running(sdata))
12507     + ieee80211_queue_work(&sdata->local->hw, &sdata->work);
12508     + }
12509     +
12510     if (was_scanning)
12511     ieee80211_start_next_roc(local);
12512     }
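The replacement loop above pokes the deferred work of every running interface rather than only mesh interfaces. A toy model of the same idea, assuming a made-up iface structure with a pending-work flag (queueing where nothing is pending is the no-op case the comment mentions):

    #include <stdio.h>

    /* Toy model only: iface and its fields are invented for this
     * sketch, not mac80211 structures. */
    struct iface {
            const char *name;
            int running;
            int has_pending_work;
    };

    static void queue_work(struct iface *i)
    {
            if (!i->has_pending_work)
                    return; /* the no-op case the comment mentions */
            printf("%s: running deferred work\n", i->name);
            i->has_pending_work = 0;
    }

    int main(void)
    {
            struct iface ifs[] = {
                    { "wlan0", 1, 1 },
                    { "mesh0", 1, 0 },
                    { "p2p0",  0, 1 }, /* not running: skipped */
            };

            for (int i = 0; i < 3; i++)
                    if (ifs[i].running)
                            queue_work(&ifs[i]);
            return 0;
    }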
12513     diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
12514     index 1605691d9414..d933cb89efac 100644
12515     --- a/net/openvswitch/vport-vxlan.c
12516     +++ b/net/openvswitch/vport-vxlan.c
12517     @@ -90,7 +90,7 @@ static struct vport *vxlan_tnl_create(const struct vport_parms *parms)
12518     int err;
12519     struct vxlan_config conf = {
12520     .no_share = true,
12521     - .flags = VXLAN_F_COLLECT_METADATA,
12522     + .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
12523     };
12524    
12525     if (!options) {
12526     diff --git a/net/rfkill/core.c b/net/rfkill/core.c
12527     index f53bf3b6558b..cf5b69ab1829 100644
12528     --- a/net/rfkill/core.c
12529     +++ b/net/rfkill/core.c
12530     @@ -1095,17 +1095,6 @@ static unsigned int rfkill_fop_poll(struct file *file, poll_table *wait)
12531     return res;
12532     }
12533    
12534     -static bool rfkill_readable(struct rfkill_data *data)
12535     -{
12536     - bool r;
12537     -
12538     - mutex_lock(&data->mtx);
12539     - r = !list_empty(&data->events);
12540     - mutex_unlock(&data->mtx);
12541     -
12542     - return r;
12543     -}
12544     -
12545     static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
12546     size_t count, loff_t *pos)
12547     {
12548     @@ -1122,8 +1111,11 @@ static ssize_t rfkill_fop_read(struct file *file, char __user *buf,
12549     goto out;
12550     }
12551     mutex_unlock(&data->mtx);
12552     + /* Since we re-check and it just compares pointers,
12553     + * using !list_empty() without locking isn't a problem.
12554     + */
12555     ret = wait_event_interruptible(data->read_wait,
12556     - rfkill_readable(data));
12557     + !list_empty(&data->events));
12558     mutex_lock(&data->mtx);
12559    
12560     if (ret)
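The rfkill change drops the locked helper because the wait predicate only needs a cheap, racy peek; the authoritative check still happens under the mutex after waking. A single-threaded userspace analogy of that check-then-lock-then-recheck shape, using pthreads rather than the kernel's wait_event_interruptible():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static bool have_events = true; /* pretend one event is queued */

    int main(void)
    {
            /* Cheap unlocked peek: a stale answer only costs a retry,
             * because the real check is redone under the lock. */
            if (have_events) {
                    pthread_mutex_lock(&mtx);
                    if (have_events) { /* authoritative re-check */
                            have_events = false;
                            printf("consumed an event\n");
                    }
                    pthread_mutex_unlock(&mtx);
            }
            return 0;
    }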
12561     diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
12562     index b5c2cf2aa6d4..af1acf009866 100644
12563     --- a/net/sched/sch_api.c
12564     +++ b/net/sched/sch_api.c
12565     @@ -1852,6 +1852,7 @@ reset:
12566     }
12567    
12568     tp = old_tp;
12569     + protocol = tc_skb_protocol(skb);
12570     goto reclassify;
12571     #endif
12572     }
12573     diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
12574     index 3d9ea9a48289..8b4ff315695e 100644
12575     --- a/net/sctp/protocol.c
12576     +++ b/net/sctp/protocol.c
12577     @@ -60,6 +60,8 @@
12578     #include <net/inet_common.h>
12579     #include <net/inet_ecn.h>
12580    
12581     +#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
12582     +
12583     /* Global data structures. */
12584     struct sctp_globals sctp_globals __read_mostly;
12585    
12586     @@ -1352,6 +1354,8 @@ static __init int sctp_init(void)
12587     unsigned long limit;
12588     int max_share;
12589     int order;
12590     + int num_entries;
12591     + int max_entry_order;
12592    
12593     sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
12594    
12595     @@ -1404,14 +1408,24 @@ static __init int sctp_init(void)
12596    
12597     /* Size and allocate the association hash table.
12598     * The methodology is similar to that of the tcp hash tables.
12599     + * Though not identical. Start by getting a goal size.
12600     */
12601     if (totalram_pages >= (128 * 1024))
12602     goal = totalram_pages >> (22 - PAGE_SHIFT);
12603     else
12604     goal = totalram_pages >> (24 - PAGE_SHIFT);
12605    
12606     - for (order = 0; (1UL << order) < goal; order++)
12607     - ;
12608     + /* Then compute the page order for said goal */
12609     + order = get_order(goal);
12610     +
12611     + /* Now compute the required page order for the maximum-sized table we
12612     + * want to create.
12613     + */
12614     + max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
12615     + sizeof(struct sctp_bind_hashbucket));
12616     +
12617     + /* Limit the page order by that maximum hash table size */
12618     + order = min(order, max_entry_order);
12619    
12620     do {
12621     sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE /
12622     @@ -1445,20 +1459,35 @@ static __init int sctp_init(void)
12623     INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
12624     }
12625    
12626     - /* Allocate and initialize the SCTP port hash table. */
12627     + /* Allocate and initialize the SCTP port hash table.
12628     + * Note that order is initialized to start at the maximum-sized
12629     + * table we want to support. If we can't get that many pages,
12630     + * reduce the order and try again.
12631     + */
12632     do {
12633     - sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
12634     - sizeof(struct sctp_bind_hashbucket);
12635     - if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
12636     - continue;
12637     sctp_port_hashtable = (struct sctp_bind_hashbucket *)
12638     __get_free_pages(GFP_ATOMIC|__GFP_NOWARN, order);
12639     } while (!sctp_port_hashtable && --order > 0);
12640     +
12641     if (!sctp_port_hashtable) {
12642     pr_err("Failed bind hash alloc\n");
12643     status = -ENOMEM;
12644     goto err_bhash_alloc;
12645     }
12646     +
12647     + /* Now compute the number of entries that will fit in the
12648     + * port hash space we allocated
12649     + */
12650     + num_entries = (1UL << order) * PAGE_SIZE /
12651     + sizeof(struct sctp_bind_hashbucket);
12652     +
12653     + /* And finish by rounding it down to the nearest power of two.
12654     + * This wastes some memory of course, but it's needed because
12655     + * the hash function operates based on the assumption
12656     + * that the number of entries is a power of two.
12657     + */
12658     + sctp_port_hashsize = rounddown_pow_of_two(num_entries);
12659     +
12660     for (i = 0; i < sctp_port_hashsize; i++) {
12661     spin_lock_init(&sctp_port_hashtable[i].lock);
12662     INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
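The new sizing logic allocates whatever pages it can get and then shrinks the entry count to a power of two, because the hash function indexes with a mask. A small standalone sketch of the rounding step, with rounddown_pow2() as a userspace stand-in for the kernel's rounddown_pow_of_two() and made-up bucket sizes:

    #include <stdio.h>

    /* Userspace stand-in for the kernel's rounddown_pow_of_two();
     * assumes v >= 1. */
    static unsigned long rounddown_pow2(unsigned long v)
    {
            unsigned long p = 1;

            while (p <= v / 2)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            /* Made-up numbers: 3 pages of 4096 bytes, 40-byte buckets. */
            unsigned long num_entries = 3UL * 4096 / 40; /* 307 */
            unsigned long hashsize = rounddown_pow2(num_entries); /* 256 */

            /* A power-of-two size lets "hash & (hashsize - 1)" replace
             * a modulo, which is why the extra entries are sacrificed. */
            printf("%lu entries -> hash size %lu\n", num_entries, hashsize);
            return 0;
    }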
12663     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
12664     index ef1d90fdc773..be1489fc3234 100644
12665     --- a/net/sctp/socket.c
12666     +++ b/net/sctp/socket.c
12667     @@ -5542,6 +5542,7 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
12668     struct sctp_hmac_algo_param *hmacs;
12669     __u16 data_len = 0;
12670     u32 num_idents;
12671     + int i;
12672    
12673     if (!ep->auth_enable)
12674     return -EACCES;
12675     @@ -5559,8 +5560,12 @@ static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
12676     return -EFAULT;
12677     if (put_user(num_idents, &p->shmac_num_idents))
12678     return -EFAULT;
12679     - if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
12680     - return -EFAULT;
12681     + for (i = 0; i < num_idents; i++) {
12682     + __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
12683     +
12684     + if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
12685     + return -EFAULT;
12686     + }
12687     return 0;
12688     }
12689    
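The hmac fix copies identifiers out one element at a time so each can be byte-swapped from the big-endian wire format, instead of handing raw network-order data to userspace. A self-contained illustration using the standard ntohs()/htons() pair (the three identifier values are arbitrary):

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
            /* Arbitrary identifiers stored big-endian, as hmac_ids[]
             * is inside the SCTP parameter. */
            unsigned short wire_ids[3] = { htons(1), htons(2), htons(3) };
            unsigned short host_ids[3];

            /* Convert element by element instead of copying raw bytes,
             * so the reader sees host byte order on every architecture. */
            for (int i = 0; i < 3; i++)
                    host_ids[i] = ntohs(wire_ids[i]);

            for (int i = 0; i < 3; i++)
                    printf("id[%d] = %u\n", i, host_ids[i]);
            return 0;
    }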
12690     @@ -6640,6 +6645,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
12691    
12692     if (cmsgs->srinfo->sinfo_flags &
12693     ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
12694     + SCTP_SACK_IMMEDIATELY |
12695     SCTP_ABORT | SCTP_EOF))
12696     return -EINVAL;
12697     break;
12698     @@ -6663,6 +6669,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
12699    
12700     if (cmsgs->sinfo->snd_flags &
12701     ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
12702     + SCTP_SACK_IMMEDIATELY |
12703     SCTP_ABORT | SCTP_EOF))
12704     return -EINVAL;
12705     break;
12706     diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
12707     index 5e4f815c2b34..21e20353178e 100644
12708     --- a/net/sunrpc/cache.c
12709     +++ b/net/sunrpc/cache.c
12710     @@ -1225,7 +1225,7 @@ int qword_get(char **bpp, char *dest, int bufsize)
12711     if (bp[0] == '\\' && bp[1] == 'x') {
12712     /* HEX STRING */
12713     bp += 2;
12714     - while (len < bufsize) {
12715     + while (len < bufsize - 1) {
12716     int h, l;
12717    
12718     h = hex_to_bin(bp[0]);
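The qword_get bound now reserves one byte for the terminating NUL. A tiny sketch of the corrected bound in isolation (copy_bounded() is a hypothetical helper, and bufsize is assumed to be at least 1):

    #include <stdio.h>
    #include <string.h>

    /* copy_bounded() is a hypothetical helper showing the fixed
     * bound: stop at bufsize - 1 so the NUL always fits. */
    static void copy_bounded(char *dest, size_t bufsize, const char *src)
    {
            size_t len = 0;

            while (len < bufsize - 1 && src[len]) {
                    dest[len] = src[len];
                    len++;
            }
            dest[len] = '\0';
    }

    int main(void)
    {
            char buf[4];

            copy_bounded(buf, sizeof(buf), "abcdef");
            printf("%s\n", buf); /* prints "abc" */
            return 0;
    }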
12719     diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
12720     index f34e535e93bd..d5d7132ac847 100644
12721     --- a/net/switchdev/switchdev.c
12722     +++ b/net/switchdev/switchdev.c
12723     @@ -20,6 +20,7 @@
12724     #include <linux/list.h>
12725     #include <linux/workqueue.h>
12726     #include <linux/if_vlan.h>
12727     +#include <linux/rtnetlink.h>
12728     #include <net/ip_fib.h>
12729     #include <net/switchdev.h>
12730    
12731     @@ -565,7 +566,6 @@ int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
12732     }
12733     EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
12734    
12735     -static DEFINE_MUTEX(switchdev_mutex);
12736     static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
12737    
12738     /**
12739     @@ -580,9 +580,9 @@ int register_switchdev_notifier(struct notifier_block *nb)
12740     {
12741     int err;
12742    
12743     - mutex_lock(&switchdev_mutex);
12744     + rtnl_lock();
12745     err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
12746     - mutex_unlock(&switchdev_mutex);
12747     + rtnl_unlock();
12748     return err;
12749     }
12750     EXPORT_SYMBOL_GPL(register_switchdev_notifier);
12751     @@ -598,9 +598,9 @@ int unregister_switchdev_notifier(struct notifier_block *nb)
12752     {
12753     int err;
12754    
12755     - mutex_lock(&switchdev_mutex);
12756     + rtnl_lock();
12757     err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
12758     - mutex_unlock(&switchdev_mutex);
12759     + rtnl_unlock();
12760     return err;
12761     }
12762     EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
12763     @@ -614,16 +614,17 @@ EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
12764     * Call all network notifier blocks. This should be called by driver
12765     * when it needs to propagate hardware event.
12766     * Return values are same as for atomic_notifier_call_chain().
12767     + * rtnl_lock must be held.
12768     */
12769     int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
12770     struct switchdev_notifier_info *info)
12771     {
12772     int err;
12773    
12774     + ASSERT_RTNL();
12775     +
12776     info->dev = dev;
12777     - mutex_lock(&switchdev_mutex);
12778     err = raw_notifier_call_chain(&switchdev_notif_chain, val, info);
12779     - mutex_unlock(&switchdev_mutex);
12780     return err;
12781     }
12782     EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
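The switchdev hunks replace a private mutex with the global RTNL lock and turn the locking rule into an assertion callers must satisfy. A userspace caricature of that contract, with a plain flag plus assert() standing in for ASSERT_RTNL():

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    /* big_lock stands in for RTNL; big_lock_held mimics what
     * ASSERT_RTNL() verifies. Illustration only. */
    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static int big_lock_held;

    static void call_notifiers(const char *event)
    {
            assert(big_lock_held); /* caller must hold the lock */
            printf("notifying: %s\n", event);
    }

    int main(void)
    {
            pthread_mutex_lock(&big_lock);
            big_lock_held = 1;
            call_notifiers("port added");
            big_lock_held = 0;
            pthread_mutex_unlock(&big_lock);
            return 0;
    }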
12783     diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
12784     index 9dc239dfe192..92e367a0a5ce 100644
12785     --- a/net/tipc/bcast.c
12786     +++ b/net/tipc/bcast.c
12787     @@ -399,8 +399,10 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
12788    
12789     hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
12790     NLM_F_MULTI, TIPC_NL_LINK_GET);
12791     - if (!hdr)
12792     + if (!hdr) {
12793     + tipc_bcast_unlock(net);
12794     return -EMSGSIZE;
12795     + }
12796    
12797     attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
12798     if (!attrs)
12799     diff --git a/net/tipc/node.c b/net/tipc/node.c
12800     index 20cddec0a43c..3926b561f873 100644
12801     --- a/net/tipc/node.c
12802     +++ b/net/tipc/node.c
12803     @@ -168,12 +168,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
12804     skb_queue_head_init(&n_ptr->bc_entry.inputq1);
12805     __skb_queue_head_init(&n_ptr->bc_entry.arrvq);
12806     skb_queue_head_init(&n_ptr->bc_entry.inputq2);
12807     - hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
12808     - list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
12809     - if (n_ptr->addr < temp_node->addr)
12810     - break;
12811     - }
12812     - list_add_tail_rcu(&n_ptr->list, &temp_node->list);
12813     n_ptr->state = SELF_DOWN_PEER_LEAVING;
12814     n_ptr->signature = INVALID_NODE_SIG;
12815     n_ptr->active_links[0] = INVALID_BEARER_ID;
12816     @@ -193,6 +187,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
12817     tipc_node_get(n_ptr);
12818     setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr);
12819     n_ptr->keepalive_intv = U32_MAX;
12820     + hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
12821     + list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
12822     + if (n_ptr->addr < temp_node->addr)
12823     + break;
12824     + }
12825     + list_add_tail_rcu(&n_ptr->list, &temp_node->list);
12826     exit:
12827     spin_unlock_bh(&tn->node_list_lock);
12828     return n_ptr;
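The tipc fix moves the RCU hash and list insertion to after every field is set, so lockless readers can never observe a half-initialized node. The same publish-last discipline in miniature, using a C11 release/acquire pointer instead of the kernel's RCU primitives:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int addr;
            int state;
    };

    /* Readers load this pointer without a lock, so it must only
     * ever point at fully initialized memory - the rule the TIPC
     * hunk restores by inserting the node last. */
    static _Atomic(struct node *) published;

    int main(void)
    {
            struct node *n = malloc(sizeof(*n));

            if (!n)
                    return 1;
            n->addr = 42;   /* finish ALL initialization first... */
            n->state = 1;
            /* ...then publish with release semantics. */
            atomic_store_explicit(&published, n, memory_order_release);

            struct node *r = atomic_load_explicit(&published,
                                                  memory_order_acquire);
            printf("addr=%d state=%d\n", r->addr, r->state);
            free(r);
            return 0;
    }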
12829     diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
12830     index 350cca33ee0a..69ee2eeef968 100644
12831     --- a/net/tipc/subscr.c
12832     +++ b/net/tipc/subscr.c
12833     @@ -289,15 +289,14 @@ static void tipc_subscrb_rcv_cb(struct net *net, int conid,
12834     struct sockaddr_tipc *addr, void *usr_data,
12835     void *buf, size_t len)
12836     {
12837     - struct tipc_subscriber *subscriber = usr_data;
12838     + struct tipc_subscriber *subscrb = usr_data;
12839     struct tipc_subscription *sub = NULL;
12840     struct tipc_net *tn = net_generic(net, tipc_net_id);
12841    
12842     - tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscriber, &sub);
12843     - if (sub)
12844     - tipc_nametbl_subscribe(sub);
12845     - else
12846     - tipc_conn_terminate(tn->topsrv, subscriber->conid);
12847     + if (tipc_subscrp_create(net, (struct tipc_subscr *)buf, subscrb, &sub))
12848     + return tipc_conn_terminate(tn->topsrv, subscrb->conid);
12849     +
12850     + tipc_nametbl_subscribe(sub);
12851     }
12852    
12853     /* Handle one request to establish a new subscriber */
12854     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
12855     index e3f85bc8b135..898a53a562b8 100644
12856     --- a/net/unix/af_unix.c
12857     +++ b/net/unix/af_unix.c
12858     @@ -1496,7 +1496,7 @@ static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
12859     UNIXCB(skb).fp = NULL;
12860    
12861     for (i = scm->fp->count-1; i >= 0; i--)
12862     - unix_notinflight(scm->fp->fp[i]);
12863     + unix_notinflight(scm->fp->user, scm->fp->fp[i]);
12864     }
12865    
12866     static void unix_destruct_scm(struct sk_buff *skb)
12867     @@ -1561,7 +1561,7 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
12868     return -ENOMEM;
12869    
12870     for (i = scm->fp->count - 1; i >= 0; i--)
12871     - unix_inflight(scm->fp->fp[i]);
12872     + unix_inflight(scm->fp->user, scm->fp->fp[i]);
12873     return max_level;
12874     }
12875    
12876     @@ -1781,7 +1781,12 @@ restart_locked:
12877     goto out_unlock;
12878     }
12879    
12880     - if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
12881     + /* other == sk && unix_peer(other) != sk if
12882     + * - unix_peer(sk) == NULL, destination address bound to sk
12883     + * - unix_peer(sk) == sk by time of get but disconnected before lock
12884     + */
12885     + if (other != sk &&
12886     + unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
12887     if (timeo) {
12888     timeo = unix_wait_for_peer(other, timeo);
12889    
12890     @@ -2270,13 +2275,15 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
12891     size_t size = state->size;
12892     unsigned int last_len;
12893    
12894     - err = -EINVAL;
12895     - if (sk->sk_state != TCP_ESTABLISHED)
12896     + if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
12897     + err = -EINVAL;
12898     goto out;
12899     + }
12900    
12901     - err = -EOPNOTSUPP;
12902     - if (flags & MSG_OOB)
12903     + if (unlikely(flags & MSG_OOB)) {
12904     + err = -EOPNOTSUPP;
12905     goto out;
12906     + }
12907    
12908     target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
12909     timeo = sock_rcvtimeo(sk, noblock);
12910     @@ -2322,9 +2329,11 @@ again:
12911     goto unlock;
12912    
12913     unix_state_unlock(sk);
12914     - err = -EAGAIN;
12915     - if (!timeo)
12916     + if (!timeo) {
12917     + err = -EAGAIN;
12918     break;
12919     + }
12920     +
12921     mutex_unlock(&u->readlock);
12922    
12923     timeo = unix_stream_data_wait(sk, timeo, last,
12924     @@ -2332,6 +2341,7 @@ again:
12925    
12926     if (signal_pending(current)) {
12927     err = sock_intr_errno(timeo);
12928     + scm_destroy(&scm);
12929     goto out;
12930     }
12931    
12932     diff --git a/net/unix/diag.c b/net/unix/diag.c
12933     index c512f64d5287..4d9679701a6d 100644
12934     --- a/net/unix/diag.c
12935     +++ b/net/unix/diag.c
12936     @@ -220,7 +220,7 @@ done:
12937     return skb->len;
12938     }
12939    
12940     -static struct sock *unix_lookup_by_ino(int ino)
12941     +static struct sock *unix_lookup_by_ino(unsigned int ino)
12942     {
12943     int i;
12944     struct sock *sk;
12945     diff --git a/net/unix/garbage.c b/net/unix/garbage.c
12946     index 8fcdc2283af5..6a0d48525fcf 100644
12947     --- a/net/unix/garbage.c
12948     +++ b/net/unix/garbage.c
12949     @@ -116,7 +116,7 @@ struct sock *unix_get_socket(struct file *filp)
12950     * descriptor if it is for an AF_UNIX socket.
12951     */
12952    
12953     -void unix_inflight(struct file *fp)
12954     +void unix_inflight(struct user_struct *user, struct file *fp)
12955     {
12956     struct sock *s = unix_get_socket(fp);
12957    
12958     @@ -133,11 +133,11 @@ void unix_inflight(struct file *fp)
12959     }
12960     unix_tot_inflight++;
12961     }
12962     - fp->f_cred->user->unix_inflight++;
12963     + user->unix_inflight++;
12964     spin_unlock(&unix_gc_lock);
12965     }
12966    
12967     -void unix_notinflight(struct file *fp)
12968     +void unix_notinflight(struct user_struct *user, struct file *fp)
12969     {
12970     struct sock *s = unix_get_socket(fp);
12971    
12972     @@ -152,7 +152,7 @@ void unix_notinflight(struct file *fp)
12973     list_del_init(&u->link);
12974     unix_tot_inflight--;
12975     }
12976     - fp->f_cred->user->unix_inflight--;
12977     + user->unix_inflight--;
12978     spin_unlock(&unix_gc_lock);
12979     }
12980    
12981     diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
12982     index dacf71a43ad4..ba6c34ea5429 100755
12983     --- a/scripts/link-vmlinux.sh
12984     +++ b/scripts/link-vmlinux.sh
12985     @@ -62,7 +62,7 @@ vmlinux_link()
12986     -Wl,--start-group \
12987     ${KBUILD_VMLINUX_MAIN} \
12988     -Wl,--end-group \
12989     - -lutil -lrt ${1}
12990     + -lutil -lrt -lpthread ${1}
12991     rm -f linux
12992     fi
12993     }
12994     diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
12995     index ff81026f6ddb..7c57c7fcf5a2 100644
12996     --- a/security/smack/smack_lsm.c
12997     +++ b/security/smack/smack_lsm.c
12998     @@ -398,12 +398,10 @@ static int smk_copy_relabel(struct list_head *nhead, struct list_head *ohead,
12999     */
13000     static inline unsigned int smk_ptrace_mode(unsigned int mode)
13001     {
13002     - switch (mode) {
13003     - case PTRACE_MODE_READ:
13004     - return MAY_READ;
13005     - case PTRACE_MODE_ATTACH:
13006     + if (mode & PTRACE_MODE_ATTACH)
13007     return MAY_READWRITE;
13008     - }
13009     + if (mode & PTRACE_MODE_READ)
13010     + return MAY_READ;
13011    
13012     return 0;
13013     }
13014     diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
13015     index d3c19c970a06..cb6ed10816d4 100644
13016     --- a/security/yama/yama_lsm.c
13017     +++ b/security/yama/yama_lsm.c
13018     @@ -281,7 +281,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
13019     int rc = 0;
13020    
13021     /* require ptrace target be a child of ptracer on attach */
13022     - if (mode == PTRACE_MODE_ATTACH) {
13023     + if (mode & PTRACE_MODE_ATTACH) {
13024     switch (ptrace_scope) {
13025     case YAMA_SCOPE_DISABLED:
13026     /* No additional restrictions. */
13027     @@ -307,7 +307,7 @@ static int yama_ptrace_access_check(struct task_struct *child,
13028     }
13029     }
13030    
13031     - if (rc) {
13032     + if (rc && (mode & PTRACE_MODE_NOAUDIT) == 0) {
13033     printk_ratelimited(KERN_NOTICE
13034     "ptrace of pid %d was attempted by: %s (pid %d)\n",
13035     child->pid, current->comm, current->pid);
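Both the Smack and Yama hunks adjust to ptrace access modes being a bitmask that may carry extra flags such as PTRACE_MODE_NOAUDIT, so equality tests and switch statements no longer match. A standalone demonstration with hypothetical flag values (the real PTRACE_MODE_* constants are kernel-internal):

    #include <stdio.h>

    /* Hypothetical values; the real PTRACE_MODE_* constants are
     * defined in the kernel's linux/ptrace.h. */
    #define MODE_READ    0x01
    #define MODE_ATTACH  0x02
    #define MODE_NOAUDIT 0x04

    int main(void)
    {
            unsigned int mode = MODE_ATTACH | MODE_NOAUDIT;

            /* Equality breaks once extra flags ride along... */
            printf("mode == ATTACH: %d\n", mode == MODE_ATTACH);    /* 0 */
            /* ...which is why both hunks test bits instead. */
            printf("mode & ATTACH:  %d\n", !!(mode & MODE_ATTACH)); /* 1 */
            printf("audit suppressed: %d\n", !!(mode & MODE_NOAUDIT));
            return 0;
    }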
13036     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
13037     index 2c13298e80b7..2ff692dd2c5f 100644
13038     --- a/sound/pci/hda/hda_intel.c
13039     +++ b/sound/pci/hda/hda_intel.c
13040     @@ -357,7 +357,10 @@ enum {
13041     ((pci)->device == 0x0d0c) || \
13042     ((pci)->device == 0x160c))
13043    
13044     -#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
13045     +#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
13046     +#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
13047     +#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
13048     +#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
13049    
13050     static char *driver_short_names[] = {
13051     [AZX_DRIVER_ICH] = "HDA Intel",
13052     @@ -534,13 +537,13 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
13053    
13054     if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
13055     snd_hdac_set_codec_wakeup(bus, true);
13056     - if (IS_BROXTON(pci)) {
13057     + if (IS_SKL_PLUS(pci)) {
13058     pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
13059     val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
13060     pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
13061     }
13062     azx_init_chip(chip, full_reset);
13063     - if (IS_BROXTON(pci)) {
13064     + if (IS_SKL_PLUS(pci)) {
13065     pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
13066     val = val | INTEL_HDA_CGCTL_MISCBDCGE;
13067     pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
13068     @@ -549,7 +552,7 @@ static void hda_intel_init_chip(struct azx *chip, bool full_reset)
13069     snd_hdac_set_codec_wakeup(bus, false);
13070    
13071     /* reduce dma latency to avoid noise */
13072     - if (IS_BROXTON(pci))
13073     + if (IS_BXT(pci))
13074     bxt_reduce_dma_latency(chip);
13075     }
13076    
13077     @@ -971,11 +974,6 @@ static int azx_resume(struct device *dev)
13078     /* put codec down to D3 at hibernation for Intel SKL+;
13079     * otherwise BIOS may still access the codec and screw up the driver
13080     */
13081     -#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
13082     -#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
13083     -#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
13084     -#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
13085     -
13086     static int azx_freeze_noirq(struct device *dev)
13087     {
13088     struct pci_dev *pci = to_pci_dev(dev);
13089     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
13090     index efd4980cffb8..72fa58dd7723 100644
13091     --- a/sound/pci/hda/patch_realtek.c
13092     +++ b/sound/pci/hda/patch_realtek.c
13093     @@ -4749,6 +4749,7 @@ enum {
13094     ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
13095     ALC293_FIXUP_LENOVO_SPK_NOISE,
13096     ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
13097     + ALC255_FIXUP_DELL_SPK_NOISE,
13098     };
13099    
13100     static const struct hda_fixup alc269_fixups[] = {
13101     @@ -5368,6 +5369,12 @@ static const struct hda_fixup alc269_fixups[] = {
13102     .type = HDA_FIXUP_FUNC,
13103     .v.func = alc233_fixup_lenovo_line2_mic_hotkey,
13104     },
13105     + [ALC255_FIXUP_DELL_SPK_NOISE] = {
13106     + .type = HDA_FIXUP_FUNC,
13107     + .v.func = alc_fixup_disable_aamix,
13108     + .chained = true,
13109     + .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
13110     + },
13111     };
13112    
13113     static const struct snd_pci_quirk alc269_fixup_tbl[] = {
13114     @@ -5410,6 +5417,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
13115     SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
13116     SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
13117     SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
13118     + SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
13119     SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
13120     SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
13121     SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
13122     diff --git a/tools/hv/hv_vss_daemon.c b/tools/hv/hv_vss_daemon.c
13123     index 96234b638249..5d51d6ff08e6 100644
13124     --- a/tools/hv/hv_vss_daemon.c
13125     +++ b/tools/hv/hv_vss_daemon.c
13126     @@ -254,7 +254,7 @@ int main(int argc, char *argv[])
13127     syslog(LOG_ERR, "Illegal op:%d\n", op);
13128     }
13129     vss_msg->error = error;
13130     - len = write(vss_fd, &error, sizeof(struct hv_vss_msg));
13131     + len = write(vss_fd, vss_msg, sizeof(struct hv_vss_msg));
13132     if (len != sizeof(struct hv_vss_msg)) {
13133     syslog(LOG_ERR, "write failed; error: %d %s", errno,
13134     strerror(errno));
13135     diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
13136     index 2d9d8306dbd3..4a3a72cb5805 100644
13137     --- a/tools/perf/util/stat.c
13138     +++ b/tools/perf/util/stat.c
13139     @@ -310,7 +310,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
13140     int i, ret;
13141    
13142     aggr->val = aggr->ena = aggr->run = 0;
13143     - init_stats(ps->res_stats);
13144    
13145     if (counter->per_pkg)
13146     zero_per_pkg(counter);
13147     diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
13148     index 77edcdcc016b..057278448515 100755
13149     --- a/tools/testing/selftests/efivarfs/efivarfs.sh
13150     +++ b/tools/testing/selftests/efivarfs/efivarfs.sh
13151     @@ -88,7 +88,11 @@ test_delete()
13152     exit 1
13153     fi
13154    
13155     - rm $file
13156     + rm $file 2>/dev/null
13157     + if [ $? -ne 0 ]; then
13158     + chattr -i $file
13159     + rm $file
13160     + fi
13161    
13162     if [ -e $file ]; then
13163     echo "$file couldn't be deleted" >&2
13164     @@ -111,6 +115,7 @@ test_zero_size_delete()
13165     exit 1
13166     fi
13167    
13168     + chattr -i $file
13169     printf "$attrs" > $file
13170    
13171     if [ -e $file ]; then
13172     @@ -141,7 +146,11 @@ test_valid_filenames()
13173     echo "$file could not be created" >&2
13174     ret=1
13175     else
13176     - rm $file
13177     + rm $file 2>/dev/null
13178     + if [ $? -ne 0 ]; then
13179     + chattr -i $file
13180     + rm $file
13181     + fi
13182     fi
13183     done
13184    
13185     @@ -174,7 +183,11 @@ test_invalid_filenames()
13186    
13187     if [ -e $file ]; then
13188     echo "Creating $file should have failed" >&2
13189     - rm $file
13190     + rm $file 2>/dev/null
13191     + if [ $? -ne 0 ]; then
13192     + chattr -i $file
13193     + rm $file
13194     + fi
13195     ret=1
13196     fi
13197     done
13198     diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
13199     index 8c0764407b3c..4af74f733036 100644
13200     --- a/tools/testing/selftests/efivarfs/open-unlink.c
13201     +++ b/tools/testing/selftests/efivarfs/open-unlink.c
13202     @@ -1,10 +1,68 @@
13203     +#include <errno.h>
13204     #include <stdio.h>
13205     #include <stdint.h>
13206     #include <stdlib.h>
13207     #include <unistd.h>
13208     +#include <sys/ioctl.h>
13209     #include <sys/types.h>
13210     #include <sys/stat.h>
13211     #include <fcntl.h>
13212     +#include <linux/fs.h>
13213     +
13214     +static int set_immutable(const char *path, int immutable)
13215     +{
13216     + unsigned int flags;
13217     + int fd;
13218     + int rc;
13219     + int error;
13220     +
13221     + fd = open(path, O_RDONLY);
13222     + if (fd < 0)
13223     + return fd;
13224     +
13225     + rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
13226     + if (rc < 0) {
13227     + error = errno;
13228     + close(fd);
13229     + errno = error;
13230     + return rc;
13231     + }
13232     +
13233     + if (immutable)
13234     + flags |= FS_IMMUTABLE_FL;
13235     + else
13236     + flags &= ~FS_IMMUTABLE_FL;
13237     +
13238     + rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
13239     + error = errno;
13240     + close(fd);
13241     + errno = error;
13242     + return rc;
13243     +}
13244     +
13245     +static int get_immutable(const char *path)
13246     +{
13247     + unsigned int flags;
13248     + int fd;
13249     + int rc;
13250     + int error;
13251     +
13252     + fd = open(path, O_RDONLY);
13253     + if (fd < 0)
13254     + return fd;
13255     +
13256     + rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
13257     + if (rc < 0) {
13258     + error = errno;
13259     + close(fd);
13260     + errno = error;
13261     + return rc;
13262     + }
13263     + close(fd);
13264     + if (flags & FS_IMMUTABLE_FL)
13265     + return 1;
13266     + return 0;
13267     +}
13268    
13269     int main(int argc, char **argv)
13270     {
13271     @@ -27,7 +85,7 @@ int main(int argc, char **argv)
13272     buf[4] = 0;
13273    
13274     /* create a test variable */
13275     - fd = open(path, O_WRONLY | O_CREAT);
13276     + fd = open(path, O_WRONLY | O_CREAT, 0600);
13277     if (fd < 0) {
13278     perror("open(O_WRONLY)");
13279     return EXIT_FAILURE;
13280     @@ -41,6 +99,18 @@ int main(int argc, char **argv)
13281    
13282     close(fd);
13283    
13284     + rc = get_immutable(path);
13285     + if (rc < 0) {
13286     + perror("ioctl(FS_IOC_GETFLAGS)");
13287     + return EXIT_FAILURE;
13288     + } else if (rc) {
13289     + rc = set_immutable(path, 0);
13290     + if (rc < 0) {
13291     + perror("ioctl(FS_IOC_SETFLAGS)");
13292     + return EXIT_FAILURE;
13293     + }
13294     + }
13295     +
13296     fd = open(path, O_RDONLY);
13297     if (fd < 0) {
13298     perror("open");
13299     diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
13300     index 7a2f449bd85d..5d10f104f3eb 100644
13301     --- a/virt/kvm/arm/vgic.c
13302     +++ b/virt/kvm/arm/vgic.c
13303     @@ -1875,8 +1875,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
13304     static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
13305     {
13306     struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
13307     -
13308     - int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
13309     + int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
13310     + int sz = nr_longs * sizeof(unsigned long);
13311     vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
13312     vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
13313     vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
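The vgic fix sizes the shared-IRQ bitmaps in whole longs: dividing the bit count by 8 truncates, while BITS_TO_LONGS() rounds up. A quick standalone check of the difference, re-deriving the macro in userspace:

    #include <limits.h>
    #include <stdio.h>

    /* Userspace re-derivation of the kernel macros used above. */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            unsigned long nr_bits = 36; /* e.g. nr_irqs minus private IRQs */

            /* The old "/ 8" truncates: 36 bits -> 4 bytes = 32 bits. */
            printf("bytes by /8:            %lu\n", nr_bits / 8);
            /* Rounding up to whole longs always covers every bit. */
            printf("bytes by BITS_TO_LONGS: %zu\n",
                   BITS_TO_LONGS(nr_bits) * sizeof(unsigned long));
            return 0;
    }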
13314     diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
13315     index 77d42be6970e..4f70d12e392d 100644
13316     --- a/virt/kvm/async_pf.c
13317     +++ b/virt/kvm/async_pf.c
13318     @@ -173,7 +173,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
13319     * do alloc nowait since if we are going to sleep anyway we
13320     * may as well sleep faulting in page
13321     */
13322     - work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
13323     + work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
13324     if (!work)
13325     return 0;
13326