Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.4/0108-4.4.9-all-fixes.patch

Revision 2800
Mon May 30 12:46:03 2016 UTC by niro
File size: 202814 bytes
-linux-4.4.9
diff --git a/Makefile b/Makefile
index 1928fcd539cc..0722cdf52152 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 8
+SUBLEVEL = 9
EXTRAVERSION =
NAME = Blurry Fish Butt

diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 47954ed990f8..00707aac72fc 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -792,3 +792,8 @@
tx-num-evt = <32>;
rx-num-evt = <32>;
};
+
+&synctimer_32kclk {
+ assigned-clocks = <&mux_synctimer32k_ck>;
+ assigned-clock-parents = <&clkdiv32k_ick>;
+};
diff --git a/arch/arm/boot/dts/armada-375.dtsi b/arch/arm/boot/dts/armada-375.dtsi
index 7ccce7529b0c..cc952cf8ec30 100644
--- a/arch/arm/boot/dts/armada-375.dtsi
+++ b/arch/arm/boot/dts/armada-375.dtsi
@@ -529,7 +529,7 @@
};

sata@a0000 {
- compatible = "marvell,orion-sata";
+ compatible = "marvell,armada-370-sata";
reg = <0xa0000 0x5000>;
interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&gateclk 14>, <&gateclk 20>;
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 3710755c6d76..85d2c377c332 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -117,7 +117,7 @@
};

/* USB part of the eSATA/USB 2.0 port */
- usb@50000 {
+ usb@58000 {
status = "okay";
};

diff --git a/arch/arm/boot/dts/pxa3xx.dtsi b/arch/arm/boot/dts/pxa3xx.dtsi
index cf6998a0804d..564341af7e97 100644
--- a/arch/arm/boot/dts/pxa3xx.dtsi
+++ b/arch/arm/boot/dts/pxa3xx.dtsi
@@ -30,7 +30,7 @@
reg = <0x43100000 90>;
interrupts = <45>;
clocks = <&clks CLK_NAND>;
- dmas = <&pdma 97>;
+ dmas = <&pdma 97 3>;
dma-names = "data";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 3a10f1a8317a..bfd8bb371477 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -26,6 +26,7 @@ menuconfig ARCH_EXYNOS
select S5P_DEV_MFC
select SRAM
select THERMAL
+ select THERMAL_OF
select MFD_SYSCON
help
Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index aa7b379e2661..2a3db0bd9e15 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -34,6 +34,7 @@
#include "pm.h"
#include "control.h"
#include "common.h"
+#include "soc.h"

/* Mach specific information to be recorded in the C-state driver_data */
struct omap3_idle_statedata {
@@ -315,6 +316,69 @@ static struct cpuidle_driver omap3_idle_driver = {
.safe_state_index = 0,
};

+/*
+ * Numbers based on measurements made in October 2009 for PM optimized kernel
+ * with CPU freq enabled on device Nokia N900. Assumes OPP2 (main idle OPP,
+ * and worst case latencies).
+ */
+static struct cpuidle_driver omap3430_idle_driver = {
+ .name = "omap3430_idle",
+ .owner = THIS_MODULE,
+ .states = {
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 110 + 162,
+ .target_residency = 5,
+ .name = "C1",
+ .desc = "MPU ON + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 106 + 180,
+ .target_residency = 309,
+ .name = "C2",
+ .desc = "MPU ON + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 107 + 410,
+ .target_residency = 46057,
+ .name = "C3",
+ .desc = "MPU RET + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 121 + 3374,
+ .target_residency = 46057,
+ .name = "C4",
+ .desc = "MPU OFF + CORE ON",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 855 + 1146,
+ .target_residency = 46057,
+ .name = "C5",
+ .desc = "MPU RET + CORE RET",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 7580 + 4134,
+ .target_residency = 484329,
+ .name = "C6",
+ .desc = "MPU OFF + CORE RET",
+ },
+ {
+ .enter = omap3_enter_idle_bm,
+ .exit_latency = 7505 + 15274,
+ .target_residency = 484329,
+ .name = "C7",
+ .desc = "MPU OFF + CORE OFF",
+ },
+ },
+ .state_count = ARRAY_SIZE(omap3_idle_data),
+ .safe_state_index = 0,
+};
+
/* Public functions */

/**
@@ -333,5 +397,8 @@ int __init omap3_idle_init(void)
if (!mpu_pd || !core_pd || !per_pd || !cam_pd)
return -ENODEV;

- return cpuidle_register(&omap3_idle_driver, NULL);
+ if (cpu_is_omap3430())
+ return cpuidle_register(&omap3430_idle_driver, NULL);
+ else
+ return cpuidle_register(&omap3_idle_driver, NULL);
}
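
Note: the hunk above registers a dedicated idle driver for OMAP3430 whose states differ only in their exit_latency/target_residency numbers. For readers unfamiliar with cpuidle, a governor picks the deepest state whose break-even residency fits the predicted idle time and whose wakeup cost meets the latency bound. A minimal sketch of that selection rule, with an illustrative struct and function (not kernel code):

struct idle_state {
	unsigned int exit_latency;	/* worst-case wakeup cost, in us */
	unsigned int target_residency;	/* break-even idle time, in us */
};

static int pick_state(const struct idle_state *s, int n,
		      unsigned int predicted_us, unsigned int latency_req_us)
{
	int i, best = 0;	/* index 0 is the safe shallow state */

	for (i = 1; i < n; i++) {
		if (s[i].target_residency > predicted_us)
			break;		/* won't idle long enough to pay off */
		if (s[i].exit_latency > latency_req_us)
			break;		/* would violate the latency bound */
		best = i;
	}
	return best;
}
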
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 3eaeaca5da05..3a911d8dea8b 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -368,6 +368,7 @@ void __init omap5_map_io(void)
void __init dra7xx_map_io(void)
{
iotable_init(dra7xx_io_desc, ARRAY_SIZE(dra7xx_io_desc));
+ omap_barriers_init();
}
#endif
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 8e0bd5939e5a..147c90e70b2e 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1416,9 +1416,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
(sf & SYSC_HAS_CLOCKACTIVITY))
_set_clockactivity(oh, oh->class->sysc->clockact, &v);

- /* If the cached value is the same as the new value, skip the write */
- if (oh->_sysc_cache != v)
- _write_sysconfig(v, oh);
+ _write_sysconfig(v, oh);

/*
* Set the autoidle bit only after setting the smartidle bit
@@ -1481,7 +1479,9 @@ static void _idle_sysc(struct omap_hwmod *oh)
_set_master_standbymode(oh, idlemode, &v);
}

- _write_sysconfig(v, oh);
+ /* If the cached value is the same as the new value, skip the write */
+ if (oh->_sysc_cache != v)
+ _write_sysconfig(v, oh);
}

/**
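
Note: the omap_hwmod hunks move the "skip the write if the cached value is unchanged" optimization from the enable path to the idle path. On re-enable the SYSCONFIG register must be written unconditionally, because an off-mode transition can clear the hardware register while the software cache still holds the old value. A hedged sketch of the cached-write pattern involved (names hypothetical, not the kernel's helper):

struct cached_reg {
	void __iomem *addr;
	u32 cache;
};

static void reg_write_cached(struct cached_reg *r, u32 v, bool force)
{
	/* force == true on re-enable: hardware may have lost context,
	 * so the cache cannot be trusted to mirror the register. */
	if (!force && r->cache == v)
		return;
	writel_relaxed(v, r->addr);
	r->cache = v;
}
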
diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig
index 9ab8932403e5..56e55fd37d13 100644
--- a/arch/arm/mach-prima2/Kconfig
+++ b/arch/arm/mach-prima2/Kconfig
@@ -1,6 +1,7 @@
menuconfig ARCH_SIRF
bool "CSR SiRF" if ARCH_MULTI_V7
select ARCH_HAS_RESET_CONTROLLER
+ select RESET_CONTROLLER
select ARCH_REQUIRE_GPIOLIB
select GENERIC_IRQ_CHIP
select NO_IOPORT_MAP
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index eaa9cabf4066..c63868ae9a4a 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -69,11 +69,11 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

-#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_WT))
-#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
+#define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))

#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
@@ -83,7 +83,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);

#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
-#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)

@@ -155,6 +155,7 @@ extern struct page *empty_zero_page;
#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte) (!!(pte_val(pte) & PTE_CONT))
+#define pte_user(pte) (!!(pte_val(pte) & PTE_USER))

#ifdef CONFIG_ARM64_HW_AFDBM
#define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
@@ -165,8 +166,6 @@ extern struct page *empty_zero_page;
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
-#define pte_valid_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
#define pte_valid_not_user(pte) \
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)

@@ -264,13 +263,13 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_valid_user(pte)) {
- if (!pte_special(pte) && pte_exec(pte))
- __sync_icache_dcache(pte, addr);
+ if (pte_present(pte)) {
if (pte_sw_dirty(pte) && pte_write(pte))
pte_val(pte) &= ~PTE_RDONLY;
else
pte_val(pte) |= PTE_RDONLY;
+ if (pte_user(pte) && pte_exec(pte) && !pte_special(pte))
+ __sync_icache_dcache(pte, addr);
}

/*
@@ -641,6 +640,7 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
* bits 0-1: present (must be zero)
* bits 2-7: swap type
* bits 8-57: swap offset
+ * bit 58: PTE_PROT_NONE (must be zero)
*/
#define __SWP_TYPE_SHIFT 2
#define __SWP_TYPE_BITS 6
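
Note: the set_pte_at() hunk widens the hardware-DBM fixup from user PTEs to all present PTEs, and keys the I-cache sync off the new pte_user() test instead of the removed pte_valid_user(). Distilled into a standalone helper (a sketch mirroring the patched logic, relevant only under CONFIG_ARM64_HW_AFDBM):

static inline pte_t fixup_rdonly(pte_t pte)
{
	if (pte_sw_dirty(pte) && pte_write(pte))
		pte_val(pte) &= ~PTE_RDONLY;	/* HW may write directly */
	else
		pte_val(pte) |= PTE_RDONLY;	/* force a fault so SW can
						 * mark the page dirty first */
	return pte;
}
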
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 43686043e297..2734c005da21 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -31,6 +31,7 @@
#define PPC_FEATURE_PSERIES_PERFMON_COMPAT \
0x00000040

+/* Reserved - do not use 0x00000004 */
#define PPC_FEATURE_TRUE_LE 0x00000002
#define PPC_FEATURE_PPC_LE 0x00000001

diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 7030b035905d..a15fe1d4e84a 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -148,23 +148,25 @@ static struct ibm_pa_feature {
unsigned long cpu_features; /* CPU_FTR_xxx bit */
unsigned long mmu_features; /* MMU_FTR_xxx bit */
unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
+ unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
unsigned char pabyte; /* byte number in ibm,pa-features */
unsigned char pabit; /* bit number (big-endian) */
unsigned char invert; /* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
- {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
- {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
- {CPU_FTR_CTRL, 0, 0, 0, 3, 0},
- {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0},
- {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
- {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
- {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0, 0},
+ {0, 0, PPC_FEATURE_HAS_FPU, 0, 0, 1, 0},
+ {CPU_FTR_CTRL, 0, 0, 0, 0, 3, 0},
+ {CPU_FTR_NOEXECUTE, 0, 0, 0, 0, 6, 0},
+ {CPU_FTR_NODSISRALIGN, 0, 0, 0, 1, 1, 1},
+ {0, MMU_FTR_CI_LARGE_PAGE, 0, 0, 1, 2, 0},
+ {CPU_FTR_REAL_LE, 0, PPC_FEATURE_TRUE_LE, 0, 5, 0, 0},
/*
- * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
- * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
- * which is 0 if the kernel doesn't support TM.
+ * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
+ * we don't want to turn on TM here, so we use the *_COMP versions
+ * which are 0 if the kernel doesn't support TM.
 */
- {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
+ {CPU_FTR_TM_COMP, 0, 0,
+ PPC_FEATURE2_HTM_COMP|PPC_FEATURE2_HTM_NOSC_COMP, 22, 0, 0},
};

static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -195,10 +197,12 @@ static void __init scan_features(unsigned long node, const unsigned char *ftrs,
if (bit ^ fp->invert) {
cur_cpu_spec->cpu_features |= fp->cpu_features;
cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features |= fp->mmu_features;
} else {
cur_cpu_spec->cpu_features &= ~fp->cpu_features;
cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
+ cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
cur_cpu_spec->mmu_features &= ~fp->mmu_features;
}
}
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index 2b2ced9dc00a..6dafabb6ae1a 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -45,7 +45,8 @@ struct zpci_fmb {
u64 rpcit_ops;
u64 dma_rbytes;
u64 dma_wbytes;
-} __packed __aligned(64);
+ u64 pad[2];
+} __packed __aligned(128);

enum zpci_state {
ZPCI_FN_STATE_RESERVED,
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a841e9765bd6..8381c09d2870 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -453,10 +453,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,

req = cast_mcryptd_ctx_to_req(req_ctx);
if (irqs_disabled())
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
else {
local_bh_disable();
- rctx->complete(&req->base, ret);
+ req_ctx->complete(&req->base, ret);
local_bh_enable();
}
}
diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index f8a29d2c97b0..e6a8613fbfb0 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -4,6 +4,7 @@
#include <asm/page.h>
#include <asm-generic/hugetlb.h>

+#define hugepages_supported() cpu_has_pse

static inline int is_hugepage_only_range(struct mm_struct *mm,
unsigned long addr,
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7af2505f20c2..df6b4eeac0bd 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -254,7 +254,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
struct irq_desc *desc;
int cpu, vector;

- BUG_ON(!data->cfg.vector);
+ if (!data->cfg.vector)
+ return;

vector = data->cfg.vector;
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/cpu/mcheck/mce-genpool.c b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
index 0a850100c594..2658e2af74ec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-genpool.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-genpool.c
@@ -29,7 +29,7 @@ static char gen_pool_buf[MCE_POOLSZ];
void mce_gen_pool_process(void)
{
struct llist_node *head;
- struct mce_evt_llist *node;
+ struct mce_evt_llist *node, *tmp;
struct mce *mce;

head = llist_del_all(&mce_event_llist);
@@ -37,7 +37,7 @@ void mce_gen_pool_process(void)
return;

head = llist_reverse_order(head);
- llist_for_each_entry(node, head, llnode) {
+ llist_for_each_entry_safe(node, tmp, head, llnode) {
mce = &node->mce;
atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, mce);
gen_pool_free(mce_evt_pool, (unsigned long)node, sizeof(*node));
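
Note: the mce-genpool hunk fixes the classic "don't free the node you are standing on" bug: llist_for_each_entry() dereferences node->llnode.next after the loop body runs, but the body has already returned the node to the pool. The _safe variant caches the next pointer first. A hedged, self-contained sketch of the same pattern (struct and function are hypothetical):

#include <linux/llist.h>
#include <linux/slab.h>

struct my_node {			/* hypothetical node type */
	struct llist_node llnode;
};

static void drain_and_free(struct llist_head *list)
{
	struct llist_node *head = llist_del_all(list);
	struct my_node *pos, *tmp;

	/* _safe loads the next pointer into 'tmp' before the body runs,
	 * so the body may free 'pos'; the plain walker would read freed
	 * memory on the next iteration. */
	llist_for_each_entry_safe(pos, tmp, head, llnode)
		kfree(pos);
}
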
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7eb4ebd3ebea..605cea75eb0d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -697,7 +697,6 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
if ((xcr0 & XFEATURE_MASK_AVX512) != XFEATURE_MASK_AVX512)
return 1;
}
- kvm_put_guest_xcr0(vcpu);
vcpu->arch.xcr0 = xcr0;

if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
@@ -6495,8 +6494,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
kvm_x86_ops->prepare_guest_switch(vcpu);
if (vcpu->fpu_active)
kvm_load_guest_fpu(vcpu);
- kvm_load_guest_xcr0(vcpu);
-
vcpu->mode = IN_GUEST_MODE;

srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6519,6 +6516,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
goto cancel_injection;
}

+ kvm_load_guest_xcr0(vcpu);
+
if (req_immediate_exit)
smp_send_reschedule(vcpu->cpu);

@@ -6568,6 +6567,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
vcpu->mode = OUTSIDE_GUEST_MODE;
smp_wmb();

+ kvm_put_guest_xcr0(vcpu);
+
/* Interrupt is enabled by handle_external_intr() */
kvm_x86_ops->handle_external_intr(vcpu);

@@ -7215,7 +7216,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
* and assume host would use all available bits.
* Guest xcr0 would be loaded later.
*/
- kvm_put_guest_xcr0(vcpu);
vcpu->guest_fpu_loaded = 1;
__kernel_fpu_begin();
__copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
@@ -7224,8 +7224,6 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)

void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
{
- kvm_put_guest_xcr0(vcpu);
-
if (!vcpu->guest_fpu_loaded) {
vcpu->fpu_counter = 0;
return;
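
Note: these KVM hunks narrow the window in which the guest's XCR0 is live on the host. It is now loaded immediately before guest entry, after the last point that can cancel entry, and restored right after exit, before handle_external_intr() re-enables interrupts, so host code (for example an interrupt handler that touches the FPU) never runs with guest XSAVE state enabled. A hedged outline of the resulting ordering, simplified from vcpu_enter_guest() with error paths omitted:

/* Illustrative outline only, not the full entry path. */
void enter_guest_outline(struct kvm_vcpu *vcpu)
{
	kvm_load_guest_fpu(vcpu);	/* host XCR0 still in effect */
	/* ... final cancellation checks happen here ... */
	kvm_load_guest_xcr0(vcpu);	/* last step before entry */
	/* guest runs here */
	kvm_put_guest_xcr0(vcpu);	/* first step after exit */
	kvm_x86_ops->handle_external_intr(vcpu);	/* IRQs re-enabled */
}
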
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 637ab34ed632..ddb2244b06a1 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -33,7 +33,7 @@
struct kmmio_fault_page {
struct list_head list;
struct kmmio_fault_page *release_next;
- unsigned long page; /* location of the fault page */
+ unsigned long addr; /* the requested address */
pteval_t old_presence; /* page presence prior to arming */
bool armed;

@@ -70,9 +70,16 @@ unsigned int kmmio_count;
static struct list_head kmmio_page_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);

-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}

/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
}

/* You must be holding RCU read lock. */
-static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long page)
+static struct kmmio_fault_page *get_kmmio_fault_page(unsigned long addr)
{
struct list_head *head;
struct kmmio_fault_page *f;
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);

- page &= PAGE_MASK;
- head = kmmio_page_list(page);
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+ head = kmmio_page_list(addr);
list_for_each_entry_rcu(f, head, list) {
- if (f->page == page)
+ if (f->addr == addr)
return f;
}
return NULL;
@@ -137,10 +148,10 @@ static void clear_pte_presence(pte_t *pte, bool clear, pteval_t *old)
static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
{
unsigned int level;
- pte_t *pte = lookup_address(f->page, &level);
+ pte_t *pte = lookup_address(f->addr, &level);

if (!pte) {
- pr_err("no pte for page 0x%08lx\n", f->page);
+ pr_err("no pte for addr 0x%08lx\n", f->addr);
return -1;
}

@@ -156,7 +167,7 @@ static int clear_page_presence(struct kmmio_fault_page *f, bool clear)
return -1;
}

- __flush_tlb_one(f->page);
+ __flush_tlb_one(f->addr);
return 0;
}

@@ -176,12 +187,12 @@ static int arm_kmmio_fault_page(struct kmmio_fault_page *f)
int ret;
WARN_ONCE(f->armed, KERN_ERR pr_fmt("kmmio page already armed.\n"));
if (f->armed) {
- pr_warning("double-arm: page 0x%08lx, ref %d, old %d\n",
- f->page, f->count, !!f->old_presence);
+ pr_warning("double-arm: addr 0x%08lx, ref %d, old %d\n",
+ f->addr, f->count, !!f->old_presence);
}
ret = clear_page_presence(f, true);
- WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming 0x%08lx failed.\n"),
- f->page);
+ WARN_ONCE(ret < 0, KERN_ERR pr_fmt("arming at 0x%08lx failed.\n"),
+ f->addr);
f->armed = true;
return ret;
}
@@ -191,7 +202,7 @@ static void disarm_kmmio_fault_page(struct kmmio_fault_page *f)
{
int ret = clear_page_presence(f, false);
WARN_ONCE(ret < 0,
- KERN_ERR "kmmio disarming 0x%08lx failed.\n", f->page);
+ KERN_ERR "kmmio disarming at 0x%08lx failed.\n", f->addr);
f->armed = false;
}

@@ -215,6 +226,12 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
struct kmmio_context *ctx;
struct kmmio_fault_page *faultpage;
int ret = 0; /* default to fault not handled */
+ unsigned long page_base = addr;
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+ if (!pte)
+ return -EINVAL;
+ page_base &= page_level_mask(l);

/*
* Preemption is now disabled to prevent process switch during
@@ -227,7 +244,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
preempt_disable();
rcu_read_lock();

- faultpage = get_kmmio_fault_page(addr);
+ faultpage = get_kmmio_fault_page(page_base);
if (!faultpage) {
/*
* Either this page fault is not caused by kmmio, or
@@ -239,7 +256,7 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)

ctx = &get_cpu_var(kmmio_ctx);
if (ctx->active) {
- if (addr == ctx->addr) {
+ if (page_base == ctx->addr) {
/*
* A second fault on the same page means some other
* condition needs handling by do_page_fault(), the
@@ -267,9 +284,9 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
ctx->active++;

ctx->fpage = faultpage;
- ctx->probe = get_kmmio_probe(addr);
+ ctx->probe = get_kmmio_probe(page_base);
ctx->saved_flags = (regs->flags & (X86_EFLAGS_TF | X86_EFLAGS_IF));
- ctx->addr = addr;
+ ctx->addr = page_base;

if (ctx->probe && ctx->probe->pre_handler)
ctx->probe->pre_handler(ctx->probe, regs, addr);
@@ -354,12 +371,11 @@ out:
}

/* You must be holding kmmio_lock. */
-static int add_kmmio_fault_page(unsigned long page)
+static int add_kmmio_fault_page(unsigned long addr)
{
struct kmmio_fault_page *f;

- page &= PAGE_MASK;
- f = get_kmmio_fault_page(page);
+ f = get_kmmio_fault_page(addr);
if (f) {
if (!f->count)
arm_kmmio_fault_page(f);
@@ -372,26 +388,25 @@ static int add_kmmio_fault_page(unsigned long page)
return -1;

f->count = 1;
- f->page = page;
+ f->addr = addr;

if (arm_kmmio_fault_page(f)) {
kfree(f);
return -1;
}

- list_add_rcu(&f->list, kmmio_page_list(f->page));
+ list_add_rcu(&f->list, kmmio_page_list(f->addr));

return 0;
}

/* You must be holding kmmio_lock. */
-static void release_kmmio_fault_page(unsigned long page,
+static void release_kmmio_fault_page(unsigned long addr,
struct kmmio_fault_page **release_list)
{
struct kmmio_fault_page *f;

- page &= PAGE_MASK;
- f = get_kmmio_fault_page(page);
+ f = get_kmmio_fault_page(addr);
if (!f)
return;

@@ -420,18 +435,27 @@ int register_kmmio_probe(struct kmmio_probe *p)
int ret = 0;
unsigned long size = 0;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
+ unsigned int l;
+ pte_t *pte;

spin_lock_irqsave(&kmmio_lock, flags);
if (get_kmmio_probe(p->addr)) {
ret = -EEXIST;
goto out;
}
+
+ pte = lookup_address(p->addr, &l);
+ if (!pte) {
+ ret = -EINVAL;
+ goto out;
+ }
+
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
if (add_kmmio_fault_page(p->addr + size))
pr_err("Unable to set page fault.\n");
- size += PAGE_SIZE;
+ size += page_level_size(l);
}
out:
spin_unlock_irqrestore(&kmmio_lock, flags);
@@ -506,11 +530,17 @@ void unregister_kmmio_probe(struct kmmio_probe *p)
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
struct kmmio_fault_page *release_list = NULL;
struct kmmio_delayed_release *drelease;
+ unsigned int l;
+ pte_t *pte;
+
+ pte = lookup_address(p->addr, &l);
+ if (!pte)
+ return;

spin_lock_irqsave(&kmmio_lock, flags);
while (size < size_lim) {
release_kmmio_fault_page(p->addr + size, &release_list);
- size += PAGE_SIZE;
+ size += page_level_size(l);
}
list_del_rcu(&p->list);
kmmio_count--;
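
Note: the kmmio rework above fixes a 4 KiB-page assumption: masking with PAGE_MASK and stepping by PAGE_SIZE breaks when the probed MMIO region sits under a 2 MiB or 1 GiB kernel mapping. The patch derives the mapping size from the page-table level via lookup_address(), masks with page_level_mask(), and steps with page_level_size(). A hedged sketch of the core idiom (lookup_address() and page_level_mask() are real x86 helpers; the wrapper function is illustrative):

static unsigned long mapping_base(unsigned long addr)
{
	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte)
		return 0;	/* no mapping: caller must bail out */

	/* PG_LEVEL_4K/2M/1G -> mask off the low 12/21/30 bits */
	return addr & page_level_mask(level);
}
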
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 746935a5973c..a241e3900bc9 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -349,15 +349,20 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
goto out_del;
}

+ err = hd_ref_init(p);
+ if (err) {
+ if (flags & ADDPART_FLAG_WHOLEDISK)
+ goto out_remove_file;
+ goto out_del;
+ }
+
/* everything is up and running, commence */
rcu_assign_pointer(ptbl->part[partno], p);

/* suppress uevent if the disk suppresses it */
if (!dev_get_uevent_suppress(ddev))
kobject_uevent(&pdev->kobj, KOBJ_ADD);
-
- if (!hd_ref_init(p))
- return p;
+ return p;

out_free_info:
free_part_info(p);
@@ -366,6 +371,8 @@ out_free_stats:
out_free:
kfree(p);
return ERR_PTR(err);
+out_remove_file:
+ device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
kobject_put(p->holder_dir);
device_del(pdev);
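
Note: the partition fix takes the hd_struct reference before the partition is published with rcu_assign_pointer(), and gives the failure a proper unwind label; the old tail-end `if (!hd_ref_init(p)) return p;` had no sensible error path once the pointer was already visible to readers. The general shape of the kernel's goto-unwind ladder, as a hedged sketch with hypothetical step names:

int setup_example(void)
{
	int err;

	err = step_a();		/* e.g. device_add() */
	if (err)
		return err;
	err = step_b();		/* e.g. create a sysfs file */
	if (err)
		goto undo_a;
	err = step_c();		/* e.g. take the long-lived reference */
	if (err)
		goto undo_b;
	publish();		/* rcu_assign_pointer(): always last */
	return 0;

undo_b:				/* undo in reverse order of setup */
	remove_b();
undo_a:
	remove_a();
	return err;
}
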
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 65f50eccd49b..a48824deabc5 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1381,7 +1381,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,

mutex_lock(&genpd->lock);

- if (!list_empty(&subdomain->slave_links) || subdomain->device_count) {
+ if (!list_empty(&subdomain->master_links) || subdomain->device_count) {
pr_warn("%s: unable to remove subdomain %s\n", genpd->name,
subdomain->name);
ret = -EBUSY;
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index b8e76f75073b..f8580900c273 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -809,8 +809,14 @@ static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev)
}

opp->u_volt = microvolt[0];
- opp->u_volt_min = microvolt[1];
- opp->u_volt_max = microvolt[2];
+
+ if (count == 1) {
+ opp->u_volt_min = opp->u_volt;
+ opp->u_volt_max = opp->u_volt;
+ } else {
+ opp->u_volt_min = microvolt[1];
+ opp->u_volt_max = microvolt[2];
+ }

if (!of_property_read_u32(opp->np, "opp-microamp", &val))
opp->u_amp = val;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 423f4ca7d712..80cf8add46ff 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -488,6 +488,12 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
iov_iter_bvec(&iter, ITER_BVEC | rw, bvec,
bio_segments(bio), blk_rq_bytes(cmd->rq));
+ /*
+ * This bio may be started from the middle of the 'bvec'
+ * because of bio splitting, so offset from the bvec must
+ * be passed to iov iterator
+ */
+ iter.iov_offset = bio->bi_iter.bi_bvec_done;

cmd->iocb.ki_pos = pos;
cmd->iocb.ki_filp = file;
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 562b5a4ca7b7..78a39f736c64 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -126,7 +126,7 @@
*/
#include <linux/types.h>

-static bool verbose = 0;
+static int verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
@@ -161,7 +161,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
static DEFINE_MUTEX(pd_mutex);
static DEFINE_SPINLOCK(pd_lock);

-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(cluster, int, 0);
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 1740d75e8a32..216a94fed5b4 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -117,7 +117,7 @@

*/

-static bool verbose = 0;
+static int verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
@@ -152,7 +152,7 @@ static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};

#include <asm/uaccess.h>

-module_param(verbose, bool, 0);
+module_param(verbose, int, 0);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param_array(drive0, int, NULL, 0);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index e98d15eaa799..1827fc4d15c1 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -150,7 +150,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}

- for_each_child_of_node(pdev->dev.of_node, child) {
+ for_each_available_child_of_node(pdev->dev.of_node, child) {
if (!child->name)
continue;

diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 98fb8821382d..f53b02a6bc05 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -667,6 +667,11 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;

+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
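
Note: the intel_pstate hunk decodes the config-TDP MSR pair correctly: when the control MSR selects level 1 or 2, the TDP ratio sits in bits [23:16] rather than [7:0], so the raw value is shifted and masked to 8 bits before being compared with the turbo-activation ratio. A hedged sketch of the extraction, generalized from the hunk's comment (function and macro names are illustrative):

#define FIELD_GET8(val, shift)	(((val) >> (shift)) & 0xff)

static int tdp_ratio_from_msr(u64 tdp_msr, u64 tdp_ctrl)
{
	/* levels 1 and 2 publish the ratio in bits [23:16] */
	int shift = tdp_ctrl ? 16 : 0;

	return FIELD_GET8(tdp_msr, shift);
}
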
diff --git a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
index 3d9acc53d247..60fc0fa26fd3 100644
--- a/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
+++ b/drivers/crypto/ccp/ccp-crypto-aes-cmac.c
@@ -225,6 +225,9 @@ static int ccp_aes_cmac_export(struct ahash_request *req, void *out)
struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_aes_cmac_exp_ctx state;

+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.null_msg = rctx->null_msg;
memcpy(state.iv, rctx->iv, sizeof(state.iv));
state.buf_count = rctx->buf_count;
diff --git a/drivers/crypto/ccp/ccp-crypto-sha.c b/drivers/crypto/ccp/ccp-crypto-sha.c
index 8ef06fad8b14..ab9945f2cb7a 100644
--- a/drivers/crypto/ccp/ccp-crypto-sha.c
+++ b/drivers/crypto/ccp/ccp-crypto-sha.c
@@ -212,6 +212,9 @@ static int ccp_sha_export(struct ahash_request *req, void *out)
struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req);
struct ccp_sha_exp_ctx state;

+ /* Don't let anything leak to 'out' */
+ memset(&state, 0, sizeof(state));
+
state.type = rctx->type;
state.msg_bits = rctx->msg_bits;
state.first = rctx->first;
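
Note: both CCP hunks close the same information leak: `state` is a stack struct that is later copied wholesale to the caller's buffer, so padding and any unwritten tail bytes would carry stale kernel stack contents. Zeroing the whole struct first is the standard remedy, because member-by-member assignment never initializes padding. A hedged, self-contained sketch of the leak class (struct and function are hypothetical):

#include <linux/string.h>
#include <linux/types.h>

struct exp_state {
	u8  type;	/* 3 padding bytes typically follow here */
	u32 count;
};

static void export_state(void *out, u8 type, u32 count)
{
	struct exp_state st;

	memset(&st, 0, sizeof(st));	/* clears the padding too */
	st.type = type;
	st.count = count;
	memcpy(out, &st, sizeof(st));	/* safe: no uninitialized bytes */
}
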
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index b6f9f42e2985..a04fea4d0063 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
ptr->eptr = upper_32_bits(dma_addr);
}

+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+ struct talitos_ptr *src_ptr, bool is_sec1)
+{
+ dst_ptr->ptr = src_ptr->ptr;
+ if (!is_sec1)
+ dst_ptr->eptr = src_ptr->eptr;
+}
+
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
bool is_sec1)
{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
-
/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
if (sg_count > 1 &&
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
areq->assoclen,
&edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
-
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ tbl_off += ret;
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;

- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
- sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+ areq->assoclen, 0);
+ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+ areq->assoclen, sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
- } else
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+ tbl_off += ret;
+ } else {
+ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ }

/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,

edesc->icv_ool = false;

- if (sg_count > 1 &&
- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+ areq->assoclen, 0);
+ } else if ((sg_count =
+ sg_to_link_tbl_offset(areq->dst, sg_count,
areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) >
- 1) {
+ &edesc->link_tbl[tbl_off])) > 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dma_len, DMA_BIDIRECTIONAL);

edesc->icv_ool = true;
- } else
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+ } else {
+ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ }

/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2519,21 +2533,11 @@ struct talitos_crypto_alg {
struct talitos_alg_template algt;
};

-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+ struct talitos_crypto_alg *talitos_alg)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
struct talitos_private *priv;

- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
-
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;

@@ -2551,10 +2555,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}

+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
+
+ return talitos_init_common(ctx, talitos_alg);
+}
+
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- talitos_cra_init(crypto_aead_tfm(tfm));
- return 0;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
+
+ return talitos_init_common(ctx, talitos_alg);
}

static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 4f099ea29f83..c66133b5e852 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -130,26 +130,14 @@ static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
static void dwc_initialize(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
- struct dw_dma_slave *dws = dwc->chan.private;
u32 cfghi = DWC_CFGH_FIFO_MODE;
u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);

if (dwc->initialized == true)
return;

- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi |= DWC_CFGH_DST_PER(dws->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dws->src_id);
- } else {
- cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
- cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);
- }
+ cfghi |= DWC_CFGH_DST_PER(dwc->dst_id);
+ cfghi |= DWC_CFGH_SRC_PER(dwc->src_id);

channel_writel(dwc, CFG_LO, cfglo);
channel_writel(dwc, CFG_HI, cfghi);
@@ -936,7 +924,7 @@ bool dw_dma_filter(struct dma_chan *chan, void *param)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma_slave *dws = param;

- if (!dws || dws->dma_dev != chan->device->dev)
+ if (dws->dma_dev != chan->device->dev)
return false;

/* We have to copy data since dws can be temporary storage */
@@ -1160,6 +1148,14 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
* doesn't mean what you think it means), and status writeback.
*/

+ /*
+ * We need controller-specific data to set up slave transfers.
+ */
+ if (chan->private && !dw_dma_filter(chan, chan->private)) {
+ dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
+ return -EINVAL;
+ }
+
/* Enable controller here if needed */
if (!dw->in_use)
dw_dma_on(dw);
@@ -1221,6 +1217,14 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+
+ /* Clear custom channel configuration */
+ dwc->src_id = 0;
+ dwc->dst_id = 0;
+
+ dwc->src_master = 0;
+ dwc->dst_master = 0;
+
dwc->initialized = false;

/* Disable interrupts */
diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c
index 823ad728aecf..efc02b98e6ba 100644
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -135,7 +135,7 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
sr = hsu_chan_readl(hsuc, HSU_CH_SR);
spin_unlock_irqrestore(&hsuc->vchan.lock, flags);

- return sr;
+ return sr & ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY);
}

irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr)
diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h
index f06579c6d548..26da2865b025 100644
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -41,6 +41,9 @@
#define HSU_CH_SR_DESCTO(x) BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY (BIT(11) | BIT(10) | BIT(9) | BIT(8))
#define HSU_CH_SR_CHE BIT(15)
+#define HSU_CH_SR_DESCE(x) BIT(16 + (x))
+#define HSU_CH_SR_DESCE_ANY (BIT(19) | BIT(18) | BIT(17) | BIT(16))
+#define HSU_CH_SR_CDESC_ANY (BIT(31) | BIT(30))

/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA BIT(0)
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index a59061e4221a..55f5d33f6dc7 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -122,6 +122,7 @@ struct pxad_chan {
struct pxad_device {
struct dma_device slave;
int nr_chans;
+ int nr_requestors;
void __iomem *base;
struct pxad_phy *phys;
spinlock_t phy_lock; /* Phy association */
@@ -473,7 +474,7 @@ static void pxad_free_phy(struct pxad_chan *chan)
return;

/* clear the channel mapping in DRCMR */
- if (chan->drcmr <= DRCMR_CHLNUM) {
+ if (chan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(chan->drcmr);
writel_relaxed(0, chan->phy->base + reg);
}
@@ -509,6 +510,7 @@ static bool is_running_chan_misaligned(struct pxad_chan *chan)

static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
+ struct pxad_device *pdev;
u32 reg, dalgn;

if (!phy->vchan)
@@ -518,7 +520,8 @@ static void phy_enable(struct pxad_phy *phy, bool misaligned)
"%s(); phy=%p(%d) misaligned=%d\n", __func__,
phy, phy->idx, misaligned);

- if (phy->vchan->drcmr <= DRCMR_CHLNUM) {
+ pdev = to_pxad_dev(phy->vchan->vc.chan.device);
+ if (phy->vchan->drcmr <= pdev->nr_requestors) {
reg = pxad_drcmr(phy->vchan->drcmr);
writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
}
@@ -914,6 +917,7 @@ static void pxad_get_config(struct pxad_chan *chan,
{
u32 maxburst = 0, dev_addr = 0;
enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

*dcmd = 0;
if (dir == DMA_DEV_TO_MEM) {
@@ -922,7 +926,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.src_addr;
*dev_src = dev_addr;
*dcmd |= PXA_DCMD_INCTRGADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWSRC;
}
if (dir == DMA_MEM_TO_DEV) {
@@ -931,7 +935,7 @@ static void pxad_get_config(struct pxad_chan *chan,
dev_addr = chan->cfg.dst_addr;
*dev_dst = dev_addr;
*dcmd |= PXA_DCMD_INCSRCADDR;
- if (chan->drcmr <= DRCMR_CHLNUM)
+ if (chan->drcmr <= pdev->nr_requestors)
*dcmd |= PXA_DCMD_FLOWTRG;
}
if (dir == DMA_MEM_TO_MEM)
@@ -1341,13 +1345,15 @@ static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,

static int pxad_init_dmadev(struct platform_device *op,
struct pxad_device *pdev,
- unsigned int nr_phy_chans)
+ unsigned int nr_phy_chans,
+ unsigned int nr_requestors)
{
int ret;
unsigned int i;
struct pxad_chan *c;

pdev->nr_chans = nr_phy_chans;
+ pdev->nr_requestors = nr_requestors;
INIT_LIST_HEAD(&pdev->slave.channels);
pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
@@ -1382,7 +1388,7 @@ static int pxad_probe(struct platform_device *op)
const struct of_device_id *of_id;
struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
struct resource *iores;
- int ret, dma_channels = 0;
+ int ret, dma_channels = 0, nb_requestors = 0;
const enum dma_slave_buswidth widths =
DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -1399,13 +1405,23 @@ static int pxad_probe(struct platform_device *op)
return PTR_ERR(pdev->base);

of_id = of_match_device(pxad_dt_ids, &op->dev);
- if (of_id)
+ if (of_id) {
of_property_read_u32(op->dev.of_node, "#dma-channels",
&dma_channels);
- else if (pdata && pdata->dma_channels)
+ ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
+ &nb_requestors);
+ if (ret) {
+ dev_warn(pdev->slave.dev,
+ "#dma-requests set to default 32 as missing in OF: %d",
+ ret);
+ nb_requestors = 32;
+ };
+ } else if (pdata && pdata->dma_channels) {
dma_channels = pdata->dma_channels;
- else
+ nb_requestors = pdata->nb_requestors;
+ } else {
dma_channels = 32; /* default 32 channel */
+ }

dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
@@ -1422,7 +1438,7 @@ static int pxad_probe(struct platform_device *op)
pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

pdev->slave.dev = &op->dev;
- ret = pxad_init_dmadev(op, pdev, dma_channels);
+ ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
if (ret) {
dev_err(pdev->slave.dev, "unable to register\n");
return ret;
@@ -1441,7 +1457,8 @@ static int pxad_probe(struct platform_device *op)

platform_set_drvdata(op, pdev);
pxad_init_debugfs(pdev);
- dev_info(pdev->slave.dev, "initialized %d channels\n", dma_channels);
+ dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
+ dma_channels, nb_requestors);
return 0;
}

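Note: the pxa_dma change threads the number of DMA requestors from the device tree ("#dma-requests") or platform data down to the channel code, replacing the hard-coded DRCMR_CHLNUM bound, and falls back to 32 with a warning when the property is absent. A hedged sketch of the optional-property idiom used in pxad_probe() (of_property_read_u32() is the real API; the wrapper is illustrative):

#include <linux/of.h>

static u32 read_u32_default(struct device_node *np, const char *prop,
			    u32 def)
{
	u32 val;

	/* returns non-zero when the property is missing or malformed */
	if (of_property_read_u32(np, prop, &val))
		return def;
	return val;
}
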
1323     diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1324     index 01087a38da22..792bdae2b91d 100644
1325     --- a/drivers/edac/i7core_edac.c
1326     +++ b/drivers/edac/i7core_edac.c
1327     @@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1328    
1329     i7_dev = get_i7core_dev(mce->socketid);
1330     if (!i7_dev)
1331     - return NOTIFY_BAD;
1332     + return NOTIFY_DONE;
1333    
1334     mci = i7_dev->mci;
1335     pvt = mci->pvt_info;
1336     diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1337     index cbee3179ec08..37649221f81c 100644
1338     --- a/drivers/edac/sb_edac.c
1339     +++ b/drivers/edac/sb_edac.c
1340     @@ -1396,7 +1396,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1341     }
1342    
1343     ch_way = TAD_CH(reg) + 1;
1344     - sck_way = 1 << TAD_SOCK(reg);
1345     + sck_way = TAD_SOCK(reg);
1346    
1347     if (ch_way == 3)
1348     idx = addr >> 6;
1349     @@ -1435,7 +1435,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1350     switch(ch_way) {
1351     case 2:
1352     case 4:
1353     - sck_xch = 1 << sck_way * (ch_way >> 1);
1354     + sck_xch = (1 << sck_way) * (ch_way >> 1);
1355     break;
1356     default:
1357     sprintf(msg, "Invalid mirror set. Can't decode addr");
1358     @@ -1471,7 +1471,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1359    
1360     ch_addr = addr - offset;
1361     ch_addr >>= (6 + shiftup);
1362     - ch_addr /= ch_way * sck_way;
1363     + ch_addr /= sck_xch;
1364     ch_addr <<= (6 + shiftup);
1365     ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
1366    
1367     @@ -2254,7 +2254,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
1368    
1369     mci = get_mci_for_node_id(mce->socketid);
1370     if (!mci)
1371     - return NOTIFY_BAD;
1372     + return NOTIFY_DONE;
1373     pvt = mci->pvt_info;
1374    
1375     /*
1376     diff --git a/drivers/extcon/extcon-max77843.c b/drivers/extcon/extcon-max77843.c
1377     index 9f9ea334399c..b6cb30d207be 100644
1378     --- a/drivers/extcon/extcon-max77843.c
1379     +++ b/drivers/extcon/extcon-max77843.c
1380     @@ -803,7 +803,7 @@ static int max77843_muic_probe(struct platform_device *pdev)
1381     /* Clear IRQ bits before request IRQs */
1382     ret = regmap_bulk_read(max77843->regmap_muic,
1383     MAX77843_MUIC_REG_INT1, info->status,
1384     - MAX77843_MUIC_IRQ_NUM);
1385     + MAX77843_MUIC_STATUS_NUM);
1386     if (ret) {
1387     dev_err(&pdev->dev, "Failed to Clear IRQ bits\n");
1388     goto err_muic_irq;
1389     diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1390     index 027ca212179f..3b52677f459a 100644
1391     --- a/drivers/firmware/efi/efi.c
1392     +++ b/drivers/firmware/efi/efi.c
1393     @@ -180,6 +180,7 @@ static int generic_ops_register(void)
1394     {
1395     generic_ops.get_variable = efi.get_variable;
1396     generic_ops.set_variable = efi.set_variable;
1397     + generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
1398     generic_ops.get_next_variable = efi.get_next_variable;
1399     generic_ops.query_variable_store = efi_query_variable_store;
1400    
1401     diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
1402     index 7f2ea21c730d..6f182fd91a6d 100644
1403     --- a/drivers/firmware/efi/vars.c
1404     +++ b/drivers/firmware/efi/vars.c
1405     @@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
1406     { NULL_GUID, "", NULL },
1407     };
1408    
1409     +/*
1410     + * Check if @var_name matches the pattern given in @match_name.
1411     + *
1412     + * @var_name: an array of @len non-NUL characters.
1413     + * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
1414     + * final "*" character matches any trailing characters in @var_name,
1415     + * including the case when there are none left in @var_name.
1416     + * @match: on output, the number of non-wildcard characters in @match_name
1417     + * that @var_name matches, regardless of the return value.
1418     + * @return: whether @var_name fully matches @match_name.
1419     + */
1420     static bool
1421     variable_matches(const char *var_name, size_t len, const char *match_name,
1422     int *match)
1423     {
1424     for (*match = 0; ; (*match)++) {
1425     char c = match_name[*match];
1426     - char u = var_name[*match];
1427    
1428     - /* Wildcard in the matching name means we've matched */
1429     - if (c == '*')
1430     + switch (c) {
1431     + case '*':
1432     + /* Wildcard in @match_name means we've matched. */
1433     return true;
1434    
1435     - /* Case sensitive match */
1436     - if (!c && *match == len)
1437     - return true;
1438     + case '\0':
1439     + /* @match_name has ended. Has @var_name too? */
1440     + return (*match == len);
1441    
1442     - if (c != u)
1443     + default:
1444     + /*
1445     + * We've reached a non-wildcard char in @match_name.
1446     + * Continue only if there's an identical character in
1447     + * @var_name.
1448     + */
1449     + if (*match < len && c == var_name[*match])
1450     + continue;
1451     return false;
1452     -
1453     - if (!c)
1454     - return true;
1455     + }
1456     }
1457     - return true;
1458     }
1459    
1460     bool
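
For reference, the patched matcher can be exercised in isolation; the sketch below mirrors the new switch-based logic over a len-byte, possibly non-NUL-terminated @var_name, with a hypothetical main() as usage:

    #include <stdbool.h>
    #include <stdio.h>

    /* Standalone sketch of the patched matcher: '\0' in the pattern and
     * the length of var_name must be checked together, since var_name
     * need not be NUL-terminated. */
    static bool variable_matches(const char *var_name, size_t len,
                                 const char *match_name, int *match)
    {
            for (*match = 0; ; (*match)++) {
                    char c = match_name[*match];

                    switch (c) {
                    case '*':
                            return true;
                    case '\0':
                            return *match == (int)len;
                    default:
                            if ((size_t)*match < len && c == var_name[*match])
                                    continue;
                            return false;
                    }
            }
    }

    int main(void)
    {
            int m;
            /* "Boot0001" matches "Boot*"; m reports 4 literal chars */
            printf("%d %d\n",
                   variable_matches("Boot0001", 8, "Boot*", &m), m);
            return 0;
    }
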
1461     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1462     index bb1099c549df..053fc2f465df 100644
1463     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1464     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
1465     @@ -1673,6 +1673,7 @@ struct amdgpu_uvd {
1466     struct amdgpu_bo *vcpu_bo;
1467     void *cpu_addr;
1468     uint64_t gpu_addr;
1469     + unsigned fw_version;
1470     atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
1471     struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
1472     struct delayed_work idle_work;
1473     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1474     index 8ac49812a716..5a8fbadbd27b 100644
1475     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1476     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1477     @@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
1478     return amdgpu_atpx_priv.atpx_detected;
1479     }
1480    
1481     -bool amdgpu_has_atpx_dgpu_power_cntl(void) {
1482     - return amdgpu_atpx_priv.atpx.functions.power_cntl;
1483     -}
1484     -
1485     /**
1486     * amdgpu_atpx_call - call an ATPX method
1487     *
1488     @@ -146,6 +142,10 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
1489     */
1490     static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
1491     {
1492     + /* make sure required functions are enabled */
1493     + /* dGPU power control is required */
1494     + atpx->functions.power_cntl = true;
1495     +
1496     if (atpx->functions.px_params) {
1497     union acpi_object *info;
1498     struct atpx_px_params output;
1499     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1500     index 9d88023df836..c961fe093e12 100644
1501     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1502     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1503     @@ -61,12 +61,6 @@ static const char *amdgpu_asic_name[] = {
1504     "LAST",
1505     };
1506    
1507     -#if defined(CONFIG_VGA_SWITCHEROO)
1508     -bool amdgpu_has_atpx_dgpu_power_cntl(void);
1509     -#else
1510     -static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
1511     -#endif
1512     -
1513     bool amdgpu_device_is_px(struct drm_device *dev)
1514     {
1515     struct amdgpu_device *adev = dev->dev_private;
1516     @@ -1475,7 +1469,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1517    
1518     if (amdgpu_runtime_pm == 1)
1519     runtime = true;
1520     - if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
1521     + if (amdgpu_device_is_px(ddev))
1522     runtime = true;
1523     vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
1524     if (runtime)
1525     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1526     index e23843f4d877..4488e82f87b0 100644
1527     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1528     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1529     @@ -303,7 +303,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
1530     fw_info.feature = adev->vce.fb_version;
1531     break;
1532     case AMDGPU_INFO_FW_UVD:
1533     - fw_info.ver = 0;
1534     + fw_info.ver = adev->uvd.fw_version;
1535     fw_info.feature = 0;
1536     break;
1537     case AMDGPU_INFO_FW_GMC:
1538     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1539     index 064ebb347074..89df7871653d 100644
1540     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1541     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
1542     @@ -52,7 +52,7 @@ struct amdgpu_hpd;
1543    
1544     #define AMDGPU_MAX_HPD_PINS 6
1545     #define AMDGPU_MAX_CRTCS 6
1546     -#define AMDGPU_MAX_AFMT_BLOCKS 7
1547     +#define AMDGPU_MAX_AFMT_BLOCKS 9
1548    
1549     enum amdgpu_rmx_type {
1550     RMX_OFF,
1551     @@ -308,8 +308,8 @@ struct amdgpu_mode_info {
1552     struct atom_context *atom_context;
1553     struct card_info *atom_card_info;
1554     bool mode_config_initialized;
1555     - struct amdgpu_crtc *crtcs[6];
1556     - struct amdgpu_afmt *afmt[7];
1557     + struct amdgpu_crtc *crtcs[AMDGPU_MAX_CRTCS];
1558     + struct amdgpu_afmt *afmt[AMDGPU_MAX_AFMT_BLOCKS];
1559     /* DVI-I properties */
1560     struct drm_property *coherent_mode_property;
1561     /* DAC enable load detect */
1562     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1563     index 53f987aeeacf..3b35ad83867c 100644
1564     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1565     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
1566     @@ -156,6 +156,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
1567     DRM_INFO("Found UVD firmware Version: %hu.%hu Family ID: %hu\n",
1568     version_major, version_minor, family_id);
1569    
1570     + adev->uvd.fw_version = ((version_major << 24) | (version_minor << 16) |
1571     + (family_id << 8));
1572     +
1573     bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
1574     + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
1575     r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
1576     @@ -273,6 +276,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
1577     memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
1578     (adev->uvd.fw->size) - offset);
1579    
1580     + cancel_delayed_work_sync(&adev->uvd.idle_work);
1581     +
1582     size = amdgpu_bo_size(adev->uvd.vcpu_bo);
1583     size -= le32_to_cpu(hdr->ucode_size_bytes);
1584     ptr = adev->uvd.cpu_addr;
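
The fw_version stored by the hunk above packs major/minor/family into the top three bytes of a 32-bit value, which the AMDGPU_INFO_FW_UVD query in amdgpu_kms.c then returns verbatim instead of 0. A userspace sketch of the packing and unpacking (the version numbers are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    /* major in bits 31..24, minor in 23..16, family id in 15..8;
     * the low byte stays zero */
    static uint32_t pack_fw_version(uint32_t major, uint32_t minor,
                                    uint32_t family_id)
    {
            return (major << 24) | (minor << 16) | (family_id << 8);
    }

    int main(void)
    {
            uint32_t v = pack_fw_version(1, 66, 16);
            printf("fw_version=0x%08x major=%u minor=%u family=%u\n",
                   v, v >> 24, (v >> 16) & 0xff, (v >> 8) & 0xff);
            return 0;
    }
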
1585     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1586     index a745eeeb5d82..bb0da76051a1 100644
1587     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1588     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
1589     @@ -220,6 +220,7 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
1590     if (i == AMDGPU_MAX_VCE_HANDLES)
1591     return 0;
1592    
1593     + cancel_delayed_work_sync(&adev->vce.idle_work);
1594     /* TODO: suspending running encoding sessions isn't supported */
1595     return -EINVAL;
1596     }
1597     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1598     index aa491540ba85..946300764609 100644
1599     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1600     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1601     @@ -3628,7 +3628,7 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
1602     unsigned vm_id, uint64_t pd_addr)
1603     {
1604     int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
1605     - uint32_t seq = ring->fence_drv.sync_seq;
1606     + uint32_t seq = ring->fence_drv.sync_seq[ring->idx];
1607     uint64_t addr = ring->fence_drv.gpu_addr;
1608    
1609     amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
1610     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
1611     index c34c393e9aea..d5e19b5fbbfb 100644
1612     --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
1613     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
1614     @@ -513,7 +513,7 @@ static int dbgdev_wave_control_set_registers(
1615     union SQ_CMD_BITS *in_reg_sq_cmd,
1616     union GRBM_GFX_INDEX_BITS *in_reg_gfx_index)
1617     {
1618     - int status;
1619     + int status = 0;
1620     union SQ_CMD_BITS reg_sq_cmd;
1621     union GRBM_GFX_INDEX_BITS reg_gfx_index;
1622     struct HsaDbgWaveMsgAMDGen2 *pMsg;
1623     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1624     index 39d7e2e15c11..d268bf18a662 100644
1625     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1626     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1627     @@ -1665,13 +1665,19 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1628     struct drm_dp_mst_branch *mstb;
1629     int len, ret, port_num;
1630    
1631     + port = drm_dp_get_validated_port_ref(mgr, port);
1632     + if (!port)
1633     + return -EINVAL;
1634     +
1635     port_num = port->port_num;
1636     mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
1637     if (!mstb) {
1638     mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
1639    
1640     - if (!mstb)
1641     + if (!mstb) {
1642     + drm_dp_put_port(port);
1643     return -EINVAL;
1644     + }
1645     }
1646    
1647     txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1648     @@ -1697,6 +1703,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
1649     kfree(txmsg);
1650     fail_put:
1651     drm_dp_put_mst_branch_device(mstb);
1652     + drm_dp_put_port(port);
1653     return ret;
1654     }
1655    
1656     @@ -1779,6 +1786,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1657     req_payload.start_slot = cur_slots;
1658     if (mgr->proposed_vcpis[i]) {
1659     port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
1660     + port = drm_dp_get_validated_port_ref(mgr, port);
1661     + if (!port) {
1662     + mutex_unlock(&mgr->payload_lock);
1663     + return -EINVAL;
1664     + }
1665     req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
1666     } else {
1667     port = NULL;
1668     @@ -1804,6 +1816,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
1669     mgr->payloads[i].payload_state = req_payload.payload_state;
1670     }
1671     cur_slots += req_payload.num_slots;
1672     +
1673     + if (port)
1674     + drm_dp_put_port(port);
1675     }
1676    
1677     for (i = 0; i < mgr->max_payloads; i++) {
1678     @@ -2109,6 +2124,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1679    
1680     if (mgr->mst_primary) {
1681     int sret;
1682     + u8 guid[16];
1683     +
1684     sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
1685     if (sret != DP_RECEIVER_CAP_SIZE) {
1686     DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
1687     @@ -2123,6 +2140,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
1688     ret = -1;
1689     goto out_unlock;
1690     }
1691     +
1692     + /* Some hubs forget their guids after they resume */
1693     + sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
1694     + if (sret != 16) {
1695     + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
1696     + ret = -1;
1697     + goto out_unlock;
1698     + }
1699     + drm_dp_check_mstb_guid(mgr->mst_primary, guid);
1700     +
1701     ret = 0;
1702     } else
1703     ret = -1;
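
The drm_dp_mst_topology.c changes all follow one discipline: re-validate a cached port pointer (taking a reference) before using it, and drop that reference on every exit path, so a port unplugged mid-operation cannot be used after free. A toy model of the pattern, not the DRM MST API:

    #include <errno.h>
    #include <stdio.h>

    struct obj {
            int valid;
            int refs;
    };

    static struct obj *obj_get_validated(struct obj *o)
    {
            if (!o || !o->valid)
                    return NULL; /* stale pointer: caller must bail out */
            o->refs++;
            return o;
    }

    static void obj_put(struct obj *o)
    {
            o->refs--;
    }

    static int use_object(struct obj *candidate)
    {
            struct obj *o = obj_get_validated(candidate);

            if (!o)
                    return -EINVAL;

            printf("using object, refs=%d\n", o->refs);
            obj_put(o); /* every exit path after the get drops the ref */
            return 0;
    }

    int main(void)
    {
            struct obj live = { .valid = 1, .refs = 1 };
            return use_object(&live);
    }
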
1704     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1705     index f859a5b87ed4..afa81691163d 100644
1706     --- a/drivers/gpu/drm/i915/intel_display.c
1707     +++ b/drivers/gpu/drm/i915/intel_display.c
1708     @@ -4447,7 +4447,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
1709     intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
1710    
1711     return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
1712     - &state->scaler_state.scaler_id, DRM_ROTATE_0,
1713     + &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
1714     state->pipe_src_w, state->pipe_src_h,
1715     adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
1716     }
1717     diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
1718     index 0639275fc471..06bd9257acdc 100644
1719     --- a/drivers/gpu/drm/i915/intel_dp_mst.c
1720     +++ b/drivers/gpu/drm/i915/intel_dp_mst.c
1721     @@ -477,6 +477,8 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1722     struct intel_connector *intel_connector = to_intel_connector(connector);
1723     struct drm_device *dev = connector->dev;
1724    
1725     + intel_connector->unregister(intel_connector);
1726     +
1727     /* need to nuke the connector */
1728     drm_modeset_lock_all(dev);
1729     if (connector->state->crtc) {
1730     @@ -490,11 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1731    
1732     WARN(ret, "Disabling mst crtc failed with %i\n", ret);
1733     }
1734     - drm_modeset_unlock_all(dev);
1735    
1736     - intel_connector->unregister(intel_connector);
1737     -
1738     - drm_modeset_lock_all(dev);
1739     intel_connector_remove_from_fbdev(intel_connector);
1740     drm_connector_cleanup(connector);
1741     drm_modeset_unlock_all(dev);
1742     diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
1743     index d69547a65dbb..7058f75c7b42 100644
1744     --- a/drivers/gpu/drm/i915/intel_lrc.c
1745     +++ b/drivers/gpu/drm/i915/intel_lrc.c
1746     @@ -776,11 +776,11 @@ static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes)
1747     if (unlikely(total_bytes > remain_usable)) {
1748     /*
1749     * The base request will fit but the reserved space
1750     - * falls off the end. So only need to to wait for the
1751     - * reserved size after flushing out the remainder.
1752     + * falls off the end. So we don't need an immediate wrap,
1753     + * and only need to wait for the reserved size, counted from
1754     + * the start of the ringbuffer.
1755     */
1756     wait_bytes = remain_actual + ringbuf->reserved_size;
1757     - need_wrap = true;
1758     } else if (total_bytes > ringbuf->space) {
1759     /* No wrapping required, just waiting. */
1760     wait_bytes = total_bytes;
1761     diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
1762     index f6b2a814e629..9d48443bca2e 100644
1763     --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
1764     +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
1765     @@ -1922,6 +1922,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
1766     return 0;
1767     }
1768    
1769     +static void cleanup_phys_status_page(struct intel_engine_cs *ring)
1770     +{
1771     + struct drm_i915_private *dev_priv = to_i915(ring->dev);
1772     +
1773     + if (!dev_priv->status_page_dmah)
1774     + return;
1775     +
1776     + drm_pci_free(ring->dev, dev_priv->status_page_dmah);
1777     + ring->status_page.page_addr = NULL;
1778     +}
1779     +
1780     static void cleanup_status_page(struct intel_engine_cs *ring)
1781     {
1782     struct drm_i915_gem_object *obj;
1783     @@ -1938,9 +1949,9 @@ static void cleanup_status_page(struct intel_engine_cs *ring)
1784    
1785     static int init_status_page(struct intel_engine_cs *ring)
1786     {
1787     - struct drm_i915_gem_object *obj;
1788     + struct drm_i915_gem_object *obj = ring->status_page.obj;
1789    
1790     - if ((obj = ring->status_page.obj) == NULL) {
1791     + if (obj == NULL) {
1792     unsigned flags;
1793     int ret;
1794    
1795     @@ -2134,7 +2145,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1796     if (ret)
1797     goto error;
1798     } else {
1799     - BUG_ON(ring->id != RCS);
1800     + WARN_ON(ring->id != RCS);
1801     ret = init_phys_status_page(ring);
1802     if (ret)
1803     goto error;
1804     @@ -2179,7 +2190,12 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1805     if (ring->cleanup)
1806     ring->cleanup(ring);
1807    
1808     - cleanup_status_page(ring);
1809     + if (I915_NEED_GFX_HWS(ring->dev)) {
1810     + cleanup_status_page(ring);
1811     + } else {
1812     + WARN_ON(ring->id != RCS);
1813     + cleanup_phys_status_page(ring);
1814     + }
1815    
1816     i915_cmd_parser_fini_ring(ring);
1817     i915_gem_batch_pool_fini(&ring->batch_pool);
1818     @@ -2341,11 +2357,11 @@ static int __intel_ring_prepare(struct intel_engine_cs *ring, int bytes)
1819     if (unlikely(total_bytes > remain_usable)) {
1820     /*
1821     * The base request will fit but the reserved space
1822     - * falls off the end. So only need to to wait for the
1823     - * reserved size after flushing out the remainder.
1824     + * falls off the end. So we don't need an immediate wrap,
1825     + * and only need to wait for the reserved size, counted from
1826     + * the start of the ringbuffer.
1827     */
1828     wait_bytes = remain_actual + ringbuf->reserved_size;
1829     - need_wrap = true;
1830     } else if (total_bytes > ringbuf->space) {
1831     /* No wrapping required, just waiting. */
1832     wait_bytes = total_bytes;
1833     diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
1834     index 43cba129a0c0..cc91ae832ffb 100644
1835     --- a/drivers/gpu/drm/i915/intel_uncore.c
1836     +++ b/drivers/gpu/drm/i915/intel_uncore.c
1837     @@ -1132,7 +1132,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1838     } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1839     dev_priv->uncore.funcs.force_wake_get =
1840     fw_domains_get_with_thread_status;
1841     - dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1842     + if (IS_HASWELL(dev))
1843     + dev_priv->uncore.funcs.force_wake_put =
1844     + fw_domains_put_with_fifo;
1845     + else
1846     + dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1847     fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1848     FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1849     } else if (IS_IVYBRIDGE(dev)) {
1850     diff --git a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
1851     index 3216e157a8a0..89da47234016 100644
1852     --- a/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
1853     +++ b/drivers/gpu/drm/nouveau/nvkm/core/ramht.c
1854     @@ -131,7 +131,7 @@ nvkm_ramht_del(struct nvkm_ramht **pramht)
1855     struct nvkm_ramht *ramht = *pramht;
1856     if (ramht) {
1857     nvkm_gpuobj_del(&ramht->gpuobj);
1858     - kfree(*pramht);
1859     + vfree(*pramht);
1860     *pramht = NULL;
1861     }
1862     }
1863     @@ -143,8 +143,8 @@ nvkm_ramht_new(struct nvkm_device *device, u32 size, u32 align,
1864     struct nvkm_ramht *ramht;
1865     int ret, i;
1866    
1867     - if (!(ramht = *pramht = kzalloc(sizeof(*ramht) + (size >> 3) *
1868     - sizeof(*ramht->data), GFP_KERNEL)))
1869     + if (!(ramht = *pramht = vzalloc(sizeof(*ramht) +
1870     + (size >> 3) * sizeof(*ramht->data))))
1871     return -ENOMEM;
1872    
1873     ramht->device = device;
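
The ramht allocation can run to many kilobytes (one entry per 8-byte hash-table slot), so the hunk above moves it from kzalloc, which needs physically contiguous pages, to vzalloc; the matching kfree must then become vfree, since the two allocators cannot free each other's pointers. A sketch of the size arithmetic with an illustrative table size:

    #include <stdio.h>

    struct entry { unsigned int chan; unsigned int inst; };
    struct ramht { int device; struct entry data[]; };

    int main(void)
    {
            unsigned int size = 0x8000; /* bytes of hash table, made up */
            size_t bytes = sizeof(struct ramht) +
                           (size >> 3) * sizeof(struct entry);

            /* (size >> 3) entries: one per 8-byte hash-table slot */
            printf("%zu bytes -> prefer vzalloc/vfree over kzalloc/kfree\n",
                   bytes);
            return 0;
    }
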
1874     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1875     index 9f5dfc85147a..36655a74c538 100644
1876     --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1877     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1878     @@ -1717,6 +1717,8 @@ gf100_gr_init(struct gf100_gr *gr)
1879    
1880     gf100_gr_mmio(gr, gr->func->mmio);
1881    
1882     + nvkm_mask(device, TPC_UNIT(0, 0, 0x05c), 0x00000001, 0x00000001);
1883     +
1884     memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
1885     for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
1886     do {
1887     diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
1888     index 183aea1abebc..5edebf495c07 100644
1889     --- a/drivers/gpu/drm/qxl/qxl_display.c
1890     +++ b/drivers/gpu/drm/qxl/qxl_display.c
1891     @@ -375,10 +375,15 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
1892    
1893     qxl_bo_kunmap(user_bo);
1894    
1895     + qcrtc->cur_x += qcrtc->hot_spot_x - hot_x;
1896     + qcrtc->cur_y += qcrtc->hot_spot_y - hot_y;
1897     + qcrtc->hot_spot_x = hot_x;
1898     + qcrtc->hot_spot_y = hot_y;
1899     +
1900     cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
1901     cmd->type = QXL_CURSOR_SET;
1902     - cmd->u.set.position.x = qcrtc->cur_x;
1903     - cmd->u.set.position.y = qcrtc->cur_y;
1904     + cmd->u.set.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
1905     + cmd->u.set.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
1906    
1907     cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);
1908    
1909     @@ -441,8 +446,8 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc,
1910    
1911     cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
1912     cmd->type = QXL_CURSOR_MOVE;
1913     - cmd->u.position.x = qcrtc->cur_x;
1914     - cmd->u.position.y = qcrtc->cur_y;
1915     + cmd->u.position.x = qcrtc->cur_x + qcrtc->hot_spot_x;
1916     + cmd->u.position.y = qcrtc->cur_y + qcrtc->hot_spot_y;
1917     qxl_release_unmap(qdev, release, &cmd->release_info);
1918    
1919     qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
1920     diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
1921     index 01a86948eb8c..3ab90179e9ab 100644
1922     --- a/drivers/gpu/drm/qxl/qxl_drv.h
1923     +++ b/drivers/gpu/drm/qxl/qxl_drv.h
1924     @@ -135,6 +135,8 @@ struct qxl_crtc {
1925     int index;
1926     int cur_x;
1927     int cur_y;
1928     + int hot_spot_x;
1929     + int hot_spot_y;
1930     };
1931    
1932     struct qxl_output {
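
The qxl fix keeps the point the device actually positions (top-left corner plus hotspot) stable when a new cursor image changes the hotspot. A toy recomputation with made-up coordinates:

    #include <stdio.h>

    struct crtc { int cur_x, cur_y, hot_x, hot_y; };

    static void set_cursor(struct crtc *c, int new_hot_x, int new_hot_y)
    {
            /* keep cur + hot (the device-visible position) constant */
            c->cur_x += c->hot_x - new_hot_x;
            c->cur_y += c->hot_y - new_hot_y;
            c->hot_x = new_hot_x;
            c->hot_y = new_hot_y;
    }

    int main(void)
    {
            struct crtc c = { .cur_x = 100, .cur_y = 50 };
            int dev_x = c.cur_x + c.hot_x;    /* 100 */

            set_cursor(&c, 8, 8);             /* new cursor image */
            printf("device x before=%d after=%d\n",
                   dev_x, c.cur_x + c.hot_x); /* still 100 */
            return 0;
    }
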
1933     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1934     index 2ad462896896..32491355a1d4 100644
1935     --- a/drivers/gpu/drm/radeon/evergreen.c
1936     +++ b/drivers/gpu/drm/radeon/evergreen.c
1937     @@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
1938     WREG32(VM_CONTEXT1_CNTL, 0);
1939     }
1940    
1941     +static const unsigned ni_dig_offsets[] =
1942     +{
1943     + NI_DIG0_REGISTER_OFFSET,
1944     + NI_DIG1_REGISTER_OFFSET,
1945     + NI_DIG2_REGISTER_OFFSET,
1946     + NI_DIG3_REGISTER_OFFSET,
1947     + NI_DIG4_REGISTER_OFFSET,
1948     + NI_DIG5_REGISTER_OFFSET
1949     +};
1950     +
1951     +static const unsigned ni_tx_offsets[] =
1952     +{
1953     + NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
1954     + NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
1955     + NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
1956     + NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
1957     + NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
1958     + NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
1959     +};
1960     +
1961     +static const unsigned evergreen_dp_offsets[] =
1962     +{
1963     + EVERGREEN_DP0_REGISTER_OFFSET,
1964     + EVERGREEN_DP1_REGISTER_OFFSET,
1965     + EVERGREEN_DP2_REGISTER_OFFSET,
1966     + EVERGREEN_DP3_REGISTER_OFFSET,
1967     + EVERGREEN_DP4_REGISTER_OFFSET,
1968     + EVERGREEN_DP5_REGISTER_OFFSET
1969     +};
1970     +
1971     +
1972     +/*
1973     + * Assumption: EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
1974     + * We go from crtc to connector, which is not reliable, since it
1975     + * should be the opposite direction. If the crtc is enabled, then
1976     + * find the dig_fe which selects this crtc and ensure that it is enabled.
1977     + * If such a dig_fe is found, then find the dig_be which selects the
1978     + * found dig_fe and ensure that it is enabled and in DP_SST mode.
1979     + * If UNIPHY_PLL_CONTROL1 is enabled, then we should disconnect the
1980     + * timing from the dp symbol clocks.
1981     + */
1982     +static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
1983     + unsigned crtc_id, unsigned *ret_dig_fe)
1984     +{
1985     + unsigned i;
1986     + unsigned dig_fe;
1987     + unsigned dig_be;
1988     + unsigned dig_en_be;
1989     + unsigned uniphy_pll;
1990     + unsigned digs_fe_selected;
1991     + unsigned dig_be_mode;
1992     + unsigned dig_fe_mask;
1993     + bool is_enabled = false;
1994     + bool found_crtc = false;
1995     +
1996     + /* loop through all running dig_fe to find selected crtc */
1997     + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
1998     + dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
1999     + if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
2000     + crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
2001     + /* found running pipe */
2002     + found_crtc = true;
2003     + dig_fe_mask = 1 << i;
2004     + dig_fe = i;
2005     + break;
2006     + }
2007     + }
2008     +
2009     + if (found_crtc) {
2010     + /* loop through all running dig_be to find selected dig_fe */
2011     + for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
2012     + dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
2013     + /* is this dig_fe selected by the dig_be? */
2014     + digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
2015     + dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
2016     + if (dig_fe_mask & digs_fe_selected &&
2017     + /* is the dig_be in sst mode? */
2018     + dig_be_mode == NI_DIG_BE_DPSST) {
2019     + dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
2020     + ni_dig_offsets[i]);
2021     + uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
2022     + ni_tx_offsets[i]);
2023     + /* dig_be is enabled and tx is running */
2024     + if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
2025     + dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
2026     + uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
2027     + is_enabled = true;
2028     + *ret_dig_fe = dig_fe;
2029     + break;
2030     + }
2031     + }
2032     + }
2033     + }
2034     +
2035     + return is_enabled;
2036     +}
2037     +
2038     +/*
2039     + * Blank the dig when in dp sst mode;
2040     + * the dig ignores crtc timing.
2041     + */
2042     +static void evergreen_blank_dp_output(struct radeon_device *rdev,
2043     + unsigned dig_fe)
2044     +{
2045     + unsigned stream_ctrl;
2046     + unsigned fifo_ctrl;
2047     + unsigned counter = 0;
2048     +
2049     + if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
2050     + DRM_ERROR("invalid dig_fe %d\n", dig_fe);
2051     + return;
2052     + }
2053     +
2054     + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2055     + evergreen_dp_offsets[dig_fe]);
2056     + if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
2057     + DRM_ERROR("dig %d should be enabled\n", dig_fe);
2058     + return;
2059     + }
2060     +
2061     + stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
2062     + WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2063     + evergreen_dp_offsets[dig_fe], stream_ctrl);
2064     +
2065     + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2066     + evergreen_dp_offsets[dig_fe]);
2067     + while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
2068     + msleep(1);
2069     + counter++;
2070     + stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
2071     + evergreen_dp_offsets[dig_fe]);
2072     + }
2073     + if (counter >= 32)
2074     + DRM_ERROR("counter exceeds %d\n", counter);
2075     +
2076     + fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
2077     + fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
2078     + WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
2079     +
2080     +}
2081     +
2082     void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
2083     {
2084     u32 crtc_enabled, tmp, frame_count, blackout;
2085     int i, j;
2086     + unsigned dig_fe;
2087    
2088     if (!ASIC_IS_NODCE(rdev)) {
2089     save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
2090     @@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2091     break;
2092     udelay(1);
2093     }
2094     -
2095     + /* We should disable the dig if it drives dp sst,
2096     + * but we are in radeon_device_init and the topology is unknown;
2097     + * it only becomes available after radeon_modeset_init.
2098     + * The method radeon_atom_encoder_dpms_dig would do the job
2099     + * if we initialized it properly;
2100     + * for now we do it manually.
2101     + */
2102     + if (ASIC_IS_DCE5(rdev) &&
2103     + evergreen_is_dp_sst_stream_enabled(rdev, i, &dig_fe))
2104     + evergreen_blank_dp_output(rdev, dig_fe);
2105     + /* we could remove the 6 lines below */
2106     /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
2107     WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2108     tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
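
evergreen_blank_dp_output() above uses a bounded poll: re-read the status register until the bit clears or 32 iterations elapse, then complain. A generic userspace sketch of the idiom (read_status() and ACTIVE are stand-ins, not radeon register accessors):

    #include <stdio.h>

    #define ACTIVE 0x1

    static unsigned int fake_status = ACTIVE;

    static unsigned int read_status(void)
    {
            /* pretend the hardware deasserts the bit after a few reads */
            static int reads;
            if (++reads >= 3)
                    fake_status = 0;
            return fake_status;
    }

    int main(void)
    {
            unsigned int counter = 0;

            while (counter < 32 && (read_status() & ACTIVE))
                    counter++; /* the kernel code also msleep(1)s here */

            if (counter >= 32)
                    fprintf(stderr, "timed out waiting for idle\n");
            else
                    printf("idle after %u polls\n", counter);
            return 0;
    }
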
2109     diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
2110     index aa939dfed3a3..b436badf9efa 100644
2111     --- a/drivers/gpu/drm/radeon/evergreen_reg.h
2112     +++ b/drivers/gpu/drm/radeon/evergreen_reg.h
2113     @@ -250,8 +250,43 @@
2114    
2115     /* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
2116     #define EVERGREEN_HDMI_BASE 0x7030
2117     +/*DIG block*/
2118     +#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
2119     +#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
2120     +#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
2121     +#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
2122     +#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
2123     +#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
2124     +
2125     +
2126     +#define NI_DIG_FE_CNTL 0x7000
2127     +# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
2128     +# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1<<24)
2129     +
2130     +
2131     +#define NI_DIG_BE_CNTL 0x7140
2132     +# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
2133     +# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
2134     +
2135     +#define NI_DIG_BE_EN_CNTL 0x7144
2136     +# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
2137     +# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
2138     +# define NI_DIG_BE_DPSST 0
2139    
2140     /* Display Port block */
2141     +#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
2142     +#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
2143     +#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
2144     +#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
2145     +#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
2146     +#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
2147     +
2148     +
2149     +#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
2150     +# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
2151     +# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
2152     +#define EVERGREEN_DP_STEER_FIFO 0x7310
2153     +# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
2154     #define EVERGREEN_DP_SEC_CNTL 0x7280
2155     # define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
2156     # define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
2157     @@ -266,4 +301,15 @@
2158     # define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
2159     # define EVERGREEN_DP_SEC_SS_EN (1 << 28)
2160    
2161     +/*DCIO_UNIPHY block*/
2162     +#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
2163     +#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
2164     +#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
2165     +#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
2166     +#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
2167     +#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
2168     +
2169     +#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
2170     +# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
2171     +
2172     #endif
2173     diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2174     index 9bc408c9f9f6..c4b4f298a283 100644
2175     --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2176     +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
2177     @@ -62,10 +62,6 @@ bool radeon_has_atpx(void) {
2178     return radeon_atpx_priv.atpx_detected;
2179     }
2180    
2181     -bool radeon_has_atpx_dgpu_power_cntl(void) {
2182     - return radeon_atpx_priv.atpx.functions.power_cntl;
2183     -}
2184     -
2185     /**
2186     * radeon_atpx_call - call an ATPX method
2187     *
2188     @@ -145,6 +141,10 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
2189     */
2190     static int radeon_atpx_validate(struct radeon_atpx *atpx)
2191     {
2192     + /* make sure required functions are enabled */
2193     + /* dGPU power control is required */
2194     + atpx->functions.power_cntl = true;
2195     +
2196     if (atpx->functions.px_params) {
2197     union acpi_object *info;
2198     struct atpx_px_params output;
2199     diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2200     index 340f3f549f29..9cfc1c3e1965 100644
2201     --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2202     +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2203     @@ -1996,10 +1996,12 @@ radeon_add_atom_connector(struct drm_device *dev,
2204     rdev->mode_info.dither_property,
2205     RADEON_FMT_DITHER_DISABLE);
2206    
2207     - if (radeon_audio != 0)
2208     + if (radeon_audio != 0) {
2209     drm_object_attach_property(&radeon_connector->base.base,
2210     rdev->mode_info.audio_property,
2211     RADEON_AUDIO_AUTO);
2212     + radeon_connector->audio = RADEON_AUDIO_AUTO;
2213     + }
2214     if (ASIC_IS_DCE5(rdev))
2215     drm_object_attach_property(&radeon_connector->base.base,
2216     rdev->mode_info.output_csc_property,
2217     @@ -2124,6 +2126,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2218     drm_object_attach_property(&radeon_connector->base.base,
2219     rdev->mode_info.audio_property,
2220     RADEON_AUDIO_AUTO);
2221     + radeon_connector->audio = RADEON_AUDIO_AUTO;
2222     }
2223     if (connector_type == DRM_MODE_CONNECTOR_DVII) {
2224     radeon_connector->dac_load_detect = true;
2225     @@ -2179,6 +2182,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2226     drm_object_attach_property(&radeon_connector->base.base,
2227     rdev->mode_info.audio_property,
2228     RADEON_AUDIO_AUTO);
2229     + radeon_connector->audio = RADEON_AUDIO_AUTO;
2230     }
2231     if (ASIC_IS_DCE5(rdev))
2232     drm_object_attach_property(&radeon_connector->base.base,
2233     @@ -2231,6 +2235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
2234     drm_object_attach_property(&radeon_connector->base.base,
2235     rdev->mode_info.audio_property,
2236     RADEON_AUDIO_AUTO);
2237     + radeon_connector->audio = RADEON_AUDIO_AUTO;
2238     }
2239     if (ASIC_IS_DCE5(rdev))
2240     drm_object_attach_property(&radeon_connector->base.base,
2241     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2242     index f78f111e68de..c566993a2ec3 100644
2243     --- a/drivers/gpu/drm/radeon/radeon_device.c
2244     +++ b/drivers/gpu/drm/radeon/radeon_device.c
2245     @@ -103,12 +103,6 @@ static const char radeon_family_name[][16] = {
2246     "LAST",
2247     };
2248    
2249     -#if defined(CONFIG_VGA_SWITCHEROO)
2250     -bool radeon_has_atpx_dgpu_power_cntl(void);
2251     -#else
2252     -static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
2253     -#endif
2254     -
2255     #define RADEON_PX_QUIRK_DISABLE_PX (1 << 0)
2256     #define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1)
2257    
2258     @@ -1439,7 +1433,7 @@ int radeon_device_init(struct radeon_device *rdev,
2259     * ignore it */
2260     vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
2261    
2262     - if ((rdev->flags & RADEON_IS_PX) && radeon_has_atpx_dgpu_power_cntl())
2263     + if (rdev->flags & RADEON_IS_PX)
2264     runtime = true;
2265     vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
2266     if (runtime)
2267     diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
2268     index e06ac546a90f..f342aad79cc6 100644
2269     --- a/drivers/gpu/drm/radeon/radeon_ttm.c
2270     +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
2271     @@ -235,6 +235,8 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
2272     {
2273     struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
2274    
2275     + if (radeon_ttm_tt_has_userptr(bo->ttm))
2276     + return -EPERM;
2277     return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
2278     }
2279    
2280     diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
2281     index 7285adb27099..caa73de584a5 100644
2282     --- a/drivers/gpu/drm/radeon/si_dpm.c
2283     +++ b/drivers/gpu/drm/radeon/si_dpm.c
2284     @@ -2931,6 +2931,7 @@ static struct si_dpm_quirk si_dpm_quirk_list[] = {
2285     { PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
2286     { PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
2287     { PCI_VENDOR_ID_ATI, 0x6811, 0x148c, 0x2015, 0, 120000 },
2288     + { PCI_VENDOR_ID_ATI, 0x6810, 0x1682, 0x9275, 0, 120000 },
2289     { 0, 0, 0, 0 },
2290     };
2291    
2292     diff --git a/drivers/hwtracing/stm/Kconfig b/drivers/hwtracing/stm/Kconfig
2293     index 83e9f591a54b..e7a348807f0c 100644
2294     --- a/drivers/hwtracing/stm/Kconfig
2295     +++ b/drivers/hwtracing/stm/Kconfig
2296     @@ -1,6 +1,7 @@
2297     config STM
2298     tristate "System Trace Module devices"
2299     select CONFIGFS_FS
2300     + select SRCU
2301     help
2302     A System Trace Module (STM) is a device exporting data in System
2303     Trace Protocol (STP) format as defined by MIPI STP standards.
2304     diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
2305     index 714bdc837769..b167ab25310a 100644
2306     --- a/drivers/i2c/busses/i2c-cpm.c
2307     +++ b/drivers/i2c/busses/i2c-cpm.c
2308     @@ -116,8 +116,8 @@ struct cpm_i2c {
2309     cbd_t __iomem *rbase;
2310     u_char *txbuf[CPM_MAXBD];
2311     u_char *rxbuf[CPM_MAXBD];
2312     - u32 txdma[CPM_MAXBD];
2313     - u32 rxdma[CPM_MAXBD];
2314     + dma_addr_t txdma[CPM_MAXBD];
2315     + dma_addr_t rxdma[CPM_MAXBD];
2316     };
2317    
2318     static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
2319     diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
2320     index b29c7500461a..f54ece8fce78 100644
2321     --- a/drivers/i2c/busses/i2c-exynos5.c
2322     +++ b/drivers/i2c/busses/i2c-exynos5.c
2323     @@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2324     return -EIO;
2325     }
2326    
2327     - clk_prepare_enable(i2c->clk);
2328     + ret = clk_enable(i2c->clk);
2329     + if (ret)
2330     + return ret;
2331    
2332     for (i = 0; i < num; i++, msgs++) {
2333     stop = (i == num - 1);
2334     @@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
2335     }
2336    
2337     out:
2338     - clk_disable_unprepare(i2c->clk);
2339     + clk_disable(i2c->clk);
2340     return ret;
2341     }
2342    
2343     @@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2344     return -ENOENT;
2345     }
2346    
2347     - clk_prepare_enable(i2c->clk);
2348     + ret = clk_prepare_enable(i2c->clk);
2349     + if (ret)
2350     + return ret;
2351    
2352     mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2353     i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
2354     @@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
2355    
2356     platform_set_drvdata(pdev, i2c);
2357    
2358     + clk_disable(i2c->clk);
2359     +
2360     + return 0;
2361     +
2362     err_clk:
2363     clk_disable_unprepare(i2c->clk);
2364     return ret;
2365     @@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
2366    
2367     i2c_del_adapter(&i2c->adap);
2368    
2369     + clk_unprepare(i2c->clk);
2370     +
2371     return 0;
2372     }
2373    
2374     @@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
2375    
2376     i2c->suspended = 1;
2377    
2378     + clk_unprepare(i2c->clk);
2379     +
2380     return 0;
2381     }
2382    
2383     @@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2384     struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
2385     int ret = 0;
2386    
2387     - clk_prepare_enable(i2c->clk);
2388     + ret = clk_prepare_enable(i2c->clk);
2389     + if (ret)
2390     + return ret;
2391    
2392     ret = exynos5_hsi2c_clock_setup(i2c);
2393     if (ret) {
2394     @@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
2395     }
2396    
2397     exynos5_i2c_init(i2c);
2398     - clk_disable_unprepare(i2c->clk);
2399     + clk_disable(i2c->clk);
2400     i2c->suspended = 0;
2401    
2402     return 0;
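
The exynos5 rework splits clk_prepare_enable() into a prepare done once at probe time (prepare may sleep) and a cheap enable/disable around each transfer, and it checks every return value along the way. A stubbed sketch of the call pairing (the clk_* functions here are stand-ins, not the kernel clock API):

    #include <stdio.h>

    static int clk_prepare(void)    { puts("prepare (may sleep)"); return 0; }
    static int clk_enable(void)     { puts("enable (atomic-safe)"); return 0; }
    static void clk_disable(void)   { puts("disable"); }
    static void clk_unprepare(void) { puts("unprepare"); }

    static int probe(void)
    {
            int ret = clk_prepare();
            if (ret)
                    return ret; /* always check: prepare can fail */
            return 0;
    }

    static int xfer(void)
    {
            int ret = clk_enable();
            if (ret)
                    return ret;
            /* ... do the I2C transfer ... */
            clk_disable();
            return 0;
    }

    int main(void)
    {
            if (!probe())
                    xfer();
            clk_unprepare(); /* pairs with the probe-time prepare */
            return 0;
    }
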
2403     diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
2404     index 6b4e8a008bc0..564adf3116e8 100644
2405     --- a/drivers/infiniband/core/ucm.c
2406     +++ b/drivers/infiniband/core/ucm.c
2407     @@ -48,6 +48,7 @@
2408    
2409     #include <asm/uaccess.h>
2410    
2411     +#include <rdma/ib.h>
2412     #include <rdma/ib_cm.h>
2413     #include <rdma/ib_user_cm.h>
2414     #include <rdma/ib_marshall.h>
2415     @@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
2416     struct ib_ucm_cmd_hdr hdr;
2417     ssize_t result;
2418    
2419     + if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2420     + return -EACCES;
2421     +
2422     if (len < sizeof(hdr))
2423     return -EINVAL;
2424    
2425     diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
2426     index 8b5a934e1133..886f61ea6cc7 100644
2427     --- a/drivers/infiniband/core/ucma.c
2428     +++ b/drivers/infiniband/core/ucma.c
2429     @@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
2430     struct rdma_ucm_cmd_hdr hdr;
2431     ssize_t ret;
2432    
2433     + if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2434     + return -EACCES;
2435     +
2436     if (len < sizeof(hdr))
2437     return -EINVAL;
2438    
2439     diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
2440     index e3ef28861be6..24f3ca2c4ad7 100644
2441     --- a/drivers/infiniband/core/uverbs_main.c
2442     +++ b/drivers/infiniband/core/uverbs_main.c
2443     @@ -48,6 +48,8 @@
2444    
2445     #include <asm/uaccess.h>
2446    
2447     +#include <rdma/ib.h>
2448     +
2449     #include "uverbs.h"
2450    
2451     MODULE_AUTHOR("Roland Dreier");
2452     @@ -682,6 +684,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
2453     int srcu_key;
2454     ssize_t ret;
2455    
2456     + if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
2457     + return -EACCES;
2458     +
2459     if (count < sizeof hdr)
2460     return -EINVAL;
2461    
2462     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
2463     index c4e091528390..721d63f5b461 100644
2464     --- a/drivers/infiniband/hw/mlx5/main.c
2465     +++ b/drivers/infiniband/hw/mlx5/main.c
2466     @@ -273,7 +273,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
2467     sizeof(struct mlx5_wqe_ctrl_seg)) /
2468     sizeof(struct mlx5_wqe_data_seg);
2469     props->max_sge = min(max_rq_sg, max_sq_sg);
2470     - props->max_sge_rd = props->max_sge;
2471     + props->max_sge_rd = MLX5_MAX_SGE_RD;
2472     props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
2473     props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
2474     props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
2475     diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
2476     index e449e394963f..24f4a782e0f4 100644
2477     --- a/drivers/infiniband/hw/qib/qib_file_ops.c
2478     +++ b/drivers/infiniband/hw/qib/qib_file_ops.c
2479     @@ -45,6 +45,8 @@
2480     #include <linux/export.h>
2481     #include <linux/uio.h>
2482    
2483     +#include <rdma/ib.h>
2484     +
2485     #include "qib.h"
2486     #include "qib_common.h"
2487     #include "qib_user_sdma.h"
2488     @@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
2489     ssize_t ret = 0;
2490     void *dest;
2491    
2492     + if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
2493     + return -EACCES;
2494     +
2495     if (count < sizeof(cmd.type)) {
2496     ret = -EINVAL;
2497     goto bail;
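
The same three-line guard is added to every infiniband write() entry point above: refuse the command before parsing anything if the caller context is not safe. A sketch of the fail-closed shape (check_caller() is a stand-in for ib_safe_file_access(), whose real check lives in rdma/ib.h):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool check_caller(void)
    {
            return true; /* stub for the credential/context check */
    }

    static long dev_write(const char *buf, size_t len, size_t hdr_len)
    {
            (void)buf;
            if (!check_caller())
                    return -EACCES; /* reject before touching the buffer */
            if (len < hdr_len)
                    return -EINVAL;
            /* ... parse header, dispatch command ... */
            return (long)len;
    }

    int main(void)
    {
            char cmd[32] = { 0 };
            printf("%ld\n", dev_write(cmd, sizeof(cmd), 16));
            return 0;
    }
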
2498     diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
2499     index 3f02e0e03d12..67aab86048ad 100644
2500     --- a/drivers/input/misc/pmic8xxx-pwrkey.c
2501     +++ b/drivers/input/misc/pmic8xxx-pwrkey.c
2502     @@ -353,7 +353,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
2503     if (of_property_read_u32(pdev->dev.of_node, "debounce", &kpd_delay))
2504     kpd_delay = 15625;
2505    
2506     - if (kpd_delay > 62500 || kpd_delay == 0) {
2507     + /* Valid range of pwr key trigger delay is 1/64 sec to 2 seconds. */
2508     + if (kpd_delay > USEC_PER_SEC * 2 || kpd_delay < USEC_PER_SEC / 64) {
2509     dev_err(&pdev->dev, "invalid power key trigger delay\n");
2510     return -EINVAL;
2511     }
2512     @@ -385,8 +386,8 @@ static int pmic8xxx_pwrkey_probe(struct platform_device *pdev)
2513     pwr->name = "pmic8xxx_pwrkey";
2514     pwr->phys = "pmic8xxx_pwrkey/input0";
2515    
2516     - delay = (kpd_delay << 10) / USEC_PER_SEC;
2517     - delay = 1 + ilog2(delay);
2518     + delay = (kpd_delay << 6) / USEC_PER_SEC;
2519     + delay = ilog2(delay);
2520    
2521     err = regmap_read(regmap, PON_CNTL_1, &pon_cntl);
2522     if (err < 0) {
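
The corrected pwrkey conversion computes log2 of the delay expressed in 1/64-second units, so the valid range 1/64 s .. 2 s maps onto register values 0 .. 7. A userspace recomputation of the arithmetic:

    #include <stdio.h>

    #define USEC_PER_SEC 1000000UL

    static unsigned int ilog2(unsigned long v)
    {
            unsigned int r = 0;
            while (v >>= 1)
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned long kpd_delay = 15625; /* us, the DT default */
            unsigned long units = (kpd_delay << 6) / USEC_PER_SEC; /* 1 */

            printf("register field = %u\n", ilog2(units));        /* 0 */
            printf("2s maps to %u\n",
                   ilog2((2 * USEC_PER_SEC << 6) / USEC_PER_SEC)); /* 7 */
            return 0;
    }
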
2523     diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
2524     index 3a7f3a4a4396..7c18249d6c8e 100644
2525     --- a/drivers/input/tablet/gtco.c
2526     +++ b/drivers/input/tablet/gtco.c
2527     @@ -858,6 +858,14 @@ static int gtco_probe(struct usb_interface *usbinterface,
2528     goto err_free_buf;
2529     }
2530    
2531     + /* Sanity check that a device has an endpoint */
2532     + if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) {
2533     + dev_err(&usbinterface->dev,
2534     + "Invalid number of endpoints\n");
2535     + error = -EINVAL;
2536     + goto err_free_urb;
2537     + }
2538     +
2539     /*
2540     * The endpoint is always altsetting 0, we know this since we know
2541     * this device only has one interrupt endpoint
2542     @@ -879,7 +887,7 @@ static int gtco_probe(struct usb_interface *usbinterface,
2543     * HID report descriptor
2544     */
2545     if (usb_get_extra_descriptor(usbinterface->cur_altsetting,
2546     - HID_DEVICE_TYPE, &hid_desc) != 0){
2547     + HID_DEVICE_TYPE, &hid_desc) != 0) {
2548     dev_err(&usbinterface->dev,
2549     "Can't retrieve exta USB descriptor to get hid report descriptor length\n");
2550     error = -EIO;
2551     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2552     index fc836f523afa..b9319b76a8a1 100644
2553     --- a/drivers/iommu/amd_iommu.c
2554     +++ b/drivers/iommu/amd_iommu.c
2555     @@ -91,6 +91,7 @@ struct iommu_dev_data {
2556     struct list_head dev_data_list; /* For global dev_data_list */
2557     struct protection_domain *domain; /* Domain the device is bound to */
2558     u16 devid; /* PCI Device ID */
2559     + u16 alias; /* Alias Device ID */
2560     bool iommu_v2; /* Device can make use of IOMMUv2 */
2561     bool passthrough; /* Device is identity mapped */
2562     struct {
2563     @@ -125,6 +126,13 @@ static struct protection_domain *to_pdomain(struct iommu_domain *dom)
2564     return container_of(dom, struct protection_domain, domain);
2565     }
2566    
2567     +static inline u16 get_device_id(struct device *dev)
2568     +{
2569     + struct pci_dev *pdev = to_pci_dev(dev);
2570     +
2571     + return PCI_DEVID(pdev->bus->number, pdev->devfn);
2572     +}
2573     +
2574     static struct iommu_dev_data *alloc_dev_data(u16 devid)
2575     {
2576     struct iommu_dev_data *dev_data;
2577     @@ -162,6 +170,68 @@ out_unlock:
2578     return dev_data;
2579     }
2580    
2581     +static int __last_alias(struct pci_dev *pdev, u16 alias, void *data)
2582     +{
2583     + *(u16 *)data = alias;
2584     + return 0;
2585     +}
2586     +
2587     +static u16 get_alias(struct device *dev)
2588     +{
2589     + struct pci_dev *pdev = to_pci_dev(dev);
2590     + u16 devid, ivrs_alias, pci_alias;
2591     +
2592     + devid = get_device_id(dev);
2593     + ivrs_alias = amd_iommu_alias_table[devid];
2594     + pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
2595     +
2596     + if (ivrs_alias == pci_alias)
2597     + return ivrs_alias;
2598     +
2599     + /*
2600     + * DMA alias showdown
2601     + *
2602     + * The IVRS is fairly reliable in telling us about aliases, but it
2603     + * can't know about every screwy device. If we don't have an IVRS
2604     + * reported alias, use the PCI reported alias. In that case we may
2605     + * still need to initialize the rlookup and dev_table entries if the
2606     + * alias is to a non-existent device.
2607     + */
2608     + if (ivrs_alias == devid) {
2609     + if (!amd_iommu_rlookup_table[pci_alias]) {
2610     + amd_iommu_rlookup_table[pci_alias] =
2611     + amd_iommu_rlookup_table[devid];
2612     + memcpy(amd_iommu_dev_table[pci_alias].data,
2613     + amd_iommu_dev_table[devid].data,
2614     + sizeof(amd_iommu_dev_table[pci_alias].data));
2615     + }
2616     +
2617     + return pci_alias;
2618     + }
2619     +
2620     + pr_info("AMD-Vi: Using IVRS reported alias %02x:%02x.%d "
2621     + "for device %s[%04x:%04x], kernel reported alias "
2622     + "%02x:%02x.%d\n", PCI_BUS_NUM(ivrs_alias), PCI_SLOT(ivrs_alias),
2623     + PCI_FUNC(ivrs_alias), dev_name(dev), pdev->vendor, pdev->device,
2624     + PCI_BUS_NUM(pci_alias), PCI_SLOT(pci_alias),
2625     + PCI_FUNC(pci_alias));
2626     +
2627     + /*
2628     + * If we don't have a PCI DMA alias and the IVRS alias is on the same
2629     + * bus, then the IVRS table may know about a quirk that we don't.
2630     + */
2631     + if (pci_alias == devid &&
2632     + PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) {
2633     + pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN;
2634     + pdev->dma_alias_devfn = ivrs_alias & 0xff;
2635     + pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n",
2636     + PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias),
2637     + dev_name(dev));
2638     + }
2639     +
2640     + return ivrs_alias;
2641     +}
2642     +
2643     static struct iommu_dev_data *find_dev_data(u16 devid)
2644     {
2645     struct iommu_dev_data *dev_data;
2646     @@ -174,13 +244,6 @@ static struct iommu_dev_data *find_dev_data(u16 devid)
2647     return dev_data;
2648     }
2649    
2650     -static inline u16 get_device_id(struct device *dev)
2651     -{
2652     - struct pci_dev *pdev = to_pci_dev(dev);
2653     -
2654     - return PCI_DEVID(pdev->bus->number, pdev->devfn);
2655     -}
2656     -
2657     static struct iommu_dev_data *get_dev_data(struct device *dev)
2658     {
2659     return dev->archdata.iommu;
2660     @@ -308,6 +371,8 @@ static int iommu_init_device(struct device *dev)
2661     if (!dev_data)
2662     return -ENOMEM;
2663    
2664     + dev_data->alias = get_alias(dev);
2665     +
2666     if (pci_iommuv2_capable(pdev)) {
2667     struct amd_iommu *iommu;
2668    
2669     @@ -328,7 +393,7 @@ static void iommu_ignore_device(struct device *dev)
2670     u16 devid, alias;
2671    
2672     devid = get_device_id(dev);
2673     - alias = amd_iommu_alias_table[devid];
2674     + alias = get_alias(dev);
2675    
2676     memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
2677     memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
2678     @@ -1017,7 +1082,7 @@ static int device_flush_dte(struct iommu_dev_data *dev_data)
2679     int ret;
2680    
2681     iommu = amd_iommu_rlookup_table[dev_data->devid];
2682     - alias = amd_iommu_alias_table[dev_data->devid];
2683     + alias = dev_data->alias;
2684    
2685     ret = iommu_flush_dte(iommu, dev_data->devid);
2686     if (!ret && alias != dev_data->devid)
2687     @@ -1891,7 +1956,7 @@ static void do_attach(struct iommu_dev_data *dev_data,
2688     bool ats;
2689    
2690     iommu = amd_iommu_rlookup_table[dev_data->devid];
2691     - alias = amd_iommu_alias_table[dev_data->devid];
2692     + alias = dev_data->alias;
2693     ats = dev_data->ats.enabled;
2694    
2695     /* Update data structures */
2696     @@ -1925,7 +1990,7 @@ static void do_detach(struct iommu_dev_data *dev_data)
2697     return;
2698    
2699     iommu = amd_iommu_rlookup_table[dev_data->devid];
2700     - alias = amd_iommu_alias_table[dev_data->devid];
2701     + alias = dev_data->alias;
2702    
2703     /* decrease reference counters */
2704     dev_data->domain->dev_iommu[iommu->index] -= 1;
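
get_alias() above finds the PCI-reported alias with pci_for_each_dma_alias() and a callback that simply records the last alias visited. A toy model of that callback-capture idiom (the iterator here is a stub, not the PCI core):

    #include <stdint.h>
    #include <stdio.h>

    typedef int (*alias_fn)(uint16_t alias, void *data);

    static int for_each_alias(const uint16_t *aliases, int n,
                              alias_fn fn, void *data)
    {
            for (int i = 0; i < n; i++) {
                    int ret = fn(aliases[i], data);
                    if (ret)
                            return ret;
            }
            return 0;
    }

    static int last_alias(uint16_t alias, void *data)
    {
            *(uint16_t *)data = alias;
            return 0; /* keep iterating; the last write wins */
    }

    int main(void)
    {
            uint16_t aliases[] = { 0x0008, 0x0100, 0x0120 };
            uint16_t result = 0;

            for_each_alias(aliases, 3, last_alias, &result);
            printf("dma alias: %#06x\n", result); /* 0x0120 */
            return 0;
    }
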
2705     diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
2706     index 72d6182666cb..58f2fe687a24 100644
2707     --- a/drivers/iommu/dma-iommu.c
2708     +++ b/drivers/iommu/dma-iommu.c
2709     @@ -403,7 +403,7 @@ static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
2710     unsigned int s_length = sg_dma_len(s);
2711     unsigned int s_dma_len = s->length;
2712    
2713     - s->offset = s_offset;
2714     + s->offset += s_offset;
2715     s->length = s_length;
2716     sg_dma_address(s) = dma_addr + s_offset;
2717     dma_addr += s_dma_len;
2718     @@ -422,7 +422,7 @@ static void __invalidate_sg(struct scatterlist *sg, int nents)
2719    
2720     for_each_sg(sg, s, nents, i) {
2721     if (sg_dma_address(s) != DMA_ERROR_CODE)
2722     - s->offset = sg_dma_address(s);
2723     + s->offset += sg_dma_address(s);
2724     if (sg_dma_len(s))
2725     s->length = sg_dma_len(s);
2726     sg_dma_address(s) = DMA_ERROR_CODE;
2727     diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
2728     index efe50845939d..17304705f2cf 100644
2729     --- a/drivers/irqchip/irq-mxs.c
2730     +++ b/drivers/irqchip/irq-mxs.c
2731     @@ -183,7 +183,7 @@ static void __iomem * __init icoll_init_iobase(struct device_node *np)
2732     void __iomem *icoll_base;
2733    
2734     icoll_base = of_io_request_and_map(np, 0, np->name);
2735     - if (!icoll_base)
2736     + if (IS_ERR(icoll_base))
2737     panic("%s: unable to map resource", np->full_name);
2738     return icoll_base;
2739     }
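
Both irqchip fixes here and below correct the same error check: of_io_request_and_map() reports failure with an ERR_PTR-encoded pointer, never NULL, so a plain NULL test can never fire. A minimal userspace model of the encoding:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error)      { return (void *)error; }
    static long PTR_ERR(const void *ptr)  { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
            /* errors live in the top 4 KiB of the address space */
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *base = ERR_PTR(-12); /* -ENOMEM */

            printf("NULL check fires: %d\n", base == NULL);  /* 0: bug */
            printf("IS_ERR fires: %d, err=%ld\n",
                   IS_ERR(base), PTR_ERR(base));             /* 1, -12 */
            return 0;
    }
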
2740     diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c
2741     index 4ef178078e5b..1254e98f6b57 100644
2742     --- a/drivers/irqchip/irq-sunxi-nmi.c
2743     +++ b/drivers/irqchip/irq-sunxi-nmi.c
2744     @@ -154,9 +154,9 @@ static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
2745    
2746     gc = irq_get_domain_generic_chip(domain, 0);
2747     gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
2748     - if (!gc->reg_base) {
2749     + if (IS_ERR(gc->reg_base)) {
2750     pr_err("unable to map resource\n");
2751     - ret = -ENOMEM;
2752     + ret = PTR_ERR(gc->reg_base);
2753     goto fail_irqd_remove;
2754     }
2755    
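Both irqchip fixes above correct the same class of bug: of_io_request_and_map() reports failure through an ERR_PTR-encoded pointer, never NULL, so the result must be tested with IS_ERR() and the errno recovered with PTR_ERR(). Below is a minimal userspace sketch of that convention; the ERR_PTR helpers are re-implemented here purely for illustration (in the kernel they come from linux/err.h), and fake_map() is a hypothetical stand-in for the mapping call.

    #include <errno.h>
    #include <stdio.h>

    /* Illustration-only copy of the kernel's ERR_PTR convention:
     * the top 4095 pointer values encode negative errnos. */
    #define MAX_ERRNO 4095

    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical stand-in for of_io_request_and_map(); always fails. */
    static void *fake_map(void)
    {
            return ERR_PTR(-ENOMEM);
    }

    int main(void)
    {
            void *base = fake_map();

            /* A NULL test, as in the old code, would miss this error. */
            if (IS_ERR(base)) {
                    printf("map failed: %ld\n", PTR_ERR(base));
                    return 1;
            }
            return 0;
    }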
2756     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
2757     index 27f2ef300f8b..3970cda10080 100644
2758     --- a/drivers/md/dm-cache-metadata.c
2759     +++ b/drivers/md/dm-cache-metadata.c
2760     @@ -867,39 +867,55 @@ static int blocks_are_unmapped_or_clean(struct dm_cache_metadata *cmd,
2761     return 0;
2762     }
2763    
2764     -#define WRITE_LOCK(cmd) \
2765     - down_write(&cmd->root_lock); \
2766     - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2767     - up_write(&cmd->root_lock); \
2768     - return -EINVAL; \
2769     +static bool cmd_write_lock(struct dm_cache_metadata *cmd)
2770     +{
2771     + down_write(&cmd->root_lock);
2772     + if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) {
2773     + up_write(&cmd->root_lock);
2774     + return false;
2775     }
2776     + return true;
2777     +}
2778    
2779     -#define WRITE_LOCK_VOID(cmd) \
2780     - down_write(&cmd->root_lock); \
2781     - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2782     - up_write(&cmd->root_lock); \
2783     - return; \
2784     - }
2785     +#define WRITE_LOCK(cmd) \
2786     + do { \
2787     + if (!cmd_write_lock((cmd))) \
2788     + return -EINVAL; \
2789     + } while(0)
2790     +
2791     +#define WRITE_LOCK_VOID(cmd) \
2792     + do { \
2793     + if (!cmd_write_lock((cmd))) \
2794     + return; \
2795     + } while(0)
2796    
2797     #define WRITE_UNLOCK(cmd) \
2798     - up_write(&cmd->root_lock)
2799     + up_write(&(cmd)->root_lock)
2800    
2801     -#define READ_LOCK(cmd) \
2802     - down_read(&cmd->root_lock); \
2803     - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2804     - up_read(&cmd->root_lock); \
2805     - return -EINVAL; \
2806     +static bool cmd_read_lock(struct dm_cache_metadata *cmd)
2807     +{
2808     + down_read(&cmd->root_lock);
2809     + if (cmd->fail_io) {
2810     + up_read(&cmd->root_lock);
2811     + return false;
2812     }
2813     + return true;
2814     +}
2815    
2816     -#define READ_LOCK_VOID(cmd) \
2817     - down_read(&cmd->root_lock); \
2818     - if (cmd->fail_io || dm_bm_is_read_only(cmd->bm)) { \
2819     - up_read(&cmd->root_lock); \
2820     - return; \
2821     - }
2822     +#define READ_LOCK(cmd) \
2823     + do { \
2824     + if (!cmd_read_lock((cmd))) \
2825     + return -EINVAL; \
2826     + } while(0)
2827     +
2828     +#define READ_LOCK_VOID(cmd) \
2829     + do { \
2830     + if (!cmd_read_lock((cmd))) \
2831     + return; \
2832     + } while(0)
2833    
2834     #define READ_UNLOCK(cmd) \
2835     - up_read(&cmd->root_lock)
2836     + up_read(&(cmd)->root_lock)
2837    
2838     int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
2839     {
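The dm-cache-metadata hunk above converts multi-statement lock macros into small helpers wrapped in do { } while(0). The wrapper is what makes each macro behave as a single statement; a bare two-statement macro silently misbehaves as the body of an unbraced if. A self-contained sketch of the failure mode, using hypothetical macros rather than the ones from the patch:

    #include <stdio.h>

    /* Unsafe: expands to two statements, so only the first one
     * is governed by a surrounding unbraced if. */
    #define LOG_AND_COUNT_BAD(n)  printf("hit\n"); (n)++

    /* Safe: do { } while (0) turns the body into one statement. */
    #define LOG_AND_COUNT(n)  do { printf("hit\n"); (n)++; } while (0)

    int main(void)
    {
            int n = 0;

            if (0)
                    LOG_AND_COUNT_BAD(n);   /* (n)++ still runs! */

            if (0)
                    LOG_AND_COUNT(n);       /* nothing runs, as intended */

            printf("n = %d\n", n);          /* prints 1, exposing the bug */
            return 0;
    }

The patch also hoists the lock-and-check logic into real functions (cmd_write_lock()/cmd_read_lock()) and leaves only the early return in the macro, since a return cannot be moved out of a macro into a helper.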
2840     diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
2841     index 33bdd81065e8..11f39791ec33 100644
2842     --- a/drivers/media/v4l2-core/videobuf2-core.c
2843     +++ b/drivers/media/v4l2-core/videobuf2-core.c
2844     @@ -1502,7 +1502,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
2845     * Will sleep if required for nonblocking == false.
2846     */
2847     static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
2848     - int nonblocking)
2849     + void *pb, int nonblocking)
2850     {
2851     unsigned long flags;
2852     int ret;
2853     @@ -1523,10 +1523,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
2854     /*
2855     * Only remove the buffer from done_list if v4l2_buffer can handle all
2856     * the planes.
2857     - * Verifying planes is NOT necessary since it already has been checked
2858     - * before the buffer is queued/prepared. So it can never fail.
2859     */
2860     - list_del(&(*vb)->done_entry);
2861     + ret = call_bufop(q, verify_planes_array, *vb, pb);
2862     + if (!ret)
2863     + list_del(&(*vb)->done_entry);
2864     spin_unlock_irqrestore(&q->done_lock, flags);
2865    
2866     return ret;
2867     @@ -1604,7 +1604,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, void *pb, bool nonblocking)
2868     struct vb2_buffer *vb = NULL;
2869     int ret;
2870    
2871     - ret = __vb2_get_done_vb(q, &vb, nonblocking);
2872     + ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
2873     if (ret < 0)
2874     return ret;
2875    
2876     diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
2877     index dbec5923fcf0..3c3b517f1d1c 100644
2878     --- a/drivers/media/v4l2-core/videobuf2-memops.c
2879     +++ b/drivers/media/v4l2-core/videobuf2-memops.c
2880     @@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
2881     vec = frame_vector_create(nr);
2882     if (!vec)
2883     return ERR_PTR(-ENOMEM);
2884     - ret = get_vaddr_frames(start, nr, write, 1, vec);
2885     + ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
2886     if (ret < 0)
2887     goto out_destroy;
2888     /* We accept only complete set of PFNs */
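The videobuf2-memops change rounds the user address down to a page boundary before pinning, since get_vaddr_frames() operates on whole pages. Rounding down with a mask, in a standalone sketch (4 KiB pages assumed for the example):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long start = 0x12345;

            /* Clear the low bits to get the containing page base. */
            printf("%#lx\n", start & PAGE_MASK);   /* prints 0x12000 */
            return 0;
    }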
2889     diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
2890     index 502984c724ff..6c441be8f893 100644
2891     --- a/drivers/media/v4l2-core/videobuf2-v4l2.c
2892     +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
2893     @@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
2894     return 0;
2895     }
2896    
2897     +static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
2898     +{
2899     + return __verify_planes_array(vb, pb);
2900     +}
2901     +
2902     /**
2903     * __verify_length() - Verify that the bytesused value for each plane fits in
2904     * the plane length and that the data offset doesn't exceed the bytesused value.
2905     @@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
2906     }
2907    
2908     static const struct vb2_buf_ops v4l2_buf_ops = {
2909     + .verify_planes_array = __verify_planes_array_core,
2910     .fill_user_buffer = __fill_v4l2_buffer,
2911     .fill_vb2_buffer = __fill_vb2_buffer,
2912     .set_timestamp = __set_timestamp,
2913     diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
2914     index 22892c701c63..4bf7d50b1bc7 100644
2915     --- a/drivers/misc/Kconfig
2916     +++ b/drivers/misc/Kconfig
2917     @@ -439,7 +439,7 @@ config ARM_CHARLCD
2918     still useful.
2919    
2920     config BMP085
2921     - bool
2922     + tristate
2923     depends on SYSFS
2924    
2925     config BMP085_I2C
2926     diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
2927     index 15e88078ba1e..f1a0b99f5a9a 100644
2928     --- a/drivers/misc/ad525x_dpot.c
2929     +++ b/drivers/misc/ad525x_dpot.c
2930     @@ -216,7 +216,7 @@ static s32 dpot_read_i2c(struct dpot_data *dpot, u8 reg)
2931     */
2932     value = swab16(value);
2933    
2934     - if (dpot->uid == DPOT_UID(AD5271_ID))
2935     + if (dpot->uid == DPOT_UID(AD5274_ID))
2936     value = value >> 2;
2937     return value;
2938     default:
2939     diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
2940     index 09a406058c46..efbb6945eb18 100644
2941     --- a/drivers/misc/cxl/irq.c
2942     +++ b/drivers/misc/cxl/irq.c
2943     @@ -288,7 +288,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
2944     void cxl_unmap_irq(unsigned int virq, void *cookie)
2945     {
2946     free_irq(virq, cookie);
2947     - irq_dispose_mapping(virq);
2948     }
2949    
2950     static int cxl_register_one_irq(struct cxl *adapter,
2951     diff --git a/drivers/misc/mic/scif/scif_rma.c b/drivers/misc/mic/scif/scif_rma.c
2952     index 8310b4dbff06..6a451bd65bf3 100644
2953     --- a/drivers/misc/mic/scif/scif_rma.c
2954     +++ b/drivers/misc/mic/scif/scif_rma.c
2955     @@ -1511,7 +1511,7 @@ off_t scif_register_pinned_pages(scif_epd_t epd,
2956     if ((map_flags & SCIF_MAP_FIXED) &&
2957     ((ALIGN(offset, PAGE_SIZE) != offset) ||
2958     (offset < 0) ||
2959     - (offset + (off_t)len < offset)))
2960     + (len > LONG_MAX - offset)))
2961     return -EINVAL;
2962    
2963     might_sleep();
2964     @@ -1614,7 +1614,7 @@ off_t scif_register(scif_epd_t epd, void *addr, size_t len, off_t offset,
2965     if ((map_flags & SCIF_MAP_FIXED) &&
2966     ((ALIGN(offset, PAGE_SIZE) != offset) ||
2967     (offset < 0) ||
2968     - (offset + (off_t)len < offset)))
2969     + (len > LONG_MAX - offset)))
2970     return -EINVAL;
2971    
2972     /* Unsupported protection requested */
2973     @@ -1732,7 +1732,8 @@ scif_unregister(scif_epd_t epd, off_t offset, size_t len)
2974    
2975     /* Offset is not page aligned or offset+len wraps around */
2976     if ((ALIGN(offset, PAGE_SIZE) != offset) ||
2977     - (offset + (off_t)len < offset))
2978     + (offset < 0) ||
2979     + (len > LONG_MAX - offset))
2980     return -EINVAL;
2981    
2982     err = scif_verify_epd(ep);
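All three SCIF hunks replace the test `offset + (off_t)len < offset` — which depends on signed overflow actually wrapping, undefined behaviour in C — with `len > LONG_MAX - offset`, which rejects the same inputs without ever overflowing (offset has already been checked to be non-negative). The pattern in isolation, as a runnable sketch:

    #include <limits.h>
    #include <stdio.h>

    /* Overflow-safe: true iff [offset, offset + len) fits in a long.
     * Caller must already have verified offset >= 0. */
    static int range_ok(long offset, unsigned long len)
    {
            return len <= (unsigned long)(LONG_MAX - offset);
    }

    int main(void)
    {
            printf("%d\n", range_ok(0, 4096));              /* 1: fits */
            printf("%d\n", range_ok(LONG_MAX - 10, 4096));  /* 0: would wrap */
            return 0;
    }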
2983     diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
2984     index 12c6190c6e33..4a07ba1195b5 100644
2985     --- a/drivers/mtd/nand/brcmnand/brcmnand.c
2986     +++ b/drivers/mtd/nand/brcmnand/brcmnand.c
2987     @@ -309,6 +309,36 @@ static const u16 brcmnand_regs_v60[] = {
2988     [BRCMNAND_FC_BASE] = 0x400,
2989     };
2990    
2991     +/* BRCMNAND v7.1 */
2992     +static const u16 brcmnand_regs_v71[] = {
2993     + [BRCMNAND_CMD_START] = 0x04,
2994     + [BRCMNAND_CMD_EXT_ADDRESS] = 0x08,
2995     + [BRCMNAND_CMD_ADDRESS] = 0x0c,
2996     + [BRCMNAND_INTFC_STATUS] = 0x14,
2997     + [BRCMNAND_CS_SELECT] = 0x18,
2998     + [BRCMNAND_CS_XOR] = 0x1c,
2999     + [BRCMNAND_LL_OP] = 0x20,
3000     + [BRCMNAND_CS0_BASE] = 0x50,
3001     + [BRCMNAND_CS1_BASE] = 0,
3002     + [BRCMNAND_CORR_THRESHOLD] = 0xdc,
3003     + [BRCMNAND_CORR_THRESHOLD_EXT] = 0xe0,
3004     + [BRCMNAND_UNCORR_COUNT] = 0xfc,
3005     + [BRCMNAND_CORR_COUNT] = 0x100,
3006     + [BRCMNAND_CORR_EXT_ADDR] = 0x10c,
3007     + [BRCMNAND_CORR_ADDR] = 0x110,
3008     + [BRCMNAND_UNCORR_EXT_ADDR] = 0x114,
3009     + [BRCMNAND_UNCORR_ADDR] = 0x118,
3010     + [BRCMNAND_SEMAPHORE] = 0x150,
3011     + [BRCMNAND_ID] = 0x194,
3012     + [BRCMNAND_ID_EXT] = 0x198,
3013     + [BRCMNAND_LL_RDATA] = 0x19c,
3014     + [BRCMNAND_OOB_READ_BASE] = 0x200,
3015     + [BRCMNAND_OOB_READ_10_BASE] = 0,
3016     + [BRCMNAND_OOB_WRITE_BASE] = 0x280,
3017     + [BRCMNAND_OOB_WRITE_10_BASE] = 0,
3018     + [BRCMNAND_FC_BASE] = 0x400,
3019     +};
3020     +
3021     enum brcmnand_cs_reg {
3022     BRCMNAND_CS_CFG_EXT = 0,
3023     BRCMNAND_CS_CFG,
3024     @@ -404,7 +434,9 @@ static int brcmnand_revision_init(struct brcmnand_controller *ctrl)
3025     }
3026    
3027     /* Register offsets */
3028     - if (ctrl->nand_version >= 0x0600)
3029     + if (ctrl->nand_version >= 0x0701)
3030     + ctrl->reg_offsets = brcmnand_regs_v71;
3031     + else if (ctrl->nand_version >= 0x0600)
3032     ctrl->reg_offsets = brcmnand_regs_v60;
3033     else if (ctrl->nand_version >= 0x0500)
3034     ctrl->reg_offsets = brcmnand_regs_v50;
3035     diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
3036     index 3ff583f165cd..ce7b2cab5762 100644
3037     --- a/drivers/mtd/nand/nand_base.c
3038     +++ b/drivers/mtd/nand/nand_base.c
3039     @@ -3979,7 +3979,6 @@ static int nand_dt_init(struct mtd_info *mtd, struct nand_chip *chip,
3040     * This is the first phase of the normal nand_scan() function. It reads the
3041     * flash ID and sets up MTD fields accordingly.
3042     *
3043     - * The mtd->owner field must be set to the module of the caller.
3044     */
3045     int nand_scan_ident(struct mtd_info *mtd, int maxchips,
3046     struct nand_flash_dev *table)
3047     @@ -4403,19 +4402,12 @@ EXPORT_SYMBOL(nand_scan_tail);
3048     *
3049     * This fills out all the uninitialized function pointers with the defaults.
3050     * The flash ID is read and the mtd/chip structures are filled with the
3051     - * appropriate values. The mtd->owner field must be set to the module of the
3052     - * caller.
3053     + * appropriate values.
3054     */
3055     int nand_scan(struct mtd_info *mtd, int maxchips)
3056     {
3057     int ret;
3058    
3059     - /* Many callers got this wrong, so check for it for a while... */
3060     - if (!mtd->owner && caller_is_module()) {
3061     - pr_crit("%s called with NULL mtd->owner!\n", __func__);
3062     - BUG();
3063     - }
3064     -
3065     ret = nand_scan_ident(mtd, maxchips, NULL);
3066     if (!ret)
3067     ret = nand_scan_tail(mtd);
3068     diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
3069     index 32477c4eb421..37e4135ab213 100644
3070     --- a/drivers/mtd/spi-nor/spi-nor.c
3071     +++ b/drivers/mtd/spi-nor/spi-nor.c
3072     @@ -1067,45 +1067,6 @@ static int spansion_quad_enable(struct spi_nor *nor)
3073     return 0;
3074     }
3075    
3076     -static int micron_quad_enable(struct spi_nor *nor)
3077     -{
3078     - int ret;
3079     - u8 val;
3080     -
3081     - ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3082     - if (ret < 0) {
3083     - dev_err(nor->dev, "error %d reading EVCR\n", ret);
3084     - return ret;
3085     - }
3086     -
3087     - write_enable(nor);
3088     -
3089     - /* set EVCR, enable quad I/O */
3090     - nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON;
3091     - ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1);
3092     - if (ret < 0) {
3093     - dev_err(nor->dev, "error while writing EVCR register\n");
3094     - return ret;
3095     - }
3096     -
3097     - ret = spi_nor_wait_till_ready(nor);
3098     - if (ret)
3099     - return ret;
3100     -
3101     - /* read EVCR and check it */
3102     - ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1);
3103     - if (ret < 0) {
3104     - dev_err(nor->dev, "error %d reading EVCR\n", ret);
3105     - return ret;
3106     - }
3107     - if (val & EVCR_QUAD_EN_MICRON) {
3108     - dev_err(nor->dev, "Micron EVCR Quad bit not clear\n");
3109     - return -EINVAL;
3110     - }
3111     -
3112     - return 0;
3113     -}
3114     -
3115     static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3116     {
3117     int status;
3118     @@ -1119,12 +1080,7 @@ static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info)
3119     }
3120     return status;
3121     case SNOR_MFR_MICRON:
3122     - status = micron_quad_enable(nor);
3123     - if (status) {
3124     - dev_err(nor->dev, "Micron quad-read not enabled\n");
3125     - return -EINVAL;
3126     - }
3127     - return status;
3128     + return 0;
3129     default:
3130     status = spansion_quad_enable(nor);
3131     if (status) {
3132     diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
3133     index 973dade2d07f..1257b18e6b90 100644
3134     --- a/drivers/net/ethernet/jme.c
3135     +++ b/drivers/net/ethernet/jme.c
3136     @@ -270,11 +270,17 @@ jme_reset_mac_processor(struct jme_adapter *jme)
3137     }
3138    
3139     static inline void
3140     -jme_clear_pm(struct jme_adapter *jme)
3141     +jme_clear_pm_enable_wol(struct jme_adapter *jme)
3142     {
3143     jwrite32(jme, JME_PMCS, PMCS_STMASK | jme->reg_pmcs);
3144     }
3145    
3146     +static inline void
3147     +jme_clear_pm_disable_wol(struct jme_adapter *jme)
3148     +{
3149     + jwrite32(jme, JME_PMCS, PMCS_STMASK);
3150     +}
3151     +
3152     static int
3153     jme_reload_eeprom(struct jme_adapter *jme)
3154     {
3155     @@ -1853,7 +1859,7 @@ jme_open(struct net_device *netdev)
3156     struct jme_adapter *jme = netdev_priv(netdev);
3157     int rc;
3158    
3159     - jme_clear_pm(jme);
3160     + jme_clear_pm_disable_wol(jme);
3161     JME_NAPI_ENABLE(jme);
3162    
3163     tasklet_init(&jme->linkch_task, jme_link_change_tasklet,
3164     @@ -1925,11 +1931,11 @@ jme_wait_link(struct jme_adapter *jme)
3165     static void
3166     jme_powersave_phy(struct jme_adapter *jme)
3167     {
3168     - if (jme->reg_pmcs) {
3169     + if (jme->reg_pmcs && device_may_wakeup(&jme->pdev->dev)) {
3170     jme_set_100m_half(jme);
3171     if (jme->reg_pmcs & (PMCS_LFEN | PMCS_LREN))
3172     jme_wait_link(jme);
3173     - jme_clear_pm(jme);
3174     + jme_clear_pm_enable_wol(jme);
3175     } else {
3176     jme_phy_off(jme);
3177     }
3178     @@ -2646,9 +2652,6 @@ jme_set_wol(struct net_device *netdev,
3179     if (wol->wolopts & WAKE_MAGIC)
3180     jme->reg_pmcs |= PMCS_MFEN;
3181    
3182     - jwrite32(jme, JME_PMCS, jme->reg_pmcs);
3183     - device_set_wakeup_enable(&jme->pdev->dev, !!(jme->reg_pmcs));
3184     -
3185     return 0;
3186     }
3187    
3188     @@ -3172,8 +3175,8 @@ jme_init_one(struct pci_dev *pdev,
3189     jme->mii_if.mdio_read = jme_mdio_read;
3190     jme->mii_if.mdio_write = jme_mdio_write;
3191    
3192     - jme_clear_pm(jme);
3193     - device_set_wakeup_enable(&pdev->dev, true);
3194     + jme_clear_pm_disable_wol(jme);
3195     + device_init_wakeup(&pdev->dev, true);
3196    
3197     jme_set_phyfifo_5level(jme);
3198     jme->pcirev = pdev->revision;
3199     @@ -3304,7 +3307,7 @@ jme_resume(struct device *dev)
3200     if (!netif_running(netdev))
3201     return 0;
3202    
3203     - jme_clear_pm(jme);
3204     + jme_clear_pm_disable_wol(jme);
3205     jme_phy_on(jme);
3206     if (test_bit(JME_FLAG_SSET, &jme->flags))
3207     jme_set_settings(netdev, &jme->old_ecmd);
3208     diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3209     index e88afac51c5d..f96ab2f4b90e 100644
3210     --- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3211     +++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
3212     @@ -1557,6 +1557,8 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
3213     /* the fw is stopped, the aux sta is dead: clean up driver state */
3214     iwl_mvm_del_aux_sta(mvm);
3215    
3216     + iwl_free_fw_paging(mvm);
3217     +
3218     /*
3219     * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
3220     * won't be called in this case).
3221     diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
3222     index c3adf2bcdc85..13c97f665ba8 100644
3223     --- a/drivers/net/wireless/iwlwifi/mvm/ops.c
3224     +++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
3225     @@ -645,8 +645,6 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
3226     for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
3227     kfree(mvm->nvm_sections[i].data);
3228    
3229     - iwl_free_fw_paging(mvm);
3230     -
3231     iwl_mvm_tof_clean(mvm);
3232    
3233     ieee80211_free_hw(mvm->hw);
3234     diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
3235     index 8c7204738aa3..00e0332e2544 100644
3236     --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
3237     +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
3238     @@ -731,8 +731,8 @@ static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
3239     */
3240     val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
3241     if (val & (BIT(1) | BIT(17))) {
3242     - IWL_INFO(trans,
3243     - "can't access the RSA semaphore it is write protected\n");
3244     + IWL_DEBUG_INFO(trans,
3245     + "can't access the RSA semaphore it is write protected\n");
3246     return 0;
3247     }
3248    
3249     diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
3250     index 5c717275a7fa..3d8019eb3d84 100644
3251     --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
3252     +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
3253     @@ -939,7 +939,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
3254     struct mtk_pinctrl *pctl = dev_get_drvdata(chip->dev);
3255     int eint_num, virq, eint_offset;
3256     unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc;
3257     - static const unsigned int dbnc_arr[] = {0 , 1, 16, 32, 64, 128, 256};
3258     + static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, 64000,
3259     + 128000, 256000};
3260     const struct mtk_desc_pin *pin;
3261     struct irq_data *d;
3262    
3263     @@ -957,9 +958,9 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
3264     if (!mtk_eint_can_en_debounce(pctl, eint_num))
3265     return -ENOSYS;
3266    
3267     - dbnc = ARRAY_SIZE(dbnc_arr);
3268     - for (i = 0; i < ARRAY_SIZE(dbnc_arr); i++) {
3269     - if (debounce <= dbnc_arr[i]) {
3270     + dbnc = ARRAY_SIZE(debounce_time);
3271     + for (i = 0; i < ARRAY_SIZE(debounce_time); i++) {
3272     + if (debounce <= debounce_time[i]) {
3273     dbnc = i;
3274     break;
3275     }
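Besides renaming the table, the MediaTek fix states its entries in microseconds (500 µs through 256 ms) so they share a unit with the requested debounce value; the loop then selects the smallest supported time that covers the request. A standalone sketch of that selection (table values copied from the patch, the helper name is invented):

    #include <stdio.h>

    static const unsigned int debounce_time[] = {
            500, 1000, 16000, 32000, 64000, 128000, 256000  /* microseconds */
    };
    #define N_TIMES (sizeof(debounce_time) / sizeof(debounce_time[0]))

    /* Index of the smallest supported debounce >= the request,
     * or N_TIMES if the request exceeds every entry. */
    static unsigned int pick_debounce(unsigned int usec)
    {
            unsigned int i;

            for (i = 0; i < N_TIMES; i++)
                    if (usec <= debounce_time[i])
                            return i;
            return N_TIMES;
    }

    int main(void)
    {
            printf("%u\n", pick_debounce(20000));   /* 3 -> 32000 us */
            return 0;
    }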
3276     diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
3277     index ef04b962c3d5..23b6b8c29a99 100644
3278     --- a/drivers/pinctrl/pinctrl-single.c
3279     +++ b/drivers/pinctrl/pinctrl-single.c
3280     @@ -1273,9 +1273,9 @@ static int pcs_parse_bits_in_pinctrl_entry(struct pcs_device *pcs,
3281    
3282     /* Parse pins in each row from LSB */
3283     while (mask) {
3284     - bit_pos = ffs(mask);
3285     + bit_pos = __ffs(mask);
3286     pin_num_from_lsb = bit_pos / pcs->bits_per_pin;
3287     - mask_pos = ((pcs->fmask) << (bit_pos - 1));
3288     + mask_pos = ((pcs->fmask) << bit_pos);
3289     val_pos = val & mask_pos;
3290     submask = mask & mask_pos;
3291    
3292     @@ -1847,7 +1847,7 @@ static int pcs_probe(struct platform_device *pdev)
3293     ret = of_property_read_u32(np, "pinctrl-single,function-mask",
3294     &pcs->fmask);
3295     if (!ret) {
3296     - pcs->fshift = ffs(pcs->fmask) - 1;
3297     + pcs->fshift = __ffs(pcs->fmask);
3298     pcs->fmax = pcs->fmask >> pcs->fshift;
3299     } else {
3300     /* If mask property doesn't exist, function mux is invalid. */
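Both pinctrl-single hunks fix the same off-by-one: ffs() is 1-based (0 means "no bit set") while __ffs() returns the 0-based index of the lowest set bit, and the old code mixed the two conventions. A userspace illustration using the GCC/Clang builtins as stand-ins — an assumption of this sketch; the kernel supplies its own implementations:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mask = 0x00f0;   /* lowest set bit is bit 4 */

            /* ffs()-style, 1-based: returns 5 for this mask. */
            int one_based = __builtin_ffs(mask);

            /* __ffs()-style, 0-based: count trailing zeros gives 4.
             * Like __ffs(), this is undefined for mask == 0. */
            int zero_based = __builtin_ctz(mask);

            printf("ffs=%d __ffs=%d\n", one_based, zero_based);
            return 0;
    }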
3301     diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
3302     index b0f62141ea4d..f774cb576ffa 100644
3303     --- a/drivers/platform/x86/toshiba_acpi.c
3304     +++ b/drivers/platform/x86/toshiba_acpi.c
3305     @@ -131,7 +131,7 @@ MODULE_LICENSE("GPL");
3306     /* Field definitions */
3307     #define HCI_ACCEL_MASK 0x7fff
3308     #define HCI_HOTKEY_DISABLE 0x0b
3309     -#define HCI_HOTKEY_ENABLE 0x01
3310     +#define HCI_HOTKEY_ENABLE 0x09
3311     #define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
3312     #define HCI_LCD_BRIGHTNESS_BITS 3
3313     #define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
3314     diff --git a/drivers/pwm/pwm-brcmstb.c b/drivers/pwm/pwm-brcmstb.c
3315     index 423ce087cd9c..5d5adee16886 100644
3316     --- a/drivers/pwm/pwm-brcmstb.c
3317     +++ b/drivers/pwm/pwm-brcmstb.c
3318     @@ -274,8 +274,8 @@ static int brcmstb_pwm_probe(struct platform_device *pdev)
3319    
3320     res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3321     p->base = devm_ioremap_resource(&pdev->dev, res);
3322     - if (!p->base) {
3323     - ret = -ENOMEM;
3324     + if (IS_ERR(p->base)) {
3325     + ret = PTR_ERR(p->base);
3326     goto out_clk;
3327     }
3328    
3329     diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
3330     index 7b94b8ee087c..732ac71b82cd 100644
3331     --- a/drivers/regulator/core.c
3332     +++ b/drivers/regulator/core.c
3333     @@ -148,7 +148,7 @@ static void regulator_lock_supply(struct regulator_dev *rdev)
3334     {
3335     int i;
3336    
3337     - for (i = 0; rdev->supply; rdev = rdev_get_supply(rdev), i++)
3338     + for (i = 0; rdev; rdev = rdev_get_supply(rdev), i++)
3339     mutex_lock_nested(&rdev->mutex, i);
3340     }
3341    
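The regulator fix is a loop-termination bug: walking the supply chain while `rdev->supply` is non-NULL locks every regulator except the last one in the chain (and none at all for a regulator with no supply). Looping while `rdev` itself is non-NULL also visits the tail. The same shape on a plain linked list:

    #include <stdio.h>

    struct node { const char *name; struct node *next; };

    int main(void)
    {
            struct node c = { "c", NULL };
            struct node b = { "b", &c };
            struct node a = { "a", &b };
            struct node *n;

            /* Buggy condition: never visits the tail "c". */
            for (n = &a; n->next; n = n->next)
                    printf("buggy visits %s\n", n->name);

            /* Fixed condition: visits a, b and c. */
            for (n = &a; n; n = n->next)
                    printf("fixed visits %s\n", n->name);
            return 0;
    }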
3342     diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
3343     index 58f5d3b8e981..27343e1c43ef 100644
3344     --- a/drivers/regulator/s5m8767.c
3345     +++ b/drivers/regulator/s5m8767.c
3346     @@ -202,9 +202,10 @@ static int s5m8767_get_register(struct s5m8767_info *s5m8767, int reg_id,
3347     }
3348     }
3349    
3350     - if (i < s5m8767->num_regulators)
3351     - *enable_ctrl =
3352     - s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
3353     + if (i >= s5m8767->num_regulators)
3354     + return -EINVAL;
3355     +
3356     + *enable_ctrl = s5m8767_opmode_reg[reg_id][mode] << S5M8767_ENCTRL_SHIFT;
3357    
3358     return 0;
3359     }
3360     @@ -937,8 +938,12 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
3361     else
3362     regulators[id].vsel_mask = 0xff;
3363    
3364     - s5m8767_get_register(s5m8767, id, &enable_reg,
3365     + ret = s5m8767_get_register(s5m8767, id, &enable_reg,
3366     &enable_val);
3367     + if (ret) {
3368     + dev_err(s5m8767->dev, "error reading registers\n");
3369     + return ret;
3370     + }
3371     regulators[id].enable_reg = enable_reg;
3372     regulators[id].enable_mask = S5M8767_ENCTRL_MASK;
3373     regulators[id].enable_val = enable_val;
3374     diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c
3375     index 05a51ef52703..d5c1b057a739 100644
3376     --- a/drivers/rtc/rtc-ds1685.c
3377     +++ b/drivers/rtc/rtc-ds1685.c
3378     @@ -187,9 +187,9 @@ ds1685_rtc_end_data_access(struct ds1685_priv *rtc)
3379     * Only use this where you are certain another lock will not be held.
3380     */
3381     static inline void
3382     -ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long flags)
3383     +ds1685_rtc_begin_ctrl_access(struct ds1685_priv *rtc, unsigned long *flags)
3384     {
3385     - spin_lock_irqsave(&rtc->lock, flags);
3386     + spin_lock_irqsave(&rtc->lock, *flags);
3387     ds1685_rtc_switch_to_bank1(rtc);
3388     }
3389    
3390     @@ -1304,7 +1304,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
3391     {
3392     struct ds1685_priv *rtc = dev_get_drvdata(dev);
3393     u8 reg = 0, bit = 0, tmp;
3394     - unsigned long flags = 0;
3395     + unsigned long flags;
3396     long int val = 0;
3397     const struct ds1685_rtc_ctrl_regs *reg_info =
3398     ds1685_rtc_sysfs_ctrl_regs_lookup(attr->attr.name);
3399     @@ -1325,7 +1325,7 @@ ds1685_rtc_sysfs_ctrl_regs_store(struct device *dev,
3400     bit = reg_info->bit;
3401    
3402     /* Safe to spinlock during a write. */
3403     - ds1685_rtc_begin_ctrl_access(rtc, flags);
3404     + ds1685_rtc_begin_ctrl_access(rtc, &flags);
3405     tmp = rtc->read(rtc, reg);
3406     rtc->write(rtc, reg, (val ? (tmp | bit) : (tmp & ~(bit))));
3407     ds1685_rtc_end_ctrl_access(rtc, flags);
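The ds1685 fix is a pass-by-value bug: spin_lock_irqsave() stores the saved interrupt state into the variable it is handed, so a wrapper that takes `flags` by value updates only its own copy and the caller later restores stale state. Passing `unsigned long *flags` propagates it. The same trap in a runnable miniature (save_state() is a made-up stand-in for the lock call):

    #include <stdio.h>

    /* Stand-in for spin_lock_irqsave(): writes saved state to *flags. */
    static void save_state(unsigned long *flags) { *flags = 0xabc; }

    /* Buggy wrapper: the update is lost when the function returns. */
    static void begin_by_value(unsigned long flags) { save_state(&flags); }

    /* Fixed wrapper: the caller's variable is updated. */
    static void begin_by_pointer(unsigned long *flags) { save_state(flags); }

    int main(void)
    {
            unsigned long flags = 0;

            begin_by_value(flags);
            printf("by value:   %#lx\n", flags);   /* still 0 */

            begin_by_pointer(&flags);
            printf("by pointer: %#lx\n", flags);   /* 0xabc */
            return 0;
    }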
3408     diff --git a/drivers/rtc/rtc-hym8563.c b/drivers/rtc/rtc-hym8563.c
3409     index 097325d96db5..b1b4746a0eab 100644
3410     --- a/drivers/rtc/rtc-hym8563.c
3411     +++ b/drivers/rtc/rtc-hym8563.c
3412     @@ -144,7 +144,7 @@ static int hym8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
3413     * it does not seem to carry it over a subsequent write/read.
3414     * So we'll limit ourself to 100 years, starting at 2000 for now.
3415     */
3416     - buf[6] = tm->tm_year - 100;
3417     + buf[6] = bin2bcd(tm->tm_year - 100);
3418    
3419     /*
3420     * CTL1 only contains TEST-mode bits apart from stop,
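The hym8563 fix runs the year through bin2bcd() before writing it, matching the other time fields: this RTC stores two decimal digits per register byte. The conversion itself, re-implemented for illustration (the kernel's version lives in linux/bcd.h):

    #include <stdio.h>

    /* Two decimal digits packed into one byte, e.g. 16 -> 0x16. */
    static unsigned char bin2bcd(unsigned int val)
    {
            return ((val / 10) << 4) | (val % 10);
    }

    int main(void)
    {
            /* Year 2016: tm_year == 116, stored as offset from 2000. */
            int tm_year = 116;

            printf("%#x\n", bin2bcd(tm_year - 100));   /* prints 0x16 */
            return 0;
    }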
3421     diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c
3422     index 7184a0eda793..725dccae24e7 100644
3423     --- a/drivers/rtc/rtc-max77686.c
3424     +++ b/drivers/rtc/rtc-max77686.c
3425     @@ -465,7 +465,7 @@ static int max77686_rtc_probe(struct platform_device *pdev)
3426    
3427     info->virq = regmap_irq_get_virq(max77686->rtc_irq_data,
3428     MAX77686_RTCIRQ_RTCA1);
3429     - if (!info->virq) {
3430     + if (info->virq <= 0) {
3431     ret = -ENXIO;
3432     goto err_rtc;
3433     }
3434     diff --git a/drivers/rtc/rtc-rx8025.c b/drivers/rtc/rtc-rx8025.c
3435     index bd911bafb809..17341feadad1 100644
3436     --- a/drivers/rtc/rtc-rx8025.c
3437     +++ b/drivers/rtc/rtc-rx8025.c
3438     @@ -65,7 +65,6 @@
3439    
3440     static const struct i2c_device_id rx8025_id[] = {
3441     { "rx8025", 0 },
3442     - { "rv8803", 1 },
3443     { }
3444     };
3445     MODULE_DEVICE_TABLE(i2c, rx8025_id);
3446     diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
3447     index f64c282275b3..e1b86bb01062 100644
3448     --- a/drivers/rtc/rtc-vr41xx.c
3449     +++ b/drivers/rtc/rtc-vr41xx.c
3450     @@ -272,12 +272,13 @@ static irqreturn_t rtclong1_interrupt(int irq, void *dev_id)
3451     }
3452    
3453     static const struct rtc_class_ops vr41xx_rtc_ops = {
3454     - .release = vr41xx_rtc_release,
3455     - .ioctl = vr41xx_rtc_ioctl,
3456     - .read_time = vr41xx_rtc_read_time,
3457     - .set_time = vr41xx_rtc_set_time,
3458     - .read_alarm = vr41xx_rtc_read_alarm,
3459     - .set_alarm = vr41xx_rtc_set_alarm,
3460     + .release = vr41xx_rtc_release,
3461     + .ioctl = vr41xx_rtc_ioctl,
3462     + .read_time = vr41xx_rtc_read_time,
3463     + .set_time = vr41xx_rtc_set_time,
3464     + .read_alarm = vr41xx_rtc_read_alarm,
3465     + .set_alarm = vr41xx_rtc_set_alarm,
3466     + .alarm_irq_enable = vr41xx_rtc_alarm_irq_enable,
3467     };
3468    
3469     static int rtc_probe(struct platform_device *pdev)
3470     diff --git a/drivers/scsi/device_handler/Kconfig b/drivers/scsi/device_handler/Kconfig
3471     index e5647d59224f..0b331c9c0a8f 100644
3472     --- a/drivers/scsi/device_handler/Kconfig
3473     +++ b/drivers/scsi/device_handler/Kconfig
3474     @@ -13,13 +13,13 @@ menuconfig SCSI_DH
3475    
3476     config SCSI_DH_RDAC
3477     tristate "LSI RDAC Device Handler"
3478     - depends on SCSI_DH
3479     + depends on SCSI_DH && SCSI
3480     help
3481     If you have a LSI RDAC select y. Otherwise, say N.
3482    
3483     config SCSI_DH_HP_SW
3484     tristate "HP/COMPAQ MSA Device Handler"
3485     - depends on SCSI_DH
3486     + depends on SCSI_DH && SCSI
3487     help
3488     If you have a HP/COMPAQ MSA device that requires START_STOP to
3489     be sent to start it and cannot upgrade the firmware then select y.
3490     @@ -27,13 +27,13 @@ config SCSI_DH_HP_SW
3491    
3492     config SCSI_DH_EMC
3493     tristate "EMC CLARiiON Device Handler"
3494     - depends on SCSI_DH
3495     + depends on SCSI_DH && SCSI
3496     help
3497     If you have a EMC CLARiiON select y. Otherwise, say N.
3498    
3499     config SCSI_DH_ALUA
3500     tristate "SPC-3 ALUA Device Handler"
3501     - depends on SCSI_DH
3502     + depends on SCSI_DH && SCSI
3503     help
3504     SCSI Device handler for generic SPC-3 Asymmetric Logical Unit
3505     Access (ALUA).
3506     diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
3507     index 97a1c1c33b05..00ce3e269a43 100644
3508     --- a/drivers/scsi/megaraid/megaraid_sas_base.c
3509     +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
3510     @@ -6282,12 +6282,13 @@ out:
3511     }
3512    
3513     for (i = 0; i < ioc->sge_count; i++) {
3514     - if (kbuff_arr[i])
3515     + if (kbuff_arr[i]) {
3516     dma_free_coherent(&instance->pdev->dev,
3517     le32_to_cpu(kern_sge32[i].length),
3518     kbuff_arr[i],
3519     le32_to_cpu(kern_sge32[i].phys_addr));
3520     kbuff_arr[i] = NULL;
3521     + }
3522     }
3523    
3524     megasas_return_cmd(instance, cmd);
3525     diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
3526     index 79a8bc4f6cec..035767c02072 100644
3527     --- a/drivers/spi/spi-rockchip.c
3528     +++ b/drivers/spi/spi-rockchip.c
3529     @@ -265,7 +265,10 @@ static inline u32 rx_max(struct rockchip_spi *rs)
3530     static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
3531     {
3532     u32 ser;
3533     - struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
3534     + struct spi_master *master = spi->master;
3535     + struct rockchip_spi *rs = spi_master_get_devdata(master);
3536     +
3537     + pm_runtime_get_sync(rs->dev);
3538    
3539     ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
3540    
3541     @@ -290,6 +293,8 @@ static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
3542     ser &= ~(1 << spi->chip_select);
3543    
3544     writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
3545     +
3546     + pm_runtime_put_sync(rs->dev);
3547     }
3548    
3549     static int rockchip_spi_prepare_message(struct spi_master *master,
3550     diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
3551     index 05de0dad8762..4c6f1d7d2eaf 100644
3552     --- a/drivers/staging/rdma/hfi1/TODO
3553     +++ b/drivers/staging/rdma/hfi1/TODO
3554     @@ -3,4 +3,4 @@ July, 2015
3555     - Remove unneeded file entries in sysfs
3556     - Remove software processing of IB protocol and place in library for use
3557     by qib, ipath (if still present), hfi1, and eventually soft-roce
3558     -
3559     +- Replace incorrect uAPI
3560     diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
3561     index aae9826ec62b..c851e51b1dc3 100644
3562     --- a/drivers/staging/rdma/hfi1/file_ops.c
3563     +++ b/drivers/staging/rdma/hfi1/file_ops.c
3564     @@ -62,6 +62,8 @@
3565     #include <linux/cred.h>
3566     #include <linux/uio.h>
3567    
3568     +#include <rdma/ib.h>
3569     +
3570     #include "hfi.h"
3571     #include "pio.h"
3572     #include "device.h"
3573     @@ -214,6 +216,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
3574     int uctxt_required = 1;
3575     int must_be_root = 0;
3576    
3577     + /* FIXME: This interface cannot continue out of staging */
3578     + if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
3579     + return -EACCES;
3580     +
3581     if (count < sizeof(cmd)) {
3582     ret = -EINVAL;
3583     goto bail;
3584     diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
3585     index e845841ab036..7106288efae3 100644
3586     --- a/drivers/thermal/rockchip_thermal.c
3587     +++ b/drivers/thermal/rockchip_thermal.c
3588     @@ -545,15 +545,14 @@ static int rockchip_configure_from_dt(struct device *dev,
3589     thermal->chip->tshut_temp);
3590     thermal->tshut_temp = thermal->chip->tshut_temp;
3591     } else {
3592     + if (shut_temp > INT_MAX) {
3593     + dev_err(dev, "Invalid tshut temperature specified: %d\n",
3594     + shut_temp);
3595     + return -ERANGE;
3596     + }
3597     thermal->tshut_temp = shut_temp;
3598     }
3599    
3600     - if (thermal->tshut_temp > INT_MAX) {
3601     - dev_err(dev, "Invalid tshut temperature specified: %d\n",
3602     - thermal->tshut_temp);
3603     - return -ERANGE;
3604     - }
3605     -
3606     if (of_property_read_u32(np, "rockchip,hw-tshut-mode", &tshut_mode)) {
3607     dev_warn(dev,
3608     "Missing tshut mode property, using default (%s)\n",
3609     diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
3610     index 51c7507b0444..63a06ab6ba03 100644
3611     --- a/drivers/tty/serial/sh-sci.c
3612     +++ b/drivers/tty/serial/sh-sci.c
3613     @@ -38,7 +38,6 @@
3614     #include <linux/major.h>
3615     #include <linux/module.h>
3616     #include <linux/mm.h>
3617     -#include <linux/notifier.h>
3618     #include <linux/of.h>
3619     #include <linux/platform_device.h>
3620     #include <linux/pm_runtime.h>
3621     @@ -116,8 +115,6 @@ struct sci_port {
3622     struct timer_list rx_timer;
3623     unsigned int rx_timeout;
3624     #endif
3625     -
3626     - struct notifier_block freq_transition;
3627     };
3628    
3629     #define SCI_NPORTS CONFIG_SERIAL_SH_SCI_NR_UARTS
3630     @@ -1606,29 +1603,6 @@ static irqreturn_t sci_mpxed_interrupt(int irq, void *ptr)
3631     return ret;
3632     }
3633    
3634     -/*
3635     - * Here we define a transition notifier so that we can update all of our
3636     - * ports' baud rate when the peripheral clock changes.
3637     - */
3638     -static int sci_notifier(struct notifier_block *self,
3639     - unsigned long phase, void *p)
3640     -{
3641     - struct sci_port *sci_port;
3642     - unsigned long flags;
3643     -
3644     - sci_port = container_of(self, struct sci_port, freq_transition);
3645     -
3646     - if (phase == CPUFREQ_POSTCHANGE) {
3647     - struct uart_port *port = &sci_port->port;
3648     -
3649     - spin_lock_irqsave(&port->lock, flags);
3650     - port->uartclk = clk_get_rate(sci_port->iclk);
3651     - spin_unlock_irqrestore(&port->lock, flags);
3652     - }
3653     -
3654     - return NOTIFY_OK;
3655     -}
3656     -
3657     static const struct sci_irq_desc {
3658     const char *desc;
3659     irq_handler_t handler;
3660     @@ -2559,9 +2533,6 @@ static int sci_remove(struct platform_device *dev)
3661     {
3662     struct sci_port *port = platform_get_drvdata(dev);
3663    
3664     - cpufreq_unregister_notifier(&port->freq_transition,
3665     - CPUFREQ_TRANSITION_NOTIFIER);
3666     -
3667     uart_remove_one_port(&sci_uart_driver, &port->port);
3668    
3669     sci_cleanup_single(port);
3670     @@ -2714,16 +2685,6 @@ static int sci_probe(struct platform_device *dev)
3671     if (ret)
3672     return ret;
3673    
3674     - sp->freq_transition.notifier_call = sci_notifier;
3675     -
3676     - ret = cpufreq_register_notifier(&sp->freq_transition,
3677     - CPUFREQ_TRANSITION_NOTIFIER);
3678     - if (unlikely(ret < 0)) {
3679     - uart_remove_one_port(&sci_uart_driver, &sp->port);
3680     - sci_cleanup_single(sp);
3681     - return ret;
3682     - }
3683     -
3684     #ifdef CONFIG_SH_STANDARD_BIOS
3685     sh_bios_gdb_detach();
3686     #endif
3687     diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
3688     index 9eb1cff28bd4..b8b580e5ae6e 100644
3689     --- a/drivers/usb/core/hcd-pci.c
3690     +++ b/drivers/usb/core/hcd-pci.c
3691     @@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd,
3692     if (companion->bus != pdev->bus ||
3693     PCI_SLOT(companion->devfn) != slot)
3694     continue;
3695     +
3696     + /*
3697     + * Companion device should be either UHCI, OHCI or EHCI host
3698     + * controller, otherwise skip.
3699     + */
3700     + if (companion->class != CL_UHCI && companion->class != CL_OHCI &&
3701     + companion->class != CL_EHCI)
3702     + continue;
3703     +
3704     companion_hcd = pci_get_drvdata(companion);
3705     if (!companion_hcd || !companion_hcd->self.root_hub)
3706     continue;
3707     diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
3708     index cf43e9e18368..79d895c2dd71 100644
3709     --- a/drivers/usb/gadget/function/f_fs.c
3710     +++ b/drivers/usb/gadget/function/f_fs.c
3711     @@ -646,6 +646,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
3712     work);
3713     int ret = io_data->req->status ? io_data->req->status :
3714     io_data->req->actual;
3715     + bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
3716    
3717     if (io_data->read && ret > 0) {
3718     use_mm(io_data->mm);
3719     @@ -657,13 +658,11 @@ static void ffs_user_copy_worker(struct work_struct *work)
3720    
3721     io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
3722    
3723     - if (io_data->ffs->ffs_eventfd &&
3724     - !(io_data->kiocb->ki_flags & IOCB_EVENTFD))
3725     + if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
3726     eventfd_signal(io_data->ffs->ffs_eventfd, 1);
3727    
3728     usb_ep_free_request(io_data->ep, io_data->req);
3729    
3730     - io_data->kiocb->private = NULL;
3731     if (io_data->read)
3732     kfree(io_data->to_free);
3733     kfree(io_data->buf);
3734     diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
3735     index c48cbe731356..d8dbd7e5194b 100644
3736     --- a/drivers/usb/host/xhci-mem.c
3737     +++ b/drivers/usb/host/xhci-mem.c
3738     @@ -1875,6 +1875,12 @@ no_bw:
3739     kfree(xhci->rh_bw);
3740     kfree(xhci->ext_caps);
3741    
3742     + xhci->usb2_ports = NULL;
3743     + xhci->usb3_ports = NULL;
3744     + xhci->port_array = NULL;
3745     + xhci->rh_bw = NULL;
3746     + xhci->ext_caps = NULL;
3747     +
3748     xhci->page_size = 0;
3749     xhci->page_shift = 0;
3750     xhci->bus_state[0].bus_suspended = 0;
3751     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
3752     index c2d65206ec6c..ea4fb4b0cd44 100644
3753     --- a/drivers/usb/host/xhci-pci.c
3754     +++ b/drivers/usb/host/xhci-pci.c
3755     @@ -48,6 +48,7 @@
3756     #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f
3757     #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f
3758     #define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
3759     +#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
3760    
3761     static const char hcd_name[] = "xhci_hcd";
3762    
3763     @@ -156,7 +157,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
3764     (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
3765     pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
3766     pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
3767     - pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI)) {
3768     + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
3769     + pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) {
3770     xhci->quirks |= XHCI_PME_STUCK_QUIRK;
3771     }
3772     if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
3773     @@ -299,6 +301,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
3774     struct xhci_hcd *xhci;
3775    
3776     xhci = hcd_to_xhci(pci_get_drvdata(dev));
3777     + xhci->xhc_state |= XHCI_STATE_REMOVING;
3778     if (xhci->shared_hcd) {
3779     usb_remove_hcd(xhci->shared_hcd);
3780     usb_put_hcd(xhci->shared_hcd);
3781     diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
3782     index db0f0831b94f..2b63969c2bbf 100644
3783     --- a/drivers/usb/host/xhci-ring.c
3784     +++ b/drivers/usb/host/xhci-ring.c
3785     @@ -4008,7 +4008,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
3786     int reserved_trbs = xhci->cmd_ring_reserved_trbs;
3787     int ret;
3788    
3789     - if (xhci->xhc_state) {
3790     + if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3791     + (xhci->xhc_state & XHCI_STATE_HALTED)) {
3792     xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
3793     return -ESHUTDOWN;
3794     }
3795     diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
3796     index 776d59c32bc5..ec9e758d5fcd 100644
3797     --- a/drivers/usb/host/xhci.c
3798     +++ b/drivers/usb/host/xhci.c
3799     @@ -146,7 +146,8 @@ static int xhci_start(struct xhci_hcd *xhci)
3800     "waited %u microseconds.\n",
3801     XHCI_MAX_HALT_USEC);
3802     if (!ret)
3803     - xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
3804     + /* Clear state flags, including dying, halted or removing */
3805     + xhci->xhc_state = 0;
3806    
3807     return ret;
3808     }
3809     @@ -1103,8 +1104,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3810     /* Resume root hubs only when have pending events. */
3811     status = readl(&xhci->op_regs->status);
3812     if (status & STS_EINT) {
3813     - usb_hcd_resume_root_hub(hcd);
3814     usb_hcd_resume_root_hub(xhci->shared_hcd);
3815     + usb_hcd_resume_root_hub(hcd);
3816     }
3817     }
3818    
3819     @@ -1119,10 +1120,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
3820    
3821     /* Re-enable port polling. */
3822     xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
3823     - set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
3824     - usb_hcd_poll_rh_status(hcd);
3825     set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
3826     usb_hcd_poll_rh_status(xhci->shared_hcd);
3827     + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
3828     + usb_hcd_poll_rh_status(hcd);
3829    
3830     return retval;
3831     }
3832     @@ -2753,7 +2754,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
3833     if (ret <= 0)
3834     return ret;
3835     xhci = hcd_to_xhci(hcd);
3836     - if (xhci->xhc_state & XHCI_STATE_DYING)
3837     + if ((xhci->xhc_state & XHCI_STATE_DYING) ||
3838     + (xhci->xhc_state & XHCI_STATE_REMOVING))
3839     return -ENODEV;
3840    
3841     xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
3842     @@ -3800,7 +3802,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
3843    
3844     mutex_lock(&xhci->mutex);
3845    
3846     - if (xhci->xhc_state) /* dying or halted */
3847     + if (xhci->xhc_state) /* dying, removing or halted */
3848     goto out;
3849    
3850     if (!udev->slot_id) {
3851     diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
3852     index 0b9451250e33..99ac2289dbf3 100644
3853     --- a/drivers/usb/host/xhci.h
3854     +++ b/drivers/usb/host/xhci.h
3855     @@ -1596,6 +1596,7 @@ struct xhci_hcd {
3856     */
3857     #define XHCI_STATE_DYING (1 << 0)
3858     #define XHCI_STATE_HALTED (1 << 1)
3859     +#define XHCI_STATE_REMOVING (1 << 2)
3860     /* Statistics */
3861     int error_bitmask;
3862     unsigned int quirks;
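XHCI_STATE_REMOVING joins DYING and HALTED as independent bits in xhc_state, which is why the queue_command hunk above stops testing the whole word and tests specific bits instead: a bare `if (xhci->xhc_state)` would now also fire for a host that is merely being removed. The flag idiom on its own (the names here are illustrative copies, not the kernel definitions):

    #include <stdio.h>

    #define STATE_DYING    (1 << 0)
    #define STATE_HALTED   (1 << 1)
    #define STATE_REMOVING (1 << 2)

    int main(void)
    {
            unsigned int state = 0;

            state |= STATE_REMOVING;                /* set one flag */

            /* Test the bits that matter, not the whole word. */
            if (state & (STATE_DYING | STATE_HALTED))
                    printf("dying or halted\n");
            else if (state & STATE_REMOVING)
                    printf("removing only\n");      /* this branch runs */

            state = 0;      /* wholesale clear, as xhci_start() now does */
            return 0;
    }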
3863     diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c
3864     index facaaf003f19..e40da7759a0e 100644
3865     --- a/drivers/usb/usbip/usbip_common.c
3866     +++ b/drivers/usb/usbip/usbip_common.c
3867     @@ -741,6 +741,17 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
3868     if (!(size > 0))
3869     return 0;
3870    
3871     + if (size > urb->transfer_buffer_length) {
3872     + /* should not happen, probably malicious packet */
3873     + if (ud->side == USBIP_STUB) {
3874     + usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
3875     + return 0;
3876     + } else {
3877     + usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
3878     + return -EPIPE;
3879     + }
3880     + }
3881     +
3882     ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
3883     if (ret != size) {
3884     dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
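The usbip hunk checks the peer-supplied size against the URB's transfer_buffer_length before receiving into the buffer; without it, a malformed or malicious packet could overrun transfer_buffer. The generic shape of that defence (recv_checked() is an invented name for the sketch):

    #include <stdio.h>
    #include <string.h>

    /* Accept an untrusted payload only after checking its claimed
     * size against the destination's capacity. */
    static int recv_checked(char *dst, size_t dst_len,
                            const char *src, size_t claimed)
    {
            if (claimed > dst_len)
                    return -1;    /* reject: would overrun dst */
            memcpy(dst, src, claimed);
            return 0;
    }

    int main(void)
    {
            char buf[16];
            const char payload[64] = "payload";

            printf("%d\n", recv_checked(buf, sizeof(buf), payload, 8));   /* 0 */
            printf("%d\n", recv_checked(buf, sizeof(buf), payload, 64));  /* -1 */
            return 0;
    }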
3885     diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
3886     index e6d16d65e4e6..f07a0974fda2 100644
3887     --- a/drivers/video/fbdev/Kconfig
3888     +++ b/drivers/video/fbdev/Kconfig
3889     @@ -2249,7 +2249,6 @@ config XEN_FBDEV_FRONTEND
3890     select FB_SYS_IMAGEBLIT
3891     select FB_SYS_FOPS
3892     select FB_DEFERRED_IO
3893     - select INPUT_XEN_KBDDEV_FRONTEND if INPUT_MISC
3894     select XEN_XENBUS_FRONTEND
3895     default y
3896     help
3897     diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
3898     index 0081725c6b5b..d00510029c93 100644
3899     --- a/drivers/video/fbdev/da8xx-fb.c
3900     +++ b/drivers/video/fbdev/da8xx-fb.c
3901     @@ -209,8 +209,7 @@ static struct fb_videomode known_lcd_panels[] = {
3902     .lower_margin = 2,
3903     .hsync_len = 0,
3904     .vsync_len = 0,
3905     - .sync = FB_SYNC_CLK_INVERT |
3906     - FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
3907     + .sync = FB_SYNC_CLK_INVERT,
3908     },
3909     /* Sharp LK043T1DG01 */
3910     [1] = {
3911     @@ -224,7 +223,7 @@ static struct fb_videomode known_lcd_panels[] = {
3912     .lower_margin = 2,
3913     .hsync_len = 41,
3914     .vsync_len = 10,
3915     - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
3916     + .sync = 0,
3917     .flag = 0,
3918     },
3919     [2] = {
3920     @@ -239,7 +238,7 @@ static struct fb_videomode known_lcd_panels[] = {
3921     .lower_margin = 10,
3922     .hsync_len = 10,
3923     .vsync_len = 10,
3924     - .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
3925     + .sync = 0,
3926     .flag = 0,
3927     },
3928     [3] = {
3929     diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
3930     index b7fcc0de0b2f..0f5d05bf2131 100644
3931     --- a/fs/debugfs/inode.c
3932     +++ b/fs/debugfs/inode.c
3933     @@ -457,7 +457,7 @@ struct dentry *debugfs_create_automount(const char *name,
3934     if (unlikely(!inode))
3935     return failed_creating(dentry);
3936    
3937     - inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
3938     + make_empty_dir_inode(inode);
3939     inode->i_flags |= S_AUTOMOUNT;
3940     inode->i_private = data;
3941     dentry->d_fsdata = (void *)f;
3942     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
3943     index d4156e1c128d..b7e921d207fb 100644
3944     --- a/fs/ext4/ext4.h
3945     +++ b/fs/ext4/ext4.h
3946     @@ -933,6 +933,15 @@ struct ext4_inode_info {
3947     * by other means, so we have i_data_sem.
3948     */
3949     struct rw_semaphore i_data_sem;
3950     + /*
3951     + * i_mmap_sem is for serializing page faults with truncate / punch hole
3952     + * operations. We have to make sure that a new page cannot be faulted in
3953     + * a section of the inode that is being punched. We cannot easily use
3954     + * i_data_sem for this since we need protection for the whole punch
3955     + * operation and i_data_sem ranks below transaction start so we have
3956     + * to occasionally drop it.
3957     + */
3958     + struct rw_semaphore i_mmap_sem;
3959     struct inode vfs_inode;
3960     struct jbd2_inode *jinode;
3961    
3962     @@ -2507,6 +2516,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
3963     extern int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3964     loff_t lstart, loff_t lend);
3965     extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
3966     +extern int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
3967     extern qsize_t *ext4_get_reserved_space(struct inode *inode);
3968     extern void ext4_da_update_reserve_space(struct inode *inode,
3969     int used, int quota_claim);
3970     @@ -2871,6 +2881,9 @@ static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize)
3971     return changed;
3972     }
3973    
3974     +int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
3975     + loff_t len);
3976     +
3977     struct ext4_group_info {
3978     unsigned long bb_state;
3979     struct rb_root bb_free_root;
3980     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3981     index 551353b1b17a..3578b25fccfd 100644
3982     --- a/fs/ext4/extents.c
3983     +++ b/fs/ext4/extents.c
3984     @@ -4685,10 +4685,6 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset,
3985     if (len <= EXT_UNWRITTEN_MAX_LEN)
3986     flags |= EXT4_GET_BLOCKS_NO_NORMALIZE;
3987    
3988     - /* Wait all existing dio workers, newcomers will block on i_mutex */
3989     - ext4_inode_block_unlocked_dio(inode);
3990     - inode_dio_wait(inode);
3991     -
3992     /*
3993     * credits to insert 1 extent into extent tree
3994     */
3995     @@ -4752,8 +4748,6 @@ retry:
3996     goto retry;
3997     }
3998    
3999     - ext4_inode_resume_unlocked_dio(inode);
4000     -
4001     return ret > 0 ? ret2 : ret;
4002     }
4003    
4004     @@ -4770,7 +4764,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4005     int partial_begin, partial_end;
4006     loff_t start, end;
4007     ext4_lblk_t lblk;
4008     - struct address_space *mapping = inode->i_mapping;
4009     unsigned int blkbits = inode->i_blkbits;
4010    
4011     trace_ext4_zero_range(inode, offset, len, mode);
4012     @@ -4786,17 +4779,6 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4013     }
4014    
4015     /*
4016     - * Write out all dirty pages to avoid race conditions
4017     - * Then release them.
4018     - */
4019     - if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
4020     - ret = filemap_write_and_wait_range(mapping, offset,
4021     - offset + len - 1);
4022     - if (ret)
4023     - return ret;
4024     - }
4025     -
4026     - /*
4027     * Round up offset. This is not fallocate, we need to zero out
4028     * blocks, so convert interior block aligned part of the range to
4029     * unwritten and possibly manually zero out unaligned parts of the
4030     @@ -4839,6 +4821,10 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4031     if (mode & FALLOC_FL_KEEP_SIZE)
4032     flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4033    
4034     + /* Wait all existing dio workers, newcomers will block on i_mutex */
4035     + ext4_inode_block_unlocked_dio(inode);
4036     + inode_dio_wait(inode);
4037     +
4038     /* Preallocate the range including the unaligned edges */
4039     if (partial_begin || partial_end) {
4040     ret = ext4_alloc_file_blocks(file,
4041     @@ -4847,7 +4833,7 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4042     round_down(offset, 1 << blkbits)) >> blkbits,
4043     new_size, flags, mode);
4044     if (ret)
4045     - goto out_mutex;
4046     + goto out_dio;
4047    
4048     }
4049    
4050     @@ -4856,16 +4842,23 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4051     flags |= (EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4052     EXT4_EX_NOCACHE);
4053    
4054     - /* Now release the pages and zero block aligned part of pages*/
4055     + /*
4056     + * Prevent page faults from reinstantiating pages we have
4057     + * released from page cache.
4058     + */
4059     + down_write(&EXT4_I(inode)->i_mmap_sem);
4060     + ret = ext4_update_disksize_before_punch(inode, offset, len);
4061     + if (ret) {
4062     + up_write(&EXT4_I(inode)->i_mmap_sem);
4063     + goto out_dio;
4064     + }
4065     + /* Now release the pages and zero block aligned part of pages */
4066     truncate_pagecache_range(inode, start, end - 1);
4067     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4068    
4069     - /* Wait all existing dio workers, newcomers will block on i_mutex */
4070     - ext4_inode_block_unlocked_dio(inode);
4071     - inode_dio_wait(inode);
4072     -
4073     ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4074     flags, mode);
4075     + up_write(&EXT4_I(inode)->i_mmap_sem);
4076     if (ret)
4077     goto out_dio;
4078     }
4079     @@ -4998,8 +4991,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
4080     goto out;
4081     }
4082    
4083     + /* Wait all existing dio workers, newcomers will block on i_mutex */
4084     + ext4_inode_block_unlocked_dio(inode);
4085     + inode_dio_wait(inode);
4086     +
4087     ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4088     flags, mode);
4089     + ext4_inode_resume_unlocked_dio(inode);
4090     if (ret)
4091     goto out;
4092    
4093     @@ -5494,21 +5492,7 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
4094     return ret;
4095     }
4096    
4097     - /*
4098     - * Need to round down offset to be aligned with page size boundary
4099     - * for page size > block size.
4100     - */
4101     - ioffset = round_down(offset, PAGE_SIZE);
4102     -
4103     - /* Write out all dirty pages */
4104     - ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
4105     - LLONG_MAX);
4106     - if (ret)
4107     - return ret;
4108     -
4109     - /* Take mutex lock */
4110     mutex_lock(&inode->i_mutex);
4111     -
4112     /*
4113     * There is no need to overlap collapse range with EOF, in which case
4114     * it is effectively a truncate operation
4115     @@ -5524,17 +5508,43 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
4116     goto out_mutex;
4117     }
4118    
4119     - truncate_pagecache(inode, ioffset);
4120     -
4121     /* Wait for existing dio to complete */
4122     ext4_inode_block_unlocked_dio(inode);
4123     inode_dio_wait(inode);
4124    
4125     + /*
4126     + * Prevent page faults from reinstantiating pages we have released from
4127     + * page cache.
4128     + */
4129     + down_write(&EXT4_I(inode)->i_mmap_sem);
4130     + /*
4131     + * Need to round down offset to be aligned with page size boundary
4132     + * for page size > block size.
4133     + */
4134     + ioffset = round_down(offset, PAGE_SIZE);
4135     + /*
4136     + * Write tail of the last page before removed range since it will get
4137     + * removed from the page cache below.
4138     + */
4139     + ret = filemap_write_and_wait_range(inode->i_mapping, ioffset, offset);
4140     + if (ret)
4141     + goto out_mmap;
4142     + /*
4143     + * Write out data that will be shifted to preserve it when discarding
4144     + * page cache below. We are also protected from pages becoming dirty
4145     + * by i_mmap_sem.
4146     + */
4147     + ret = filemap_write_and_wait_range(inode->i_mapping, offset + len,
4148     + LLONG_MAX);
4149     + if (ret)
4150     + goto out_mmap;
4151     + truncate_pagecache(inode, ioffset);
4152     +
4153     credits = ext4_writepage_trans_blocks(inode);
4154     handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4155     if (IS_ERR(handle)) {
4156     ret = PTR_ERR(handle);
4157     - goto out_dio;
4158     + goto out_mmap;
4159     }
4160    
4161     down_write(&EXT4_I(inode)->i_data_sem);
4162     @@ -5573,7 +5583,8 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
4163    
4164     out_stop:
4165     ext4_journal_stop(handle);
4166     -out_dio:
4167     +out_mmap:
4168     + up_write(&EXT4_I(inode)->i_mmap_sem);
4169     ext4_inode_resume_unlocked_dio(inode);
4170     out_mutex:
4171     mutex_unlock(&inode->i_mutex);
4172     @@ -5627,21 +5638,7 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
4173     return ret;
4174     }
4175    
4176     - /*
4177     - * Need to round down to align start offset to page size boundary
4178     - * for page size > block size.
4179     - */
4180     - ioffset = round_down(offset, PAGE_SIZE);
4181     -
4182     - /* Write out all dirty pages */
4183     - ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
4184     - LLONG_MAX);
4185     - if (ret)
4186     - return ret;
4187     -
4188     - /* Take mutex lock */
4189     mutex_lock(&inode->i_mutex);
4190     -
4191     /* Currently just for extent based files */
4192     if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4193     ret = -EOPNOTSUPP;
4194     @@ -5660,17 +5657,32 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
4195     goto out_mutex;
4196     }
4197    
4198     - truncate_pagecache(inode, ioffset);
4199     -
4200     /* Wait for existing dio to complete */
4201     ext4_inode_block_unlocked_dio(inode);
4202     inode_dio_wait(inode);
4203    
4204     + /*
4205     + * Prevent page faults from reinstantiating pages we have released from
4206     + * page cache.
4207     + */
4208     + down_write(&EXT4_I(inode)->i_mmap_sem);
4209     + /*
4210     + * Need to round down to align start offset to page size boundary
4211     + * for page size > block size.
4212     + */
4213     + ioffset = round_down(offset, PAGE_SIZE);
4214     + /* Write out all dirty pages */
4215     + ret = filemap_write_and_wait_range(inode->i_mapping, ioffset,
4216     + LLONG_MAX);
4217     + if (ret)
4218     + goto out_mmap;
4219     + truncate_pagecache(inode, ioffset);
4220     +
4221     credits = ext4_writepage_trans_blocks(inode);
4222     handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
4223     if (IS_ERR(handle)) {
4224     ret = PTR_ERR(handle);
4225     - goto out_dio;
4226     + goto out_mmap;
4227     }
4228    
4229     /* Expand file to avoid data loss if there is error while shifting */
4230     @@ -5741,7 +5753,8 @@ int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len)
4231    
4232     out_stop:
4233     ext4_journal_stop(handle);
4234     -out_dio:
4235     +out_mmap:
4236     + up_write(&EXT4_I(inode)->i_mmap_sem);
4237     ext4_inode_resume_unlocked_dio(inode);
4238     out_mutex:
4239     mutex_unlock(&inode->i_mutex);
4240     diff --git a/fs/ext4/file.c b/fs/ext4/file.c
4241     index 113837e7ba98..0d24ebcd7c9e 100644
4242     --- a/fs/ext4/file.c
4243     +++ b/fs/ext4/file.c
4244     @@ -209,15 +209,18 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4245     {
4246     int result;
4247     handle_t *handle = NULL;
4248     - struct super_block *sb = file_inode(vma->vm_file)->i_sb;
4249     + struct inode *inode = file_inode(vma->vm_file);
4250     + struct super_block *sb = inode->i_sb;
4251     bool write = vmf->flags & FAULT_FLAG_WRITE;
4252    
4253     if (write) {
4254     sb_start_pagefault(sb);
4255     file_update_time(vma->vm_file);
4256     + down_read(&EXT4_I(inode)->i_mmap_sem);
4257     handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
4258     EXT4_DATA_TRANS_BLOCKS(sb));
4259     - }
4260     + } else
4261     + down_read(&EXT4_I(inode)->i_mmap_sem);
4262    
4263     if (IS_ERR(handle))
4264     result = VM_FAULT_SIGBUS;
4265     @@ -228,8 +231,10 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4266     if (write) {
4267     if (!IS_ERR(handle))
4268     ext4_journal_stop(handle);
4269     + up_read(&EXT4_I(inode)->i_mmap_sem);
4270     sb_end_pagefault(sb);
4271     - }
4272     + } else
4273     + up_read(&EXT4_I(inode)->i_mmap_sem);
4274    
4275     return result;
4276     }
4277     @@ -246,10 +251,12 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
4278     if (write) {
4279     sb_start_pagefault(sb);
4280     file_update_time(vma->vm_file);
4281     + down_read(&EXT4_I(inode)->i_mmap_sem);
4282     handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
4283     ext4_chunk_trans_blocks(inode,
4284     PMD_SIZE / PAGE_SIZE));
4285     - }
4286     + } else
4287     + down_read(&EXT4_I(inode)->i_mmap_sem);
4288    
4289     if (IS_ERR(handle))
4290     result = VM_FAULT_SIGBUS;
4291     @@ -260,30 +267,71 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
4292     if (write) {
4293     if (!IS_ERR(handle))
4294     ext4_journal_stop(handle);
4295     + up_read(&EXT4_I(inode)->i_mmap_sem);
4296     sb_end_pagefault(sb);
4297     - }
4298     + } else
4299     + up_read(&EXT4_I(inode)->i_mmap_sem);
4300    
4301     return result;
4302     }
4303    
4304     static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4305     {
4306     - return dax_mkwrite(vma, vmf, ext4_get_block_dax,
4307     - ext4_end_io_unwritten);
4308     + int err;
4309     + struct inode *inode = file_inode(vma->vm_file);
4310     +
4311     + sb_start_pagefault(inode->i_sb);
4312     + file_update_time(vma->vm_file);
4313     + down_read(&EXT4_I(inode)->i_mmap_sem);
4314     + err = __dax_mkwrite(vma, vmf, ext4_get_block_dax,
4315     + ext4_end_io_unwritten);
4316     + up_read(&EXT4_I(inode)->i_mmap_sem);
4317     + sb_end_pagefault(inode->i_sb);
4318     +
4319     + return err;
4320     +}
4321     +
4322     +/*
4323     + * Handle write fault for VM_MIXEDMAP mappings. As in the ext4_dax_mkwrite()
4324     + * handler, we check for races against truncate. Note that since we cycle
4325     + * through i_mmap_sem, we are sure that any hole punching that began before we
4326     + * were called has finished by now, so if it included part of the file we are
4327     + * working on, our pte will get unmapped and the check for pte_same() in
4328     + * wp_pfn_shared() will fail. Thus the fault gets retried and things work out
4329     + * as desired.
4330     + */
4331     +static int ext4_dax_pfn_mkwrite(struct vm_area_struct *vma,
4332     + struct vm_fault *vmf)
4333     +{
4334     + struct inode *inode = file_inode(vma->vm_file);
4335     + struct super_block *sb = inode->i_sb;
4336     + int ret = VM_FAULT_NOPAGE;
4337     + loff_t size;
4338     +
4339     + sb_start_pagefault(sb);
4340     + file_update_time(vma->vm_file);
4341     + down_read(&EXT4_I(inode)->i_mmap_sem);
4342     + size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
4343     + if (vmf->pgoff >= size)
4344     + ret = VM_FAULT_SIGBUS;
4345     + up_read(&EXT4_I(inode)->i_mmap_sem);
4346     + sb_end_pagefault(sb);
4347     +
4348     + return ret;
4349     }
4350    
4351     static const struct vm_operations_struct ext4_dax_vm_ops = {
4352     .fault = ext4_dax_fault,
4353     .pmd_fault = ext4_dax_pmd_fault,
4354     .page_mkwrite = ext4_dax_mkwrite,
4355     - .pfn_mkwrite = dax_pfn_mkwrite,
4356     + .pfn_mkwrite = ext4_dax_pfn_mkwrite,
4357     };
4358     #else
4359     #define ext4_dax_vm_ops ext4_file_vm_ops
4360     #endif
4361    
4362     static const struct vm_operations_struct ext4_file_vm_ops = {
4363     - .fault = filemap_fault,
4364     + .fault = ext4_filemap_fault,
4365     .map_pages = filemap_map_pages,
4366     .page_mkwrite = ext4_page_mkwrite,
4367     };
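The pattern in this file is uniform: every fault path takes i_mmap_sem for
reading, while truncate and hole punching take it for writing, so a fault can
never reinstantiate a page that a concurrent truncate has just evicted. A
minimal userspace sketch of that discipline, with a POSIX rwlock standing in
for the rw_semaphore (all names below are illustrative, not from the patch):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t i_mmap_sem = PTHREAD_RWLOCK_INITIALIZER;
	static long cached_size = 8192;	/* stand-in for page cache state */

	static void fault_in(long off)	/* cf. ext4_filemap_fault() */
	{
		pthread_rwlock_rdlock(&i_mmap_sem); /* faults run concurrently */
		printf("fault at %ld: %s\n", off,
		       off < cached_size ? "served" : "SIGBUS");
		pthread_rwlock_unlock(&i_mmap_sem);
	}

	static void truncate_to(long size)	/* cf. punch hole / truncate */
	{
		pthread_rwlock_wrlock(&i_mmap_sem); /* excludes all faults */
		cached_size = size;	/* evict; nothing can re-fill meanwhile */
		pthread_rwlock_unlock(&i_mmap_sem);
	}

	int main(void)
	{
		fault_in(0);
		truncate_to(0);
		fault_in(0);	/* sees the truncated state, never a stale page */
		return 0;
	}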
4368     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4369     index 06bda0361e7c..e31d762eedce 100644
4370     --- a/fs/ext4/inode.c
4371     +++ b/fs/ext4/inode.c
4372     @@ -3587,6 +3587,35 @@ int ext4_can_truncate(struct inode *inode)
4373     }
4374    
4375     /*
4376     + * We have to make sure i_disksize gets properly updated before we truncate
4377     + * page cache due to hole punching or zero range. Otherwise i_disksize update
4378     + * can get lost as it may have been postponed to submission of writeback but
4379     + * can get lost, as it may have been postponed until writeback submission,
4380     + * which will never happen after we truncate the page cache.
4381     +int ext4_update_disksize_before_punch(struct inode *inode, loff_t offset,
4382     + loff_t len)
4383     +{
4384     + handle_t *handle;
4385     + loff_t size = i_size_read(inode);
4386     +
4387     + WARN_ON(!mutex_is_locked(&inode->i_mutex));
4388     + if (offset > size || offset + len < size)
4389     + return 0;
4390     +
4391     + if (EXT4_I(inode)->i_disksize >= size)
4392     + return 0;
4393     +
4394     + handle = ext4_journal_start(inode, EXT4_HT_MISC, 1);
4395     + if (IS_ERR(handle))
4396     + return PTR_ERR(handle);
4397     + ext4_update_i_disksize(inode, size);
4398     + ext4_mark_inode_dirty(handle, inode);
4399     + ext4_journal_stop(handle);
4400     +
4401     + return 0;
4402     +}
4403     +
4404     +/*
4405     * ext4_punch_hole: punches a hole in a file by releasing the blocks
4406     * associated with the given offset and length
4407     *
4408     @@ -3651,17 +3680,26 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4409    
4410     }
4411    
4412     + /* Wait all existing dio workers, newcomers will block on i_mutex */
4413     + ext4_inode_block_unlocked_dio(inode);
4414     + inode_dio_wait(inode);
4415     +
4416     + /*
4417     + * Prevent page faults from reinstantiating pages we have released from
4418     + * page cache.
4419     + */
4420     + down_write(&EXT4_I(inode)->i_mmap_sem);
4421     first_block_offset = round_up(offset, sb->s_blocksize);
4422     last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
4423    
4424     /* Now release the pages and zero block aligned part of pages*/
4425     - if (last_block_offset > first_block_offset)
4426     + if (last_block_offset > first_block_offset) {
4427     + ret = ext4_update_disksize_before_punch(inode, offset, length);
4428     + if (ret)
4429     + goto out_dio;
4430     truncate_pagecache_range(inode, first_block_offset,
4431     last_block_offset);
4432     -
4433     - /* Wait all existing dio workers, newcomers will block on i_mutex */
4434     - ext4_inode_block_unlocked_dio(inode);
4435     - inode_dio_wait(inode);
4436     + }
4437    
4438     if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
4439     credits = ext4_writepage_trans_blocks(inode);
4440     @@ -3708,16 +3746,12 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
4441     if (IS_SYNC(inode))
4442     ext4_handle_sync(handle);
4443    
4444     - /* Now release the pages again to reduce race window */
4445     - if (last_block_offset > first_block_offset)
4446     - truncate_pagecache_range(inode, first_block_offset,
4447     - last_block_offset);
4448     -
4449     inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
4450     ext4_mark_inode_dirty(handle, inode);
4451     out_stop:
4452     ext4_journal_stop(handle);
4453     out_dio:
4454     + up_write(&EXT4_I(inode)->i_mmap_sem);
4455     ext4_inode_resume_unlocked_dio(inode);
4456     out_mutex:
4457     mutex_unlock(&inode->i_mutex);
4458     @@ -4851,6 +4885,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4459     } else
4460     ext4_wait_for_tail_page_commit(inode);
4461     }
4462     + down_write(&EXT4_I(inode)->i_mmap_sem);
4463     /*
4464     * Truncate pagecache after we've waited for commit
4465     * in data=journal mode to make pages freeable.
4466     @@ -4858,6 +4893,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4467     truncate_pagecache(inode, inode->i_size);
4468     if (shrink)
4469     ext4_truncate(inode);
4470     + up_write(&EXT4_I(inode)->i_mmap_sem);
4471     }
4472    
4473     if (!rc) {
4474     @@ -5109,6 +5145,8 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4475     might_sleep();
4476     trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4477     err = ext4_reserve_inode_write(handle, inode, &iloc);
4478     + if (err)
4479     + return err;
4480     if (ext4_handle_valid(handle) &&
4481     EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4482     !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4483     @@ -5139,9 +5177,7 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4484     }
4485     }
4486     }
4487     - if (!err)
4488     - err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4489     - return err;
4490     + return ext4_mark_iloc_dirty(handle, inode, &iloc);
4491     }
4492    
4493     /*
4494     @@ -5306,6 +5342,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4495    
4496     sb_start_pagefault(inode->i_sb);
4497     file_update_time(vma->vm_file);
4498     +
4499     + down_read(&EXT4_I(inode)->i_mmap_sem);
4500     /* Delalloc case is easy... */
4501     if (test_opt(inode->i_sb, DELALLOC) &&
4502     !ext4_should_journal_data(inode) &&
4503     @@ -5375,6 +5413,19 @@ retry_alloc:
4504     out_ret:
4505     ret = block_page_mkwrite_return(ret);
4506     out:
4507     + up_read(&EXT4_I(inode)->i_mmap_sem);
4508     sb_end_pagefault(inode->i_sb);
4509     return ret;
4510     }
4511     +
4512     +int ext4_filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4513     +{
4514     + struct inode *inode = file_inode(vma->vm_file);
4515     + int err;
4516     +
4517     + down_read(&EXT4_I(inode)->i_mmap_sem);
4518     + err = filemap_fault(vma, vmf);
4519     + up_read(&EXT4_I(inode)->i_mmap_sem);
4520     +
4521     + return err;
4522     +}
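Note the early return in ext4_update_disksize_before_punch() above: the
i_disksize sync is only needed when the punched range straddles i_size. A tiny
standalone check of that predicate (the helper name is hypothetical, for
illustration only):

	#include <assert.h>

	/* mirrors the guard: if (offset > size || offset + len < size) return 0; */
	static int straddles_isize(long long offset, long long len, long long size)
	{
		return offset <= size && offset + len >= size;
	}

	int main(void)
	{
		assert(!straddles_isize(8192, 4096, 4096)); /* wholly beyond i_size */
		assert(!straddles_isize(0, 1024, 4096));    /* wholly below i_size */
		assert(straddles_isize(0, 8192, 4096));     /* covers i_size: sync */
		return 0;
	}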
4523     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4524     index ba1cf0bf2f81..852c26806af2 100644
4525     --- a/fs/ext4/super.c
4526     +++ b/fs/ext4/super.c
4527     @@ -958,6 +958,7 @@ static void init_once(void *foo)
4528     INIT_LIST_HEAD(&ei->i_orphan);
4529     init_rwsem(&ei->xattr_sem);
4530     init_rwsem(&ei->i_data_sem);
4531     + init_rwsem(&ei->i_mmap_sem);
4532     inode_init_once(&ei->vfs_inode);
4533     }
4534    
4535     diff --git a/fs/ext4/truncate.h b/fs/ext4/truncate.h
4536     index 011ba6670d99..c70d06a383e2 100644
4537     --- a/fs/ext4/truncate.h
4538     +++ b/fs/ext4/truncate.h
4539     @@ -10,8 +10,10 @@
4540     */
4541     static inline void ext4_truncate_failed_write(struct inode *inode)
4542     {
4543     + down_write(&EXT4_I(inode)->i_mmap_sem);
4544     truncate_inode_pages(inode->i_mapping, inode->i_size);
4545     ext4_truncate(inode);
4546     + up_write(&EXT4_I(inode)->i_mmap_sem);
4547     }
4548    
4549     /*
4550     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
4551     index 09cd3edde08a..f6478301db00 100644
4552     --- a/fs/proc/task_mmu.c
4553     +++ b/fs/proc/task_mmu.c
4554     @@ -1435,6 +1435,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
4555     return page;
4556     }
4557    
4558     +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4559     +static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
4560     + struct vm_area_struct *vma,
4561     + unsigned long addr)
4562     +{
4563     + struct page *page;
4564     + int nid;
4565     +
4566     + if (!pmd_present(pmd))
4567     + return NULL;
4568     +
4569     + page = vm_normal_page_pmd(vma, addr, pmd);
4570     + if (!page)
4571     + return NULL;
4572     +
4573     + if (PageReserved(page))
4574     + return NULL;
4575     +
4576     + nid = page_to_nid(page);
4577     + if (!node_isset(nid, node_states[N_MEMORY]))
4578     + return NULL;
4579     +
4580     + return page;
4581     +}
4582     +#endif
4583     +
4584     static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
4585     unsigned long end, struct mm_walk *walk)
4586     {
4587     @@ -1444,13 +1470,13 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
4588     pte_t *orig_pte;
4589     pte_t *pte;
4590    
4591     +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
4592     if (pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
4593     - pte_t huge_pte = *(pte_t *)pmd;
4594     struct page *page;
4595    
4596     - page = can_gather_numa_stats(huge_pte, vma, addr);
4597     + page = can_gather_numa_stats_pmd(*pmd, vma, addr);
4598     if (page)
4599     - gather_stats(page, md, pte_dirty(huge_pte),
4600     + gather_stats(page, md, pmd_dirty(*pmd),
4601     HPAGE_PMD_SIZE/PAGE_SIZE);
4602     spin_unlock(ptl);
4603     return 0;
4604     @@ -1458,6 +1484,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
4605    
4606     if (pmd_trans_unstable(pmd))
4607     return 0;
4608     +#endif
4609     orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
4610     do {
4611     struct page *page = can_gather_numa_stats(*pte, vma, addr);
4612     diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
4613     index e56272c919b5..bf2d34c9d804 100644
4614     --- a/include/asm-generic/futex.h
4615     +++ b/include/asm-generic/futex.h
4616     @@ -108,11 +108,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
4617     u32 val;
4618    
4619     preempt_disable();
4620     - if (unlikely(get_user(val, uaddr) != 0))
4621     + if (unlikely(get_user(val, uaddr) != 0)) {
4622     + preempt_enable();
4623     return -EFAULT;
4624     + }
4625    
4626     - if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
4627     + if (val == oldval && unlikely(put_user(newval, uaddr) != 0)) {
4628     + preempt_enable();
4629     return -EFAULT;
4630     + }
4631    
4632     *uval = val;
4633     preempt_enable();
4634     diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
4635     index 461a0558bca4..cebecff536a3 100644
4636     --- a/include/drm/drm_cache.h
4637     +++ b/include/drm/drm_cache.h
4638     @@ -39,6 +39,8 @@ static inline bool drm_arch_can_wc_memory(void)
4639     {
4640     #if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
4641     return false;
4642     +#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
4643     + return false;
4644     #else
4645     return true;
4646     #endif
4647     diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
4648     index a7c7f74808a4..8da263299754 100644
4649     --- a/include/linux/cgroup-defs.h
4650     +++ b/include/linux/cgroup-defs.h
4651     @@ -434,6 +434,7 @@ struct cgroup_subsys {
4652     int (*can_attach)(struct cgroup_taskset *tset);
4653     void (*cancel_attach)(struct cgroup_taskset *tset);
4654     void (*attach)(struct cgroup_taskset *tset);
4655     + void (*post_attach)(void);
4656     int (*can_fork)(struct task_struct *task, void **priv_p);
4657     void (*cancel_fork)(struct task_struct *task, void *priv);
4658     void (*fork)(struct task_struct *task, void *priv);
4659     diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
4660     index fea160ee5803..85a868ccb493 100644
4661     --- a/include/linux/cpuset.h
4662     +++ b/include/linux/cpuset.h
4663     @@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
4664     task_unlock(current);
4665     }
4666    
4667     -extern void cpuset_post_attach_flush(void);
4668     -
4669     #else /* !CONFIG_CPUSETS */
4670    
4671     static inline bool cpusets_enabled(void) { return false; }
4672     @@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
4673     return false;
4674     }
4675    
4676     -static inline void cpuset_post_attach_flush(void)
4677     -{
4678     -}
4679     -
4680     #endif /* !CONFIG_CPUSETS */
4681    
4682     #endif /* _LINUX_CPUSET_H */
4683     diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
4684     index 0b473cbfa7ef..a91b67b18a73 100644
4685     --- a/include/linux/mlx5/device.h
4686     +++ b/include/linux/mlx5/device.h
4687     @@ -334,6 +334,17 @@ enum {
4688     MLX5_CAP_OFF_CMDIF_CSUM = 46,
4689     };
4690    
4691     +enum {
4692     + /*
4693     + * Max wqe size for rdma read is 512 bytes, so this
4694     + * limits our max_sge_rd as the wqe needs to fit:
4695     + * - ctrl segment (16 bytes)
4696     + * - rdma segment (16 bytes)
4697     + * - scatter elements (16 bytes each)
4698     + */
4699     + MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
4700     +};
4701     +
4702     struct mlx5_inbox_hdr {
4703     __be16 opcode;
4704     u8 rsvd[4];
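The comment above encodes a small piece of arithmetic worth spelling out: a
512-byte RDMA-read WQE minus one 16-byte ctrl segment and one 16-byte rdma
segment leaves room for 30 scatter elements of 16 bytes each. A one-line check:

	#include <stdio.h>

	int main(void)
	{
		/* 512-byte WQE - ctrl seg (16) - rdma seg (16), 16 bytes per SGE */
		printf("MLX5_MAX_SGE_RD = %d\n", (512 - 16 - 16) / 16); /* 30 */
		return 0;
	}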
4705     diff --git a/include/linux/mm.h b/include/linux/mm.h
4706     index 00bad7793788..fb8b20e5d021 100644
4707     --- a/include/linux/mm.h
4708     +++ b/include/linux/mm.h
4709     @@ -1084,6 +1084,8 @@ struct zap_details {
4710    
4711     struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
4712     pte_t pte);
4713     +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
4714     + pmd_t pmd);
4715    
4716     int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
4717     unsigned long size);
4718     diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
4719     index 647ebfe5174f..d4227a8a2a23 100644
4720     --- a/include/media/videobuf2-core.h
4721     +++ b/include/media/videobuf2-core.h
4722     @@ -363,6 +363,7 @@ struct vb2_ops {
4723     };
4724    
4725     struct vb2_buf_ops {
4726     + int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
4727     int (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
4728     int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
4729     struct vb2_plane *planes);
4730     diff --git a/include/rdma/ib.h b/include/rdma/ib.h
4731     index cf8f9e700e48..a6b93706b0fc 100644
4732     --- a/include/rdma/ib.h
4733     +++ b/include/rdma/ib.h
4734     @@ -34,6 +34,7 @@
4735     #define _RDMA_IB_H
4736    
4737     #include <linux/types.h>
4738     +#include <linux/sched.h>
4739    
4740     struct ib_addr {
4741     union {
4742     @@ -86,4 +87,19 @@ struct sockaddr_ib {
4743     __u64 sib_scope_id;
4744     };
4745    
4746     +/*
4747     + * The IB interfaces that use write() as bi-directional ioctl() are
4748     + * fundamentally unsafe, since there are lots of ways to trigger "write()"
4749     + * calls from various contexts with elevated privileges. That includes the
4750     + * traditional suid executable error message writes, but also various kernel
4751     + * interfaces that can write to file descriptors.
4752     + *
4753     + * This function provides protection for the legacy API by restricting the
4754     + * calling context.
4755     + */
4756     +static inline bool ib_safe_file_access(struct file *filp)
4757     +{
4758     + return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
4759     +}
4760     +
4761     #endif /* _RDMA_IB_H */
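Drivers are expected to call the helper early in their write() handlers and
reject the request when it fails. A sketch of what such a handler fragment
would look like (dev_write() and the -EACCES policy are illustrative, not part
of this patch):

	static ssize_t dev_write(struct file *filp, const char __user *buf,
				 size_t count, loff_t *pos)
	{
		/* refuse writes arriving via a foreign context or credentials */
		if (!ib_safe_file_access(filp))
			return -EACCES;

		/* ... parse and execute the command from @buf as before ... */
		return count;
	}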
4762     diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
4763     index c039f1d68a09..086168e18ca8 100644
4764     --- a/include/uapi/linux/v4l2-dv-timings.h
4765     +++ b/include/uapi/linux/v4l2-dv-timings.h
4766     @@ -183,7 +183,8 @@
4767    
4768     #define V4L2_DV_BT_CEA_3840X2160P24 { \
4769     .type = V4L2_DV_BT_656_1120, \
4770     - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4771     + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4772     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4773     297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
4774     V4L2_DV_BT_STD_CEA861, \
4775     V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4776     @@ -191,14 +192,16 @@
4777    
4778     #define V4L2_DV_BT_CEA_3840X2160P25 { \
4779     .type = V4L2_DV_BT_656_1120, \
4780     - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4781     + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4782     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4783     297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
4784     V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4785     }
4786    
4787     #define V4L2_DV_BT_CEA_3840X2160P30 { \
4788     .type = V4L2_DV_BT_656_1120, \
4789     - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4790     + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4791     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4792     297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
4793     V4L2_DV_BT_STD_CEA861, \
4794     V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4795     @@ -206,14 +209,16 @@
4796    
4797     #define V4L2_DV_BT_CEA_3840X2160P50 { \
4798     .type = V4L2_DV_BT_656_1120, \
4799     - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4800     + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4801     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4802     594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
4803     V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4804     }
4805    
4806     #define V4L2_DV_BT_CEA_3840X2160P60 { \
4807     .type = V4L2_DV_BT_656_1120, \
4808     - V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4809     + V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
4810     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4811     594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
4812     V4L2_DV_BT_STD_CEA861, \
4813     V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4814     @@ -221,7 +226,8 @@
4815    
4816     #define V4L2_DV_BT_CEA_4096X2160P24 { \
4817     .type = V4L2_DV_BT_656_1120, \
4818     - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4819     + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4820     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4821     297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
4822     V4L2_DV_BT_STD_CEA861, \
4823     V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4824     @@ -229,14 +235,16 @@
4825    
4826     #define V4L2_DV_BT_CEA_4096X2160P25 { \
4827     .type = V4L2_DV_BT_656_1120, \
4828     - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4829     + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4830     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4831     297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
4832     V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4833     }
4834    
4835     #define V4L2_DV_BT_CEA_4096X2160P30 { \
4836     .type = V4L2_DV_BT_656_1120, \
4837     - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4838     + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4839     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4840     297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
4841     V4L2_DV_BT_STD_CEA861, \
4842     V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4843     @@ -244,14 +252,16 @@
4844    
4845     #define V4L2_DV_BT_CEA_4096X2160P50 { \
4846     .type = V4L2_DV_BT_656_1120, \
4847     - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4848     + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4849     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4850     594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
4851     V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
4852     }
4853    
4854     #define V4L2_DV_BT_CEA_4096X2160P60 { \
4855     .type = V4L2_DV_BT_656_1120, \
4856     - V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
4857     + V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
4858     + V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
4859     594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
4860     V4L2_DV_BT_STD_CEA861, \
4861     V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
4862     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
4863     index dc94f8beb097..1c9d701f7a72 100644
4864     --- a/kernel/cgroup.c
4865     +++ b/kernel/cgroup.c
4866     @@ -2721,9 +2721,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
4867     size_t nbytes, loff_t off, bool threadgroup)
4868     {
4869     struct task_struct *tsk;
4870     + struct cgroup_subsys *ss;
4871     struct cgroup *cgrp;
4872     pid_t pid;
4873     - int ret;
4874     + int ssid, ret;
4875    
4876     if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
4877     return -EINVAL;
4878     @@ -2771,8 +2772,10 @@ out_unlock_rcu:
4879     rcu_read_unlock();
4880     out_unlock_threadgroup:
4881     percpu_up_write(&cgroup_threadgroup_rwsem);
4882     + for_each_subsys(ss, ssid)
4883     + if (ss->post_attach)
4884     + ss->post_attach();
4885     cgroup_kn_unlock(of->kn);
4886     - cpuset_post_attach_flush();
4887     return ret ?: nbytes;
4888     }
4889    
4890     @@ -4689,14 +4692,15 @@ static void css_free_work_fn(struct work_struct *work)
4891    
4892     if (ss) {
4893     /* css free path */
4894     + struct cgroup_subsys_state *parent = css->parent;
4895     int id = css->id;
4896    
4897     - if (css->parent)
4898     - css_put(css->parent);
4899     -
4900     ss->css_free(css);
4901     cgroup_idr_remove(&ss->css_idr, id);
4902     cgroup_put(cgrp);
4903     +
4904     + if (parent)
4905     + css_put(parent);
4906     } else {
4907     /* cgroup free path */
4908     atomic_dec(&cgrp->root->nr_cgrps);
4909     diff --git a/kernel/cpuset.c b/kernel/cpuset.c
4910     index 2ade632197d5..11eaf14b52c2 100644
4911     --- a/kernel/cpuset.c
4912     +++ b/kernel/cpuset.c
4913     @@ -57,7 +57,6 @@
4914     #include <asm/uaccess.h>
4915     #include <linux/atomic.h>
4916     #include <linux/mutex.h>
4917     -#include <linux/workqueue.h>
4918     #include <linux/cgroup.h>
4919     #include <linux/wait.h>
4920    
4921     @@ -1015,7 +1014,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
4922     }
4923     }
4924    
4925     -void cpuset_post_attach_flush(void)
4926     +static void cpuset_post_attach(void)
4927     {
4928     flush_workqueue(cpuset_migrate_mm_wq);
4929     }
4930     @@ -2083,6 +2082,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
4931     .can_attach = cpuset_can_attach,
4932     .cancel_attach = cpuset_cancel_attach,
4933     .attach = cpuset_attach,
4934     + .post_attach = cpuset_post_attach,
4935     .bind = cpuset_bind,
4936     .legacy_cftypes = files,
4937     .early_init = 1,
4938     diff --git a/kernel/futex.c b/kernel/futex.c
4939     index 461c72b2dac2..9d8163afd87c 100644
4940     --- a/kernel/futex.c
4941     +++ b/kernel/futex.c
4942     @@ -1244,10 +1244,20 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
4943     if (unlikely(should_fail_futex(true)))
4944     ret = -EFAULT;
4945    
4946     - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
4947     + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
4948     ret = -EFAULT;
4949     - else if (curval != uval)
4950     - ret = -EINVAL;
4951     + } else if (curval != uval) {
4952     + /*
4953     + * If an unconditional UNLOCK_PI operation (user space did not
4954     + * try the TID->0 transition) raced with a waiter setting the
4955     + * FUTEX_WAITERS flag between get_user() and locking the hash
4956     + * bucket lock, retry the operation.
4957     + */
4958     + if ((FUTEX_TID_MASK & curval) == uval)
4959     + ret = -EAGAIN;
4960     + else
4961     + ret = -EINVAL;
4962     + }
4963     if (ret) {
4964     raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
4965     return ret;
4966     @@ -1474,8 +1484,8 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
4967     if (likely(&hb1->chain != &hb2->chain)) {
4968     plist_del(&q->list, &hb1->chain);
4969     hb_waiters_dec(hb1);
4970     - plist_add(&q->list, &hb2->chain);
4971     hb_waiters_inc(hb2);
4972     + plist_add(&q->list, &hb2->chain);
4973     q->lock_ptr = &hb2->lock;
4974     }
4975     get_futex_key_refs(key2);
4976     @@ -2538,6 +2548,15 @@ retry:
4977     if (ret == -EFAULT)
4978     goto pi_faulted;
4979     /*
4980     + * An unconditional UNLOCK_PI op raced against a waiter
4981     + * setting the FUTEX_WAITERS bit. Try again.
4982     + */
4983     + if (ret == -EAGAIN) {
4984     + spin_unlock(&hb->lock);
4985     + put_futex_key(&key);
4986     + goto retry;
4987     + }
4988     + /*
4989     * wake_futex_pi has detected invalid state. Tell user
4990     * space.
4991     */
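The two hunks above distinguish a transient race (only the FUTEX_WAITERS bit
changed under us, so retry) from a genuine inconsistency (the owner TID itself
differs, so fail). A compact userspace sketch of that retry discipline in C11
atomics (constants and names are illustrative, not the kernel implementation):

	#include <stdatomic.h>
	#include <stdio.h>

	#define FUTEX_WAITERS	0x80000000u
	#define FUTEX_TID_MASK	0x3fffffffu

	static int unlock_pi(atomic_uint *uaddr, unsigned int my_tid,
			     unsigned int newval)
	{
		for (;;) {
			unsigned int cur = atomic_load(uaddr);

			if ((cur & FUTEX_TID_MASK) != my_tid)
				return -1;	/* real inconsistency: -EINVAL */
			if (atomic_compare_exchange_strong(uaddr, &cur, newval))
				return 0;	/* unlocked */
			/* CAS failed: the word changed under us, e.g. a waiter
			 * set FUTEX_WAITERS. Re-check and retry, mirroring the
			 * -EAGAIN path above. */
		}
	}

	int main(void)
	{
		atomic_uint uval = 42;	/* held by TID 42, no waiters yet */

		printf("unlock_pi: %d\n", unlock_pi(&uval, 42, 0));
		return 0;
	}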
4992     diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
4993     index 5b9102a47ea5..c835270f0c2f 100644
4994     --- a/kernel/locking/mcs_spinlock.h
4995     +++ b/kernel/locking/mcs_spinlock.h
4996     @@ -67,7 +67,13 @@ void mcs_spin_lock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
4997     node->locked = 0;
4998     node->next = NULL;
4999    
5000     - prev = xchg_acquire(lock, node);
5001     + /*
5002     + * We rely on the full barrier with global transitivity implied by the
5003     + * below xchg() to order the initialization stores above against any
5004     + * observation of @node, and to provide the ACQUIRE ordering associated
5005     + * with a LOCK primitive.
5006     + */
5007     + prev = xchg(lock, node);
5008     if (likely(prev == NULL)) {
5009     /*
5010     * Lock acquired, don't need to set node->locked to 1. Threads
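The hunk above strengthens xchg_acquire() to a full xchg() because the node's
initialization stores must be ordered before the node becomes reachable
through *lock. A C11 rendering of the same publication step (a sketch of the
idea, not the kernel's implementation):

	#include <stdatomic.h>
	#include <stddef.h>

	struct mcs_node {
		struct mcs_node *_Atomic next;
		atomic_int locked;
	};

	static void mcs_lock(struct mcs_node *_Atomic *lock, struct mcs_node *node)
	{
		struct mcs_node *prev;

		atomic_store_explicit(&node->locked, 0, memory_order_relaxed);
		atomic_store_explicit(&node->next, NULL, memory_order_relaxed);

		/* seq_cst exchange: orders the init stores above before any
		 * observation of @node via *lock, and acts as the ACQUIRE of
		 * a LOCK operation. */
		prev = atomic_exchange(lock, node);
		if (!prev)
			return;		/* uncontended: lock acquired */

		atomic_store_explicit(&prev->next, node, memory_order_release);
		while (!atomic_load_explicit(&node->locked, memory_order_acquire))
			;		/* spin until the predecessor hands off */
	}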
5011     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5012     index 70e5e09341f1..55bebf924946 100644
5013     --- a/kernel/sched/core.c
5014     +++ b/kernel/sched/core.c
5015     @@ -7693,7 +7693,7 @@ void set_curr_task(int cpu, struct task_struct *p)
5016     /* task_group_lock serializes the addition/removal of task groups */
5017     static DEFINE_SPINLOCK(task_group_lock);
5018    
5019     -static void free_sched_group(struct task_group *tg)
5020     +static void sched_free_group(struct task_group *tg)
5021     {
5022     free_fair_sched_group(tg);
5023     free_rt_sched_group(tg);
5024     @@ -7719,7 +7719,7 @@ struct task_group *sched_create_group(struct task_group *parent)
5025     return tg;
5026    
5027     err:
5028     - free_sched_group(tg);
5029     + sched_free_group(tg);
5030     return ERR_PTR(-ENOMEM);
5031     }
5032    
5033     @@ -7739,17 +7739,16 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
5034     }
5035    
5036     /* rcu callback to free various structures associated with a task group */
5037     -static void free_sched_group_rcu(struct rcu_head *rhp)
5038     +static void sched_free_group_rcu(struct rcu_head *rhp)
5039     {
5040     /* now it should be safe to free those cfs_rqs */
5041     - free_sched_group(container_of(rhp, struct task_group, rcu));
5042     + sched_free_group(container_of(rhp, struct task_group, rcu));
5043     }
5044    
5045     -/* Destroy runqueue etc associated with a task group */
5046     void sched_destroy_group(struct task_group *tg)
5047     {
5048     /* wait for possible concurrent references to cfs_rqs complete */
5049     - call_rcu(&tg->rcu, free_sched_group_rcu);
5050     + call_rcu(&tg->rcu, sched_free_group_rcu);
5051     }
5052    
5053     void sched_offline_group(struct task_group *tg)
5054     @@ -8210,31 +8209,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
5055     if (IS_ERR(tg))
5056     return ERR_PTR(-ENOMEM);
5057    
5058     + sched_online_group(tg, parent);
5059     +
5060     return &tg->css;
5061     }
5062    
5063     -static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
5064     +static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
5065     {
5066     struct task_group *tg = css_tg(css);
5067     - struct task_group *parent = css_tg(css->parent);
5068    
5069     - if (parent)
5070     - sched_online_group(tg, parent);
5071     - return 0;
5072     + sched_offline_group(tg);
5073     }
5074    
5075     static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
5076     {
5077     struct task_group *tg = css_tg(css);
5078    
5079     - sched_destroy_group(tg);
5080     -}
5081     -
5082     -static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
5083     -{
5084     - struct task_group *tg = css_tg(css);
5085     -
5086     - sched_offline_group(tg);
5087     + /*
5088     + * Relies on the RCU grace period between css_released() and this.
5089     + */
5090     + sched_free_group(tg);
5091     }
5092    
5093     static void cpu_cgroup_fork(struct task_struct *task, void *private)
5094     @@ -8594,9 +8588,8 @@ static struct cftype cpu_files[] = {
5095    
5096     struct cgroup_subsys cpu_cgrp_subsys = {
5097     .css_alloc = cpu_cgroup_css_alloc,
5098     + .css_released = cpu_cgroup_css_released,
5099     .css_free = cpu_cgroup_css_free,
5100     - .css_online = cpu_cgroup_css_online,
5101     - .css_offline = cpu_cgroup_css_offline,
5102     .fork = cpu_cgroup_fork,
5103     .can_attach = cpu_cgroup_can_attach,
5104     .attach = cpu_cgroup_attach,
5105     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
5106     index 450c21fd0e6e..0ec05948a97b 100644
5107     --- a/kernel/workqueue.c
5108     +++ b/kernel/workqueue.c
5109     @@ -649,6 +649,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
5110     */
5111     smp_wmb();
5112     set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
5113     + /*
5114     + * The following mb guarantees that the previous clear of a PENDING bit
5115     + * will not be reordered with any speculative LOADS or STORES from
5116     + * work->current_func, which is executed afterwards. This possible
5117     + * reordering can lead to a missed execution on an attempt to queue
5118     + * the same @work. E.g. consider this case:
5119     + *
5120     + * CPU#0 CPU#1
5121     + * ---------------------------- --------------------------------
5122     + *
5123     + * 1 STORE event_indicated
5124     + * 2 queue_work_on() {
5125     + * 3 test_and_set_bit(PENDING)
5126     + * 4 } set_..._and_clear_pending() {
5127     + * 5 set_work_data() # clear bit
5128     + * 6 smp_mb()
5129     + * 7 work->current_func() {
5130     + * 8 LOAD event_indicated
5131     + * }
5132     + *
5133     + * Without an explicit full barrier, the speculative LOAD on line 8 can
5134     + * be executed before CPU#0 does the STORE on line 1. If that happens,
5135     + * CPU#0 observes that the PENDING bit is still set, and a new execution
5136     + * of the @work is not queued, in the hope that CPU#1 will eventually
5137     + * finish the queued @work. Meanwhile CPU#1 does not see that
5138     + * event_indicated is set, because the speculative LOAD was executed
5139     + * before the actual STORE.
5140     + */
5141     + smp_mb();
5142     }
5143    
5144     static void clear_work_data(struct work_struct *work)
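In memory-model terms the diagram above is the classic store-buffering pattern:
each CPU stores one flag and then loads the other, and without a full barrier
both loads can observe the old values. A userspace sketch of the two sides with
a C11 fence standing in for the added smp_mb() (names mirror the diagram, not
real kernel symbols):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdio.h>

	static atomic_bool pending;
	static atomic_bool event_indicated;

	static bool queue_work(void)	/* CPU#0: lines 1-3 of the diagram */
	{
		atomic_store(&event_indicated, true);	 /* 1: STORE event */
		return !atomic_exchange(&pending, true); /* 3: test_and_set */
	}

	static void finish_work(void)	/* CPU#1: lines 5-8 of the diagram */
	{
		atomic_store_explicit(&pending, false,	 /* 5: clear PENDING */
				      memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst); /* 6: the smp_mb() */
		if (atomic_load_explicit(&event_indicated, /* 8: LOAD event */
					 memory_order_relaxed))
			puts("event observed after clearing PENDING");
	}

	int main(void)
	{
		queue_work();
		finish_work();
		return 0;
	}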
5145     diff --git a/lib/assoc_array.c b/lib/assoc_array.c
5146     index 03dd576e6773..59fd7c0b119c 100644
5147     --- a/lib/assoc_array.c
5148     +++ b/lib/assoc_array.c
5149     @@ -524,7 +524,9 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit,
5150     free_slot = i;
5151     continue;
5152     }
5153     - if (ops->compare_object(assoc_array_ptr_to_leaf(ptr), index_key)) {
5154     + if (assoc_array_ptr_is_leaf(ptr) &&
5155     + ops->compare_object(assoc_array_ptr_to_leaf(ptr),
5156     + index_key)) {
5157     pr_devel("replace in slot %d\n", i);
5158     edit->leaf_p = &node->slots[i];
5159     edit->dead_leaf = node->slots[i];
5160     diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
5161     index abcecdc2d0f2..0710a62ad2f6 100644
5162     --- a/lib/lz4/lz4defs.h
5163     +++ b/lib/lz4/lz4defs.h
5164     @@ -11,8 +11,7 @@
5165     /*
5166     * Detects 64 bits mode
5167     */
5168     -#if (defined(__x86_64__) || defined(__x86_64) || defined(__amd64__) \
5169     - || defined(__ppc64__) || defined(__LP64__))
5170     +#if defined(CONFIG_64BIT)
5171     #define LZ4_ARCH64 1
5172     #else
5173     #define LZ4_ARCH64 0
5174     @@ -35,6 +34,10 @@ typedef struct _U64_S { u64 v; } U64_S;
5175    
5176     #define PUT4(s, d) (A32(d) = A32(s))
5177     #define PUT8(s, d) (A64(d) = A64(s))
5178     +
5179     +#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
5180     + (d = s - A16(p))
5181     +
5182     #define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
5183     do { \
5184     A16(p) = v; \
5185     @@ -51,10 +54,13 @@ typedef struct _U64_S { u64 v; } U64_S;
5186     #define PUT8(s, d) \
5187     put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
5188    
5189     -#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
5190     - do { \
5191     - put_unaligned(v, (u16 *)(p)); \
5192     - p += 2; \
5193     +#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
5194     + (d = s - get_unaligned_le16(p))
5195     +
5196     +#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
5197     + do { \
5198     + put_unaligned_le16(v, (u16 *)(p)); \
5199     + p += 2; \
5200     } while (0)
5201     #endif
5202    
5203     @@ -140,9 +146,6 @@ typedef struct _U64_S { u64 v; } U64_S;
5204    
5205     #endif
5206    
5207     -#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
5208     - (d = s - get_unaligned_le16(p))
5209     -
5210     #define LZ4_WILDCOPY(s, d, e) \
5211     do { \
5212     LZ4_COPYPACKET(s, d); \
5213     diff --git a/lib/mpi/mpicoder.c b/lib/mpi/mpicoder.c
5214     index 3db76b8c1115..e00ff00e861c 100644
5215     --- a/lib/mpi/mpicoder.c
5216     +++ b/lib/mpi/mpicoder.c
5217     @@ -128,6 +128,23 @@ leave:
5218     }
5219     EXPORT_SYMBOL_GPL(mpi_read_from_buffer);
5220    
5221     +static int count_lzeros(MPI a)
5222     +{
5223     + mpi_limb_t alimb;
5224     + int i, lzeros = 0;
5225     +
5226     + for (i = a->nlimbs - 1; i >= 0; i--) {
5227     + alimb = a->d[i];
5228     + if (alimb == 0) {
5229     + lzeros += sizeof(mpi_limb_t);
5230     + } else {
5231     + lzeros += count_leading_zeros(alimb) / 8;
5232     + break;
5233     + }
5234     + }
5235     + return lzeros;
5236     +}
5237     +
5238     /**
5239     * mpi_read_buffer() - read MPI to a buffer provided by user (msb first)
5240     *
5241     @@ -146,7 +163,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
5242     uint8_t *p;
5243     mpi_limb_t alimb;
5244     unsigned int n = mpi_get_size(a);
5245     - int i, lzeros = 0;
5246     + int i, lzeros;
5247    
5248     if (buf_len < n || !buf || !nbytes)
5249     return -EINVAL;
5250     @@ -154,14 +171,7 @@ int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes,
5251     if (sign)
5252     *sign = a->sign;
5253    
5254     - p = (void *)&a->d[a->nlimbs] - 1;
5255     -
5256     - for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
5257     - if (!*p)
5258     - lzeros++;
5259     - else
5260     - break;
5261     - }
5262     + lzeros = count_lzeros(a);
5263    
5264     p = buf;
5265     *nbytes = n - lzeros;
5266     @@ -343,7 +353,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
5267     u8 *p, *p2;
5268     mpi_limb_t alimb, alimb2;
5269     unsigned int n = mpi_get_size(a);
5270     - int i, x, y = 0, lzeros = 0, buf_len;
5271     + int i, x, y = 0, lzeros, buf_len;
5272    
5273     if (!nbytes || *nbytes < n)
5274     return -EINVAL;
5275     @@ -351,14 +361,7 @@ int mpi_write_to_sgl(MPI a, struct scatterlist *sgl, unsigned *nbytes,
5276     if (sign)
5277     *sign = a->sign;
5278    
5279     - p = (void *)&a->d[a->nlimbs] - 1;
5280     -
5281     - for (i = a->nlimbs * sizeof(alimb) - 1; i >= 0; i--, p--) {
5282     - if (!*p)
5283     - lzeros++;
5284     - else
5285     - break;
5286     - }
5287     + lzeros = count_lzeros(a);
5288    
5289     *nbytes = n - lzeros;
5290     buf_len = sgl->length;
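The new count_lzeros() helper above walks the limb array from the most
significant end and counts leading zero bytes of the value, replacing the two
open-coded byte scans. An equivalent userspace version over 64-bit limbs
stored least-significant-limb first (using the GCC/Clang builtin in place of
the kernel's count_leading_zeros()):

	#include <stdint.h>
	#include <stdio.h>

	static int count_lzeros(const uint64_t *d, int nlimbs)
	{
		int i, lzeros = 0;

		for (i = nlimbs - 1; i >= 0; i--) {
			if (d[i] == 0) {
				lzeros += sizeof(uint64_t); /* all-zero limb */
			} else {
				lzeros += __builtin_clzll(d[i]) / 8;
				break;	/* stop at the first nonzero limb */
			}
		}
		return lzeros;
	}

	int main(void)
	{
		uint64_t d[2] = { 0x1234, 0 }; /* the value 0x1234, two limbs */

		/* 8 bytes for the zero top limb + 6 zero bytes in 0x1234 = 14 */
		printf("lzeros = %d\n", count_lzeros(d, 2));
		return 0;
	}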
5291     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
5292     index 62fe06bb7d04..530e6427f823 100644
5293     --- a/mm/huge_memory.c
5294     +++ b/mm/huge_memory.c
5295     @@ -2134,10 +2134,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
5296     * page fault if needed.
5297     */
5298     return 0;
5299     - if (vma->vm_ops)
5300     + if (vma->vm_ops || (vm_flags & VM_NO_THP))
5301     /* khugepaged not yet working on file or special mappings */
5302     return 0;
5303     - VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
5304     hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
5305     hend = vma->vm_end & HPAGE_PMD_MASK;
5306     if (hstart < hend)
5307     @@ -2498,8 +2497,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
5308     return false;
5309     if (is_vma_temporary_stack(vma))
5310     return false;
5311     - VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
5312     - return true;
5313     + return !(vma->vm_flags & VM_NO_THP);
5314     }
5315    
5316     static void collapse_huge_page(struct mm_struct *mm,
5317     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5318     index fc0bcc41d57f..6ba4dd988e2e 100644
5319     --- a/mm/memcontrol.c
5320     +++ b/mm/memcontrol.c
5321     @@ -196,6 +196,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
5322     /* "mc" and its members are protected by cgroup_mutex */
5323     static struct move_charge_struct {
5324     spinlock_t lock; /* for from, to */
5325     + struct mm_struct *mm;
5326     struct mem_cgroup *from;
5327     struct mem_cgroup *to;
5328     unsigned long flags;
5329     @@ -4800,6 +4801,8 @@ static void __mem_cgroup_clear_mc(void)
5330    
5331     static void mem_cgroup_clear_mc(void)
5332     {
5333     + struct mm_struct *mm = mc.mm;
5334     +
5335     /*
5336     * we must clear moving_task before waking up waiters at the end of
5337     * task migration.
5338     @@ -4809,7 +4812,10 @@ static void mem_cgroup_clear_mc(void)
5339     spin_lock(&mc.lock);
5340     mc.from = NULL;
5341     mc.to = NULL;
5342     + mc.mm = NULL;
5343     spin_unlock(&mc.lock);
5344     +
5345     + mmput(mm);
5346     }
5347    
5348     static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5349     @@ -4866,6 +4872,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5350     VM_BUG_ON(mc.moved_swap);
5351    
5352     spin_lock(&mc.lock);
5353     + mc.mm = mm;
5354     mc.from = from;
5355     mc.to = memcg;
5356     mc.flags = move_flags;
5357     @@ -4875,8 +4882,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5358     ret = mem_cgroup_precharge_mc(mm);
5359     if (ret)
5360     mem_cgroup_clear_mc();
5361     + } else {
5362     + mmput(mm);
5363     }
5364     - mmput(mm);
5365     return ret;
5366     }
5367    
5368     @@ -4985,11 +4993,11 @@ put: /* get_mctgt_type() gets the page */
5369     return ret;
5370     }
5371    
5372     -static void mem_cgroup_move_charge(struct mm_struct *mm)
5373     +static void mem_cgroup_move_charge(void)
5374     {
5375     struct mm_walk mem_cgroup_move_charge_walk = {
5376     .pmd_entry = mem_cgroup_move_charge_pte_range,
5377     - .mm = mm,
5378     + .mm = mc.mm,
5379     };
5380    
5381     lru_add_drain_all();
5382     @@ -5001,7 +5009,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
5383     atomic_inc(&mc.from->moving_account);
5384     synchronize_rcu();
5385     retry:
5386     - if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
5387     + if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
5388     /*
5389     * Someone who is holding the mmap_sem might be waiting in
5390     * waitq. So we cancel all extra charges, wake up all waiters,
5391     @@ -5018,23 +5026,16 @@ retry:
5392     * additional charge, the page walk just aborts.
5393     */
5394     walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
5395     - up_read(&mm->mmap_sem);
5396     + up_read(&mc.mm->mmap_sem);
5397     atomic_dec(&mc.from->moving_account);
5398     }
5399    
5400     -static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5401     +static void mem_cgroup_move_task(void)
5402     {
5403     - struct cgroup_subsys_state *css;
5404     - struct task_struct *p = cgroup_taskset_first(tset, &css);
5405     - struct mm_struct *mm = get_task_mm(p);
5406     -
5407     - if (mm) {
5408     - if (mc.to)
5409     - mem_cgroup_move_charge(mm);
5410     - mmput(mm);
5411     - }
5412     - if (mc.to)
5413     + if (mc.to) {
5414     + mem_cgroup_move_charge();
5415     mem_cgroup_clear_mc();
5416     + }
5417     }
5418     #else /* !CONFIG_MMU */
5419     static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5420     @@ -5044,7 +5045,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
5421     static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
5422     {
5423     }
5424     -static void mem_cgroup_move_task(struct cgroup_taskset *tset)
5425     +static void mem_cgroup_move_task(void)
5426     {
5427     }
5428     #endif
5429     @@ -5258,7 +5259,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
5430     .css_reset = mem_cgroup_css_reset,
5431     .can_attach = mem_cgroup_can_attach,
5432     .cancel_attach = mem_cgroup_cancel_attach,
5433     - .attach = mem_cgroup_move_task,
5434     + .post_attach = mem_cgroup_move_task,
5435     .bind = mem_cgroup_bind,
5436     .dfl_cftypes = memory_files,
5437     .legacy_cftypes = mem_cgroup_legacy_files,
5438     diff --git a/mm/memory.c b/mm/memory.c
5439     index b80bf4746b67..76dcee317714 100644
5440     --- a/mm/memory.c
5441     +++ b/mm/memory.c
5442     @@ -797,6 +797,46 @@ out:
5443     return pfn_to_page(pfn);
5444     }
5445    
5446     +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5447     +struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
5448     + pmd_t pmd)
5449     +{
5450     + unsigned long pfn = pmd_pfn(pmd);
5451     +
5452     + /*
5453     + * There is no pmd_special() but there may be special pmds, e.g.
5454     + * in a direct-access (dax) mapping, so let's just replicate the
5455     + * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
5456     + */
5457     + if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
5458     + if (vma->vm_flags & VM_MIXEDMAP) {
5459     + if (!pfn_valid(pfn))
5460     + return NULL;
5461     + goto out;
5462     + } else {
5463     + unsigned long off;
5464     + off = (addr - vma->vm_start) >> PAGE_SHIFT;
5465     + if (pfn == vma->vm_pgoff + off)
5466     + return NULL;
5467     + if (!is_cow_mapping(vma->vm_flags))
5468     + return NULL;
5469     + }
5470     + }
5471     +
5472     + if (is_zero_pfn(pfn))
5473     + return NULL;
5474     + if (unlikely(pfn > highest_memmap_pfn))
5475     + return NULL;
5476     +
5477     + /*
5478     + * NOTE! We still have PageReserved() pages in the page tables.
5479     + * eg. VDSO mappings can cause them to exist.
5480     + */
5481     +out:
5482     + return pfn_to_page(pfn);
5483     +}
5484     +#endif
5485     +
5486     /*
5487     * copy one vm_area from one task to the other. Assumes the page tables
5488     * already present in the new task to be cleared in the whole range
5489     diff --git a/mm/migrate.c b/mm/migrate.c
5490     index 6d17e0ab42d4..bbeb0b71fcf4 100644
5491     --- a/mm/migrate.c
5492     +++ b/mm/migrate.c
5493     @@ -963,7 +963,13 @@ out:
5494     dec_zone_page_state(page, NR_ISOLATED_ANON +
5495     page_is_file_cache(page));
5496     /* Soft-offlined page shouldn't go through lru cache list */
5497     - if (reason == MR_MEMORY_FAILURE) {
5498     + if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
5499     + /*
5500     + * With this release, we free the successfully migrated
5501     + * page and intentionally set PG_HWPoison on the just-freed
5502     + * page. Although it's rather odd, it's how the
5503     + * HWPoison flag works at the moment.
5504     + */
5505     put_page(page);
5506     if (!test_set_page_hwpoison(page))
5507     num_poisoned_pages_inc();
5508     diff --git a/mm/slub.c b/mm/slub.c
5509     index 46997517406e..65d5f92d51d2 100644
5510     --- a/mm/slub.c
5511     +++ b/mm/slub.c
5512     @@ -2819,6 +2819,7 @@ struct detached_freelist {
5513     void *tail;
5514     void *freelist;
5515     int cnt;
5516     + struct kmem_cache *s;
5517     };
5518    
5519     /*
5520     @@ -2833,8 +2834,9 @@ struct detached_freelist {
5521     * synchronization primitive. Look ahead in the array is limited due
5522     * to performance reasons.
5523     */
5524     -static int build_detached_freelist(struct kmem_cache *s, size_t size,
5525     - void **p, struct detached_freelist *df)
5526     +static inline
5527     +int build_detached_freelist(struct kmem_cache *s, size_t size,
5528     + void **p, struct detached_freelist *df)
5529     {
5530     size_t first_skipped_index = 0;
5531     int lookahead = 3;
5532     @@ -2850,8 +2852,11 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
5533     if (!object)
5534     return 0;
5535    
5536     + /* Support for memcg, compiler can optimize this out */
5537     + df->s = cache_from_obj(s, object);
5538     +
5539     /* Start new detached freelist */
5540     - set_freepointer(s, object, NULL);
5541     + set_freepointer(df->s, object, NULL);
5542     df->page = virt_to_head_page(object);
5543     df->tail = object;
5544     df->freelist = object;
5545     @@ -2866,7 +2871,7 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
5546     /* df->page is always set at this point */
5547     if (df->page == virt_to_head_page(object)) {
5548     /* Opportunity build freelist */
5549     - set_freepointer(s, object, df->freelist);
5550     + set_freepointer(df->s, object, df->freelist);
5551     df->freelist = object;
5552     df->cnt++;
5553     p[size] = NULL; /* mark object processed */
5554     @@ -2885,25 +2890,20 @@ static int build_detached_freelist(struct kmem_cache *s, size_t size,
5555     return first_skipped_index;
5556     }
5557    
5558     -
5559     /* Note that interrupts must be enabled when calling this function. */
5560     -void kmem_cache_free_bulk(struct kmem_cache *orig_s, size_t size, void **p)
5561     +void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
5562     {
5563     if (WARN_ON(!size))
5564     return;
5565    
5566     do {
5567     struct detached_freelist df;
5568     - struct kmem_cache *s;
5569     -
5570     - /* Support for memcg */
5571     - s = cache_from_obj(orig_s, p[size - 1]);
5572    
5573     size = build_detached_freelist(s, size, p, &df);
5574     if (unlikely(!df.page))
5575     continue;
5576    
5577     - slab_free(s, df.page, df.freelist, df.tail, df.cnt, _RET_IP_);
5578     + slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
5579     } while (likely(size));
5580     }
5581     EXPORT_SYMBOL(kmem_cache_free_bulk);
5582     diff --git a/mm/vmscan.c b/mm/vmscan.c
5583     index 2aec4241b42a..0c114e2b01d3 100644
5584     --- a/mm/vmscan.c
5585     +++ b/mm/vmscan.c
5586     @@ -2534,7 +2534,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
5587     sc->gfp_mask |= __GFP_HIGHMEM;
5588    
5589     for_each_zone_zonelist_nodemask(zone, z, zonelist,
5590     - requested_highidx, sc->nodemask) {
5591     + gfp_zone(sc->gfp_mask), sc->nodemask) {
5592     enum zone_type classzone_idx;
5593    
5594     if (!populated_zone(zone))
5595     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
5596     index 59651af8cc27..992b35fb8615 100644
5597     --- a/net/netlink/af_netlink.c
5598     +++ b/net/netlink/af_netlink.c
5599     @@ -1305,7 +1305,7 @@ static int netlink_release(struct socket *sock)
5600    
5601     skb_queue_purge(&sk->sk_write_queue);
5602    
5603     - if (nlk->portid) {
5604     + if (nlk->portid && nlk->bound) {
5605     struct netlink_notify n = {
5606     .net = sock_net(sk),
5607     .protocol = sk->sk_protocol,
5608     diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
5609     index 21e20353178e..63fb5ee212cf 100644
5610     --- a/net/sunrpc/cache.c
5611     +++ b/net/sunrpc/cache.c
5612     @@ -1182,14 +1182,14 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
5613     }
5614    
5615     crq->q.reader = 0;
5616     - crq->item = cache_get(h);
5617     crq->buf = buf;
5618     crq->len = 0;
5619     crq->readers = 0;
5620     spin_lock(&queue_lock);
5621     - if (test_bit(CACHE_PENDING, &h->flags))
5622     + if (test_bit(CACHE_PENDING, &h->flags)) {
5623     + crq->item = cache_get(h);
5624     list_add_tail(&crq->q.list, &detail->queue);
5625     - else
5626     + } else
5627     /* Lost a race, no longer PENDING, so don't enqueue */
5628     ret = -EAGAIN;
5629     spin_unlock(&queue_lock);
5630     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5631     index 75b0d23ee882..5d89f13a98db 100644
5632     --- a/net/wireless/nl80211.c
5633     +++ b/net/wireless/nl80211.c
5634     @@ -13161,7 +13161,7 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
5635     struct wireless_dev *wdev;
5636     struct cfg80211_beacon_registration *reg, *tmp;
5637    
5638     - if (state != NETLINK_URELEASE)
5639     + if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
5640     return NOTIFY_DONE;
5641    
5642     rcu_read_lock();
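
The nl80211 notifier runs for every netlink socket released anywhere in the system, and portids are only unique within one protocol. Without the protocol test, releasing, say, a NETLINK_ROUTE socket whose portid happened to match a registered NETLINK_GENERIC one could tear down unrelated nl80211 state:

    /* Bail out unless this release concerns our protocol; the portid
     * comparisons further down are only meaningful within NETLINK_GENERIC. */
    if (state != NETLINK_URELEASE || notify->protocol != NETLINK_GENERIC)
            return NOTIFY_DONE;
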
5643     diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
5644     index 0b7dc2fd7bac..dd243d2abd87 100644
5645     --- a/scripts/kconfig/confdata.c
5646     +++ b/scripts/kconfig/confdata.c
5647     @@ -267,10 +267,8 @@ int conf_read_simple(const char *name, int def)
5648     if (in)
5649     goto load;
5650     sym_add_change_count(1);
5651     - if (!sym_defconfig_list) {
5652     - sym_calc_value(modules_sym);
5653     + if (!sym_defconfig_list)
5654     return 1;
5655     - }
5656    
5657     for_all_defaults(sym_defconfig_list, prop) {
5658     if (expr_calc_value(prop->visible.expr) == no ||
5659     @@ -403,7 +401,6 @@ setsym:
5660     }
5661     free(line);
5662     fclose(in);
5663     - sym_calc_value(modules_sym);
5664     return 0;
5665     }
5666    
5667     @@ -414,8 +411,12 @@ int conf_read(const char *name)
5668    
5669     sym_set_change_count(0);
5670    
5671     - if (conf_read_simple(name, S_DEF_USER))
5672     + if (conf_read_simple(name, S_DEF_USER)) {
5673     + sym_calc_value(modules_sym);
5674     return 1;
5675     + }
5676     +
5677     + sym_calc_value(modules_sym);
5678    
5679     for_all_symbols(i, sym) {
5680     sym_calc_value(sym);
5681     @@ -846,6 +847,7 @@ static int conf_split_config(void)
5682    
5683     name = conf_get_autoconfig_name();
5684     conf_read_simple(name, S_DEF_AUTO);
5685     + sym_calc_value(modules_sym);
5686    
5687     if (chdir("include/config"))
5688     return 1;
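
The three scripts/kconfig/confdata.c hunks are one coordinated move: sym_calc_value(modules_sym) is hoisted out of conf_read_simple() and up to the points where a read actually completes. conf_read() now recalculates no matter how conf_read_simple() returns (previously some early-return paths skipped it), and conf_split_config() recalculates after its S_DEF_AUTO read. The resulting call shape:

    conf_read(name)
        conf_read_simple(name, S_DEF_USER)  /* no recalculation inside      */
        sym_calc_value(modules_sym);        /* exactly once, at the caller  */
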
5689     diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
5690     index 5c4fa8eba1d0..367dbf0d285e 100644
5691     --- a/sound/pci/hda/hda_generic.c
5692     +++ b/sound/pci/hda/hda_generic.c
5693     @@ -843,7 +843,7 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
5694     bool allow_powerdown)
5695     {
5696     hda_nid_t nid, changed = 0;
5697     - int i, state;
5698     + int i, state, power;
5699    
5700     for (i = 0; i < path->depth; i++) {
5701     nid = path->path[i];
5702     @@ -855,7 +855,9 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
5703     state = AC_PWRST_D0;
5704     else
5705     state = AC_PWRST_D3;
5706     - if (!snd_hda_check_power_state(codec, nid, state)) {
5707     + power = snd_hda_codec_read(codec, nid, 0,
5708     + AC_VERB_GET_POWER_STATE, 0);
5709     + if (power != (state | (state << 4))) {
5710     snd_hda_codec_write(codec, nid, 0,
5711     AC_VERB_SET_POWER_STATE, state);
5712     changed = nid;
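
The hda_generic change stops trusting snd_hda_check_power_state(), which can compare against a stale cached value, and reads the codec's live power state instead. The comparison power != (state | (state << 4)) relies on how the GET_POWER_STATE verb packs its response:

    /* Response layout of AC_VERB_GET_POWER_STATE (low byte):
     *
     *    bits 3:0  PS-Set  - state last requested
     *    bits 7:4  PS-Act  - state the node is actually in
     *
     * So "fully settled in D3" reads back 0x33 == AC_PWRST_D3 | (AC_PWRST_D3 << 4),
     * and any mismatch (still transitioning, or never programmed) triggers a
     * fresh SET_POWER_STATE write. */
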
5713     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5714     index 2ff692dd2c5f..411630e9c034 100644
5715     --- a/sound/pci/hda/hda_intel.c
5716     +++ b/sound/pci/hda/hda_intel.c
5717     @@ -2207,6 +2207,9 @@ static const struct pci_device_id azx_ids[] = {
5718     /* Broxton-P(Apollolake) */
5719     { PCI_DEVICE(0x8086, 0x5a98),
5720     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
5721     + /* Broxton-T */
5722     + { PCI_DEVICE(0x8086, 0x1a98),
5723     + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
5724     /* Haswell */
5725     { PCI_DEVICE(0x8086, 0x0a0c),
5726     .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
5727     diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
5728     index a47e8ae0eb30..80bbadc83721 100644
5729     --- a/sound/pci/hda/patch_cirrus.c
5730     +++ b/sound/pci/hda/patch_cirrus.c
5731     @@ -361,6 +361,7 @@ static int cs_parse_auto_config(struct hda_codec *codec)
5732     {
5733     struct cs_spec *spec = codec->spec;
5734     int err;
5735     + int i;
5736    
5737     err = snd_hda_parse_pin_defcfg(codec, &spec->gen.autocfg, NULL, 0);
5738     if (err < 0)
5739     @@ -370,6 +371,19 @@ static int cs_parse_auto_config(struct hda_codec *codec)
5740     if (err < 0)
5741     return err;
5742    
5743     + /* keep the ADCs powered up when it's dynamically switchable */
5744     + if (spec->gen.dyn_adc_switch) {
5745     + unsigned int done = 0;
5746     + for (i = 0; i < spec->gen.input_mux.num_items; i++) {
5747     + int idx = spec->gen.dyn_adc_idx[i];
5748     + if (done & (1 << idx))
5749     + continue;
5750     + snd_hda_gen_fix_pin_power(codec,
5751     + spec->gen.adc_nids[idx]);
5752     + done |= 1 << idx;
5753     + }
5754     + }
5755     +
5756     return 0;
5757     }
5758    
5759     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5760     index 1402ba954b3d..ac4490a96863 100644
5761     --- a/sound/pci/hda/patch_realtek.c
5762     +++ b/sound/pci/hda/patch_realtek.c
5763     @@ -5449,6 +5449,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5764     SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5765     SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5766     SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5767     + SND_PCI_QUIRK(0x1028, 0x0669, "Dell Optiplex 9020m", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5768     SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5769     SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5770     SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5771     @@ -5583,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5772     SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
5773     SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
5774     SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5775     + SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5776     SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5777     SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5778     SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5779     diff --git a/sound/pci/pcxhr/pcxhr_core.c b/sound/pci/pcxhr/pcxhr_core.c
5780     index c5194f5b150a..d7e71f309299 100644
5781     --- a/sound/pci/pcxhr/pcxhr_core.c
5782     +++ b/sound/pci/pcxhr/pcxhr_core.c
5783     @@ -1341,5 +1341,6 @@ irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
5784     }
5785    
5786     pcxhr_msg_thread(mgr);
5787     + mutex_unlock(&mgr->lock);
5788     return IRQ_HANDLED;
5789     }
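
The pcxhr fix is a classic lock imbalance: the threaded handler takes mgr->lock earlier in the function (outside this hunk), but the IRQ_HANDLED exit path returned without releasing it, wedging the card on the next interrupt. A minimal sketch of the balanced shape, with the entry lock assumed from the driver:

    irqreturn_t pcxhr_threaded_irq(int irq, void *dev_id)
    {
            struct pcxhr_mgr *mgr = dev_id;

            mutex_lock(&mgr->lock);         /* taken at entry (not in hunk) */
            /* ... service DSP events ... */
            pcxhr_msg_thread(mgr);
            mutex_unlock(&mgr->lock);       /* the line this patch adds */
            return IRQ_HANDLED;
    }
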
5790     diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
5791     index f2beb1aa5763..b1c8bb39cdf1 100644
5792     --- a/sound/soc/codecs/rt5640.c
5793     +++ b/sound/soc/codecs/rt5640.c
5794     @@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
5795    
5796     /* Interface data select */
5797     static const char * const rt5640_data_select[] = {
5798     - "Normal", "left copy to right", "right copy to left", "Swap"};
5799     + "Normal", "Swap", "left copy to right", "right copy to left"};
5800    
5801     static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
5802     RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
5803     diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
5804     index 3deb8babeabb..243f42633989 100644
5805     --- a/sound/soc/codecs/rt5640.h
5806     +++ b/sound/soc/codecs/rt5640.h
5807     @@ -442,39 +442,39 @@
5808     #define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
5809     #define RT5640_IF1_DAC_SEL_SFT 14
5810     #define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
5811     -#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
5812     -#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
5813     -#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
5814     +#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
5815     +#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
5816     +#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
5817     #define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
5818     #define RT5640_IF1_ADC_SEL_SFT 12
5819     #define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
5820     -#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
5821     -#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
5822     -#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
5823     +#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
5824     +#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
5825     +#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
5826     #define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
5827     #define RT5640_IF2_DAC_SEL_SFT 10
5828     #define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
5829     -#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
5830     -#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
5831     -#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
5832     +#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
5833     +#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
5834     +#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
5835     #define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
5836     #define RT5640_IF2_ADC_SEL_SFT 8
5837     #define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
5838     -#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
5839     -#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
5840     -#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
5841     +#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
5842     +#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
5843     +#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
5844     #define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
5845     #define RT5640_IF3_DAC_SEL_SFT 6
5846     #define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
5847     -#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
5848     -#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
5849     -#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
5850     +#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
5851     +#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
5852     +#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
5853     #define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
5854     #define RT5640_IF3_ADC_SEL_SFT 4
5855     #define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
5856     -#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
5857     -#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
5858     -#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
5859     +#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
5860     +#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
5861     +#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
5862    
5863     /* REC Left Mixer Control 1 (0x3b) */
5864     #define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
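
The rt5640 pair of hunks fixes one inconsistency from two sides: the ALSA control enumeration (rt5640.c) and the register bitfield macros (rt5640.h) disagreed on how the interface data-select field is encoded, so selecting "Swap" really programmed left-copy. Both now follow the same order:

    /* Data-select encoding used consistently after both hunks
     * (2-bit field, same values for IF1/IF2/IF3, DAC and ADC):
     *
     *    0x0  Normal
     *    0x1  Swap
     *    0x2  left copy to right
     *    0x3  right copy to left
     */
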
5865     diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
5866     index e619d5651b09..080c78e88e10 100644
5867     --- a/sound/soc/codecs/ssm4567.c
5868     +++ b/sound/soc/codecs/ssm4567.c
5869     @@ -352,6 +352,11 @@ static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
5870     regcache_cache_only(ssm4567->regmap, !enable);
5871    
5872     if (enable) {
5873     + ret = regmap_write(ssm4567->regmap, SSM4567_REG_SOFT_RESET,
5874     + 0x00);
5875     + if (ret)
5876     + return ret;
5877     +
5878     ret = regmap_update_bits(ssm4567->regmap,
5879     SSM4567_REG_POWER_CTRL,
5880     SSM4567_POWER_SPWDN, 0x00);
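
The ssm4567 change inserts a soft reset at the start of the power-up path. The point is cache coherency: regcache_cache_only() is lifted just before this code, and resetting the part first guarantees the silicon is at its register defaults, so the register cache and the hardware agree before any cached writes are replayed:

    regmap_write(map, SSM4567_REG_SOFT_RESET, 0x00);    /* back to defaults */
    regmap_update_bits(map, SSM4567_REG_POWER_CTRL,
                       SSM4567_POWER_SPWDN, 0x00);      /* then power up    */
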
5881     diff --git a/sound/soc/samsung/s3c-i2s-v2.c b/sound/soc/samsung/s3c-i2s-v2.c
5882     index df65c5b494b1..b6ab3fc5789e 100644
5883     --- a/sound/soc/samsung/s3c-i2s-v2.c
5884     +++ b/sound/soc/samsung/s3c-i2s-v2.c
5885     @@ -709,7 +709,7 @@ static int s3c2412_i2s_resume(struct snd_soc_dai *dai)
5886     #endif
5887    
5888     int s3c_i2sv2_register_component(struct device *dev, int id,
5889     - struct snd_soc_component_driver *cmp_drv,
5890     + const struct snd_soc_component_driver *cmp_drv,
5891     struct snd_soc_dai_driver *dai_drv)
5892     {
5893     struct snd_soc_dai_ops *ops = (struct snd_soc_dai_ops *)dai_drv->ops;
5894     diff --git a/sound/soc/samsung/s3c-i2s-v2.h b/sound/soc/samsung/s3c-i2s-v2.h
5895     index 90abab364b49..d0684145ed1f 100644
5896     --- a/sound/soc/samsung/s3c-i2s-v2.h
5897     +++ b/sound/soc/samsung/s3c-i2s-v2.h
5898     @@ -101,7 +101,7 @@ extern int s3c_i2sv2_probe(struct snd_soc_dai *dai,
5899     * soc core.
5900     */
5901     extern int s3c_i2sv2_register_component(struct device *dev, int id,
5902     - struct snd_soc_component_driver *cmp_drv,
5903     + const struct snd_soc_component_driver *cmp_drv,
5904     struct snd_soc_dai_driver *dai_drv);
5905    
5906     #endif /* __SND_SOC_S3C24XX_S3C_I2SV2_I2S_H */
5907     diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
5908     index 416514fe9e63..afb70a5d4fd3 100644
5909     --- a/sound/soc/soc-dapm.c
5910     +++ b/sound/soc/soc-dapm.c
5911     @@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
5912     int count = 0;
5913     char *state = "not set";
5914    
5915     + /* card won't be set for the dummy component, as a spot fix
5916     + * we're checking for that case specifically here but in future
5917     + * we will ensure that the dummy component looks like others.
5918     + */
5919     + if (!cmpnt->card)
5920     + return 0;
5921     +
5922     list_for_each_entry(w, &cmpnt->card->widgets, list) {
5923     if (w->dapm != dapm)
5924     continue;
5925     diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
5926     index 4e074a660826..90c3558c2c12 100644
5927     --- a/tools/perf/Documentation/perf-stat.txt
5928     +++ b/tools/perf/Documentation/perf-stat.txt
5929     @@ -62,6 +62,14 @@ OPTIONS
5930     --scale::
5931     scale/normalize counter values
5932    
5933     +-d::
5934     +--detailed::
5935     + print more detailed statistics, can be specified up to 3 times
5936     +
5937     + -d: detailed events, L1 and LLC data cache
5938     + -d -d: more detailed events, dTLB and iTLB events
5939     + -d -d -d: very detailed events, adding prefetch events
5940     +
5941     -r::
5942     --repeat=<n>::
5943     repeat command and print average + stddev (max: 100). 0 means forever.
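
The perf-stat documentation hunk back-fills a long-existing option. In practice the levels stack by repetition: for example, perf stat -d -d sleep 1 adds the dTLB/iTLB events on top of the L1/LLC cache set, and a third -d adds prefetch events (sleep 1 is just a placeholder workload).
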
5944     diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
5945     index 81def6c3f24b..3900386a3629 100644
5946     --- a/tools/perf/ui/browsers/hists.c
5947     +++ b/tools/perf/ui/browsers/hists.c
5948     @@ -2059,10 +2059,12 @@ skip_annotation:
5949     *
5950     * See hist_browser__show_entry.
5951     */
5952     - nr_options += add_script_opt(browser,
5953     - &actions[nr_options],
5954     - &options[nr_options],
5955     - NULL, browser->selection->sym);
5956     + if (sort__has_sym && browser->selection->sym) {
5957     + nr_options += add_script_opt(browser,
5958     + &actions[nr_options],
5959     + &options[nr_options],
5960     + NULL, browser->selection->sym);
5961     + }
5962     }
5963     nr_options += add_script_opt(browser, &actions[nr_options],
5964     &options[nr_options], NULL, NULL);
5965     diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
5966     index 8b10621b415c..956187bf1a85 100644
5967     --- a/tools/perf/util/event.c
5968     +++ b/tools/perf/util/event.c
5969     @@ -274,7 +274,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
5970     strcpy(execname, "");
5971    
5972     /* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
5973     - n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
5974     + n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %[^\n]\n",
5975     &event->mmap2.start, &event->mmap2.len, prot,
5976     &event->mmap2.pgoff, &event->mmap2.maj,
5977     &event->mmap2.min,
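
The sscanf tweak matters for file names containing spaces: %s stops at the first blank, while %[^\n] consumes the remainder of the line. The behaviour difference on a /proc/<pid>/maps line of the format shown in the context above (path chosen for illustration):

    /*    00400000-0040c000 r-xp 00000000 fd:01 41038   /opt/My App/cat
     *
     *    "%s"     -> execname = "/opt/My"          (stops at the space)
     *    "%[^\n]" -> execname = "/opt/My App/cat"  (rest of the line)
     *
     * so the synthesized mmap2 event no longer carries a truncated name. */
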
5978     diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
5979     index d1392194a9a9..b4b96120fc3b 100644
5980     --- a/tools/perf/util/evlist.c
5981     +++ b/tools/perf/util/evlist.c
5982     @@ -1211,12 +1211,12 @@ void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
5983     */
5984     if (cpus != evlist->cpus) {
5985     cpu_map__put(evlist->cpus);
5986     - evlist->cpus = cpus;
5987     + evlist->cpus = cpu_map__get(cpus);
5988     }
5989    
5990     if (threads != evlist->threads) {
5991     thread_map__put(evlist->threads);
5992     - evlist->threads = threads;
5993     + evlist->threads = thread_map__get(threads);
5994     }
5995    
5996     perf_evlist__propagate_maps(evlist);
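
The evlist fix applies the usual refcounting rule: a structure that stores a pointer to a refcounted object must own a reference to it. Storing the caller's map without a get meant evlist and caller shared a single reference, risking an eventual double-put or use-after-free:

    cpu_map__put(evlist->cpus);             /* drop the old map's reference */
    evlist->cpus = cpu_map__get(cpus);      /* take our own on the new map  */

The caller keeps (and eventually puts) its original reference; the get/put helpers tolerate NULL, so unset maps remain fine.
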
5997     diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
5998     index 97f963a3dcb9..9227c2f076c3 100644
5999     --- a/tools/perf/util/intel-pt.c
6000     +++ b/tools/perf/util/intel-pt.c
6001     @@ -1127,7 +1127,7 @@ static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
6002     pr_err("Intel Processor Trace: failed to deliver transaction event, error %d\n",
6003     ret);
6004    
6005     - if (pt->synth_opts.callchain)
6006     + if (pt->synth_opts.last_branch)
6007     intel_pt_reset_last_branch_rb(ptq);
6008    
6009     return ret;
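
The intel-pt one-liner fixes a copy-paste condition: the last-branch ring buffer is only allocated when synth_opts.last_branch is set, so allocation and reset must be gated by the same flag:

    /* alloc:  if (pt->synth_opts.last_branch)  ptq->last_branch = ...
     * reset:  if (pt->synth_opts.last_branch)  intel_pt_reset_last_branch_rb(ptq);
     *
     * Gating the reset on .callchain could dereference a buffer that was
     * never allocated (callchain without last_branch), or leave stale
     * entries behind (last_branch without callchain). */
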
6010     diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
6011     index ea6064696fe4..a7b9022b5c8f 100644
6012     --- a/virt/kvm/arm/arch_timer.c
6013     +++ b/virt/kvm/arm/arch_timer.c
6014     @@ -86,6 +86,8 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
6015     vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
6016     vcpu->arch.timer_cpu.armed = false;
6017    
6018     + WARN_ON(!kvm_timer_should_fire(vcpu));
6019     +
6020     /*
6021     * If the vcpu is blocked we want to wake it up so that it will see
6022     * the timer has expired when entering the guest.
6023     @@ -93,10 +95,46 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
6024     kvm_vcpu_kick(vcpu);
6025     }
6026    
6027     +static u64 kvm_timer_compute_delta(struct kvm_vcpu *vcpu)
6028     +{
6029     + cycle_t cval, now;
6030     +
6031     + cval = vcpu->arch.timer_cpu.cntv_cval;
6032     + now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
6033     +
6034     + if (now < cval) {
6035     + u64 ns;
6036     +
6037     + ns = cyclecounter_cyc2ns(timecounter->cc,
6038     + cval - now,
6039     + timecounter->mask,
6040     + &timecounter->frac);
6041     + return ns;
6042     + }
6043     +
6044     + return 0;
6045     +}
6046     +
6047     static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt)
6048     {
6049     struct arch_timer_cpu *timer;
6050     + struct kvm_vcpu *vcpu;
6051     + u64 ns;
6052     +
6053     timer = container_of(hrt, struct arch_timer_cpu, timer);
6054     + vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
6055     +
6056     + /*
6057     + * Check that the timer has really expired from the guest's
6058     + * PoV (NTP on the host may have forced it to expire
6059     + * early). If we should have slept longer, restart it.
6060     + */
6061     + ns = kvm_timer_compute_delta(vcpu);
6062     + if (unlikely(ns)) {
6063     + hrtimer_forward_now(hrt, ns_to_ktime(ns));
6064     + return HRTIMER_RESTART;
6065     + }
6066     +
6067     queue_work(wqueue, &timer->expired);
6068     return HRTIMER_NORESTART;
6069     }
6070     @@ -170,8 +208,6 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
6071     void kvm_timer_schedule(struct kvm_vcpu *vcpu)
6072     {
6073     struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
6074     - u64 ns;
6075     - cycle_t cval, now;
6076    
6077     BUG_ON(timer_is_armed(timer));
6078    
6079     @@ -191,14 +227,7 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu)
6080     return;
6081    
6082     /* The timer has not yet expired, schedule a background timer */
6083     - cval = timer->cntv_cval;
6084     - now = kvm_phys_timer_read() - vcpu->kvm->arch.timer.cntvoff;
6085     -
6086     - ns = cyclecounter_cyc2ns(timecounter->cc,
6087     - cval - now,
6088     - timecounter->mask,
6089     - &timecounter->frac);
6090     - timer_arm(timer, ns);
6091     + timer_arm(timer, kvm_timer_compute_delta(vcpu));
6092     }
6093    
6094     void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
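
The arch-timer patch centralizes the "time until the guest timer fires" computation in kvm_timer_compute_delta() and reuses it in the hrtimer callback to defend against early expiry: host NTP slewing can make the hrtimer fire before the guest-visible counter reaches cntv_cval, and rather than injecting a spurious interrupt, the callback re-arms itself for the remaining delta. The computation in one line of math (cyc2ns standing in for the cyclecounter_cyc2ns() call above):

    /* now  = kvm_phys_timer_read() - cntvoff       -- guest view of CNTVCT
     * left = (now < cval) ? cyc2ns(cval - now) : 0
     *
     * kvm_timer_expire() returns HRTIMER_RESTART after forwarding by
     * 'left' whenever it is non-zero, so a fast-running host clock can
     * never deliver the guest timer interrupt ahead of the guest's own
     * counter. */
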