Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0169-4.14.70-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 119404 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Makefile b/Makefile
2     index 3ecda1d2e23a..aa458afa7fa2 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 14
9     -SUBLEVEL = 69
10     +SUBLEVEL = 70
11     EXTRAVERSION =
12     NAME = Petit Gorille
13    
14     diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
15     index 21ac9f02407e..32acac9ab81a 100644
16     --- a/arch/arm/configs/imx_v6_v7_defconfig
17     +++ b/arch/arm/configs/imx_v6_v7_defconfig
18     @@ -289,7 +289,6 @@ CONFIG_USB_STORAGE=y
19     CONFIG_USB_CHIPIDEA=y
20     CONFIG_USB_CHIPIDEA_UDC=y
21     CONFIG_USB_CHIPIDEA_HOST=y
22     -CONFIG_USB_CHIPIDEA_ULPI=y
23     CONFIG_USB_SERIAL=m
24     CONFIG_USB_SERIAL_GENERIC=y
25     CONFIG_USB_SERIAL_FTDI_SIO=m
26     @@ -326,7 +325,6 @@ CONFIG_USB_GADGETFS=m
27     CONFIG_USB_FUNCTIONFS=m
28     CONFIG_USB_MASS_STORAGE=m
29     CONFIG_USB_G_SERIAL=m
30     -CONFIG_USB_ULPI_BUS=y
31     CONFIG_MMC=y
32     CONFIG_MMC_SDHCI=y
33     CONFIG_MMC_SDHCI_PLTFM=y
34     diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
35     index a4065966881a..57f0bc4cd9b8 100644
36     --- a/arch/arm/mach-rockchip/Kconfig
37     +++ b/arch/arm/mach-rockchip/Kconfig
38     @@ -18,6 +18,7 @@ config ARCH_ROCKCHIP
39     select ARM_GLOBAL_TIMER
40     select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
41     select ZONE_DMA if ARM_LPAE
42     + select PM
43     help
44     Support for Rockchip's Cortex-A9 Single-to-Quad-Core-SoCs
45     containing the RK2928, RK30xx and RK31xx series.
46     diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
47     index 6b54ee8c1262..456d21542250 100644
48     --- a/arch/arm64/Kconfig.platforms
49     +++ b/arch/arm64/Kconfig.platforms
50     @@ -148,6 +148,7 @@ config ARCH_ROCKCHIP
51     select GPIOLIB
52     select PINCTRL
53     select PINCTRL_ROCKCHIP
54     + select PM
55     select ROCKCHIP_TIMER
56     help
57     This enables support for the ARMv8 based Rockchip chipsets,
58     diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
59     index ea9bb4e0e9bb..e40f8a2df545 100644
60     --- a/arch/arm64/include/asm/cache.h
61     +++ b/arch/arm64/include/asm/cache.h
62     @@ -20,9 +20,14 @@
63    
64     #define CTR_L1IP_SHIFT 14
65     #define CTR_L1IP_MASK 3
66     +#define CTR_DMINLINE_SHIFT 16
67     +#define CTR_IMINLINE_SHIFT 0
68     #define CTR_CWG_SHIFT 24
69     #define CTR_CWG_MASK 15
70    
71     +#define CTR_CACHE_MINLINE_MASK \
72     + (0xf << CTR_DMINLINE_SHIFT | 0xf << CTR_IMINLINE_SHIFT)
73     +
74     #define CTR_L1IP(ctr) (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
75    
76     #define ICACHE_POLICY_VPIPT 0
77     diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
78     index 76c0d23ca161..7d6425d426ac 100644
79     --- a/arch/arm64/include/asm/cpucaps.h
80     +++ b/arch/arm64/include/asm/cpucaps.h
81     @@ -44,7 +44,8 @@
82     #define ARM64_HARDEN_BRANCH_PREDICTOR 24
83     #define ARM64_HARDEN_BP_POST_GUEST_EXIT 25
84     #define ARM64_SSBD 26
85     +#define ARM64_MISMATCHED_CACHE_TYPE 27
86    
87     -#define ARM64_NCAPS 27
88     +#define ARM64_NCAPS 28
89    
90     #endif /* __ASM_CPUCAPS_H */
91     diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
92     index eccdb28b4a39..3d6d7fae45de 100644
93     --- a/arch/arm64/kernel/cpu_errata.c
94     +++ b/arch/arm64/kernel/cpu_errata.c
95     @@ -16,6 +16,8 @@
96     * along with this program. If not, see <http://www.gnu.org/licenses/>.
97     */
98    
99     +#include <linux/arm-smccc.h>
100     +#include <linux/psci.h>
101     #include <linux/types.h>
102     #include <asm/cpu.h>
103     #include <asm/cputype.h>
104     @@ -45,12 +47,18 @@ is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
105     }
106    
107     static bool
108     -has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
109     - int scope)
110     +has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
111     + int scope)
112     {
113     + u64 mask = CTR_CACHE_MINLINE_MASK;
114     +
115     + /* Skip matching the min line sizes for cache type check */
116     + if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
117     + mask ^= arm64_ftr_reg_ctrel0.strict_mask;
118     +
119     WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
120     - return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
121     - (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
122     + return (read_cpuid_cachetype() & mask) !=
123     + (arm64_ftr_reg_ctrel0.sys_val & mask);
124     }
125    
126     static int cpu_enable_trap_ctr_access(void *__unused)
127     @@ -511,7 +519,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
128     {
129     .desc = "Mismatched cache line size",
130     .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
131     - .matches = has_mismatched_cache_line_size,
132     + .matches = has_mismatched_cache_type,
133     + .def_scope = SCOPE_LOCAL_CPU,
134     + .enable = cpu_enable_trap_ctr_access,
135     + },
136     + {
137     + .desc = "Mismatched cache type",
138     + .capability = ARM64_MISMATCHED_CACHE_TYPE,
139     + .matches = has_mismatched_cache_type,
140     .def_scope = SCOPE_LOCAL_CPU,
141     .enable = cpu_enable_trap_ctr_access,
142     },
143     diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
144     index 376cf12edf0c..003dd39225a0 100644
145     --- a/arch/arm64/kernel/cpufeature.c
146     +++ b/arch/arm64/kernel/cpufeature.c
147     @@ -180,14 +180,14 @@ static const struct arm64_ftr_bits ftr_ctr[] = {
148     ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1), /* IDC */
149     ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0), /* CWG */
150     ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0), /* ERG */
151     - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1), /* DminLine */
152     + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
153     /*
154     * Linux can handle differing I-cache policies. Userspace JITs will
155     * make use of *minLine.
156     * If we have differing I-cache policies, report it as the weakest - VIPT.
157     */
158     ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
159     - ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0), /* IminLine */
160     + ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
161     ARM64_FTR_END,
162     };
163    
164     diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
165     index 11f4bd07cce0..565cead12be2 100644
166     --- a/arch/powerpc/include/asm/uaccess.h
167     +++ b/arch/powerpc/include/asm/uaccess.h
168     @@ -223,10 +223,17 @@ do { \
169     } \
170     } while (0)
171    
172     +/*
173     + * This is a type: either unsigned long, if the argument fits into
174     + * that type, or otherwise unsigned long long.
175     + */
176     +#define __long_type(x) \
177     + __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
178     +
179     #define __get_user_nocheck(x, ptr, size) \
180     ({ \
181     long __gu_err; \
182     - unsigned long __gu_val; \
183     + __long_type(*(ptr)) __gu_val; \
184     const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
185     __chk_user_ptr(ptr); \
186     if (!is_kernel_addr((unsigned long)__gu_addr)) \
187     @@ -239,7 +246,7 @@ do { \
188     #define __get_user_check(x, ptr, size) \
189     ({ \
190     long __gu_err = -EFAULT; \
191     - unsigned long __gu_val = 0; \
192     + __long_type(*(ptr)) __gu_val = 0; \
193     const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
194     might_fault(); \
195     if (access_ok(VERIFY_READ, __gu_addr, (size))) \
196     @@ -251,7 +258,7 @@ do { \
197     #define __get_user_nosleep(x, ptr, size) \
198     ({ \
199     long __gu_err; \
200     - unsigned long __gu_val; \
201     + __long_type(*(ptr)) __gu_val; \
202     const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
203     __chk_user_ptr(ptr); \
204     __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
205     diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
206     index c09f0a6f8495..f65bb53df43b 100644
207     --- a/arch/powerpc/kernel/exceptions-64s.S
208     +++ b/arch/powerpc/kernel/exceptions-64s.S
209     @@ -1452,6 +1452,8 @@ TRAMP_REAL_BEGIN(stf_barrier_fallback)
210     TRAMP_REAL_BEGIN(rfi_flush_fallback)
211     SET_SCRATCH0(r13);
212     GET_PACA(r13);
213     + std r1,PACA_EXRFI+EX_R12(r13)
214     + ld r1,PACAKSAVE(r13)
215     std r9,PACA_EXRFI+EX_R9(r13)
216     std r10,PACA_EXRFI+EX_R10(r13)
217     std r11,PACA_EXRFI+EX_R11(r13)
218     @@ -1486,12 +1488,15 @@ TRAMP_REAL_BEGIN(rfi_flush_fallback)
219     ld r9,PACA_EXRFI+EX_R9(r13)
220     ld r10,PACA_EXRFI+EX_R10(r13)
221     ld r11,PACA_EXRFI+EX_R11(r13)
222     + ld r1,PACA_EXRFI+EX_R12(r13)
223     GET_SCRATCH0(r13);
224     rfid
225    
226     TRAMP_REAL_BEGIN(hrfi_flush_fallback)
227     SET_SCRATCH0(r13);
228     GET_PACA(r13);
229     + std r1,PACA_EXRFI+EX_R12(r13)
230     + ld r1,PACAKSAVE(r13)
231     std r9,PACA_EXRFI+EX_R9(r13)
232     std r10,PACA_EXRFI+EX_R10(r13)
233     std r11,PACA_EXRFI+EX_R11(r13)
234     @@ -1526,6 +1531,7 @@ TRAMP_REAL_BEGIN(hrfi_flush_fallback)
235     ld r9,PACA_EXRFI+EX_R9(r13)
236     ld r10,PACA_EXRFI+EX_R10(r13)
237     ld r11,PACA_EXRFI+EX_R11(r13)
238     + ld r1,PACA_EXRFI+EX_R12(r13)
239     GET_SCRATCH0(r13);
240     hrfid
241    
242     diff --git a/arch/powerpc/platforms/85xx/t1042rdb_diu.c b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
243     index 58fa3d319f1c..dac36ba82fea 100644
244     --- a/arch/powerpc/platforms/85xx/t1042rdb_diu.c
245     +++ b/arch/powerpc/platforms/85xx/t1042rdb_diu.c
246     @@ -9,8 +9,10 @@
247     * option) any later version.
248     */
249    
250     +#include <linux/init.h>
251     #include <linux/io.h>
252     #include <linux/kernel.h>
253     +#include <linux/module.h>
254     #include <linux/of.h>
255     #include <linux/of_address.h>
256    
257     @@ -150,3 +152,5 @@ static int __init t1042rdb_diu_init(void)
258     }
259    
260     early_initcall(t1042rdb_diu_init);
261     +
262     +MODULE_LICENSE("GPL");
263     diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
264     index 2edc673be137..99d1152ae224 100644
265     --- a/arch/powerpc/platforms/pseries/ras.c
266     +++ b/arch/powerpc/platforms/pseries/ras.c
267     @@ -371,7 +371,7 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
268     int len, error_log_length;
269    
270     error_log_length = 8 + rtas_error_extended_log_length(h);
271     - len = max_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
272     + len = min_t(int, error_log_length, RTAS_ERROR_LOG_MAX);
273     memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
274     memcpy(global_mce_data_buf, h, len);
275     errhdr = (struct rtas_error_log *)global_mce_data_buf;
276     diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
277     index eb69a5186243..280e964e1aa8 100644
278     --- a/arch/powerpc/sysdev/mpic_msgr.c
279     +++ b/arch/powerpc/sysdev/mpic_msgr.c
280     @@ -196,7 +196,7 @@ static int mpic_msgr_probe(struct platform_device *dev)
281    
282     /* IO map the message register block. */
283     of_address_to_resource(np, 0, &rsrc);
284     - msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
285     + msgr_block_addr = ioremap(rsrc.start, resource_size(&rsrc));
286     if (!msgr_block_addr) {
287     dev_err(&dev->dev, "Failed to iomap MPIC message registers");
288     return -EFAULT;
289     diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
290     index 9f5ea9d87069..9b0216d571ad 100644
291     --- a/arch/s390/kernel/crash_dump.c
292     +++ b/arch/s390/kernel/crash_dump.c
293     @@ -404,11 +404,13 @@ static void *get_vmcoreinfo_old(unsigned long *size)
294     if (copy_oldmem_kernel(nt_name, addr + sizeof(note),
295     sizeof(nt_name) - 1))
296     return NULL;
297     - if (strcmp(nt_name, "VMCOREINFO") != 0)
298     + if (strcmp(nt_name, VMCOREINFO_NOTE_NAME) != 0)
299     return NULL;
300     vmcoreinfo = kzalloc_panic(note.n_descsz);
301     - if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz))
302     + if (copy_oldmem_kernel(vmcoreinfo, addr + 24, note.n_descsz)) {
303     + kfree(vmcoreinfo);
304     return NULL;
305     + }
306     *size = note.n_descsz;
307     return vmcoreinfo;
308     }
309     @@ -418,15 +420,20 @@ static void *get_vmcoreinfo_old(unsigned long *size)
310     */
311     static void *nt_vmcoreinfo(void *ptr)
312     {
313     + const char *name = VMCOREINFO_NOTE_NAME;
314     unsigned long size;
315     void *vmcoreinfo;
316    
317     vmcoreinfo = os_info_old_entry(OS_INFO_VMCOREINFO, &size);
318     - if (!vmcoreinfo)
319     - vmcoreinfo = get_vmcoreinfo_old(&size);
320     + if (vmcoreinfo)
321     + return nt_init_name(ptr, 0, vmcoreinfo, size, name);
322     +
323     + vmcoreinfo = get_vmcoreinfo_old(&size);
324     if (!vmcoreinfo)
325     return ptr;
326     - return nt_init_name(ptr, 0, vmcoreinfo, size, "VMCOREINFO");
327     + ptr = nt_init_name(ptr, 0, vmcoreinfo, size, name);
328     + kfree(vmcoreinfo);
329     + return ptr;
330     }
331    
332     /*
333     diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
334     index e1fa974ac500..37e52118d7e9 100644
335     --- a/arch/s390/lib/mem.S
336     +++ b/arch/s390/lib/mem.S
337     @@ -17,7 +17,7 @@
338     ENTRY(memmove)
339     ltgr %r4,%r4
340     lgr %r1,%r2
341     - bzr %r14
342     + jz .Lmemmove_exit
343     aghi %r4,-1
344     clgr %r2,%r3
345     jnh .Lmemmove_forward
346     @@ -36,6 +36,7 @@ ENTRY(memmove)
347     .Lmemmove_forward_remainder:
348     larl %r5,.Lmemmove_mvc
349     ex %r4,0(%r5)
350     +.Lmemmove_exit:
351     BR_EX %r14
352     .Lmemmove_reverse:
353     ic %r0,0(%r4,%r3)
354     @@ -65,7 +66,7 @@ EXPORT_SYMBOL(memmove)
355     */
356     ENTRY(memset)
357     ltgr %r4,%r4
358     - bzr %r14
359     + jz .Lmemset_exit
360     ltgr %r3,%r3
361     jnz .Lmemset_fill
362     aghi %r4,-1
363     @@ -80,12 +81,13 @@ ENTRY(memset)
364     .Lmemset_clear_remainder:
365     larl %r3,.Lmemset_xc
366     ex %r4,0(%r3)
367     +.Lmemset_exit:
368     BR_EX %r14
369     .Lmemset_fill:
370     stc %r3,0(%r2)
371     cghi %r4,1
372     lgr %r1,%r2
373     - ber %r14
374     + je .Lmemset_fill_exit
375     aghi %r4,-2
376     srlg %r3,%r4,8
377     ltgr %r3,%r3
378     @@ -97,6 +99,7 @@ ENTRY(memset)
379     .Lmemset_fill_remainder:
380     larl %r3,.Lmemset_mvc
381     ex %r4,0(%r3)
382     +.Lmemset_fill_exit:
383     BR_EX %r14
384     .Lmemset_xc:
385     xc 0(1,%r1),0(%r1)
386     @@ -111,7 +114,7 @@ EXPORT_SYMBOL(memset)
387     */
388     ENTRY(memcpy)
389     ltgr %r4,%r4
390     - bzr %r14
391     + jz .Lmemcpy_exit
392     aghi %r4,-1
393     srlg %r5,%r4,8
394     ltgr %r5,%r5
395     @@ -120,6 +123,7 @@ ENTRY(memcpy)
396     .Lmemcpy_remainder:
397     larl %r5,.Lmemcpy_mvc
398     ex %r4,0(%r5)
399     +.Lmemcpy_exit:
400     BR_EX %r14
401     .Lmemcpy_loop:
402     mvc 0(256,%r1),0(%r3)
403     diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
404     index 340070415c2c..90fef69e4c5a 100644
405     --- a/arch/x86/include/asm/mce.h
406     +++ b/arch/x86/include/asm/mce.h
407     @@ -200,6 +200,7 @@ enum mce_notifier_prios {
408     MCE_PRIO_LOWEST = 0,
409     };
410    
411     +struct notifier_block;
412     extern void mce_register_decode_chain(struct notifier_block *nb);
413     extern void mce_unregister_decode_chain(struct notifier_block *nb);
414    
415     diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
416     index 9dc19b4a2a87..c5d4931d1ef9 100644
417     --- a/arch/x86/include/asm/pgtable-3level.h
418     +++ b/arch/x86/include/asm/pgtable-3level.h
419     @@ -2,6 +2,8 @@
420     #ifndef _ASM_X86_PGTABLE_3LEVEL_H
421     #define _ASM_X86_PGTABLE_3LEVEL_H
422    
423     +#include <asm/atomic64_32.h>
424     +
425     /*
426     * Intel Physical Address Extension (PAE) Mode - three-level page
427     * tables on PPro+ CPUs.
428     @@ -147,10 +149,7 @@ static inline pte_t native_ptep_get_and_clear(pte_t *ptep)
429     {
430     pte_t res;
431    
432     - /* xchg acts as a barrier before the setting of the high bits */
433     - res.pte_low = xchg(&ptep->pte_low, 0);
434     - res.pte_high = ptep->pte_high;
435     - ptep->pte_high = 0;
436     + res.pte = (pteval_t)atomic64_xchg((atomic64_t *)ptep, 0);
437    
438     return res;
439     }
440     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
441     index 00e2ae033a0f..1dfb808abd23 100644
442     --- a/arch/x86/kvm/mmu.c
443     +++ b/arch/x86/kvm/mmu.c
444     @@ -220,6 +220,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
445     PT64_EPT_EXECUTABLE_MASK;
446     static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
447    
448     +/*
449     + * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
450     + * to guard against L1TF attacks.
451     + */
452     +static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
453     +
454     +/*
455     + * The number of high-order 1 bits to use in the mask above.
456     + */
457     +static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
458     +
459     static void mmu_spte_set(u64 *sptep, u64 spte);
460     static void mmu_free_roots(struct kvm_vcpu *vcpu);
461    
462     @@ -308,9 +319,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
463     {
464     unsigned int gen = kvm_current_mmio_generation(vcpu);
465     u64 mask = generation_mmio_spte_mask(gen);
466     + u64 gpa = gfn << PAGE_SHIFT;
467    
468     access &= ACC_WRITE_MASK | ACC_USER_MASK;
469     - mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
470     + mask |= shadow_mmio_value | access;
471     + mask |= gpa | shadow_nonpresent_or_rsvd_mask;
472     + mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
473     + << shadow_nonpresent_or_rsvd_mask_len;
474    
475     trace_mark_mmio_spte(sptep, gfn, access, gen);
476     mmu_spte_set(sptep, mask);
477     @@ -323,8 +338,14 @@ static bool is_mmio_spte(u64 spte)
478    
479     static gfn_t get_mmio_spte_gfn(u64 spte)
480     {
481     - u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
482     - return (spte & ~mask) >> PAGE_SHIFT;
483     + u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
484     + shadow_nonpresent_or_rsvd_mask;
485     + u64 gpa = spte & ~mask;
486     +
487     + gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
488     + & shadow_nonpresent_or_rsvd_mask;
489     +
490     + return gpa >> PAGE_SHIFT;
491     }
492    
493     static unsigned get_mmio_spte_access(u64 spte)
494     @@ -381,7 +402,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
495     }
496     EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
497    
498     -void kvm_mmu_clear_all_pte_masks(void)
499     +static void kvm_mmu_reset_all_pte_masks(void)
500     {
501     shadow_user_mask = 0;
502     shadow_accessed_mask = 0;
503     @@ -391,6 +412,18 @@ void kvm_mmu_clear_all_pte_masks(void)
504     shadow_mmio_mask = 0;
505     shadow_present_mask = 0;
506     shadow_acc_track_mask = 0;
507     +
508     + /*
509     + * If the CPU has 46 or less physical address bits, then set an
510     + * appropriate mask to guard against L1TF attacks. Otherwise, it is
511     + * assumed that the CPU is not vulnerable to L1TF.
512     + */
513     + if (boot_cpu_data.x86_phys_bits <
514     + 52 - shadow_nonpresent_or_rsvd_mask_len)
515     + shadow_nonpresent_or_rsvd_mask =
516     + rsvd_bits(boot_cpu_data.x86_phys_bits -
517     + shadow_nonpresent_or_rsvd_mask_len,
518     + boot_cpu_data.x86_phys_bits - 1);
519     }
520    
521     static int is_cpuid_PSE36(void)
522     @@ -5473,7 +5506,7 @@ static void mmu_destroy_caches(void)
523    
524     int kvm_mmu_module_init(void)
525     {
526     - kvm_mmu_clear_all_pte_masks();
527     + kvm_mmu_reset_all_pte_masks();
528    
529     pte_list_desc_cache = kmem_cache_create("pte_list_desc",
530     sizeof(struct pte_list_desc),
531     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
532     index a466ee14ad41..4e5a8e30cc4e 100644
533     --- a/arch/x86/kvm/vmx.c
534     +++ b/arch/x86/kvm/vmx.c
535     @@ -749,17 +749,21 @@ struct vcpu_vmx {
536     /*
537     * loaded_vmcs points to the VMCS currently used in this vcpu. For a
538     * non-nested (L1) guest, it always points to vmcs01. For a nested
539     - * guest (L2), it points to a different VMCS.
540     + * guest (L2), it points to a different VMCS. loaded_cpu_state points
541     + * to the VMCS whose state is loaded into the CPU registers that only
542     + * need to be switched when transitioning to/from the kernel; a NULL
543     + * value indicates that host state is loaded.
544     */
545     struct loaded_vmcs vmcs01;
546     struct loaded_vmcs *loaded_vmcs;
547     + struct loaded_vmcs *loaded_cpu_state;
548     bool __launched; /* temporary, used in vmx_vcpu_run */
549     struct msr_autoload {
550     struct vmx_msrs guest;
551     struct vmx_msrs host;
552     } msr_autoload;
553     +
554     struct {
555     - int loaded;
556     u16 fs_sel, gs_sel, ldt_sel;
557     #ifdef CONFIG_X86_64
558     u16 ds_sel, es_sel;
559     @@ -2336,10 +2340,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
560     struct vcpu_vmx *vmx = to_vmx(vcpu);
561     int i;
562    
563     - if (vmx->host_state.loaded)
564     + if (vmx->loaded_cpu_state)
565     return;
566    
567     - vmx->host_state.loaded = 1;
568     + vmx->loaded_cpu_state = vmx->loaded_vmcs;
569     +
570     /*
571     * Set host fs and gs selectors. Unfortunately, 22.2.3 does not
572     * allow segment selectors with cpl > 0 or ti == 1.
573     @@ -2390,11 +2395,14 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
574    
575     static void __vmx_load_host_state(struct vcpu_vmx *vmx)
576     {
577     - if (!vmx->host_state.loaded)
578     + if (!vmx->loaded_cpu_state)
579     return;
580    
581     + WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs);
582     +
583     ++vmx->vcpu.stat.host_state_reload;
584     - vmx->host_state.loaded = 0;
585     + vmx->loaded_cpu_state = NULL;
586     +
587     #ifdef CONFIG_X86_64
588     if (is_long_mode(&vmx->vcpu))
589     rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
590     @@ -7582,7 +7590,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
591    
592     /* CPL=0 must be checked manually. */
593     if (vmx_get_cpl(vcpu)) {
594     - kvm_queue_exception(vcpu, UD_VECTOR);
595     + kvm_inject_gp(vcpu, 0);
596     return 1;
597     }
598    
599     @@ -7646,7 +7654,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
600     static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
601     {
602     if (vmx_get_cpl(vcpu)) {
603     - kvm_queue_exception(vcpu, UD_VECTOR);
604     + kvm_inject_gp(vcpu, 0);
605     return 0;
606     }
607    
608     @@ -9944,8 +9952,8 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
609     return;
610    
611     cpu = get_cpu();
612     - vmx->loaded_vmcs = vmcs;
613     vmx_vcpu_put(vcpu);
614     + vmx->loaded_vmcs = vmcs;
615     vmx_vcpu_load(vcpu, cpu);
616     vcpu->cpu = cpu;
617     put_cpu();
618     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
619     index 5c2c09f6c1c3..3856828ee1dc 100644
620     --- a/arch/x86/kvm/x86.c
621     +++ b/arch/x86/kvm/x86.c
622     @@ -6194,20 +6194,22 @@ static void kvm_set_mmio_spte_mask(void)
623     * Set the reserved bits and the present bit of an paging-structure
624     * entry to generate page fault with PFER.RSV = 1.
625     */
626     - /* Mask the reserved physical address bits. */
627     - mask = rsvd_bits(maxphyaddr, 51);
628     +
629     + /*
630     + * Mask the uppermost physical address bit, which would be reserved as
631     + * long as the supported physical address width is less than 52.
632     + */
633     + mask = 1ull << 51;
634    
635     /* Set the present bit. */
636     mask |= 1ull;
637    
638     -#ifdef CONFIG_X86_64
639     /*
640     * If reserved bit is not supported, clear the present bit to disable
641     * mmio page fault.
642     */
643     - if (maxphyaddr == 52)
644     + if (IS_ENABLED(CONFIG_X86_64) && maxphyaddr == 52)
645     mask &= ~1ull;
646     -#endif
647    
648     kvm_mmu_set_mmio_spte_mask(mask, mask);
649     }
650     diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c
651     index b3526a98a5a5..42cfad67b6ac 100644
652     --- a/arch/x86/xen/mmu_pv.c
653     +++ b/arch/x86/xen/mmu_pv.c
654     @@ -425,14 +425,13 @@ static void xen_set_pud(pud_t *ptr, pud_t val)
655     static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
656     {
657     trace_xen_mmu_set_pte_atomic(ptep, pte);
658     - set_64bit((u64 *)ptep, native_pte_val(pte));
659     + __xen_set_pte(ptep, pte);
660     }
661    
662     static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
663     {
664     trace_xen_mmu_pte_clear(mm, addr, ptep);
665     - if (!xen_batched_set_pte(ptep, native_make_pte(0)))
666     - native_pte_clear(mm, addr, ptep);
667     + __xen_set_pte(ptep, native_make_pte(0));
668     }
669    
670     static void xen_pmd_clear(pmd_t *pmdp)
671     @@ -1543,7 +1542,7 @@ static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
672     pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
673     pte_val_ma(pte));
674     #endif
675     - native_set_pte(ptep, pte);
676     + __xen_set_pte(ptep, pte);
677     }
678    
679     /* Early in boot, while setting up the initial pagetable, assume
680     diff --git a/block/bio.c b/block/bio.c
681     index 194d28cdc642..2e5d881423b8 100644
682     --- a/block/bio.c
683     +++ b/block/bio.c
684     @@ -156,7 +156,7 @@ out:
685    
686     unsigned int bvec_nr_vecs(unsigned short idx)
687     {
688     - return bvec_slabs[idx].nr_vecs;
689     + return bvec_slabs[--idx].nr_vecs;
690     }
691    
692     void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
693     diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
694     index 9f342ef1ad42..9c4f1c496c90 100644
695     --- a/block/cfq-iosched.c
696     +++ b/block/cfq-iosched.c
697     @@ -4741,12 +4741,13 @@ USEC_SHOW_FUNCTION(cfq_target_latency_us_show, cfqd->cfq_target_latency);
698     static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
699     { \
700     struct cfq_data *cfqd = e->elevator_data; \
701     - unsigned int __data; \
702     + unsigned int __data, __min = (MIN), __max = (MAX); \
703     + \
704     cfq_var_store(&__data, (page)); \
705     - if (__data < (MIN)) \
706     - __data = (MIN); \
707     - else if (__data > (MAX)) \
708     - __data = (MAX); \
709     + if (__data < __min) \
710     + __data = __min; \
711     + else if (__data > __max) \
712     + __data = __max; \
713     if (__CONV) \
714     *(__PTR) = (u64)__data * NSEC_PER_MSEC; \
715     else \
716     @@ -4775,12 +4776,13 @@ STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX,
717     static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
718     { \
719     struct cfq_data *cfqd = e->elevator_data; \
720     - unsigned int __data; \
721     + unsigned int __data, __min = (MIN), __max = (MAX); \
722     + \
723     cfq_var_store(&__data, (page)); \
724     - if (__data < (MIN)) \
725     - __data = (MIN); \
726     - else if (__data > (MAX)) \
727     - __data = (MAX); \
728     + if (__data < __min) \
729     + __data = __min; \
730     + else if (__data > __max) \
731     + __data = __max; \
732     *(__PTR) = (u64)__data * NSEC_PER_USEC; \
733     return count; \
734     }
735     diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
736     index c0984d33c4c8..2eddbb1fae6a 100644
737     --- a/drivers/acpi/scan.c
738     +++ b/drivers/acpi/scan.c
739     @@ -1599,7 +1599,8 @@ static int acpi_add_single_object(struct acpi_device **child,
740     * Note this must be done before the get power-/wakeup_dev-flags calls.
741     */
742     if (type == ACPI_BUS_TYPE_DEVICE)
743     - acpi_bus_get_status(device);
744     + if (acpi_bus_get_status(device) < 0)
745     + acpi_set_device_status(device, 0);
746    
747     acpi_bus_get_power_flags(device);
748     acpi_bus_get_wakeup_device_flags(device);
749     @@ -1677,7 +1678,7 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
750     * acpi_add_single_object updates this once we've an acpi_device
751     * so that acpi_bus_get_status' quirk handling can be used.
752     */
753     - *sta = 0;
754     + *sta = ACPI_STA_DEFAULT;
755     break;
756     case ACPI_TYPE_PROCESSOR:
757     *type = ACPI_BUS_TYPE_PROCESSOR;
758     diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
759     index 62d0a69f8da0..3acf5f041e3c 100644
760     --- a/drivers/clk/rockchip/clk-rk3399.c
761     +++ b/drivers/clk/rockchip/clk-rk3399.c
762     @@ -1522,6 +1522,7 @@ static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
763     "pclk_pmu_src",
764     "fclk_cm0s_src_pmu",
765     "clk_timer_src_pmu",
766     + "pclk_rkpwm_pmu",
767     };
768    
769     static void __init rk3399_clk_init(struct device_node *np)
770     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
771     index 8c2204c7b384..7ad8fa891ce6 100644
772     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
773     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
774     @@ -134,6 +134,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
775     msleep(1);
776     }
777    
778     + if (ucode) {
779     + ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
780     + ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
781     + }
782     +
783     return ret;
784     }
785    
786     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
787     index 30b5500dc152..fa7b25e1e5d2 100644
788     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
789     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
790     @@ -172,6 +172,7 @@ enum AMDGPU_UCODE_ID {
791     AMDGPU_UCODE_ID_SMC,
792     AMDGPU_UCODE_ID_UVD,
793     AMDGPU_UCODE_ID_VCE,
794     + AMDGPU_UCODE_ID_VCN,
795     AMDGPU_UCODE_ID_MAXIMUM,
796     };
797    
798     @@ -204,6 +205,9 @@ struct amdgpu_firmware_info {
799     void *kaddr;
800     /* ucode_size_bytes */
801     uint32_t ucode_size;
802     + /* starting tmr mc address */
803     + uint32_t tmr_mc_addr_lo;
804     + uint32_t tmr_mc_addr_hi;
805     };
806    
807     void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
808     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
809     index 308a9755eae3..1612d8aa6ad6 100644
810     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
811     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
812     @@ -93,9 +93,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
813     version_major, version_minor, family_id);
814    
815    
816     - bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
817     - + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
818     + bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
819     + AMDGPU_VCN_SESSION_SIZE * 40;
820     + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
821     + bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
822     r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
823     AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
824     &adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
825     @@ -191,11 +192,13 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
826     unsigned offset;
827    
828     hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
829     - offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
830     - memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
831     - le32_to_cpu(hdr->ucode_size_bytes));
832     - size -= le32_to_cpu(hdr->ucode_size_bytes);
833     - ptr += le32_to_cpu(hdr->ucode_size_bytes);
834     + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
835     + offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
836     + memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
837     + le32_to_cpu(hdr->ucode_size_bytes));
838     + size -= le32_to_cpu(hdr->ucode_size_bytes);
839     + ptr += le32_to_cpu(hdr->ucode_size_bytes);
840     + }
841     memset_io(ptr, 0, size);
842     }
843    
844     diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
845     index 1a30c54a0889..3981915e2311 100644
846     --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
847     +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
848     @@ -3113,7 +3113,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
849    
850     /* wait for RLC_SAFE_MODE */
851     for (i = 0; i < adev->usec_timeout; i++) {
852     - if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
853     + if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
854     break;
855     udelay(1);
856     }
857     diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
858     index f7cf994b1da2..86db90ff693a 100644
859     --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
860     +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
861     @@ -78,6 +78,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
862     case AMDGPU_UCODE_ID_VCE:
863     *type = GFX_FW_TYPE_VCE;
864     break;
865     + case AMDGPU_UCODE_ID_VCN:
866     + *type = GFX_FW_TYPE_VCN;
867     + break;
868     case AMDGPU_UCODE_ID_MAXIMUM:
869     default:
870     return -EINVAL;
871     diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
872     index a098712bdd2f..f7b8caccab9f 100644
873     --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
874     +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
875     @@ -91,6 +91,16 @@ static int vcn_v1_0_sw_init(void *handle)
876     if (r)
877     return r;
878    
879     + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
880     + const struct common_firmware_header *hdr;
881     + hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
882     + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
883     + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
884     + adev->firmware.fw_size +=
885     + ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
886     + DRM_INFO("PSP loading VCN firmware\n");
887     + }
888     +
889     r = amdgpu_vcn_resume(adev);
890     if (r)
891     return r;
892     @@ -248,26 +258,38 @@ static int vcn_v1_0_resume(void *handle)
893     static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
894     {
895     uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
896     -
897     - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
898     + uint32_t offset;
899     +
900     + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
901     + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
902     + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
903     + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
904     + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
905     + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
906     + offset = 0;
907     + } else {
908     + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
909     lower_32_bits(adev->vcn.gpu_addr));
910     - WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
911     + WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
912     upper_32_bits(adev->vcn.gpu_addr));
913     - WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
914     - AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
915     + offset = size;
916     + WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
917     + AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
918     + }
919     +
920     WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
921    
922     WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
923     - lower_32_bits(adev->vcn.gpu_addr + size));
924     + lower_32_bits(adev->vcn.gpu_addr + offset));
925     WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
926     - upper_32_bits(adev->vcn.gpu_addr + size));
927     + upper_32_bits(adev->vcn.gpu_addr + offset));
928     WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
929     WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
930    
931     WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
932     - lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
933     + lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
934     WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
935     - upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
936     + upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
937     WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
938     WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
939     AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
940     diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
941     index 1dc31aa72781..12856de09f57 100644
942     --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
943     +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
944     @@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
945     { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
946    
947     { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
948     + { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
949     +
950     + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
951     + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
952     + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
953     + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
954     + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
955     + { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
956     +
957     + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
958     + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
959     + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
960     + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
961     + { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
962     +
963     + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
964     + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
965     + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
966     + { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
967     +
968     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
969     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
970     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
971     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
972     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
973     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
974     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
975     + { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
976     +
977     + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
978     + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
979     + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
980     + { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
981     +
982     + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
983     + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
984     + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
985     + { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
986     +
987     + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
988     + { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
989     +
990     + { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
991     { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
992    
993     { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
994     diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
995     index 1f1fd3139c5b..c29dea895605 100644
996     --- a/drivers/gpu/drm/drm_edid.c
997     +++ b/drivers/gpu/drm/drm_edid.c
998     @@ -114,6 +114,9 @@ static const struct edid_quirk {
999     /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
1000     { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
1001    
1002     + /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
1003     + { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
1004     +
1005     /* Belinea 10 15 55 */
1006     { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
1007     { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
1008     diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
1009     index 2fdf302ebdad..8a541d0e3e80 100644
1010     --- a/drivers/gpu/drm/i915/intel_lpe_audio.c
1011     +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
1012     @@ -128,9 +128,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
1013    
1014     kfree(rsc);
1015    
1016     - pm_runtime_forbid(&platdev->dev);
1017     - pm_runtime_set_active(&platdev->dev);
1018     - pm_runtime_enable(&platdev->dev);
1019     + pm_runtime_no_callbacks(&platdev->dev);
1020    
1021     return platdev;
1022    
1023     diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c
1024     index beb9baaf2f2e..f71fef10ecc6 100644
1025     --- a/drivers/gpu/drm/i915/intel_lspcon.c
1026     +++ b/drivers/gpu/drm/i915/intel_lspcon.c
1027     @@ -75,7 +75,7 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon,
1028     lspcon_mode_name(mode));
1029    
1030     wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode ||
1031     - current_mode == DRM_LSPCON_MODE_INVALID, 100);
1032     + current_mode == DRM_LSPCON_MODE_INVALID, 400);
1033     if (current_mode != mode)
1034     DRM_DEBUG_KMS("LSPCON mode hasn't settled\n");
1035    
1036     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1037     index 9e478f03e845..81ee1d026648 100644
1038     --- a/drivers/hid/hid-ids.h
1039     +++ b/drivers/hid/hid-ids.h
1040     @@ -528,6 +528,7 @@
1041     #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a
1042     #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a
1043     #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a
1044     +#define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A 0x094a
1045    
1046     #define USB_VENDOR_ID_HUION 0x256c
1047     #define USB_DEVICE_ID_HUION_TABLET 0x006e
1048     diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
1049     index f489a5cfcb48..e10eda031b01 100644
1050     --- a/drivers/hid/usbhid/hid-quirks.c
1051     +++ b/drivers/hid/usbhid/hid-quirks.c
1052     @@ -99,6 +99,7 @@ static const struct hid_blacklist {
1053     { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
1054     { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
1055     { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
1056     + { USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A, HID_QUIRK_ALWAYS_POLL },
1057     { USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6680, HID_QUIRK_MULTI_INPUT },
1058     { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007, HID_QUIRK_ALWAYS_POLL },
1059     { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077, HID_QUIRK_ALWAYS_POLL },
1060     diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
1061     index b5fab55cc275..b197e925fe36 100644
1062     --- a/drivers/infiniband/hw/hfi1/affinity.c
1063     +++ b/drivers/infiniband/hw/hfi1/affinity.c
1064     @@ -146,7 +146,7 @@ int node_affinity_init(void)
1065     while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
1066     node = pcibus_to_node(dev->bus);
1067     if (node < 0)
1068     - node = numa_node_id();
1069     + goto out;
1070    
1071     hfi1_per_node_cntr[node]++;
1072     }
1073     @@ -154,6 +154,18 @@ int node_affinity_init(void)
1074     }
1075    
1076     return 0;
1077     +
1078     +out:
1079     + /*
1080     + * Invalid PCI NUMA node information found, note it, and populate
1081     + * our database 1:1.
1082     + */
1083     + pr_err("HFI: Invalid PCI NUMA node. Performance may be affected\n");
1084     + pr_err("HFI: System BIOS may need to be upgraded\n");
1085     + for (node = 0; node < node_affinity.num_possible_nodes; node++)
1086     + hfi1_per_node_cntr[node] = 1;
1087     +
1088     + return 0;
1089     }
1090    
1091     void node_affinity_destroy(void)
1092     @@ -227,8 +239,14 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
1093     const struct cpumask *local_mask;
1094     int curr_cpu, possible, i;
1095    
1096     - if (node < 0)
1097     - node = numa_node_id();
1098     + /*
1099     + * If the BIOS does not have the NUMA node information set, select
1100     + * NUMA 0 so we get consistent performance.
1101     + */
1102     + if (node < 0) {
1103     + dd_dev_err(dd, "Invalid PCI NUMA node. Performance may be affected\n");
1104     + node = 0;
1105     + }
1106     dd->node = node;
1107    
1108     local_mask = cpumask_of_node(dd->node);
1109     diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
1110     index a64500fa1145..3cef53c65133 100644
1111     --- a/drivers/infiniband/hw/hns/hns_roce_pd.c
1112     +++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
1113     @@ -35,7 +35,7 @@
1114    
1115     static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
1116     {
1117     - return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn);
1118     + return hns_roce_bitmap_alloc(&hr_dev->pd_bitmap, pdn) ? -ENOMEM : 0;
1119     }
1120    
1121     static void hns_roce_pd_free(struct hns_roce_dev *hr_dev, unsigned long pdn)
1122     diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
1123     index f5dd21c2d275..3a37d26889df 100644
1124     --- a/drivers/infiniband/hw/hns/hns_roce_qp.c
1125     +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
1126     @@ -114,7 +114,10 @@ static int hns_roce_reserve_range_qp(struct hns_roce_dev *hr_dev, int cnt,
1127     {
1128     struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
1129    
1130     - return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align, base);
1131     + return hns_roce_bitmap_alloc_range(&qp_table->bitmap, cnt, align,
1132     + base) ?
1133     + -ENOMEM :
1134     + 0;
1135     }
1136    
1137     enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state)
1138     diff --git a/drivers/input/input.c b/drivers/input/input.c
1139     index 762bfb9487dc..50d425fe6706 100644
1140     --- a/drivers/input/input.c
1141     +++ b/drivers/input/input.c
1142     @@ -480,11 +480,19 @@ EXPORT_SYMBOL(input_inject_event);
1143     */
1144     void input_alloc_absinfo(struct input_dev *dev)
1145     {
1146     - if (!dev->absinfo)
1147     - dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
1148     - GFP_KERNEL);
1149     + if (dev->absinfo)
1150     + return;
1151    
1152     - WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
1153     + dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
1154     + if (!dev->absinfo) {
1155     + dev_err(dev->dev.parent ?: &dev->dev,
1156     + "%s: unable to allocate memory\n", __func__);
1157     + /*
1158     + * We will handle this allocation failure in
1159     + * input_register_device() when we refuse to register input
1160     + * device with ABS bits but without absinfo.
1161     + */
1162     + }
1163     }
1164     EXPORT_SYMBOL(input_alloc_absinfo);
1165    
1166     diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
1167     index bd67e1b2c64e..57960cb5e045 100644
1168     --- a/drivers/iommu/omap-iommu.c
1169     +++ b/drivers/iommu/omap-iommu.c
1170     @@ -529,7 +529,7 @@ static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd,
1171    
1172     pte_ready:
1173     iopte = iopte_offset(iopgd, da);
1174     - *pt_dma = virt_to_phys(iopte);
1175     + *pt_dma = iopgd_page_paddr(iopgd);
1176     dev_vdbg(obj->dev,
1177     "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
1178     __func__, da, iopgd, *iopgd, iopte, *iopte);
1179     @@ -717,7 +717,7 @@ static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
1180     }
1181     bytes *= nent;
1182     memset(iopte, 0, nent * sizeof(*iopte));
1183     - pt_dma = virt_to_phys(iopte);
1184     + pt_dma = iopgd_page_paddr(iopgd);
1185     flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);
1186    
1187     /*
1188     diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
1189     index 55cfb986225b..0b9a8b709abf 100644
1190     --- a/drivers/irqchip/irq-bcm7038-l1.c
1191     +++ b/drivers/irqchip/irq-bcm7038-l1.c
1192     @@ -217,6 +217,7 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
1193     return 0;
1194     }
1195    
1196     +#ifdef CONFIG_SMP
1197     static void bcm7038_l1_cpu_offline(struct irq_data *d)
1198     {
1199     struct cpumask *mask = irq_data_get_affinity_mask(d);
1200     @@ -241,6 +242,7 @@ static void bcm7038_l1_cpu_offline(struct irq_data *d)
1201     }
1202     irq_set_affinity_locked(d, &new_affinity, false);
1203     }
1204     +#endif
1205    
1206     static int __init bcm7038_l1_init_one(struct device_node *dn,
1207     unsigned int idx,
1208     @@ -293,7 +295,9 @@ static struct irq_chip bcm7038_l1_irq_chip = {
1209     .irq_mask = bcm7038_l1_mask,
1210     .irq_unmask = bcm7038_l1_unmask,
1211     .irq_set_affinity = bcm7038_l1_set_affinity,
1212     +#ifdef CONFIG_SMP
1213     .irq_cpu_offline = bcm7038_l1_cpu_offline,
1214     +#endif
1215     };
1216    
1217     static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
1218     diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
1219     index 3f0ddc0d7393..3fb65778e03d 100644
1220     --- a/drivers/lightnvm/pblk-core.c
1221     +++ b/drivers/lightnvm/pblk-core.c
1222     @@ -190,7 +190,6 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
1223    
1224     WARN_ON(off + nr_pages != bio->bi_vcnt);
1225    
1226     - bio_advance(bio, off * PBLK_EXPOSED_PAGE_SIZE);
1227     for (i = off; i < nr_pages + off; i++) {
1228     bv = bio->bi_io_vec[i];
1229     mempool_free(bv.bv_page, pblk->page_bio_pool);
1230     diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
1231     index 3ad9e56d2473..d89ac573f8d8 100644
1232     --- a/drivers/lightnvm/pblk-write.c
1233     +++ b/drivers/lightnvm/pblk-write.c
1234     @@ -33,6 +33,10 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
1235     bio_endio(original_bio);
1236     }
1237    
1238     + if (c_ctx->nr_padded)
1239     + pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
1240     + c_ctx->nr_padded);
1241     +
1242     #ifdef CONFIG_NVM_DEBUG
1243     atomic_long_add(c_ctx->nr_valid, &pblk->sync_writes);
1244     #endif
1245     @@ -521,7 +525,8 @@ static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
1246     struct bio *bio = rqd->bio;
1247    
1248     if (c_ctx->nr_padded)
1249     - pblk_bio_free_pages(pblk, bio, rqd->nr_ppas, c_ctx->nr_padded);
1250     + pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
1251     + c_ctx->nr_padded);
1252     }
1253    
1254     static int pblk_submit_write(struct pblk *pblk)
1255     diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
1256     index cf2c67e35eaf..d4b326914f06 100644
1257     --- a/drivers/md/dm-kcopyd.c
1258     +++ b/drivers/md/dm-kcopyd.c
1259     @@ -484,6 +484,8 @@ static int run_complete_job(struct kcopyd_job *job)
1260     if (atomic_dec_and_test(&kc->nr_jobs))
1261     wake_up(&kc->destroyq);
1262    
1263     + cond_resched();
1264     +
1265     return 0;
1266     }
1267    
1268     diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
1269     index 40534352e574..3270b8dbc949 100644
1270     --- a/drivers/mfd/sm501.c
1271     +++ b/drivers/mfd/sm501.c
1272     @@ -714,6 +714,7 @@ sm501_create_subdev(struct sm501_devdata *sm, char *name,
1273     smdev->pdev.name = name;
1274     smdev->pdev.id = sm->pdev_id;
1275     smdev->pdev.dev.parent = sm->dev;
1276     + smdev->pdev.dev.coherent_dma_mask = 0xffffffff;
1277    
1278     if (res_count) {
1279     smdev->pdev.resource = (struct resource *)(smdev+1);
1280     diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1281     index 4c49d0b97748..9d499c5c8f8a 100644
1282     --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1283     +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
1284     @@ -185,6 +185,9 @@ struct bcmgenet_mib_counters {
1285     #define UMAC_MAC1 0x010
1286     #define UMAC_MAX_FRAME_LEN 0x014
1287    
1288     +#define UMAC_MODE 0x44
1289     +#define MODE_LINK_STATUS (1 << 5)
1290     +
1291     #define UMAC_EEE_CTRL 0x064
1292     #define EN_LPI_RX_PAUSE (1 << 0)
1293     #define EN_LPI_TX_PFC (1 << 1)
1294     diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1295     index 18f5723be2c9..6ad0ca7ed3e9 100644
1296     --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
1297     +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
1298     @@ -115,8 +115,14 @@ void bcmgenet_mii_setup(struct net_device *dev)
1299     static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
1300     struct fixed_phy_status *status)
1301     {
1302     - if (dev && dev->phydev && status)
1303     - status->link = dev->phydev->link;
1304     + struct bcmgenet_priv *priv;
1305     + u32 reg;
1306     +
1307     + if (dev && dev->phydev && status) {
1308     + priv = netdev_priv(dev);
1309     + reg = bcmgenet_umac_readl(priv, UMAC_MODE);
1310     + status->link = !!(reg & MODE_LINK_STATUS);
1311     + }
1312    
1313     return 0;
1314     }
1315     diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
1316     index 6df2cad61647..dfef4ec167c1 100644
1317     --- a/drivers/net/ethernet/cadence/macb_main.c
1318     +++ b/drivers/net/ethernet/cadence/macb_main.c
1319     @@ -1884,14 +1884,17 @@ static void macb_reset_hw(struct macb *bp)
1320     {
1321     struct macb_queue *queue;
1322     unsigned int q;
1323     + u32 ctrl = macb_readl(bp, NCR);
1324    
1325     /* Disable RX and TX (XXX: Should we halt the transmission
1326     * more gracefully?)
1327     */
1328     - macb_writel(bp, NCR, 0);
1329     + ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1330    
1331     /* Clear the stats registers (XXX: Update stats first?) */
1332     - macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
1333     + ctrl |= MACB_BIT(CLRSTAT);
1334     +
1335     + macb_writel(bp, NCR, ctrl);
1336    
1337     /* Clear all status flags */
1338     macb_writel(bp, TSR, -1);
1339     @@ -2070,7 +2073,7 @@ static void macb_init_hw(struct macb *bp)
1340     }
1341    
1342     /* Enable TX and RX */
1343     - macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
1344     + macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
1345     }
1346    
1347     /* The hash address register is 64 bits long and takes up two
1348     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1349     index c133491ad9fa..654aad6e748b 100644
1350     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1351     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1352     @@ -3105,7 +3105,7 @@ static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
1353     #define HCLGE_FUNC_NUMBER_PER_DESC 6
1354     int i, j;
1355    
1356     - for (i = 0; i < HCLGE_DESC_NUMBER; i++)
1357     + for (i = 1; i < HCLGE_DESC_NUMBER; i++)
1358     for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
1359     if (desc[i].data[j])
1360     return false;
1361     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1362     index f32d719c4f77..8f90dd1be6b5 100644
1363     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1364     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
1365     @@ -187,6 +187,8 @@ int hclge_mac_start_phy(struct hclge_dev *hdev)
1366     if (!phydev)
1367     return 0;
1368    
1369     + phydev->supported &= ~SUPPORTED_FIBRE;
1370     +
1371     ret = phy_connect_direct(netdev, phydev,
1372     hclge_mac_adjust_link,
1373     PHY_INTERFACE_MODE_SGMII);
1374     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
1375     index 8c4ce0a0cc82..06eeea6b2f93 100644
1376     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
1377     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
1378     @@ -395,6 +395,8 @@ int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
1379     void
1380     mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
1381     void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif);
1382     +void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
1383     + struct net_device *dev);
1384    
1385     /* spectrum_kvdl.c */
1386     int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count,
1387     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1388     index 516e63244606..3ed4fb346f23 100644
1389     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1390     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
1391     @@ -5131,6 +5131,17 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
1392     mlxsw_sp_vr_put(vr);
1393     }
1394    
1395     +void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
1396     + struct net_device *dev)
1397     +{
1398     + struct mlxsw_sp_rif *rif;
1399     +
1400     + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
1401     + if (!rif)
1402     + return;
1403     + mlxsw_sp_rif_destroy(rif);
1404     +}
1405     +
1406     static void
1407     mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params,
1408     struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1409     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1410     index 7924f241e3ad..32c25772f755 100644
1411     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1412     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
1413     @@ -140,6 +140,24 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
1414     return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
1415     }
1416    
1417     +static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
1418     + void *data)
1419     +{
1420     + struct mlxsw_sp *mlxsw_sp = data;
1421     +
1422     + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
1423     + return 0;
1424     +}
1425     +
1426     +static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
1427     + struct net_device *dev)
1428     +{
1429     + mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
1430     + netdev_walk_all_upper_dev_rcu(dev,
1431     + mlxsw_sp_bridge_device_upper_rif_destroy,
1432     + mlxsw_sp);
1433     +}
1434     +
1435     static struct mlxsw_sp_bridge_device *
1436     mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
1437     struct net_device *br_dev)
1438     @@ -176,6 +194,8 @@ static void
1439     mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
1440     struct mlxsw_sp_bridge_device *bridge_device)
1441     {
1442     + mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
1443     + bridge_device->dev);
1444     list_del(&bridge_device->list);
1445     if (bridge_device->vlan_enabled)
1446     bridge->vlan_enabled_exists = false;
1447     diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1448     index 8d53a593fb27..b482a8fb0e92 100644
1449     --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1450     +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
1451     @@ -227,29 +227,16 @@ done:
1452     spin_unlock_bh(&nn->reconfig_lock);
1453     }
1454    
1455     -/**
1456     - * nfp_net_reconfig() - Reconfigure the firmware
1457     - * @nn: NFP Net device to reconfigure
1458     - * @update: The value for the update field in the BAR config
1459     - *
1460     - * Write the update word to the BAR and ping the reconfig queue. The
1461     - * poll until the firmware has acknowledged the update by zeroing the
1462     - * update word.
1463     - *
1464     - * Return: Negative errno on error, 0 on success
1465     - */
1466     -int nfp_net_reconfig(struct nfp_net *nn, u32 update)
1467     +static void nfp_net_reconfig_sync_enter(struct nfp_net *nn)
1468     {
1469     bool cancelled_timer = false;
1470     u32 pre_posted_requests;
1471     - int ret;
1472    
1473     spin_lock_bh(&nn->reconfig_lock);
1474    
1475     nn->reconfig_sync_present = true;
1476    
1477     if (nn->reconfig_timer_active) {
1478     - del_timer(&nn->reconfig_timer);
1479     nn->reconfig_timer_active = false;
1480     cancelled_timer = true;
1481     }
1482     @@ -258,14 +245,43 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update)
1483    
1484     spin_unlock_bh(&nn->reconfig_lock);
1485    
1486     - if (cancelled_timer)
1487     + if (cancelled_timer) {
1488     + del_timer_sync(&nn->reconfig_timer);
1489     nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
1490     + }
1491    
1492     /* Run the posted reconfigs which were issued before we started */
1493     if (pre_posted_requests) {
1494     nfp_net_reconfig_start(nn, pre_posted_requests);
1495     nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
1496     }
1497     +}
1498     +
1499     +static void nfp_net_reconfig_wait_posted(struct nfp_net *nn)
1500     +{
1501     + nfp_net_reconfig_sync_enter(nn);
1502     +
1503     + spin_lock_bh(&nn->reconfig_lock);
1504     + nn->reconfig_sync_present = false;
1505     + spin_unlock_bh(&nn->reconfig_lock);
1506     +}
1507     +
1508     +/**
1509     + * nfp_net_reconfig() - Reconfigure the firmware
1510     + * @nn: NFP Net device to reconfigure
1511     + * @update: The value for the update field in the BAR config
1512     + *
1513     + * Write the update word to the BAR and ping the reconfig queue. The
1514     + * poll until the firmware has acknowledged the update by zeroing the
1515     + * update word.
1516     + *
1517     + * Return: Negative errno on error, 0 on success
1518     + */
1519     +int nfp_net_reconfig(struct nfp_net *nn, u32 update)
1520     +{
1521     + int ret;
1522     +
1523     + nfp_net_reconfig_sync_enter(nn);
1524    
1525     nfp_net_reconfig_start(nn, update);
1526     ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
1527     @@ -3560,6 +3576,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev,
1528     */
1529     void nfp_net_free(struct nfp_net *nn)
1530     {
1531     + WARN_ON(timer_pending(&nn->reconfig_timer) || nn->reconfig_posted);
1532     if (nn->xdp_prog)
1533     bpf_prog_put(nn->xdp_prog);
1534    
1535     @@ -3829,4 +3846,5 @@ void nfp_net_clean(struct nfp_net *nn)
1536     return;
1537    
1538     unregister_netdev(nn->dp.netdev);
1539     + nfp_net_reconfig_wait_posted(nn);
1540     }
1541     diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
1542     index 9feec7009443..0e3b2890b925 100644
1543     --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
1544     +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
1545     @@ -2386,26 +2386,20 @@ static int qlge_update_hw_vlan_features(struct net_device *ndev,
1546     return status;
1547     }
1548    
1549     -static netdev_features_t qlge_fix_features(struct net_device *ndev,
1550     - netdev_features_t features)
1551     -{
1552     - int err;
1553     -
1554     - /* Update the behavior of vlan accel in the adapter */
1555     - err = qlge_update_hw_vlan_features(ndev, features);
1556     - if (err)
1557     - return err;
1558     -
1559     - return features;
1560     -}
1561     -
1562     static int qlge_set_features(struct net_device *ndev,
1563     netdev_features_t features)
1564     {
1565     netdev_features_t changed = ndev->features ^ features;
1566     + int err;
1567     +
1568     + if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1569     + /* Update the behavior of vlan accel in the adapter */
1570     + err = qlge_update_hw_vlan_features(ndev, features);
1571     + if (err)
1572     + return err;
1573    
1574     - if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1575     qlge_vlan_mode(ndev, features);
1576     + }
1577    
1578     return 0;
1579     }
1580     @@ -4719,7 +4713,6 @@ static const struct net_device_ops qlge_netdev_ops = {
1581     .ndo_set_mac_address = qlge_set_mac_address,
1582     .ndo_validate_addr = eth_validate_addr,
1583     .ndo_tx_timeout = qlge_tx_timeout,
1584     - .ndo_fix_features = qlge_fix_features,
1585     .ndo_set_features = qlge_set_features,
1586     .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
1587     .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
1588     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1589     index b98fcc9e93e5..3669005b9294 100644
1590     --- a/drivers/net/ethernet/realtek/r8169.c
1591     +++ b/drivers/net/ethernet/realtek/r8169.c
1592     @@ -329,6 +329,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
1593     { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
1594     { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
1595     { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
1596     + { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 },
1597     { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
1598     { PCI_VENDOR_ID_DLINK, 0x4300,
1599     PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
1600     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1601     index 6a77ef38c549..aba16d81e9bb 100644
1602     --- a/drivers/net/hyperv/netvsc_drv.c
1603     +++ b/drivers/net/hyperv/netvsc_drv.c
1604     @@ -29,6 +29,7 @@
1605     #include <linux/netdevice.h>
1606     #include <linux/inetdevice.h>
1607     #include <linux/etherdevice.h>
1608     +#include <linux/pci.h>
1609     #include <linux/skbuff.h>
1610     #include <linux/if_vlan.h>
1611     #include <linux/in.h>
1612     @@ -1895,11 +1896,15 @@ static int netvsc_register_vf(struct net_device *vf_netdev)
1613     {
1614     struct net_device *ndev;
1615     struct net_device_context *net_device_ctx;
1616     + struct device *pdev = vf_netdev->dev.parent;
1617     struct netvsc_device *netvsc_dev;
1618    
1619     if (vf_netdev->addr_len != ETH_ALEN)
1620     return NOTIFY_DONE;
1621    
1622     + if (!pdev || !dev_is_pci(pdev) || dev_is_pf(pdev))
1623     + return NOTIFY_DONE;
1624     +
1625     /*
1626     * We will use the MAC address to locate the synthetic interface to
1627     * associate with the VF interface. If we don't find a matching
1628     @@ -2039,6 +2044,16 @@ static int netvsc_probe(struct hv_device *dev,
1629    
1630     memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
1631    
1632     + /* We must get rtnl lock before scheduling nvdev->subchan_work,
1633     + * otherwise netvsc_subchan_work() can get rtnl lock first and wait
1634     + * all subchannels to show up, but that may not happen because
1635     + * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
1636     + * -> ... -> device_add() -> ... -> __device_attach() can't get
1637     + * the device lock, so all the subchannels can't be processed --
1638     + * finally netvsc_subchan_work() hangs for ever.
1639     + */
1640     + rtnl_lock();
1641     +
1642     if (nvdev->num_chn > 1)
1643     schedule_work(&nvdev->subchan_work);
1644    
1645     @@ -2057,7 +2072,6 @@ static int netvsc_probe(struct hv_device *dev,
1646     else
1647     net->max_mtu = ETH_DATA_LEN;
1648    
1649     - rtnl_lock();
1650     ret = register_netdevice(net);
1651     if (ret != 0) {
1652     pr_err("Unable to register netdev.\n");
1653     diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
1654     index 8d88f19dc171..12c1c1851ee6 100644
1655     --- a/drivers/pci/host/pci-mvebu.c
1656     +++ b/drivers/pci/host/pci-mvebu.c
1657     @@ -1220,7 +1220,7 @@ static int mvebu_pcie_probe(struct platform_device *pdev)
1658     pcie->realio.start = PCIBIOS_MIN_IO;
1659     pcie->realio.end = min_t(resource_size_t,
1660     IO_SPACE_LIMIT,
1661     - resource_size(&pcie->io));
1662     + resource_size(&pcie->io) - 1);
1663     } else
1664     pcie->realio = pcie->io;
1665    
1666     diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
1667     index 5269a01d9bdd..a6a33327f5e7 100644
1668     --- a/drivers/platform/x86/asus-nb-wmi.c
1669     +++ b/drivers/platform/x86/asus-nb-wmi.c
1670     @@ -487,6 +487,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = {
1671     { KE_KEY, 0xC4, { KEY_KBDILLUMUP } },
1672     { KE_KEY, 0xC5, { KEY_KBDILLUMDOWN } },
1673     { KE_IGNORE, 0xC6, }, /* Ambient Light Sensor notification */
1674     + { KE_KEY, 0xFA, { KEY_PROG2 } }, /* Lid flip action */
1675     { KE_END, 0},
1676     };
1677    
1678     diff --git a/drivers/platform/x86/intel_punit_ipc.c b/drivers/platform/x86/intel_punit_ipc.c
1679     index b5b890127479..b7dfe06261f1 100644
1680     --- a/drivers/platform/x86/intel_punit_ipc.c
1681     +++ b/drivers/platform/x86/intel_punit_ipc.c
1682     @@ -17,6 +17,7 @@
1683     #include <linux/bitops.h>
1684     #include <linux/device.h>
1685     #include <linux/interrupt.h>
1686     +#include <linux/io.h>
1687     #include <linux/platform_device.h>
1688     #include <asm/intel_punit_ipc.h>
1689    
1690     diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c
1691     index d589331d1884..3540d00425d0 100644
1692     --- a/drivers/pwm/pwm-meson.c
1693     +++ b/drivers/pwm/pwm-meson.c
1694     @@ -432,7 +432,6 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
1695     struct meson_pwm_channel *channels)
1696     {
1697     struct device *dev = meson->chip.dev;
1698     - struct device_node *np = dev->of_node;
1699     struct clk_init_data init;
1700     unsigned int i;
1701     char name[255];
1702     @@ -441,7 +440,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson,
1703     for (i = 0; i < meson->chip.npwm; i++) {
1704     struct meson_pwm_channel *channel = &channels[i];
1705    
1706     - snprintf(name, sizeof(name), "%pOF#mux%u", np, i);
1707     + snprintf(name, sizeof(name), "%s#mux%u", dev_name(dev), i);
1708    
1709     init.name = name;
1710     init.ops = &clk_mux_ops;
1711     diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
1712     index 5ede251c52ca..4c7c8455da96 100644
1713     --- a/drivers/s390/block/dasd_eckd.c
1714     +++ b/drivers/s390/block/dasd_eckd.c
1715     @@ -1778,6 +1778,9 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
1716     struct dasd_eckd_private *private = device->private;
1717     int i;
1718    
1719     + if (!private)
1720     + return;
1721     +
1722     dasd_alias_disconnect_device_from_lcu(device);
1723     private->ned = NULL;
1724     private->sneq = NULL;
1725     @@ -2032,8 +2035,11 @@ static int dasd_eckd_basic_to_ready(struct dasd_device *device)
1726    
1727     static int dasd_eckd_online_to_ready(struct dasd_device *device)
1728     {
1729     - cancel_work_sync(&device->reload_device);
1730     - cancel_work_sync(&device->kick_validate);
1731     + if (cancel_work_sync(&device->reload_device))
1732     + dasd_put_device(device);
1733     + if (cancel_work_sync(&device->kick_validate))
1734     + dasd_put_device(device);
1735     +
1736     return 0;
1737     };
1738    
1739     diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
1740     index 6c838865ac5a..4a4746cc6745 100644
1741     --- a/drivers/scsi/aic94xx/aic94xx_init.c
1742     +++ b/drivers/scsi/aic94xx/aic94xx_init.c
1743     @@ -1030,8 +1030,10 @@ static int __init aic94xx_init(void)
1744    
1745     aic94xx_transport_template =
1746     sas_domain_attach_transport(&aic94xx_transport_functions);
1747     - if (!aic94xx_transport_template)
1748     + if (!aic94xx_transport_template) {
1749     + err = -ENOMEM;
1750     goto out_destroy_caches;
1751     + }
1752    
1753     err = pci_register_driver(&aic94xx_pci_driver);
1754     if (err)
1755     diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
1756     index 2cac160993bb..158f3e83efb6 100644
1757     --- a/drivers/staging/comedi/drivers/ni_mio_common.c
1758     +++ b/drivers/staging/comedi/drivers/ni_mio_common.c
1759     @@ -5453,11 +5453,11 @@ static int ni_E_init(struct comedi_device *dev,
1760     /* Digital I/O (PFI) subdevice */
1761     s = &dev->subdevices[NI_PFI_DIO_SUBDEV];
1762     s->type = COMEDI_SUBD_DIO;
1763     - s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
1764     s->maxdata = 1;
1765     if (devpriv->is_m_series) {
1766     s->n_chan = 16;
1767     s->insn_bits = ni_pfi_insn_bits;
1768     + s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
1769    
1770     ni_writew(dev, s->state, NI_M_PFI_DO_REG);
1771     for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
1772     @@ -5466,6 +5466,7 @@ static int ni_E_init(struct comedi_device *dev,
1773     }
1774     } else {
1775     s->n_chan = 10;
1776     + s->subdev_flags = SDF_INTERNAL;
1777     }
1778     s->insn_config = ni_pfi_insn_config;
1779    
1780     diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
1781     index 23fa7c8b09a5..cebe9878ca03 100644
1782     --- a/drivers/staging/irda/net/af_irda.c
1783     +++ b/drivers/staging/irda/net/af_irda.c
1784     @@ -775,6 +775,13 @@ static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1785     return -EINVAL;
1786    
1787     lock_sock(sk);
1788     +
1789     + /* Ensure that the socket is not already bound */
1790     + if (self->ias_obj) {
1791     + err = -EINVAL;
1792     + goto out;
1793     + }
1794     +
1795     #ifdef CONFIG_IRDA_ULTRA
1796     /* Special care for Ultra sockets */
1797     if ((sk->sk_type == SOCK_DGRAM) &&
1798     @@ -2012,7 +2019,11 @@ static int irda_setsockopt(struct socket *sock, int level, int optname,
1799     err = -EINVAL;
1800     goto out;
1801     }
1802     - irias_insert_object(ias_obj);
1803     +
1804     + /* Only insert newly allocated objects */
1805     + if (free_ias)
1806     + irias_insert_object(ias_obj);
1807     +
1808     kfree(ias_opt);
1809     break;
1810     case IRLMP_IAS_DEL:
1811     diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
1812     index dca78bb20e5d..8b323a360e03 100644
1813     --- a/drivers/usb/dwc3/core.c
1814     +++ b/drivers/usb/dwc3/core.c
1815     @@ -511,6 +511,22 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
1816     parms->hwparams8 = dwc3_readl(dwc->regs, DWC3_GHWPARAMS8);
1817     }
1818    
1819     +static int dwc3_core_ulpi_init(struct dwc3 *dwc)
1820     +{
1821     + int intf;
1822     + int ret = 0;
1823     +
1824     + intf = DWC3_GHWPARAMS3_HSPHY_IFC(dwc->hwparams.hwparams3);
1825     +
1826     + if (intf == DWC3_GHWPARAMS3_HSPHY_IFC_ULPI ||
1827     + (intf == DWC3_GHWPARAMS3_HSPHY_IFC_UTMI_ULPI &&
1828     + dwc->hsphy_interface &&
1829     + !strncmp(dwc->hsphy_interface, "ulpi", 4)))
1830     + ret = dwc3_ulpi_init(dwc);
1831     +
1832     + return ret;
1833     +}
1834     +
1835     /**
1836     * dwc3_phy_setup - Configure USB PHY Interface of DWC3 Core
1837     * @dwc: Pointer to our controller context structure
1838     @@ -522,7 +538,6 @@ static void dwc3_cache_hwparams(struct dwc3 *dwc)
1839     static int dwc3_phy_setup(struct dwc3 *dwc)
1840     {
1841     u32 reg;
1842     - int ret;
1843    
1844     reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
1845    
1846     @@ -593,9 +608,6 @@ static int dwc3_phy_setup(struct dwc3 *dwc)
1847     }
1848     /* FALLTHROUGH */
1849     case DWC3_GHWPARAMS3_HSPHY_IFC_ULPI:
1850     - ret = dwc3_ulpi_init(dwc);
1851     - if (ret)
1852     - return ret;
1853     /* FALLTHROUGH */
1854     default:
1855     break;
1856     @@ -752,6 +764,7 @@ static void dwc3_core_setup_global_control(struct dwc3 *dwc)
1857     }
1858    
1859     static int dwc3_core_get_phy(struct dwc3 *dwc);
1860     +static int dwc3_core_ulpi_init(struct dwc3 *dwc);
1861    
1862     /**
1863     * dwc3_core_init - Low-level initialization of DWC3 Core
1864     @@ -783,17 +796,27 @@ static int dwc3_core_init(struct dwc3 *dwc)
1865     dwc->maximum_speed = USB_SPEED_HIGH;
1866     }
1867    
1868     - ret = dwc3_core_get_phy(dwc);
1869     + ret = dwc3_phy_setup(dwc);
1870     if (ret)
1871     goto err0;
1872    
1873     - ret = dwc3_core_soft_reset(dwc);
1874     - if (ret)
1875     - goto err0;
1876     + if (!dwc->ulpi_ready) {
1877     + ret = dwc3_core_ulpi_init(dwc);
1878     + if (ret)
1879     + goto err0;
1880     + dwc->ulpi_ready = true;
1881     + }
1882    
1883     - ret = dwc3_phy_setup(dwc);
1884     + if (!dwc->phys_ready) {
1885     + ret = dwc3_core_get_phy(dwc);
1886     + if (ret)
1887     + goto err0a;
1888     + dwc->phys_ready = true;
1889     + }
1890     +
1891     + ret = dwc3_core_soft_reset(dwc);
1892     if (ret)
1893     - goto err0;
1894     + goto err0a;
1895    
1896     dwc3_core_setup_global_control(dwc);
1897     dwc3_core_num_eps(dwc);
1898     @@ -866,6 +889,9 @@ err1:
1899     phy_exit(dwc->usb2_generic_phy);
1900     phy_exit(dwc->usb3_generic_phy);
1901    
1902     +err0a:
1903     + dwc3_ulpi_exit(dwc);
1904     +
1905     err0:
1906     return ret;
1907     }
1908     @@ -1256,7 +1282,6 @@ err4:
1909    
1910     err3:
1911     dwc3_free_event_buffers(dwc);
1912     - dwc3_ulpi_exit(dwc);
1913    
1914     err2:
1915     pm_runtime_allow(&pdev->dev);
1916     diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
1917     index b782ba58a7fc..abd1142c9e4d 100644
1918     --- a/drivers/usb/dwc3/core.h
1919     +++ b/drivers/usb/dwc3/core.h
1920     @@ -805,7 +805,9 @@ struct dwc3_scratchpad_array {
1921     * @usb3_phy: pointer to USB3 PHY
1922     * @usb2_generic_phy: pointer to USB2 PHY
1923     * @usb3_generic_phy: pointer to USB3 PHY
1924     + * @phys_ready: flag to indicate that PHYs are ready
1925     * @ulpi: pointer to ulpi interface
1926     + * @ulpi_ready: flag to indicate that ULPI is initialized
1927     * @isoch_delay: wValue from Set Isochronous Delay request;
1928     * @u2sel: parameter from Set SEL request.
1929     * @u2pel: parameter from Set SEL request.
1930     @@ -903,7 +905,10 @@ struct dwc3 {
1931     struct phy *usb2_generic_phy;
1932     struct phy *usb3_generic_phy;
1933    
1934     + bool phys_ready;
1935     +
1936     struct ulpi *ulpi;
1937     + bool ulpi_ready;
1938    
1939     void __iomem *regs;
1940     size_t regs_size;
1941     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1942     index 3cf74f54c7a1..7ee3167bc083 100644
1943     --- a/drivers/vhost/vhost.c
1944     +++ b/drivers/vhost/vhost.c
1945     @@ -960,7 +960,7 @@ static void vhost_iotlb_notify_vq(struct vhost_dev *d,
1946     list_for_each_entry_safe(node, n, &d->pending_list, node) {
1947     struct vhost_iotlb_msg *vq_msg = &node->msg.iotlb;
1948     if (msg->iova <= vq_msg->iova &&
1949     - msg->iova + msg->size - 1 > vq_msg->iova &&
1950     + msg->iova + msg->size - 1 >= vq_msg->iova &&
1951     vq_msg->type == VHOST_IOTLB_MISS) {
1952     vhost_poll_queue(&node->vq->poll);
1953     list_del(&node->node);
1954     diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
1955     index 2780886e8ba3..de062fb201bc 100644
1956     --- a/drivers/virtio/virtio_pci_legacy.c
1957     +++ b/drivers/virtio/virtio_pci_legacy.c
1958     @@ -122,6 +122,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
1959     struct virtqueue *vq;
1960     u16 num;
1961     int err;
1962     + u64 q_pfn;
1963    
1964     /* Select the queue we're interested in */
1965     iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
1966     @@ -141,9 +142,17 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
1967     if (!vq)
1968     return ERR_PTR(-ENOMEM);
1969    
1970     + q_pfn = virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
1971     + if (q_pfn >> 32) {
1972     + dev_err(&vp_dev->pci_dev->dev,
1973     + "platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
1974     + 0x1ULL << (32 + PAGE_SHIFT - 30));
1975     + err = -E2BIG;
1976     + goto out_del_vq;
1977     + }
1978     +
1979     /* activate the queue */
1980     - iowrite32(virtqueue_get_desc_addr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT,
1981     - vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
1982     + iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
1983    
1984     vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
1985    
1986     @@ -160,6 +169,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
1987    
1988     out_deactivate:
1989     iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
1990     +out_del_vq:
1991     vring_del_virtqueue(vq);
1992     return ERR_PTR(err);
1993     }
1994     diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
1995     index b437fccd4e62..294f35ce9e46 100644
1996     --- a/drivers/xen/xen-balloon.c
1997     +++ b/drivers/xen/xen-balloon.c
1998     @@ -81,7 +81,7 @@ static void watch_target(struct xenbus_watch *watch,
1999     static_max = new_target;
2000     else
2001     static_max >>= PAGE_SHIFT - 10;
2002     - target_diff = xen_pv_domain() ? 0
2003     + target_diff = (xen_pv_domain() || xen_initial_domain()) ? 0
2004     : static_max - balloon_stats.target_pages;
2005     }
2006    
2007     diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
2008     index 7c655f9a7a50..dd80a1bdf9e2 100644
2009     --- a/fs/btrfs/dev-replace.c
2010     +++ b/fs/btrfs/dev-replace.c
2011     @@ -588,6 +588,12 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
2012    
2013     btrfs_rm_dev_replace_unblocked(fs_info);
2014    
2015     + /*
2016     + * Increment dev_stats_ccnt so that btrfs_run_dev_stats() will
2017     + * update on-disk dev stats value during commit transaction
2018     + */
2019     + atomic_inc(&tgt_device->dev_stats_ccnt);
2020     +
2021     /*
2022     * this is again a consistent state where no dev_replace procedure
2023     * is running, the target device is part of the filesystem, the
2024     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2025     index bbabe37c2e8c..f96f72659693 100644
2026     --- a/fs/btrfs/extent-tree.c
2027     +++ b/fs/btrfs/extent-tree.c
2028     @@ -10757,7 +10757,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
2029     /* Don't want to race with allocators so take the groups_sem */
2030     down_write(&space_info->groups_sem);
2031     spin_lock(&block_group->lock);
2032     - if (block_group->reserved ||
2033     + if (block_group->reserved || block_group->pinned ||
2034     btrfs_block_group_used(&block_group->item) ||
2035     block_group->ro ||
2036     list_is_singular(&block_group->list)) {
2037     diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
2038     index 9841faef08ea..b80b03e0c5d3 100644
2039     --- a/fs/btrfs/relocation.c
2040     +++ b/fs/btrfs/relocation.c
2041     @@ -1334,18 +1334,19 @@ static void __del_reloc_root(struct btrfs_root *root)
2042     struct mapping_node *node = NULL;
2043     struct reloc_control *rc = fs_info->reloc_ctl;
2044    
2045     - spin_lock(&rc->reloc_root_tree.lock);
2046     - rb_node = tree_search(&rc->reloc_root_tree.rb_root,
2047     - root->node->start);
2048     - if (rb_node) {
2049     - node = rb_entry(rb_node, struct mapping_node, rb_node);
2050     - rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
2051     + if (rc) {
2052     + spin_lock(&rc->reloc_root_tree.lock);
2053     + rb_node = tree_search(&rc->reloc_root_tree.rb_root,
2054     + root->node->start);
2055     + if (rb_node) {
2056     + node = rb_entry(rb_node, struct mapping_node, rb_node);
2057     + rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
2058     + }
2059     + spin_unlock(&rc->reloc_root_tree.lock);
2060     + if (!node)
2061     + return;
2062     + BUG_ON((struct btrfs_root *)node->data != root);
2063     }
2064     - spin_unlock(&rc->reloc_root_tree.lock);
2065     -
2066     - if (!node)
2067     - return;
2068     - BUG_ON((struct btrfs_root *)node->data != root);
2069    
2070     spin_lock(&fs_info->trans_lock);
2071     list_del_init(&root->root_list);
2072     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2073     index 08afafb6ecf7..a39b1f0b0606 100644
2074     --- a/fs/btrfs/volumes.c
2075     +++ b/fs/btrfs/volumes.c
2076     @@ -6492,10 +6492,14 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
2077     write_lock(&map_tree->map_tree.lock);
2078     ret = add_extent_mapping(&map_tree->map_tree, em, 0);
2079     write_unlock(&map_tree->map_tree.lock);
2080     - BUG_ON(ret); /* Tree corruption */
2081     + if (ret < 0) {
2082     + btrfs_err(fs_info,
2083     + "failed to add chunk map, start=%llu len=%llu: %d",
2084     + em->start, em->len, ret);
2085     + }
2086     free_extent_map(em);
2087    
2088     - return 0;
2089     + return ret;
2090     }
2091    
2092     static void fill_device_from_item(struct extent_buffer *leaf,
2093     diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
2094     index 53c9c49f0fbb..2565cee702e4 100644
2095     --- a/fs/cifs/cifs_debug.c
2096     +++ b/fs/cifs/cifs_debug.c
2097     @@ -289,6 +289,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
2098     atomic_set(&totBufAllocCount, 0);
2099     atomic_set(&totSmBufAllocCount, 0);
2100     #endif /* CONFIG_CIFS_STATS2 */
2101     + spin_lock(&GlobalMid_Lock);
2102     + GlobalMaxActiveXid = 0;
2103     + GlobalCurrentXid = 0;
2104     + spin_unlock(&GlobalMid_Lock);
2105     spin_lock(&cifs_tcp_ses_lock);
2106     list_for_each(tmp1, &cifs_tcp_ses_list) {
2107     server = list_entry(tmp1, struct TCP_Server_Info,
2108     @@ -301,6 +305,10 @@ static ssize_t cifs_stats_proc_write(struct file *file,
2109     struct cifs_tcon,
2110     tcon_list);
2111     atomic_set(&tcon->num_smbs_sent, 0);
2112     + spin_lock(&tcon->stat_lock);
2113     + tcon->bytes_read = 0;
2114     + tcon->bytes_written = 0;
2115     + spin_unlock(&tcon->stat_lock);
2116     if (server->ops->clear_stats)
2117     server->ops->clear_stats(tcon);
2118     }
2119     diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
2120     index 7b08a1446a7f..efdfdb47a7dd 100644
2121     --- a/fs/cifs/smb2misc.c
2122     +++ b/fs/cifs/smb2misc.c
2123     @@ -211,6 +211,13 @@ smb2_check_message(char *buf, unsigned int length, struct TCP_Server_Info *srvr)
2124     if (clc_len == 4 + len + 1)
2125     return 0;
2126    
2127     + /*
2128     + * Some windows servers (win2016) will pad also the final
2129     + * PDU in a compound to 8 bytes.
2130     + */
2131     + if (((clc_len + 7) & ~7) == len)
2132     + return 0;
2133     +
2134     /*
2135     * MacOS server pads after SMB2.1 write response with 3 bytes
2136     * of junk. Other servers match RFC1001 len to actual
2137     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
2138     index e317e9a400c1..58842b36481d 100644
2139     --- a/fs/cifs/smb2pdu.c
2140     +++ b/fs/cifs/smb2pdu.c
2141     @@ -393,7 +393,7 @@ small_smb2_init(__le16 smb2_command, struct cifs_tcon *tcon,
2142     pdu->hdr.smb2_buf_length = cpu_to_be32(total_len);
2143    
2144     if (tcon != NULL) {
2145     -#ifdef CONFIG_CIFS_STATS2
2146     +#ifdef CONFIG_CIFS_STATS
2147     uint16_t com_code = le16_to_cpu(smb2_command);
2148     cifs_stats_inc(&tcon->stats.smb2_stats.smb2_com_sent[com_code]);
2149     #endif
2150     diff --git a/fs/dcache.c b/fs/dcache.c
2151     index 8d4935978fec..c1a7c174a905 100644
2152     --- a/fs/dcache.c
2153     +++ b/fs/dcache.c
2154     @@ -291,7 +291,8 @@ void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry
2155     spin_unlock(&dentry->d_lock);
2156     name->name = p->name;
2157     } else {
2158     - memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
2159     + memcpy(name->inline_name, dentry->d_iname,
2160     + dentry->d_name.len + 1);
2161     spin_unlock(&dentry->d_lock);
2162     name->name = name->inline_name;
2163     }
2164     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
2165     index 85142e5df88b..e10bd73f0723 100644
2166     --- a/fs/f2fs/data.c
2167     +++ b/fs/f2fs/data.c
2168     @@ -2190,6 +2190,10 @@ static int f2fs_set_data_page_dirty(struct page *page)
2169     if (!PageUptodate(page))
2170     SetPageUptodate(page);
2171    
2172     + /* don't remain PG_checked flag which was set during GC */
2173     + if (is_cold_data(page))
2174     + clear_cold_data(page);
2175     +
2176     if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
2177     if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
2178     register_inmem_page(inode, page);
2179     diff --git a/fs/fat/cache.c b/fs/fat/cache.c
2180     index e9bed49df6b7..78d501c1fb65 100644
2181     --- a/fs/fat/cache.c
2182     +++ b/fs/fat/cache.c
2183     @@ -225,7 +225,8 @@ static inline void cache_init(struct fat_cache_id *cid, int fclus, int dclus)
2184     int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
2185     {
2186     struct super_block *sb = inode->i_sb;
2187     - const int limit = sb->s_maxbytes >> MSDOS_SB(sb)->cluster_bits;
2188     + struct msdos_sb_info *sbi = MSDOS_SB(sb);
2189     + const int limit = sb->s_maxbytes >> sbi->cluster_bits;
2190     struct fat_entry fatent;
2191     struct fat_cache_id cid;
2192     int nr;
2193     @@ -234,6 +235,12 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
2194    
2195     *fclus = 0;
2196     *dclus = MSDOS_I(inode)->i_start;
2197     + if (!fat_valid_entry(sbi, *dclus)) {
2198     + fat_fs_error_ratelimit(sb,
2199     + "%s: invalid start cluster (i_pos %lld, start %08x)",
2200     + __func__, MSDOS_I(inode)->i_pos, *dclus);
2201     + return -EIO;
2202     + }
2203     if (cluster == 0)
2204     return 0;
2205    
2206     @@ -250,9 +257,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
2207     /* prevent the infinite loop of cluster chain */
2208     if (*fclus > limit) {
2209     fat_fs_error_ratelimit(sb,
2210     - "%s: detected the cluster chain loop"
2211     - " (i_pos %lld)", __func__,
2212     - MSDOS_I(inode)->i_pos);
2213     + "%s: detected the cluster chain loop (i_pos %lld)",
2214     + __func__, MSDOS_I(inode)->i_pos);
2215     nr = -EIO;
2216     goto out;
2217     }
2218     @@ -262,9 +268,8 @@ int fat_get_cluster(struct inode *inode, int cluster, int *fclus, int *dclus)
2219     goto out;
2220     else if (nr == FAT_ENT_FREE) {
2221     fat_fs_error_ratelimit(sb,
2222     - "%s: invalid cluster chain (i_pos %lld)",
2223     - __func__,
2224     - MSDOS_I(inode)->i_pos);
2225     + "%s: invalid cluster chain (i_pos %lld)",
2226     + __func__, MSDOS_I(inode)->i_pos);
2227     nr = -EIO;
2228     goto out;
2229     } else if (nr == FAT_ENT_EOF) {
2230     diff --git a/fs/fat/fat.h b/fs/fat/fat.h
2231     index 8fc1093da47d..a0a00f3734bc 100644
2232     --- a/fs/fat/fat.h
2233     +++ b/fs/fat/fat.h
2234     @@ -348,6 +348,11 @@ static inline void fatent_brelse(struct fat_entry *fatent)
2235     fatent->fat_inode = NULL;
2236     }
2237    
2238     +static inline bool fat_valid_entry(struct msdos_sb_info *sbi, int entry)
2239     +{
2240     + return FAT_START_ENT <= entry && entry < sbi->max_cluster;
2241     +}
2242     +
2243     extern void fat_ent_access_init(struct super_block *sb);
2244     extern int fat_ent_read(struct inode *inode, struct fat_entry *fatent,
2245     int entry);
2246     diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
2247     index 48b2336692f9..a40f36b1b292 100644
2248     --- a/fs/fat/fatent.c
2249     +++ b/fs/fat/fatent.c
2250     @@ -23,7 +23,7 @@ static void fat12_ent_blocknr(struct super_block *sb, int entry,
2251     {
2252     struct msdos_sb_info *sbi = MSDOS_SB(sb);
2253     int bytes = entry + (entry >> 1);
2254     - WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
2255     + WARN_ON(!fat_valid_entry(sbi, entry));
2256     *offset = bytes & (sb->s_blocksize - 1);
2257     *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
2258     }
2259     @@ -33,7 +33,7 @@ static void fat_ent_blocknr(struct super_block *sb, int entry,
2260     {
2261     struct msdos_sb_info *sbi = MSDOS_SB(sb);
2262     int bytes = (entry << sbi->fatent_shift);
2263     - WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
2264     + WARN_ON(!fat_valid_entry(sbi, entry));
2265     *offset = bytes & (sb->s_blocksize - 1);
2266     *blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
2267     }
2268     @@ -353,7 +353,7 @@ int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
2269     int err, offset;
2270     sector_t blocknr;
2271    
2272     - if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
2273     + if (!fat_valid_entry(sbi, entry)) {
2274     fatent_brelse(fatent);
2275     fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
2276     return -EIO;
2277     diff --git a/fs/hfs/brec.c b/fs/hfs/brec.c
2278     index ad04a5741016..9a8772465a90 100644
2279     --- a/fs/hfs/brec.c
2280     +++ b/fs/hfs/brec.c
2281     @@ -75,9 +75,10 @@ int hfs_brec_insert(struct hfs_find_data *fd, void *entry, int entry_len)
2282     if (!fd->bnode) {
2283     if (!tree->root)
2284     hfs_btree_inc_height(tree);
2285     - fd->bnode = hfs_bnode_find(tree, tree->leaf_head);
2286     - if (IS_ERR(fd->bnode))
2287     - return PTR_ERR(fd->bnode);
2288     + node = hfs_bnode_find(tree, tree->leaf_head);
2289     + if (IS_ERR(node))
2290     + return PTR_ERR(node);
2291     + fd->bnode = node;
2292     fd->record = -1;
2293     }
2294     new_node = NULL;
2295     diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
2296     index e8120a282435..1a44c4621e74 100644
2297     --- a/fs/hfsplus/dir.c
2298     +++ b/fs/hfsplus/dir.c
2299     @@ -78,13 +78,13 @@ again:
2300     cpu_to_be32(HFSP_HARDLINK_TYPE) &&
2301     entry.file.user_info.fdCreator ==
2302     cpu_to_be32(HFSP_HFSPLUS_CREATOR) &&
2303     + HFSPLUS_SB(sb)->hidden_dir &&
2304     (entry.file.create_date ==
2305     HFSPLUS_I(HFSPLUS_SB(sb)->hidden_dir)->
2306     create_date ||
2307     entry.file.create_date ==
2308     HFSPLUS_I(d_inode(sb->s_root))->
2309     - create_date) &&
2310     - HFSPLUS_SB(sb)->hidden_dir) {
2311     + create_date)) {
2312     struct qstr str;
2313     char name[32];
2314    
2315     diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
2316     index 3cba08c931ee..410f59372f19 100644
2317     --- a/fs/hfsplus/super.c
2318     +++ b/fs/hfsplus/super.c
2319     @@ -524,8 +524,10 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
2320     goto out_put_root;
2321     if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
2322     hfs_find_exit(&fd);
2323     - if (entry.type != cpu_to_be16(HFSPLUS_FOLDER))
2324     + if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
2325     + err = -EINVAL;
2326     goto out_put_root;
2327     + }
2328     inode = hfsplus_iget(sb, be32_to_cpu(entry.folder.id));
2329     if (IS_ERR(inode)) {
2330     err = PTR_ERR(inode);
2331     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2332     index dda4a3a3ef6e..77c7d29fcd3b 100644
2333     --- a/fs/nfs/nfs4proc.c
2334     +++ b/fs/nfs/nfs4proc.c
2335     @@ -7497,7 +7497,7 @@ static int nfs4_sp4_select_mode(struct nfs_client *clp,
2336     }
2337     out:
2338     clp->cl_sp4_flags = flags;
2339     - return 0;
2340     + return ret;
2341     }
2342    
2343     struct nfs41_exchange_id_data {
2344     diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
2345     index e64ecb9f2720..66c373230e60 100644
2346     --- a/fs/proc/kcore.c
2347     +++ b/fs/proc/kcore.c
2348     @@ -384,8 +384,10 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
2349     phdr->p_flags = PF_R|PF_W|PF_X;
2350     phdr->p_offset = kc_vaddr_to_offset(m->addr) + dataoff;
2351     phdr->p_vaddr = (size_t)m->addr;
2352     - if (m->type == KCORE_RAM || m->type == KCORE_TEXT)
2353     + if (m->type == KCORE_RAM)
2354     phdr->p_paddr = __pa(m->addr);
2355     + else if (m->type == KCORE_TEXT)
2356     + phdr->p_paddr = __pa_symbol(m->addr);
2357     else
2358     phdr->p_paddr = (elf_addr_t)-1;
2359     phdr->p_filesz = phdr->p_memsz = m->size;
2360     diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
2361     index 48835a659948..eabf85371ece 100644
2362     --- a/fs/reiserfs/reiserfs.h
2363     +++ b/fs/reiserfs/reiserfs.h
2364     @@ -271,7 +271,7 @@ struct reiserfs_journal_list {
2365    
2366     struct mutex j_commit_mutex;
2367     unsigned int j_trans_id;
2368     - time_t j_timestamp;
2369     + time64_t j_timestamp; /* write-only but useful for crash dump analysis */
2370     struct reiserfs_list_bitmap *j_list_bitmap;
2371     struct buffer_head *j_commit_bh; /* commit buffer head */
2372     struct reiserfs_journal_cnode *j_realblock;
2373     diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
2374     index ab20dc5db423..7fa3f1498b34 100644
2375     --- a/include/linux/pci_ids.h
2376     +++ b/include/linux/pci_ids.h
2377     @@ -3062,4 +3062,6 @@
2378    
2379     #define PCI_VENDOR_ID_OCZ 0x1b85
2380    
2381     +#define PCI_VENDOR_ID_NCUBE 0x10ff
2382     +
2383     #endif /* _LINUX_PCI_IDS_H */
2384     diff --git a/include/net/tcp.h b/include/net/tcp.h
2385     index eca8d65cad1e..0c828aac7e04 100644
2386     --- a/include/net/tcp.h
2387     +++ b/include/net/tcp.h
2388     @@ -2063,6 +2063,10 @@ int tcp_set_ulp(struct sock *sk, const char *name);
2389     void tcp_get_available_ulp(char *buf, size_t len);
2390     void tcp_cleanup_ulp(struct sock *sk);
2391    
2392     +#define MODULE_ALIAS_TCP_ULP(name) \
2393     + __MODULE_INFO(alias, alias_userspace, name); \
2394     + __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
2395     +
2396     /* Call BPF_SOCK_OPS program that returns an int. If the return value
2397     * is < 0, then the BPF op failed (for example if the loaded BPF
2398     * program does not support the chosen operation or there is no BPF
2399     diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
2400     index 7b8c9e19bad1..910cc4334b21 100644
2401     --- a/include/uapi/linux/keyctl.h
2402     +++ b/include/uapi/linux/keyctl.h
2403     @@ -65,7 +65,7 @@
2404    
2405     /* keyctl structures */
2406     struct keyctl_dh_params {
2407     - __s32 private;
2408     + __s32 dh_private;
2409     __s32 prime;
2410     __s32 base;
2411     };
2412     diff --git a/kernel/fork.c b/kernel/fork.c
2413     index 91907a3701ce..6a219fea4926 100644
2414     --- a/kernel/fork.c
2415     +++ b/kernel/fork.c
2416     @@ -1350,7 +1350,9 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
2417     return -ENOMEM;
2418    
2419     atomic_set(&sig->count, 1);
2420     + spin_lock_irq(&current->sighand->siglock);
2421     memcpy(sig->action, current->sighand->action, sizeof(sig->action));
2422     + spin_unlock_irq(&current->sighand->siglock);
2423     return 0;
2424     }
2425    
2426     diff --git a/kernel/memremap.c b/kernel/memremap.c
2427     index 4712ce646e04..2b136d4988f7 100644
2428     --- a/kernel/memremap.c
2429     +++ b/kernel/memremap.c
2430     @@ -248,13 +248,16 @@ int device_private_entry_fault(struct vm_area_struct *vma,
2431     EXPORT_SYMBOL(device_private_entry_fault);
2432     #endif /* CONFIG_DEVICE_PRIVATE */
2433    
2434     -static void pgmap_radix_release(struct resource *res)
2435     +static void pgmap_radix_release(struct resource *res, unsigned long end_pgoff)
2436     {
2437     unsigned long pgoff, order;
2438    
2439     mutex_lock(&pgmap_lock);
2440     - foreach_order_pgoff(res, order, pgoff)
2441     + foreach_order_pgoff(res, order, pgoff) {
2442     + if (pgoff >= end_pgoff)
2443     + break;
2444     radix_tree_delete(&pgmap_radix, PHYS_PFN(res->start) + pgoff);
2445     + }
2446     mutex_unlock(&pgmap_lock);
2447    
2448     synchronize_rcu();
2449     @@ -309,7 +312,7 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
2450     mem_hotplug_done();
2451    
2452     untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
2453     - pgmap_radix_release(res);
2454     + pgmap_radix_release(res, -1);
2455     dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
2456     "%s: failed to free all reserved pages\n", __func__);
2457     }
2458     @@ -459,7 +462,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
2459     untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
2460     err_pfn_remap:
2461     err_radix:
2462     - pgmap_radix_release(res);
2463     + pgmap_radix_release(res, pgoff);
2464     devres_free(page_map);
2465     return ERR_PTR(error);
2466     }
2467     diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
2468     index 501f17c642ab..b2589c7e9439 100644
2469     --- a/kernel/sched/deadline.c
2470     +++ b/kernel/sched/deadline.c
2471     @@ -1365,6 +1365,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
2472     update_dl_entity(dl_se, pi_se);
2473     } else if (flags & ENQUEUE_REPLENISH) {
2474     replenish_dl_entity(dl_se, pi_se);
2475     + } else if ((flags & ENQUEUE_RESTORE) &&
2476     + dl_time_before(dl_se->deadline,
2477     + rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
2478     + setup_new_dl_entity(dl_se);
2479     }
2480    
2481     __enqueue_dl_entity(dl_se);
2482     @@ -2256,13 +2260,6 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
2483    
2484     return;
2485     }
2486     - /*
2487     - * If p is boosted we already updated its params in
2488     - * rt_mutex_setprio()->enqueue_task(..., ENQUEUE_REPLENISH),
2489     - * p's deadline being now already after rq_clock(rq).
2490     - */
2491     - if (dl_time_before(p->dl.deadline, rq_clock(rq)))
2492     - setup_new_dl_entity(&p->dl);
2493    
2494     if (rq->curr != p) {
2495     #ifdef CONFIG_SMP
2496     diff --git a/lib/debugobjects.c b/lib/debugobjects.c
2497     index 2f5349c6e81a..99308479b1c8 100644
2498     --- a/lib/debugobjects.c
2499     +++ b/lib/debugobjects.c
2500     @@ -322,9 +322,12 @@ static void debug_object_is_on_stack(void *addr, int onstack)
2501    
2502     limit++;
2503     if (is_on_stack)
2504     - pr_warn("object is on stack, but not annotated\n");
2505     + pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
2506     + task_stack_page(current));
2507     else
2508     - pr_warn("object is not on stack, but annotated\n");
2509     + pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
2510     + task_stack_page(current));
2511     +
2512     WARN_ON(1);
2513     }
2514    
2515     diff --git a/mm/fadvise.c b/mm/fadvise.c
2516     index 767887f5f3bf..3f5f68ad5708 100644
2517     --- a/mm/fadvise.c
2518     +++ b/mm/fadvise.c
2519     @@ -71,8 +71,12 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
2520     goto out;
2521     }
2522    
2523     - /* Careful about overflows. Len == 0 means "as much as possible" */
2524     - endbyte = offset + len;
2525     + /*
2526     + * Careful about overflows. Len == 0 means "as much as possible". Use
2527     + * unsigned math because signed overflows are undefined and UBSan
2528     + * complains.
2529     + */
2530     + endbyte = (u64)offset + (u64)len;
2531     if (!len || endbyte < len)
2532     endbyte = -1;
2533     else
2534     diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
2535     index 38e21a1e97bc..a9c65f13b7f5 100644
2536     --- a/net/9p/trans_fd.c
2537     +++ b/net/9p/trans_fd.c
2538     @@ -199,15 +199,14 @@ static void p9_mux_poll_stop(struct p9_conn *m)
2539     static void p9_conn_cancel(struct p9_conn *m, int err)
2540     {
2541     struct p9_req_t *req, *rtmp;
2542     - unsigned long flags;
2543     LIST_HEAD(cancel_list);
2544    
2545     p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
2546    
2547     - spin_lock_irqsave(&m->client->lock, flags);
2548     + spin_lock(&m->client->lock);
2549    
2550     if (m->err) {
2551     - spin_unlock_irqrestore(&m->client->lock, flags);
2552     + spin_unlock(&m->client->lock);
2553     return;
2554     }
2555    
2556     @@ -219,7 +218,6 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
2557     list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
2558     list_move(&req->req_list, &cancel_list);
2559     }
2560     - spin_unlock_irqrestore(&m->client->lock, flags);
2561    
2562     list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
2563     p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
2564     @@ -228,6 +226,7 @@ static void p9_conn_cancel(struct p9_conn *m, int err)
2565     req->t_err = err;
2566     p9_client_cb(m->client, req, REQ_STATUS_ERROR);
2567     }
2568     + spin_unlock(&m->client->lock);
2569     }
2570    
2571     static int
2572     @@ -385,8 +384,9 @@ static void p9_read_work(struct work_struct *work)
2573     if (m->req->status != REQ_STATUS_ERROR)
2574     status = REQ_STATUS_RCVD;
2575     list_del(&m->req->req_list);
2576     - spin_unlock(&m->client->lock);
2577     + /* update req->status while holding client->lock */
2578     p9_client_cb(m->client, m->req, status);
2579     + spin_unlock(&m->client->lock);
2580     m->rc.sdata = NULL;
2581     m->rc.offset = 0;
2582     m->rc.capacity = 0;
2583     diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
2584     index da0d3b257459..e73fd647065a 100644
2585     --- a/net/9p/trans_virtio.c
2586     +++ b/net/9p/trans_virtio.c
2587     @@ -571,7 +571,7 @@ static int p9_virtio_probe(struct virtio_device *vdev)
2588     chan->vq = virtio_find_single_vq(vdev, req_done, "requests");
2589     if (IS_ERR(chan->vq)) {
2590     err = PTR_ERR(chan->vq);
2591     - goto out_free_vq;
2592     + goto out_free_chan;
2593     }
2594     chan->vq->vdev->priv = chan;
2595     spin_lock_init(&chan->lock);
2596     @@ -624,6 +624,7 @@ out_free_tag:
2597     kfree(tag);
2598     out_free_vq:
2599     vdev->config->del_vqs(vdev);
2600     +out_free_chan:
2601     kfree(chan);
2602     fail:
2603     return err;
2604     diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
2605     index a95ccdceb797..0e1a670dabd9 100644
2606     --- a/net/ipv4/tcp_ipv4.c
2607     +++ b/net/ipv4/tcp_ipv4.c
2608     @@ -2468,6 +2468,12 @@ static int __net_init tcp_sk_init(struct net *net)
2609     if (res)
2610     goto fail;
2611     sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2612     +
2613     + /* Please enforce IP_DF and IPID==0 for RST and
2614     + * ACK sent in SYN-RECV and TIME-WAIT state.
2615     + */
2616     + inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;
2617     +
2618     *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2619     }
2620    
2621     diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
2622     index 420fecbb98fe..61584638dba7 100644
2623     --- a/net/ipv4/tcp_minisocks.c
2624     +++ b/net/ipv4/tcp_minisocks.c
2625     @@ -185,8 +185,9 @@ kill:
2626     inet_twsk_deschedule_put(tw);
2627     return TCP_TW_SUCCESS;
2628     }
2629     + } else {
2630     + inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
2631     }
2632     - inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
2633    
2634     if (tmp_opt.saw_tstamp) {
2635     tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
2636     diff --git a/net/ipv4/tcp_ulp.c b/net/ipv4/tcp_ulp.c
2637     index 6bb9e14c710a..1feecb72f4fc 100644
2638     --- a/net/ipv4/tcp_ulp.c
2639     +++ b/net/ipv4/tcp_ulp.c
2640     @@ -39,7 +39,7 @@ static const struct tcp_ulp_ops *__tcp_ulp_find_autoload(const char *name)
2641     #ifdef CONFIG_MODULES
2642     if (!ulp && capable(CAP_NET_ADMIN)) {
2643     rcu_read_unlock();
2644     - request_module("%s", name);
2645     + request_module("tcp-ulp-%s", name);
2646     rcu_read_lock();
2647     ulp = tcp_ulp_find(name);
2648     }
2649     diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
2650     index b9e638cc955f..db5a24f09335 100644
2651     --- a/net/ipv6/ip6_vti.c
2652     +++ b/net/ipv6/ip6_vti.c
2653     @@ -481,7 +481,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
2654     }
2655    
2656     mtu = dst_mtu(dst);
2657     - if (!skb->ignore_df && skb->len > mtu) {
2658     + if (skb->len > mtu) {
2659     skb_dst_update_pmtu(skb, mtu);
2660    
2661     if (skb->protocol == htons(ETH_P_IPV6)) {
2662     diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
2663     index 1c4a5de3f301..40eb16bd9786 100644
2664     --- a/net/ipv6/netfilter/ip6t_rpfilter.c
2665     +++ b/net/ipv6/netfilter/ip6t_rpfilter.c
2666     @@ -26,6 +26,12 @@ static bool rpfilter_addr_unicast(const struct in6_addr *addr)
2667     return addr_type & IPV6_ADDR_UNICAST;
2668     }
2669    
2670     +static bool rpfilter_addr_linklocal(const struct in6_addr *addr)
2671     +{
2672     + int addr_type = ipv6_addr_type(addr);
2673     + return addr_type & IPV6_ADDR_LINKLOCAL;
2674     +}
2675     +
2676     static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
2677     const struct net_device *dev, u8 flags)
2678     {
2679     @@ -48,7 +54,11 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
2680     }
2681    
2682     fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
2683     - if ((flags & XT_RPFILTER_LOOSE) == 0)
2684     +
2685     + if (rpfilter_addr_linklocal(&iph->saddr)) {
2686     + lookup_flags |= RT6_LOOKUP_F_IFACE;
2687     + fl6.flowi6_oif = dev->ifindex;
2688     + } else if ((flags & XT_RPFILTER_LOOSE) == 0)
2689     fl6.flowi6_oif = dev->ifindex;
2690    
2691     rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
2692     diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
2693     index 5cb7cac9177d..1bd53b1e7672 100644
2694     --- a/net/netfilter/ipvs/ip_vs_core.c
2695     +++ b/net/netfilter/ipvs/ip_vs_core.c
2696     @@ -1960,13 +1960,20 @@ ip_vs_in(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, int
2697     if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {
2698     /* the destination server is not available */
2699    
2700     - if (sysctl_expire_nodest_conn(ipvs)) {
2701     + __u32 flags = cp->flags;
2702     +
2703     + /* when timer already started, silently drop the packet.*/
2704     + if (timer_pending(&cp->timer))
2705     + __ip_vs_conn_put(cp);
2706     + else
2707     + ip_vs_conn_put(cp);
2708     +
2709     + if (sysctl_expire_nodest_conn(ipvs) &&
2710     + !(flags & IP_VS_CONN_F_ONE_PACKET)) {
2711     /* try to expire the connection immediately */
2712     ip_vs_conn_expire_now(cp);
2713     }
2714     - /* don't restart its timer, and silently
2715     - drop the packet. */
2716     - __ip_vs_conn_put(cp);
2717     +
2718     return NF_DROP;
2719     }
2720    
2721     diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
2722     index de4053d84364..48dab1403b2c 100644
2723     --- a/net/netfilter/nf_conntrack_netlink.c
2724     +++ b/net/netfilter/nf_conntrack_netlink.c
2725     @@ -788,6 +788,21 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[])
2726     #endif
2727     }
2728    
2729     +static int ctnetlink_start(struct netlink_callback *cb)
2730     +{
2731     + const struct nlattr * const *cda = cb->data;
2732     + struct ctnetlink_filter *filter = NULL;
2733     +
2734     + if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
2735     + filter = ctnetlink_alloc_filter(cda);
2736     + if (IS_ERR(filter))
2737     + return PTR_ERR(filter);
2738     + }
2739     +
2740     + cb->data = filter;
2741     + return 0;
2742     +}
2743     +
2744     static int ctnetlink_filter_match(struct nf_conn *ct, void *data)
2745     {
2746     struct ctnetlink_filter *filter = data;
2747     @@ -1194,19 +1209,12 @@ static int ctnetlink_get_conntrack(struct net *net, struct sock *ctnl,
2748    
2749     if (nlh->nlmsg_flags & NLM_F_DUMP) {
2750     struct netlink_dump_control c = {
2751     + .start = ctnetlink_start,
2752     .dump = ctnetlink_dump_table,
2753     .done = ctnetlink_done,
2754     + .data = (void *)cda,
2755     };
2756    
2757     - if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
2758     - struct ctnetlink_filter *filter;
2759     -
2760     - filter = ctnetlink_alloc_filter(cda);
2761     - if (IS_ERR(filter))
2762     - return PTR_ERR(filter);
2763     -
2764     - c.data = filter;
2765     - }
2766     return netlink_dump_start(ctnl, skb, nlh, &c);
2767     }
2768    
2769     diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
2770     index c45e6d4358ab..75624d17fc69 100644
2771     --- a/net/netfilter/nfnetlink_acct.c
2772     +++ b/net/netfilter/nfnetlink_acct.c
2773     @@ -238,29 +238,33 @@ static const struct nla_policy filter_policy[NFACCT_FILTER_MAX + 1] = {
2774     [NFACCT_FILTER_VALUE] = { .type = NLA_U32 },
2775     };
2776    
2777     -static struct nfacct_filter *
2778     -nfacct_filter_alloc(const struct nlattr * const attr)
2779     +static int nfnl_acct_start(struct netlink_callback *cb)
2780     {
2781     - struct nfacct_filter *filter;
2782     + const struct nlattr *const attr = cb->data;
2783     struct nlattr *tb[NFACCT_FILTER_MAX + 1];
2784     + struct nfacct_filter *filter;
2785     int err;
2786    
2787     + if (!attr)
2788     + return 0;
2789     +
2790     err = nla_parse_nested(tb, NFACCT_FILTER_MAX, attr, filter_policy,
2791     NULL);
2792     if (err < 0)
2793     - return ERR_PTR(err);
2794     + return err;
2795    
2796     if (!tb[NFACCT_FILTER_MASK] || !tb[NFACCT_FILTER_VALUE])
2797     - return ERR_PTR(-EINVAL);
2798     + return -EINVAL;
2799    
2800     filter = kzalloc(sizeof(struct nfacct_filter), GFP_KERNEL);
2801     if (!filter)
2802     - return ERR_PTR(-ENOMEM);
2803     + return -ENOMEM;
2804    
2805     filter->mask = ntohl(nla_get_be32(tb[NFACCT_FILTER_MASK]));
2806     filter->value = ntohl(nla_get_be32(tb[NFACCT_FILTER_VALUE]));
2807     + cb->data = filter;
2808    
2809     - return filter;
2810     + return 0;
2811     }
2812    
2813     static int nfnl_acct_get(struct net *net, struct sock *nfnl,
2814     @@ -275,18 +279,11 @@ static int nfnl_acct_get(struct net *net, struct sock *nfnl,
2815     if (nlh->nlmsg_flags & NLM_F_DUMP) {
2816     struct netlink_dump_control c = {
2817     .dump = nfnl_acct_dump,
2818     + .start = nfnl_acct_start,
2819     .done = nfnl_acct_done,
2820     + .data = (void *)tb[NFACCT_FILTER],
2821     };
2822    
2823     - if (tb[NFACCT_FILTER]) {
2824     - struct nfacct_filter *filter;
2825     -
2826     - filter = nfacct_filter_alloc(tb[NFACCT_FILTER]);
2827     - if (IS_ERR(filter))
2828     - return PTR_ERR(filter);
2829     -
2830     - c.data = filter;
2831     - }
2832     return netlink_dump_start(nfnl, skb, nlh, &c);
2833     }
2834    
2835     diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
2836     index 48332a6ed738..d290416e79e9 100644
2837     --- a/net/rds/ib_frmr.c
2838     +++ b/net/rds/ib_frmr.c
2839     @@ -61,6 +61,7 @@ static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
2840     pool->fmr_attr.max_pages);
2841     if (IS_ERR(frmr->mr)) {
2842     pr_warn("RDS/IB: %s failed to allocate MR", __func__);
2843     + err = PTR_ERR(frmr->mr);
2844     goto out_no_cigar;
2845     }
2846    
2847     diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
2848     index 85757af7f150..31de26c99023 100644
2849     --- a/net/sched/act_ife.c
2850     +++ b/net/sched/act_ife.c
2851     @@ -249,10 +249,8 @@ static int ife_validate_metatype(struct tcf_meta_ops *ops, void *val, int len)
2852     }
2853    
2854     /* called when adding new meta information
2855     - * under ife->tcf_lock for existing action
2856     */
2857     -static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
2858     - void *val, int len, bool exists)
2859     +static int load_metaops_and_vet(u32 metaid, void *val, int len)
2860     {
2861     struct tcf_meta_ops *ops = find_ife_oplist(metaid);
2862     int ret = 0;
2863     @@ -260,13 +258,9 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
2864     if (!ops) {
2865     ret = -ENOENT;
2866     #ifdef CONFIG_MODULES
2867     - if (exists)
2868     - spin_unlock_bh(&ife->tcf_lock);
2869     rtnl_unlock();
2870     request_module("ifemeta%u", metaid);
2871     rtnl_lock();
2872     - if (exists)
2873     - spin_lock_bh(&ife->tcf_lock);
2874     ops = find_ife_oplist(metaid);
2875     #endif
2876     }
2877     @@ -283,24 +277,17 @@ static int load_metaops_and_vet(struct tcf_ife_info *ife, u32 metaid,
2878     }
2879    
2880     /* called when adding new meta information
2881     - * under ife->tcf_lock for existing action
2882     */
2883     -static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
2884     - int len, bool atomic)
2885     +static int __add_metainfo(const struct tcf_meta_ops *ops,
2886     + struct tcf_ife_info *ife, u32 metaid, void *metaval,
2887     + int len, bool atomic, bool exists)
2888     {
2889     struct tcf_meta_info *mi = NULL;
2890     - struct tcf_meta_ops *ops = find_ife_oplist(metaid);
2891     int ret = 0;
2892    
2893     - if (!ops)
2894     - return -ENOENT;
2895     -
2896     mi = kzalloc(sizeof(*mi), atomic ? GFP_ATOMIC : GFP_KERNEL);
2897     - if (!mi) {
2898     - /*put back what find_ife_oplist took */
2899     - module_put(ops->owner);
2900     + if (!mi)
2901     return -ENOMEM;
2902     - }
2903    
2904     mi->metaid = metaid;
2905     mi->ops = ops;
2906     @@ -308,17 +295,49 @@ static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
2907     ret = ops->alloc(mi, metaval, atomic ? GFP_ATOMIC : GFP_KERNEL);
2908     if (ret != 0) {
2909     kfree(mi);
2910     - module_put(ops->owner);
2911     return ret;
2912     }
2913     }
2914    
2915     + if (exists)
2916     + spin_lock_bh(&ife->tcf_lock);
2917     list_add_tail(&mi->metalist, &ife->metalist);
2918     + if (exists)
2919     + spin_unlock_bh(&ife->tcf_lock);
2920     +
2921     + return ret;
2922     +}
2923     +
2924     +static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
2925     + struct tcf_ife_info *ife, u32 metaid,
2926     + bool exists)
2927     +{
2928     + int ret;
2929     +
2930     + if (!try_module_get(ops->owner))
2931     + return -ENOENT;
2932     + ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
2933     + if (ret)
2934     + module_put(ops->owner);
2935     + return ret;
2936     +}
2937     +
2938     +static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
2939     + int len, bool exists)
2940     +{
2941     + const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
2942     + int ret;
2943    
2944     + if (!ops)
2945     + return -ENOENT;
2946     + ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
2947     + if (ret)
2948     + /*put back what find_ife_oplist took */
2949     + module_put(ops->owner);
2950     return ret;
2951     }
2952    
2953     -static int use_all_metadata(struct tcf_ife_info *ife)
2954     +static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
2955     {
2956     struct tcf_meta_ops *o;
2957     int rc = 0;
2958     @@ -326,7 +345,7 @@ static int use_all_metadata(struct tcf_ife_info *ife)
2959    
2960     read_lock(&ife_mod_lock);
2961     list_for_each_entry(o, &ifeoplist, list) {
2962     - rc = add_metainfo(ife, o->metaid, NULL, 0, true);
2963     + rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
2964     if (rc == 0)
2965     installed += 1;
2966     }
2967     @@ -377,7 +396,6 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
2968     struct tcf_meta_info *e, *n;
2969    
2970     list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
2971     - module_put(e->ops->owner);
2972     list_del(&e->metalist);
2973     if (e->metaval) {
2974     if (e->ops->release)
2975     @@ -385,6 +403,7 @@ static void _tcf_ife_cleanup(struct tc_action *a, int bind)
2976     else
2977     kfree(e->metaval);
2978     }
2979     + module_put(e->ops->owner);
2980     kfree(e);
2981     }
2982     }
2983     @@ -398,7 +417,6 @@ static void tcf_ife_cleanup(struct tc_action *a, int bind)
2984     spin_unlock_bh(&ife->tcf_lock);
2985     }
2986    
2987     -/* under ife->tcf_lock for existing action */
2988     static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
2989     bool exists)
2990     {
2991     @@ -412,7 +430,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
2992     val = nla_data(tb[i]);
2993     len = nla_len(tb[i]);
2994    
2995     - rc = load_metaops_and_vet(ife, i, val, len, exists);
2996     + rc = load_metaops_and_vet(i, val, len);
2997     if (rc != 0)
2998     return rc;
2999    
3000     @@ -481,6 +499,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
3001     if (exists)
3002     spin_lock_bh(&ife->tcf_lock);
3003     ife->tcf_action = parm->action;
3004     + if (exists)
3005     + spin_unlock_bh(&ife->tcf_lock);
3006    
3007     if (parm->flags & IFE_ENCODE) {
3008     if (daddr)
3009     @@ -508,9 +528,6 @@ metadata_parse_err:
3010     tcf_idr_release(*a, bind);
3011     if (ret == ACT_P_CREATED)
3012     _tcf_ife_cleanup(*a, bind);
3013     -
3014     - if (exists)
3015     - spin_unlock_bh(&ife->tcf_lock);
3016     return err;
3017     }
3018    
3019     @@ -524,20 +541,14 @@ metadata_parse_err:
3020     * as we can. You better have at least one else we are
3021     * going to bail out
3022     */
3023     - err = use_all_metadata(ife);
3024     + err = use_all_metadata(ife, exists);
3025     if (err) {
3026     if (ret == ACT_P_CREATED)
3027     _tcf_ife_cleanup(*a, bind);
3028     -
3029     - if (exists)
3030     - spin_unlock_bh(&ife->tcf_lock);
3031     return err;
3032     }
3033     }
3034    
3035     - if (exists)
3036     - spin_unlock_bh(&ife->tcf_lock);
3037     -
3038     if (ret == ACT_P_CREATED)
3039     tcf_idr_insert(tn, *a);
3040    
3041     diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
3042     index 51ab463d9e16..656b6ada9221 100644
3043     --- a/net/sched/act_pedit.c
3044     +++ b/net/sched/act_pedit.c
3045     @@ -109,16 +109,18 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
3046     {
3047     struct nlattr *keys_start = nla_nest_start(skb, TCA_PEDIT_KEYS_EX);
3048    
3049     + if (!keys_start)
3050     + goto nla_failure;
3051     for (; n > 0; n--) {
3052     struct nlattr *key_start;
3053    
3054     key_start = nla_nest_start(skb, TCA_PEDIT_KEY_EX);
3055     + if (!key_start)
3056     + goto nla_failure;
3057    
3058     if (nla_put_u16(skb, TCA_PEDIT_KEY_EX_HTYPE, keys_ex->htype) ||
3059     - nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd)) {
3060     - nlmsg_trim(skb, keys_start);
3061     - return -EINVAL;
3062     - }
3063     + nla_put_u16(skb, TCA_PEDIT_KEY_EX_CMD, keys_ex->cmd))
3064     + goto nla_failure;
3065    
3066     nla_nest_end(skb, key_start);
3067    
3068     @@ -128,6 +130,9 @@ static int tcf_pedit_key_ex_dump(struct sk_buff *skb,
3069     nla_nest_end(skb, keys_start);
3070    
3071     return 0;
3072     +nla_failure:
3073     + nla_nest_cancel(skb, keys_start);
3074     + return -EINVAL;
3075     }
3076    
3077     static int tcf_pedit_init(struct net *net, struct nlattr *nla,
3078     @@ -395,7 +400,10 @@ static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a,
3079     opt->bindcnt = p->tcf_bindcnt - bind;
3080    
3081     if (p->tcfp_keys_ex) {
3082     - tcf_pedit_key_ex_dump(skb, p->tcfp_keys_ex, p->tcfp_nkeys);
3083     + if (tcf_pedit_key_ex_dump(skb,
3084     + p->tcfp_keys_ex,
3085     + p->tcfp_nkeys))
3086     + goto nla_put_failure;
3087    
3088     if (nla_put(skb, TCA_PEDIT_PARMS_EX, s, opt))
3089     goto nla_put_failure;
3090     diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
3091     index ba37d8f57e68..0c9bc29dcf97 100644
3092     --- a/net/sched/cls_u32.c
3093     +++ b/net/sched/cls_u32.c
3094     @@ -903,6 +903,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
3095     struct nlattr *opt = tca[TCA_OPTIONS];
3096     struct nlattr *tb[TCA_U32_MAX + 1];
3097     u32 htid, flags = 0;
3098     + size_t sel_size;
3099     int err;
3100     #ifdef CONFIG_CLS_U32_PERF
3101     size_t size;
3102     @@ -1024,8 +1025,11 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
3103     return -EINVAL;
3104    
3105     s = nla_data(tb[TCA_U32_SEL]);
3106     + sel_size = sizeof(*s) + sizeof(*s->keys) * s->nkeys;
3107     + if (nla_len(tb[TCA_U32_SEL]) < sel_size)
3108     + return -EINVAL;
3109    
3110     - n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
3111     + n = kzalloc(offsetof(typeof(*n), sel) + sel_size, GFP_KERNEL);
3112     if (n == NULL)
3113     return -ENOBUFS;
3114    
3115     @@ -1038,7 +1042,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
3116     }
3117     #endif
3118    
3119     - memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
3120     + memcpy(&n->sel, s, sel_size);
3121     RCU_INIT_POINTER(n->ht_up, ht);
3122     n->handle = handle;
3123     n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;
3124     diff --git a/net/sctp/proc.c b/net/sctp/proc.c
3125     index 26b4be6b4172..6c82a959fc6e 100644
3126     --- a/net/sctp/proc.c
3127     +++ b/net/sctp/proc.c
3128     @@ -335,8 +335,6 @@ static int sctp_assocs_seq_show(struct seq_file *seq, void *v)
3129     }
3130    
3131     transport = (struct sctp_transport *)v;
3132     - if (!sctp_transport_hold(transport))
3133     - return 0;
3134     assoc = transport->asoc;
3135     epb = &assoc->base;
3136     sk = epb->sk;
3137     @@ -426,8 +424,6 @@ static int sctp_remaddr_seq_show(struct seq_file *seq, void *v)
3138     }
3139    
3140     transport = (struct sctp_transport *)v;
3141     - if (!sctp_transport_hold(transport))
3142     - return 0;
3143     assoc = transport->asoc;
3144    
3145     list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
3146     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3147     index 2d6f612f32c3..790094311143 100644
3148     --- a/net/sctp/socket.c
3149     +++ b/net/sctp/socket.c
3150     @@ -4660,9 +4660,14 @@ struct sctp_transport *sctp_transport_get_next(struct net *net,
3151     break;
3152     }
3153    
3154     + if (!sctp_transport_hold(t))
3155     + continue;
3156     +
3157     if (net_eq(sock_net(t->asoc->base.sk), net) &&
3158     t->asoc->peer.primary_path == t)
3159     break;
3160     +
3161     + sctp_transport_put(t);
3162     }
3163    
3164     return t;
3165     @@ -4672,13 +4677,18 @@ struct sctp_transport *sctp_transport_get_idx(struct net *net,
3166     struct rhashtable_iter *iter,
3167     int pos)
3168     {
3169     - void *obj = SEQ_START_TOKEN;
3170     + struct sctp_transport *t;
3171    
3172     - while (pos && (obj = sctp_transport_get_next(net, iter)) &&
3173     - !IS_ERR(obj))
3174     - pos--;
3175     + if (!pos)
3176     + return SEQ_START_TOKEN;
3177    
3178     - return obj;
3179     + while ((t = sctp_transport_get_next(net, iter)) && !IS_ERR(t)) {
3180     + if (!--pos)
3181     + break;
3182     + sctp_transport_put(t);
3183     + }
3184     +
3185     + return t;
3186     }
3187    
3188     int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
3189     @@ -4738,8 +4748,6 @@ again:
3190    
3191     tsp = sctp_transport_get_idx(net, &hti, *pos + 1);
3192     for (; !IS_ERR_OR_NULL(tsp); tsp = sctp_transport_get_next(net, &hti)) {
3193     - if (!sctp_transport_hold(tsp))
3194     - continue;
3195     ret = cb(tsp, p);
3196     if (ret)
3197     break;
3198     diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
3199     index 8654494b4d0a..834eb2b9e41b 100644
3200     --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
3201     +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
3202     @@ -169,7 +169,7 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
3203     struct scatterlist sg[1];
3204     int err = -1;
3205     u8 *checksumdata;
3206     - u8 rc4salt[4];
3207     + u8 *rc4salt;
3208     struct crypto_ahash *md5;
3209     struct crypto_ahash *hmac_md5;
3210     struct ahash_request *req;
3211     @@ -183,14 +183,18 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
3212     return GSS_S_FAILURE;
3213     }
3214    
3215     + rc4salt = kmalloc_array(4, sizeof(*rc4salt), GFP_NOFS);
3216     + if (!rc4salt)
3217     + return GSS_S_FAILURE;
3218     +
3219     if (arcfour_hmac_md5_usage_to_salt(usage, rc4salt)) {
3220     dprintk("%s: invalid usage value %u\n", __func__, usage);
3221     - return GSS_S_FAILURE;
3222     + goto out_free_rc4salt;
3223     }
3224    
3225     checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_NOFS);
3226     if (!checksumdata)
3227     - return GSS_S_FAILURE;
3228     + goto out_free_rc4salt;
3229    
3230     md5 = crypto_alloc_ahash("md5", 0, CRYPTO_ALG_ASYNC);
3231     if (IS_ERR(md5))
3232     @@ -258,6 +262,8 @@ out_free_md5:
3233     crypto_free_ahash(md5);
3234     out_free_cksum:
3235     kfree(checksumdata);
3236     +out_free_rc4salt:
3237     + kfree(rc4salt);
3238     return err ? GSS_S_FAILURE : 0;
3239     }
3240    
3241     diff --git a/net/tipc/socket.c b/net/tipc/socket.c
3242     index 98a44ecb11e7..0aebf0695ae0 100644
3243     --- a/net/tipc/socket.c
3244     +++ b/net/tipc/socket.c
3245     @@ -2268,6 +2268,8 @@ void tipc_sk_reinit(struct net *net)
3246     walk_stop:
3247     rhashtable_walk_stop(&iter);
3248     } while (tsk == ERR_PTR(-EAGAIN));
3249     +
3250     + rhashtable_walk_exit(&iter);
3251     }
3252    
3253     static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
3254     diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
3255     index ffb1a3a69bdd..055b9992d8c7 100644
3256     --- a/net/tls/tls_main.c
3257     +++ b/net/tls/tls_main.c
3258     @@ -44,6 +44,7 @@
3259     MODULE_AUTHOR("Mellanox Technologies");
3260     MODULE_DESCRIPTION("Transport Layer Security Support");
3261     MODULE_LICENSE("Dual BSD/GPL");
3262     +MODULE_ALIAS_TCP_ULP("tls");
3263    
3264     static struct proto tls_base_prot;
3265     static struct proto tls_sw_prot;
3266     diff --git a/scripts/depmod.sh b/scripts/depmod.sh
3267     index f41b0a4b575c..cf5b2b24b3cf 100755
3268     --- a/scripts/depmod.sh
3269     +++ b/scripts/depmod.sh
3270     @@ -16,9 +16,9 @@ if ! test -r System.map ; then
3271     fi
3272    
3273     if [ -z $(command -v $DEPMOD) ]; then
3274     - echo "'make modules_install' requires $DEPMOD. Please install it." >&2
3275     + echo "Warning: 'make modules_install' requires $DEPMOD. Please install it." >&2
3276     echo "This is probably in the kmod package." >&2
3277     - exit 1
3278     + exit 0
3279     fi
3280    
3281     # older versions of depmod don't support -P <symbol-prefix>
3282     diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
3283     index 54deaa1066cf..957f6041dd79 100644
3284     --- a/scripts/mod/modpost.c
3285     +++ b/scripts/mod/modpost.c
3286     @@ -677,7 +677,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
3287     if (ELF_ST_TYPE(sym->st_info) == STT_SPARC_REGISTER)
3288     break;
3289     if (symname[0] == '.') {
3290     - char *munged = strdup(symname);
3291     + char *munged = NOFAIL(strdup(symname));
3292     munged[0] = '_';
3293     munged[1] = toupper(munged[1]);
3294     symname = munged;
3295     @@ -1329,7 +1329,7 @@ static Elf_Sym *find_elf_symbol2(struct elf_info *elf, Elf_Addr addr,
3296     static char *sec2annotation(const char *s)
3297     {
3298     if (match(s, init_exit_sections)) {
3299     - char *p = malloc(20);
3300     + char *p = NOFAIL(malloc(20));
3301     char *r = p;
3302    
3303     *p++ = '_';
3304     @@ -1349,7 +1349,7 @@ static char *sec2annotation(const char *s)
3305     strcat(p, " ");
3306     return r;
3307     } else {
3308     - return strdup("");
3309     + return NOFAIL(strdup(""));
3310     }
3311     }
3312    
3313     @@ -2050,7 +2050,7 @@ void buf_write(struct buffer *buf, const char *s, int len)
3314     {
3315     if (buf->size - buf->pos < len) {
3316     buf->size += len + SZ;
3317     - buf->p = realloc(buf->p, buf->size);
3318     + buf->p = NOFAIL(realloc(buf->p, buf->size));
3319     }
3320     strncpy(buf->p + buf->pos, s, len);
3321     buf->pos += len;
3322     diff --git a/security/keys/dh.c b/security/keys/dh.c
3323     index d1ea9f325f94..35543f04e759 100644
3324     --- a/security/keys/dh.c
3325     +++ b/security/keys/dh.c
3326     @@ -307,7 +307,7 @@ long __keyctl_dh_compute(struct keyctl_dh_params __user *params,
3327     }
3328     dh_inputs.g_size = dlen;
3329    
3330     - dlen = dh_data_from_key(pcopy.private, &dh_inputs.key);
3331     + dlen = dh_data_from_key(pcopy.dh_private, &dh_inputs.key);
3332     if (dlen < 0) {
3333     ret = dlen;
3334     goto out2;
3335     diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
3336     index 0791fec398fb..1cd20b88a3a9 100644
3337     --- a/sound/soc/codecs/rt5677.c
3338     +++ b/sound/soc/codecs/rt5677.c
3339     @@ -5017,7 +5017,7 @@ static const struct i2c_device_id rt5677_i2c_id[] = {
3340     MODULE_DEVICE_TABLE(i2c, rt5677_i2c_id);
3341    
3342     static const struct of_device_id rt5677_of_match[] = {
3343     - { .compatible = "realtek,rt5677", RT5677 },
3344     + { .compatible = "realtek,rt5677", .data = (const void *)RT5677 },
3345     { }
3346     };
3347     MODULE_DEVICE_TABLE(of, rt5677_of_match);
3348     diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
3349     index 3896523b71e9..f289762cd676 100644
3350     --- a/sound/soc/codecs/wm8994.c
3351     +++ b/sound/soc/codecs/wm8994.c
3352     @@ -2431,6 +2431,7 @@ static int wm8994_set_dai_sysclk(struct snd_soc_dai *dai,
3353     snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_2,
3354     WM8994_OPCLK_ENA, 0);
3355     }
3356     + break;
3357    
3358     default:
3359     return -EINVAL;
3360     diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
3361     index 53d83d7e6a09..20e7d74d86cd 100644
3362     --- a/tools/perf/arch/powerpc/util/sym-handling.c
3363     +++ b/tools/perf/arch/powerpc/util/sym-handling.c
3364     @@ -141,8 +141,10 @@ void arch__post_process_probe_trace_events(struct perf_probe_event *pev,
3365     for (i = 0; i < ntevs; i++) {
3366     tev = &pev->tevs[i];
3367     map__for_each_symbol(map, sym, tmp) {
3368     - if (map->unmap_ip(map, sym->start) == tev->point.address)
3369     + if (map->unmap_ip(map, sym->start) == tev->point.address) {
3370     arch__fix_tev_from_maps(pev, tev, map, sym);
3371     + break;
3372     + }
3373     }
3374     }
3375     }
3376     diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c
3377     index a58e91197729..1ef0049860a8 100644
3378     --- a/tools/perf/util/namespaces.c
3379     +++ b/tools/perf/util/namespaces.c
3380     @@ -138,6 +138,9 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi)
3381     {
3382     struct nsinfo *nnsi;
3383    
3384     + if (nsi == NULL)
3385     + return NULL;
3386     +
3387     nnsi = calloc(1, sizeof(*nnsi));
3388     if (nnsi != NULL) {
3389     nnsi->pid = nsi->pid;
3390     diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
3391     index 66d31de60b9a..9d7166dfad1e 100644
3392     --- a/tools/testing/selftests/powerpc/harness.c
3393     +++ b/tools/testing/selftests/powerpc/harness.c
3394     @@ -85,13 +85,13 @@ wait:
3395     return status;
3396     }
3397    
3398     -static void alarm_handler(int signum)
3399     +static void sig_handler(int signum)
3400     {
3401     - /* Jut wake us up from waitpid */
3402     + /* Just wake us up from waitpid */
3403     }
3404    
3405     -static struct sigaction alarm_action = {
3406     - .sa_handler = alarm_handler,
3407     +static struct sigaction sig_action = {
3408     + .sa_handler = sig_handler,
3409     };
3410    
3411     void test_harness_set_timeout(uint64_t time)
3412     @@ -106,8 +106,14 @@ int test_harness(int (test_function)(void), char *name)
3413     test_start(name);
3414     test_set_git_version(GIT_VERSION);
3415    
3416     - if (sigaction(SIGALRM, &alarm_action, NULL)) {
3417     - perror("sigaction");
3418     + if (sigaction(SIGINT, &sig_action, NULL)) {
3419     + perror("sigaction (sigint)");
3420     + test_error(name);
3421     + return 1;
3422     + }
3423     +
3424     + if (sigaction(SIGALRM, &sig_action, NULL)) {
3425     + perror("sigaction (sigalrm)");
3426     test_error(name);
3427     return 1;
3428     }