Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0153-4.19.54-all-fixes.patch



Revision 3432
Fri Aug 2 11:48:00 2019 UTC by niro
File size: 61166 bytes
-linux-4.19.54
1 diff --git a/Makefile b/Makefile
2 index bedcb121dc3d..b234837e4d07 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 53
10 +SUBLEVEL = 54
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
15 index ad8be16a39c9..58102652bf9e 100644
16 --- a/arch/arm64/include/asm/syscall.h
17 +++ b/arch/arm64/include/asm/syscall.h
18 @@ -20,7 +20,7 @@
19 #include <linux/compat.h>
20 #include <linux/err.h>
21
22 -typedef long (*syscall_fn_t)(struct pt_regs *regs);
23 +typedef long (*syscall_fn_t)(const struct pt_regs *regs);
24
25 extern const syscall_fn_t sys_call_table[];
26
27 diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
28 index a4477e515b79..507d0ee6bc69 100644
29 --- a/arch/arm64/include/asm/syscall_wrapper.h
30 +++ b/arch/arm64/include/asm/syscall_wrapper.h
31 @@ -30,10 +30,10 @@
32 } \
33 static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
34
35 -#define COMPAT_SYSCALL_DEFINE0(sname) \
36 - asmlinkage long __arm64_compat_sys_##sname(void); \
37 - ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
38 - asmlinkage long __arm64_compat_sys_##sname(void)
39 +#define COMPAT_SYSCALL_DEFINE0(sname) \
40 + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused); \
41 + ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
42 + asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused)
43
44 #define COND_SYSCALL_COMPAT(name) \
45 cond_syscall(__arm64_compat_sys_##name);
46 @@ -62,11 +62,11 @@
47 static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
48
49 #ifndef SYSCALL_DEFINE0
50 -#define SYSCALL_DEFINE0(sname) \
51 - SYSCALL_METADATA(_##sname, 0); \
52 - asmlinkage long __arm64_sys_##sname(void); \
53 - ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
54 - asmlinkage long __arm64_sys_##sname(void)
55 +#define SYSCALL_DEFINE0(sname) \
56 + SYSCALL_METADATA(_##sname, 0); \
57 + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \
58 + ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
59 + asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
60 #endif
61
62 #ifndef COND_SYSCALL
63 diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
64 index 162a95ed0881..fe20c461582a 100644
65 --- a/arch/arm64/kernel/sys.c
66 +++ b/arch/arm64/kernel/sys.c
67 @@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
68 return ksys_personality(personality);
69 }
70
71 +asmlinkage long sys_ni_syscall(void);
72 +
73 +asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
74 +{
75 + return sys_ni_syscall();
76 +}
77 +
78 /*
79 * Wrappers to pass the pt_regs argument.
80 */
81 #define __arm64_sys_personality __arm64_sys_arm64_personality
82
83 -asmlinkage long sys_ni_syscall(const struct pt_regs *);
84 -#define __arm64_sys_ni_syscall sys_ni_syscall
85 -
86 #undef __SYSCALL
87 #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
88 #include <asm/unistd.h>
89
90 #undef __SYSCALL
91 -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
92 +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
93
94 const syscall_fn_t sys_call_table[__NR_syscalls] = {
95 - [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
96 + [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
97 #include <asm/unistd.h>
98 };
99 diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
100 index 0f8bcb7de700..3c80a40c1c9d 100644
101 --- a/arch/arm64/kernel/sys32.c
102 +++ b/arch/arm64/kernel/sys32.c
103 @@ -133,17 +133,14 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
104 return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
105 }
106
107 -asmlinkage long sys_ni_syscall(const struct pt_regs *);
108 -#define __arm64_sys_ni_syscall sys_ni_syscall
109 -
110 #undef __SYSCALL
111 #define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
112 #include <asm/unistd32.h>
113
114 #undef __SYSCALL
115 -#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
116 +#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
117
118 const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
119 - [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
120 + [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
121 #include <asm/unistd32.h>
122 };
123 diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
124 index aa19b7ac8222..476c7b4be378 100644
125 --- a/arch/ia64/mm/numa.c
126 +++ b/arch/ia64/mm/numa.c
127 @@ -49,6 +49,7 @@ paddr_to_nid(unsigned long paddr)
128
129 return (i < num_node_memblks) ? node_memblk[i].nid : (num_node_memblks ? -1 : 0);
130 }
131 +EXPORT_SYMBOL(paddr_to_nid);
132
133 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
134 /*
135 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
136 index bccc5051249e..2b6049e83970 100644
137 --- a/arch/powerpc/include/asm/kvm_host.h
138 +++ b/arch/powerpc/include/asm/kvm_host.h
139 @@ -299,6 +299,7 @@ struct kvm_arch {
140 #ifdef CONFIG_PPC_BOOK3S_64
141 struct list_head spapr_tce_tables;
142 struct list_head rtas_tokens;
143 + struct mutex rtas_token_lock;
144 DECLARE_BITMAP(enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
145 #endif
146 #ifdef CONFIG_KVM_MPIC
147 diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
148 index 87348e498c89..281f074581a3 100644
149 --- a/arch/powerpc/kvm/book3s.c
150 +++ b/arch/powerpc/kvm/book3s.c
151 @@ -840,6 +840,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
152 #ifdef CONFIG_PPC64
153 INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
154 INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
155 + mutex_init(&kvm->arch.rtas_token_lock);
156 #endif
157
158 return kvm->arch.kvm_ops->init_vm(kvm);
159 diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
160 index 3e3a71594e63..083dcedba11c 100644
161 --- a/arch/powerpc/kvm/book3s_hv.c
162 +++ b/arch/powerpc/kvm/book3s_hv.c
163 @@ -426,12 +426,7 @@ static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
164
165 static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
166 {
167 - struct kvm_vcpu *ret;
168 -
169 - mutex_lock(&kvm->lock);
170 - ret = kvm_get_vcpu_by_id(kvm, id);
171 - mutex_unlock(&kvm->lock);
172 - return ret;
173 + return kvm_get_vcpu_by_id(kvm, id);
174 }
175
176 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
177 @@ -1309,7 +1304,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
178 struct kvmppc_vcore *vc = vcpu->arch.vcore;
179 u64 mask;
180
181 - mutex_lock(&kvm->lock);
182 spin_lock(&vc->lock);
183 /*
184 * If ILE (interrupt little-endian) has changed, update the
185 @@ -1349,7 +1343,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
186 mask &= 0xFFFFFFFF;
187 vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
188 spin_unlock(&vc->lock);
189 - mutex_unlock(&kvm->lock);
190 }
191
192 static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
193 diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c
194 index 2d3b2b1cc272..8f2355138f80 100644
195 --- a/arch/powerpc/kvm/book3s_rtas.c
196 +++ b/arch/powerpc/kvm/book3s_rtas.c
197 @@ -146,7 +146,7 @@ static int rtas_token_undefine(struct kvm *kvm, char *name)
198 {
199 struct rtas_token_definition *d, *tmp;
200
201 - lockdep_assert_held(&kvm->lock);
202 + lockdep_assert_held(&kvm->arch.rtas_token_lock);
203
204 list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
205 if (rtas_name_matches(d->handler->name, name)) {
206 @@ -167,7 +167,7 @@ static int rtas_token_define(struct kvm *kvm, char *name, u64 token)
207 bool found;
208 int i;
209
210 - lockdep_assert_held(&kvm->lock);
211 + lockdep_assert_held(&kvm->arch.rtas_token_lock);
212
213 list_for_each_entry(d, &kvm->arch.rtas_tokens, list) {
214 if (d->token == token)
215 @@ -206,14 +206,14 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
216 if (copy_from_user(&args, argp, sizeof(args)))
217 return -EFAULT;
218
219 - mutex_lock(&kvm->lock);
220 + mutex_lock(&kvm->arch.rtas_token_lock);
221
222 if (args.token)
223 rc = rtas_token_define(kvm, args.name, args.token);
224 else
225 rc = rtas_token_undefine(kvm, args.name);
226
227 - mutex_unlock(&kvm->lock);
228 + mutex_unlock(&kvm->arch.rtas_token_lock);
229
230 return rc;
231 }
232 @@ -245,7 +245,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
233 orig_rets = args.rets;
234 args.rets = &args.args[be32_to_cpu(args.nargs)];
235
236 - mutex_lock(&vcpu->kvm->lock);
237 + mutex_lock(&vcpu->kvm->arch.rtas_token_lock);
238
239 rc = -ENOENT;
240 list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
241 @@ -256,7 +256,7 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
242 }
243 }
244
245 - mutex_unlock(&vcpu->kvm->lock);
246 + mutex_unlock(&vcpu->kvm->arch.rtas_token_lock);
247
248 if (rc == 0) {
249 args.rets = orig_rets;
250 @@ -282,8 +282,6 @@ void kvmppc_rtas_tokens_free(struct kvm *kvm)
251 {
252 struct rtas_token_definition *d, *tmp;
253
254 - lockdep_assert_held(&kvm->lock);
255 -
256 list_for_each_entry_safe(d, tmp, &kvm->arch.rtas_tokens, list) {
257 list_del(&d->list);
258 kfree(d);
259 diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
260 index 3d27f02695e4..828f6656f8f7 100644
261 --- a/arch/powerpc/platforms/powernv/opal-imc.c
262 +++ b/arch/powerpc/platforms/powernv/opal-imc.c
263 @@ -161,6 +161,10 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
264 struct imc_pmu *pmu_ptr;
265 u32 offset;
266
267 + /* Return for unknown domain */
268 + if (domain < 0)
269 + return -EINVAL;
270 +
271 /* memory for pmu */
272 pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
273 if (!pmu_ptr)
274 diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
275 index b7b01d762d32..e91814d1a27f 100644
276 --- a/arch/x86/events/intel/ds.c
277 +++ b/arch/x86/events/intel/ds.c
278 @@ -684,7 +684,7 @@ struct event_constraint intel_core2_pebs_event_constraints[] = {
279 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1fc7, 0x1), /* SIMD_INST_RETURED.ANY */
280 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
281 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
282 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
283 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
284 EVENT_CONSTRAINT_END
285 };
286
287 @@ -693,7 +693,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
288 INTEL_FLAGS_UEVENT_CONSTRAINT(0x00c5, 0x1), /* MISPREDICTED_BRANCH_RETIRED */
289 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED.* */
290 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
291 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x01),
292 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x01),
293 /* Allow all events as PEBS with no flags */
294 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
295 EVENT_CONSTRAINT_END
296 @@ -701,7 +701,7 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
297
298 struct event_constraint intel_slm_pebs_event_constraints[] = {
299 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
300 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
301 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x1),
302 /* Allow all events as PEBS with no flags */
303 INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
304 EVENT_CONSTRAINT_END
305 @@ -726,7 +726,7 @@ struct event_constraint intel_nehalem_pebs_event_constraints[] = {
306 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
307 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
308 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
309 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
310 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
311 EVENT_CONSTRAINT_END
312 };
313
314 @@ -743,7 +743,7 @@ struct event_constraint intel_westmere_pebs_event_constraints[] = {
315 INTEL_FLAGS_EVENT_CONSTRAINT(0xcb, 0xf), /* MEM_LOAD_RETIRED.* */
316 INTEL_FLAGS_EVENT_CONSTRAINT(0xf7, 0xf), /* FP_ASSIST.* */
317 /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
318 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
319 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
320 EVENT_CONSTRAINT_END
321 };
322
323 @@ -752,7 +752,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
324 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
325 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
326 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
327 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
328 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
329 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
330 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
331 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
332 @@ -767,9 +767,9 @@ struct event_constraint intel_ivb_pebs_event_constraints[] = {
333 INTEL_PLD_CONSTRAINT(0x01cd, 0x8), /* MEM_TRANS_RETIRED.LAT_ABOVE_THR */
334 INTEL_PST_CONSTRAINT(0x02cd, 0x8), /* MEM_TRANS_RETIRED.PRECISE_STORES */
335 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
336 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
337 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
338 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
339 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
340 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
341 INTEL_EXCLEVT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
342 INTEL_EXCLEVT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
343 INTEL_EXCLEVT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
344 @@ -783,9 +783,9 @@ struct event_constraint intel_hsw_pebs_event_constraints[] = {
345 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
346 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
347 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
348 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
349 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
350 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
351 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
352 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
353 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
354 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
355 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_XLD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
356 @@ -806,9 +806,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
357 INTEL_FLAGS_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */
358 INTEL_PLD_CONSTRAINT(0x01cd, 0xf), /* MEM_TRANS_RETIRED.* */
359 /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
360 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
361 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c2, 0xf),
362 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
363 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
364 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
365 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(0x01c2, 0xf), /* UOPS_RETIRED.ALL */
366 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_UOPS_RETIRED.STLB_MISS_LOADS */
367 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf), /* MEM_UOPS_RETIRED.LOCK_LOADS */
368 @@ -829,9 +829,9 @@ struct event_constraint intel_bdw_pebs_event_constraints[] = {
369 struct event_constraint intel_skl_pebs_event_constraints[] = {
370 INTEL_FLAGS_UEVENT_CONSTRAINT(0x1c0, 0x2), /* INST_RETIRED.PREC_DIST */
371 /* INST_RETIRED.PREC_DIST, inv=1, cmask=16 (cycles:ppp). */
372 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c0, 0x2),
373 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108001c0, 0x2),
374 /* INST_RETIRED.TOTAL_CYCLES_PS (inv=1, cmask=16) (cycles:p). */
375 - INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x0f),
376 + INTEL_FLAGS_UEVENT_CONSTRAINT(0x108000c0, 0x0f),
377 INTEL_PLD_CONSTRAINT(0x1cd, 0xf), /* MEM_TRANS_RETIRED.* */
378 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_LOADS */
379 INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf), /* MEM_INST_RETIRED.STLB_MISS_STORES */
380 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
381 index 6a25278e0092..da1f5e78363e 100644
382 --- a/arch/x86/kernel/cpu/amd.c
383 +++ b/arch/x86/kernel/cpu/amd.c
384 @@ -819,8 +819,11 @@ static void init_amd_zn(struct cpuinfo_x86 *c)
385 {
386 set_cpu_cap(c, X86_FEATURE_ZEN);
387
388 - /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
389 - if (!cpu_has(c, X86_FEATURE_CPB))
390 + /*
391 + * Fix erratum 1076: CPB feature bit not being set in CPUID.
392 + * Always set it, except when running under a hypervisor.
393 + */
394 + if (!cpu_has(c, X86_FEATURE_HYPERVISOR) && !cpu_has(c, X86_FEATURE_CPB))
395 set_cpu_cap(c, X86_FEATURE_CPB);
396 }
397
398 diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
399 index a7c2673ffd36..1806260938e8 100644
400 --- a/drivers/acpi/device_pm.c
401 +++ b/drivers/acpi/device_pm.c
402 @@ -948,8 +948,8 @@ static bool acpi_dev_needs_resume(struct device *dev, struct acpi_device *adev)
403 u32 sys_target = acpi_target_system_state();
404 int ret, state;
405
406 - if (!pm_runtime_suspended(dev) || !adev ||
407 - device_may_wakeup(dev) != !!adev->wakeup.prepare_count)
408 + if (!pm_runtime_suspended(dev) || !adev || (adev->wakeup.flags.valid &&
409 + device_may_wakeup(dev) != !!adev->wakeup.prepare_count))
410 return true;
411
412 if (sys_target == ACPI_STATE_S0)
413 diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
414 index 421b05392220..ca3218337fd7 100644
415 --- a/drivers/clk/ti/clkctrl.c
416 +++ b/drivers/clk/ti/clkctrl.c
417 @@ -137,9 +137,6 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
418 int ret;
419 union omap4_timeout timeout = { 0 };
420
421 - if (!clk->enable_bit)
422 - return 0;
423 -
424 if (clk->clkdm) {
425 ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
426 if (ret) {
427 @@ -151,6 +148,9 @@ static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
428 }
429 }
430
431 + if (!clk->enable_bit)
432 + return 0;
433 +
434 val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
435
436 val &= ~OMAP4_MODULEMODE_MASK;
437 @@ -179,7 +179,7 @@ static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
438 union omap4_timeout timeout = { 0 };
439
440 if (!clk->enable_bit)
441 - return;
442 + goto exit;
443
444 val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);
445
446 diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
447 index 4f52c3a8ec99..ed51221621a5 100644
448 --- a/drivers/gpio/Kconfig
449 +++ b/drivers/gpio/Kconfig
450 @@ -784,6 +784,7 @@ config GPIO_ADP5588
451 config GPIO_ADP5588_IRQ
452 bool "Interrupt controller support for ADP5588"
453 depends on GPIO_ADP5588=y
454 + select GPIOLIB_IRQCHIP
455 help
456 Say yes here to enable the adp5588 to be used as an interrupt
457 controller. It requires the driver to be built in the kernel.
458 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_dump.c b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
459 index 9146e30e24a6..468dff2f7904 100644
460 --- a/drivers/gpu/drm/etnaviv/etnaviv_dump.c
461 +++ b/drivers/gpu/drm/etnaviv/etnaviv_dump.c
462 @@ -124,6 +124,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
463 return;
464 etnaviv_dump_core = false;
465
466 + mutex_lock(&gpu->mmu->lock);
467 +
468 mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
469
470 /* We always dump registers, mmu, ring and end marker */
471 @@ -166,6 +168,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
472 iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
473 PAGE_KERNEL);
474 if (!iter.start) {
475 + mutex_unlock(&gpu->mmu->lock);
476 dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
477 return;
478 }
479 @@ -233,6 +236,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
480 obj->base.size);
481 }
482
483 + mutex_unlock(&gpu->mmu->lock);
484 +
485 etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
486
487 dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
488 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
489 index ccd76c71af09..cb07651f4b46 100644
490 --- a/drivers/i2c/i2c-dev.c
491 +++ b/drivers/i2c/i2c-dev.c
492 @@ -283,6 +283,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
493 msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
494 msgs[i].len < msgs[i].buf[0] +
495 I2C_SMBUS_BLOCK_MAX) {
496 + i++;
497 res = -EINVAL;
498 break;
499 }
500 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
501 index b2abc44fa5cb..a73337b74f41 100644
502 --- a/drivers/isdn/mISDN/socket.c
503 +++ b/drivers/isdn/mISDN/socket.c
504 @@ -394,7 +394,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
505 memcpy(di.channelmap, dev->channelmap,
506 sizeof(di.channelmap));
507 di.nrbchan = dev->nrbchan;
508 - strcpy(di.name, dev_name(&dev->dev));
509 + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
510 if (copy_to_user((void __user *)arg, &di, sizeof(di)))
511 err = -EFAULT;
512 } else
513 @@ -677,7 +677,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
514 memcpy(di.channelmap, dev->channelmap,
515 sizeof(di.channelmap));
516 di.nrbchan = dev->nrbchan;
517 - strcpy(di.name, dev_name(&dev->dev));
518 + strscpy(di.name, dev_name(&dev->dev), sizeof(di.name));
519 if (copy_to_user((void __user *)arg, &di, sizeof(di)))
520 err = -EFAULT;
521 } else
522 @@ -691,6 +691,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
523 err = -EFAULT;
524 break;
525 }
526 + dn.name[sizeof(dn.name) - 1] = '\0';
527 dev = get_mdevice(dn.id);
528 if (dev)
529 err = device_rename(&dev->dev, dn.name);
530 diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
531 index 6dedd43442cc..35b767baf21f 100644
532 --- a/drivers/net/dsa/rtl8366.c
533 +++ b/drivers/net/dsa/rtl8366.c
534 @@ -307,7 +307,8 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
535 struct rtl8366_vlan_4k vlan4k;
536 int ret;
537
538 - if (!smi->ops->is_vlan_valid(smi, port))
539 + /* Use VLAN nr port + 1 since VLAN0 is not valid */
540 + if (!smi->ops->is_vlan_valid(smi, port + 1))
541 return -EINVAL;
542
543 dev_info(smi->dev, "%s filtering on port %d\n",
544 @@ -318,12 +319,12 @@ int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering)
545 * The hardware support filter ID (FID) 0..7, I have no clue how to
546 * support this in the driver when the callback only says on/off.
547 */
548 - ret = smi->ops->get_vlan_4k(smi, port, &vlan4k);
549 + ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
550 if (ret)
551 return ret;
552
553 /* Just set the filter to FID 1 for now then */
554 - ret = rtl8366_set_vlan(smi, port,
555 + ret = rtl8366_set_vlan(smi, port + 1,
556 vlan4k.member,
557 vlan4k.untag,
558 1);
559 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
560 index 6f3312350cac..b3c7994d73eb 100644
561 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
562 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
563 @@ -139,10 +139,10 @@ void aq_ring_queue_stop(struct aq_ring_s *ring)
564 bool aq_ring_tx_clean(struct aq_ring_s *self)
565 {
566 struct device *dev = aq_nic_get_dev(self->aq_nic);
567 - unsigned int budget = AQ_CFG_TX_CLEAN_BUDGET;
568 + unsigned int budget;
569
570 - for (; self->sw_head != self->hw_head && budget--;
571 - self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
572 + for (budget = AQ_CFG_TX_CLEAN_BUDGET;
573 + budget && self->sw_head != self->hw_head; budget--) {
574 struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
575
576 if (likely(buff->is_mapped)) {
577 @@ -167,6 +167,7 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
578
579 buff->pa = 0U;
580 buff->eop_index = 0xffffU;
581 + self->sw_head = aq_ring_next_dx(self, self->sw_head);
582 }
583
584 return !!budget;
585 diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
586 index 56363ff5c891..51cd1f98bcf0 100644
587 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
588 +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
589 @@ -695,38 +695,41 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
590 if ((rx_stat & BIT(0)) || rxd_wb->type & 0x1000U) {
591 /* MAC error or DMA error */
592 buff->is_error = 1U;
593 - } else {
594 - if (self->aq_nic_cfg->is_rss) {
595 - /* last 4 byte */
596 - u16 rss_type = rxd_wb->type & 0xFU;
597 -
598 - if (rss_type && rss_type < 0x8U) {
599 - buff->is_hash_l4 = (rss_type == 0x4 ||
600 - rss_type == 0x5);
601 - buff->rss_hash = rxd_wb->rss_hash;
602 - }
603 + }
604 + if (self->aq_nic_cfg->is_rss) {
605 + /* last 4 byte */
606 + u16 rss_type = rxd_wb->type & 0xFU;
607 +
608 + if (rss_type && rss_type < 0x8U) {
609 + buff->is_hash_l4 = (rss_type == 0x4 ||
610 + rss_type == 0x5);
611 + buff->rss_hash = rxd_wb->rss_hash;
612 }
613 + }
614
615 - if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
616 - buff->len = rxd_wb->pkt_len %
617 - AQ_CFG_RX_FRAME_MAX;
618 - buff->len = buff->len ?
619 - buff->len : AQ_CFG_RX_FRAME_MAX;
620 - buff->next = 0U;
621 - buff->is_eop = 1U;
622 + if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
623 + buff->len = rxd_wb->pkt_len %
624 + AQ_CFG_RX_FRAME_MAX;
625 + buff->len = buff->len ?
626 + buff->len : AQ_CFG_RX_FRAME_MAX;
627 + buff->next = 0U;
628 + buff->is_eop = 1U;
629 + } else {
630 + buff->len =
631 + rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
632 + AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
633 +
634 + if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
635 + rxd_wb->status) {
636 + /* LRO */
637 + buff->next = rxd_wb->next_desc_ptr;
638 + ++ring->stats.rx.lro_packets;
639 } else {
640 - if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
641 - rxd_wb->status) {
642 - /* LRO */
643 - buff->next = rxd_wb->next_desc_ptr;
644 - ++ring->stats.rx.lro_packets;
645 - } else {
646 - /* jumbo */
647 - buff->next =
648 - aq_ring_next_dx(ring,
649 - ring->hw_head);
650 - ++ring->stats.rx.jumbo_packets;
651 - }
652 + /* jumbo */
653 + buff->next =
654 + aq_ring_next_dx(ring,
655 + ring->hw_head);
656 + ++ring->stats.rx.jumbo_packets;
657 }
658 }
659 }
660 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
661 index 66535d1653f6..f16853c3c851 100644
662 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
663 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
664 @@ -2107,7 +2107,6 @@ static struct eisa_driver de4x5_eisa_driver = {
665 .remove = de4x5_eisa_remove,
666 }
667 };
668 -MODULE_DEVICE_TABLE(eisa, de4x5_eisa_ids);
669 #endif
670
671 #ifdef CONFIG_PCI
672 diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
673 index 3f6749fc889f..bfb16a474490 100644
674 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
675 +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
676 @@ -1105,7 +1105,7 @@ static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
677 cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type);
678 break;
679 case ETHTOOL_GRXRINGS:
680 - cmd->data = adapter->num_rx_qs - 1;
681 + cmd->data = adapter->num_rx_qs;
682 break;
683 default:
684 return -EINVAL;
685 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
686 index 392fd895f278..ae2240074d8e 100644
687 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
688 +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
689 @@ -1905,8 +1905,7 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
690 }
691
692 /* Find tcam entry with matched pair <vid,port> */
693 -static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
694 - u16 mask)
695 +static int mvpp2_prs_vid_range_find(struct mvpp2_port *port, u16 vid, u16 mask)
696 {
697 unsigned char byte[2], enable[2];
698 struct mvpp2_prs_entry pe;
699 @@ -1914,13 +1913,13 @@ static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
700 int tid;
701
702 /* Go through the all entries with MVPP2_PRS_LU_VID */
703 - for (tid = MVPP2_PE_VID_FILT_RANGE_START;
704 - tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
705 - if (!priv->prs_shadow[tid].valid ||
706 - priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
707 + for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
708 + tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
709 + if (!port->priv->prs_shadow[tid].valid ||
710 + port->priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
711 continue;
712
713 - mvpp2_prs_init_from_hw(priv, &pe, tid);
714 + mvpp2_prs_init_from_hw(port->priv, &pe, tid);
715
716 mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
717 mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
718 @@ -1950,7 +1949,7 @@ int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
719 memset(&pe, 0, sizeof(pe));
720
721 /* Scan TCAM and see if entry with this <vid,port> already exist */
722 - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
723 + tid = mvpp2_prs_vid_range_find(port, vid, mask);
724
725 reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
726 if (reg_val & MVPP2_DSA_EXTENDED)
727 @@ -2008,7 +2007,7 @@ void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
728 int tid;
729
730 /* Scan TCAM and see if entry with this <vid,port> already exist */
731 - tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
732 + tid = mvpp2_prs_vid_range_find(port, vid, 0xfff);
733
734 /* No such entry */
735 if (tid < 0)
736 @@ -2026,8 +2025,10 @@ void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
737
738 for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
739 tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
740 - if (priv->prs_shadow[tid].valid)
741 - mvpp2_prs_vid_entry_remove(port, tid);
742 + if (priv->prs_shadow[tid].valid) {
743 + mvpp2_prs_hw_inv(priv, tid);
744 + priv->prs_shadow[tid].valid = false;
745 + }
746 }
747 }
748
749 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
750 index 37ba7c78859d..1c225be9c7db 100644
751 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
752 +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
753 @@ -342,11 +342,32 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
754 }
755 EXPORT_SYMBOL(mlx5_unregister_interface);
756
757 +/* Must be called with intf_mutex held */
758 +static bool mlx5_has_added_dev_by_protocol(struct mlx5_core_dev *mdev, int protocol)
759 +{
760 + struct mlx5_device_context *dev_ctx;
761 + struct mlx5_interface *intf;
762 + bool found = false;
763 +
764 + list_for_each_entry(intf, &intf_list, list) {
765 + if (intf->protocol == protocol) {
766 + dev_ctx = mlx5_get_device(intf, &mdev->priv);
767 + if (dev_ctx && test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
768 + found = true;
769 + break;
770 + }
771 + }
772 +
773 + return found;
774 +}
775 +
776 void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
777 {
778 mutex_lock(&mlx5_intf_mutex);
779 - mlx5_remove_dev_by_protocol(mdev, protocol);
780 - mlx5_add_dev_by_protocol(mdev, protocol);
781 + if (mlx5_has_added_dev_by_protocol(mdev, protocol)) {
782 + mlx5_remove_dev_by_protocol(mdev, protocol);
783 + mlx5_add_dev_by_protocol(mdev, protocol);
784 + }
785 mutex_unlock(&mlx5_intf_mutex);
786 }
787
788 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
789 index c5b82e283d13..ff2f6b8e2fab 100644
790 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
791 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
792 @@ -2488,6 +2488,10 @@ mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
793 mlxsw_reg_ptys_eth_unpack(ptys_pl, &eth_proto_cap, NULL, NULL);
794
795 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
796 + if (!autoneg && cmd->base.speed == SPEED_56000) {
797 + netdev_err(dev, "56G not supported with autoneg off\n");
798 + return -EINVAL;
799 + }
800 eth_proto_new = autoneg ?
801 mlxsw_sp_to_ptys_advert_link(cmd) :
802 mlxsw_sp_to_ptys_speed(cmd->base.speed);
803 diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
804 index f27a0dc8c563..5e3e6e262ba3 100644
805 --- a/drivers/net/ethernet/renesas/sh_eth.c
806 +++ b/drivers/net/ethernet/renesas/sh_eth.c
807 @@ -1588,6 +1588,10 @@ static void sh_eth_dev_exit(struct net_device *ndev)
808 sh_eth_get_stats(ndev);
809 mdp->cd->soft_reset(ndev);
810
811 + /* Set the RMII mode again if required */
812 + if (mdp->cd->rmiimode)
813 + sh_eth_write(ndev, 0x1, RMIIMODE);
814 +
815 /* Set MAC address again */
816 update_mac_address(ndev);
817 }
818 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
819 index 50c00822b2d8..45e64d71a93f 100644
820 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
821 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
822 @@ -3319,6 +3319,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
823 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
824 }
825 rx_q->dirty_rx = entry;
826 + stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
827 }
828
829 /**
830 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
831 index 9d699bd5f715..cf6b9b1771f1 100644
832 --- a/drivers/net/hyperv/netvsc_drv.c
833 +++ b/drivers/net/hyperv/netvsc_drv.c
834 @@ -2405,7 +2405,7 @@ static struct hv_driver netvsc_drv = {
835 .probe = netvsc_probe,
836 .remove = netvsc_remove,
837 .driver = {
838 - .probe_type = PROBE_PREFER_ASYNCHRONOUS,
839 + .probe_type = PROBE_FORCE_SYNCHRONOUS,
840 },
841 };
842
843 diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
844 index b3935778b19f..e4bf9e7d7583 100644
845 --- a/drivers/net/phy/dp83867.c
846 +++ b/drivers/net/phy/dp83867.c
847 @@ -260,10 +260,8 @@ static int dp83867_config_init(struct phy_device *phydev)
848 ret = phy_write(phydev, MII_DP83867_PHYCTRL, val);
849 if (ret)
850 return ret;
851 - }
852
853 - if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
854 - (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
855 + /* Set up RGMII delays */
856 val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIICTL);
857
858 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
859 diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
860 index f6e70f2dfd12..e029c7977a56 100644
861 --- a/drivers/net/phy/phylink.c
862 +++ b/drivers/net/phy/phylink.c
863 @@ -54,6 +54,10 @@ struct phylink {
864
865 /* The link configuration settings */
866 struct phylink_link_state link_config;
867 +
868 + /* The current settings */
869 + phy_interface_t cur_interface;
870 +
871 struct gpio_desc *link_gpio;
872 struct timer_list link_poll;
873 void (*get_fixed_state)(struct net_device *dev,
874 @@ -477,12 +481,12 @@ static void phylink_resolve(struct work_struct *w)
875 if (!link_state.link) {
876 netif_carrier_off(ndev);
877 pl->ops->mac_link_down(ndev, pl->link_an_mode,
878 - pl->phy_state.interface);
879 + pl->cur_interface);
880 netdev_info(ndev, "Link is Down\n");
881 } else {
882 + pl->cur_interface = link_state.interface;
883 pl->ops->mac_link_up(ndev, pl->link_an_mode,
884 - pl->phy_state.interface,
885 - pl->phydev);
886 + pl->cur_interface, pl->phydev);
887
888 netif_carrier_on(ndev);
889
890 diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
891 index f8436d1c4d45..f7218c1673ce 100644
892 --- a/drivers/pci/pci-acpi.c
893 +++ b/drivers/pci/pci-acpi.c
894 @@ -625,7 +625,8 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
895 if (!adev || !acpi_device_power_manageable(adev))
896 return false;
897
898 - if (device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
899 + if (adev->wakeup.flags.valid &&
900 + device_may_wakeup(&dev->dev) != !!adev->wakeup.prepare_count)
901 return true;
902
903 if (acpi_target_system_state() == ACPI_STATE_S0)
904 diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
905 index f2c561ca731a..cd2c247d6d0c 100644
906 --- a/drivers/scsi/cxgbi/libcxgbi.c
907 +++ b/drivers/scsi/cxgbi/libcxgbi.c
908 @@ -641,6 +641,10 @@ cxgbi_check_route(struct sockaddr *dst_addr, int ifindex)
909
910 if (ndev->flags & IFF_LOOPBACK) {
911 ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
912 + if (!ndev) {
913 + err = -ENETUNREACH;
914 + goto rel_neigh;
915 + }
916 mtu = ndev->mtu;
917 pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
918 n->dev->name, ndev->name, mtu);
919 diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
920 index 12dc7100bb4c..d1154baa9436 100644
921 --- a/drivers/scsi/device_handler/scsi_dh_alua.c
922 +++ b/drivers/scsi/device_handler/scsi_dh_alua.c
923 @@ -1173,10 +1173,8 @@ static int __init alua_init(void)
924 int r;
925
926 kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
927 - if (!kaluad_wq) {
928 - /* Temporary failure, bypass */
929 - return SCSI_DH_DEV_TEMP_BUSY;
930 - }
931 + if (!kaluad_wq)
932 + return -ENOMEM;
933
934 r = scsi_register_device_handler(&alua_dh);
935 if (r != 0) {
936 diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
937 index 231eb79efa32..b141d1061f38 100644
938 --- a/drivers/scsi/libsas/sas_expander.c
939 +++ b/drivers/scsi/libsas/sas_expander.c
940 @@ -989,6 +989,8 @@ static struct domain_device *sas_ex_discover_expander(
941 list_del(&child->dev_list_node);
942 spin_unlock_irq(&parent->port->dev_list_lock);
943 sas_put_device(child);
944 + sas_port_delete(phy->port);
945 + phy->port = NULL;
946 return NULL;
947 }
948 list_add_tail(&child->siblings, &parent->ex_dev.children);
949 diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
950 index 3781e8109dd7..411d656f2530 100644
951 --- a/drivers/scsi/smartpqi/smartpqi_init.c
952 +++ b/drivers/scsi/smartpqi/smartpqi_init.c
953 @@ -6378,7 +6378,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
954 else
955 mask = DMA_BIT_MASK(32);
956
957 - rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
958 + rc = dma_set_mask_and_coherent(&ctrl_info->pci_dev->dev, mask);
959 if (rc) {
960 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
961 goto disable_device;
962 diff --git a/drivers/staging/vc04_services/bcm2835-camera/controls.c b/drivers/staging/vc04_services/bcm2835-camera/controls.c
963 index cff7b1e07153..b688ebc01740 100644
964 --- a/drivers/staging/vc04_services/bcm2835-camera/controls.c
965 +++ b/drivers/staging/vc04_services/bcm2835-camera/controls.c
966 @@ -576,7 +576,7 @@ exit:
967 dev->colourfx.enable ? "true" : "false",
968 dev->colourfx.u, dev->colourfx.v,
969 ret, (ret == 0 ? 0 : -EINVAL));
970 - return (ret == 0 ? 0 : EINVAL);
971 + return (ret == 0 ? 0 : -EINVAL);
972 }
973
974 static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
975 @@ -600,7 +600,7 @@ static int ctrl_set_colfx(struct bm2835_mmal_dev *dev,
976 "%s: After: mmal_ctrl:%p ctrl id:0x%x ctrl val:%d ret %d(%d)\n",
977 __func__, mmal_ctrl, ctrl->id, ctrl->val, ret,
978 (ret == 0 ? 0 : -EINVAL));
979 - return (ret == 0 ? 0 : EINVAL);
980 + return (ret == 0 ? 0 : -EINVAL);
981 }
982
983 static int ctrl_set_bitrate(struct bm2835_mmal_dev *dev,
984 diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
985 index 63e34d868de8..f8503f8fc44e 100644
986 --- a/drivers/tty/serial/sunhv.c
987 +++ b/drivers/tty/serial/sunhv.c
988 @@ -397,7 +397,7 @@ static const struct uart_ops sunhv_pops = {
989 static struct uart_driver sunhv_reg = {
990 .owner = THIS_MODULE,
991 .driver_name = "sunhv",
992 - .dev_name = "ttyS",
993 + .dev_name = "ttyHV",
994 .major = TTY_MAJOR,
995 };
996
997 diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
998 index cadc01336bf8..7ba6afc7ef23 100644
999 --- a/drivers/usb/host/xhci-debugfs.c
1000 +++ b/drivers/usb/host/xhci-debugfs.c
1001 @@ -440,6 +440,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci,
1002 struct xhci_ep_priv *epriv;
1003 struct xhci_slot_priv *spriv = dev->debugfs_private;
1004
1005 + if (!spriv)
1006 + return;
1007 +
1008 if (spriv->eps[ep_index])
1009 return;
1010
1011 diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
1012 index 91da7e44d5d4..3a144eecb6a7 100644
1013 --- a/drivers/xen/pvcalls-front.c
1014 +++ b/drivers/xen/pvcalls-front.c
1015 @@ -538,7 +538,6 @@ out:
1016 int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
1017 size_t len)
1018 {
1019 - struct pvcalls_bedata *bedata;
1020 struct sock_mapping *map;
1021 int sent, tot_sent = 0;
1022 int count = 0, flags;
1023 @@ -550,7 +549,6 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
1024 map = pvcalls_enter_sock(sock);
1025 if (IS_ERR(map))
1026 return PTR_ERR(map);
1027 - bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
1028
1029 mutex_lock(&map->active.out_mutex);
1030 if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
1031 @@ -633,7 +631,6 @@ out:
1032 int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1033 int flags)
1034 {
1035 - struct pvcalls_bedata *bedata;
1036 int ret;
1037 struct sock_mapping *map;
1038
1039 @@ -643,7 +640,6 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1040 map = pvcalls_enter_sock(sock);
1041 if (IS_ERR(map))
1042 return PTR_ERR(map);
1043 - bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
1044
1045 mutex_lock(&map->active.in_mutex);
1046 if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
1047 diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h
1048 index 092981171df1..d75a2385b37c 100644
1049 --- a/drivers/xen/xenbus/xenbus.h
1050 +++ b/drivers/xen/xenbus/xenbus.h
1051 @@ -83,6 +83,7 @@ struct xb_req_data {
1052 int num_vecs;
1053 int err;
1054 enum xb_req_state state;
1055 + bool user_req;
1056 void (*cb)(struct xb_req_data *);
1057 void *par;
1058 };
1059 @@ -133,4 +134,6 @@ void xenbus_ring_ops_init(void);
1060 int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par);
1061 void xenbus_dev_queue_reply(struct xb_req_data *req);
1062
1063 +extern unsigned int xb_dev_generation_id;
1064 +
1065 #endif
1066 diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
1067 index 0782ff3c2273..39c63152a358 100644
1068 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
1069 +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
1070 @@ -62,6 +62,8 @@
1071
1072 #include "xenbus.h"
1073
1074 +unsigned int xb_dev_generation_id;
1075 +
1076 /*
1077 * An element of a list of outstanding transactions, for which we're
1078 * still waiting a reply.
1079 @@ -69,6 +71,7 @@
1080 struct xenbus_transaction_holder {
1081 struct list_head list;
1082 struct xenbus_transaction handle;
1083 + unsigned int generation_id;
1084 };
1085
1086 /*
1087 @@ -441,6 +444,7 @@ static int xenbus_write_transaction(unsigned msg_type,
1088 rc = -ENOMEM;
1089 goto out;
1090 }
1091 + trans->generation_id = xb_dev_generation_id;
1092 list_add(&trans->list, &u->transactions);
1093 } else if (msg->hdr.tx_id != 0 &&
1094 !xenbus_get_transaction(u, msg->hdr.tx_id))
1095 @@ -449,6 +453,20 @@ static int xenbus_write_transaction(unsigned msg_type,
1096 !(msg->hdr.len == 2 &&
1097 (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
1098 return xenbus_command_reply(u, XS_ERROR, "EINVAL");
1099 + else if (msg_type == XS_TRANSACTION_END) {
1100 + trans = xenbus_get_transaction(u, msg->hdr.tx_id);
1101 + if (trans && trans->generation_id != xb_dev_generation_id) {
1102 + list_del(&trans->list);
1103 + kfree(trans);
1104 + if (!strcmp(msg->body, "T"))
1105 + return xenbus_command_reply(u, XS_ERROR,
1106 + "EAGAIN");
1107 + else
1108 + return xenbus_command_reply(u,
1109 + XS_TRANSACTION_END,
1110 + "OK");
1111 + }
1112 + }
1113
1114 rc = xenbus_dev_request_and_reply(&msg->hdr, u);
1115 if (rc && trans) {
1116 diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
1117 index 49a3874ae6bb..ddc18da61834 100644
1118 --- a/drivers/xen/xenbus/xenbus_xs.c
1119 +++ b/drivers/xen/xenbus/xenbus_xs.c
1120 @@ -105,6 +105,7 @@ static void xs_suspend_enter(void)
1121
1122 static void xs_suspend_exit(void)
1123 {
1124 + xb_dev_generation_id++;
1125 spin_lock(&xs_state_lock);
1126 xs_suspend_active--;
1127 spin_unlock(&xs_state_lock);
1128 @@ -125,7 +126,7 @@ static uint32_t xs_request_enter(struct xb_req_data *req)
1129 spin_lock(&xs_state_lock);
1130 }
1131
1132 - if (req->type == XS_TRANSACTION_START)
1133 + if (req->type == XS_TRANSACTION_START && !req->user_req)
1134 xs_state_users++;
1135 xs_state_users++;
1136 rq_id = xs_request_id++;
1137 @@ -140,7 +141,7 @@ void xs_request_exit(struct xb_req_data *req)
1138 spin_lock(&xs_state_lock);
1139 xs_state_users--;
1140 if ((req->type == XS_TRANSACTION_START && req->msg.type == XS_ERROR) ||
1141 - (req->type == XS_TRANSACTION_END &&
1142 + (req->type == XS_TRANSACTION_END && !req->user_req &&
1143 !WARN_ON_ONCE(req->msg.type == XS_ERROR &&
1144 !strcmp(req->body, "ENOENT"))))
1145 xs_state_users--;
1146 @@ -286,6 +287,7 @@ int xenbus_dev_request_and_reply(struct xsd_sockmsg *msg, void *par)
1147 req->num_vecs = 1;
1148 req->cb = xenbus_dev_queue_reply;
1149 req->par = par;
1150 + req->user_req = true;
1151
1152 xs_send(req, msg);
1153
1154 @@ -313,6 +315,7 @@ static void *xs_talkv(struct xenbus_transaction t,
1155 req->vec = iovec;
1156 req->num_vecs = num_vecs;
1157 req->cb = xs_wake_up;
1158 + req->user_req = false;
1159
1160 msg.req_id = 0;
1161 msg.tx_id = t.id;
1162 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
1163 index 920d350df37b..809c1edffbaf 100644
1164 --- a/fs/configfs/dir.c
1165 +++ b/fs/configfs/dir.c
1166 @@ -58,15 +58,13 @@ static void configfs_d_iput(struct dentry * dentry,
1167 if (sd) {
1168 /* Coordinate with configfs_readdir */
1169 spin_lock(&configfs_dirent_lock);
1170 - /* Coordinate with configfs_attach_attr where will increase
1171 - * sd->s_count and update sd->s_dentry to new allocated one.
1172 - * Only set sd->dentry to null when this dentry is the only
1173 - * sd owner.
1174 - * If not do so, configfs_d_iput may run just after
1175 - * configfs_attach_attr and set sd->s_dentry to null
1176 - * even it's still in use.
1177 + /*
1178 + * Set sd->s_dentry to null only when this dentry is the one
1179 + * that is going to be killed. Otherwise configfs_d_iput may
1180 + * run just after configfs_attach_attr and set sd->s_dentry to
1181 + * NULL even it's still in use.
1182 */
1183 - if (atomic_read(&sd->s_count) <= 2)
1184 + if (sd->s_dentry == dentry)
1185 sd->s_dentry = NULL;
1186
1187 spin_unlock(&configfs_dirent_lock);
1188 diff --git a/fs/inode.c b/fs/inode.c
1189 index 42f6d25f32a5..5c63693326bb 100644
1190 --- a/fs/inode.c
1191 +++ b/fs/inode.c
1192 @@ -1817,8 +1817,13 @@ int file_remove_privs(struct file *file)
1193 int kill;
1194 int error = 0;
1195
1196 - /* Fast path for nothing security related */
1197 - if (IS_NOSEC(inode))
1198 + /*
1199 + * Fast path for nothing security related.
1200 + * As well for non-regular files, e.g. blkdev inodes.
1201 + * For example, blkdev_write_iter() might get here
1202 + * trying to remove privs which it is not allowed to.
1203 + */
1204 + if (IS_NOSEC(inode) || !S_ISREG(inode->i_mode))
1205 return 0;
1206
1207 kill = dentry_needs_remove_privs(dentry);
1208 diff --git a/fs/ocfs2/filecheck.c b/fs/ocfs2/filecheck.c
1209 index f65f2b2f594d..1906cc962c4d 100644
1210 --- a/fs/ocfs2/filecheck.c
1211 +++ b/fs/ocfs2/filecheck.c
1212 @@ -193,6 +193,7 @@ int ocfs2_filecheck_create_sysfs(struct ocfs2_super *osb)
1213 ret = kobject_init_and_add(&entry->fs_kobj, &ocfs2_ktype_filecheck,
1214 NULL, "filecheck");
1215 if (ret) {
1216 + kobject_put(&entry->fs_kobj);
1217 kfree(fcheck);
1218 return ret;
1219 }
1220 diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
1221 index cebb79fe2c72..0d10b7ce0da7 100644
1222 --- a/include/linux/sched/mm.h
1223 +++ b/include/linux/sched/mm.h
1224 @@ -54,6 +54,10 @@ static inline void mmdrop(struct mm_struct *mm)
1225 * followed by taking the mmap_sem for writing before modifying the
1226 * vmas or anything the coredump pretends not to change from under it.
1227 *
1228 + * It also has to be called when mmgrab() is used in the context of
1229 + * the process, but then the mm_count refcount is transferred outside
1230 + * the context of the process to run down_write() on that pinned mm.
1231 + *
1232 * NOTE: find_extend_vma() called from GUP context is the only place
1233 * that can modify the "mm" (notably the vm_start/end) under mmap_sem
1234 * for reading and outside the context of the process, so it is also
1235 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
1236 index 99c7f199f2d4..12f351b253bb 100644
1237 --- a/kernel/events/ring_buffer.c
1238 +++ b/kernel/events/ring_buffer.c
1239 @@ -49,14 +49,30 @@ static void perf_output_put_handle(struct perf_output_handle *handle)
1240 unsigned long head;
1241
1242 again:
1243 + /*
1244 + * In order to avoid publishing a head value that goes backwards,
1245 + * we must ensure the load of @rb->head happens after we've
1246 + * incremented @rb->nest.
1247 + *
1248 + * Otherwise we can observe a @rb->head value before one published
1249 + * by an IRQ/NMI happening between the load and the increment.
1250 + */
1251 + barrier();
1252 head = local_read(&rb->head);
1253
1254 /*
1255 - * IRQ/NMI can happen here, which means we can miss a head update.
1256 + * IRQ/NMI can happen here and advance @rb->head, causing our
1257 + * load above to be stale.
1258 */
1259
1260 - if (!local_dec_and_test(&rb->nest))
1261 + /*
1262 + * If this isn't the outermost nesting, we don't have to update
1263 + * @rb->user_page->data_head.
1264 + */
1265 + if (local_read(&rb->nest) > 1) {
1266 + local_dec(&rb->nest);
1267 goto out;
1268 + }
1269
1270 /*
1271 * Since the mmap() consumer (userspace) can run on a different CPU:
1272 @@ -85,12 +101,21 @@ again:
1273 * See perf_output_begin().
1274 */
1275 smp_wmb(); /* B, matches C */
1276 - rb->user_page->data_head = head;
1277 + WRITE_ONCE(rb->user_page->data_head, head);
1278 +
1279 + /*
1280 + * We must publish the head before decrementing the nest count,
1281 + * otherwise an IRQ/NMI can publish a more recent head value and our
1282 + * write will (temporarily) publish a stale value.
1283 + */
1284 + barrier();
1285 + local_set(&rb->nest, 0);
1286
1287 /*
1288 - * Now check if we missed an update -- rely on previous implied
1289 - * compiler barriers to force a re-read.
1290 + * Ensure we decrement @rb->nest before we validate the @rb->head.
1291 + * Otherwise we cannot be sure we caught the 'last' nested update.
1292 */
1293 + barrier();
1294 if (unlikely(head != local_read(&rb->head))) {
1295 local_inc(&rb->nest);
1296 goto again;
1297 @@ -465,7 +490,7 @@ void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
1298 handle->aux_flags);
1299 }
1300
1301 - rb->user_page->aux_head = rb->aux_head;
1302 + WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
1303 if (rb_need_aux_wakeup(rb))
1304 wakeup = true;
1305
1306 @@ -497,7 +522,7 @@ int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
1307
1308 rb->aux_head += size;
1309
1310 - rb->user_page->aux_head = rb->aux_head;
1311 + WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
1312 if (rb_need_aux_wakeup(rb)) {
1313 perf_output_wakeup(handle);
1314 handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
1315 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
1316 index fde5820be24d..ecefdba4b0dd 100644
1317 --- a/mm/khugepaged.c
1318 +++ b/mm/khugepaged.c
1319 @@ -1005,6 +1005,9 @@ static void collapse_huge_page(struct mm_struct *mm,
1320 * handled by the anon_vma lock + PG_lock.
1321 */
1322 down_write(&mm->mmap_sem);
1323 + result = SCAN_ANY_PROCESS;
1324 + if (!mmget_still_valid(mm))
1325 + goto out;
1326 result = hugepage_vma_revalidate(mm, address, &vma);
1327 if (result)
1328 goto out;
1329 diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
1330 index 66f74c85cf6b..66d54fc11831 100644
1331 --- a/net/ax25/ax25_route.c
1332 +++ b/net/ax25/ax25_route.c
1333 @@ -429,9 +429,11 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
1334 }
1335
1336 if (ax25->sk != NULL) {
1337 + local_bh_disable();
1338 bh_lock_sock(ax25->sk);
1339 sock_reset_flag(ax25->sk, SOCK_ZAPPED);
1340 bh_unlock_sock(ax25->sk);
1341 + local_bh_enable();
1342 }
1343
1344 put:
1345 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1346 index 4e4ac77c6816..cd9e991f21d7 100644
1347 --- a/net/core/neighbour.c
1348 +++ b/net/core/neighbour.c
1349 @@ -2751,6 +2751,7 @@ static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
1350 }
1351
1352 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
1353 + __acquires(tbl->lock)
1354 __acquires(rcu_bh)
1355 {
1356 struct neigh_seq_state *state = seq->private;
1357 @@ -2761,6 +2762,7 @@ void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl
1358
1359 rcu_read_lock_bh();
1360 state->nht = rcu_dereference_bh(tbl->nht);
1361 + read_lock(&tbl->lock);
1362
1363 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
1364 }
1365 @@ -2794,8 +2796,13 @@ out:
1366 EXPORT_SYMBOL(neigh_seq_next);
1367
1368 void neigh_seq_stop(struct seq_file *seq, void *v)
1369 + __releases(tbl->lock)
1370 __releases(rcu_bh)
1371 {
1372 + struct neigh_seq_state *state = seq->private;
1373 + struct neigh_table *tbl = state->tbl;
1374 +
1375 + read_unlock(&tbl->lock);
1376 rcu_read_unlock_bh();
1377 }
1378 EXPORT_SYMBOL(neigh_seq_stop);
1379 diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
1380 index be5f3d7ceb96..f994f50e1516 100644
1381 --- a/net/ipv6/ip6_flowlabel.c
1382 +++ b/net/ipv6/ip6_flowlabel.c
1383 @@ -254,9 +254,9 @@ struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk, __be32 label)
1384 rcu_read_lock_bh();
1385 for_each_sk_fl_rcu(np, sfl) {
1386 struct ip6_flowlabel *fl = sfl->fl;
1387 - if (fl->label == label) {
1388 +
1389 + if (fl->label == label && atomic_inc_not_zero(&fl->users)) {
1390 fl->lastuse = jiffies;
1391 - atomic_inc(&fl->users);
1392 rcu_read_unlock_bh();
1393 return fl;
1394 }
1395 @@ -622,7 +622,8 @@ int ipv6_flowlabel_opt(struct sock *sk, char __user *optval, int optlen)
1396 goto done;
1397 }
1398 fl1 = sfl->fl;
1399 - atomic_inc(&fl1->users);
1400 + if (!atomic_inc_not_zero(&fl1->users))
1401 + fl1 = NULL;
1402 break;
1403 }
1404 }
1405 diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
1406 index db6e0afe3a20..1740f852002e 100644
1407 --- a/net/lapb/lapb_iface.c
1408 +++ b/net/lapb/lapb_iface.c
1409 @@ -182,6 +182,7 @@ int lapb_unregister(struct net_device *dev)
1410 lapb = __lapb_devtostruct(dev);
1411 if (!lapb)
1412 goto out;
1413 + lapb_put(lapb);
1414
1415 lapb_stop_t1timer(lapb);
1416 lapb_stop_t2timer(lapb);
1417 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
1418 index a42c1bc7c698..62c0e80dcd71 100644
1419 --- a/net/netfilter/ipvs/ip_vs_core.c
1420 +++ b/net/netfilter/ipvs/ip_vs_core.c
1421 @@ -2280,7 +2280,6 @@ static void __net_exit __ip_vs_cleanup(struct net *net)
1422 {
1423 struct netns_ipvs *ipvs = net_ipvs(net);
1424
1425 - nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
1426 ip_vs_service_net_cleanup(ipvs); /* ip_vs_flush() with locks */
1427 ip_vs_conn_net_cleanup(ipvs);
1428 ip_vs_app_net_cleanup(ipvs);
1429 @@ -2295,6 +2294,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1430 {
1431 struct netns_ipvs *ipvs = net_ipvs(net);
1432 EnterFunction(2);
1433 + nf_unregister_net_hooks(net, ip_vs_ops, ARRAY_SIZE(ip_vs_ops));
1434 ipvs->enable = 0; /* Disable packet reception */
1435 smp_wmb();
1436 ip_vs_sync_net_cleanup(ipvs);
1437 diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
1438 index d67a96a25a68..7569ba00e732 100644
1439 --- a/net/netfilter/nf_queue.c
1440 +++ b/net/netfilter/nf_queue.c
1441 @@ -238,6 +238,7 @@ static unsigned int nf_iterate(struct sk_buff *skb,
1442 repeat:
1443 verdict = nf_hook_entry_hookfn(hook, skb, state);
1444 if (verdict != NF_ACCEPT) {
1445 + *index = i;
1446 if (verdict != NF_REPEAT)
1447 return verdict;
1448 goto repeat;
1449 diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
1450 index 376181cc1def..9f2875efb4ac 100644
1451 --- a/net/nfc/netlink.c
1452 +++ b/net/nfc/netlink.c
1453 @@ -922,7 +922,8 @@ static int nfc_genl_deactivate_target(struct sk_buff *skb,
1454 u32 device_idx, target_idx;
1455 int rc;
1456
1457 - if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
1458 + if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
1459 + !info->attrs[NFC_ATTR_TARGET_INDEX])
1460 return -EINVAL;
1461
1462 device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
1463 diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c
1464 index bb95c43aae76..5a304cfc8423 100644
1465 --- a/net/openvswitch/vport-internal_dev.c
1466 +++ b/net/openvswitch/vport-internal_dev.c
1467 @@ -169,7 +169,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
1468 {
1469 struct vport *vport;
1470 struct internal_dev *internal_dev;
1471 + struct net_device *dev;
1472 int err;
1473 + bool free_vport = true;
1474
1475 vport = ovs_vport_alloc(0, &ovs_internal_vport_ops, parms);
1476 if (IS_ERR(vport)) {
1477 @@ -177,8 +179,9 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
1478 goto error;
1479 }
1480
1481 - vport->dev = alloc_netdev(sizeof(struct internal_dev),
1482 - parms->name, NET_NAME_USER, do_setup);
1483 + dev = alloc_netdev(sizeof(struct internal_dev),
1484 + parms->name, NET_NAME_USER, do_setup);
1485 + vport->dev = dev;
1486 if (!vport->dev) {
1487 err = -ENOMEM;
1488 goto error_free_vport;
1489 @@ -199,8 +202,10 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
1490
1491 rtnl_lock();
1492 err = register_netdevice(vport->dev);
1493 - if (err)
1494 + if (err) {
1495 + free_vport = false;
1496 goto error_unlock;
1497 + }
1498
1499 dev_set_promiscuity(vport->dev, 1);
1500 rtnl_unlock();
1501 @@ -210,11 +215,12 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
1502
1503 error_unlock:
1504 rtnl_unlock();
1505 - free_percpu(vport->dev->tstats);
1506 + free_percpu(dev->tstats);
1507 error_free_netdev:
1508 - free_netdev(vport->dev);
1509 + free_netdev(dev);
1510 error_free_vport:
1511 - ovs_vport_free(vport);
1512 + if (free_vport)
1513 + ovs_vport_free(vport);
1514 error:
1515 return ERR_PTR(err);
1516 }
1517 diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
1518 index ae65a1cfa596..fb546b2d67ca 100644
1519 --- a/net/sctp/sm_make_chunk.c
1520 +++ b/net/sctp/sm_make_chunk.c
1521 @@ -2600,6 +2600,8 @@ do_addr_param:
1522 case SCTP_PARAM_STATE_COOKIE:
1523 asoc->peer.cookie_len =
1524 ntohs(param.p->length) - sizeof(struct sctp_paramhdr);
1525 + if (asoc->peer.cookie)
1526 + kfree(asoc->peer.cookie);
1527 asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
1528 if (!asoc->peer.cookie)
1529 retval = 0;
1530 @@ -2664,6 +2666,8 @@ do_addr_param:
1531 goto fall_through;
1532
1533 /* Save peer's random parameter */
1534 + if (asoc->peer.peer_random)
1535 + kfree(asoc->peer.peer_random);
1536 asoc->peer.peer_random = kmemdup(param.p,
1537 ntohs(param.p->length), gfp);
1538 if (!asoc->peer.peer_random) {
1539 @@ -2677,6 +2681,8 @@ do_addr_param:
1540 goto fall_through;
1541
1542 /* Save peer's HMAC list */
1543 + if (asoc->peer.peer_hmacs)
1544 + kfree(asoc->peer.peer_hmacs);
1545 asoc->peer.peer_hmacs = kmemdup(param.p,
1546 ntohs(param.p->length), gfp);
1547 if (!asoc->peer.peer_hmacs) {
1548 @@ -2692,6 +2698,8 @@ do_addr_param:
1549 if (!ep->auth_enable)
1550 goto fall_through;
1551
1552 + if (asoc->peer.peer_chunks)
1553 + kfree(asoc->peer.peer_chunks);
1554 asoc->peer.peer_chunks = kmemdup(param.p,
1555 ntohs(param.p->length), gfp);
1556 if (!asoc->peer.peer_chunks)
1557 diff --git a/net/tipc/group.c b/net/tipc/group.c
1558 index 06fee142f09f..3ee93b5c19b6 100644
1559 --- a/net/tipc/group.c
1560 +++ b/net/tipc/group.c
1561 @@ -218,6 +218,7 @@ void tipc_group_delete(struct net *net, struct tipc_group *grp)
1562
1563 rbtree_postorder_for_each_entry_safe(m, tmp, tree, tree_node) {
1564 tipc_group_proto_xmit(grp, m, GRP_LEAVE_MSG, &xmitq);
1565 + __skb_queue_purge(&m->deferredq);
1566 list_del(&m->list);
1567 kfree(m);
1568 }
1569 diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
1570 index f3f3d06cb6d8..e30f53728725 100644
1571 --- a/net/vmw_vsock/virtio_transport_common.c
1572 +++ b/net/vmw_vsock/virtio_transport_common.c
1573 @@ -871,8 +871,10 @@ virtio_transport_recv_connected(struct sock *sk,
1574 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
1575 vsk->peer_shutdown |= SEND_SHUTDOWN;
1576 if (vsk->peer_shutdown == SHUTDOWN_MASK &&
1577 - vsock_stream_has_data(vsk) <= 0)
1578 + vsock_stream_has_data(vsk) <= 0) {
1579 + sock_set_flag(sk, SOCK_DONE);
1580 sk->sk_state = TCP_CLOSING;
1581 + }
1582 if (le32_to_cpu(pkt->hdr.flags))
1583 sk->sk_state_change(sk);
1584 break;
1585 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
1586 index 45bf89ed31de..308ce76149cc 100644
1587 --- a/sound/pci/hda/hda_intel.c
1588 +++ b/sound/pci/hda/hda_intel.c
1589 @@ -378,6 +378,7 @@ enum {
1590
1591 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
1592 #define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
1593 +#define IS_CNL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9dc8)
1594
1595 static char *driver_short_names[] = {
1596 [AZX_DRIVER_ICH] = "HDA Intel",
1597 @@ -1795,8 +1796,8 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
1598 else
1599 chip->bdl_pos_adj = bdl_pos_adj[dev];
1600
1601 - /* Workaround for a communication error on CFL (bko#199007) */
1602 - if (IS_CFL(pci))
1603 + /* Workaround for a communication error on CFL (bko#199007) and CNL */
1604 + if (IS_CFL(pci) || IS_CNL(pci))
1605 chip->polling_mode = 1;
1606
1607 err = azx_bus_init(chip, model[dev], &pci_hda_io_ops);
1608 diff --git a/tools/perf/arch/s390/util/machine.c b/tools/perf/arch/s390/util/machine.c
1609 index 0b2054007314..a19690a17291 100644
1610 --- a/tools/perf/arch/s390/util/machine.c
1611 +++ b/tools/perf/arch/s390/util/machine.c
1612 @@ -5,16 +5,19 @@
1613 #include "util.h"
1614 #include "machine.h"
1615 #include "api/fs/fs.h"
1616 +#include "debug.h"
1617
1618 int arch__fix_module_text_start(u64 *start, const char *name)
1619 {
1620 + u64 m_start = *start;
1621 char path[PATH_MAX];
1622
1623 snprintf(path, PATH_MAX, "module/%.*s/sections/.text",
1624 (int)strlen(name) - 2, name + 1);
1625 -
1626 - if (sysfs__read_ull(path, (unsigned long long *)start) < 0)
1627 - return -1;
1628 + if (sysfs__read_ull(path, (unsigned long long *)start) < 0) {
1629 + pr_debug2("Using module %s start:%#lx\n", path, m_start);
1630 + *start = m_start;
1631 + }
1632
1633 return 0;
1634 }
1635 diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
1636 index abd38abf1d91..24f2a87cf91d 100644
1637 --- a/tools/perf/util/data-convert-bt.c
1638 +++ b/tools/perf/util/data-convert-bt.c
1639 @@ -271,7 +271,7 @@ static int string_set_value(struct bt_ctf_field *field, const char *string)
1640 if (i > 0)
1641 strncpy(buffer, string, i);
1642 }
1643 - strncat(buffer + p, numstr, 4);
1644 + memcpy(buffer + p, numstr, 4);
1645 p += 3;
1646 }
1647 }
1648 diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
1649 index 2048d393ece6..56007a7e0b4d 100644
1650 --- a/tools/perf/util/thread.c
1651 +++ b/tools/perf/util/thread.c
1652 @@ -128,7 +128,7 @@ void thread__put(struct thread *thread)
1653 }
1654 }
1655
1656 -struct namespaces *thread__namespaces(const struct thread *thread)
1657 +static struct namespaces *__thread__namespaces(const struct thread *thread)
1658 {
1659 if (list_empty(&thread->namespaces_list))
1660 return NULL;
1661 @@ -136,10 +136,21 @@ struct namespaces *thread__namespaces(const struct thread *thread)
1662 return list_first_entry(&thread->namespaces_list, struct namespaces, list);
1663 }
1664
1665 +struct namespaces *thread__namespaces(const struct thread *thread)
1666 +{
1667 + struct namespaces *ns;
1668 +
1669 + down_read((struct rw_semaphore *)&thread->namespaces_lock);
1670 + ns = __thread__namespaces(thread);
1671 + up_read((struct rw_semaphore *)&thread->namespaces_lock);
1672 +
1673 + return ns;
1674 +}
1675 +
1676 static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
1677 struct namespaces_event *event)
1678 {
1679 - struct namespaces *new, *curr = thread__namespaces(thread);
1680 + struct namespaces *new, *curr = __thread__namespaces(thread);
1681
1682 new = namespaces__new(event);
1683 if (!new)
1684 diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
1685 index 8ec76681605c..f25f72a75cf3 100755
1686 --- a/tools/testing/selftests/netfilter/nft_nat.sh
1687 +++ b/tools/testing/selftests/netfilter/nft_nat.sh
1688 @@ -23,7 +23,11 @@ ip netns add ns0
1689 ip netns add ns1
1690 ip netns add ns2
1691
1692 -ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
1693 +ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 > /dev/null 2>&1
1694 +if [ $? -ne 0 ];then
1695 + echo "SKIP: No virtual ethernet pair device support in kernel"
1696 + exit $ksft_skip
1697 +fi
1698 ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
1699
1700 ip -net ns0 link set lo up