Magellan Linux

Contents of /trunk/kernel-magellan/patches-5.0/0105-5.0.6-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3332 - (show annotations) (download)
Fri Apr 26 12:20:28 2019 UTC (5 years ago) by niro
File size: 158283 byte(s)
-linux-5.0.6
1 diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
2 index 356156f5c52d..ba8927c0d45c 100644
3 --- a/Documentation/virtual/kvm/api.txt
4 +++ b/Documentation/virtual/kvm/api.txt
5 @@ -13,7 +13,7 @@ of a virtual machine. The ioctls belong to three classes
6
7 - VM ioctls: These query and set attributes that affect an entire virtual
8 machine, for example memory layout. In addition a VM ioctl is used to
9 - create virtual cpus (vcpus).
10 + create virtual cpus (vcpus) and devices.
11
12 Only run VM ioctls from the same process (address space) that was used
13 to create the VM.
14 @@ -24,6 +24,11 @@ of a virtual machine. The ioctls belong to three classes
15 Only run vcpu ioctls from the same thread that was used to create the
16 vcpu.
17
18 + - device ioctls: These query and set attributes that control the operation
19 + of a single device.
20 +
21 + device ioctls must be issued from the same process (address space) that
22 + was used to create the VM.
23
24 2. File descriptors
25 -------------------
26 @@ -32,10 +37,11 @@ The kvm API is centered around file descriptors. An initial
27 open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
28 can be used to issue system ioctls. A KVM_CREATE_VM ioctl on this
29 handle will create a VM file descriptor which can be used to issue VM
30 -ioctls. A KVM_CREATE_VCPU ioctl on a VM fd will create a virtual cpu
31 -and return a file descriptor pointing to it. Finally, ioctls on a vcpu
32 -fd can be used to control the vcpu, including the important task of
33 -actually running guest code.
34 +ioctls. A KVM_CREATE_VCPU or KVM_CREATE_DEVICE ioctl on a VM fd will
35 +create a virtual cpu or device and return a file descriptor pointing to
36 +the new resource. Finally, ioctls on a vcpu or device fd can be used
37 +to control the vcpu or device. For vcpus, this includes the important
38 +task of actually running guest code.
39
40 In general file descriptors can be migrated among processes by means
41 of fork() and the SCM_RIGHTS facility of unix domain socket. These
42 diff --git a/Makefile b/Makefile
43 index 63152c5ca136..3ee390feea61 100644
44 --- a/Makefile
45 +++ b/Makefile
46 @@ -1,7 +1,7 @@
47 # SPDX-License-Identifier: GPL-2.0
48 VERSION = 5
49 PATCHLEVEL = 0
50 -SUBLEVEL = 5
51 +SUBLEVEL = 6
52 EXTRAVERSION =
53 NAME = Shy Crocodile
54
55 @@ -944,9 +944,11 @@ mod_sign_cmd = true
56 endif
57 export mod_sign_cmd
58
59 +HOST_LIBELF_LIBS = $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
60 +
61 ifdef CONFIG_STACK_VALIDATION
62 has_libelf := $(call try-run,\
63 - echo "int main() {}" | $(HOSTCC) -xc -o /dev/null -lelf -,1,0)
64 + echo "int main() {}" | $(HOSTCC) -xc -o /dev/null $(HOST_LIBELF_LIBS) -,1,0)
65 ifeq ($(has_libelf),1)
66 objtool_target := tools/objtool FORCE
67 else
68 diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c
69 index bfeb25aaf9a2..326e870d7123 100644
70 --- a/arch/arm/mach-imx/cpuidle-imx6q.c
71 +++ b/arch/arm/mach-imx/cpuidle-imx6q.c
72 @@ -16,30 +16,23 @@
73 #include "cpuidle.h"
74 #include "hardware.h"
75
76 -static atomic_t master = ATOMIC_INIT(0);
77 -static DEFINE_SPINLOCK(master_lock);
78 +static int num_idle_cpus = 0;
79 +static DEFINE_SPINLOCK(cpuidle_lock);
80
81 static int imx6q_enter_wait(struct cpuidle_device *dev,
82 struct cpuidle_driver *drv, int index)
83 {
84 - if (atomic_inc_return(&master) == num_online_cpus()) {
85 - /*
86 - * With this lock, we prevent other cpu to exit and enter
87 - * this function again and become the master.
88 - */
89 - if (!spin_trylock(&master_lock))
90 - goto idle;
91 + spin_lock(&cpuidle_lock);
92 + if (++num_idle_cpus == num_online_cpus())
93 imx6_set_lpm(WAIT_UNCLOCKED);
94 - cpu_do_idle();
95 - imx6_set_lpm(WAIT_CLOCKED);
96 - spin_unlock(&master_lock);
97 - goto done;
98 - }
99 + spin_unlock(&cpuidle_lock);
100
101 -idle:
102 cpu_do_idle();
103 -done:
104 - atomic_dec(&master);
105 +
106 + spin_lock(&cpuidle_lock);
107 + if (num_idle_cpus-- == num_online_cpus())
108 + imx6_set_lpm(WAIT_CLOCKED);
109 + spin_unlock(&cpuidle_lock);
110
111 return index;
112 }
113 diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
114 index 19a8834e0398..0690a306f6ca 100644
115 --- a/arch/powerpc/include/asm/ppc-opcode.h
116 +++ b/arch/powerpc/include/asm/ppc-opcode.h
117 @@ -302,6 +302,7 @@
118 /* Misc instructions for BPF compiler */
119 #define PPC_INST_LBZ 0x88000000
120 #define PPC_INST_LD 0xe8000000
121 +#define PPC_INST_LDX 0x7c00002a
122 #define PPC_INST_LHZ 0xa0000000
123 #define PPC_INST_LWZ 0x80000000
124 #define PPC_INST_LHBRX 0x7c00062c
125 @@ -309,6 +310,7 @@
126 #define PPC_INST_STB 0x98000000
127 #define PPC_INST_STH 0xb0000000
128 #define PPC_INST_STD 0xf8000000
129 +#define PPC_INST_STDX 0x7c00012a
130 #define PPC_INST_STDU 0xf8000001
131 #define PPC_INST_STW 0x90000000
132 #define PPC_INST_STWU 0x94000000
133 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
134 index afb638778f44..447defdd4503 100644
135 --- a/arch/powerpc/kernel/exceptions-64e.S
136 +++ b/arch/powerpc/kernel/exceptions-64e.S
137 @@ -349,6 +349,7 @@ ret_from_mc_except:
138 #define GEN_BTB_FLUSH
139 #define CRIT_BTB_FLUSH
140 #define DBG_BTB_FLUSH
141 +#define MC_BTB_FLUSH
142 #define GDBELL_BTB_FLUSH
143 #endif
144
145 diff --git a/arch/powerpc/lib/memcmp_64.S b/arch/powerpc/lib/memcmp_64.S
146 index 844d8e774492..b7f6f6e0b6e8 100644
147 --- a/arch/powerpc/lib/memcmp_64.S
148 +++ b/arch/powerpc/lib/memcmp_64.S
149 @@ -215,11 +215,20 @@ _GLOBAL_TOC(memcmp)
150 beq .Lzero
151
152 .Lcmp_rest_lt8bytes:
153 - /* Here we have only less than 8 bytes to compare with. at least s1
154 - * Address is aligned with 8 bytes.
155 - * The next double words are load and shift right with appropriate
156 - * bits.
157 + /*
158 + * Here we have less than 8 bytes to compare. At least s1 is aligned to
159 + * 8 bytes, but s2 may not be. We must make sure s2 + 7 doesn't cross a
160 + * page boundary, otherwise we might read past the end of the buffer and
161 + * trigger a page fault. We use 4K as the conservative minimum page
162 + * size. If we detect that case we go to the byte-by-byte loop.
163 + *
164 + * Otherwise the next double word is loaded from s1 and s2, and shifted
165 + * right to compare the appropriate bits.
166 */
167 + clrldi r6,r4,(64-12) // r6 = r4 & 0xfff
168 + cmpdi r6,0xff8
169 + bgt .Lshort
170 +
171 subfic r6,r5,8
172 slwi r6,r6,3
173 LD rA,0,r3
174 diff --git a/arch/powerpc/net/bpf_jit.h b/arch/powerpc/net/bpf_jit.h
175 index c2d5192ed64f..e52e30bf7d86 100644
176 --- a/arch/powerpc/net/bpf_jit.h
177 +++ b/arch/powerpc/net/bpf_jit.h
178 @@ -51,6 +51,8 @@
179 #define PPC_LIS(r, i) PPC_ADDIS(r, 0, i)
180 #define PPC_STD(r, base, i) EMIT(PPC_INST_STD | ___PPC_RS(r) | \
181 ___PPC_RA(base) | ((i) & 0xfffc))
182 +#define PPC_STDX(r, base, b) EMIT(PPC_INST_STDX | ___PPC_RS(r) | \
183 + ___PPC_RA(base) | ___PPC_RB(b))
184 #define PPC_STDU(r, base, i) EMIT(PPC_INST_STDU | ___PPC_RS(r) | \
185 ___PPC_RA(base) | ((i) & 0xfffc))
186 #define PPC_STW(r, base, i) EMIT(PPC_INST_STW | ___PPC_RS(r) | \
187 @@ -65,7 +67,9 @@
188 #define PPC_LBZ(r, base, i) EMIT(PPC_INST_LBZ | ___PPC_RT(r) | \
189 ___PPC_RA(base) | IMM_L(i))
190 #define PPC_LD(r, base, i) EMIT(PPC_INST_LD | ___PPC_RT(r) | \
191 - ___PPC_RA(base) | IMM_L(i))
192 + ___PPC_RA(base) | ((i) & 0xfffc))
193 +#define PPC_LDX(r, base, b) EMIT(PPC_INST_LDX | ___PPC_RT(r) | \
194 + ___PPC_RA(base) | ___PPC_RB(b))
195 #define PPC_LWZ(r, base, i) EMIT(PPC_INST_LWZ | ___PPC_RT(r) | \
196 ___PPC_RA(base) | IMM_L(i))
197 #define PPC_LHZ(r, base, i) EMIT(PPC_INST_LHZ | ___PPC_RT(r) | \
198 @@ -85,17 +89,6 @@
199 ___PPC_RA(a) | ___PPC_RB(b))
200 #define PPC_BPF_STDCX(s, a, b) EMIT(PPC_INST_STDCX | ___PPC_RS(s) | \
201 ___PPC_RA(a) | ___PPC_RB(b))
202 -
203 -#ifdef CONFIG_PPC64
204 -#define PPC_BPF_LL(r, base, i) do { PPC_LD(r, base, i); } while(0)
205 -#define PPC_BPF_STL(r, base, i) do { PPC_STD(r, base, i); } while(0)
206 -#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
207 -#else
208 -#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
209 -#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
210 -#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
211 -#endif
212 -
213 #define PPC_CMPWI(a, i) EMIT(PPC_INST_CMPWI | ___PPC_RA(a) | IMM_L(i))
214 #define PPC_CMPDI(a, i) EMIT(PPC_INST_CMPDI | ___PPC_RA(a) | IMM_L(i))
215 #define PPC_CMPW(a, b) EMIT(PPC_INST_CMPW | ___PPC_RA(a) | \
216 diff --git a/arch/powerpc/net/bpf_jit32.h b/arch/powerpc/net/bpf_jit32.h
217 index 6f4daacad296..ade04547703f 100644
218 --- a/arch/powerpc/net/bpf_jit32.h
219 +++ b/arch/powerpc/net/bpf_jit32.h
220 @@ -123,6 +123,10 @@ DECLARE_LOAD_FUNC(sk_load_byte_msh);
221 #define PPC_NTOHS_OFFS(r, base, i) PPC_LHZ_OFFS(r, base, i)
222 #endif
223
224 +#define PPC_BPF_LL(r, base, i) do { PPC_LWZ(r, base, i); } while(0)
225 +#define PPC_BPF_STL(r, base, i) do { PPC_STW(r, base, i); } while(0)
226 +#define PPC_BPF_STLU(r, base, i) do { PPC_STWU(r, base, i); } while(0)
227 +
228 #define SEEN_DATAREF 0x10000 /* might call external helpers */
229 #define SEEN_XREG 0x20000 /* X reg is used */
230 #define SEEN_MEM 0x40000 /* SEEN_MEM+(1<<n) = use mem[n] for temporary
231 diff --git a/arch/powerpc/net/bpf_jit64.h b/arch/powerpc/net/bpf_jit64.h
232 index 3609be4692b3..47f441f351a6 100644
233 --- a/arch/powerpc/net/bpf_jit64.h
234 +++ b/arch/powerpc/net/bpf_jit64.h
235 @@ -68,6 +68,26 @@ static const int b2p[] = {
236 /* PPC NVR range -- update this if we ever use NVRs below r27 */
237 #define BPF_PPC_NVR_MIN 27
238
239 +/*
240 + * WARNING: These can use TMP_REG_2 if the offset is not at word boundary,
241 + * so ensure that it isn't in use already.
242 + */
243 +#define PPC_BPF_LL(r, base, i) do { \
244 + if ((i) % 4) { \
245 + PPC_LI(b2p[TMP_REG_2], (i)); \
246 + PPC_LDX(r, base, b2p[TMP_REG_2]); \
247 + } else \
248 + PPC_LD(r, base, i); \
249 + } while(0)
250 +#define PPC_BPF_STL(r, base, i) do { \
251 + if ((i) % 4) { \
252 + PPC_LI(b2p[TMP_REG_2], (i)); \
253 + PPC_STDX(r, base, b2p[TMP_REG_2]); \
254 + } else \
255 + PPC_STD(r, base, i); \
256 + } while(0)
257 +#define PPC_BPF_STLU(r, base, i) do { PPC_STDU(r, base, i); } while(0)
258 +
259 #define SEEN_FUNC 0x1000 /* might call external helpers */
260 #define SEEN_STACK 0x2000 /* uses BPF stack */
261 #define SEEN_TAILCALL 0x4000 /* uses tail calls */
262 diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
263 index 7ce57657d3b8..b1a116eecae2 100644
264 --- a/arch/powerpc/net/bpf_jit_comp64.c
265 +++ b/arch/powerpc/net/bpf_jit_comp64.c
266 @@ -252,7 +252,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
267 * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
268 * goto out;
269 */
270 - PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
271 + PPC_BPF_LL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
272 PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
273 PPC_BCC(COND_GT, out);
274
275 @@ -265,7 +265,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
276 /* prog = array->ptrs[index]; */
277 PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
278 PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
279 - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
280 + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));
281
282 /*
283 * if (prog == NULL)
284 @@ -275,7 +275,7 @@ static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32
285 PPC_BCC(COND_EQ, out);
286
287 /* goto *(prog->bpf_func + prologue_size); */
288 - PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
289 + PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
290 #ifdef PPC64_ELF_ABI_v1
291 /* skip past the function descriptor */
292 PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
293 @@ -606,7 +606,7 @@ bpf_alu32_trunc:
294 * the instructions generated will remain the
295 * same across all passes
296 */
297 - PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
298 + PPC_BPF_STL(dst_reg, 1, bpf_jit_stack_local(ctx));
299 PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
300 PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
301 break;
302 @@ -662,7 +662,7 @@ emit_clear:
303 PPC_LI32(b2p[TMP_REG_1], imm);
304 src_reg = b2p[TMP_REG_1];
305 }
306 - PPC_STD(src_reg, dst_reg, off);
307 + PPC_BPF_STL(src_reg, dst_reg, off);
308 break;
309
310 /*
311 @@ -709,7 +709,7 @@ emit_clear:
312 break;
313 /* dst = *(u64 *)(ul) (src + off) */
314 case BPF_LDX | BPF_MEM | BPF_DW:
315 - PPC_LD(dst_reg, src_reg, off);
316 + PPC_BPF_LL(dst_reg, src_reg, off);
317 break;
318
319 /*
320 diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
321 index 6ed22127391b..921f12182f3e 100644
322 --- a/arch/powerpc/platforms/pseries/pseries_energy.c
323 +++ b/arch/powerpc/platforms/pseries/pseries_energy.c
324 @@ -77,18 +77,27 @@ static u32 cpu_to_drc_index(int cpu)
325
326 ret = drc.drc_index_start + (thread_index * drc.sequential_inc);
327 } else {
328 - const __be32 *indexes;
329 -
330 - indexes = of_get_property(dn, "ibm,drc-indexes", NULL);
331 - if (indexes == NULL)
332 - goto err_of_node_put;
333 + u32 nr_drc_indexes, thread_drc_index;
334
335 /*
336 - * The first element indexes[0] is the number of drc_indexes
337 - * returned in the list. Hence thread_index+1 will get the
338 - * drc_index corresponding to core number thread_index.
339 + * The first element of ibm,drc-indexes array is the
340 + * number of drc_indexes returned in the list. Hence
341 + * thread_index+1 will get the drc_index corresponding
342 + * to core number thread_index.
343 */
344 - ret = indexes[thread_index + 1];
345 + rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
346 + 0, &nr_drc_indexes);
347 + if (rc)
348 + goto err_of_node_put;
349 +
350 + WARN_ON_ONCE(thread_index > nr_drc_indexes);
351 + rc = of_property_read_u32_index(dn, "ibm,drc-indexes",
352 + thread_index + 1,
353 + &thread_drc_index);
354 + if (rc)
355 + goto err_of_node_put;
356 +
357 + ret = thread_drc_index;
358 }
359
360 rc = 0;
361 diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
362 index d97d52772789..452dcfd7e5dd 100644
363 --- a/arch/powerpc/platforms/pseries/ras.c
364 +++ b/arch/powerpc/platforms/pseries/ras.c
365 @@ -550,6 +550,7 @@ static void pseries_print_mce_info(struct pt_regs *regs,
366 "UE",
367 "SLB",
368 "ERAT",
369 + "Unknown",
370 "TLB",
371 "D-Cache",
372 "Unknown",
373 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
374 index 68261430fe6e..64d5a3327030 100644
375 --- a/arch/x86/Kconfig
376 +++ b/arch/x86/Kconfig
377 @@ -2221,14 +2221,8 @@ config RANDOMIZE_MEMORY_PHYSICAL_PADDING
378 If unsure, leave at the default value.
379
380 config HOTPLUG_CPU
381 - bool "Support for hot-pluggable CPUs"
382 + def_bool y
383 depends on SMP
384 - ---help---
385 - Say Y here to allow turning CPUs off and on. CPUs can be
386 - controlled through /sys/devices/system/cpu.
387 - ( Note: power management support will enable this option
388 - automatically on SMP systems. )
389 - Say N if you want to disable CPU hotplug.
390
391 config BOOTPARAM_HOTPLUG_CPU0
392 bool "Set default setting of cpu0_hotpluggable"
393 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
394 index e40be168c73c..71d763ad2637 100644
395 --- a/arch/x86/include/asm/kvm_host.h
396 +++ b/arch/x86/include/asm/kvm_host.h
397 @@ -352,6 +352,7 @@ struct kvm_mmu_page {
398 };
399
400 struct kvm_pio_request {
401 + unsigned long linear_rip;
402 unsigned long count;
403 int in;
404 int port;
405 @@ -570,6 +571,7 @@ struct kvm_vcpu_arch {
406 bool tpr_access_reporting;
407 u64 ia32_xss;
408 u64 microcode_version;
409 + u64 arch_capabilities;
410
411 /*
412 * Paging state of the vcpu
413 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
414 index d86eee07d327..a0a770816429 100644
415 --- a/arch/x86/kvm/vmx/vmx.c
416 +++ b/arch/x86/kvm/vmx/vmx.c
417 @@ -1679,12 +1679,6 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
418
419 msr_info->data = to_vmx(vcpu)->spec_ctrl;
420 break;
421 - case MSR_IA32_ARCH_CAPABILITIES:
422 - if (!msr_info->host_initiated &&
423 - !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
424 - return 1;
425 - msr_info->data = to_vmx(vcpu)->arch_capabilities;
426 - break;
427 case MSR_IA32_SYSENTER_CS:
428 msr_info->data = vmcs_read32(GUEST_SYSENTER_CS);
429 break;
430 @@ -1891,11 +1885,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
431 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD,
432 MSR_TYPE_W);
433 break;
434 - case MSR_IA32_ARCH_CAPABILITIES:
435 - if (!msr_info->host_initiated)
436 - return 1;
437 - vmx->arch_capabilities = data;
438 - break;
439 case MSR_IA32_CR_PAT:
440 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
441 if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
442 @@ -4083,8 +4072,6 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
443 ++vmx->nmsrs;
444 }
445
446 - vmx->arch_capabilities = kvm_get_arch_capabilities();
447 -
448 vm_exit_controls_init(vmx, vmx_vmexit_ctrl());
449
450 /* 22.2.1, 20.8.1 */
451 diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
452 index 0ac0a64c7790..1abae731c3e4 100644
453 --- a/arch/x86/kvm/vmx/vmx.h
454 +++ b/arch/x86/kvm/vmx/vmx.h
455 @@ -191,7 +191,6 @@ struct vcpu_vmx {
456 u64 msr_guest_kernel_gs_base;
457 #endif
458
459 - u64 arch_capabilities;
460 u64 spec_ctrl;
461
462 u32 vm_entry_controls_shadow;
463 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
464 index 2bcef72a7c40..7ee802a92bc8 100644
465 --- a/arch/x86/kvm/x86.c
466 +++ b/arch/x86/kvm/x86.c
467 @@ -2443,6 +2443,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
468 if (msr_info->host_initiated)
469 vcpu->arch.microcode_version = data;
470 break;
471 + case MSR_IA32_ARCH_CAPABILITIES:
472 + if (!msr_info->host_initiated)
473 + return 1;
474 + vcpu->arch.arch_capabilities = data;
475 + break;
476 case MSR_EFER:
477 return set_efer(vcpu, data);
478 case MSR_K7_HWCR:
479 @@ -2747,6 +2752,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
480 case MSR_IA32_UCODE_REV:
481 msr_info->data = vcpu->arch.microcode_version;
482 break;
483 + case MSR_IA32_ARCH_CAPABILITIES:
484 + if (!msr_info->host_initiated &&
485 + !guest_cpuid_has(vcpu, X86_FEATURE_ARCH_CAPABILITIES))
486 + return 1;
487 + msr_info->data = vcpu->arch.arch_capabilities;
488 + break;
489 case MSR_IA32_TSC:
490 msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset;
491 break;
492 @@ -6522,14 +6533,27 @@ int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
493 }
494 EXPORT_SYMBOL_GPL(kvm_emulate_instruction_from_buffer);
495
496 +static int complete_fast_pio_out(struct kvm_vcpu *vcpu)
497 +{
498 + vcpu->arch.pio.count = 0;
499 +
500 + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip)))
501 + return 1;
502 +
503 + return kvm_skip_emulated_instruction(vcpu);
504 +}
505 +
506 static int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size,
507 unsigned short port)
508 {
509 unsigned long val = kvm_register_read(vcpu, VCPU_REGS_RAX);
510 int ret = emulator_pio_out_emulated(&vcpu->arch.emulate_ctxt,
511 size, port, &val, 1);
512 - /* do not return to emulator after return from userspace */
513 - vcpu->arch.pio.count = 0;
514 +
515 + if (!ret) {
516 + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
517 + vcpu->arch.complete_userspace_io = complete_fast_pio_out;
518 + }
519 return ret;
520 }
521
522 @@ -6540,6 +6564,11 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
523 /* We should only ever be called with arch.pio.count equal to 1 */
524 BUG_ON(vcpu->arch.pio.count != 1);
525
526 + if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.pio.linear_rip))) {
527 + vcpu->arch.pio.count = 0;
528 + return 1;
529 + }
530 +
531 /* For size less than 4 we merge, else we zero extend */
532 val = (vcpu->arch.pio.size < 4) ? kvm_register_read(vcpu, VCPU_REGS_RAX)
533 : 0;
534 @@ -6552,7 +6581,7 @@ static int complete_fast_pio_in(struct kvm_vcpu *vcpu)
535 vcpu->arch.pio.port, &val, 1);
536 kvm_register_write(vcpu, VCPU_REGS_RAX, val);
537
538 - return 1;
539 + return kvm_skip_emulated_instruction(vcpu);
540 }
541
542 static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
543 @@ -6571,6 +6600,7 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
544 return ret;
545 }
546
547 + vcpu->arch.pio.linear_rip = kvm_get_linear_rip(vcpu);
548 vcpu->arch.complete_userspace_io = complete_fast_pio_in;
549
550 return 0;
551 @@ -6578,16 +6608,13 @@ static int kvm_fast_pio_in(struct kvm_vcpu *vcpu, int size,
552
553 int kvm_fast_pio(struct kvm_vcpu *vcpu, int size, unsigned short port, int in)
554 {
555 - int ret = kvm_skip_emulated_instruction(vcpu);
556 + int ret;
557
558 - /*
559 - * TODO: we might be squashing a KVM_GUESTDBG_SINGLESTEP-triggered
560 - * KVM_EXIT_DEBUG here.
561 - */
562 if (in)
563 - return kvm_fast_pio_in(vcpu, size, port) && ret;
564 + ret = kvm_fast_pio_in(vcpu, size, port);
565 else
566 - return kvm_fast_pio_out(vcpu, size, port) && ret;
567 + ret = kvm_fast_pio_out(vcpu, size, port);
568 + return ret && kvm_skip_emulated_instruction(vcpu);
569 }
570 EXPORT_SYMBOL_GPL(kvm_fast_pio);
571
572 @@ -8725,6 +8752,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
573
574 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
575 {
576 + vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
577 vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
578 kvm_vcpu_mtrr_init(vcpu);
579 vcpu_load(vcpu);
580 diff --git a/block/blk-mq.c b/block/blk-mq.c
581 index 9437a5eb07cf..b9283b63d116 100644
582 --- a/block/blk-mq.c
583 +++ b/block/blk-mq.c
584 @@ -1076,7 +1076,13 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
585 hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
586
587 spin_lock(&hctx->dispatch_wait_lock);
588 - list_del_init(&wait->entry);
589 + if (!list_empty(&wait->entry)) {
590 + struct sbitmap_queue *sbq;
591 +
592 + list_del_init(&wait->entry);
593 + sbq = &hctx->tags->bitmap_tags;
594 + atomic_dec(&sbq->ws_active);
595 + }
596 spin_unlock(&hctx->dispatch_wait_lock);
597
598 blk_mq_run_hw_queue(hctx, true);
599 @@ -1092,6 +1098,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
600 static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
601 struct request *rq)
602 {
603 + struct sbitmap_queue *sbq = &hctx->tags->bitmap_tags;
604 struct wait_queue_head *wq;
605 wait_queue_entry_t *wait;
606 bool ret;
607 @@ -1115,7 +1122,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
608 if (!list_empty_careful(&wait->entry))
609 return false;
610
611 - wq = &bt_wait_ptr(&hctx->tags->bitmap_tags, hctx)->wait;
612 + wq = &bt_wait_ptr(sbq, hctx)->wait;
613
614 spin_lock_irq(&wq->lock);
615 spin_lock(&hctx->dispatch_wait_lock);
616 @@ -1125,6 +1132,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
617 return false;
618 }
619
620 + atomic_inc(&sbq->ws_active);
621 wait->flags &= ~WQ_FLAG_EXCLUSIVE;
622 __add_wait_queue(wq, wait);
623
624 @@ -1145,6 +1153,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
625 * someone else gets the wakeup.
626 */
627 list_del_init(&wait->entry);
628 + atomic_dec(&sbq->ws_active);
629 spin_unlock(&hctx->dispatch_wait_lock);
630 spin_unlock_irq(&wq->lock);
631
632 diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
633 index 217a782c3e55..7aa08884ed48 100644
634 --- a/drivers/acpi/cppc_acpi.c
635 +++ b/drivers/acpi/cppc_acpi.c
636 @@ -1108,8 +1108,13 @@ int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
637 cpc_read(cpunum, nominal_reg, &nom);
638 perf_caps->nominal_perf = nom;
639
640 - cpc_read(cpunum, guaranteed_reg, &guaranteed);
641 - perf_caps->guaranteed_perf = guaranteed;
642 + if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
643 + IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
644 + perf_caps->guaranteed_perf = 0;
645 + } else {
646 + cpc_read(cpunum, guaranteed_reg, &guaranteed);
647 + perf_caps->guaranteed_perf = guaranteed;
648 + }
649
650 cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
651 perf_caps->lowest_nonlinear_perf = min_nonlinear;
652 diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
653 index 04ca65912638..684854d3b0ad 100644
654 --- a/drivers/block/zram/zram_drv.c
655 +++ b/drivers/block/zram/zram_drv.c
656 @@ -290,18 +290,8 @@ static ssize_t idle_store(struct device *dev,
657 struct zram *zram = dev_to_zram(dev);
658 unsigned long nr_pages = zram->disksize >> PAGE_SHIFT;
659 int index;
660 - char mode_buf[8];
661 - ssize_t sz;
662
663 - sz = strscpy(mode_buf, buf, sizeof(mode_buf));
664 - if (sz <= 0)
665 - return -EINVAL;
666 -
667 - /* ignore trailing new line */
668 - if (mode_buf[sz - 1] == '\n')
669 - mode_buf[sz - 1] = 0x00;
670 -
671 - if (strcmp(mode_buf, "all"))
672 + if (!sysfs_streq(buf, "all"))
673 return -EINVAL;
674
675 down_read(&zram->init_lock);
676 @@ -635,25 +625,15 @@ static ssize_t writeback_store(struct device *dev,
677 struct bio bio;
678 struct bio_vec bio_vec;
679 struct page *page;
680 - ssize_t ret, sz;
681 - char mode_buf[8];
682 - int mode = -1;
683 + ssize_t ret;
684 + int mode;
685 unsigned long blk_idx = 0;
686
687 - sz = strscpy(mode_buf, buf, sizeof(mode_buf));
688 - if (sz <= 0)
689 - return -EINVAL;
690 -
691 - /* ignore trailing newline */
692 - if (mode_buf[sz - 1] == '\n')
693 - mode_buf[sz - 1] = 0x00;
694 -
695 - if (!strcmp(mode_buf, "idle"))
696 + if (sysfs_streq(buf, "idle"))
697 mode = IDLE_WRITEBACK;
698 - else if (!strcmp(mode_buf, "huge"))
699 + else if (sysfs_streq(buf, "huge"))
700 mode = HUGE_WRITEBACK;
701 -
702 - if (mode == -1)
703 + else
704 return -EINVAL;
705
706 down_read(&zram->init_lock);
707 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
708 index 5ab6a4fe93aa..a579ca4552df 100644
709 --- a/drivers/cpufreq/intel_pstate.c
710 +++ b/drivers/cpufreq/intel_pstate.c
711 @@ -383,7 +383,10 @@ static int intel_pstate_get_cppc_guranteed(int cpu)
712 if (ret)
713 return ret;
714
715 - return cppc_perf.guaranteed_perf;
716 + if (cppc_perf.guaranteed_perf)
717 + return cppc_perf.guaranteed_perf;
718 +
719 + return cppc_perf.nominal_perf;
720 }
721
722 #else /* CONFIG_ACPI_CPPC_LIB */
723 diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c
724 index 99449738faa4..632ccf82c5d3 100644
725 --- a/drivers/cpufreq/scpi-cpufreq.c
726 +++ b/drivers/cpufreq/scpi-cpufreq.c
727 @@ -189,8 +189,8 @@ static int scpi_cpufreq_exit(struct cpufreq_policy *policy)
728 cpufreq_cooling_unregister(priv->cdev);
729 clk_put(priv->clk);
730 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table);
731 - kfree(priv);
732 dev_pm_opp_remove_all_dynamic(priv->cpu_dev);
733 + kfree(priv);
734
735 return 0;
736 }
737 diff --git a/drivers/gpio/gpio-adnp.c b/drivers/gpio/gpio-adnp.c
738 index 91b90c0cea73..12acdac85820 100644
739 --- a/drivers/gpio/gpio-adnp.c
740 +++ b/drivers/gpio/gpio-adnp.c
741 @@ -132,8 +132,10 @@ static int adnp_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
742 if (err < 0)
743 goto out;
744
745 - if (err & BIT(pos))
746 - err = -EACCES;
747 + if (value & BIT(pos)) {
748 + err = -EPERM;
749 + goto out;
750 + }
751
752 err = 0;
753
754 diff --git a/drivers/gpio/gpio-exar.c b/drivers/gpio/gpio-exar.c
755 index 0ecd2369c2ca..a09d2f9ebacc 100644
756 --- a/drivers/gpio/gpio-exar.c
757 +++ b/drivers/gpio/gpio-exar.c
758 @@ -148,6 +148,8 @@ static int gpio_exar_probe(struct platform_device *pdev)
759 mutex_init(&exar_gpio->lock);
760
761 index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL);
762 + if (index < 0)
763 + goto err_destroy;
764
765 sprintf(exar_gpio->name, "exar_gpio%d", index);
766 exar_gpio->gpio_chip.label = exar_gpio->name;
767 diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
768 index 77ae634eb11c..bd95fd6b4ac8 100644
769 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
770 +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
771 @@ -1446,7 +1446,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
772 }
773
774 if (index_mode) {
775 - if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
776 + if (guest_gma >= I915_GTT_PAGE_SIZE) {
777 ret = -EFAULT;
778 goto err;
779 }
780 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
781 index b1c31967194b..489c1e656ff6 100644
782 --- a/drivers/gpu/drm/i915/i915_drv.h
783 +++ b/drivers/gpu/drm/i915/i915_drv.h
784 @@ -2293,7 +2293,8 @@ intel_info(const struct drm_i915_private *dev_priv)
785 INTEL_DEVID(dev_priv) == 0x5915 || \
786 INTEL_DEVID(dev_priv) == 0x591E)
787 #define IS_AML_ULX(dev_priv) (INTEL_DEVID(dev_priv) == 0x591C || \
788 - INTEL_DEVID(dev_priv) == 0x87C0)
789 + INTEL_DEVID(dev_priv) == 0x87C0 || \
790 + INTEL_DEVID(dev_priv) == 0x87CA)
791 #define IS_SKL_GT2(dev_priv) (IS_SKYLAKE(dev_priv) && \
792 (dev_priv)->info.gt == 2)
793 #define IS_SKL_GT3(dev_priv) (IS_SKYLAKE(dev_priv) && \
794 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
795 index 067054cf4a86..60bed3f27775 100644
796 --- a/drivers/gpu/drm/i915/i915_reg.h
797 +++ b/drivers/gpu/drm/i915/i915_reg.h
798 @@ -9205,7 +9205,7 @@ enum skl_power_gate {
799 #define TRANS_DDI_FUNC_CTL2(tran) _MMIO_TRANS2(tran, \
800 _TRANS_DDI_FUNC_CTL2_A)
801 #define PORT_SYNC_MODE_ENABLE (1 << 4)
802 -#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) < 0)
803 +#define PORT_SYNC_MODE_MASTER_SELECT(x) ((x) << 0)
804 #define PORT_SYNC_MODE_MASTER_SELECT_MASK (0x7 << 0)
805 #define PORT_SYNC_MODE_MASTER_SELECT_SHIFT 0
806
807 diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
808 index fb70fb486fbf..cdbb47566cac 100644
809 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
810 +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
811 @@ -511,6 +511,18 @@ static void vop_core_clks_disable(struct vop *vop)
812 clk_disable(vop->hclk);
813 }
814
815 +static void vop_win_disable(struct vop *vop, const struct vop_win_data *win)
816 +{
817 + if (win->phy->scl && win->phy->scl->ext) {
818 + VOP_SCL_SET_EXT(vop, win, yrgb_hor_scl_mode, SCALE_NONE);
819 + VOP_SCL_SET_EXT(vop, win, yrgb_ver_scl_mode, SCALE_NONE);
820 + VOP_SCL_SET_EXT(vop, win, cbcr_hor_scl_mode, SCALE_NONE);
821 + VOP_SCL_SET_EXT(vop, win, cbcr_ver_scl_mode, SCALE_NONE);
822 + }
823 +
824 + VOP_WIN_SET(vop, win, enable, 0);
825 +}
826 +
827 static int vop_enable(struct drm_crtc *crtc)
828 {
829 struct vop *vop = to_vop(crtc);
830 @@ -556,7 +568,7 @@ static int vop_enable(struct drm_crtc *crtc)
831 struct vop_win *vop_win = &vop->win[i];
832 const struct vop_win_data *win = vop_win->data;
833
834 - VOP_WIN_SET(vop, win, enable, 0);
835 + vop_win_disable(vop, win);
836 }
837 spin_unlock(&vop->reg_lock);
838
839 @@ -700,7 +712,7 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
840
841 spin_lock(&vop->reg_lock);
842
843 - VOP_WIN_SET(vop, win, enable, 0);
844 + vop_win_disable(vop, win);
845
846 spin_unlock(&vop->reg_lock);
847 }
848 @@ -1476,7 +1488,7 @@ static int vop_initial(struct vop *vop)
849 int channel = i * 2 + 1;
850
851 VOP_WIN_SET(vop, win, channel, (channel + 1) << 4 | channel);
852 - VOP_WIN_SET(vop, win, enable, 0);
853 + vop_win_disable(vop, win);
854 VOP_WIN_SET(vop, win, gate, 1);
855 }
856
857 diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
858 index 5930facd6d2d..11a8f99ba18c 100644
859 --- a/drivers/gpu/drm/vgem/vgem_drv.c
860 +++ b/drivers/gpu/drm/vgem/vgem_drv.c
861 @@ -191,13 +191,9 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
862 ret = drm_gem_handle_create(file, &obj->base, handle);
863 drm_gem_object_put_unlocked(&obj->base);
864 if (ret)
865 - goto err;
866 + return ERR_PTR(ret);
867
868 return &obj->base;
869 -
870 -err:
871 - __vgem_gem_destroy(obj);
872 - return ERR_PTR(ret);
873 }
874
875 static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
876 diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
877 index 138b0bb325cf..69048e73377d 100644
878 --- a/drivers/gpu/drm/vkms/vkms_gem.c
879 +++ b/drivers/gpu/drm/vkms/vkms_gem.c
880 @@ -111,11 +111,8 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
881
882 ret = drm_gem_handle_create(file, &obj->gem, handle);
883 drm_gem_object_put_unlocked(&obj->gem);
884 - if (ret) {
885 - drm_gem_object_release(&obj->gem);
886 - kfree(obj);
887 + if (ret)
888 return ERR_PTR(ret);
889 - }
890
891 return &obj->gem;
892 }
893 diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
894 index cec29bf45c9b..1b9e40a203e0 100644
895 --- a/drivers/iommu/io-pgtable-arm-v7s.c
896 +++ b/drivers/iommu/io-pgtable-arm-v7s.c
897 @@ -161,6 +161,14 @@
898
899 #define ARM_V7S_TCR_PD1 BIT(5)
900
901 +#ifdef CONFIG_ZONE_DMA32
902 +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
903 +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
904 +#else
905 +#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
906 +#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
907 +#endif
908 +
909 typedef u32 arm_v7s_iopte;
910
911 static bool selftest_running;
912 @@ -198,13 +206,16 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
913 void *table = NULL;
914
915 if (lvl == 1)
916 - table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size));
917 + table = (void *)__get_free_pages(
918 + __GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
919 else if (lvl == 2)
920 - table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA);
921 + table = kmem_cache_zalloc(data->l2_tables, gfp);
922 phys = virt_to_phys(table);
923 - if (phys != (arm_v7s_iopte)phys)
924 + if (phys != (arm_v7s_iopte)phys) {
925 /* Doesn't fit in PTE */
926 + dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
927 goto out_free;
928 + }
929 if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
930 dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
931 if (dma_mapping_error(dev, dma))
932 @@ -733,7 +744,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
933 data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
934 ARM_V7S_TABLE_SIZE(2),
935 ARM_V7S_TABLE_SIZE(2),
936 - SLAB_CACHE_DMA, NULL);
937 + ARM_V7S_TABLE_SLAB_FLAGS, NULL);
938 if (!data->l2_tables)
939 goto out_free_data;
940
941 diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
942 index 4d85645c87f7..0928fd1f0e0c 100644
943 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c
944 +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
945 @@ -4365,7 +4365,8 @@ setup_pci(struct hfc_multi *hc, struct pci_dev *pdev,
946 if (m->clock2)
947 test_and_set_bit(HFC_CHIP_CLOCK2, &hc->chip);
948
949 - if (ent->device == 0xB410) {
950 + if (ent->vendor == PCI_VENDOR_ID_DIGIUM &&
951 + ent->device == PCI_DEVICE_ID_DIGIUM_HFC4S) {
952 test_and_set_bit(HFC_CHIP_B410P, &hc->chip);
953 test_and_set_bit(HFC_CHIP_PCM_MASTER, &hc->chip);
954 test_and_clear_bit(HFC_CHIP_PCM_SLAVE, &hc->chip);
955 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
956 index 21bf8ac78380..390e896dadc7 100644
957 --- a/drivers/net/Kconfig
958 +++ b/drivers/net/Kconfig
959 @@ -213,8 +213,8 @@ config GENEVE
960
961 config GTP
962 tristate "GPRS Tunneling Protocol datapath (GTP-U)"
963 - depends on INET && NET_UDP_TUNNEL
964 - select NET_IP_TUNNEL
965 + depends on INET
966 + select NET_UDP_TUNNEL
967 ---help---
968 This allows one to create gtp virtual interfaces that provide
969 the GPRS Tunneling Protocol datapath (GTP-U). This tunneling protocol
970 diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
971 index 5e921bb6c214..41eee62fed25 100644
972 --- a/drivers/net/dsa/mv88e6xxx/port.c
973 +++ b/drivers/net/dsa/mv88e6xxx/port.c
974 @@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
975 return 0;
976
977 lane = mv88e6390x_serdes_get_lane(chip, port);
978 - if (lane < 0)
979 + if (lane < 0 && lane != -ENODEV)
980 return lane;
981
982 - if (chip->ports[port].serdes_irq) {
983 - err = mv88e6390_serdes_irq_disable(chip, port, lane);
984 + if (lane >= 0) {
985 + if (chip->ports[port].serdes_irq) {
986 + err = mv88e6390_serdes_irq_disable(chip, port, lane);
987 + if (err)
988 + return err;
989 + }
990 +
991 + err = mv88e6390x_serdes_power(chip, port, false);
992 if (err)
993 return err;
994 }
995
996 - err = mv88e6390x_serdes_power(chip, port, false);
997 - if (err)
998 - return err;
999 + chip->ports[port].cmode = 0;
1000
1001 if (cmode) {
1002 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
1003 @@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
1004 if (err)
1005 return err;
1006
1007 + chip->ports[port].cmode = cmode;
1008 +
1009 + lane = mv88e6390x_serdes_get_lane(chip, port);
1010 + if (lane < 0)
1011 + return lane;
1012 +
1013 err = mv88e6390x_serdes_power(chip, port, true);
1014 if (err)
1015 return err;
1016 @@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
1017 }
1018 }
1019
1020 - chip->ports[port].cmode = cmode;
1021 -
1022 return 0;
1023 }
1024
1025 diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
1026 index 7e97e620bd44..a26850c888cf 100644
1027 --- a/drivers/net/dsa/qca8k.c
1028 +++ b/drivers/net/dsa/qca8k.c
1029 @@ -620,22 +620,6 @@ qca8k_adjust_link(struct dsa_switch *ds, int port, struct phy_device *phy)
1030 qca8k_port_set_status(priv, port, 1);
1031 }
1032
1033 -static int
1034 -qca8k_phy_read(struct dsa_switch *ds, int phy, int regnum)
1035 -{
1036 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1037 -
1038 - return mdiobus_read(priv->bus, phy, regnum);
1039 -}
1040 -
1041 -static int
1042 -qca8k_phy_write(struct dsa_switch *ds, int phy, int regnum, u16 val)
1043 -{
1044 - struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
1045 -
1046 - return mdiobus_write(priv->bus, phy, regnum, val);
1047 -}
1048 -
1049 static void
1050 qca8k_get_strings(struct dsa_switch *ds, int port, u32 stringset, uint8_t *data)
1051 {
1052 @@ -876,8 +860,6 @@ static const struct dsa_switch_ops qca8k_switch_ops = {
1053 .setup = qca8k_setup,
1054 .adjust_link = qca8k_adjust_link,
1055 .get_strings = qca8k_get_strings,
1056 - .phy_read = qca8k_phy_read,
1057 - .phy_write = qca8k_phy_write,
1058 .get_ethtool_stats = qca8k_get_ethtool_stats,
1059 .get_sset_count = qca8k_get_sset_count,
1060 .get_mac_eee = qca8k_get_mac_eee,
1061 diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
1062 index 342ae08ec3c2..d60a86aa8aa8 100644
1063 --- a/drivers/net/ethernet/8390/mac8390.c
1064 +++ b/drivers/net/ethernet/8390/mac8390.c
1065 @@ -153,8 +153,6 @@ static void dayna_block_input(struct net_device *dev, int count,
1066 static void dayna_block_output(struct net_device *dev, int count,
1067 const unsigned char *buf, int start_page);
1068
1069 -#define memcmp_withio(a, b, c) memcmp((a), (void *)(b), (c))
1070 -
1071 /* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
1072 static void slow_sane_get_8390_hdr(struct net_device *dev,
1073 struct e8390_pkt_hdr *hdr, int ring_page);
1074 @@ -233,19 +231,26 @@ static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
1075
1076 static enum mac8390_access mac8390_testio(unsigned long membase)
1077 {
1078 - unsigned long outdata = 0xA5A0B5B0;
1079 - unsigned long indata = 0x00000000;
1080 + u32 outdata = 0xA5A0B5B0;
1081 + u32 indata = 0;
1082 +
1083 /* Try writing 32 bits */
1084 - memcpy_toio((void __iomem *)membase, &outdata, 4);
1085 - /* Now compare them */
1086 - if (memcmp_withio(&outdata, membase, 4) == 0)
1087 + nubus_writel(outdata, membase);
1088 + /* Now read it back */
1089 + indata = nubus_readl(membase);
1090 + if (outdata == indata)
1091 return ACCESS_32;
1092 +
1093 + outdata = 0xC5C0D5D0;
1094 + indata = 0;
1095 +
1096 /* Write 16 bit output */
1097 word_memcpy_tocard(membase, &outdata, 4);
1098 /* Now read it back */
1099 word_memcpy_fromcard(&indata, membase, 4);
1100 if (outdata == indata)
1101 return ACCESS_16;
1102 +
1103 return ACCESS_UNKNOWN;
1104 }
1105
1106 diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1107 index 74550ccc7a20..e2ffb159cbe2 100644
1108 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1109 +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
1110 @@ -186,11 +186,12 @@ static void aq_rx_checksum(struct aq_ring_s *self,
1111 }
1112 if (buff->is_ip_cso) {
1113 __skb_incr_checksum_unnecessary(skb);
1114 - if (buff->is_udp_cso || buff->is_tcp_cso)
1115 - __skb_incr_checksum_unnecessary(skb);
1116 } else {
1117 skb->ip_summed = CHECKSUM_NONE;
1118 }
1119 +
1120 + if (buff->is_udp_cso || buff->is_tcp_cso)
1121 + __skb_incr_checksum_unnecessary(skb);
1122 }
1123
1124 #define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
1125 diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1126 index 5b4d3badcb73..e246f9733bb8 100644
1127 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1128 +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
1129 @@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
1130 /* Check if page can be recycled */
1131 if (page) {
1132 ref_count = page_ref_count(page);
1133 - /* Check if this page has been used once i.e 'put_page'
1134 - * called after packet transmission i.e internal ref_count
1135 - * and page's ref_count are equal i.e page can be recycled.
1136 + /* This page can be recycled if internal ref_count and page's
1137 + * ref_count are equal, indicating that the page has been used
1138 + * once for packet transmission. For non-XDP mode, internal
1139 + * ref_count is always '1'.
1140 */
1141 - if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
1142 - pgcache->ref_count--;
1143 - else
1144 - page = NULL;
1145 -
1146 - /* In non-XDP mode, page's ref_count needs to be '1' for it
1147 - * to be recycled.
1148 - */
1149 - if (!rbdr->is_xdp && (ref_count != 1))
1150 + if (rbdr->is_xdp) {
1151 + if (ref_count == pgcache->ref_count)
1152 + pgcache->ref_count--;
1153 + else
1154 + page = NULL;
1155 + } else if (ref_count != 1) {
1156 page = NULL;
1157 + }
1158 }
1159
1160 if (!page) {
1161 @@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
1162 while (head < rbdr->pgcnt) {
1163 pgcache = &rbdr->pgcache[head];
1164 if (pgcache->page && page_ref_count(pgcache->page) != 0) {
1165 - if (!rbdr->is_xdp) {
1166 - put_page(pgcache->page);
1167 - continue;
1168 + if (rbdr->is_xdp) {
1169 + page_ref_sub(pgcache->page,
1170 + pgcache->ref_count - 1);
1171 }
1172 - page_ref_sub(pgcache->page, pgcache->ref_count - 1);
1173 put_page(pgcache->page);
1174 }
1175 head++;
1176 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1177 index 6e36b88ca7c9..f55d177ae894 100644
1178 --- a/drivers/net/ethernet/realtek/r8169.c
1179 +++ b/drivers/net/ethernet/realtek/r8169.c
1180 @@ -6435,7 +6435,7 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
1181 set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
1182 }
1183
1184 - if (status & RTL_EVENT_NAPI) {
1185 + if (status & (RTL_EVENT_NAPI | LinkChg)) {
1186 rtl_irq_disable(tp);
1187 napi_schedule_irqoff(&tp->napi);
1188 }
1189 diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1190 index d8c5bc412219..c0c75c111abb 100644
1191 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1192 +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1193 @@ -111,10 +111,11 @@ static unsigned int is_jumbo_frm(int len, int enh_desc)
1194
1195 static void refill_desc3(void *priv_ptr, struct dma_desc *p)
1196 {
1197 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
1198 + struct stmmac_rx_queue *rx_q = priv_ptr;
1199 + struct stmmac_priv *priv = rx_q->priv_data;
1200
1201 /* Fill DES3 in case of RING mode */
1202 - if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
1203 + if (priv->dma_buf_sz == BUF_SIZE_16KiB)
1204 p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
1205 }
1206
1207 diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c
1208 index 3ddaf9595697..68af4c75ffb3 100644
1209 --- a/drivers/net/phy/meson-gxl.c
1210 +++ b/drivers/net/phy/meson-gxl.c
1211 @@ -211,6 +211,7 @@ static int meson_gxl_ack_interrupt(struct phy_device *phydev)
1212 static int meson_gxl_config_intr(struct phy_device *phydev)
1213 {
1214 u16 val;
1215 + int ret;
1216
1217 if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
1218 val = INTSRC_ANEG_PR
1219 @@ -223,6 +224,11 @@ static int meson_gxl_config_intr(struct phy_device *phydev)
1220 val = 0;
1221 }
1222
1223 + /* Ack any pending IRQ */
1224 + ret = meson_gxl_ack_interrupt(phydev);
1225 + if (ret)
1226 + return ret;
1227 +
1228 return phy_write(phydev, INTSRC_MASK, val);
1229 }
1230
1231 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1232 index 46c86725a693..739434fe04fa 100644
1233 --- a/drivers/net/phy/phy_device.c
1234 +++ b/drivers/net/phy/phy_device.c
1235 @@ -1827,7 +1827,7 @@ int genphy_soft_reset(struct phy_device *phydev)
1236 {
1237 int ret;
1238
1239 - ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
1240 + ret = phy_set_bits(phydev, MII_BMCR, BMCR_RESET);
1241 if (ret < 0)
1242 return ret;
1243
1244 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
1245 index 53f4f37b0ffd..448d5439ff6a 100644
1246 --- a/drivers/net/tun.c
1247 +++ b/drivers/net/tun.c
1248 @@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1249 int skb_xdp = 1;
1250 bool frags = tun_napi_frags_enabled(tfile);
1251
1252 - if (!(tun->dev->flags & IFF_UP))
1253 - return -EIO;
1254 -
1255 if (!(tun->flags & IFF_NO_PI)) {
1256 if (len < sizeof(pi))
1257 return -EINVAL;
1258 @@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1259 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1260
1261 if (err) {
1262 + err = -EFAULT;
1263 +drop:
1264 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1265 kfree_skb(skb);
1266 if (frags) {
1267 @@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1268 mutex_unlock(&tfile->napi_mutex);
1269 }
1270
1271 - return -EFAULT;
1272 + return err;
1273 }
1274 }
1275
1276 @@ -1958,6 +1957,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1277 !tfile->detached)
1278 rxhash = __skb_get_hash_symmetric(skb);
1279
1280 + rcu_read_lock();
1281 + if (unlikely(!(tun->dev->flags & IFF_UP))) {
1282 + err = -EIO;
1283 + rcu_read_unlock();
1284 + goto drop;
1285 + }
1286 +
1287 if (frags) {
1288 /* Exercise flow dissector code path. */
1289 u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
1290 @@ -1965,6 +1971,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1291 if (unlikely(headlen > skb_headlen(skb))) {
1292 this_cpu_inc(tun->pcpu_stats->rx_dropped);
1293 napi_free_frags(&tfile->napi);
1294 + rcu_read_unlock();
1295 mutex_unlock(&tfile->napi_mutex);
1296 WARN_ON(1);
1297 return -ENOMEM;
1298 @@ -1992,6 +1999,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1299 } else {
1300 netif_rx_ni(skb);
1301 }
1302 + rcu_read_unlock();
1303
1304 stats = get_cpu_ptr(tun->pcpu_stats);
1305 u64_stats_update_begin(&stats->syncp);
1306 diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
1307 index 820a2fe7d027..aff995be2a31 100644
1308 --- a/drivers/net/usb/aqc111.c
1309 +++ b/drivers/net/usb/aqc111.c
1310 @@ -1301,6 +1301,20 @@ static const struct driver_info trendnet_info = {
1311 .tx_fixup = aqc111_tx_fixup,
1312 };
1313
1314 +static const struct driver_info qnap_info = {
1315 + .description = "QNAP QNA-UC5G1T USB to 5GbE Adapter",
1316 + .bind = aqc111_bind,
1317 + .unbind = aqc111_unbind,
1318 + .status = aqc111_status,
1319 + .link_reset = aqc111_link_reset,
1320 + .reset = aqc111_reset,
1321 + .stop = aqc111_stop,
1322 + .flags = FLAG_ETHER | FLAG_FRAMING_AX |
1323 + FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET,
1324 + .rx_fixup = aqc111_rx_fixup,
1325 + .tx_fixup = aqc111_tx_fixup,
1326 +};
1327 +
1328 static int aqc111_suspend(struct usb_interface *intf, pm_message_t message)
1329 {
1330 struct usbnet *dev = usb_get_intfdata(intf);
1331 @@ -1455,6 +1469,7 @@ static const struct usb_device_id products[] = {
1332 {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)},
1333 {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)},
1334 {AQC111_USB_ETH_DEV(0x20f4, 0xe05a, trendnet_info)},
1335 + {AQC111_USB_ETH_DEV(0x1c04, 0x0015, qnap_info)},
1336 { },/* END */
1337 };
1338 MODULE_DEVICE_TABLE(usb, products);
1339 diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
1340 index 5512a1038721..3e9b2c319e45 100644
1341 --- a/drivers/net/usb/cdc_ether.c
1342 +++ b/drivers/net/usb/cdc_ether.c
1343 @@ -851,6 +851,14 @@ static const struct usb_device_id products[] = {
1344 .driver_info = 0,
1345 },
1346
1347 +/* QNAP QNA-UC5G1T USB to 5GbE Adapter (based on AQC111U) */
1348 +{
1349 + USB_DEVICE_AND_INTERFACE_INFO(0x1c04, 0x0015, USB_CLASS_COMM,
1350 + USB_CDC_SUBCLASS_ETHERNET,
1351 + USB_CDC_PROTO_NONE),
1352 + .driver_info = 0,
1353 +},
1354 +
1355 /* WHITELIST!!!
1356 *
1357 * CDC Ether uses two interfaces, not necessarily consecutive.
1358 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
1359 index 7c1430ed0244..6d1a1abbed27 100644
1360 --- a/drivers/net/vrf.c
1361 +++ b/drivers/net/vrf.c
1362 @@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev)
1363
1364 /* default to no qdisc; user can add if desired */
1365 dev->priv_flags |= IFF_NO_QUEUE;
1366 + dev->priv_flags |= IFF_NO_RX_HANDLER;
1367
1368 dev->min_mtu = 0;
1369 dev->max_mtu = 0;
1370 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1371 index d6fb6a89f9b3..5006daed2e96 100644
1372 --- a/drivers/net/vxlan.c
1373 +++ b/drivers/net/vxlan.c
1374 @@ -4184,10 +4184,8 @@ static void vxlan_destroy_tunnels(struct net *net, struct list_head *head)
1375 /* If vxlan->dev is in the same netns, it has already been added
1376 * to the list by the previous loop.
1377 */
1378 - if (!net_eq(dev_net(vxlan->dev), net)) {
1379 - gro_cells_destroy(&vxlan->gro_cells);
1380 + if (!net_eq(dev_net(vxlan->dev), net))
1381 unregister_netdevice_queue(vxlan->dev, head);
1382 - }
1383 }
1384
1385 for (h = 0; h < PORT_HASH_SIZE; ++h)
1386 diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
1387 index 5cd508a68609..6d29ba4046c3 100644
1388 --- a/drivers/net/wireless/mediatek/mt76/mt76.h
1389 +++ b/drivers/net/wireless/mediatek/mt76/mt76.h
1390 @@ -713,6 +713,19 @@ static inline bool mt76u_check_sg(struct mt76_dev *dev)
1391 udev->speed == USB_SPEED_WIRELESS));
1392 }
1393
1394 +static inline int
1395 +mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int timeout)
1396 +{
1397 + struct usb_interface *intf = to_usb_interface(dev->dev);
1398 + struct usb_device *udev = interface_to_usbdev(intf);
1399 + struct mt76_usb *usb = &dev->usb;
1400 + unsigned int pipe;
1401 + int sent;
1402 +
1403 + pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
1404 + return usb_bulk_msg(udev, pipe, data, len, &sent, timeout);
1405 +}
1406 +
1407 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
1408 u8 req_type, u16 val, u16 offset,
1409 void *buf, size_t len);
1410 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
1411 index 6db789f90269..2ca393e267af 100644
1412 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
1413 +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c
1414 @@ -121,18 +121,14 @@ static int
1415 __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
1416 int cmd, bool wait_resp)
1417 {
1418 - struct usb_interface *intf = to_usb_interface(dev->dev);
1419 - struct usb_device *udev = interface_to_usbdev(intf);
1420 struct mt76_usb *usb = &dev->usb;
1421 - unsigned int pipe;
1422 - int ret, sent;
1423 + int ret;
1424 u8 seq = 0;
1425 u32 info;
1426
1427 if (test_bit(MT76_REMOVED, &dev->state))
1428 return 0;
1429
1430 - pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
1431 if (wait_resp) {
1432 seq = ++usb->mcu.msg_seq & 0xf;
1433 if (!seq)
1434 @@ -146,7 +142,7 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb,
1435 if (ret)
1436 return ret;
1437
1438 - ret = usb_bulk_msg(udev, pipe, skb->data, skb->len, &sent, 500);
1439 + ret = mt76u_bulk_msg(dev, skb->data, skb->len, 500);
1440 if (ret)
1441 return ret;
1442
1443 @@ -268,14 +264,12 @@ void mt76x02u_mcu_fw_reset(struct mt76x02_dev *dev)
1444 EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_reset);
1445
1446 static int
1447 -__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
1448 +__mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, u8 *data,
1449 const void *fw_data, int len, u32 dst_addr)
1450 {
1451 - u8 *data = sg_virt(&buf->urb->sg[0]);
1452 - DECLARE_COMPLETION_ONSTACK(cmpl);
1453 __le32 info;
1454 u32 val;
1455 - int err;
1456 + int err, data_len;
1457
1458 info = cpu_to_le32(FIELD_PREP(MT_MCU_MSG_PORT, CPU_TX_PORT) |
1459 FIELD_PREP(MT_MCU_MSG_LEN, len) |
1460 @@ -291,25 +285,12 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
1461 mt76u_single_wr(&dev->mt76, MT_VEND_WRITE_FCE,
1462 MT_FCE_DMA_LEN, len << 16);
1463
1464 - buf->len = MT_CMD_HDR_LEN + len + sizeof(info);
1465 - err = mt76u_submit_buf(&dev->mt76, USB_DIR_OUT,
1466 - MT_EP_OUT_INBAND_CMD,
1467 - buf, GFP_KERNEL,
1468 - mt76u_mcu_complete_urb, &cmpl);
1469 - if (err < 0)
1470 - return err;
1471 -
1472 - if (!wait_for_completion_timeout(&cmpl,
1473 - msecs_to_jiffies(1000))) {
1474 - dev_err(dev->mt76.dev, "firmware upload timed out\n");
1475 - usb_kill_urb(buf->urb);
1476 - return -ETIMEDOUT;
1477 - }
1478 + data_len = MT_CMD_HDR_LEN + len + sizeof(info);
1479
1480 - if (mt76u_urb_error(buf->urb)) {
1481 - dev_err(dev->mt76.dev, "firmware upload failed: %d\n",
1482 - buf->urb->status);
1483 - return buf->urb->status;
1484 + err = mt76u_bulk_msg(&dev->mt76, data, data_len, 1000);
1485 + if (err) {
1486 + dev_err(dev->mt76.dev, "firmware upload failed: %d\n", err);
1487 + return err;
1488 }
1489
1490 val = mt76_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX);
1491 @@ -322,17 +303,16 @@ __mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, struct mt76u_buf *buf,
1492 int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
1493 int data_len, u32 max_payload, u32 offset)
1494 {
1495 - int err, len, pos = 0, max_len = max_payload - 8;
1496 - struct mt76u_buf buf;
1497 + int len, err = 0, pos = 0, max_len = max_payload - 8;
1498 + u8 *buf;
1499
1500 - err = mt76u_buf_alloc(&dev->mt76, &buf, 1, max_payload, max_payload,
1501 - GFP_KERNEL);
1502 - if (err < 0)
1503 - return err;
1504 + buf = kmalloc(max_payload, GFP_KERNEL);
1505 + if (!buf)
1506 + return -ENOMEM;
1507
1508 while (data_len > 0) {
1509 len = min_t(int, data_len, max_len);
1510 - err = __mt76x02u_mcu_fw_send_data(dev, &buf, data + pos,
1511 + err = __mt76x02u_mcu_fw_send_data(dev, buf, data + pos,
1512 len, offset + pos);
1513 if (err < 0)
1514 break;
1515 @@ -341,7 +321,7 @@ int mt76x02u_mcu_fw_send_data(struct mt76x02_dev *dev, const void *data,
1516 pos += len;
1517 usleep_range(5000, 10000);
1518 }
1519 - mt76u_buf_free(&buf);
1520 + kfree(buf);
1521
1522 return err;
1523 }
1524 diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
1525 index b061263453d4..09923cedd039 100644
1526 --- a/drivers/net/wireless/mediatek/mt76/usb.c
1527 +++ b/drivers/net/wireless/mediatek/mt76/usb.c
1528 @@ -326,7 +326,6 @@ int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf,
1529
1530 return mt76u_fill_rx_sg(dev, buf, nsgs, len, sglen);
1531 }
1532 -EXPORT_SYMBOL_GPL(mt76u_buf_alloc);
1533
1534 void mt76u_buf_free(struct mt76u_buf *buf)
1535 {
1536 diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
1537 index 5163097b43df..4bbd9ede38c8 100644
1538 --- a/drivers/phy/allwinner/phy-sun4i-usb.c
1539 +++ b/drivers/phy/allwinner/phy-sun4i-usb.c
1540 @@ -485,8 +485,11 @@ static int sun4i_usb_phy_set_mode(struct phy *_phy,
1541 struct sun4i_usb_phy_data *data = to_sun4i_usb_phy_data(phy);
1542 int new_mode;
1543
1544 - if (phy->index != 0)
1545 + if (phy->index != 0) {
1546 + if (mode == PHY_MODE_USB_HOST)
1547 + return 0;
1548 return -EINVAL;
1549 + }
1550
1551 switch (mode) {
1552 case PHY_MODE_USB_HOST:
1553 diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
1554 index a10cec0e86eb..0b3b9de45c60 100644
1555 --- a/drivers/s390/cio/vfio_ccw_drv.c
1556 +++ b/drivers/s390/cio/vfio_ccw_drv.c
1557 @@ -72,20 +72,24 @@ static void vfio_ccw_sch_io_todo(struct work_struct *work)
1558 {
1559 struct vfio_ccw_private *private;
1560 struct irb *irb;
1561 + bool is_final;
1562
1563 private = container_of(work, struct vfio_ccw_private, io_work);
1564 irb = &private->irb;
1565
1566 + is_final = !(scsw_actl(&irb->scsw) &
1567 + (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
1568 if (scsw_is_solicited(&irb->scsw)) {
1569 cp_update_scsw(&private->cp, &irb->scsw);
1570 - cp_free(&private->cp);
1571 + if (is_final)
1572 + cp_free(&private->cp);
1573 }
1574 memcpy(private->io_region->irb_area, irb, sizeof(*irb));
1575
1576 if (private->io_trigger)
1577 eventfd_signal(private->io_trigger, 1);
1578
1579 - if (private->mdev)
1580 + if (private->mdev && is_final)
1581 private->state = VFIO_CCW_STATE_IDLE;
1582 }
1583
1584 diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
1585 index 744a64680d5b..e8fc28dba8df 100644
1586 --- a/drivers/s390/scsi/zfcp_erp.c
1587 +++ b/drivers/s390/scsi/zfcp_erp.c
1588 @@ -624,6 +624,20 @@ static void zfcp_erp_strategy_memwait(struct zfcp_erp_action *erp_action)
1589 add_timer(&erp_action->timer);
1590 }
1591
1592 +void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
1593 + int clear, char *dbftag)
1594 +{
1595 + unsigned long flags;
1596 + struct zfcp_port *port;
1597 +
1598 + write_lock_irqsave(&adapter->erp_lock, flags);
1599 + read_lock(&adapter->port_list_lock);
1600 + list_for_each_entry(port, &adapter->port_list, list)
1601 + _zfcp_erp_port_forced_reopen(port, clear, dbftag);
1602 + read_unlock(&adapter->port_list_lock);
1603 + write_unlock_irqrestore(&adapter->erp_lock, flags);
1604 +}
1605 +
1606 static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
1607 int clear, char *dbftag)
1608 {
1609 @@ -1341,6 +1355,9 @@ static void zfcp_erp_try_rport_unblock(struct zfcp_port *port)
1610 struct zfcp_scsi_dev *zsdev = sdev_to_zfcp(sdev);
1611 int lun_status;
1612
1613 + if (sdev->sdev_state == SDEV_DEL ||
1614 + sdev->sdev_state == SDEV_CANCEL)
1615 + continue;
1616 if (zsdev->port != port)
1617 continue;
1618 /* LUN under port of interest */
1619 diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
1620 index 3fce47b0b21b..c6acca521ffe 100644
1621 --- a/drivers/s390/scsi/zfcp_ext.h
1622 +++ b/drivers/s390/scsi/zfcp_ext.h
1623 @@ -70,6 +70,8 @@ extern void zfcp_erp_port_reopen(struct zfcp_port *port, int clear,
1624 char *dbftag);
1625 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *);
1626 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *);
1627 +extern void zfcp_erp_port_forced_reopen_all(struct zfcp_adapter *adapter,
1628 + int clear, char *dbftag);
1629 extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
1630 extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
1631 extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *);
1632 diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
1633 index f4f6a07c5222..221d0dfb8493 100644
1634 --- a/drivers/s390/scsi/zfcp_scsi.c
1635 +++ b/drivers/s390/scsi/zfcp_scsi.c
1636 @@ -368,6 +368,10 @@ static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
1637 struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
1638 int ret = SUCCESS, fc_ret;
1639
1640 + if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
1641 + zfcp_erp_port_forced_reopen_all(adapter, 0, "schrh_p");
1642 + zfcp_erp_wait(adapter);
1643 + }
1644 zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
1645 zfcp_erp_wait(adapter);
1646 fc_ret = fc_block_scsi_eh(scpnt);
1647 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
1648 index b84099479fe0..d64553c0a051 100644
1649 --- a/drivers/scsi/sd.c
1650 +++ b/drivers/scsi/sd.c
1651 @@ -1398,11 +1398,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode)
1652 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW);
1653 }
1654
1655 - /*
1656 - * XXX and what if there are packets in flight and this close()
1657 - * XXX is followed by a "rmmod sd_mod"?
1658 - */
1659 -
1660 scsi_disk_put(sdkp);
1661 }
1662
1663 @@ -3059,6 +3054,9 @@ static bool sd_validate_opt_xfer_size(struct scsi_disk *sdkp,
1664 unsigned int opt_xfer_bytes =
1665 logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
1666
1667 + if (sdkp->opt_xfer_blocks == 0)
1668 + return false;
1669 +
1670 if (sdkp->opt_xfer_blocks > dev_max) {
1671 sd_first_printk(KERN_WARNING, sdkp,
1672 "Optimal transfer size %u logical blocks " \
1673 @@ -3488,9 +3486,21 @@ static void scsi_disk_release(struct device *dev)
1674 {
1675 struct scsi_disk *sdkp = to_scsi_disk(dev);
1676 struct gendisk *disk = sdkp->disk;
1677 -
1678 + struct request_queue *q = disk->queue;
1679 +
1680 ida_free(&sd_index_ida, sdkp->index);
1681
1682 + /*
1683 + * Wait until all requests that are in progress have completed.
1684 + * This is necessary to avoid that e.g. scsi_end_request() crashes
1685 + * due to clearing the disk->private_data pointer. Wait from inside
1686 + * scsi_disk_release() instead of from sd_release() to avoid that
1687 + * freezing and unfreezing the request queue affects user space I/O
1688 + * in case multiple processes open a /dev/sd... node concurrently.
1689 + */
1690 + blk_mq_freeze_queue(q);
1691 + blk_mq_unfreeze_queue(q);
1692 +
1693 disk->private_data = NULL;
1694 put_disk(disk);
1695 put_device(&sdkp->device->sdev_gendev);
1696 diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
1697 index a7d569cfca5d..0dff1ac057cd 100644
1698 --- a/drivers/staging/comedi/comedidev.h
1699 +++ b/drivers/staging/comedi/comedidev.h
1700 @@ -1001,6 +1001,8 @@ int comedi_dio_insn_config(struct comedi_device *dev,
1701 unsigned int mask);
1702 unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
1703 unsigned int *data);
1704 +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
1705 + struct comedi_cmd *cmd);
1706 unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s);
1707 unsigned int comedi_nscans_left(struct comedi_subdevice *s,
1708 unsigned int nscans);
1709 diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
1710 index eefa62f42c0f..5a32b8fc000e 100644
1711 --- a/drivers/staging/comedi/drivers.c
1712 +++ b/drivers/staging/comedi/drivers.c
1713 @@ -394,11 +394,13 @@ unsigned int comedi_dio_update_state(struct comedi_subdevice *s,
1714 EXPORT_SYMBOL_GPL(comedi_dio_update_state);
1715
1716 /**
1717 - * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
1718 + * comedi_bytes_per_scan_cmd() - Get length of asynchronous command "scan" in
1719 + * bytes
1720 * @s: COMEDI subdevice.
1721 + * @cmd: COMEDI command.
1722 *
1723 * Determines the overall scan length according to the subdevice type and the
1724 - * number of channels in the scan.
1725 + * number of channels in the scan for the specified command.
1726 *
1727 * For digital input, output or input/output subdevices, samples for
1728 * multiple channels are assumed to be packed into one or more unsigned
1729 @@ -408,9 +410,9 @@ EXPORT_SYMBOL_GPL(comedi_dio_update_state);
1730 *
1731 * Returns the overall scan length in bytes.
1732 */
1733 -unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
1734 +unsigned int comedi_bytes_per_scan_cmd(struct comedi_subdevice *s,
1735 + struct comedi_cmd *cmd)
1736 {
1737 - struct comedi_cmd *cmd = &s->async->cmd;
1738 unsigned int num_samples;
1739 unsigned int bits_per_sample;
1740
1741 @@ -427,6 +429,29 @@ unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
1742 }
1743 return comedi_samples_to_bytes(s, num_samples);
1744 }
1745 +EXPORT_SYMBOL_GPL(comedi_bytes_per_scan_cmd);
1746 +
1747 +/**
1748 + * comedi_bytes_per_scan() - Get length of asynchronous command "scan" in bytes
1749 + * @s: COMEDI subdevice.
1750 + *
1751 + * Determines the overall scan length according to the subdevice type and the
1752 + * number of channels in the scan for the current command.
1753 + *
1754 + * For digital input, output or input/output subdevices, samples for
1755 + * multiple channels are assumed to be packed into one or more unsigned
1756 + * short or unsigned int values according to the subdevice's %SDF_LSAMPL
1757 + * flag. For other types of subdevice, samples are assumed to occupy a
1758 + * whole unsigned short or unsigned int according to the %SDF_LSAMPL flag.
1759 + *
1760 + * Returns the overall scan length in bytes.
1761 + */
1762 +unsigned int comedi_bytes_per_scan(struct comedi_subdevice *s)
1763 +{
1764 + struct comedi_cmd *cmd = &s->async->cmd;
1765 +
1766 + return comedi_bytes_per_scan_cmd(s, cmd);
1767 +}
1768 EXPORT_SYMBOL_GPL(comedi_bytes_per_scan);
1769
1770 static unsigned int __comedi_nscans_left(struct comedi_subdevice *s,
1771 diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
1772 index 5edf59ac6706..b04dad8c7092 100644
1773 --- a/drivers/staging/comedi/drivers/ni_mio_common.c
1774 +++ b/drivers/staging/comedi/drivers/ni_mio_common.c
1775 @@ -3545,6 +3545,7 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
1776 struct comedi_subdevice *s, struct comedi_cmd *cmd)
1777 {
1778 struct ni_private *devpriv = dev->private;
1779 + unsigned int bytes_per_scan;
1780 int err = 0;
1781
1782 /* Step 1 : check if triggers are trivially valid */
1783 @@ -3579,9 +3580,12 @@ static int ni_cdio_cmdtest(struct comedi_device *dev,
1784 err |= comedi_check_trigger_arg_is(&cmd->convert_arg, 0);
1785 err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
1786 cmd->chanlist_len);
1787 - err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
1788 - s->async->prealloc_bufsz /
1789 - comedi_bytes_per_scan(s));
1790 + bytes_per_scan = comedi_bytes_per_scan_cmd(s, cmd);
1791 + if (bytes_per_scan) {
1792 + err |= comedi_check_trigger_arg_max(&cmd->stop_arg,
1793 + s->async->prealloc_bufsz /
1794 + bytes_per_scan);
1795 + }
1796
1797 if (err)
1798 return 3;
1799 diff --git a/drivers/staging/erofs/dir.c b/drivers/staging/erofs/dir.c
1800 index 833f052f79d0..b21ed5b4c711 100644
1801 --- a/drivers/staging/erofs/dir.c
1802 +++ b/drivers/staging/erofs/dir.c
1803 @@ -23,6 +23,21 @@ static const unsigned char erofs_filetype_table[EROFS_FT_MAX] = {
1804 [EROFS_FT_SYMLINK] = DT_LNK,
1805 };
1806
1807 +static void debug_one_dentry(unsigned char d_type, const char *de_name,
1808 + unsigned int de_namelen)
1809 +{
1810 +#ifdef CONFIG_EROFS_FS_DEBUG
1811 + /* since the on-disk name could not have the trailing '\0' */
1812 + unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
1813 +
1814 + memcpy(dbg_namebuf, de_name, de_namelen);
1815 + dbg_namebuf[de_namelen] = '\0';
1816 +
1817 + debugln("found dirent %s de_len %u d_type %d", dbg_namebuf,
1818 + de_namelen, d_type);
1819 +#endif
1820 +}
1821 +
1822 static int erofs_fill_dentries(struct dir_context *ctx,
1823 void *dentry_blk, unsigned int *ofs,
1824 unsigned int nameoff, unsigned int maxsize)
1825 @@ -33,14 +48,10 @@ static int erofs_fill_dentries(struct dir_context *ctx,
1826 de = dentry_blk + *ofs;
1827 while (de < end) {
1828 const char *de_name;
1829 - int de_namelen;
1830 + unsigned int de_namelen;
1831 unsigned char d_type;
1832 -#ifdef CONFIG_EROFS_FS_DEBUG
1833 - unsigned int dbg_namelen;
1834 - unsigned char dbg_namebuf[EROFS_NAME_LEN];
1835 -#endif
1836
1837 - if (unlikely(de->file_type < EROFS_FT_MAX))
1838 + if (de->file_type < EROFS_FT_MAX)
1839 d_type = erofs_filetype_table[de->file_type];
1840 else
1841 d_type = DT_UNKNOWN;
1842 @@ -48,26 +59,20 @@ static int erofs_fill_dentries(struct dir_context *ctx,
1843 nameoff = le16_to_cpu(de->nameoff);
1844 de_name = (char *)dentry_blk + nameoff;
1845
1846 - de_namelen = unlikely(de + 1 >= end) ?
1847 - /* last directory entry */
1848 - strnlen(de_name, maxsize - nameoff) :
1849 - le16_to_cpu(de[1].nameoff) - nameoff;
1850 + /* the last dirent in the block? */
1851 + if (de + 1 >= end)
1852 + de_namelen = strnlen(de_name, maxsize - nameoff);
1853 + else
1854 + de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
1855
1856 /* a corrupted entry is found */
1857 - if (unlikely(de_namelen < 0)) {
1858 + if (unlikely(nameoff + de_namelen > maxsize ||
1859 + de_namelen > EROFS_NAME_LEN)) {
1860 DBG_BUGON(1);
1861 return -EIO;
1862 }
1863
1864 -#ifdef CONFIG_EROFS_FS_DEBUG
1865 - dbg_namelen = min(EROFS_NAME_LEN - 1, de_namelen);
1866 - memcpy(dbg_namebuf, de_name, dbg_namelen);
1867 - dbg_namebuf[dbg_namelen] = '\0';
1868 -
1869 - debugln("%s, found de_name %s de_len %d d_type %d", __func__,
1870 - dbg_namebuf, de_namelen, d_type);
1871 -#endif
1872 -
1873 + debug_one_dentry(d_type, de_name, de_namelen);
1874 if (!dir_emit(ctx, de_name, de_namelen,
1875 le64_to_cpu(de->nid), d_type))
1876 /* stopped by some reason */
1877 diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
1878 index ab30d14ded06..d850be1abc84 100644
1879 --- a/drivers/staging/erofs/unzip_vle.c
1880 +++ b/drivers/staging/erofs/unzip_vle.c
1881 @@ -977,6 +977,7 @@ repeat:
1882 overlapped = false;
1883 compressed_pages = grp->compressed_pages;
1884
1885 + err = 0;
1886 for (i = 0; i < clusterpages; ++i) {
1887 unsigned int pagenr;
1888
1889 @@ -986,26 +987,39 @@ repeat:
1890 DBG_BUGON(!page);
1891 DBG_BUGON(!page->mapping);
1892
1893 - if (z_erofs_is_stagingpage(page))
1894 - continue;
1895 + if (!z_erofs_is_stagingpage(page)) {
1896 #ifdef EROFS_FS_HAS_MANAGED_CACHE
1897 - if (page->mapping == MNGD_MAPPING(sbi)) {
1898 - DBG_BUGON(!PageUptodate(page));
1899 - continue;
1900 - }
1901 + if (page->mapping == MNGD_MAPPING(sbi)) {
1902 + if (unlikely(!PageUptodate(page)))
1903 + err = -EIO;
1904 + continue;
1905 + }
1906 #endif
1907
1908 - /* only non-head page could be reused as a compressed page */
1909 - pagenr = z_erofs_onlinepage_index(page);
1910 + /*
1911 + * only if non-head page can be selected
1912 + * for inplace decompression
1913 + */
1914 + pagenr = z_erofs_onlinepage_index(page);
1915
1916 - DBG_BUGON(pagenr >= nr_pages);
1917 - DBG_BUGON(pages[pagenr]);
1918 - ++sparsemem_pages;
1919 - pages[pagenr] = page;
1920 + DBG_BUGON(pagenr >= nr_pages);
1921 + DBG_BUGON(pages[pagenr]);
1922 + ++sparsemem_pages;
1923 + pages[pagenr] = page;
1924
1925 - overlapped = true;
1926 + overlapped = true;
1927 + }
1928 +
1929 + /* PG_error needs checking for inplaced and staging pages */
1930 + if (unlikely(PageError(page))) {
1931 + DBG_BUGON(PageUptodate(page));
1932 + err = -EIO;
1933 + }
1934 }
1935
1936 + if (unlikely(err))
1937 + goto out;
1938 +
1939 llen = (nr_pages << PAGE_SHIFT) - work->pageofs;
1940
1941 if (z_erofs_vle_workgrp_fmt(grp) == Z_EROFS_VLE_WORKGRP_FMT_PLAIN) {
1942 @@ -1034,6 +1048,10 @@ repeat:
1943
1944 skip_allocpage:
1945 vout = erofs_vmap(pages, nr_pages);
1946 + if (!vout) {
1947 + err = -ENOMEM;
1948 + goto out;
1949 + }
1950
1951 err = z_erofs_vle_unzip_vmap(compressed_pages,
1952 clusterpages, vout, llen, work->pageofs, overlapped);
1953 @@ -1199,6 +1217,7 @@ repeat:
1954 if (page->mapping == mc) {
1955 WRITE_ONCE(grp->compressed_pages[nr], page);
1956
1957 + ClearPageError(page);
1958 if (!PagePrivate(page)) {
1959 /*
1960 * impossible to be !PagePrivate(page) for
1961 diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
1962 index f471b894c848..3e8b0ff2efeb 100644
1963 --- a/drivers/staging/erofs/unzip_vle_lz4.c
1964 +++ b/drivers/staging/erofs/unzip_vle_lz4.c
1965 @@ -136,10 +136,13 @@ int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
1966
1967 nr_pages = DIV_ROUND_UP(outlen + pageofs, PAGE_SIZE);
1968
1969 - if (clusterpages == 1)
1970 + if (clusterpages == 1) {
1971 vin = kmap_atomic(compressed_pages[0]);
1972 - else
1973 + } else {
1974 vin = erofs_vmap(compressed_pages, clusterpages);
1975 + if (!vin)
1976 + return -ENOMEM;
1977 + }
1978
1979 preempt_disable();
1980 vout = erofs_pcpubuf[smp_processor_id()].data;
1981 diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
1982 index 80b8d4153414..a54286498a47 100644
1983 --- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
1984 +++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
1985 @@ -45,7 +45,7 @@ static int dcon_init_xo_1(struct dcon_priv *dcon)
1986 {
1987 unsigned char lob;
1988 int ret, i;
1989 - struct dcon_gpio *pin = &gpios_asis[0];
1990 + const struct dcon_gpio *pin = &gpios_asis[0];
1991
1992 for (i = 0; i < ARRAY_SIZE(gpios_asis); i++) {
1993 gpios[i] = devm_gpiod_get(&dcon->client->dev, pin[i].name,
1994 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
1995 index 947c79532e10..d5383974d40e 100644
1996 --- a/drivers/staging/speakup/speakup_soft.c
1997 +++ b/drivers/staging/speakup/speakup_soft.c
1998 @@ -208,12 +208,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
1999 return -EINVAL;
2000
2001 spin_lock_irqsave(&speakup_info.spinlock, flags);
2002 + synth_soft.alive = 1;
2003 while (1) {
2004 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
2005 - if (!unicode)
2006 - synth_buffer_skip_nonlatin1();
2007 - if (!synth_buffer_empty() || speakup_info.flushing)
2008 - break;
2009 + if (synth_current() == &synth_soft) {
2010 + if (!unicode)
2011 + synth_buffer_skip_nonlatin1();
2012 + if (!synth_buffer_empty() || speakup_info.flushing)
2013 + break;
2014 + }
2015 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
2016 if (fp->f_flags & O_NONBLOCK) {
2017 finish_wait(&speakup_event, &wait);
2018 @@ -233,6 +236,8 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
2019
2020 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
2021 while (chars_sent <= count - bytes_per_ch) {
2022 + if (synth_current() != &synth_soft)
2023 + break;
2024 if (speakup_info.flushing) {
2025 speakup_info.flushing = 0;
2026 ch = '\x18';
2027 @@ -329,7 +334,8 @@ static __poll_t softsynth_poll(struct file *fp, struct poll_table_struct *wait)
2028 poll_wait(fp, &speakup_event, wait);
2029
2030 spin_lock_irqsave(&speakup_info.spinlock, flags);
2031 - if (!synth_buffer_empty() || speakup_info.flushing)
2032 + if (synth_current() == &synth_soft &&
2033 + (!synth_buffer_empty() || speakup_info.flushing))
2034 ret = EPOLLIN | EPOLLRDNORM;
2035 spin_unlock_irqrestore(&speakup_info.spinlock, flags);
2036 return ret;
2037 diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h
2038 index c8e688878fc7..ac6a74883af4 100644
2039 --- a/drivers/staging/speakup/spk_priv.h
2040 +++ b/drivers/staging/speakup/spk_priv.h
2041 @@ -74,6 +74,7 @@ int synth_request_region(unsigned long start, unsigned long n);
2042 int synth_release_region(unsigned long start, unsigned long n);
2043 int synth_add(struct spk_synth *in_synth);
2044 void synth_remove(struct spk_synth *in_synth);
2045 +struct spk_synth *synth_current(void);
2046
2047 extern struct speakup_info_t speakup_info;
2048
2049 diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
2050 index 25f259ee4ffc..3568bfb89912 100644
2051 --- a/drivers/staging/speakup/synth.c
2052 +++ b/drivers/staging/speakup/synth.c
2053 @@ -481,4 +481,10 @@ void synth_remove(struct spk_synth *in_synth)
2054 }
2055 EXPORT_SYMBOL_GPL(synth_remove);
2056
2057 +struct spk_synth *synth_current(void)
2058 +{
2059 + return synth;
2060 +}
2061 +EXPORT_SYMBOL_GPL(synth_current);
2062 +
2063 short spk_punc_masks[] = { 0, SOME, MOST, PUNC, PUNC | B_SYM };
2064 diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
2065 index c9097e7367d8..2e28fbcdfe8e 100644
2066 --- a/drivers/staging/vt6655/device_main.c
2067 +++ b/drivers/staging/vt6655/device_main.c
2068 @@ -1033,8 +1033,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
2069 return;
2070 }
2071
2072 - MACvIntDisable(priv->PortOffset);
2073 -
2074 spin_lock_irqsave(&priv->lock, flags);
2075
2076 /* Read low level stats */
2077 @@ -1122,8 +1120,6 @@ static void vnt_interrupt_process(struct vnt_private *priv)
2078 }
2079
2080 spin_unlock_irqrestore(&priv->lock, flags);
2081 -
2082 - MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
2083 }
2084
2085 static void vnt_interrupt_work(struct work_struct *work)
2086 @@ -1133,14 +1129,17 @@ static void vnt_interrupt_work(struct work_struct *work)
2087
2088 if (priv->vif)
2089 vnt_interrupt_process(priv);
2090 +
2091 + MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
2092 }
2093
2094 static irqreturn_t vnt_interrupt(int irq, void *arg)
2095 {
2096 struct vnt_private *priv = arg;
2097
2098 - if (priv->vif)
2099 - schedule_work(&priv->interrupt_work);
2100 + schedule_work(&priv->interrupt_work);
2101 +
2102 + MACvIntDisable(priv->PortOffset);
2103
2104 return IRQ_HANDLED;
2105 }
2106 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
2107 index 05147fe24343..0b4f36905321 100644
2108 --- a/drivers/tty/serial/atmel_serial.c
2109 +++ b/drivers/tty/serial/atmel_serial.c
2110 @@ -166,6 +166,8 @@ struct atmel_uart_port {
2111 unsigned int pending_status;
2112 spinlock_t lock_suspended;
2113
2114 + bool hd_start_rx; /* can start RX during half-duplex operation */
2115 +
2116 /* ISO7816 */
2117 unsigned int fidi_min;
2118 unsigned int fidi_max;
2119 @@ -231,6 +233,13 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
2120 __raw_writeb(value, port->membase + ATMEL_US_THR);
2121 }
2122
2123 +static inline int atmel_uart_is_half_duplex(struct uart_port *port)
2124 +{
2125 + return ((port->rs485.flags & SER_RS485_ENABLED) &&
2126 + !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
2127 + (port->iso7816.flags & SER_ISO7816_ENABLED);
2128 +}
2129 +
2130 #ifdef CONFIG_SERIAL_ATMEL_PDC
2131 static bool atmel_use_pdc_rx(struct uart_port *port)
2132 {
2133 @@ -608,10 +617,9 @@ static void atmel_stop_tx(struct uart_port *port)
2134 /* Disable interrupts */
2135 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
2136
2137 - if (((port->rs485.flags & SER_RS485_ENABLED) &&
2138 - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
2139 - port->iso7816.flags & SER_ISO7816_ENABLED)
2140 + if (atmel_uart_is_half_duplex(port))
2141 atmel_start_rx(port);
2142 +
2143 }
2144
2145 /*
2146 @@ -628,9 +636,7 @@ static void atmel_start_tx(struct uart_port *port)
2147 return;
2148
2149 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
2150 - if (((port->rs485.flags & SER_RS485_ENABLED) &&
2151 - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
2152 - port->iso7816.flags & SER_ISO7816_ENABLED)
2153 + if (atmel_uart_is_half_duplex(port))
2154 atmel_stop_rx(port);
2155
2156 if (atmel_use_pdc_tx(port))
2157 @@ -928,11 +934,14 @@ static void atmel_complete_tx_dma(void *arg)
2158 */
2159 if (!uart_circ_empty(xmit))
2160 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
2161 - else if (((port->rs485.flags & SER_RS485_ENABLED) &&
2162 - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
2163 - port->iso7816.flags & SER_ISO7816_ENABLED) {
2164 - /* DMA done, stop TX, start RX for RS485 */
2165 - atmel_start_rx(port);
2166 + else if (atmel_uart_is_half_duplex(port)) {
2167 + /*
2168 + * DMA done, re-enable TXEMPTY and signal that we can stop
2169 + * TX and start RX for RS485
2170 + */
2171 + atmel_port->hd_start_rx = true;
2172 + atmel_uart_writel(port, ATMEL_US_IER,
2173 + atmel_port->tx_done_mask);
2174 }
2175
2176 spin_unlock_irqrestore(&port->lock, flags);
2177 @@ -1288,6 +1297,10 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
2178 sg_dma_len(&atmel_port->sg_rx)/2,
2179 DMA_DEV_TO_MEM,
2180 DMA_PREP_INTERRUPT);
2181 + if (!desc) {
2182 + dev_err(port->dev, "Preparing DMA cyclic failed\n");
2183 + goto chan_err;
2184 + }
2185 desc->callback = atmel_complete_rx_dma;
2186 desc->callback_param = port;
2187 atmel_port->desc_rx = desc;
2188 @@ -1376,9 +1389,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
2189 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
2190
2191 if (pending & atmel_port->tx_done_mask) {
2192 - /* Either PDC or interrupt transmission */
2193 atmel_uart_writel(port, ATMEL_US_IDR,
2194 atmel_port->tx_done_mask);
2195 +
2196 + /* Start RX if flag was set and FIFO is empty */
2197 + if (atmel_port->hd_start_rx) {
2198 + if (!(atmel_uart_readl(port, ATMEL_US_CSR)
2199 + & ATMEL_US_TXEMPTY))
2200 + dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
2201 +
2202 + atmel_port->hd_start_rx = false;
2203 + atmel_start_rx(port);
2204 + return;
2205 + }
2206 +
2207 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
2208 }
2209 }
2210 @@ -1508,9 +1532,7 @@ static void atmel_tx_pdc(struct uart_port *port)
2211 atmel_uart_writel(port, ATMEL_US_IER,
2212 atmel_port->tx_done_mask);
2213 } else {
2214 - if (((port->rs485.flags & SER_RS485_ENABLED) &&
2215 - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) ||
2216 - port->iso7816.flags & SER_ISO7816_ENABLED) {
2217 + if (atmel_uart_is_half_duplex(port)) {
2218 /* DMA done, stop TX, start RX for RS485 */
2219 atmel_start_rx(port);
2220 }
2221 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
2222 index 6fb312e7af71..bfe5e9e034ec 100644
2223 --- a/drivers/tty/serial/kgdboc.c
2224 +++ b/drivers/tty/serial/kgdboc.c
2225 @@ -148,8 +148,10 @@ static int configure_kgdboc(void)
2226 char *cptr = config;
2227 struct console *cons;
2228
2229 - if (!strlen(config) || isspace(config[0]))
2230 + if (!strlen(config) || isspace(config[0])) {
2231 + err = 0;
2232 goto noconfig;
2233 + }
2234
2235 kgdboc_io_ops.is_console = 0;
2236 kgdb_tty_driver = NULL;
2237 diff --git a/drivers/tty/serial/max310x.c b/drivers/tty/serial/max310x.c
2238 index 4f479841769a..0fdf3a760aa0 100644
2239 --- a/drivers/tty/serial/max310x.c
2240 +++ b/drivers/tty/serial/max310x.c
2241 @@ -1416,6 +1416,8 @@ static int max310x_spi_probe(struct spi_device *spi)
2242 if (spi->dev.of_node) {
2243 const struct of_device_id *of_id =
2244 of_match_device(max310x_dt_ids, &spi->dev);
2245 + if (!of_id)
2246 + return -ENODEV;
2247
2248 devtype = (struct max310x_devtype *)of_id->data;
2249 } else {
2250 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
2251 index 231f751d1ef4..7e7b1559fa36 100644
2252 --- a/drivers/tty/serial/mvebu-uart.c
2253 +++ b/drivers/tty/serial/mvebu-uart.c
2254 @@ -810,6 +810,9 @@ static int mvebu_uart_probe(struct platform_device *pdev)
2255 return -EINVAL;
2256 }
2257
2258 + if (!match)
2259 + return -ENODEV;
2260 +
2261 /* Assume that all UART ports have a DT alias or none has */
2262 id = of_alias_get_id(pdev->dev.of_node, "serial");
2263 if (!pdev->dev.of_node || id < 0)
2264 diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
2265 index 27235a526cce..4c188f4079b3 100644
2266 --- a/drivers/tty/serial/mxs-auart.c
2267 +++ b/drivers/tty/serial/mxs-auart.c
2268 @@ -1686,6 +1686,10 @@ static int mxs_auart_probe(struct platform_device *pdev)
2269
2270 s->port.mapbase = r->start;
2271 s->port.membase = ioremap(r->start, resource_size(r));
2272 + if (!s->port.membase) {
2273 + ret = -ENOMEM;
2274 + goto out_disable_clks;
2275 + }
2276 s->port.ops = &mxs_auart_ops;
2277 s->port.iotype = UPIO_MEM;
2278 s->port.fifosize = MXS_AUART_FIFO_SIZE;
2279 diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
2280 index 38016609c7fa..d30502c58106 100644
2281 --- a/drivers/tty/serial/qcom_geni_serial.c
2282 +++ b/drivers/tty/serial/qcom_geni_serial.c
2283 @@ -1117,7 +1117,7 @@ static int __init qcom_geni_console_setup(struct console *co, char *options)
2284 {
2285 struct uart_port *uport;
2286 struct qcom_geni_serial_port *port;
2287 - int baud;
2288 + int baud = 9600;
2289 int bits = 8;
2290 int parity = 'n';
2291 int flow = 'n';
2292 diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
2293 index 64bbeb7d7e0c..93bd90f1ff14 100644
2294 --- a/drivers/tty/serial/sh-sci.c
2295 +++ b/drivers/tty/serial/sh-sci.c
2296 @@ -838,19 +838,9 @@ static void sci_transmit_chars(struct uart_port *port)
2297
2298 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
2299 uart_write_wakeup(port);
2300 - if (uart_circ_empty(xmit)) {
2301 + if (uart_circ_empty(xmit))
2302 sci_stop_tx(port);
2303 - } else {
2304 - ctrl = serial_port_in(port, SCSCR);
2305 -
2306 - if (port->type != PORT_SCI) {
2307 - serial_port_in(port, SCxSR); /* Dummy read */
2308 - sci_clear_SCxSR(port, SCxSR_TDxE_CLEAR(port));
2309 - }
2310
2311 - ctrl |= SCSCR_TIE;
2312 - serial_port_out(port, SCSCR, ctrl);
2313 - }
2314 }
2315
2316 /* On SH3, SCIF may read end-of-break as a space->mark char */
2317 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2318 index 739f8960811a..ec666eb4b7b4 100644
2319 --- a/drivers/usb/class/cdc-acm.c
2320 +++ b/drivers/usb/class/cdc-acm.c
2321 @@ -558,10 +558,8 @@ static void acm_softint(struct work_struct *work)
2322 clear_bit(EVENT_RX_STALL, &acm->flags);
2323 }
2324
2325 - if (test_bit(EVENT_TTY_WAKEUP, &acm->flags)) {
2326 + if (test_and_clear_bit(EVENT_TTY_WAKEUP, &acm->flags))
2327 tty_port_tty_wakeup(&acm->port);
2328 - clear_bit(EVENT_TTY_WAKEUP, &acm->flags);
2329 - }
2330 }
2331
2332 /*
2333 diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
2334 index 48277bbc15e4..73c8e6591746 100644
2335 --- a/drivers/usb/common/common.c
2336 +++ b/drivers/usb/common/common.c
2337 @@ -145,6 +145,8 @@ enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
2338
2339 do {
2340 controller = of_find_node_with_property(controller, "phys");
2341 + if (!of_device_is_available(controller))
2342 + continue;
2343 index = 0;
2344 do {
2345 if (arg0 == -1) {
2346 diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
2347 index 75b113a5b25c..f3816a5c861e 100644
2348 --- a/drivers/usb/gadget/function/f_hid.c
2349 +++ b/drivers/usb/gadget/function/f_hid.c
2350 @@ -391,20 +391,20 @@ try_again:
2351 req->complete = f_hidg_req_complete;
2352 req->context = hidg;
2353
2354 + spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2355 +
2356 status = usb_ep_queue(hidg->in_ep, req, GFP_ATOMIC);
2357 if (status < 0) {
2358 ERROR(hidg->func.config->cdev,
2359 "usb_ep_queue error on int endpoint %zd\n", status);
2360 - goto release_write_pending_unlocked;
2361 + goto release_write_pending;
2362 } else {
2363 status = count;
2364 }
2365 - spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2366
2367 return status;
2368 release_write_pending:
2369 spin_lock_irqsave(&hidg->write_spinlock, flags);
2370 -release_write_pending_unlocked:
2371 hidg->write_pending = 0;
2372 spin_unlock_irqrestore(&hidg->write_spinlock, flags);
2373
2374 diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
2375 index 86cff5c28eff..ba841c569c48 100644
2376 --- a/drivers/usb/host/xhci-dbgcap.c
2377 +++ b/drivers/usb/host/xhci-dbgcap.c
2378 @@ -516,7 +516,6 @@ static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
2379 return -1;
2380
2381 writel(0, &dbc->regs->control);
2382 - xhci_dbc_mem_cleanup(xhci);
2383 dbc->state = DS_DISABLED;
2384
2385 return 0;
2386 @@ -562,8 +561,10 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
2387 ret = xhci_do_dbc_stop(xhci);
2388 spin_unlock_irqrestore(&dbc->lock, flags);
2389
2390 - if (!ret)
2391 + if (!ret) {
2392 + xhci_dbc_mem_cleanup(xhci);
2393 pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
2394 + }
2395 }
2396
2397 static void
2398 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
2399 index e2eece693655..96a740543183 100644
2400 --- a/drivers/usb/host/xhci-hub.c
2401 +++ b/drivers/usb/host/xhci-hub.c
2402 @@ -1545,20 +1545,25 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
2403 port_index = max_ports;
2404 while (port_index--) {
2405 u32 t1, t2;
2406 -
2407 + int retries = 10;
2408 +retry:
2409 t1 = readl(ports[port_index]->addr);
2410 t2 = xhci_port_state_to_neutral(t1);
2411 portsc_buf[port_index] = 0;
2412
2413 - /* Bail out if a USB3 port has a new device in link training */
2414 - if ((hcd->speed >= HCD_USB3) &&
2415 + /*
2416 + * Give a USB3 port in link training time to finish, but don't
2417 + * prevent suspend as port might be stuck
2418 + */
2419 + if ((hcd->speed >= HCD_USB3) && retries-- &&
2420 (t1 & PORT_PLS_MASK) == XDEV_POLLING) {
2421 - bus_state->bus_suspended = 0;
2422 spin_unlock_irqrestore(&xhci->lock, flags);
2423 - xhci_dbg(xhci, "Bus suspend bailout, port in polling\n");
2424 - return -EBUSY;
2425 + msleep(XHCI_PORT_POLLING_LFPS_TIME);
2426 + spin_lock_irqsave(&xhci->lock, flags);
2427 + xhci_dbg(xhci, "port %d polling in bus suspend, waiting\n",
2428 + port_index);
2429 + goto retry;
2430 }
2431 -
2432 /* suspend ports in U0, or bail out for new connect changes */
2433 if ((t1 & PORT_PE) && (t1 & PORT_PLS_MASK) == XDEV_U0) {
2434 if ((t1 & PORT_CSC) && wake_enabled) {
2435 diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c
2436 index a6e463715779..671bce18782c 100644
2437 --- a/drivers/usb/host/xhci-rcar.c
2438 +++ b/drivers/usb/host/xhci-rcar.c
2439 @@ -246,6 +246,7 @@ int xhci_rcar_init_quirk(struct usb_hcd *hcd)
2440 if (!xhci_rcar_wait_for_pll_active(hcd))
2441 return -ETIMEDOUT;
2442
2443 + xhci->quirks |= XHCI_TRUST_TX_LENGTH;
2444 return xhci_rcar_download_firmware(hcd);
2445 }
2446
2447 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2448 index 40fa25c4d041..9215a28dad40 100644
2449 --- a/drivers/usb/host/xhci-ring.c
2450 +++ b/drivers/usb/host/xhci-ring.c
2451 @@ -1647,10 +1647,13 @@ static void handle_port_status(struct xhci_hcd *xhci,
2452 }
2453 }
2454
2455 - if ((portsc & PORT_PLC) && (portsc & PORT_PLS_MASK) == XDEV_U0 &&
2456 - DEV_SUPERSPEED_ANY(portsc)) {
2457 + if ((portsc & PORT_PLC) &&
2458 + DEV_SUPERSPEED_ANY(portsc) &&
2459 + ((portsc & PORT_PLS_MASK) == XDEV_U0 ||
2460 + (portsc & PORT_PLS_MASK) == XDEV_U1 ||
2461 + (portsc & PORT_PLS_MASK) == XDEV_U2)) {
2462 xhci_dbg(xhci, "resume SS port %d finished\n", port_id);
2463 - /* We've just brought the device into U0 through either the
2464 + /* We've just brought the device into U0/1/2 through either the
2465 * Resume state after a device remote wakeup, or through the
2466 * U3Exit state after a host-initiated resume. If it's a device
2467 * initiated remote wake, don't pass up the link state change,
2468 diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
2469 index 652dc36e3012..9334cdee382a 100644
2470 --- a/drivers/usb/host/xhci.h
2471 +++ b/drivers/usb/host/xhci.h
2472 @@ -452,6 +452,14 @@ struct xhci_op_regs {
2473 */
2474 #define XHCI_DEFAULT_BESL 4
2475
2476 +/*
2477 + * USB3 specification defines a 360ms tPollingLFPSTimeout for USB3 ports
2478 + * to complete link training. Usually link training completes much faster
2479 + * so check status 10 times with 36ms sleep in places we need to wait for
2480 + * polling to complete.
2481 + */
2482 +#define XHCI_PORT_POLLING_LFPS_TIME 36
2483 +
2484 /**
2485 * struct xhci_intr_reg - Interrupt Register Set
2486 * @irq_pending: IMAN - Interrupt Management Register. Used to enable
2487 diff --git a/drivers/usb/mtu3/Kconfig b/drivers/usb/mtu3/Kconfig
2488 index 40bbf1f53337..fe58904f350b 100644
2489 --- a/drivers/usb/mtu3/Kconfig
2490 +++ b/drivers/usb/mtu3/Kconfig
2491 @@ -4,6 +4,7 @@ config USB_MTU3
2492 tristate "MediaTek USB3 Dual Role controller"
2493 depends on USB || USB_GADGET
2494 depends on ARCH_MEDIATEK || COMPILE_TEST
2495 + depends on EXTCON || !EXTCON
2496 select USB_XHCI_MTK if USB_SUPPORT && USB_XHCI_HCD
2497 help
2498 Say Y or M here if your system runs on MediaTek SoCs with
2499 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2500 index 4c66edf533fe..e732949f6567 100644
2501 --- a/drivers/usb/serial/cp210x.c
2502 +++ b/drivers/usb/serial/cp210x.c
2503 @@ -80,6 +80,7 @@ static const struct usb_device_id id_table[] = {
2504 { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */
2505 { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */
2506 { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */
2507 + { USB_DEVICE(0x10C4, 0x8056) }, /* Lorenz Messtechnik devices */
2508 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
2509 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
2510 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
2511 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2512 index 8f5b17471759..1d8461ae2c34 100644
2513 --- a/drivers/usb/serial/ftdi_sio.c
2514 +++ b/drivers/usb/serial/ftdi_sio.c
2515 @@ -609,6 +609,8 @@ static const struct usb_device_id id_table_combined[] = {
2516 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2517 { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLXM_PID),
2518 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2519 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORIONLX_PLUS_PID) },
2520 + { USB_DEVICE(FTDI_VID, FTDI_NT_ORION_IO_PID) },
2521 { USB_DEVICE(FTDI_VID, FTDI_SYNAPSE_SS200_PID) },
2522 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX_PID) },
2523 { USB_DEVICE(FTDI_VID, FTDI_CUSTOMWARE_MINIPLEX2_PID) },
2524 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2525 index b863bedb55a1..5755f0df0025 100644
2526 --- a/drivers/usb/serial/ftdi_sio_ids.h
2527 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2528 @@ -567,7 +567,9 @@
2529 /*
2530 * NovaTech product ids (FTDI_VID)
2531 */
2532 -#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2533 +#define FTDI_NT_ORIONLXM_PID 0x7c90 /* OrionLXm Substation Automation Platform */
2534 +#define FTDI_NT_ORIONLX_PLUS_PID 0x7c91 /* OrionLX+ Substation Automation Platform */
2535 +#define FTDI_NT_ORION_IO_PID 0x7c92 /* Orion I/O */
2536
2537 /*
2538 * Synapse Wireless product ids (FTDI_VID)
2539 diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
2540 index fc52ac75fbf6..18110225d506 100644
2541 --- a/drivers/usb/serial/mos7720.c
2542 +++ b/drivers/usb/serial/mos7720.c
2543 @@ -366,8 +366,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2544 if (!urbtrack)
2545 return -ENOMEM;
2546
2547 - kref_get(&mos_parport->ref_count);
2548 - urbtrack->mos_parport = mos_parport;
2549 urbtrack->urb = usb_alloc_urb(0, GFP_ATOMIC);
2550 if (!urbtrack->urb) {
2551 kfree(urbtrack);
2552 @@ -388,6 +386,8 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport,
2553 usb_sndctrlpipe(usbdev, 0),
2554 (unsigned char *)urbtrack->setup,
2555 NULL, 0, async_complete, urbtrack);
2556 + kref_get(&mos_parport->ref_count);
2557 + urbtrack->mos_parport = mos_parport;
2558 kref_init(&urbtrack->ref_count);
2559 INIT_LIST_HEAD(&urbtrack->urblist_entry);
2560
2561 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2562 index 11b21d9410f3..83869065b802 100644
2563 --- a/drivers/usb/serial/option.c
2564 +++ b/drivers/usb/serial/option.c
2565 @@ -246,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
2566 #define QUECTEL_PRODUCT_EC25 0x0125
2567 #define QUECTEL_PRODUCT_BG96 0x0296
2568 #define QUECTEL_PRODUCT_EP06 0x0306
2569 +#define QUECTEL_PRODUCT_EM12 0x0512
2570
2571 #define CMOTECH_VENDOR_ID 0x16d8
2572 #define CMOTECH_PRODUCT_6001 0x6001
2573 @@ -1066,7 +1067,8 @@ static const struct usb_device_id option_ids[] = {
2574 .driver_info = RSVD(3) },
2575 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2576 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
2577 - { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
2578 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000), /* SIMCom SIM5218 */
2579 + .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | NCTRL(3) | RSVD(4) },
2580 /* Quectel products using Qualcomm vendor ID */
2581 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
2582 { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
2583 @@ -1087,6 +1089,9 @@ static const struct usb_device_id option_ids[] = {
2584 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff),
2585 .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
2586 { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) },
2587 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
2588 + .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
2589 + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
2590 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
2591 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
2592 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
2593 @@ -1940,10 +1945,12 @@ static const struct usb_device_id option_ids[] = {
2594 .driver_info = RSVD(4) },
2595 { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
2596 .driver_info = RSVD(4) },
2597 - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2598 - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2599 - { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2600 - { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2601 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
2602 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
2603 + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
2604 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
2605 + .driver_info = RSVD(4) },
2606 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
2607 { USB_DEVICE(INOVIA_VENDOR_ID, INOVIA_SEW858) },
2608 { USB_DEVICE(VIATELECOM_VENDOR_ID, VIATELECOM_PRODUCT_CDS7) },
2609 { USB_DEVICE_AND_INTERFACE_INFO(WETELECOM_VENDOR_ID, WETELECOM_PRODUCT_WMD200, 0xff, 0xff, 0xff) },
2610 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
2611 index f1c39a3c7534..d34e945e5d09 100644
2612 --- a/drivers/usb/typec/tcpm/tcpm.c
2613 +++ b/drivers/usb/typec/tcpm/tcpm.c
2614 @@ -37,6 +37,7 @@
2615 S(SRC_ATTACHED), \
2616 S(SRC_STARTUP), \
2617 S(SRC_SEND_CAPABILITIES), \
2618 + S(SRC_SEND_CAPABILITIES_TIMEOUT), \
2619 S(SRC_NEGOTIATE_CAPABILITIES), \
2620 S(SRC_TRANSITION_SUPPLY), \
2621 S(SRC_READY), \
2622 @@ -2966,10 +2967,34 @@ static void run_state_machine(struct tcpm_port *port)
2623 /* port->hard_reset_count = 0; */
2624 port->caps_count = 0;
2625 port->pd_capable = true;
2626 - tcpm_set_state_cond(port, hard_reset_state(port),
2627 + tcpm_set_state_cond(port, SRC_SEND_CAPABILITIES_TIMEOUT,
2628 PD_T_SEND_SOURCE_CAP);
2629 }
2630 break;
2631 + case SRC_SEND_CAPABILITIES_TIMEOUT:
2632 + /*
2633 + * Error recovery for a PD_DATA_SOURCE_CAP reply timeout.
2634 + *
2635 + * PD 2.0 sinks are supposed to accept src-capabilities with a
2636 + * 3.0 header and simply ignore any src PDOs which the sink does
2637 + * not understand such as PPS but some 2.0 sinks instead ignore
2638 + * the entire PD_DATA_SOURCE_CAP message, causing contract
2639 + * negotiation to fail.
2640 + *
2641 + * After PD_N_HARD_RESET_COUNT hard-reset attempts, we try
2642 + * sending src-capabilities with a lower PD revision to
2643 + * make these broken sinks work.
2644 + */
2645 + if (port->hard_reset_count < PD_N_HARD_RESET_COUNT) {
2646 + tcpm_set_state(port, HARD_RESET_SEND, 0);
2647 + } else if (port->negotiated_rev > PD_REV20) {
2648 + port->negotiated_rev--;
2649 + port->hard_reset_count = 0;
2650 + tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
2651 + } else {
2652 + tcpm_set_state(port, hard_reset_state(port), 0);
2653 + }
2654 + break;
2655 case SRC_NEGOTIATE_CAPABILITIES:
2656 ret = tcpm_pd_check_request(port);
2657 if (ret < 0) {
2658 diff --git a/drivers/usb/typec/tcpm/wcove.c b/drivers/usb/typec/tcpm/wcove.c
2659 index 423208e19383..6770afd40765 100644
2660 --- a/drivers/usb/typec/tcpm/wcove.c
2661 +++ b/drivers/usb/typec/tcpm/wcove.c
2662 @@ -615,8 +615,13 @@ static int wcove_typec_probe(struct platform_device *pdev)
2663 wcove->dev = &pdev->dev;
2664 wcove->regmap = pmic->regmap;
2665
2666 - irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr,
2667 - platform_get_irq(pdev, 0));
2668 + irq = platform_get_irq(pdev, 0);
2669 + if (irq < 0) {
2670 + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
2671 + return irq;
2672 + }
2673 +
2674 + irq = regmap_irq_get_virq(pmic->irq_chip_data_chgr, irq);
2675 if (irq < 0)
2676 return irq;
2677
2678 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2679 index d81035b7ea7d..0a6615573351 100644
2680 --- a/fs/btrfs/extent-tree.c
2681 +++ b/fs/btrfs/extent-tree.c
2682 @@ -6115,7 +6115,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info,
2683 *
2684 * This is overestimating in most cases.
2685 */
2686 - qgroup_rsv_size = outstanding_extents * fs_info->nodesize;
2687 + qgroup_rsv_size = (u64)outstanding_extents * fs_info->nodesize;
2688
2689 spin_lock(&block_rsv->lock);
2690 block_rsv->size = reserve_size;
2691 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
2692 index 4e473a998219..543dd5e66f31 100644
2693 --- a/fs/btrfs/qgroup.c
2694 +++ b/fs/btrfs/qgroup.c
2695 @@ -1917,8 +1917,8 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
2696 int i;
2697
2698 /* Level sanity check */
2699 - if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL ||
2700 - root_level < 0 || root_level >= BTRFS_MAX_LEVEL ||
2701 + if (cur_level < 0 || cur_level >= BTRFS_MAX_LEVEL - 1 ||
2702 + root_level < 0 || root_level >= BTRFS_MAX_LEVEL - 1 ||
2703 root_level < cur_level) {
2704 btrfs_err_rl(fs_info,
2705 "%s: bad levels, cur_level=%d root_level=%d",
2706 diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
2707 index e74455eb42f9..6976e2280771 100644
2708 --- a/fs/btrfs/raid56.c
2709 +++ b/fs/btrfs/raid56.c
2710 @@ -2429,8 +2429,9 @@ static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2711 bitmap_clear(rbio->dbitmap, pagenr, 1);
2712 kunmap(p);
2713
2714 - for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2715 + for (stripe = 0; stripe < nr_data; stripe++)
2716 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2717 + kunmap(p_page);
2718 }
2719
2720 __free_page(p_page);
2721 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
2722 index ac232b3d6d7e..7f3b74a55073 100644
2723 --- a/fs/btrfs/tree-log.c
2724 +++ b/fs/btrfs/tree-log.c
2725 @@ -3517,9 +3517,16 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
2726 }
2727 btrfs_release_path(path);
2728
2729 - /* find the first key from this transaction again */
2730 + /*
2731 + * Find the first key from this transaction again. See the note for
2732 + * log_new_dir_dentries, if we're logging a directory recursively we
2733 + * won't be holding its i_mutex, which means we can modify the directory
2734 + * while we're logging it. If we remove an entry between our first
2735 + * search and this search we'll not find the key again and can just
2736 + * bail.
2737 + */
2738 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
2739 - if (WARN_ON(ret != 0))
2740 + if (ret != 0)
2741 goto done;
2742
2743 /*
2744 @@ -4481,6 +4488,19 @@ static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
2745 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
2746 struct btrfs_inode_item);
2747 *size_ret = btrfs_inode_size(path->nodes[0], item);
2748 + /*
2749 + * If the in-memory inode's i_size is smaller then the inode
2750 + * size stored in the btree, return the inode's i_size, so
2751 + * that we get a correct inode size after replaying the log
2752 + * when before a power failure we had a shrinking truncate
2753 + * followed by addition of a new name (rename / new hard link).
2754 + * Otherwise return the inode size from the btree, to avoid
2755 + * data loss when replaying a log due to previously doing a
2756 + * write that expands the inode's size and logging a new name
2757 + * immediately after.
2758 + */
2759 + if (*size_ret > inode->vfs_inode.i_size)
2760 + *size_ret = inode->vfs_inode.i_size;
2761 }
2762
2763 btrfs_release_path(path);
2764 @@ -4642,15 +4662,8 @@ static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans,
2765 struct btrfs_file_extent_item);
2766
2767 if (btrfs_file_extent_type(leaf, extent) ==
2768 - BTRFS_FILE_EXTENT_INLINE) {
2769 - len = btrfs_file_extent_ram_bytes(leaf, extent);
2770 - ASSERT(len == i_size ||
2771 - (len == fs_info->sectorsize &&
2772 - btrfs_file_extent_compression(leaf, extent) !=
2773 - BTRFS_COMPRESS_NONE) ||
2774 - (len < i_size && i_size < fs_info->sectorsize));
2775 + BTRFS_FILE_EXTENT_INLINE)
2776 return 0;
2777 - }
2778
2779 len = btrfs_file_extent_num_bytes(leaf, extent);
2780 /* Last extent goes beyond i_size, no need to log a hole. */
2781 diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
2782 index 48523bcabae9..88a323a453d8 100644
2783 --- a/fs/btrfs/volumes.c
2784 +++ b/fs/btrfs/volumes.c
2785 @@ -6413,7 +6413,7 @@ static void btrfs_end_bio(struct bio *bio)
2786 if (bio_op(bio) == REQ_OP_WRITE)
2787 btrfs_dev_stat_inc_and_print(dev,
2788 BTRFS_DEV_STAT_WRITE_ERRS);
2789 - else
2790 + else if (!(bio->bi_opf & REQ_RAHEAD))
2791 btrfs_dev_stat_inc_and_print(dev,
2792 BTRFS_DEV_STAT_READ_ERRS);
2793 if (bio->bi_opf & REQ_PREFLUSH)
2794 diff --git a/fs/lockd/host.c b/fs/lockd/host.c
2795 index 93fb7cf0b92b..f0b5c987d6ae 100644
2796 --- a/fs/lockd/host.c
2797 +++ b/fs/lockd/host.c
2798 @@ -290,12 +290,11 @@ void nlmclnt_release_host(struct nlm_host *host)
2799
2800 WARN_ON_ONCE(host->h_server);
2801
2802 - if (refcount_dec_and_test(&host->h_count)) {
2803 + if (refcount_dec_and_mutex_lock(&host->h_count, &nlm_host_mutex)) {
2804 WARN_ON_ONCE(!list_empty(&host->h_lockowners));
2805 WARN_ON_ONCE(!list_empty(&host->h_granted));
2806 WARN_ON_ONCE(!list_empty(&host->h_reclaim));
2807
2808 - mutex_lock(&nlm_host_mutex);
2809 nlm_destroy_host_locked(host);
2810 mutex_unlock(&nlm_host_mutex);
2811 }
2812 diff --git a/fs/locks.c b/fs/locks.c
2813 index ff6af2c32601..5f468cd95f68 100644
2814 --- a/fs/locks.c
2815 +++ b/fs/locks.c
2816 @@ -1160,6 +1160,11 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
2817 */
2818 error = -EDEADLK;
2819 spin_lock(&blocked_lock_lock);
2820 + /*
2821 + * Ensure that we don't find any locks blocked on this
2822 + * request during deadlock detection.
2823 + */
2824 + __locks_wake_up_blocks(request);
2825 if (likely(!posix_locks_deadlock(request, fl))) {
2826 error = FILE_LOCK_DEFERRED;
2827 __locks_insert_block(fl, request,
2828 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2829 index 64ac80ec6b7b..44258c516305 100644
2830 --- a/fs/nfs/nfs4proc.c
2831 +++ b/fs/nfs/nfs4proc.c
2832 @@ -2938,7 +2938,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2833 }
2834
2835 out:
2836 - nfs4_sequence_free_slot(&opendata->o_res.seq_res);
2837 + if (!opendata->cancelled)
2838 + nfs4_sequence_free_slot(&opendata->o_res.seq_res);
2839 return ret;
2840 }
2841
2842 @@ -6306,7 +6307,6 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
2843 p->arg.seqid = seqid;
2844 p->res.seqid = seqid;
2845 p->lsp = lsp;
2846 - refcount_inc(&lsp->ls_count);
2847 /* Ensure we don't close file until we're done freeing locks! */
2848 p->ctx = get_nfs_open_context(ctx);
2849 p->l_ctx = nfs_get_lock_context(ctx);
2850 @@ -6531,7 +6531,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
2851 p->res.lock_seqid = p->arg.lock_seqid;
2852 p->lsp = lsp;
2853 p->server = server;
2854 - refcount_inc(&lsp->ls_count);
2855 p->ctx = get_nfs_open_context(ctx);
2856 locks_init_lock(&p->fl);
2857 locks_copy_lock(&p->fl, fl);
2858 diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
2859 index a35259eebc56..1dc9a08e8bdc 100644
2860 --- a/fs/ocfs2/refcounttree.c
2861 +++ b/fs/ocfs2/refcounttree.c
2862 @@ -4719,22 +4719,23 @@ out:
2863
2864 /* Lock an inode and grab a bh pointing to the inode. */
2865 int ocfs2_reflink_inodes_lock(struct inode *s_inode,
2866 - struct buffer_head **bh1,
2867 + struct buffer_head **bh_s,
2868 struct inode *t_inode,
2869 - struct buffer_head **bh2)
2870 + struct buffer_head **bh_t)
2871 {
2872 - struct inode *inode1;
2873 - struct inode *inode2;
2874 + struct inode *inode1 = s_inode;
2875 + struct inode *inode2 = t_inode;
2876 struct ocfs2_inode_info *oi1;
2877 struct ocfs2_inode_info *oi2;
2878 + struct buffer_head *bh1 = NULL;
2879 + struct buffer_head *bh2 = NULL;
2880 bool same_inode = (s_inode == t_inode);
2881 + bool need_swap = (inode1->i_ino > inode2->i_ino);
2882 int status;
2883
2884 /* First grab the VFS and rw locks. */
2885 lock_two_nondirectories(s_inode, t_inode);
2886 - inode1 = s_inode;
2887 - inode2 = t_inode;
2888 - if (inode1->i_ino > inode2->i_ino)
2889 + if (need_swap)
2890 swap(inode1, inode2);
2891
2892 status = ocfs2_rw_lock(inode1, 1);
2893 @@ -4757,17 +4758,13 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
2894 trace_ocfs2_double_lock((unsigned long long)oi1->ip_blkno,
2895 (unsigned long long)oi2->ip_blkno);
2896
2897 - if (*bh1)
2898 - *bh1 = NULL;
2899 - if (*bh2)
2900 - *bh2 = NULL;
2901 -
2902 /* We always want to lock the one with the lower lockid first. */
2903 if (oi1->ip_blkno > oi2->ip_blkno)
2904 mlog_errno(-ENOLCK);
2905
2906 /* lock id1 */
2907 - status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_REFLINK_TARGET);
2908 + status = ocfs2_inode_lock_nested(inode1, &bh1, 1,
2909 + OI_LS_REFLINK_TARGET);
2910 if (status < 0) {
2911 if (status != -ENOENT)
2912 mlog_errno(status);
2913 @@ -4776,15 +4773,25 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
2914
2915 /* lock id2 */
2916 if (!same_inode) {
2917 - status = ocfs2_inode_lock_nested(inode2, bh2, 1,
2918 + status = ocfs2_inode_lock_nested(inode2, &bh2, 1,
2919 OI_LS_REFLINK_TARGET);
2920 if (status < 0) {
2921 if (status != -ENOENT)
2922 mlog_errno(status);
2923 goto out_cl1;
2924 }
2925 - } else
2926 - *bh2 = *bh1;
2927 + } else {
2928 + bh2 = bh1;
2929 + }
2930 +
2931 + /*
2932 + * If we swapped inode order above, we have to swap the buffer heads
2933 + * before passing them back to the caller.
2934 + */
2935 + if (need_swap)
2936 + swap(bh1, bh2);
2937 + *bh_s = bh1;
2938 + *bh_t = bh2;
2939
2940 trace_ocfs2_double_lock_end(
2941 (unsigned long long)oi1->ip_blkno,
2942 @@ -4794,8 +4801,7 @@ int ocfs2_reflink_inodes_lock(struct inode *s_inode,
2943
2944 out_cl1:
2945 ocfs2_inode_unlock(inode1, 1);
2946 - brelse(*bh1);
2947 - *bh1 = NULL;
2948 + brelse(bh1);
2949 out_rw2:
2950 ocfs2_rw_unlock(inode2, 1);
2951 out_i2:
2952 diff --git a/fs/open.c b/fs/open.c
2953 index 0285ce7dbd51..f1c2f855fd43 100644
2954 --- a/fs/open.c
2955 +++ b/fs/open.c
2956 @@ -733,6 +733,12 @@ static int do_dentry_open(struct file *f,
2957 return 0;
2958 }
2959
2960 + /* Any file opened for execve()/uselib() has to be a regular file. */
2961 + if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) {
2962 + error = -EACCES;
2963 + goto cleanup_file;
2964 + }
2965 +
2966 if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) {
2967 error = get_write_access(inode);
2968 if (unlikely(error))
2969 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
2970 index 4d598a399bbf..d65390727541 100644
2971 --- a/fs/proc/proc_sysctl.c
2972 +++ b/fs/proc/proc_sysctl.c
2973 @@ -1626,7 +1626,8 @@ static void drop_sysctl_table(struct ctl_table_header *header)
2974 if (--header->nreg)
2975 return;
2976
2977 - put_links(header);
2978 + if (parent)
2979 + put_links(header);
2980 start_unregistering(header);
2981 if (!--header->count)
2982 kfree_rcu(header, rcu);
2983 diff --git a/include/linux/mii.h b/include/linux/mii.h
2984 index 6fee8b1a4400..5cd824c1c0ca 100644
2985 --- a/include/linux/mii.h
2986 +++ b/include/linux/mii.h
2987 @@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
2988 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2989 advertising))
2990 lcl_adv |= ADVERTISE_PAUSE_CAP;
2991 - if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2992 + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2993 advertising))
2994 lcl_adv |= ADVERTISE_PAUSE_ASYM;
2995
2996 diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
2997 index 4eb26d278046..280ae96dc4c3 100644
2998 --- a/include/linux/page-isolation.h
2999 +++ b/include/linux/page-isolation.h
3000 @@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page,
3001
3002 /*
3003 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
3004 - * If specified range includes migrate types other than MOVABLE or CMA,
3005 - * this will fail with -EBUSY.
3006 - *
3007 - * For isolating all pages in the range finally, the caller have to
3008 - * free all pages in the range. test_page_isolated() can be used for
3009 - * test it.
3010 - *
3011 - * The following flags are allowed (they can be combined in a bit mask)
3012 - * SKIP_HWPOISON - ignore hwpoison pages
3013 - * REPORT_FAILURE - report details about the failure to isolate the range
3014 */
3015 int
3016 start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
3017 diff --git a/include/linux/slab.h b/include/linux/slab.h
3018 index 11b45f7ae405..9449b19c5f10 100644
3019 --- a/include/linux/slab.h
3020 +++ b/include/linux/slab.h
3021 @@ -32,6 +32,8 @@
3022 #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U)
3023 /* Use GFP_DMA memory */
3024 #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U)
3025 +/* Use GFP_DMA32 memory */
3026 +#define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U)
3027 /* DEBUG: Store the last owner for bug hunting */
3028 #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U)
3029 /* Panic if kmem_cache_create() fails */
3030 diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
3031 index b4984bbbe157..3d58acf94dd2 100644
3032 --- a/include/net/netfilter/nf_tables.h
3033 +++ b/include/net/netfilter/nf_tables.h
3034 @@ -416,7 +416,8 @@ struct nft_set {
3035 unsigned char *udata;
3036 /* runtime data below here */
3037 const struct nft_set_ops *ops ____cacheline_aligned;
3038 - u16 flags:14,
3039 + u16 flags:13,
3040 + bound:1,
3041 genmask:2;
3042 u8 klen;
3043 u8 dlen;
3044 @@ -1329,15 +1330,12 @@ struct nft_trans_rule {
3045 struct nft_trans_set {
3046 struct nft_set *set;
3047 u32 set_id;
3048 - bool bound;
3049 };
3050
3051 #define nft_trans_set(trans) \
3052 (((struct nft_trans_set *)trans->data)->set)
3053 #define nft_trans_set_id(trans) \
3054 (((struct nft_trans_set *)trans->data)->set_id)
3055 -#define nft_trans_set_bound(trans) \
3056 - (((struct nft_trans_set *)trans->data)->bound)
3057
3058 struct nft_trans_chain {
3059 bool update;
3060 diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
3061 index 32ee65a30aff..1c6e6c0766ca 100644
3062 --- a/include/net/sctp/checksum.h
3063 +++ b/include/net/sctp/checksum.h
3064 @@ -61,7 +61,7 @@ static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
3065 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
3066 unsigned int offset)
3067 {
3068 - struct sctphdr *sh = sctp_hdr(skb);
3069 + struct sctphdr *sh = (struct sctphdr *)(skb->data + offset);
3070 const struct skb_checksum_ops ops = {
3071 .update = sctp_csum_update,
3072 .combine = sctp_csum_combine,
3073 diff --git a/include/net/sock.h b/include/net/sock.h
3074 index f43f935cb113..89d0d94d5db2 100644
3075 --- a/include/net/sock.h
3076 +++ b/include/net/sock.h
3077 @@ -710,6 +710,12 @@ static inline void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
3078 hlist_add_head_rcu(&sk->sk_node, list);
3079 }
3080
3081 +static inline void sk_add_node_tail_rcu(struct sock *sk, struct hlist_head *list)
3082 +{
3083 + sock_hold(sk);
3084 + hlist_add_tail_rcu(&sk->sk_node, list);
3085 +}
3086 +
3087 static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
3088 {
3089 hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
3090 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
3091 index 5fcce2f4209d..d53825b6fcd9 100644
3092 --- a/kernel/bpf/verifier.c
3093 +++ b/kernel/bpf/verifier.c
3094 @@ -3187,7 +3187,7 @@ do_sim:
3095 *dst_reg = *ptr_reg;
3096 }
3097 ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
3098 - if (!ptr_is_dst_reg)
3099 + if (!ptr_is_dst_reg && ret)
3100 *dst_reg = tmp;
3101 return !ret ? -EFAULT : 0;
3102 }
3103 diff --git a/kernel/cpu.c b/kernel/cpu.c
3104 index d1c6d152da89..47f695d80dd1 100644
3105 --- a/kernel/cpu.c
3106 +++ b/kernel/cpu.c
3107 @@ -555,6 +555,20 @@ static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
3108 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
3109 }
3110
3111 +static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
3112 +{
3113 + if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
3114 + return true;
3115 + /*
3116 + * When CPU hotplug is disabled, then taking the CPU down is not
3117 + * possible because takedown_cpu() and the architecture and
3118 + * subsystem specific mechanisms are not available. So the CPU
3119 + * which would be completely unplugged again needs to stay around
3120 + * in the current state.
3121 + */
3122 + return st->state <= CPUHP_BRINGUP_CPU;
3123 +}
3124 +
3125 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3126 enum cpuhp_state target)
3127 {
3128 @@ -565,8 +579,10 @@ static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
3129 st->state++;
3130 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
3131 if (ret) {
3132 - st->target = prev_state;
3133 - undo_cpu_up(cpu, st);
3134 + if (can_rollback_cpu(st)) {
3135 + st->target = prev_state;
3136 + undo_cpu_up(cpu, st);
3137 + }
3138 break;
3139 }
3140 }
3141 diff --git a/kernel/trace/trace_dynevent.c b/kernel/trace/trace_dynevent.c
3142 index dd1f43588d70..fa100ed3b4de 100644
3143 --- a/kernel/trace/trace_dynevent.c
3144 +++ b/kernel/trace/trace_dynevent.c
3145 @@ -74,7 +74,7 @@ int dyn_event_release(int argc, char **argv, struct dyn_event_operations *type)
3146 static int create_dyn_event(int argc, char **argv)
3147 {
3148 struct dyn_event_operations *ops;
3149 - int ret;
3150 + int ret = -ENODEV;
3151
3152 if (argv[0][0] == '-' || argv[0][0] == '!')
3153 return dyn_event_release(argc, argv, NULL);
3154 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
3155 index 977918d5d350..bbc4940f21af 100644
3156 --- a/kernel/watchdog.c
3157 +++ b/kernel/watchdog.c
3158 @@ -547,13 +547,15 @@ static void softlockup_start_all(void)
3159
3160 int lockup_detector_online_cpu(unsigned int cpu)
3161 {
3162 - watchdog_enable(cpu);
3163 + if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
3164 + watchdog_enable(cpu);
3165 return 0;
3166 }
3167
3168 int lockup_detector_offline_cpu(unsigned int cpu)
3169 {
3170 - watchdog_disable(cpu);
3171 + if (cpumask_test_cpu(cpu, &watchdog_allowed_mask))
3172 + watchdog_disable(cpu);
3173 return 0;
3174 }
3175
3176 diff --git a/lib/rhashtable.c b/lib/rhashtable.c
3177 index 852ffa5160f1..4edcf3310513 100644
3178 --- a/lib/rhashtable.c
3179 +++ b/lib/rhashtable.c
3180 @@ -416,8 +416,12 @@ static void rht_deferred_worker(struct work_struct *work)
3181 else if (tbl->nest)
3182 err = rhashtable_rehash_alloc(ht, tbl, tbl->size);
3183
3184 - if (!err)
3185 - err = rhashtable_rehash_table(ht);
3186 + if (!err || err == -EEXIST) {
3187 + int nerr;
3188 +
3189 + nerr = rhashtable_rehash_table(ht);
3190 + err = err ?: nerr;
3191 + }
3192
3193 mutex_unlock(&ht->mutex);
3194
3195 diff --git a/mm/debug.c b/mm/debug.c
3196 index 1611cf00a137..854d5f84047d 100644
3197 --- a/mm/debug.c
3198 +++ b/mm/debug.c
3199 @@ -79,7 +79,7 @@ void __dump_page(struct page *page, const char *reason)
3200 pr_warn("ksm ");
3201 else if (mapping) {
3202 pr_warn("%ps ", mapping->a_ops);
3203 - if (mapping->host->i_dentry.first) {
3204 + if (mapping->host && mapping->host->i_dentry.first) {
3205 struct dentry *dentry;
3206 dentry = container_of(mapping->host->i_dentry.first, struct dentry, d_u.d_alias);
3207 pr_warn("name:\"%pd\" ", dentry);
3208 diff --git a/mm/memory.c b/mm/memory.c
3209 index e8d69ade5acc..8d3f38fa530d 100644
3210 --- a/mm/memory.c
3211 +++ b/mm/memory.c
3212 @@ -1546,10 +1546,12 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3213 WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte)));
3214 goto out_unlock;
3215 }
3216 - entry = *pte;
3217 - goto out_mkwrite;
3218 - } else
3219 - goto out_unlock;
3220 + entry = pte_mkyoung(*pte);
3221 + entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3222 + if (ptep_set_access_flags(vma, addr, pte, entry, 1))
3223 + update_mmu_cache(vma, addr, pte);
3224 + }
3225 + goto out_unlock;
3226 }
3227
3228 /* Ok, finally just insert the thing.. */
3229 @@ -1558,7 +1560,6 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3230 else
3231 entry = pte_mkspecial(pfn_t_pte(pfn, prot));
3232
3233 -out_mkwrite:
3234 if (mkwrite) {
3235 entry = pte_mkyoung(entry);
3236 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3237 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
3238 index 1ad28323fb9f..11593a03c051 100644
3239 --- a/mm/memory_hotplug.c
3240 +++ b/mm/memory_hotplug.c
3241 @@ -1560,7 +1560,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
3242 {
3243 unsigned long pfn, nr_pages;
3244 long offlined_pages;
3245 - int ret, node;
3246 + int ret, node, nr_isolate_pageblock;
3247 unsigned long flags;
3248 unsigned long valid_start, valid_end;
3249 struct zone *zone;
3250 @@ -1586,10 +1586,11 @@ static int __ref __offline_pages(unsigned long start_pfn,
3251 ret = start_isolate_page_range(start_pfn, end_pfn,
3252 MIGRATE_MOVABLE,
3253 SKIP_HWPOISON | REPORT_FAILURE);
3254 - if (ret) {
3255 + if (ret < 0) {
3256 reason = "failure to isolate range";
3257 goto failed_removal;
3258 }
3259 + nr_isolate_pageblock = ret;
3260
3261 arg.start_pfn = start_pfn;
3262 arg.nr_pages = nr_pages;
3263 @@ -1642,8 +1643,16 @@ static int __ref __offline_pages(unsigned long start_pfn,
3264 /* Ok, all of our target is isolated.
3265 We cannot do rollback at this point. */
3266 offline_isolated_pages(start_pfn, end_pfn);
3267 - /* reset pagetype flags and makes migrate type to be MOVABLE */
3268 - undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
3269 +
3270 + /*
3271 + * Onlining will reset pagetype flags and makes migrate type
3272 + * MOVABLE, so just need to decrease the number of isolated
3273 + * pageblocks zone counter here.
3274 + */
3275 + spin_lock_irqsave(&zone->lock, flags);
3276 + zone->nr_isolate_pageblock -= nr_isolate_pageblock;
3277 + spin_unlock_irqrestore(&zone->lock, flags);
3278 +
3279 /* removal success */
3280 adjust_managed_page_count(pfn_to_page(start_pfn), -offlined_pages);
3281 zone->present_pages -= offlined_pages;
3282 @@ -1675,12 +1684,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
3283
3284 failed_removal_isolated:
3285 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
3286 + memory_notify(MEM_CANCEL_OFFLINE, &arg);
3287 failed_removal:
3288 pr_debug("memory offlining [mem %#010llx-%#010llx] failed due to %s\n",
3289 (unsigned long long) start_pfn << PAGE_SHIFT,
3290 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
3291 reason);
3292 - memory_notify(MEM_CANCEL_OFFLINE, &arg);
3293 /* pushback to free area */
3294 mem_hotplug_done();
3295 return ret;
3296 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
3297 index ee2bce59d2bf..6bc9786aad6e 100644
3298 --- a/mm/mempolicy.c
3299 +++ b/mm/mempolicy.c
3300 @@ -428,6 +428,13 @@ static inline bool queue_pages_required(struct page *page,
3301 return node_isset(nid, *qp->nmask) == !(flags & MPOL_MF_INVERT);
3302 }
3303
3304 +/*
3305 + * queue_pages_pmd() has three possible return values:
3306 + * 1 - pages are placed on the right node or queued successfully.
3307 + * 0 - THP was split.
3308 + * -EIO - is migration entry or MPOL_MF_STRICT was specified and an existing
3309 + * page was already on a node that does not follow the policy.
3310 + */
3311 static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3312 unsigned long end, struct mm_walk *walk)
3313 {
3314 @@ -437,7 +444,7 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3315 unsigned long flags;
3316
3317 if (unlikely(is_pmd_migration_entry(*pmd))) {
3318 - ret = 1;
3319 + ret = -EIO;
3320 goto unlock;
3321 }
3322 page = pmd_page(*pmd);
3323 @@ -454,8 +461,15 @@ static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
3324 ret = 1;
3325 flags = qp->flags;
3326 /* go to thp migration */
3327 - if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
3328 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
3329 + if (!vma_migratable(walk->vma)) {
3330 + ret = -EIO;
3331 + goto unlock;
3332 + }
3333 +
3334 migrate_page_add(page, qp->pagelist, flags);
3335 + } else
3336 + ret = -EIO;
3337 unlock:
3338 spin_unlock(ptl);
3339 out:
3340 @@ -480,8 +494,10 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
3341 ptl = pmd_trans_huge_lock(pmd, vma);
3342 if (ptl) {
3343 ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
3344 - if (ret)
3345 + if (ret > 0)
3346 return 0;
3347 + else if (ret < 0)
3348 + return ret;
3349 }
3350
3351 if (pmd_trans_unstable(pmd))
3352 @@ -502,11 +518,16 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
3353 continue;
3354 if (!queue_pages_required(page, qp))
3355 continue;
3356 - migrate_page_add(page, qp->pagelist, flags);
3357 + if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
3358 + if (!vma_migratable(vma))
3359 + break;
3360 + migrate_page_add(page, qp->pagelist, flags);
3361 + } else
3362 + break;
3363 }
3364 pte_unmap_unlock(pte - 1, ptl);
3365 cond_resched();
3366 - return 0;
3367 + return addr != end ? -EIO : 0;
3368 }
3369
3370 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
3371 @@ -576,7 +597,12 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
3372 unsigned long endvma = vma->vm_end;
3373 unsigned long flags = qp->flags;
3374
3375 - if (!vma_migratable(vma))
3376 + /*
3377 + * Need check MPOL_MF_STRICT to return -EIO if possible
3378 + * regardless of vma_migratable
3379 + */
3380 + if (!vma_migratable(vma) &&
3381 + !(flags & MPOL_MF_STRICT))
3382 return 1;
3383
3384 if (endvma > end)
3385 @@ -603,7 +629,7 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
3386 }
3387
3388 /* queue pages from current vma */
3389 - if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
3390 + if (flags & MPOL_MF_VALID)
3391 return 0;
3392 return 1;
3393 }
3394 diff --git a/mm/migrate.c b/mm/migrate.c
3395 index 181f5d2718a9..76e237b4610c 100644
3396 --- a/mm/migrate.c
3397 +++ b/mm/migrate.c
3398 @@ -248,10 +248,8 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
3399 pte = swp_entry_to_pte(entry);
3400 } else if (is_device_public_page(new)) {
3401 pte = pte_mkdevmap(pte);
3402 - flush_dcache_page(new);
3403 }
3404 - } else
3405 - flush_dcache_page(new);
3406 + }
3407
3408 #ifdef CONFIG_HUGETLB_PAGE
3409 if (PageHuge(new)) {
3410 @@ -995,6 +993,13 @@ static int move_to_new_page(struct page *newpage, struct page *page,
3411 */
3412 if (!PageMappingFlags(page))
3413 page->mapping = NULL;
3414 +
3415 + if (unlikely(is_zone_device_page(newpage))) {
3416 + if (is_device_public_page(newpage))
3417 + flush_dcache_page(newpage);
3418 + } else
3419 + flush_dcache_page(newpage);
3420 +
3421 }
3422 out:
3423 return rc;
3424 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
3425 index 0b9f577b1a2a..11dc3c0e8728 100644
3426 --- a/mm/page_alloc.c
3427 +++ b/mm/page_alloc.c
3428 @@ -8160,7 +8160,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
3429
3430 ret = start_isolate_page_range(pfn_max_align_down(start),
3431 pfn_max_align_up(end), migratetype, 0);
3432 - if (ret)
3433 + if (ret < 0)
3434 return ret;
3435
3436 /*
3437 diff --git a/mm/page_isolation.c b/mm/page_isolation.c
3438 index ce323e56b34d..019280712e1b 100644
3439 --- a/mm/page_isolation.c
3440 +++ b/mm/page_isolation.c
3441 @@ -59,7 +59,8 @@ static int set_migratetype_isolate(struct page *page, int migratetype, int isol_
3442 * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
3443 * We just check MOVABLE pages.
3444 */
3445 - if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype, flags))
3446 + if (!has_unmovable_pages(zone, page, arg.pages_found, migratetype,
3447 + isol_flags))
3448 ret = 0;
3449
3450 /*
3451 @@ -160,27 +161,36 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
3452 return NULL;
3453 }
3454
3455 -/*
3456 - * start_isolate_page_range() -- make page-allocation-type of range of pages
3457 - * to be MIGRATE_ISOLATE.
3458 - * @start_pfn: The lower PFN of the range to be isolated.
3459 - * @end_pfn: The upper PFN of the range to be isolated.
3460 - * @migratetype: migrate type to set in error recovery.
3461 +/**
3462 + * start_isolate_page_range() - make page-allocation-type of range of pages to
3463 + * be MIGRATE_ISOLATE.
3464 + * @start_pfn: The lower PFN of the range to be isolated.
3465 + * @end_pfn: The upper PFN of the range to be isolated.
3466 + * start_pfn/end_pfn must be aligned to pageblock_order.
3467 + * @migratetype: Migrate type to set in error recovery.
3468 + * @flags: The following flags are allowed (they can be combined in
3469 + * a bit mask)
3470 + * SKIP_HWPOISON - ignore hwpoison pages
3471 + * REPORT_FAILURE - report details about the failure to
3472 + * isolate the range
3473 *
3474 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
3475 * the range will never be allocated. Any free pages and pages freed in the
3476 - * future will not be allocated again.
3477 - *
3478 - * start_pfn/end_pfn must be aligned to pageblock_order.
3479 - * Return 0 on success and -EBUSY if any part of range cannot be isolated.
3480 + * future will not be allocated again. If specified range includes migrate types
3481 + * other than MOVABLE or CMA, this will fail with -EBUSY. For isolating all
3482 + * pages in the range finally, the caller have to free all pages in the range.
3483 + * test_page_isolated() can be used for test it.
3484 *
3485 * There is no high level synchronization mechanism that prevents two threads
3486 - * from trying to isolate overlapping ranges. If this happens, one thread
3487 + * from trying to isolate overlapping ranges. If this happens, one thread
3488 * will notice pageblocks in the overlapping range already set to isolate.
3489 * This happens in set_migratetype_isolate, and set_migratetype_isolate
3490 - * returns an error. We then clean up by restoring the migration type on
3491 - * pageblocks we may have modified and return -EBUSY to caller. This
3492 + * returns an error. We then clean up by restoring the migration type on
3493 + * pageblocks we may have modified and return -EBUSY to caller. This
3494 * prevents two threads from simultaneously working on overlapping ranges.
3495 + *
3496 + * Return: the number of isolated pageblocks on success and -EBUSY if any part
3497 + * of range cannot be isolated.
3498 */
3499 int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
3500 unsigned migratetype, int flags)
3501 @@ -188,6 +198,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
3502 unsigned long pfn;
3503 unsigned long undo_pfn;
3504 struct page *page;
3505 + int nr_isolate_pageblock = 0;
3506
3507 BUG_ON(!IS_ALIGNED(start_pfn, pageblock_nr_pages));
3508 BUG_ON(!IS_ALIGNED(end_pfn, pageblock_nr_pages));
3509 @@ -196,13 +207,15 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
3510 pfn < end_pfn;
3511 pfn += pageblock_nr_pages) {
3512 page = __first_valid_page(pfn, pageblock_nr_pages);
3513 - if (page &&
3514 - set_migratetype_isolate(page, migratetype, flags)) {
3515 - undo_pfn = pfn;
3516 - goto undo;
3517 + if (page) {
3518 + if (set_migratetype_isolate(page, migratetype, flags)) {
3519 + undo_pfn = pfn;
3520 + goto undo;
3521 + }
3522 + nr_isolate_pageblock++;
3523 }
3524 }
3525 - return 0;
3526 + return nr_isolate_pageblock;
3527 undo:
3528 for (pfn = start_pfn;
3529 pfn < undo_pfn;
3530 diff --git a/mm/slab.c b/mm/slab.c
3531 index 91c1863df93d..b3e74b56a468 100644
3532 --- a/mm/slab.c
3533 +++ b/mm/slab.c
3534 @@ -2111,6 +2111,8 @@ done:
3535 cachep->allocflags = __GFP_COMP;
3536 if (flags & SLAB_CACHE_DMA)
3537 cachep->allocflags |= GFP_DMA;
3538 + if (flags & SLAB_CACHE_DMA32)
3539 + cachep->allocflags |= GFP_DMA32;
3540 if (flags & SLAB_RECLAIM_ACCOUNT)
3541 cachep->allocflags |= __GFP_RECLAIMABLE;
3542 cachep->size = size;
3543 diff --git a/mm/slab.h b/mm/slab.h
3544 index 384105318779..27834ead5f14 100644
3545 --- a/mm/slab.h
3546 +++ b/mm/slab.h
3547 @@ -127,7 +127,8 @@ static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
3548
3549
3550 /* Legal flag mask for kmem_cache_create(), for various configurations */
3551 -#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
3552 +#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
3553 + SLAB_CACHE_DMA32 | SLAB_PANIC | \
3554 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
3555
3556 #if defined(CONFIG_DEBUG_SLAB)
3557 diff --git a/mm/slab_common.c b/mm/slab_common.c
3558 index f9d89c1b5977..333618231f8d 100644
3559 --- a/mm/slab_common.c
3560 +++ b/mm/slab_common.c
3561 @@ -53,7 +53,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
3562 SLAB_FAILSLAB | SLAB_KASAN)
3563
3564 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
3565 - SLAB_ACCOUNT)
3566 + SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
3567
3568 /*
3569 * Merge control. If this is set then no merging of slab caches will occur.
3570 diff --git a/mm/slub.c b/mm/slub.c
3571 index dc777761b6b7..1673100fd534 100644
3572 --- a/mm/slub.c
3573 +++ b/mm/slub.c
3574 @@ -3591,6 +3591,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
3575 if (s->flags & SLAB_CACHE_DMA)
3576 s->allocflags |= GFP_DMA;
3577
3578 + if (s->flags & SLAB_CACHE_DMA32)
3579 + s->allocflags |= GFP_DMA32;
3580 +
3581 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3582 s->allocflags |= __GFP_RECLAIMABLE;
3583
3584 @@ -5681,6 +5684,8 @@ static char *create_unique_id(struct kmem_cache *s)
3585 */
3586 if (s->flags & SLAB_CACHE_DMA)
3587 *p++ = 'd';
3588 + if (s->flags & SLAB_CACHE_DMA32)
3589 + *p++ = 'D';
3590 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3591 *p++ = 'a';
3592 if (s->flags & SLAB_CONSISTENCY_CHECKS)
3593 diff --git a/mm/sparse.c b/mm/sparse.c
3594 index 7ea5dc6c6b19..4763519d4399 100644
3595 --- a/mm/sparse.c
3596 +++ b/mm/sparse.c
3597 @@ -556,7 +556,7 @@ void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
3598 }
3599
3600 #ifdef CONFIG_MEMORY_HOTREMOVE
3601 -/* Mark all memory sections within the pfn range as online */
3602 +/* Mark all memory sections within the pfn range as offline */
3603 void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
3604 {
3605 unsigned long pfn;
3606 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
3607 index 2a7fb517d460..ccdc5c67d22a 100644
3608 --- a/net/bluetooth/l2cap_core.c
3609 +++ b/net/bluetooth/l2cap_core.c
3610 @@ -3337,16 +3337,22 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3611
3612 while (len >= L2CAP_CONF_OPT_SIZE) {
3613 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3614 + if (len < 0)
3615 + break;
3616
3617 hint = type & L2CAP_CONF_HINT;
3618 type &= L2CAP_CONF_MASK;
3619
3620 switch (type) {
3621 case L2CAP_CONF_MTU:
3622 + if (olen != 2)
3623 + break;
3624 mtu = val;
3625 break;
3626
3627 case L2CAP_CONF_FLUSH_TO:
3628 + if (olen != 2)
3629 + break;
3630 chan->flush_to = val;
3631 break;
3632
3633 @@ -3354,26 +3360,30 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3634 break;
3635
3636 case L2CAP_CONF_RFC:
3637 - if (olen == sizeof(rfc))
3638 - memcpy(&rfc, (void *) val, olen);
3639 + if (olen != sizeof(rfc))
3640 + break;
3641 + memcpy(&rfc, (void *) val, olen);
3642 break;
3643
3644 case L2CAP_CONF_FCS:
3645 + if (olen != 1)
3646 + break;
3647 if (val == L2CAP_FCS_NONE)
3648 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3649 break;
3650
3651 case L2CAP_CONF_EFS:
3652 - if (olen == sizeof(efs)) {
3653 - remote_efs = 1;
3654 - memcpy(&efs, (void *) val, olen);
3655 - }
3656 + if (olen != sizeof(efs))
3657 + break;
3658 + remote_efs = 1;
3659 + memcpy(&efs, (void *) val, olen);
3660 break;
3661
3662 case L2CAP_CONF_EWS:
3663 + if (olen != 2)
3664 + break;
3665 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3666 return -ECONNREFUSED;
3667 -
3668 set_bit(FLAG_EXT_CTRL, &chan->flags);
3669 set_bit(CONF_EWS_RECV, &chan->conf_state);
3670 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3671 @@ -3383,7 +3393,6 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data
3672 default:
3673 if (hint)
3674 break;
3675 -
3676 result = L2CAP_CONF_UNKNOWN;
3677 *((u8 *) ptr++) = type;
3678 break;
3679 @@ -3548,58 +3557,65 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3680
3681 while (len >= L2CAP_CONF_OPT_SIZE) {
3682 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3683 + if (len < 0)
3684 + break;
3685
3686 switch (type) {
3687 case L2CAP_CONF_MTU:
3688 + if (olen != 2)
3689 + break;
3690 if (val < L2CAP_DEFAULT_MIN_MTU) {
3691 *result = L2CAP_CONF_UNACCEPT;
3692 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3693 } else
3694 chan->imtu = val;
3695 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3696 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3697 + endptr - ptr);
3698 break;
3699
3700 case L2CAP_CONF_FLUSH_TO:
3701 + if (olen != 2)
3702 + break;
3703 chan->flush_to = val;
3704 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3705 - 2, chan->flush_to, endptr - ptr);
3706 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3707 + chan->flush_to, endptr - ptr);
3708 break;
3709
3710 case L2CAP_CONF_RFC:
3711 - if (olen == sizeof(rfc))
3712 - memcpy(&rfc, (void *)val, olen);
3713 -
3714 + if (olen != sizeof(rfc))
3715 + break;
3716 + memcpy(&rfc, (void *)val, olen);
3717 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3718 rfc.mode != chan->mode)
3719 return -ECONNREFUSED;
3720 -
3721 chan->fcs = 0;
3722 -
3723 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3724 - sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3725 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3726 + (unsigned long) &rfc, endptr - ptr);
3727 break;
3728
3729 case L2CAP_CONF_EWS:
3730 + if (olen != 2)
3731 + break;
3732 chan->ack_win = min_t(u16, val, chan->ack_win);
3733 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3734 chan->tx_win, endptr - ptr);
3735 break;
3736
3737 case L2CAP_CONF_EFS:
3738 - if (olen == sizeof(efs)) {
3739 - memcpy(&efs, (void *)val, olen);
3740 -
3741 - if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3742 - efs.stype != L2CAP_SERV_NOTRAFIC &&
3743 - efs.stype != chan->local_stype)
3744 - return -ECONNREFUSED;
3745 -
3746 - l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3747 - (unsigned long) &efs, endptr - ptr);
3748 - }
3749 + if (olen != sizeof(efs))
3750 + break;
3751 + memcpy(&efs, (void *)val, olen);
3752 + if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3753 + efs.stype != L2CAP_SERV_NOTRAFIC &&
3754 + efs.stype != chan->local_stype)
3755 + return -ECONNREFUSED;
3756 + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3757 + (unsigned long) &efs, endptr - ptr);
3758 break;
3759
3760 case L2CAP_CONF_FCS:
3761 + if (olen != 1)
3762 + break;
3763 if (*result == L2CAP_CONF_PENDING)
3764 if (val == L2CAP_FCS_NONE)
3765 set_bit(CONF_RECV_NO_FCS,
3766 @@ -3728,13 +3744,18 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3767
3768 while (len >= L2CAP_CONF_OPT_SIZE) {
3769 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3770 + if (len < 0)
3771 + break;
3772
3773 switch (type) {
3774 case L2CAP_CONF_RFC:
3775 - if (olen == sizeof(rfc))
3776 - memcpy(&rfc, (void *)val, olen);
3777 + if (olen != sizeof(rfc))
3778 + break;
3779 + memcpy(&rfc, (void *)val, olen);
3780 break;
3781 case L2CAP_CONF_EWS:
3782 + if (olen != 2)
3783 + break;
3784 txwin_ext = val;
3785 break;
3786 }
3787 diff --git a/net/core/datagram.c b/net/core/datagram.c
3788 index b2651bb6d2a3..e657289db4ac 100644
3789 --- a/net/core/datagram.c
3790 +++ b/net/core/datagram.c
3791 @@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
3792 break;
3793
3794 sk_busy_loop(sk, flags & MSG_DONTWAIT);
3795 - } while (!skb_queue_empty(&sk->sk_receive_queue));
3796 + } while (sk->sk_receive_queue.prev != *last);
3797
3798 error = -EAGAIN;
3799
3800 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
3801 index 73ad7607dcd1..aec26584f0ca 100644
3802 --- a/net/core/net-sysfs.c
3803 +++ b/net/core/net-sysfs.c
3804 @@ -934,6 +934,8 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
3805 if (error)
3806 return error;
3807
3808 + dev_hold(queue->dev);
3809 +
3810 if (dev->sysfs_rx_queue_group) {
3811 error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
3812 if (error) {
3813 @@ -943,7 +945,6 @@ static int rx_queue_add_kobject(struct net_device *dev, int index)
3814 }
3815
3816 kobject_uevent(kobj, KOBJ_ADD);
3817 - dev_hold(queue->dev);
3818
3819 return error;
3820 }
3821 @@ -1472,6 +1473,8 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
3822 if (error)
3823 return error;
3824
3825 + dev_hold(queue->dev);
3826 +
3827 #ifdef CONFIG_BQL
3828 error = sysfs_create_group(kobj, &dql_group);
3829 if (error) {
3830 @@ -1481,7 +1484,6 @@ static int netdev_queue_add_kobject(struct net_device *dev, int index)
3831 #endif
3832
3833 kobject_uevent(kobj, KOBJ_ADD);
3834 - dev_hold(queue->dev);
3835
3836 return 0;
3837 }
3838 diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
3839 index d5740bad5b18..57d84e9b7b6f 100644
3840 --- a/net/dccp/ipv6.c
3841 +++ b/net/dccp/ipv6.c
3842 @@ -436,8 +436,8 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
3843 newnp->ipv6_mc_list = NULL;
3844 newnp->ipv6_ac_list = NULL;
3845 newnp->ipv6_fl_list = NULL;
3846 - newnp->mcast_oif = inet6_iif(skb);
3847 - newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3848 + newnp->mcast_oif = inet_iif(skb);
3849 + newnp->mcast_hops = ip_hdr(skb)->ttl;
3850
3851 /*
3852 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3853 diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
3854 index 17c455ff69ff..7858fa9ea103 100644
3855 --- a/net/ipv6/ila/ila_xlat.c
3856 +++ b/net/ipv6/ila/ila_xlat.c
3857 @@ -420,6 +420,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
3858
3859 done:
3860 rhashtable_walk_stop(&iter);
3861 + rhashtable_walk_exit(&iter);
3862 return ret;
3863 }
3864
3865 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
3866 index 8dad1d690b78..0086acc16f3c 100644
3867 --- a/net/ipv6/route.c
3868 +++ b/net/ipv6/route.c
3869 @@ -1040,14 +1040,20 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
3870 struct rt6_info *nrt;
3871
3872 if (!fib6_info_hold_safe(rt))
3873 - return NULL;
3874 + goto fallback;
3875
3876 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
3877 - if (nrt)
3878 - ip6_rt_copy_init(nrt, rt);
3879 - else
3880 + if (!nrt) {
3881 fib6_info_release(rt);
3882 + goto fallback;
3883 + }
3884
3885 + ip6_rt_copy_init(nrt, rt);
3886 + return nrt;
3887 +
3888 +fallback:
3889 + nrt = dev_net(dev)->ipv6.ip6_null_entry;
3890 + dst_hold(&nrt->dst);
3891 return nrt;
3892 }
3893
3894 @@ -1096,10 +1102,6 @@ restart:
3895 dst_hold(&rt->dst);
3896 } else {
3897 rt = ip6_create_rt_rcu(f6i);
3898 - if (!rt) {
3899 - rt = net->ipv6.ip6_null_entry;
3900 - dst_hold(&rt->dst);
3901 - }
3902 }
3903
3904 rcu_read_unlock();
3905 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3906 index b81eb7cb815e..8505d96483d5 100644
3907 --- a/net/ipv6/tcp_ipv6.c
3908 +++ b/net/ipv6/tcp_ipv6.c
3909 @@ -1112,11 +1112,11 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
3910 newnp->ipv6_fl_list = NULL;
3911 newnp->pktoptions = NULL;
3912 newnp->opt = NULL;
3913 - newnp->mcast_oif = tcp_v6_iif(skb);
3914 - newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
3915 - newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
3916 + newnp->mcast_oif = inet_iif(skb);
3917 + newnp->mcast_hops = ip_hdr(skb)->ttl;
3918 + newnp->rcv_flowinfo = 0;
3919 if (np->repflow)
3920 - newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
3921 + newnp->flow_label = 0;
3922
3923 /*
3924 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
3925 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3926 index 4893f248dfdc..e1724f9d8b9d 100644
3927 --- a/net/netfilter/nf_tables_api.c
3928 +++ b/net/netfilter/nf_tables_api.c
3929 @@ -127,7 +127,7 @@ static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set)
3930 list_for_each_entry_reverse(trans, &net->nft.commit_list, list) {
3931 if (trans->msg_type == NFT_MSG_NEWSET &&
3932 nft_trans_set(trans) == set) {
3933 - nft_trans_set_bound(trans) = true;
3934 + set->bound = true;
3935 break;
3936 }
3937 }
3938 @@ -6617,8 +6617,7 @@ static void nf_tables_abort_release(struct nft_trans *trans)
3939 nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans));
3940 break;
3941 case NFT_MSG_NEWSET:
3942 - if (!nft_trans_set_bound(trans))
3943 - nft_set_destroy(nft_trans_set(trans));
3944 + nft_set_destroy(nft_trans_set(trans));
3945 break;
3946 case NFT_MSG_NEWSETELEM:
3947 nft_set_elem_destroy(nft_trans_elem_set(trans),
3948 @@ -6691,8 +6690,11 @@ static int __nf_tables_abort(struct net *net)
3949 break;
3950 case NFT_MSG_NEWSET:
3951 trans->ctx.table->use--;
3952 - if (!nft_trans_set_bound(trans))
3953 - list_del_rcu(&nft_trans_set(trans)->list);
3954 + if (nft_trans_set(trans)->bound) {
3955 + nft_trans_destroy(trans);
3956 + break;
3957 + }
3958 + list_del_rcu(&nft_trans_set(trans)->list);
3959 break;
3960 case NFT_MSG_DELSET:
3961 trans->ctx.table->use++;
3962 @@ -6700,8 +6702,11 @@ static int __nf_tables_abort(struct net *net)
3963 nft_trans_destroy(trans);
3964 break;
3965 case NFT_MSG_NEWSETELEM:
3966 + if (nft_trans_elem_set(trans)->bound) {
3967 + nft_trans_destroy(trans);
3968 + break;
3969 + }
3970 te = (struct nft_trans_elem *)trans->data;
3971 -
3972 te->set->ops->remove(net, te->set, &te->elem);
3973 atomic_dec(&te->set->nelems);
3974 break;
3975 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
3976 index 25eeb6d2a75a..f0ec068e1d02 100644
3977 --- a/net/netlink/genetlink.c
3978 +++ b/net/netlink/genetlink.c
3979 @@ -366,7 +366,7 @@ int genl_register_family(struct genl_family *family)
3980 start, end + 1, GFP_KERNEL);
3981 if (family->id < 0) {
3982 err = family->id;
3983 - goto errout_locked;
3984 + goto errout_free;
3985 }
3986
3987 err = genl_validate_assign_mc_groups(family);
3988 @@ -385,6 +385,7 @@ int genl_register_family(struct genl_family *family)
3989
3990 errout_remove:
3991 idr_remove(&genl_fam_idr, family->id);
3992 +errout_free:
3993 kfree(family->attrbuf);
3994 errout_locked:
3995 genl_unlock_all();
3996 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3997 index 1cd1d83a4be0..8406bf11eef4 100644
3998 --- a/net/packet/af_packet.c
3999 +++ b/net/packet/af_packet.c
4000 @@ -3245,7 +3245,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
4001 }
4002
4003 mutex_lock(&net->packet.sklist_lock);
4004 - sk_add_node_rcu(sk, &net->packet.sklist);
4005 + sk_add_node_tail_rcu(sk, &net->packet.sklist);
4006 mutex_unlock(&net->packet.sklist_lock);
4007
4008 preempt_disable();
4009 @@ -4211,7 +4211,7 @@ static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4010 struct pgv *pg_vec;
4011 int i;
4012
4013 - pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4014 + pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4015 if (unlikely(!pg_vec))
4016 goto out;
4017
4018 diff --git a/net/rose/rose_subr.c b/net/rose/rose_subr.c
4019 index 7ca57741b2fb..7849f286bb93 100644
4020 --- a/net/rose/rose_subr.c
4021 +++ b/net/rose/rose_subr.c
4022 @@ -105,16 +105,17 @@ void rose_write_internal(struct sock *sk, int frametype)
4023 struct sk_buff *skb;
4024 unsigned char *dptr;
4025 unsigned char lci1, lci2;
4026 - char buffer[100];
4027 - int len, faclen = 0;
4028 + int maxfaclen = 0;
4029 + int len, faclen;
4030 + int reserve;
4031
4032 - len = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + ROSE_MIN_LEN + 1;
4033 + reserve = AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1;
4034 + len = ROSE_MIN_LEN;
4035
4036 switch (frametype) {
4037 case ROSE_CALL_REQUEST:
4038 len += 1 + ROSE_ADDR_LEN + ROSE_ADDR_LEN;
4039 - faclen = rose_create_facilities(buffer, rose);
4040 - len += faclen;
4041 + maxfaclen = 256;
4042 break;
4043 case ROSE_CALL_ACCEPTED:
4044 case ROSE_CLEAR_REQUEST:
4045 @@ -123,15 +124,16 @@ void rose_write_internal(struct sock *sk, int frametype)
4046 break;
4047 }
4048
4049 - if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
4050 + skb = alloc_skb(reserve + len + maxfaclen, GFP_ATOMIC);
4051 + if (!skb)
4052 return;
4053
4054 /*
4055 * Space for AX.25 header and PID.
4056 */
4057 - skb_reserve(skb, AX25_BPQ_HEADER_LEN + AX25_MAX_HEADER_LEN + 1);
4058 + skb_reserve(skb, reserve);
4059
4060 - dptr = skb_put(skb, skb_tailroom(skb));
4061 + dptr = skb_put(skb, len);
4062
4063 lci1 = (rose->lci >> 8) & 0x0F;
4064 lci2 = (rose->lci >> 0) & 0xFF;
4065 @@ -146,7 +148,8 @@ void rose_write_internal(struct sock *sk, int frametype)
4066 dptr += ROSE_ADDR_LEN;
4067 memcpy(dptr, &rose->source_addr, ROSE_ADDR_LEN);
4068 dptr += ROSE_ADDR_LEN;
4069 - memcpy(dptr, buffer, faclen);
4070 + faclen = rose_create_facilities(dptr, rose);
4071 + skb_put(skb, faclen);
4072 dptr += faclen;
4073 break;
4074
4075 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
4076 index a2771b3b3c14..5f68420b4b0d 100644
4077 --- a/net/sctp/socket.c
4078 +++ b/net/sctp/socket.c
4079 @@ -999,7 +999,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4080 if (unlikely(addrs_size <= 0))
4081 return -EINVAL;
4082
4083 - kaddrs = vmemdup_user(addrs, addrs_size);
4084 + kaddrs = memdup_user(addrs, addrs_size);
4085 if (unlikely(IS_ERR(kaddrs)))
4086 return PTR_ERR(kaddrs);
4087
4088 @@ -1007,7 +1007,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4089 addr_buf = kaddrs;
4090 while (walk_size < addrs_size) {
4091 if (walk_size + sizeof(sa_family_t) > addrs_size) {
4092 - kvfree(kaddrs);
4093 + kfree(kaddrs);
4094 return -EINVAL;
4095 }
4096
4097 @@ -1018,7 +1018,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4098 * causes the address buffer to overflow return EINVAL.
4099 */
4100 if (!af || (walk_size + af->sockaddr_len) > addrs_size) {
4101 - kvfree(kaddrs);
4102 + kfree(kaddrs);
4103 return -EINVAL;
4104 }
4105 addrcnt++;
4106 @@ -1054,7 +1054,7 @@ static int sctp_setsockopt_bindx(struct sock *sk,
4107 }
4108
4109 out:
4110 - kvfree(kaddrs);
4111 + kfree(kaddrs);
4112
4113 return err;
4114 }
4115 @@ -1329,7 +1329,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
4116 if (unlikely(addrs_size <= 0))
4117 return -EINVAL;
4118
4119 - kaddrs = vmemdup_user(addrs, addrs_size);
4120 + kaddrs = memdup_user(addrs, addrs_size);
4121 if (unlikely(IS_ERR(kaddrs)))
4122 return PTR_ERR(kaddrs);
4123
4124 @@ -1349,7 +1349,7 @@ static int __sctp_setsockopt_connectx(struct sock *sk,
4125 err = __sctp_connect(sk, kaddrs, addrs_size, flags, assoc_id);
4126
4127 out_free:
4128 - kvfree(kaddrs);
4129 + kfree(kaddrs);
4130
4131 return err;
4132 }
4133 diff --git a/net/tipc/net.c b/net/tipc/net.c
4134 index f076edb74338..7ce1e86b024f 100644
4135 --- a/net/tipc/net.c
4136 +++ b/net/tipc/net.c
4137 @@ -163,12 +163,9 @@ void tipc_sched_net_finalize(struct net *net, u32 addr)
4138
4139 void tipc_net_stop(struct net *net)
4140 {
4141 - u32 self = tipc_own_addr(net);
4142 -
4143 - if (!self)
4144 + if (!tipc_own_id(net))
4145 return;
4146
4147 - tipc_nametbl_withdraw(net, TIPC_CFG_SRV, self, self, self);
4148 rtnl_lock();
4149 tipc_bearer_stop(net);
4150 tipc_node_stop(net);
4151 diff --git a/net/tipc/socket.c b/net/tipc/socket.c
4152 index 139694f2c576..4dca9161f99b 100644
4153 --- a/net/tipc/socket.c
4154 +++ b/net/tipc/socket.c
4155 @@ -2349,6 +2349,16 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
4156 return 0;
4157 }
4158
4159 +static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
4160 +{
4161 + if (addr->family != AF_TIPC)
4162 + return false;
4163 + if (addr->addrtype == TIPC_SERVICE_RANGE)
4164 + return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
4165 + return (addr->addrtype == TIPC_SERVICE_ADDR ||
4166 + addr->addrtype == TIPC_SOCKET_ADDR);
4167 +}
4168 +
4169 /**
4170 * tipc_connect - establish a connection to another TIPC port
4171 * @sock: socket structure
4172 @@ -2384,18 +2394,18 @@ static int tipc_connect(struct socket *sock, struct sockaddr *dest,
4173 if (!tipc_sk_type_connectionless(sk))
4174 res = -EINVAL;
4175 goto exit;
4176 - } else if (dst->family != AF_TIPC) {
4177 - res = -EINVAL;
4178 }
4179 - if (dst->addrtype != TIPC_ADDR_ID && dst->addrtype != TIPC_ADDR_NAME)
4180 + if (!tipc_sockaddr_is_sane(dst)) {
4181 res = -EINVAL;
4182 - if (res)
4183 goto exit;
4184 -
4185 + }
4186 /* DGRAM/RDM connect(), just save the destaddr */
4187 if (tipc_sk_type_connectionless(sk)) {
4188 memcpy(&tsk->peer, dest, destlen);
4189 goto exit;
4190 + } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
4191 + res = -EINVAL;
4192 + goto exit;
4193 }
4194
4195 previous = sk->sk_state;
4196 diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
4197 index a457c0fbbef1..f5edb213d760 100644
4198 --- a/net/tipc/topsrv.c
4199 +++ b/net/tipc/topsrv.c
4200 @@ -365,6 +365,7 @@ static int tipc_conn_rcv_sub(struct tipc_topsrv *srv,
4201 struct tipc_subscription *sub;
4202
4203 if (tipc_sub_read(s, filter) & TIPC_SUB_CANCEL) {
4204 + s->filter &= __constant_ntohl(~TIPC_SUB_CANCEL);
4205 tipc_conn_delete_sub(con, s);
4206 return 0;
4207 }
4208 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
4209 index 26bf886bd168..588a3bc29ecc 100644
4210 --- a/scripts/mod/modpost.c
4211 +++ b/scripts/mod/modpost.c
4212 @@ -640,7 +640,7 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
4213 info->sechdrs[sym->st_shndx].sh_offset -
4214 (info->hdr->e_type != ET_REL ?
4215 info->sechdrs[sym->st_shndx].sh_addr : 0);
4216 - crc = *crcp;
4217 + crc = TO_NATIVE(*crcp);
4218 }
4219 sym_update_crc(symname + strlen("__crc_"), mod, crc,
4220 export);
4221 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
4222 index 467039b342b5..41abb8bd466a 100644
4223 --- a/sound/core/oss/pcm_oss.c
4224 +++ b/sound/core/oss/pcm_oss.c
4225 @@ -940,6 +940,28 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
4226 oss_frame_size = snd_pcm_format_physical_width(params_format(params)) *
4227 params_channels(params) / 8;
4228
4229 + err = snd_pcm_oss_period_size(substream, params, sparams);
4230 + if (err < 0)
4231 + goto failure;
4232 +
4233 + n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
4234 + err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
4235 + if (err < 0)
4236 + goto failure;
4237 +
4238 + err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
4239 + runtime->oss.periods, NULL);
4240 + if (err < 0)
4241 + goto failure;
4242 +
4243 + snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
4244 +
4245 + err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams);
4246 + if (err < 0) {
4247 + pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
4248 + goto failure;
4249 + }
4250 +
4251 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
4252 snd_pcm_oss_plugin_clear(substream);
4253 if (!direct) {
4254 @@ -974,27 +996,6 @@ static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
4255 }
4256 #endif
4257
4258 - err = snd_pcm_oss_period_size(substream, params, sparams);
4259 - if (err < 0)
4260 - goto failure;
4261 -
4262 - n = snd_pcm_plug_slave_size(substream, runtime->oss.period_bytes / oss_frame_size);
4263 - err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, n, NULL);
4264 - if (err < 0)
4265 - goto failure;
4266 -
4267 - err = snd_pcm_hw_param_near(substream, sparams, SNDRV_PCM_HW_PARAM_PERIODS,
4268 - runtime->oss.periods, NULL);
4269 - if (err < 0)
4270 - goto failure;
4271 -
4272 - snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
4273 -
4274 - if ((err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, sparams)) < 0) {
4275 - pcm_dbg(substream->pcm, "HW_PARAMS failed: %i\n", err);
4276 - goto failure;
4277 - }
4278 -
4279 if (runtime->oss.trigger) {
4280 sw_params->start_threshold = 1;
4281 } else {
4282 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
4283 index 818dff1de545..b67f6fe08a1b 100644
4284 --- a/sound/core/pcm_native.c
4285 +++ b/sound/core/pcm_native.c
4286 @@ -1426,8 +1426,15 @@ static int snd_pcm_pause(struct snd_pcm_substream *substream, int push)
4287 static int snd_pcm_pre_suspend(struct snd_pcm_substream *substream, int state)
4288 {
4289 struct snd_pcm_runtime *runtime = substream->runtime;
4290 - if (runtime->status->state == SNDRV_PCM_STATE_SUSPENDED)
4291 + switch (runtime->status->state) {
4292 + case SNDRV_PCM_STATE_SUSPENDED:
4293 return -EBUSY;
4294 + /* unresumable PCM state; return -EBUSY for skipping suspend */
4295 + case SNDRV_PCM_STATE_OPEN:
4296 + case SNDRV_PCM_STATE_SETUP:
4297 + case SNDRV_PCM_STATE_DISCONNECTED:
4298 + return -EBUSY;
4299 + }
4300 runtime->trigger_master = substream;
4301 return 0;
4302 }
4303 diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
4304 index ee601d7f0926..c0690d1ecd55 100644
4305 --- a/sound/core/rawmidi.c
4306 +++ b/sound/core/rawmidi.c
4307 @@ -30,6 +30,7 @@
4308 #include <linux/module.h>
4309 #include <linux/delay.h>
4310 #include <linux/mm.h>
4311 +#include <linux/nospec.h>
4312 #include <sound/rawmidi.h>
4313 #include <sound/info.h>
4314 #include <sound/control.h>
4315 @@ -601,6 +602,7 @@ static int __snd_rawmidi_info_select(struct snd_card *card,
4316 return -ENXIO;
4317 if (info->stream < 0 || info->stream > 1)
4318 return -EINVAL;
4319 + info->stream = array_index_nospec(info->stream, 2);
4320 pstr = &rmidi->streams[info->stream];
4321 if (pstr->substream_count == 0)
4322 return -ENOENT;
4323 diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
4324 index 278ebb993122..c93945917235 100644
4325 --- a/sound/core/seq/oss/seq_oss_synth.c
4326 +++ b/sound/core/seq/oss/seq_oss_synth.c
4327 @@ -617,13 +617,14 @@ int
4328 snd_seq_oss_synth_make_info(struct seq_oss_devinfo *dp, int dev, struct synth_info *inf)
4329 {
4330 struct seq_oss_synth *rec;
4331 + struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
4332
4333 - if (dev < 0 || dev >= dp->max_synthdev)
4334 + if (!info)
4335 return -ENXIO;
4336
4337 - if (dp->synths[dev].is_midi) {
4338 + if (info->is_midi) {
4339 struct midi_info minf;
4340 - snd_seq_oss_midi_make_info(dp, dp->synths[dev].midi_mapped, &minf);
4341 + snd_seq_oss_midi_make_info(dp, info->midi_mapped, &minf);
4342 inf->synth_type = SYNTH_TYPE_MIDI;
4343 inf->synth_subtype = 0;
4344 inf->nr_voices = 16;
4345 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4346 index 3a8568d3928f..00c27b3b8c14 100644
4347 --- a/sound/pci/hda/patch_realtek.c
4348 +++ b/sound/pci/hda/patch_realtek.c
4349 @@ -5489,7 +5489,7 @@ static void alc_headset_btn_callback(struct hda_codec *codec,
4350 jack->jack->button_state = report;
4351 }
4352
4353 -static void alc_fixup_headset_jack(struct hda_codec *codec,
4354 +static void alc295_fixup_chromebook(struct hda_codec *codec,
4355 const struct hda_fixup *fix, int action)
4356 {
4357
4358 @@ -5499,6 +5499,16 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
4359 alc_headset_btn_callback);
4360 snd_hda_jack_add_kctl(codec, 0x55, "Headset Jack", false,
4361 SND_JACK_HEADSET, alc_headset_btn_keymap);
4362 + switch (codec->core.vendor_id) {
4363 + case 0x10ec0295:
4364 + alc_update_coef_idx(codec, 0x4a, 0x8000, 1 << 15); /* Reset HP JD */
4365 + alc_update_coef_idx(codec, 0x4a, 0x8000, 0 << 15);
4366 + break;
4367 + case 0x10ec0236:
4368 + alc_update_coef_idx(codec, 0x1b, 0x8000, 1 << 15); /* Reset HP JD */
4369 + alc_update_coef_idx(codec, 0x1b, 0x8000, 0 << 15);
4370 + break;
4371 + }
4372 break;
4373 case HDA_FIXUP_ACT_INIT:
4374 switch (codec->core.vendor_id) {
4375 @@ -5668,10 +5678,16 @@ enum {
4376 ALC294_FIXUP_ASUS_MIC,
4377 ALC294_FIXUP_ASUS_HEADSET_MIC,
4378 ALC294_FIXUP_ASUS_SPK,
4379 - ALC225_FIXUP_HEADSET_JACK,
4380 ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
4381 ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
4382 ALC255_FIXUP_ACER_HEADSET_MIC,
4383 + ALC295_FIXUP_CHROME_BOOK,
4384 + ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE,
4385 + ALC225_FIXUP_WYSE_AUTO_MUTE,
4386 + ALC225_FIXUP_WYSE_DISABLE_MIC_VREF,
4387 + ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
4388 + ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4389 + ALC299_FIXUP_PREDATOR_SPK,
4390 };
4391
4392 static const struct hda_fixup alc269_fixups[] = {
4393 @@ -6614,9 +6630,9 @@ static const struct hda_fixup alc269_fixups[] = {
4394 .chained = true,
4395 .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
4396 },
4397 - [ALC225_FIXUP_HEADSET_JACK] = {
4398 + [ALC295_FIXUP_CHROME_BOOK] = {
4399 .type = HDA_FIXUP_FUNC,
4400 - .v.func = alc_fixup_headset_jack,
4401 + .v.func = alc295_fixup_chromebook,
4402 },
4403 [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = {
4404 .type = HDA_FIXUP_PINS,
4405 @@ -6648,6 +6664,54 @@ static const struct hda_fixup alc269_fixups[] = {
4406 .chained = true,
4407 .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
4408 },
4409 + [ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE] = {
4410 + .type = HDA_FIXUP_PINS,
4411 + .v.pins = (const struct hda_pintbl[]) {
4412 + { 0x16, 0x01011020 }, /* Rear Line out */
4413 + { 0x19, 0x01a1913c }, /* use as Front headset mic, without its own jack detect */
4414 + { }
4415 + },
4416 + .chained = true,
4417 + .chain_id = ALC225_FIXUP_WYSE_AUTO_MUTE
4418 + },
4419 + [ALC225_FIXUP_WYSE_AUTO_MUTE] = {
4420 + .type = HDA_FIXUP_FUNC,
4421 + .v.func = alc_fixup_auto_mute_via_amp,
4422 + .chained = true,
4423 + .chain_id = ALC225_FIXUP_WYSE_DISABLE_MIC_VREF
4424 + },
4425 + [ALC225_FIXUP_WYSE_DISABLE_MIC_VREF] = {
4426 + .type = HDA_FIXUP_FUNC,
4427 + .v.func = alc_fixup_disable_mic_vref,
4428 + .chained = true,
4429 + .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
4430 + },
4431 + [ALC286_FIXUP_ACER_AIO_HEADSET_MIC] = {
4432 + .type = HDA_FIXUP_VERBS,
4433 + .v.verbs = (const struct hda_verb[]) {
4434 + { 0x20, AC_VERB_SET_COEF_INDEX, 0x4f },
4435 + { 0x20, AC_VERB_SET_PROC_COEF, 0x5029 },
4436 + { }
4437 + },
4438 + .chained = true,
4439 + .chain_id = ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE
4440 + },
4441 + [ALC256_FIXUP_ASUS_MIC_NO_PRESENCE] = {
4442 + .type = HDA_FIXUP_PINS,
4443 + .v.pins = (const struct hda_pintbl[]) {
4444 + { 0x19, 0x04a11120 }, /* use as headset mic, without its own jack detect */
4445 + { }
4446 + },
4447 + .chained = true,
4448 + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
4449 + },
4450 + [ALC299_FIXUP_PREDATOR_SPK] = {
4451 + .type = HDA_FIXUP_PINS,
4452 + .v.pins = (const struct hda_pintbl[]) {
4453 + { 0x21, 0x90170150 }, /* use as headset mic, without its own jack detect */
4454 + { }
4455 + }
4456 + },
4457 };
4458
4459 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4460 @@ -6664,9 +6728,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4461 SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
4462 SND_PCI_QUIRK(0x1025, 0x102b, "Acer Aspire C24-860", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4463 SND_PCI_QUIRK(0x1025, 0x106d, "Acer Cloudbook 14", ALC283_FIXUP_CHROME_BOOK),
4464 - SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4465 - SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4466 - SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
4467 + SND_PCI_QUIRK(0x1025, 0x1099, "Acer Aspire E5-523G", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
4468 + SND_PCI_QUIRK(0x1025, 0x110e, "Acer Aspire ES1-432", ALC255_FIXUP_ACER_MIC_NO_PRESENCE),
4469 + SND_PCI_QUIRK(0x1025, 0x1246, "Acer Predator Helios 500", ALC299_FIXUP_PREDATOR_SPK),
4470 + SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4471 + SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4472 + SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4473 + SND_PCI_QUIRK(0x1025, 0x1308, "Acer Aspire Z24-890", ALC286_FIXUP_ACER_AIO_HEADSET_MIC),
4474 SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
4475 SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
4476 SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
4477 @@ -6712,6 +6780,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4478 SND_PCI_QUIRK(0x1028, 0x0871, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4479 SND_PCI_QUIRK(0x1028, 0x0872, "Dell Precision 3630", ALC255_FIXUP_DELL_HEADSET_MIC),
4480 SND_PCI_QUIRK(0x1028, 0x0873, "Dell Precision 3930", ALC255_FIXUP_DUMMY_LINEOUT_VERB),
4481 + SND_PCI_QUIRK(0x1028, 0x08ad, "Dell WYSE AIO", ALC225_FIXUP_DELL_WYSE_AIO_MIC_NO_PRESENCE),
4482 + SND_PCI_QUIRK(0x1028, 0x08ae, "Dell WYSE NB", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE),
4483 SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
4484 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4485 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4486 @@ -7060,7 +7130,8 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4487 {.id = ALC255_FIXUP_DUMMY_LINEOUT_VERB, .name = "alc255-dummy-lineout"},
4488 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
4489 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
4490 - {.id = ALC225_FIXUP_HEADSET_JACK, .name = "alc-sense-combo"},
4491 + {.id = ALC295_FIXUP_CHROME_BOOK, .name = "alc-sense-combo"},
4492 + {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
4493 {}
4494 };
4495 #define ALC225_STANDARD_PINS \
4496 @@ -7281,6 +7352,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4497 {0x14, 0x90170110},
4498 {0x1b, 0x90a70130},
4499 {0x21, 0x03211020}),
4500 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4501 + {0x12, 0x90a60130},
4502 + {0x14, 0x90170110},
4503 + {0x21, 0x03211020}),
4504 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4505 + {0x12, 0x90a60130},
4506 + {0x14, 0x90170110},
4507 + {0x21, 0x04211020}),
4508 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1043, "ASUS", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
4509 + {0x1a, 0x90a70130},
4510 + {0x1b, 0x90170110},
4511 + {0x21, 0x03211020}),
4512 SND_HDA_PIN_QUIRK(0x10ec0274, 0x1028, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
4513 {0x12, 0xb7a60130},
4514 {0x13, 0xb8a61140},
4515 diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
4516 index c9d038f91af6..53f8be0f4a1f 100644
4517 --- a/tools/objtool/Makefile
4518 +++ b/tools/objtool/Makefile
4519 @@ -25,14 +25,17 @@ LIBSUBCMD = $(LIBSUBCMD_OUTPUT)libsubcmd.a
4520 OBJTOOL := $(OUTPUT)objtool
4521 OBJTOOL_IN := $(OBJTOOL)-in.o
4522
4523 +LIBELF_FLAGS := $(shell pkg-config libelf --cflags 2>/dev/null)
4524 +LIBELF_LIBS := $(shell pkg-config libelf --libs 2>/dev/null || echo -lelf)
4525 +
4526 all: $(OBJTOOL)
4527
4528 INCLUDES := -I$(srctree)/tools/include \
4529 -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
4530 -I$(srctree)/tools/objtool/arch/$(ARCH)/include
4531 WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
4532 -CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES)
4533 -LDFLAGS += -lelf $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
4534 +CFLAGS += -Werror $(WARNINGS) $(KBUILD_HOSTCFLAGS) -g $(INCLUDES) $(LIBELF_FLAGS)
4535 +LDFLAGS += $(LIBELF_LIBS) $(LIBSUBCMD) $(KBUILD_HOSTLDFLAGS)
4536
4537 # Allow old libelf to be used:
4538 elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr)
4539 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4540 index a54d6c9a4601..7c0b975dd2f0 100644
4541 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4542 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
4543 @@ -251,19 +251,15 @@ struct intel_pt_decoder *intel_pt_decoder_new(struct intel_pt_params *params)
4544 if (!(decoder->tsc_ctc_ratio_n % decoder->tsc_ctc_ratio_d))
4545 decoder->tsc_ctc_mult = decoder->tsc_ctc_ratio_n /
4546 decoder->tsc_ctc_ratio_d;
4547 -
4548 - /*
4549 - * Allow for timestamps appearing to backwards because a TSC
4550 - * packet has slipped past a MTC packet, so allow 2 MTC ticks
4551 - * or ...
4552 - */
4553 - decoder->tsc_slip = multdiv(2 << decoder->mtc_shift,
4554 - decoder->tsc_ctc_ratio_n,
4555 - decoder->tsc_ctc_ratio_d);
4556 }
4557 - /* ... or 0x100 paranoia */
4558 - if (decoder->tsc_slip < 0x100)
4559 - decoder->tsc_slip = 0x100;
4560 +
4561 + /*
4562 + * A TSC packet can slip past MTC packets so that the timestamp appears
4563 + * to go backwards. One estimate is that can be up to about 40 CPU
4564 + * cycles, which is certainly less than 0x1000 TSC ticks, but accept
4565 + * slippage an order of magnitude more to be on the safe side.
4566 + */
4567 + decoder->tsc_slip = 0x10000;
4568
4569 intel_pt_log("timestamp: mtc_shift %u\n", decoder->mtc_shift);
4570 intel_pt_log("timestamp: tsc_ctc_ratio_n %u\n", decoder->tsc_ctc_ratio_n);
4571 diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
4572 index 11a234740632..ccd3275feeaa 100644
4573 --- a/tools/perf/util/pmu.c
4574 +++ b/tools/perf/util/pmu.c
4575 @@ -734,10 +734,20 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu)
4576
4577 if (!is_arm_pmu_core(name)) {
4578 pname = pe->pmu ? pe->pmu : "cpu";
4579 +
4580 + /*
4581 + * uncore alias may be from different PMU
4582 + * with common prefix
4583 + */
4584 + if (pmu_is_uncore(name) &&
4585 + !strncmp(pname, name, strlen(pname)))
4586 + goto new_alias;
4587 +
4588 if (strcmp(pname, name))
4589 continue;
4590 }
4591
4592 +new_alias:
4593 /* need type casts to override 'const' */
4594 __perf_pmu__new_alias(head, NULL, (char *)pe->name,
4595 (char *)pe->desc, (char *)pe->event,
4596 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4597 index 4e1024dbb73f..b4f2d892a1d3 100644
4598 --- a/virt/kvm/kvm_main.c
4599 +++ b/virt/kvm/kvm_main.c
4600 @@ -2902,6 +2902,9 @@ static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4601 {
4602 struct kvm_device *dev = filp->private_data;
4603
4604 + if (dev->kvm->mm != current->mm)
4605 + return -EIO;
4606 +
4607 switch (ioctl) {
4608 case KVM_SET_DEVICE_ATTR:
4609 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);