Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.16/0106-4.16.7-all-fixes.patch



Revision 3110
Wed May 16 14:24:38 2018 UTC by niro
File size: 158110 bytes
-linux-4.16.7
1 diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
2 index d6b3ff51a14f..36187fc32ab2 100644
3 --- a/Documentation/virtual/kvm/api.txt
4 +++ b/Documentation/virtual/kvm/api.txt
5 @@ -1960,6 +1960,9 @@ ARM 32-bit VFP control registers have the following id bit patterns:
6 ARM 64-bit FP registers have the following id bit patterns:
7 0x4030 0000 0012 0 <regno:12>
8
9 +ARM firmware pseudo-registers have the following bit pattern:
10 + 0x4030 0000 0014 <regno:16>
11 +
12
13 arm64 registers are mapped using the lower 32 bits. The upper 16 of
14 that is the register group type, or coprocessor number:
15 @@ -1976,6 +1979,9 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
16 arm64 system registers have the following id bit patterns:
17 0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
18
19 +arm64 firmware pseudo-registers have the following bit pattern:
20 + 0x6030 0000 0014 <regno:16>
21 +
22
23 MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
24 the register group type:
25 @@ -2510,7 +2516,8 @@ Possible features:
26 and execute guest code when KVM_RUN is called.
27 - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
28 Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
29 - - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
30 + - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 (or a future revision
31 + backward compatible with v0.2) for the CPU.
32 Depends on KVM_CAP_ARM_PSCI_0_2.
33 - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
34 Depends on KVM_CAP_ARM_PMU_V3.
35 diff --git a/Documentation/virtual/kvm/arm/psci.txt b/Documentation/virtual/kvm/arm/psci.txt
36 new file mode 100644
37 index 000000000000..aafdab887b04
38 --- /dev/null
39 +++ b/Documentation/virtual/kvm/arm/psci.txt
40 @@ -0,0 +1,30 @@
41 +KVM implements the PSCI (Power State Coordination Interface)
42 +specification in order to provide services such as CPU on/off, reset
43 +and power-off to the guest.
44 +
45 +The PSCI specification is regularly updated to provide new features,
46 +and KVM implements these updates if they make sense from a virtualization
47 +point of view.
48 +
49 +This means that a guest booted on two different versions of KVM can
50 +observe two different "firmware" revisions. This could cause issues if
51 +a given guest is tied to a particular PSCI revision (unlikely), or if
52 +a migration causes a different PSCI version to be exposed out of the
53 +blue to an unsuspecting guest.
54 +
55 +In order to remedy this situation, KVM exposes a set of "firmware
56 +pseudo-registers" that can be manipulated using the GET/SET_ONE_REG
57 +interface. These registers can be saved/restored by userspace, and set
58 +to a convenient value if required.
59 +
60 +The following register is defined:
61 +
62 +* KVM_REG_ARM_PSCI_VERSION:
63 +
64 + - Only valid if the vcpu has the KVM_ARM_VCPU_PSCI_0_2 feature set
65 + (and thus has already been initialized)
66 + - Returns the current PSCI version on GET_ONE_REG (defaulting to the
67 + highest PSCI version implemented by KVM and compatible with v0.2)
68 + - Allows any PSCI version implemented by KVM and compatible with
69 + v0.2 to be set with SET_ONE_REG
70 + - Affects the whole VM (even if the register view is per-vcpu)
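
For context on how this register is driven from userspace: like any other vCPU register, it is accessed through the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. The sketch below is a minimal illustration, not part of the patch; it assumes vcpu_fd is a vCPU created with the KVM_ARM_VCPU_PSCI_0_2 feature and kernel headers that already carry the KVM_REG_ARM_PSCI_VERSION definition added here, and it trims error handling.

    #include <linux/kvm.h>      /* struct kvm_one_reg, KVM_REG_ARM_PSCI_VERSION */
    #include <sys/ioctl.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Read the PSCI version the guest currently sees. */
    static uint64_t get_psci_version(int vcpu_fd)
    {
            uint64_t val = 0;
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM_PSCI_VERSION,
                    .addr = (uint64_t)(unsigned long)&val,
            };

            if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
                    perror("KVM_GET_ONE_REG");
            return val;
    }

    /* Pin the guest-visible PSCI version, e.g. on a migration target. */
    static int set_psci_version(int vcpu_fd, uint64_t val)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM_PSCI_VERSION,
                    .addr = (uint64_t)(unsigned long)&val,
            };

            /* KVM only accepts versions it implements that are v0.2-compatible. */
            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }

Saving the value on the source and restoring it on the destination is exactly the migration scenario the documentation above is guarding against.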
71 diff --git a/Makefile b/Makefile
72 index 41f07b2b7905..1c5d5d8c45e2 100644
73 --- a/Makefile
74 +++ b/Makefile
75 @@ -1,7 +1,7 @@
76 # SPDX-License-Identifier: GPL-2.0
77 VERSION = 4
78 PATCHLEVEL = 16
79 -SUBLEVEL = 6
80 +SUBLEVEL = 7
81 EXTRAVERSION =
82 NAME = Fearless Coyote
83
84 diff --git a/arch/arm/boot/dts/gemini-nas4220b.dts b/arch/arm/boot/dts/gemini-nas4220b.dts
85 index 8bbb6f85d161..4785fbcc41ed 100644
86 --- a/arch/arm/boot/dts/gemini-nas4220b.dts
87 +++ b/arch/arm/boot/dts/gemini-nas4220b.dts
88 @@ -134,37 +134,37 @@
89 function = "gmii";
90 groups = "gmii_gmac0_grp";
91 };
92 - /* Settings come from OpenWRT */
93 + /* Settings come from OpenWRT, pins on SL3516 */
94 conf0 {
95 - pins = "R8 GMAC0 RXDV", "U11 GMAC1 RXDV";
96 + pins = "V8 GMAC0 RXDV", "T10 GMAC1 RXDV";
97 skew-delay = <0>;
98 };
99 conf1 {
100 - pins = "T8 GMAC0 RXC", "T11 GMAC1 RXC";
101 + pins = "Y7 GMAC0 RXC", "Y11 GMAC1 RXC";
102 skew-delay = <15>;
103 };
104 conf2 {
105 - pins = "P8 GMAC0 TXEN", "V11 GMAC1 TXEN";
106 + pins = "T8 GMAC0 TXEN", "W11 GMAC1 TXEN";
107 skew-delay = <7>;
108 };
109 conf3 {
110 - pins = "V7 GMAC0 TXC";
111 + pins = "U8 GMAC0 TXC";
112 skew-delay = <11>;
113 };
114 conf4 {
115 - pins = "P10 GMAC1 TXC";
116 + pins = "V11 GMAC1 TXC";
117 skew-delay = <10>;
118 };
119 conf5 {
120 /* The data lines all have default skew */
121 - pins = "U8 GMAC0 RXD0", "V8 GMAC0 RXD1",
122 - "P9 GMAC0 RXD2", "R9 GMAC0 RXD3",
123 - "U7 GMAC0 TXD0", "T7 GMAC0 TXD1",
124 - "R7 GMAC0 TXD2", "P7 GMAC0 TXD3",
125 - "R11 GMAC1 RXD0", "P11 GMAC1 RXD1",
126 - "V12 GMAC1 RXD2", "U12 GMAC1 RXD3",
127 - "R10 GMAC1 TXD0", "T10 GMAC1 TXD1",
128 - "U10 GMAC1 TXD2", "V10 GMAC1 TXD3";
129 + pins = "W8 GMAC0 RXD0", "V9 GMAC0 RXD1",
130 + "Y8 GMAC0 RXD2", "U9 GMAC0 RXD3",
131 + "T7 GMAC0 TXD0", "U6 GMAC0 TXD1",
132 + "V7 GMAC0 TXD2", "U7 GMAC0 TXD3",
133 + "Y12 GMAC1 RXD0", "V12 GMAC1 RXD1",
134 + "T11 GMAC1 RXD2", "W12 GMAC1 RXD3",
135 + "U10 GMAC1 TXD0", "Y10 GMAC1 TXD1",
136 + "W10 GMAC1 TXD2", "T9 GMAC1 TXD3";
137 skew-delay = <7>;
138 };
139 /* Set up drive strength on GMAC0 to 16 mA */
140 diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
141 index 2620ce790db0..371fca4e1ab7 100644
142 --- a/arch/arm/configs/socfpga_defconfig
143 +++ b/arch/arm/configs/socfpga_defconfig
144 @@ -57,6 +57,7 @@ CONFIG_MTD_M25P80=y
145 CONFIG_MTD_NAND=y
146 CONFIG_MTD_NAND_DENALI_DT=y
147 CONFIG_MTD_SPI_NOR=y
148 +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
149 CONFIG_SPI_CADENCE_QUADSPI=y
150 CONFIG_OF_OVERLAY=y
151 CONFIG_OF_CONFIGFS=y
152 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
153 index 248b930563e5..8b908d23c58a 100644
154 --- a/arch/arm/include/asm/kvm_host.h
155 +++ b/arch/arm/include/asm/kvm_host.h
156 @@ -77,6 +77,9 @@ struct kvm_arch {
157 /* Interrupt controller */
158 struct vgic_dist vgic;
159 int max_vcpus;
160 +
161 + /* Mandated version of PSCI */
162 + u32 psci_version;
163 };
164
165 #define KVM_NR_MEM_OBJS 40
166 diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
167 index 6edd177bb1c7..47dfc99f5cd0 100644
168 --- a/arch/arm/include/uapi/asm/kvm.h
169 +++ b/arch/arm/include/uapi/asm/kvm.h
170 @@ -186,6 +186,12 @@ struct kvm_arch_memory_slot {
171 #define KVM_REG_ARM_VFP_FPINST 0x1009
172 #define KVM_REG_ARM_VFP_FPINST2 0x100A
173
174 +/* KVM-as-firmware specific pseudo-registers */
175 +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
176 +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
177 + KVM_REG_ARM_FW | ((r) & 0xffff))
178 +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
179 +
180 /* Device Control API: ARM VGIC */
181 #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
182 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
183 diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
184 index 1e0784ebbfd6..a18f33edc471 100644
185 --- a/arch/arm/kvm/guest.c
186 +++ b/arch/arm/kvm/guest.c
187 @@ -22,6 +22,7 @@
188 #include <linux/module.h>
189 #include <linux/vmalloc.h>
190 #include <linux/fs.h>
191 +#include <kvm/arm_psci.h>
192 #include <asm/cputype.h>
193 #include <linux/uaccess.h>
194 #include <asm/kvm.h>
195 @@ -176,6 +177,7 @@ static unsigned long num_core_regs(void)
196 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
197 {
198 return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
199 + + kvm_arm_get_fw_num_regs(vcpu)
200 + NUM_TIMER_REGS;
201 }
202
203 @@ -196,6 +198,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
204 uindices++;
205 }
206
207 + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
208 + if (ret)
209 + return ret;
210 + uindices += kvm_arm_get_fw_num_regs(vcpu);
211 +
212 ret = copy_timer_indices(vcpu, uindices);
213 if (ret)
214 return ret;
215 @@ -214,6 +221,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
216 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
217 return get_core_reg(vcpu, reg);
218
219 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
220 + return kvm_arm_get_fw_reg(vcpu, reg);
221 +
222 if (is_timer_reg(reg->id))
223 return get_timer_reg(vcpu, reg);
224
225 @@ -230,6 +240,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
226 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
227 return set_core_reg(vcpu, reg);
228
229 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
230 + return kvm_arm_set_fw_reg(vcpu, reg);
231 +
232 if (is_timer_reg(reg->id))
233 return set_timer_reg(vcpu, reg);
234
235 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
236 index 596f8e414a4c..b9e355bd3b78 100644
237 --- a/arch/arm64/include/asm/kvm_host.h
238 +++ b/arch/arm64/include/asm/kvm_host.h
239 @@ -75,6 +75,9 @@ struct kvm_arch {
240
241 /* Interrupt controller */
242 struct vgic_dist vgic;
243 +
244 + /* Mandated version of PSCI */
245 + u32 psci_version;
246 };
247
248 #define KVM_NR_MEM_OBJS 40
249 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
250 index 9abbf3044654..04b3256f8e6d 100644
251 --- a/arch/arm64/include/uapi/asm/kvm.h
252 +++ b/arch/arm64/include/uapi/asm/kvm.h
253 @@ -206,6 +206,12 @@ struct kvm_arch_memory_slot {
254 #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
255 #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
256
257 +/* KVM-as-firmware specific pseudo-registers */
258 +#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
259 +#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
260 + KVM_REG_ARM_FW | ((r) & 0xffff))
261 +#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
262 +
263 /* Device Control API: ARM VGIC */
264 #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
265 #define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
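
As a quick cross-check against the id bit pattern documented in api.txt (0x6030 0000 0014 <regno:16> on arm64), the macro above is plain constant composition. A standalone sketch, with the constants transcribed from the uapi headers and regno 0 being the PSCI version pseudo-register:

    #include <stdint.h>
    #include <stdio.h>

    /* Constants as defined in the arm64 uapi headers / this patch. */
    #define KVM_REG_ARM64            0x6000000000000000ULL
    #define KVM_REG_SIZE_U64         0x0030000000000000ULL
    #define KVM_REG_ARM_COPROC_SHIFT 16
    #define KVM_REG_ARM_FW           (0x0014ULL << KVM_REG_ARM_COPROC_SHIFT)
    #define KVM_REG_ARM_FW_REG(r)    (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
                                      KVM_REG_ARM_FW | ((r) & 0xffff))

    int main(void)
    {
            /* Prints 0x6030000000140000, matching the documented pattern. */
            printf("0x%016llx\n", (unsigned long long)KVM_REG_ARM_FW_REG(0));
            return 0;
    }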
266 diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
267 index 959e50d2588c..56a0260ceb11 100644
268 --- a/arch/arm64/kvm/guest.c
269 +++ b/arch/arm64/kvm/guest.c
270 @@ -25,6 +25,7 @@
271 #include <linux/module.h>
272 #include <linux/vmalloc.h>
273 #include <linux/fs.h>
274 +#include <kvm/arm_psci.h>
275 #include <asm/cputype.h>
276 #include <linux/uaccess.h>
277 #include <asm/kvm.h>
278 @@ -205,7 +206,7 @@ static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
279 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
280 {
281 return num_core_regs() + kvm_arm_num_sys_reg_descs(vcpu)
282 - + NUM_TIMER_REGS;
283 + + kvm_arm_get_fw_num_regs(vcpu) + NUM_TIMER_REGS;
284 }
285
286 /**
287 @@ -225,6 +226,11 @@ int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
288 uindices++;
289 }
290
291 + ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
292 + if (ret)
293 + return ret;
294 + uindices += kvm_arm_get_fw_num_regs(vcpu);
295 +
296 ret = copy_timer_indices(vcpu, uindices);
297 if (ret)
298 return ret;
299 @@ -243,6 +249,9 @@ int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
300 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
301 return get_core_reg(vcpu, reg);
302
303 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
304 + return kvm_arm_get_fw_reg(vcpu, reg);
305 +
306 if (is_timer_reg(reg->id))
307 return get_timer_reg(vcpu, reg);
308
309 @@ -259,6 +268,9 @@ int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
310 if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
311 return set_core_reg(vcpu, reg);
312
313 + if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
314 + return kvm_arm_set_fw_reg(vcpu, reg);
315 +
316 if (is_timer_reg(reg->id))
317 return set_timer_reg(vcpu, reg);
318
319 diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
320 index fe6fc63251fe..38c5b4764bfe 100644
321 --- a/arch/powerpc/kernel/mce_power.c
322 +++ b/arch/powerpc/kernel/mce_power.c
323 @@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs,
324 if (pfn != ULONG_MAX) {
325 *phys_addr =
326 (pfn << PAGE_SHIFT);
327 - handled = 1;
328 }
329 }
330 }
331 @@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs,
332 * kernel/exception-64s.h
333 */
334 if (get_paca()->in_mce < MAX_MCE_DEPTH)
335 - if (!mce_find_instr_ea_and_pfn(regs, addr,
336 - phys_addr))
337 - handled = 1;
338 + mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
339 }
340 found = 1;
341 }
342 @@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs,
343 const struct mce_ierror_table itable[])
344 {
345 struct mce_error_info mce_err = { 0 };
346 - uint64_t addr, phys_addr;
347 + uint64_t addr, phys_addr = ULONG_MAX;
348 uint64_t srr1 = regs->msr;
349 long handled;
350
351 diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
352 index fe8c61149fb8..0cd9031b6b54 100644
353 --- a/arch/powerpc/mm/mem.c
354 +++ b/arch/powerpc/mm/mem.c
355 @@ -143,6 +143,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
356 start, start + size, rc);
357 return -EFAULT;
358 }
359 + flush_inval_dcache_range(start, start + size);
360
361 return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
362 }
363 @@ -169,6 +170,7 @@ int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
364
365 /* Remove htab bolted mappings for this section of memory */
366 start = (unsigned long)__va(start);
367 + flush_inval_dcache_range(start, start + size);
368 ret = remove_section_mapping(start, start + size);
369
370 /* Ensure all vmalloc mappings are flushed in case they also
371 diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
372 index 0a253b64ac5f..e7b621f619b2 100644
373 --- a/arch/powerpc/platforms/powernv/npu-dma.c
374 +++ b/arch/powerpc/platforms/powernv/npu-dma.c
375 @@ -33,6 +33,13 @@
376
377 #define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
378
379 +/*
380 + * When an address shootdown range exceeds this threshold we invalidate the
381 + * entire TLB on the GPU for the given PID rather than each specific address in
382 + * the range.
383 + */
384 +#define ATSD_THRESHOLD (2*1024*1024)
385 +
386 /*
387 * Other types of TCE cache invalidation are not functional in the
388 * hardware.
389 @@ -627,11 +634,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
390 struct npu_context *npu_context = mn_to_npu_context(mn);
391 unsigned long address;
392
393 - for (address = start; address < end; address += PAGE_SIZE)
394 - mmio_invalidate(npu_context, 1, address, false);
395 + if (end - start > ATSD_THRESHOLD) {
396 + /*
397 + * Just invalidate the entire PID if the address range is too
398 + * large.
399 + */
400 + mmio_invalidate(npu_context, 0, 0, true);
401 + } else {
402 + for (address = start; address < end; address += PAGE_SIZE)
403 + mmio_invalidate(npu_context, 1, address, false);
404
405 - /* Do the flush only on the final addess == end */
406 - mmio_invalidate(npu_context, 1, address, true);
407 + /* Do the flush only on the final addess == end */
408 + mmio_invalidate(npu_context, 1, address, true);
409 + }
410 }
411
412 static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
413 diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
414 index f8868864f373..aa2a5139462e 100644
415 --- a/arch/powerpc/platforms/powernv/opal-rtc.c
416 +++ b/arch/powerpc/platforms/powernv/opal-rtc.c
417 @@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
418
419 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
420 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
421 - if (rc == OPAL_BUSY_EVENT)
422 + if (rc == OPAL_BUSY_EVENT) {
423 + mdelay(OPAL_BUSY_DELAY_MS);
424 opal_poll_events(NULL);
425 - else if (rc == OPAL_BUSY)
426 - mdelay(10);
427 + } else if (rc == OPAL_BUSY) {
428 + mdelay(OPAL_BUSY_DELAY_MS);
429 + }
430 }
431 if (rc != OPAL_SUCCESS)
432 return 0;
433 diff --git a/arch/sparc/include/uapi/asm/oradax.h b/arch/sparc/include/uapi/asm/oradax.h
434 index 722951908b0a..4f6676fe4bcc 100644
435 --- a/arch/sparc/include/uapi/asm/oradax.h
436 +++ b/arch/sparc/include/uapi/asm/oradax.h
437 @@ -3,7 +3,7 @@
438 *
439 * This program is free software: you can redistribute it and/or modify
440 * it under the terms of the GNU General Public License as published by
441 - * the Free Software Foundation, either version 3 of the License, or
442 + * the Free Software Foundation, either version 2 of the License, or
443 * (at your option) any later version.
444 *
445 * This program is distributed in the hope that it will be useful,
446 diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
447 index 809134c644a6..90ab9a795b49 100644
448 --- a/arch/x86/include/uapi/asm/msgbuf.h
449 +++ b/arch/x86/include/uapi/asm/msgbuf.h
450 @@ -1 +1,32 @@
451 +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
452 +#ifndef __ASM_X64_MSGBUF_H
453 +#define __ASM_X64_MSGBUF_H
454 +
455 +#if !defined(__x86_64__) || !defined(__ILP32__)
456 #include <asm-generic/msgbuf.h>
457 +#else
458 +/*
459 + * The msqid64_ds structure for x86 architecture with x32 ABI.
460 + *
461 + * On x86-32 and x86-64 we can just use the generic definition, but
462 + * x32 uses the same binary layout as x86_64, which is different
463 + * from other 32-bit architectures.
464 + */
465 +
466 +struct msqid64_ds {
467 + struct ipc64_perm msg_perm;
468 + __kernel_time_t msg_stime; /* last msgsnd time */
469 + __kernel_time_t msg_rtime; /* last msgrcv time */
470 + __kernel_time_t msg_ctime; /* last change time */
471 + __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
472 + __kernel_ulong_t msg_qnum; /* number of messages in queue */
473 + __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
474 + __kernel_pid_t msg_lspid; /* pid of last msgsnd */
475 + __kernel_pid_t msg_lrpid; /* last receive pid */
476 + __kernel_ulong_t __unused4;
477 + __kernel_ulong_t __unused5;
478 +};
479 +
480 +#endif
481 +
482 +#endif /* __ASM_GENERIC_MSGBUF_H */
483 diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
484 index 83c05fc2de38..644421f3823b 100644
485 --- a/arch/x86/include/uapi/asm/shmbuf.h
486 +++ b/arch/x86/include/uapi/asm/shmbuf.h
487 @@ -1 +1,43 @@
488 +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
489 +#ifndef __ASM_X86_SHMBUF_H
490 +#define __ASM_X86_SHMBUF_H
491 +
492 +#if !defined(__x86_64__) || !defined(__ILP32__)
493 #include <asm-generic/shmbuf.h>
494 +#else
495 +/*
496 + * The shmid64_ds structure for x86 architecture with x32 ABI.
497 + *
498 + * On x86-32 and x86-64 we can just use the generic definition, but
499 + * x32 uses the same binary layout as x86_64, which is different
500 + * from other 32-bit architectures.
501 + */
502 +
503 +struct shmid64_ds {
504 + struct ipc64_perm shm_perm; /* operation perms */
505 + size_t shm_segsz; /* size of segment (bytes) */
506 + __kernel_time_t shm_atime; /* last attach time */
507 + __kernel_time_t shm_dtime; /* last detach time */
508 + __kernel_time_t shm_ctime; /* last change time */
509 + __kernel_pid_t shm_cpid; /* pid of creator */
510 + __kernel_pid_t shm_lpid; /* pid of last operator */
511 + __kernel_ulong_t shm_nattch; /* no. of current attaches */
512 + __kernel_ulong_t __unused4;
513 + __kernel_ulong_t __unused5;
514 +};
515 +
516 +struct shminfo64 {
517 + __kernel_ulong_t shmmax;
518 + __kernel_ulong_t shmmin;
519 + __kernel_ulong_t shmmni;
520 + __kernel_ulong_t shmseg;
521 + __kernel_ulong_t shmall;
522 + __kernel_ulong_t __unused1;
523 + __kernel_ulong_t __unused2;
524 + __kernel_ulong_t __unused3;
525 + __kernel_ulong_t __unused4;
526 +};
527 +
528 +#endif
529 +
530 +#endif /* __ASM_X86_SHMBUF_H */
531 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
532 index 10c4fc2c91f8..77e201301528 100644
533 --- a/arch/x86/kernel/cpu/microcode/core.c
534 +++ b/arch/x86/kernel/cpu/microcode/core.c
535 @@ -564,14 +564,12 @@ static int __reload_late(void *info)
536 apply_microcode_local(&err);
537 spin_unlock(&update_lock);
538
539 + /* siblings return UCODE_OK because their engine got updated already */
540 if (err > UCODE_NFOUND) {
541 pr_warn("Error reloading microcode on CPU %d\n", cpu);
542 - return -1;
543 - /* siblings return UCODE_OK because their engine got updated already */
544 + ret = -1;
545 } else if (err == UCODE_UPDATED || err == UCODE_OK) {
546 ret = 1;
547 - } else {
548 - return ret;
549 }
550
551 /*
552 diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
553 index 32b8e5724f96..1c2cfa0644aa 100644
554 --- a/arch/x86/kernel/cpu/microcode/intel.c
555 +++ b/arch/x86/kernel/cpu/microcode/intel.c
556 @@ -485,7 +485,6 @@ static void show_saved_mc(void)
557 */
558 static void save_mc_for_early(u8 *mc, unsigned int size)
559 {
560 -#ifdef CONFIG_HOTPLUG_CPU
561 /* Synchronization during CPU hotplug. */
562 static DEFINE_MUTEX(x86_cpu_microcode_mutex);
563
564 @@ -495,7 +494,6 @@ static void save_mc_for_early(u8 *mc, unsigned int size)
565 show_saved_mc();
566
567 mutex_unlock(&x86_cpu_microcode_mutex);
568 -#endif
569 }
570
571 static bool load_builtin_intel_microcode(struct cpio_data *cp)
572 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
573 index ff99e2b6fc54..12599e55e040 100644
574 --- a/arch/x86/kernel/smpboot.c
575 +++ b/arch/x86/kernel/smpboot.c
576 @@ -1536,6 +1536,8 @@ static inline void mwait_play_dead(void)
577 void *mwait_ptr;
578 int i;
579
580 + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
581 + return;
582 if (!this_cpu_has(X86_FEATURE_MWAIT))
583 return;
584 if (!this_cpu_has(X86_FEATURE_CLFLUSH))
585 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
586 index aeca22d91101..3193b2663bed 100644
587 --- a/block/bfq-iosched.c
588 +++ b/block/bfq-iosched.c
589 @@ -4911,8 +4911,16 @@ static void bfq_prepare_request(struct request *rq, struct bio *bio)
590 bool new_queue = false;
591 bool bfqq_already_existing = false, split = false;
592
593 - if (!rq->elv.icq)
594 + /*
595 + * Even if we don't have an icq attached, we should still clear
596 + * the scheduler pointers, as they might point to previously
597 + * allocated bic/bfqq structs.
598 + */
599 + if (!rq->elv.icq) {
600 + rq->elv.priv[0] = rq->elv.priv[1] = NULL;
601 return;
602 + }
603 +
604 bic = icq_to_bic(rq->elv.icq);
605
606 spin_lock_irq(&bfqd->lock);
607 diff --git a/block/blk-core.c b/block/blk-core.c
608 index 3b489527c8f2..b459d277d170 100644
609 --- a/block/blk-core.c
610 +++ b/block/blk-core.c
611 @@ -129,6 +129,10 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
612 rq->part = NULL;
613 seqcount_init(&rq->gstate_seq);
614 u64_stats_init(&rq->aborted_gstate_sync);
615 + /*
616 + * See comment of blk_mq_init_request
617 + */
618 + WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
619 }
620 EXPORT_SYMBOL(blk_rq_init);
621
622 @@ -825,7 +829,6 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
623
624 while (true) {
625 bool success = false;
626 - int ret;
627
628 rcu_read_lock();
629 if (percpu_ref_tryget_live(&q->q_usage_counter)) {
630 @@ -857,14 +860,12 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
631 */
632 smp_rmb();
633
634 - ret = wait_event_interruptible(q->mq_freeze_wq,
635 - (atomic_read(&q->mq_freeze_depth) == 0 &&
636 - (preempt || !blk_queue_preempt_only(q))) ||
637 - blk_queue_dying(q));
638 + wait_event(q->mq_freeze_wq,
639 + (atomic_read(&q->mq_freeze_depth) == 0 &&
640 + (preempt || !blk_queue_preempt_only(q))) ||
641 + blk_queue_dying(q));
642 if (blk_queue_dying(q))
643 return -ENODEV;
644 - if (ret)
645 - return ret;
646 }
647 }
648
649 diff --git a/block/blk-mq.c b/block/blk-mq.c
650 index 56e0c3699f9e..96de7aa4f62a 100644
651 --- a/block/blk-mq.c
652 +++ b/block/blk-mq.c
653 @@ -2076,6 +2076,13 @@ static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
654
655 seqcount_init(&rq->gstate_seq);
656 u64_stats_init(&rq->aborted_gstate_sync);
657 + /*
658 + * start gstate with gen 1 instead of 0, otherwise it will be equal
659 + * to aborted_gstate, and be identified timed out by
660 + * blk_mq_terminate_expired.
661 + */
662 + WRITE_ONCE(rq->gstate, MQ_RQ_GEN_INC);
663 +
664 return 0;
665 }
666
667 diff --git a/crypto/drbg.c b/crypto/drbg.c
668 index 4faa2781c964..466a112a4446 100644
669 --- a/crypto/drbg.c
670 +++ b/crypto/drbg.c
671 @@ -1134,8 +1134,10 @@ static inline void drbg_dealloc_state(struct drbg_state *drbg)
672 if (!drbg)
673 return;
674 kzfree(drbg->Vbuf);
675 + drbg->Vbuf = NULL;
676 drbg->V = NULL;
677 kzfree(drbg->Cbuf);
678 + drbg->Cbuf = NULL;
679 drbg->C = NULL;
680 kzfree(drbg->scratchpadbuf);
681 drbg->scratchpadbuf = NULL;
682 diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
683 index 594c228d2f02..4a3ac31c07d0 100644
684 --- a/drivers/amba/bus.c
685 +++ b/drivers/amba/bus.c
686 @@ -69,11 +69,12 @@ static ssize_t driver_override_show(struct device *_dev,
687 struct device_attribute *attr, char *buf)
688 {
689 struct amba_device *dev = to_amba_device(_dev);
690 + ssize_t len;
691
692 - if (!dev->driver_override)
693 - return 0;
694 -
695 - return sprintf(buf, "%s\n", dev->driver_override);
696 + device_lock(_dev);
697 + len = sprintf(buf, "%s\n", dev->driver_override);
698 + device_unlock(_dev);
699 + return len;
700 }
701
702 static ssize_t driver_override_store(struct device *_dev,
703 @@ -81,9 +82,10 @@ static ssize_t driver_override_store(struct device *_dev,
704 const char *buf, size_t count)
705 {
706 struct amba_device *dev = to_amba_device(_dev);
707 - char *driver_override, *old = dev->driver_override, *cp;
708 + char *driver_override, *old, *cp;
709
710 - if (count > PATH_MAX)
711 + /* We need to keep extra room for a newline */
712 + if (count >= (PAGE_SIZE - 1))
713 return -EINVAL;
714
715 driver_override = kstrndup(buf, count, GFP_KERNEL);
716 @@ -94,12 +96,15 @@ static ssize_t driver_override_store(struct device *_dev,
717 if (cp)
718 *cp = '\0';
719
720 + device_lock(_dev);
721 + old = dev->driver_override;
722 if (strlen(driver_override)) {
723 dev->driver_override = driver_override;
724 } else {
725 kfree(driver_override);
726 dev->driver_override = NULL;
727 }
728 + device_unlock(_dev);
729
730 kfree(old);
731
732 diff --git a/drivers/android/binder.c b/drivers/android/binder.c
733 index 764b63a5aade..e578eee31589 100644
734 --- a/drivers/android/binder.c
735 +++ b/drivers/android/binder.c
736 @@ -2839,6 +2839,14 @@ static void binder_transaction(struct binder_proc *proc,
737 else
738 return_error = BR_DEAD_REPLY;
739 mutex_unlock(&context->context_mgr_node_lock);
740 + if (target_node && target_proc == proc) {
741 + binder_user_error("%d:%d got transaction to context manager from process owning it\n",
742 + proc->pid, thread->pid);
743 + return_error = BR_FAILED_REPLY;
744 + return_error_param = -EINVAL;
745 + return_error_line = __LINE__;
746 + goto err_invalid_target_handle;
747 + }
748 }
749 if (!target_node) {
750 /*
751 diff --git a/drivers/char/random.c b/drivers/char/random.c
752 index 38729baed6ee..8f4e11842c60 100644
753 --- a/drivers/char/random.c
754 +++ b/drivers/char/random.c
755 @@ -261,6 +261,7 @@
756 #include <linux/ptrace.h>
757 #include <linux/workqueue.h>
758 #include <linux/irq.h>
759 +#include <linux/ratelimit.h>
760 #include <linux/syscalls.h>
761 #include <linux/completion.h>
762 #include <linux/uuid.h>
763 @@ -438,6 +439,16 @@ static void _crng_backtrack_protect(struct crng_state *crng,
764 static void process_random_ready_list(void);
765 static void _get_random_bytes(void *buf, int nbytes);
766
767 +static struct ratelimit_state unseeded_warning =
768 + RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
769 +static struct ratelimit_state urandom_warning =
770 + RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);
771 +
772 +static int ratelimit_disable __read_mostly;
773 +
774 +module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
775 +MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");
776 +
777 /**********************************************************************
778 *
779 * OS independent entropy store. Here are the functions which handle
780 @@ -787,6 +798,39 @@ static void crng_initialize(struct crng_state *crng)
781 crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
782 }
783
784 +#ifdef CONFIG_NUMA
785 +static void do_numa_crng_init(struct work_struct *work)
786 +{
787 + int i;
788 + struct crng_state *crng;
789 + struct crng_state **pool;
790 +
791 + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
792 + for_each_online_node(i) {
793 + crng = kmalloc_node(sizeof(struct crng_state),
794 + GFP_KERNEL | __GFP_NOFAIL, i);
795 + spin_lock_init(&crng->lock);
796 + crng_initialize(crng);
797 + pool[i] = crng;
798 + }
799 + mb();
800 + if (cmpxchg(&crng_node_pool, NULL, pool)) {
801 + for_each_node(i)
802 + kfree(pool[i]);
803 + kfree(pool);
804 + }
805 +}
806 +
807 +static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);
808 +
809 +static void numa_crng_init(void)
810 +{
811 + schedule_work(&numa_crng_init_work);
812 +}
813 +#else
814 +static void numa_crng_init(void) {}
815 +#endif
816 +
817 /*
818 * crng_fast_load() can be called by code in the interrupt service
819 * path. So we can't afford to dilly-dally.
820 @@ -893,10 +937,23 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
821 spin_unlock_irqrestore(&crng->lock, flags);
822 if (crng == &primary_crng && crng_init < 2) {
823 invalidate_batched_entropy();
824 + numa_crng_init();
825 crng_init = 2;
826 process_random_ready_list();
827 wake_up_interruptible(&crng_init_wait);
828 pr_notice("random: crng init done\n");
829 + if (unseeded_warning.missed) {
830 + pr_notice("random: %d get_random_xx warning(s) missed "
831 + "due to ratelimiting\n",
832 + unseeded_warning.missed);
833 + unseeded_warning.missed = 0;
834 + }
835 + if (urandom_warning.missed) {
836 + pr_notice("random: %d urandom warning(s) missed "
837 + "due to ratelimiting\n",
838 + urandom_warning.missed);
839 + urandom_warning.missed = 0;
840 + }
841 }
842 }
843
844 @@ -1540,8 +1597,9 @@ static void _warn_unseeded_randomness(const char *func_name, void *caller,
845 #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
846 print_once = true;
847 #endif
848 - pr_notice("random: %s called from %pS with crng_init=%d\n",
849 - func_name, caller, crng_init);
850 + if (__ratelimit(&unseeded_warning))
851 + pr_notice("random: %s called from %pS with crng_init=%d\n",
852 + func_name, caller, crng_init);
853 }
854
855 /*
856 @@ -1731,29 +1789,14 @@ static void init_std_data(struct entropy_store *r)
857 */
858 static int rand_initialize(void)
859 {
860 -#ifdef CONFIG_NUMA
861 - int i;
862 - struct crng_state *crng;
863 - struct crng_state **pool;
864 -#endif
865 -
866 init_std_data(&input_pool);
867 init_std_data(&blocking_pool);
868 crng_initialize(&primary_crng);
869 crng_global_init_time = jiffies;
870 -
871 -#ifdef CONFIG_NUMA
872 - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
873 - for_each_online_node(i) {
874 - crng = kmalloc_node(sizeof(struct crng_state),
875 - GFP_KERNEL | __GFP_NOFAIL, i);
876 - spin_lock_init(&crng->lock);
877 - crng_initialize(crng);
878 - pool[i] = crng;
879 + if (ratelimit_disable) {
880 + urandom_warning.interval = 0;
881 + unseeded_warning.interval = 0;
882 }
883 - mb();
884 - crng_node_pool = pool;
885 -#endif
886 return 0;
887 }
888 early_initcall(rand_initialize);
889 @@ -1821,9 +1864,10 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
890
891 if (!crng_ready() && maxwarn > 0) {
892 maxwarn--;
893 - printk(KERN_NOTICE "random: %s: uninitialized urandom read "
894 - "(%zd bytes read)\n",
895 - current->comm, nbytes);
896 + if (__ratelimit(&urandom_warning))
897 + printk(KERN_NOTICE "random: %s: uninitialized "
898 + "urandom read (%zd bytes read)\n",
899 + current->comm, nbytes);
900 spin_lock_irqsave(&primary_crng.lock, flags);
901 crng_init_cnt = 0;
902 spin_unlock_irqrestore(&primary_crng.lock, flags);
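
The warning throttling added above is the generic linux/ratelimit.h pattern, which random.c now instantiates for the unseeded-randomness and urandom-read messages. A minimal kernel-context sketch of that pattern (hypothetical names; at most three messages per second, with suppressed calls accumulating in .missed, which the crng-init hunk later reports):

    #include <linux/ratelimit.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    /* Allow a burst of 3 messages per HZ jiffies (one second). */
    static struct ratelimit_state demo_warning =
            RATELIMIT_STATE_INIT("demo_warning", HZ, 3);

    static void report_event(int value)
    {
            /* __ratelimit() returns nonzero while the burst budget lasts;
             * calls it suppresses are counted in demo_warning.missed. */
            if (__ratelimit(&demo_warning))
                    pr_notice("demo: event seen, value=%d\n", value);
    }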
903 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
904 index 468f06134012..21085515814f 100644
905 --- a/drivers/char/virtio_console.c
906 +++ b/drivers/char/virtio_console.c
907 @@ -422,7 +422,7 @@ static void reclaim_dma_bufs(void)
908 }
909 }
910
911 -static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
912 +static struct port_buffer *alloc_buf(struct virtio_device *vdev, size_t buf_size,
913 int pages)
914 {
915 struct port_buffer *buf;
916 @@ -445,16 +445,16 @@ static struct port_buffer *alloc_buf(struct virtqueue *vq, size_t buf_size,
917 return buf;
918 }
919
920 - if (is_rproc_serial(vq->vdev)) {
921 + if (is_rproc_serial(vdev)) {
922 /*
923 * Allocate DMA memory from ancestor. When a virtio
924 * device is created by remoteproc, the DMA memory is
925 * associated with the grandparent device:
926 * vdev => rproc => platform-dev.
927 */
928 - if (!vq->vdev->dev.parent || !vq->vdev->dev.parent->parent)
929 + if (!vdev->dev.parent || !vdev->dev.parent->parent)
930 goto free_buf;
931 - buf->dev = vq->vdev->dev.parent->parent;
932 + buf->dev = vdev->dev.parent->parent;
933
934 /* Increase device refcnt to avoid freeing it */
935 get_device(buf->dev);
936 @@ -838,7 +838,7 @@ static ssize_t port_fops_write(struct file *filp, const char __user *ubuf,
937
938 count = min((size_t)(32 * 1024), count);
939
940 - buf = alloc_buf(port->out_vq, count, 0);
941 + buf = alloc_buf(port->portdev->vdev, count, 0);
942 if (!buf)
943 return -ENOMEM;
944
945 @@ -957,7 +957,7 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe,
946 if (ret < 0)
947 goto error_out;
948
949 - buf = alloc_buf(port->out_vq, 0, pipe->nrbufs);
950 + buf = alloc_buf(port->portdev->vdev, 0, pipe->nrbufs);
951 if (!buf) {
952 ret = -ENOMEM;
953 goto error_out;
954 @@ -1374,7 +1374,7 @@ static unsigned int fill_queue(struct virtqueue *vq, spinlock_t *lock)
955
956 nr_added_bufs = 0;
957 do {
958 - buf = alloc_buf(vq, PAGE_SIZE, 0);
959 + buf = alloc_buf(vq->vdev, PAGE_SIZE, 0);
960 if (!buf)
961 break;
962
963 @@ -1402,7 +1402,6 @@ static int add_port(struct ports_device *portdev, u32 id)
964 {
965 char debugfs_name[16];
966 struct port *port;
967 - struct port_buffer *buf;
968 dev_t devt;
969 unsigned int nr_added_bufs;
970 int err;
971 @@ -1513,8 +1512,6 @@ static int add_port(struct ports_device *portdev, u32 id)
972 return 0;
973
974 free_inbufs:
975 - while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
976 - free_buf(buf, true);
977 free_device:
978 device_destroy(pdrvdata.class, port->dev->devt);
979 free_cdev:
980 @@ -1539,34 +1536,14 @@ static void remove_port(struct kref *kref)
981
982 static void remove_port_data(struct port *port)
983 {
984 - struct port_buffer *buf;
985 -
986 spin_lock_irq(&port->inbuf_lock);
987 /* Remove unused data this port might have received. */
988 discard_port_data(port);
989 spin_unlock_irq(&port->inbuf_lock);
990
991 - /* Remove buffers we queued up for the Host to send us data in. */
992 - do {
993 - spin_lock_irq(&port->inbuf_lock);
994 - buf = virtqueue_detach_unused_buf(port->in_vq);
995 - spin_unlock_irq(&port->inbuf_lock);
996 - if (buf)
997 - free_buf(buf, true);
998 - } while (buf);
999 -
1000 spin_lock_irq(&port->outvq_lock);
1001 reclaim_consumed_buffers(port);
1002 spin_unlock_irq(&port->outvq_lock);
1003 -
1004 - /* Free pending buffers from the out-queue. */
1005 - do {
1006 - spin_lock_irq(&port->outvq_lock);
1007 - buf = virtqueue_detach_unused_buf(port->out_vq);
1008 - spin_unlock_irq(&port->outvq_lock);
1009 - if (buf)
1010 - free_buf(buf, true);
1011 - } while (buf);
1012 }
1013
1014 /*
1015 @@ -1791,13 +1768,24 @@ static void control_work_handler(struct work_struct *work)
1016 spin_unlock(&portdev->c_ivq_lock);
1017 }
1018
1019 +static void flush_bufs(struct virtqueue *vq, bool can_sleep)
1020 +{
1021 + struct port_buffer *buf;
1022 + unsigned int len;
1023 +
1024 + while ((buf = virtqueue_get_buf(vq, &len)))
1025 + free_buf(buf, can_sleep);
1026 +}
1027 +
1028 static void out_intr(struct virtqueue *vq)
1029 {
1030 struct port *port;
1031
1032 port = find_port_by_vq(vq->vdev->priv, vq);
1033 - if (!port)
1034 + if (!port) {
1035 + flush_bufs(vq, false);
1036 return;
1037 + }
1038
1039 wake_up_interruptible(&port->waitqueue);
1040 }
1041 @@ -1808,8 +1796,10 @@ static void in_intr(struct virtqueue *vq)
1042 unsigned long flags;
1043
1044 port = find_port_by_vq(vq->vdev->priv, vq);
1045 - if (!port)
1046 + if (!port) {
1047 + flush_bufs(vq, false);
1048 return;
1049 + }
1050
1051 spin_lock_irqsave(&port->inbuf_lock, flags);
1052 port->inbuf = get_inbuf(port);
1053 @@ -1984,24 +1974,54 @@ static const struct file_operations portdev_fops = {
1054
1055 static void remove_vqs(struct ports_device *portdev)
1056 {
1057 + struct virtqueue *vq;
1058 +
1059 + virtio_device_for_each_vq(portdev->vdev, vq) {
1060 + struct port_buffer *buf;
1061 +
1062 + flush_bufs(vq, true);
1063 + while ((buf = virtqueue_detach_unused_buf(vq)))
1064 + free_buf(buf, true);
1065 + }
1066 portdev->vdev->config->del_vqs(portdev->vdev);
1067 kfree(portdev->in_vqs);
1068 kfree(portdev->out_vqs);
1069 }
1070
1071 -static void remove_controlq_data(struct ports_device *portdev)
1072 +static void virtcons_remove(struct virtio_device *vdev)
1073 {
1074 - struct port_buffer *buf;
1075 - unsigned int len;
1076 + struct ports_device *portdev;
1077 + struct port *port, *port2;
1078
1079 - if (!use_multiport(portdev))
1080 - return;
1081 + portdev = vdev->priv;
1082
1083 - while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
1084 - free_buf(buf, true);
1085 + spin_lock_irq(&pdrvdata_lock);
1086 + list_del(&portdev->list);
1087 + spin_unlock_irq(&pdrvdata_lock);
1088
1089 - while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
1090 - free_buf(buf, true);
1091 + /* Disable interrupts for vqs */
1092 + vdev->config->reset(vdev);
1093 + /* Finish up work that's lined up */
1094 + if (use_multiport(portdev))
1095 + cancel_work_sync(&portdev->control_work);
1096 + else
1097 + cancel_work_sync(&portdev->config_work);
1098 +
1099 + list_for_each_entry_safe(port, port2, &portdev->ports, list)
1100 + unplug_port(port);
1101 +
1102 + unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1103 +
1104 + /*
1105 + * When yanking out a device, we immediately lose the
1106 + * (device-side) queues. So there's no point in keeping the
1107 + * guest side around till we drop our final reference. This
1108 + * also means that any ports which are in an open state will
1109 + * have to just stop using the port, as the vqs are going
1110 + * away.
1111 + */
1112 + remove_vqs(portdev);
1113 + kfree(portdev);
1114 }
1115
1116 /*
1117 @@ -2070,6 +2090,7 @@ static int virtcons_probe(struct virtio_device *vdev)
1118
1119 spin_lock_init(&portdev->ports_lock);
1120 INIT_LIST_HEAD(&portdev->ports);
1121 + INIT_LIST_HEAD(&portdev->list);
1122
1123 virtio_device_ready(portdev->vdev);
1124
1125 @@ -2087,8 +2108,15 @@ static int virtcons_probe(struct virtio_device *vdev)
1126 if (!nr_added_bufs) {
1127 dev_err(&vdev->dev,
1128 "Error allocating buffers for control queue\n");
1129 - err = -ENOMEM;
1130 - goto free_vqs;
1131 + /*
1132 + * The host might want to notify mgmt sw about device
1133 + * add failure.
1134 + */
1135 + __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1136 + VIRTIO_CONSOLE_DEVICE_READY, 0);
1137 + /* Device was functional: we need full cleanup. */
1138 + virtcons_remove(vdev);
1139 + return -ENOMEM;
1140 }
1141 } else {
1142 /*
1143 @@ -2119,11 +2147,6 @@ static int virtcons_probe(struct virtio_device *vdev)
1144
1145 return 0;
1146
1147 -free_vqs:
1148 - /* The host might want to notify mgmt sw about device add failure */
1149 - __send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
1150 - VIRTIO_CONSOLE_DEVICE_READY, 0);
1151 - remove_vqs(portdev);
1152 free_chrdev:
1153 unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1154 free:
1155 @@ -2132,43 +2155,6 @@ static int virtcons_probe(struct virtio_device *vdev)
1156 return err;
1157 }
1158
1159 -static void virtcons_remove(struct virtio_device *vdev)
1160 -{
1161 - struct ports_device *portdev;
1162 - struct port *port, *port2;
1163 -
1164 - portdev = vdev->priv;
1165 -
1166 - spin_lock_irq(&pdrvdata_lock);
1167 - list_del(&portdev->list);
1168 - spin_unlock_irq(&pdrvdata_lock);
1169 -
1170 - /* Disable interrupts for vqs */
1171 - vdev->config->reset(vdev);
1172 - /* Finish up work that's lined up */
1173 - if (use_multiport(portdev))
1174 - cancel_work_sync(&portdev->control_work);
1175 - else
1176 - cancel_work_sync(&portdev->config_work);
1177 -
1178 - list_for_each_entry_safe(port, port2, &portdev->ports, list)
1179 - unplug_port(port);
1180 -
1181 - unregister_chrdev(portdev->chr_major, "virtio-portsdev");
1182 -
1183 - /*
1184 - * When yanking out a device, we immediately lose the
1185 - * (device-side) queues. So there's no point in keeping the
1186 - * guest side around till we drop our final reference. This
1187 - * also means that any ports which are in an open state will
1188 - * have to just stop using the port, as the vqs are going
1189 - * away.
1190 - */
1191 - remove_controlq_data(portdev);
1192 - remove_vqs(portdev);
1193 - kfree(portdev);
1194 -}
1195 -
1196 static struct virtio_device_id id_table[] = {
1197 { VIRTIO_ID_CONSOLE, VIRTIO_DEV_ANY_ID },
1198 { 0 },
1199 @@ -2209,7 +2195,6 @@ static int virtcons_freeze(struct virtio_device *vdev)
1200 */
1201 if (use_multiport(portdev))
1202 virtqueue_disable_cb(portdev->c_ivq);
1203 - remove_controlq_data(portdev);
1204
1205 list_for_each_entry(port, &portdev->ports, list) {
1206 virtqueue_disable_cb(port->in_vq);
1207 diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
1208 index 29cdec198657..422e1fc38b43 100644
1209 --- a/drivers/cpufreq/powernv-cpufreq.c
1210 +++ b/drivers/cpufreq/powernv-cpufreq.c
1211 @@ -679,6 +679,16 @@ void gpstate_timer_handler(struct timer_list *t)
1212
1213 if (!spin_trylock(&gpstates->gpstate_lock))
1214 return;
1215 + /*
1216 + * If the timer has migrated to the different cpu then bring
1217 + * it back to one of the policy->cpus
1218 + */
1219 + if (!cpumask_test_cpu(raw_smp_processor_id(), policy->cpus)) {
1220 + gpstates->timer.expires = jiffies + msecs_to_jiffies(1);
1221 + add_timer_on(&gpstates->timer, cpumask_first(policy->cpus));
1222 + spin_unlock(&gpstates->gpstate_lock);
1223 + return;
1224 + }
1225
1226 /*
1227 * If PMCR was last updated was using fast_swtich then
1228 @@ -718,10 +728,8 @@ void gpstate_timer_handler(struct timer_list *t)
1229 if (gpstate_idx != gpstates->last_lpstate_idx)
1230 queue_gpstate_timer(gpstates);
1231
1232 + set_pstate(&freq_data);
1233 spin_unlock(&gpstates->gpstate_lock);
1234 -
1235 - /* Timer may get migrated to a different cpu on cpu hot unplug */
1236 - smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1);
1237 }
1238
1239 /*
1240 diff --git a/drivers/crypto/ccp/sp-dev.c b/drivers/crypto/ccp/sp-dev.c
1241 index eb0da6572720..e0459002eb71 100644
1242 --- a/drivers/crypto/ccp/sp-dev.c
1243 +++ b/drivers/crypto/ccp/sp-dev.c
1244 @@ -252,12 +252,12 @@ struct sp_device *sp_get_psp_master_device(void)
1245 goto unlock;
1246
1247 list_for_each_entry(i, &sp_units, entry) {
1248 - if (i->psp_data)
1249 + if (i->psp_data && i->get_psp_master_device) {
1250 + ret = i->get_psp_master_device();
1251 break;
1252 + }
1253 }
1254
1255 - if (i->get_psp_master_device)
1256 - ret = i->get_psp_master_device();
1257 unlock:
1258 write_unlock_irqrestore(&sp_unit_lock, flags);
1259 return ret;
1260 diff --git a/drivers/fpga/altera-ps-spi.c b/drivers/fpga/altera-ps-spi.c
1261 index 14f14efdf0d5..06d212a3d49d 100644
1262 --- a/drivers/fpga/altera-ps-spi.c
1263 +++ b/drivers/fpga/altera-ps-spi.c
1264 @@ -249,7 +249,7 @@ static int altera_ps_probe(struct spi_device *spi)
1265
1266 conf->data = of_id->data;
1267 conf->spi = spi;
1268 - conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_HIGH);
1269 + conf->config = devm_gpiod_get(&spi->dev, "nconfig", GPIOD_OUT_LOW);
1270 if (IS_ERR(conf->config)) {
1271 dev_err(&spi->dev, "Failed to get config gpio: %ld\n",
1272 PTR_ERR(conf->config));
1273 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1274 index 4e694ae9f308..45cc4d572897 100644
1275 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1276 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
1277 @@ -1459,10 +1459,11 @@ static const u32 sgpr_init_compute_shader[] =
1278 static const u32 vgpr_init_regs[] =
1279 {
1280 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0xffffffff,
1281 - mmCOMPUTE_RESOURCE_LIMITS, 0,
1282 + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1283 mmCOMPUTE_NUM_THREAD_X, 256*4,
1284 mmCOMPUTE_NUM_THREAD_Y, 1,
1285 mmCOMPUTE_NUM_THREAD_Z, 1,
1286 + mmCOMPUTE_PGM_RSRC1, 0x100004f, /* VGPRS=15 (64 logical VGPRs), SGPRS=1 (16 SGPRs), BULKY=1 */
1287 mmCOMPUTE_PGM_RSRC2, 20,
1288 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1289 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1290 @@ -1479,10 +1480,11 @@ static const u32 vgpr_init_regs[] =
1291 static const u32 sgpr1_init_regs[] =
1292 {
1293 mmCOMPUTE_STATIC_THREAD_MGMT_SE0, 0x0f,
1294 - mmCOMPUTE_RESOURCE_LIMITS, 0x1000000,
1295 + mmCOMPUTE_RESOURCE_LIMITS, 0x1000000, /* CU_GROUP_COUNT=1 */
1296 mmCOMPUTE_NUM_THREAD_X, 256*5,
1297 mmCOMPUTE_NUM_THREAD_Y, 1,
1298 mmCOMPUTE_NUM_THREAD_Z, 1,
1299 + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1300 mmCOMPUTE_PGM_RSRC2, 20,
1301 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1302 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1303 @@ -1503,6 +1505,7 @@ static const u32 sgpr2_init_regs[] =
1304 mmCOMPUTE_NUM_THREAD_X, 256*5,
1305 mmCOMPUTE_NUM_THREAD_Y, 1,
1306 mmCOMPUTE_NUM_THREAD_Z, 1,
1307 + mmCOMPUTE_PGM_RSRC1, 0x240, /* SGPRS=9 (80 GPRS) */
1308 mmCOMPUTE_PGM_RSRC2, 20,
1309 mmCOMPUTE_USER_DATA_0, 0xedcedc00,
1310 mmCOMPUTE_USER_DATA_1, 0xedcedc01,
1311 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1312 index 8a6e6fbc78cd..2e94881d4f7f 100644
1313 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1314 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1315 @@ -4506,6 +4506,7 @@ static int dm_update_crtcs_state(struct dc *dc,
1316 struct amdgpu_dm_connector *aconnector = NULL;
1317 struct drm_connector_state *new_con_state = NULL;
1318 struct dm_connector_state *dm_conn_state = NULL;
1319 + struct drm_plane_state *new_plane_state = NULL;
1320
1321 new_stream = NULL;
1322
1323 @@ -4513,6 +4514,13 @@ static int dm_update_crtcs_state(struct dc *dc,
1324 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1325 acrtc = to_amdgpu_crtc(crtc);
1326
1327 + new_plane_state = drm_atomic_get_new_plane_state(state, new_crtc_state->crtc->primary);
1328 +
1329 + if (new_crtc_state->enable && new_plane_state && !new_plane_state->fb) {
1330 + ret = -EINVAL;
1331 + goto fail;
1332 + }
1333 +
1334 aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
1335
1336 /* TODO This hack should go away */
1337 @@ -4685,7 +4693,7 @@ static int dm_update_planes_state(struct dc *dc,
1338 if (!dm_old_crtc_state->stream)
1339 continue;
1340
1341 - DRM_DEBUG_DRIVER("Disabling DRM plane: %d on DRM crtc %d\n",
1342 + DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
1343 plane->base.id, old_plane_crtc->base.id);
1344
1345 if (!dc_remove_plane_from_context(
1346 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
1347 index 422055080df4..54a25fb048fb 100644
1348 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
1349 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
1350 @@ -400,14 +400,15 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
1351 {
1352 int src;
1353 struct irq_list_head *lh;
1354 + unsigned long irq_table_flags;
1355 DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
1356 -
1357 for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
1358 -
1359 + DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
1360 /* The handler was removed from the table,
1361 * it means it is safe to flush all the 'work'
1362 * (because no code can schedule a new one). */
1363 lh = &adev->dm.irq_handler_list_low_tab[src];
1364 + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
1365 flush_work(&lh->work);
1366 }
1367
1368 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1369 index 93421dad21bd..160933c16461 100644
1370 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1371 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
1372 @@ -157,6 +157,11 @@ dm_dp_mst_connector_destroy(struct drm_connector *connector)
1373 struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
1374 struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder;
1375
1376 + if (amdgpu_dm_connector->edid) {
1377 + kfree(amdgpu_dm_connector->edid);
1378 + amdgpu_dm_connector->edid = NULL;
1379 + }
1380 +
1381 drm_encoder_cleanup(&amdgpu_encoder->base);
1382 kfree(amdgpu_encoder);
1383 drm_connector_cleanup(connector);
1384 @@ -183,28 +188,22 @@ static int dm_connector_update_modes(struct drm_connector *connector,
1385 void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
1386 {
1387 struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
1388 - struct edid *edid;
1389 struct dc_sink *dc_sink;
1390 struct dc_sink_init_data init_params = {
1391 .link = aconnector->dc_link,
1392 .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
1393
1394 + /* FIXME none of this is safe. we shouldn't touch aconnector here in
1395 + * atomic_check
1396 + */
1397 +
1398 /*
1399 * TODO: Need to further figure out why ddc.algo is NULL while MST port exists
1400 */
1401 if (!aconnector->port || !aconnector->port->aux.ddc.algo)
1402 return;
1403
1404 - edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
1405 -
1406 - if (!edid) {
1407 - drm_mode_connector_update_edid_property(
1408 - &aconnector->base,
1409 - NULL);
1410 - return;
1411 - }
1412 -
1413 - aconnector->edid = edid;
1414 + ASSERT(aconnector->edid);
1415
1416 dc_sink = dc_link_add_remote_sink(
1417 aconnector->dc_link,
1418 @@ -217,9 +216,6 @@ void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
1419
1420 amdgpu_dm_add_sink_to_freesync_module(
1421 connector, aconnector->edid);
1422 -
1423 - drm_mode_connector_update_edid_property(
1424 - &aconnector->base, aconnector->edid);
1425 }
1426
1427 static int dm_dp_mst_get_modes(struct drm_connector *connector)
1428 @@ -426,14 +422,6 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
1429 dc_sink_release(aconnector->dc_sink);
1430 aconnector->dc_sink = NULL;
1431 }
1432 - if (aconnector->edid) {
1433 - kfree(aconnector->edid);
1434 - aconnector->edid = NULL;
1435 - }
1436 -
1437 - drm_mode_connector_update_edid_property(
1438 - &aconnector->base,
1439 - NULL);
1440
1441 aconnector->mst_connected = false;
1442 }
1443 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
1444 index 4f751a9d71a3..2368ad0b3f4d 100644
1445 --- a/drivers/gpu/drm/drm_edid.c
1446 +++ b/drivers/gpu/drm/drm_edid.c
1447 @@ -4450,6 +4450,7 @@ drm_reset_display_info(struct drm_connector *connector)
1448 info->max_tmds_clock = 0;
1449 info->dvi_dual = false;
1450 info->has_hdmi_infoframe = false;
1451 + memset(&info->hdmi, 0, sizeof(info->hdmi));
1452
1453 info->non_desktop = 0;
1454 }
1455 @@ -4461,17 +4462,11 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
1456
1457 u32 quirks = edid_get_quirks(edid);
1458
1459 + drm_reset_display_info(connector);
1460 +
1461 info->width_mm = edid->width_cm * 10;
1462 info->height_mm = edid->height_cm * 10;
1463
1464 - /* driver figures it out in this case */
1465 - info->bpc = 0;
1466 - info->color_formats = 0;
1467 - info->cea_rev = 0;
1468 - info->max_tmds_clock = 0;
1469 - info->dvi_dual = false;
1470 - info->has_hdmi_infoframe = false;
1471 -
1472 info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
1473
1474 DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
1475 diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
1476 index 1704c8897afd..fd58647fbff3 100644
1477 --- a/drivers/gpu/drm/i915/intel_cdclk.c
1478 +++ b/drivers/gpu/drm/i915/intel_cdclk.c
1479 @@ -1946,10 +1946,22 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
1480 }
1481 }
1482
1483 - /* According to BSpec, "The CD clock frequency must be at least twice
1484 + /*
1485 + * According to BSpec, "The CD clock frequency must be at least twice
1486 * the frequency of the Azalia BCLK." and BCLK is 96 MHz by default.
1487 + *
1488 + * FIXME: Check the actual, not default, BCLK being used.
1489 + *
1490 + * FIXME: This does not depend on ->has_audio because the higher CDCLK
1491 + * is required for audio probe, also when there are no audio capable
1492 + * displays connected at probe time. This leads to unnecessarily high
1493 + * CDCLK when audio is not required.
1494 + *
1495 + * FIXME: This limit is only applied when there are displays connected
1496 + * at probe time. If we probe without displays, we'll still end up using
1497 + * the platform minimum CDCLK, failing audio probe.
1498 */
1499 - if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
1500 + if (INTEL_GEN(dev_priv) >= 9)
1501 min_cdclk = max(2 * 96000, min_cdclk);
1502
1503 /*
1504 diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
1505 index da48af11eb6b..0cf33034a8ba 100644
1506 --- a/drivers/gpu/drm/i915/intel_fbdev.c
1507 +++ b/drivers/gpu/drm/i915/intel_fbdev.c
1508 @@ -801,7 +801,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
1509 return;
1510
1511 intel_fbdev_sync(ifbdev);
1512 - if (ifbdev->vma)
1513 + if (ifbdev->vma || ifbdev->helper.deferred_setup)
1514 drm_fb_helper_hotplug_event(&ifbdev->helper);
1515 }
1516
1517 diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
1518 index d758da6156a8..9faee4875ddf 100644
1519 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c
1520 +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
1521 @@ -624,19 +624,18 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
1522
1523 DRM_DEBUG_KMS("Enabling DC6\n");
1524
1525 - gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1526 + /* Wa Display #1183: skl,kbl,cfl */
1527 + if (IS_GEN9_BC(dev_priv))
1528 + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1529 + SKL_SELECT_ALTERNATE_DC_EXIT);
1530
1531 + gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
1532 }
1533
1534 void skl_disable_dc6(struct drm_i915_private *dev_priv)
1535 {
1536 DRM_DEBUG_KMS("Disabling DC6\n");
1537
1538 - /* Wa Display #1183: skl,kbl,cfl */
1539 - if (IS_GEN9_BC(dev_priv))
1540 - I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
1541 - SKL_SELECT_ALTERNATE_DC_EXIT);
1542 -
1543 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
1544 }
1545
1546 diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
1547 index 9eb96fb2c147..26a2da1f712d 100644
1548 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c
1549 +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
1550 @@ -291,7 +291,7 @@ static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
1551 ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
1552 if (ret == -ENOSPC) {
1553 spin_unlock(&vgdev->ctrlq.qlock);
1554 - wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
1555 + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
1556 spin_lock(&vgdev->ctrlq.qlock);
1557 goto retry;
1558 } else {
1559 @@ -366,7 +366,7 @@ static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
1560 ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
1561 if (ret == -ENOSPC) {
1562 spin_unlock(&vgdev->cursorq.qlock);
1563 - wait_event(vgdev->cursorq.ack_queue, vq->num_free);
1564 + wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
1565 spin_lock(&vgdev->cursorq.qlock);
1566 goto retry;
1567 } else {
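The two wait_event() predicates above now demand room for the whole request (outcnt + incnt descriptors for the control queue, outcnt for the cursor queue) instead of waking on any single free slot, which could bounce straight back into -ENOSPC. A small sketch of the predicate change only; the queue is faked and nothing here is virtio API:

#include <stdio.h>
#include <stdbool.h>

struct fake_vq { unsigned int num_free; };

/* Old check: vq->num_free != 0.  New check: room for the full sg request. */
static bool can_queue(const struct fake_vq *vq, unsigned int outcnt,
		      unsigned int incnt)
{
	return vq->num_free >= outcnt + incnt;
}

int main(void)
{
	struct fake_vq vq = { .num_free = 1 };

	printf("%d\n", can_queue(&vq, 1, 1));	/* 0: one slot is not enough */
	vq.num_free = 2;
	printf("%d\n", can_queue(&vq, 1, 1));	/* 1: add_sgs can now succeed */
	return 0;
}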
1568 diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
1569 index 5e1b68cbcd0a..e1b603ca0170 100644
1570 --- a/drivers/mtd/chips/cfi_cmdset_0001.c
1571 +++ b/drivers/mtd/chips/cfi_cmdset_0001.c
1572 @@ -45,6 +45,7 @@
1573 #define I82802AB 0x00ad
1574 #define I82802AC 0x00ac
1575 #define PF38F4476 0x881c
1576 +#define M28F00AP30 0x8963
1577 /* STMicroelectronics chips */
1578 #define M50LPW080 0x002F
1579 #define M50FLW080A 0x0080
1580 @@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
1581 extp->MinorVersion = '1';
1582 }
1583
1584 +static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
1585 +{
1586 + /*
1587 +	 * Micron (was Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
1588 +	 * Erase Suspend for their small Erase Blocks (0x8000).
1589 + */
1590 + if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
1591 + return 1;
1592 + return 0;
1593 +}
1594 +
1595 static inline struct cfi_pri_intelext *
1596 read_pri_intelext(struct map_info *map, __u16 adr)
1597 {
1598 @@ -831,21 +843,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
1599 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
1600 goto sleep;
1601
1602 +	/* Do not allow suspend if the read/write hits the EB being erased */
1603 + if ((adr & chip->in_progress_block_mask) ==
1604 + chip->in_progress_block_addr)
1605 + goto sleep;
1606 +
1607 + /* do not suspend small EBs, buggy Micron Chips */
1608 + if (cfi_is_micron_28F00AP30(cfi, chip) &&
1609 + (chip->in_progress_block_mask == ~(0x8000-1)))
1610 + goto sleep;
1611
1612 /* Erase suspend */
1613 - map_write(map, CMD(0xB0), adr);
1614 + map_write(map, CMD(0xB0), chip->in_progress_block_addr);
1615
1616 /* If the flash has finished erasing, then 'erase suspend'
1617 * appears to make some (28F320) flash devices switch to
1618 * 'read' mode. Make sure that we switch to 'read status'
1619 * mode so we get the right data. --rmk
1620 */
1621 - map_write(map, CMD(0x70), adr);
1622 + map_write(map, CMD(0x70), chip->in_progress_block_addr);
1623 chip->oldstate = FL_ERASING;
1624 chip->state = FL_ERASE_SUSPENDING;
1625 chip->erase_suspended = 1;
1626 for (;;) {
1627 - status = map_read(map, adr);
1628 + status = map_read(map, chip->in_progress_block_addr);
1629 if (map_word_andequal(map, status, status_OK, status_OK))
1630 break;
1631
1632 @@ -1041,8 +1062,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
1633 sending the 0x70 (Read Status) command to an erasing
1634 chip and expecting it to be ignored, that's what we
1635 do. */
1636 - map_write(map, CMD(0xd0), adr);
1637 - map_write(map, CMD(0x70), adr);
1638 + map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1639 + map_write(map, CMD(0x70), chip->in_progress_block_addr);
1640 chip->oldstate = FL_READY;
1641 chip->state = FL_ERASING;
1642 break;
1643 @@ -1933,6 +1954,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1644 map_write(map, CMD(0xD0), adr);
1645 chip->state = FL_ERASING;
1646 chip->erase_suspended = 0;
1647 + chip->in_progress_block_addr = adr;
1648 + chip->in_progress_block_mask = ~(len - 1);
1649
1650 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1651 adr, len,
1652 diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
1653 index 56aa6b75213d..d524a64ed754 100644
1654 --- a/drivers/mtd/chips/cfi_cmdset_0002.c
1655 +++ b/drivers/mtd/chips/cfi_cmdset_0002.c
1656 @@ -816,9 +816,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
1657 (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
1658 goto sleep;
1659
1660 - /* We could check to see if we're trying to access the sector
1661 - * that is currently being erased. However, no user will try
1662 - * anything like that so we just wait for the timeout. */
1663 +	/* Do not allow suspend if the read/write hits the EB being erased */
1664 + if ((adr & chip->in_progress_block_mask) ==
1665 + chip->in_progress_block_addr)
1666 + goto sleep;
1667
1668 /* Erase suspend */
1669 /* It's harmless to issue the Erase-Suspend and Erase-Resume
1670 @@ -2267,6 +2268,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
1671 chip->state = FL_ERASING;
1672 chip->erase_suspended = 0;
1673 chip->in_progress_block_addr = adr;
1674 + chip->in_progress_block_mask = ~(map->size - 1);
1675
1676 INVALIDATE_CACHE_UDELAY(map, chip,
1677 adr, map->size,
1678 @@ -2356,6 +2358,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1679 chip->state = FL_ERASING;
1680 chip->erase_suspended = 0;
1681 chip->in_progress_block_addr = adr;
1682 + chip->in_progress_block_mask = ~(len - 1);
1683
1684 INVALIDATE_CACHE_UDELAY(map, chip,
1685 adr, len,
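Both CFI command sets now remember the erase target as an (address, mask) pair, with the mask computed as ~(len - 1) for a power-of-two block or chip size, and the chip-ready paths refuse to suspend the erase when the incoming access falls inside that block. A stand-alone sketch of the containment test (illustrative names; block sizes assumed to be powers of two, as the mask derivation requires):

#include <stdio.h>

struct erase_progress {
	unsigned long addr;	/* start of the block being erased */
	unsigned long mask;	/* ~(block_len - 1), block_len a power of two */
};

/* Same test as the hunks: mask the access down to its block start. */
static int hits_erasing_block(const struct erase_progress *ep, unsigned long adr)
{
	return (adr & ep->mask) == ep->addr;
}

int main(void)
{
	struct erase_progress ep = { .addr = 0x20000, .mask = ~(0x8000UL - 1) };

	printf("%d\n", hits_erasing_block(&ep, 0x20004));	/* 1: same 32 KiB block */
	printf("%d\n", hits_erasing_block(&ep, 0x28000));	/* 0: next block, suspend is fine */
	return 0;
}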
1686 diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
1687 index 2196f2a233d6..795f868fe1f7 100644
1688 --- a/drivers/mtd/nand/marvell_nand.c
1689 +++ b/drivers/mtd/nand/marvell_nand.c
1690 @@ -2277,29 +2277,20 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc,
1691 /*
1692 * The legacy "num-cs" property indicates the number of CS on the only
1693 * chip connected to the controller (legacy bindings does not support
1694 - * more than one chip). CS are only incremented one by one while the RB
1695 - * pin is always the #0.
1696 + * more than one chip). The CS and RB pins are always the #0.
1697 *
1698 * When not using legacy bindings, a couple of "reg" and "nand-rb"
1699 * properties must be filled. For each chip, expressed as a subnode,
1700 * "reg" points to the CS lines and "nand-rb" to the RB line.
1701 */
1702 - if (pdata) {
1703 + if (pdata || nfc->caps->legacy_of_bindings) {
1704 nsels = 1;
1705 - } else if (nfc->caps->legacy_of_bindings &&
1706 - !of_get_property(np, "num-cs", &nsels)) {
1707 - dev_err(dev, "missing num-cs property\n");
1708 - return -EINVAL;
1709 - } else if (!of_get_property(np, "reg", &nsels)) {
1710 - dev_err(dev, "missing reg property\n");
1711 - return -EINVAL;
1712 - }
1713 -
1714 - if (!pdata)
1715 - nsels /= sizeof(u32);
1716 - if (!nsels) {
1717 - dev_err(dev, "invalid reg property size\n");
1718 - return -EINVAL;
1719 + } else {
1720 + nsels = of_property_count_elems_of_size(np, "reg", sizeof(u32));
1721 + if (nsels <= 0) {
1722 + dev_err(dev, "missing/invalid reg property\n");
1723 + return -EINVAL;
1724 + }
1725 }
1726
1727 /* Alloc the nand chip structure */
1728 diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
1729 index c5bee00b7f5e..76761b841f1f 100644
1730 --- a/drivers/mtd/nand/tango_nand.c
1731 +++ b/drivers/mtd/nand/tango_nand.c
1732 @@ -643,7 +643,7 @@ static int tango_nand_probe(struct platform_device *pdev)
1733
1734 writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
1735
1736 - clk = clk_get(&pdev->dev, NULL);
1737 + clk = devm_clk_get(&pdev->dev, NULL);
1738 if (IS_ERR(clk))
1739 return PTR_ERR(clk);
1740
1741 diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
1742 index 4b8e9183489a..5872f31eaa60 100644
1743 --- a/drivers/mtd/spi-nor/cadence-quadspi.c
1744 +++ b/drivers/mtd/spi-nor/cadence-quadspi.c
1745 @@ -501,7 +501,9 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
1746 void __iomem *reg_base = cqspi->iobase;
1747 void __iomem *ahb_base = cqspi->ahb_base;
1748 unsigned int remaining = n_rx;
1749 + unsigned int mod_bytes = n_rx % 4;
1750 unsigned int bytes_to_read = 0;
1751 + u8 *rxbuf_end = rxbuf + n_rx;
1752 int ret = 0;
1753
1754 writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
1755 @@ -530,11 +532,24 @@ static int cqspi_indirect_read_execute(struct spi_nor *nor, u8 *rxbuf,
1756 }
1757
1758 while (bytes_to_read != 0) {
1759 + unsigned int word_remain = round_down(remaining, 4);
1760 +
1761 bytes_to_read *= cqspi->fifo_width;
1762 bytes_to_read = bytes_to_read > remaining ?
1763 remaining : bytes_to_read;
1764 - ioread32_rep(ahb_base, rxbuf,
1765 - DIV_ROUND_UP(bytes_to_read, 4));
1766 + bytes_to_read = round_down(bytes_to_read, 4);
1767 + /* Read 4 byte word chunks then single bytes */
1768 + if (bytes_to_read) {
1769 + ioread32_rep(ahb_base, rxbuf,
1770 + (bytes_to_read / 4));
1771 + } else if (!word_remain && mod_bytes) {
1772 + unsigned int temp = ioread32(ahb_base);
1773 +
1774 + bytes_to_read = mod_bytes;
1775 + memcpy(rxbuf, &temp, min((unsigned int)
1776 + (rxbuf_end - rxbuf),
1777 + bytes_to_read));
1778 + }
1779 rxbuf += bytes_to_read;
1780 remaining -= bytes_to_read;
1781 bytes_to_read = cqspi_get_rd_sram_level(cqspi);
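The read loop above now drains the SRAM FIFO in whole 32-bit words (round_down(bytes_to_read, 4) via ioread32_rep) and, once no full word of data remains, finishes with one 32-bit read copied out byte-wise, so a transfer length that is not a multiple of four no longer over-reads the destination buffer. A user-space sketch of the length split only (hypothetical names; no MMIO is modelled):

#include <stdio.h>

/* Split "remaining" into 4-byte words plus a sub-word tail, as the hunk does. */
static void split_read(unsigned int remaining)
{
	unsigned int words = remaining / 4;	/* round_down(remaining, 4) / 4 */
	unsigned int tail  = remaining % 4;	/* mod_bytes in the hunk */

	printf("remaining=%u -> %u word reads + %u tail bytes\n",
	       remaining, words, tail);
}

int main(void)
{
	split_read(10);	/* 2 word reads, then one ioread32() copied for 2 bytes */
	split_read(8);	/* 2 word reads, no partial word needed */
	return 0;
}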
1782 diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
1783 index 84aa9d676375..6da20b9688f7 100644
1784 --- a/drivers/of/fdt.c
1785 +++ b/drivers/of/fdt.c
1786 @@ -942,7 +942,7 @@ int __init early_init_dt_scan_chosen_stdout(void)
1787 int offset;
1788 const char *p, *q, *options = NULL;
1789 int l;
1790 - const struct earlycon_id *match;
1791 + const struct earlycon_id **p_match;
1792 const void *fdt = initial_boot_params;
1793
1794 offset = fdt_path_offset(fdt, "/chosen");
1795 @@ -969,7 +969,10 @@ int __init early_init_dt_scan_chosen_stdout(void)
1796 return 0;
1797 }
1798
1799 - for (match = __earlycon_table; match < __earlycon_table_end; match++) {
1800 + for (p_match = __earlycon_table; p_match < __earlycon_table_end;
1801 + p_match++) {
1802 + const struct earlycon_id *match = *p_match;
1803 +
1804 if (!match->compatible[0])
1805 continue;
1806
1807 diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
1808 index b04d37b3c5de..9abf549631b4 100644
1809 --- a/drivers/pci/host/pci-aardvark.c
1810 +++ b/drivers/pci/host/pci-aardvark.c
1811 @@ -29,6 +29,7 @@
1812 #define PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT 5
1813 #define PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE (0 << 11)
1814 #define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT 12
1815 +#define PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ 0x2
1816 #define PCIE_CORE_LINK_CTRL_STAT_REG 0xd0
1817 #define PCIE_CORE_LINK_L0S_ENTRY BIT(0)
1818 #define PCIE_CORE_LINK_TRAINING BIT(5)
1819 @@ -100,7 +101,8 @@
1820 #define PCIE_ISR1_MASK_REG (CONTROL_BASE_ADDR + 0x4C)
1821 #define PCIE_ISR1_POWER_STATE_CHANGE BIT(4)
1822 #define PCIE_ISR1_FLUSH BIT(5)
1823 -#define PCIE_ISR1_ALL_MASK GENMASK(5, 4)
1824 +#define PCIE_ISR1_INTX_ASSERT(val) BIT(8 + (val))
1825 +#define PCIE_ISR1_ALL_MASK GENMASK(11, 4)
1826 #define PCIE_MSI_ADDR_LOW_REG (CONTROL_BASE_ADDR + 0x50)
1827 #define PCIE_MSI_ADDR_HIGH_REG (CONTROL_BASE_ADDR + 0x54)
1828 #define PCIE_MSI_STATUS_REG (CONTROL_BASE_ADDR + 0x58)
1829 @@ -172,8 +174,6 @@
1830 #define PCIE_CONFIG_WR_TYPE0 0xa
1831 #define PCIE_CONFIG_WR_TYPE1 0xb
1832
1833 -/* PCI_BDF shifts 8bit, so we need extra 4bit shift */
1834 -#define PCIE_BDF(dev) (dev << 4)
1835 #define PCIE_CONF_BUS(bus) (((bus) & 0xff) << 20)
1836 #define PCIE_CONF_DEV(dev) (((dev) & 0x1f) << 15)
1837 #define PCIE_CONF_FUNC(fun) (((fun) & 0x7) << 12)
1838 @@ -296,7 +296,8 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
1839 reg = PCIE_CORE_DEV_CTRL_STATS_RELAX_ORDER_DISABLE |
1840 (7 << PCIE_CORE_DEV_CTRL_STATS_MAX_PAYLOAD_SZ_SHIFT) |
1841 PCIE_CORE_DEV_CTRL_STATS_SNOOP_DISABLE |
1842 - PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT;
1843 + (PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SZ <<
1844 + PCIE_CORE_DEV_CTRL_STATS_MAX_RD_REQ_SIZE_SHIFT);
1845 advk_writel(pcie, reg, PCIE_CORE_DEV_CTRL_STATS_REG);
1846
1847 /* Program PCIe Control 2 to disable strict ordering */
1848 @@ -437,7 +438,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
1849 u32 reg;
1850 int ret;
1851
1852 - if (PCI_SLOT(devfn) != 0) {
1853 + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) {
1854 *val = 0xffffffff;
1855 return PCIBIOS_DEVICE_NOT_FOUND;
1856 }
1857 @@ -456,7 +457,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn,
1858 advk_writel(pcie, reg, PIO_CTRL);
1859
1860 /* Program the address registers */
1861 - reg = PCIE_BDF(devfn) | PCIE_CONF_REG(where);
1862 + reg = PCIE_CONF_ADDR(bus->number, devfn, where);
1863 advk_writel(pcie, reg, PIO_ADDR_LS);
1864 advk_writel(pcie, 0, PIO_ADDR_MS);
1865
1866 @@ -491,7 +492,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
1867 int offset;
1868 int ret;
1869
1870 - if (PCI_SLOT(devfn) != 0)
1871 + if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0)
1872 return PCIBIOS_DEVICE_NOT_FOUND;
1873
1874 if (where % size)
1875 @@ -609,9 +610,9 @@ static void advk_pcie_irq_mask(struct irq_data *d)
1876 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1877 u32 mask;
1878
1879 - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
1880 - mask |= PCIE_ISR0_INTX_ASSERT(hwirq);
1881 - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
1882 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1883 + mask |= PCIE_ISR1_INTX_ASSERT(hwirq);
1884 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
1885 }
1886
1887 static void advk_pcie_irq_unmask(struct irq_data *d)
1888 @@ -620,9 +621,9 @@ static void advk_pcie_irq_unmask(struct irq_data *d)
1889 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1890 u32 mask;
1891
1892 - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
1893 - mask &= ~PCIE_ISR0_INTX_ASSERT(hwirq);
1894 - advk_writel(pcie, mask, PCIE_ISR0_MASK_REG);
1895 + mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1896 + mask &= ~PCIE_ISR1_INTX_ASSERT(hwirq);
1897 + advk_writel(pcie, mask, PCIE_ISR1_MASK_REG);
1898 }
1899
1900 static int advk_pcie_irq_map(struct irq_domain *h,
1901 @@ -765,29 +766,35 @@ static void advk_pcie_handle_msi(struct advk_pcie *pcie)
1902
1903 static void advk_pcie_handle_int(struct advk_pcie *pcie)
1904 {
1905 - u32 val, mask, status;
1906 + u32 isr0_val, isr0_mask, isr0_status;
1907 + u32 isr1_val, isr1_mask, isr1_status;
1908 int i, virq;
1909
1910 - val = advk_readl(pcie, PCIE_ISR0_REG);
1911 - mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
1912 - status = val & ((~mask) & PCIE_ISR0_ALL_MASK);
1913 + isr0_val = advk_readl(pcie, PCIE_ISR0_REG);
1914 + isr0_mask = advk_readl(pcie, PCIE_ISR0_MASK_REG);
1915 + isr0_status = isr0_val & ((~isr0_mask) & PCIE_ISR0_ALL_MASK);
1916 +
1917 + isr1_val = advk_readl(pcie, PCIE_ISR1_REG);
1918 + isr1_mask = advk_readl(pcie, PCIE_ISR1_MASK_REG);
1919 + isr1_status = isr1_val & ((~isr1_mask) & PCIE_ISR1_ALL_MASK);
1920
1921 - if (!status) {
1922 - advk_writel(pcie, val, PCIE_ISR0_REG);
1923 + if (!isr0_status && !isr1_status) {
1924 + advk_writel(pcie, isr0_val, PCIE_ISR0_REG);
1925 + advk_writel(pcie, isr1_val, PCIE_ISR1_REG);
1926 return;
1927 }
1928
1929 /* Process MSI interrupts */
1930 - if (status & PCIE_ISR0_MSI_INT_PENDING)
1931 + if (isr0_status & PCIE_ISR0_MSI_INT_PENDING)
1932 advk_pcie_handle_msi(pcie);
1933
1934 /* Process legacy interrupts */
1935 for (i = 0; i < PCI_NUM_INTX; i++) {
1936 - if (!(status & PCIE_ISR0_INTX_ASSERT(i)))
1937 + if (!(isr1_status & PCIE_ISR1_INTX_ASSERT(i)))
1938 continue;
1939
1940 - advk_writel(pcie, PCIE_ISR0_INTX_ASSERT(i),
1941 - PCIE_ISR0_REG);
1942 + advk_writel(pcie, PCIE_ISR1_INTX_ASSERT(i),
1943 + PCIE_ISR1_REG);
1944
1945 virq = irq_find_mapping(pcie->irq_domain, i);
1946 generic_handle_irq(virq);
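The config accessors above now feed PIO_ADDR_LS with PCIE_CONF_ADDR(bus, devfn, where), so the PIO address carries the bus number as well as the devfn, and the slot-0 restriction is applied only on the root bus. A sketch of that encoding using the field positions visible in the header hunk (bus << 20, device << 15, function << 12); the dword masking of the register offset is an assumption here, not quoted from the driver:

#include <stdio.h>

/* Compose a configuration-space address from bus/devfn/offset. */
static unsigned int pcie_conf_addr(unsigned int bus, unsigned int devfn,
				   unsigned int where)
{
	unsigned int dev = (devfn >> 3) & 0x1f;	/* PCI_SLOT() */
	unsigned int fn  = devfn & 0x7;		/* PCI_FUNC() */

	return ((bus & 0xff) << 20) |	/* PCIE_CONF_BUS()  */
	       (dev << 15) |		/* PCIE_CONF_DEV()  */
	       (fn << 12) |		/* PCIE_CONF_FUNC() */
	       (where & 0xffc);		/* dword-aligned register offset (assumed) */
}

int main(void)
{
	/* Bus 1, device 3, function 2, offset 0x10 (BAR0). */
	printf("0x%08x\n", pcie_conf_addr(1, (3 << 3) | 2, 0x10));
	return 0;
}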
1947 diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
1948 index 3bed6beda051..eede34e5ada2 100644
1949 --- a/drivers/pci/pci-driver.c
1950 +++ b/drivers/pci/pci-driver.c
1951 @@ -945,10 +945,11 @@ static int pci_pm_freeze(struct device *dev)
1952 * devices should not be touched during freeze/thaw transitions,
1953 * however.
1954 */
1955 - if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND))
1956 + if (!dev_pm_smart_suspend_and_suspended(dev)) {
1957 pm_runtime_resume(dev);
1958 + pci_dev->state_saved = false;
1959 + }
1960
1961 - pci_dev->state_saved = false;
1962 if (pm->freeze) {
1963 int error;
1964
1965 diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
1966 index 304e891e35fc..60f2250fd96b 100644
1967 --- a/drivers/rtc/rtc-opal.c
1968 +++ b/drivers/rtc/rtc-opal.c
1969 @@ -57,7 +57,7 @@ static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
1970
1971 static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
1972 {
1973 - long rc = OPAL_BUSY;
1974 + s64 rc = OPAL_BUSY;
1975 int retries = 10;
1976 u32 y_m_d;
1977 u64 h_m_s_ms;
1978 @@ -66,13 +66,17 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
1979
1980 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
1981 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
1982 - if (rc == OPAL_BUSY_EVENT)
1983 + if (rc == OPAL_BUSY_EVENT) {
1984 + msleep(OPAL_BUSY_DELAY_MS);
1985 opal_poll_events(NULL);
1986 - else if (retries-- && (rc == OPAL_HARDWARE
1987 - || rc == OPAL_INTERNAL_ERROR))
1988 - msleep(10);
1989 - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
1990 - break;
1991 + } else if (rc == OPAL_BUSY) {
1992 + msleep(OPAL_BUSY_DELAY_MS);
1993 + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
1994 + if (retries--) {
1995 + msleep(10); /* Wait 10ms before retry */
1996 + rc = OPAL_BUSY; /* go around again */
1997 + }
1998 + }
1999 }
2000
2001 if (rc != OPAL_SUCCESS)
2002 @@ -87,21 +91,26 @@ static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
2003
2004 static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
2005 {
2006 - long rc = OPAL_BUSY;
2007 + s64 rc = OPAL_BUSY;
2008 int retries = 10;
2009 u32 y_m_d = 0;
2010 u64 h_m_s_ms = 0;
2011
2012 tm_to_opal(tm, &y_m_d, &h_m_s_ms);
2013 +
2014 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
2015 rc = opal_rtc_write(y_m_d, h_m_s_ms);
2016 - if (rc == OPAL_BUSY_EVENT)
2017 + if (rc == OPAL_BUSY_EVENT) {
2018 + msleep(OPAL_BUSY_DELAY_MS);
2019 opal_poll_events(NULL);
2020 - else if (retries-- && (rc == OPAL_HARDWARE
2021 - || rc == OPAL_INTERNAL_ERROR))
2022 - msleep(10);
2023 - else if (rc != OPAL_BUSY && rc != OPAL_BUSY_EVENT)
2024 - break;
2025 + } else if (rc == OPAL_BUSY) {
2026 + msleep(OPAL_BUSY_DELAY_MS);
2027 + } else if (rc == OPAL_HARDWARE || rc == OPAL_INTERNAL_ERROR) {
2028 + if (retries--) {
2029 + msleep(10); /* Wait 10ms before retry */
2030 + rc = OPAL_BUSY; /* go around again */
2031 + }
2032 + }
2033 }
2034
2035 return rc == OPAL_SUCCESS ? 0 : -EIO;
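Both RTC paths now sleep on OPAL_BUSY / OPAL_BUSY_EVENT and, for a limited number of OPAL_HARDWARE / OPAL_INTERNAL_ERROR results, reset rc to OPAL_BUSY so the loop actually goes around again. A stand-alone sketch of that control flow with made-up status codes and a stubbed firmware call standing in for opal_rtc_read():

#include <stdio.h>

enum { OPAL_SUCCESS = 0, OPAL_BUSY = -1, OPAL_BUSY_EVENT = -2, OPAL_HARDWARE = -3 };

static int fake_firmware_call(void)
{
	static int calls;

	return ++calls < 3 ? OPAL_BUSY : OPAL_SUCCESS;	/* busy twice, then done */
}

int main(void)
{
	int rc = OPAL_BUSY, retries = 10;

	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
		rc = fake_firmware_call();
		if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
			/* driver: msleep(OPAL_BUSY_DELAY_MS), poll events on _EVENT */
		} else if (rc == OPAL_HARDWARE && retries--) {
			rc = OPAL_BUSY;	/* go around again, as in the hunk */
		}
	}
	printf("rc=%d\n", rc);	/* 0 */
	return 0;
}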
2036 diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
2037 index c30420c517b1..e96b85579f21 100644
2038 --- a/drivers/s390/cio/vfio_ccw_fsm.c
2039 +++ b/drivers/s390/cio/vfio_ccw_fsm.c
2040 @@ -20,12 +20,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
2041 int ccode;
2042 __u8 lpm;
2043 unsigned long flags;
2044 + int ret;
2045
2046 sch = private->sch;
2047
2048 spin_lock_irqsave(sch->lock, flags);
2049 private->state = VFIO_CCW_STATE_BUSY;
2050 - spin_unlock_irqrestore(sch->lock, flags);
2051
2052 orb = cp_get_orb(&private->cp, (u32)(addr_t)sch, sch->lpm);
2053
2054 @@ -38,10 +38,12 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
2055 * Initialize device status information
2056 */
2057 sch->schib.scsw.cmd.actl |= SCSW_ACTL_START_PEND;
2058 - return 0;
2059 + ret = 0;
2060 + break;
2061 case 1: /* Status pending */
2062 case 2: /* Busy */
2063 - return -EBUSY;
2064 + ret = -EBUSY;
2065 + break;
2066 case 3: /* Device/path not operational */
2067 {
2068 lpm = orb->cmd.lpm;
2069 @@ -51,13 +53,16 @@ static int fsm_io_helper(struct vfio_ccw_private *private)
2070 sch->lpm = 0;
2071
2072 if (cio_update_schib(sch))
2073 - return -ENODEV;
2074 -
2075 - return sch->lpm ? -EACCES : -ENODEV;
2076 + ret = -ENODEV;
2077 + else
2078 + ret = sch->lpm ? -EACCES : -ENODEV;
2079 + break;
2080 }
2081 default:
2082 - return ccode;
2083 + ret = ccode;
2084 }
2085 + spin_unlock_irqrestore(sch->lock, flags);
2086 + return ret;
2087 }
2088
2089 static void fsm_notoper(struct vfio_ccw_private *private,
2090 diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
2091 index c44d7c7ffc92..1754f55e2fac 100644
2092 --- a/drivers/sbus/char/oradax.c
2093 +++ b/drivers/sbus/char/oradax.c
2094 @@ -3,7 +3,7 @@
2095 *
2096 * This program is free software: you can redistribute it and/or modify
2097 * it under the terms of the GNU General Public License as published by
2098 - * the Free Software Foundation, either version 3 of the License, or
2099 + * the Free Software Foundation, either version 2 of the License, or
2100 * (at your option) any later version.
2101 *
2102 * This program is distributed in the hope that it will be useful,
2103 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
2104 index 1fa84d6a0f8b..d19b41bcebea 100644
2105 --- a/drivers/scsi/sd.c
2106 +++ b/drivers/scsi/sd.c
2107 @@ -2121,6 +2121,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
2108 break; /* standby */
2109 if (sshdr.asc == 4 && sshdr.ascq == 0xc)
2110 break; /* unavailable */
2111 + if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
2112 + break; /* sanitize in progress */
2113 /*
2114 * Issue command to spin up drive when not ready
2115 */
2116 diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
2117 index 89cf4498f535..973a497739f0 100644
2118 --- a/drivers/scsi/sd_zbc.c
2119 +++ b/drivers/scsi/sd_zbc.c
2120 @@ -400,8 +400,10 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
2121 *
2122 * Check that all zones of the device are equal. The last zone can however
2123 * be smaller. The zone size must also be a power of two number of LBAs.
2124 + *
2125 + * Returns the zone size in bytes upon success or an error code upon failure.
2126 */
2127 -static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
2128 +static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
2129 {
2130 u64 zone_blocks = 0;
2131 sector_t block = 0;
2132 @@ -412,8 +414,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
2133 int ret;
2134 u8 same;
2135
2136 - sdkp->zone_blocks = 0;
2137 -
2138 /* Get a buffer */
2139 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
2140 if (!buf)
2141 @@ -445,16 +445,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
2142
2143 /* Parse zone descriptors */
2144 while (rec < buf + buf_len) {
2145 - zone_blocks = get_unaligned_be64(&rec[8]);
2146 - if (sdkp->zone_blocks == 0) {
2147 - sdkp->zone_blocks = zone_blocks;
2148 - } else if (zone_blocks != sdkp->zone_blocks &&
2149 - (block + zone_blocks < sdkp->capacity
2150 - || zone_blocks > sdkp->zone_blocks)) {
2151 - zone_blocks = 0;
2152 + u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
2153 +
2154 + if (zone_blocks == 0) {
2155 + zone_blocks = this_zone_blocks;
2156 + } else if (this_zone_blocks != zone_blocks &&
2157 + (block + this_zone_blocks < sdkp->capacity
2158 + || this_zone_blocks > zone_blocks)) {
2159 + this_zone_blocks = 0;
2160 goto out;
2161 }
2162 - block += zone_blocks;
2163 + block += this_zone_blocks;
2164 rec += 64;
2165 }
2166
2167 @@ -467,8 +468,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
2168
2169 } while (block < sdkp->capacity);
2170
2171 - zone_blocks = sdkp->zone_blocks;
2172 -
2173 out:
2174 if (!zone_blocks) {
2175 if (sdkp->first_scan)
2176 @@ -488,8 +487,7 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
2177 "Zone size too large\n");
2178 ret = -ENODEV;
2179 } else {
2180 - sdkp->zone_blocks = zone_blocks;
2181 - sdkp->zone_shift = ilog2(zone_blocks);
2182 + ret = zone_blocks;
2183 }
2184
2185 out_free:
2186 @@ -500,21 +498,21 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
2187
2188 /**
2189 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
2190 - * @sdkp: The disk of the bitmap
2191 + * @nr_zones: Number of zones to allocate space for.
2192 + * @numa_node: NUMA node to allocate the memory from.
2193 */
2194 -static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
2195 +static inline unsigned long *
2196 +sd_zbc_alloc_zone_bitmap(u32 nr_zones, int numa_node)
2197 {
2198 - struct request_queue *q = sdkp->disk->queue;
2199 -
2200 - return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
2201 - * sizeof(unsigned long),
2202 - GFP_KERNEL, q->node);
2203 + return kzalloc_node(BITS_TO_LONGS(nr_zones) * sizeof(unsigned long),
2204 + GFP_KERNEL, numa_node);
2205 }
2206
2207 /**
2208 * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
2209 * @sdkp: disk used
2210 * @buf: report reply buffer
2211 + * @zone_shift: logarithm base 2 of the number of blocks in a zone
2212 * @seq_zone_bitamp: bitmap of sequential zones to set
2213 *
2214 * Parse reported zone descriptors in @buf to identify sequential zones and
2215 @@ -524,7 +522,7 @@ static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
2216 * Return the LBA after the last zone reported.
2217 */
2218 static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
2219 - unsigned int buflen,
2220 + unsigned int buflen, u32 zone_shift,
2221 unsigned long *seq_zones_bitmap)
2222 {
2223 sector_t lba, next_lba = sdkp->capacity;
2224 @@ -543,7 +541,7 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
2225 if (type != ZBC_ZONE_TYPE_CONV &&
2226 cond != ZBC_ZONE_COND_READONLY &&
2227 cond != ZBC_ZONE_COND_OFFLINE)
2228 - set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
2229 + set_bit(lba >> zone_shift, seq_zones_bitmap);
2230 next_lba = lba + get_unaligned_be64(&rec[8]);
2231 rec += 64;
2232 }
2233 @@ -552,12 +550,16 @@ static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
2234 }
2235
2236 /**
2237 - * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
2238 + * sd_zbc_setup_seq_zones_bitmap - Initialize a seq zone bitmap.
2239 * @sdkp: target disk
2240 + * @zone_shift: logarithm base 2 of the number of blocks in a zone
2241 + * @nr_zones: number of zones to set up a seq zone bitmap for
2242 *
2243 * Allocate a zone bitmap and initialize it by identifying sequential zones.
2244 */
2245 -static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
2246 +static unsigned long *
2247 +sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp, u32 zone_shift,
2248 + u32 nr_zones)
2249 {
2250 struct request_queue *q = sdkp->disk->queue;
2251 unsigned long *seq_zones_bitmap;
2252 @@ -565,9 +567,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
2253 unsigned char *buf;
2254 int ret = -ENOMEM;
2255
2256 - seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
2257 + seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(nr_zones, q->node);
2258 if (!seq_zones_bitmap)
2259 - return -ENOMEM;
2260 + return ERR_PTR(-ENOMEM);
2261
2262 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
2263 if (!buf)
2264 @@ -578,7 +580,7 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
2265 if (ret)
2266 goto out;
2267 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
2268 - seq_zones_bitmap);
2269 + zone_shift, seq_zones_bitmap);
2270 }
2271
2272 if (lba != sdkp->capacity) {
2273 @@ -590,12 +592,9 @@ static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
2274 kfree(buf);
2275 if (ret) {
2276 kfree(seq_zones_bitmap);
2277 - return ret;
2278 + return ERR_PTR(ret);
2279 }
2280 -
2281 - q->seq_zones_bitmap = seq_zones_bitmap;
2282 -
2283 - return 0;
2284 + return seq_zones_bitmap;
2285 }
2286
2287 static void sd_zbc_cleanup(struct scsi_disk *sdkp)
2288 @@ -611,44 +610,64 @@ static void sd_zbc_cleanup(struct scsi_disk *sdkp)
2289 q->nr_zones = 0;
2290 }
2291
2292 -static int sd_zbc_setup(struct scsi_disk *sdkp)
2293 +static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
2294 {
2295 struct request_queue *q = sdkp->disk->queue;
2296 + u32 zone_shift = ilog2(zone_blocks);
2297 + u32 nr_zones;
2298 int ret;
2299
2300 - /* READ16/WRITE16 is mandatory for ZBC disks */
2301 - sdkp->device->use_16_for_rw = 1;
2302 - sdkp->device->use_10_for_rw = 0;
2303 -
2304 /* chunk_sectors indicates the zone size */
2305 - blk_queue_chunk_sectors(sdkp->disk->queue,
2306 - logical_to_sectors(sdkp->device, sdkp->zone_blocks));
2307 - sdkp->nr_zones =
2308 - round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
2309 + blk_queue_chunk_sectors(q,
2310 + logical_to_sectors(sdkp->device, zone_blocks));
2311 + nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
2312
2313 /*
2314 * Initialize the device request queue information if the number
2315 * of zones changed.
2316 */
2317 - if (sdkp->nr_zones != q->nr_zones) {
2318 -
2319 - sd_zbc_cleanup(sdkp);
2320 -
2321 - q->nr_zones = sdkp->nr_zones;
2322 - if (sdkp->nr_zones) {
2323 - q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
2324 - if (!q->seq_zones_wlock) {
2325 + if (nr_zones != sdkp->nr_zones || nr_zones != q->nr_zones) {
2326 + unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
2327 + size_t zone_bitmap_size;
2328 +
2329 + if (nr_zones) {
2330 + seq_zones_wlock = sd_zbc_alloc_zone_bitmap(nr_zones,
2331 + q->node);
2332 + if (!seq_zones_wlock) {
2333 ret = -ENOMEM;
2334 goto err;
2335 }
2336
2337 - ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
2338 - if (ret) {
2339 - sd_zbc_cleanup(sdkp);
2340 + seq_zones_bitmap = sd_zbc_setup_seq_zones_bitmap(sdkp,
2341 + zone_shift, nr_zones);
2342 + if (IS_ERR(seq_zones_bitmap)) {
2343 + ret = PTR_ERR(seq_zones_bitmap);
2344 + kfree(seq_zones_wlock);
2345 goto err;
2346 }
2347 }
2348 -
2349 + zone_bitmap_size = BITS_TO_LONGS(nr_zones) *
2350 + sizeof(unsigned long);
2351 + blk_mq_freeze_queue(q);
2352 + if (q->nr_zones != nr_zones) {
2353 + /* READ16/WRITE16 is mandatory for ZBC disks */
2354 + sdkp->device->use_16_for_rw = 1;
2355 + sdkp->device->use_10_for_rw = 0;
2356 +
2357 + sdkp->zone_blocks = zone_blocks;
2358 + sdkp->zone_shift = zone_shift;
2359 + sdkp->nr_zones = nr_zones;
2360 + q->nr_zones = nr_zones;
2361 + swap(q->seq_zones_wlock, seq_zones_wlock);
2362 + swap(q->seq_zones_bitmap, seq_zones_bitmap);
2363 + } else if (memcmp(q->seq_zones_bitmap, seq_zones_bitmap,
2364 + zone_bitmap_size) != 0) {
2365 + memcpy(q->seq_zones_bitmap, seq_zones_bitmap,
2366 + zone_bitmap_size);
2367 + }
2368 + blk_mq_unfreeze_queue(q);
2369 + kfree(seq_zones_wlock);
2370 + kfree(seq_zones_bitmap);
2371 }
2372
2373 return 0;
2374 @@ -660,6 +679,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
2375
2376 int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
2377 {
2378 + int64_t zone_blocks;
2379 int ret;
2380
2381 if (!sd_is_zoned(sdkp))
2382 @@ -696,12 +716,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
2383 * Check zone size: only devices with a constant zone size (except
2384 * an eventual last runt zone) that is a power of 2 are supported.
2385 */
2386 - ret = sd_zbc_check_zone_size(sdkp);
2387 - if (ret)
2388 + zone_blocks = sd_zbc_check_zone_size(sdkp);
2389 + ret = -EFBIG;
2390 + if (zone_blocks != (u32)zone_blocks)
2391 + goto err;
2392 + ret = zone_blocks;
2393 + if (ret < 0)
2394 goto err;
2395
2396 /* The drive satisfies the kernel restrictions: set it up */
2397 - ret = sd_zbc_setup(sdkp);
2398 + ret = sd_zbc_setup(sdkp, zone_blocks);
2399 if (ret)
2400 goto err;
2401
2402 diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
2403 index 884419c37e84..457ea1f8db30 100644
2404 --- a/drivers/slimbus/messaging.c
2405 +++ b/drivers/slimbus/messaging.c
2406 @@ -183,7 +183,7 @@ static u16 slim_slicesize(int code)
2407 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
2408 };
2409
2410 - clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
2411 + code = clamp(code, 1, (int)ARRAY_SIZE(sizetocode));
2412
2413 return sizetocode[code - 1];
2414 }
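The one-line fix works because clamp() yields the clamped value rather than modifying its argument in place, so the result must be assigned back before it is used as a table index. A short user-space illustration with a local stand-in macro (the table values are the ones from the hunk above):

#include <stdio.h>

#define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

int main(void)
{
	static const int sizetocode[16] = {
		0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7
	};
	int code = 40;

	code = clamp(code, 1, 16);		/* must assign: clamp() has no side effect */
	printf("%d\n", sizetocode[code - 1]);	/* 7, instead of an out-of-bounds read */
	return 0;
}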
2415 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
2416 index 3b3e1f6632d7..1dbe27c9946c 100644
2417 --- a/drivers/tty/n_gsm.c
2418 +++ b/drivers/tty/n_gsm.c
2419 @@ -121,6 +121,9 @@ struct gsm_dlci {
2420 struct mutex mutex;
2421
2422 /* Link layer */
2423 + int mode;
2424 +#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
2425 +#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
2426 spinlock_t lock; /* Protects the internal state */
2427 struct timer_list t1; /* Retransmit timer for SABM and UA */
2428 int retries;
2429 @@ -1364,7 +1367,13 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm,
2430 ctrl->data = data;
2431 ctrl->len = clen;
2432 gsm->pending_cmd = ctrl;
2433 - gsm->cretries = gsm->n2;
2434 +
2435 + /* If DLCI0 is in ADM mode skip retries, it won't respond */
2436 + if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
2437 + gsm->cretries = 1;
2438 + else
2439 + gsm->cretries = gsm->n2;
2440 +
2441 mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
2442 gsm_control_transmit(gsm, ctrl);
2443 spin_unlock_irqrestore(&gsm->control_lock, flags);
2444 @@ -1472,6 +1481,7 @@ static void gsm_dlci_t1(struct timer_list *t)
2445 if (debug & 8)
2446 pr_info("DLCI %d opening in ADM mode.\n",
2447 dlci->addr);
2448 + dlci->mode = DLCI_MODE_ADM;
2449 gsm_dlci_open(dlci);
2450 } else {
2451 gsm_dlci_close(dlci);
2452 @@ -2861,11 +2871,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
2453 static int gsm_carrier_raised(struct tty_port *port)
2454 {
2455 struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
2456 + struct gsm_mux *gsm = dlci->gsm;
2457 +
2458 /* Not yet open so no carrier info */
2459 if (dlci->state != DLCI_OPEN)
2460 return 0;
2461 if (debug & 2)
2462 return 1;
2463 +
2464 + /*
2465 + * Basic mode with control channel in ADM mode may not respond
2466 + * to CMD_MSC at all and modem_rx is empty.
2467 + */
2468 + if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
2469 + !dlci->modem_rx)
2470 + return 1;
2471 +
2472 return dlci->modem_rx & TIOCM_CD;
2473 }
2474
2475 diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
2476 index a24278380fec..22683393a0f2 100644
2477 --- a/drivers/tty/serial/earlycon.c
2478 +++ b/drivers/tty/serial/earlycon.c
2479 @@ -169,7 +169,7 @@ static int __init register_earlycon(char *buf, const struct earlycon_id *match)
2480 */
2481 int __init setup_earlycon(char *buf)
2482 {
2483 - const struct earlycon_id *match;
2484 + const struct earlycon_id **p_match;
2485
2486 if (!buf || !buf[0])
2487 return -EINVAL;
2488 @@ -177,7 +177,9 @@ int __init setup_earlycon(char *buf)
2489 if (early_con.flags & CON_ENABLED)
2490 return -EALREADY;
2491
2492 - for (match = __earlycon_table; match < __earlycon_table_end; match++) {
2493 + for (p_match = __earlycon_table; p_match < __earlycon_table_end;
2494 + p_match++) {
2495 + const struct earlycon_id *match = *p_match;
2496 size_t len = strlen(match->name);
2497
2498 if (strncmp(buf, match->name, len))
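Together with the of/fdt hunk earlier, setup_earlycon() now walks __earlycon_table as an array of pointers to earlycon_id entries, dereferencing one entry per step, rather than as an array of the structures themselves. A tiny sketch of that iteration pattern with stand-in types (nothing here reflects the real linker-built table):

#include <stdio.h>

struct earlycon_id { const char *name; };

static const struct earlycon_id uart8250 = { "uart8250" };
static const struct earlycon_id pl011    = { "pl011" };

/* In the kernel this table is assembled by the linker; a plain array here. */
static const struct earlycon_id *table[] = { &uart8250, &pl011 };

int main(void)
{
	const struct earlycon_id **p_match;

	for (p_match = table;
	     p_match < table + sizeof(table) / sizeof(table[0]); p_match++) {
		const struct earlycon_id *match = *p_match;	/* one extra deref */

		printf("%s\n", match->name);
	}
	return 0;
}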
2499 diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
2500 index a100e98259d7..03d26aabb0c4 100644
2501 --- a/drivers/tty/serial/mvebu-uart.c
2502 +++ b/drivers/tty/serial/mvebu-uart.c
2503 @@ -495,7 +495,6 @@ static void mvebu_uart_set_termios(struct uart_port *port,
2504 termios->c_iflag |= old->c_iflag & ~(INPCK | IGNPAR);
2505 termios->c_cflag &= CREAD | CBAUD;
2506 termios->c_cflag |= old->c_cflag & ~(CREAD | CBAUD);
2507 - termios->c_lflag = old->c_lflag;
2508 }
2509
2510 spin_unlock_irqrestore(&port->lock, flags);
2511 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
2512 index 63114ea35ec1..7c838b90a31d 100644
2513 --- a/drivers/tty/tty_io.c
2514 +++ b/drivers/tty/tty_io.c
2515 @@ -2816,7 +2816,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
2516
2517 kref_init(&tty->kref);
2518 tty->magic = TTY_MAGIC;
2519 - tty_ldisc_init(tty);
2520 + if (tty_ldisc_init(tty)) {
2521 + kfree(tty);
2522 + return NULL;
2523 + }
2524 tty->session = NULL;
2525 tty->pgrp = NULL;
2526 mutex_init(&tty->legacy_mutex);
2527 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
2528 index 050f4d650891..fb7329ab2b37 100644
2529 --- a/drivers/tty/tty_ldisc.c
2530 +++ b/drivers/tty/tty_ldisc.c
2531 @@ -176,12 +176,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
2532 return ERR_CAST(ldops);
2533 }
2534
2535 - ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
2536 - if (ld == NULL) {
2537 - put_ldops(ldops);
2538 - return ERR_PTR(-ENOMEM);
2539 - }
2540 -
2541 + /*
2542 + * There is no way to handle allocation failure of only 16 bytes.
2543 + * Let's simplify error handling and save more memory.
2544 + */
2545 + ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
2546 ld->ops = ldops;
2547 ld->tty = tty;
2548
2549 @@ -527,19 +526,16 @@ static int tty_ldisc_failto(struct tty_struct *tty, int ld)
2550 static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
2551 {
2552 /* There is an outstanding reference here so this is safe */
2553 - old = tty_ldisc_get(tty, old->ops->num);
2554 - WARN_ON(IS_ERR(old));
2555 - tty->ldisc = old;
2556 - tty_set_termios_ldisc(tty, old->ops->num);
2557 - if (tty_ldisc_open(tty, old) < 0) {
2558 - tty_ldisc_put(old);
2559 + if (tty_ldisc_failto(tty, old->ops->num) < 0) {
2560 + const char *name = tty_name(tty);
2561 +
2562 + pr_warn("Falling back ldisc for %s.\n", name);
2563 /* The traditional behaviour is to fall back to N_TTY, we
2564 want to avoid falling back to N_NULL unless we have no
2565 choice to avoid the risk of breaking anything */
2566 if (tty_ldisc_failto(tty, N_TTY) < 0 &&
2567 tty_ldisc_failto(tty, N_NULL) < 0)
2568 - panic("Couldn't open N_NULL ldisc for %s.",
2569 - tty_name(tty));
2570 + panic("Couldn't open N_NULL ldisc for %s.", name);
2571 }
2572 }
2573
2574 @@ -824,12 +820,13 @@ EXPORT_SYMBOL_GPL(tty_ldisc_release);
2575 * the tty structure is not completely set up when this call is made.
2576 */
2577
2578 -void tty_ldisc_init(struct tty_struct *tty)
2579 +int tty_ldisc_init(struct tty_struct *tty)
2580 {
2581 struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
2582 if (IS_ERR(ld))
2583 - panic("n_tty: init_tty");
2584 + return PTR_ERR(ld);
2585 tty->ldisc = ld;
2586 + return 0;
2587 }
2588
2589 /**
2590 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
2591 index fc32391a34d5..15736b462c55 100644
2592 --- a/drivers/usb/core/hcd.c
2593 +++ b/drivers/usb/core/hcd.c
2594 @@ -2365,6 +2365,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
2595
2596 spin_lock_irqsave (&hcd_root_hub_lock, flags);
2597 if (hcd->rh_registered) {
2598 + pm_wakeup_event(&hcd->self.root_hub->dev, 0);
2599 set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
2600 queue_work(pm_wq, &hcd->wakeup_work);
2601 }
2602 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2603 index c5c1f6cf3228..83c58a20d16f 100644
2604 --- a/drivers/usb/core/hub.c
2605 +++ b/drivers/usb/core/hub.c
2606 @@ -653,12 +653,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
2607 unsigned int portnum)
2608 {
2609 struct usb_hub *hub;
2610 + struct usb_port *port_dev;
2611
2612 if (!hdev)
2613 return;
2614
2615 hub = usb_hub_to_struct_hub(hdev);
2616 if (hub) {
2617 + port_dev = hub->ports[portnum - 1];
2618 + if (port_dev && port_dev->child)
2619 + pm_wakeup_event(&port_dev->child->dev, 0);
2620 +
2621 set_bit(portnum, hub->wakeup_bits);
2622 kick_hub_wq(hub);
2623 }
2624 @@ -3430,8 +3435,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
2625
2626 /* Skip the initial Clear-Suspend step for a remote wakeup */
2627 status = hub_port_status(hub, port1, &portstatus, &portchange);
2628 - if (status == 0 && !port_is_suspended(hub, portstatus))
2629 + if (status == 0 && !port_is_suspended(hub, portstatus)) {
2630 + if (portchange & USB_PORT_STAT_C_SUSPEND)
2631 + pm_wakeup_event(&udev->dev, 0);
2632 goto SuspendCleared;
2633 + }
2634
2635 /* see 7.1.7.7; affects power usage, but not budgeting */
2636 if (hub_is_superspeed(hub->hdev))
2637 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
2638 index 54b019e267c5..9f5f78b7bb55 100644
2639 --- a/drivers/usb/core/quirks.c
2640 +++ b/drivers/usb/core/quirks.c
2641 @@ -40,6 +40,9 @@ static const struct usb_device_id usb_quirk_list[] = {
2642 { USB_DEVICE(0x03f0, 0x0701), .driver_info =
2643 USB_QUIRK_STRING_FETCH_255 },
2644
2645 + /* HP v222w 16GB Mini USB Drive */
2646 + { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
2647 +
2648 /* Creative SB Audigy 2 NX */
2649 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
2650
2651 diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
2652 index 75f0b92694ba..50203e77c925 100644
2653 --- a/drivers/usb/host/xhci-dbgtty.c
2654 +++ b/drivers/usb/host/xhci-dbgtty.c
2655 @@ -320,9 +320,11 @@ int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
2656
2657 void xhci_dbc_tty_unregister_driver(void)
2658 {
2659 - tty_unregister_driver(dbc_tty_driver);
2660 - put_tty_driver(dbc_tty_driver);
2661 - dbc_tty_driver = NULL;
2662 + if (dbc_tty_driver) {
2663 + tty_unregister_driver(dbc_tty_driver);
2664 + put_tty_driver(dbc_tty_driver);
2665 + dbc_tty_driver = NULL;
2666 + }
2667 }
2668
2669 static void dbc_rx_push(unsigned long _port)
2670 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
2671 index d9f831b67e57..93ce34bce7b5 100644
2672 --- a/drivers/usb/host/xhci-pci.c
2673 +++ b/drivers/usb/host/xhci-pci.c
2674 @@ -126,7 +126,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
2675 if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
2676 xhci->quirks |= XHCI_AMD_PLL_FIX;
2677
2678 - if (pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x43bb)
2679 + if (pdev->vendor == PCI_VENDOR_ID_AMD &&
2680 + (pdev->device == 0x15e0 ||
2681 + pdev->device == 0x15e1 ||
2682 + pdev->device == 0x43bb))
2683 xhci->quirks |= XHCI_SUSPEND_DELAY;
2684
2685 if (pdev->vendor == PCI_VENDOR_ID_AMD)
2686 diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
2687 index 6652e2d5bd2e..c435df29cdb8 100644
2688 --- a/drivers/usb/host/xhci-plat.c
2689 +++ b/drivers/usb/host/xhci-plat.c
2690 @@ -419,7 +419,6 @@ MODULE_DEVICE_TABLE(acpi, usb_xhci_acpi_match);
2691 static struct platform_driver usb_xhci_driver = {
2692 .probe = xhci_plat_probe,
2693 .remove = xhci_plat_remove,
2694 - .shutdown = usb_hcd_platform_shutdown,
2695 .driver = {
2696 .name = "xhci-hcd",
2697 .pm = &xhci_plat_pm_ops,
2698 diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
2699 index a646820f5a78..533f127c30ad 100644
2700 --- a/drivers/usb/serial/Kconfig
2701 +++ b/drivers/usb/serial/Kconfig
2702 @@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
2703 - Fundamental Software dongle.
2704 - Google USB serial devices
2705 - HP4x calculators
2706 + - Libtransistor USB console
2707 - a number of Motorola phones
2708 - Motorola Tetra devices
2709 - Novatel Wireless GPS receivers
2710 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
2711 index de1e759dd512..eb6c26cbe579 100644
2712 --- a/drivers/usb/serial/cp210x.c
2713 +++ b/drivers/usb/serial/cp210x.c
2714 @@ -214,6 +214,7 @@ static const struct usb_device_id id_table[] = {
2715 { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
2716 { USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
2717 { USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
2718 + { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
2719 { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
2720 { } /* Terminating Entry */
2721 };
2722 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2723 index 87202ad5a50d..7ea221d42dba 100644
2724 --- a/drivers/usb/serial/ftdi_sio.c
2725 +++ b/drivers/usb/serial/ftdi_sio.c
2726 @@ -1898,7 +1898,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
2727 return ftdi_jtag_probe(serial);
2728
2729 if (udev->product &&
2730 - (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
2731 + (!strcmp(udev->product, "Arrow USB Blaster") ||
2732 + !strcmp(udev->product, "BeagleBone/XDS100V2") ||
2733 !strcmp(udev->product, "SNAP Connect E10")))
2734 return ftdi_jtag_probe(serial);
2735
2736 diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
2737 index 4ef79e29cb26..40864c2bd9dc 100644
2738 --- a/drivers/usb/serial/usb-serial-simple.c
2739 +++ b/drivers/usb/serial/usb-serial-simple.c
2740 @@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
2741 0x01) }
2742 DEVICE(google, GOOGLE_IDS);
2743
2744 +/* Libtransistor USB console */
2745 +#define LIBTRANSISTOR_IDS() \
2746 + { USB_DEVICE(0x1209, 0x8b00) }
2747 +DEVICE(libtransistor, LIBTRANSISTOR_IDS);
2748 +
2749 /* ViVOpay USB Serial Driver */
2750 #define VIVOPAY_IDS() \
2751 { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
2752 @@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
2753 &funsoft_device,
2754 &flashloader_device,
2755 &google_device,
2756 + &libtransistor_device,
2757 &vivopay_device,
2758 &moto_modem_device,
2759 &motorola_tetra_device,
2760 @@ -126,6 +132,7 @@ static const struct usb_device_id id_table[] = {
2761 FUNSOFT_IDS(),
2762 FLASHLOADER_IDS(),
2763 GOOGLE_IDS(),
2764 + LIBTRANSISTOR_IDS(),
2765 VIVOPAY_IDS(),
2766 MOTO_IDS(),
2767 MOTOROLA_TETRA_IDS(),
2768 diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
2769 index 79046fe66426..8d95b3a168d2 100644
2770 --- a/drivers/usb/typec/ucsi/ucsi.c
2771 +++ b/drivers/usb/typec/ucsi/ucsi.c
2772 @@ -28,7 +28,7 @@
2773 * difficult to estimate the time it takes for the system to process the command
2774 * before it is actually passed to the PPM.
2775 */
2776 -#define UCSI_TIMEOUT_MS 1000
2777 +#define UCSI_TIMEOUT_MS 5000
2778
2779 /*
2780 * UCSI_SWAP_TIMEOUT_MS - Timeout for role swap requests
2781 diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
2782 index c31c8402a0c5..d41d0cdeec0f 100644
2783 --- a/drivers/usb/usbip/stub_main.c
2784 +++ b/drivers/usb/usbip/stub_main.c
2785 @@ -186,7 +186,12 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
2786 if (!bid)
2787 return -ENODEV;
2788
2789 + /* device_attach() callers should hold parent lock for USB */
2790 + if (bid->udev->dev.parent)
2791 + device_lock(bid->udev->dev.parent);
2792 ret = device_attach(&bid->udev->dev);
2793 + if (bid->udev->dev.parent)
2794 + device_unlock(bid->udev->dev.parent);
2795 if (ret < 0) {
2796 dev_err(&bid->udev->dev, "rebind failed\n");
2797 return ret;
2798 diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
2799 index 473fb8a87289..bf8afe9b5883 100644
2800 --- a/drivers/usb/usbip/usbip_common.h
2801 +++ b/drivers/usb/usbip/usbip_common.h
2802 @@ -243,7 +243,7 @@ enum usbip_side {
2803 #define VUDC_EVENT_ERROR_USB (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
2804 #define VUDC_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
2805
2806 -#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
2807 +#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
2808 #define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
2809 #define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
2810 #define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
2811 diff --git a/drivers/usb/usbip/usbip_event.c b/drivers/usb/usbip/usbip_event.c
2812 index 5b4c0864ad92..5d88917c9631 100644
2813 --- a/drivers/usb/usbip/usbip_event.c
2814 +++ b/drivers/usb/usbip/usbip_event.c
2815 @@ -91,10 +91,6 @@ static void event_handler(struct work_struct *work)
2816 unset_event(ud, USBIP_EH_UNUSABLE);
2817 }
2818
2819 - /* Stop the error handler. */
2820 - if (ud->event & USBIP_EH_BYE)
2821 - usbip_dbg_eh("removed %p\n", ud);
2822 -
2823 wake_up(&ud->eh_waitq);
2824 }
2825 }
2826 diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
2827 index 20e3d4609583..d11f3f8dad40 100644
2828 --- a/drivers/usb/usbip/vhci_hcd.c
2829 +++ b/drivers/usb/usbip/vhci_hcd.c
2830 @@ -354,6 +354,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2831 usbip_dbg_vhci_rh(" ClearHubFeature\n");
2832 break;
2833 case ClearPortFeature:
2834 + if (rhport < 0)
2835 + goto error;
2836 switch (wValue) {
2837 case USB_PORT_FEAT_SUSPEND:
2838 if (hcd->speed == HCD_USB3) {
2839 @@ -511,11 +513,16 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2840 goto error;
2841 }
2842
2843 + if (rhport < 0)
2844 + goto error;
2845 +
2846 vhci_hcd->port_status[rhport] |= USB_PORT_STAT_SUSPEND;
2847 break;
2848 case USB_PORT_FEAT_POWER:
2849 usbip_dbg_vhci_rh(
2850 " SetPortFeature: USB_PORT_FEAT_POWER\n");
2851 + if (rhport < 0)
2852 + goto error;
2853 if (hcd->speed == HCD_USB3)
2854 vhci_hcd->port_status[rhport] |= USB_SS_PORT_STAT_POWER;
2855 else
2856 @@ -524,6 +531,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2857 case USB_PORT_FEAT_BH_PORT_RESET:
2858 usbip_dbg_vhci_rh(
2859 " SetPortFeature: USB_PORT_FEAT_BH_PORT_RESET\n");
2860 + if (rhport < 0)
2861 + goto error;
2862 /* Applicable only for USB3.0 hub */
2863 if (hcd->speed != HCD_USB3) {
2864 pr_err("USB_PORT_FEAT_BH_PORT_RESET req not "
2865 @@ -534,6 +543,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2866 case USB_PORT_FEAT_RESET:
2867 usbip_dbg_vhci_rh(
2868 " SetPortFeature: USB_PORT_FEAT_RESET\n");
2869 + if (rhport < 0)
2870 + goto error;
2871 /* if it's already enabled, disable */
2872 if (hcd->speed == HCD_USB3) {
2873 vhci_hcd->port_status[rhport] = 0;
2874 @@ -554,6 +565,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
2875 default:
2876 usbip_dbg_vhci_rh(" SetPortFeature: default %d\n",
2877 wValue);
2878 + if (rhport < 0)
2879 + goto error;
2880 if (hcd->speed == HCD_USB3) {
2881 if ((vhci_hcd->port_status[rhport] &
2882 USB_SS_PORT_STAT_POWER) != 0) {
2883 diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c
2884 index 190dbf8cfcb5..7411a535fda2 100644
2885 --- a/drivers/virt/vboxguest/vboxguest_core.c
2886 +++ b/drivers/virt/vboxguest/vboxguest_core.c
2887 @@ -114,7 +114,7 @@ static void vbg_guest_mappings_init(struct vbg_dev *gdev)
2888 }
2889
2890 out:
2891 - kfree(req);
2892 + vbg_req_free(req, sizeof(*req));
2893 kfree(pages);
2894 }
2895
2896 @@ -144,7 +144,7 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
2897
2898 rc = vbg_req_perform(gdev, req);
2899
2900 - kfree(req);
2901 + vbg_req_free(req, sizeof(*req));
2902
2903 if (rc < 0) {
2904 vbg_err("%s error: %d\n", __func__, rc);
2905 @@ -214,8 +214,8 @@ static int vbg_report_guest_info(struct vbg_dev *gdev)
2906 ret = vbg_status_code_to_errno(rc);
2907
2908 out_free:
2909 - kfree(req2);
2910 - kfree(req1);
2911 + vbg_req_free(req2, sizeof(*req2));
2912 + vbg_req_free(req1, sizeof(*req1));
2913 return ret;
2914 }
2915
2916 @@ -245,7 +245,7 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
2917 if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
2918 rc = VINF_SUCCESS;
2919
2920 - kfree(req);
2921 + vbg_req_free(req, sizeof(*req));
2922
2923 return vbg_status_code_to_errno(rc);
2924 }
2925 @@ -431,7 +431,7 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
2926 rc = vbg_req_perform(gdev, req);
2927 do_div(req->interval_ns, 1000000); /* ns -> ms */
2928 gdev->heartbeat_interval_ms = req->interval_ns;
2929 - kfree(req);
2930 + vbg_req_free(req, sizeof(*req));
2931
2932 return vbg_status_code_to_errno(rc);
2933 }
2934 @@ -454,12 +454,6 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev)
2935 if (ret < 0)
2936 return ret;
2937
2938 - /*
2939 - * Preallocate the request to use it from the timer callback because:
2940 - * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
2941 - * and the timer callback runs at DISPATCH_LEVEL;
2942 - * 2) avoid repeated allocations.
2943 - */
2944 gdev->guest_heartbeat_req = vbg_req_alloc(
2945 sizeof(*gdev->guest_heartbeat_req),
2946 VMMDEVREQ_GUEST_HEARTBEAT);
2947 @@ -481,8 +475,8 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev)
2948 {
2949 del_timer_sync(&gdev->heartbeat_timer);
2950 vbg_heartbeat_host_config(gdev, false);
2951 - kfree(gdev->guest_heartbeat_req);
2952 -
2953 + vbg_req_free(gdev->guest_heartbeat_req,
2954 + sizeof(*gdev->guest_heartbeat_req));
2955 }
2956
2957 /**
2958 @@ -543,7 +537,7 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
2959 if (rc < 0)
2960 vbg_err("%s error, rc: %d\n", __func__, rc);
2961
2962 - kfree(req);
2963 + vbg_req_free(req, sizeof(*req));
2964 return vbg_status_code_to_errno(rc);
2965 }
2966
2967 @@ -617,7 +611,7 @@ static int vbg_set_session_event_filter(struct vbg_dev *gdev,
2968
2969 out:
2970 mutex_unlock(&gdev->session_mutex);
2971 - kfree(req);
2972 + vbg_req_free(req, sizeof(*req));
2973
2974 return ret;
2975 }
2976 @@ -642,7 +636,7 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
2977 if (rc < 0)
2978 vbg_err("%s error, rc: %d\n", __func__, rc);
2979
2980 - kfree(req);
2981 + vbg_req_free(req, sizeof(*req));
2982 return vbg_status_code_to_errno(rc);
2983 }
2984
2985 @@ -712,7 +706,7 @@ static int vbg_set_session_capabilities(struct vbg_dev *gdev,
2986
2987 out:
2988 mutex_unlock(&gdev->session_mutex);
2989 - kfree(req);
2990 + vbg_req_free(req, sizeof(*req));
2991
2992 return ret;
2993 }
2994 @@ -749,7 +743,7 @@ static int vbg_query_host_version(struct vbg_dev *gdev)
2995 }
2996
2997 out:
2998 - kfree(req);
2999 + vbg_req_free(req, sizeof(*req));
3000 return ret;
3001 }
3002
3003 @@ -847,11 +841,16 @@ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
3004 return 0;
3005
3006 err_free_reqs:
3007 - kfree(gdev->mouse_status_req);
3008 - kfree(gdev->ack_events_req);
3009 - kfree(gdev->cancel_req);
3010 - kfree(gdev->mem_balloon.change_req);
3011 - kfree(gdev->mem_balloon.get_req);
3012 + vbg_req_free(gdev->mouse_status_req,
3013 + sizeof(*gdev->mouse_status_req));
3014 + vbg_req_free(gdev->ack_events_req,
3015 + sizeof(*gdev->ack_events_req));
3016 + vbg_req_free(gdev->cancel_req,
3017 + sizeof(*gdev->cancel_req));
3018 + vbg_req_free(gdev->mem_balloon.change_req,
3019 + sizeof(*gdev->mem_balloon.change_req));
3020 + vbg_req_free(gdev->mem_balloon.get_req,
3021 + sizeof(*gdev->mem_balloon.get_req));
3022 return ret;
3023 }
3024
3025 @@ -872,11 +871,16 @@ void vbg_core_exit(struct vbg_dev *gdev)
3026 vbg_reset_host_capabilities(gdev);
3027 vbg_core_set_mouse_status(gdev, 0);
3028
3029 - kfree(gdev->mouse_status_req);
3030 - kfree(gdev->ack_events_req);
3031 - kfree(gdev->cancel_req);
3032 - kfree(gdev->mem_balloon.change_req);
3033 - kfree(gdev->mem_balloon.get_req);
3034 + vbg_req_free(gdev->mouse_status_req,
3035 + sizeof(*gdev->mouse_status_req));
3036 + vbg_req_free(gdev->ack_events_req,
3037 + sizeof(*gdev->ack_events_req));
3038 + vbg_req_free(gdev->cancel_req,
3039 + sizeof(*gdev->cancel_req));
3040 + vbg_req_free(gdev->mem_balloon.change_req,
3041 + sizeof(*gdev->mem_balloon.change_req));
3042 + vbg_req_free(gdev->mem_balloon.get_req,
3043 + sizeof(*gdev->mem_balloon.get_req));
3044 }
3045
3046 /**
3047 @@ -1415,7 +1419,7 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
3048 req->flags = dump->u.in.flags;
3049 dump->hdr.rc = vbg_req_perform(gdev, req);
3050
3051 - kfree(req);
3052 + vbg_req_free(req, sizeof(*req));
3053 return 0;
3054 }
3055
3056 @@ -1513,7 +1517,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
3057 if (rc < 0)
3058 vbg_err("%s error, rc: %d\n", __func__, rc);
3059
3060 - kfree(req);
3061 + vbg_req_free(req, sizeof(*req));
3062 return vbg_status_code_to_errno(rc);
3063 }
3064
3065 diff --git a/drivers/virt/vboxguest/vboxguest_core.h b/drivers/virt/vboxguest/vboxguest_core.h
3066 index 6c784bf4fa6d..7ad9ec45bfa9 100644
3067 --- a/drivers/virt/vboxguest/vboxguest_core.h
3068 +++ b/drivers/virt/vboxguest/vboxguest_core.h
3069 @@ -171,4 +171,13 @@ irqreturn_t vbg_core_isr(int irq, void *dev_id);
3070
3071 void vbg_linux_mouse_event(struct vbg_dev *gdev);
3072
3073 +/* Private (non-exported) functions from vboxguest_utils.c */
3074 +void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
3075 +void vbg_req_free(void *req, size_t len);
3076 +int vbg_req_perform(struct vbg_dev *gdev, void *req);
3077 +int vbg_hgcm_call32(
3078 + struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
3079 + struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
3080 + int *vbox_status);
3081 +
3082 #endif
3083 diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c
3084 index 82e280d38cc2..398d22693234 100644
3085 --- a/drivers/virt/vboxguest/vboxguest_linux.c
3086 +++ b/drivers/virt/vboxguest/vboxguest_linux.c
3087 @@ -87,6 +87,7 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
3088 struct vbg_session *session = filp->private_data;
3089 size_t returned_size, size;
3090 struct vbg_ioctl_hdr hdr;
3091 + bool is_vmmdev_req;
3092 int ret = 0;
3093 void *buf;
3094
3095 @@ -106,8 +107,17 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
3096 if (size > SZ_16M)
3097 return -E2BIG;
3098
3099 - /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */
3100 - buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
3101 + /*
3102 + * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
3103 + * the need for a bounce-buffer and another copy later on.
3104 + */
3105 + is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
3106 + req == VBG_IOCTL_VMMDEV_REQUEST_BIG;
3107 +
3108 + if (is_vmmdev_req)
3109 + buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT);
3110 + else
3111 + buf = kmalloc(size, GFP_KERNEL);
3112 if (!buf)
3113 return -ENOMEM;
3114
3115 @@ -132,7 +142,10 @@ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
3116 ret = -EFAULT;
3117
3118 out:
3119 - kfree(buf);
3120 + if (is_vmmdev_req)
3121 + vbg_req_free(buf, size);
3122 + else
3123 + kfree(buf);
3124
3125 return ret;
3126 }
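
The ioctl hunk above decides which allocator to use by comparing the command number with its size bits masked off, since IOCTL_VMMDEV_REQUEST encodes a caller-chosen payload size. A minimal userspace sketch of that comparison using the standard _IOC helpers; the demo_* macro and struct are stand-ins, not the driver's real VBG_IOCTL definitions:

#include <stdio.h>
#include <linux/ioctl.h>

/* hypothetical request layout, stand-in for the real VBG_IOCTL numbers */
struct demo_hdr { unsigned int size_in; unsigned int size_out; };
#define DEMO_IOCTL_VMMDEV_REQUEST(s) _IOC(_IOC_READ | _IOC_WRITE, 'V', 3, s)

/* true when 'req' is a VMMDEV request of any size: compare with size bits masked off */
static int is_vmmdev_req(unsigned int req)
{
	return (req & ~IOCSIZE_MASK) == DEMO_IOCTL_VMMDEV_REQUEST(0);
}

int main(void)
{
	unsigned int vmm_req = DEMO_IOCTL_VMMDEV_REQUEST(sizeof(struct demo_hdr));
	unsigned int other_req = _IOR('V', 9, struct demo_hdr);

	printf("vmmdev request matches: %d\n", is_vmmdev_req(vmm_req));   /* 1 */
	printf("other ioctl matches:    %d\n", is_vmmdev_req(other_req)); /* 0 */
	printf("payload size encoded:   %u\n", (unsigned int)_IOC_SIZE(vmm_req));
	return 0;
}

Requests that match get the below-4G allocation from vbg_req_alloc(); everything else can stay on a plain kmalloc().
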
3127 diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c
3128 index 0f0dab8023cf..bf4474214b4d 100644
3129 --- a/drivers/virt/vboxguest/vboxguest_utils.c
3130 +++ b/drivers/virt/vboxguest/vboxguest_utils.c
3131 @@ -65,8 +65,9 @@ VBG_LOG(vbg_debug, pr_debug);
3132 void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
3133 {
3134 struct vmmdev_request_header *req;
3135 + int order = get_order(PAGE_ALIGN(len));
3136
3137 - req = kmalloc(len, GFP_KERNEL | __GFP_DMA32);
3138 + req = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA32, order);
3139 if (!req)
3140 return NULL;
3141
3142 @@ -82,6 +83,14 @@ void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type)
3143 return req;
3144 }
3145
3146 +void vbg_req_free(void *req, size_t len)
3147 +{
3148 + if (!req)
3149 + return;
3150 +
3151 + free_pages((unsigned long)req, get_order(PAGE_ALIGN(len)));
3152 +}
3153 +
3154 /* Note this function returns a VBox status code, not a negative errno!! */
3155 int vbg_req_perform(struct vbg_dev *gdev, void *req)
3156 {
3157 @@ -137,7 +146,7 @@ int vbg_hgcm_connect(struct vbg_dev *gdev,
3158 rc = hgcm_connect->header.result;
3159 }
3160
3161 - kfree(hgcm_connect);
3162 + vbg_req_free(hgcm_connect, sizeof(*hgcm_connect));
3163
3164 *vbox_status = rc;
3165 return 0;
3166 @@ -166,7 +175,7 @@ int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status)
3167 if (rc >= 0)
3168 rc = hgcm_disconnect->header.result;
3169
3170 - kfree(hgcm_disconnect);
3171 + vbg_req_free(hgcm_disconnect, sizeof(*hgcm_disconnect));
3172
3173 *vbox_status = rc;
3174 return 0;
3175 @@ -623,7 +632,7 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
3176 }
3177
3178 if (!leak_it)
3179 - kfree(call);
3180 + vbg_req_free(call, size);
3181
3182 free_bounce_bufs:
3183 if (bounce_bufs) {
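
vbg_req_alloc() now hands out whole pages via __get_free_pages(GFP_KERNEL | GFP_DMA32), so vbg_req_free() must be told the request length in order to recompute the same page order, which is why every caller above passes sizeof(*req). A small userspace sketch of the order calculation implied by get_order(PAGE_ALIGN(len)); the 4096-byte page size is an assumption of the sketch:

#include <stdio.h>
#include <stddef.h>

#define DEMO_PAGE_SIZE 4096UL	/* assumption for the sketch; the kernel uses PAGE_SIZE */

/* smallest 'order' such that (DEMO_PAGE_SIZE << order) >= len, like get_order(PAGE_ALIGN(len)) */
static unsigned int order_for(size_t len)
{
	unsigned int order = 0;

	while ((DEMO_PAGE_SIZE << order) < len)
		order++;
	return order;
}

int main(void)
{
	size_t sizes[] = { 64, 4096, 4097, 3 * 4096, 5 * 4096 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("len %6zu -> order %u (%lu bytes)\n", sizes[i],
		       order_for(sizes[i]), DEMO_PAGE_SIZE << order_for(sizes[i]));
	return 0;
}

Rounding up to a power-of-two number of pages can waste space (a 3-page request occupies 4 pages), which is the trade-off accepted here for page-aligned buffers that are guaranteed to live below 4G.
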
3184 diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
3185 index 9ceebf30eb22..a82f91d75f29 100644
3186 --- a/fs/cifs/cifssmb.c
3187 +++ b/fs/cifs/cifssmb.c
3188 @@ -453,6 +453,9 @@ cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required)
3189 server->sign = true;
3190 }
3191
3192 + if (cifs_rdma_enabled(server) && server->sign)
3193 + cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled");
3194 +
3195 return 0;
3196 }
3197
3198 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3199 index dfd6fb02b7a3..1c1940d90c96 100644
3200 --- a/fs/cifs/smb2ops.c
3201 +++ b/fs/cifs/smb2ops.c
3202 @@ -252,9 +252,14 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
3203 wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
3204 wsize = min_t(unsigned int, wsize, server->max_write);
3205 #ifdef CONFIG_CIFS_SMB_DIRECT
3206 - if (server->rdma)
3207 - wsize = min_t(unsigned int,
3208 + if (server->rdma) {
3209 + if (server->sign)
3210 + wsize = min_t(unsigned int,
3211 + wsize, server->smbd_conn->max_fragmented_send_size);
3212 + else
3213 + wsize = min_t(unsigned int,
3214 wsize, server->smbd_conn->max_readwrite_size);
3215 + }
3216 #endif
3217 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
3218 wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
3219 @@ -272,9 +277,14 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
3220 rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
3221 rsize = min_t(unsigned int, rsize, server->max_read);
3222 #ifdef CONFIG_CIFS_SMB_DIRECT
3223 - if (server->rdma)
3224 - rsize = min_t(unsigned int,
3225 + if (server->rdma) {
3226 + if (server->sign)
3227 + rsize = min_t(unsigned int,
3228 + rsize, server->smbd_conn->max_fragmented_recv_size);
3229 + else
3230 + rsize = min_t(unsigned int,
3231 rsize, server->smbd_conn->max_readwrite_size);
3232 + }
3233 #endif
3234
3235 if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
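
With signing enabled the SMB Direct layer cannot use RDMA read/write, so the wsize/rsize hunks above cap the I/O size at the fragmented send/receive limit instead of max_readwrite_size. A hedged sketch of the same min-of-limits computation; the demo_conn fields stand in for the smb_vol/smbd_conn values used by the real code:

#include <stdio.h>

/* stand-in limits; the real values come from smb_vol and smbd_conn */
struct demo_conn {
	unsigned int rdma;
	unsigned int sign;
	unsigned int max_write;
	unsigned int max_readwrite_size;
	unsigned int max_fragmented_send_size;
};

#define DEMO_DEFAULT_IOSIZE (1024 * 1024)

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* mirrors the shape of smb2_negotiate_wsize() after the fix */
static unsigned int negotiate_wsize(const struct demo_conn *c, unsigned int requested)
{
	unsigned int wsize = requested ? requested : DEMO_DEFAULT_IOSIZE;

	wsize = min_u(wsize, c->max_write);
	if (c->rdma) {
		if (c->sign)	/* signed traffic cannot use RDMA read/write */
			wsize = min_u(wsize, c->max_fragmented_send_size);
		else
			wsize = min_u(wsize, c->max_readwrite_size);
	}
	return wsize;
}

int main(void)
{
	struct demo_conn c = {
		.rdma = 1, .sign = 1, .max_write = 8 * 1024 * 1024,
		.max_readwrite_size = 1024 * 1024,
		.max_fragmented_send_size = 128 * 1024,
	};

	printf("signed RDMA wsize:   %u\n", negotiate_wsize(&c, 0));
	c.sign = 0;
	printf("unsigned RDMA wsize: %u\n", negotiate_wsize(&c, 0));
	return 0;
}
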
3236 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3237 index af62c75b17c4..8ae6a089489c 100644
3238 --- a/fs/cifs/smb2pdu.c
3239 +++ b/fs/cifs/smb2pdu.c
3240 @@ -2479,7 +2479,7 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
3241 * If we want to do a RDMA write, fill in and append
3242 * smbd_buffer_descriptor_v1 to the end of read request
3243 */
3244 - if (server->rdma && rdata &&
3245 + if (server->rdma && rdata && !server->sign &&
3246 rdata->bytes >= server->smbd_conn->rdma_readwrite_threshold) {
3247
3248 struct smbd_buffer_descriptor_v1 *v1;
3249 @@ -2857,7 +2857,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
3250 * If we want to do a server RDMA read, fill in and append
3251 * smbd_buffer_descriptor_v1 to the end of write request
3252 */
3253 - if (server->rdma && wdata->bytes >=
3254 + if (server->rdma && !server->sign && wdata->bytes >=
3255 server->smbd_conn->rdma_readwrite_threshold) {
3256
3257 struct smbd_buffer_descriptor_v1 *v1;
3258 diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
3259 index 34be5c5d027f..608ce9abd240 100644
3260 --- a/fs/cifs/smbdirect.c
3261 +++ b/fs/cifs/smbdirect.c
3262 @@ -2086,7 +2086,7 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
3263 int start, i, j;
3264 int max_iov_size =
3265 info->max_send_size - sizeof(struct smbd_data_transfer);
3266 - struct kvec iov[SMBDIRECT_MAX_SGE];
3267 + struct kvec *iov;
3268 int rc;
3269
3270 info->smbd_send_pending++;
3271 @@ -2096,32 +2096,20 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
3272 }
3273
3274 /*
3275 - * This usually means a configuration error
3276 - * We use RDMA read/write for packet size > rdma_readwrite_threshold
3277 - * as long as it's properly configured we should never get into this
3278 - * situation
3279 - */
3280 - if (rqst->rq_nvec + rqst->rq_npages > SMBDIRECT_MAX_SGE) {
3281 - log_write(ERR, "maximum send segment %x exceeding %x\n",
3282 - rqst->rq_nvec + rqst->rq_npages, SMBDIRECT_MAX_SGE);
3283 - rc = -EINVAL;
3284 - goto done;
3285 - }
3286 -
3287 - /*
3288 - * Remove the RFC1002 length defined in MS-SMB2 section 2.1
3289 - * It is used only for TCP transport
3290 + * Skip the RFC1002 length defined in MS-SMB2 section 2.1
3291 + * It is used only for TCP transport in the iov[0]
3292 * In future we may want to add a transport layer under protocol
3293 * layer so this will only be issued to TCP transport
3294 */
3295 - iov[0].iov_base = (char *)rqst->rq_iov[0].iov_base + 4;
3296 - iov[0].iov_len = rqst->rq_iov[0].iov_len - 4;
3297 - buflen += iov[0].iov_len;
3298 +
3299 + if (rqst->rq_iov[0].iov_len != 4) {
3300 + log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
3301 + return -EINVAL;
3302 + }
3303 + iov = &rqst->rq_iov[1];
3304
3305 /* total up iov array first */
3306 - for (i = 1; i < rqst->rq_nvec; i++) {
3307 - iov[i].iov_base = rqst->rq_iov[i].iov_base;
3308 - iov[i].iov_len = rqst->rq_iov[i].iov_len;
3309 + for (i = 0; i < rqst->rq_nvec-1; i++) {
3310 buflen += iov[i].iov_len;
3311 }
3312
3313 @@ -2194,14 +2182,14 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
3314 goto done;
3315 }
3316 i++;
3317 - if (i == rqst->rq_nvec)
3318 + if (i == rqst->rq_nvec-1)
3319 break;
3320 }
3321 start = i;
3322 buflen = 0;
3323 } else {
3324 i++;
3325 - if (i == rqst->rq_nvec) {
3326 + if (i == rqst->rq_nvec-1) {
3327 /* send out all remaining vecs */
3328 remaining_data_length -= buflen;
3329 log_write(INFO,
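
The smbd_send() rework stops copying the caller's vectors into a fixed SMBDIRECT_MAX_SGE array: it now requires the 4-byte RFC1002 length to sit alone in rq_iov[0], points iov at rq_iov[1], and works with rq_nvec - 1 entries. A userspace sketch of that skip-the-transport-header accounting, with struct iovec standing in for the kernel's struct kvec:

#include <stdio.h>
#include <sys/uio.h>

/* total the payload vectors, skipping iov[0] which carries only the RFC1002 length */
static int total_payload(const struct iovec *rq_iov, int rq_nvec, size_t *total)
{
	const struct iovec *iov = &rq_iov[1];
	size_t buflen = 0;
	int i;

	if (rq_iov[0].iov_len != 4) {
		fprintf(stderr, "expected the pdu length in 1st iov, got %zu\n",
			rq_iov[0].iov_len);
		return -1;
	}
	for (i = 0; i < rq_nvec - 1; i++)
		buflen += iov[i].iov_len;
	*total = buflen;
	return 0;
}

int main(void)
{
	char len_field[4], hdr[64], body[512];
	struct iovec rq_iov[3] = {
		{ len_field, sizeof(len_field) },
		{ hdr, sizeof(hdr) },
		{ body, sizeof(body) },
	};
	size_t total;

	if (total_payload(rq_iov, 3, &total) == 0)
		printf("payload bytes to send over RDMA: %zu\n", total);
	return 0;
}
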
3330 diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
3331 index 665661464067..1b5cd3b8617c 100644
3332 --- a/fs/cifs/transport.c
3333 +++ b/fs/cifs/transport.c
3334 @@ -753,7 +753,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
3335 goto out;
3336
3337 #ifdef CONFIG_CIFS_SMB311
3338 - if (ses->status == CifsNew)
3339 + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
3340 smb311_update_preauth_hash(ses, rqst->rq_iov+1,
3341 rqst->rq_nvec-1);
3342 #endif
3343 @@ -797,7 +797,7 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
3344 *resp_buf_type = CIFS_SMALL_BUFFER;
3345
3346 #ifdef CONFIG_CIFS_SMB311
3347 - if (ses->status == CifsNew) {
3348 + if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
3349 struct kvec iov = {
3350 .iov_base = buf + 4,
3351 .iov_len = get_rfc1002_length(buf)
3352 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
3353 index f82c4966f4ce..508b905d744d 100644
3354 --- a/fs/ext4/balloc.c
3355 +++ b/fs/ext4/balloc.c
3356 @@ -321,6 +321,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
3357 struct ext4_sb_info *sbi = EXT4_SB(sb);
3358 ext4_grpblk_t offset;
3359 ext4_grpblk_t next_zero_bit;
3360 + ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
3361 ext4_fsblk_t blk;
3362 ext4_fsblk_t group_first_block;
3363
3364 @@ -338,20 +339,25 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
3365 /* check whether block bitmap block number is set */
3366 blk = ext4_block_bitmap(sb, desc);
3367 offset = blk - group_first_block;
3368 - if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
3369 + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
3370 + !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
3371 /* bad block bitmap */
3372 return blk;
3373
3374 /* check whether the inode bitmap block number is set */
3375 blk = ext4_inode_bitmap(sb, desc);
3376 offset = blk - group_first_block;
3377 - if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
3378 + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
3379 + !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
3380 /* bad block bitmap */
3381 return blk;
3382
3383 /* check whether the inode table block number is set */
3384 blk = ext4_inode_table(sb, desc);
3385 offset = blk - group_first_block;
3386 + if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
3387 + EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
3388 + return blk;
3389 next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
3390 EXT4_B2C(sbi, offset + sbi->s_itb_per_group),
3391 EXT4_B2C(sbi, offset));
3392 @@ -417,6 +423,7 @@ struct buffer_head *
3393 ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
3394 {
3395 struct ext4_group_desc *desc;
3396 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3397 struct buffer_head *bh;
3398 ext4_fsblk_t bitmap_blk;
3399 int err;
3400 @@ -425,6 +432,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
3401 if (!desc)
3402 return ERR_PTR(-EFSCORRUPTED);
3403 bitmap_blk = ext4_block_bitmap(sb, desc);
3404 + if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
3405 + (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
3406 + ext4_error(sb, "Invalid block bitmap block %llu in "
3407 + "block_group %u", bitmap_blk, block_group);
3408 + return ERR_PTR(-EFSCORRUPTED);
3409 + }
3410 bh = sb_getblk(sb, bitmap_blk);
3411 if (unlikely(!bh)) {
3412 ext4_error(sb, "Cannot get buffer for block bitmap - "
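
The balloc.c hunks add two layers of validation: the block/inode bitmap and inode table offsets must land inside the group (offset >= 0 and EXT4_B2C(offset) < EXT4_CLUSTERS_PER_GROUP), and the bitmap block number itself must lie strictly inside the filesystem before sb_getblk() is attempted. A compact sketch of the second check; the demo_sb fields stand in for s_first_data_block and ext4_blocks_count():

#include <stdio.h>
#include <stdint.h>

/* stand-ins for s_first_data_block and ext4_blocks_count() */
struct demo_sb {
	uint64_t first_data_block;
	uint64_t blocks_count;
};

/* reject bitmap block numbers that cannot possibly be valid for this filesystem */
static int bitmap_blk_valid(const struct demo_sb *sb, uint64_t bitmap_blk)
{
	if (bitmap_blk <= sb->first_data_block || bitmap_blk >= sb->blocks_count)
		return 0;	/* would be reported as -EFSCORRUPTED */
	return 1;
}

int main(void)
{
	struct demo_sb sb = { .first_data_block = 1, .blocks_count = 1 << 20 };

	printf("blk 0:        %s\n", bitmap_blk_valid(&sb, 0) ? "ok" : "corrupted");
	printf("blk 4242:     %s\n", bitmap_blk_valid(&sb, 4242) ? "ok" : "corrupted");
	printf("blk 2^20 + 1: %s\n", bitmap_blk_valid(&sb, (1 << 20) + 1) ? "ok" : "corrupted");
	return 0;
}

Rejecting an out-of-range bitmap block up front keeps a crafted on-disk group descriptor from steering reads at arbitrary block numbers.
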
3413 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3414 index 054416e9d827..a7ca193a7480 100644
3415 --- a/fs/ext4/extents.c
3416 +++ b/fs/ext4/extents.c
3417 @@ -5334,8 +5334,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
3418 stop = le32_to_cpu(extent->ee_block);
3419
3420 /*
3421 - * In case of left shift, Don't start shifting extents until we make
3422 - * sure the hole is big enough to accommodate the shift.
3423 + * For left shifts, make sure the hole on the left is big enough to
3424 + * accommodate the shift. For right shifts, make sure the last extent
3425 + * won't be shifted beyond EXT_MAX_BLOCKS.
3426 */
3427 if (SHIFT == SHIFT_LEFT) {
3428 path = ext4_find_extent(inode, start - 1, &path,
3429 @@ -5355,9 +5356,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
3430
3431 if ((start == ex_start && shift > ex_start) ||
3432 (shift > start - ex_end)) {
3433 - ext4_ext_drop_refs(path);
3434 - kfree(path);
3435 - return -EINVAL;
3436 + ret = -EINVAL;
3437 + goto out;
3438 + }
3439 + } else {
3440 + if (shift > EXT_MAX_BLOCKS -
3441 + (stop + ext4_ext_get_actual_len(extent))) {
3442 + ret = -EINVAL;
3443 + goto out;
3444 }
3445 }
3446
3447 diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3448 index 3fa93665b4a3..df92e3ec9913 100644
3449 --- a/fs/ext4/ialloc.c
3450 +++ b/fs/ext4/ialloc.c
3451 @@ -122,6 +122,7 @@ static struct buffer_head *
3452 ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
3453 {
3454 struct ext4_group_desc *desc;
3455 + struct ext4_sb_info *sbi = EXT4_SB(sb);
3456 struct buffer_head *bh = NULL;
3457 ext4_fsblk_t bitmap_blk;
3458 int err;
3459 @@ -131,6 +132,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
3460 return ERR_PTR(-EFSCORRUPTED);
3461
3462 bitmap_blk = ext4_inode_bitmap(sb, desc);
3463 + if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
3464 + (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
3465 + ext4_error(sb, "Invalid inode bitmap blk %llu in "
3466 + "block_group %u", bitmap_blk, block_group);
3467 + return ERR_PTR(-EFSCORRUPTED);
3468 + }
3469 bh = sb_getblk(sb, bitmap_blk);
3470 if (unlikely(!bh)) {
3471 ext4_error(sb, "Cannot read inode bitmap - "
3472 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3473 index 192c5ad09d71..b8dace7abe09 100644
3474 --- a/fs/ext4/super.c
3475 +++ b/fs/ext4/super.c
3476 @@ -5868,5 +5868,6 @@ static void __exit ext4_exit_fs(void)
3477 MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
3478 MODULE_DESCRIPTION("Fourth Extended Filesystem");
3479 MODULE_LICENSE("GPL");
3480 +MODULE_SOFTDEP("pre: crc32c");
3481 module_init(ext4_init_fs)
3482 module_exit(ext4_exit_fs)
3483 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
3484 index ac311037d7a5..8aa453784402 100644
3485 --- a/fs/jbd2/transaction.c
3486 +++ b/fs/jbd2/transaction.c
3487 @@ -532,6 +532,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
3488 */
3489 ret = start_this_handle(journal, handle, GFP_NOFS);
3490 if (ret < 0) {
3491 + handle->h_journal = journal;
3492 jbd2_journal_free_reserved(handle);
3493 return ret;
3494 }
3495 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
3496 index 1ab0e520d6fc..e17de55c2542 100644
3497 --- a/include/asm-generic/vmlinux.lds.h
3498 +++ b/include/asm-generic/vmlinux.lds.h
3499 @@ -179,7 +179,7 @@
3500 #endif
3501
3502 #ifdef CONFIG_SERIAL_EARLYCON
3503 -#define EARLYCON_TABLE() STRUCT_ALIGN(); \
3504 +#define EARLYCON_TABLE() . = ALIGN(8); \
3505 VMLINUX_SYMBOL(__earlycon_table) = .; \
3506 KEEP(*(__earlycon_table)) \
3507 VMLINUX_SYMBOL(__earlycon_table_end) = .;
3508 diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
3509 index e518e4e3dfb5..4b1548129fa2 100644
3510 --- a/include/kvm/arm_psci.h
3511 +++ b/include/kvm/arm_psci.h
3512 @@ -37,10 +37,15 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
3513 * Our PSCI implementation stays the same across versions from
3514 * v0.2 onward, only adding the few mandatory functions (such
3515 * as FEATURES with 1.0) that are required by newer
3516 - * revisions. It is thus safe to return the latest.
3517 + * revisions. It is thus safe to return the latest, unless
3518 + * userspace has instructed us otherwise.
3519 */
3520 - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features))
3521 + if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) {
3522 + if (vcpu->kvm->arch.psci_version)
3523 + return vcpu->kvm->arch.psci_version;
3524 +
3525 return KVM_ARM_PSCI_LATEST;
3526 + }
3527
3528 return KVM_ARM_PSCI_0_1;
3529 }
3530 @@ -48,4 +53,11 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
3531
3532 int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
3533
3534 +struct kvm_one_reg;
3535 +
3536 +int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu);
3537 +int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
3538 +int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
3539 +int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
3540 +
3541 #endif /* __KVM_ARM_PSCI_H__ */
3542 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
3543 index ed63f3b69c12..c9e601dce06f 100644
3544 --- a/include/linux/blkdev.h
3545 +++ b/include/linux/blkdev.h
3546 @@ -605,6 +605,11 @@ struct request_queue {
3547 * initialized by the low level device driver (e.g. scsi/sd.c).
3548 * Stacking drivers (device mappers) may or may not initialize
3549 * these fields.
3550 + *
3551 + * Reads of this information must be protected with blk_queue_enter() /
3552 + * blk_queue_exit(). Modifying this information is only allowed while
3553 + * no requests are being processed. See also blk_mq_freeze_queue() and
3554 + * blk_mq_unfreeze_queue().
3555 */
3556 unsigned int nr_zones;
3557 unsigned long *seq_zones_bitmap;
3558 diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
3559 index b63fa457febd..3529683f691e 100644
3560 --- a/include/linux/mtd/flashchip.h
3561 +++ b/include/linux/mtd/flashchip.h
3562 @@ -85,6 +85,7 @@ struct flchip {
3563 unsigned int write_suspended:1;
3564 unsigned int erase_suspended:1;
3565 unsigned long in_progress_block_addr;
3566 + unsigned long in_progress_block_mask;
3567
3568 struct mutex mutex;
3569 wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
3570 diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
3571 index b32df49a3bd5..c4219b9cbb70 100644
3572 --- a/include/linux/serial_core.h
3573 +++ b/include/linux/serial_core.h
3574 @@ -351,10 +351,10 @@ struct earlycon_id {
3575 char name[16];
3576 char compatible[128];
3577 int (*setup)(struct earlycon_device *, const char *options);
3578 -} __aligned(32);
3579 +};
3580
3581 -extern const struct earlycon_id __earlycon_table[];
3582 -extern const struct earlycon_id __earlycon_table_end[];
3583 +extern const struct earlycon_id *__earlycon_table[];
3584 +extern const struct earlycon_id *__earlycon_table_end[];
3585
3586 #if defined(CONFIG_SERIAL_EARLYCON) && !defined(MODULE)
3587 #define EARLYCON_USED_OR_UNUSED __used
3588 @@ -362,12 +362,19 @@ extern const struct earlycon_id __earlycon_table_end[];
3589 #define EARLYCON_USED_OR_UNUSED __maybe_unused
3590 #endif
3591
3592 -#define OF_EARLYCON_DECLARE(_name, compat, fn) \
3593 - static const struct earlycon_id __UNIQUE_ID(__earlycon_##_name) \
3594 - EARLYCON_USED_OR_UNUSED __section(__earlycon_table) \
3595 +#define _OF_EARLYCON_DECLARE(_name, compat, fn, unique_id) \
3596 + static const struct earlycon_id unique_id \
3597 + EARLYCON_USED_OR_UNUSED __initconst \
3598 = { .name = __stringify(_name), \
3599 .compatible = compat, \
3600 - .setup = fn }
3601 + .setup = fn }; \
3602 + static const struct earlycon_id EARLYCON_USED_OR_UNUSED \
3603 + __section(__earlycon_table) \
3604 + * const __PASTE(__p, unique_id) = &unique_id
3605 +
3606 +#define OF_EARLYCON_DECLARE(_name, compat, fn) \
3607 + _OF_EARLYCON_DECLARE(_name, compat, fn, \
3608 + __UNIQUE_ID(__earlycon_##_name))
3609
3610 #define EARLYCON_DECLARE(_name, fn) OF_EARLYCON_DECLARE(_name, "", fn)
3611
3612 diff --git a/include/linux/tty.h b/include/linux/tty.h
3613 index 47f8af22f216..1dd587ba6d88 100644
3614 --- a/include/linux/tty.h
3615 +++ b/include/linux/tty.h
3616 @@ -701,7 +701,7 @@ extern int tty_unregister_ldisc(int disc);
3617 extern int tty_set_ldisc(struct tty_struct *tty, int disc);
3618 extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
3619 extern void tty_ldisc_release(struct tty_struct *tty);
3620 -extern void tty_ldisc_init(struct tty_struct *tty);
3621 +extern int __must_check tty_ldisc_init(struct tty_struct *tty);
3622 extern void tty_ldisc_deinit(struct tty_struct *tty);
3623 extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
3624 char *f, int count);
3625 diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h
3626 index c71def6b310f..a240ed2a0372 100644
3627 --- a/include/linux/vbox_utils.h
3628 +++ b/include/linux/vbox_utils.h
3629 @@ -24,24 +24,6 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...);
3630 #define vbg_debug pr_debug
3631 #endif
3632
3633 -/**
3634 - * Allocate memory for generic request and initialize the request header.
3635 - *
3636 - * Return: the allocated memory
3637 - * @len: Size of memory block required for the request.
3638 - * @req_type: The generic request type.
3639 - */
3640 -void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type);
3641 -
3642 -/**
3643 - * Perform a generic request.
3644 - *
3645 - * Return: VBox status code
3646 - * @gdev: The Guest extension device.
3647 - * @req: Pointer to the request structure.
3648 - */
3649 -int vbg_req_perform(struct vbg_dev *gdev, void *req);
3650 -
3651 int vbg_hgcm_connect(struct vbg_dev *gdev,
3652 struct vmmdev_hgcm_service_location *loc,
3653 u32 *client_id, int *vbox_status);
3654 @@ -52,11 +34,6 @@ int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function,
3655 u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms,
3656 u32 parm_count, int *vbox_status);
3657
3658 -int vbg_hgcm_call32(
3659 - struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms,
3660 - struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count,
3661 - int *vbox_status);
3662 -
3663 /**
3664 * Convert a VirtualBox status code to a standard Linux kernel return value.
3665 * Return: 0 or negative errno value.
3666 diff --git a/include/linux/virtio.h b/include/linux/virtio.h
3667 index 988c7355bc22..fa1b5da2804e 100644
3668 --- a/include/linux/virtio.h
3669 +++ b/include/linux/virtio.h
3670 @@ -157,6 +157,9 @@ int virtio_device_freeze(struct virtio_device *dev);
3671 int virtio_device_restore(struct virtio_device *dev);
3672 #endif
3673
3674 +#define virtio_device_for_each_vq(vdev, vq) \
3675 + list_for_each_entry(vq, &vdev->vqs, list)
3676 +
3677 /**
3678 * virtio_driver - operations for a virtio I/O driver
3679 * @driver: underlying device driver (populate name and owner).
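
virtio_device_for_each_vq() is just a convenience wrapper so drivers never open-code which list the virtqueues hang off (it expands to list_for_each_entry() over vdev->vqs). A hedged sketch of the same wrapper-macro idea over a plain singly linked list, with invented demo_* names:

#include <stdio.h>

struct demo_vq {
	const char *name;
	struct demo_vq *next;
};

struct demo_device {
	struct demo_vq *vqs;	/* head of the virtqueue list */
};

/* wrapper so callers never spell out the list head or link member */
#define demo_device_for_each_vq(dev, vq) \
	for ((vq) = (dev)->vqs; (vq) != NULL; (vq) = (vq)->next)

int main(void)
{
	struct demo_vq rx = { "rx", NULL };
	struct demo_vq tx = { "tx", &rx };
	struct demo_device dev = { .vqs = &tx };
	struct demo_vq *vq;

	demo_device_for_each_vq(&dev, vq)
		printf("vq: %s\n", vq->name);
	return 0;
}
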
3680 diff --git a/include/sound/control.h b/include/sound/control.h
3681 index ca13a44ae9d4..6011a58d3e20 100644
3682 --- a/include/sound/control.h
3683 +++ b/include/sound/control.h
3684 @@ -23,6 +23,7 @@
3685 */
3686
3687 #include <linux/wait.h>
3688 +#include <linux/nospec.h>
3689 #include <sound/asound.h>
3690
3691 #define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
3692 @@ -148,12 +149,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
3693
3694 static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
3695 {
3696 - return id->numid - kctl->id.numid;
3697 + unsigned int ioff = id->numid - kctl->id.numid;
3698 + return array_index_nospec(ioff, kctl->count);
3699 }
3700
3701 static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
3702 {
3703 - return id->index - kctl->id.index;
3704 + unsigned int ioff = id->index - kctl->id.index;
3705 + return array_index_nospec(ioff, kctl->count);
3706 }
3707
3708 static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
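
snd_ctl_get_ioffnum()/snd_ctl_get_ioffidx() -- like many hunks in this patch -- clamp a user-influenced index with array_index_nospec() so a mispredicted bounds check cannot be abused to read out of bounds speculatively. A userspace sketch of the branchless mask built by the generic fallback in include/linux/nospec.h (architectures may override it; the sketch assumes an arithmetic right shift):

#include <stdio.h>

/* all-ones when index < size, zero otherwise, computed without a branch */
static unsigned long index_mask(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

/* clamp like array_index_nospec(): valid indices pass through, invalid ones become 0 */
static unsigned long clamp_index(unsigned long index, unsigned long size)
{
	return index & index_mask(index, size);
}

int main(void)
{
	unsigned long size = 8;

	for (unsigned long i = 5; i < 11; i++)
		printf("index %2lu -> %lu (mask %s)\n", i, clamp_index(i, size),
		       index_mask(i, size) ? "pass" : "zeroed");
	return 0;
}

The mask is all-ones for in-range indices and zero otherwise, so the clamped index is produced without a data-dependent branch for the CPU to mispredict.
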
3709 diff --git a/kernel/module.c b/kernel/module.c
3710 index e42764acedb4..bbb45c038321 100644
3711 --- a/kernel/module.c
3712 +++ b/kernel/module.c
3713 @@ -1472,7 +1472,8 @@ static ssize_t module_sect_show(struct module_attribute *mattr,
3714 {
3715 struct module_sect_attr *sattr =
3716 container_of(mattr, struct module_sect_attr, mattr);
3717 - return sprintf(buf, "0x%pK\n", (void *)sattr->address);
3718 + return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
3719 + (void *)sattr->address : NULL);
3720 }
3721
3722 static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
3723 diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
3724 index 29a5733eff83..741eadbeba58 100644
3725 --- a/kernel/time/tick-sched.c
3726 +++ b/kernel/time/tick-sched.c
3727 @@ -797,12 +797,13 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
3728 goto out;
3729 }
3730
3731 - hrtimer_set_expires(&ts->sched_timer, tick);
3732 -
3733 - if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
3734 - hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
3735 - else
3736 + if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
3737 + hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
3738 + } else {
3739 + hrtimer_set_expires(&ts->sched_timer, tick);
3740 tick_program_event(tick, 1);
3741 + }
3742 +
3743 out:
3744 /*
3745 * Update the estimated sleep length until the next timer
3746 diff --git a/lib/kobject.c b/lib/kobject.c
3747 index afd5a3fc6123..d20a97a7e168 100644
3748 --- a/lib/kobject.c
3749 +++ b/lib/kobject.c
3750 @@ -232,14 +232,12 @@ static int kobject_add_internal(struct kobject *kobj)
3751
3752 /* be noisy on error issues */
3753 if (error == -EEXIST)
3754 - WARN(1, "%s failed for %s with "
3755 - "-EEXIST, don't try to register things with "
3756 - "the same name in the same directory.\n",
3757 - __func__, kobject_name(kobj));
3758 + pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
3759 + __func__, kobject_name(kobj));
3760 else
3761 - WARN(1, "%s failed for %s (error: %d parent: %s)\n",
3762 - __func__, kobject_name(kobj), error,
3763 - parent ? kobject_name(parent) : "'none'");
3764 + pr_err("%s failed for %s (error: %d parent: %s)\n",
3765 + __func__, kobject_name(kobj), error,
3766 + parent ? kobject_name(parent) : "'none'");
3767 } else
3768 kobj->state_in_sysfs = 1;
3769
3770 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
3771 index 8a4d3758030b..02572130a77a 100644
3772 --- a/net/ceph/messenger.c
3773 +++ b/net/ceph/messenger.c
3774 @@ -2531,6 +2531,11 @@ static int try_write(struct ceph_connection *con)
3775 int ret = 1;
3776
3777 dout("try_write start %p state %lu\n", con, con->state);
3778 + if (con->state != CON_STATE_PREOPEN &&
3779 + con->state != CON_STATE_CONNECTING &&
3780 + con->state != CON_STATE_NEGOTIATING &&
3781 + con->state != CON_STATE_OPEN)
3782 + return 0;
3783
3784 more:
3785 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
3786 @@ -2556,6 +2561,8 @@ static int try_write(struct ceph_connection *con)
3787 }
3788
3789 more_kvec:
3790 + BUG_ON(!con->sock);
3791 +
3792 /* kvec data queued? */
3793 if (con->out_kvec_left) {
3794 ret = write_partial_kvec(con);
3795 diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
3796 index 1547107f4854..4887443f52dd 100644
3797 --- a/net/ceph/mon_client.c
3798 +++ b/net/ceph/mon_client.c
3799 @@ -209,6 +209,14 @@ static void reopen_session(struct ceph_mon_client *monc)
3800 __open_session(monc);
3801 }
3802
3803 +static void un_backoff(struct ceph_mon_client *monc)
3804 +{
3805 + monc->hunt_mult /= 2; /* reduce by 50% */
3806 + if (monc->hunt_mult < 1)
3807 + monc->hunt_mult = 1;
3808 + dout("%s hunt_mult now %d\n", __func__, monc->hunt_mult);
3809 +}
3810 +
3811 /*
3812 * Reschedule delayed work timer.
3813 */
3814 @@ -963,6 +971,7 @@ static void delayed_work(struct work_struct *work)
3815 if (!monc->hunting) {
3816 ceph_con_keepalive(&monc->con);
3817 __validate_auth(monc);
3818 + un_backoff(monc);
3819 }
3820
3821 if (is_auth &&
3822 @@ -1123,9 +1132,8 @@ static void finish_hunting(struct ceph_mon_client *monc)
3823 dout("%s found mon%d\n", __func__, monc->cur_mon);
3824 monc->hunting = false;
3825 monc->had_a_connection = true;
3826 - monc->hunt_mult /= 2; /* reduce by 50% */
3827 - if (monc->hunt_mult < 1)
3828 - monc->hunt_mult = 1;
3829 + un_backoff(monc);
3830 + __schedule_delayed(monc);
3831 }
3832 }
3833
3834 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
3835 index b719d0bd833e..06d7c40af570 100644
3836 --- a/sound/core/pcm_compat.c
3837 +++ b/sound/core/pcm_compat.c
3838 @@ -27,10 +27,11 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
3839 s32 __user *src)
3840 {
3841 snd_pcm_sframes_t delay;
3842 + int err;
3843
3844 - delay = snd_pcm_delay(substream);
3845 - if (delay < 0)
3846 - return delay;
3847 + err = snd_pcm_delay(substream, &delay);
3848 + if (err)
3849 + return err;
3850 if (put_user(delay, src))
3851 return -EFAULT;
3852 return 0;
3853 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
3854 index d18b3982548b..5ea0c1a3bbe6 100644
3855 --- a/sound/core/pcm_native.c
3856 +++ b/sound/core/pcm_native.c
3857 @@ -2687,7 +2687,8 @@ static int snd_pcm_hwsync(struct snd_pcm_substream *substream)
3858 return err;
3859 }
3860
3861 -static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
3862 +static int snd_pcm_delay(struct snd_pcm_substream *substream,
3863 + snd_pcm_sframes_t *delay)
3864 {
3865 struct snd_pcm_runtime *runtime = substream->runtime;
3866 int err;
3867 @@ -2703,7 +2704,9 @@ static snd_pcm_sframes_t snd_pcm_delay(struct snd_pcm_substream *substream)
3868 n += runtime->delay;
3869 }
3870 snd_pcm_stream_unlock_irq(substream);
3871 - return err < 0 ? err : n;
3872 + if (!err)
3873 + *delay = n;
3874 + return err;
3875 }
3876
3877 static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
3878 @@ -2746,6 +2749,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
3879 sync_ptr.s.status.hw_ptr = status->hw_ptr;
3880 sync_ptr.s.status.tstamp = status->tstamp;
3881 sync_ptr.s.status.suspended_state = status->suspended_state;
3882 + sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
3883 snd_pcm_stream_unlock_irq(substream);
3884 if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
3885 return -EFAULT;
3886 @@ -2911,11 +2915,13 @@ static int snd_pcm_common_ioctl(struct file *file,
3887 return snd_pcm_hwsync(substream);
3888 case SNDRV_PCM_IOCTL_DELAY:
3889 {
3890 - snd_pcm_sframes_t delay = snd_pcm_delay(substream);
3891 + snd_pcm_sframes_t delay;
3892 snd_pcm_sframes_t __user *res = arg;
3893 + int err;
3894
3895 - if (delay < 0)
3896 - return delay;
3897 + err = snd_pcm_delay(substream, &delay);
3898 + if (err)
3899 + return err;
3900 if (put_user(delay, res))
3901 return -EFAULT;
3902 return 0;
3903 @@ -3003,13 +3009,7 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
3904 case SNDRV_PCM_IOCTL_DROP:
3905 return snd_pcm_drop(substream);
3906 case SNDRV_PCM_IOCTL_DELAY:
3907 - {
3908 - result = snd_pcm_delay(substream);
3909 - if (result < 0)
3910 - return result;
3911 - *frames = result;
3912 - return 0;
3913 - }
3914 + return snd_pcm_delay(substream, frames);
3915 default:
3916 return -EINVAL;
3917 }
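
The snd_pcm_delay() change is a plain API refactor: instead of overloading one signed return value as either an error code or a delay, the function now returns a status and writes the delay through an out-parameter, so the ioctl and compat paths above no longer have to guess which one they got. A tiny sketch of the before/after shape with invented names and a placeholder error value:

#include <stdio.h>

typedef long sframes_t;

/* before: a single return value doubled as error code and payload */
static sframes_t get_delay_old(int fail)
{
	return fail ? -5 /* errno-style error */ : 4096 /* frames */;
}

/* after: status and payload are separated, as in the snd_pcm_delay() rework */
static int get_delay_new(int fail, sframes_t *delay)
{
	if (fail)
		return -5;
	*delay = 4096;
	return 0;
}

int main(void)
{
	sframes_t delay;
	int err = get_delay_new(0, &delay);

	if (!err)
		printf("delay: %ld frames\n", delay);
	printf("old-style call returned: %ld\n", get_delay_old(1));
	return 0;
}
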
3918 diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
3919 index c3908862bc8b..86ca584c27b2 100644
3920 --- a/sound/core/seq/oss/seq_oss_event.c
3921 +++ b/sound/core/seq/oss/seq_oss_event.c
3922 @@ -26,6 +26,7 @@
3923 #include <sound/seq_oss_legacy.h>
3924 #include "seq_oss_readq.h"
3925 #include "seq_oss_writeq.h"
3926 +#include <linux/nospec.h>
3927
3928
3929 /*
3930 @@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
3931 {
3932 struct seq_oss_synthinfo *info;
3933
3934 - if (!snd_seq_oss_synth_is_valid(dp, dev))
3935 + info = snd_seq_oss_synth_info(dp, dev);
3936 + if (!info)
3937 return -ENXIO;
3938
3939 - info = &dp->synths[dev];
3940 switch (info->arg.event_passing) {
3941 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
3942 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
3943 @@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
3944 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
3945 }
3946
3947 + ch = array_index_nospec(ch, info->nr_voices);
3948 if (note == 255 && info->ch[ch].note >= 0) {
3949 /* volume control */
3950 int type;
3951 @@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
3952 {
3953 struct seq_oss_synthinfo *info;
3954
3955 - if (!snd_seq_oss_synth_is_valid(dp, dev))
3956 + info = snd_seq_oss_synth_info(dp, dev);
3957 + if (!info)
3958 return -ENXIO;
3959
3960 - info = &dp->synths[dev];
3961 switch (info->arg.event_passing) {
3962 case SNDRV_SEQ_OSS_PROCESS_EVENTS:
3963 if (! info->ch || ch < 0 || ch >= info->nr_voices) {
3964 @@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
3965 return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
3966 }
3967
3968 + ch = array_index_nospec(ch, info->nr_voices);
3969 if (info->ch[ch].note >= 0) {
3970 note = info->ch[ch].note;
3971 info->ch[ch].vel = 0;
3972 @@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
3973 static int
3974 set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
3975 {
3976 - if (! snd_seq_oss_synth_is_valid(dp, dev))
3977 + if (!snd_seq_oss_synth_info(dp, dev))
3978 return -ENXIO;
3979
3980 ev->type = type;
3981 @@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
3982 static int
3983 set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
3984 {
3985 - if (! snd_seq_oss_synth_is_valid(dp, dev))
3986 + if (!snd_seq_oss_synth_info(dp, dev))
3987 return -ENXIO;
3988
3989 ev->type = type;
3990 diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
3991 index b30b2139e3f0..9debd1b8fd28 100644
3992 --- a/sound/core/seq/oss/seq_oss_midi.c
3993 +++ b/sound/core/seq/oss/seq_oss_midi.c
3994 @@ -29,6 +29,7 @@
3995 #include "../seq_lock.h"
3996 #include <linux/init.h>
3997 #include <linux/slab.h>
3998 +#include <linux/nospec.h>
3999
4000
4001 /*
4002 @@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
4003 {
4004 if (dev < 0 || dev >= dp->max_mididev)
4005 return NULL;
4006 + dev = array_index_nospec(dev, dp->max_mididev);
4007 return get_mdev(dev);
4008 }
4009
4010 diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
4011 index cd0e0ebbfdb1..278ebb993122 100644
4012 --- a/sound/core/seq/oss/seq_oss_synth.c
4013 +++ b/sound/core/seq/oss/seq_oss_synth.c
4014 @@ -26,6 +26,7 @@
4015 #include <linux/init.h>
4016 #include <linux/module.h>
4017 #include <linux/slab.h>
4018 +#include <linux/nospec.h>
4019
4020 /*
4021 * constants
4022 @@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
4023 dp->max_synthdev = 0;
4024 }
4025
4026 -/*
4027 - * check if the specified device is MIDI mapped device
4028 - */
4029 -static int
4030 -is_midi_dev(struct seq_oss_devinfo *dp, int dev)
4031 +static struct seq_oss_synthinfo *
4032 +get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
4033 {
4034 if (dev < 0 || dev >= dp->max_synthdev)
4035 - return 0;
4036 - if (dp->synths[dev].is_midi)
4037 - return 1;
4038 - return 0;
4039 + return NULL;
4040 + dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
4041 + return &dp->synths[dev];
4042 }
4043
4044 /*
4045 @@ -359,14 +356,20 @@ static struct seq_oss_synth *
4046 get_synthdev(struct seq_oss_devinfo *dp, int dev)
4047 {
4048 struct seq_oss_synth *rec;
4049 - if (dev < 0 || dev >= dp->max_synthdev)
4050 - return NULL;
4051 - if (! dp->synths[dev].opened)
4052 + struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
4053 +
4054 + if (!info)
4055 return NULL;
4056 - if (dp->synths[dev].is_midi)
4057 - return &midi_synth_dev;
4058 - if ((rec = get_sdev(dev)) == NULL)
4059 + if (!info->opened)
4060 return NULL;
4061 + if (info->is_midi) {
4062 + rec = &midi_synth_dev;
4063 + snd_use_lock_use(&rec->use_lock);
4064 + } else {
4065 + rec = get_sdev(dev);
4066 + if (!rec)
4067 + return NULL;
4068 + }
4069 if (! rec->opened) {
4070 snd_use_lock_free(&rec->use_lock);
4071 return NULL;
4072 @@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
4073 struct seq_oss_synth *rec;
4074 struct seq_oss_synthinfo *info;
4075
4076 - if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
4077 - return;
4078 - info = &dp->synths[dev];
4079 - if (! info->opened)
4080 + info = get_synthinfo_nospec(dp, dev);
4081 + if (!info || !info->opened)
4082 return;
4083 if (info->sysex)
4084 info->sysex->len = 0; /* reset sysex */
4085 @@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
4086 const char __user *buf, int p, int c)
4087 {
4088 struct seq_oss_synth *rec;
4089 + struct seq_oss_synthinfo *info;
4090 int rc;
4091
4092 - if (dev < 0 || dev >= dp->max_synthdev)
4093 + info = get_synthinfo_nospec(dp, dev);
4094 + if (!info)
4095 return -ENXIO;
4096
4097 - if (is_midi_dev(dp, dev))
4098 + if (info->is_midi)
4099 return 0;
4100 if ((rec = get_synthdev(dp, dev)) == NULL)
4101 return -ENXIO;
4102 @@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
4103 if (rec->oper.load_patch == NULL)
4104 rc = -ENXIO;
4105 else
4106 - rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
4107 + rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
4108 snd_use_lock_free(&rec->use_lock);
4109 return rc;
4110 }
4111
4112 /*
4113 - * check if the device is valid synth device
4114 + * check if the device is valid synth device and return the synth info
4115 */
4116 -int
4117 -snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
4118 +struct seq_oss_synthinfo *
4119 +snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
4120 {
4121 struct seq_oss_synth *rec;
4122 +
4123 rec = get_synthdev(dp, dev);
4124 if (rec) {
4125 snd_use_lock_free(&rec->use_lock);
4126 - return 1;
4127 + return get_synthinfo_nospec(dp, dev);
4128 }
4129 - return 0;
4130 + return NULL;
4131 }
4132
4133
4134 @@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
4135 int i, send;
4136 unsigned char *dest;
4137 struct seq_oss_synth_sysex *sysex;
4138 + struct seq_oss_synthinfo *info;
4139
4140 - if (! snd_seq_oss_synth_is_valid(dp, dev))
4141 + info = snd_seq_oss_synth_info(dp, dev);
4142 + if (!info)
4143 return -ENXIO;
4144
4145 - sysex = dp->synths[dev].sysex;
4146 + sysex = info->sysex;
4147 if (sysex == NULL) {
4148 sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
4149 if (sysex == NULL)
4150 return -ENOMEM;
4151 - dp->synths[dev].sysex = sysex;
4152 + info->sysex = sysex;
4153 }
4154
4155 send = 0;
4156 @@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
4157 int
4158 snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
4159 {
4160 - if (! snd_seq_oss_synth_is_valid(dp, dev))
4161 + struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
4162 +
4163 + if (!info)
4164 return -EINVAL;
4165 - snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
4166 - dp->synths[dev].arg.addr.port);
4167 + snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
4168 + info->arg.addr.port);
4169 return 0;
4170 }
4171
4172 @@ -568,16 +576,18 @@ int
4173 snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
4174 {
4175 struct seq_oss_synth *rec;
4176 + struct seq_oss_synthinfo *info;
4177 int rc;
4178
4179 - if (is_midi_dev(dp, dev))
4180 + info = get_synthinfo_nospec(dp, dev);
4181 + if (!info || info->is_midi)
4182 return -ENXIO;
4183 if ((rec = get_synthdev(dp, dev)) == NULL)
4184 return -ENXIO;
4185 if (rec->oper.ioctl == NULL)
4186 rc = -ENXIO;
4187 else
4188 - rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
4189 + rc = rec->oper.ioctl(&info->arg, cmd, addr);
4190 snd_use_lock_free(&rec->use_lock);
4191 return rc;
4192 }
4193 @@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
4194 int
4195 snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
4196 {
4197 - if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
4198 + struct seq_oss_synthinfo *info;
4199 +
4200 + info = snd_seq_oss_synth_info(dp, dev);
4201 + if (!info || info->is_midi)
4202 return -ENXIO;
4203 ev->type = SNDRV_SEQ_EVENT_OSS;
4204 memcpy(ev->data.raw8.d, data, 8);
4205 diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
4206 index 74ac55f166b6..a63f9e22974d 100644
4207 --- a/sound/core/seq/oss/seq_oss_synth.h
4208 +++ b/sound/core/seq/oss/seq_oss_synth.h
4209 @@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
4210 void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
4211 int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
4212 const char __user *buf, int p, int c);
4213 -int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
4214 +struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
4215 + int dev);
4216 int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
4217 struct snd_seq_event *ev);
4218 int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
4219 diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
4220 index ddcc1a325a61..42920a243328 100644
4221 --- a/sound/drivers/opl3/opl3_synth.c
4222 +++ b/sound/drivers/opl3/opl3_synth.c
4223 @@ -21,6 +21,7 @@
4224
4225 #include <linux/slab.h>
4226 #include <linux/export.h>
4227 +#include <linux/nospec.h>
4228 #include <sound/opl3.h>
4229 #include <sound/asound_fm.h>
4230
4231 @@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
4232 {
4233 unsigned short reg_side;
4234 unsigned char op_offset;
4235 - unsigned char voice_offset;
4236 + unsigned char voice_offset, voice_op;
4237
4238 unsigned short opl3_reg;
4239 unsigned char reg_val;
4240 @@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
4241 voice_offset = voice->voice - MAX_OPL2_VOICES;
4242 }
4243 /* Get register offset of operator */
4244 - op_offset = snd_opl3_regmap[voice_offset][voice->op];
4245 + voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
4246 + voice_op = array_index_nospec(voice->op, 4);
4247 + op_offset = snd_opl3_regmap[voice_offset][voice_op];
4248
4249 reg_val = 0x00;
4250 /* Set amplitude modulation (tremolo) effect */
4251 diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c
4252 index 8573289c381e..928a255bfc35 100644
4253 --- a/sound/firewire/dice/dice-stream.c
4254 +++ b/sound/firewire/dice/dice-stream.c
4255 @@ -435,7 +435,7 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice)
4256 err = init_stream(dice, AMDTP_IN_STREAM, i);
4257 if (err < 0) {
4258 for (; i >= 0; i--)
4259 - destroy_stream(dice, AMDTP_OUT_STREAM, i);
4260 + destroy_stream(dice, AMDTP_IN_STREAM, i);
4261 goto end;
4262 }
4263 }
4264 diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
4265 index 4ddb4cdd054b..96bb01b6b751 100644
4266 --- a/sound/firewire/dice/dice.c
4267 +++ b/sound/firewire/dice/dice.c
4268 @@ -14,7 +14,7 @@ MODULE_LICENSE("GPL v2");
4269 #define OUI_WEISS 0x001c6a
4270 #define OUI_LOUD 0x000ff2
4271 #define OUI_FOCUSRITE 0x00130e
4272 -#define OUI_TCELECTRONIC 0x001486
4273 +#define OUI_TCELECTRONIC 0x000166
4274
4275 #define DICE_CATEGORY_ID 0x04
4276 #define WEISS_CATEGORY_ID 0x00
4277 diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
4278 index 7eb617175fde..a31a70dccecf 100644
4279 --- a/sound/pci/asihpi/hpimsginit.c
4280 +++ b/sound/pci/asihpi/hpimsginit.c
4281 @@ -23,6 +23,7 @@
4282
4283 #include "hpi_internal.h"
4284 #include "hpimsginit.h"
4285 +#include <linux/nospec.h>
4286
4287 /* The actual message size for each object type */
4288 static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
4289 @@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
4290 {
4291 u16 size;
4292
4293 - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
4294 + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
4295 + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
4296 size = msg_size[object];
4297 - else
4298 + } else {
4299 size = sizeof(*phm);
4300 + }
4301
4302 memset(phm, 0, size);
4303 phm->size = size;
4304 @@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
4305 {
4306 u16 size;
4307
4308 - if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
4309 + if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
4310 + object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
4311 size = res_size[object];
4312 - else
4313 + } else {
4314 size = sizeof(*phr);
4315 + }
4316
4317 memset(phr, 0, sizeof(*phr));
4318 phr->size = size;
4319 diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
4320 index 5badd08e1d69..b1a2a7ea4172 100644
4321 --- a/sound/pci/asihpi/hpioctl.c
4322 +++ b/sound/pci/asihpi/hpioctl.c
4323 @@ -33,6 +33,7 @@
4324 #include <linux/stringify.h>
4325 #include <linux/module.h>
4326 #include <linux/vmalloc.h>
4327 +#include <linux/nospec.h>
4328
4329 #ifdef MODULE_FIRMWARE
4330 MODULE_FIRMWARE("asihpi/dsp5000.bin");
4331 @@ -186,7 +187,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
4332 struct hpi_adapter *pa = NULL;
4333
4334 if (hm->h.adapter_index < ARRAY_SIZE(adapters))
4335 - pa = &adapters[hm->h.adapter_index];
4336 + pa = &adapters[array_index_nospec(hm->h.adapter_index,
4337 + ARRAY_SIZE(adapters))];
4338
4339 if (!pa || !pa->adapter || !pa->adapter->type) {
4340 hpi_init_response(&hr->r0, hm->h.object,
4341 diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
4342 index 57df06e76968..cc009a4a3d1d 100644
4343 --- a/sound/pci/hda/hda_hwdep.c
4344 +++ b/sound/pci/hda/hda_hwdep.c
4345 @@ -21,6 +21,7 @@
4346 #include <linux/init.h>
4347 #include <linux/slab.h>
4348 #include <linux/compat.h>
4349 +#include <linux/nospec.h>
4350 #include <sound/core.h>
4351 #include "hda_codec.h"
4352 #include "hda_local.h"
4353 @@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
4354
4355 if (get_user(verb, &arg->verb))
4356 return -EFAULT;
4357 - res = get_wcaps(codec, verb >> 24);
4358 + /* open-code get_wcaps(verb>>24) with nospec */
4359 + verb >>= 24;
4360 + if (verb < codec->core.start_nid ||
4361 + verb >= codec->core.start_nid + codec->core.num_nodes) {
4362 + res = 0;
4363 + } else {
4364 + verb -= codec->core.start_nid;
4365 + verb = array_index_nospec(verb, codec->core.num_nodes);
4366 + res = codec->wcaps[verb];
4367 + }
4368 if (put_user(res, &arg->res))
4369 return -EFAULT;
4370 return 0;
4371 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
4372 index b4f1b6e88305..7d7eb1354eee 100644
4373 --- a/sound/pci/hda/patch_hdmi.c
4374 +++ b/sound/pci/hda/patch_hdmi.c
4375 @@ -1383,6 +1383,8 @@ static void hdmi_pcm_setup_pin(struct hdmi_spec *spec,
4376 pcm = get_pcm_rec(spec, per_pin->pcm_idx);
4377 else
4378 return;
4379 + if (!pcm->pcm)
4380 + return;
4381 if (!test_bit(per_pin->pcm_idx, &spec->pcm_in_use))
4382 return;
4383
4384 @@ -2151,8 +2153,13 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
4385 int dev, err;
4386 int pin_idx, pcm_idx;
4387
4388 -
4389 for (pcm_idx = 0; pcm_idx < spec->pcm_used; pcm_idx++) {
4390 + if (!get_pcm_rec(spec, pcm_idx)->pcm) {
4391 + /* no PCM: mark this for skipping permanently */
4392 + set_bit(pcm_idx, &spec->pcm_bitmap);
4393 + continue;
4394 + }
4395 +
4396 err = generic_hdmi_build_jack(codec, pcm_idx);
4397 if (err < 0)
4398 return err;
4399 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4400 index fc77bf7a1544..8c238e51bb5a 100644
4401 --- a/sound/pci/hda/patch_realtek.c
4402 +++ b/sound/pci/hda/patch_realtek.c
4403 @@ -331,6 +331,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
4404 /* fallthrough */
4405 case 0x10ec0215:
4406 case 0x10ec0233:
4407 + case 0x10ec0235:
4408 case 0x10ec0236:
4409 case 0x10ec0255:
4410 case 0x10ec0256:
4411 @@ -6575,6 +6576,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4412 SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
4413 SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
4414 SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4415 + SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4416 SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4417 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
4418 SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
4419 @@ -7160,8 +7162,11 @@ static int patch_alc269(struct hda_codec *codec)
4420 case 0x10ec0298:
4421 spec->codec_variant = ALC269_TYPE_ALC298;
4422 break;
4423 + case 0x10ec0235:
4424 case 0x10ec0255:
4425 spec->codec_variant = ALC269_TYPE_ALC255;
4426 + spec->shutup = alc256_shutup;
4427 + spec->init_hook = alc256_init;
4428 break;
4429 case 0x10ec0236:
4430 case 0x10ec0256:
4431 diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
4432 index 4c59983158e0..11b5b5e0e058 100644
4433 --- a/sound/pci/rme9652/hdspm.c
4434 +++ b/sound/pci/rme9652/hdspm.c
4435 @@ -137,6 +137,7 @@
4436 #include <linux/pci.h>
4437 #include <linux/math64.h>
4438 #include <linux/io.h>
4439 +#include <linux/nospec.h>
4440
4441 #include <sound/core.h>
4442 #include <sound/control.h>
4443 @@ -5698,40 +5699,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
4444 struct snd_pcm_channel_info *info)
4445 {
4446 struct hdspm *hdspm = snd_pcm_substream_chip(substream);
4447 + unsigned int channel = info->channel;
4448
4449 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
4450 - if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
4451 + if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
4452 dev_info(hdspm->card->dev,
4453 "snd_hdspm_channel_info: output channel out of range (%d)\n",
4454 - info->channel);
4455 + channel);
4456 return -EINVAL;
4457 }
4458
4459 - if (hdspm->channel_map_out[info->channel] < 0) {
4460 + channel = array_index_nospec(channel, hdspm->max_channels_out);
4461 + if (hdspm->channel_map_out[channel] < 0) {
4462 dev_info(hdspm->card->dev,
4463 "snd_hdspm_channel_info: output channel %d mapped out\n",
4464 - info->channel);
4465 + channel);
4466 return -EINVAL;
4467 }
4468
4469 - info->offset = hdspm->channel_map_out[info->channel] *
4470 + info->offset = hdspm->channel_map_out[channel] *
4471 HDSPM_CHANNEL_BUFFER_BYTES;
4472 } else {
4473 - if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
4474 + if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
4475 dev_info(hdspm->card->dev,
4476 "snd_hdspm_channel_info: input channel out of range (%d)\n",
4477 - info->channel);
4478 + channel);
4479 return -EINVAL;
4480 }
4481
4482 - if (hdspm->channel_map_in[info->channel] < 0) {
4483 + channel = array_index_nospec(channel, hdspm->max_channels_in);
4484 + if (hdspm->channel_map_in[channel] < 0) {
4485 dev_info(hdspm->card->dev,
4486 "snd_hdspm_channel_info: input channel %d mapped out\n",
4487 - info->channel);
4488 + channel);
4489 return -EINVAL;
4490 }
4491
4492 - info->offset = hdspm->channel_map_in[info->channel] *
4493 + info->offset = hdspm->channel_map_in[channel] *
4494 HDSPM_CHANNEL_BUFFER_BYTES;
4495 }
4496
4497 diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
4498 index df648b1d9217..edd765e22377 100644
4499 --- a/sound/pci/rme9652/rme9652.c
4500 +++ b/sound/pci/rme9652/rme9652.c
4501 @@ -26,6 +26,7 @@
4502 #include <linux/pci.h>
4503 #include <linux/module.h>
4504 #include <linux/io.h>
4505 +#include <linux/nospec.h>
4506
4507 #include <sound/core.h>
4508 #include <sound/control.h>
4509 @@ -2071,9 +2072,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
4510 if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
4511 return -EINVAL;
4512
4513 - if ((chn = rme9652->channel_map[info->channel]) < 0) {
4514 + chn = rme9652->channel_map[array_index_nospec(info->channel,
4515 + RME9652_NCHANNELS)];
4516 + if (chn < 0)
4517 return -EINVAL;
4518 - }
4519
4520 info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
4521 info->first = 0;
4522 diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
4523 index cef79a1a620b..81268760b7a9 100644
4524 --- a/sound/soc/fsl/fsl_esai.c
4525 +++ b/sound/soc/fsl/fsl_esai.c
4526 @@ -144,6 +144,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
4527
4528 psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
4529
4530 + /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
4531 + if (ratio <= 256) {
4532 + pm = ratio;
4533 + fp = 1;
4534 + goto out;
4535 + }
4536 +
4537 /* Set the max fluctuation -- 0.1% of the max devisor */
4538 savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
4539
4540 diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
4541 index 09db2aec12a3..b2f5d2fa354d 100644
4542 --- a/sound/soc/omap/omap-dmic.c
4543 +++ b/sound/soc/omap/omap-dmic.c
4544 @@ -281,7 +281,7 @@ static int omap_dmic_dai_trigger(struct snd_pcm_substream *substream,
4545 static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
4546 unsigned int freq)
4547 {
4548 - struct clk *parent_clk;
4549 + struct clk *parent_clk, *mux;
4550 char *parent_clk_name;
4551 int ret = 0;
4552
4553 @@ -329,14 +329,21 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
4554 return -ENODEV;
4555 }
4556
4557 + mux = clk_get_parent(dmic->fclk);
4558 + if (IS_ERR(mux)) {
4559 + dev_err(dmic->dev, "can't get fck mux parent\n");
4560 + clk_put(parent_clk);
4561 + return -ENODEV;
4562 + }
4563 +
4564 mutex_lock(&dmic->mutex);
4565 if (dmic->active) {
4566 /* disable clock while reparenting */
4567 pm_runtime_put_sync(dmic->dev);
4568 - ret = clk_set_parent(dmic->fclk, parent_clk);
4569 + ret = clk_set_parent(mux, parent_clk);
4570 pm_runtime_get_sync(dmic->dev);
4571 } else {
4572 - ret = clk_set_parent(dmic->fclk, parent_clk);
4573 + ret = clk_set_parent(mux, parent_clk);
4574 }
4575 mutex_unlock(&dmic->mutex);
4576
4577 @@ -349,6 +356,7 @@ static int omap_dmic_select_fclk(struct omap_dmic *dmic, int clk_id,
4578 dmic->fclk_freq = freq;
4579
4580 err_busy:
4581 + clk_put(mux);
4582 clk_put(parent_clk);
4583
4584 return ret;
4585 diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
4586 index 9038b2e7df73..eaa03acd4686 100644
4587 --- a/sound/usb/mixer_maps.c
4588 +++ b/sound/usb/mixer_maps.c
4589 @@ -353,8 +353,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
4590 /*
4591 * Dell usb dock with ALC4020 codec had a firmware problem where it got
4592 * screwed up when zero volume is passed; just skip it as a workaround
4593 + *
4594 + * Also the extension unit gives an access error, so skip it as well.
4595 */
4596 static const struct usbmix_name_map dell_alc4020_map[] = {
4597 + { 4, NULL }, /* extension unit */
4598 { 16, NULL },
4599 { 19, NULL },
4600 { 0 }
4601 diff --git a/tools/lib/str_error_r.c b/tools/lib/str_error_r.c
4602 index d6d65537b0d9..6aad8308a0ac 100644
4603 --- a/tools/lib/str_error_r.c
4604 +++ b/tools/lib/str_error_r.c
4605 @@ -22,6 +22,6 @@ char *str_error_r(int errnum, char *buf, size_t buflen)
4606 {
4607 int err = strerror_r(errnum, buf, buflen);
4608 if (err)
4609 - snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
4610 + snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, [buf], %zd)=%d", errnum, buflen, err);
4611 return buf;
4612 }
4613 diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
4614 index 53572304843b..a6483b5576fd 100644
4615 --- a/virt/kvm/arm/arm.c
4616 +++ b/virt/kvm/arm/arm.c
4617 @@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
4618 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
4619 static u32 kvm_next_vmid;
4620 static unsigned int kvm_vmid_bits __read_mostly;
4621 -static DEFINE_SPINLOCK(kvm_vmid_lock);
4622 +static DEFINE_RWLOCK(kvm_vmid_lock);
4623
4624 static bool vgic_present;
4625
4626 @@ -470,11 +470,16 @@ static void update_vttbr(struct kvm *kvm)
4627 {
4628 phys_addr_t pgd_phys;
4629 u64 vmid;
4630 + bool new_gen;
4631
4632 - if (!need_new_vmid_gen(kvm))
4633 + read_lock(&kvm_vmid_lock);
4634 + new_gen = need_new_vmid_gen(kvm);
4635 + read_unlock(&kvm_vmid_lock);
4636 +
4637 + if (!new_gen)
4638 return;
4639
4640 - spin_lock(&kvm_vmid_lock);
4641 + write_lock(&kvm_vmid_lock);
4642
4643 /*
4644 * We need to re-check the vmid_gen here to ensure that if another vcpu
4645 @@ -482,7 +487,7 @@ static void update_vttbr(struct kvm *kvm)
4646 * use the same vmid.
4647 */
4648 if (!need_new_vmid_gen(kvm)) {
4649 - spin_unlock(&kvm_vmid_lock);
4650 + write_unlock(&kvm_vmid_lock);
4651 return;
4652 }
4653
4654 @@ -516,7 +521,7 @@ static void update_vttbr(struct kvm *kvm)
4655 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
4656 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;
4657
4658 - spin_unlock(&kvm_vmid_lock);
4659 + write_unlock(&kvm_vmid_lock);
4660 }
4661
4662 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
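
update_vttbr() now takes the VMID lock as a reader for the common "generation still current?" check and only escalates to the write lock on rollover, re-checking under the write lock because another vcpu may have refreshed the generation in the meantime. A userspace sketch of that check-then-escalate pattern with a pthread rwlock; the generation counter is invented for the sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t gen_lock = PTHREAD_RWLOCK_INITIALIZER;
static unsigned long global_gen = 1;

struct vm { unsigned long gen; };

static int needs_new_gen(const struct vm *vm)
{
	return vm->gen != global_gen;
}

static void update_gen(struct vm *vm)
{
	int stale;

	/* fast path: many vcpus can check concurrently under the read lock */
	pthread_rwlock_rdlock(&gen_lock);
	stale = needs_new_gen(vm);
	pthread_rwlock_unlock(&gen_lock);
	if (!stale)
		return;

	/* slow path: take the write lock and re-check, another thread may
	 * have refreshed the generation while we waited */
	pthread_rwlock_wrlock(&gen_lock);
	if (needs_new_gen(vm)) {
		global_gen++;		/* stand-in for allocating a new VMID generation */
		vm->gen = global_gen;
	}
	pthread_rwlock_unlock(&gen_lock);
}

int main(void)
{
	struct vm vm = { .gen = 0 };

	update_gen(&vm);
	printf("vm generation: %lu (global %lu)\n", vm.gen, global_gen);
	return 0;
}
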
4663 diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c
4664 index 6919352cbf15..c4762bef13c6 100644
4665 --- a/virt/kvm/arm/psci.c
4666 +++ b/virt/kvm/arm/psci.c
4667 @@ -18,6 +18,7 @@
4668 #include <linux/arm-smccc.h>
4669 #include <linux/preempt.h>
4670 #include <linux/kvm_host.h>
4671 +#include <linux/uaccess.h>
4672 #include <linux/wait.h>
4673
4674 #include <asm/cputype.h>
4675 @@ -427,3 +428,62 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
4676 smccc_set_retval(vcpu, val, 0, 0, 0);
4677 return 1;
4678 }
4679 +
4680 +int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
4681 +{
4682 + return 1; /* PSCI version */
4683 +}
4684 +
4685 +int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
4686 +{
4687 + if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices))
4688 + return -EFAULT;
4689 +
4690 + return 0;
4691 +}
4692 +
4693 +int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
4694 +{
4695 + if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
4696 + void __user *uaddr = (void __user *)(long)reg->addr;
4697 + u64 val;
4698 +
4699 + val = kvm_psci_version(vcpu, vcpu->kvm);
4700 + if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
4701 + return -EFAULT;
4702 +
4703 + return 0;
4704 + }
4705 +
4706 + return -EINVAL;
4707 +}
4708 +
4709 +int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
4710 +{
4711 + if (reg->id == KVM_REG_ARM_PSCI_VERSION) {
4712 + void __user *uaddr = (void __user *)(long)reg->addr;
4713 + bool wants_02;
4714 + u64 val;
4715 +
4716 + if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
4717 + return -EFAULT;
4718 +
4719 + wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);
4720 +
4721 + switch (val) {
4722 + case KVM_ARM_PSCI_0_1:
4723 + if (wants_02)
4724 + return -EINVAL;
4725 + vcpu->kvm->arch.psci_version = val;
4726 + return 0;
4727 + case KVM_ARM_PSCI_0_2:
4728 + case KVM_ARM_PSCI_1_0:
4729 + if (!wants_02)
4730 + return -EINVAL;
4731 + vcpu->kvm->arch.psci_version = val;
4732 + return 0;
4733 + }
4734 + }
4735 +
4736 + return -EINVAL;
4737 +}
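
kvm_arm_set_fw_reg() above only accepts PSCI versions consistent with how the vcpu was created: v0.1 is legal only without the KVM_ARM_VCPU_PSCI_0_2 feature, v0.2 and v1.0 only with it, and anything else is rejected. A standalone sketch of that validation table; the enumerators are simple stand-ins, not the kernel's KVM_ARM_PSCI_* encodings:

#include <stdio.h>

enum demo_psci_version { PSCI_0_1, PSCI_0_2, PSCI_1_0 };

/* mirrors the switch in kvm_arm_set_fw_reg(): 0 on success, -1 (EINVAL-style) otherwise */
static int set_psci_version(int wants_02, enum demo_psci_version val,
			    enum demo_psci_version *out)
{
	switch (val) {
	case PSCI_0_1:
		if (wants_02)
			return -1;	/* feature bit demands at least v0.2 */
		*out = val;
		return 0;
	case PSCI_0_2:
	case PSCI_1_0:
		if (!wants_02)
			return -1;	/* guest was created without the v0.2 feature */
		*out = val;
		return 0;
	}
	return -1;
}

int main(void)
{
	enum demo_psci_version v;

	printf("wants_02=1, set 1.0: %d\n", set_psci_version(1, PSCI_1_0, &v));
	printf("wants_02=1, set 0.1: %d\n", set_psci_version(1, PSCI_0_1, &v));
	printf("wants_02=0, set 0.1: %d\n", set_psci_version(0, PSCI_0_1, &v));
	return 0;
}

Restricting SET_ONE_REG to versions the vcpu could legitimately observe keeps migration from quietly handing a guest a firmware revision it was never configured for.
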