Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0102-5.4.3-all-fixes.patch



Revision 3483
Mon May 11 14:35:58 2020 UTC by niro
File size: 106388 byte(s)
-linux-5.4.3
1 diff --git a/Makefile b/Makefile
2 index e67f2e95b71d..07998b60d56c 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 2
10 +SUBLEVEL = 3
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
15 index a76f620f7f35..a5f8752f607b 100644
16 --- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
17 +++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
18 @@ -18,8 +18,8 @@
19
20 / {
21 compatible = "samsung,exynos5433";
22 - #address-cells = <1>;
23 - #size-cells = <1>;
24 + #address-cells = <2>;
25 + #size-cells = <2>;
26
27 interrupt-parent = <&gic>;
28
29 @@ -311,7 +311,7 @@
30 compatible = "simple-bus";
31 #address-cells = <1>;
32 #size-cells = <1>;
33 - ranges;
34 + ranges = <0x0 0x0 0x0 0x18000000>;
35
36 chipid@10000000 {
37 compatible = "samsung,exynos4210-chipid";
38 diff --git a/arch/arm64/boot/dts/exynos/exynos7.dtsi b/arch/arm64/boot/dts/exynos/exynos7.dtsi
39 index bcb9d8cee267..0821489a874d 100644
40 --- a/arch/arm64/boot/dts/exynos/exynos7.dtsi
41 +++ b/arch/arm64/boot/dts/exynos/exynos7.dtsi
42 @@ -12,8 +12,8 @@
43 / {
44 compatible = "samsung,exynos7";
45 interrupt-parent = <&gic>;
46 - #address-cells = <1>;
47 - #size-cells = <1>;
48 + #address-cells = <2>;
49 + #size-cells = <2>;
50
51 aliases {
52 pinctrl0 = &pinctrl_alive;
53 @@ -98,7 +98,7 @@
54 compatible = "simple-bus";
55 #address-cells = <1>;
56 #size-cells = <1>;
57 - ranges;
58 + ranges = <0 0 0 0x18000000>;
59
60 chipid@10000000 {
61 compatible = "samsung,exynos4210-chipid";
62 diff --git a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
63 index 4c38426a6969..02909a48dfcd 100644
64 --- a/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
65 +++ b/arch/arm64/boot/dts/nvidia/tegra194-p2888.dtsi
66 @@ -309,9 +309,8 @@
67 regulator-name = "VDD_12V";
68 regulator-min-microvolt = <1200000>;
69 regulator-max-microvolt = <1200000>;
70 - gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_LOW>;
71 + gpio = <&gpio TEGRA194_MAIN_GPIO(A, 1) GPIO_ACTIVE_HIGH>;
72 regulator-boot-on;
73 - enable-active-low;
74 };
75 };
76 };
77 diff --git a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
78 index a7dc319214a4..b0095072bc28 100644
79 --- a/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
80 +++ b/arch/arm64/boot/dts/nvidia/tegra210-p2597.dtsi
81 @@ -1612,7 +1612,7 @@
82 regulator-name = "VDD_HDMI_5V0";
83 regulator-min-microvolt = <5000000>;
84 regulator-max-microvolt = <5000000>;
85 - gpio = <&exp1 12 GPIO_ACTIVE_LOW>;
86 + gpio = <&exp1 12 GPIO_ACTIVE_HIGH>;
87 enable-active-high;
88 vin-supply = <&vdd_5v0_sys>;
89 };
90 diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
91 index 127712b0b970..32fc8061aa76 100644
92 --- a/arch/arm64/include/asm/uaccess.h
93 +++ b/arch/arm64/include/asm/uaccess.h
94 @@ -62,8 +62,13 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
95 {
96 unsigned long ret, limit = current_thread_info()->addr_limit;
97
98 + /*
99 + * Asynchronous I/O running in a kernel thread does not have the
100 + * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
101 + * the user address before checking.
102 + */
103 if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
104 - test_thread_flag(TIF_TAGGED_ADDR))
105 + (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
106 addr = untagged_addr(addr);
107
108 __chk_user_ptr(addr);
109 diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
110 index a3f9c665bb5b..baa740815b3c 100644
111 --- a/arch/powerpc/kvm/book3s_xive.c
112 +++ b/arch/powerpc/kvm/book3s_xive.c
113 @@ -2005,6 +2005,10 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
114
115 pr_devel("Creating xive for partition\n");
116
117 + /* Already there ? */
118 + if (kvm->arch.xive)
119 + return -EEXIST;
120 +
121 xive = kvmppc_xive_get_device(kvm, type);
122 if (!xive)
123 return -ENOMEM;
124 @@ -2014,12 +2018,6 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
125 xive->kvm = kvm;
126 mutex_init(&xive->lock);
127
128 - /* Already there ? */
129 - if (kvm->arch.xive)
130 - ret = -EEXIST;
131 - else
132 - kvm->arch.xive = xive;
133 -
134 /* We use the default queue size set by the host */
135 xive->q_order = xive_native_default_eq_shift();
136 if (xive->q_order < PAGE_SHIFT)
137 @@ -2039,6 +2037,7 @@ static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
138 if (ret)
139 return ret;
140
141 + kvm->arch.xive = xive;
142 return 0;
143 }
144
145 diff --git a/arch/powerpc/kvm/book3s_xive_native.c b/arch/powerpc/kvm/book3s_xive_native.c
146 index 78b906ffa0d2..5a3373e06e60 100644
147 --- a/arch/powerpc/kvm/book3s_xive_native.c
148 +++ b/arch/powerpc/kvm/book3s_xive_native.c
149 @@ -50,6 +50,24 @@ static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
150 }
151 }
152
153 +static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
154 + u8 prio, __be32 *qpage,
155 + u32 order, bool can_escalate)
156 +{
157 + int rc;
158 + __be32 *qpage_prev = q->qpage;
159 +
160 + rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
161 + can_escalate);
162 + if (rc)
163 + return rc;
164 +
165 + if (qpage_prev)
166 + put_page(virt_to_page(qpage_prev));
167 +
168 + return rc;
169 +}
170 +
171 void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
172 {
173 struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
174 @@ -582,19 +600,14 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
175 q->guest_qaddr = 0;
176 q->guest_qshift = 0;
177
178 - rc = xive_native_configure_queue(xc->vp_id, q, priority,
179 - NULL, 0, true);
180 + rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
181 + NULL, 0, true);
182 if (rc) {
183 pr_err("Failed to reset queue %d for VCPU %d: %d\n",
184 priority, xc->server_num, rc);
185 return rc;
186 }
187
188 - if (q->qpage) {
189 - put_page(virt_to_page(q->qpage));
190 - q->qpage = NULL;
191 - }
192 -
193 return 0;
194 }
195
196 @@ -624,12 +637,6 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
197
198 srcu_idx = srcu_read_lock(&kvm->srcu);
199 gfn = gpa_to_gfn(kvm_eq.qaddr);
200 - page = gfn_to_page(kvm, gfn);
201 - if (is_error_page(page)) {
202 - srcu_read_unlock(&kvm->srcu, srcu_idx);
203 - pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
204 - return -EINVAL;
205 - }
206
207 page_size = kvm_host_page_size(kvm, gfn);
208 if (1ull << kvm_eq.qshift > page_size) {
209 @@ -638,6 +645,13 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
210 return -EINVAL;
211 }
212
213 + page = gfn_to_page(kvm, gfn);
214 + if (is_error_page(page)) {
215 + srcu_read_unlock(&kvm->srcu, srcu_idx);
216 + pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
217 + return -EINVAL;
218 + }
219 +
220 qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
221 srcu_read_unlock(&kvm->srcu, srcu_idx);
222
223 @@ -653,8 +667,8 @@ static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
224 * OPAL level because the use of END ESBs is not supported by
225 * Linux.
226 */
227 - rc = xive_native_configure_queue(xc->vp_id, q, priority,
228 - (__be32 *) qaddr, kvm_eq.qshift, true);
229 + rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
230 + (__be32 *) qaddr, kvm_eq.qshift, true);
231 if (rc) {
232 pr_err("Failed to configure queue %d for VCPU %d: %d\n",
233 priority, xc->server_num, rc);
234 @@ -1081,7 +1095,6 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
235 dev->private = xive;
236 xive->dev = dev;
237 xive->kvm = kvm;
238 - kvm->arch.xive = xive;
239 mutex_init(&xive->mapping_lock);
240 mutex_init(&xive->lock);
241
242 @@ -1102,6 +1115,7 @@ static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
243 if (ret)
244 return ret;
245
246 + kvm->arch.xive = xive;
247 return 0;
248 }
249
250 diff --git a/arch/sparc/include/asm/io_64.h b/arch/sparc/include/asm/io_64.h
251 index 688911051b44..f4afa301954a 100644
252 --- a/arch/sparc/include/asm/io_64.h
253 +++ b/arch/sparc/include/asm/io_64.h
254 @@ -407,6 +407,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
255 }
256
257 #define ioremap_nocache(X,Y) ioremap((X),(Y))
258 +#define ioremap_uc(X,Y) ioremap((X),(Y))
259 #define ioremap_wc(X,Y) ioremap((X),(Y))
260 #define ioremap_wt(X,Y) ioremap((X),(Y))
261
262 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
263 index f68c0c753c38..53dbcca9af09 100644
264 --- a/arch/x86/kvm/cpuid.c
265 +++ b/arch/x86/kvm/cpuid.c
266 @@ -504,7 +504,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 function,
267
268 r = -E2BIG;
269
270 - if (*nent >= maxnent)
271 + if (WARN_ON(*nent >= maxnent))
272 goto out;
273
274 do_host_cpuid(entry, function, 0);
275 @@ -810,6 +810,9 @@ out:
276 static int do_cpuid_func(struct kvm_cpuid_entry2 *entry, u32 func,
277 int *nent, int maxnent, unsigned int type)
278 {
279 + if (*nent >= maxnent)
280 + return -E2BIG;
281 +
282 if (type == KVM_GET_EMULATED_CPUID)
283 return __do_cpuid_func_emulated(entry, func, nent, maxnent);
284
285 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
286 index 0e7c9301fe86..d0523741fb03 100644
287 --- a/arch/x86/kvm/vmx/nested.c
288 +++ b/arch/x86/kvm/vmx/nested.c
289 @@ -2418,6 +2418,16 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
290 entry_failure_code))
291 return -EINVAL;
292
293 + /*
294 + * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
295 + * on nested VM-Exit, which can occur without actually running L2 and
296 + * thus without hitting vmx_set_cr3(), e.g. if L1 is entering L2 with
297 + * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
298 + * transition to HLT instead of running L2.
299 + */
300 + if (enable_ept)
301 + vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
302 +
303 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
304 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
305 is_pae_paging(vcpu)) {
306 diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
307 index 04a8212704c1..f09a213fd5cb 100644
308 --- a/arch/x86/kvm/vmx/vmx.c
309 +++ b/arch/x86/kvm/vmx/vmx.c
310 @@ -2995,6 +2995,7 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
311 void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
312 {
313 struct kvm *kvm = vcpu->kvm;
314 + bool update_guest_cr3 = true;
315 unsigned long guest_cr3;
316 u64 eptp;
317
318 @@ -3011,15 +3012,18 @@ void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
319 spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
320 }
321
322 - if (enable_unrestricted_guest || is_paging(vcpu) ||
323 - is_guest_mode(vcpu))
324 + /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
325 + if (is_guest_mode(vcpu))
326 + update_guest_cr3 = false;
327 + else if (enable_unrestricted_guest || is_paging(vcpu))
328 guest_cr3 = kvm_read_cr3(vcpu);
329 else
330 guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
331 ept_load_pdptrs(vcpu);
332 }
333
334 - vmcs_writel(GUEST_CR3, guest_cr3);
335 + if (update_guest_cr3)
336 + vmcs_writel(GUEST_CR3, guest_cr3);
337 }
338
339 int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
340 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
341 index 5d530521f11d..8d82ec0482fc 100644
342 --- a/arch/x86/kvm/x86.c
343 +++ b/arch/x86/kvm/x86.c
344 @@ -300,13 +300,14 @@ int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
345 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
346 int err;
347
348 - if (((value ^ smsr->values[slot].curr) & mask) == 0)
349 + value = (value & mask) | (smsr->values[slot].host & ~mask);
350 + if (value == smsr->values[slot].curr)
351 return 0;
352 - smsr->values[slot].curr = value;
353 err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
354 if (err)
355 return 1;
356
357 + smsr->values[slot].curr = value;
358 if (!smsr->registered) {
359 smsr->urn.on_user_return = kvm_on_user_return;
360 user_return_notifier_register(&smsr->urn);
361 @@ -1327,10 +1328,15 @@ static u64 kvm_get_arch_capabilities(void)
362 * If TSX is disabled on the system, guests are also mitigated against
363 * TAA and clear CPU buffer mitigation is not required for guests.
364 */
365 - if (boot_cpu_has_bug(X86_BUG_TAA) && boot_cpu_has(X86_FEATURE_RTM) &&
366 - (data & ARCH_CAP_TSX_CTRL_MSR))
367 + if (!boot_cpu_has(X86_FEATURE_RTM))
368 + data &= ~ARCH_CAP_TAA_NO;
369 + else if (!boot_cpu_has_bug(X86_BUG_TAA))
370 + data |= ARCH_CAP_TAA_NO;
371 + else if (data & ARCH_CAP_TSX_CTRL_MSR)
372 data &= ~ARCH_CAP_MDS_NO;
373
374 + /* KVM does not emulate MSR_IA32_TSX_CTRL. */
375 + data &= ~ARCH_CAP_TSX_CTRL_MSR;
376 return data;
377 }
378
379 @@ -4421,6 +4427,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
380 case KVM_SET_NESTED_STATE: {
381 struct kvm_nested_state __user *user_kvm_nested_state = argp;
382 struct kvm_nested_state kvm_state;
383 + int idx;
384
385 r = -EINVAL;
386 if (!kvm_x86_ops->set_nested_state)
387 @@ -4444,7 +4451,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
388 && !(kvm_state.flags & KVM_STATE_NESTED_GUEST_MODE))
389 break;
390
391 + idx = srcu_read_lock(&vcpu->kvm->srcu);
392 r = kvm_x86_ops->set_nested_state(vcpu, user_kvm_nested_state, &kvm_state);
393 + srcu_read_unlock(&vcpu->kvm->srcu, idx);
394 break;
395 }
396 case KVM_GET_SUPPORTED_HV_CPUID: {
397 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
398 index 9ceacd1156db..304d31d8cbbc 100644
399 --- a/arch/x86/mm/fault.c
400 +++ b/arch/x86/mm/fault.c
401 @@ -197,7 +197,7 @@ void vmalloc_sync_all(void)
402 return;
403
404 for (address = VMALLOC_START & PMD_MASK;
405 - address >= TASK_SIZE_MAX && address < FIXADDR_TOP;
406 + address >= TASK_SIZE_MAX && address < VMALLOC_END;
407 address += PMD_SIZE) {
408 struct page *page;
409
410 diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
411 index 527e69b12002..e723559c386a 100644
412 --- a/arch/x86/pci/fixup.c
413 +++ b/arch/x86/pci/fixup.c
414 @@ -588,6 +588,17 @@ static void pci_fixup_amd_ehci_pme(struct pci_dev *dev)
415 }
416 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7808, pci_fixup_amd_ehci_pme);
417
418 +/*
419 + * Device [1022:7914]
420 + * When in D0, PME# doesn't get asserted when plugging USB 2.0 device.
421 + */
422 +static void pci_fixup_amd_fch_xhci_pme(struct pci_dev *dev)
423 +{
424 + dev_info(&dev->dev, "PME# does not work under D0, disabling it\n");
425 + dev->pme_support &= ~(PCI_PM_CAP_PME_D0 >> PCI_PM_CAP_PME_SHIFT);
426 +}
427 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x7914, pci_fixup_amd_fch_xhci_pme);
428 +
429 /*
430 * Apple MacBook Pro: Avoid [mem 0x7fa00000-0x7fbfffff]
431 *
432 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
433 index 879cf23f7489..0dceaabc6321 100644
434 --- a/crypto/af_alg.c
435 +++ b/crypto/af_alg.c
436 @@ -1043,7 +1043,7 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
437 af_alg_free_resources(areq);
438 sock_put(sk);
439
440 - iocb->ki_complete(iocb, err ? err : resultlen, 0);
441 + iocb->ki_complete(iocb, err ? err : (int)resultlen, 0);
442 }
443 EXPORT_SYMBOL_GPL(af_alg_async_cb);
444
445 diff --git a/crypto/crypto_user_base.c b/crypto/crypto_user_base.c
446 index 910e0b46012e..b785c476de67 100644
447 --- a/crypto/crypto_user_base.c
448 +++ b/crypto/crypto_user_base.c
449 @@ -213,8 +213,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
450 drop_alg:
451 crypto_mod_put(alg);
452
453 - if (err)
454 + if (err) {
455 + kfree_skb(skb);
456 return err;
457 + }
458
459 return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
460 }
461 diff --git a/crypto/crypto_user_stat.c b/crypto/crypto_user_stat.c
462 index 8bad88413de1..1be95432fa23 100644
463 --- a/crypto/crypto_user_stat.c
464 +++ b/crypto/crypto_user_stat.c
465 @@ -328,8 +328,10 @@ int crypto_reportstat(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
466 drop_alg:
467 crypto_mod_put(alg);
468
469 - if (err)
470 + if (err) {
471 + kfree_skb(skb);
472 return err;
473 + }
474
475 return nlmsg_unicast(net->crypto_nlsk, skb, NETLINK_CB(in_skb).portid);
476 }
477 diff --git a/crypto/ecc.c b/crypto/ecc.c
478 index dfe114bc0c4a..8ee787723c5c 100644
479 --- a/crypto/ecc.c
480 +++ b/crypto/ecc.c
481 @@ -1284,10 +1284,11 @@ EXPORT_SYMBOL(ecc_point_mult_shamir);
482 static inline void ecc_swap_digits(const u64 *in, u64 *out,
483 unsigned int ndigits)
484 {
485 + const __be64 *src = (__force __be64 *)in;
486 int i;
487
488 for (i = 0; i < ndigits; i++)
489 - out[i] = __swab64(in[ndigits - 1 - i]);
490 + out[i] = be64_to_cpu(src[ndigits - 1 - i]);
491 }
492
493 static int __ecc_is_key_valid(const struct ecc_curve *curve,
494 diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
495 index eb76a823fbb2..7067d5542a82 100644
496 --- a/drivers/android/binder_alloc.c
497 +++ b/drivers/android/binder_alloc.c
498 @@ -277,8 +277,7 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
499 return 0;
500
501 free_range:
502 - for (page_addr = end - PAGE_SIZE; page_addr >= start;
503 - page_addr -= PAGE_SIZE) {
504 + for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
505 bool ret;
506 size_t index;
507
508 @@ -291,6 +290,8 @@ free_range:
509 WARN_ON(!ret);
510
511 trace_binder_free_lru_end(alloc, index);
512 + if (page_addr == start)
513 + break;
514 continue;
515
516 err_vm_insert_page_failed:
517 @@ -298,7 +299,8 @@ err_vm_insert_page_failed:
518 page->page_ptr = NULL;
519 err_alloc_page_failed:
520 err_page_ptr_cleared:
521 - ;
522 + if (page_addr == start)
523 + break;
524 }
525 err_no_vma:
526 if (mm) {
527 @@ -681,17 +683,17 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
528 struct binder_buffer *buffer;
529
530 mutex_lock(&binder_alloc_mmap_lock);
531 - if (alloc->buffer) {
532 + if (alloc->buffer_size) {
533 ret = -EBUSY;
534 failure_string = "already mapped";
535 goto err_already_mapped;
536 }
537 + alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
538 + SZ_4M);
539 + mutex_unlock(&binder_alloc_mmap_lock);
540
541 alloc->buffer = (void __user *)vma->vm_start;
542 - mutex_unlock(&binder_alloc_mmap_lock);
543
544 - alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
545 - SZ_4M);
546 alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
547 sizeof(alloc->pages[0]),
548 GFP_KERNEL);
549 @@ -722,8 +724,9 @@ err_alloc_buf_struct_failed:
550 kfree(alloc->pages);
551 alloc->pages = NULL;
552 err_alloc_pages_failed:
553 - mutex_lock(&binder_alloc_mmap_lock);
554 alloc->buffer = NULL;
555 + mutex_lock(&binder_alloc_mmap_lock);
556 + alloc->buffer_size = 0;
557 err_already_mapped:
558 mutex_unlock(&binder_alloc_mmap_lock);
559 binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
560 @@ -841,14 +844,20 @@ void binder_alloc_print_pages(struct seq_file *m,
561 int free = 0;
562
563 mutex_lock(&alloc->mutex);
564 - for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
565 - page = &alloc->pages[i];
566 - if (!page->page_ptr)
567 - free++;
568 - else if (list_empty(&page->lru))
569 - active++;
570 - else
571 - lru++;
572 + /*
573 + * Make sure the binder_alloc is fully initialized, otherwise we might
574 + * read inconsistent state.
575 + */
576 + if (binder_alloc_get_vma(alloc) != NULL) {
577 + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
578 + page = &alloc->pages[i];
579 + if (!page->page_ptr)
580 + free++;
581 + else if (list_empty(&page->lru))
582 + active++;
583 + else
584 + lru++;
585 + }
586 }
587 mutex_unlock(&alloc->mutex);
588 seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
589 diff --git a/drivers/char/lp.c b/drivers/char/lp.c
590 index 7c9269e3477a..bd95aba1f9fe 100644
591 --- a/drivers/char/lp.c
592 +++ b/drivers/char/lp.c
593 @@ -713,6 +713,10 @@ static int lp_set_timeout64(unsigned int minor, void __user *arg)
594 if (copy_from_user(karg, arg, sizeof(karg)))
595 return -EFAULT;
596
597 + /* sparc64 suseconds_t is 32-bit only */
598 + if (IS_ENABLED(CONFIG_SPARC64) && !in_compat_syscall())
599 + karg[1] >>= 32;
600 +
601 return lp_set_timeout(minor, karg[0], karg[1]);
602 }
603
604 diff --git a/drivers/cpufreq/imx-cpufreq-dt.c b/drivers/cpufreq/imx-cpufreq-dt.c
605 index 35db14cf3102..85a6efd6b68f 100644
606 --- a/drivers/cpufreq/imx-cpufreq-dt.c
607 +++ b/drivers/cpufreq/imx-cpufreq-dt.c
608 @@ -44,19 +44,19 @@ static int imx_cpufreq_dt_probe(struct platform_device *pdev)
609 mkt_segment = (cell_value & OCOTP_CFG3_MKT_SEGMENT_MASK) >> OCOTP_CFG3_MKT_SEGMENT_SHIFT;
610
611 /*
612 - * Early samples without fuses written report "0 0" which means
613 - * consumer segment and minimum speed grading.
614 - *
615 - * According to datasheet minimum speed grading is not supported for
616 - * consumer parts so clamp to 1 to avoid warning for "no OPPs"
617 + * Early samples without fuses written report "0 0" which may NOT
618 + * match any OPP defined in DT. So clamp to minimum OPP defined in
619 + * DT to avoid warning for "no OPPs".
620 *
621 * Applies to i.MX8M series SoCs.
622 */
623 - if (mkt_segment == 0 && speed_grade == 0 && (
624 - of_machine_is_compatible("fsl,imx8mm") ||
625 - of_machine_is_compatible("fsl,imx8mn") ||
626 - of_machine_is_compatible("fsl,imx8mq")))
627 - speed_grade = 1;
628 + if (mkt_segment == 0 && speed_grade == 0) {
629 + if (of_machine_is_compatible("fsl,imx8mm") ||
630 + of_machine_is_compatible("fsl,imx8mq"))
631 + speed_grade = 1;
632 + if (of_machine_is_compatible("fsl,imx8mn"))
633 + speed_grade = 0xb;
634 + }
635
636 supported_hw[0] = BIT(speed_grade);
637 supported_hw[1] = BIT(mkt_segment);
638 diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
639 index de5e9352e920..7d6b695c4ab3 100644
640 --- a/drivers/crypto/amcc/crypto4xx_core.c
641 +++ b/drivers/crypto/amcc/crypto4xx_core.c
642 @@ -365,12 +365,8 @@ static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
643 dma_alloc_coherent(dev->core_dev->device,
644 PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
645 &dev->scatter_buffer_pa, GFP_ATOMIC);
646 - if (!dev->scatter_buffer_va) {
647 - dma_free_coherent(dev->core_dev->device,
648 - sizeof(struct ce_sd) * PPC4XX_NUM_SD,
649 - dev->sdr, dev->sdr_pa);
650 + if (!dev->scatter_buffer_va)
651 return -ENOMEM;
652 - }
653
654 for (i = 0; i < PPC4XX_NUM_SD; i++) {
655 dev->sdr[i].ptr = dev->scatter_buffer_pa +
656 diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
657 index 026f193556f9..00920a2b95ce 100644
658 --- a/drivers/crypto/atmel-aes.c
659 +++ b/drivers/crypto/atmel-aes.c
660 @@ -490,6 +490,29 @@ static inline bool atmel_aes_is_encrypt(const struct atmel_aes_dev *dd)
661 static void atmel_aes_authenc_complete(struct atmel_aes_dev *dd, int err);
662 #endif
663
664 +static void atmel_aes_set_iv_as_last_ciphertext_block(struct atmel_aes_dev *dd)
665 +{
666 + struct ablkcipher_request *req = ablkcipher_request_cast(dd->areq);
667 + struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
668 + struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
669 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
670 +
671 + if (req->nbytes < ivsize)
672 + return;
673 +
674 + if (rctx->mode & AES_FLAGS_ENCRYPT) {
675 + scatterwalk_map_and_copy(req->info, req->dst,
676 + req->nbytes - ivsize, ivsize, 0);
677 + } else {
678 + if (req->src == req->dst)
679 + memcpy(req->info, rctx->lastc, ivsize);
680 + else
681 + scatterwalk_map_and_copy(req->info, req->src,
682 + req->nbytes - ivsize,
683 + ivsize, 0);
684 + }
685 +}
686 +
687 static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
688 {
689 #ifdef CONFIG_CRYPTO_DEV_ATMEL_AUTHENC
690 @@ -500,26 +523,8 @@ static inline int atmel_aes_complete(struct atmel_aes_dev *dd, int err)
691 clk_disable(dd->iclk);
692 dd->flags &= ~AES_FLAGS_BUSY;
693
694 - if (!dd->ctx->is_aead) {
695 - struct ablkcipher_request *req =
696 - ablkcipher_request_cast(dd->areq);
697 - struct atmel_aes_reqctx *rctx = ablkcipher_request_ctx(req);
698 - struct crypto_ablkcipher *ablkcipher =
699 - crypto_ablkcipher_reqtfm(req);
700 - int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
701 -
702 - if (rctx->mode & AES_FLAGS_ENCRYPT) {
703 - scatterwalk_map_and_copy(req->info, req->dst,
704 - req->nbytes - ivsize, ivsize, 0);
705 - } else {
706 - if (req->src == req->dst) {
707 - memcpy(req->info, rctx->lastc, ivsize);
708 - } else {
709 - scatterwalk_map_and_copy(req->info, req->src,
710 - req->nbytes - ivsize, ivsize, 0);
711 - }
712 - }
713 - }
714 + if (!dd->ctx->is_aead)
715 + atmel_aes_set_iv_as_last_ciphertext_block(dd);
716
717 if (dd->is_async)
718 dd->areq->complete(dd->areq, err);
719 @@ -1125,10 +1130,12 @@ static int atmel_aes_crypt(struct ablkcipher_request *req, unsigned long mode)
720 rctx->mode = mode;
721
722 if (!(mode & AES_FLAGS_ENCRYPT) && (req->src == req->dst)) {
723 - int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
724 + unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
725
726 - scatterwalk_map_and_copy(rctx->lastc, req->src,
727 - (req->nbytes - ivsize), ivsize, 0);
728 + if (req->nbytes >= ivsize)
729 + scatterwalk_map_and_copy(rctx->lastc, req->src,
730 + req->nbytes - ivsize,
731 + ivsize, 0);
732 }
733
734 return atmel_aes_handle_queue(dd, &req->base);
735 diff --git a/drivers/crypto/ccp/ccp-dmaengine.c b/drivers/crypto/ccp/ccp-dmaengine.c
736 index a54f9367a580..0770a83bf1a5 100644
737 --- a/drivers/crypto/ccp/ccp-dmaengine.c
738 +++ b/drivers/crypto/ccp/ccp-dmaengine.c
739 @@ -342,6 +342,7 @@ static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
740 desc->tx_desc.flags = flags;
741 desc->tx_desc.tx_submit = ccp_tx_submit;
742 desc->ccp = chan->ccp;
743 + INIT_LIST_HEAD(&desc->entry);
744 INIT_LIST_HEAD(&desc->pending);
745 INIT_LIST_HEAD(&desc->active);
746 desc->status = DMA_IN_PROGRESS;
747 diff --git a/drivers/crypto/geode-aes.c b/drivers/crypto/geode-aes.c
748 index d81a1297cb9e..940485112d15 100644
749 --- a/drivers/crypto/geode-aes.c
750 +++ b/drivers/crypto/geode-aes.c
751 @@ -10,6 +10,7 @@
752 #include <linux/spinlock.h>
753 #include <crypto/algapi.h>
754 #include <crypto/aes.h>
755 +#include <crypto/skcipher.h>
756
757 #include <linux/io.h>
758 #include <linux/delay.h>
759 @@ -166,13 +167,15 @@ static int geode_setkey_blk(struct crypto_tfm *tfm, const u8 *key,
760 /*
761 * The requested key size is not supported by HW, do a fallback
762 */
763 - op->fallback.blk->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
764 - op->fallback.blk->base.crt_flags |= (tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
765 + crypto_sync_skcipher_clear_flags(op->fallback.blk, CRYPTO_TFM_REQ_MASK);
766 + crypto_sync_skcipher_set_flags(op->fallback.blk,
767 + tfm->crt_flags & CRYPTO_TFM_REQ_MASK);
768
769 - ret = crypto_blkcipher_setkey(op->fallback.blk, key, len);
770 + ret = crypto_sync_skcipher_setkey(op->fallback.blk, key, len);
771 if (ret) {
772 tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
773 - tfm->crt_flags |= (op->fallback.blk->base.crt_flags & CRYPTO_TFM_RES_MASK);
774 + tfm->crt_flags |= crypto_sync_skcipher_get_flags(op->fallback.blk) &
775 + CRYPTO_TFM_RES_MASK;
776 }
777 return ret;
778 }
779 @@ -181,33 +184,28 @@ static int fallback_blk_dec(struct blkcipher_desc *desc,
780 struct scatterlist *dst, struct scatterlist *src,
781 unsigned int nbytes)
782 {
783 - unsigned int ret;
784 - struct crypto_blkcipher *tfm;
785 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
786 + SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
787
788 - tfm = desc->tfm;
789 - desc->tfm = op->fallback.blk;
790 -
791 - ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes);
792 + skcipher_request_set_sync_tfm(req, op->fallback.blk);
793 + skcipher_request_set_callback(req, 0, NULL, NULL);
794 + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
795
796 - desc->tfm = tfm;
797 - return ret;
798 + return crypto_skcipher_decrypt(req);
799 }
800 +
801 static int fallback_blk_enc(struct blkcipher_desc *desc,
802 struct scatterlist *dst, struct scatterlist *src,
803 unsigned int nbytes)
804 {
805 - unsigned int ret;
806 - struct crypto_blkcipher *tfm;
807 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm);
808 + SYNC_SKCIPHER_REQUEST_ON_STACK(req, op->fallback.blk);
809
810 - tfm = desc->tfm;
811 - desc->tfm = op->fallback.blk;
812 -
813 - ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
814 + skcipher_request_set_sync_tfm(req, op->fallback.blk);
815 + skcipher_request_set_callback(req, 0, NULL, NULL);
816 + skcipher_request_set_crypt(req, src, dst, nbytes, desc->info);
817
818 - desc->tfm = tfm;
819 - return ret;
820 + return crypto_skcipher_encrypt(req);
821 }
822
823 static void
824 @@ -307,6 +305,9 @@ geode_cbc_decrypt(struct blkcipher_desc *desc,
825 struct blkcipher_walk walk;
826 int err, ret;
827
828 + if (nbytes % AES_BLOCK_SIZE)
829 + return -EINVAL;
830 +
831 if (unlikely(op->keylen != AES_KEYSIZE_128))
832 return fallback_blk_dec(desc, dst, src, nbytes);
833
834 @@ -339,6 +340,9 @@ geode_cbc_encrypt(struct blkcipher_desc *desc,
835 struct blkcipher_walk walk;
836 int err, ret;
837
838 + if (nbytes % AES_BLOCK_SIZE)
839 + return -EINVAL;
840 +
841 if (unlikely(op->keylen != AES_KEYSIZE_128))
842 return fallback_blk_enc(desc, dst, src, nbytes);
843
844 @@ -366,9 +370,8 @@ static int fallback_init_blk(struct crypto_tfm *tfm)
845 const char *name = crypto_tfm_alg_name(tfm);
846 struct geode_aes_op *op = crypto_tfm_ctx(tfm);
847
848 - op->fallback.blk = crypto_alloc_blkcipher(name, 0,
849 - CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
850 -
851 + op->fallback.blk = crypto_alloc_sync_skcipher(name, 0,
852 + CRYPTO_ALG_NEED_FALLBACK);
853 if (IS_ERR(op->fallback.blk)) {
854 printk(KERN_ERR "Error allocating fallback algo %s\n", name);
855 return PTR_ERR(op->fallback.blk);
856 @@ -381,7 +384,7 @@ static void fallback_exit_blk(struct crypto_tfm *tfm)
857 {
858 struct geode_aes_op *op = crypto_tfm_ctx(tfm);
859
860 - crypto_free_blkcipher(op->fallback.blk);
861 + crypto_free_sync_skcipher(op->fallback.blk);
862 op->fallback.blk = NULL;
863 }
864
865 @@ -420,6 +423,9 @@ geode_ecb_decrypt(struct blkcipher_desc *desc,
866 struct blkcipher_walk walk;
867 int err, ret;
868
869 + if (nbytes % AES_BLOCK_SIZE)
870 + return -EINVAL;
871 +
872 if (unlikely(op->keylen != AES_KEYSIZE_128))
873 return fallback_blk_dec(desc, dst, src, nbytes);
874
875 @@ -450,6 +456,9 @@ geode_ecb_encrypt(struct blkcipher_desc *desc,
876 struct blkcipher_walk walk;
877 int err, ret;
878
879 + if (nbytes % AES_BLOCK_SIZE)
880 + return -EINVAL;
881 +
882 if (unlikely(op->keylen != AES_KEYSIZE_128))
883 return fallback_blk_enc(desc, dst, src, nbytes);
884
885 diff --git a/drivers/crypto/geode-aes.h b/drivers/crypto/geode-aes.h
886 index 5c6e131a8f9d..f8a86898ac22 100644
887 --- a/drivers/crypto/geode-aes.h
888 +++ b/drivers/crypto/geode-aes.h
889 @@ -60,7 +60,7 @@ struct geode_aes_op {
890 u8 *iv;
891
892 union {
893 - struct crypto_blkcipher *blk;
894 + struct crypto_sync_skcipher *blk;
895 struct crypto_cipher *cip;
896 } fallback;
897 u32 keylen;
898 diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c
899 index 0bb62857ffb2..f6f6a688c009 100644
900 --- a/drivers/edac/ghes_edac.c
901 +++ b/drivers/edac/ghes_edac.c
902 @@ -26,9 +26,18 @@ struct ghes_edac_pvt {
903 char msg[80];
904 };
905
906 -static atomic_t ghes_init = ATOMIC_INIT(0);
907 +static refcount_t ghes_refcount = REFCOUNT_INIT(0);
908 +
909 +/*
910 + * Access to ghes_pvt must be protected by ghes_lock. The spinlock
911 + * also provides the necessary (implicit) memory barrier for the SMP
912 + * case to make the pointer visible on another CPU.
913 + */
914 static struct ghes_edac_pvt *ghes_pvt;
915
916 +/* GHES registration mutex */
917 +static DEFINE_MUTEX(ghes_reg_mutex);
918 +
919 /*
920 * Sync with other, potentially concurrent callers of
921 * ghes_edac_report_mem_error(). We don't know what the
922 @@ -79,9 +88,8 @@ static void ghes_edac_count_dimms(const struct dmi_header *dh, void *arg)
923 (*num_dimm)++;
924 }
925
926 -static int get_dimm_smbios_index(u16 handle)
927 +static int get_dimm_smbios_index(struct mem_ctl_info *mci, u16 handle)
928 {
929 - struct mem_ctl_info *mci = ghes_pvt->mci;
930 int i;
931
932 for (i = 0; i < mci->tot_dimms; i++) {
933 @@ -198,14 +206,11 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
934 enum hw_event_mc_err_type type;
935 struct edac_raw_error_desc *e;
936 struct mem_ctl_info *mci;
937 - struct ghes_edac_pvt *pvt = ghes_pvt;
938 + struct ghes_edac_pvt *pvt;
939 unsigned long flags;
940 char *p;
941 u8 grain_bits;
942
943 - if (!pvt)
944 - return;
945 -
946 /*
947 * We can do the locking below because GHES defers error processing
948 * from NMI to IRQ context. Whenever that changes, we'd at least
949 @@ -216,6 +221,10 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
950
951 spin_lock_irqsave(&ghes_lock, flags);
952
953 + pvt = ghes_pvt;
954 + if (!pvt)
955 + goto unlock;
956 +
957 mci = pvt->mci;
958 e = &mci->error_desc;
959
960 @@ -348,7 +357,7 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
961 p += sprintf(p, "DIMM DMI handle: 0x%.4x ",
962 mem_err->mem_dev_handle);
963
964 - index = get_dimm_smbios_index(mem_err->mem_dev_handle);
965 + index = get_dimm_smbios_index(mci, mem_err->mem_dev_handle);
966 if (index >= 0) {
967 e->top_layer = index;
968 e->enable_per_layer_report = true;
969 @@ -443,6 +452,8 @@ void ghes_edac_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
970 grain_bits, e->syndrome, pvt->detail_location);
971
972 edac_raw_mc_handle_error(type, mci, e);
973 +
974 +unlock:
975 spin_unlock_irqrestore(&ghes_lock, flags);
976 }
977
978 @@ -457,10 +468,12 @@ static struct acpi_platform_list plat_list[] = {
979 int ghes_edac_register(struct ghes *ghes, struct device *dev)
980 {
981 bool fake = false;
982 - int rc, num_dimm = 0;
983 + int rc = 0, num_dimm = 0;
984 struct mem_ctl_info *mci;
985 + struct ghes_edac_pvt *pvt;
986 struct edac_mc_layer layers[1];
987 struct ghes_edac_dimm_fill dimm_fill;
988 + unsigned long flags;
989 int idx = -1;
990
991 if (IS_ENABLED(CONFIG_X86)) {
992 @@ -472,11 +485,14 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
993 idx = 0;
994 }
995
996 + /* finish another registration/unregistration instance first */
997 + mutex_lock(&ghes_reg_mutex);
998 +
999 /*
1000 * We have only one logical memory controller to which all DIMMs belong.
1001 */
1002 - if (atomic_inc_return(&ghes_init) > 1)
1003 - return 0;
1004 + if (refcount_inc_not_zero(&ghes_refcount))
1005 + goto unlock;
1006
1007 /* Get the number of DIMMs */
1008 dmi_walk(ghes_edac_count_dimms, &num_dimm);
1009 @@ -494,12 +510,13 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
1010 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt));
1011 if (!mci) {
1012 pr_info("Can't allocate memory for EDAC data\n");
1013 - return -ENOMEM;
1014 + rc = -ENOMEM;
1015 + goto unlock;
1016 }
1017
1018 - ghes_pvt = mci->pvt_info;
1019 - ghes_pvt->ghes = ghes;
1020 - ghes_pvt->mci = mci;
1021 + pvt = mci->pvt_info;
1022 + pvt->ghes = ghes;
1023 + pvt->mci = mci;
1024
1025 mci->pdev = dev;
1026 mci->mtype_cap = MEM_FLAG_EMPTY;
1027 @@ -541,23 +558,48 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev)
1028 if (rc < 0) {
1029 pr_info("Can't register at EDAC core\n");
1030 edac_mc_free(mci);
1031 - return -ENODEV;
1032 + rc = -ENODEV;
1033 + goto unlock;
1034 }
1035 - return 0;
1036 +
1037 + spin_lock_irqsave(&ghes_lock, flags);
1038 + ghes_pvt = pvt;
1039 + spin_unlock_irqrestore(&ghes_lock, flags);
1040 +
1041 + /* only increment on success */
1042 + refcount_inc(&ghes_refcount);
1043 +
1044 +unlock:
1045 + mutex_unlock(&ghes_reg_mutex);
1046 +
1047 + return rc;
1048 }
1049
1050 void ghes_edac_unregister(struct ghes *ghes)
1051 {
1052 struct mem_ctl_info *mci;
1053 + unsigned long flags;
1054
1055 - if (!ghes_pvt)
1056 - return;
1057 + mutex_lock(&ghes_reg_mutex);
1058
1059 - if (atomic_dec_return(&ghes_init))
1060 - return;
1061 + if (!refcount_dec_and_test(&ghes_refcount))
1062 + goto unlock;
1063
1064 - mci = ghes_pvt->mci;
1065 + /*
1066 + * Wait for the irq handler being finished.
1067 + */
1068 + spin_lock_irqsave(&ghes_lock, flags);
1069 + mci = ghes_pvt ? ghes_pvt->mci : NULL;
1070 ghes_pvt = NULL;
1071 - edac_mc_del_mc(mci->pdev);
1072 - edac_mc_free(mci);
1073 + spin_unlock_irqrestore(&ghes_lock, flags);
1074 +
1075 + if (!mci)
1076 + goto unlock;
1077 +
1078 + mci = edac_mc_del_mc(mci->pdev);
1079 + if (mci)
1080 + edac_mc_free(mci);
1081 +
1082 +unlock:
1083 + mutex_unlock(&ghes_reg_mutex);
1084 }
1085 diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c
1086 index 8230dac01a89..3a4126dc2520 100644
1087 --- a/drivers/gpu/drm/drm_damage_helper.c
1088 +++ b/drivers/gpu/drm/drm_damage_helper.c
1089 @@ -212,8 +212,14 @@ retry:
1090 drm_for_each_plane(plane, fb->dev) {
1091 struct drm_plane_state *plane_state;
1092
1093 - if (plane->state->fb != fb)
1094 + ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
1095 + if (ret)
1096 + goto out;
1097 +
1098 + if (plane->state->fb != fb) {
1099 + drm_modeset_unlock(&plane->mutex);
1100 continue;
1101 + }
1102
1103 plane_state = drm_atomic_get_plane_state(state, plane);
1104 if (IS_ERR(plane_state)) {
1105 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
1106 index 2a77823b8e9a..e66c38332df4 100644
1107 --- a/drivers/gpu/drm/i810/i810_dma.c
1108 +++ b/drivers/gpu/drm/i810/i810_dma.c
1109 @@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev,
1110 if (nbox > I810_NR_SAREA_CLIPRECTS)
1111 nbox = I810_NR_SAREA_CLIPRECTS;
1112
1113 - if (used > 4 * 1024)
1114 + if (used < 0 || used > 4 * 1024)
1115 used = 0;
1116
1117 if (sarea_priv->dirty)
1118 @@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in
1119 if (u != I810_BUF_CLIENT)
1120 DRM_DEBUG("MC found buffer that isn't mine!\n");
1121
1122 - if (used > 4 * 1024)
1123 + if (used < 0 || used > 4 * 1024)
1124 used = 0;
1125
1126 sarea_priv->dirty = 0x7f;
1127 diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
1128 index 9a09eba53182..5649887d2b90 100644
1129 --- a/drivers/gpu/drm/mcde/mcde_drv.c
1130 +++ b/drivers/gpu/drm/mcde/mcde_drv.c
1131 @@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev)
1132 }
1133 if (!match) {
1134 dev_err(dev, "no matching components\n");
1135 - return -ENODEV;
1136 + ret = -ENODEV;
1137 + goto clk_disable;
1138 }
1139 if (IS_ERR(match)) {
1140 dev_err(dev, "could not create component match\n");
1141 diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
1142 index 6be879578140..1c74381a4fc9 100644
1143 --- a/drivers/gpu/drm/msm/msm_debugfs.c
1144 +++ b/drivers/gpu/drm/msm/msm_debugfs.c
1145 @@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
1146 struct msm_gpu_show_priv *show_priv = m->private;
1147 struct msm_drm_private *priv = show_priv->dev->dev_private;
1148 struct msm_gpu *gpu = priv->gpu;
1149 - int ret;
1150 -
1151 - ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
1152 - if (ret)
1153 - return ret;
1154
1155 + mutex_lock(&show_priv->dev->struct_mutex);
1156 gpu->funcs->gpu_state_put(show_priv->state);
1157 mutex_unlock(&show_priv->dev->struct_mutex);
1158
1159 diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1160 index 219c10eb752c..ee44640edeb5 100644
1161 --- a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1162 +++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
1163 @@ -652,10 +652,13 @@ static ssize_t cyc_threshold_store(struct device *dev,
1164
1165 if (kstrtoul(buf, 16, &val))
1166 return -EINVAL;
1167 +
1168 + /* mask off max threshold before checking min value */
1169 + val &= ETM_CYC_THRESHOLD_MASK;
1170 if (val < drvdata->ccitmin)
1171 return -EINVAL;
1172
1173 - config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
1174 + config->ccctlr = val;
1175 return size;
1176 }
1177 static DEVICE_ATTR_RW(cyc_threshold);
1178 @@ -686,14 +689,16 @@ static ssize_t bb_ctrl_store(struct device *dev,
1179 return -EINVAL;
1180 if (!drvdata->nr_addr_cmp)
1181 return -EINVAL;
1182 +
1183 /*
1184 - * Bit[7:0] selects which address range comparator is used for
1185 - * branch broadcast control.
1186 + * Bit[8] controls include(1) / exclude(0), bits[0-7] select
1187 + * individual range comparators. If include then at least 1
1188 + * range must be selected.
1189 */
1190 - if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
1191 + if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
1192 return -EINVAL;
1193
1194 - config->bb_ctrl = val;
1195 + config->bb_ctrl = val & GENMASK(8, 0);
1196 return size;
1197 }
1198 static DEVICE_ATTR_RW(bb_ctrl);
1199 @@ -1324,8 +1329,8 @@ static ssize_t seq_event_store(struct device *dev,
1200
1201 spin_lock(&drvdata->spinlock);
1202 idx = config->seq_idx;
1203 - /* RST, bits[7:0] */
1204 - config->seq_ctrl[idx] = val & 0xFF;
1205 + /* Seq control has two masks B[15:8] F[7:0] */
1206 + config->seq_ctrl[idx] = val & 0xFFFF;
1207 spin_unlock(&drvdata->spinlock);
1208 return size;
1209 }
1210 @@ -1580,7 +1585,7 @@ static ssize_t res_ctrl_store(struct device *dev,
1211 if (idx % 2 != 0)
1212 /* PAIRINV, bit[21] */
1213 val &= ~BIT(21);
1214 - config->res_ctrl[idx] = val;
1215 + config->res_ctrl[idx] = val & GENMASK(21, 0);
1216 spin_unlock(&drvdata->spinlock);
1217 return size;
1218 }
1219 diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
1220 index 3926be78036e..568b21eb6ea1 100644
1221 --- a/drivers/infiniband/hw/qib/qib_sysfs.c
1222 +++ b/drivers/infiniband/hw/qib/qib_sysfs.c
1223 @@ -301,6 +301,9 @@ static ssize_t qib_portattr_show(struct kobject *kobj,
1224 struct qib_pportdata *ppd =
1225 container_of(kobj, struct qib_pportdata, pport_kobj);
1226
1227 + if (!pattr->show)
1228 + return -EIO;
1229 +
1230 return pattr->show(ppd, buf);
1231 }
1232
1233 @@ -312,6 +315,9 @@ static ssize_t qib_portattr_store(struct kobject *kobj,
1234 struct qib_pportdata *ppd =
1235 container_of(kobj, struct qib_pportdata, pport_kobj);
1236
1237 + if (!pattr->store)
1238 + return -EIO;
1239 +
1240 return pattr->store(ppd, buf, len);
1241 }
1242
1243 diff --git a/drivers/input/joystick/psxpad-spi.c b/drivers/input/joystick/psxpad-spi.c
1244 index 7eee1b0e360f..99a6052500ca 100644
1245 --- a/drivers/input/joystick/psxpad-spi.c
1246 +++ b/drivers/input/joystick/psxpad-spi.c
1247 @@ -292,7 +292,7 @@ static int psxpad_spi_probe(struct spi_device *spi)
1248 if (!pad)
1249 return -ENOMEM;
1250
1251 - pdev = input_allocate_polled_device();
1252 + pdev = devm_input_allocate_polled_device(&spi->dev);
1253 if (!pdev) {
1254 dev_err(&spi->dev, "failed to allocate input device\n");
1255 return -ENOMEM;
1256 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1257 index 56fae3472114..1ae6f8bba9ae 100644
1258 --- a/drivers/input/mouse/synaptics.c
1259 +++ b/drivers/input/mouse/synaptics.c
1260 @@ -172,6 +172,7 @@ static const char * const smbus_pnp_ids[] = {
1261 "LEN0071", /* T480 */
1262 "LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
1263 "LEN0073", /* X1 Carbon G5 (Elantech) */
1264 + "LEN0091", /* X1 Carbon 6 */
1265 "LEN0092", /* X1 Carbon 6 */
1266 "LEN0093", /* T480 */
1267 "LEN0096", /* X280 */
1268 diff --git a/drivers/input/rmi4/rmi_f34v7.c b/drivers/input/rmi4/rmi_f34v7.c
1269 index a4cabf52740c..74f7c6f214ff 100644
1270 --- a/drivers/input/rmi4/rmi_f34v7.c
1271 +++ b/drivers/input/rmi4/rmi_f34v7.c
1272 @@ -1189,6 +1189,9 @@ int rmi_f34v7_do_reflash(struct f34_data *f34, const struct firmware *fw)
1273 {
1274 int ret;
1275
1276 + f34->fn->rmi_dev->driver->set_irq_bits(f34->fn->rmi_dev,
1277 + f34->fn->irq_mask);
1278 +
1279 rmi_f34v7_read_queries_bl_version(f34);
1280
1281 f34->v7.image = fw->data;
1282 diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c
1283 index 2407ea43de59..b313c579914f 100644
1284 --- a/drivers/input/rmi4/rmi_smbus.c
1285 +++ b/drivers/input/rmi4/rmi_smbus.c
1286 @@ -163,7 +163,6 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr,
1287 /* prepare to write next block of bytes */
1288 cur_len -= SMB_MAX_COUNT;
1289 databuff += SMB_MAX_COUNT;
1290 - rmiaddr += SMB_MAX_COUNT;
1291 }
1292 exit:
1293 mutex_unlock(&rmi_smb->page_mutex);
1294 @@ -215,7 +214,6 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr,
1295 /* prepare to read next block of bytes */
1296 cur_len -= SMB_MAX_COUNT;
1297 databuff += SMB_MAX_COUNT;
1298 - rmiaddr += SMB_MAX_COUNT;
1299 }
1300
1301 retval = 0;
1302 diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
1303 index fb43aa708660..0403102e807e 100644
1304 --- a/drivers/input/touchscreen/goodix.c
1305 +++ b/drivers/input/touchscreen/goodix.c
1306 @@ -128,6 +128,15 @@ static const unsigned long goodix_irq_flags[] = {
1307 */
1308 static const struct dmi_system_id rotated_screen[] = {
1309 #if defined(CONFIG_DMI) && defined(CONFIG_X86)
1310 + {
1311 + .ident = "Teclast X89",
1312 + .matches = {
1313 + /* tPAD is too generic, also match on bios date */
1314 + DMI_MATCH(DMI_BOARD_VENDOR, "TECLAST"),
1315 + DMI_MATCH(DMI_BOARD_NAME, "tPAD"),
1316 + DMI_MATCH(DMI_BIOS_DATE, "12/19/2014"),
1317 + },
1318 + },
1319 {
1320 .ident = "WinBook TW100",
1321 .matches = {
1322 diff --git a/drivers/mailbox/tegra-hsp.c b/drivers/mailbox/tegra-hsp.c
1323 index 4c5ba35d48d4..834b35dc3b13 100644
1324 --- a/drivers/mailbox/tegra-hsp.c
1325 +++ b/drivers/mailbox/tegra-hsp.c
1326 @@ -657,7 +657,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
1327 hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
1328 hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;
1329
1330 - err = platform_get_irq_byname(pdev, "doorbell");
1331 + err = platform_get_irq_byname_optional(pdev, "doorbell");
1332 if (err >= 0)
1333 hsp->doorbell_irq = err;
1334
1335 @@ -677,7 +677,7 @@ static int tegra_hsp_probe(struct platform_device *pdev)
1336 if (!name)
1337 return -ENOMEM;
1338
1339 - err = platform_get_irq_byname(pdev, name);
1340 + err = platform_get_irq_byname_optional(pdev, name);
1341 if (err >= 0) {
1342 hsp->shared_irqs[i] = err;
1343 count++;
1344 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
1345 index 1e772287b1c8..aa88bdeb9978 100644
1346 --- a/drivers/md/raid0.c
1347 +++ b/drivers/md/raid0.c
1348 @@ -615,7 +615,7 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
1349 tmp_dev = map_sector(mddev, zone, sector, &sector);
1350 break;
1351 default:
1352 - WARN("md/raid0:%s: Invalid layout\n", mdname(mddev));
1353 + WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
1354 bio_io_error(bio);
1355 return true;
1356 }
1357 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
1358 index 13da4c5c7d17..7741151606ef 100644
1359 --- a/drivers/media/rc/rc-main.c
1360 +++ b/drivers/media/rc/rc-main.c
1361 @@ -1773,6 +1773,7 @@ static int rc_prepare_rx_device(struct rc_dev *dev)
1362 set_bit(MSC_SCAN, dev->input_dev->mscbit);
1363
1364 /* Pointer/mouse events */
1365 + set_bit(INPUT_PROP_POINTING_STICK, dev->input_dev->propbit);
1366 set_bit(EV_REL, dev->input_dev->evbit);
1367 set_bit(REL_X, dev->input_dev->relbit);
1368 set_bit(REL_Y, dev->input_dev->relbit);
1369 diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1370 index 0a9f42e5fedf..2e57122f02fb 100644
1371 --- a/drivers/net/can/slcan.c
1372 +++ b/drivers/net/can/slcan.c
1373 @@ -617,6 +617,7 @@ err_free_chan:
1374 sl->tty = NULL;
1375 tty->disc_data = NULL;
1376 clear_bit(SLF_INUSE, &sl->flags);
1377 + slc_free_netdev(sl->dev);
1378 free_netdev(sl->dev);
1379
1380 err_exit:
1381 diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c
1382 index 04aac3bb54ef..81e942f713e6 100644
1383 --- a/drivers/net/can/usb/ucan.c
1384 +++ b/drivers/net/can/usb/ucan.c
1385 @@ -792,7 +792,7 @@ resubmit:
1386 up);
1387
1388 usb_anchor_urb(urb, &up->rx_urbs);
1389 - ret = usb_submit_urb(urb, GFP_KERNEL);
1390 + ret = usb_submit_urb(urb, GFP_ATOMIC);
1391
1392 if (ret < 0) {
1393 netdev_err(up->netdev,
1394 diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
1395 index a9657ae6d782..d14e55e3c9da 100644
1396 --- a/drivers/net/wireless/marvell/mwifiex/main.c
1397 +++ b/drivers/net/wireless/marvell/mwifiex/main.c
1398 @@ -631,6 +631,7 @@ static int _mwifiex_fw_dpc(const struct firmware *firmware, void *context)
1399
1400 mwifiex_drv_get_driver_version(adapter, fmt, sizeof(fmt) - 1);
1401 mwifiex_dbg(adapter, MSG, "driver_version = %s\n", fmt);
1402 + adapter->is_up = true;
1403 goto done;
1404
1405 err_add_intf:
1406 @@ -1469,6 +1470,7 @@ int mwifiex_shutdown_sw(struct mwifiex_adapter *adapter)
1407 mwifiex_deauthenticate(priv, NULL);
1408
1409 mwifiex_uninit_sw(adapter);
1410 + adapter->is_up = false;
1411
1412 if (adapter->if_ops.down_dev)
1413 adapter->if_ops.down_dev(adapter);
1414 @@ -1730,7 +1732,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter)
1415 if (!adapter)
1416 return 0;
1417
1418 - mwifiex_uninit_sw(adapter);
1419 + if (adapter->is_up)
1420 + mwifiex_uninit_sw(adapter);
1421
1422 if (adapter->irq_wakeup >= 0)
1423 device_init_wakeup(adapter->dev, false);
1424 diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
1425 index 095837fba300..547ff3c578ee 100644
1426 --- a/drivers/net/wireless/marvell/mwifiex/main.h
1427 +++ b/drivers/net/wireless/marvell/mwifiex/main.h
1428 @@ -1017,6 +1017,7 @@ struct mwifiex_adapter {
1429
1430 /* For synchronizing FW initialization with device lifecycle. */
1431 struct completion *fw_done;
1432 + bool is_up;
1433
1434 bool ext_scan;
1435 u8 fw_api_ver;
1436 diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
1437 index 24c041dad9f6..fec38b6e86ff 100644
1438 --- a/drivers/net/wireless/marvell/mwifiex/sdio.c
1439 +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
1440 @@ -444,6 +444,9 @@ static int mwifiex_sdio_suspend(struct device *dev)
1441 return 0;
1442 }
1443
1444 + if (!adapter->is_up)
1445 + return -EBUSY;
1446 +
1447 mwifiex_enable_wake(adapter);
1448
1449 /* Enable the Host Sleep */
1450 @@ -2220,22 +2223,30 @@ static void mwifiex_sdio_card_reset_work(struct mwifiex_adapter *adapter)
1451 struct sdio_func *func = card->func;
1452 int ret;
1453
1454 + /* Prepare the adapter for the reset. */
1455 mwifiex_shutdown_sw(adapter);
1456 + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
1457 + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
1458
1459 - /* power cycle the adapter */
1460 + /* Run a HW reset of the SDIO interface. */
1461 sdio_claim_host(func);
1462 - mmc_hw_reset(func->card->host);
1463 + ret = mmc_hw_reset(func->card->host);
1464 sdio_release_host(func);
1465
1466 - /* Previous save_adapter won't be valid after this. We will cancel
1467 - * pending work requests.
1468 - */
1469 - clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &card->work_flags);
1470 - clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &card->work_flags);
1471 -
1472 - ret = mwifiex_reinit_sw(adapter);
1473 - if (ret)
1474 - dev_err(&func->dev, "reinit failed: %d\n", ret);
1475 + switch (ret) {
1476 + case 1:
1477 + dev_dbg(&func->dev, "SDIO HW reset asynchronous\n");
1478 + complete_all(adapter->fw_done);
1479 + break;
1480 + case 0:
1481 + ret = mwifiex_reinit_sw(adapter);
1482 + if (ret)
1483 + dev_err(&func->dev, "reinit failed: %d\n", ret);
1484 + break;
1485 + default:
1486 + dev_err(&func->dev, "SDIO HW reset failed: %d\n", ret);
1487 + break;
1488 + }
1489 }
1490
1491 /* This function read/write firmware */
1492 diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
1493 index 6c7f26ef6476..9cc8a335d519 100644
1494 --- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
1495 +++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
1496 @@ -1756,6 +1756,7 @@ static int rsi_send_beacon(struct rsi_common *common)
1497 skb_pull(skb, (64 - dword_align_bytes));
1498 if (rsi_prepare_beacon(common, skb)) {
1499 rsi_dbg(ERR_ZONE, "Failed to prepare beacon\n");
1500 + dev_kfree_skb(skb);
1501 return -EINVAL;
1502 }
1503 skb_queue_tail(&common->tx_queue[MGMT_BEACON_Q], skb);
1504 diff --git a/drivers/soc/mediatek/mtk-cmdq-helper.c b/drivers/soc/mediatek/mtk-cmdq-helper.c
1505 index 7aa0517ff2f3..3c82de5f9417 100644
1506 --- a/drivers/soc/mediatek/mtk-cmdq-helper.c
1507 +++ b/drivers/soc/mediatek/mtk-cmdq-helper.c
1508 @@ -155,7 +155,7 @@ int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
1509 err = cmdq_pkt_append_command(pkt, CMDQ_CODE_MASK, 0, ~mask);
1510 offset_mask |= CMDQ_WRITE_ENABLE_MASK;
1511 }
1512 - err |= cmdq_pkt_write(pkt, value, subsys, offset_mask);
1513 + err |= cmdq_pkt_write(pkt, subsys, offset_mask, value);
1514
1515 return err;
1516 }
1517 diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
1518 index acf318e7330c..ba8eff41b746 100644
1519 --- a/drivers/spi/spi-atmel.c
1520 +++ b/drivers/spi/spi-atmel.c
1521 @@ -1183,10 +1183,8 @@ static int atmel_spi_setup(struct spi_device *spi)
1522 as = spi_master_get_devdata(spi->master);
1523
1524 /* see notes above re chipselect */
1525 - if (!atmel_spi_is_v2(as)
1526 - && spi->chip_select == 0
1527 - && (spi->mode & SPI_CS_HIGH)) {
1528 - dev_dbg(&spi->dev, "setup: can't be active-high\n");
1529 + if (!as->use_cs_gpios && (spi->mode & SPI_CS_HIGH)) {
1530 + dev_warn(&spi->dev, "setup: non GPIO CS can't be active-high\n");
1531 return -EINVAL;
1532 }
1533
1534 diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c
1535 index c02e24c01136..63c9f7edaf6c 100644
1536 --- a/drivers/spi/spi-fsl-qspi.c
1537 +++ b/drivers/spi/spi-fsl-qspi.c
1538 @@ -63,6 +63,11 @@
1539 #define QUADSPI_IPCR 0x08
1540 #define QUADSPI_IPCR_SEQID(x) ((x) << 24)
1541
1542 +#define QUADSPI_FLSHCR 0x0c
1543 +#define QUADSPI_FLSHCR_TCSS_MASK GENMASK(3, 0)
1544 +#define QUADSPI_FLSHCR_TCSH_MASK GENMASK(11, 8)
1545 +#define QUADSPI_FLSHCR_TDH_MASK GENMASK(17, 16)
1546 +
1547 #define QUADSPI_BUF3CR 0x1c
1548 #define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
1549 #define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
1550 @@ -95,6 +100,9 @@
1551 #define QUADSPI_FR 0x160
1552 #define QUADSPI_FR_TFF_MASK BIT(0)
1553
1554 +#define QUADSPI_RSER 0x164
1555 +#define QUADSPI_RSER_TFIE BIT(0)
1556 +
1557 #define QUADSPI_SPTRCLR 0x16c
1558 #define QUADSPI_SPTRCLR_IPPTRC BIT(8)
1559 #define QUADSPI_SPTRCLR_BFPTRC BIT(0)
1560 @@ -112,9 +120,6 @@
1561 #define QUADSPI_LCKER_LOCK BIT(0)
1562 #define QUADSPI_LCKER_UNLOCK BIT(1)
1563
1564 -#define QUADSPI_RSER 0x164
1565 -#define QUADSPI_RSER_TFIE BIT(0)
1566 -
1567 #define QUADSPI_LUT_BASE 0x310
1568 #define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
1569 #define QUADSPI_LUT_REG(idx) \
1570 @@ -181,6 +186,12 @@
1571 */
1572 #define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
1573
1574 +/*
1575 + * Controller uses TDH bits in register QUADSPI_FLSHCR.
1576 + * They need to be set in accordance with the DDR/SDR mode.
1577 + */
1578 +#define QUADSPI_QUIRK_USE_TDH_SETTING BIT(5)
1579 +
1580 struct fsl_qspi_devtype_data {
1581 unsigned int rxfifo;
1582 unsigned int txfifo;
1583 @@ -209,7 +220,8 @@ static const struct fsl_qspi_devtype_data imx7d_data = {
1584 .rxfifo = SZ_128,
1585 .txfifo = SZ_512,
1586 .ahb_buf_size = SZ_1K,
1587 - .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
1588 + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
1589 + QUADSPI_QUIRK_USE_TDH_SETTING,
1590 .little_endian = true,
1591 };
1592
1593 @@ -217,7 +229,8 @@ static const struct fsl_qspi_devtype_data imx6ul_data = {
1594 .rxfifo = SZ_128,
1595 .txfifo = SZ_512,
1596 .ahb_buf_size = SZ_1K,
1597 - .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
1598 + .quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK |
1599 + QUADSPI_QUIRK_USE_TDH_SETTING,
1600 .little_endian = true,
1601 };
1602
1603 @@ -275,6 +288,11 @@ static inline int needs_amba_base_offset(struct fsl_qspi *q)
1604 return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
1605 }
1606
1607 +static inline int needs_tdh_setting(struct fsl_qspi *q)
1608 +{
1609 + return q->devtype_data->quirks & QUADSPI_QUIRK_USE_TDH_SETTING;
1610 +}
1611 +
1612 /*
1613 * An IC bug makes it necessary to rearrange the 32-bit data.
1614 * Later chips, such as IMX6SLX, have fixed this bug.
1615 @@ -710,6 +728,16 @@ static int fsl_qspi_default_setup(struct fsl_qspi *q)
1616 qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
1617 base + QUADSPI_MCR);
1618
1619 + /*
1620 + * Previous boot stages (BootROM, bootloader) might have used DDR
1621 + * mode and did not clear the TDH bits. As we currently use SDR mode
1622 + * only, clear the TDH bits if necessary.
1623 + */
1624 + if (needs_tdh_setting(q))
1625 + qspi_writel(q, qspi_readl(q, base + QUADSPI_FLSHCR) &
1626 + ~QUADSPI_FLSHCR_TDH_MASK,
1627 + base + QUADSPI_FLSHCR);
1628 +
1629 reg = qspi_readl(q, base + QUADSPI_SMPR);
1630 qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
1631 | QUADSPI_SMPR_FSPHS_MASK
1632 diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
1633 index 9ac6f9fe13cf..4e726929bb4f 100644
1634 --- a/drivers/spi/spi-stm32-qspi.c
1635 +++ b/drivers/spi/spi-stm32-qspi.c
1636 @@ -528,7 +528,6 @@ static void stm32_qspi_release(struct stm32_qspi *qspi)
1637 stm32_qspi_dma_free(qspi);
1638 mutex_destroy(&qspi->lock);
1639 clk_disable_unprepare(qspi->clk);
1640 - spi_master_put(qspi->ctrl);
1641 }
1642
1643 static int stm32_qspi_probe(struct platform_device *pdev)
1644 @@ -626,6 +625,8 @@ static int stm32_qspi_probe(struct platform_device *pdev)
1645
1646 err:
1647 stm32_qspi_release(qspi);
1648 + spi_master_put(qspi->ctrl);
1649 +
1650 return ret;
1651 }
1652
1653 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
1654 index f9502dbbb5c1..26b91ee0855d 100644
1655 --- a/drivers/spi/spi.c
1656 +++ b/drivers/spi/spi.c
1657 @@ -1711,15 +1711,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1658 spi->mode |= SPI_3WIRE;
1659 if (of_property_read_bool(nc, "spi-lsb-first"))
1660 spi->mode |= SPI_LSB_FIRST;
1661 -
1662 - /*
1663 - * For descriptors associated with the device, polarity inversion is
1664 - * handled in the gpiolib, so all chip selects are "active high" in
1665 - * the logical sense, the gpiolib will invert the line if need be.
1666 - */
1667 - if (ctlr->use_gpio_descriptors)
1668 - spi->mode |= SPI_CS_HIGH;
1669 - else if (of_property_read_bool(nc, "spi-cs-high"))
1670 + if (of_property_read_bool(nc, "spi-cs-high"))
1671 spi->mode |= SPI_CS_HIGH;
1672
1673 /* Device DUAL/QUAD mode */
1674 @@ -1783,6 +1775,15 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
1675 }
1676 spi->chip_select = value;
1677
1678 + /*
1679 + * For descriptors associated with the device, polarity inversion is
1680 + * handled in the gpiolib, so all gpio chip selects are "active high"
1681 + * in the logical sense, the gpiolib will invert the line if need be.
1682 + */
1683 + if ((ctlr->use_gpio_descriptors) && ctlr->cs_gpiods &&
1684 + ctlr->cs_gpiods[spi->chip_select])
1685 + spi->mode |= SPI_CS_HIGH;
1686 +
1687 /* Device speed */
1688 rc = of_property_read_u32(nc, "spi-max-frequency", &value);
1689 if (rc) {
1690 diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
1691 index a8a864b40913..042220d86d33 100644
1692 --- a/drivers/staging/octeon/octeon-ethernet.h
1693 +++ b/drivers/staging/octeon/octeon-ethernet.h
1694 @@ -14,7 +14,7 @@
1695 #include <linux/of.h>
1696 #include <linux/phy.h>
1697
1698 -#ifdef CONFIG_MIPS
1699 +#ifdef CONFIG_CAVIUM_OCTEON_SOC
1700
1701 #include <asm/octeon/octeon.h>
1702
1703 diff --git a/drivers/staging/octeon/octeon-stubs.h b/drivers/staging/octeon/octeon-stubs.h
1704 index b78ce9eaab85..ae014265064a 100644
1705 --- a/drivers/staging/octeon/octeon-stubs.h
1706 +++ b/drivers/staging/octeon/octeon-stubs.h
1707 @@ -1,5 +1,8 @@
1708 #define CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 512
1709 -#define XKPHYS_TO_PHYS(p) (p)
1710 +
1711 +#ifndef XKPHYS_TO_PHYS
1712 +# define XKPHYS_TO_PHYS(p) (p)
1713 +#endif
1714
1715 #define OCTEON_IRQ_WORKQ0 0
1716 #define OCTEON_IRQ_RML 0
1717 diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
1718 index d4481cc8958f..c28271817e43 100644
1719 --- a/drivers/thermal/thermal_core.c
1720 +++ b/drivers/thermal/thermal_core.c
1721 @@ -304,7 +304,7 @@ static void thermal_zone_device_set_polling(struct thermal_zone_device *tz,
1722 &tz->poll_queue,
1723 msecs_to_jiffies(delay));
1724 else
1725 - cancel_delayed_work_sync(&tz->poll_queue);
1726 + cancel_delayed_work(&tz->poll_queue);
1727 }
1728
1729 static void monitor_thermal_zone(struct thermal_zone_device *tz)
1730 @@ -1414,7 +1414,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1731
1732 mutex_unlock(&thermal_list_lock);
1733
1734 - thermal_zone_device_set_polling(tz, 0);
1735 + cancel_delayed_work_sync(&tz->poll_queue);
1736
1737 thermal_set_governor(tz, NULL);
1738
1739 diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
1740 index 1c72fdc2dd37..51a7d3b19b39 100644
1741 --- a/drivers/tty/serial/8250/8250_dw.c
1742 +++ b/drivers/tty/serial/8250/8250_dw.c
1743 @@ -386,10 +386,10 @@ static int dw8250_probe(struct platform_device *pdev)
1744 {
1745 struct uart_8250_port uart = {}, *up = &uart;
1746 struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1747 - int irq = platform_get_irq(pdev, 0);
1748 struct uart_port *p = &up->port;
1749 struct device *dev = &pdev->dev;
1750 struct dw8250_data *data;
1751 + int irq;
1752 int err;
1753 u32 val;
1754
1755 @@ -398,11 +398,9 @@ static int dw8250_probe(struct platform_device *pdev)
1756 return -EINVAL;
1757 }
1758
1759 - if (irq < 0) {
1760 - if (irq != -EPROBE_DEFER)
1761 - dev_err(dev, "cannot get irq\n");
1762 + irq = platform_get_irq(pdev, 0);
1763 + if (irq < 0)
1764 return irq;
1765 - }
1766
1767 spin_lock_init(&p->lock);
1768 p->mapbase = regs->start;
1769 diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
1770 index b411ba4eb5e9..4d067f515f74 100644
1771 --- a/drivers/tty/serial/8250/8250_mtk.c
1772 +++ b/drivers/tty/serial/8250/8250_mtk.c
1773 @@ -544,7 +544,7 @@ static int mtk8250_probe(struct platform_device *pdev)
1774 pm_runtime_set_active(&pdev->dev);
1775 pm_runtime_enable(&pdev->dev);
1776
1777 - data->rx_wakeup_irq = platform_get_irq(pdev, 1);
1778 + data->rx_wakeup_irq = platform_get_irq_optional(pdev, 1);
1779
1780 return 0;
1781 }
1782 diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1783 index 6adbadd6a56a..8a01d034f9d1 100644
1784 --- a/drivers/tty/serial/8250/8250_pci.c
1785 +++ b/drivers/tty/serial/8250/8250_pci.c
1786 @@ -745,16 +745,8 @@ static int pci_ni8430_init(struct pci_dev *dev)
1787 }
1788
1789 /* UART Port Control Register */
1790 -#define NI16550_PCR_OFFSET 0x0f
1791 -#define NI16550_PCR_RS422 0x00
1792 -#define NI16550_PCR_ECHO_RS485 0x01
1793 -#define NI16550_PCR_DTR_RS485 0x02
1794 -#define NI16550_PCR_AUTO_RS485 0x03
1795 -#define NI16550_PCR_WIRE_MODE_MASK 0x03
1796 -#define NI16550_PCR_TXVR_ENABLE_BIT BIT(3)
1797 -#define NI16550_PCR_RS485_TERMINATION_BIT BIT(6)
1798 -#define NI16550_ACR_DTR_AUTO_DTR (0x2 << 3)
1799 -#define NI16550_ACR_DTR_MANUAL_DTR (0x0 << 3)
1800 +#define NI8430_PORTCON 0x0f
1801 +#define NI8430_PORTCON_TXVR_ENABLE (1 << 3)
1802
1803 static int
1804 pci_ni8430_setup(struct serial_private *priv,
1805 @@ -776,117 +768,14 @@ pci_ni8430_setup(struct serial_private *priv,
1806 return -ENOMEM;
1807
1808 /* enable the transceiver */
1809 - writeb(readb(p + offset + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
1810 - p + offset + NI16550_PCR_OFFSET);
1811 + writeb(readb(p + offset + NI8430_PORTCON) | NI8430_PORTCON_TXVR_ENABLE,
1812 + p + offset + NI8430_PORTCON);
1813
1814 iounmap(p);
1815
1816 return setup_port(priv, port, bar, offset, board->reg_shift);
1817 }
1818
1819 -static int pci_ni8431_config_rs485(struct uart_port *port,
1820 - struct serial_rs485 *rs485)
1821 -{
1822 - u8 pcr, acr;
1823 - struct uart_8250_port *up;
1824 -
1825 - up = container_of(port, struct uart_8250_port, port);
1826 - acr = up->acr;
1827 - pcr = port->serial_in(port, NI16550_PCR_OFFSET);
1828 - pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
1829 -
1830 - if (rs485->flags & SER_RS485_ENABLED) {
1831 - /* RS-485 */
1832 - if ((rs485->flags & SER_RS485_RX_DURING_TX) &&
1833 - (rs485->flags & SER_RS485_RTS_ON_SEND)) {
1834 - dev_dbg(port->dev, "Invalid 2-wire mode\n");
1835 - return -EINVAL;
1836 - }
1837 -
1838 - if (rs485->flags & SER_RS485_RX_DURING_TX) {
1839 - /* Echo */
1840 - dev_vdbg(port->dev, "2-wire DTR with echo\n");
1841 - pcr |= NI16550_PCR_ECHO_RS485;
1842 - acr |= NI16550_ACR_DTR_MANUAL_DTR;
1843 - } else {
1844 - /* Auto or DTR */
1845 - if (rs485->flags & SER_RS485_RTS_ON_SEND) {
1846 - /* Auto */
1847 - dev_vdbg(port->dev, "2-wire Auto\n");
1848 - pcr |= NI16550_PCR_AUTO_RS485;
1849 - acr |= NI16550_ACR_DTR_AUTO_DTR;
1850 - } else {
1851 - /* DTR-controlled */
1852 - /* No Echo */
1853 - dev_vdbg(port->dev, "2-wire DTR no echo\n");
1854 - pcr |= NI16550_PCR_DTR_RS485;
1855 - acr |= NI16550_ACR_DTR_MANUAL_DTR;
1856 - }
1857 - }
1858 - } else {
1859 - /* RS-422 */
1860 - dev_vdbg(port->dev, "4-wire\n");
1861 - pcr |= NI16550_PCR_RS422;
1862 - acr |= NI16550_ACR_DTR_MANUAL_DTR;
1863 - }
1864 -
1865 - dev_dbg(port->dev, "write pcr: 0x%08x\n", pcr);
1866 - port->serial_out(port, NI16550_PCR_OFFSET, pcr);
1867 -
1868 - up->acr = acr;
1869 - port->serial_out(port, UART_SCR, UART_ACR);
1870 - port->serial_out(port, UART_ICR, up->acr);
1871 -
1872 - /* Update the cache. */
1873 - port->rs485 = *rs485;
1874 -
1875 - return 0;
1876 -}
1877 -
1878 -static int pci_ni8431_setup(struct serial_private *priv,
1879 - const struct pciserial_board *board,
1880 - struct uart_8250_port *uart, int idx)
1881 -{
1882 - u8 pcr, acr;
1883 - struct pci_dev *dev = priv->dev;
1884 - void __iomem *addr;
1885 - unsigned int bar, offset = board->first_offset;
1886 -
1887 - if (idx >= board->num_ports)
1888 - return 1;
1889 -
1890 - bar = FL_GET_BASE(board->flags);
1891 - offset += idx * board->uart_offset;
1892 -
1893 - addr = pci_ioremap_bar(dev, bar);
1894 - if (!addr)
1895 - return -ENOMEM;
1896 -
1897 - /* enable the transceiver */
1898 - writeb(readb(addr + NI16550_PCR_OFFSET) | NI16550_PCR_TXVR_ENABLE_BIT,
1899 - addr + NI16550_PCR_OFFSET);
1900 -
1901 - pcr = readb(addr + NI16550_PCR_OFFSET);
1902 - pcr &= ~NI16550_PCR_WIRE_MODE_MASK;
1903 -
1904 - /* set wire mode to default RS-422 */
1905 - pcr |= NI16550_PCR_RS422;
1906 - acr = NI16550_ACR_DTR_MANUAL_DTR;
1907 -
1908 - /* write port configuration to register */
1909 - writeb(pcr, addr + NI16550_PCR_OFFSET);
1910 -
1911 - /* access and write to UART acr register */
1912 - writeb(UART_ACR, addr + UART_SCR);
1913 - writeb(acr, addr + UART_ICR);
1914 -
1915 - uart->port.rs485_config = &pci_ni8431_config_rs485;
1916 -
1917 - iounmap(addr);
1918 -
1919 - return setup_port(priv, uart, bar, offset, board->reg_shift);
1920 -}
1921 -
1922 static int pci_netmos_9900_setup(struct serial_private *priv,
1923 const struct pciserial_board *board,
1924 struct uart_8250_port *port, int idx)
1925 @@ -2023,15 +1912,6 @@ pci_moxa_setup(struct serial_private *priv,
1926 #define PCI_DEVICE_ID_ACCESIO_PCIE_COM_8SM 0x10E9
1927 #define PCI_DEVICE_ID_ACCESIO_PCIE_ICM_4SM 0x11D8
1928
1929 -#define PCIE_DEVICE_ID_NI_PXIE8430_2328 0x74C2
1930 -#define PCIE_DEVICE_ID_NI_PXIE8430_23216 0x74C1
1931 -#define PCI_DEVICE_ID_NI_PXI8431_4852 0x7081
1932 -#define PCI_DEVICE_ID_NI_PXI8431_4854 0x70DE
1933 -#define PCI_DEVICE_ID_NI_PXI8431_4858 0x70E3
1934 -#define PCI_DEVICE_ID_NI_PXI8433_4852 0x70E9
1935 -#define PCI_DEVICE_ID_NI_PXI8433_4854 0x70ED
1936 -#define PCIE_DEVICE_ID_NI_PXIE8431_4858 0x74C4
1937 -#define PCIE_DEVICE_ID_NI_PXIE8431_48516 0x74C3
1938
1939 #define PCI_DEVICE_ID_MOXA_CP102E 0x1024
1940 #define PCI_DEVICE_ID_MOXA_CP102EL 0x1025
1941 @@ -2269,87 +2149,6 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
1942 .setup = pci_ni8430_setup,
1943 .exit = pci_ni8430_exit,
1944 },
1945 - {
1946 - .vendor = PCI_VENDOR_ID_NI,
1947 - .device = PCIE_DEVICE_ID_NI_PXIE8430_2328,
1948 - .subvendor = PCI_ANY_ID,
1949 - .subdevice = PCI_ANY_ID,
1950 - .init = pci_ni8430_init,
1951 - .setup = pci_ni8430_setup,
1952 - .exit = pci_ni8430_exit,
1953 - },
1954 - {
1955 - .vendor = PCI_VENDOR_ID_NI,
1956 - .device = PCIE_DEVICE_ID_NI_PXIE8430_23216,
1957 - .subvendor = PCI_ANY_ID,
1958 - .subdevice = PCI_ANY_ID,
1959 - .init = pci_ni8430_init,
1960 - .setup = pci_ni8430_setup,
1961 - .exit = pci_ni8430_exit,
1962 - },
1963 - {
1964 - .vendor = PCI_VENDOR_ID_NI,
1965 - .device = PCI_DEVICE_ID_NI_PXI8431_4852,
1966 - .subvendor = PCI_ANY_ID,
1967 - .subdevice = PCI_ANY_ID,
1968 - .init = pci_ni8430_init,
1969 - .setup = pci_ni8431_setup,
1970 - .exit = pci_ni8430_exit,
1971 - },
1972 - {
1973 - .vendor = PCI_VENDOR_ID_NI,
1974 - .device = PCI_DEVICE_ID_NI_PXI8431_4854,
1975 - .subvendor = PCI_ANY_ID,
1976 - .subdevice = PCI_ANY_ID,
1977 - .init = pci_ni8430_init,
1978 - .setup = pci_ni8431_setup,
1979 - .exit = pci_ni8430_exit,
1980 - },
1981 - {
1982 - .vendor = PCI_VENDOR_ID_NI,
1983 - .device = PCI_DEVICE_ID_NI_PXI8431_4858,
1984 - .subvendor = PCI_ANY_ID,
1985 - .subdevice = PCI_ANY_ID,
1986 - .init = pci_ni8430_init,
1987 - .setup = pci_ni8431_setup,
1988 - .exit = pci_ni8430_exit,
1989 - },
1990 - {
1991 - .vendor = PCI_VENDOR_ID_NI,
1992 - .device = PCI_DEVICE_ID_NI_PXI8433_4852,
1993 - .subvendor = PCI_ANY_ID,
1994 - .subdevice = PCI_ANY_ID,
1995 - .init = pci_ni8430_init,
1996 - .setup = pci_ni8431_setup,
1997 - .exit = pci_ni8430_exit,
1998 - },
1999 - {
2000 - .vendor = PCI_VENDOR_ID_NI,
2001 - .device = PCI_DEVICE_ID_NI_PXI8433_4854,
2002 - .subvendor = PCI_ANY_ID,
2003 - .subdevice = PCI_ANY_ID,
2004 - .init = pci_ni8430_init,
2005 - .setup = pci_ni8431_setup,
2006 - .exit = pci_ni8430_exit,
2007 - },
2008 - {
2009 - .vendor = PCI_VENDOR_ID_NI,
2010 - .device = PCIE_DEVICE_ID_NI_PXIE8431_4858,
2011 - .subvendor = PCI_ANY_ID,
2012 - .subdevice = PCI_ANY_ID,
2013 - .init = pci_ni8430_init,
2014 - .setup = pci_ni8431_setup,
2015 - .exit = pci_ni8430_exit,
2016 - },
2017 - {
2018 - .vendor = PCI_VENDOR_ID_NI,
2019 - .device = PCIE_DEVICE_ID_NI_PXIE8431_48516,
2020 - .subvendor = PCI_ANY_ID,
2021 - .subdevice = PCI_ANY_ID,
2022 - .init = pci_ni8430_init,
2023 - .setup = pci_ni8431_setup,
2024 - .exit = pci_ni8430_exit,
2025 - },
2026 /* Quatech */
2027 {
2028 .vendor = PCI_VENDOR_ID_QUATECH,
2029 @@ -3106,13 +2905,6 @@ enum pci_board_num_t {
2030 pbn_ni8430_4,
2031 pbn_ni8430_8,
2032 pbn_ni8430_16,
2033 - pbn_ni8430_pxie_8,
2034 - pbn_ni8430_pxie_16,
2035 - pbn_ni8431_2,
2036 - pbn_ni8431_4,
2037 - pbn_ni8431_8,
2038 - pbn_ni8431_pxie_8,
2039 - pbn_ni8431_pxie_16,
2040 pbn_ADDIDATA_PCIe_1_3906250,
2041 pbn_ADDIDATA_PCIe_2_3906250,
2042 pbn_ADDIDATA_PCIe_4_3906250,
2043 @@ -3765,55 +3557,6 @@ static struct pciserial_board pci_boards[] = {
2044 .uart_offset = 0x10,
2045 .first_offset = 0x800,
2046 },
2047 - [pbn_ni8430_pxie_16] = {
2048 - .flags = FL_BASE0,
2049 - .num_ports = 16,
2050 - .base_baud = 3125000,
2051 - .uart_offset = 0x10,
2052 - .first_offset = 0x800,
2053 - },
2054 - [pbn_ni8430_pxie_8] = {
2055 - .flags = FL_BASE0,
2056 - .num_ports = 8,
2057 - .base_baud = 3125000,
2058 - .uart_offset = 0x10,
2059 - .first_offset = 0x800,
2060 - },
2061 - [pbn_ni8431_8] = {
2062 - .flags = FL_BASE0,
2063 - .num_ports = 8,
2064 - .base_baud = 3686400,
2065 - .uart_offset = 0x10,
2066 - .first_offset = 0x800,
2067 - },
2068 - [pbn_ni8431_4] = {
2069 - .flags = FL_BASE0,
2070 - .num_ports = 4,
2071 - .base_baud = 3686400,
2072 - .uart_offset = 0x10,
2073 - .first_offset = 0x800,
2074 - },
2075 - [pbn_ni8431_2] = {
2076 - .flags = FL_BASE0,
2077 - .num_ports = 2,
2078 - .base_baud = 3686400,
2079 - .uart_offset = 0x10,
2080 - .first_offset = 0x800,
2081 - },
2082 - [pbn_ni8431_pxie_16] = {
2083 - .flags = FL_BASE0,
2084 - .num_ports = 16,
2085 - .base_baud = 3125000,
2086 - .uart_offset = 0x10,
2087 - .first_offset = 0x800,
2088 - },
2089 - [pbn_ni8431_pxie_8] = {
2090 - .flags = FL_BASE0,
2091 - .num_ports = 8,
2092 - .base_baud = 3125000,
2093 - .uart_offset = 0x10,
2094 - .first_offset = 0x800,
2095 - },
2096 /*
2097 * ADDI-DATA GmbH PCI-Express communication cards <info@addi-data.com>
2098 */
2099 @@ -5567,33 +5310,6 @@ static const struct pci_device_id serial_pci_tbl[] = {
2100 { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PCI8432_2324,
2101 PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2102 pbn_ni8430_4 },
2103 - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_2328,
2104 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2105 - pbn_ni8430_pxie_8 },
2106 - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8430_23216,
2107 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2108 - pbn_ni8430_pxie_16 },
2109 - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4852,
2110 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2111 - pbn_ni8431_2 },
2112 - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4854,
2113 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2114 - pbn_ni8431_4 },
2115 - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8431_4858,
2116 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2117 - pbn_ni8431_8 },
2118 - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_4858,
2119 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2120 - pbn_ni8431_pxie_8 },
2121 - { PCI_VENDOR_ID_NI, PCIE_DEVICE_ID_NI_PXIE8431_48516,
2122 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2123 - pbn_ni8431_pxie_16 },
2124 - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4852,
2125 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2126 - pbn_ni8431_2 },
2127 - { PCI_VENDOR_ID_NI, PCI_DEVICE_ID_NI_PXI8433_4854,
2128 - PCI_ANY_ID, PCI_ANY_ID, 0, 0,
2129 - pbn_ni8431_4 },
2130
2131 /*
2132 * MOXA
2133 diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
2134 index 3a7d1a66f79c..b0b689546395 100644
2135 --- a/drivers/tty/serial/amba-pl011.c
2136 +++ b/drivers/tty/serial/amba-pl011.c
2137 @@ -813,10 +813,8 @@ __acquires(&uap->port.lock)
2138 if (!uap->using_tx_dma)
2139 return;
2140
2141 - /* Avoid deadlock with the DMA engine callback */
2142 - spin_unlock(&uap->port.lock);
2143 - dmaengine_terminate_all(uap->dmatx.chan);
2144 - spin_lock(&uap->port.lock);
2145 + dmaengine_terminate_async(uap->dmatx.chan);
2146 +
2147 if (uap->dmatx.queued) {
2148 dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
2149 DMA_TO_DEVICE);
2150 diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
2151 index 537896c4d887..3f64b08f50ef 100644
2152 --- a/drivers/tty/serial/fsl_lpuart.c
2153 +++ b/drivers/tty/serial/fsl_lpuart.c
2154 @@ -437,8 +437,8 @@ static void lpuart_dma_tx(struct lpuart_port *sport)
2155 }
2156
2157 sport->dma_tx_desc = dmaengine_prep_slave_sg(sport->dma_tx_chan, sgl,
2158 - sport->dma_tx_nents,
2159 - DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
2160 + ret, DMA_MEM_TO_DEV,
2161 + DMA_PREP_INTERRUPT);
2162 if (!sport->dma_tx_desc) {
2163 dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE);
2164 dev_err(dev, "Cannot prepare TX slave DMA!\n");
2165 diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
2166 index ffefd218761e..31033d517e82 100644
2167 --- a/drivers/tty/serial/ifx6x60.c
2168 +++ b/drivers/tty/serial/ifx6x60.c
2169 @@ -1230,6 +1230,9 @@ static int ifx_spi_spi_remove(struct spi_device *spi)
2170 struct ifx_spi_device *ifx_dev = spi_get_drvdata(spi);
2171 /* stop activity */
2172 tasklet_kill(&ifx_dev->io_work_tasklet);
2173 +
2174 + pm_runtime_disable(&spi->dev);
2175 +
2176 /* free irq */
2177 free_irq(gpio_to_irq(ifx_dev->gpio.reset_out), ifx_dev);
2178 free_irq(gpio_to_irq(ifx_dev->gpio.srdy), ifx_dev);
2179 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
2180 index 3657a24913fc..00964b6e4ac1 100644
2181 --- a/drivers/tty/serial/msm_serial.c
2182 +++ b/drivers/tty/serial/msm_serial.c
2183 @@ -980,6 +980,7 @@ static unsigned int msm_get_mctrl(struct uart_port *port)
2184 static void msm_reset(struct uart_port *port)
2185 {
2186 struct msm_port *msm_port = UART_TO_MSM(port);
2187 + unsigned int mr;
2188
2189 /* reset everything */
2190 msm_write(port, UART_CR_CMD_RESET_RX, UART_CR);
2191 @@ -987,7 +988,10 @@ static void msm_reset(struct uart_port *port)
2192 msm_write(port, UART_CR_CMD_RESET_ERR, UART_CR);
2193 msm_write(port, UART_CR_CMD_RESET_BREAK_INT, UART_CR);
2194 msm_write(port, UART_CR_CMD_RESET_CTS, UART_CR);
2195 - msm_write(port, UART_CR_CMD_SET_RFR, UART_CR);
2196 + msm_write(port, UART_CR_CMD_RESET_RFR, UART_CR);
2197 + mr = msm_read(port, UART_MR1);
2198 + mr &= ~UART_MR1_RX_RDY_CTL;
2199 + msm_write(port, mr, UART_MR1);
2200
2201 /* Disable DM modes */
2202 if (msm_port->is_uartdm)
2203 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
2204 index c4a414a46c7f..b0a6eb106edb 100644
2205 --- a/drivers/tty/serial/serial_core.c
2206 +++ b/drivers/tty/serial/serial_core.c
2207 @@ -1111,7 +1111,7 @@ static int uart_break_ctl(struct tty_struct *tty, int break_state)
2208 if (!uport)
2209 goto out;
2210
2211 - if (uport->type != PORT_UNKNOWN)
2212 + if (uport->type != PORT_UNKNOWN && uport->ops->break_ctl)
2213 uport->ops->break_ctl(uport, break_state);
2214 ret = 0;
2215 out:
2216 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
2217 index df90747ee3a8..2f72514d63ed 100644
2218 --- a/drivers/tty/serial/stm32-usart.c
2219 +++ b/drivers/tty/serial/stm32-usart.c
2220 @@ -240,8 +240,8 @@ static void stm32_receive_chars(struct uart_port *port, bool threaded)
2221 * cleared by the sequence [read SR - read DR].
2222 */
2223 if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
2224 - stm32_clr_bits(port, ofs->icr, USART_ICR_ORECF |
2225 - USART_ICR_PECF | USART_ICR_FECF);
2226 + writel_relaxed(sr & USART_SR_ERR_MASK,
2227 + port->membase + ofs->icr);
2228
2229 c = stm32_get_char(port, &sr, &stm32_port->last_res);
2230 port->icount.rx++;
2231 @@ -435,7 +435,7 @@ static void stm32_transmit_chars(struct uart_port *port)
2232 if (ofs->icr == UNDEF_REG)
2233 stm32_clr_bits(port, ofs->isr, USART_SR_TC);
2234 else
2235 - stm32_set_bits(port, ofs->icr, USART_ICR_TCCF);
2236 + writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);
2237
2238 if (stm32_port->tx_ch)
2239 stm32_transmit_chars_dma(port);
2240 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
2241 index 515fc095e3b4..15d33fa0c925 100644
2242 --- a/drivers/tty/vt/keyboard.c
2243 +++ b/drivers/tty/vt/keyboard.c
2244 @@ -1491,7 +1491,7 @@ static void kbd_event(struct input_handle *handle, unsigned int event_type,
2245
2246 if (event_type == EV_MSC && event_code == MSC_RAW && HW_RAW(handle->dev))
2247 kbd_rawcode(value);
2248 - if (event_type == EV_KEY)
2249 + if (event_type == EV_KEY && event_code <= KEY_MAX)
2250 kbd_keycode(event_code, value, HW_RAW(handle->dev));
2251
2252 spin_unlock(&kbd_event_lock);
2253 diff --git a/drivers/tty/vt/vc_screen.c b/drivers/tty/vt/vc_screen.c
2254 index 1f042346e722..778f83ea2249 100644
2255 --- a/drivers/tty/vt/vc_screen.c
2256 +++ b/drivers/tty/vt/vc_screen.c
2257 @@ -456,6 +456,9 @@ vcs_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
2258 size_t ret;
2259 char *con_buf;
2260
2261 + if (use_unicode(inode))
2262 + return -EOPNOTSUPP;
2263 +
2264 con_buf = (char *) __get_free_page(GFP_KERNEL);
2265 if (!con_buf)
2266 return -ENOMEM;
2267 diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
2268 index 65f634ec7fc2..bb1e2e1d0076 100644
2269 --- a/drivers/usb/gadget/function/u_serial.c
2270 +++ b/drivers/usb/gadget/function/u_serial.c
2271 @@ -1239,8 +1239,10 @@ int gserial_alloc_line(unsigned char *line_num)
2272 __func__, port_num, PTR_ERR(tty_dev));
2273
2274 ret = PTR_ERR(tty_dev);
2275 + mutex_lock(&ports[port_num].lock);
2276 port = ports[port_num].port;
2277 ports[port_num].port = NULL;
2278 + mutex_unlock(&ports[port_num].lock);
2279 gserial_free_port(port);
2280 goto err;
2281 }
2282 diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c
2283 index 4ec0906bf12c..7e00960651fa 100644
2284 --- a/drivers/watchdog/aspeed_wdt.c
2285 +++ b/drivers/watchdog/aspeed_wdt.c
2286 @@ -258,11 +258,6 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
2287 if (IS_ERR(wdt->base))
2288 return PTR_ERR(wdt->base);
2289
2290 - /*
2291 - * The ast2400 wdt can run at PCLK, or 1MHz. The ast2500 only
2292 - * runs at 1MHz. We chose to always run at 1MHz, as there's no
2293 - * good reason to have a faster watchdog counter.
2294 - */
2295 wdt->wdd.info = &aspeed_wdt_info;
2296 wdt->wdd.ops = &aspeed_wdt_ops;
2297 wdt->wdd.max_hw_heartbeat_ms = WDT_MAX_TIMEOUT_MS;
2298 @@ -278,7 +273,16 @@ static int aspeed_wdt_probe(struct platform_device *pdev)
2299 return -EINVAL;
2300 config = ofdid->data;
2301
2302 - wdt->ctrl = WDT_CTRL_1MHZ_CLK;
2303 + /*
2304 + * On clock rates:
2305 + * - ast2400 wdt can run at PCLK, or 1MHz
2306 + * - ast2500 only runs at 1MHz, hard coding bit 4 to 1
2307 + * - ast2600 always runs at 1MHz
2308 + *
2309 + * Set the ast2400 to run at 1MHz as it simplifies the driver.
2310 + */
2311 + if (of_device_is_compatible(np, "aspeed,ast2400-wdt"))
2312 + wdt->ctrl = WDT_CTRL_1MHZ_CLK;
2313
2314 /*
2315 * Control reset on a per-device basis to ensure the
2316 diff --git a/fs/block_dev.c b/fs/block_dev.c
2317 index 9c073dbdc1b0..d612468ee66b 100644
2318 --- a/fs/block_dev.c
2319 +++ b/fs/block_dev.c
2320 @@ -1403,11 +1403,7 @@ static void flush_disk(struct block_device *bdev, bool kill_dirty)
2321 "resized disk %s\n",
2322 bdev->bd_disk ? bdev->bd_disk->disk_name : "");
2323 }
2324 -
2325 - if (!bdev->bd_disk)
2326 - return;
2327 - if (disk_part_scan_enabled(bdev->bd_disk))
2328 - bdev->bd_invalidated = 1;
2329 + bdev->bd_invalidated = 1;
2330 }
2331
2332 /**
2333 @@ -1512,6 +1508,19 @@ EXPORT_SYMBOL(bd_set_size);
2334
2335 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
2336
2337 +static void bdev_disk_changed(struct block_device *bdev, bool invalidate)
2338 +{
2339 + if (disk_part_scan_enabled(bdev->bd_disk)) {
2340 + if (invalidate)
2341 + invalidate_partitions(bdev->bd_disk, bdev);
2342 + else
2343 + rescan_partitions(bdev->bd_disk, bdev);
2344 + } else {
2345 + check_disk_size_change(bdev->bd_disk, bdev, !invalidate);
2346 + bdev->bd_invalidated = 0;
2347 + }
2348 +}
2349 +
2350 /*
2351 * bd_mutex locking:
2352 *
2353 @@ -1594,12 +1603,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2354 * The latter is necessary to prevent ghost
2355 * partitions on a removed medium.
2356 */
2357 - if (bdev->bd_invalidated) {
2358 - if (!ret)
2359 - rescan_partitions(disk, bdev);
2360 - else if (ret == -ENOMEDIUM)
2361 - invalidate_partitions(disk, bdev);
2362 - }
2363 + if (bdev->bd_invalidated &&
2364 + (!ret || ret == -ENOMEDIUM))
2365 + bdev_disk_changed(bdev, ret == -ENOMEDIUM);
2366
2367 if (ret)
2368 goto out_clear;
2369 @@ -1632,12 +1638,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
2370 if (bdev->bd_disk->fops->open)
2371 ret = bdev->bd_disk->fops->open(bdev, mode);
2372 /* the same as first opener case, read comment there */
2373 - if (bdev->bd_invalidated) {
2374 - if (!ret)
2375 - rescan_partitions(bdev->bd_disk, bdev);
2376 - else if (ret == -ENOMEDIUM)
2377 - invalidate_partitions(bdev->bd_disk, bdev);
2378 - }
2379 + if (bdev->bd_invalidated &&
2380 + (!ret || ret == -ENOMEDIUM))
2381 + bdev_disk_changed(bdev, ret == -ENOMEDIUM);
2382 if (ret)
2383 goto out_unlock_bdev;
2384 }
2385 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
2386 index fa7b0fa72bb3..a3b6be80f8a9 100644
2387 --- a/fs/cifs/file.c
2388 +++ b/fs/cifs/file.c
2389 @@ -313,9 +313,6 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2390 INIT_LIST_HEAD(&fdlocks->locks);
2391 fdlocks->cfile = cfile;
2392 cfile->llist = fdlocks;
2393 - cifs_down_write(&cinode->lock_sem);
2394 - list_add(&fdlocks->llist, &cinode->llist);
2395 - up_write(&cinode->lock_sem);
2396
2397 cfile->count = 1;
2398 cfile->pid = current->tgid;
2399 @@ -339,6 +336,10 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
2400 oplock = 0;
2401 }
2402
2403 + cifs_down_write(&cinode->lock_sem);
2404 + list_add(&fdlocks->llist, &cinode->llist);
2405 + up_write(&cinode->lock_sem);
2406 +
2407 spin_lock(&tcon->open_file_lock);
2408 if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
2409 oplock = fid->pending_open->oplock;
2410 diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
2411 index e311f58dc1c8..449d1584ff72 100644
2412 --- a/fs/cifs/smb2misc.c
2413 +++ b/fs/cifs/smb2misc.c
2414 @@ -673,10 +673,10 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2415 spin_lock(&cifs_tcp_ses_lock);
2416 list_for_each(tmp, &server->smb_ses_list) {
2417 ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
2418 +
2419 list_for_each(tmp1, &ses->tcon_list) {
2420 tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
2421
2422 - cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
2423 spin_lock(&tcon->open_file_lock);
2424 list_for_each(tmp2, &tcon->openFileList) {
2425 cfile = list_entry(tmp2, struct cifsFileInfo,
2426 @@ -688,6 +688,8 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2427 continue;
2428
2429 cifs_dbg(FYI, "file id match, oplock break\n");
2430 + cifs_stats_inc(
2431 + &tcon->stats.cifs_stats.num_oplock_brks);
2432 cinode = CIFS_I(d_inode(cfile->dentry));
2433 spin_lock(&cfile->file_info_lock);
2434 if (!CIFS_CACHE_WRITE(cinode) &&
2435 @@ -720,9 +722,6 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
2436 return true;
2437 }
2438 spin_unlock(&tcon->open_file_lock);
2439 - spin_unlock(&cifs_tcp_ses_lock);
2440 - cifs_dbg(FYI, "No matching file for oplock break\n");
2441 - return true;
2442 }
2443 }
2444 spin_unlock(&cifs_tcp_ses_lock);
2445 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
2446 index 54d638f9ba1c..ee190119f45c 100644
2447 --- a/fs/fuse/dir.c
2448 +++ b/fs/fuse/dir.c
2449 @@ -248,7 +248,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
2450 kfree(forget);
2451 if (ret == -ENOMEM)
2452 goto out;
2453 - if (ret || (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
2454 + if (ret || fuse_invalid_attr(&outarg.attr) ||
2455 + (outarg.attr.mode ^ inode->i_mode) & S_IFMT)
2456 goto invalid;
2457
2458 forget_all_cached_acls(inode);
2459 @@ -319,6 +320,12 @@ int fuse_valid_type(int m)
2460 S_ISBLK(m) || S_ISFIFO(m) || S_ISSOCK(m);
2461 }
2462
2463 +bool fuse_invalid_attr(struct fuse_attr *attr)
2464 +{
2465 + return !fuse_valid_type(attr->mode) ||
2466 + attr->size > LLONG_MAX;
2467 +}
2468 +
2469 int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name,
2470 struct fuse_entry_out *outarg, struct inode **inode)
2471 {
2472 @@ -350,7 +357,7 @@ int fuse_lookup_name(struct super_block *sb, u64 nodeid, const struct qstr *name
2473 err = -EIO;
2474 if (!outarg->nodeid)
2475 goto out_put_forget;
2476 - if (!fuse_valid_type(outarg->attr.mode))
2477 + if (fuse_invalid_attr(&outarg->attr))
2478 goto out_put_forget;
2479
2480 *inode = fuse_iget(sb, outarg->nodeid, outarg->generation,
2481 @@ -475,7 +482,8 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
2482 goto out_free_ff;
2483
2484 err = -EIO;
2485 - if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid))
2486 + if (!S_ISREG(outentry.attr.mode) || invalid_nodeid(outentry.nodeid) ||
2487 + fuse_invalid_attr(&outentry.attr))
2488 goto out_free_ff;
2489
2490 ff->fh = outopen.fh;
2491 @@ -583,7 +591,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
2492 goto out_put_forget_req;
2493
2494 err = -EIO;
2495 - if (invalid_nodeid(outarg.nodeid))
2496 + if (invalid_nodeid(outarg.nodeid) || fuse_invalid_attr(&outarg.attr))
2497 goto out_put_forget_req;
2498
2499 if ((outarg.attr.mode ^ mode) & S_IFMT)
2500 @@ -862,7 +870,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
2501
2502 spin_lock(&fi->lock);
2503 fi->attr_version = atomic64_inc_return(&fc->attr_version);
2504 - inc_nlink(inode);
2505 + if (likely(inode->i_nlink < UINT_MAX))
2506 + inc_nlink(inode);
2507 spin_unlock(&fi->lock);
2508 fuse_invalidate_attr(inode);
2509 fuse_update_ctime(inode);
2510 @@ -942,7 +951,8 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
2511 args.out_args[0].value = &outarg;
2512 err = fuse_simple_request(fc, &args);
2513 if (!err) {
2514 - if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
2515 + if (fuse_invalid_attr(&outarg.attr) ||
2516 + (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
2517 make_bad_inode(inode);
2518 err = -EIO;
2519 } else {
2520 @@ -1563,7 +1573,8 @@ int fuse_do_setattr(struct dentry *dentry, struct iattr *attr,
2521 goto error;
2522 }
2523
2524 - if ((inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
2525 + if (fuse_invalid_attr(&outarg.attr) ||
2526 + (inode->i_mode ^ outarg.attr.mode) & S_IFMT) {
2527 make_bad_inode(inode);
2528 err = -EIO;
2529 goto error;
2530 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
2531 index db48a5cf8620..a63d779eac10 100644
2532 --- a/fs/fuse/file.c
2533 +++ b/fs/fuse/file.c
2534 @@ -713,8 +713,10 @@ static ssize_t fuse_async_req_send(struct fuse_conn *fc,
2535
2536 ia->ap.args.end = fuse_aio_complete_req;
2537 err = fuse_simple_background(fc, &ia->ap.args, GFP_KERNEL);
2538 + if (err)
2539 + fuse_aio_complete_req(fc, &ia->ap.args, err);
2540
2541 - return err ?: num_bytes;
2542 + return num_bytes;
2543 }
2544
2545 static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count,
2546 @@ -1096,6 +1098,8 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia,
2547 ia->write.in.flags = fuse_write_flags(iocb);
2548
2549 err = fuse_simple_request(fc, &ap->args);
2550 + if (!err && ia->write.out.size > count)
2551 + err = -EIO;
2552
2553 offset = ap->descs[0].offset;
2554 count = ia->write.out.size;
2555 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
2556 index d148188cfca4..aa75e2305b75 100644
2557 --- a/fs/fuse/fuse_i.h
2558 +++ b/fs/fuse/fuse_i.h
2559 @@ -989,6 +989,8 @@ void fuse_ctl_remove_conn(struct fuse_conn *fc);
2560 */
2561 int fuse_valid_type(int m);
2562
2563 +bool fuse_invalid_attr(struct fuse_attr *attr);
2564 +
2565 /**
2566 * Is current process allowed to perform filesystem operation?
2567 */
2568 diff --git a/fs/fuse/readdir.c b/fs/fuse/readdir.c
2569 index 5c38b9d84c6e..6a40f75a0d25 100644
2570 --- a/fs/fuse/readdir.c
2571 +++ b/fs/fuse/readdir.c
2572 @@ -184,7 +184,7 @@ static int fuse_direntplus_link(struct file *file,
2573
2574 if (invalid_nodeid(o->nodeid))
2575 return -EIO;
2576 - if (!fuse_valid_type(o->attr.mode))
2577 + if (fuse_invalid_attr(&o->attr))
2578 return -EIO;
2579
2580 fc = get_fuse_conn(dir);
2581 diff --git a/fs/io_uring.c b/fs/io_uring.c
2582 index cbe8dabb6479..a340147387ec 100644
2583 --- a/fs/io_uring.c
2584 +++ b/fs/io_uring.c
2585 @@ -70,6 +70,7 @@
2586 #include <linux/nospec.h>
2587 #include <linux/sizes.h>
2588 #include <linux/hugetlb.h>
2589 +#include <linux/highmem.h>
2590
2591 #include <uapi/linux/io_uring.h>
2592
2593 @@ -1351,9 +1352,19 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2594 return -EAGAIN;
2595
2596 while (iov_iter_count(iter)) {
2597 - struct iovec iovec = iov_iter_iovec(iter);
2598 + struct iovec iovec;
2599 ssize_t nr;
2600
2601 + if (!iov_iter_is_bvec(iter)) {
2602 + iovec = iov_iter_iovec(iter);
2603 + } else {
2604 + /* fixed buffers import bvec */
2605 + iovec.iov_base = kmap(iter->bvec->bv_page)
2606 + + iter->iov_offset;
2607 + iovec.iov_len = min(iter->count,
2608 + iter->bvec->bv_len - iter->iov_offset);
2609 + }
2610 +
2611 if (rw == READ) {
2612 nr = file->f_op->read(file, iovec.iov_base,
2613 iovec.iov_len, &kiocb->ki_pos);
2614 @@ -1362,6 +1373,9 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
2615 iovec.iov_len, &kiocb->ki_pos);
2616 }
2617
2618 + if (iov_iter_is_bvec(iter))
2619 + kunmap(iter->bvec->bv_page);
2620 +
2621 if (nr < 0) {
2622 if (!ret)
2623 ret = nr;
2624 @@ -1654,6 +1668,8 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
2625 ret = fn(sock, msg, flags);
2626 if (force_nonblock && ret == -EAGAIN)
2627 return ret;
2628 + if (ret == -ERESTARTSYS)
2629 + ret = -EINTR;
2630 }
2631
2632 io_cqring_add_event(req->ctx, sqe->user_data, ret);
2633 @@ -2023,7 +2039,7 @@ add:
2634 }
2635
2636 static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
2637 - const struct io_uring_sqe *sqe)
2638 + struct sqe_submit *s)
2639 {
2640 struct io_uring_sqe *sqe_copy;
2641
2642 @@ -2041,7 +2057,8 @@ static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
2643 return 0;
2644 }
2645
2646 - memcpy(sqe_copy, sqe, sizeof(*sqe_copy));
2647 + memcpy(&req->submit, s, sizeof(*s));
2648 + memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
2649 req->submit.sqe = sqe_copy;
2650
2651 INIT_WORK(&req->work, io_sq_wq_submit_work);
2652 @@ -2409,7 +2426,7 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2653 {
2654 int ret;
2655
2656 - ret = io_req_defer(ctx, req, s->sqe);
2657 + ret = io_req_defer(ctx, req, s);
2658 if (ret) {
2659 if (ret != -EIOCBQUEUED) {
2660 io_free_req(req);
2661 @@ -2436,7 +2453,7 @@ static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
2662 * list.
2663 */
2664 req->flags |= REQ_F_IO_DRAIN;
2665 - ret = io_req_defer(ctx, req, s->sqe);
2666 + ret = io_req_defer(ctx, req, s);
2667 if (ret) {
2668 if (ret != -EIOCBQUEUED) {
2669 io_free_req(req);
2670 diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
2671 index 1fc28c2da279..fd46ec83cb04 100644
2672 --- a/fs/iomap/direct-io.c
2673 +++ b/fs/iomap/direct-io.c
2674 @@ -497,8 +497,15 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
2675 }
2676 pos += ret;
2677
2678 - if (iov_iter_rw(iter) == READ && pos >= dio->i_size)
2679 + if (iov_iter_rw(iter) == READ && pos >= dio->i_size) {
2680 + /*
2681 + * We only report that we've read data up to i_size.
2682 + * Revert iter to a state corresponding to that as
2683 + * some callers (such as splice code) rely on it.
2684 + */
2685 + iov_iter_revert(iter, pos - dio->i_size);
2686 break;
2687 + }
2688 } while ((count = iov_iter_count(iter)) > 0);
2689 blk_finish_plug(&plug);
2690
2691 diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
2692 index 6ebae6bbe6a5..7d4af6cea2a6 100644
2693 --- a/fs/kernfs/dir.c
2694 +++ b/fs/kernfs/dir.c
2695 @@ -622,7 +622,6 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
2696 {
2697 struct kernfs_node *kn;
2698 u32 gen;
2699 - int cursor;
2700 int ret;
2701
2702 name = kstrdup_const(name, GFP_KERNEL);
2703 @@ -635,11 +634,11 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
2704
2705 idr_preload(GFP_KERNEL);
2706 spin_lock(&kernfs_idr_lock);
2707 - cursor = idr_get_cursor(&root->ino_idr);
2708 ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC);
2709 - if (ret >= 0 && ret < cursor)
2710 + if (ret >= 0 && ret < root->last_ino)
2711 root->next_generation++;
2712 gen = root->next_generation;
2713 + root->last_ino = ret;
2714 spin_unlock(&kernfs_idr_lock);
2715 idr_preload_end();
2716 if (ret < 0)
2717 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
2718 index 4e3e77b76411..38c0aeda500e 100644
2719 --- a/fs/nfsd/nfs4proc.c
2720 +++ b/fs/nfsd/nfs4proc.c
2721 @@ -1077,7 +1077,8 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2722 goto out;
2723
2724 status = nfsd4_clone_file_range(src->nf_file, clone->cl_src_pos,
2725 - dst->nf_file, clone->cl_dst_pos, clone->cl_count);
2726 + dst->nf_file, clone->cl_dst_pos, clone->cl_count,
2727 + EX_ISSYNC(cstate->current_fh.fh_export));
2728
2729 nfsd_file_put(dst);
2730 nfsd_file_put(src);
2731 diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
2732 index fdf7ed4bd5dd..e8bee8ff30c5 100644
2733 --- a/fs/nfsd/nfssvc.c
2734 +++ b/fs/nfsd/nfssvc.c
2735 @@ -95,12 +95,11 @@ static const struct svc_version *nfsd_acl_version[] = {
2736
2737 #define NFSD_ACL_MINVERS 2
2738 #define NFSD_ACL_NRVERS ARRAY_SIZE(nfsd_acl_version)
2739 -static const struct svc_version *nfsd_acl_versions[NFSD_ACL_NRVERS];
2740
2741 static struct svc_program nfsd_acl_program = {
2742 .pg_prog = NFS_ACL_PROGRAM,
2743 .pg_nvers = NFSD_ACL_NRVERS,
2744 - .pg_vers = nfsd_acl_versions,
2745 + .pg_vers = nfsd_acl_version,
2746 .pg_name = "nfsacl",
2747 .pg_class = "nfsd",
2748 .pg_stats = &nfsd_acl_svcstats,
2749 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
2750 index bd0a385df3fc..cf423fea0c6f 100644
2751 --- a/fs/nfsd/vfs.c
2752 +++ b/fs/nfsd/vfs.c
2753 @@ -525,7 +525,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *rqstp, struct svc_fh *fhp,
2754 #endif
2755
2756 __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
2757 - u64 dst_pos, u64 count)
2758 + u64 dst_pos, u64 count, bool sync)
2759 {
2760 loff_t cloned;
2761
2762 @@ -534,6 +534,12 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst,
2763 return nfserrno(cloned);
2764 if (count && cloned != count)
2765 return nfserrno(-EINVAL);
2766 + if (sync) {
2767 + loff_t dst_end = count ? dst_pos + count - 1 : LLONG_MAX;
2768 + int status = vfs_fsync_range(dst, dst_pos, dst_end, 0);
2769 + if (status < 0)
2770 + return nfserrno(status);
2771 + }
2772 return 0;
2773 }
2774
2775 diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h
2776 index a13fd9d7e1f5..cc110a10bfe8 100644
2777 --- a/fs/nfsd/vfs.h
2778 +++ b/fs/nfsd/vfs.h
2779 @@ -56,7 +56,7 @@ __be32 nfsd4_set_nfs4_label(struct svc_rqst *, struct svc_fh *,
2780 __be32 nfsd4_vfs_fallocate(struct svc_rqst *, struct svc_fh *,
2781 struct file *, loff_t, loff_t, int);
2782 __be32 nfsd4_clone_file_range(struct file *, u64, struct file *,
2783 - u64, u64);
2784 + u64, u64, bool);
2785 #endif /* CONFIG_NFSD_V4 */
2786 __be32 nfsd_create_locked(struct svc_rqst *, struct svc_fh *,
2787 char *name, int len, struct iattr *attrs,
2788 diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
2789 index 603fbc4e2f70..10e6049c0ba9 100644
2790 --- a/include/linux/jbd2.h
2791 +++ b/include/linux/jbd2.h
2792 @@ -1582,7 +1582,7 @@ static inline int jbd2_space_needed(journal_t *journal)
2793 static inline unsigned long jbd2_log_space_left(journal_t *journal)
2794 {
2795 /* Allow for rounding errors */
2796 - unsigned long free = journal->j_free - 32;
2797 + long free = journal->j_free - 32;
2798
2799 if (journal->j_committing_transaction) {
2800 unsigned long committing = atomic_read(&journal->
2801 @@ -1591,7 +1591,7 @@ static inline unsigned long jbd2_log_space_left(journal_t *journal)
2802 /* Transaction + control blocks */
2803 free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
2804 }
2805 - return free;
2806 + return max_t(long, free, 0);
2807 }
2808
2809 /*
2810 diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
2811 index 936b61bd504e..f797ccc650e7 100644
2812 --- a/include/linux/kernfs.h
2813 +++ b/include/linux/kernfs.h
2814 @@ -187,6 +187,7 @@ struct kernfs_root {
2815
2816 /* private fields, do not use outside kernfs proper */
2817 struct idr ino_idr;
2818 + u32 last_ino;
2819 u32 next_generation;
2820 struct kernfs_syscall_ops *syscall_ops;
2821
2822 diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
2823 index 3247a3dc7934..b06b75776a32 100644
2824 --- a/include/linux/miscdevice.h
2825 +++ b/include/linux/miscdevice.h
2826 @@ -57,6 +57,7 @@
2827 #define UHID_MINOR 239
2828 #define USERIO_MINOR 240
2829 #define VHOST_VSOCK_MINOR 241
2830 +#define RFKILL_MINOR 242
2831 #define MISC_DYNAMIC_MINOR 255
2832
2833 struct device;
2834 diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
2835 index b260c5fd2337..e05b95e83d5a 100644
2836 --- a/include/sound/hdaudio.h
2837 +++ b/include/sound/hdaudio.h
2838 @@ -493,6 +493,7 @@ struct hdac_stream {
2839 bool prepared:1;
2840 bool no_period_wakeup:1;
2841 bool locked:1;
2842 + bool stripe:1; /* apply stripe control */
2843
2844 /* timestamp */
2845 unsigned long start_wallclk; /* start + minimum wallclk */
2846 diff --git a/kernel/time/time.c b/kernel/time/time.c
2847 index 5c54ca632d08..83f403e7a15c 100644
2848 --- a/kernel/time/time.c
2849 +++ b/kernel/time/time.c
2850 @@ -881,7 +881,8 @@ int get_timespec64(struct timespec64 *ts,
2851 ts->tv_sec = kts.tv_sec;
2852
2853 /* Zero out the padding for 32 bit systems or in compat mode */
2854 - if (IS_ENABLED(CONFIG_64BIT_TIME) && in_compat_syscall())
2855 + if (IS_ENABLED(CONFIG_64BIT_TIME) && (!IS_ENABLED(CONFIG_64BIT) ||
2856 + in_compat_syscall()))
2857 kts.tv_nsec &= 0xFFFFFFFFUL;
2858
2859 ts->tv_nsec = kts.tv_nsec;
2860 diff --git a/net/rfkill/core.c b/net/rfkill/core.c
2861 index f9b08a6d8dbe..0bf9bf1ceb8f 100644
2862 --- a/net/rfkill/core.c
2863 +++ b/net/rfkill/core.c
2864 @@ -1316,10 +1316,12 @@ static const struct file_operations rfkill_fops = {
2865 .llseek = no_llseek,
2866 };
2867
2868 +#define RFKILL_NAME "rfkill"
2869 +
2870 static struct miscdevice rfkill_miscdev = {
2871 - .name = "rfkill",
2872 .fops = &rfkill_fops,
2873 - .minor = MISC_DYNAMIC_MINOR,
2874 + .name = RFKILL_NAME,
2875 + .minor = RFKILL_MINOR,
2876 };
2877
2878 static int __init rfkill_init(void)
2879 @@ -1371,3 +1373,6 @@ static void __exit rfkill_exit(void)
2880 class_unregister(&rfkill_class);
2881 }
2882 module_exit(rfkill_exit);
2883 +
2884 +MODULE_ALIAS_MISCDEV(RFKILL_MINOR);
2885 +MODULE_ALIAS("devname:" RFKILL_NAME);
2886 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
2887 index 360afe153193..987c4b1f0b17 100644
2888 --- a/net/sunrpc/sched.c
2889 +++ b/net/sunrpc/sched.c
2890 @@ -260,7 +260,7 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
2891 rpc_reset_waitqueue_priority(queue);
2892 queue->qlen = 0;
2893 queue->timer_list.expires = 0;
2894 - INIT_DEFERRABLE_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
2895 + INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
2896 INIT_LIST_HEAD(&queue->timer_list.list);
2897 rpc_assign_waitqueue_name(queue, qname);
2898 }
2899 diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c
2900 index 2045697f449d..797d838a2f9e 100644
2901 --- a/sound/core/oss/linear.c
2902 +++ b/sound/core/oss/linear.c
2903 @@ -107,6 +107,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin,
2904 }
2905 }
2906 #endif
2907 + if (frames > dst_channels[0].frames)
2908 + frames = dst_channels[0].frames;
2909 convert(plugin, src_channels, dst_channels, frames);
2910 return frames;
2911 }
2912 diff --git a/sound/core/oss/mulaw.c b/sound/core/oss/mulaw.c
2913 index 7915564bd394..3788906421a7 100644
2914 --- a/sound/core/oss/mulaw.c
2915 +++ b/sound/core/oss/mulaw.c
2916 @@ -269,6 +269,8 @@ static snd_pcm_sframes_t mulaw_transfer(struct snd_pcm_plugin *plugin,
2917 }
2918 }
2919 #endif
2920 + if (frames > dst_channels[0].frames)
2921 + frames = dst_channels[0].frames;
2922 data = (struct mulaw_priv *)plugin->extra_data;
2923 data->func(plugin, src_channels, dst_channels, frames);
2924 return frames;
2925 diff --git a/sound/core/oss/route.c b/sound/core/oss/route.c
2926 index c8171f5783c8..72dea04197ef 100644
2927 --- a/sound/core/oss/route.c
2928 +++ b/sound/core/oss/route.c
2929 @@ -57,6 +57,8 @@ static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin,
2930 return -ENXIO;
2931 if (frames == 0)
2932 return 0;
2933 + if (frames > dst_channels[0].frames)
2934 + frames = dst_channels[0].frames;
2935
2936 nsrcs = plugin->src_format.channels;
2937 ndsts = plugin->dst_format.channels;
2938 diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
2939 index d8fe7ff0cd58..f9707fb05efe 100644
2940 --- a/sound/hda/hdac_stream.c
2941 +++ b/sound/hda/hdac_stream.c
2942 @@ -96,12 +96,14 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev, bool fresh_start)
2943 1 << azx_dev->index,
2944 1 << azx_dev->index);
2945 /* set stripe control */
2946 - if (azx_dev->substream)
2947 - stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
2948 - else
2949 - stripe_ctl = 0;
2950 - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
2951 - stripe_ctl);
2952 + if (azx_dev->stripe) {
2953 + if (azx_dev->substream)
2954 + stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
2955 + else
2956 + stripe_ctl = 0;
2957 + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
2958 + stripe_ctl);
2959 + }
2960 /* set DMA start and interrupt mask */
2961 snd_hdac_stream_updateb(azx_dev, SD_CTL,
2962 0, SD_CTL_DMA_START | SD_INT_MASK);
2963 @@ -118,7 +120,10 @@ void snd_hdac_stream_clear(struct hdac_stream *azx_dev)
2964 snd_hdac_stream_updateb(azx_dev, SD_CTL,
2965 SD_CTL_DMA_START | SD_INT_MASK, 0);
2966 snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
2967 - snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
2968 + if (azx_dev->stripe) {
2969 + snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
2970 + azx_dev->stripe = 0;
2971 + }
2972 azx_dev->running = false;
2973 }
2974 EXPORT_SYMBOL_GPL(snd_hdac_stream_clear);
2975 diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c
2976 index 8272b50b8349..6a8564566375 100644
2977 --- a/sound/pci/hda/hda_bind.c
2978 +++ b/sound/pci/hda/hda_bind.c
2979 @@ -43,6 +43,10 @@ static void hda_codec_unsol_event(struct hdac_device *dev, unsigned int ev)
2980 {
2981 struct hda_codec *codec = container_of(dev, struct hda_codec, core);
2982
2983 + /* ignore unsol events during shutdown */
2984 + if (codec->bus->shutdown)
2985 + return;
2986 +
2987 if (codec->patch_ops.unsol_event)
2988 codec->patch_ops.unsol_event(codec, ev);
2989 }
2990 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
2991 index c52419376c74..86a416cdeb29 100644
2992 --- a/sound/pci/hda/hda_intel.c
2993 +++ b/sound/pci/hda/hda_intel.c
2994 @@ -1382,8 +1382,11 @@ static int azx_free(struct azx *chip)
2995 static int azx_dev_disconnect(struct snd_device *device)
2996 {
2997 struct azx *chip = device->device_data;
2998 + struct hdac_bus *bus = azx_bus(chip);
2999
3000 chip->bus.shutdown = 1;
3001 + cancel_work_sync(&bus->unsol_work);
3002 +
3003 return 0;
3004 }
3005
3006 diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
3007 index 968d3caab6ac..90aa0f400a57 100644
3008 --- a/sound/pci/hda/patch_conexant.c
3009 +++ b/sound/pci/hda/patch_conexant.c
3010 @@ -910,6 +910,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
3011 SND_PCI_QUIRK(0x103c, 0x837f, "HP ProBook 470 G5", CXT_FIXUP_MUTE_LED_GPIO),
3012 SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
3013 SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
3014 + SND_PCI_QUIRK(0x103c, 0x8402, "HP ProBook 645 G4", CXT_FIXUP_MUTE_LED_GPIO),
3015 SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
3016 SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
3017 SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
3018 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
3019 index d14f6684737d..4dafc864d765 100644
3020 --- a/sound/pci/hda/patch_hdmi.c
3021 +++ b/sound/pci/hda/patch_hdmi.c
3022 @@ -32,6 +32,7 @@
3023 #include <sound/hda_codec.h>
3024 #include "hda_local.h"
3025 #include "hda_jack.h"
3026 +#include "hda_controller.h"
3027
3028 static bool static_hdmi_pcm;
3029 module_param(static_hdmi_pcm, bool, 0644);
3030 @@ -1240,6 +1241,10 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
3031 per_pin->cvt_nid = per_cvt->cvt_nid;
3032 hinfo->nid = per_cvt->cvt_nid;
3033
3034 + /* flip stripe flag for the assigned stream if supported */
3035 + if (get_wcaps(codec, per_cvt->cvt_nid) & AC_WCAP_STRIPE)
3036 + azx_stream(get_azx_dev(substream))->stripe = 1;
3037 +
3038 snd_hda_set_dev_select(codec, per_pin->pin_nid, per_pin->dev_id);
3039 snd_hda_codec_write_cache(codec, per_pin->pin_nid, 0,
3040 AC_VERB_SET_CONNECT_SEL,
3041 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3042 index 80f66ba85f87..ed3e314b5233 100644
3043 --- a/sound/pci/hda/patch_realtek.c
3044 +++ b/sound/pci/hda/patch_realtek.c
3045 @@ -367,9 +367,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
3046 case 0x10ec0215:
3047 case 0x10ec0233:
3048 case 0x10ec0235:
3049 - case 0x10ec0236:
3050 case 0x10ec0255:
3051 - case 0x10ec0256:
3052 case 0x10ec0257:
3053 case 0x10ec0282:
3054 case 0x10ec0283:
3055 @@ -381,6 +379,11 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
3056 case 0x10ec0300:
3057 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
3058 break;
3059 + case 0x10ec0236:
3060 + case 0x10ec0256:
3061 + alc_write_coef_idx(codec, 0x36, 0x5757);
3062 + alc_update_coef_idx(codec, 0x10, 1<<9, 0);
3063 + break;
3064 case 0x10ec0275:
3065 alc_update_coef_idx(codec, 0xe, 0, 1<<0);
3066 break;
3067 @@ -5892,6 +5895,7 @@ enum {
3068 ALC299_FIXUP_PREDATOR_SPK,
3069 ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
3070 ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
3071 + ALC294_FIXUP_ASUS_INTSPK_GPIO,
3072 };
3073
3074 static const struct hda_fixup alc269_fixups[] = {
3075 @@ -6982,6 +6986,13 @@ static const struct hda_fixup alc269_fixups[] = {
3076 .chained = true,
3077 .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
3078 },
3079 + [ALC294_FIXUP_ASUS_INTSPK_GPIO] = {
3080 + .type = HDA_FIXUP_FUNC,
3081 + /* The GPIO must be pulled to initialize the AMP */
3082 + .v.func = alc_fixup_gpio4,
3083 + .chained = true,
3084 + .chain_id = ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC
3085 + },
3086 };
3087
3088 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3089 @@ -7141,7 +7152,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3090 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
3091 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
3092 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
3093 - SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
3094 + SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_GPIO),
3095 SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
3096 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
3097 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
3098 @@ -7248,6 +7259,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3099 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
3100 SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MACH-WX9", ALC256_FIXUP_HUAWEI_MACH_WX9_PINS),
3101 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
3102 + SND_PCI_QUIRK(0x1d72, 0x1901, "RedmiBook 14", ALC256_FIXUP_ASUS_HEADSET_MIC),
3103 SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
3104
3105 #if 0
3106 @@ -8455,6 +8467,8 @@ static void alc662_fixup_aspire_ethos_hp(struct hda_codec *codec,
3107 case HDA_FIXUP_ACT_PRE_PROBE:
3108 snd_hda_jack_detect_enable_callback(codec, 0x1b,
3109 alc662_aspire_ethos_mute_speakers);
3110 + /* subwoofer needs an extra GPIO setting to become audible */
3111 + alc_setup_gpio(codec, 0x02);
3112 break;
3113 case HDA_FIXUP_ACT_INIT:
3114 /* Make sure to start in a correct state, i.e. if
3115 @@ -8537,7 +8551,6 @@ enum {
3116 ALC662_FIXUP_USI_HEADSET_MODE,
3117 ALC662_FIXUP_LENOVO_MULTI_CODECS,
3118 ALC669_FIXUP_ACER_ASPIRE_ETHOS,
3119 - ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER,
3120 ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET,
3121 };
3122
3123 @@ -8869,18 +8882,6 @@ static const struct hda_fixup alc662_fixups[] = {
3124 .type = HDA_FIXUP_FUNC,
3125 .v.func = alc662_fixup_aspire_ethos_hp,
3126 },
3127 - [ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER] = {
3128 - .type = HDA_FIXUP_VERBS,
3129 - /* subwoofer needs an extra GPIO setting to become audible */
3130 - .v.verbs = (const struct hda_verb[]) {
3131 - {0x01, AC_VERB_SET_GPIO_MASK, 0x02},
3132 - {0x01, AC_VERB_SET_GPIO_DIRECTION, 0x02},
3133 - {0x01, AC_VERB_SET_GPIO_DATA, 0x00},
3134 - { }
3135 - },
3136 - .chained = true,
3137 - .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET
3138 - },
3139 [ALC669_FIXUP_ACER_ASPIRE_ETHOS] = {
3140 .type = HDA_FIXUP_PINS,
3141 .v.pins = (const struct hda_pintbl[]) {
3142 @@ -8890,7 +8891,7 @@ static const struct hda_fixup alc662_fixups[] = {
3143 { }
3144 },
3145 .chained = true,
3146 - .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_SUBWOOFER
3147 + .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET
3148 },
3149 };
3150
3151 diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
3152 index 67be8d31afab..6dba8b728d23 100644
3153 --- a/tools/perf/builtin-script.c
3154 +++ b/tools/perf/builtin-script.c
3155 @@ -1084,7 +1084,7 @@ static int perf_sample__fprintf_brstackinsn(struct perf_sample *sample,
3156 insn++;
3157 }
3158 }
3159 - if (off != (unsigned)len)
3160 + if (off != end - start)
3161 printed += fprintf(fp, "\tmismatch of LBR data and executable\n");
3162 }
3163
3164 diff --git a/tools/perf/scripts/python/exported-sql-viewer.py b/tools/perf/scripts/python/exported-sql-viewer.py
3165 index 61b3911d91e6..4b28c9d08d5a 100755
3166 --- a/tools/perf/scripts/python/exported-sql-viewer.py
3167 +++ b/tools/perf/scripts/python/exported-sql-viewer.py
3168 @@ -625,7 +625,7 @@ class CallGraphRootItem(CallGraphLevelItemBase):
3169 self.query_done = True
3170 if_has_calls = ""
3171 if IsSelectable(glb.db, "comms", columns = "has_calls"):
3172 - if_has_calls = " WHERE has_calls = TRUE"
3173 + if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
3174 query = QSqlQuery(glb.db)
3175 QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
3176 while query.next():
3177 @@ -905,7 +905,7 @@ class CallTreeRootItem(CallGraphLevelItemBase):
3178 self.query_done = True
3179 if_has_calls = ""
3180 if IsSelectable(glb.db, "comms", columns = "has_calls"):
3181 - if_has_calls = " WHERE has_calls = TRUE"
3182 + if_has_calls = " WHERE has_calls = " + glb.dbref.TRUE
3183 query = QSqlQuery(glb.db)
3184 QueryExec(query, "SELECT id, comm FROM comms" + if_has_calls)
3185 while query.next():
3186 @@ -3509,6 +3509,12 @@ class DBRef():
3187 def __init__(self, is_sqlite3, dbname):
3188 self.is_sqlite3 = is_sqlite3
3189 self.dbname = dbname
3190 + self.TRUE = "TRUE"
3191 + self.FALSE = "FALSE"
3192 + # SQLite prior to version 3.23 does not support TRUE and FALSE
3193 + if self.is_sqlite3:
3194 + self.TRUE = "1"
3195 + self.FALSE = "0"
3196
3197 def Open(self, connection_name):
3198 dbname = self.dbname
3199 diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
3200 index 4cdbae6f4e61..612f6757015d 100644
3201 --- a/tools/testing/selftests/Makefile
3202 +++ b/tools/testing/selftests/Makefile
3203 @@ -213,7 +213,7 @@ ifdef INSTALL_PATH
3204 @# included in the generated runlist.
3205 for TARGET in $(TARGETS); do \
3206 BUILD_TARGET=$$BUILD/$$TARGET; \
3207 - [ ! -d $$INSTALL_PATH/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
3208 + [ ! -d $(INSTALL_PATH)/$$TARGET ] && echo "Skipping non-existent dir: $$TARGET" && continue; \
3209 echo "[ -w /dev/kmsg ] && echo \"kselftest: Running tests in $$TARGET\" >> /dev/kmsg" >> $(ALL_SCRIPT); \
3210 echo "cd $$TARGET" >> $(ALL_SCRIPT); \
3211 echo -n "run_many" >> $(ALL_SCRIPT); \
3212 diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
3213 index 8d69f007dd0c..b3a97dcaa30d 100644
3214 --- a/virt/kvm/arm/vgic/vgic-v3.c
3215 +++ b/virt/kvm/arm/vgic/vgic-v3.c
3216 @@ -363,8 +363,8 @@ retry:
3217 int vgic_v3_save_pending_tables(struct kvm *kvm)
3218 {
3219 struct vgic_dist *dist = &kvm->arch.vgic;
3220 - int last_byte_offset = -1;
3221 struct vgic_irq *irq;
3222 + gpa_t last_ptr = ~(gpa_t)0;
3223 int ret;
3224 u8 val;
3225
3226 @@ -384,11 +384,11 @@ int vgic_v3_save_pending_tables(struct kvm *kvm)
3227 bit_nr = irq->intid % BITS_PER_BYTE;
3228 ptr = pendbase + byte_offset;
3229
3230 - if (byte_offset != last_byte_offset) {
3231 + if (ptr != last_ptr) {
3232 ret = kvm_read_guest_lock(kvm, ptr, &val, 1);
3233 if (ret)
3234 return ret;
3235 - last_byte_offset = byte_offset;
3236 + last_ptr = ptr;
3237 }
3238
3239 stored = val & (1U << bit_nr);