Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0144-5.4.45-all-fixes.patch



Revision 3525
Thu Jun 25 11:14:57 2020 UTC by niro
File size: 58736 bytes
-linux-5.4.45
1 diff --git a/Makefile b/Makefile
2 index ef4697fcb8ea..d57c443d9073 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 44
10 +SUBLEVEL = 45
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
15 index 7ee89dc61f6e..23dc002aa574 100644
16 --- a/arch/arc/kernel/setup.c
17 +++ b/arch/arc/kernel/setup.c
18 @@ -12,6 +12,7 @@
19 #include <linux/clocksource.h>
20 #include <linux/console.h>
21 #include <linux/module.h>
22 +#include <linux/sizes.h>
23 #include <linux/cpu.h>
24 #include <linux/of_fdt.h>
25 #include <linux/of.h>
26 @@ -409,12 +410,12 @@ static void arc_chk_core_config(void)
27 if ((unsigned int)__arc_dccm_base != cpu->dccm.base_addr)
28 panic("Linux built with incorrect DCCM Base address\n");
29
30 - if (CONFIG_ARC_DCCM_SZ != cpu->dccm.sz)
31 + if (CONFIG_ARC_DCCM_SZ * SZ_1K != cpu->dccm.sz)
32 panic("Linux built with incorrect DCCM Size\n");
33 #endif
34
35 #ifdef CONFIG_ARC_HAS_ICCM
36 - if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
37 + if (CONFIG_ARC_ICCM_SZ * SZ_1K != cpu->iccm.sz)
38 panic("Linux built with incorrect ICCM Size\n");
39 #endif
40
41 diff --git a/arch/arc/plat-eznps/Kconfig b/arch/arc/plat-eznps/Kconfig
42 index a931d0a256d0..a645bca5899a 100644
43 --- a/arch/arc/plat-eznps/Kconfig
44 +++ b/arch/arc/plat-eznps/Kconfig
45 @@ -6,6 +6,7 @@
46
47 menuconfig ARC_PLAT_EZNPS
48 bool "\"EZchip\" ARC dev platform"
49 + depends on ISA_ARCOMPACT
50 select CPU_BIG_ENDIAN
51 select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
52 select EZNPS_GIC
53 diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
54 index 7ccc5c85c74e..000b350d4060 100644
55 --- a/arch/powerpc/platforms/powernv/opal-imc.c
56 +++ b/arch/powerpc/platforms/powernv/opal-imc.c
57 @@ -59,10 +59,6 @@ static void export_imc_mode_and_cmd(struct device_node *node,
58
59 imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
60
61 - /*
62 - * Return here, either because 'imc' directory already exists,
63 - * Or failed to create a new one.
64 - */
65 if (!imc_debugfs_parent)
66 return;
67
68 @@ -135,7 +131,6 @@ static int imc_get_mem_addr_nest(struct device_node *node,
69 }
70
71 pmu_ptr->imc_counter_mmaped = true;
72 - export_imc_mode_and_cmd(node, pmu_ptr);
73 kfree(base_addr_arr);
74 kfree(chipid_arr);
75 return 0;
76 @@ -151,7 +146,7 @@ error:
77 * and domain as the inputs.
78 * Allocates memory for the struct imc_pmu, sets up its domain, size and offsets
79 */
80 -static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
81 +static struct imc_pmu *imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
82 {
83 int ret = 0;
84 struct imc_pmu *pmu_ptr;
85 @@ -159,27 +154,23 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
86
87 /* Return for unknown domain */
88 if (domain < 0)
89 - return -EINVAL;
90 + return NULL;
91
92 /* memory for pmu */
93 pmu_ptr = kzalloc(sizeof(*pmu_ptr), GFP_KERNEL);
94 if (!pmu_ptr)
95 - return -ENOMEM;
96 + return NULL;
97
98 /* Set the domain */
99 pmu_ptr->domain = domain;
100
101 ret = of_property_read_u32(parent, "size", &pmu_ptr->counter_mem_size);
102 - if (ret) {
103 - ret = -EINVAL;
104 + if (ret)
105 goto free_pmu;
106 - }
107
108 if (!of_property_read_u32(parent, "offset", &offset)) {
109 - if (imc_get_mem_addr_nest(parent, pmu_ptr, offset)) {
110 - ret = -EINVAL;
111 + if (imc_get_mem_addr_nest(parent, pmu_ptr, offset))
112 goto free_pmu;
113 - }
114 }
115
116 /* Function to register IMC pmu */
117 @@ -190,14 +181,14 @@ static int imc_pmu_create(struct device_node *parent, int pmu_index, int domain)
118 if (pmu_ptr->domain == IMC_DOMAIN_NEST)
119 kfree(pmu_ptr->mem_info);
120 kfree(pmu_ptr);
121 - return ret;
122 + return NULL;
123 }
124
125 - return 0;
126 + return pmu_ptr;
127
128 free_pmu:
129 kfree(pmu_ptr);
130 - return ret;
131 + return NULL;
132 }
133
134 static void disable_nest_pmu_counters(void)
135 @@ -254,6 +245,7 @@ int get_max_nest_dev(void)
136 static int opal_imc_counters_probe(struct platform_device *pdev)
137 {
138 struct device_node *imc_dev = pdev->dev.of_node;
139 + struct imc_pmu *pmu;
140 int pmu_count = 0, domain;
141 bool core_imc_reg = false, thread_imc_reg = false;
142 u32 type;
143 @@ -269,6 +261,7 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
144 }
145
146 for_each_compatible_node(imc_dev, NULL, IMC_DTB_UNIT_COMPAT) {
147 + pmu = NULL;
148 if (of_property_read_u32(imc_dev, "type", &type)) {
149 pr_warn("IMC Device without type property\n");
150 continue;
151 @@ -300,9 +293,13 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
152 break;
153 }
154
155 - if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
156 - if (domain == IMC_DOMAIN_NEST)
157 + pmu = imc_pmu_create(imc_dev, pmu_count, domain);
158 + if (pmu != NULL) {
159 + if (domain == IMC_DOMAIN_NEST) {
160 + if (!imc_debugfs_parent)
161 + export_imc_mode_and_cmd(imc_dev, pmu);
162 pmu_count++;
163 + }
164 if (domain == IMC_DOMAIN_CORE)
165 core_imc_reg = true;
166 if (domain == IMC_DOMAIN_THREAD)
167 @@ -310,10 +307,6 @@ static int opal_imc_counters_probe(struct platform_device *pdev)
168 }
169 }
170
171 - /* If none of the nest units are registered, remove debugfs interface */
172 - if (pmu_count == 0)
173 - debugfs_remove_recursive(imc_debugfs_parent);
174 -
175 /* If core imc is not registered, unregister thread-imc */
176 if (!core_imc_reg && thread_imc_reg)
177 unregister_thread_imc();
178 diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
179 index 8057aafd5f5e..6d130c89fbd8 100644
180 --- a/arch/powerpc/xmon/xmon.c
181 +++ b/arch/powerpc/xmon/xmon.c
182 @@ -25,6 +25,7 @@
183 #include <linux/nmi.h>
184 #include <linux/ctype.h>
185 #include <linux/highmem.h>
186 +#include <linux/security.h>
187
188 #include <asm/debugfs.h>
189 #include <asm/ptrace.h>
190 @@ -187,6 +188,8 @@ static void dump_tlb_44x(void);
191 static void dump_tlb_book3e(void);
192 #endif
193
194 +static void clear_all_bpt(void);
195 +
196 #ifdef CONFIG_PPC64
197 #define REG "%.16lx"
198 #else
199 @@ -283,10 +286,38 @@ Commands:\n\
200 " U show uptime information\n"
201 " ? help\n"
202 " # n limit output to n lines per page (for dp, dpa, dl)\n"
203 -" zr reboot\n\
204 - zh halt\n"
205 +" zr reboot\n"
206 +" zh halt\n"
207 ;
208
209 +#ifdef CONFIG_SECURITY
210 +static bool xmon_is_locked_down(void)
211 +{
212 + static bool lockdown;
213 +
214 + if (!lockdown) {
215 + lockdown = !!security_locked_down(LOCKDOWN_XMON_RW);
216 + if (lockdown) {
217 + printf("xmon: Disabled due to kernel lockdown\n");
218 + xmon_is_ro = true;
219 + }
220 + }
221 +
222 + if (!xmon_is_ro) {
223 + xmon_is_ro = !!security_locked_down(LOCKDOWN_XMON_WR);
224 + if (xmon_is_ro)
225 + printf("xmon: Read-only due to kernel lockdown\n");
226 + }
227 +
228 + return lockdown;
229 +}
230 +#else /* CONFIG_SECURITY */
231 +static inline bool xmon_is_locked_down(void)
232 +{
233 + return false;
234 +}
235 +#endif
236 +
237 static struct pt_regs *xmon_regs;
238
239 static inline void sync(void)
240 @@ -438,7 +469,10 @@ static bool wait_for_other_cpus(int ncpus)
241
242 return false;
243 }
244 -#endif /* CONFIG_SMP */
245 +#else /* CONFIG_SMP */
246 +static inline void get_output_lock(void) {}
247 +static inline void release_output_lock(void) {}
248 +#endif
249
250 static inline int unrecoverable_excp(struct pt_regs *regs)
251 {
252 @@ -455,6 +489,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
253 int cmd = 0;
254 struct bpt *bp;
255 long recurse_jmp[JMP_BUF_LEN];
256 + bool locked_down;
257 unsigned long offset;
258 unsigned long flags;
259 #ifdef CONFIG_SMP
260 @@ -465,6 +500,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
261 local_irq_save(flags);
262 hard_irq_disable();
263
264 + locked_down = xmon_is_locked_down();
265 +
266 if (!fromipi) {
267 tracing_enabled = tracing_is_on();
268 tracing_off();
269 @@ -518,7 +555,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
270
271 if (!fromipi) {
272 get_output_lock();
273 - excprint(regs);
274 + if (!locked_down)
275 + excprint(regs);
276 if (bp) {
277 printf("cpu 0x%x stopped at breakpoint 0x%tx (",
278 cpu, BP_NUM(bp));
279 @@ -570,10 +608,14 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
280 }
281 remove_bpts();
282 disable_surveillance();
283 - /* for breakpoint or single step, print the current instr. */
284 - if (bp || TRAP(regs) == 0xd00)
285 - ppc_inst_dump(regs->nip, 1, 0);
286 - printf("enter ? for help\n");
287 +
288 + if (!locked_down) {
289 + /* for breakpoint or single step, print curr insn */
290 + if (bp || TRAP(regs) == 0xd00)
291 + ppc_inst_dump(regs->nip, 1, 0);
292 + printf("enter ? for help\n");
293 + }
294 +
295 mb();
296 xmon_gate = 1;
297 barrier();
298 @@ -597,8 +639,9 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
299 spin_cpu_relax();
300 touch_nmi_watchdog();
301 } else {
302 - cmd = cmds(regs);
303 - if (cmd != 0) {
304 + if (!locked_down)
305 + cmd = cmds(regs);
306 + if (locked_down || cmd != 0) {
307 /* exiting xmon */
308 insert_bpts();
309 xmon_gate = 0;
310 @@ -635,13 +678,16 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
311 "can't continue\n");
312 remove_bpts();
313 disable_surveillance();
314 - /* for breakpoint or single step, print the current instr. */
315 - if (bp || TRAP(regs) == 0xd00)
316 - ppc_inst_dump(regs->nip, 1, 0);
317 - printf("enter ? for help\n");
318 + if (!locked_down) {
319 + /* for breakpoint or single step, print current insn */
320 + if (bp || TRAP(regs) == 0xd00)
321 + ppc_inst_dump(regs->nip, 1, 0);
322 + printf("enter ? for help\n");
323 + }
324 }
325
326 - cmd = cmds(regs);
327 + if (!locked_down)
328 + cmd = cmds(regs);
329
330 insert_bpts();
331 in_xmon = 0;
332 @@ -670,7 +716,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
333 }
334 }
335 #endif
336 - insert_cpu_bpts();
337 + if (locked_down)
338 + clear_all_bpt();
339 + else
340 + insert_cpu_bpts();
341
342 touch_nmi_watchdog();
343 local_irq_restore(flags);
344 @@ -3761,6 +3810,11 @@ static void xmon_init(int enable)
345 #ifdef CONFIG_MAGIC_SYSRQ
346 static void sysrq_handle_xmon(int key)
347 {
348 + if (xmon_is_locked_down()) {
349 + clear_all_bpt();
350 + xmon_init(0);
351 + return;
352 + }
353 /* ensure xmon is enabled */
354 xmon_init(1);
355 debugger(get_irq_regs());
356 @@ -3782,7 +3836,6 @@ static int __init setup_xmon_sysrq(void)
357 device_initcall(setup_xmon_sysrq);
358 #endif /* CONFIG_MAGIC_SYSRQ */
359
360 -#ifdef CONFIG_DEBUG_FS
361 static void clear_all_bpt(void)
362 {
363 int i;
364 @@ -3800,18 +3853,22 @@ static void clear_all_bpt(void)
365 iabr = NULL;
366 dabr.enabled = 0;
367 }
368 -
369 - printf("xmon: All breakpoints cleared\n");
370 }
371
372 +#ifdef CONFIG_DEBUG_FS
373 static int xmon_dbgfs_set(void *data, u64 val)
374 {
375 xmon_on = !!val;
376 xmon_init(xmon_on);
377
378 /* make sure all breakpoints removed when disabling */
379 - if (!xmon_on)
380 + if (!xmon_on) {
381 clear_all_bpt();
382 + get_output_lock();
383 + printf("xmon: All breakpoints cleared\n");
384 + release_output_lock();
385 + }
386 +
387 return 0;
388 }
389
390 @@ -3837,7 +3894,11 @@ static int xmon_early __initdata;
391
392 static int __init early_parse_xmon(char *p)
393 {
394 - if (!p || strncmp(p, "early", 5) == 0) {
395 + if (xmon_is_locked_down()) {
396 + xmon_init(0);
397 + xmon_early = 0;
398 + xmon_on = 0;
399 + } else if (!p || strncmp(p, "early", 5) == 0) {
400 /* just "xmon" is equivalent to "xmon=early" */
401 xmon_init(1);
402 xmon_early = 1;
403 diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
404 index 3431b2d5e334..f942341429b1 100644
405 --- a/arch/s390/kernel/mcount.S
406 +++ b/arch/s390/kernel/mcount.S
407 @@ -41,6 +41,7 @@ EXPORT_SYMBOL(_mcount)
408 ENTRY(ftrace_caller)
409 .globl ftrace_regs_caller
410 .set ftrace_regs_caller,ftrace_caller
411 + stg %r14,(__SF_GPRS+8*8)(%r15) # save traced function caller
412 lgr %r1,%r15
413 #if !(defined(CC_USING_HOTPATCH) || defined(CC_USING_NOP_MCOUNT))
414 aghi %r0,MCOUNT_RETURN_FIXUP
415 diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
416 index 5674710a4841..7dfae86afa47 100644
417 --- a/arch/s390/mm/hugetlbpage.c
418 +++ b/arch/s390/mm/hugetlbpage.c
419 @@ -159,10 +159,13 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
420 rste &= ~_SEGMENT_ENTRY_NOEXEC;
421
422 /* Set correct table type for 2G hugepages */
423 - if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
424 - rste |= _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE;
425 - else
426 + if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
427 + if (likely(pte_present(pte)))
428 + rste |= _REGION3_ENTRY_LARGE;
429 + rste |= _REGION_ENTRY_TYPE_R3;
430 + } else if (likely(pte_present(pte)))
431 rste |= _SEGMENT_ENTRY_LARGE;
432 +
433 clear_huge_pte_skeys(mm, rste);
434 pte_val(*ptep) = rste;
435 }
436 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
437 index 1e6bb4c25334..ea85f23d9e22 100644
438 --- a/arch/x86/include/asm/pgtable.h
439 +++ b/arch/x86/include/asm/pgtable.h
440 @@ -253,6 +253,7 @@ static inline int pmd_large(pmd_t pte)
441 }
442
443 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
444 +/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
445 static inline int pmd_trans_huge(pmd_t pmd)
446 {
447 return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
448 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
449 index b8ef8557d4b3..2a36902d418c 100644
450 --- a/arch/x86/mm/mmio-mod.c
451 +++ b/arch/x86/mm/mmio-mod.c
452 @@ -372,7 +372,7 @@ static void enter_uniprocessor(void)
453 int cpu;
454 int err;
455
456 - if (downed_cpus == NULL &&
457 + if (!cpumask_available(downed_cpus) &&
458 !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
459 pr_notice("Failed to allocate mask\n");
460 goto out;
461 @@ -402,7 +402,7 @@ static void leave_uniprocessor(void)
462 int cpu;
463 int err;
464
465 - if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
466 + if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
467 return;
468 pr_notice("Re-enabling CPUs...\n");
469 for_each_cpu(cpu, downed_cpus) {
470 diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
471 index 3d7fdea872f8..2553e05e0725 100644
472 --- a/drivers/block/null_blk_zoned.c
473 +++ b/drivers/block/null_blk_zoned.c
474 @@ -20,6 +20,10 @@ int null_zone_init(struct nullb_device *dev)
475 pr_err("zone_size must be power-of-two\n");
476 return -EINVAL;
477 }
478 + if (dev->zone_size > dev->size) {
479 + pr_err("Zone size larger than device capacity\n");
480 + return -EINVAL;
481 + }
482
483 dev->zone_size_sects = dev->zone_size << ZONE_SIZE_SHIFT;
484 dev->nr_zones = dev_size >>
485 diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
486 index ea2849338d6c..9b69e55ad701 100644
487 --- a/drivers/gpu/drm/drm_edid.c
488 +++ b/drivers/gpu/drm/drm_edid.c
489 @@ -191,10 +191,11 @@ static const struct edid_quirk {
490 { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
491 { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
492
493 - /* Oculus Rift DK1, DK2, and CV1 VR Headsets */
494 + /* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
495 { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
496 { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
497 { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
498 + { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
499
500 /* Windows Mixed Reality Headsets */
501 { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
502 diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
503 index 9b15ac4f2fb6..4ab6531a4a74 100644
504 --- a/drivers/gpu/drm/i915/display/intel_dp.c
505 +++ b/drivers/gpu/drm/i915/display/intel_dp.c
506 @@ -7218,11 +7218,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
507 intel_connector->get_hw_state = intel_connector_get_hw_state;
508
509 /* init MST on ports that can support it */
510 - if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
511 - (port == PORT_B || port == PORT_C ||
512 - port == PORT_D || port == PORT_F))
513 - intel_dp_mst_encoder_init(intel_dig_port,
514 - intel_connector->base.base.id);
515 + intel_dp_mst_encoder_init(intel_dig_port,
516 + intel_connector->base.base.id);
517
518 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
519 intel_dp_aux_fini(intel_dp);
520 diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
521 index 600873c796d0..74d45a0eecb8 100644
522 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
523 +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
524 @@ -653,21 +653,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port)
525 int
526 intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id)
527 {
528 + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
529 struct intel_dp *intel_dp = &intel_dig_port->dp;
530 - struct drm_device *dev = intel_dig_port->base.base.dev;
531 + enum port port = intel_dig_port->base.port;
532 int ret;
533
534 - intel_dp->can_mst = true;
535 + if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
536 + return 0;
537 +
538 + if (INTEL_GEN(i915) < 12 && port == PORT_A)
539 + return 0;
540 +
541 + if (INTEL_GEN(i915) < 11 && port == PORT_E)
542 + return 0;
543 +
544 intel_dp->mst_mgr.cbs = &mst_cbs;
545
546 /* create encoders */
547 intel_dp_create_fake_mst_encoders(intel_dig_port);
548 - ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev,
549 + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
550 &intel_dp->aux, 16, 3, conn_base_id);
551 - if (ret) {
552 - intel_dp->can_mst = false;
553 + if (ret)
554 return ret;
555 - }
556 +
557 + intel_dp->can_mst = true;
558 +
559 return 0;
560 }
561
562 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
563 index 03c720b47306..39e4da7468e1 100644
564 --- a/drivers/hid/hid-multitouch.c
565 +++ b/drivers/hid/hid-multitouch.c
566 @@ -69,6 +69,7 @@ MODULE_LICENSE("GPL");
567 #define MT_QUIRK_ASUS_CUSTOM_UP BIT(17)
568 #define MT_QUIRK_WIN8_PTP_BUTTONS BIT(18)
569 #define MT_QUIRK_SEPARATE_APP_REPORT BIT(19)
570 +#define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
571
572 #define MT_INPUTMODE_TOUCHSCREEN 0x02
573 #define MT_INPUTMODE_TOUCHPAD 0x03
574 @@ -189,6 +190,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
575 #define MT_CLS_WIN_8 0x0012
576 #define MT_CLS_EXPORT_ALL_INPUTS 0x0013
577 #define MT_CLS_WIN_8_DUAL 0x0014
578 +#define MT_CLS_WIN_8_FORCE_MULTI_INPUT 0x0015
579
580 /* vendor specific classes */
581 #define MT_CLS_3M 0x0101
582 @@ -279,6 +281,15 @@ static const struct mt_class mt_classes[] = {
583 MT_QUIRK_CONTACT_CNT_ACCURATE |
584 MT_QUIRK_WIN8_PTP_BUTTONS,
585 .export_all_inputs = true },
586 + { .name = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
587 + .quirks = MT_QUIRK_ALWAYS_VALID |
588 + MT_QUIRK_IGNORE_DUPLICATES |
589 + MT_QUIRK_HOVERING |
590 + MT_QUIRK_CONTACT_CNT_ACCURATE |
591 + MT_QUIRK_STICKY_FINGERS |
592 + MT_QUIRK_WIN8_PTP_BUTTONS |
593 + MT_QUIRK_FORCE_MULTI_INPUT,
594 + .export_all_inputs = true },
595
596 /*
597 * vendor specific classes
598 @@ -1714,6 +1725,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
599 if (id->group != HID_GROUP_MULTITOUCH_WIN_8)
600 hdev->quirks |= HID_QUIRK_MULTI_INPUT;
601
602 + if (mtclass->quirks & MT_QUIRK_FORCE_MULTI_INPUT) {
603 + hdev->quirks &= ~HID_QUIRK_INPUT_PER_APP;
604 + hdev->quirks |= HID_QUIRK_MULTI_INPUT;
605 + }
606 +
607 timer_setup(&td->release_timer, mt_expired_timeout, 0);
608
609 ret = hid_parse(hdev);
610 @@ -1926,6 +1942,11 @@ static const struct hid_device_id mt_devices[] = {
611 MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
612 USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
613
614 + /* Elan devices */
615 + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
616 + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
617 + USB_VENDOR_ID_ELAN, 0x313a) },
618 +
619 /* Elitegroup panel */
620 { .driver_data = MT_CLS_SERIAL,
621 MT_USB_DEVICE(USB_VENDOR_ID_ELITEGROUP,
622 @@ -2056,6 +2077,11 @@ static const struct hid_device_id mt_devices[] = {
623 MT_USB_DEVICE(USB_VENDOR_ID_STANTUM_STM,
624 USB_DEVICE_ID_MTP_STM)},
625
626 + /* Synaptics devices */
627 + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT,
628 + HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
629 + USB_VENDOR_ID_SYNAPTICS, 0xce08) },
630 +
631 /* TopSeed panels */
632 { .driver_data = MT_CLS_TOPSEED,
633 MT_USB_DEVICE(USB_VENDOR_ID_TOPSEED2,
634 diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
635 index 4c6ed6ef31f1..2f073f536070 100644
636 --- a/drivers/hid/hid-sony.c
637 +++ b/drivers/hid/hid-sony.c
638 @@ -867,6 +867,23 @@ static u8 *sony_report_fixup(struct hid_device *hdev, u8 *rdesc,
639 if (sc->quirks & PS3REMOTE)
640 return ps3remote_fixup(hdev, rdesc, rsize);
641
642 + /*
643 + * Some knock-off USB dongles incorrectly report their button count
644 + * as 13 instead of 16 causing three non-functional buttons.
645 + */
646 + if ((sc->quirks & SIXAXIS_CONTROLLER_USB) && *rsize >= 45 &&
647 + /* Report Count (13) */
648 + rdesc[23] == 0x95 && rdesc[24] == 0x0D &&
649 + /* Usage Maximum (13) */
650 + rdesc[37] == 0x29 && rdesc[38] == 0x0D &&
651 + /* Report Count (3) */
652 + rdesc[43] == 0x95 && rdesc[44] == 0x03) {
653 + hid_info(hdev, "Fixing up USB dongle report descriptor\n");
654 + rdesc[24] = 0x10;
655 + rdesc[38] = 0x10;
656 + rdesc[44] = 0x00;
657 + }
658 +
659 return rdesc;
660 }
661
662 diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
663 index a66f08041a1a..ec142bc8c1da 100644
664 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
665 +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
666 @@ -389,6 +389,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
667 },
668 .driver_data = (void *)&sipodev_desc
669 },
670 + {
671 + .ident = "Schneider SCL142ALM",
672 + .matches = {
673 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SCHNEIDER"),
674 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SCL142ALM"),
675 + },
676 + .driver_data = (void *)&sipodev_desc
677 + },
678 { } /* Terminate list */
679 };
680
681 diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
682 index 92d2c706c2a7..a60042431370 100644
683 --- a/drivers/i2c/busses/i2c-altera.c
684 +++ b/drivers/i2c/busses/i2c-altera.c
685 @@ -70,6 +70,7 @@
686 * @isr_mask: cached copy of local ISR enables.
687 * @isr_status: cached copy of local ISR status.
688 * @lock: spinlock for IRQ synchronization.
689 + * @isr_mutex: mutex for IRQ thread.
690 */
691 struct altr_i2c_dev {
692 void __iomem *base;
693 @@ -86,6 +87,7 @@ struct altr_i2c_dev {
694 u32 isr_mask;
695 u32 isr_status;
696 spinlock_t lock; /* IRQ synchronization */
697 + struct mutex isr_mutex;
698 };
699
700 static void
701 @@ -245,10 +247,11 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
702 struct altr_i2c_dev *idev = _dev;
703 u32 status = idev->isr_status;
704
705 + mutex_lock(&idev->isr_mutex);
706 if (!idev->msg) {
707 dev_warn(idev->dev, "unexpected interrupt\n");
708 altr_i2c_int_clear(idev, ALTR_I2C_ALL_IRQ);
709 - return IRQ_HANDLED;
710 + goto out;
711 }
712 read = (idev->msg->flags & I2C_M_RD) != 0;
713
714 @@ -301,6 +304,8 @@ static irqreturn_t altr_i2c_isr(int irq, void *_dev)
715 complete(&idev->msg_complete);
716 dev_dbg(idev->dev, "Message Complete\n");
717 }
718 +out:
719 + mutex_unlock(&idev->isr_mutex);
720
721 return IRQ_HANDLED;
722 }
723 @@ -312,6 +317,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
724 u32 value;
725 u8 addr = i2c_8bit_addr_from_msg(msg);
726
727 + mutex_lock(&idev->isr_mutex);
728 idev->msg = msg;
729 idev->msg_len = msg->len;
730 idev->buf = msg->buf;
731 @@ -336,6 +342,7 @@ static int altr_i2c_xfer_msg(struct altr_i2c_dev *idev, struct i2c_msg *msg)
732 altr_i2c_int_enable(idev, imask, true);
733 altr_i2c_fill_tx_fifo(idev);
734 }
735 + mutex_unlock(&idev->isr_mutex);
736
737 time_left = wait_for_completion_timeout(&idev->msg_complete,
738 ALTR_I2C_XFER_TIMEOUT);
739 @@ -409,6 +416,7 @@ static int altr_i2c_probe(struct platform_device *pdev)
740 idev->dev = &pdev->dev;
741 init_completion(&idev->msg_complete);
742 spin_lock_init(&idev->lock);
743 + mutex_init(&idev->isr_mutex);
744
745 ret = device_property_read_u32(idev->dev, "fifo-size",
746 &idev->fifo_size);
747 diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
748 index b462eaca1ee3..4494dab8c3d8 100644
749 --- a/drivers/infiniband/hw/qedr/main.c
750 +++ b/drivers/infiniband/hw/qedr/main.c
751 @@ -360,7 +360,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
752 xa_init_flags(&dev->srqs, XA_FLAGS_LOCK_IRQ);
753
754 if (IS_IWARP(dev)) {
755 - xa_init_flags(&dev->qps, XA_FLAGS_LOCK_IRQ);
756 + xa_init(&dev->qps);
757 dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq");
758 }
759
760 diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
761 index 0cfd849b13d6..8e927f6c1520 100644
762 --- a/drivers/infiniband/hw/qedr/qedr.h
763 +++ b/drivers/infiniband/hw/qedr/qedr.h
764 @@ -40,6 +40,7 @@
765 #include <linux/qed/qed_rdma_if.h>
766 #include <linux/qed/qede_rdma.h>
767 #include <linux/qed/roce_common.h>
768 +#include <linux/completion.h>
769 #include "qedr_hsi_rdma.h"
770
771 #define QEDR_NODE_DESC "QLogic 579xx RoCE HCA"
772 @@ -377,10 +378,20 @@ enum qedr_qp_err_bitmap {
773 QEDR_QP_ERR_RQ_PBL_FULL = 32,
774 };
775
776 +enum qedr_qp_create_type {
777 + QEDR_QP_CREATE_NONE,
778 + QEDR_QP_CREATE_USER,
779 + QEDR_QP_CREATE_KERNEL,
780 +};
781 +
782 +enum qedr_iwarp_cm_flags {
783 + QEDR_IWARP_CM_WAIT_FOR_CONNECT = BIT(0),
784 + QEDR_IWARP_CM_WAIT_FOR_DISCONNECT = BIT(1),
785 +};
786 +
787 struct qedr_qp {
788 struct ib_qp ibqp; /* must be first */
789 struct qedr_dev *dev;
790 - struct qedr_iw_ep *ep;
791 struct qedr_qp_hwq_info sq;
792 struct qedr_qp_hwq_info rq;
793
794 @@ -395,6 +406,7 @@ struct qedr_qp {
795 u32 id;
796 struct qedr_pd *pd;
797 enum ib_qp_type qp_type;
798 + enum qedr_qp_create_type create_type;
799 struct qed_rdma_qp *qed_qp;
800 u32 qp_id;
801 u16 icid;
802 @@ -437,8 +449,11 @@ struct qedr_qp {
803 /* Relevant to qps created from user space only (applications) */
804 struct qedr_userq usq;
805 struct qedr_userq urq;
806 - atomic_t refcnt;
807 - bool destroyed;
808 +
809 + /* synchronization objects used with iwarp ep */
810 + struct kref refcnt;
811 + struct completion iwarp_cm_comp;
812 + unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
813 };
814
815 struct qedr_ah {
816 @@ -531,7 +546,7 @@ struct qedr_iw_ep {
817 struct iw_cm_id *cm_id;
818 struct qedr_qp *qp;
819 void *qed_context;
820 - u8 during_connect;
821 + struct kref refcnt;
822 };
823
824 static inline
825 diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
826 index 22881d4442b9..5e9732990be5 100644
827 --- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
828 +++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
829 @@ -79,6 +79,27 @@ qedr_fill_sockaddr6(const struct qed_iwarp_cm_info *cm_info,
830 }
831 }
832
833 +static void qedr_iw_free_qp(struct kref *ref)
834 +{
835 + struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
836 +
837 + kfree(qp);
838 +}
839 +
840 +static void
841 +qedr_iw_free_ep(struct kref *ref)
842 +{
843 + struct qedr_iw_ep *ep = container_of(ref, struct qedr_iw_ep, refcnt);
844 +
845 + if (ep->qp)
846 + kref_put(&ep->qp->refcnt, qedr_iw_free_qp);
847 +
848 + if (ep->cm_id)
849 + ep->cm_id->rem_ref(ep->cm_id);
850 +
851 + kfree(ep);
852 +}
853 +
854 static void
855 qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
856 {
857 @@ -93,6 +114,7 @@ qedr_iw_mpa_request(void *context, struct qed_iwarp_cm_event_params *params)
858
859 ep->dev = dev;
860 ep->qed_context = params->ep_context;
861 + kref_init(&ep->refcnt);
862
863 memset(&event, 0, sizeof(event));
864 event.event = IW_CM_EVENT_CONNECT_REQUEST;
865 @@ -141,12 +163,10 @@ qedr_iw_close_event(void *context, struct qed_iwarp_cm_event_params *params)
866 {
867 struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
868
869 - if (ep->cm_id) {
870 + if (ep->cm_id)
871 qedr_iw_issue_event(context, params, IW_CM_EVENT_CLOSE);
872
873 - ep->cm_id->rem_ref(ep->cm_id);
874 - ep->cm_id = NULL;
875 - }
876 + kref_put(&ep->refcnt, qedr_iw_free_ep);
877 }
878
879 static void
880 @@ -186,11 +206,13 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
881 struct qedr_qp *qp = ep->qp;
882 struct iw_cm_event event;
883
884 - if (qp->destroyed) {
885 - kfree(dwork);
886 - qedr_iw_qp_rem_ref(&qp->ibqp);
887 - return;
888 - }
889 + /* The qp won't be released until we release the ep.
890 + * the ep's refcnt was increased before calling this
891 + * function, therefore it is safe to access qp
892 + */
893 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
894 + &qp->iwarp_cm_flags))
895 + goto out;
896
897 memset(&event, 0, sizeof(event));
898 event.status = dwork->status;
899 @@ -204,7 +226,6 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
900 else
901 qp_params.new_state = QED_ROCE_QP_STATE_SQD;
902
903 - kfree(dwork);
904
905 if (ep->cm_id)
906 ep->cm_id->event_handler(ep->cm_id, &event);
907 @@ -214,7 +235,10 @@ static void qedr_iw_disconnect_worker(struct work_struct *work)
908
909 dev->ops->rdma_modify_qp(dev->rdma_ctx, qp->qed_qp, &qp_params);
910
911 - qedr_iw_qp_rem_ref(&qp->ibqp);
912 + complete(&ep->qp->iwarp_cm_comp);
913 +out:
914 + kfree(dwork);
915 + kref_put(&ep->refcnt, qedr_iw_free_ep);
916 }
917
918 static void
919 @@ -224,13 +248,17 @@ qedr_iw_disconnect_event(void *context,
920 struct qedr_discon_work *work;
921 struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
922 struct qedr_dev *dev = ep->dev;
923 - struct qedr_qp *qp = ep->qp;
924
925 work = kzalloc(sizeof(*work), GFP_ATOMIC);
926 if (!work)
927 return;
928
929 - qedr_iw_qp_add_ref(&qp->ibqp);
930 + /* We can't get a close event before disconnect, but since
931 + * we're scheduling a work queue we need to make sure close
932 + * won't delete the ep, so we increase the refcnt
933 + */
934 + kref_get(&ep->refcnt);
935 +
936 work->ep = ep;
937 work->event = params->event;
938 work->status = params->status;
939 @@ -252,16 +280,30 @@ qedr_iw_passive_complete(void *context,
940 if ((params->status == -ECONNREFUSED) && (!ep->qp)) {
941 DP_DEBUG(dev, QEDR_MSG_IWARP,
942 "PASSIVE connection refused releasing ep...\n");
943 - kfree(ep);
944 + kref_put(&ep->refcnt, qedr_iw_free_ep);
945 return;
946 }
947
948 + complete(&ep->qp->iwarp_cm_comp);
949 qedr_iw_issue_event(context, params, IW_CM_EVENT_ESTABLISHED);
950
951 if (params->status < 0)
952 qedr_iw_close_event(context, params);
953 }
954
955 +static void
956 +qedr_iw_active_complete(void *context,
957 + struct qed_iwarp_cm_event_params *params)
958 +{
959 + struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
960 +
961 + complete(&ep->qp->iwarp_cm_comp);
962 + qedr_iw_issue_event(context, params, IW_CM_EVENT_CONNECT_REPLY);
963 +
964 + if (params->status < 0)
965 + kref_put(&ep->refcnt, qedr_iw_free_ep);
966 +}
967 +
968 static int
969 qedr_iw_mpa_reply(void *context, struct qed_iwarp_cm_event_params *params)
970 {
971 @@ -288,27 +330,15 @@ qedr_iw_event_handler(void *context, struct qed_iwarp_cm_event_params *params)
972 qedr_iw_mpa_reply(context, params);
973 break;
974 case QED_IWARP_EVENT_PASSIVE_COMPLETE:
975 - ep->during_connect = 0;
976 qedr_iw_passive_complete(context, params);
977 break;
978 -
979 case QED_IWARP_EVENT_ACTIVE_COMPLETE:
980 - ep->during_connect = 0;
981 - qedr_iw_issue_event(context,
982 - params,
983 - IW_CM_EVENT_CONNECT_REPLY);
984 - if (params->status < 0) {
985 - struct qedr_iw_ep *ep = (struct qedr_iw_ep *)context;
986 -
987 - ep->cm_id->rem_ref(ep->cm_id);
988 - ep->cm_id = NULL;
989 - }
990 + qedr_iw_active_complete(context, params);
991 break;
992 case QED_IWARP_EVENT_DISCONNECT:
993 qedr_iw_disconnect_event(context, params);
994 break;
995 case QED_IWARP_EVENT_CLOSE:
996 - ep->during_connect = 0;
997 qedr_iw_close_event(context, params);
998 break;
999 case QED_IWARP_EVENT_RQ_EMPTY:
1000 @@ -476,6 +506,19 @@ qedr_addr6_resolve(struct qedr_dev *dev,
1001 return rc;
1002 }
1003
1004 +struct qedr_qp *qedr_iw_load_qp(struct qedr_dev *dev, u32 qpn)
1005 +{
1006 + struct qedr_qp *qp;
1007 +
1008 + xa_lock(&dev->qps);
1009 + qp = xa_load(&dev->qps, qpn);
1010 + if (qp)
1011 + kref_get(&qp->refcnt);
1012 + xa_unlock(&dev->qps);
1013 +
1014 + return qp;
1015 +}
1016 +
1017 int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1018 {
1019 struct qedr_dev *dev = get_qedr_dev(cm_id->device);
1020 @@ -491,10 +534,6 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1021 int rc = 0;
1022 int i;
1023
1024 - qp = xa_load(&dev->qps, conn_param->qpn);
1025 - if (unlikely(!qp))
1026 - return -EINVAL;
1027 -
1028 laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
1029 raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
1030 laddr6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
1031 @@ -516,8 +555,15 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1032 return -ENOMEM;
1033
1034 ep->dev = dev;
1035 + kref_init(&ep->refcnt);
1036 +
1037 + qp = qedr_iw_load_qp(dev, conn_param->qpn);
1038 + if (!qp) {
1039 + rc = -EINVAL;
1040 + goto err;
1041 + }
1042 +
1043 ep->qp = qp;
1044 - qp->ep = ep;
1045 cm_id->add_ref(cm_id);
1046 ep->cm_id = cm_id;
1047
1048 @@ -580,16 +626,20 @@ int qedr_iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1049 in_params.qp = qp->qed_qp;
1050 memcpy(in_params.local_mac_addr, dev->ndev->dev_addr, ETH_ALEN);
1051
1052 - ep->during_connect = 1;
1053 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
1054 + &qp->iwarp_cm_flags))
1055 + goto err; /* QP already being destroyed */
1056 +
1057 rc = dev->ops->iwarp_connect(dev->rdma_ctx, &in_params, &out_params);
1058 - if (rc)
1059 + if (rc) {
1060 + complete(&qp->iwarp_cm_comp);
1061 goto err;
1062 + }
1063
1064 return rc;
1065
1066 err:
1067 - cm_id->rem_ref(cm_id);
1068 - kfree(ep);
1069 + kref_put(&ep->refcnt, qedr_iw_free_ep);
1070 return rc;
1071 }
1072
1073 @@ -677,18 +727,17 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1074 struct qedr_dev *dev = ep->dev;
1075 struct qedr_qp *qp;
1076 struct qed_iwarp_accept_in params;
1077 - int rc;
1078 + int rc = 0;
1079
1080 DP_DEBUG(dev, QEDR_MSG_IWARP, "Accept on qpid=%d\n", conn_param->qpn);
1081
1082 - qp = xa_load(&dev->qps, conn_param->qpn);
1083 + qp = qedr_iw_load_qp(dev, conn_param->qpn);
1084 if (!qp) {
1085 DP_ERR(dev, "Invalid QP number %d\n", conn_param->qpn);
1086 return -EINVAL;
1087 }
1088
1089 ep->qp = qp;
1090 - qp->ep = ep;
1091 cm_id->add_ref(cm_id);
1092 ep->cm_id = cm_id;
1093
1094 @@ -700,15 +749,21 @@ int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1095 params.ird = conn_param->ird;
1096 params.ord = conn_param->ord;
1097
1098 - ep->during_connect = 1;
1099 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
1100 + &qp->iwarp_cm_flags))
1101 + goto err; /* QP already destroyed */
1102 +
1103 rc = dev->ops->iwarp_accept(dev->rdma_ctx, &params);
1104 - if (rc)
1105 + if (rc) {
1106 + complete(&qp->iwarp_cm_comp);
1107 goto err;
1108 + }
1109
1110 return rc;
1111 +
1112 err:
1113 - ep->during_connect = 0;
1114 - cm_id->rem_ref(cm_id);
1115 + kref_put(&ep->refcnt, qedr_iw_free_ep);
1116 +
1117 return rc;
1118 }
1119
1120 @@ -731,17 +786,14 @@ void qedr_iw_qp_add_ref(struct ib_qp *ibqp)
1121 {
1122 struct qedr_qp *qp = get_qedr_qp(ibqp);
1123
1124 - atomic_inc(&qp->refcnt);
1125 + kref_get(&qp->refcnt);
1126 }
1127
1128 void qedr_iw_qp_rem_ref(struct ib_qp *ibqp)
1129 {
1130 struct qedr_qp *qp = get_qedr_qp(ibqp);
1131
1132 - if (atomic_dec_and_test(&qp->refcnt)) {
1133 - xa_erase_irq(&qp->dev->qps, qp->qp_id);
1134 - kfree(qp);
1135 - }
1136 + kref_put(&qp->refcnt, qedr_iw_free_qp);
1137 }
1138
1139 struct ib_qp *qedr_iw_get_qp(struct ib_device *ibdev, int qpn)
1140 diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
1141 index a7ccca3c4f89..8b4240c1cc76 100644
1142 --- a/drivers/infiniband/hw/qedr/verbs.c
1143 +++ b/drivers/infiniband/hw/qedr/verbs.c
1144 @@ -51,6 +51,7 @@
1145 #include "verbs.h"
1146 #include <rdma/qedr-abi.h>
1147 #include "qedr_roce_cm.h"
1148 +#include "qedr_iw_cm.h"
1149
1150 #define QEDR_SRQ_WQE_ELEM_SIZE sizeof(union rdma_srq_elm)
1151 #define RDMA_MAX_SGE_PER_SRQ (4)
1152 @@ -1193,7 +1194,10 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
1153 struct ib_qp_init_attr *attrs)
1154 {
1155 spin_lock_init(&qp->q_lock);
1156 - atomic_set(&qp->refcnt, 1);
1157 + if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1158 + kref_init(&qp->refcnt);
1159 + init_completion(&qp->iwarp_cm_comp);
1160 + }
1161 qp->pd = pd;
1162 qp->qp_type = attrs->qp_type;
1163 qp->max_inline_data = attrs->cap.max_inline_data;
1164 @@ -1600,6 +1604,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev,
1165 int alloc_and_init = rdma_protocol_roce(&dev->ibdev, 1);
1166 int rc = -EINVAL;
1167
1168 + qp->create_type = QEDR_QP_CREATE_USER;
1169 memset(&ureq, 0, sizeof(ureq));
1170 rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1171 if (rc) {
1172 @@ -1813,6 +1818,7 @@ static int qedr_create_kernel_qp(struct qedr_dev *dev,
1173 u32 n_sq_entries;
1174
1175 memset(&in_params, 0, sizeof(in_params));
1176 + qp->create_type = QEDR_QP_CREATE_KERNEL;
1177
1178 /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1179 * the ring. The ring should allow at least a single WR, even if the
1180 @@ -1926,7 +1932,7 @@ struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1181 qp->ibqp.qp_num = qp->qp_id;
1182
1183 if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
1184 - rc = xa_insert_irq(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
1185 + rc = xa_insert(&dev->qps, qp->qp_id, qp, GFP_KERNEL);
1186 if (rc)
1187 goto err;
1188 }
1189 @@ -2445,7 +2451,7 @@ static int qedr_free_qp_resources(struct qedr_dev *dev, struct qedr_qp *qp,
1190 return rc;
1191 }
1192
1193 - if (udata)
1194 + if (qp->create_type == QEDR_QP_CREATE_USER)
1195 qedr_cleanup_user(dev, qp);
1196 else
1197 qedr_cleanup_kernel(dev, qp);
1198 @@ -2475,34 +2481,44 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1199 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
1200 }
1201 } else {
1202 - /* Wait for the connect/accept to complete */
1203 - if (qp->ep) {
1204 - int wait_count = 1;
1205 -
1206 - while (qp->ep->during_connect) {
1207 - DP_DEBUG(dev, QEDR_MSG_QP,
1208 - "Still in during connect/accept\n");
1209 -
1210 - msleep(100);
1211 - if (wait_count++ > 200) {
1212 - DP_NOTICE(dev,
1213 - "during connect timeout\n");
1214 - break;
1215 - }
1216 - }
1217 - }
1218 + /* If connection establishment started the WAIT_FOR_CONNECT
1219 + * bit will be on and we need to Wait for the establishment
1220 + * to complete before destroying the qp.
1221 + */
1222 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_CONNECT,
1223 + &qp->iwarp_cm_flags))
1224 + wait_for_completion(&qp->iwarp_cm_comp);
1225 +
1226 + /* If graceful disconnect started, the WAIT_FOR_DISCONNECT
1227 + * bit will be on, and we need to wait for the disconnect to
1228 + * complete before continuing. We can use the same completion,
1229 + * iwarp_cm_comp, since this is the only place that waits for
1230 + * this completion and it is sequential. In addition,
1231 + * disconnect can't occur before the connection is fully
1232 + * established, therefore if WAIT_FOR_DISCONNECT is on it
1233 + * means WAIT_FOR_CONNECT is also on and the completion for
1234 + * CONNECT already occurred.
1235 + */
1236 + if (test_and_set_bit(QEDR_IWARP_CM_WAIT_FOR_DISCONNECT,
1237 + &qp->iwarp_cm_flags))
1238 + wait_for_completion(&qp->iwarp_cm_comp);
1239 }
1240
1241 if (qp->qp_type == IB_QPT_GSI)
1242 qedr_destroy_gsi_qp(dev);
1243
1244 + /* We need to remove the entry from the xarray before we release the
1245 + * qp_id to avoid a race of the qp_id being reallocated and failing
1246 + * on xa_insert
1247 + */
1248 + if (rdma_protocol_iwarp(&dev->ibdev, 1))
1249 + xa_erase(&dev->qps, qp->qp_id);
1250 +
1251 qedr_free_qp_resources(dev, qp, udata);
1252
1253 - if (atomic_dec_and_test(&qp->refcnt) &&
1254 - rdma_protocol_iwarp(&dev->ibdev, 1)) {
1255 - xa_erase_irq(&dev->qps, qp->qp_id);
1256 - kfree(qp);
1257 - }
1258 + if (rdma_protocol_iwarp(&dev->ibdev, 1))
1259 + qedr_iw_qp_rem_ref(&qp->ibqp);
1260 +
1261 return 0;
1262 }
1263
1264 diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
1265 index 6027bb65f7f6..dc9a3bb24114 100644
1266 --- a/drivers/net/dsa/mt7530.c
1267 +++ b/drivers/net/dsa/mt7530.c
1268 @@ -818,10 +818,15 @@ mt7530_port_set_vlan_aware(struct dsa_switch *ds, int port)
1269 PCR_MATRIX_MASK, PCR_MATRIX(MT7530_ALL_MEMBERS));
1270
1271 /* Trapped into security mode allows packet forwarding through VLAN
1272 - * table lookup.
1273 + * table lookup. CPU port is set to fallback mode to let untagged
1274 + * frames pass through.
1275 */
1276 - mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1277 - MT7530_PORT_SECURITY_MODE);
1278 + if (dsa_is_cpu_port(ds, port))
1279 + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1280 + MT7530_PORT_FALLBACK_MODE);
1281 + else
1282 + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK,
1283 + MT7530_PORT_SECURITY_MODE);
1284
1285 /* Set the port as a user port which is to be able to recognize VID
1286 * from incoming packets before fetching entry within the VLAN table.
1287 diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
1288 index 0e7e36d8f994..3ef7b5a6fc22 100644
1289 --- a/drivers/net/dsa/mt7530.h
1290 +++ b/drivers/net/dsa/mt7530.h
1291 @@ -148,6 +148,12 @@ enum mt7530_port_mode {
1292 /* Port Matrix Mode: Frames are forwarded by the PCR_MATRIX members. */
1293 MT7530_PORT_MATRIX_MODE = PORT_VLAN(0),
1294
1295 + /* Fallback Mode: Forward received frames with ingress ports that do
1296 + * not belong to the VLAN member. Frames whose VID is not listed on
1297 + * the VLAN table are forwarded by the PCR_MATRIX members.
1298 + */
1299 + MT7530_PORT_FALLBACK_MODE = PORT_VLAN(1),
1300 +
1301 /* Security Mode: Discard any frame due to ingress membership
1302 * violation or VID missed on the VLAN table.
1303 */
1304 diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
1305 index a58185b1d8bf..3e3711b60d01 100644
1306 --- a/drivers/net/ethernet/apple/bmac.c
1307 +++ b/drivers/net/ethernet/apple/bmac.c
1308 @@ -1182,7 +1182,7 @@ bmac_get_station_address(struct net_device *dev, unsigned char *ea)
1309 int i;
1310 unsigned short data;
1311
1312 - for (i = 0; i < 6; i++)
1313 + for (i = 0; i < 3; i++)
1314 {
1315 reset_and_select_srom(dev);
1316 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
1317 diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
1318 index f839fa94ebdd..d3b8ce734c1b 100644
1319 --- a/drivers/net/ethernet/freescale/ucc_geth.c
1320 +++ b/drivers/net/ethernet/freescale/ucc_geth.c
1321 @@ -42,6 +42,7 @@
1322 #include <soc/fsl/qe/ucc.h>
1323 #include <soc/fsl/qe/ucc_fast.h>
1324 #include <asm/machdep.h>
1325 +#include <net/sch_generic.h>
1326
1327 #include "ucc_geth.h"
1328
1329 @@ -1548,11 +1549,8 @@ static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1330
1331 static void ugeth_quiesce(struct ucc_geth_private *ugeth)
1332 {
1333 - /* Prevent any further xmits, plus detach the device. */
1334 - netif_device_detach(ugeth->ndev);
1335 -
1336 - /* Wait for any current xmits to finish. */
1337 - netif_tx_disable(ugeth->ndev);
1338 + /* Prevent any further xmits */
1339 + netif_tx_stop_all_queues(ugeth->ndev);
1340
1341 /* Disable the interrupt to avoid NAPI rescheduling. */
1342 disable_irq(ugeth->ug_info->uf_info.irq);
1343 @@ -1565,7 +1563,10 @@ static void ugeth_activate(struct ucc_geth_private *ugeth)
1344 {
1345 napi_enable(&ugeth->napi);
1346 enable_irq(ugeth->ug_info->uf_info.irq);
1347 - netif_device_attach(ugeth->ndev);
1348 +
1349 + /* allow to xmit again */
1350 + netif_tx_wake_all_queues(ugeth->ndev);
1351 + __netdev_watchdog_up(ugeth->ndev);
1352 }
1353
1354 /* Called every time the controller might need to be made
1355 diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
1356 index 38068fc34141..c7bdada4d1b9 100644
1357 --- a/drivers/net/ethernet/smsc/smsc911x.c
1358 +++ b/drivers/net/ethernet/smsc/smsc911x.c
1359 @@ -2502,20 +2502,20 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
1360
1361 retval = smsc911x_init(dev);
1362 if (retval < 0)
1363 - goto out_disable_resources;
1364 + goto out_init_fail;
1365
1366 netif_carrier_off(dev);
1367
1368 retval = smsc911x_mii_init(pdev, dev);
1369 if (retval) {
1370 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
1371 - goto out_disable_resources;
1372 + goto out_init_fail;
1373 }
1374
1375 retval = register_netdev(dev);
1376 if (retval) {
1377 SMSC_WARN(pdata, probe, "Error %i registering device", retval);
1378 - goto out_disable_resources;
1379 + goto out_init_fail;
1380 } else {
1381 SMSC_TRACE(pdata, probe,
1382 "Network interface: \"%s\"", dev->name);
1383 @@ -2556,9 +2556,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
1384
1385 return 0;
1386
1387 -out_disable_resources:
1388 +out_init_fail:
1389 pm_runtime_put(&pdev->dev);
1390 pm_runtime_disable(&pdev->dev);
1391 +out_disable_resources:
1392 (void)smsc911x_disable_resources(pdev);
1393 out_enable_resources_fail:
1394 smsc911x_free_resources(pdev);
1395 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
1396 index 0d21082ceb93..4d75158c64b2 100644
1397 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
1398 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
1399 @@ -318,6 +318,19 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
1400 /* Enable PTP clock */
1401 regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
1402 val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
1403 + switch (gmac->phy_mode) {
1404 + case PHY_INTERFACE_MODE_RGMII:
1405 + val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
1406 + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
1407 + break;
1408 + case PHY_INTERFACE_MODE_SGMII:
1409 + val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
1410 + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
1411 + break;
1412 + default:
1413 + /* We don't get here; the switch above will have errored out */
1414 + unreachable();
1415 + }
1416 regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
1417
1418 if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
1419 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
1420 index c4c8f1b62e1e..da0d3834b5f0 100644
1421 --- a/drivers/net/wireless/cisco/airo.c
1422 +++ b/drivers/net/wireless/cisco/airo.c
1423 @@ -1925,6 +1925,10 @@ static netdev_tx_t mpi_start_xmit(struct sk_buff *skb,
1424 airo_print_err(dev->name, "%s: skb == NULL!",__func__);
1425 return NETDEV_TX_OK;
1426 }
1427 + if (skb_padto(skb, ETH_ZLEN)) {
1428 + dev->stats.tx_dropped++;
1429 + return NETDEV_TX_OK;
1430 + }
1431 npacks = skb_queue_len (&ai->txq);
1432
1433 if (npacks >= MAXTXQ - 1) {
1434 @@ -2127,6 +2131,10 @@ static netdev_tx_t airo_start_xmit(struct sk_buff *skb,
1435 airo_print_err(dev->name, "%s: skb == NULL!", __func__);
1436 return NETDEV_TX_OK;
1437 }
1438 + if (skb_padto(skb, ETH_ZLEN)) {
1439 + dev->stats.tx_dropped++;
1440 + return NETDEV_TX_OK;
1441 + }
1442
1443 /* Find a vacant FID */
1444 for( i = 0; i < MAX_FIDS / 2 && (fids[i] & 0xffff0000); i++ );
1445 @@ -2201,6 +2209,10 @@ static netdev_tx_t airo_start_xmit11(struct sk_buff *skb,
1446 airo_print_err(dev->name, "%s: skb == NULL!", __func__);
1447 return NETDEV_TX_OK;
1448 }
1449 + if (skb_padto(skb, ETH_ZLEN)) {
1450 + dev->stats.tx_dropped++;
1451 + return NETDEV_TX_OK;
1452 + }
1453
1454 /* Find a vacant FID */
1455 for( i = MAX_FIDS / 2; i < MAX_FIDS && (fids[i] & 0xffff0000); i++ );
1456 diff --git a/drivers/net/wireless/intersil/p54/p54usb.c b/drivers/net/wireless/intersil/p54/p54usb.c
1457 index b94764c88750..ff0e30c0c14c 100644
1458 --- a/drivers/net/wireless/intersil/p54/p54usb.c
1459 +++ b/drivers/net/wireless/intersil/p54/p54usb.c
1460 @@ -61,6 +61,7 @@ static const struct usb_device_id p54u_table[] = {
1461 {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
1462 {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
1463 {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
1464 + {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */
1465 {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
1466 {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
1467 {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
1468 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h
1469 index e858bba8c8ff..0075fba93546 100644
1470 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h
1471 +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h
1472 @@ -212,6 +212,7 @@ static inline bool is_mt76x0(struct mt76x02_dev *dev)
1473 static inline bool is_mt76x2(struct mt76x02_dev *dev)
1474 {
1475 return mt76_chip(&dev->mt76) == 0x7612 ||
1476 + mt76_chip(&dev->mt76) == 0x7632 ||
1477 mt76_chip(&dev->mt76) == 0x7662 ||
1478 mt76_chip(&dev->mt76) == 0x7602;
1479 }
1480 diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
1481 index 8b26c6108186..96a2b7ba6764 100644
1482 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
1483 +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c
1484 @@ -18,6 +18,7 @@ static const struct usb_device_id mt76x2u_device_table[] = {
1485 { USB_DEVICE(0x7392, 0xb711) }, /* Edimax EW 7722 UAC */
1486 { USB_DEVICE(0x0846, 0x9053) }, /* Netgear A6210 */
1487 { USB_DEVICE(0x045e, 0x02e6) }, /* XBox One Wireless Adapter */
1488 + { USB_DEVICE(0x045e, 0x02fe) }, /* XBox One Wireless Adapter */
1489 { },
1490 };
1491
1492 diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
1493 index 849335d76cf6..6f4692f0d714 100644
1494 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c
1495 +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
1496 @@ -974,12 +974,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy)
1497 struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
1498 struct hisi_sas_phy *phy = sas_phy->lldd_phy;
1499 struct asd_sas_port *sas_port = sas_phy->port;
1500 - struct hisi_sas_port *port = to_hisi_sas_port(sas_port);
1501 + struct hisi_sas_port *port;
1502 unsigned long flags;
1503
1504 if (!sas_port)
1505 return;
1506
1507 + port = to_hisi_sas_port(sas_port);
1508 spin_lock_irqsave(&hisi_hba->lock, flags);
1509 port->port_attached = 1;
1510 port->id = phy->port_id;
1511 diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
1512 index 3717eea37ecb..5f0ad8b32e3a 100644
1513 --- a/drivers/scsi/scsi_pm.c
1514 +++ b/drivers/scsi/scsi_pm.c
1515 @@ -80,6 +80,10 @@ static int scsi_dev_type_resume(struct device *dev,
1516 dev_dbg(dev, "scsi resume: %d\n", err);
1517
1518 if (err == 0) {
1519 + bool was_runtime_suspended;
1520 +
1521 + was_runtime_suspended = pm_runtime_suspended(dev);
1522 +
1523 pm_runtime_disable(dev);
1524 err = pm_runtime_set_active(dev);
1525 pm_runtime_enable(dev);
1526 @@ -93,8 +97,10 @@ static int scsi_dev_type_resume(struct device *dev,
1527 */
1528 if (!err && scsi_is_sdev_device(dev)) {
1529 struct scsi_device *sdev = to_scsi_device(dev);
1530 -
1531 - blk_set_runtime_active(sdev->request_queue);
1532 + if (was_runtime_suspended)
1533 + blk_post_runtime_resume(sdev->request_queue, 0);
1534 + else
1535 + blk_set_runtime_active(sdev->request_queue);
1536 }
1537 }
1538
1539 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1540 index 13ab1494c384..bc73181b0405 100644
1541 --- a/drivers/scsi/ufs/ufshcd.c
1542 +++ b/drivers/scsi/ufs/ufshcd.c
1543 @@ -2480,6 +2480,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
1544
1545 err = ufshcd_map_sg(hba, lrbp);
1546 if (err) {
1547 + ufshcd_release(hba);
1548 lrbp->cmd = NULL;
1549 clear_bit_unlock(tag, &hba->lrb_in_use);
1550 goto out;
1551 diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
1552 index 11cac7e10663..d2ca3b357cfe 100644
1553 --- a/drivers/spi/spi-dw.c
1554 +++ b/drivers/spi/spi-dw.c
1555 @@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master,
1556 dws->len = transfer->len;
1557 spin_unlock_irqrestore(&dws->buf_lock, flags);
1558
1559 + /* Ensure dw->rx and dw->rx_end are visible */
1560 + smp_mb();
1561 +
1562 spi_enable_chip(dws, 0);
1563
1564 /* Handle per transfer options for bpw and speed */
1565 diff --git a/drivers/staging/media/ipu3/include/intel-ipu3.h b/drivers/staging/media/ipu3/include/intel-ipu3.h
1566 index 0b1cb9f9cbd1..1bfa8c86132a 100644
1567 --- a/drivers/staging/media/ipu3/include/intel-ipu3.h
1568 +++ b/drivers/staging/media/ipu3/include/intel-ipu3.h
1569 @@ -450,7 +450,7 @@ struct ipu3_uapi_awb_fr_config_s {
1570 __u32 bayer_sign;
1571 __u8 bayer_nf;
1572 __u8 reserved2[7];
1573 -} __attribute__((aligned(32))) __packed;
1574 +} __packed;
1575
1576 /**
1577 * struct ipu3_uapi_4a_config - 4A config
1578 @@ -466,7 +466,8 @@ struct ipu3_uapi_4a_config {
1579 struct ipu3_uapi_ae_grid_config ae_grd_config;
1580 __u8 padding[20];
1581 struct ipu3_uapi_af_config_s af_config;
1582 - struct ipu3_uapi_awb_fr_config_s awb_fr_config;
1583 + struct ipu3_uapi_awb_fr_config_s awb_fr_config
1584 + __attribute__((aligned(32)));
1585 } __packed;
1586
1587 /**
1588 @@ -2472,7 +2473,7 @@ struct ipu3_uapi_acc_param {
1589 struct ipu3_uapi_yuvp1_yds_config yds2 __attribute__((aligned(32)));
1590 struct ipu3_uapi_yuvp2_tcc_static_config tcc __attribute__((aligned(32)));
1591 struct ipu3_uapi_anr_config anr;
1592 - struct ipu3_uapi_awb_fr_config_s awb_fr __attribute__((aligned(32)));
1593 + struct ipu3_uapi_awb_fr_config_s awb_fr;
1594 struct ipu3_uapi_ae_config ae;
1595 struct ipu3_uapi_af_config_s af;
1596 struct ipu3_uapi_awb_config awb;
1597 diff --git a/fs/io_uring.c b/fs/io_uring.c
1598 index b2ccb908f6b6..2050100e6e84 100644
1599 --- a/fs/io_uring.c
1600 +++ b/fs/io_uring.c
1601 @@ -409,6 +409,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1602 }
1603
1604 ctx->flags = p->flags;
1605 + init_waitqueue_head(&ctx->sqo_wait);
1606 init_waitqueue_head(&ctx->cq_wait);
1607 init_completion(&ctx->ctx_done);
1608 init_completion(&ctx->sqo_thread_started);
1609 @@ -3237,7 +3238,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
1610 {
1611 int ret;
1612
1613 - init_waitqueue_head(&ctx->sqo_wait);
1614 mmgrab(current->mm);
1615 ctx->sqo_mm = current->mm;
1616
1617 diff --git a/include/linux/security.h b/include/linux/security.h
1618 index 9df7547afc0c..fd022768e91d 100644
1619 --- a/include/linux/security.h
1620 +++ b/include/linux/security.h
1621 @@ -117,12 +117,14 @@ enum lockdown_reason {
1622 LOCKDOWN_MODULE_PARAMETERS,
1623 LOCKDOWN_MMIOTRACE,
1624 LOCKDOWN_DEBUGFS,
1625 + LOCKDOWN_XMON_WR,
1626 LOCKDOWN_INTEGRITY_MAX,
1627 LOCKDOWN_KCORE,
1628 LOCKDOWN_KPROBES,
1629 LOCKDOWN_BPF_READ,
1630 LOCKDOWN_PERF,
1631 LOCKDOWN_TRACEFS,
1632 + LOCKDOWN_XMON_RW,
1633 LOCKDOWN_CONFIDENTIALITY_MAX,
1634 };
1635
1636 diff --git a/include/uapi/linux/mmc/ioctl.h b/include/uapi/linux/mmc/ioctl.h
1637 index 00c08120f3ba..27a39847d55c 100644
1638 --- a/include/uapi/linux/mmc/ioctl.h
1639 +++ b/include/uapi/linux/mmc/ioctl.h
1640 @@ -3,6 +3,7 @@
1641 #define LINUX_MMC_IOCTL_H
1642
1643 #include <linux/types.h>
1644 +#include <linux/major.h>
1645
1646 struct mmc_ioc_cmd {
1647 /*
1648 diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
1649 index ca19b4c8acf5..4a942d4e9763 100644
1650 --- a/kernel/cgroup/rstat.c
1651 +++ b/kernel/cgroup/rstat.c
1652 @@ -33,12 +33,9 @@ void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
1653 return;
1654
1655 /*
1656 - * Paired with the one in cgroup_rstat_cpu_pop_upated(). Either we
1657 - * see NULL updated_next or they see our updated stat.
1658 - */
1659 - smp_mb();
1660 -
1661 - /*
1662 + * Speculative already-on-list test. This may race leading to
1663 + * temporary inaccuracies, which is fine.
1664 + *
1665 * Because @parent's updated_children is terminated with @parent
1666 * instead of NULL, we can tell whether @cgrp is on the list by
1667 * testing the next pointer for NULL.
1668 @@ -134,13 +131,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
1669 *nextp = rstatc->updated_next;
1670 rstatc->updated_next = NULL;
1671
1672 - /*
1673 - * Paired with the one in cgroup_rstat_cpu_updated().
1674 - * Either they see NULL updated_next or we see their
1675 - * updated stat.
1676 - */
1677 - smp_mb();
1678 -
1679 return pos;
1680 }
1681
1682 diff --git a/kernel/relay.c b/kernel/relay.c
1683 index ade14fb7ce2e..4b760ec16342 100644
1684 --- a/kernel/relay.c
1685 +++ b/kernel/relay.c
1686 @@ -581,6 +581,11 @@ struct rchan *relay_open(const char *base_filename,
1687 return NULL;
1688
1689 chan->buf = alloc_percpu(struct rchan_buf *);
1690 + if (!chan->buf) {
1691 + kfree(chan);
1692 + return NULL;
1693 + }
1694 +
1695 chan->version = RELAYFS_CHANNEL_VERSION;
1696 chan->n_subbufs = n_subbufs;
1697 chan->subbuf_size = subbuf_size;
1698 diff --git a/mm/mremap.c b/mm/mremap.c
1699 index 245bf9c555b2..8005d0b2b843 100644
1700 --- a/mm/mremap.c
1701 +++ b/mm/mremap.c
1702 @@ -266,7 +266,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
1703 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
1704 if (!new_pmd)
1705 break;
1706 - if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
1707 + if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
1708 if (extent == HPAGE_PMD_SIZE) {
1709 bool moved;
1710 /* See comment in move_ptes() */
1711 diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
1712 index cc826c2767a3..fbc2ee6d46fc 100644
1713 --- a/security/integrity/evm/evm_crypto.c
1714 +++ b/security/integrity/evm/evm_crypto.c
1715 @@ -209,7 +209,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
1716 data->hdr.length = crypto_shash_digestsize(desc->tfm);
1717
1718 error = -ENODATA;
1719 - list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
1720 + list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
1721 bool is_ima = false;
1722
1723 if (strcmp(xattr->name, XATTR_NAME_IMA) == 0)
1724 diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
1725 index f9a81b187fae..a2c393385db0 100644
1726 --- a/security/integrity/evm/evm_main.c
1727 +++ b/security/integrity/evm/evm_main.c
1728 @@ -99,7 +99,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
1729 if (!(inode->i_opflags & IOP_XATTR))
1730 return -EOPNOTSUPP;
1731
1732 - list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
1733 + list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
1734 error = __vfs_getxattr(dentry, inode, xattr->name, NULL, 0);
1735 if (error < 0) {
1736 if (error == -ENODATA)
1737 @@ -230,7 +230,7 @@ static int evm_protected_xattr(const char *req_xattr_name)
1738 struct xattr_list *xattr;
1739
1740 namelen = strlen(req_xattr_name);
1741 - list_for_each_entry_rcu(xattr, &evm_config_xattrnames, list) {
1742 + list_for_each_entry_lockless(xattr, &evm_config_xattrnames, list) {
1743 if ((strlen(xattr->name) == namelen)
1744 && (strncmp(req_xattr_name, xattr->name, namelen) == 0)) {
1745 found = 1;
1746 diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
1747 index c11c1f7b3ddd..0f37ef27268d 100644
1748 --- a/security/integrity/evm/evm_secfs.c
1749 +++ b/security/integrity/evm/evm_secfs.c
1750 @@ -234,7 +234,14 @@ static ssize_t evm_write_xattrs(struct file *file, const char __user *buf,
1751 goto out;
1752 }
1753
1754 - /* Guard against races in evm_read_xattrs */
1755 + /*
1756 + * xattr_list_mutex guards against races in evm_read_xattrs().
1757 + * Entries are only added to the evm_config_xattrnames list
1758 + * and never deleted. Therefore, the list is traversed
1759 + * using list_for_each_entry_lockless() without holding
1760 + * the mutex in evm_calc_hmac_or_hash(), evm_find_protected_xattrs()
1761 + * and evm_protected_xattr().
1762 + */
1763 mutex_lock(&xattr_list_mutex);
1764 list_for_each_entry(tmp, &evm_config_xattrnames, list) {
1765 if (strcmp(xattr->name, tmp->name) == 0) {
1766 diff --git a/security/lockdown/lockdown.c b/security/lockdown/lockdown.c
1767 index 40b790536def..b2f87015d6e9 100644
1768 --- a/security/lockdown/lockdown.c
1769 +++ b/security/lockdown/lockdown.c
1770 @@ -32,12 +32,14 @@ static const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1] = {
1771 [LOCKDOWN_MODULE_PARAMETERS] = "unsafe module parameters",
1772 [LOCKDOWN_MMIOTRACE] = "unsafe mmio",
1773 [LOCKDOWN_DEBUGFS] = "debugfs access",
1774 + [LOCKDOWN_XMON_WR] = "xmon write access",
1775 [LOCKDOWN_INTEGRITY_MAX] = "integrity",
1776 [LOCKDOWN_KCORE] = "/proc/kcore access",
1777 [LOCKDOWN_KPROBES] = "use of kprobes",
1778 [LOCKDOWN_BPF_READ] = "use of bpf to read kernel RAM",
1779 [LOCKDOWN_PERF] = "unsafe use of perf",
1780 [LOCKDOWN_TRACEFS] = "use of tracefs",
1781 + [LOCKDOWN_XMON_RW] = "xmon read and write access",
1782 [LOCKDOWN_CONFIDENTIALITY_MAX] = "confidentiality",
1783 };
1784
1785 diff --git a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
1786 index 67b276a65a8d..8ad31c91fc75 100644
1787 --- a/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
1788 +++ b/sound/soc/intel/boards/kbl_rt5663_rt5514_max98927.c
1789 @@ -626,7 +626,7 @@ static int kabylake_card_late_probe(struct snd_soc_card *card)
1790 * kabylake audio machine driver for MAX98927 + RT5514 + RT5663
1791 */
1792 static struct snd_soc_card kabylake_audio_card = {
1793 - .name = "kbl_r5514_5663_max",
1794 + .name = "kbl-r5514-5663-max",
1795 .owner = THIS_MODULE,
1796 .dai_link = kabylake_dais,
1797 .num_links = ARRAY_SIZE(kabylake_dais),
1798 diff --git a/sound/soc/intel/boards/skl_hda_dsp_generic.c b/sound/soc/intel/boards/skl_hda_dsp_generic.c
1799 index 1778acdc367c..e8d676c192f6 100644
1800 --- a/sound/soc/intel/boards/skl_hda_dsp_generic.c
1801 +++ b/sound/soc/intel/boards/skl_hda_dsp_generic.c
1802 @@ -90,7 +90,7 @@ skl_hda_add_dai_link(struct snd_soc_card *card, struct snd_soc_dai_link *link)
1803 }
1804
1805 static struct snd_soc_card hda_soc_card = {
1806 - .name = "skl_hda_card",
1807 + .name = "hda-dsp",
1808 .owner = THIS_MODULE,
1809 .dai_link = skl_hda_be_dai_links,
1810 .dapm_widgets = skl_hda_widgets,
1811 diff --git a/sound/soc/intel/boards/sof_rt5682.c b/sound/soc/intel/boards/sof_rt5682.c
1812 index 06b7d6c6c9a0..302ca1920791 100644
1813 --- a/sound/soc/intel/boards/sof_rt5682.c
1814 +++ b/sound/soc/intel/boards/sof_rt5682.c
1815 @@ -374,7 +374,7 @@ static int dmic_init(struct snd_soc_pcm_runtime *rtd)
1816
1817 /* sof audio machine driver for rt5682 codec */
1818 static struct snd_soc_card sof_audio_card_rt5682 = {
1819 - .name = "sof_rt5682",
1820 + .name = "rt5682", /* the sof- prefix is added by the core */
1821 .owner = THIS_MODULE,
1822 .controls = sof_controls,
1823 .num_controls = ARRAY_SIZE(sof_controls),
1824 diff --git a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
1825 index 24dd8ed48580..b025daea062d 100755
1826 --- a/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
1827 +++ b/tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
1828 @@ -300,7 +300,7 @@ test_uc_aware()
1829 local i
1830
1831 for ((i = 0; i < attempts; ++i)); do
1832 - if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then
1833 + if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then
1834 ((passes++))
1835 fi
1836