Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0131-5.4.32-all-fixes.patch



Revision 3512
Mon May 11 14:36:38 2020 UTC by niro
File size: 64165 byte(s)
-linux-5.4.32
diff --git a/Makefile b/Makefile
index af28533919cc..c2d5975844d9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 31
+SUBLEVEL = 32
EXTRAVERSION =
NAME = Kleptomaniac Octopus

diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 593bf1519608..95584ee02b55 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -520,6 +520,7 @@ config SOC_IMX6UL
bool "i.MX6 UltraLite support"
select PINCTRL_IMX6UL
select SOC_IMX6
+ select ARM_ERRATA_814220

help
This enables support for Freescale i.MX6 UltraLite processor.
@@ -556,6 +557,7 @@ config SOC_IMX7D
select PINCTRL_IMX7D
select SOC_IMX7D_CA7 if ARCH_MULTI_V7
select SOC_IMX7D_CM4 if ARM_SINGLE_ARMV7M
+ select ARM_ERRATA_814220 if ARCH_MULTI_V7
help
This enables support for Freescale i.MX7 Dual processor.

diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 237ee0c4169f..612ed3c6d581 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -141,7 +141,9 @@ struct lowcore {

/* br %r1 trampoline */
__u16 br_r1_trampoline; /* 0x0400 */
- __u8 pad_0x0402[0x0e00-0x0402]; /* 0x0402 */
+ __u32 return_lpswe; /* 0x0402 */
+ __u32 return_mcck_lpswe; /* 0x0406 */
+ __u8 pad_0x040a[0x0e00-0x040a]; /* 0x040a */

/*
* 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 51a0e4a2dc96..560d8b77b1d1 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -162,6 +162,7 @@ typedef struct thread_struct thread_struct;
#define INIT_THREAD { \
.ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
.fpu.regs = (void *) init_task.thread.fpu.fprs, \
+ .last_break = 1, \
}

/*
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 6dc6c4fbc8e2..1932088686a6 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -8,6 +8,7 @@

#include <linux/bits.h>
#include <uapi/asm/setup.h>
+#include <linux/build_bug.h>

#define EP_OFFSET 0x10008
#define EP_STRING "S390EP"
@@ -157,6 +158,12 @@ static inline unsigned long kaslr_offset(void)
return __kaslr_offset;
}

+static inline u32 gen_lpswe(unsigned long addr)
+{
+ BUILD_BUG_ON(addr > 0xfff);
+ return 0xb2b20000 | addr;
+}
+
#else /* __ASSEMBLY__ */

#define IPL_DEVICE (IPL_DEVICE_OFFSET)
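
The gen_lpswe() helper added above builds a LPSWE instruction into the lowcore: the opcode 0xb2b2 occupies the two high bytes, and the 12-bit displacement (with base register 0) addresses the PSW to load. A minimal user-space sketch of the same encoding, with a made-up offset, for illustration only:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of gen_lpswe()'s encoding: LPSWE is 0xb2b2 followed by a
	 * base/displacement operand; with base 0 the low 12 bits are an
	 * absolute lowcore offset. The offset used below is illustrative. */
	static uint32_t gen_lpswe(unsigned long addr)
	{
		assert(addr <= 0xfff);	/* displacement field is 12 bits */
		return 0xb2b20000u | (uint32_t)addr;
	}

	int main(void)
	{
		printf("%08x\n", gen_lpswe(0x200));	/* prints b2b20200 */
		return 0;
	}
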
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 41ac4ad21311..b6628586ab70 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -125,6 +125,8 @@ int main(void)
OFFSET(__LC_EXT_DAMAGE_CODE, lowcore, external_damage_code);
OFFSET(__LC_MCCK_FAIL_STOR_ADDR, lowcore, failing_storage_address);
OFFSET(__LC_LAST_BREAK, lowcore, breaking_event_addr);
+ OFFSET(__LC_RETURN_LPSWE, lowcore, return_lpswe);
+ OFFSET(__LC_RETURN_MCCK_LPSWE, lowcore, return_mcck_lpswe);
OFFSET(__LC_RST_OLD_PSW, lowcore, restart_old_psw);
OFFSET(__LC_EXT_OLD_PSW, lowcore, external_old_psw);
OFFSET(__LC_SVC_OLD_PSW, lowcore, svc_old_psw);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 270d1d145761..bc85987727f0 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -115,26 +115,29 @@ _LPP_OFFSET = __LC_LPP

.macro SWITCH_ASYNC savearea,timer
tmhh %r8,0x0001 # interrupting from user ?
- jnz 1f
+ jnz 2f
lgr %r14,%r9
+ cghi %r14,__LC_RETURN_LPSWE
+ je 0f
slg %r14,BASED(.Lcritical_start)
clg %r14,BASED(.Lcritical_length)
- jhe 0f
+ jhe 1f
+0:
lghi %r11,\savearea # inside critical section, do cleanup
brasl %r14,cleanup_critical
tmhh %r8,0x0001 # retest problem state after cleanup
- jnz 1f
-0: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
+ jnz 2f
+1: lg %r14,__LC_ASYNC_STACK # are we already on the target stack?
slgr %r14,%r15
srag %r14,%r14,STACK_SHIFT
- jnz 2f
+ jnz 3f
CHECK_STACK \savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- j 3f
-1: UPDATE_VTIME %r14,%r15,\timer
+ j 4f
+2: UPDATE_VTIME %r14,%r15,\timer
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
-2: lg %r15,__LC_ASYNC_STACK # load async stack
-3: la %r11,STACK_FRAME_OVERHEAD(%r15)
+3: lg %r15,__LC_ASYNC_STACK # load async stack
+4: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm

.macro UPDATE_VTIME w1,w2,enter_timer
@@ -401,7 +404,7 @@ ENTRY(system_call)
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
+ b __LC_RETURN_LPSWE(%r0)
.Lsysc_done:

#
@@ -608,43 +611,50 @@ ENTRY(pgm_check_handler)
BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
- lg %r12,__LC_CURRENT
+ srag %r11,%r10,12
+ jnz 0f
+ /* if __LC_LAST_BREAK is < 4096, it contains one of
+ * the lpswe addresses in lowcore. Set it to 1 (initial state)
+ * to prevent leaking that address to userspace.
+ */
+ lghi %r10,1
+0: lg %r12,__LC_CURRENT
lghi %r11,0
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_PGM_OLD_PSW
tmhh %r8,0x0001 # test problem state bit
- jnz 2f # -> fault in user space
+ jnz 3f # -> fault in user space
#if IS_ENABLED(CONFIG_KVM)
# cleanup critical section for program checks in sie64a
lgr %r14,%r9
slg %r14,BASED(.Lsie_critical_start)
clg %r14,BASED(.Lsie_critical_length)
- jhe 0f
+ jhe 1f
lg %r14,__SF_SIE_CONTROL(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
lghi %r11,_PIF_GUEST_FAULT
#endif
-0: tmhh %r8,0x4000 # PER bit set in old PSW ?
- jnz 1f # -> enabled, can't be a double fault
+1: tmhh %r8,0x4000 # PER bit set in old PSW ?
+ jnz 2f # -> enabled, can't be a double fault
tm __LC_PGM_ILC+3,0x80 # check for per exception
jnz .Lpgm_svcper # -> single stepped svc
-1: CHECK_STACK __LC_SAVE_AREA_SYNC
+2: CHECK_STACK __LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
- # CHECK_VMAP_STACK branches to stack_overflow or 4f
- CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
-2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ # CHECK_VMAP_STACK branches to stack_overflow or 5f
+ CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,5f
+3: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
lghi %r13,__LC_PGM_TDB
tm __LC_PGM_ILC+2,0x02 # check for transaction abort
- jz 3f
+ jz 4f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
-3: stg %r10,__THREAD_last_break(%r14)
-4: lgr %r13,%r11
+4: stg %r10,__THREAD_last_break(%r14)
+5: lgr %r13,%r11
la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
# clear user controlled registers to prevent speculative use
@@ -663,14 +673,14 @@ ENTRY(pgm_check_handler)
stg %r13,__PT_FLAGS(%r11)
stg %r10,__PT_ARGS(%r11)
tm __LC_PGM_ILC+3,0x80 # check for per exception
- jz 5f
+ jz 6f
tmhh %r8,0x0001 # kernel per event ?
jz .Lpgm_kprobe
oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-5: REENABLE_IRQS
+6: REENABLE_IRQS
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
larl %r1,pgm_check_table
llgh %r10,__PT_INT_CODE+2(%r11)
@@ -775,7 +785,7 @@ ENTRY(io_int_handler)
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_PSW
+ b __LC_RETURN_LPSWE(%r0)
.Lio_done:

#
@@ -1214,7 +1224,7 @@ ENTRY(mcck_int_handler)
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
- lpswe __LC_RETURN_MCCK_PSW
+ b __LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
lg %r15,__LC_NODAT_STACK
@@ -1271,6 +1281,8 @@ ENDPROC(stack_overflow)
#endif

ENTRY(cleanup_critical)
+ cghi %r9,__LC_RETURN_LPSWE
+ je .Lcleanup_lpswe
#if IS_ENABLED(CONFIG_KVM)
clg %r9,BASED(.Lcleanup_table_sie) # .Lsie_gmap
jl 0f
@@ -1424,6 +1436,7 @@ ENDPROC(cleanup_critical)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
+.Lcleanup_lpswe:
1: lmg %r8,%r9,__LC_RETURN_PSW
BR_EX %r14,%r11
.Lcleanup_sysc_restore_insn:
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index b0afec673f77..4e6299e2ca94 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -105,6 +105,7 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
p->thread.system_timer = 0;
p->thread.hardirq_timer = 0;
p->thread.softirq_timer = 0;
+ p->thread.last_break = 1;

frame->sf.back_chain = 0;
/* new return point is ret_from_fork */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b95e6fa34cc8..4366962f4930 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -73,6 +73,7 @@
#include <asm/nospec-branch.h>
#include <asm/mem_detect.h>
#include <asm/uv.h>
+#include <asm/asm-offsets.h>
#include "entry.h"

/*
@@ -457,6 +458,8 @@ static void __init setup_lowcore_dat_off(void)
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);

set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 06dddd7c4290..f468a10e5206 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -212,6 +212,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->spinlock_lockval = arch_spin_lockval(cpu);
lc->spinlock_index = 0;
lc->br_r1_trampoline = 0x07f1; /* br %r1 */
+ lc->return_lpswe = gen_lpswe(__LC_RETURN_PSW);
+ lc->return_mcck_lpswe = gen_lpswe(__LC_RETURN_MCCK_PSW);
if (nmi_alloc_per_cpu(lc))
goto out_async;
if (vdso_alloc_per_cpu(lc))
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index b403fa14847d..f810930aff42 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -415,6 +415,10 @@ void __init vmem_map_init(void)
SET_MEMORY_RO | SET_MEMORY_X);
__set_memory(__stext_dma, (__etext_dma - __stext_dma) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
+
+ /* we need lowcore executable for our LPSWE instructions */
+ set_memory_x(0, 1);
+
pr_info("Write protected kernel read-only data: %luk\n",
(unsigned long)(__end_rodata - _stext) >> 10);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3c1abab1fdf5..a8c1a45cedde 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3007,6 +3007,14 @@ static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set)

static int blk_mq_update_queue_map(struct blk_mq_tag_set *set)
{
+ /*
+ * blk_mq_map_queues() and multiple .map_queues() implementations
+ * expect that set->map[HCTX_TYPE_DEFAULT].nr_queues is set to the
+ * number of hardware queues.
+ */
+ if (set->nr_maps == 1)
+ set->map[HCTX_TYPE_DEFAULT].nr_queues = set->nr_hw_queues;
+
if (set->ops->map_queues && !is_kdump_kernel()) {
int i;

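The comment in the blk-mq hunk states the contract: blk_mq_map_queues() and driver .map_queues() callbacks walk set->map[HCTX_TYPE_DEFAULT] and expect its nr_queues to equal the number of hardware queues. A hedged sketch of a single-map driver that relies on this; example_map_queues() is a made-up name:

	/* With the hunk above, nr_queues already equals set->nr_hw_queues by
	 * the time this hypothetical callback runs, so the stock mapper
	 * spreads CPUs over the right number of entries. */
	static int example_map_queues(struct blk_mq_tag_set *set)
	{
		return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
	}
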
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ce59a3f32eac..abd39cc5ff88 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -1009,6 +1009,10 @@ static bool acpi_s2idle_wake(void)
if (acpi_any_fixed_event_status_set())
return true;

+ /* Check wakeups from drivers sharing the SCI. */
+ if (acpi_check_wakeup_handlers())
+ return true;
+
/*
* If there are no EC events to process and at least one of the
* other enabled GPEs is active, the wakeup is regarded as a
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 41675d24a9bc..3d90480ce1b1 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -2,6 +2,7 @@

extern void acpi_enable_wakeup_devices(u8 sleep_state);
extern void acpi_disable_wakeup_devices(u8 sleep_state);
+extern bool acpi_check_wakeup_handlers(void);

extern struct list_head acpi_wakeup_device_list;
extern struct mutex acpi_device_lock;
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index 9614126bf56e..90c40f992e13 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -12,6 +12,15 @@
#include "internal.h"
#include "sleep.h"

+struct acpi_wakeup_handler {
+ struct list_head list_node;
+ bool (*wakeup)(void *context);
+ void *context;
+};
+
+static LIST_HEAD(acpi_wakeup_handler_head);
+static DEFINE_MUTEX(acpi_wakeup_handler_mutex);
+
/*
* We didn't lock acpi_device_lock in the file, because it invokes oops in
* suspend/resume and isn't really required as this is called in S-state. At
@@ -96,3 +105,75 @@ int __init acpi_wakeup_device_init(void)
mutex_unlock(&acpi_device_lock);
return 0;
}
+
+/**
+ * acpi_register_wakeup_handler - Register wakeup handler
+ * @wake_irq: The IRQ through which the device may receive wakeups
+ * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup
+ * @context: Context to pass to the handler when calling it
+ *
+ * Drivers which may share an IRQ with the SCI can use this to register
+ * a handler which returns true when the device they are managing wants
+ * to trigger a wakeup.
+ */
+int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context),
+ void *context)
+{
+ struct acpi_wakeup_handler *handler;
+
+ /*
+ * If the device is not sharing its IRQ with the SCI, there is no
+ * need to register the handler.
+ */
+ if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq)
+ return 0;
+
+ handler = kmalloc(sizeof(*handler), GFP_KERNEL);
+ if (!handler)
+ return -ENOMEM;
+
+ handler->wakeup = wakeup;
+ handler->context = context;
+
+ mutex_lock(&acpi_wakeup_handler_mutex);
+ list_add(&handler->list_node, &acpi_wakeup_handler_head);
+ mutex_unlock(&acpi_wakeup_handler_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler);
+
+/**
+ * acpi_unregister_wakeup_handler - Unregister wakeup handler
+ * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler()
+ * @context: Context passed to acpi_register_wakeup_handler()
+ */
+void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context),
+ void *context)
+{
+ struct acpi_wakeup_handler *handler;
+
+ mutex_lock(&acpi_wakeup_handler_mutex);
+ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+ if (handler->wakeup == wakeup && handler->context == context) {
+ list_del(&handler->list_node);
+ kfree(handler);
+ break;
+ }
+ }
+ mutex_unlock(&acpi_wakeup_handler_mutex);
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler);
+
+bool acpi_check_wakeup_handlers(void)
+{
+ struct acpi_wakeup_handler *handler;
+
+ /* No need to lock, nothing else is running when we're called. */
+ list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) {
+ if (handler->wakeup(handler->context))
+ return true;
+ }
+
+ return false;
+}
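
The INT0002 vGPIO hunks near the end of this patch are the first caller of this new API. In schematic form, a driver whose wake IRQ may be shared with the SCI would use it as below; my_check_wake(), my_has_pending_wake() and my_dev are placeholder names, not real kernel symbols:

	/* Hypothetical wakeup callback: report whether this device has a
	 * pending wake event when the shared SCI fires during s2idle. */
	static bool my_check_wake(void *context)
	{
		struct my_device *my_dev = context;

		return my_has_pending_wake(my_dev);
	}

	/* probe: a no-op unless wake_irq == acpi_sci_irq */
	acpi_register_wakeup_handler(my_dev->wake_irq, my_check_wake, my_dev);

	/* remove: unregistration matches on the callback + context pair */
	acpi_unregister_wakeup_handler(my_check_wake, my_dev);
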
diff --git a/drivers/char/hw_random/imx-rngc.c b/drivers/char/hw_random/imx-rngc.c
index 30cf00f8e9a0..0576801944fd 100644
--- a/drivers/char/hw_random/imx-rngc.c
+++ b/drivers/char/hw_random/imx-rngc.c
@@ -105,8 +105,10 @@ static int imx_rngc_self_test(struct imx_rngc *rngc)
return -ETIMEDOUT;
}

- if (rngc->err_reg != 0)
+ if (rngc->err_reg != 0) {
+ imx_rngc_irq_mask_clear(rngc);
return -EIO;
+ }

return 0;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a385fc1da1cb..8ff28c14af7e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -2358,11 +2358,11 @@ struct batched_entropy {

/*
* Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
* that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
*/
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@@ -2375,15 +2375,6 @@ u64 get_random_u64(void)
struct batched_entropy *batch;
static void *previous;

-#if BITS_PER_LONG == 64
- if (arch_get_random_long((unsigned long *)&ret))
- return ret;
-#else
- if (arch_get_random_long((unsigned long *)&ret) &&
- arch_get_random_long((unsigned long *)&ret + 1))
- return ret;
-#endif
-
warn_unseeded_randomness(&previous);

batch = raw_cpu_ptr(&batched_entropy_u64);
@@ -2408,9 +2399,6 @@ u32 get_random_u32(void)
struct batched_entropy *batch;
static void *previous;

- if (arch_get_random_int(&ret))
- return ret;
-
warn_unseeded_randomness(&previous);

batch = raw_cpu_ptr(&batched_entropy_u32);
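
After this hunk, get_random_u64() and get_random_u32() always draw from the per-CPU batched entropy pool instead of short-circuiting through the arch RNG helpers. A hedged caller-side sketch of the pattern the retained comment describes:

	/* Callers that need seeded output gate once on wait_for_random_bytes()
	 * and may then draw from the batched interface freely. */
	u64 cookie = 0;

	if (wait_for_random_bytes() == 0)
		cookie = get_random_u64();	/* always batched entropy now */
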
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 48e16ad93bbd..51dc8753b527 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -121,7 +121,7 @@ static inline void debug_active_assert(struct i915_active *ref) { }
#endif

static void
-__active_retire(struct i915_active *ref)
+__active_retire(struct i915_active *ref, bool lock)
{
struct active_node *it, *n;
struct rb_root root;
@@ -138,7 +138,8 @@ __active_retire(struct i915_active *ref)
retire = true;
}

- mutex_unlock(&ref->mutex);
+ if (likely(lock))
+ mutex_unlock(&ref->mutex);
if (!retire)
return;

@@ -153,21 +154,28 @@ __active_retire(struct i915_active *ref)
}

static void
-active_retire(struct i915_active *ref)
+active_retire(struct i915_active *ref, bool lock)
{
GEM_BUG_ON(!atomic_read(&ref->count));
if (atomic_add_unless(&ref->count, -1, 1))
return;

/* One active may be flushed from inside the acquire of another */
- mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
- __active_retire(ref);
+ if (likely(lock))
+ mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
+ __active_retire(ref, lock);
}

static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
- active_retire(node_from_active(base)->ref);
+ active_retire(node_from_active(base)->ref, true);
+}
+
+static void
+node_retire_nolock(struct i915_active_request *base, struct i915_request *rq)
+{
+ active_retire(node_from_active(base)->ref, false);
}

static struct i915_active_request *
@@ -364,7 +372,7 @@ int i915_active_acquire(struct i915_active *ref)
void i915_active_release(struct i915_active *ref)
{
debug_active_assert(ref);
- active_retire(ref);
+ active_retire(ref, true);
}

static void __active_ungrab(struct i915_active *ref)
@@ -391,7 +399,7 @@ void i915_active_ungrab(struct i915_active *ref)
{
GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));

- active_retire(ref);
+ active_retire(ref, true);
__active_ungrab(ref);
}

@@ -421,12 +429,13 @@ int i915_active_wait(struct i915_active *ref)
break;
}

- err = i915_active_request_retire(&it->base, BKL(ref));
+ err = i915_active_request_retire(&it->base, BKL(ref),
+ node_retire_nolock);
if (err)
break;
}

- __active_retire(ref);
+ __active_retire(ref, true);
if (err)
return err;

diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index f95058f99057..0ad7ef60d15f 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -309,7 +309,7 @@ i915_active_request_isset(const struct i915_active_request *active)
*/
static inline int __must_check
i915_active_request_retire(struct i915_active_request *active,
- struct mutex *mutex)
+ struct mutex *mutex, i915_active_retire_fn retire)
{
struct i915_request *request;
long ret;
@@ -327,7 +327,7 @@ i915_active_request_retire(struct i915_active_request *active,
list_del_init(&active->link);
RCU_INIT_POINTER(active->request, NULL);

- active->retire(active, request);
+ retire(active, request);

return 0;
}
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6c12da176981..8f776b7de45e 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2911,6 +2911,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
err2:
kfree(route->path_rec);
route->path_rec = NULL;
+ route->num_paths = 0;
err1:
kfree(work);
return ret;
@@ -4719,6 +4720,19 @@ static int __init cma_init(void)
{
int ret;

+ /*
+ * There is a rare lock ordering dependency in cma_netdev_callback()
+ * that only happens when bonding is enabled. Teach lockdep that rtnl
+ * must never be nested under lock so it can find these without having
+ * to test with bonding.
+ */
+ if (IS_ENABLED(CONFIG_LOCKDEP)) {
+ rtnl_lock();
+ mutex_lock(&lock);
+ mutex_unlock(&lock);
+ rtnl_unlock();
+ }
+
cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
if (!cma_wq)
return -ENOMEM;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 0274e9b704be..f4f79f1292b9 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -91,6 +91,7 @@ struct ucma_context {

struct ucma_file *file;
struct rdma_cm_id *cm_id;
+ struct mutex mutex;
u64 uid;

struct list_head list;
@@ -216,6 +217,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
init_completion(&ctx->comp);
INIT_LIST_HEAD(&ctx->mc_list);
ctx->file = file;
+ mutex_init(&ctx->mutex);

if (xa_alloc(&ctx_table, &ctx->id, ctx, xa_limit_32b, GFP_KERNEL))
goto error;
@@ -589,6 +591,7 @@ static int ucma_free_ctx(struct ucma_context *ctx)
}

events_reported = ctx->events_reported;
+ mutex_destroy(&ctx->mutex);
kfree(ctx);
return events_reported;
}
@@ -658,7 +661,10 @@ static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
+
ucma_put_ctx(ctx);
return ret;
}
@@ -681,7 +687,9 @@ static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -705,8 +713,10 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -731,8 +741,10 @@ static ssize_t ucma_resolve_addr(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
(struct sockaddr *) &cmd.dst_addr, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -752,7 +764,9 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -841,6 +855,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
memset(&resp, 0, sizeof resp);
addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
@@ -864,6 +879,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
+ mutex_unlock(&ctx->mutex);
if (copy_to_user(u64_to_user_ptr(cmd.response),
&resp, sizeof(resp)))
ret = -EFAULT;
@@ -1014,6 +1030,7 @@ static ssize_t ucma_query(struct ucma_file *file,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
switch (cmd.option) {
case RDMA_USER_CM_QUERY_ADDR:
ret = ucma_query_addr(ctx, response, out_len);
@@ -1028,6 +1045,7 @@ static ssize_t ucma_query(struct ucma_file *file,
ret = -ENOSYS;
break;
}
+ mutex_unlock(&ctx->mutex);

ucma_put_ctx(ctx);
return ret;
@@ -1068,7 +1086,9 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);

ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
+ mutex_lock(&ctx->mutex);
ret = rdma_connect(ctx->cm_id, &conn_param);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1089,7 +1109,9 @@ static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,

ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
cmd.backlog : max_backlog;
+ mutex_lock(&ctx->mutex);
ret = rdma_listen(ctx->cm_id, ctx->backlog);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1112,13 +1134,17 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
if (cmd.conn_param.valid) {
ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
mutex_lock(&file->mut);
+ mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, &conn_param, NULL);
+ mutex_unlock(&ctx->mutex);
if (!ret)
ctx->uid = cmd.uid;
mutex_unlock(&file->mut);
- } else
+ } else {
+ mutex_lock(&ctx->mutex);
ret = __rdma_accept(ctx->cm_id, NULL, NULL);
-
+ mutex_unlock(&ctx->mutex);
+ }
ucma_put_ctx(ctx);
return ret;
}
@@ -1137,7 +1163,9 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1156,7 +1184,9 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
ret = rdma_disconnect(ctx->cm_id);
+ mutex_unlock(&ctx->mutex);
ucma_put_ctx(ctx);
return ret;
}
@@ -1187,7 +1217,9 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
resp.qp_attr_mask = 0;
memset(&qp_attr, 0, sizeof qp_attr);
qp_attr.qp_state = cmd.qp_state;
+ mutex_lock(&ctx->mutex);
ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
+ mutex_unlock(&ctx->mutex);
if (ret)
goto out;

@@ -1273,9 +1305,13 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
struct sa_path_rec opa;

sa_convert_path_ib_to_opa(&opa, &sa_path);
+ mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &opa);
+ mutex_unlock(&ctx->mutex);
} else {
+ mutex_lock(&ctx->mutex);
ret = rdma_set_ib_path(ctx->cm_id, &sa_path);
+ mutex_unlock(&ctx->mutex);
}
if (ret)
return ret;
@@ -1308,7 +1344,9 @@ static int ucma_set_option_level(struct ucma_context *ctx, int level,

switch (level) {
case RDMA_OPTION_ID:
+ mutex_lock(&ctx->mutex);
ret = ucma_set_option_id(ctx, optname, optval, optlen);
+ mutex_unlock(&ctx->mutex);
break;
case RDMA_OPTION_IB:
ret = ucma_set_option_ib(ctx, optname, optval, optlen);
@@ -1368,8 +1406,10 @@ static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
if (IS_ERR(ctx))
return PTR_ERR(ctx);

+ mutex_lock(&ctx->mutex);
if (ctx->cm_id->device)
ret = rdma_notify(ctx->cm_id, (enum ib_event_type)cmd.event);
+ mutex_unlock(&ctx->mutex);

ucma_put_ctx(ctx);
return ret;
@@ -1412,8 +1452,10 @@ static ssize_t ucma_process_join(struct ucma_file *file,
mc->join_state = join_state;
mc->uid = cmd->uid;
memcpy(&mc->addr, addr, cmd->addr_size);
+ mutex_lock(&ctx->mutex);
ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
join_state, mc);
+ mutex_unlock(&ctx->mutex);
if (ret)
goto err2;

@@ -1513,7 +1555,10 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file,
goto out;
}

+ mutex_lock(&mc->ctx->mutex);
rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
+ mutex_unlock(&mc->ctx->mutex);
+
mutex_lock(&mc->ctx->file->mut);
ucma_cleanup_mc_events(mc);
list_del(&mc->list);
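
Every ucma.c hunk above applies the same transformation; schematically (example_ucma_op() is a placeholder for any of the patched handlers, not a real function):

	/* Each rdma_* call on ctx->cm_id is now serialized by the new
	 * per-context mutex instead of trusting userspace not to issue
	 * racing commands against the same context. */
	static ssize_t example_ucma_op(struct ucma_context *ctx)
	{
		ssize_t ret;

		mutex_lock(&ctx->mutex);
		ret = rdma_disconnect(ctx->cm_id);	/* any cm_id operation */
		mutex_unlock(&ctx->mutex);

		ucma_put_ctx(ctx);
		return ret;
	}
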
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c
index 90f62c4bddba..074ec71772d2 100644
--- a/drivers/infiniband/hw/hfi1/sysfs.c
+++ b/drivers/infiniband/hw/hfi1/sysfs.c
@@ -674,7 +674,11 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sc2vl sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail;
+ /*
+ * Based on the documentation for kobject_init_and_add(), the
+ * caller should call kobject_put even if this call fails.
+ */
+ goto bail_sc2vl;
}
kobject_uevent(&ppd->sc2vl_kobj, KOBJ_ADD);

@@ -684,7 +688,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping sl2sc sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sc2vl;
+ goto bail_sl2sc;
}
kobject_uevent(&ppd->sl2sc_kobj, KOBJ_ADD);

@@ -694,7 +698,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping vl2mtu sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_sl2sc;
+ goto bail_vl2mtu;
}
kobject_uevent(&ppd->vl2mtu_kobj, KOBJ_ADD);

@@ -704,7 +708,7 @@ int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num,
dd_dev_err(dd,
"Skipping Congestion Control sysfs info, (err %d) port %u\n",
ret, port_num);
- goto bail_vl2mtu;
+ goto bail_cc;
}

kobject_uevent(&ppd->pport_cc_kobj, KOBJ_ADD);
@@ -742,7 +746,6 @@ bail_sl2sc:
kobject_put(&ppd->sl2sc_kobj);
bail_sc2vl:
kobject_put(&ppd->sc2vl_kobj);
-bail:
return ret;
}

@@ -853,8 +856,13 @@ int hfi1_verbs_register_sysfs(struct hfi1_devdata *dd)

return 0;
bail:
- for (i = 0; i < dd->num_sdma; i++)
- kobject_del(&dd->per_sdma[i].kobj);
+ /*
+ * The function kobject_put() will call kobject_del() if the kobject
+ * has been added successfully. The sysfs files created under the
+ * kobject directory will also be removed during the process.
+ */
+ for (; i >= 0; i--)
+ kobject_put(&dd->per_sdma[i].kobj);

return ret;
}
@@ -867,6 +875,10 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd)
struct hfi1_pportdata *ppd;
int i;

+ /* Unwind operations in hfi1_verbs_register_sysfs() */
+ for (i = 0; i < dd->num_sdma; i++)
+ kobject_put(&dd->per_sdma[i].kobj);
+
for (i = 0; i < dd->num_pports; i++) {
ppd = &dd->pport[i];

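The rule these hfi1 hunks adopt, sketched with a made-up kobject (obj, example_ktype and parent are illustrative names): kobject_init_and_add() takes a reference even when it fails, so the error path must always drop it with kobject_put(), which also calls kobject_del() if the add had succeeded, rather than skipping cleanup.

	ret = kobject_init_and_add(&obj->kobj, &example_ktype, parent, "example");
	if (ret) {
		kobject_put(&obj->kobj);	/* frees via the ktype's ->release() */
		return ret;
	}
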
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 0a160fd1383a..4f44a731a48e 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1181,12 +1181,10 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_GRE)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
- if (MLX5_CAP_GEN(mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_CW_MPLS_UDP)
+ if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
resp.tunnel_offloads_caps |=
MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
}
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 31aa41d85ccf..e3bac1a877bb 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1783,14 +1783,23 @@ int siw_reject(struct iw_cm_id *id, const void *pdata, u8 pd_len)
return 0;
}

-static int siw_listen_address(struct iw_cm_id *id, int backlog,
- struct sockaddr *laddr, int addr_family)
+/*
+ * siw_create_listen - Create resources for a listener's IWCM ID @id
+ *
+ * Starts listen on the socket address id->local_addr.
+ *
+ */
+int siw_create_listen(struct iw_cm_id *id, int backlog)
{
struct socket *s;
struct siw_cep *cep = NULL;
struct siw_device *sdev = to_siw_dev(id->device);
+ int addr_family = id->local_addr.ss_family;
int rv = 0, s_val;

+ if (addr_family != AF_INET && addr_family != AF_INET6)
+ return -EAFNOSUPPORT;
+
rv = sock_create(addr_family, SOCK_STREAM, IPPROTO_TCP, &s);
if (rv < 0)
return rv;
@@ -1805,9 +1814,25 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
siw_dbg(id->device, "setsockopt error: %d\n", rv);
goto error;
}
- rv = s->ops->bind(s, laddr, addr_family == AF_INET ?
- sizeof(struct sockaddr_in) :
- sizeof(struct sockaddr_in6));
+ if (addr_family == AF_INET) {
+ struct sockaddr_in *laddr = &to_sockaddr_in(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv4_is_zeronet(laddr->sin_addr.s_addr))
+ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in));
+ } else {
+ struct sockaddr_in6 *laddr = &to_sockaddr_in6(id->local_addr);
+
+ /* For wildcard addr, limit binding to current device only */
+ if (ipv6_addr_any(&laddr->sin6_addr))
+ s->sk->sk_bound_dev_if = sdev->netdev->ifindex;
+
+ rv = s->ops->bind(s, (struct sockaddr *)laddr,
+ sizeof(struct sockaddr_in6));
+ }
if (rv) {
siw_dbg(id->device, "socket bind error: %d\n", rv);
goto error;
@@ -1866,7 +1891,7 @@ static int siw_listen_address(struct iw_cm_id *id, int backlog,
list_add_tail(&cep->listenq, (struct list_head *)id->provider_data);
cep->state = SIW_EPSTATE_LISTENING;

- siw_dbg(id->device, "Listen at laddr %pISp\n", laddr);
+ siw_dbg(id->device, "Listen at laddr %pISp\n", &id->local_addr);

return 0;

@@ -1924,114 +1949,6 @@ static void siw_drop_listeners(struct iw_cm_id *id)
}
}

-/*
- * siw_create_listen - Create resources for a listener's IWCM ID @id
- *
- * Listens on the socket addresses id->local_addr and id->remote_addr.
- *
- * If the listener's @id provides a specific local IP address, at most one
- * listening socket is created and associated with @id.
- *
- * If the listener's @id provides the wildcard (zero) local IP address,
- * a separate listen is performed for each local IP address of the device
- * by creating a listening socket and binding to that local IP address.
- *
- */
-int siw_create_listen(struct iw_cm_id *id, int backlog)
-{
- struct net_device *dev = to_siw_dev(id->device)->netdev;
- int rv = 0, listeners = 0;
-
- siw_dbg(id->device, "backlog %d\n", backlog);
-
- /*
- * For each attached address of the interface, create a
- * listening socket, if id->local_addr is the wildcard
- * IP address or matches the IP address.
- */
- if (id->local_addr.ss_family == AF_INET) {
- struct in_device *in_dev = in_dev_get(dev);
- struct sockaddr_in s_laddr, *s_raddr;
- const struct in_ifaddr *ifa;
-
- if (!in_dev) {
- rv = -ENODEV;
- goto out;
- }
- memcpy(&s_laddr, &id->local_addr, sizeof(s_laddr));
- s_raddr = (struct sockaddr_in *)&id->remote_addr;
-
- siw_dbg(id->device,
- "laddr %pI4:%d, raddr %pI4:%d\n",
- &s_laddr.sin_addr, ntohs(s_laddr.sin_port),
- &s_raddr->sin_addr, ntohs(s_raddr->sin_port));
-
- rtnl_lock();
- in_dev_for_each_ifa_rtnl(ifa, in_dev) {
- if (ipv4_is_zeronet(s_laddr.sin_addr.s_addr) ||
- s_laddr.sin_addr.s_addr == ifa->ifa_address) {
- s_laddr.sin_addr.s_addr = ifa->ifa_address;
-
- rv = siw_listen_address(id, backlog,
- (struct sockaddr *)&s_laddr,
- AF_INET);
- if (!rv)
- listeners++;
- }
- }
- rtnl_unlock();
- in_dev_put(in_dev);
- } else if (id->local_addr.ss_family == AF_INET6) {
- struct inet6_dev *in6_dev = in6_dev_get(dev);
- struct inet6_ifaddr *ifp;
- struct sockaddr_in6 *s_laddr = &to_sockaddr_in6(id->local_addr),
- *s_raddr = &to_sockaddr_in6(id->remote_addr);
-
- if (!in6_dev) {
- rv = -ENODEV;
- goto out;
- }
- siw_dbg(id->device,
- "laddr %pI6:%d, raddr %pI6:%d\n",
- &s_laddr->sin6_addr, ntohs(s_laddr->sin6_port),
- &s_raddr->sin6_addr, ntohs(s_raddr->sin6_port));
-
- rtnl_lock();
- list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
- if (ifp->flags & (IFA_F_TENTATIVE | IFA_F_DEPRECATED))
- continue;
- if (ipv6_addr_any(&s_laddr->sin6_addr) ||
- ipv6_addr_equal(&s_laddr->sin6_addr, &ifp->addr)) {
- struct sockaddr_in6 bind_addr = {
- .sin6_family = AF_INET6,
- .sin6_port = s_laddr->sin6_port,
- .sin6_flowinfo = 0,
- .sin6_addr = ifp->addr,
- .sin6_scope_id = dev->ifindex };
-
- rv = siw_listen_address(id, backlog,
- (struct sockaddr *)&bind_addr,
- AF_INET6);
- if (!rv)
- listeners++;
- }
- }
- rtnl_unlock();
- in6_dev_put(in6_dev);
- } else {
- rv = -EAFNOSUPPORT;
- }
-out:
- if (listeners)
- rv = 0;
- else if (!rv)
- rv = -EINVAL;
-
- siw_dbg(id->device, "%s\n", rv ? "FAIL" : "OK");
-
- return rv;
-}
-
int siw_destroy_listen(struct iw_cm_id *id)
{
if (!id->provider_data) {
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 9d47b227e557..0d922eeae357 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -2762,10 +2762,8 @@ static int __init si_domain_init(int hw)
}

/*
- * Normally we use DMA domains for devices which have RMRRs. But we
- * loose this requirement for graphic and usb devices. Identity map
- * the RMRRs for graphic and USB devices so that they could use the
- * si_domain.
+ * Identity map the RMRRs so that devices with RMRRs could also use
+ * the si_domain.
*/
for_each_rmrr_units(rmrr) {
for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt,
@@ -2773,9 +2771,6 @@ static int __init si_domain_init(int hw)
unsigned long long start = rmrr->base_address;
unsigned long long end = rmrr->end_address;

- if (device_is_rmrr_locked(dev))
- continue;
-
if (WARN_ON(end < start ||
end >> agaw_to_width(si_domain->agaw)))
continue;
@@ -2914,9 +2909,6 @@ static int device_def_domain_type(struct device *dev)
if (dev_is_pci(dev)) {
struct pci_dev *pdev = to_pci_dev(dev);

- if (device_is_rmrr_locked(dev))
- return IOMMU_DOMAIN_DMA;
-
/*
* Prevent any device marked as untrusted from getting
* placed into the statically identity mapping domain.
@@ -2954,9 +2946,6 @@ static int device_def_domain_type(struct device *dev)
return IOMMU_DOMAIN_DMA;
} else if (pci_pcie_type(pdev) == PCI_EXP_TYPE_PCI_BRIDGE)
return IOMMU_DOMAIN_DMA;
- } else {
- if (device_has_rmrr(dev))
- return IOMMU_DOMAIN_DMA;
}

return (iommu_identity_mapping & IDENTMAP_ALL) ?
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a3664281a33f..4dfa459ef5c7 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl)
u32 tmpid;
char *cmd = sl->rbuff;

- cf.can_id = 0;
+ memset(&cf, 0, sizeof(cf));

switch (*cmd) {
case 'r':
@@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl)
else
return;

- *(u64 *) (&cf.data) = 0; /* clear payload */
-
/* RTR frames may have a dlc > 0 but they never have any data bytes */
if (!(cf.can_id & CAN_RTR_FLAG)) {
for (i = 0; i < cf.can_dlc; i++) {
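
A miniature of the bug the slcan hunk fixes: clearing a single field of an on-stack frame leaves the remaining bytes, including padding and the payload, as uninitialized stack memory that can leak onto the bus.

	struct can_frame cf;

	cf.can_id = 0;			/* old code: every other byte of cf is undefined */

	memset(&cf, 0, sizeof(cf));	/* fixed code: whole frame zeroed up front */
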
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 46dc913da852..9502db66092e 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -459,7 +459,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
priv->slave_mii_bus->parent = ds->dev->parent;
priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask;

- err = of_mdiobus_register(priv->slave_mii_bus, dn);
+ err = mdiobus_register(priv->slave_mii_bus);
if (err && dn)
of_node_put(dn);

@@ -1053,6 +1053,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
+ struct device_node *ports;
struct bcm_sf2_priv *priv;
struct b53_device *dev;
struct dsa_switch *ds;
@@ -1115,7 +1116,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
set_bit(0, priv->cfp.used);
set_bit(0, priv->cfp.unique);

- bcm_sf2_identify_ports(priv, dn->child);
+ ports = of_find_node_by_name(dn, "ports");
+ if (ports) {
+ bcm_sf2_identify_ports(priv, ports);
+ of_node_put(ports);
+ }

priv->irq0 = irq_of_parse_and_map(dn, 0);
priv->irq1 = irq_of_parse_and_map(dn, 1);
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index e0e932f0aed1..8071c3fa3fb7 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1353,6 +1353,9 @@ mt7530_setup(struct dsa_switch *ds)
continue;

phy_node = of_parse_phandle(mac_np, "phy-handle", 0);
+ if (!phy_node)
+ continue;
+
if (phy_node->parent == priv->dev->of_node->parent) {
interface = of_get_phy_mode(mac_np);
id = of_mdio_parse_addr(ds->dev, phy_node);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 38024877751c..069a51847885 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3032,7 +3032,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
return ret;

memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- pi->xact_addr_filt = ret;
return 0;
}

diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index b607919c8ad0..498de6ef6870 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -123,9 +123,12 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
u8 prio = act->vlan.prio;
u16 vid = act->vlan.vid;

- return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
- act->id, vid,
- proto, prio, extack);
+ err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei,
+ act->id, vid,
+ proto, prio, extack);
+ if (err)
+ return err;
+ break;
}
default:
NL_SET_ERR_MSG_MOD(extack, "Unsupported action");
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5ebfc3e66331..3bc6d1ef29ec 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -7167,12 +7167,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT);

- dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
- NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
@@ -7190,25 +7188,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;

if (rtl_chip_supports_csum_v2(tp)) {
- dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
- dev->features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ dev->hw_features |= NETIF_F_IPV6_CSUM;
+ dev->features |= NETIF_F_IPV6_CSUM;
+ }
+
+ /* There has been a number of reports that using SG/TSO results in
+ * tx timeouts. However for a lot of people SG/TSO works fine.
+ * Therefore disable both features by default, but allow users to
+ * enable them. Use at own risk!
+ */
+ if (rtl_chip_supports_csum_v2(tp)) {
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V2;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2;
} else {
+ dev->hw_features |= NETIF_F_SG | NETIF_F_TSO;
dev->gso_max_size = RTL_GSO_MAX_SIZE_V1;
dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
}

- /* RTL8168e-vl and one RTL8168c variant are known to have a
- * HW issue with TSO.
- */
- if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
- dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
- }
-
dev->hw_features |= NETIF_F_RXALL;
dev->hw_features |= NETIF_F_RXFCS;

diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index 43a785f86c69..bc9b01376e80 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -209,7 +209,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
reg++;
}

- while (reg <= perfect_addr_number) {
+ while (reg < perfect_addr_number) {
writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
writel(0, ioaddr + GMAC_ADDR_LOW(reg));
reg++;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 63dedec0433d..51b64f087717 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -25,6 +25,7 @@
#include <linux/micrel_phy.h>
#include <linux/of.h>
#include <linux/clk.h>
+#include <linux/delay.h>

/* Operation Mode Strap Override */
#define MII_KSZPHY_OMSO 0x16
@@ -902,6 +903,12 @@ static int kszphy_resume(struct phy_device *phydev)

genphy_resume(phydev);

+ /* After switching from power-down to normal mode, an internal global
+ * reset is automatically generated. Wait a minimum of 1 ms before
+ * read/write access to the PHY registers.
+ */
+ usleep_range(1000, 2000);
+
ret = kszphy_config_reset(phydev);
if (ret)
return ret;
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index c76df51dd3c5..879ca37c8508 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -456,6 +456,15 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_MODEL(0x001cc880),
+ .name = "RTL8208 Fast Ethernet",
+ .read_mmd = genphy_read_mmd_unsupported,
+ .write_mmd = genphy_write_mmd_unsupported,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_page = rtl821x_read_page,
+ .write_page = rtl821x_write_page,
}, {
PHY_ID_MATCH_EXACT(0x001cc910),
.name = "RTL8211 Gigabit Ethernet",
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 69f553a028ee..16f5cb249ed5 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1715,8 +1715,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
alloc_frag->offset += buflen;
}
err = tun_xdp_act(tun, xdp_prog, &xdp, act);
- if (err < 0)
- goto err_xdp;
+ if (err < 0) {
+ if (act == XDP_REDIRECT || act == XDP_TX)
+ put_page(alloc_frag->page);
+ goto out;
+ }
+
if (err == XDP_REDIRECT)
xdp_do_flush_map();
if (err != XDP_PASS)
@@ -1730,8 +1734,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,

return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

-err_xdp:
- put_page(alloc_frag->page);
out:
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c
index af233b7b77f2..29d12f2f59e0 100644
--- a/drivers/platform/x86/intel_int0002_vgpio.c
+++ b/drivers/platform/x86/intel_int0002_vgpio.c
@@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data)
return IRQ_HANDLED;
}

+static bool int0002_check_wake(void *data)
+{
+ u32 gpe_sts_reg;
+
+ gpe_sts_reg = inl(GPE0A_STS_PORT);
+ return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT);
+}
+
static struct irq_chip int0002_byt_irqchip = {
.name = DRV_NAME,
.irq_ack = int0002_irq_ack,
@@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev)

gpiochip_set_chained_irqchip(chip, irq_chip, irq, NULL);

+ acpi_register_wakeup_handler(irq, int0002_check_wake, NULL);
device_init_wakeup(dev, true);
return 0;
}
@@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev)
static int int0002_remove(struct platform_device *pdev)
{
device_init_wakeup(&pdev->dev, false);
+ acpi_unregister_wakeup_handler(int0002_check_wake, NULL);
return 0;
}

diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 6ac02ba5e4a1..09d6b11246c9 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1518,7 +1518,7 @@ static void dwc3_gadget_ep_skip_trbs(struct dwc3_ep *dep, struct dwc3_request *r
for (i = 0; i < req->num_trbs; i++) {
struct dwc3_trb *trb;

- trb = req->trb + i;
+ trb = &dep->trb_pool[dep->trb_dequeue];
trb->ctrl &= ~DWC3_TRB_CTRL_HWO;
dwc3_ep_inc_deq(dep);
}
1538     diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
1539     index c9235a2f42f8..22070cfea1d0 100644
1540     --- a/drivers/video/fbdev/core/fbcon.c
1541     +++ b/drivers/video/fbdev/core/fbcon.c
1542     @@ -1276,6 +1276,9 @@ finished:
1543     if (!con_is_bound(&fb_con))
1544     fbcon_exit();
1545    
1546     + if (vc->vc_num == logo_shown)
1547     + logo_shown = FBCON_LOGO_CANSHOW;
1548     +
1549     return;
1550     }
1551    
1552     diff --git a/fs/ceph/super.c b/fs/ceph/super.c
1553     index 62fc7d46032e..d40658d5e808 100644
1554     --- a/fs/ceph/super.c
1555     +++ b/fs/ceph/super.c
1556     @@ -106,7 +106,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
1557     return 0;
1558     }
1559    
1560     -
1561     static int ceph_sync_fs(struct super_block *sb, int wait)
1562     {
1563     struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1564     @@ -215,6 +214,26 @@ static match_table_t fsopt_tokens = {
1565     {-1, NULL}
1566     };
1567    
1568     +/*
1569     + * Remove adjacent slashes and then the trailing slash, unless it is
1570     + * the only remaining character.
1571     + *
1572     + * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
1573     + */
1574     +static void canonicalize_path(char *path)
1575     +{
1576     + int i, j = 0;
1577     +
1578     + for (i = 0; path[i] != '\0'; i++) {
1579     + if (path[i] != '/' || j < 1 || path[j - 1] != '/')
1580     + path[j++] = path[i];
1581     + }
1582     +
1583     + if (j > 1 && path[j - 1] == '/')
1584     + j--;
1585     + path[j] = '\0';
1586     +}
1587     +
1588     static int parse_fsopt_token(char *c, void *private)
1589     {
1590     struct ceph_mount_options *fsopt = private;
1591     @@ -446,12 +465,15 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
1592     ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
1593     if (ret)
1594     return ret;
1595     +
1596     ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
1597     if (ret)
1598     return ret;
1599     +
1600     ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
1601     if (ret)
1602     return ret;
1603     +
1604     ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
1605     if (ret)
1606     return ret;
1607     @@ -507,13 +529,17 @@ static int parse_mount_options(struct ceph_mount_options **pfsopt,
1608     */
1609     dev_name_end = strchr(dev_name, '/');
1610     if (dev_name_end) {
1611     - if (strlen(dev_name_end) > 1) {
1612     - fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
1613     - if (!fsopt->server_path) {
1614     - err = -ENOMEM;
1615     - goto out;
1616     - }
1617     + /*
1618     + * The server_path will include the whole string from userland,
1619     + * including the leading '/'.
1620     + */
1621     + fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
1622     + if (!fsopt->server_path) {
1623     + err = -ENOMEM;
1624     + goto out;
1625     }
1626     +
1627     + canonicalize_path(fsopt->server_path);
1628     } else {
1629     dev_name_end = dev_name + strlen(dev_name);
1630     }
1631     @@ -842,7 +868,6 @@ static void destroy_caches(void)
1632     ceph_fscache_unregister();
1633     }
1634    
1635     -
1636     /*
1637     * ceph_umount_begin - initiate forced umount. Tear down the
1638     * mount, skipping steps that may hang while waiting for server(s).
1639     @@ -929,9 +954,6 @@ out:
1640     return root;
1641     }
1642    
1643     -
1644     -
1645     -
1646     /*
1647     * mount: join the ceph cluster, and open root directory.
1648     */
1649     @@ -945,7 +967,9 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
1650     mutex_lock(&fsc->client->mount_mutex);
1651    
1652     if (!fsc->sb->s_root) {
1653     - const char *path;
1654     + const char *path = fsc->mount_options->server_path ?
1655     + fsc->mount_options->server_path + 1 : "";
1656     +
1657     err = __ceph_open_session(fsc->client, started);
1658     if (err < 0)
1659     goto out;
1660     @@ -957,13 +981,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
1661     goto out;
1662     }
1663    
1664     - if (!fsc->mount_options->server_path) {
1665     - path = "";
1666     - dout("mount opening path \\t\n");
1667     - } else {
1668     - path = fsc->mount_options->server_path + 1;
1669     - dout("mount opening path %s\n", path);
1670     - }
1671     + dout("mount opening path '%s'\n", path);
1672    
1673     ceph_fs_debugfs_init(fsc);
1674    
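The canonicalize_path() helper added above is plain C with no kernel
dependencies, so its behavior is easy to check outside the tree. A minimal
userspace harness (illustrative only, not part of the patch) exercising the
documented examples:

    #include <stdio.h>

    /* Same logic as the patch: drop repeated '/' characters and a
     * trailing '/', unless '/' is all that remains. */
    static void canonicalize_path(char *path)
    {
        int i, j = 0;

        for (i = 0; path[i] != '\0'; i++) {
            if (path[i] != '/' || j < 1 || path[j - 1] != '/')
                path[j++] = path[i];
        }

        if (j > 1 && path[j - 1] == '/')
            j--;
        path[j] = '\0';
    }

    int main(void)
    {
        char a[] = "//dir1////dir2///", b[] = "///";

        canonicalize_path(a);   /* -> "/dir1/dir2" */
        canonicalize_path(b);   /* -> "/" */
        printf("%s %s\n", a, b);
        return 0;
    }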
1675     diff --git a/fs/ceph/super.h b/fs/ceph/super.h
1676     index f98d9247f9cb..bb12c9f3a218 100644
1677     --- a/fs/ceph/super.h
1678     +++ b/fs/ceph/super.h
1679     @@ -92,7 +92,7 @@ struct ceph_mount_options {
1680    
1681     char *snapdir_name; /* default ".snap" */
1682     char *mds_namespace; /* default NULL */
1683     - char *server_path; /* default "/" */
1684     + char *server_path; /* default NULL (means "/") */
1685     char *fscache_uniq; /* default NULL */
1686     };
1687    
1688     diff --git a/include/linux/acpi.h b/include/linux/acpi.h
1689     index 8b4e516bac00..ce29a014e591 100644
1690     --- a/include/linux/acpi.h
1691     +++ b/include/linux/acpi.h
1692     @@ -473,6 +473,11 @@ void __init acpi_nvs_nosave_s3(void);
1693     void __init acpi_sleep_no_blacklist(void);
1694     #endif /* CONFIG_PM_SLEEP */
1695    
1696     +int acpi_register_wakeup_handler(
1697     + int wake_irq, bool (*wakeup)(void *context), void *context);
1698     +void acpi_unregister_wakeup_handler(
1699     + bool (*wakeup)(void *context), void *context);
1700     +
1701     struct acpi_osc_context {
1702     char *uuid_str; /* UUID string */
1703     int rev;
1704     diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
1705     index 0cdc8d12785a..acd859ea09d4 100644
1706     --- a/include/linux/mlx5/mlx5_ifc.h
1707     +++ b/include/linux/mlx5/mlx5_ifc.h
1708     @@ -857,7 +857,11 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
1709     u8 swp_csum[0x1];
1710     u8 swp_lso[0x1];
1711     u8 cqe_checksum_full[0x1];
1712     - u8 reserved_at_24[0x5];
1713     + u8 tunnel_stateless_geneve_tx[0x1];
1714     + u8 tunnel_stateless_mpls_over_udp[0x1];
1715     + u8 tunnel_stateless_mpls_over_gre[0x1];
1716     + u8 tunnel_stateless_vxlan_gpe[0x1];
1717     + u8 tunnel_stateless_ipv4_over_vxlan[0x1];
1718     u8 tunnel_stateless_ip_over_ip[0x1];
1719     u8 reserved_at_2a[0x6];
1720     u8 max_vxlan_udp_ports[0x8];
1721     diff --git a/include/linux/swab.h b/include/linux/swab.h
1722     index e466fd159c85..bcff5149861a 100644
1723     --- a/include/linux/swab.h
1724     +++ b/include/linux/swab.h
1725     @@ -7,6 +7,7 @@
1726     # define swab16 __swab16
1727     # define swab32 __swab32
1728     # define swab64 __swab64
1729     +# define swab __swab
1730     # define swahw32 __swahw32
1731     # define swahb32 __swahb32
1732     # define swab16p __swab16p
1733     diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
1734     index 23cd84868cc3..7272f85d6d6a 100644
1735     --- a/include/uapi/linux/swab.h
1736     +++ b/include/uapi/linux/swab.h
1737     @@ -4,6 +4,7 @@
1738    
1739     #include <linux/types.h>
1740     #include <linux/compiler.h>
1741     +#include <asm/bitsperlong.h>
1742     #include <asm/swab.h>
1743    
1744     /*
1745     @@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
1746     __fswab64(x))
1747     #endif
1748    
1749     +static __always_inline unsigned long __swab(const unsigned long y)
1750     +{
1751     +#if __BITS_PER_LONG == 64
1752     + return __swab64(y);
1753     +#else /* __BITS_PER_LONG == 32 */
1754     + return __swab32(y);
1755     +#endif
1756     +}
1757     +
1758     /**
1759     * __swahw32 - return a word-swapped 32-bit value
1760     * @x: value to wordswap
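The new __swab() simply dispatches on __BITS_PER_LONG so that "unsigned long"
bitmaps can be byte-reversed without casts at the call site (which is what
lets lib/find_bit.c drop its private ext2_swab() below). A userspace sketch
of the same dispatch, with compiler builtins standing in for the kernel's
__swab32/__swab64 (demo names, not the kernel API):

    #include <stdio.h>

    /* Byte-reverse an unsigned long, picking the 32- or 64-bit swap
     * by word size, as the kernel helper does. */
    static unsigned long demo_swab(unsigned long y)
    {
        if (sizeof(unsigned long) == 8)
            return (unsigned long)__builtin_bswap64(y);
        return (unsigned long)__builtin_bswap32((unsigned int)y);
    }

    int main(void)
    {
        /* On a 64-bit build: 0x0102030405060708 -> 0x0807060504030201 */
        printf("%#lx\n", demo_swab(0x0102030405060708UL));
        return 0;
    }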
1761     diff --git a/lib/find_bit.c b/lib/find_bit.c
1762     index 5c51eb45178a..4e68490fa703 100644
1763     --- a/lib/find_bit.c
1764     +++ b/lib/find_bit.c
1765     @@ -149,18 +149,6 @@ EXPORT_SYMBOL(find_last_bit);
1766    
1767     #ifdef __BIG_ENDIAN
1768    
1769     -/* include/linux/byteorder does not support "unsigned long" type */
1770     -static inline unsigned long ext2_swab(const unsigned long y)
1771     -{
1772     -#if BITS_PER_LONG == 64
1773     - return (unsigned long) __swab64((u64) y);
1774     -#elif BITS_PER_LONG == 32
1775     - return (unsigned long) __swab32((u32) y);
1776     -#else
1777     -#error BITS_PER_LONG not defined
1778     -#endif
1779     -}
1780     -
1781     #if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
1782     static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
1783     const unsigned long *addr2, unsigned long nbits,
1784     @@ -177,7 +165,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
1785     tmp ^= invert;
1786    
1787     /* Handle 1st word. */
1788     - tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
1789     + tmp &= swab(BITMAP_FIRST_WORD_MASK(start));
1790     start = round_down(start, BITS_PER_LONG);
1791    
1792     while (!tmp) {
1793     @@ -191,7 +179,7 @@ static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
1794     tmp ^= invert;
1795     }
1796    
1797     - return min(start + __ffs(ext2_swab(tmp)), nbits);
1798     + return min(start + __ffs(swab(tmp)), nbits);
1799     }
1800     #endif
1801    
1802     diff --git a/mm/slub.c b/mm/slub.c
1803     index 59ed00be02cb..af44807d5b05 100644
1804     --- a/mm/slub.c
1805     +++ b/mm/slub.c
1806     @@ -261,7 +261,7 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr,
1807     * freepointer to be restored incorrectly.
1808     */
1809     return (void *)((unsigned long)ptr ^ s->random ^
1810     - (unsigned long)kasan_reset_tag((void *)ptr_addr));
1811     + swab((unsigned long)kasan_reset_tag((void *)ptr_addr)));
1812     #else
1813     return ptr;
1814     #endif
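For context: with SLAB_FREELIST_HARDENED the free pointer is stored
XOR-obfuscated, and the change above additionally byte-swaps the storage
address before mixing it in, so the low bytes of the stored value no longer
line up directly with the low bytes of the slot address. A simplified
userspace model of the encoding (64-bit assumed, demo names):

    #include <stdint.h>

    /* stored = ptr ^ secret ^ byteswap(slot_address); XOR is an
     * involution, so the identical function also decodes. */
    static uintptr_t encode_free_ptr(uintptr_t ptr, uintptr_t secret,
                                     uintptr_t slot_addr)
    {
        return ptr ^ secret ^ (uintptr_t)__builtin_bswap64(slot_addr);
    }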
1815     diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
1816     index 0c7d31c6c18c..a58584949a95 100644
1817     --- a/net/bluetooth/rfcomm/tty.c
1818     +++ b/net/bluetooth/rfcomm/tty.c
1819     @@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
1820     dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
1821     if (IS_ERR(dlc))
1822     return PTR_ERR(dlc);
1823     - else if (dlc) {
1824     - rfcomm_dlc_put(dlc);
1825     + if (dlc)
1826     return -EBUSY;
1827     - }
1828     dlc = rfcomm_dlc_alloc(GFP_KERNEL);
1829     if (!dlc)
1830     return -ENOMEM;
1831     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1832     index d02ccd749a60..6e1d200f30c1 100644
1833     --- a/net/ipv6/addrconf.c
1834     +++ b/net/ipv6/addrconf.c
1835     @@ -3296,6 +3296,10 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
1836     if (netif_is_l3_master(idev->dev))
1837     return;
1838    
1839     + /* no link local addresses on devices flagged as slaves */
1840     + if (idev->dev->flags & IFF_SLAVE)
1841     + return;
1842     +
1843     ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
1844    
1845     switch (idev->cnf.addr_gen_mode) {
1846     diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
1847     index 9904299424a1..61e95029c18f 100644
1848     --- a/net/sched/cls_tcindex.c
1849     +++ b/net/sched/cls_tcindex.c
1850     @@ -11,6 +11,7 @@
1851     #include <linux/skbuff.h>
1852     #include <linux/errno.h>
1853     #include <linux/slab.h>
1854     +#include <linux/refcount.h>
1855     #include <net/act_api.h>
1856     #include <net/netlink.h>
1857     #include <net/pkt_cls.h>
1858     @@ -26,9 +27,12 @@
1859     #define DEFAULT_HASH_SIZE 64 /* optimized for diffserv */
1860    
1861    
1862     +struct tcindex_data;
1863     +
1864     struct tcindex_filter_result {
1865     struct tcf_exts exts;
1866     struct tcf_result res;
1867     + struct tcindex_data *p;
1868     struct rcu_work rwork;
1869     };
1870    
1871     @@ -49,6 +53,7 @@ struct tcindex_data {
1872     u32 hash; /* hash table size; 0 if undefined */
1873     u32 alloc_hash; /* allocated size */
1874     u32 fall_through; /* 0: only classify if explicit match */
1875     + refcount_t refcnt; /* a temporary refcnt for perfect hash */
1876     struct rcu_work rwork;
1877     };
1878    
1879     @@ -57,6 +62,20 @@ static inline int tcindex_filter_is_set(struct tcindex_filter_result *r)
1880     return tcf_exts_has_actions(&r->exts) || r->res.classid;
1881     }
1882    
1883     +static void tcindex_data_get(struct tcindex_data *p)
1884     +{
1885     + refcount_inc(&p->refcnt);
1886     +}
1887     +
1888     +static void tcindex_data_put(struct tcindex_data *p)
1889     +{
1890     + if (refcount_dec_and_test(&p->refcnt)) {
1891     + kfree(p->perfect);
1892     + kfree(p->h);
1893     + kfree(p);
1894     + }
1895     +}
1896     +
1897     static struct tcindex_filter_result *tcindex_lookup(struct tcindex_data *p,
1898     u16 key)
1899     {
1900     @@ -132,6 +151,7 @@ static int tcindex_init(struct tcf_proto *tp)
1901     p->mask = 0xffff;
1902     p->hash = DEFAULT_HASH_SIZE;
1903     p->fall_through = 1;
1904     + refcount_set(&p->refcnt, 1); /* Paired with tcindex_destroy_work() */
1905    
1906     rcu_assign_pointer(tp->root, p);
1907     return 0;
1908     @@ -141,6 +161,7 @@ static void __tcindex_destroy_rexts(struct tcindex_filter_result *r)
1909     {
1910     tcf_exts_destroy(&r->exts);
1911     tcf_exts_put_net(&r->exts);
1912     + tcindex_data_put(r->p);
1913     }
1914    
1915     static void tcindex_destroy_rexts_work(struct work_struct *work)
1916     @@ -212,6 +233,8 @@ found:
1917     else
1918     __tcindex_destroy_fexts(f);
1919     } else {
1920     + tcindex_data_get(p);
1921     +
1922     if (tcf_exts_get_net(&r->exts))
1923     tcf_queue_work(&r->rwork, tcindex_destroy_rexts_work);
1924     else
1925     @@ -228,9 +251,7 @@ static void tcindex_destroy_work(struct work_struct *work)
1926     struct tcindex_data,
1927     rwork);
1928    
1929     - kfree(p->perfect);
1930     - kfree(p->h);
1931     - kfree(p);
1932     + tcindex_data_put(p);
1933     }
1934    
1935     static inline int
1936     @@ -248,9 +269,11 @@ static const struct nla_policy tcindex_policy[TCA_TCINDEX_MAX + 1] = {
1937     };
1938    
1939     static int tcindex_filter_result_init(struct tcindex_filter_result *r,
1940     + struct tcindex_data *p,
1941     struct net *net)
1942     {
1943     memset(r, 0, sizeof(*r));
1944     + r->p = p;
1945     return tcf_exts_init(&r->exts, net, TCA_TCINDEX_ACT,
1946     TCA_TCINDEX_POLICE);
1947     }
1948     @@ -290,6 +313,7 @@ static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp)
1949     TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE);
1950     if (err < 0)
1951     goto errout;
1952     + cp->perfect[i].p = cp;
1953     }
1954    
1955     return 0;
1956     @@ -334,6 +358,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
1957     cp->alloc_hash = p->alloc_hash;
1958     cp->fall_through = p->fall_through;
1959     cp->tp = tp;
1960     + refcount_set(&cp->refcnt, 1); /* Paired with tcindex_destroy_work() */
1961    
1962     if (tb[TCA_TCINDEX_HASH])
1963     cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]);
1964     @@ -366,7 +391,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
1965     }
1966     cp->h = p->h;
1967    
1968     - err = tcindex_filter_result_init(&new_filter_result, net);
1969     + err = tcindex_filter_result_init(&new_filter_result, cp, net);
1970     if (err < 0)
1971     goto errout_alloc;
1972     if (old_r)
1973     @@ -434,7 +459,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
1974     goto errout_alloc;
1975     f->key = handle;
1976     f->next = NULL;
1977     - err = tcindex_filter_result_init(&f->result, net);
1978     + err = tcindex_filter_result_init(&f->result, cp, net);
1979     if (err < 0) {
1980     kfree(f);
1981     goto errout_alloc;
1982     @@ -447,7 +472,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
1983     }
1984    
1985     if (old_r && old_r != r) {
1986     - err = tcindex_filter_result_init(old_r, net);
1987     + err = tcindex_filter_result_init(old_r, cp, net);
1988     if (err < 0) {
1989     kfree(f);
1990     goto errout_alloc;
1991     @@ -571,6 +596,14 @@ static void tcindex_destroy(struct tcf_proto *tp, bool rtnl_held,
1992     for (i = 0; i < p->hash; i++) {
1993     struct tcindex_filter_result *r = p->perfect + i;
1994    
1995     + /* tcf_queue_work() does not guarantee the ordering we
1996     + * want, so we have to take this refcnt temporarily to
1997     + * ensure 'p' is freed after all the tcindex_filter_result
1998     + * entries here. Imperfect hash does not need this, because it
1999     + * uses linked lists rather than an array.
2000     + */
2001     + tcindex_data_get(p);
2002     +
2003     tcf_unbind_filter(tp, &r->res);
2004     if (tcf_exts_get_net(&r->exts))
2005     tcf_queue_work(&r->rwork,
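The tcindex changes are a standard get/put lifetime fix: each deferred
tcindex_filter_result teardown now pins its tcindex_data, and whoever drops
the last reference frees it, so 'p' can no longer go away before a queued
work item dereferences it. A userspace sketch of the pattern with C11
atomics standing in for refcount_t (demo names, not the kernel API):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct shared {
        atomic_int refcnt;          /* initialized to 1 by the owner */
        /* ... payload freed only by the last put ... */
    };

    static void shared_get(struct shared *p)
    {
        atomic_fetch_add(&p->refcnt, 1);
    }

    static void shared_put(struct shared *p)
    {
        /* fetch_sub returns the old value; 1 means we were last */
        if (atomic_fetch_sub(&p->refcnt, 1) == 1)
            free(p);
    }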
2006     diff --git a/sound/soc/jz4740/jz4740-i2s.c b/sound/soc/jz4740/jz4740-i2s.c
2007     index 13408de34055..0bbd86390be5 100644
2008     --- a/sound/soc/jz4740/jz4740-i2s.c
2009     +++ b/sound/soc/jz4740/jz4740-i2s.c
2010     @@ -83,7 +83,7 @@
2011     #define JZ_AIC_I2S_STATUS_BUSY BIT(2)
2012    
2013     #define JZ_AIC_CLK_DIV_MASK 0xf
2014     -#define I2SDIV_DV_SHIFT 8
2015     +#define I2SDIV_DV_SHIFT 0
2016     #define I2SDIV_DV_MASK (0xf << I2SDIV_DV_SHIFT)
2017     #define I2SDIV_IDV_SHIFT 8
2018     #define I2SDIV_IDV_MASK (0xf << I2SDIV_IDV_SHIFT)
2019     diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c
2020     index 8cb504d30384..5ef1c15e88ad 100644
2021     --- a/tools/accounting/getdelays.c
2022     +++ b/tools/accounting/getdelays.c
2023     @@ -136,7 +136,7 @@ static int send_cmd(int sd, __u16 nlmsg_type, __u32 nlmsg_pid,
2024     msg.g.version = 0x1;
2025     na = (struct nlattr *) GENLMSG_DATA(&msg);
2026     na->nla_type = nla_type;
2027     - na->nla_len = nla_len + 1 + NLA_HDRLEN;
2028     + na->nla_len = nla_len + NLA_HDRLEN;
2029     memcpy(NLA_DATA(na), nla_data, nla_len);
2030     msg.n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
2031