Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.19/0168-4.19.69-all-fixes.patch



Revision 3465
Tue Oct 29 10:31:26 2019 UTC by niro
File size: 142083 bytes
-linux-4.19.69
1 niro 3465 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2     index c96a8e9ad5c2..e8ddf0ef232e 100644
3     --- a/Documentation/admin-guide/kernel-parameters.txt
4     +++ b/Documentation/admin-guide/kernel-parameters.txt
5     @@ -3948,6 +3948,13 @@
6     Run specified binary instead of /init from the ramdisk,
7     used for early userspace startup. See initrd.
8    
9     + rdrand= [X86]
10     + force - Override the decision by the kernel to hide the
11     + advertisement of RDRAND support (this affects
12     + certain AMD processors because of buggy BIOS
13     + support, specifically around the suspend/resume
14     + path).
15     +
16     rdt= [HW,X86,RDT]
17     Turn on/off individual RDT features. List is:
18     cmt, mbmtotal, mbmlocal, l3cat, l3cdp, l2cat, l2cdp,
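
For reference, RDRAND advertisement lives in CPUID function 1, ECX bit 30 -- the same bit the clear_rdrand_cpuid_bit() change further down tests. A minimal userspace probe of that bit (x86 with GCC/Clang; illustration only, not part of the patch):

    #include <stdio.h>
    #include <cpuid.h>   /* GCC/Clang helper, x86 only */

    /* Read CPUID function 1 and report ECX bit 30, the RDRAND flag. */
    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 1;    /* leaf not supported */

        printf("RDRAND advertised: %s\n", (ecx & (1u << 30)) ? "yes" : "no");
        return 0;
    }
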
19     diff --git a/Makefile b/Makefile
20     index 6f164b04d953..677341239449 100644
21     --- a/Makefile
22     +++ b/Makefile
23     @@ -1,7 +1,7 @@
24     # SPDX-License-Identifier: GPL-2.0
25     VERSION = 4
26     PATCHLEVEL = 19
27     -SUBLEVEL = 68
28     +SUBLEVEL = 69
29     EXTRAVERSION =
30     NAME = "People's Front"
31    
32     diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
33     index fd6cde23bb5d..871fa50a09f1 100644
34     --- a/arch/arm/kvm/coproc.c
35     +++ b/arch/arm/kvm/coproc.c
36     @@ -658,13 +658,22 @@ int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
37     }
38    
39     static void reset_coproc_regs(struct kvm_vcpu *vcpu,
40     - const struct coproc_reg *table, size_t num)
41     + const struct coproc_reg *table, size_t num,
42     + unsigned long *bmap)
43     {
44     unsigned long i;
45    
46     for (i = 0; i < num; i++)
47     - if (table[i].reset)
48     + if (table[i].reset) {
49     + int reg = table[i].reg;
50     +
51     table[i].reset(vcpu, &table[i]);
52     + if (reg > 0 && reg < NR_CP15_REGS) {
53     + set_bit(reg, bmap);
54     + if (table[i].is_64bit)
55     + set_bit(reg + 1, bmap);
56     + }
57     + }
58     }
59    
60     static struct coproc_params decode_32bit_hsr(struct kvm_vcpu *vcpu)
61     @@ -1439,17 +1448,15 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu)
62     {
63     size_t num;
64     const struct coproc_reg *table;
65     -
66     - /* Catch someone adding a register without putting in reset entry. */
67     - memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
68     + DECLARE_BITMAP(bmap, NR_CP15_REGS) = { 0, };
69    
70     /* Generic chip reset first (so target could override). */
71     - reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
72     + reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs), bmap);
73    
74     table = get_target_table(vcpu->arch.target, &num);
75     - reset_coproc_regs(vcpu, table, num);
76     + reset_coproc_regs(vcpu, table, num, bmap);
77    
78     for (num = 1; num < NR_CP15_REGS; num++)
79     - WARN(vcpu_cp15(vcpu, num) == 0x42424242,
80     + WARN(!test_bit(num, bmap),
81     "Didn't reset vcpu_cp15(vcpu, %zi)", num);
82     }
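
The hunk above replaces the old 0x42 poison-pattern audit with a bitmap that records exactly which cp15 registers a reset entry touched. A minimal userspace sketch of that bookkeeping, with hand-rolled set_bit/test_bit stand-ins (names hypothetical; illustration only, not part of the patch):

    #include <stdio.h>
    #include <string.h>
    #include <limits.h>

    #define NR_REGS 64   /* stand-in for NR_CP15_REGS */
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static void set_bit_ul(int nr, unsigned long *map)
    {
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    static int test_bit_ul(int nr, const unsigned long *map)
    {
        return !!(map[nr / BITS_PER_LONG] & (1UL << (nr % BITS_PER_LONG)));
    }

    int main(void)
    {
        unsigned long bmap[(NR_REGS + BITS_PER_LONG - 1) / BITS_PER_LONG];
        int reg;

        memset(bmap, 0, sizeof(bmap));

        /* Pretend the reset tables only covered registers 1..62. */
        for (reg = 1; reg < NR_REGS - 1; reg++)
            set_bit_ul(reg, bmap);

        /* The post-reset audit: any unset bit is a register nobody reset. */
        for (reg = 1; reg < NR_REGS; reg++)
            if (!test_bit_ul(reg, bmap))
                fprintf(stderr, "Didn't reset reg %d\n", reg);
        return 0;
    }
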
83     diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
84     index d112af75680b..6da2bbdb9648 100644
85     --- a/arch/arm64/kvm/sys_regs.c
86     +++ b/arch/arm64/kvm/sys_regs.c
87     @@ -626,7 +626,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
88     */
89     val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
90     | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
91     - __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
92     + __vcpu_sys_reg(vcpu, r->reg) = val;
93     }
94    
95     static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
96     @@ -968,13 +968,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
97     /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
98     #define DBG_BCR_BVR_WCR_WVR_EL1(n) \
99     { SYS_DESC(SYS_DBGBVRn_EL1(n)), \
100     - trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr }, \
101     + trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr }, \
102     { SYS_DESC(SYS_DBGBCRn_EL1(n)), \
103     - trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr }, \
104     + trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr }, \
105     { SYS_DESC(SYS_DBGWVRn_EL1(n)), \
106     - trap_wvr, reset_wvr, n, 0, get_wvr, set_wvr }, \
107     + trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr }, \
108     { SYS_DESC(SYS_DBGWCRn_EL1(n)), \
109     - trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
110     + trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
111    
112     /* Macro to expand the PMEVCNTRn_EL0 register */
113     #define PMU_PMEVCNTR_EL0(n) \
114     @@ -1359,7 +1359,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
115    
116     { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
117    
118     - { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, },
119     + { SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
120     { SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
121     { SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
122     { SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
123     @@ -2072,13 +2072,19 @@ static int emulate_sys_reg(struct kvm_vcpu *vcpu,
124     }
125    
126     static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
127     - const struct sys_reg_desc *table, size_t num)
128     + const struct sys_reg_desc *table, size_t num,
129     + unsigned long *bmap)
130     {
131     unsigned long i;
132    
133     for (i = 0; i < num; i++)
134     - if (table[i].reset)
135     + if (table[i].reset) {
136     + int reg = table[i].reg;
137     +
138     table[i].reset(vcpu, &table[i]);
139     + if (reg > 0 && reg < NR_SYS_REGS)
140     + set_bit(reg, bmap);
141     + }
142     }
143    
144     /**
145     @@ -2576,18 +2582,16 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
146     {
147     size_t num;
148     const struct sys_reg_desc *table;
149     -
150     - /* Catch someone adding a register without putting in reset entry. */
151     - memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));
152     + DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };
153    
154     /* Generic chip reset first (so target could override). */
155     - reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
156     + reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);
157    
158     table = get_target_table(vcpu->arch.target, true, &num);
159     - reset_sys_reg_descs(vcpu, table, num);
160     + reset_sys_reg_descs(vcpu, table, num, bmap);
161    
162     for (num = 1; num < NR_SYS_REGS; num++) {
163     - if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242,
164     + if (WARN(!test_bit(num, bmap),
165     "Didn't reset __vcpu_sys_reg(%zi)\n", num))
166     break;
167     }
168     diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
169     index 97d5239ca47b..428ef2189203 100644
170     --- a/arch/mips/kernel/cacheinfo.c
171     +++ b/arch/mips/kernel/cacheinfo.c
172     @@ -80,6 +80,8 @@ static int __populate_cache_leaves(unsigned int cpu)
173     if (c->tcache.waysize)
174     populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
175    
176     + this_cpu_ci->cpu_map_populated = true;
177     +
178     return 0;
179     }
180    
181     diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
182     index 5f209f111e59..df7ddd246eaa 100644
183     --- a/arch/mips/kernel/i8253.c
184     +++ b/arch/mips/kernel/i8253.c
185     @@ -32,7 +32,8 @@ void __init setup_pit_timer(void)
186    
187     static int __init init_pit_clocksource(void)
188     {
189     - if (num_possible_cpus() > 1) /* PIT does not scale! */
190     + if (num_possible_cpus() > 1 || /* PIT does not scale! */
191     + !clockevent_state_periodic(&i8253_clockevent))
192     return 0;
193    
194     return clocksource_i8253_init();
195     diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
196     index 262ba9481781..1bf6aaefd26a 100644
197     --- a/arch/powerpc/kernel/misc_64.S
198     +++ b/arch/powerpc/kernel/misc_64.S
199     @@ -135,7 +135,7 @@ _GLOBAL_TOC(flush_dcache_range)
200     subf r8,r6,r4 /* compute length */
201     add r8,r8,r5 /* ensure we get enough */
202     lwz r9,DCACHEL1LOGBLOCKSIZE(r10) /* Get log-2 of dcache block size */
203     - srw. r8,r8,r9 /* compute line count */
204     + srd. r8,r8,r9 /* compute line count */
205     beqlr /* nothing to do? */
206     mtctr r8
207     0: dcbst 0,r6
208     @@ -153,7 +153,7 @@ _GLOBAL(flush_inval_dcache_range)
209     subf r8,r6,r4 /* compute length */
210     add r8,r8,r5 /* ensure we get enough */
211     lwz r9,DCACHEL1LOGBLOCKSIZE(r10)/* Get log-2 of dcache block size */
212     - srw. r8,r8,r9 /* compute line count */
213     + srd. r8,r8,r9 /* compute line count */
214     beqlr /* nothing to do? */
215     sync
216     isync
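
The one-instruction change matters because srw is a word (32-bit) shift: for flush lengths of 4 GiB and up the computed line count truncates and beqlr can skip the flush entirely, while srd shifts the full doubleword. A small C illustration of that truncation (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Sketch of the bug class fixed by srw -> srd: computing a cache-line
     * count with 32-bit arithmetic truncates lengths of 4 GiB and above. */
    int main(void)
    {
        uint64_t len = 1ULL << 32;       /* 4 GiB flush request */
        unsigned int log_block = 7;      /* 128-byte dcache lines */

        uint32_t lines32 = (uint32_t)len >> log_block;  /* srw-like: low word only */
        uint64_t lines64 = len >> log_block;            /* srd-like: full width */

        printf("32-bit count: %u (loop would be skipped)\n", lines32);
        printf("64-bit count: %llu\n", (unsigned long long)lines64);
        return 0;
    }
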
217     diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
218     index b43f8d33a369..18ede6e806b9 100644
219     --- a/arch/s390/kernel/vmlinux.lds.S
220     +++ b/arch/s390/kernel/vmlinux.lds.S
221     @@ -31,10 +31,9 @@ PHDRS {
222     SECTIONS
223     {
224     . = 0x100000;
225     - _stext = .; /* Start of text section */
226     .text : {
227     - /* Text and read-only data */
228     - _text = .;
229     + _stext = .; /* Start of text section */
230     + _text = .; /* Text and read-only data */
231     HEAD_TEXT
232     TEXT_TEXT
233     SCHED_TEXT
234     @@ -46,11 +45,10 @@ SECTIONS
235     *(.text.*_indirect_*)
236     *(.fixup)
237     *(.gnu.warning)
238     + . = ALIGN(PAGE_SIZE);
239     + _etext = .; /* End of text section */
240     } :text = 0x0700
241    
242     - . = ALIGN(PAGE_SIZE);
243     - _etext = .; /* End of text section */
244     -
245     NOTES :text :note
246    
247     .dummy : { *(.dummy) } :data
248     diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
249     index a07ffd23e4dd..d3983fdf1012 100644
250     --- a/arch/x86/include/asm/bootparam_utils.h
251     +++ b/arch/x86/include/asm/bootparam_utils.h
252     @@ -18,6 +18,20 @@
253     * Note: efi_info is commonly left uninitialized, but that field has a
254     * private magic, so it is better to leave it unchanged.
255     */
256     +
257     +#define sizeof_mbr(type, member) ({ sizeof(((type *)0)->member); })
258     +
259     +#define BOOT_PARAM_PRESERVE(struct_member) \
260     + { \
261     + .start = offsetof(struct boot_params, struct_member), \
262     + .len = sizeof_mbr(struct boot_params, struct_member), \
263     + }
264     +
265     +struct boot_params_to_save {
266     + unsigned int start;
267     + unsigned int len;
268     +};
269     +
270     static void sanitize_boot_params(struct boot_params *boot_params)
271     {
272     /*
273     @@ -36,19 +50,40 @@ static void sanitize_boot_params(struct boot_params *boot_params)
274     */
275     if (boot_params->sentinel) {
276     /* fields in boot_params are left uninitialized, clear them */
277     - memset(&boot_params->ext_ramdisk_image, 0,
278     - (char *)&boot_params->efi_info -
279     - (char *)&boot_params->ext_ramdisk_image);
280     - memset(&boot_params->kbd_status, 0,
281     - (char *)&boot_params->hdr -
282     - (char *)&boot_params->kbd_status);
283     - memset(&boot_params->_pad7[0], 0,
284     - (char *)&boot_params->edd_mbr_sig_buffer[0] -
285     - (char *)&boot_params->_pad7[0]);
286     - memset(&boot_params->_pad8[0], 0,
287     - (char *)&boot_params->eddbuf[0] -
288     - (char *)&boot_params->_pad8[0]);
289     - memset(&boot_params->_pad9[0], 0, sizeof(boot_params->_pad9));
290     + static struct boot_params scratch;
291     + char *bp_base = (char *)boot_params;
292     + char *save_base = (char *)&scratch;
293     + int i;
294     +
295     + const struct boot_params_to_save to_save[] = {
296     + BOOT_PARAM_PRESERVE(screen_info),
297     + BOOT_PARAM_PRESERVE(apm_bios_info),
298     + BOOT_PARAM_PRESERVE(tboot_addr),
299     + BOOT_PARAM_PRESERVE(ist_info),
300     + BOOT_PARAM_PRESERVE(hd0_info),
301     + BOOT_PARAM_PRESERVE(hd1_info),
302     + BOOT_PARAM_PRESERVE(sys_desc_table),
303     + BOOT_PARAM_PRESERVE(olpc_ofw_header),
304     + BOOT_PARAM_PRESERVE(efi_info),
305     + BOOT_PARAM_PRESERVE(alt_mem_k),
306     + BOOT_PARAM_PRESERVE(scratch),
307     + BOOT_PARAM_PRESERVE(e820_entries),
308     + BOOT_PARAM_PRESERVE(eddbuf_entries),
309     + BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
310     + BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
311     + BOOT_PARAM_PRESERVE(hdr),
312     + BOOT_PARAM_PRESERVE(e820_table),
313     + BOOT_PARAM_PRESERVE(eddbuf),
314     + };
315     +
316     + memset(&scratch, 0, sizeof(scratch));
317     +
318     + for (i = 0; i < ARRAY_SIZE(to_save); i++) {
319     + memcpy(save_base + to_save[i].start,
320     + bp_base + to_save[i].start, to_save[i].len);
321     + }
322     +
323     + memcpy(boot_params, save_base, sizeof(*boot_params));
324     }
325     }
326    
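
The rewrite above inverts the sanitizer's logic: instead of memset()ing the known gaps between fields, it copies an explicit preserve-list of fields into a zeroed scratch struct and copies the whole struct back, so anything not listed is cleared. A minimal userspace sketch of the offsetof/sizeof preserve-list idiom on a made-up struct (illustration only, not part of the patch):

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>

    struct params {              /* hypothetical stand-in for boot_params */
        int keep_a;
        int junk;
        char keep_b[8];
    };

    #define PRESERVE(member) \
        { offsetof(struct params, member), sizeof(((struct params *)0)->member) }

    struct span { size_t start, len; };

    int main(void)
    {
        struct params p = { .keep_a = 42, .junk = -1, .keep_b = "ok" };
        static struct params scratch;   /* zeroed, like the patch's scratch */
        const struct span to_save[] = { PRESERVE(keep_a), PRESERVE(keep_b) };
        size_t i;

        memset(&scratch, 0, sizeof(scratch));
        for (i = 0; i < sizeof(to_save) / sizeof(to_save[0]); i++)
            memcpy((char *)&scratch + to_save[i].start,
                   (char *)&p + to_save[i].start, to_save[i].len);
        memcpy(&p, &scratch, sizeof(p));  /* everything not listed is now zero */

        printf("keep_a=%d junk=%d keep_b=%s\n", p.keep_a, p.junk, p.keep_b);
        return 0;
    }
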
327     diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
328     index f85f43db9225..a1d22e4428f6 100644
329     --- a/arch/x86/include/asm/msr-index.h
330     +++ b/arch/x86/include/asm/msr-index.h
331     @@ -334,6 +334,7 @@
332     #define MSR_AMD64_PATCH_LEVEL 0x0000008b
333     #define MSR_AMD64_TSC_RATIO 0xc0000104
334     #define MSR_AMD64_NB_CFG 0xc001001f
335     +#define MSR_AMD64_CPUID_FN_1 0xc0011004
336     #define MSR_AMD64_PATCH_LOADER 0xc0010020
337     #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
338     #define MSR_AMD64_OSVW_STATUS 0xc0010141
339     diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
340     index 599c273f5d00..28cb2b31527a 100644
341     --- a/arch/x86/include/asm/nospec-branch.h
342     +++ b/arch/x86/include/asm/nospec-branch.h
343     @@ -202,7 +202,7 @@
344     " lfence;\n" \
345     " jmp 902b;\n" \
346     " .align 16\n" \
347     - "903: addl $4, %%esp;\n" \
348     + "903: lea 4(%%esp), %%esp;\n" \
349     " pushl %[thunk_target];\n" \
350     " ret;\n" \
351     " .align 16\n" \
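
lea computes the same esp+4 as addl but does not write EFLAGS, so flags set before the thunk survive its stack adjustment. A userspace sketch of that difference (x86 GCC/Clang inline asm; illustration only, not part of the patch):

    #include <stdio.h>

    /* After cmp sets ZF, the lea leaves it intact while the add rewrites it. */
    int main(void)
    {
        char zf_lea, zf_add;
        long tmp, x = 0;

        asm("cmp %2, %2\n\tlea 4(%2), %1\n\tsete %0"
            : "=q"(zf_lea), "=r"(tmp) : "r"(x) : "cc");
        asm("cmp %2, %2\n\tadd $4, %1\n\tsete %0"
            : "=q"(zf_add), "+r"(tmp) : "r"(x) : "cc");

        printf("ZF survives lea: %d, survives add: %d\n", zf_lea, zf_add);
        return 0;
    }
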
352     diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
353     index 272a12865b2a..b316bd61a6ac 100644
354     --- a/arch/x86/kernel/apic/apic.c
355     +++ b/arch/x86/kernel/apic/apic.c
356     @@ -715,7 +715,7 @@ static __initdata unsigned long lapic_cal_pm1, lapic_cal_pm2;
357     static __initdata unsigned long lapic_cal_j1, lapic_cal_j2;
358    
359     /*
360     - * Temporary interrupt handler.
361     + * Temporary interrupt handler and polled calibration function.
362     */
363     static void __init lapic_cal_handler(struct clock_event_device *dev)
364     {
365     @@ -799,7 +799,8 @@ calibrate_by_pmtimer(long deltapm, long *delta, long *deltatsc)
366     static int __init calibrate_APIC_clock(void)
367     {
368     struct clock_event_device *levt = this_cpu_ptr(&lapic_events);
369     - void (*real_handler)(struct clock_event_device *dev);
370     + u64 tsc_perj = 0, tsc_start = 0;
371     + unsigned long jif_start;
372     unsigned long deltaj;
373     long delta, deltatsc;
374     int pm_referenced = 0;
375     @@ -830,28 +831,64 @@ static int __init calibrate_APIC_clock(void)
376     apic_printk(APIC_VERBOSE, "Using local APIC timer interrupts.\n"
377     "calibrating APIC timer ...\n");
378    
379     + /*
380     + * There are platforms w/o global clockevent devices. Instead of
381     + * making the calibration conditional on that, use a polling based
382     + * approach everywhere.
383     + */
384     local_irq_disable();
385    
386     - /* Replace the global interrupt handler */
387     - real_handler = global_clock_event->event_handler;
388     - global_clock_event->event_handler = lapic_cal_handler;
389     -
390     /*
391     * Setup the APIC counter to maximum. There is no way the lapic
392     * can underflow in the 100ms detection time frame
393     */
394     __setup_APIC_LVTT(0xffffffff, 0, 0);
395    
396     - /* Let the interrupts run */
397     + /*
398     + * Methods to terminate the calibration loop:
399     + * 1) Global clockevent if available (jiffies)
400     + * 2) TSC if available and frequency is known
401     + */
402     + jif_start = READ_ONCE(jiffies);
403     +
404     + if (tsc_khz) {
405     + tsc_start = rdtsc();
406     + tsc_perj = div_u64((u64)tsc_khz * 1000, HZ);
407     + }
408     +
409     + /*
410     + * Enable interrupts so the tick can fire, if a global
411     + * clockevent device is available
412     + */
413     local_irq_enable();
414    
415     - while (lapic_cal_loops <= LAPIC_CAL_LOOPS)
416     - cpu_relax();
417     + while (lapic_cal_loops <= LAPIC_CAL_LOOPS) {
418     + /* Wait for a tick to elapse */
419     + while (1) {
420     + if (tsc_khz) {
421     + u64 tsc_now = rdtsc();
422     + if ((tsc_now - tsc_start) >= tsc_perj) {
423     + tsc_start += tsc_perj;
424     + break;
425     + }
426     + } else {
427     + unsigned long jif_now = READ_ONCE(jiffies);
428    
429     - local_irq_disable();
430     + if (time_after(jif_now, jif_start)) {
431     + jif_start = jif_now;
432     + break;
433     + }
434     + }
435     + cpu_relax();
436     + }
437    
438     - /* Restore the real event handler */
439     - global_clock_event->event_handler = real_handler;
440     + /* Invoke the calibration routine */
441     + local_irq_disable();
442     + lapic_cal_handler(NULL);
443     + local_irq_enable();
444     + }
445     +
446     + local_irq_disable();
447    
448     /* Build delta t1-t2 as apic timer counts down */
449     delta = lapic_cal_t1 - lapic_cal_t2;
450     @@ -904,10 +941,11 @@ static int __init calibrate_APIC_clock(void)
451     levt->features &= ~CLOCK_EVT_FEAT_DUMMY;
452    
453     /*
454     - * PM timer calibration failed or not turned on
455     - * so lets try APIC timer based calibration
456     + * PM timer calibration failed or not turned on so lets try APIC
457     + * timer based calibration, if a global clockevent device is
458     + * available.
459     */
460     - if (!pm_referenced) {
461     + if (!pm_referenced && global_clock_event) {
462     apic_printk(APIC_VERBOSE, "... verify APIC timer\n");
463    
464     /*
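
The calibration above no longer hijacks the global clockevent handler; it busy-polls for tick boundaries, using the TSC when its frequency is known and falling back to watching jiffies, and invokes lapic_cal_handler() itself once per tick. A userspace sketch of that polled-deadline loop, with CLOCK_MONOTONIC standing in for the TSC (illustration only, not part of the patch):

    #include <stdio.h>
    #include <time.h>
    #include <stdint.h>

    static uint64_t now_ns(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    int main(void)
    {
        const uint64_t ns_per_tick = 10 * 1000 * 1000;   /* HZ=100 */
        uint64_t start = now_ns();
        int ticks;

        for (ticks = 0; ticks < 10; ticks++) {
            while (now_ns() - start < ns_per_tick)
                ;                      /* cpu_relax() in the kernel loop */
            start += ns_per_tick;      /* advance deadline, as tsc_start += tsc_perj */
            /* lapic_cal_handler(NULL) would run here, once per tick */
        }
        printf("polled %d ticks\n", ticks);
        return 0;
    }
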
465     diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
466     index da1f5e78363e..f86f912ce215 100644
467     --- a/arch/x86/kernel/cpu/amd.c
468     +++ b/arch/x86/kernel/cpu/amd.c
469     @@ -799,6 +799,64 @@ static void init_amd_ln(struct cpuinfo_x86 *c)
470     msr_set_bit(MSR_AMD64_DE_CFG, 31);
471     }
472    
473     +static bool rdrand_force;
474     +
475     +static int __init rdrand_cmdline(char *str)
476     +{
477     + if (!str)
478     + return -EINVAL;
479     +
480     + if (!strcmp(str, "force"))
481     + rdrand_force = true;
482     + else
483     + return -EINVAL;
484     +
485     + return 0;
486     +}
487     +early_param("rdrand", rdrand_cmdline);
488     +
489     +static void clear_rdrand_cpuid_bit(struct cpuinfo_x86 *c)
490     +{
491     + /*
492     + * Saving of the MSR used to hide the RDRAND support during
493     + * suspend/resume is done by arch/x86/power/cpu.c, which is
494     + * dependent on CONFIG_PM_SLEEP.
495     + */
496     + if (!IS_ENABLED(CONFIG_PM_SLEEP))
497     + return;
498     +
499     + /*
500     + * The nordrand option can clear X86_FEATURE_RDRAND, so check for
501     + * RDRAND support using the CPUID function directly.
502     + */
503     + if (!(cpuid_ecx(1) & BIT(30)) || rdrand_force)
504     + return;
505     +
506     + msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62);
507     +
508     + /*
509     + * Verify that the CPUID change has occurred in case the kernel is
510     + * running virtualized and the hypervisor doesn't support the MSR.
511     + */
512     + if (cpuid_ecx(1) & BIT(30)) {
513     + pr_info_once("BIOS may not properly restore RDRAND after suspend, but hypervisor does not support hiding RDRAND via CPUID.\n");
514     + return;
515     + }
516     +
517     + clear_cpu_cap(c, X86_FEATURE_RDRAND);
518     + pr_info_once("BIOS may not properly restore RDRAND after suspend, hiding RDRAND via CPUID. Use rdrand=force to reenable.\n");
519     +}
520     +
521     +static void init_amd_jg(struct cpuinfo_x86 *c)
522     +{
523     + /*
524     + * Some BIOS implementations do not restore proper RDRAND support
525     + * across suspend and resume. Check on whether to hide the RDRAND
526     + * instruction support via CPUID.
527     + */
528     + clear_rdrand_cpuid_bit(c);
529     +}
530     +
531     static void init_amd_bd(struct cpuinfo_x86 *c)
532     {
533     u64 value;
534     @@ -813,6 +871,13 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
535     wrmsrl_safe(MSR_F15H_IC_CFG, value);
536     }
537     }
538     +
539     + /*
540     + * Some BIOS implementations do not restore proper RDRAND support
541     + * across suspend and resume. Check on whether to hide the RDRAND
542     + * instruction support via CPUID.
543     + */
544     + clear_rdrand_cpuid_bit(c);
545     }
546    
547     static void init_amd_zn(struct cpuinfo_x86 *c)
548     @@ -855,6 +920,7 @@ static void init_amd(struct cpuinfo_x86 *c)
549     case 0x10: init_amd_gh(c); break;
550     case 0x12: init_amd_ln(c); break;
551     case 0x15: init_amd_bd(c); break;
552     + case 0x16: init_amd_jg(c); break;
553     case 0x17: init_amd_zn(c); break;
554     }
555    
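
On these AMD parts, MSR C001_1004 mirrors the CPUID function 1 output with ECX in the upper dword, which is why clearing MSR bit 62 (= 32 + 30) hides CPUID.1:ECX[30], the RDRAND bit. A small illustration of that bit arithmetic (not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t msr = ~0ull;                 /* pretend everything advertised */
        uint32_t ecx;

        msr &= ~(1ull << 62);                 /* msr_clear_bit(MSR_AMD64_CPUID_FN_1, 62) */
        ecx = (uint32_t)(msr >> 32);          /* upper dword mirrors CPUID.1:ECX */

        printf("CPUID.1:ECX bit 30 (RDRAND): %u\n", (ecx >> 30) & 1);
        return 0;
    }
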
556     diff --git a/arch/x86/lib/cpu.c b/arch/x86/lib/cpu.c
557     index 2dd1fe13a37b..19f707992db2 100644
558     --- a/arch/x86/lib/cpu.c
559     +++ b/arch/x86/lib/cpu.c
560     @@ -1,5 +1,6 @@
561     #include <linux/types.h>
562     #include <linux/export.h>
563     +#include <asm/cpu.h>
564    
565     unsigned int x86_family(unsigned int sig)
566     {
567     diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
568     index 513ce09e9950..3aa3149df07f 100644
569     --- a/arch/x86/power/cpu.c
570     +++ b/arch/x86/power/cpu.c
571     @@ -13,6 +13,7 @@
572     #include <linux/smp.h>
573     #include <linux/perf_event.h>
574     #include <linux/tboot.h>
575     +#include <linux/dmi.h>
576    
577     #include <asm/pgtable.h>
578     #include <asm/proto.h>
579     @@ -24,7 +25,7 @@
580     #include <asm/debugreg.h>
581     #include <asm/cpu.h>
582     #include <asm/mmu_context.h>
583     -#include <linux/dmi.h>
584     +#include <asm/cpu_device_id.h>
585    
586     #ifdef CONFIG_X86_32
587     __visible unsigned long saved_context_ebx;
588     @@ -398,15 +399,14 @@ static int __init bsp_pm_check_init(void)
589    
590     core_initcall(bsp_pm_check_init);
591    
592     -static int msr_init_context(const u32 *msr_id, const int total_num)
593     +static int msr_build_context(const u32 *msr_id, const int num)
594     {
595     - int i = 0;
596     + struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
597     struct saved_msr *msr_array;
598     + int total_num;
599     + int i, j;
600    
601     - if (saved_context.saved_msrs.array || saved_context.saved_msrs.num > 0) {
602     - pr_err("x86/pm: MSR quirk already applied, please check your DMI match table.\n");
603     - return -EINVAL;
604     - }
605     + total_num = saved_msrs->num + num;
606    
607     msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
608     if (!msr_array) {
609     @@ -414,19 +414,30 @@ static int msr_init_context(const u32 *msr_id, const int total_num)
610     return -ENOMEM;
611     }
612    
613     - for (i = 0; i < total_num; i++) {
614     - msr_array[i].info.msr_no = msr_id[i];
615     + if (saved_msrs->array) {
616     + /*
617     + * Multiple callbacks can invoke this function, so copy any
618     + * MSR save requests from previous invocations.
619     + */
620     + memcpy(msr_array, saved_msrs->array,
621     + sizeof(struct saved_msr) * saved_msrs->num);
622     +
623     + kfree(saved_msrs->array);
624     + }
625     +
626     + for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
627     + msr_array[i].info.msr_no = msr_id[j];
628     msr_array[i].valid = false;
629     msr_array[i].info.reg.q = 0;
630     }
631     - saved_context.saved_msrs.num = total_num;
632     - saved_context.saved_msrs.array = msr_array;
633     + saved_msrs->num = total_num;
634     + saved_msrs->array = msr_array;
635    
636     return 0;
637     }
638    
639     /*
640     - * The following section is a quirk framework for problematic BIOSen:
641     + * The following sections are a quirk framework for problematic BIOSen:
642     * Sometimes MSRs are modified by the BIOSen after suspended to
643     * RAM, this might cause unexpected behavior after wakeup.
644     * Thus we save/restore these specified MSRs across suspend/resume
645     @@ -441,7 +452,7 @@ static int msr_initialize_bdw(const struct dmi_system_id *d)
646     u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };
647    
648     pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
649     - return msr_init_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
650     + return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
651     }
652    
653     static const struct dmi_system_id msr_save_dmi_table[] = {
654     @@ -456,9 +467,58 @@ static const struct dmi_system_id msr_save_dmi_table[] = {
655     {}
656     };
657    
658     +static int msr_save_cpuid_features(const struct x86_cpu_id *c)
659     +{
660     + u32 cpuid_msr_id[] = {
661     + MSR_AMD64_CPUID_FN_1,
662     + };
663     +
664     + pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
665     + c->family);
666     +
667     + return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
668     +}
669     +
670     +static const struct x86_cpu_id msr_save_cpu_table[] = {
671     + {
672     + .vendor = X86_VENDOR_AMD,
673     + .family = 0x15,
674     + .model = X86_MODEL_ANY,
675     + .feature = X86_FEATURE_ANY,
676     + .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
677     + },
678     + {
679     + .vendor = X86_VENDOR_AMD,
680     + .family = 0x16,
681     + .model = X86_MODEL_ANY,
682     + .feature = X86_FEATURE_ANY,
683     + .driver_data = (kernel_ulong_t)msr_save_cpuid_features,
684     + },
685     + {}
686     +};
687     +
688     +typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
689     +static int pm_cpu_check(const struct x86_cpu_id *c)
690     +{
691     + const struct x86_cpu_id *m;
692     + int ret = 0;
693     +
694     + m = x86_match_cpu(msr_save_cpu_table);
695     + if (m) {
696     + pm_cpu_match_t fn;
697     +
698     + fn = (pm_cpu_match_t)m->driver_data;
699     + ret = fn(m);
700     + }
701     +
702     + return ret;
703     +}
704     +
705     static int pm_check_save_msr(void)
706     {
707     dmi_check_system(msr_save_dmi_table);
708     + pm_cpu_check(msr_save_cpu_table);
709     +
710     return 0;
711     }
712    
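
msr_init_context() refused to run a second time; msr_build_context() instead grows the saved-MSR array: allocate room for old+new, copy the previous entries, free the old array, append the new IDs. A userspace sketch of the same grow-and-append pattern (names hypothetical; illustration only, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct saved { unsigned int id; };
    static struct saved *array;
    static int count;

    static int build_context(const unsigned int *ids, int num)
    {
        struct saved *bigger = malloc((count + num) * sizeof(*bigger));
        int i, j;

        if (!bigger)
            return -1;                     /* -ENOMEM */
        if (array) {                       /* copy earlier requests, then free */
            memcpy(bigger, array, count * sizeof(*array));
            free(array);
        }
        for (i = count, j = 0; j < num; i++, j++)
            bigger[i].id = ids[j];
        array = bigger;
        count += num;
        return 0;
    }

    int main(void)
    {
        unsigned int dmi_quirk[] = { 0x19a };        /* MSR_IA32_THERM_CONTROL-like */
        unsigned int cpu_quirk[] = { 0xc0011004 };   /* MSR_AMD64_CPUID_FN_1 */

        build_context(dmi_quirk, 1);     /* DMI table callback */
        build_context(cpu_quirk, 1);     /* CPU match callback, appends */
        printf("%d MSRs saved across suspend\n", count);
        free(array);
        return 0;
    }
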
713     diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
714     index becd793a258c..d8d2ac294b0c 100644
715     --- a/block/bfq-iosched.c
716     +++ b/block/bfq-iosched.c
717     @@ -1886,9 +1886,14 @@ static void bfq_request_merged(struct request_queue *q, struct request *req,
718     blk_rq_pos(container_of(rb_prev(&req->rb_node),
719     struct request, rb_node))) {
720     struct bfq_queue *bfqq = bfq_init_rq(req);
721     - struct bfq_data *bfqd = bfqq->bfqd;
722     + struct bfq_data *bfqd;
723     struct request *prev, *next_rq;
724    
725     + if (!bfqq)
726     + return;
727     +
728     + bfqd = bfqq->bfqd;
729     +
730     /* Reposition request in its sort_list */
731     elv_rb_del(&bfqq->sort_list, req);
732     elv_rb_add(&bfqq->sort_list, req);
733     @@ -1930,6 +1935,9 @@ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
734     struct bfq_queue *bfqq = bfq_init_rq(rq),
735     *next_bfqq = bfq_init_rq(next);
736    
737     + if (!bfqq)
738     + return;
739     +
740     /*
741     * If next and rq belong to the same bfq_queue and next is older
742     * than rq, then reposition rq in the fifo (by substituting next
743     @@ -4590,12 +4598,12 @@ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
744    
745     spin_lock_irq(&bfqd->lock);
746     bfqq = bfq_init_rq(rq);
747     - if (at_head || blk_rq_is_passthrough(rq)) {
748     + if (!bfqq || at_head || blk_rq_is_passthrough(rq)) {
749     if (at_head)
750     list_add(&rq->queuelist, &bfqd->dispatch);
751     else
752     list_add_tail(&rq->queuelist, &bfqd->dispatch);
753     - } else { /* bfqq is assumed to be non null here */
754     + } else {
755     idle_timer_disabled = __bfq_insert_request(bfqd, rq);
756     /*
757     * Update bfqq, because, if a queue merge has occurred
758     diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
759     index 1984fc78c750..3a64fa4aaf7e 100644
760     --- a/drivers/ata/libata-scsi.c
761     +++ b/drivers/ata/libata-scsi.c
762     @@ -1803,6 +1803,21 @@ nothing_to_do:
763     return 1;
764     }
765    
766     +static bool ata_check_nblocks(struct scsi_cmnd *scmd, u32 n_blocks)
767     +{
768     + struct request *rq = scmd->request;
769     + u32 req_blocks;
770     +
771     + if (!blk_rq_is_passthrough(rq))
772     + return true;
773     +
774     + req_blocks = blk_rq_bytes(rq) / scmd->device->sector_size;
775     + if (n_blocks > req_blocks)
776     + return false;
777     +
778     + return true;
779     +}
780     +
781     /**
782     * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
783     * @qc: Storage for translated ATA taskfile
784     @@ -1847,6 +1862,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
785     scsi_10_lba_len(cdb, &block, &n_block);
786     if (cdb[1] & (1 << 3))
787     tf_flags |= ATA_TFLAG_FUA;
788     + if (!ata_check_nblocks(scmd, n_block))
789     + goto invalid_fld;
790     break;
791     case READ_6:
792     case WRITE_6:
793     @@ -1861,6 +1878,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
794     */
795     if (!n_block)
796     n_block = 256;
797     + if (!ata_check_nblocks(scmd, n_block))
798     + goto invalid_fld;
799     break;
800     case READ_16:
801     case WRITE_16:
802     @@ -1871,6 +1890,8 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
803     scsi_16_lba_len(cdb, &block, &n_block);
804     if (cdb[1] & (1 << 3))
805     tf_flags |= ATA_TFLAG_FUA;
806     + if (!ata_check_nblocks(scmd, n_block))
807     + goto invalid_fld;
808     break;
809     default:
810     DPRINTK("no-byte command\n");
811     diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
812     index c5ea0fc635e5..873cc0906055 100644
813     --- a/drivers/ata/libata-sff.c
814     +++ b/drivers/ata/libata-sff.c
815     @@ -674,6 +674,10 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
816     unsigned int offset;
817     unsigned char *buf;
818    
819     + if (!qc->cursg) {
820     + qc->curbytes = qc->nbytes;
821     + return;
822     + }
823     if (qc->curbytes == qc->nbytes - qc->sect_size)
824     ap->hsm_task_state = HSM_ST_LAST;
825    
826     @@ -699,6 +703,8 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
827    
828     if (qc->cursg_ofs == qc->cursg->length) {
829     qc->cursg = sg_next(qc->cursg);
830     + if (!qc->cursg)
831     + ap->hsm_task_state = HSM_ST_LAST;
832     qc->cursg_ofs = 0;
833     }
834     }
835     diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c
836     index 568f59b58ddf..e7c877d354c7 100644
837     --- a/drivers/clk/socfpga/clk-periph-s10.c
838     +++ b/drivers/clk/socfpga/clk-periph-s10.c
839     @@ -37,7 +37,7 @@ static unsigned long clk_peri_cnt_clk_recalc_rate(struct clk_hw *hwclk,
840     if (socfpgaclk->fixed_div) {
841     div = socfpgaclk->fixed_div;
842     } else {
843     - if (!socfpgaclk->bypass_reg)
844     + if (socfpgaclk->hw.reg)
845     div = ((readl(socfpgaclk->hw.reg) & 0x7ff) + 1);
846     }
847    
848     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
849     index b308ce92685d..53395852f012 100644
850     --- a/drivers/gpio/gpiolib.c
851     +++ b/drivers/gpio/gpiolib.c
852     @@ -1082,9 +1082,11 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
853     if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
854     lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW;
855     if (test_bit(FLAG_OPEN_DRAIN, &desc->flags))
856     - lineinfo.flags |= GPIOLINE_FLAG_OPEN_DRAIN;
857     + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN |
858     + GPIOLINE_FLAG_IS_OUT);
859     if (test_bit(FLAG_OPEN_SOURCE, &desc->flags))
860     - lineinfo.flags |= GPIOLINE_FLAG_OPEN_SOURCE;
861     + lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE |
862     + GPIOLINE_FLAG_IS_OUT);
863    
864     if (copy_to_user(ip, &lineinfo, sizeof(lineinfo)))
865     return -EFAULT;
866     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
867     index b4e7404fe660..a11637b0f6cc 100644
868     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
869     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
870     @@ -40,8 +40,7 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
871     u8 *ptr = msg->buf;
872    
873     while (remaining) {
874     - u8 cnt = (remaining > 16) ? 16 : remaining;
875     - u8 cmd;
876     + u8 cnt, retries, cmd;
877    
878     if (msg->flags & I2C_M_RD)
879     cmd = 1;
880     @@ -51,10 +50,19 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
881     if (mcnt || remaining > 16)
882     cmd |= 4; /* MOT */
883    
884     - ret = aux->func->xfer(aux, true, cmd, msg->addr, ptr, &cnt);
885     - if (ret < 0) {
886     - nvkm_i2c_aux_release(aux);
887     - return ret;
888     + for (retries = 0, cnt = 0;
889     + retries < 32 && !cnt;
890     + retries++) {
891     + cnt = min_t(u8, remaining, 16);
892     + ret = aux->func->xfer(aux, true, cmd,
893     + msg->addr, ptr, &cnt);
894     + if (ret < 0)
895     + goto out;
896     + }
897     + if (!cnt) {
898     + AUX_TRACE(aux, "no data after 32 retries");
899     + ret = -EIO;
900     + goto out;
901     }
902    
903     ptr += cnt;
904     @@ -64,8 +72,10 @@ nvkm_i2c_aux_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
905     msg++;
906     }
907    
908     + ret = num;
909     +out:
910     nvkm_i2c_aux_release(aux);
911     - return num;
912     + return ret;
913     }
914    
915     static u32
916     diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
917     index 080f05352195..6a4da3a0ff1c 100644
918     --- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
919     +++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
920     @@ -436,7 +436,7 @@ static int rockchip_dp_resume(struct device *dev)
921    
922     static const struct dev_pm_ops rockchip_dp_pm_ops = {
923     #ifdef CONFIG_PM_SLEEP
924     - .suspend = rockchip_dp_suspend,
925     + .suspend_late = rockchip_dp_suspend,
926     .resume_early = rockchip_dp_resume,
927     #endif
928     };
929     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
930     index e4e09d47c5c0..59e9d05ab928 100644
931     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
932     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
933     @@ -389,8 +389,10 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
934     break;
935     }
936    
937     - if (retries == RETRIES)
938     + if (retries == RETRIES) {
939     + kfree(reply);
940     return -EINVAL;
941     + }
942    
943     *msg_len = reply_len;
944     *msg = reply;
945     diff --git a/drivers/hid/hid-a4tech.c b/drivers/hid/hid-a4tech.c
946     index 9428ea7cdf8a..c52bd163abb3 100644
947     --- a/drivers/hid/hid-a4tech.c
948     +++ b/drivers/hid/hid-a4tech.c
949     @@ -26,12 +26,36 @@
950     #define A4_2WHEEL_MOUSE_HACK_7 0x01
951     #define A4_2WHEEL_MOUSE_HACK_B8 0x02
952    
953     +#define A4_WHEEL_ORIENTATION (HID_UP_GENDESK | 0x000000b8)
954     +
955     struct a4tech_sc {
956     unsigned long quirks;
957     unsigned int hw_wheel;
958     __s32 delayed_value;
959     };
960    
961     +static int a4_input_mapping(struct hid_device *hdev, struct hid_input *hi,
962     + struct hid_field *field, struct hid_usage *usage,
963     + unsigned long **bit, int *max)
964     +{
965     + struct a4tech_sc *a4 = hid_get_drvdata(hdev);
966     +
967     + if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8 &&
968     + usage->hid == A4_WHEEL_ORIENTATION) {
969     + /*
970     + * We do not want to have this usage mapped to anything as it's
971     + * nonstandard and doesn't really behave like an HID report.
972     + * It's only selecting the orientation (vertical/horizontal) of
973     + * the previous mouse wheel report. The input_events will be
974     + * generated once both reports are recorded in a4_event().
975     + */
976     + return -1;
977     + }
978     +
979     + return 0;
980     +
981     +}
982     +
983     static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
984     struct hid_field *field, struct hid_usage *usage,
985     unsigned long **bit, int *max)
986     @@ -53,8 +77,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
987     struct a4tech_sc *a4 = hid_get_drvdata(hdev);
988     struct input_dev *input;
989    
990     - if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput ||
991     - !usage->type)
992     + if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
993     return 0;
994    
995     input = field->hidinput->input;
996     @@ -65,7 +88,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
997     return 1;
998     }
999    
1000     - if (usage->hid == 0x000100b8) {
1001     + if (usage->hid == A4_WHEEL_ORIENTATION) {
1002     input_event(input, EV_REL, value ? REL_HWHEEL :
1003     REL_WHEEL, a4->delayed_value);
1004     return 1;
1005     @@ -129,6 +152,7 @@ MODULE_DEVICE_TABLE(hid, a4_devices);
1006     static struct hid_driver a4_driver = {
1007     .name = "a4tech",
1008     .id_table = a4_devices,
1009     + .input_mapping = a4_input_mapping,
1010     .input_mapped = a4_input_mapped,
1011     .event = a4_event,
1012     .probe = a4_probe,
1013     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1014     index 2898bb061945..4a2fa57ddcb8 100644
1015     --- a/drivers/hid/hid-ids.h
1016     +++ b/drivers/hid/hid-ids.h
1017     @@ -971,6 +971,7 @@
1018     #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
1019     #define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
1020     #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
1021     +#define USB_DEVICE_ID_SAITEK_X52 0x075c
1022    
1023     #define USB_VENDOR_ID_SAMSUNG 0x0419
1024     #define USB_DEVICE_ID_SAMSUNG_IR_REMOTE 0x0001
1025     diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
1026     index d29c7c9cd185..e553f6fae7a4 100644
1027     --- a/drivers/hid/hid-quirks.c
1028     +++ b/drivers/hid/hid-quirks.c
1029     @@ -143,6 +143,7 @@ static const struct hid_device_id hid_quirks[] = {
1030     { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPAD), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1031     { HID_USB_DEVICE(USB_VENDOR_ID_RETROUSB, USB_DEVICE_ID_RETROUSB_SNES_RETROPORT), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1032     { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD), HID_QUIRK_BADPAD },
1033     + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_X52), HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE },
1034     { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD2), HID_QUIRK_NO_INIT_REPORTS },
1035     { HID_USB_DEVICE(USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD), HID_QUIRK_NO_INIT_REPORTS },
1036     { HID_USB_DEVICE(USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB), HID_QUIRK_NOGET },
1037     diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
1038     index bea8def64f43..30b8c3256c99 100644
1039     --- a/drivers/hid/hid-tmff.c
1040     +++ b/drivers/hid/hid-tmff.c
1041     @@ -34,6 +34,8 @@
1042    
1043     #include "hid-ids.h"
1044    
1045     +#define THRUSTMASTER_DEVICE_ID_2_IN_1_DT 0xb320
1046     +
1047     static const signed short ff_rumble[] = {
1048     FF_RUMBLE,
1049     -1
1050     @@ -88,6 +90,7 @@ static int tmff_play(struct input_dev *dev, void *data,
1051     struct hid_field *ff_field = tmff->ff_field;
1052     int x, y;
1053     int left, right; /* Rumbling */
1054     + int motor_swap;
1055    
1056     switch (effect->type) {
1057     case FF_CONSTANT:
1058     @@ -112,6 +115,13 @@ static int tmff_play(struct input_dev *dev, void *data,
1059     ff_field->logical_minimum,
1060     ff_field->logical_maximum);
1061    
1062     + /* 2-in-1 strong motor is left */
1063     + if (hid->product == THRUSTMASTER_DEVICE_ID_2_IN_1_DT) {
1064     + motor_swap = left;
1065     + left = right;
1066     + right = motor_swap;
1067     + }
1068     +
1069     dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
1070     ff_field->value[0] = left;
1071     ff_field->value[1] = right;
1072     @@ -238,6 +248,8 @@ static const struct hid_device_id tm_devices[] = {
1073     .driver_data = (unsigned long)ff_rumble },
1074     { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304), /* FireStorm Dual Power 2 (and 3) */
1075     .driver_data = (unsigned long)ff_rumble },
1076     + { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, THRUSTMASTER_DEVICE_ID_2_IN_1_DT), /* Dual Trigger 2-in-1 */
1077     + .driver_data = (unsigned long)ff_rumble },
1078     { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323), /* Dual Trigger 3-in-1 (PC Mode) */
1079     .driver_data = (unsigned long)ff_rumble },
1080     { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324), /* Dual Trigger 3-in-1 (PS3 Mode) */
1081     diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1082     index e56dc97fe4b6..50ef7b6cd195 100644
1083     --- a/drivers/hid/wacom_wac.c
1084     +++ b/drivers/hid/wacom_wac.c
1085     @@ -848,6 +848,8 @@ static int wacom_intuos_general(struct wacom_wac *wacom)
1086     y >>= 1;
1087     distance >>= 1;
1088     }
1089     + if (features->type == INTUOSHT2)
1090     + distance = features->distance_max - distance;
1091     input_report_abs(input, ABS_X, x);
1092     input_report_abs(input, ABS_Y, y);
1093     input_report_abs(input, ABS_DISTANCE, distance);
1094     @@ -1061,7 +1063,7 @@ static int wacom_remote_irq(struct wacom_wac *wacom_wac, size_t len)
1095     input_report_key(input, BTN_BASE2, (data[11] & 0x02));
1096    
1097     if (data[12] & 0x80)
1098     - input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f));
1099     + input_report_abs(input, ABS_WHEEL, (data[12] & 0x7f) - 1);
1100     else
1101     input_report_abs(input, ABS_WHEEL, 0);
1102    
1103     diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
1104     index 2f164bd74687..fdb0f832fade 100644
1105     --- a/drivers/hv/channel.c
1106     +++ b/drivers/hv/channel.c
1107     @@ -38,7 +38,7 @@
1108    
1109     static unsigned long virt_to_hvpfn(void *addr)
1110     {
1111     - unsigned long paddr;
1112     + phys_addr_t paddr;
1113    
1114     if (is_vmalloc_addr(addr))
1115     paddr = page_to_phys(vmalloc_to_page(addr)) +
1116     diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
1117     index 060dc7fd66c1..c952002c6301 100644
1118     --- a/drivers/isdn/hardware/mISDN/hfcsusb.c
1119     +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
1120     @@ -1406,6 +1406,7 @@ start_isoc_chain(struct usb_fifo *fifo, int num_packets_per_urb,
1121     printk(KERN_DEBUG
1122     "%s: %s: alloc urb for fifo %i failed",
1123     hw->name, __func__, fifo->fifonum);
1124     + continue;
1125     }
1126     fifo->iso[i].owner_fifo = (struct usb_fifo *) fifo;
1127     fifo->iso[i].indx = i;
1128     @@ -1704,13 +1705,23 @@ hfcsusb_stop_endpoint(struct hfcsusb *hw, int channel)
1129     static int
1130     setup_hfcsusb(struct hfcsusb *hw)
1131     {
1132     + void *dmabuf = kmalloc(sizeof(u_char), GFP_KERNEL);
1133     u_char b;
1134     + int ret;
1135    
1136     if (debug & DBG_HFC_CALL_TRACE)
1137     printk(KERN_DEBUG "%s: %s\n", hw->name, __func__);
1138    
1139     + if (!dmabuf)
1140     + return -ENOMEM;
1141     +
1142     + ret = read_reg_atomic(hw, HFCUSB_CHIP_ID, dmabuf);
1143     +
1144     + memcpy(&b, dmabuf, sizeof(u_char));
1145     + kfree(dmabuf);
1146     +
1147     /* check the chip id */
1148     - if (read_reg_atomic(hw, HFCUSB_CHIP_ID, &b) != 1) {
1149     + if (ret != 1) {
1150     printk(KERN_DEBUG "%s: %s: cannot read chip id\n",
1151     hw->name, __func__);
1152     return 1;
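
The fix above exists because buffers handed to the USB core for transfers must be DMA-able heap memory, not on-stack variables like the old u_char b. A userspace sketch of the resulting shape -- heap buffer in, copy out, free (fake_usb_read is a made-up stand-in; illustration only, not part of the patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int fake_usb_read(unsigned char *dmabuf) { *dmabuf = 0x42; return 1; }

    int main(void)
    {
        unsigned char *dmabuf = malloc(1);   /* kmalloc(sizeof(u_char), ...) */
        unsigned char b;
        int ret;

        if (!dmabuf)
            return -1;                        /* -ENOMEM */
        ret = fake_usb_read(dmabuf);          /* read_reg_atomic(...) stand-in */
        memcpy(&b, dmabuf, 1);                /* copy result to the local */
        free(dmabuf);

        printf("ret=%d chip id=0x%02x\n", ret, b);
        return 0;
    }
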
1153     diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1154     index b1d0ae2dbd3d..dc385b70e4c3 100644
1155     --- a/drivers/md/dm-bufio.c
1156     +++ b/drivers/md/dm-bufio.c
1157     @@ -1602,7 +1602,9 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1158     unsigned long freed;
1159    
1160     c = container_of(shrink, struct dm_bufio_client, shrinker);
1161     - if (!dm_bufio_trylock(c))
1162     + if (sc->gfp_mask & __GFP_FS)
1163     + dm_bufio_lock(c);
1164     + else if (!dm_bufio_trylock(c))
1165     return SHRINK_STOP;
1166    
1167     freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
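
The shrinker may now sleep on the bufio lock, but only when the allocation context allows filesystem recursion (__GFP_FS); otherwise it still trylocks and returns SHRINK_STOP. A userspace sketch of that rule with a pthread mutex modeling the bufio lock (illustration only, not part of the patch):

    #include <stdio.h>
    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static long shrink(int gfp_fs_allowed)
    {
        if (gfp_fs_allowed)
            pthread_mutex_lock(&lock);          /* may block */
        else if (pthread_mutex_trylock(&lock) != 0)
            return -1;                          /* SHRINK_STOP */

        /* ... scan and free buffers ... */
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        printf("GFP_FS path: %ld, atomic path: %ld\n", shrink(1), shrink(0));
        return 0;
    }
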
1168     diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
1169     index dbdcc543832d..2e22d588f056 100644
1170     --- a/drivers/md/dm-integrity.c
1171     +++ b/drivers/md/dm-integrity.c
1172     @@ -1749,7 +1749,22 @@ offload_to_thread:
1173     queue_work(ic->wait_wq, &dio->work);
1174     return;
1175     }
1176     + if (journal_read_pos != NOT_FOUND)
1177     + dio->range.n_sectors = ic->sectors_per_block;
1178     wait_and_add_new_range(ic, &dio->range);
1179     + /*
1180     + * wait_and_add_new_range drops the spinlock, so the journal
1181     + * may have been changed arbitrarily. We need to recheck.
1182     + * To simplify the code, we restrict I/O size to just one block.
1183     + */
1184     + if (journal_read_pos != NOT_FOUND) {
1185     + sector_t next_sector;
1186     + unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
1187     + if (unlikely(new_pos != journal_read_pos)) {
1188     + remove_range_unlocked(ic, &dio->range);
1189     + goto retry;
1190     + }
1191     + }
1192     }
1193     spin_unlock_irq(&ic->endio_wait.lock);
1194    
1195     diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
1196     index 671c24332802..3f694d9061ec 100644
1197     --- a/drivers/md/dm-kcopyd.c
1198     +++ b/drivers/md/dm-kcopyd.c
1199     @@ -548,8 +548,10 @@ static int run_io_job(struct kcopyd_job *job)
1200     * no point in continuing.
1201     */
1202     if (test_bit(DM_KCOPYD_WRITE_SEQ, &job->flags) &&
1203     - job->master_job->write_err)
1204     + job->master_job->write_err) {
1205     + job->write_err = job->master_job->write_err;
1206     return -EIO;
1207     + }
1208    
1209     io_job_start(job->kc->throttle);
1210    
1211     @@ -601,6 +603,7 @@ static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
1212     else
1213     job->read_err = 1;
1214     push(&kc->complete_jobs, job);
1215     + wake(kc);
1216     break;
1217     }
1218    
1219     diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
1220     index c44925e4e481..b78a8a4d061c 100644
1221     --- a/drivers/md/dm-raid.c
1222     +++ b/drivers/md/dm-raid.c
1223     @@ -3199,7 +3199,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv)
1224     */
1225     r = rs_prepare_reshape(rs);
1226     if (r)
1227     - return r;
1228     + goto bad;
1229    
1230     /* Reshaping ain't recovery, so disable recovery */
1231     rs_setup_recovery(rs, MaxSector);
1232     diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
1233     index 34ab30dd5de9..36275c59e4e7 100644
1234     --- a/drivers/md/dm-table.c
1235     +++ b/drivers/md/dm-table.c
1236     @@ -1349,7 +1349,7 @@ void dm_table_event(struct dm_table *t)
1237     }
1238     EXPORT_SYMBOL(dm_table_event);
1239    
1240     -sector_t dm_table_get_size(struct dm_table *t)
1241     +inline sector_t dm_table_get_size(struct dm_table *t)
1242     {
1243     return t->num_targets ? (t->highs[t->num_targets - 1] + 1) : 0;
1244     }
1245     @@ -1374,6 +1374,9 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
1246     unsigned int l, n = 0, k = 0;
1247     sector_t *node;
1248    
1249     + if (unlikely(sector >= dm_table_get_size(t)))
1250     + return &t->targets[t->num_targets];
1251     +
1252     for (l = 0; l < t->depth; l++) {
1253     n = get_child(n, k);
1254     node = get_node(t, l, n);
1255     diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c
1256     index 4cdde7a02e94..7e8d7fc99410 100644
1257     --- a/drivers/md/dm-zoned-metadata.c
1258     +++ b/drivers/md/dm-zoned-metadata.c
1259     @@ -401,15 +401,18 @@ static struct dmz_mblock *dmz_get_mblock_slow(struct dmz_metadata *zmd,
1260     sector_t block = zmd->sb[zmd->mblk_primary].block + mblk_no;
1261     struct bio *bio;
1262    
1263     + if (dmz_bdev_is_dying(zmd->dev))
1264     + return ERR_PTR(-EIO);
1265     +
1266     /* Get a new block and a BIO to read it */
1267     mblk = dmz_alloc_mblock(zmd, mblk_no);
1268     if (!mblk)
1269     - return NULL;
1270     + return ERR_PTR(-ENOMEM);
1271    
1272     bio = bio_alloc(GFP_NOIO, 1);
1273     if (!bio) {
1274     dmz_free_mblock(zmd, mblk);
1275     - return NULL;
1276     + return ERR_PTR(-ENOMEM);
1277     }
1278    
1279     spin_lock(&zmd->mblk_lock);
1280     @@ -540,8 +543,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
1281     if (!mblk) {
1282     /* Cache miss: read the block from disk */
1283     mblk = dmz_get_mblock_slow(zmd, mblk_no);
1284     - if (!mblk)
1285     - return ERR_PTR(-ENOMEM);
1286     + if (IS_ERR(mblk))
1287     + return mblk;
1288     }
1289    
1290     /* Wait for on-going read I/O and check for error */
1291     @@ -569,16 +572,19 @@ static void dmz_dirty_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk)
1292     /*
1293     * Issue a metadata block write BIO.
1294     */
1295     -static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1296     - unsigned int set)
1297     +static int dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1298     + unsigned int set)
1299     {
1300     sector_t block = zmd->sb[set].block + mblk->no;
1301     struct bio *bio;
1302    
1303     + if (dmz_bdev_is_dying(zmd->dev))
1304     + return -EIO;
1305     +
1306     bio = bio_alloc(GFP_NOIO, 1);
1307     if (!bio) {
1308     set_bit(DMZ_META_ERROR, &mblk->state);
1309     - return;
1310     + return -ENOMEM;
1311     }
1312    
1313     set_bit(DMZ_META_WRITING, &mblk->state);
1314     @@ -590,6 +596,8 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
1315     bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
1316     bio_add_page(bio, mblk->page, DMZ_BLOCK_SIZE, 0);
1317     submit_bio(bio);
1318     +
1319     + return 0;
1320     }
1321    
1322     /*
1323     @@ -601,6 +609,9 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
1324     struct bio *bio;
1325     int ret;
1326    
1327     + if (dmz_bdev_is_dying(zmd->dev))
1328     + return -EIO;
1329     +
1330     bio = bio_alloc(GFP_NOIO, 1);
1331     if (!bio)
1332     return -ENOMEM;
1333     @@ -658,22 +669,29 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd,
1334     {
1335     struct dmz_mblock *mblk;
1336     struct blk_plug plug;
1337     - int ret = 0;
1338     + int ret = 0, nr_mblks_submitted = 0;
1339    
1340     /* Issue writes */
1341     blk_start_plug(&plug);
1342     - list_for_each_entry(mblk, write_list, link)
1343     - dmz_write_mblock(zmd, mblk, set);
1344     + list_for_each_entry(mblk, write_list, link) {
1345     + ret = dmz_write_mblock(zmd, mblk, set);
1346     + if (ret)
1347     + break;
1348     + nr_mblks_submitted++;
1349     + }
1350     blk_finish_plug(&plug);
1351    
1352     /* Wait for completion */
1353     list_for_each_entry(mblk, write_list, link) {
1354     + if (!nr_mblks_submitted)
1355     + break;
1356     wait_on_bit_io(&mblk->state, DMZ_META_WRITING,
1357     TASK_UNINTERRUPTIBLE);
1358     if (test_bit(DMZ_META_ERROR, &mblk->state)) {
1359     clear_bit(DMZ_META_ERROR, &mblk->state);
1360     ret = -EIO;
1361     }
1362     + nr_mblks_submitted--;
1363     }
1364    
1365     /* Flush drive cache (this will also sync data) */
1366     @@ -735,6 +753,11 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
1367     */
1368     dmz_lock_flush(zmd);
1369    
1370     + if (dmz_bdev_is_dying(zmd->dev)) {
1371     + ret = -EIO;
1372     + goto out;
1373     + }
1374     +
1375     /* Get dirty blocks */
1376     spin_lock(&zmd->mblk_lock);
1377     list_splice_init(&zmd->mblk_dirty_list, &write_list);
1378     @@ -1534,7 +1557,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1379     struct dm_zone *zone;
1380    
1381     if (list_empty(&zmd->map_rnd_list))
1382     - return NULL;
1383     + return ERR_PTR(-EBUSY);
1384    
1385     list_for_each_entry(zone, &zmd->map_rnd_list, link) {
1386     if (dmz_is_buf(zone))
1387     @@ -1545,7 +1568,7 @@ static struct dm_zone *dmz_get_rnd_zone_for_reclaim(struct dmz_metadata *zmd)
1388     return dzone;
1389     }
1390    
1391     - return NULL;
1392     + return ERR_PTR(-EBUSY);
1393     }
1394    
1395     /*
1396     @@ -1556,7 +1579,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1397     struct dm_zone *zone;
1398    
1399     if (list_empty(&zmd->map_seq_list))
1400     - return NULL;
1401     + return ERR_PTR(-EBUSY);
1402    
1403     list_for_each_entry(zone, &zmd->map_seq_list, link) {
1404     if (!zone->bzone)
1405     @@ -1565,7 +1588,7 @@ static struct dm_zone *dmz_get_seq_zone_for_reclaim(struct dmz_metadata *zmd)
1406     return zone;
1407     }
1408    
1409     - return NULL;
1410     + return ERR_PTR(-EBUSY);
1411     }
1412    
1413     /*
1414     @@ -1623,6 +1646,10 @@ again:
1415     /* Allocate a random zone */
1416     dzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1417     if (!dzone) {
1418     + if (dmz_bdev_is_dying(zmd->dev)) {
1419     + dzone = ERR_PTR(-EIO);
1420     + goto out;
1421     + }
1422     dmz_wait_for_free_zones(zmd);
1423     goto again;
1424     }
1425     @@ -1720,6 +1747,10 @@ again:
1426     /* Allocate a random zone */
1427     bzone = dmz_alloc_zone(zmd, DMZ_ALLOC_RND);
1428     if (!bzone) {
1429     + if (dmz_bdev_is_dying(zmd->dev)) {
1430     + bzone = ERR_PTR(-EIO);
1431     + goto out;
1432     + }
1433     dmz_wait_for_free_zones(zmd);
1434     goto again;
1435     }
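
The reclaim helpers now return ERR_PTR(-EBUSY) or ERR_PTR(-EIO) instead of NULL, so callers can tell "nothing reclaimable right now" from "device dying". A userspace sketch of the kernel's ERR_PTR/IS_ERR pointer-errno encoding, simplified (illustration only, not part of the patch):

    #include <stdio.h>

    /* Small negative errnos are smuggled through pointer return values:
     * they land in the top MAX_ERRNO bytes of the address space, which
     * never hold a valid object. */
    #define MAX_ERRNO 4095

    static void *err_ptr(long err) { return (void *)err; }
    static long ptr_err(const void *p) { return (long)p; }
    static int is_err(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_zone(int dying)
    {
        if (dying)
            return err_ptr(-5);   /* -EIO */
        return err_ptr(-16);      /* -EBUSY: nothing reclaimable right now */
    }

    int main(void)
    {
        void *zone = get_zone(1);

        if (is_err(zone))
            printf("reclaim aborts with errno %ld\n", -ptr_err(zone));
        return 0;
    }
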
1436     diff --git a/drivers/md/dm-zoned-reclaim.c b/drivers/md/dm-zoned-reclaim.c
1437     index edf4b95eb075..9470b8f77a33 100644
1438     --- a/drivers/md/dm-zoned-reclaim.c
1439     +++ b/drivers/md/dm-zoned-reclaim.c
1440     @@ -37,7 +37,7 @@ enum {
1441     /*
1442     * Number of seconds of target BIO inactivity to consider the target idle.
1443     */
1444     -#define DMZ_IDLE_PERIOD (10UL * HZ)
1445     +#define DMZ_IDLE_PERIOD (10UL * HZ)
1446    
1447     /*
1448     * Percentage of unmapped (free) random zones below which reclaim starts
1449     @@ -134,6 +134,9 @@ static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
1450     set_bit(DM_KCOPYD_WRITE_SEQ, &flags);
1451    
1452     while (block < end_block) {
1453     + if (dev->flags & DMZ_BDEV_DYING)
1454     + return -EIO;
1455     +
1456     /* Get a valid region from the source zone */
1457     ret = dmz_first_valid_block(zmd, src_zone, &block);
1458     if (ret <= 0)
1459     @@ -215,7 +218,7 @@ static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1460    
1461     dmz_unlock_flush(zmd);
1462    
1463     - return 0;
1464     + return ret;
1465     }
1466    
1467     /*
1468     @@ -259,7 +262,7 @@ static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1469    
1470     dmz_unlock_flush(zmd);
1471    
1472     - return 0;
1473     + return ret;
1474     }
1475    
1476     /*
1477     @@ -312,7 +315,7 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1478    
1479     dmz_unlock_flush(zmd);
1480    
1481     - return 0;
1482     + return ret;
1483     }
1484    
1485     /*
1486     @@ -334,7 +337,7 @@ static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
1487     /*
1488     * Find a candidate zone for reclaim and process it.
1489     */
1490     -static void dmz_reclaim(struct dmz_reclaim *zrc)
1491     +static int dmz_do_reclaim(struct dmz_reclaim *zrc)
1492     {
1493     struct dmz_metadata *zmd = zrc->metadata;
1494     struct dm_zone *dzone;
1495     @@ -344,8 +347,8 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
1496    
1497     /* Get a data zone */
1498     dzone = dmz_get_zone_for_reclaim(zmd);
1499     - if (!dzone)
1500     - return;
1501     + if (IS_ERR(dzone))
1502     + return PTR_ERR(dzone);
1503    
1504     start = jiffies;
1505    
1506     @@ -391,13 +394,20 @@ static void dmz_reclaim(struct dmz_reclaim *zrc)
1507     out:
1508     if (ret) {
1509     dmz_unlock_zone_reclaim(dzone);
1510     - return;
1511     + return ret;
1512     }
1513    
1514     - (void) dmz_flush_metadata(zrc->metadata);
1515     + ret = dmz_flush_metadata(zrc->metadata);
1516     + if (ret) {
1517     + dmz_dev_debug(zrc->dev,
1518     + "Metadata flush for zone %u failed, err %d\n",
1519     + dmz_id(zmd, rzone), ret);
1520     + return ret;
1521     + }
1522    
1523     dmz_dev_debug(zrc->dev, "Reclaimed zone %u in %u ms",
1524     dmz_id(zmd, rzone), jiffies_to_msecs(jiffies - start));
1525     + return 0;
1526     }
1527    
1528     /*
1529     @@ -442,6 +452,10 @@ static void dmz_reclaim_work(struct work_struct *work)
1530     struct dmz_metadata *zmd = zrc->metadata;
1531     unsigned int nr_rnd, nr_unmap_rnd;
1532     unsigned int p_unmap_rnd;
1533     + int ret;
1534     +
1535     + if (dmz_bdev_is_dying(zrc->dev))
1536     + return;
1537    
1538     if (!dmz_should_reclaim(zrc)) {
1539     mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
1540     @@ -471,7 +485,17 @@ static void dmz_reclaim_work(struct work_struct *work)
1541     (dmz_target_idle(zrc) ? "Idle" : "Busy"),
1542     p_unmap_rnd, nr_unmap_rnd, nr_rnd);
1543    
1544     - dmz_reclaim(zrc);
1545     + ret = dmz_do_reclaim(zrc);
1546     + if (ret) {
1547     + dmz_dev_debug(zrc->dev, "Reclaim error %d\n", ret);
1548     + if (ret == -EIO)
1549     + /*
1550     + * LLD might be performing some error handling sequence
1551     + * at the underlying device. To not interfere, do not
1552     + * attempt to schedule the next reclaim run immediately.
1553     + */
1554     + return;
1555     + }
1556    
1557     dmz_schedule_reclaim(zrc);
1558     }
1559     diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
1560     index 85fb2baa8a7f..1030c42add05 100644
1561     --- a/drivers/md/dm-zoned-target.c
1562     +++ b/drivers/md/dm-zoned-target.c
1563     @@ -133,6 +133,8 @@ static int dmz_submit_bio(struct dmz_target *dmz, struct dm_zone *zone,
1564    
1565     atomic_inc(&bioctx->ref);
1566     generic_make_request(clone);
1567     + if (clone->bi_status == BLK_STS_IOERR)
1568     + return -EIO;
1569    
1570     if (bio_op(bio) == REQ_OP_WRITE && dmz_is_seq(zone))
1571     zone->wp_block += nr_blocks;
1572     @@ -277,8 +279,8 @@ static int dmz_handle_buffered_write(struct dmz_target *dmz,
1573    
1574     /* Get the buffer zone. One will be allocated if needed */
1575     bzone = dmz_get_chunk_buffer(zmd, zone);
1576     - if (!bzone)
1577     - return -ENOSPC;
1578     + if (IS_ERR(bzone))
1579     + return PTR_ERR(bzone);
1580    
1581     if (dmz_is_readonly(bzone))
1582     return -EROFS;
1583     @@ -389,6 +391,11 @@ static void dmz_handle_bio(struct dmz_target *dmz, struct dm_chunk_work *cw,
1584    
1585     dmz_lock_metadata(zmd);
1586    
1587     + if (dmz->dev->flags & DMZ_BDEV_DYING) {
1588     + ret = -EIO;
1589     + goto out;
1590     + }
1591     +
1592     /*
1593     * Get the data zone mapping the chunk. There may be no
1594     * mapping for read and discard. If a mapping is obtained,
1595     @@ -493,6 +500,8 @@ static void dmz_flush_work(struct work_struct *work)
1596    
1597     /* Flush dirty metadata blocks */
1598     ret = dmz_flush_metadata(dmz->metadata);
1599     + if (ret)
1600     + dmz_dev_debug(dmz->dev, "Metadata flush failed, rc=%d\n", ret);
1601    
1602     /* Process queued flush requests */
1603     while (1) {
1604     @@ -513,22 +522,24 @@ static void dmz_flush_work(struct work_struct *work)
1605     * Get a chunk work and start it to process a new BIO.
1606     * If the BIO chunk has no work yet, create one.
1607     */
1608     -static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1609     +static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1610     {
1611     unsigned int chunk = dmz_bio_chunk(dmz->dev, bio);
1612     struct dm_chunk_work *cw;
1613     + int ret = 0;
1614    
1615     mutex_lock(&dmz->chunk_lock);
1616    
1617     /* Get the BIO chunk work. If one is not active yet, create one */
1618     cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
1619     if (!cw) {
1620     - int ret;
1621    
1622     /* Create a new chunk work */
1623     cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
1624     - if (!cw)
1625     + if (unlikely(!cw)) {
1626     + ret = -ENOMEM;
1627     goto out;
1628     + }
1629    
1630     INIT_WORK(&cw->work, dmz_chunk_work);
1631     atomic_set(&cw->refcount, 0);
1632     @@ -539,7 +550,6 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1633     ret = radix_tree_insert(&dmz->chunk_rxtree, chunk, cw);
1634     if (unlikely(ret)) {
1635     kfree(cw);
1636     - cw = NULL;
1637     goto out;
1638     }
1639     }
1640     @@ -547,10 +557,38 @@ static void dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
1641     bio_list_add(&cw->bio_list, bio);
1642     dmz_get_chunk_work(cw);
1643    
1644     + dmz_reclaim_bio_acc(dmz->reclaim);
1645     if (queue_work(dmz->chunk_wq, &cw->work))
1646     dmz_get_chunk_work(cw);
1647     out:
1648     mutex_unlock(&dmz->chunk_lock);
1649     + return ret;
1650     +}
1651     +
1652     +/*
1653     + * Check the backing device availability. If it's on the way out,
1654     + * start failing I/O. Reclaim and metadata components also call this
1655     + * function to cleanly abort operation in the event of such failure.
1656     + */
1657     +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev)
1658     +{
1659     + struct gendisk *disk;
1660     +
1661     + if (!(dmz_dev->flags & DMZ_BDEV_DYING)) {
1662     + disk = dmz_dev->bdev->bd_disk;
1663     + if (blk_queue_dying(bdev_get_queue(dmz_dev->bdev))) {
1664     + dmz_dev_warn(dmz_dev, "Backing device queue dying");
1665     + dmz_dev->flags |= DMZ_BDEV_DYING;
1666     + } else if (disk->fops->check_events) {
1667     + if (disk->fops->check_events(disk, 0) &
1668     + DISK_EVENT_MEDIA_CHANGE) {
1669     + dmz_dev_warn(dmz_dev, "Backing device offline");
1670     + dmz_dev->flags |= DMZ_BDEV_DYING;
1671     + }
1672     + }
1673     + }
1674     +
1675     + return dmz_dev->flags & DMZ_BDEV_DYING;
1676     }
1677    
1678     /*
1679     @@ -564,6 +602,10 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
1680     sector_t sector = bio->bi_iter.bi_sector;
1681     unsigned int nr_sectors = bio_sectors(bio);
1682     sector_t chunk_sector;
1683     + int ret;
1684     +
1685     + if (dmz_bdev_is_dying(dmz->dev))
1686     + return DM_MAPIO_KILL;
1687    
1688     dmz_dev_debug(dev, "BIO op %d sector %llu + %u => chunk %llu, block %llu, %u blocks",
1689     bio_op(bio), (unsigned long long)sector, nr_sectors,
1690     @@ -601,8 +643,14 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
1691     dm_accept_partial_bio(bio, dev->zone_nr_sectors - chunk_sector);
1692    
1693     /* Now ready to handle this BIO */
1694     - dmz_reclaim_bio_acc(dmz->reclaim);
1695     - dmz_queue_chunk_work(dmz, bio);
1696     + ret = dmz_queue_chunk_work(dmz, bio);
1697     + if (ret) {
1698     + dmz_dev_debug(dmz->dev,
1699     + "BIO op %d, can't process chunk %llu, err %i\n",
1700     + bio_op(bio), (u64)dmz_bio_chunk(dmz->dev, bio),
1701     + ret);
1702     + return DM_MAPIO_REQUEUE;
1703     + }
1704    
1705     return DM_MAPIO_SUBMITTED;
1706     }
1707     @@ -856,6 +904,9 @@ static int dmz_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
1708     {
1709     struct dmz_target *dmz = ti->private;
1710    
1711     + if (dmz_bdev_is_dying(dmz->dev))
1712     + return -ENODEV;
1713     +
1714     *bdev = dmz->dev->bdev;
1715    
1716     return 0;
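dmz_bdev_is_dying() is deliberately sticky: once DMZ_BDEV_DYING is latched into dev->flags, every later caller (dmz_map(), reclaim, the metadata flush) fails fast without touching the request queue again. A runnable toy of that latch-and-gate pattern follows; backing_queue_dying() is a hypothetical stand-in for the blk_queue_dying()/check_events() probes.

#include <stdbool.h>
#include <stdio.h>

#define BDEV_DYING (1u << 0)    /* mirrors DMZ_BDEV_DYING */

struct dev { unsigned int flags; };

/* hypothetical stand-in for the queue/media probes */
static bool backing_queue_dying(void) { return true; }

static bool dev_is_dying(struct dev *d)
{
        if (!(d->flags & BDEV_DYING) && backing_queue_dying()) {
                fprintf(stderr, "backing device dying\n");
                d->flags |= BDEV_DYING; /* latch: never cleared */
        }
        return d->flags & BDEV_DYING;
}

static int do_work(struct dev *d)
{
        if (dev_is_dying(d))
                return -5;      /* -EIO: fail fast, as dmz_map() does */
        return 0;
}

int main(void)
{
        struct dev d = { 0 };

        printf("first:  %d\n", do_work(&d));
        printf("second: %d\n", do_work(&d));  /* still failing: latched */
        return 0;
}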
1717     diff --git a/drivers/md/dm-zoned.h b/drivers/md/dm-zoned.h
1718     index ed8de49c9a08..93a64529f219 100644
1719     --- a/drivers/md/dm-zoned.h
1720     +++ b/drivers/md/dm-zoned.h
1721     @@ -56,6 +56,8 @@ struct dmz_dev {
1722    
1723     unsigned int nr_zones;
1724    
1725     + unsigned int flags;
1726     +
1727     sector_t zone_nr_sectors;
1728     unsigned int zone_nr_sectors_shift;
1729    
1730     @@ -67,6 +69,9 @@ struct dmz_dev {
1731     (dev)->zone_nr_sectors_shift)
1732     #define dmz_chunk_block(dev, b) ((b) & ((dev)->zone_nr_blocks - 1))
1733    
1734     +/* Device flags. */
1735     +#define DMZ_BDEV_DYING (1 << 0)
1736     +
1737     /*
1738     * Zone descriptor.
1739     */
1740     @@ -245,4 +250,9 @@ void dmz_resume_reclaim(struct dmz_reclaim *zrc);
1741     void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
1742     void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
1743    
1744     +/*
1745     + * Functions defined in dm-zoned-target.c
1746     + */
1747     +bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
1748     +
1749     #endif /* DM_ZONED_H */
1750     diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
1751     index 58b319757b1e..8aae0624a297 100644
1752     --- a/drivers/md/persistent-data/dm-btree.c
1753     +++ b/drivers/md/persistent-data/dm-btree.c
1754     @@ -628,39 +628,40 @@ static int btree_split_beneath(struct shadow_spine *s, uint64_t key)
1755    
1756     new_parent = shadow_current(s);
1757    
1758     + pn = dm_block_data(new_parent);
1759     + size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1760     + sizeof(__le64) : s->info->value_type.size;
1761     +
1762     + /* create & init the left block */
1763     r = new_block(s->info, &left);
1764     if (r < 0)
1765     return r;
1766    
1767     + ln = dm_block_data(left);
1768     + nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1769     +
1770     + ln->header.flags = pn->header.flags;
1771     + ln->header.nr_entries = cpu_to_le32(nr_left);
1772     + ln->header.max_entries = pn->header.max_entries;
1773     + ln->header.value_size = pn->header.value_size;
1774     + memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1775     + memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1776     +
1777     + /* create & init the right block */
1778     r = new_block(s->info, &right);
1779     if (r < 0) {
1780     unlock_block(s->info, left);
1781     return r;
1782     }
1783    
1784     - pn = dm_block_data(new_parent);
1785     - ln = dm_block_data(left);
1786     rn = dm_block_data(right);
1787     -
1788     - nr_left = le32_to_cpu(pn->header.nr_entries) / 2;
1789     nr_right = le32_to_cpu(pn->header.nr_entries) - nr_left;
1790    
1791     - ln->header.flags = pn->header.flags;
1792     - ln->header.nr_entries = cpu_to_le32(nr_left);
1793     - ln->header.max_entries = pn->header.max_entries;
1794     - ln->header.value_size = pn->header.value_size;
1795     -
1796     rn->header.flags = pn->header.flags;
1797     rn->header.nr_entries = cpu_to_le32(nr_right);
1798     rn->header.max_entries = pn->header.max_entries;
1799     rn->header.value_size = pn->header.value_size;
1800     -
1801     - memcpy(ln->keys, pn->keys, nr_left * sizeof(pn->keys[0]));
1802     memcpy(rn->keys, pn->keys + nr_left, nr_right * sizeof(pn->keys[0]));
1803     -
1804     - size = le32_to_cpu(pn->header.flags) & INTERNAL_NODE ?
1805     - sizeof(__le64) : s->info->value_type.size;
1806     - memcpy(value_ptr(ln, 0), value_ptr(pn, 0), nr_left * size);
1807     memcpy(value_ptr(rn, 0), value_ptr(pn, nr_left),
1808     nr_right * size);
1809    
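The btree_split_beneath() reordering finishes the left child completely (header, keys and values) before the right child is allocated, so the only unwind the second new_block() failure needs is releasing the left block. A small self-contained sketch of that finish-one-before-starting-the-next shape, with hypothetical node helpers:

#include <stdlib.h>
#include <string.h>

struct node { int nr; int keys[8]; };

/* hypothetical helper standing in for new_block() */
static struct node *node_alloc(void) { return calloc(1, sizeof(struct node)); }

static int split(const struct node *parent,
                 struct node **lp, struct node **rp)
{
        int nr_left = parent->nr / 2;
        struct node *left, *right;

        left = node_alloc();
        if (!left)
                return -1;
        /* left is fully initialized before the second allocation ... */
        left->nr = nr_left;
        memcpy(left->keys, parent->keys, nr_left * sizeof(int));

        right = node_alloc();
        if (!right) {
                free(left);     /* ... so this is the whole unwind */
                return -1;
        }
        right->nr = parent->nr - nr_left;
        memcpy(right->keys, parent->keys + nr_left, right->nr * sizeof(int));

        *lp = left;
        *rp = right;
        return 0;
}

int main(void)
{
        struct node p = { .nr = 6, .keys = { 1, 2, 3, 4, 5, 6 } };
        struct node *l, *r;

        return split(&p, &l, &r);
}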
1810     diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
1811     index aec449243966..25328582cc48 100644
1812     --- a/drivers/md/persistent-data/dm-space-map-metadata.c
1813     +++ b/drivers/md/persistent-data/dm-space-map-metadata.c
1814     @@ -249,7 +249,7 @@ static int out(struct sm_metadata *smm)
1815     }
1816    
1817     if (smm->recursion_count == 1)
1818     - apply_bops(smm);
1819     + r = apply_bops(smm);
1820    
1821     smm->recursion_count--;
1822    
1823     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1824     index 8f14f85b8e95..0d2392c4b625 100644
1825     --- a/drivers/net/bonding/bond_main.c
1826     +++ b/drivers/net/bonding/bond_main.c
1827     @@ -2190,6 +2190,15 @@ static void bond_miimon_commit(struct bonding *bond)
1828     bond_for_each_slave(bond, slave, iter) {
1829     switch (slave->new_link) {
1830     case BOND_LINK_NOCHANGE:
1831     + /* For 802.3ad mode, check current slave speed and
1832     + * duplex again in case its port was disabled after
1833     + * invalid speed/duplex reporting but recovered before
1834     + * link monitoring could make a decision on the actual
1835     + * link status
1836     + */
1837     + if (BOND_MODE(bond) == BOND_MODE_8023AD &&
1838     + slave->link == BOND_LINK_UP)
1839     + bond_3ad_adapter_speed_duplex_changed(slave);
1840     continue;
1841    
1842     case BOND_LINK_UP:
1843     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1844     index c05e4d50d43d..bd127ce3aba2 100644
1845     --- a/drivers/net/can/dev.c
1846     +++ b/drivers/net/can/dev.c
1847     @@ -1260,6 +1260,8 @@ int register_candev(struct net_device *dev)
1848     return -EINVAL;
1849    
1850     dev->rtnl_link_ops = &can_link_ops;
1851     + netif_carrier_off(dev);
1852     +
1853     return register_netdev(dev);
1854     }
1855     EXPORT_SYMBOL_GPL(register_candev);
1856     diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
1857     index b8c39ede7cd5..179bfcd541f2 100644
1858     --- a/drivers/net/can/sja1000/peak_pcmcia.c
1859     +++ b/drivers/net/can/sja1000/peak_pcmcia.c
1860     @@ -487,7 +487,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
1861     if (!netdev)
1862     continue;
1863    
1864     - strncpy(name, netdev->name, IFNAMSIZ);
1865     + strlcpy(name, netdev->name, IFNAMSIZ);
1866    
1867     unregister_sja1000dev(netdev);
1868    
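The strncpy() to strlcpy() conversions here (and in peak_usb and qed below) close the classic gap: when the source is at least as long as the buffer, strncpy() writes no terminating NUL, and any later read of the name runs past the buffer. strlcpy() always terminates, truncating if necessary. A self-contained demo; since glibc does not ship strlcpy(), a minimal version with the usual BSD/kernel semantics is defined locally:

#include <stdio.h>
#include <string.h>

#define IFNAMSIZ 16

/* minimal strlcpy: always NUL-terminate, return strlen(src)
 * so truncation is detectable */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;
                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        const char *src = "0123456789abcdef"; /* exactly IFNAMSIZ chars */
        char a[IFNAMSIZ], b[IFNAMSIZ];

        strncpy(a, src, IFNAMSIZ);      /* a is NOT NUL-terminated */
        my_strlcpy(b, src, IFNAMSIZ);   /* b is "0123456789abcde\0" */

        printf("strlcpy result: \"%s\"\n", b);
        /* printing a with %s here would read past the buffer */
        return 0;
}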
1869     diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
1870     index da64e71a62ee..fccb6bf21fad 100644
1871     --- a/drivers/net/can/spi/mcp251x.c
1872     +++ b/drivers/net/can/spi/mcp251x.c
1873     @@ -678,17 +678,6 @@ static int mcp251x_power_enable(struct regulator *reg, int enable)
1874     return regulator_disable(reg);
1875     }
1876    
1877     -static void mcp251x_open_clean(struct net_device *net)
1878     -{
1879     - struct mcp251x_priv *priv = netdev_priv(net);
1880     - struct spi_device *spi = priv->spi;
1881     -
1882     - free_irq(spi->irq, priv);
1883     - mcp251x_hw_sleep(spi);
1884     - mcp251x_power_enable(priv->transceiver, 0);
1885     - close_candev(net);
1886     -}
1887     -
1888     static int mcp251x_stop(struct net_device *net)
1889     {
1890     struct mcp251x_priv *priv = netdev_priv(net);
1891     @@ -954,37 +943,43 @@ static int mcp251x_open(struct net_device *net)
1892     flags | IRQF_ONESHOT, DEVICE_NAME, priv);
1893     if (ret) {
1894     dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
1895     - mcp251x_power_enable(priv->transceiver, 0);
1896     - close_candev(net);
1897     - goto open_unlock;
1898     + goto out_close;
1899     }
1900    
1901     priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM,
1902     0);
1903     + if (!priv->wq) {
1904     + ret = -ENOMEM;
1905     + goto out_clean;
1906     + }
1907     INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
1908     INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
1909    
1910     ret = mcp251x_hw_reset(spi);
1911     - if (ret) {
1912     - mcp251x_open_clean(net);
1913     - goto open_unlock;
1914     - }
1915     + if (ret)
1916     + goto out_free_wq;
1917     ret = mcp251x_setup(net, spi);
1918     - if (ret) {
1919     - mcp251x_open_clean(net);
1920     - goto open_unlock;
1921     - }
1922     + if (ret)
1923     + goto out_free_wq;
1924     ret = mcp251x_set_normal_mode(spi);
1925     - if (ret) {
1926     - mcp251x_open_clean(net);
1927     - goto open_unlock;
1928     - }
1929     + if (ret)
1930     + goto out_free_wq;
1931    
1932     can_led_event(net, CAN_LED_EVENT_OPEN);
1933    
1934     netif_wake_queue(net);
1935     + mutex_unlock(&priv->mcp_lock);
1936    
1937     -open_unlock:
1938     + return 0;
1939     +
1940     +out_free_wq:
1941     + destroy_workqueue(priv->wq);
1942     +out_clean:
1943     + free_irq(spi->irq, priv);
1944     + mcp251x_hw_sleep(spi);
1945     +out_close:
1946     + mcp251x_power_enable(priv->transceiver, 0);
1947     + close_candev(net);
1948     mutex_unlock(&priv->mcp_lock);
1949     return ret;
1950     }
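The mcp251x_open() rework drops the one-size-fits-all mcp251x_open_clean() in favor of the standard unwind ladder: each failure jumps to the label that releases exactly what has been acquired so far, and the labels fall through in reverse acquisition order (out_free_wq into out_clean into out_close). A runnable toy of the same shape; the acquire/release stages are hypothetical stand-ins:

#include <stdio.h>

/* hypothetical stages standing in for request_threaded_irq(),
 * alloc_workqueue(), hardware setup */
static int grab_irq(void)   { return 0; }
static int make_wq(void)    { return 0; }
static int hw_setup(void)   { return -1; }  /* force a failure */
static void put_irq(void)   { puts("free irq"); }
static void kill_wq(void)   { puts("destroy wq"); }
static void power_off(void) { puts("power off + close"); }

static int open_dev(void)
{
        int ret;

        ret = grab_irq();
        if (ret)
                goto out_close;
        ret = make_wq();
        if (ret)
                goto out_clean;
        ret = hw_setup();
        if (ret)
                goto out_free_wq;
        return 0;

out_free_wq:    /* undo in reverse order of setup */
        kill_wq();
out_clean:
        put_irq();
out_close:
        power_off();
        return ret;
}

int main(void)
{
        return open_dev() ? 1 : 0;
}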
1951     diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1952     index 740ef47eab01..43b0fa2b9932 100644
1953     --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1954     +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
1955     @@ -863,7 +863,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
1956    
1957     dev_prev_siblings = dev->prev_siblings;
1958     dev->state &= ~PCAN_USB_STATE_CONNECTED;
1959     - strncpy(name, netdev->name, IFNAMSIZ);
1960     + strlcpy(name, netdev->name, IFNAMSIZ);
1961    
1962     unregister_netdev(netdev);
1963    
1964     diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1965     index c34ea385fe4a..6be6de0774b6 100644
1966     --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1967     +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
1968     @@ -3270,7 +3270,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1969     if (!adapter->regs) {
1970     dev_err(&pdev->dev, "cannot map device registers\n");
1971     err = -ENOMEM;
1972     - goto out_free_adapter;
1973     + goto out_free_adapter_nofail;
1974     }
1975    
1976     adapter->pdev = pdev;
1977     @@ -3398,6 +3398,9 @@ out_free_dev:
1978     if (adapter->port[i])
1979     free_netdev(adapter->port[i]);
1980    
1981     +out_free_adapter_nofail:
1982     + kfree_skb(adapter->nofail_skb);
1983     +
1984     out_free_adapter:
1985     kfree(adapter);
1986    
1987     diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
1988     index 6127697ede12..a91d49dd92ea 100644
1989     --- a/drivers/net/ethernet/hisilicon/hip04_eth.c
1990     +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
1991     @@ -157,6 +157,7 @@ struct hip04_priv {
1992     unsigned int reg_inten;
1993    
1994     struct napi_struct napi;
1995     + struct device *dev;
1996     struct net_device *ndev;
1997    
1998     struct tx_desc *tx_desc;
1999     @@ -185,7 +186,7 @@ struct hip04_priv {
2000    
2001     static inline unsigned int tx_count(unsigned int head, unsigned int tail)
2002     {
2003     - return (head - tail) % (TX_DESC_NUM - 1);
2004     + return (head - tail) % TX_DESC_NUM;
2005     }
2006    
2007     static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
2008     @@ -387,7 +388,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force)
2009     }
2010    
2011     if (priv->tx_phys[tx_tail]) {
2012     - dma_unmap_single(&ndev->dev, priv->tx_phys[tx_tail],
2013     + dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
2014     priv->tx_skb[tx_tail]->len,
2015     DMA_TO_DEVICE);
2016     priv->tx_phys[tx_tail] = 0;
2017     @@ -437,8 +438,8 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2018     return NETDEV_TX_BUSY;
2019     }
2020    
2021     - phys = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
2022     - if (dma_mapping_error(&ndev->dev, phys)) {
2023     + phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
2024     + if (dma_mapping_error(priv->dev, phys)) {
2025     dev_kfree_skb(skb);
2026     return NETDEV_TX_OK;
2027     }
2028     @@ -497,6 +498,9 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
2029     u16 len;
2030     u32 err;
2031    
2032     + /* clean up tx descriptors */
2033     + tx_remaining = hip04_tx_reclaim(ndev, false);
2034     +
2035     while (cnt && !last) {
2036     buf = priv->rx_buf[priv->rx_head];
2037     skb = build_skb(buf, priv->rx_buf_size);
2038     @@ -505,7 +509,7 @@ static int hip04_rx_poll(struct napi_struct *napi, int budget)
2039     goto refill;
2040     }
2041    
2042     - dma_unmap_single(&ndev->dev, priv->rx_phys[priv->rx_head],
2043     + dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
2044     RX_BUF_SIZE, DMA_FROM_DEVICE);
2045     priv->rx_phys[priv->rx_head] = 0;
2046    
2047     @@ -534,9 +538,9 @@ refill:
2048     buf = netdev_alloc_frag(priv->rx_buf_size);
2049     if (!buf)
2050     goto done;
2051     - phys = dma_map_single(&ndev->dev, buf,
2052     + phys = dma_map_single(priv->dev, buf,
2053     RX_BUF_SIZE, DMA_FROM_DEVICE);
2054     - if (dma_mapping_error(&ndev->dev, phys))
2055     + if (dma_mapping_error(priv->dev, phys))
2056     goto done;
2057     priv->rx_buf[priv->rx_head] = buf;
2058     priv->rx_phys[priv->rx_head] = phys;
2059     @@ -557,8 +561,7 @@ refill:
2060     }
2061     napi_complete_done(napi, rx);
2062     done:
2063     - /* clean up tx descriptors and start a new timer if necessary */
2064     - tx_remaining = hip04_tx_reclaim(ndev, false);
2065     + /* start a new timer if necessary */
2066     if (rx < budget && tx_remaining)
2067     hip04_start_tx_timer(priv);
2068    
2069     @@ -640,9 +643,9 @@ static int hip04_mac_open(struct net_device *ndev)
2070     for (i = 0; i < RX_DESC_NUM; i++) {
2071     dma_addr_t phys;
2072    
2073     - phys = dma_map_single(&ndev->dev, priv->rx_buf[i],
2074     + phys = dma_map_single(priv->dev, priv->rx_buf[i],
2075     RX_BUF_SIZE, DMA_FROM_DEVICE);
2076     - if (dma_mapping_error(&ndev->dev, phys))
2077     + if (dma_mapping_error(priv->dev, phys))
2078     return -EIO;
2079    
2080     priv->rx_phys[i] = phys;
2081     @@ -676,7 +679,7 @@ static int hip04_mac_stop(struct net_device *ndev)
2082    
2083     for (i = 0; i < RX_DESC_NUM; i++) {
2084     if (priv->rx_phys[i]) {
2085     - dma_unmap_single(&ndev->dev, priv->rx_phys[i],
2086     + dma_unmap_single(priv->dev, priv->rx_phys[i],
2087     RX_BUF_SIZE, DMA_FROM_DEVICE);
2088     priv->rx_phys[i] = 0;
2089     }
2090     @@ -820,6 +823,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
2091     return -ENOMEM;
2092    
2093     priv = netdev_priv(ndev);
2094     + priv->dev = d;
2095     priv->ndev = ndev;
2096     platform_set_drvdata(pdev, ndev);
2097     SET_NETDEV_DEV(ndev, &pdev->dev);
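Two independent hip04_eth fixes land here: DMA mapping switches from &ndev->dev, which carries no DMA configuration, to the platform device saved in priv->dev, and tx_count() now reduces modulo TX_DESC_NUM rather than TX_DESC_NUM - 1. With indices already wrapped into [0, TX_DESC_NUM), the fixed form is exact, while the old divisor can report a completely full ring as empty, as this demo shows:

#include <stdio.h>

#define TX_DESC_NUM 256u        /* power of two, as in the driver */

/* occupancy of a ring whose indices are already in [0, SIZE) */
static unsigned int tx_count(unsigned int head, unsigned int tail)
{
        return (head - tail) % TX_DESC_NUM;     /* fixed divisor */
}

static unsigned int tx_count_buggy(unsigned int head, unsigned int tail)
{
        return (head - tail) % (TX_DESC_NUM - 1);
}

int main(void)
{
        /* a full ring: 255 descriptors in flight */
        unsigned int head = 255, tail = 0;

        printf("fixed: %u\n", tx_count(head, tail));        /* 255 */
        printf("buggy: %u\n", tx_count_buggy(head, tail));  /* 0 */
        return 0;
}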
2098     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2099     index 6455511457ca..9b608d23ff7e 100644
2100     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2101     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2102     @@ -4412,9 +4412,9 @@ static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
2103     if (state->pause & MLO_PAUSE_RX)
2104     ctrl0 |= MVPP22_XLG_CTRL0_RX_FLOW_CTRL_EN;
2105    
2106     - ctrl4 &= ~MVPP22_XLG_CTRL4_MACMODSELECT_GMAC;
2107     - ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC |
2108     - MVPP22_XLG_CTRL4_EN_IDLE_CHECK;
2109     + ctrl4 &= ~(MVPP22_XLG_CTRL4_MACMODSELECT_GMAC |
2110     + MVPP22_XLG_CTRL4_EN_IDLE_CHECK);
2111     + ctrl4 |= MVPP22_XLG_CTRL4_FWD_FC | MVPP22_XLG_CTRL4_FWD_PFC;
2112    
2113     writel(ctrl0, port->base + MVPP22_XLG_CTRL0_REG);
2114     writel(ctrl4, port->base + MVPP22_XLG_CTRL4_REG);
2115     diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
2116     index b22f464ea3fa..f9e475075d3e 100644
2117     --- a/drivers/net/ethernet/qlogic/qed/qed_int.c
2118     +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
2119     @@ -939,7 +939,7 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
2120     snprintf(bit_name, 30,
2121     p_aeu->bit_name, num);
2122     else
2123     - strncpy(bit_name,
2124     + strlcpy(bit_name,
2125     p_aeu->bit_name, 30);
2126    
2127     /* We now need to pass bitmask in its
2128     diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2129     index 13802b825d65..909422d93903 100644
2130     --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2131     +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
2132     @@ -442,7 +442,7 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
2133     /* Vendor specific information */
2134     dev->vendor_id = cdev->vendor_id;
2135     dev->vendor_part_id = cdev->device_id;
2136     - dev->hw_ver = 0;
2137     + dev->hw_ver = cdev->chip_rev;
2138     dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
2139     (FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
2140    
2141     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2142     index d0e6e1503581..48cf5e2b2441 100644
2143     --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2144     +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
2145     @@ -88,6 +88,8 @@ static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
2146     u32 value;
2147    
2148     base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
2149     + if (queue >= 4)
2150     + queue -= 4;
2151    
2152     value = readl(ioaddr + base_register);
2153    
2154     @@ -105,6 +107,8 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
2155     u32 value;
2156    
2157     base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
2158     + if (queue >= 4)
2159     + queue -= 4;
2160    
2161     value = readl(ioaddr + base_register);
2162    
2163     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2164     index d182f82f7b58..870302a7177e 100644
2165     --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2166     +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
2167     @@ -106,6 +106,8 @@ static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
2168     u32 value, reg;
2169    
2170     reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
2171     + if (queue >= 4)
2172     + queue -= 4;
2173    
2174     value = readl(ioaddr + reg);
2175     value &= ~XGMAC_PSRQ(queue);
2176     @@ -169,6 +171,8 @@ static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
2177     u32 value, reg;
2178    
2179     reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
2180     + if (queue >= 4)
2181     + queue -= 4;
2182    
2183     value = readl(ioaddr + reg);
2184     value &= ~XGMAC_QxMDMACH(queue);
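All the stmmac hunks above fix one pattern: queues 0-3 are packed into one 32-bit register and queues 4-7 into the next, so after picking the register the queue index must be rebased with queue -= 4 before it feeds the field shift; otherwise queues 4-7 shift by 32 bits or more, entirely off the register. A sketch of the select-then-rebase arithmetic, assuming four 8-bit fields per register as in GMAC_RXQ_CTRL2/3:

#include <stdio.h>

#define FIELD_BITS 8    /* four 8-bit fields per 32-bit register */

struct qmap { unsigned int reg; unsigned int shift; };

static struct qmap queue_field(unsigned int queue)
{
        struct qmap m;

        m.reg = (queue < 4) ? 0 : 1;    /* e.g. CTRL2 vs CTRL3 */
        if (queue >= 4)
                queue -= 4;             /* the fix: rebase the index */
        m.shift = queue * FIELD_BITS;
        return m;
}

int main(void)
{
        for (unsigned int q = 0; q < 8; q++) {
                struct qmap m = queue_field(q);
                printf("queue %u -> reg %u, shift %2u\n", q, m.reg, m.shift);
        }
        /* without the rebase, queues 4..7 would shift 32..56 bits:
         * entirely outside a 32-bit register */
        return 0;
}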
2185     diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2186     index 58ea18af9813..37c0bc699cd9 100644
2187     --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2188     +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
2189     @@ -37,7 +37,7 @@ static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
2190     entry = &priv->tc_entries[i];
2191     if (!entry->in_use && !first && free)
2192     first = entry;
2193     - if (entry->handle == loc && !free)
2194     + if ((entry->handle == loc) && !free && !entry->is_frag)
2195     dup = entry;
2196     }
2197    
2198     diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
2199     index 491efc1bf5c4..7278eca70f9f 100644
2200     --- a/drivers/net/phy/phy_led_triggers.c
2201     +++ b/drivers/net/phy/phy_led_triggers.c
2202     @@ -58,8 +58,9 @@ void phy_led_trigger_change_speed(struct phy_device *phy)
2203     if (!phy->last_triggered)
2204     led_trigger_event(&phy->led_link_trigger->trigger,
2205     LED_FULL);
2206     + else
2207     + led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
2208    
2209     - led_trigger_event(&phy->last_triggered->trigger, LED_OFF);
2210     led_trigger_event(&plt->trigger, LED_FULL);
2211     phy->last_triggered = plt;
2212     }
2213     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2214     index 128c8a327d8e..51017c6bb3bc 100644
2215     --- a/drivers/net/usb/qmi_wwan.c
2216     +++ b/drivers/net/usb/qmi_wwan.c
2217     @@ -1231,6 +1231,7 @@ static const struct usb_device_id products[] = {
2218     {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
2219     {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
2220     {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
2221     + {QMI_FIXED_INTF(0x2020, 0x2060, 4)}, /* BroadMobi BM818 */
2222     {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
2223     {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
2224     {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
2225     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2226     index 7cd428c0af43..ce2dd06af62e 100644
2227     --- a/drivers/net/wireless/mac80211_hwsim.c
2228     +++ b/drivers/net/wireless/mac80211_hwsim.c
2229     @@ -3502,10 +3502,12 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
2230     hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
2231     cb->nlh->nlmsg_seq, &hwsim_genl_family,
2232     NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
2233     - if (!hdr)
2234     + if (hdr) {
2235     + genl_dump_check_consistent(cb, hdr);
2236     + genlmsg_end(skb, hdr);
2237     + } else {
2238     res = -EMSGSIZE;
2239     - genl_dump_check_consistent(cb, hdr);
2240     - genlmsg_end(skb, hdr);
2241     + }
2242     }
2243    
2244     done:
2245     diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
2246     index f55d082ace71..5d6e7e931bc6 100644
2247     --- a/drivers/nfc/st-nci/se.c
2248     +++ b/drivers/nfc/st-nci/se.c
2249     @@ -344,6 +344,8 @@ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev,
2250    
2251     transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
2252     skb->len - 2, GFP_KERNEL);
2253     + if (!transaction)
2254     + return -ENOMEM;
2255    
2256     transaction->aid_len = skb->data[1];
2257     memcpy(transaction->aid, &skb->data[2], transaction->aid_len);
2258     diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
2259     index 4bed9e842db3..fd967a38a94a 100644
2260     --- a/drivers/nfc/st21nfca/se.c
2261     +++ b/drivers/nfc/st21nfca/se.c
2262     @@ -328,6 +328,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host,
2263    
2264     transaction = (struct nfc_evt_transaction *)devm_kzalloc(dev,
2265     skb->len - 2, GFP_KERNEL);
2266     + if (!transaction)
2267     + return -ENOMEM;
2268    
2269     transaction->aid_len = skb->data[1];
2270     memcpy(transaction->aid, &skb->data[2],
2271     diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
2272     index 9c332a6f6667..476728bdae8c 100644
2273     --- a/fs/ceph/addr.c
2274     +++ b/fs/ceph/addr.c
2275     @@ -913,8 +913,9 @@ get_more_pages:
2276     if (page_offset(page) >= ceph_wbc.i_size) {
2277     dout("%p page eof %llu\n",
2278     page, ceph_wbc.i_size);
2279     - if (ceph_wbc.size_stable ||
2280     - page_offset(page) >= i_size_read(inode))
2281     + if ((ceph_wbc.size_stable ||
2282     + page_offset(page) >= i_size_read(inode)) &&
2283     + clear_page_dirty_for_io(page))
2284     mapping->a_ops->invalidatepage(page,
2285     0, PAGE_SIZE);
2286     unlock_page(page);
2287     diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
2288     index 9dae2ec7e1fa..6a8f4a99582e 100644
2289     --- a/fs/ceph/locks.c
2290     +++ b/fs/ceph/locks.c
2291     @@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
2292     req->r_wait_for_completion = ceph_lock_wait_for_completion;
2293    
2294     err = ceph_mdsc_do_request(mdsc, inode, req);
2295     -
2296     - if (operation == CEPH_MDS_OP_GETFILELOCK) {
2297     + if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
2298     fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
2299     if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
2300     fl->fl_type = F_RDLCK;
2301     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
2302     index 0ccf8f9b63a2..cc9e846a3865 100644
2303     --- a/fs/cifs/smb2ops.c
2304     +++ b/fs/cifs/smb2ops.c
2305     @@ -2545,7 +2545,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
2306     static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
2307     unsigned int buflen)
2308     {
2309     - sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
2310     + void *addr;
2311     + /*
2312     + * VMAP_STACK (at least) puts stack into the vmalloc address space
2313     + */
2314     + if (is_vmalloc_addr(buf))
2315     + addr = vmalloc_to_page(buf);
2316     + else
2317     + addr = virt_to_page(buf);
2318     + sg_set_page(sg, addr, buflen, offset_in_page(buf));
2319     }
2320    
2321     /* Assumes the first rqst has a transform header as the first iov.
2322     @@ -3121,7 +3129,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
2323     {
2324     int ret, length;
2325     char *buf = server->smallbuf;
2326     - char *tmpbuf;
2327     struct smb2_sync_hdr *shdr;
2328     unsigned int pdu_length = server->pdu_size;
2329     unsigned int buf_size;
2330     @@ -3151,18 +3158,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
2331     return length;
2332    
2333     next_is_large = server->large_buf;
2334     - one_more:
2335     +one_more:
2336     shdr = (struct smb2_sync_hdr *)buf;
2337     if (shdr->NextCommand) {
2338     - if (next_is_large) {
2339     - tmpbuf = server->bigbuf;
2340     + if (next_is_large)
2341     next_buffer = (char *)cifs_buf_get();
2342     - } else {
2343     - tmpbuf = server->smallbuf;
2344     + else
2345     next_buffer = (char *)cifs_small_buf_get();
2346     - }
2347     memcpy(next_buffer,
2348     - tmpbuf + le32_to_cpu(shdr->NextCommand),
2349     + buf + le32_to_cpu(shdr->NextCommand),
2350     pdu_length - le32_to_cpu(shdr->NextCommand));
2351     }
2352    
2353     @@ -3191,12 +3195,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
2354     pdu_length -= le32_to_cpu(shdr->NextCommand);
2355     server->large_buf = next_is_large;
2356     if (next_is_large)
2357     - server->bigbuf = next_buffer;
2358     + server->bigbuf = buf = next_buffer;
2359     else
2360     - server->smallbuf = next_buffer;
2361     -
2362     - buf += le32_to_cpu(shdr->NextCommand);
2363     + server->smallbuf = buf = next_buffer;
2364     goto one_more;
2365     + } else if (ret != 0) {
2366     + /*
2367     + * ret != 0 here means that we didn't get to handle_mid() thus
2368     + * server->smallbuf and server->bigbuf are still valid. We need
2369     + * to free next_buffer because it is not going to be used
2370     + * anywhere.
2371     + */
2372     + if (next_is_large)
2373     + free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
2374     + else
2375     + free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
2376     }
2377    
2378     return ret;
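The second cifs hunk plugs a leak with a simple ownership rule: next_buffer only becomes owned by the server struct on the success path (the server->bigbuf/smallbuf assignment); if decryption failed before handle_mid() ran, the function now frees the buffer it still owns instead of dropping it. A minimal hand-off-or-free model, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct server { char *buf; };

static int process(struct server *srv, int fail)
{
        char *next = malloc(64);        /* candidate replacement buffer */

        if (!next)
                return -12;             /* -ENOMEM */

        if (fail) {
                /* never reached the hand-off: we still own next */
                free(next);
                return -5;              /* -EIO */
        }

        /* success: ownership transfers to the server struct */
        free(srv->buf);
        srv->buf = next;
        return 0;
}

int main(void)
{
        struct server srv = { .buf = malloc(64) };

        printf("fail path: %d\n", process(&srv, 1));    /* no leak */
        printf("ok path:   %d\n", process(&srv, 0));
        free(srv.buf);
        return 0;
}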
2379     diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
2380     index 4dc887813c71..a7bc4e0494f9 100644
2381     --- a/fs/nfs/fscache.c
2382     +++ b/fs/nfs/fscache.c
2383     @@ -118,6 +118,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
2384     struct rb_node **p, *parent;
2385     int diff;
2386    
2387     + nfss->fscache_key = NULL;
2388     + nfss->fscache = NULL;
2389     + if (!(nfss->options & NFS_OPTION_FSCACHE))
2390     + return;
2391     if (!uniq) {
2392     uniq = "";
2393     ulen = 1;
2394     @@ -230,10 +234,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
2395     void nfs_fscache_init_inode(struct inode *inode)
2396     {
2397     struct nfs_fscache_inode_auxdata auxdata;
2398     + struct nfs_server *nfss = NFS_SERVER(inode);
2399     struct nfs_inode *nfsi = NFS_I(inode);
2400    
2401     nfsi->fscache = NULL;
2402     - if (!S_ISREG(inode->i_mode))
2403     + if (!(nfss->fscache && S_ISREG(inode->i_mode)))
2404     return;
2405    
2406     memset(&auxdata, 0, sizeof(auxdata));
2407     diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
2408     index 161ba2edb9d0..6363ea956858 100644
2409     --- a/fs/nfs/fscache.h
2410     +++ b/fs/nfs/fscache.h
2411     @@ -186,7 +186,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
2412     */
2413     static inline const char *nfs_server_fscache_state(struct nfs_server *server)
2414     {
2415     - if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
2416     + if (server->fscache)
2417     return "yes";
2418     return "no ";
2419     }
2420     diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
2421     index 63287d911c08..5b61520dce88 100644
2422     --- a/fs/nfs/nfs4_fs.h
2423     +++ b/fs/nfs/nfs4_fs.h
2424     @@ -469,7 +469,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
2425    
2426     extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *, gfp_t);
2427     extern void nfs4_put_state_owner(struct nfs4_state_owner *);
2428     -extern void nfs4_purge_state_owners(struct nfs_server *);
2429     +extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
2430     +extern void nfs4_free_state_owners(struct list_head *head);
2431     extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
2432     extern void nfs4_put_open_state(struct nfs4_state *);
2433     extern void nfs4_close_state(struct nfs4_state *, fmode_t);
2434     diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
2435     index 8f53455c4765..86991bcfbeb1 100644
2436     --- a/fs/nfs/nfs4client.c
2437     +++ b/fs/nfs/nfs4client.c
2438     @@ -754,9 +754,12 @@ out:
2439    
2440     static void nfs4_destroy_server(struct nfs_server *server)
2441     {
2442     + LIST_HEAD(freeme);
2443     +
2444     nfs_server_return_all_delegations(server);
2445     unset_pnfs_layoutdriver(server);
2446     - nfs4_purge_state_owners(server);
2447     + nfs4_purge_state_owners(server, &freeme);
2448     + nfs4_free_state_owners(&freeme);
2449     }
2450    
2451     /*
2452     diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
2453     index 3ba2087469ac..c36ef75f2054 100644
2454     --- a/fs/nfs/nfs4state.c
2455     +++ b/fs/nfs/nfs4state.c
2456     @@ -628,24 +628,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
2457     /**
2458     * nfs4_purge_state_owners - Release all cached state owners
2459     * @server: nfs_server with cached state owners to release
2460     + * @head: resulting list of state owners
2461     *
2462     * Called at umount time. Remaining state owners will be on
2463     * the LRU with ref count of zero.
2464     + * Note that the state owners are not freed, but are added
2465     + * to the list @head, which can later be used as an argument
2466     + * to nfs4_free_state_owners.
2467     */
2468     -void nfs4_purge_state_owners(struct nfs_server *server)
2469     +void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
2470     {
2471     struct nfs_client *clp = server->nfs_client;
2472     struct nfs4_state_owner *sp, *tmp;
2473     - LIST_HEAD(doomed);
2474    
2475     spin_lock(&clp->cl_lock);
2476     list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
2477     - list_move(&sp->so_lru, &doomed);
2478     + list_move(&sp->so_lru, head);
2479     nfs4_remove_state_owner_locked(sp);
2480     }
2481     spin_unlock(&clp->cl_lock);
2482     +}
2483    
2484     - list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
2485     +/**
2486     + * nfs4_free_state_owners - Free a list of state owners
2487     + * @head: list of state owners to free
2488     + *
2489     + * Frees a list of state owners that was generated by
2490     + * nfs4_purge_state_owners().
2491     + */
2492     +void nfs4_free_state_owners(struct list_head *head)
2493     +{
2494     + struct nfs4_state_owner *sp, *tmp;
2495     +
2496     + list_for_each_entry_safe(sp, tmp, head, so_lru) {
2497     list_del(&sp->so_lru);
2498     nfs4_free_state_owner(sp);
2499     }
2500     @@ -1843,12 +1858,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
2501     struct nfs4_state_owner *sp;
2502     struct nfs_server *server;
2503     struct rb_node *pos;
2504     + LIST_HEAD(freeme);
2505     int status = 0;
2506    
2507     restart:
2508     rcu_read_lock();
2509     list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
2510     - nfs4_purge_state_owners(server);
2511     + nfs4_purge_state_owners(server, &freeme);
2512     spin_lock(&clp->cl_lock);
2513     for (pos = rb_first(&server->state_owners);
2514     pos != NULL;
2515     @@ -1877,6 +1893,7 @@ restart:
2516     spin_unlock(&clp->cl_lock);
2517     }
2518     rcu_read_unlock();
2519     + nfs4_free_state_owners(&freeme);
2520     return 0;
2521     }
2522    
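The nfs4_purge_state_owners() split follows the usual detach-under-the-lock, free-outside-it discipline: entries move onto a caller-supplied list head while cl_lock is held, and nfs4_free_state_owners() walks that now-private list afterwards, keeping nfs4_free_state_owner() out of the spinlock's critical section. A compact userspace model, using a pthread mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct owner { struct owner *next; int id; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct owner *lru;       /* shared list, protected by lock */

/* detach everything while holding the lock; free nothing here */
static struct owner *purge_owners(void)
{
        struct owner *head;

        pthread_mutex_lock(&lock);
        head = lru;
        lru = NULL;
        pthread_mutex_unlock(&lock);
        return head;
}

/* walk the now-private list with no lock held */
static void free_owners(struct owner *head)
{
        while (head) {
                struct owner *next = head->next;
                printf("freeing owner %d\n", head->id);
                free(head);
                head = next;
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct owner *o = malloc(sizeof(*o));
                o->id = i;
                o->next = lru;
                lru = o;
        }
        free_owners(purge_owners());
        return 0;
}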
2523     diff --git a/fs/nfs/super.c b/fs/nfs/super.c
2524     index 6df9b85caf20..d90efdea9fbd 100644
2525     --- a/fs/nfs/super.c
2526     +++ b/fs/nfs/super.c
2527     @@ -2239,6 +2239,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
2528     data->acdirmin != nfss->acdirmin / HZ ||
2529     data->acdirmax != nfss->acdirmax / HZ ||
2530     data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
2531     + (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
2532     data->nfs_server.port != nfss->port ||
2533     data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
2534     !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
2535     diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
2536     index e1ebdbe40032..9c2955f67f70 100644
2537     --- a/fs/userfaultfd.c
2538     +++ b/fs/userfaultfd.c
2539     @@ -881,6 +881,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2540     /* len == 0 means wake all */
2541     struct userfaultfd_wake_range range = { .len = 0, };
2542     unsigned long new_flags;
2543     + bool still_valid;
2544    
2545     WRITE_ONCE(ctx->released, true);
2546    
2547     @@ -896,8 +897,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2548     * taking the mmap_sem for writing.
2549     */
2550     down_write(&mm->mmap_sem);
2551     - if (!mmget_still_valid(mm))
2552     - goto skip_mm;
2553     + still_valid = mmget_still_valid(mm);
2554     prev = NULL;
2555     for (vma = mm->mmap; vma; vma = vma->vm_next) {
2556     cond_resched();
2557     @@ -908,19 +908,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
2558     continue;
2559     }
2560     new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
2561     - prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
2562     - new_flags, vma->anon_vma,
2563     - vma->vm_file, vma->vm_pgoff,
2564     - vma_policy(vma),
2565     - NULL_VM_UFFD_CTX);
2566     - if (prev)
2567     - vma = prev;
2568     - else
2569     - prev = vma;
2570     + if (still_valid) {
2571     + prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
2572     + new_flags, vma->anon_vma,
2573     + vma->vm_file, vma->vm_pgoff,
2574     + vma_policy(vma),
2575     + NULL_VM_UFFD_CTX);
2576     + if (prev)
2577     + vma = prev;
2578     + else
2579     + prev = vma;
2580     + }
2581     vma->vm_flags = new_flags;
2582     vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
2583     }
2584     -skip_mm:
2585     up_write(&mm->mmap_sem);
2586     mmput(mm);
2587     wakeup:
2588     diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
2589     index c6299f82a6e4..6410d3e00ce0 100644
2590     --- a/fs/xfs/libxfs/xfs_attr.c
2591     +++ b/fs/xfs/libxfs/xfs_attr.c
2592     @@ -191,6 +191,121 @@ xfs_attr_calc_size(
2593     return nblks;
2594     }
2595    
2596     +STATIC int
2597     +xfs_attr_try_sf_addname(
2598     + struct xfs_inode *dp,
2599     + struct xfs_da_args *args)
2600     +{
2601     +
2602     + struct xfs_mount *mp = dp->i_mount;
2603     + int error, error2;
2604     +
2605     + error = xfs_attr_shortform_addname(args);
2606     + if (error == -ENOSPC)
2607     + return error;
2608     +
2609     + /*
2610     + * Commit the shortform mods, and we're done.
2611     + * NOTE: this is also the error path (EEXIST, etc).
2612     + */
2613     + if (!error && (args->flags & ATTR_KERNOTIME) == 0)
2614     + xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG);
2615     +
2616     + if (mp->m_flags & XFS_MOUNT_WSYNC)
2617     + xfs_trans_set_sync(args->trans);
2618     +
2619     + error2 = xfs_trans_commit(args->trans);
2620     + args->trans = NULL;
2621     + return error ? error : error2;
2622     +}
2623     +
2624     +/*
2625     + * Set the attribute specified in @args.
2626     + */
2627     +int
2628     +xfs_attr_set_args(
2629     + struct xfs_da_args *args)
2630     +{
2631     + struct xfs_inode *dp = args->dp;
2632     + struct xfs_buf *leaf_bp = NULL;
2633     + int error;
2634     +
2635     + /*
2636     + * If the attribute list is non-existent or a shortform list,
2637     + * upgrade it to a single-leaf-block attribute list.
2638     + */
2639     + if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
2640     + (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
2641     + dp->i_d.di_anextents == 0)) {
2642     +
2643     + /*
2644     + * Build initial attribute list (if required).
2645     + */
2646     + if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
2647     + xfs_attr_shortform_create(args);
2648     +
2649     + /*
2650     + * Try to add the attr to the attribute list in the inode.
2651     + */
2652     + error = xfs_attr_try_sf_addname(dp, args);
2653     + if (error != -ENOSPC)
2654     + return error;
2655     +
2656     + /*
2657     + * It won't fit in the shortform, transform to a leaf block.
2658     + * GROT: another possible req'mt for a double-split btree op.
2659     + */
2660     + error = xfs_attr_shortform_to_leaf(args, &leaf_bp);
2661     + if (error)
2662     + return error;
2663     +
2664     + /*
2665     + * Prevent the leaf buffer from being unlocked so that a
2666     + * concurrent AIL push cannot grab the half-baked leaf
2667     + * buffer and run into problems with the write verifier.
2668     + * Once we're done rolling the transaction we can release
2669     + * the hold and add the attr to the leaf.
2670     + */
2671     + xfs_trans_bhold(args->trans, leaf_bp);
2672     + error = xfs_defer_finish(&args->trans);
2673     + xfs_trans_bhold_release(args->trans, leaf_bp);
2674     + if (error) {
2675     + xfs_trans_brelse(args->trans, leaf_bp);
2676     + return error;
2677     + }
2678     + }
2679     +
2680     + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
2681     + error = xfs_attr_leaf_addname(args);
2682     + else
2683     + error = xfs_attr_node_addname(args);
2684     + return error;
2685     +}
2686     +
2687     +/*
2688     + * Remove the attribute specified in @args.
2689     + */
2690     +int
2691     +xfs_attr_remove_args(
2692     + struct xfs_da_args *args)
2693     +{
2694     + struct xfs_inode *dp = args->dp;
2695     + int error;
2696     +
2697     + if (!xfs_inode_hasattr(dp)) {
2698     + error = -ENOATTR;
2699     + } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
2700     + ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
2701     + error = xfs_attr_shortform_remove(args);
2702     + } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
2703     + error = xfs_attr_leaf_removename(args);
2704     + } else {
2705     + error = xfs_attr_node_removename(args);
2706     + }
2707     +
2708     + return error;
2709     +}
2710     +
2711     int
2712     xfs_attr_set(
2713     struct xfs_inode *dp,
2714     @@ -200,11 +315,10 @@ xfs_attr_set(
2715     int flags)
2716     {
2717     struct xfs_mount *mp = dp->i_mount;
2718     - struct xfs_buf *leaf_bp = NULL;
2719     struct xfs_da_args args;
2720     struct xfs_trans_res tres;
2721     int rsvd = (flags & ATTR_ROOT) != 0;
2722     - int error, err2, local;
2723     + int error, local;
2724    
2725     XFS_STATS_INC(mp, xs_attr_set);
2726    
2727     @@ -255,93 +369,17 @@ xfs_attr_set(
2728     error = xfs_trans_reserve_quota_nblks(args.trans, dp, args.total, 0,
2729     rsvd ? XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_FORCE_RES :
2730     XFS_QMOPT_RES_REGBLKS);
2731     - if (error) {
2732     - xfs_iunlock(dp, XFS_ILOCK_EXCL);
2733     - xfs_trans_cancel(args.trans);
2734     - return error;
2735     - }
2736     + if (error)
2737     + goto out_trans_cancel;
2738    
2739     xfs_trans_ijoin(args.trans, dp, 0);
2740     -
2741     - /*
2742     - * If the attribute list is non-existent or a shortform list,
2743     - * upgrade it to a single-leaf-block attribute list.
2744     - */
2745     - if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL ||
2746     - (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS &&
2747     - dp->i_d.di_anextents == 0)) {
2748     -
2749     - /*
2750     - * Build initial attribute list (if required).
2751     - */
2752     - if (dp->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS)
2753     - xfs_attr_shortform_create(&args);
2754     -
2755     - /*
2756     - * Try to add the attr to the attribute list in
2757     - * the inode.
2758     - */
2759     - error = xfs_attr_shortform_addname(&args);
2760     - if (error != -ENOSPC) {
2761     - /*
2762     - * Commit the shortform mods, and we're done.
2763     - * NOTE: this is also the error path (EEXIST, etc).
2764     - */
2765     - ASSERT(args.trans != NULL);
2766     -
2767     - /*
2768     - * If this is a synchronous mount, make sure that
2769     - * the transaction goes to disk before returning
2770     - * to the user.
2771     - */
2772     - if (mp->m_flags & XFS_MOUNT_WSYNC)
2773     - xfs_trans_set_sync(args.trans);
2774     -
2775     - if (!error && (flags & ATTR_KERNOTIME) == 0) {
2776     - xfs_trans_ichgtime(args.trans, dp,
2777     - XFS_ICHGTIME_CHG);
2778     - }
2779     - err2 = xfs_trans_commit(args.trans);
2780     - xfs_iunlock(dp, XFS_ILOCK_EXCL);
2781     -
2782     - return error ? error : err2;
2783     - }
2784     -
2785     - /*
2786     - * It won't fit in the shortform, transform to a leaf block.
2787     - * GROT: another possible req'mt for a double-split btree op.
2788     - */
2789     - error = xfs_attr_shortform_to_leaf(&args, &leaf_bp);
2790     - if (error)
2791     - goto out;
2792     - /*
2793     - * Prevent the leaf buffer from being unlocked so that a
2794     - * concurrent AIL push cannot grab the half-baked leaf
2795     - * buffer and run into problems with the write verifier.
2796     - */
2797     - xfs_trans_bhold(args.trans, leaf_bp);
2798     - error = xfs_defer_finish(&args.trans);
2799     - if (error)
2800     - goto out;
2801     -
2802     - /*
2803     - * Commit the leaf transformation. We'll need another (linked)
2804     - * transaction to add the new attribute to the leaf, which
2805     - * means that we have to hold & join the leaf buffer here too.
2806     - */
2807     - error = xfs_trans_roll_inode(&args.trans, dp);
2808     - if (error)
2809     - goto out;
2810     - xfs_trans_bjoin(args.trans, leaf_bp);
2811     - leaf_bp = NULL;
2812     - }
2813     -
2814     - if (xfs_bmap_one_block(dp, XFS_ATTR_FORK))
2815     - error = xfs_attr_leaf_addname(&args);
2816     - else
2817     - error = xfs_attr_node_addname(&args);
2818     + error = xfs_attr_set_args(&args);
2819     if (error)
2820     - goto out;
2821     + goto out_trans_cancel;
2822     + if (!args.trans) {
2823     + /* shortform attribute has already been committed */
2824     + goto out_unlock;
2825     + }
2826    
2827     /*
2828     * If this is a synchronous mount, make sure that the
2829     @@ -358,17 +396,14 @@ xfs_attr_set(
2830     */
2831     xfs_trans_log_inode(args.trans, dp, XFS_ILOG_CORE);
2832     error = xfs_trans_commit(args.trans);
2833     +out_unlock:
2834     xfs_iunlock(dp, XFS_ILOCK_EXCL);
2835     -
2836     return error;
2837    
2838     -out:
2839     - if (leaf_bp)
2840     - xfs_trans_brelse(args.trans, leaf_bp);
2841     +out_trans_cancel:
2842     if (args.trans)
2843     xfs_trans_cancel(args.trans);
2844     - xfs_iunlock(dp, XFS_ILOCK_EXCL);
2845     - return error;
2846     + goto out_unlock;
2847     }
2848    
2849     /*
2850     @@ -423,17 +458,7 @@ xfs_attr_remove(
2851     */
2852     xfs_trans_ijoin(args.trans, dp, 0);
2853    
2854     - if (!xfs_inode_hasattr(dp)) {
2855     - error = -ENOATTR;
2856     - } else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
2857     - ASSERT(dp->i_afp->if_flags & XFS_IFINLINE);
2858     - error = xfs_attr_shortform_remove(&args);
2859     - } else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
2860     - error = xfs_attr_leaf_removename(&args);
2861     - } else {
2862     - error = xfs_attr_node_removename(&args);
2863     - }
2864     -
2865     + error = xfs_attr_remove_args(&args);
2866     if (error)
2867     goto out;
2868    
2869     diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
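The xfs_attr_set() rewrite above leaves one unlock site: the success path falls through out_unlock, while out_trans_cancel cancels the transaction and then jumps backward to rejoin it. A toy of that backward-goto exit shape; the labels mirror the patch, the rest is illustrative:

#include <stdio.h>

static int do_args(int fail) { return fail ? -28 : 0; } /* -ENOSPC */

static int attr_set(int fail)
{
        int have_trans = 1;     /* stands in for args.trans */
        int error;

        error = do_args(fail);
        if (error)
                goto out_trans_cancel;

        puts("commit transaction");
        have_trans = 0;
out_unlock:
        puts("unlock inode");   /* single unlock site for all paths */
        return error;

out_trans_cancel:
        if (have_trans)
                puts("cancel transaction");
        goto out_unlock;        /* rejoin the common exit */
}

int main(void)
{
        printf("ok:   %d\n", attr_set(0));
        printf("fail: %d\n", attr_set(1));
        return 0;
}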
2870     new file mode 100644
2871     index 000000000000..cc04ee0aacfb
2872     --- /dev/null
2873     +++ b/fs/xfs/libxfs/xfs_attr.h
2874     @@ -0,0 +1,150 @@
2875     +// SPDX-License-Identifier: GPL-2.0
2876     +/*
2877     + * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc.
2878     + * All Rights Reserved.
2879     + */
2880     +#ifndef __XFS_ATTR_H__
2881     +#define __XFS_ATTR_H__
2882     +
2883     +struct xfs_inode;
2884     +struct xfs_da_args;
2885     +struct xfs_attr_list_context;
2886     +
2887     +/*
2888     + * Large attribute lists are structured around Btrees where all the data
2889     + * elements are in the leaf nodes. Attribute names are hashed into an int,
2890     + * then that int is used as the index into the Btree. Since the hashval
2891     + * of an attribute name may not be unique, we may have duplicate keys.
2892     + * The internal links in the Btree are logical block offsets into the file.
2893     + *
2894     + * Small attribute lists use a different format and are packed as tightly
2895     + * as possible so as to fit into the literal area of the inode.
2896     + */
2897     +
2898     +/*========================================================================
2899     + * External interfaces
2900     + *========================================================================*/
2901     +
2902     +
2903     +#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
2904     +#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
2905     +#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
2906     +#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
2907     +#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
2908     +#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
2909     +
2910     +#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
2911     +#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
2912     +
2913     +#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
2914     +
2915     +#define XFS_ATTR_FLAGS \
2916     + { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
2917     + { ATTR_ROOT, "ROOT" }, \
2918     + { ATTR_TRUST, "TRUST" }, \
2919     + { ATTR_SECURE, "SECURE" }, \
2920     + { ATTR_CREATE, "CREATE" }, \
2921     + { ATTR_REPLACE, "REPLACE" }, \
2922     + { ATTR_KERNOTIME, "KERNOTIME" }, \
2923     + { ATTR_KERNOVAL, "KERNOVAL" }, \
2924     + { ATTR_INCOMPLETE, "INCOMPLETE" }
2925     +
2926     +/*
2927     + * The maximum size (into the kernel or returned from the kernel) of an
2928     + * attribute value or the buffer used for an attr_list() call. Larger
2929     + * sizes will result in an ERANGE return code.
2930     + */
2931     +#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */
2932     +
2933     +/*
2934     + * Define how lists of attribute names are returned to the user from
2935     + * the attr_list() call. A large, 32bit aligned, buffer is passed in
2936     + * along with its size. We put an array of offsets at the top that each
2937     + * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom.
2938     + */
2939     +typedef struct attrlist {
2940     + __s32 al_count; /* number of entries in attrlist */
2941     + __s32 al_more; /* T/F: more attrs (do call again) */
2942     + __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */
2943     +} attrlist_t;
2944     +
2945     +/*
2946     + * Show the interesting info about one attribute. This is what the
2947     + * al_offset[i] entry points to.
2948     + */
2949     +typedef struct attrlist_ent { /* data from attr_list() */
2950     + __u32 a_valuelen; /* number bytes in value of attr */
2951     + char a_name[1]; /* attr name (NULL terminated) */
2952     +} attrlist_ent_t;
2953     +
2954     +/*
2955     + * Given a pointer to the (char*) buffer containing the attr_list() result,
2956     + * and an index, return a pointer to the indicated attribute in the buffer.
2957     + */
2958     +#define ATTR_ENTRY(buffer, index) \
2959     + ((attrlist_ent_t *) \
2960     + &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
2961     +
2962     +/*
2963     + * Kernel-internal version of the attrlist cursor.
2964     + */
2965     +typedef struct attrlist_cursor_kern {
2966     + __u32 hashval; /* hash value of next entry to add */
2967     + __u32 blkno; /* block containing entry (suggestion) */
2968     + __u32 offset; /* offset in list of equal-hashvals */
2969     + __u16 pad1; /* padding to match user-level */
2970     + __u8 pad2; /* padding to match user-level */
2971     + __u8 initted; /* T/F: cursor has been initialized */
2972     +} attrlist_cursor_kern_t;
2973     +
2974     +
2975     +/*========================================================================
2976     + * Structure used to pass context around among the routines.
2977     + *========================================================================*/
2978     +
2979     +
2980     +/* void; state communicated via *context */
2981     +typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
2982     + unsigned char *, int, int);
2983     +
2984     +typedef struct xfs_attr_list_context {
2985     + struct xfs_trans *tp;
2986     + struct xfs_inode *dp; /* inode */
2987     + struct attrlist_cursor_kern *cursor; /* position in list */
2988     + char *alist; /* output buffer */
2989     + int seen_enough; /* T/F: seen enough of list? */
2990     + ssize_t count; /* num used entries */
2991     + int dupcnt; /* count dup hashvals seen */
2992     + int bufsize; /* total buffer size */
2993     + int firstu; /* first used byte in buffer */
2994     + int flags; /* from VOP call */
2995     + int resynch; /* T/F: resynch with cursor */
2996     + put_listent_func_t put_listent; /* list output fmt function */
2997     + int index; /* index into output buffer */
2998     +} xfs_attr_list_context_t;
2999     +
3000     +
3001     +/*========================================================================
3002     + * Function prototypes for the kernel.
3003     + *========================================================================*/
3004     +
3005     +/*
3006     + * Overall external interface routines.
3007     + */
3008     +int xfs_attr_inactive(struct xfs_inode *dp);
3009     +int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *);
3010     +int xfs_attr_list_int(struct xfs_attr_list_context *);
3011     +int xfs_inode_hasattr(struct xfs_inode *ip);
3012     +int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
3013     +int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
3014     + unsigned char *value, int *valuelenp, int flags);
3015     +int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
3016     + unsigned char *value, int valuelen, int flags);
3017     +int xfs_attr_set_args(struct xfs_da_args *args);
3018     +int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
3019     +int xfs_attr_remove_args(struct xfs_da_args *args);
3020     +int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
3021     + int flags, struct attrlist_cursor_kern *cursor);
3022     +
3023     +
3024     +#endif /* __XFS_ATTR_H__ */
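
The attrlist layout described in the new header keeps an offset table at the
front of the buffer and packs the entries themselves downward from the end. A
rough userspace mirror of that layout and the ATTR_ENTRY()-style indexing
(struct names, sizes, and the 8-byte alignment here are invented for the demo):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct alist { int32_t count, more, offset[1]; };
    struct aent  { uint32_t valuelen; char name[1]; };

    #define ENTRY(buf, i) \
            ((struct aent *)&((char *)(buf))[((struct alist *)(buf))->offset[i]])

    static void add(char *buf, size_t size, const char *name, uint32_t vlen)
    {
            struct alist *al = (struct alist *)buf;
            size_t need = sizeof(struct aent) + strlen(name); /* name[1] holds NUL */
            int32_t bottom = al->count ? al->offset[al->count - 1] : (int32_t)size;
            int32_t off = (bottom - (int32_t)need) & ~7; /* keep entries aligned */
            struct aent *ae = (struct aent *)(buf + off);

            ae->valuelen = vlen;
            strcpy(ae->name, name);
            al->offset[al->count++] = off;    /* table grows up, entries grow down */
    }

    int main(void)
    {
            _Alignas(8) char buf[256] = { 0 };
            struct alist *al = (struct alist *)buf;

            add(buf, sizeof(buf), "user.alpha", 12);
            add(buf, sizeof(buf), "user.beta", 7);
            for (int i = 0; i < al->count; i++)
                    printf("%s -> %u bytes\n", ENTRY(buf, i)->name,
                           ENTRY(buf, i)->valuelen);
            return 0;
    }
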
3025     diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
3026     index 3a496ffe6551..06a7da8dbda5 100644
3027     --- a/fs/xfs/libxfs/xfs_bmap.c
3028     +++ b/fs/xfs/libxfs/xfs_bmap.c
3029     @@ -1019,6 +1019,34 @@ xfs_bmap_add_attrfork_local(
3030     return -EFSCORRUPTED;
3031     }
3032    
3033     +/* Set an inode attr fork off based on the format */
3034     +int
3035     +xfs_bmap_set_attrforkoff(
3036     + struct xfs_inode *ip,
3037     + int size,
3038     + int *version)
3039     +{
3040     + switch (ip->i_d.di_format) {
3041     + case XFS_DINODE_FMT_DEV:
3042     + ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3043     + break;
3044     + case XFS_DINODE_FMT_LOCAL:
3045     + case XFS_DINODE_FMT_EXTENTS:
3046     + case XFS_DINODE_FMT_BTREE:
3047     + ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3048     + if (!ip->i_d.di_forkoff)
3049     + ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
3050     + else if ((ip->i_mount->m_flags & XFS_MOUNT_ATTR2) && version)
3051     + *version = 2;
3052     + break;
3053     + default:
3054     + ASSERT(0);
3055     + return -EINVAL;
3056     + }
3057     +
3058     + return 0;
3059     +}
3060     +
3061     /*
3062     * Convert inode from non-attributed to attributed.
3063     * Must not be in a transaction, ip must not be locked.
3064     @@ -1070,26 +1098,9 @@ xfs_bmap_add_attrfork(
3065    
3066     xfs_trans_ijoin(tp, ip, 0);
3067     xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
3068     -
3069     - switch (ip->i_d.di_format) {
3070     - case XFS_DINODE_FMT_DEV:
3071     - ip->i_d.di_forkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
3072     - break;
3073     - case XFS_DINODE_FMT_LOCAL:
3074     - case XFS_DINODE_FMT_EXTENTS:
3075     - case XFS_DINODE_FMT_BTREE:
3076     - ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
3077     - if (!ip->i_d.di_forkoff)
3078     - ip->i_d.di_forkoff = xfs_default_attroffset(ip) >> 3;
3079     - else if (mp->m_flags & XFS_MOUNT_ATTR2)
3080     - version = 2;
3081     - break;
3082     - default:
3083     - ASSERT(0);
3084     - error = -EINVAL;
3085     + error = xfs_bmap_set_attrforkoff(ip, size, &version);
3086     + if (error)
3087     goto trans_cancel;
3088     - }
3089     -
3090     ASSERT(ip->i_afp == NULL);
3091     ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
3092     ip->i_afp->if_flags = XFS_IFEXTENTS;
3093     @@ -1178,7 +1189,10 @@ xfs_iread_extents(
3094     * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
3095     */
3096     level = be16_to_cpu(block->bb_level);
3097     - ASSERT(level > 0);
3098     + if (unlikely(level == 0)) {
3099     + XFS_ERROR_REPORT(__func__, XFS_ERRLEVEL_LOW, mp);
3100     + return -EFSCORRUPTED;
3101     + }
3102     pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
3103     bno = be64_to_cpu(*pp);
3104    
3105     diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
3106     index b6e9b639e731..488dc8860fd7 100644
3107     --- a/fs/xfs/libxfs/xfs_bmap.h
3108     +++ b/fs/xfs/libxfs/xfs_bmap.h
3109     @@ -183,6 +183,7 @@ void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno,
3110     xfs_filblks_t len);
3111     void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *);
3112     int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd);
3113     +int xfs_bmap_set_attrforkoff(struct xfs_inode *ip, int size, int *version);
3114     void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork);
3115     void __xfs_bmap_add_free(struct xfs_trans *tp, xfs_fsblock_t bno,
3116     xfs_filblks_t len, struct xfs_owner_info *oinfo,
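
The xfs_iread_extents() hunk swaps an ASSERT for an explicit -EFSCORRUPTED
return, so a damaged or crafted image fails gracefully on production kernels
where asserts compile out. The general shape, as a userspace sketch (with
EFSCORRUPTED mapped to EUCLEAN, the kernel's own aliasing):

    #include <errno.h>
    #include <stdio.h>

    #ifndef EFSCORRUPTED
    #define EFSCORRUPTED EUCLEAN   /* "filesystem corrupted", as the kernel maps it */
    #endif

    /* On-disk metadata is untrusted input: report corruption and bail out
     * instead of asserting, so non-debug builds are protected too. */
    static int read_btree_root_level(unsigned int level)
    {
            if (level == 0) {
                    fprintf(stderr, "corrupt btree root: level == 0\n");
                    return -EFSCORRUPTED;
            }
            return 0;   /* would go on to walk 'level' levels of the tree */
    }

    int main(void)
    {
            return read_btree_root_level(0) ? 1 : 0;
    }
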
3117     diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
3118     index e792b167150a..c52beee31836 100644
3119     --- a/fs/xfs/libxfs/xfs_defer.c
3120     +++ b/fs/xfs/libxfs/xfs_defer.c
3121     @@ -266,13 +266,15 @@ xfs_defer_trans_roll(
3122    
3123     trace_xfs_defer_trans_roll(tp, _RET_IP_);
3124    
3125     - /* Roll the transaction. */
3126     + /*
3127     + * Roll the transaction. Rolling always gives a new transaction (even
3128     + * if committing the old one fails!) to hand back to the caller, so we
3129     + * join the held resources to the new transaction so that we always
3130     + * return with the held resources joined to @tpp, no matter what
3131     + * happened.
3132     + */
3133     error = xfs_trans_roll(tpp);
3134     tp = *tpp;
3135     - if (error) {
3136     - trace_xfs_defer_trans_roll_error(tp, error);
3137     - return error;
3138     - }
3139    
3140     /* Rejoin the joined inodes. */
3141     for (i = 0; i < ipcount; i++)
3142     @@ -284,6 +286,8 @@ xfs_defer_trans_roll(
3143     xfs_trans_bhold(tp, bplist[i]);
3144     }
3145    
3146     + if (error)
3147     + trace_xfs_defer_trans_roll_error(tp, error);
3148     return error;
3149     }
3150    
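
The xfs_defer_trans_roll() change rejoins the held inodes and buffers before
looking at the roll error, since xfs_trans_roll() hands back a usable
transaction even when committing the old one fails. Sketched with fake types,
the ordering looks like this:

    #include <stdio.h>

    /* Fake types standing in for xfs_trans and held resources; illustrative only. */
    struct txn { int id; };
    struct res { struct txn *owner; };

    static int roll(struct txn **tpp)
    {
            (*tpp)->id++;        /* a fresh transaction is always handed back */
            return -1;           /* ...even when the commit half failed */
    }

    static int roll_and_rejoin(struct txn **tpp, struct res **held, int n)
    {
            int error = roll(tpp);

            /* Rejoin first, check the error afterwards: the caller's cleanup
             * path then always finds its resources attached to *tpp. */
            for (int i = 0; i < n; i++)
                    held[i]->owner = *tpp;
            return error;
    }

    int main(void)
    {
            struct txn t = { 1 }, *tp = &t;
            struct res r = { 0 }, *held[] = { &r };

            if (roll_and_rejoin(&tp, held, 1))
                    printf("roll failed, but r is joined to txn %d\n", r.owner->id);
            return 0;
    }
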
3151     diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
3152     deleted file mode 100644
3153     index 033ff8c478e2..000000000000
3154     --- a/fs/xfs/xfs_attr.h
3155     +++ /dev/null
3156     @@ -1,148 +0,0 @@
3157     -// SPDX-License-Identifier: GPL-2.0
3158     -/*
3159     - * Copyright (c) 2000,2002-2003,2005 Silicon Graphics, Inc.
3160     - * All Rights Reserved.
3161     - */
3162     -#ifndef __XFS_ATTR_H__
3163     -#define __XFS_ATTR_H__
3164     -
3165     -struct xfs_inode;
3166     -struct xfs_da_args;
3167     -struct xfs_attr_list_context;
3168     -
3169     -/*
3170     - * Large attribute lists are structured around Btrees where all the data
3171     - * elements are in the leaf nodes. Attribute names are hashed into an int,
3172     - * then that int is used as the index into the Btree. Since the hashval
3173     - * of an attribute name may not be unique, we may have duplicate keys.
3174     - * The internal links in the Btree are logical block offsets into the file.
3175     - *
3176     - * Small attribute lists use a different format and are packed as tightly
3177     - * as possible so as to fit into the literal area of the inode.
3178     - */
3179     -
3180     -/*========================================================================
3181     - * External interfaces
3182     - *========================================================================*/
3183     -
3184     -
3185     -#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
3186     -#define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
3187     -#define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
3188     -#define ATTR_SECURE 0x0008 /* use attrs in security namespace */
3189     -#define ATTR_CREATE 0x0010 /* pure create: fail if attr already exists */
3190     -#define ATTR_REPLACE 0x0020 /* pure set: fail if attr does not exist */
3191     -
3192     -#define ATTR_KERNOTIME 0x1000 /* [kernel] don't update inode timestamps */
3193     -#define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
3194     -
3195     -#define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
3196     -
3197     -#define XFS_ATTR_FLAGS \
3198     - { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
3199     - { ATTR_ROOT, "ROOT" }, \
3200     - { ATTR_TRUST, "TRUST" }, \
3201     - { ATTR_SECURE, "SECURE" }, \
3202     - { ATTR_CREATE, "CREATE" }, \
3203     - { ATTR_REPLACE, "REPLACE" }, \
3204     - { ATTR_KERNOTIME, "KERNOTIME" }, \
3205     - { ATTR_KERNOVAL, "KERNOVAL" }, \
3206     - { ATTR_INCOMPLETE, "INCOMPLETE" }
3207     -
3208     -/*
3209     - * The maximum size (into the kernel or returned from the kernel) of an
3210     - * attribute value or the buffer used for an attr_list() call. Larger
3211     - * sizes will result in an ERANGE return code.
3212     - */
3213     -#define ATTR_MAX_VALUELEN (64*1024) /* max length of a value */
3214     -
3215     -/*
3216     - * Define how lists of attribute names are returned to the user from
3217     - * the attr_list() call. A large, 32bit aligned, buffer is passed in
3218     - * along with its size. We put an array of offsets at the top that each
3219     - * reference an attrlist_ent_t and pack the attrlist_ent_t's at the bottom.
3220     - */
3221     -typedef struct attrlist {
3222     - __s32 al_count; /* number of entries in attrlist */
3223     - __s32 al_more; /* T/F: more attrs (do call again) */
3224     - __s32 al_offset[1]; /* byte offsets of attrs [var-sized] */
3225     -} attrlist_t;
3226     -
3227     -/*
3228     - * Show the interesting info about one attribute. This is what the
3229     - * al_offset[i] entry points to.
3230     - */
3231     -typedef struct attrlist_ent { /* data from attr_list() */
3232     - __u32 a_valuelen; /* number bytes in value of attr */
3233     - char a_name[1]; /* attr name (NULL terminated) */
3234     -} attrlist_ent_t;
3235     -
3236     -/*
3237     - * Given a pointer to the (char*) buffer containing the attr_list() result,
3238     - * and an index, return a pointer to the indicated attribute in the buffer.
3239     - */
3240     -#define ATTR_ENTRY(buffer, index) \
3241     - ((attrlist_ent_t *) \
3242     - &((char *)buffer)[ ((attrlist_t *)(buffer))->al_offset[index] ])
3243     -
3244     -/*
3245     - * Kernel-internal version of the attrlist cursor.
3246     - */
3247     -typedef struct attrlist_cursor_kern {
3248     - __u32 hashval; /* hash value of next entry to add */
3249     - __u32 blkno; /* block containing entry (suggestion) */
3250     - __u32 offset; /* offset in list of equal-hashvals */
3251     - __u16 pad1; /* padding to match user-level */
3252     - __u8 pad2; /* padding to match user-level */
3253     - __u8 initted; /* T/F: cursor has been initialized */
3254     -} attrlist_cursor_kern_t;
3255     -
3256     -
3257     -/*========================================================================
3258     - * Structure used to pass context around among the routines.
3259     - *========================================================================*/
3260     -
3261     -
3262     -/* void; state communicated via *context */
3263     -typedef void (*put_listent_func_t)(struct xfs_attr_list_context *, int,
3264     - unsigned char *, int, int);
3265     -
3266     -typedef struct xfs_attr_list_context {
3267     - struct xfs_trans *tp;
3268     - struct xfs_inode *dp; /* inode */
3269     - struct attrlist_cursor_kern *cursor; /* position in list */
3270     - char *alist; /* output buffer */
3271     - int seen_enough; /* T/F: seen enough of list? */
3272     - ssize_t count; /* num used entries */
3273     - int dupcnt; /* count dup hashvals seen */
3274     - int bufsize; /* total buffer size */
3275     - int firstu; /* first used byte in buffer */
3276     - int flags; /* from VOP call */
3277     - int resynch; /* T/F: resynch with cursor */
3278     - put_listent_func_t put_listent; /* list output fmt function */
3279     - int index; /* index into output buffer */
3280     -} xfs_attr_list_context_t;
3281     -
3282     -
3283     -/*========================================================================
3284     - * Function prototypes for the kernel.
3285     - *========================================================================*/
3286     -
3287     -/*
3288     - * Overall external interface routines.
3289     - */
3290     -int xfs_attr_inactive(struct xfs_inode *dp);
3291     -int xfs_attr_list_int_ilocked(struct xfs_attr_list_context *);
3292     -int xfs_attr_list_int(struct xfs_attr_list_context *);
3293     -int xfs_inode_hasattr(struct xfs_inode *ip);
3294     -int xfs_attr_get_ilocked(struct xfs_inode *ip, struct xfs_da_args *args);
3295     -int xfs_attr_get(struct xfs_inode *ip, const unsigned char *name,
3296     - unsigned char *value, int *valuelenp, int flags);
3297     -int xfs_attr_set(struct xfs_inode *dp, const unsigned char *name,
3298     - unsigned char *value, int valuelen, int flags);
3299     -int xfs_attr_remove(struct xfs_inode *dp, const unsigned char *name, int flags);
3300     -int xfs_attr_list(struct xfs_inode *dp, char *buffer, int bufsize,
3301     - int flags, struct attrlist_cursor_kern *cursor);
3302     -
3303     -
3304     -#endif /* __XFS_ATTR_H__ */
3305     diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
3306     index 87e6dd5326d5..a1af984e4913 100644
3307     --- a/fs/xfs/xfs_dquot.c
3308     +++ b/fs/xfs/xfs_dquot.c
3309     @@ -277,7 +277,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
3310    
3311     /*
3312     * Ensure that the given in-core dquot has a buffer on disk backing it, and
3313     - * return the buffer. This is called when the bmapi finds a hole.
3314     + * return the buffer locked and held. This is called when the bmapi finds a
3315     + * hole.
3316     */
3317     STATIC int
3318     xfs_dquot_disk_alloc(
3319     @@ -355,13 +356,14 @@ xfs_dquot_disk_alloc(
3320     * If everything succeeds, the caller of this function is returned a
3321     * buffer that is locked and held to the transaction. The caller
3322     * is responsible for unlocking any buffer passed back, either
3323     - * manually or by committing the transaction.
3324     + * manually or by committing the transaction. On error, the buffer is
3325     + * released and not passed back.
3326     */
3327     xfs_trans_bhold(tp, bp);
3328     error = xfs_defer_finish(tpp);
3329     - tp = *tpp;
3330     if (error) {
3331     - xfs_buf_relse(bp);
3332     + xfs_trans_bhold_release(*tpp, bp);
3333     + xfs_trans_brelse(*tpp, bp);
3334     return error;
3335     }
3336     *bpp = bp;
3337     @@ -521,7 +523,6 @@ xfs_qm_dqread_alloc(
3338     struct xfs_buf **bpp)
3339     {
3340     struct xfs_trans *tp;
3341     - struct xfs_buf *bp;
3342     int error;
3343    
3344     error = xfs_trans_alloc(mp, &M_RES(mp)->tr_qm_dqalloc,
3345     @@ -529,7 +530,7 @@ xfs_qm_dqread_alloc(
3346     if (error)
3347     goto err;
3348    
3349     - error = xfs_dquot_disk_alloc(&tp, dqp, &bp);
3350     + error = xfs_dquot_disk_alloc(&tp, dqp, bpp);
3351     if (error)
3352     goto err_cancel;
3353    
3354     @@ -539,10 +540,10 @@ xfs_qm_dqread_alloc(
3355     * Buffer was held to the transaction, so we have to unlock it
3356     * manually here because we're not passing it back.
3357     */
3358     - xfs_buf_relse(bp);
3359     + xfs_buf_relse(*bpp);
3360     + *bpp = NULL;
3361     goto err;
3362     }
3363     - *bpp = bp;
3364     return 0;
3365    
3366     err_cancel:
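
The dquot change passes the caller's buffer pointer straight through and, on
failure, releases the buffer rather than handing back a stale reference. A
small sketch of that ownership-through-out-parameter convention, using plain
malloc/free in place of the xfs_buf handling:

    #include <stdio.h>
    #include <stdlib.h>

    /* On success *bufp belongs to the caller; on failure it is released
     * and NULLed here, so the caller never sees a dangling pointer. */
    static int alloc_block(char **bufp, int simulate_error)
    {
            *bufp = malloc(512);
            if (!*bufp)
                    return -1;
            if (simulate_error) {
                    free(*bufp);
                    *bufp = NULL;
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            char *buf = NULL;

            if (alloc_block(&buf, 1) == 0)
                    free(buf);
            printf("buf=%p\n", (void *)buf);   /* (nil) on the error path */
            return 0;
    }
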
3367     diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
3368     index 74047bd0c1ae..e427ad097e2e 100644
3369     --- a/fs/xfs/xfs_iops.c
3370     +++ b/fs/xfs/xfs_iops.c
3371     @@ -803,6 +803,7 @@ xfs_setattr_nonsize(
3372    
3373     out_cancel:
3374     xfs_trans_cancel(tp);
3375     + xfs_iunlock(ip, XFS_ILOCK_EXCL);
3376     out_dqrele:
3377     xfs_qm_dqrele(udqp);
3378     xfs_qm_dqrele(gdqp);
3379     diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
3380     index 147546e0c11b..815dcfa64743 100644
3381     --- a/include/trace/events/rxrpc.h
3382     +++ b/include/trace/events/rxrpc.h
3383     @@ -500,10 +500,10 @@ rxrpc_tx_points;
3384     #define E_(a, b) { a, b }
3385    
3386     TRACE_EVENT(rxrpc_local,
3387     - TP_PROTO(struct rxrpc_local *local, enum rxrpc_local_trace op,
3388     + TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
3389     int usage, const void *where),
3390    
3391     - TP_ARGS(local, op, usage, where),
3392     + TP_ARGS(local_debug_id, op, usage, where),
3393    
3394     TP_STRUCT__entry(
3395     __field(unsigned int, local )
3396     @@ -513,7 +513,7 @@ TRACE_EVENT(rxrpc_local,
3397     ),
3398    
3399     TP_fast_assign(
3400     - __entry->local = local->debug_id;
3401     + __entry->local = local_debug_id;
3402     __entry->op = op;
3403     __entry->usage = usage;
3404     __entry->where = where;
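
The rxrpc_local tracepoint now takes the endpoint's debug_id instead of the
object pointer, so emitting the trace never dereferences an endpoint whose
last reference may already be gone. The same idea in miniature:

    #include <stdio.h>
    #include <stdlib.h>

    struct endpoint { unsigned int debug_id; };

    /* Record a plain identifier, not the object pointer: the log entry
     * stays valid even if the endpoint is freed before it is printed. */
    static void trace_local(unsigned int debug_id, const char *op, int usage)
    {
            printf("ep=%u %s usage=%d\n", debug_id, op, usage);
    }

    int main(void)
    {
            struct endpoint *ep = malloc(sizeof(*ep));

            if (!ep)
                    return 1;
            ep->debug_id = 42;
            unsigned int id = ep->debug_id;   /* capture before the free */
            free(ep);
            trace_local(id, "put", 0);        /* safe: no dereference of ep */
            return 0;
    }
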
3405     diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
3406     index 8e009cee6517..26814a14013c 100644
3407     --- a/kernel/irq/irqdesc.c
3408     +++ b/kernel/irq/irqdesc.c
3409     @@ -294,6 +294,18 @@ static void irq_sysfs_add(int irq, struct irq_desc *desc)
3410     }
3411     }
3412    
3413     +static void irq_sysfs_del(struct irq_desc *desc)
3414     +{
3415     + /*
3416     + * If irq_sysfs_init() has not yet been invoked (early boot), then
3417     + * irq_kobj_base is NULL and the descriptor was never added.
3418     + * kobject_del() complains about an object with no parent, so make
3419     + * it conditional.
3420     + */
3421     + if (irq_kobj_base)
3422     + kobject_del(&desc->kobj);
3423     +}
3424     +
3425     static int __init irq_sysfs_init(void)
3426     {
3427     struct irq_desc *desc;
3428     @@ -324,6 +336,7 @@ static struct kobj_type irq_kobj_type = {
3429     };
3430    
3431     static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
3432     +static void irq_sysfs_del(struct irq_desc *desc) {}
3433    
3434     #endif /* CONFIG_SYSFS */
3435    
3436     @@ -437,7 +450,7 @@ static void free_desc(unsigned int irq)
3437     * The sysfs entry must be serialized against a concurrent
3438     * irq_sysfs_init() as well.
3439     */
3440     - kobject_del(&desc->kobj);
3441     + irq_sysfs_del(desc);
3442     delete_irq_desc(irq);
3443    
3444     /*
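
The irqdesc fix makes teardown mirror setup: if the sysfs registration never
ran (early boot), the delete is skipped instead of tripping over the missing
parent. A toy version of the guard:

    #include <stdbool.h>
    #include <stdio.h>

    static bool sysfs_ready;   /* flipped once the late-init registration runs */

    static void desc_add(void)
    {
            if (sysfs_ready)
                    puts("kobject added");
    }

    static void desc_del(void)
    {
            /* Teardown mirrors setup: if registration never happened,
             * there is nothing to delete, and deleting anyway would
             * warn about the missing parent. */
            if (sysfs_ready)
                    puts("kobject deleted");
    }

    int main(void)
    {
            desc_del();          /* early: silently skipped */
            sysfs_ready = true;
            desc_add();
            desc_del();          /* now the delete really runs */
            return 0;
    }
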
3445     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
3446     index 6fad1864ba03..09ce8528bbdd 100644
3447     --- a/mm/huge_memory.c
3448     +++ b/mm/huge_memory.c
3449     @@ -33,6 +33,7 @@
3450     #include <linux/page_idle.h>
3451     #include <linux/shmem_fs.h>
3452     #include <linux/oom.h>
3453     +#include <linux/page_owner.h>
3454    
3455     #include <asm/tlb.h>
3456     #include <asm/pgalloc.h>
3457     @@ -2477,6 +2478,9 @@ static void __split_huge_page(struct page *page, struct list_head *list,
3458     }
3459    
3460     ClearPageCompound(head);
3461     +
3462     + split_page_owner(head, HPAGE_PMD_ORDER);
3463     +
3464     /* See comment in __split_huge_page_tail() */
3465     if (PageAnon(head)) {
3466     /* Additional pin to radix tree of swap cache */
3467     diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
3468     index 9da65552e7ca..c2c4f739da8f 100644
3469     --- a/mm/zsmalloc.c
3470     +++ b/mm/zsmalloc.c
3471     @@ -53,6 +53,7 @@
3472     #include <linux/zpool.h>
3473     #include <linux/mount.h>
3474     #include <linux/migrate.h>
3475     +#include <linux/wait.h>
3476     #include <linux/pagemap.h>
3477     #include <linux/fs.h>
3478    
3479     @@ -267,6 +268,10 @@ struct zs_pool {
3480     #ifdef CONFIG_COMPACTION
3481     struct inode *inode;
3482     struct work_struct free_work;
3483     + /* A wait queue for when migration races with async_free_zspage() */
3484     + struct wait_queue_head migration_wait;
3485     + atomic_long_t isolated_pages;
3486     + bool destroying;
3487     #endif
3488     };
3489    
3490     @@ -1882,6 +1887,31 @@ static void dec_zspage_isolation(struct zspage *zspage)
3491     zspage->isolated--;
3492     }
3493    
3494     +static void putback_zspage_deferred(struct zs_pool *pool,
3495     + struct size_class *class,
3496     + struct zspage *zspage)
3497     +{
3498     + enum fullness_group fg;
3499     +
3500     + fg = putback_zspage(class, zspage);
3501     + if (fg == ZS_EMPTY)
3502     + schedule_work(&pool->free_work);
3503     +
3504     +}
3505     +
3506     +static inline void zs_pool_dec_isolated(struct zs_pool *pool)
3507     +{
3508     + VM_BUG_ON(atomic_long_read(&pool->isolated_pages) <= 0);
3509     + atomic_long_dec(&pool->isolated_pages);
3510     + /*
3511     + * There's no possibility of racing, since wait_for_isolated_drain()
3512     + * checks the isolated count under &class->lock after enqueuing
3513     + * on migration_wait.
3514     + */
3515     + if (atomic_long_read(&pool->isolated_pages) == 0 && pool->destroying)
3516     + wake_up_all(&pool->migration_wait);
3517     +}
3518     +
3519     static void replace_sub_page(struct size_class *class, struct zspage *zspage,
3520     struct page *newpage, struct page *oldpage)
3521     {
3522     @@ -1951,6 +1981,7 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
3523     */
3524     if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
3525     get_zspage_mapping(zspage, &class_idx, &fullness);
3526     + atomic_long_inc(&pool->isolated_pages);
3527     remove_zspage(class, zspage, fullness);
3528     }
3529    
3530     @@ -2050,8 +2081,16 @@ static int zs_page_migrate(struct address_space *mapping, struct page *newpage,
3531     * Page migration is done so let's putback isolated zspage to
3532     * the list if @page is final isolated subpage in the zspage.
3533     */
3534     - if (!is_zspage_isolated(zspage))
3535     - putback_zspage(class, zspage);
3536     + if (!is_zspage_isolated(zspage)) {
3537     + /*
3538     + * We cannot race with zs_destroy_pool() here because we wait
3539     + * for isolation to hit zero before we start destroying.
3540     + * Also, we ensure that everyone can see pool->destroying before
3541     + * we start waiting.
3542     + */
3543     + putback_zspage_deferred(pool, class, zspage);
3544     + zs_pool_dec_isolated(pool);
3545     + }
3546    
3547     reset_page(page);
3548     put_page(page);
3549     @@ -2097,13 +2136,12 @@ static void zs_page_putback(struct page *page)
3550     spin_lock(&class->lock);
3551     dec_zspage_isolation(zspage);
3552     if (!is_zspage_isolated(zspage)) {
3553     - fg = putback_zspage(class, zspage);
3554     /*
3555     * Due to page_lock, we cannot free zspage immediately
3556     * so let's defer.
3557     */
3558     - if (fg == ZS_EMPTY)
3559     - schedule_work(&pool->free_work);
3560     + putback_zspage_deferred(pool, class, zspage);
3561     + zs_pool_dec_isolated(pool);
3562     }
3563     spin_unlock(&class->lock);
3564     }
3565     @@ -2127,8 +2165,36 @@ static int zs_register_migration(struct zs_pool *pool)
3566     return 0;
3567     }
3568    
3569     +static bool pool_isolated_are_drained(struct zs_pool *pool)
3570     +{
3571     + return atomic_long_read(&pool->isolated_pages) == 0;
3572     +}
3573     +
3574     +/* Block until the pool's isolated page count drops to zero */
3575     +static void wait_for_isolated_drain(struct zs_pool *pool)
3576     +{
3577     +
3578     + /*
3579     + * We're in the process of destroying the pool, so there are no
3580     + * active allocations. zs_page_isolate() fails for completely free
3581     + * zspages, so we need only wait for the zs_pool's isolated
3582     + * count to hit zero.
3583     + */
3584     + wait_event(pool->migration_wait,
3585     + pool_isolated_are_drained(pool));
3586     +}
3587     +
3588     static void zs_unregister_migration(struct zs_pool *pool)
3589     {
3590     + pool->destroying = true;
3591     + /*
3592     + * We need a memory barrier here to ensure global visibility of
3593     + * pool->destroying. Thus pool->isolated_pages will either be 0 in which
3594     + * case we don't care, or it will be > 0 and pool->destroying will
3595     + * ensure that we wake up once isolation hits 0.
3596     + */
3597     + smp_mb();
3598     + wait_for_isolated_drain(pool); /* This can block */
3599     flush_work(&pool->free_work);
3600     iput(pool->inode);
3601     }
3602     @@ -2366,6 +2432,8 @@ struct zs_pool *zs_create_pool(const char *name)
3603     if (!pool->name)
3604     goto err;
3605    
3606     + init_waitqueue_head(&pool->migration_wait);
3607     +
3608     if (create_cache(pool))
3609     goto err;
3610    
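
The zsmalloc changes close a shutdown race with a drain handshake: destruction
sets a flag, issues a barrier, and sleeps until the isolated-page count
reaches zero, while each decrement re-checks and wakes the waiter. A userspace
sketch using a mutex/condvar pair, where the mutex provides the ordering the
kernel gets from smp_mb():

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t  drained = PTHREAD_COND_INITIALIZER;
    static long isolated;
    static bool destroying;

    static void dec_isolated(void)
    {
            pthread_mutex_lock(&lock);
            if (--isolated == 0 && destroying)
                    pthread_cond_broadcast(&drained);
            pthread_mutex_unlock(&lock);
    }

    static void wait_for_drain(void)
    {
            pthread_mutex_lock(&lock);
            destroying = true;           /* visible before we start sleeping */
            while (isolated != 0)
                    pthread_cond_wait(&drained, &lock);
            pthread_mutex_unlock(&lock);
    }

    static void *worker(void *arg)
    {
            (void)arg;
            dec_isolated();
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            isolated = 1;                /* pretend one page is mid-migration */
            pthread_create(&t, NULL, worker, NULL);
            wait_for_drain();
            pthread_join(t, NULL);
            puts("drained");
            return 0;
    }
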
3611     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
3612     index 995b3842ba7c..62ffc989a44a 100644
3613     --- a/net/bridge/netfilter/ebtables.c
3614     +++ b/net/bridge/netfilter/ebtables.c
3615     @@ -2274,8 +2274,10 @@ static int compat_do_replace(struct net *net, void __user *user,
3616     state.buf_kern_len = size64;
3617    
3618     ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
3619     - if (WARN_ON(ret < 0))
3620     + if (WARN_ON(ret < 0)) {
3621     + vfree(entries_tmp);
3622     goto out_unlock;
3623     + }
3624    
3625     vfree(entries_tmp);
3626     tmp.entries_size = size64;
3627     diff --git a/net/can/gw.c b/net/can/gw.c
3628     index 53859346dc9a..bd2161470e45 100644
3629     --- a/net/can/gw.c
3630     +++ b/net/can/gw.c
3631     @@ -1046,32 +1046,50 @@ static __init int cgw_module_init(void)
3632     pr_info("can: netlink gateway (rev " CAN_GW_VERSION ") max_hops=%d\n",
3633     max_hops);
3634    
3635     - register_pernet_subsys(&cangw_pernet_ops);
3636     + ret = register_pernet_subsys(&cangw_pernet_ops);
3637     + if (ret)
3638     + return ret;
3639     +
3640     + ret = -ENOMEM;
3641     cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
3642     0, 0, NULL);
3643     -
3644     if (!cgw_cache)
3645     - return -ENOMEM;
3646     + goto out_cache_create;
3647    
3648     /* set notifier */
3649     notifier.notifier_call = cgw_notifier;
3650     - register_netdevice_notifier(&notifier);
3651     + ret = register_netdevice_notifier(&notifier);
3652     + if (ret)
3653     + goto out_register_notifier;
3654    
3655     ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
3656     NULL, cgw_dump_jobs, 0);
3657     - if (ret) {
3658     - unregister_netdevice_notifier(&notifier);
3659     - kmem_cache_destroy(cgw_cache);
3660     - return -ENOBUFS;
3661     - }
3662     -
3663     - /* Only the first call to rtnl_register_module can fail */
3664     - rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
3665     - cgw_create_job, NULL, 0);
3666     - rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
3667     - cgw_remove_job, NULL, 0);
3668     + if (ret)
3669     + goto out_rtnl_register1;
3670     +
3671     + ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
3672     + cgw_create_job, NULL, 0);
3673     + if (ret)
3674     + goto out_rtnl_register2;
3675     + ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
3676     + cgw_remove_job, NULL, 0);
3677     + if (ret)
3678     + goto out_rtnl_register3;
3679    
3680     return 0;
3681     +
3682     +out_rtnl_register3:
3683     + rtnl_unregister(PF_CAN, RTM_NEWROUTE);
3684     +out_rtnl_register2:
3685     + rtnl_unregister(PF_CAN, RTM_GETROUTE);
3686     +out_rtnl_register1:
3687     + unregister_netdevice_notifier(&notifier);
3688     +out_register_notifier:
3689     + kmem_cache_destroy(cgw_cache);
3690     +out_cache_create:
3691     + unregister_pernet_subsys(&cangw_pernet_ops);
3692     +
3693     + return ret;
3694     }
3695    
3696     static __exit void cgw_module_exit(void)
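
cgw_module_init() now checks every registration and unwinds the completed
steps in reverse order on failure, the canonical goto ladder for module init.
Reduced to its skeleton:

    #include <stdio.h>

    static int  reg_a(void)   { return 0; }
    static int  reg_b(void)   { return 0; }
    static int  reg_c(void)   { return -1; }   /* pretend the last step fails */
    static void unreg_a(void) { puts("unreg a"); }
    static void unreg_b(void) { puts("unreg b"); }

    /* Undo completed steps in reverse order of registration. */
    static int module_init_sketch(void)
    {
            int ret;

            ret = reg_a();
            if (ret)
                    return ret;
            ret = reg_b();
            if (ret)
                    goto out_a;
            ret = reg_c();
            if (ret)
                    goto out_b;
            return 0;

    out_b:
            unreg_b();
    out_a:
            unreg_a();
            return ret;
    }

    int main(void) { return module_init_sketch() ? 1 : 0; }
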
3697     diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
3698     index 60934bd8796c..76c41a84550e 100644
3699     --- a/net/ceph/osd_client.c
3700     +++ b/net/ceph/osd_client.c
3701     @@ -1423,7 +1423,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
3702     struct ceph_osds up, acting;
3703     bool force_resend = false;
3704     bool unpaused = false;
3705     - bool legacy_change;
3706     + bool legacy_change = false;
3707     bool split = false;
3708     bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
3709     bool recovery_deletes = ceph_osdmap_flag(osdc,
3710     @@ -1511,15 +1511,14 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
3711     t->osd = acting.primary;
3712     }
3713    
3714     - if (unpaused || legacy_change || force_resend ||
3715     - (split && con && CEPH_HAVE_FEATURE(con->peer_features,
3716     - RESEND_ON_SPLIT)))
3717     + if (unpaused || legacy_change || force_resend || split)
3718     ct_res = CALC_TARGET_NEED_RESEND;
3719     else
3720     ct_res = CALC_TARGET_NO_ACTION;
3721    
3722     out:
3723     - dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
3724     + dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
3725     + legacy_change, force_resend, split, ct_res, t->osd);
3726     return ct_res;
3727     }
3728    
3729     diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3730     index 13ade5782847..4f01321e793c 100644
3731     --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3732     +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
3733     @@ -230,7 +230,7 @@ bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
3734    
3735     e.id = ip_to_id(map, ip);
3736    
3737     - if (opt->flags & IPSET_DIM_ONE_SRC)
3738     + if (opt->flags & IPSET_DIM_TWO_SRC)
3739     ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
3740     else
3741     ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
3742     diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
3743     index 1577f2f76060..e2538c578671 100644
3744     --- a/net/netfilter/ipset/ip_set_core.c
3745     +++ b/net/netfilter/ipset/ip_set_core.c
3746     @@ -1157,7 +1157,7 @@ static int ip_set_rename(struct net *net, struct sock *ctnl,
3747     return -ENOENT;
3748    
3749     write_lock_bh(&ip_set_ref_lock);
3750     - if (set->ref != 0) {
3751     + if (set->ref != 0 || set->ref_netlink != 0) {
3752     ret = -IPSET_ERR_REFERENCED;
3753     goto out;
3754     }
3755     diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
3756     index fd87de3ed55b..16ec822e4044 100644
3757     --- a/net/netfilter/ipset/ip_set_hash_ipmac.c
3758     +++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
3759     @@ -95,15 +95,11 @@ hash_ipmac4_kadt(struct ip_set *set, const struct sk_buff *skb,
3760     struct hash_ipmac4_elem e = { .ip = 0, { .foo[0] = 0, .foo[1] = 0 } };
3761     struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
3762    
3763     - /* MAC can be src only */
3764     - if (!(opt->flags & IPSET_DIM_TWO_SRC))
3765     - return 0;
3766     -
3767     if (skb_mac_header(skb) < skb->head ||
3768     (skb_mac_header(skb) + ETH_HLEN) > skb->data)
3769     return -EINVAL;
3770    
3771     - if (opt->flags & IPSET_DIM_ONE_SRC)
3772     + if (opt->flags & IPSET_DIM_TWO_SRC)
3773     ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
3774     else
3775     ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
3776     diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
3777     index d76e5e58905d..7319d3ca30e9 100644
3778     --- a/net/rxrpc/af_rxrpc.c
3779     +++ b/net/rxrpc/af_rxrpc.c
3780     @@ -195,7 +195,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
3781    
3782     service_in_use:
3783     write_unlock(&local->services_lock);
3784     - rxrpc_put_local(local);
3785     + rxrpc_unuse_local(local);
3786     ret = -EADDRINUSE;
3787     error_unlock:
3788     release_sock(&rx->sk);
3789     @@ -908,7 +908,7 @@ static int rxrpc_release_sock(struct sock *sk)
3790     rxrpc_queue_work(&rxnet->service_conn_reaper);
3791     rxrpc_queue_work(&rxnet->client_conn_reaper);
3792    
3793     - rxrpc_put_local(rx->local);
3794     + rxrpc_unuse_local(rx->local);
3795     rx->local = NULL;
3796     key_put(rx->key);
3797     rx->key = NULL;
3798     diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
3799     index 03e0fc8c183f..dfd9eab77cc8 100644
3800     --- a/net/rxrpc/ar-internal.h
3801     +++ b/net/rxrpc/ar-internal.h
3802     @@ -258,7 +258,8 @@ struct rxrpc_security {
3803     */
3804     struct rxrpc_local {
3805     struct rcu_head rcu;
3806     - atomic_t usage;
3807     + atomic_t active_users; /* Number of users of the local endpoint */
3808     + atomic_t usage; /* Number of references to the structure */
3809     struct rxrpc_net *rxnet; /* The network ns in which this resides */
3810     struct list_head link;
3811     struct socket *socket; /* my UDP socket */
3812     @@ -998,6 +999,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *, const struct sockaddr_rxrpc
3813     struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *);
3814     struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *);
3815     void rxrpc_put_local(struct rxrpc_local *);
3816     +struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *);
3817     +void rxrpc_unuse_local(struct rxrpc_local *);
3818     void rxrpc_queue_local(struct rxrpc_local *);
3819     void rxrpc_destroy_all_locals(struct rxrpc_net *);
3820    
3821     @@ -1057,6 +1060,7 @@ void rxrpc_destroy_all_peers(struct rxrpc_net *);
3822     struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *);
3823     struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *);
3824     void rxrpc_put_peer(struct rxrpc_peer *);
3825     +void rxrpc_put_peer_locked(struct rxrpc_peer *);
3826    
3827     /*
3828     * proc.c
3829     diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
3830     index d591f54cb91f..7965600ee5de 100644
3831     --- a/net/rxrpc/input.c
3832     +++ b/net/rxrpc/input.c
3833     @@ -1106,8 +1106,12 @@ static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
3834     {
3835     _enter("%p,%p", local, skb);
3836    
3837     - skb_queue_tail(&local->event_queue, skb);
3838     - rxrpc_queue_local(local);
3839     + if (rxrpc_get_local_maybe(local)) {
3840     + skb_queue_tail(&local->event_queue, skb);
3841     + rxrpc_queue_local(local);
3842     + } else {
3843     + rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
3844     + }
3845     }
3846    
3847     /*
3848     @@ -1117,8 +1121,12 @@ static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
3849     {
3850     CHECK_SLAB_OKAY(&local->usage);
3851    
3852     - skb_queue_tail(&local->reject_queue, skb);
3853     - rxrpc_queue_local(local);
3854     + if (rxrpc_get_local_maybe(local)) {
3855     + skb_queue_tail(&local->reject_queue, skb);
3856     + rxrpc_queue_local(local);
3857     + } else {
3858     + rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
3859     + }
3860     }
3861    
3862     /*
3863     diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c
3864     index 10317dbdab5f..c752ad487067 100644
3865     --- a/net/rxrpc/local_object.c
3866     +++ b/net/rxrpc/local_object.c
3867     @@ -83,6 +83,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
3868     local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
3869     if (local) {
3870     atomic_set(&local->usage, 1);
3871     + atomic_set(&local->active_users, 1);
3872     local->rxnet = rxnet;
3873     INIT_LIST_HEAD(&local->link);
3874     INIT_WORK(&local->processor, rxrpc_local_processor);
3875     @@ -96,7 +97,7 @@ static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
3876     local->debug_id = atomic_inc_return(&rxrpc_debug_id);
3877     memcpy(&local->srx, srx, sizeof(*srx));
3878     local->srx.srx_service = 0;
3879     - trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
3880     + trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, NULL);
3881     }
3882    
3883     _leave(" = %p", local);
3884     @@ -270,11 +271,8 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
3885     * bind the transport socket may still fail if we're attempting
3886     * to use a local address that the dying object is still using.
3887     */
3888     - if (!rxrpc_get_local_maybe(local)) {
3889     - cursor = cursor->next;
3890     - list_del_init(&local->link);
3891     + if (!rxrpc_use_local(local))
3892     break;
3893     - }
3894    
3895     age = "old";
3896     goto found;
3897     @@ -288,7 +286,10 @@ struct rxrpc_local *rxrpc_lookup_local(struct net *net,
3898     if (ret < 0)
3899     goto sock_error;
3900    
3901     - list_add_tail(&local->link, cursor);
3902     + if (cursor != &rxnet->local_endpoints)
3903     + list_replace_init(cursor, &local->link);
3904     + else
3905     + list_add_tail(&local->link, cursor);
3906     age = "new";
3907    
3908     found:
3909     @@ -324,7 +325,7 @@ struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
3910     int n;
3911    
3912     n = atomic_inc_return(&local->usage);
3913     - trace_rxrpc_local(local, rxrpc_local_got, n, here);
3914     + trace_rxrpc_local(local->debug_id, rxrpc_local_got, n, here);
3915     return local;
3916     }
3917    
3918     @@ -338,7 +339,8 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
3919     if (local) {
3920     int n = atomic_fetch_add_unless(&local->usage, 1, 0);
3921     if (n > 0)
3922     - trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
3923     + trace_rxrpc_local(local->debug_id, rxrpc_local_got,
3924     + n + 1, here);
3925     else
3926     local = NULL;
3927     }
3928     @@ -346,24 +348,18 @@ struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
3929     }
3930    
3931     /*
3932     - * Queue a local endpoint.
3933     + * Queue a local endpoint and pass the caller's reference to the work item.
3934     */
3935     void rxrpc_queue_local(struct rxrpc_local *local)
3936     {
3937     const void *here = __builtin_return_address(0);
3938     + unsigned int debug_id = local->debug_id;
3939     + int n = atomic_read(&local->usage);
3940    
3941     if (rxrpc_queue_work(&local->processor))
3942     - trace_rxrpc_local(local, rxrpc_local_queued,
3943     - atomic_read(&local->usage), here);
3944     -}
3945     -
3946     -/*
3947     - * A local endpoint reached its end of life.
3948     - */
3949     -static void __rxrpc_put_local(struct rxrpc_local *local)
3950     -{
3951     - _enter("%d", local->debug_id);
3952     - rxrpc_queue_work(&local->processor);
3953     + trace_rxrpc_local(debug_id, rxrpc_local_queued, n, here);
3954     + else
3955     + rxrpc_put_local(local);
3956     }
3957    
3958     /*
3959     @@ -376,10 +372,47 @@ void rxrpc_put_local(struct rxrpc_local *local)
3960    
3961     if (local) {
3962     n = atomic_dec_return(&local->usage);
3963     - trace_rxrpc_local(local, rxrpc_local_put, n, here);
3964     + trace_rxrpc_local(local->debug_id, rxrpc_local_put, n, here);
3965    
3966     if (n == 0)
3967     - __rxrpc_put_local(local);
3968     + call_rcu(&local->rcu, rxrpc_local_rcu);
3969     + }
3970     +}
3971     +
3972     +/*
3973     + * Start using a local endpoint.
3974     + */
3975     +struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local)
3976     +{
3977     + unsigned int au;
3978     +
3979     + local = rxrpc_get_local_maybe(local);
3980     + if (!local)
3981     + return NULL;
3982     +
3983     + au = atomic_fetch_add_unless(&local->active_users, 1, 0);
3984     + if (au == 0) {
3985     + rxrpc_put_local(local);
3986     + return NULL;
3987     + }
3988     +
3989     + return local;
3990     +}
3991     +
3992     +/*
3993     + * Cease using a local endpoint. Once the number of active users reaches 0, we
3994     + * start the closure of the transport in the work processor.
3995     + */
3996     +void rxrpc_unuse_local(struct rxrpc_local *local)
3997     +{
3998     + unsigned int au;
3999     +
4000     + if (local) {
4001     + au = atomic_dec_return(&local->active_users);
4002     + if (au == 0)
4003     + rxrpc_queue_local(local);
4004     + else
4005     + rxrpc_put_local(local);
4006     }
4007     }
4008    
4009     @@ -397,16 +430,6 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
4010    
4011     _enter("%d", local->debug_id);
4012    
4013     - /* We can get a race between an incoming call packet queueing the
4014     - * processor again and the work processor starting the destruction
4015     - * process which will shut down the UDP socket.
4016     - */
4017     - if (local->dead) {
4018     - _leave(" [already dead]");
4019     - return;
4020     - }
4021     - local->dead = true;
4022     -
4023     mutex_lock(&rxnet->local_mutex);
4024     list_del_init(&local->link);
4025     mutex_unlock(&rxnet->local_mutex);
4026     @@ -426,13 +449,11 @@ static void rxrpc_local_destroyer(struct rxrpc_local *local)
4027     */
4028     rxrpc_purge_queue(&local->reject_queue);
4029     rxrpc_purge_queue(&local->event_queue);
4030     -
4031     - _debug("rcu local %d", local->debug_id);
4032     - call_rcu(&local->rcu, rxrpc_local_rcu);
4033     }
4034    
4035     /*
4036     - * Process events on an endpoint
4037     + * Process events on an endpoint. The work item carries a ref which
4038     + * we must release.
4039     */
4040     static void rxrpc_local_processor(struct work_struct *work)
4041     {
4042     @@ -440,13 +461,15 @@ static void rxrpc_local_processor(struct work_struct *work)
4043     container_of(work, struct rxrpc_local, processor);
4044     bool again;
4045    
4046     - trace_rxrpc_local(local, rxrpc_local_processing,
4047     + trace_rxrpc_local(local->debug_id, rxrpc_local_processing,
4048     atomic_read(&local->usage), NULL);
4049    
4050     do {
4051     again = false;
4052     - if (atomic_read(&local->usage) == 0)
4053     - return rxrpc_local_destroyer(local);
4054     + if (atomic_read(&local->active_users) == 0) {
4055     + rxrpc_local_destroyer(local);
4056     + break;
4057     + }
4058    
4059     if (!skb_queue_empty(&local->reject_queue)) {
4060     rxrpc_reject_packets(local);
4061     @@ -458,6 +481,8 @@ static void rxrpc_local_processor(struct work_struct *work)
4062     again = true;
4063     }
4064     } while (again);
4065     +
4066     + rxrpc_put_local(local);
4067     }
4068    
4069     /*
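
The rxrpc_local rework splits one refcount into two: 'usage' pins the memory
while 'active_users' keeps the endpoint in service, and rxrpc_use_local() only
succeeds while the active count is still nonzero (an add-unless-zero). A
simplified C11-atomics sketch of the pattern; the real code hands the final
reference to a work item rather than acting inline:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct local {
            atomic_int usage;          /* pins the memory */
            atomic_int active_users;   /* keeps the service open */
    };

    static struct local *get_local(struct local *l)
    {
            atomic_fetch_add(&l->usage, 1);
            return l;
    }

    static void put_local(struct local *l)
    {
            if (atomic_fetch_sub(&l->usage, 1) == 1)
                    free(l);                        /* last ref: reclaim memory */
    }

    static struct local *use_local(struct local *l)
    {
            int v;

            get_local(l);
            /* add-unless-zero: only succeed while the endpoint is open */
            v = atomic_load(&l->active_users);
            while (v != 0)
                    if (atomic_compare_exchange_weak(&l->active_users, &v, v + 1))
                            return l;
            put_local(l);
            return NULL;                            /* already shutting down */
    }

    static void unuse_local(struct local *l)
    {
            if (atomic_fetch_sub(&l->active_users, 1) == 1)
                    puts("last user gone: shut the endpoint down");
            put_local(l);
    }

    int main(void)
    {
            struct local *l = malloc(sizeof(*l));

            if (!l)
                    return 1;
            atomic_init(&l->usage, 1);
            atomic_init(&l->active_users, 1);
            struct local *u = use_local(l);
            if (u)
                    unuse_local(u);
            unuse_local(l);         /* original holder drops out last */
            return 0;
    }
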
4070     diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
4071     index bd2fa3b7caa7..dc7fdaf20445 100644
4072     --- a/net/rxrpc/peer_event.c
4073     +++ b/net/rxrpc/peer_event.c
4074     @@ -375,7 +375,7 @@ static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
4075     spin_lock_bh(&rxnet->peer_hash_lock);
4076     list_add_tail(&peer->keepalive_link,
4077     &rxnet->peer_keepalive[slot & mask]);
4078     - rxrpc_put_peer(peer);
4079     + rxrpc_put_peer_locked(peer);
4080     }
4081    
4082     spin_unlock_bh(&rxnet->peer_hash_lock);
4083     diff --git a/net/rxrpc/peer_object.c b/net/rxrpc/peer_object.c
4084     index 5691b7d266ca..71547e8673b9 100644
4085     --- a/net/rxrpc/peer_object.c
4086     +++ b/net/rxrpc/peer_object.c
4087     @@ -440,6 +440,24 @@ void rxrpc_put_peer(struct rxrpc_peer *peer)
4088     }
4089     }
4090    
4091     +/*
4092     + * Drop a ref on a peer record where the caller already holds the
4093     + * peer_hash_lock.
4094     + */
4095     +void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
4096     +{
4097     + const void *here = __builtin_return_address(0);
4098     + int n;
4099     +
4100     + n = atomic_dec_return(&peer->usage);
4101     + trace_rxrpc_peer(peer, rxrpc_peer_put, n, here);
4102     + if (n == 0) {
4103     + hash_del_rcu(&peer->hash_link);
4104     + list_del_init(&peer->keepalive_link);
4105     + kfree_rcu(peer, rcu);
4106     + }
4107     +}
4108     +
4109     /*
4110     * Make sure all peer records have been discarded.
4111     */
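
rxrpc_put_peer_locked() exists so the keepalive dispatcher, which already
holds peer_hash_lock, can drop its reference without re-taking the lock. A toy
illustration of why a _locked variant is needed with a non-recursive lock
(the kernel's atomic refcounting is simplified to a plain counter here):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
    static int refs = 2;

    static void __put_last(void) { puts("unhash and free"); }

    static void put_peer(void)
    {
            pthread_mutex_lock(&hash_lock);
            if (--refs == 0)
                    __put_last();
            pthread_mutex_unlock(&hash_lock);
    }

    /* For callers already inside hash_lock: same effect, no re-lock,
     * which would deadlock on a non-recursive mutex. */
    static void put_peer_locked(void)
    {
            if (--refs == 0)
                    __put_last();
    }

    int main(void)
    {
            pthread_mutex_lock(&hash_lock);
            put_peer_locked();
            pthread_mutex_unlock(&hash_lock);
            put_peer();
            return 0;
    }
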
4112     diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
4113     index be01f9c5d963..5d6ab4f6fd7a 100644
4114     --- a/net/rxrpc/sendmsg.c
4115     +++ b/net/rxrpc/sendmsg.c
4116     @@ -230,6 +230,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
4117     rxrpc_set_call_completion(call,
4118     RXRPC_CALL_LOCAL_ERROR,
4119     0, ret);
4120     + rxrpc_notify_socket(call);
4121     goto out;
4122     }
4123     _debug("need instant resend %d", ret);
4124     diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
4125     index 160b2764b2ad..6a8c279a4b20 100644
4126     --- a/sound/soc/davinci/davinci-mcasp.c
4127     +++ b/sound/soc/davinci/davinci-mcasp.c
4128     @@ -1150,6 +1150,28 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
4129     return ret;
4130     }
4131    
4132     +static int davinci_mcasp_hw_rule_slot_width(struct snd_pcm_hw_params *params,
4133     + struct snd_pcm_hw_rule *rule)
4134     +{
4135     + struct davinci_mcasp_ruledata *rd = rule->private;
4136     + struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
4137     + struct snd_mask nfmt;
4138     + int i, slot_width;
4139     +
4140     + snd_mask_none(&nfmt);
4141     + slot_width = rd->mcasp->slot_width;
4142     +
4143     + for (i = 0; i <= SNDRV_PCM_FORMAT_LAST; i++) {
4144     + if (snd_mask_test(fmt, i)) {
4145     + if (snd_pcm_format_width(i) <= slot_width) {
4146     + snd_mask_set(&nfmt, i);
4147     + }
4148     + }
4149     + }
4150     +
4151     + return snd_mask_refine(fmt, &nfmt);
4152     +}
4153     +
4154     static const unsigned int davinci_mcasp_dai_rates[] = {
4155     8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000,
4156     88200, 96000, 176400, 192000,
4157     @@ -1257,7 +1279,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4158     struct davinci_mcasp_ruledata *ruledata =
4159     &mcasp->ruledata[substream->stream];
4160     u32 max_channels = 0;
4161     - int i, dir;
4162     + int i, dir, ret;
4163     int tdm_slots = mcasp->tdm_slots;
4164    
4165     /* Do not allow more then one stream per direction */
4166     @@ -1286,6 +1308,7 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4167     max_channels++;
4168     }
4169     ruledata->serializers = max_channels;
4170     + ruledata->mcasp = mcasp;
4171     max_channels *= tdm_slots;
4172     /*
4173     * If the already active stream has less channels than the calculated
4174     @@ -1311,20 +1334,22 @@ static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
4175     0, SNDRV_PCM_HW_PARAM_CHANNELS,
4176     &mcasp->chconstr[substream->stream]);
4177    
4178     - if (mcasp->slot_width)
4179     - snd_pcm_hw_constraint_minmax(substream->runtime,
4180     - SNDRV_PCM_HW_PARAM_SAMPLE_BITS,
4181     - 8, mcasp->slot_width);
4182     + if (mcasp->slot_width) {
4183     + /* Only allow formats requiring <= slot_width bits on the bus */
4184     + ret = snd_pcm_hw_rule_add(substream->runtime, 0,
4185     + SNDRV_PCM_HW_PARAM_FORMAT,
4186     + davinci_mcasp_hw_rule_slot_width,
4187     + ruledata,
4188     + SNDRV_PCM_HW_PARAM_FORMAT, -1);
4189     + if (ret)
4190     + return ret;
4191     + }
4192    
4193     /*
4194     * If we rely on implicit BCLK divider setting we should
4195     * set constraints based on what we can provide.
4196     */
4197     if (mcasp->bclk_master && mcasp->bclk_div == 0 && mcasp->sysclk_freq) {
4198     - int ret;
4199     -
4200     - ruledata->mcasp = mcasp;
4201     -
4202     ret = snd_pcm_hw_rule_add(substream->runtime, 0,
4203     SNDRV_PCM_HW_PARAM_RATE,
4204     davinci_mcasp_hw_rule_rate,
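
The new davinci_mcasp_hw_rule_slot_width() builds a mask of only those sample
formats whose width fits the configured slot width and refines the constraint
with it. The same filtering, reduced to a plain bitmask over an invented
format table:

    #include <stdint.h>
    #include <stdio.h>

    static const int fmt_bits[] = { 8, 16, 24, 32 };

    /* Keep only formats whose bit width fits in the TDM slot. */
    static uint32_t refine_by_slot_width(uint32_t fmt_mask, int slot_width)
    {
            uint32_t nfmt = 0;

            for (unsigned i = 0; i < sizeof(fmt_bits) / sizeof(fmt_bits[0]); i++)
                    if ((fmt_mask & (1u << i)) && fmt_bits[i] <= slot_width)
                            nfmt |= 1u << i;
            return fmt_mask & nfmt;
    }

    int main(void)
    {
            /* All four formats offered, 16-bit slots: only 8/16 survive. */
            printf("%#x\n", refine_by_slot_width(0xF, 16));   /* 0x3 */
            return 0;
    }
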
4205     diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
4206     index 60d43d53a8f5..11399f81c92f 100644
4207     --- a/sound/soc/rockchip/rockchip_i2s.c
4208     +++ b/sound/soc/rockchip/rockchip_i2s.c
4209     @@ -329,7 +329,6 @@ static int rockchip_i2s_hw_params(struct snd_pcm_substream *substream,
4210     val |= I2S_CHN_4;
4211     break;
4212     case 2:
4213     - case 1:
4214     val |= I2S_CHN_2;
4215     break;
4216     default:
4217     @@ -462,7 +461,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
4218     },
4219     .capture = {
4220     .stream_name = "Capture",
4221     - .channels_min = 1,
4222     + .channels_min = 2,
4223     .channels_max = 2,
4224     .rates = SNDRV_PCM_RATE_8000_192000,
4225     .formats = (SNDRV_PCM_FMTBIT_S8 |
4226     @@ -662,7 +661,7 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
4227     }
4228    
4229     if (!of_property_read_u32(node, "rockchip,capture-channels", &val)) {
4230     - if (val >= 1 && val <= 8)
4231     + if (val >= 2 && val <= 8)
4232     soc_dai->capture.channels_max = val;
4233     }
4234    
4235     diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
4236     index 62aa320c2070..dafc3b7f8d72 100644
4237     --- a/sound/soc/soc-core.c
4238     +++ b/sound/soc/soc-core.c
4239     @@ -1513,8 +1513,11 @@ static int soc_probe_link_dais(struct snd_soc_card *card,
4240     }
4241     }
4242    
4243     - if (dai_link->dai_fmt)
4244     - snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
4245     + if (dai_link->dai_fmt) {
4246     + ret = snd_soc_runtime_set_dai_fmt(rtd, dai_link->dai_fmt);
4247     + if (ret)
4248     + return ret;
4249     + }
4250    
4251     ret = soc_post_component_init(rtd, dai_link->name);
4252     if (ret)
4253     diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
4254     index 3bfc788372f3..4ce57510b623 100644
4255     --- a/sound/soc/soc-dapm.c
4256     +++ b/sound/soc/soc-dapm.c
4257     @@ -1145,8 +1145,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
4258     list_add_tail(&widget->work_list, list);
4259    
4260     if (custom_stop_condition && custom_stop_condition(widget, dir)) {
4261     - widget->endpoints[dir] = 1;
4262     - return widget->endpoints[dir];
4263     + list = NULL;
4264     + custom_stop_condition = NULL;
4265     }
4266    
4267     if ((widget->is_ep & SND_SOC_DAPM_DIR_TO_EP(dir)) && widget->connected) {
4268     @@ -1183,8 +1183,8 @@ static __always_inline int is_connected_ep(struct snd_soc_dapm_widget *widget,
4269     *
4270     * Optionally, can be supplied with a function acting as a stopping condition.
4271     * This function takes the dapm widget currently being examined and the walk
4272     - * direction as an arguments, it should return true if the walk should be
4273     - * stopped and false otherwise.
4274     + * direction as arguments; it should return true if widgets from that point
4275     + * in the graph onwards should not be added to the widget list.
4276     */
4277     static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
4278     struct list_head *list,
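
The soc-dapm fix changes what the custom stop condition means: the walk
continues (so per-widget caching stays consistent) but nothing beyond the
stopping point is added to the output list, done by passing a NULL list down.
A toy graph walk with the same shape:

    #include <stdbool.h>
    #include <stdio.h>

    struct node {
            const char *name;
            struct node *child[2];
    };

    typedef bool (*stop_fn)(const struct node *);

    /* Visit everything, but once the stop condition fires, pass a NULL
     * list down so nothing beneath that point is collected. */
    static void walk(struct node *n, const char **out, int *count, stop_fn stop)
    {
            if (!n)
                    return;
            if (out)
                    out[(*count)++] = n->name;   /* the firing node is kept */
            if (out && stop && stop(n)) {
                    out = NULL;                  /* keep walking, stop collecting */
                    stop = NULL;
            }
            walk(n->child[0], out, count, stop);
            walk(n->child[1], out, count, stop);
    }

    static bool stop_at_b(const struct node *n) { return n->name[0] == 'b'; }

    int main(void)
    {
            struct node d = { "d", { 0, 0 } }, c = { "c", { 0, 0 } };
            struct node b = { "b", { &d, 0 } }, a = { "a", { &b, &c } };
            const char *out[8];
            int count = 0;

            walk(&a, out, &count, stop_at_b);
            for (int i = 0; i < count; i++)
                    printf("%s ", out[i]);       /* prints: a b c (not d) */
            printf("\n");
            return 0;
    }
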
4279     diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
4280     index fa56fde6e8d8..91c0a4434da2 100644
4281     --- a/tools/perf/bench/numa.c
4282     +++ b/tools/perf/bench/numa.c
4283     @@ -378,8 +378,10 @@ static u8 *alloc_data(ssize_t bytes0, int map_flags,
4284    
4285     /* Allocate and initialize all memory on CPU#0: */
4286     if (init_cpu0) {
4287     - orig_mask = bind_to_node(0);
4288     - bind_to_memnode(0);
4289     + int node = numa_node_of_cpu(0);
4290     +
4291     + orig_mask = bind_to_node(node);
4292     + bind_to_memnode(node);
4293     }
4294    
4295     bytes = bytes0 + HPSIZE;
4296     diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
4297     index f42f228e8899..137955197ba8 100644
4298     --- a/tools/perf/builtin-ftrace.c
4299     +++ b/tools/perf/builtin-ftrace.c
4300     @@ -174,7 +174,7 @@ static int set_tracing_cpumask(struct cpu_map *cpumap)
4301     int last_cpu;
4302    
4303     last_cpu = cpu_map__cpu(cpumap, cpumap->nr - 1);
4304     - mask_size = (last_cpu + 3) / 4 + 1;
4305     + mask_size = last_cpu / 4 + 2; /* one more byte for EOS */
4306     mask_size += last_cpu / 32; /* ',' is needed for every 32th cpus */
4307    
4308     cpumask = malloc(mask_size);
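
The set_tracing_cpumask() fix sizes the buffer as one hex digit per four CPUs
plus a terminator, plus a ',' for every 32 CPUs in the printed mask. Worked
through in a few lines:

    #include <stdio.h>

    /* One hex digit covers 4 CPUs (last_cpu/4 + 1 digits), one byte for
     * the NUL, and a ',' separator per 32 CPUs in the printed mask. */
    static size_t mask_size(int last_cpu)
    {
            size_t n = last_cpu / 4 + 2;   /* digits + EOS */

            n += last_cpu / 32;            /* commas */
            return n;
    }

    int main(void)
    {
            /* 4 CPUs -> last_cpu 3 -> "f" + NUL needs 2 bytes. */
            printf("%zu\n", mask_size(3));
            /* 64 CPUs -> last_cpu 63 -> 16 digits + 1 comma + NUL = 18. */
            printf("%zu\n", mask_size(63));
            return 0;
    }
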
4309     diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
4310     index 68c92bb599ee..6b36b7110669 100644
4311     --- a/tools/perf/pmu-events/jevents.c
4312     +++ b/tools/perf/pmu-events/jevents.c
4313     @@ -450,6 +450,7 @@ static struct fixed {
4314     { "inst_retired.any_p", "event=0xc0" },
4315     { "cpu_clk_unhalted.ref", "event=0x0,umask=0x03" },
4316     { "cpu_clk_unhalted.thread", "event=0x3c" },
4317     + { "cpu_clk_unhalted.core", "event=0x3c" },
4318     { "cpu_clk_unhalted.thread_any", "event=0x3c,any=1" },
4319     { NULL, NULL},
4320     };
4321     diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
4322     index 383674f448fc..f93846edc1e0 100644
4323     --- a/tools/perf/util/cpumap.c
4324     +++ b/tools/perf/util/cpumap.c
4325     @@ -701,7 +701,10 @@ size_t cpu_map__snprint_mask(struct cpu_map *map, char *buf, size_t size)
4326     unsigned char *bitmap;
4327     int last_cpu = cpu_map__cpu(map, map->nr - 1);
4328    
4329     - bitmap = zalloc((last_cpu + 7) / 8);
4330     + if (buf == NULL)
4331     + return 0;
4332     +
4333     + bitmap = zalloc(last_cpu / 8 + 1);
4334     if (bitmap == NULL) {
4335     buf[0] = '\0';
4336     return 0;
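
[Editor's note: bits 0..last_cpu are last_cpu + 1 bits, so the bitmap needs last_cpu / 8 + 1 bytes; the old (last_cpu + 7) / 8 undersized the buffer by one byte whenever last_cpu was a multiple of eight, so setting bit 8 (or 16, 24, ...) wrote past the allocation. Illustrative check, not from the patch:

	#include <assert.h>

	int main(void)
	{
		int last_cpu = 8;			/* bits 0..8 = 9 bits */

		assert((last_cpu + 7) / 8 == 1);	/* old: bit 8 out of bounds */
		assert(last_cpu / 8 + 1 == 2);		/* fixed: holds all 9 bits */
		return 0;
	}
]
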
4337     diff --git a/tools/testing/selftests/bpf/sendmsg6_prog.c b/tools/testing/selftests/bpf/sendmsg6_prog.c
4338     index 5aeaa284fc47..a68062820410 100644
4339     --- a/tools/testing/selftests/bpf/sendmsg6_prog.c
4340     +++ b/tools/testing/selftests/bpf/sendmsg6_prog.c
4341     @@ -41,8 +41,7 @@ int sendmsg_v6_prog(struct bpf_sock_addr *ctx)
4342     }
4343    
4344     /* Rewrite destination. */
4345     - if ((ctx->user_ip6[0] & 0xFFFF) == bpf_htons(0xFACE) &&
4346     - ctx->user_ip6[0] >> 16 == bpf_htons(0xB00C)) {
4347     + if (ctx->user_ip6[0] == bpf_htonl(0xFACEB00C)) {
4348     ctx->user_ip6[0] = bpf_htonl(DST_REWRITE_IP6_0);
4349     ctx->user_ip6[1] = bpf_htonl(DST_REWRITE_IP6_1);
4350     ctx->user_ip6[2] = bpf_htonl(DST_REWRITE_IP6_2);
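
[Editor's note: user_ip6[0] holds the first four address bytes fa ce b0 0c in network byte order, so the single bpf_htonl() comparison is both simpler and endianness-independent; the old pair of half-word tests happened to pass only on little-endian hosts. A small host-side sketch of the idea, using the ordinary htonl() (illustrative, not from the patch):

	#include <arpa/inet.h>	/* htonl() */
	#include <assert.h>
	#include <stdint.h>
	#include <string.h>

	int main(void)
	{
		/* First 32 bits of a face:b00c::/32 destination on the wire. */
		unsigned char wire[4] = { 0xfa, 0xce, 0xb0, 0x0c };
		uint32_t word;

		memcpy(&word, wire, sizeof(word));	/* as user_ip6[0] is loaded */
		assert(word == htonl(0xFACEB00C));	/* true on either endianness */
		return 0;
	}
]
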
4351     diff --git a/tools/testing/selftests/kvm/config b/tools/testing/selftests/kvm/config
4352     new file mode 100644
4353     index 000000000000..63ed533f73d6
4354     --- /dev/null
4355     +++ b/tools/testing/selftests/kvm/config
4356     @@ -0,0 +1,3 @@
4357     +CONFIG_KVM=y
4358     +CONFIG_KVM_INTEL=y
4359     +CONFIG_KVM_AMD=y
4360     diff --git a/tools/testing/selftests/net/forwarding/gre_multipath.sh b/tools/testing/selftests/net/forwarding/gre_multipath.sh
4361     index cca2baa03fb8..a8d8e8b3dc81 100755
4362     --- a/tools/testing/selftests/net/forwarding/gre_multipath.sh
4363     +++ b/tools/testing/selftests/net/forwarding/gre_multipath.sh
4364     @@ -93,18 +93,10 @@ sw1_create()
4365     ip route add vrf v$ol1 192.0.2.16/28 \
4366     nexthop dev g1a \
4367     nexthop dev g1b
4368     -
4369     - tc qdisc add dev $ul1 clsact
4370     - tc filter add dev $ul1 egress pref 111 prot ipv4 \
4371     - flower dst_ip 192.0.2.66 action pass
4372     - tc filter add dev $ul1 egress pref 222 prot ipv4 \
4373     - flower dst_ip 192.0.2.82 action pass
4374     }
4375    
4376     sw1_destroy()
4377     {
4378     - tc qdisc del dev $ul1 clsact
4379     -
4380     ip route del vrf v$ol1 192.0.2.16/28
4381    
4382     ip route del vrf v$ol1 192.0.2.82/32 via 192.0.2.146
4383     @@ -139,10 +131,18 @@ sw2_create()
4384     ip route add vrf v$ol2 192.0.2.0/28 \
4385     nexthop dev g2a \
4386     nexthop dev g2b
4387     +
4388     + tc qdisc add dev $ul2 clsact
4389     + tc filter add dev $ul2 ingress pref 111 prot 802.1Q \
4390     + flower vlan_id 111 action pass
4391     + tc filter add dev $ul2 ingress pref 222 prot 802.1Q \
4392     + flower vlan_id 222 action pass
4393     }
4394    
4395     sw2_destroy()
4396     {
4397     + tc qdisc del dev $ul2 clsact
4398     +
4399     ip route del vrf v$ol2 192.0.2.0/28
4400    
4401     ip route del vrf v$ol2 192.0.2.81/32 via 192.0.2.145
4402     @@ -187,12 +187,16 @@ setup_prepare()
4403     sw1_create
4404     sw2_create
4405     h2_create
4406     +
4407     + forwarding_enable
4408     }
4409    
4410     cleanup()
4411     {
4412     pre_cleanup
4413    
4414     + forwarding_restore
4415     +
4416     h2_destroy
4417     sw2_destroy
4418     sw1_destroy
4419     @@ -211,15 +215,15 @@ multipath4_test()
4420     nexthop dev g1a weight $weight1 \
4421     nexthop dev g1b weight $weight2
4422    
4423     - local t0_111=$(tc_rule_stats_get $ul1 111 egress)
4424     - local t0_222=$(tc_rule_stats_get $ul1 222 egress)
4425     + local t0_111=$(tc_rule_stats_get $ul2 111 ingress)
4426     + local t0_222=$(tc_rule_stats_get $ul2 222 ingress)
4427    
4428     ip vrf exec v$h1 \
4429     $MZ $h1 -q -p 64 -A 192.0.2.1 -B 192.0.2.18 \
4430     -d 1msec -t udp "sp=1024,dp=0-32768"
4431    
4432     - local t1_111=$(tc_rule_stats_get $ul1 111 egress)
4433     - local t1_222=$(tc_rule_stats_get $ul1 222 egress)
4434     + local t1_111=$(tc_rule_stats_get $ul2 111 ingress)
4435     + local t1_222=$(tc_rule_stats_get $ul2 222 ingress)
4436    
4437     local d111=$((t1_111 - t0_111))
4438     local d222=$((t1_222 - t0_222))