Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0123-5.4.24-all-fixes.patch



Revision 3504
Mon May 11 14:36:25 2020 UTC by niro
File size: 291670 bytes
-linux-5.4.24
1 niro 3504 diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
2     index b89c88168d6a..b9b50553bfc5 100644
3     --- a/Documentation/kbuild/makefiles.rst
4     +++ b/Documentation/kbuild/makefiles.rst
5     @@ -1115,23 +1115,6 @@ When kbuild executes, the following steps are followed (roughly):
6     In this example, extra-y is used to list object files that
7     shall be built, but shall not be linked as part of built-in.a.
8    
9     - header-test-y
10     -
11     - header-test-y specifies headers (`*.h`) in the current directory that
12     - should be compile tested to ensure they are self-contained,
13     - i.e. compilable as standalone units. If CONFIG_HEADER_TEST is enabled,
14     - this builds them as part of extra-y.
15     -
16     - header-test-pattern-y
17     -
18     - This works as a weaker version of header-test-y, and accepts wildcard
19     - patterns. The typical usage is::
20     -
21     - header-test-pattern-y += *.h
22     -
23     - This specifies all the files that matches to `*.h` in the current
24     - directory, but the files in 'header-test-' are excluded.
25     -
26     6.7 Commands useful for building a boot image
27     ---------------------------------------------
28    
29     diff --git a/Documentation/networking/nf_flowtable.txt b/Documentation/networking/nf_flowtable.txt
30     index ca2136c76042..0bf32d1121be 100644
31     --- a/Documentation/networking/nf_flowtable.txt
32     +++ b/Documentation/networking/nf_flowtable.txt
33     @@ -76,7 +76,7 @@ flowtable and add one rule to your forward chain.
34    
35     table inet x {
36     flowtable f {
37     - hook ingress priority 0 devices = { eth0, eth1 };
38     + hook ingress priority 0; devices = { eth0, eth1 };
39     }
40     chain y {
41     type filter hook forward priority 0; policy accept;
42     diff --git a/Makefile b/Makefile
43     index af5e90075514..c32c78cf2fe5 100644
44     --- a/Makefile
45     +++ b/Makefile
46     @@ -1,7 +1,7 @@
47     # SPDX-License-Identifier: GPL-2.0
48     VERSION = 5
49     PATCHLEVEL = 4
50     -SUBLEVEL = 23
51     +SUBLEVEL = 24
52     EXTRAVERSION =
53     NAME = Kleptomaniac Octopus
54    
55     @@ -618,7 +618,6 @@ ifeq ($(KBUILD_EXTMOD),)
56     init-y := init/
57     drivers-y := drivers/ sound/
58     drivers-$(CONFIG_SAMPLES) += samples/
59     -drivers-$(CONFIG_KERNEL_HEADER_TEST) += include/
60     net-y := net/
61     libs-y := lib/
62     core-y := usr/
63     @@ -1196,19 +1195,15 @@ headers: $(version_h) scripts_unifdef uapi-asm-generic archheaders archscripts
64     $(Q)$(MAKE) $(hdr-inst)=include/uapi
65     $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
66    
67     +# Deprecated. It is no-op now.
68     PHONY += headers_check
69     -headers_check: headers
70     - $(Q)$(MAKE) $(hdr-inst)=include/uapi HDRCHECK=1
71     - $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi HDRCHECK=1
72     +headers_check:
73     + @:
74    
75     ifdef CONFIG_HEADERS_INSTALL
76     prepare: headers
77     endif
78    
79     -ifdef CONFIG_HEADERS_CHECK
80     -all: headers_check
81     -endif
82     -
83     PHONY += scripts_unifdef
84     scripts_unifdef: scripts_basic
85     $(Q)$(MAKE) $(build)=scripts scripts/unifdef
86     @@ -1476,7 +1471,6 @@ help:
87     @echo ' versioncheck - Sanity check on version.h usage'
88     @echo ' includecheck - Check for duplicate included header files'
89     @echo ' export_report - List the usages of all exported symbols'
90     - @echo ' headers_check - Sanity check on exported headers'
91     @echo ' headerdep - Detect inclusion cycles in headers'
92     @echo ' coccicheck - Check with Coccinelle'
93     @echo ''
94     @@ -1641,6 +1635,50 @@ help:
95     PHONY += prepare
96     endif # KBUILD_EXTMOD
97    
98     +# Single targets
99     +# ---------------------------------------------------------------------------
100     +# To build individual files in subdirectories, you can do like this:
101     +#
102     +# make foo/bar/baz.s
103     +#
104     +# The supported suffixes for single-target are listed in 'single-targets'
105     +#
106     +# To build only under specific subdirectories, you can do like this:
107     +#
108     +# make foo/bar/baz/
109     +
110     +ifdef single-build
111     +
112     +# .ko is special because modpost is needed
113     +single-ko := $(sort $(filter %.ko, $(MAKECMDGOALS)))
114     +single-no-ko := $(sort $(patsubst %.ko,%.mod, $(MAKECMDGOALS)))
115     +
116     +$(single-ko): single_modpost
117     + @:
118     +$(single-no-ko): descend
119     + @:
120     +
121     +ifeq ($(KBUILD_EXTMOD),)
122     +# For the single build of in-tree modules, use a temporary file to avoid
123     +# the situation of modules_install installing an invalid modules.order.
124     +MODORDER := .modules.tmp
125     +endif
126     +
127     +PHONY += single_modpost
128     +single_modpost: $(single-no-ko)
129     + $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER)
130     + $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
131     +
132     +KBUILD_MODULES := 1
133     +
134     +export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko))
135     +
136     +# trim unrelated directories
137     +build-dirs := $(foreach d, $(build-dirs), \
138     + $(if $(filter $(d)/%, $(KBUILD_SINGLE_TARGETS)), $(d)))
139     +
140     +endif
141     +
142     # Handle descending into subdirectories listed in $(build-dirs)
143     # Preset locale variables to speed up the build process. Limit locale
144     # tweaks to this spot to avoid wrong language settings when running
145     @@ -1649,7 +1687,9 @@ endif # KBUILD_EXTMOD
146     PHONY += descend $(build-dirs)
147     descend: $(build-dirs)
148     $(build-dirs): prepare
149     - $(Q)$(MAKE) $(build)=$@ single-build=$(single-build) need-builtin=1 need-modorder=1
150     + $(Q)$(MAKE) $(build)=$@ \
151     + single-build=$(if $(filter-out $@/, $(single-no-ko)),1) \
152     + need-builtin=1 need-modorder=1
153    
154     clean-dirs := $(addprefix _clean_, $(clean-dirs))
155     PHONY += $(clean-dirs) clean
156     @@ -1753,50 +1793,6 @@ tools/%: FORCE
157     $(Q)mkdir -p $(objtree)/tools
158     $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(tools_silent) $(filter --j% -j,$(MAKEFLAGS))" O=$(abspath $(objtree)) subdir=tools -C $(srctree)/tools/ $*
159    
160     -# Single targets
161     -# ---------------------------------------------------------------------------
162     -# To build individual files in subdirectories, you can do like this:
163     -#
164     -# make foo/bar/baz.s
165     -#
166     -# The supported suffixes for single-target are listed in 'single-targets'
167     -#
168     -# To build only under specific subdirectories, you can do like this:
169     -#
170     -# make foo/bar/baz/
171     -
172     -ifdef single-build
173     -
174     -single-all := $(filter $(single-targets), $(MAKECMDGOALS))
175     -
176     -# .ko is special because modpost is needed
177     -single-ko := $(sort $(filter %.ko, $(single-all)))
178     -single-no-ko := $(sort $(patsubst %.ko,%.mod, $(single-all)))
179     -
180     -$(single-ko): single_modpost
181     - @:
182     -$(single-no-ko): descend
183     - @:
184     -
185     -ifeq ($(KBUILD_EXTMOD),)
186     -# For the single build of in-tree modules, use a temporary file to avoid
187     -# the situation of modules_install installing an invalid modules.order.
188     -MODORDER := .modules.tmp
189     -endif
190     -
191     -PHONY += single_modpost
192     -single_modpost: $(single-no-ko)
193     - $(Q){ $(foreach m, $(single-ko), echo $(extmod-prefix)$m;) } > $(MODORDER)
194     - $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
195     -
196     -KBUILD_MODULES := 1
197     -
198     -export KBUILD_SINGLE_TARGETS := $(addprefix $(extmod-prefix), $(single-no-ko))
199     -
200     -single-build = $(if $(filter-out $@/, $(single-no-ko)),1)
201     -
202     -endif
203     -
204     # FIXME Should go into a make.lib or something
205     # ===========================================================================
206    
207     diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
208     index 60e11045ad76..d051f080e52e 100644
209     --- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
210     +++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
211     @@ -46,7 +46,7 @@
212     /* DAC */
213     format = "i2s";
214     mclk-fs = <256>;
215     - frame-inversion = <1>;
216     + frame-inversion;
217     cpu {
218     sound-dai = <&sti_uni_player2>;
219     };
220     diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
221     index 6176b9acba95..d0d832ab3d3b 100644
222     --- a/arch/mips/kernel/vpe.c
223     +++ b/arch/mips/kernel/vpe.c
224     @@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
225     {
226     list_del(&v->list);
227     if (v->load_addr)
228     - release_progmem(v);
229     + release_progmem(v->load_addr);
230     kfree(v);
231     }
232    
233     diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
234     index fcef678c3423..c531e3f3269e 100644
235     --- a/arch/x86/events/intel/core.c
236     +++ b/arch/x86/events/intel/core.c
237     @@ -4746,6 +4746,7 @@ __init int intel_pmu_init(void)
238     break;
239    
240     case INTEL_FAM6_ATOM_TREMONT_D:
241     + case INTEL_FAM6_ATOM_TREMONT:
242     x86_pmu.late_ack = true;
243     memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
244     sizeof(hw_cache_event_ids));
245     diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
246     index e1daf4151e11..4814c964692c 100644
247     --- a/arch/x86/events/intel/cstate.c
248     +++ b/arch/x86/events/intel/cstate.c
249     @@ -40,17 +40,18 @@
250     * Model specific counters:
251     * MSR_CORE_C1_RES: CORE C1 Residency Counter
252     * perf code: 0x00
253     - * Available model: SLM,AMT,GLM,CNL
254     + * Available model: SLM,AMT,GLM,CNL,TNT
255     * Scope: Core (each processor core has a MSR)
256     * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
257     * perf code: 0x01
258     * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
259     - * CNL,KBL,CML
260     + * CNL,KBL,CML,TNT
261     * Scope: Core
262     * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
263     * perf code: 0x02
264     * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
265     - * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
266     + * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
267     + * TNT
268     * Scope: Core
269     * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
270     * perf code: 0x03
271     @@ -60,17 +61,18 @@
272     * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
273     * perf code: 0x00
274     * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
275     - * KBL,CML,ICL,TGL
276     + * KBL,CML,ICL,TGL,TNT
277     * Scope: Package (physical package)
278     * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
279     * perf code: 0x01
280     * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
281     - * GLM,CNL,KBL,CML,ICL,TGL
282     + * GLM,CNL,KBL,CML,ICL,TGL,TNT
283     * Scope: Package (physical package)
284     * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
285     * perf code: 0x02
286     - * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
287     - * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
288     + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
289     + * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
290     + * TNT
291     * Scope: Package (physical package)
292     * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
293     * perf code: 0x03
294     @@ -87,7 +89,8 @@
295     * Scope: Package (physical package)
296     * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
297     * perf code: 0x06
298     - * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
299     + * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
300     + * TNT
301     * Scope: Package (physical package)
302     *
303     */
304     @@ -640,8 +643,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
305    
306     X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
307     X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates),
308     -
309     X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
310     + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D, glm_cstates),
311     + X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT, glm_cstates),
312    
313     X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
314     X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates),
315     diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
316     index 6f86650b3f77..a949f6f55991 100644
317     --- a/arch/x86/events/msr.c
318     +++ b/arch/x86/events/msr.c
319     @@ -75,8 +75,9 @@ static bool test_intel(int idx, void *data)
320    
321     case INTEL_FAM6_ATOM_GOLDMONT:
322     case INTEL_FAM6_ATOM_GOLDMONT_D:
323     -
324     case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
325     + case INTEL_FAM6_ATOM_TREMONT_D:
326     + case INTEL_FAM6_ATOM_TREMONT:
327    
328     case INTEL_FAM6_XEON_PHI_KNL:
329     case INTEL_FAM6_XEON_PHI_KNM:
330     diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h
331     index e49b77283924..181c992f448c 100644
332     --- a/arch/x86/kernel/cpu/resctrl/internal.h
333     +++ b/arch/x86/kernel/cpu/resctrl/internal.h
334     @@ -57,6 +57,7 @@ static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc)
335     }
336    
337     DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
338     +DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key);
339    
340     /**
341     * struct mon_evt - Entry in the event list of a resource
342     diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
343     index 397206f23d14..773124b0e18a 100644
344     --- a/arch/x86/kernel/cpu/resctrl/monitor.c
345     +++ b/arch/x86/kernel/cpu/resctrl/monitor.c
346     @@ -514,7 +514,7 @@ void mbm_handle_overflow(struct work_struct *work)
347    
348     mutex_lock(&rdtgroup_mutex);
349    
350     - if (!static_branch_likely(&rdt_enable_key))
351     + if (!static_branch_likely(&rdt_mon_enable_key))
352     goto out_unlock;
353    
354     d = get_domain_from_cpu(cpu, &rdt_resources_all[RDT_RESOURCE_L3]);
355     @@ -543,7 +543,7 @@ void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
356     unsigned long delay = msecs_to_jiffies(delay_ms);
357     int cpu;
358    
359     - if (!static_branch_likely(&rdt_enable_key))
360     + if (!static_branch_likely(&rdt_mon_enable_key))
361     return;
362     cpu = cpumask_any(&dom->cpu_mask);
363     dom->mbm_work_cpu = cpu;
364     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
365     index 207030db3481..07459120a222 100644
366     --- a/arch/x86/kvm/svm.c
367     +++ b/arch/x86/kvm/svm.c
368     @@ -1298,6 +1298,47 @@ static void shrink_ple_window(struct kvm_vcpu *vcpu)
369     }
370     }
371    
372     +/*
373     + * The default MMIO mask is a single bit (excluding the present bit),
374     + * which could conflict with the memory encryption bit. Check for
375     + * memory encryption support and override the default MMIO mask if
376     + * memory encryption is enabled.
377     + */
378     +static __init void svm_adjust_mmio_mask(void)
379     +{
380     + unsigned int enc_bit, mask_bit;
381     + u64 msr, mask;
382     +
383     + /* If there is no memory encryption support, use existing mask */
384     + if (cpuid_eax(0x80000000) < 0x8000001f)
385     + return;
386     +
387     + /* If memory encryption is not enabled, use existing mask */
388     + rdmsrl(MSR_K8_SYSCFG, msr);
389     + if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
390     + return;
391     +
392     + enc_bit = cpuid_ebx(0x8000001f) & 0x3f;
393     + mask_bit = boot_cpu_data.x86_phys_bits;
394     +
395     + /* Increment the mask bit if it is the same as the encryption bit */
396     + if (enc_bit == mask_bit)
397     + mask_bit++;
398     +
399     + /*
400     + * If the mask bit location is below 52, then some bits above the
401     + * physical addressing limit will always be reserved, so use the
402     + * rsvd_bits() function to generate the mask. This mask, along with
403     + * the present bit, will be used to generate a page fault with
404     + * PFER.RSV = 1.
405     + *
406     + * If the mask bit location is 52 (or above), then clear the mask.
407     + */
408     + mask = (mask_bit < 52) ? rsvd_bits(mask_bit, 51) | PT_PRESENT_MASK : 0;
409     +
410     + kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
411     +}
412     +
413     static __init int svm_hardware_setup(void)
414     {
415     int cpu;
416     @@ -1352,6 +1393,8 @@ static __init int svm_hardware_setup(void)
417     }
418     }
419    
420     + svm_adjust_mmio_mask();
421     +
422     for_each_possible_cpu(cpu) {
423     r = svm_cpu_init(cpu);
424     if (r)
425     diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
426     index 802ef7177d53..e1d8062ef12e 100644
427     --- a/arch/x86/kvm/vmx/nested.c
428     +++ b/arch/x86/kvm/vmx/nested.c
429     @@ -4609,32 +4609,28 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
430     {
431     unsigned long field;
432     u64 field_value;
433     + struct vcpu_vmx *vmx = to_vmx(vcpu);
434     unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
435     u32 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
436     int len;
437     gva_t gva = 0;
438     - struct vmcs12 *vmcs12;
439     + struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
440     + : get_vmcs12(vcpu);
441     struct x86_exception e;
442     short offset;
443    
444     if (!nested_vmx_check_permission(vcpu))
445     return 1;
446    
447     - if (to_vmx(vcpu)->nested.current_vmptr == -1ull)
448     + /*
449     + * In VMX non-root operation, when the VMCS-link pointer is -1ull,
450     + * any VMREAD sets the ALU flags for VMfailInvalid.
451     + */
452     + if (vmx->nested.current_vmptr == -1ull ||
453     + (is_guest_mode(vcpu) &&
454     + get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
455     return nested_vmx_failInvalid(vcpu);
456    
457     - if (!is_guest_mode(vcpu))
458     - vmcs12 = get_vmcs12(vcpu);
459     - else {
460     - /*
461     - * When vmcs->vmcs_link_pointer is -1ull, any VMREAD
462     - * to shadowed-field sets the ALU flags for VMfailInvalid.
463     - */
464     - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
465     - return nested_vmx_failInvalid(vcpu);
466     - vmcs12 = get_shadow_vmcs12(vcpu);
467     - }
468     -
469     /* Decode instruction info and find the field to read */
470     field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
471    
472     @@ -4713,13 +4709,20 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
473     */
474     u64 field_value = 0;
475     struct x86_exception e;
476     - struct vmcs12 *vmcs12;
477     + struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
478     + : get_vmcs12(vcpu);
479     short offset;
480    
481     if (!nested_vmx_check_permission(vcpu))
482     return 1;
483    
484     - if (vmx->nested.current_vmptr == -1ull)
485     + /*
486     + * In VMX non-root operation, when the VMCS-link pointer is -1ull,
487     + * any VMWRITE sets the ALU flags for VMfailInvalid.
488     + */
489     + if (vmx->nested.current_vmptr == -1ull ||
490     + (is_guest_mode(vcpu) &&
491     + get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
492     return nested_vmx_failInvalid(vcpu);
493    
494     if (vmx_instruction_info & (1u << 10))
495     @@ -4738,6 +4741,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
496    
497    
498     field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
499     +
500     + offset = vmcs_field_to_offset(field);
501     + if (offset < 0)
502     + return nested_vmx_failValid(vcpu,
503     + VMXERR_UNSUPPORTED_VMCS_COMPONENT);
504     +
505     /*
506     * If the vCPU supports "VMWRITE to any supported field in the
507     * VMCS," then the "read-only" fields are actually read/write.
508     @@ -4747,29 +4756,12 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
509     return nested_vmx_failValid(vcpu,
510     VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
511    
512     - if (!is_guest_mode(vcpu)) {
513     - vmcs12 = get_vmcs12(vcpu);
514     -
515     - /*
516     - * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
517     - * vmcs12, else we may crush a field or consume a stale value.
518     - */
519     - if (!is_shadow_field_rw(field))
520     - copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
521     - } else {
522     - /*
523     - * When vmcs->vmcs_link_pointer is -1ull, any VMWRITE
524     - * to shadowed-field sets the ALU flags for VMfailInvalid.
525     - */
526     - if (get_vmcs12(vcpu)->vmcs_link_pointer == -1ull)
527     - return nested_vmx_failInvalid(vcpu);
528     - vmcs12 = get_shadow_vmcs12(vcpu);
529     - }
530     -
531     - offset = vmcs_field_to_offset(field);
532     - if (offset < 0)
533     - return nested_vmx_failValid(vcpu,
534     - VMXERR_UNSUPPORTED_VMCS_COMPONENT);
535     + /*
536     + * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
537     + * vmcs12, else we may crush a field or consume a stale value.
538     + */
539     + if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
540     + copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
541    
542     /*
543     * Some Intel CPUs intentionally drop the reserved bits of the AR byte
544     diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
545     index 8ebcd9de87a2..8129b6b27c93 100644
546     --- a/arch/x86/kvm/vmx/vmx.c
547     +++ b/arch/x86/kvm/vmx/vmx.c
548     @@ -7165,6 +7165,7 @@ static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
549     else
550     intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
551    
552     + /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
553     return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
554     }
555    
556     @@ -7194,6 +7195,20 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
557     case x86_intercept_outs:
558     return vmx_check_intercept_io(vcpu, info);
559    
560     + case x86_intercept_lgdt:
561     + case x86_intercept_lidt:
562     + case x86_intercept_lldt:
563     + case x86_intercept_ltr:
564     + case x86_intercept_sgdt:
565     + case x86_intercept_sidt:
566     + case x86_intercept_sldt:
567     + case x86_intercept_str:
568     + if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
569     + return X86EMUL_CONTINUE;
570     +
571     + /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
572     + break;
573     +
574     /* TODO: check more intercepts... */
575     default:
576     break;
577     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
578     index 95180d67d570..c5e15eba8052 100644
579     --- a/arch/x86/kvm/x86.c
580     +++ b/arch/x86/kvm/x86.c
581     @@ -9192,12 +9192,6 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
582    
583     void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
584     {
585     - vcpu->arch.apf.msr_val = 0;
586     -
587     - vcpu_load(vcpu);
588     - kvm_mmu_unload(vcpu);
589     - vcpu_put(vcpu);
590     -
591     kvm_arch_vcpu_free(vcpu);
592     }
593    
594     diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
595     index b5516b04ffc0..d827a4a3e946 100644
596     --- a/drivers/acpi/acpi_watchdog.c
597     +++ b/drivers/acpi/acpi_watchdog.c
598     @@ -126,12 +126,11 @@ void __init acpi_watchdog_init(void)
599     gas = &entries[i].register_region;
600    
601     res.start = gas->address;
602     + res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
603     if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
604     res.flags = IORESOURCE_MEM;
605     - res.end = res.start + ALIGN(gas->access_width, 4) - 1;
606     } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
607     res.flags = IORESOURCE_IO;
608     - res.end = res.start + gas->access_width - 1;
609     } else {
610     pr_warn("Unsupported address space: %u\n",
611     gas->space_id);
612     diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
613     index 6b331061d34b..47c2bb444ab4 100644
614     --- a/drivers/bus/Kconfig
615     +++ b/drivers/bus/Kconfig
616     @@ -138,7 +138,6 @@ config TEGRA_ACONNECT
617     tristate "Tegra ACONNECT Bus Driver"
618     depends on ARCH_TEGRA_210_SOC
619     depends on OF && PM
620     - select PM_CLK
621     help
622     Driver for the Tegra ACONNECT bus which is used to interface with
623     the devices inside the Audio Processing Engine (APE) for Tegra210.
624     diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
625     index 22c6a2e61236..8ac390c2b514 100644
626     --- a/drivers/char/ipmi/ipmi_ssif.c
627     +++ b/drivers/char/ipmi/ipmi_ssif.c
628     @@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
629     flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
630     msg = ssif_info->curr_msg;
631     if (msg) {
632     + if (data) {
633     + if (len > IPMI_MAX_MSG_LENGTH)
634     + len = IPMI_MAX_MSG_LENGTH;
635     + memcpy(msg->rsp, data, len);
636     + } else {
637     + len = 0;
638     + }
639     msg->rsp_size = len;
640     - if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
641     - msg->rsp_size = IPMI_MAX_MSG_LENGTH;
642     - memcpy(msg->rsp, data, msg->rsp_size);
643     ssif_info->curr_msg = NULL;
644     }
645    
646     diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
647     index 7679f8a91745..35f8e098e9fa 100644
648     --- a/drivers/cpufreq/cpufreq.c
649     +++ b/drivers/cpufreq/cpufreq.c
650     @@ -1071,9 +1071,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
651     pol = policy->last_policy;
652     } else if (def_gov) {
653     pol = cpufreq_parse_policy(def_gov->name);
654     - } else {
655     - return -ENODATA;
656     + /*
657     + * In case the default governor is neiter "performance"
658     + * nor "powersave", fall back to the initial policy
659     + * value set by the driver.
660     + */
661     + if (pol == CPUFREQ_POLICY_UNKNOWN)
662     + pol = policy->policy;
663     }
664     + if (pol != CPUFREQ_POLICY_PERFORMANCE &&
665     + pol != CPUFREQ_POLICY_POWERSAVE)
666     + return -ENODATA;
667     }
668    
669     return cpufreq_set_policy(policy, gov, pol);
670     diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
671     index 174795ecbd3b..ff81b7cdab71 100644
672     --- a/drivers/devfreq/devfreq.c
673     +++ b/drivers/devfreq/devfreq.c
674     @@ -613,7 +613,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
675     {
676     struct devfreq *devfreq;
677     struct devfreq_governor *governor;
678     - static atomic_t devfreq_no = ATOMIC_INIT(-1);
679     int err = 0;
680    
681     if (!dev || !profile || !governor_name) {
682     @@ -677,8 +676,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
683     devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
684     atomic_set(&devfreq->suspend_count, 0);
685    
686     - dev_set_name(&devfreq->dev, "devfreq%d",
687     - atomic_inc_return(&devfreq_no));
688     + dev_set_name(&devfreq->dev, "%s", dev_name(dev));
689     err = device_register(&devfreq->dev);
690     if (err) {
691     mutex_unlock(&devfreq->lock);
692     diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
693     index d8ff63d91b86..a04349c6d17e 100644
694     --- a/drivers/edac/skx_common.c
695     +++ b/drivers/edac/skx_common.c
696     @@ -235,7 +235,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm)
697    
698     pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL);
699     if (!pdev) {
700     - skx_printk(KERN_ERR, "Can't get tolm/tohm\n");
701     + edac_dbg(2, "Can't get tolm/tohm\n");
702     return -ENODEV;
703     }
704    
705     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
706     index bb9a2771a0f9..05d114a72ca1 100644
707     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
708     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
709     @@ -1421,7 +1421,7 @@ amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
710    
711     static struct drm_driver kms_driver = {
712     .driver_features =
713     - DRIVER_USE_AGP | DRIVER_ATOMIC |
714     + DRIVER_ATOMIC |
715     DRIVER_GEM |
716     DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ,
717     .load = amdgpu_driver_load_kms,
718     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
719     index aef6c396bd58..8c0ac66d31d0 100644
720     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
721     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
722     @@ -157,6 +157,7 @@ struct amdgpu_gmc {
723     uint32_t srbm_soft_reset;
724     bool prt_warning;
725     uint64_t stolen_size;
726     + uint32_t sdpif_register;
727     /* apertures */
728     u64 shared_aperture_start;
729     u64 shared_aperture_end;
730     diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
731     index d7caca042173..da53a55bf955 100644
732     --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
733     +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
734     @@ -1382,6 +1382,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
735     }
736     }
737    
738     +/**
739     + * gmc_v9_0_restore_registers - restores regs
740     + *
741     + * @adev: amdgpu_device pointer
742     + *
743     + * This restores register values, saved at suspend.
744     + */
745     +static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
746     +{
747     + if (adev->asic_type == CHIP_RAVEN)
748     + WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
749     +}
750     +
751     /**
752     * gmc_v9_0_gart_enable - gart enable
753     *
754     @@ -1478,6 +1491,20 @@ static int gmc_v9_0_hw_init(void *handle)
755     return r;
756     }
757    
758     +/**
759     + * gmc_v9_0_save_registers - saves regs
760     + *
761     + * @adev: amdgpu_device pointer
762     + *
763     + * This saves potential register values that should be
764     + * restored upon resume
765     + */
766     +static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
767     +{
768     + if (adev->asic_type == CHIP_RAVEN)
769     + adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
770     +}
771     +
772     /**
773     * gmc_v9_0_gart_disable - gart disable
774     *
775     @@ -1514,9 +1541,16 @@ static int gmc_v9_0_hw_fini(void *handle)
776    
777     static int gmc_v9_0_suspend(void *handle)
778     {
779     + int r;
780     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
781    
782     - return gmc_v9_0_hw_fini(adev);
783     + r = gmc_v9_0_hw_fini(adev);
784     + if (r)
785     + return r;
786     +
787     + gmc_v9_0_save_registers(adev);
788     +
789     + return 0;
790     }
791    
792     static int gmc_v9_0_resume(void *handle)
793     @@ -1524,6 +1558,7 @@ static int gmc_v9_0_resume(void *handle)
794     int r;
795     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
796    
797     + gmc_v9_0_restore_registers(adev);
798     r = gmc_v9_0_hw_init(adev);
799     if (r)
800     return r;
801     diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
802     index b864869cc7e3..6fa7422c51da 100644
803     --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
804     +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
805     @@ -91,6 +91,12 @@ ifdef CONFIG_DRM_AMD_DC_DCN2_1
806     ###############################################################################
807     CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
808    
809     +# prevent build errors regarding soft-float vs hard-float FP ABI tags
810     +# this code is currently unused on ppc64, as it applies to Renoir APUs only
811     +ifdef CONFIG_PPC64
812     +CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
813     +endif
814     +
815     AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
816    
817     AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
818     diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
819     index 787f94d815f4..dd92f9c295b4 100644
820     --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
821     +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
822     @@ -91,6 +91,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
823     rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
824     }
825    
826     + // workaround: Limit dppclk to 100Mhz to avoid lower eDP panel switch to plus 4K monitor underflow.
827     + if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
828     + if (new_clocks->dppclk_khz < 100000)
829     + new_clocks->dppclk_khz = 100000;
830     + }
831     +
832     if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
833     if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
834     dpp_clock_lowered = true;
835     diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
836     index c3f9f4185ce8..cf877238fff9 100644
837     --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
838     +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
839     @@ -386,7 +386,7 @@ static bool acquire(
840     {
841     enum gpio_result result;
842    
843     - if (!is_engine_available(engine))
844     + if ((engine == NULL) || !is_engine_available(engine))
845     return false;
846    
847     result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
848     diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
849     index 937a8ba81160..e933f6a369f9 100644
850     --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
851     +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
852     @@ -493,7 +493,6 @@ static void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
853     dpp->funcs->dpp_dppclk_control(dpp, false, false);
854    
855     hubp->power_gated = true;
856     - dc->optimized_required = false; /* We're powering off, no need to optimize */
857    
858     dc->hwss.plane_atomic_power_down(dc,
859     pipe_ctx->plane_res.dpp,
860     diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
861     index b0e5e64df212..161bf7caf3ae 100644
862     --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
863     +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
864     @@ -57,6 +57,7 @@
865     #include "dcn20/dcn20_dccg.h"
866     #include "dcn21_hubbub.h"
867     #include "dcn10/dcn10_resource.h"
868     +#include "dce110/dce110_resource.h"
869    
870     #include "dcn20/dcn20_dwb.h"
871     #include "dcn20/dcn20_mmhubbub.h"
872     @@ -824,6 +825,7 @@ static const struct dc_debug_options debug_defaults_diags = {
873     enum dcn20_clk_src_array_id {
874     DCN20_CLK_SRC_PLL0,
875     DCN20_CLK_SRC_PLL1,
876     + DCN20_CLK_SRC_PLL2,
877     DCN20_CLK_SRC_TOTAL_DCN21
878     };
879    
880     @@ -1492,6 +1494,10 @@ static bool construct(
881     dcn21_clock_source_create(ctx, ctx->dc_bios,
882     CLOCK_SOURCE_COMBO_PHY_PLL1,
883     &clk_src_regs[1], false);
884     + pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
885     + dcn21_clock_source_create(ctx, ctx->dc_bios,
886     + CLOCK_SOURCE_COMBO_PHY_PLL2,
887     + &clk_src_regs[2], false);
888    
889     pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
890    
891     diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
892     index b6f74bf4af02..27bb8c1ab858 100644
893     --- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
894     +++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
895     @@ -7376,6 +7376,8 @@
896     #define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
897     #define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
898    
899     +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
900     +#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
901    
902     // addressBlock: dce_dc_fmt4_dispdec
903     // base address: 0x2000
904     diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
905     index 00786a142ff0..41c8e39a73ba 100644
906     --- a/drivers/gpu/drm/i915/Kconfig.debug
907     +++ b/drivers/gpu/drm/i915/Kconfig.debug
908     @@ -7,7 +7,6 @@ config DRM_I915_WERROR
909     # We use the dependency on !COMPILE_TEST to not be enabled in
910     # allmodconfig or allyesconfig configurations
911     depends on !COMPILE_TEST
912     - select HEADER_TEST
913     default n
914     help
915     Add -Werror to the build flags for (and only for) i915.ko.
916     diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
917     index 4bfaefdf548d..c0347956f7cf 100644
918     --- a/drivers/gpu/drm/i915/gvt/dmabuf.c
919     +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
920     @@ -96,12 +96,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
921     dmabuf_obj = container_of(pos,
922     struct intel_vgpu_dmabuf_obj, list);
923     if (dmabuf_obj == obj) {
924     + list_del(pos);
925     intel_gvt_hypervisor_put_vfio_device(vgpu);
926     idr_remove(&vgpu->object_idr,
927     dmabuf_obj->dmabuf_id);
928     kfree(dmabuf_obj->info);
929     kfree(dmabuf_obj);
930     - list_del(pos);
931     break;
932     }
933     }
934     diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
935     index d5a6e4e3d0fd..b232965b45b5 100644
936     --- a/drivers/gpu/drm/i915/gvt/vgpu.c
937     +++ b/drivers/gpu/drm/i915/gvt/vgpu.c
938     @@ -560,9 +560,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
939    
940     intel_vgpu_reset_mmio(vgpu, dmlr);
941     populate_pvinfo_page(vgpu);
942     - intel_vgpu_reset_display(vgpu);
943    
944     if (dmlr) {
945     + intel_vgpu_reset_display(vgpu);
946     intel_vgpu_reset_cfg_space(vgpu);
947     /* only reset the failsafe mode when dmlr reset */
948     vgpu->failsafe = false;
949     diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
950     index c84f0a8b3f2c..b73fbb65e14b 100644
951     --- a/drivers/gpu/drm/msm/msm_drv.c
952     +++ b/drivers/gpu/drm/msm/msm_drv.c
953     @@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
954     if (ret)
955     goto err_msm_uninit;
956    
957     + if (!dev->dma_parms) {
958     + dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
959     + GFP_KERNEL);
960     + if (!dev->dma_parms)
961     + return -ENOMEM;
962     + }
963     + dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
964     +
965     msm_gem_shrinker_init(ddev);
966    
967     switch (get_mdp_ver(pdev)) {
968     diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
969     index 4528f4dc0b2d..6128792ab883 100644
970     --- a/drivers/gpu/drm/radeon/radeon_drv.c
971     +++ b/drivers/gpu/drm/radeon/radeon_drv.c
972     @@ -37,6 +37,7 @@
973     #include <linux/vga_switcheroo.h>
974     #include <linux/mmu_notifier.h>
975    
976     +#include <drm/drm_agpsupport.h>
977     #include <drm/drm_crtc_helper.h>
978     #include <drm/drm_drv.h>
979     #include <drm/drm_fb_helper.h>
980     @@ -325,6 +326,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
981     const struct pci_device_id *ent)
982     {
983     unsigned long flags = 0;
984     + struct drm_device *dev;
985     int ret;
986    
987     if (!ent)
988     @@ -365,7 +367,44 @@ static int radeon_pci_probe(struct pci_dev *pdev,
989     if (ret)
990     return ret;
991    
992     - return drm_get_pci_dev(pdev, ent, &kms_driver);
993     + dev = drm_dev_alloc(&kms_driver, &pdev->dev);
994     + if (IS_ERR(dev))
995     + return PTR_ERR(dev);
996     +
997     + ret = pci_enable_device(pdev);
998     + if (ret)
999     + goto err_free;
1000     +
1001     + dev->pdev = pdev;
1002     +#ifdef __alpha__
1003     + dev->hose = pdev->sysdata;
1004     +#endif
1005     +
1006     + pci_set_drvdata(pdev, dev);
1007     +
1008     + if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
1009     + dev->agp = drm_agp_init(dev);
1010     + if (dev->agp) {
1011     + dev->agp->agp_mtrr = arch_phys_wc_add(
1012     + dev->agp->agp_info.aper_base,
1013     + dev->agp->agp_info.aper_size *
1014     + 1024 * 1024);
1015     + }
1016     +
1017     + ret = drm_dev_register(dev, ent->driver_data);
1018     + if (ret)
1019     + goto err_agp;
1020     +
1021     + return 0;
1022     +
1023     +err_agp:
1024     + if (dev->agp)
1025     + arch_phys_wc_del(dev->agp->agp_mtrr);
1026     + kfree(dev->agp);
1027     + pci_disable_device(pdev);
1028     +err_free:
1029     + drm_dev_put(dev);
1030     + return ret;
1031     }
1032    
1033     static void
1034     @@ -578,7 +617,7 @@ radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
1035    
1036     static struct drm_driver kms_driver = {
1037     .driver_features =
1038     - DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
1039     + DRIVER_GEM | DRIVER_RENDER,
1040     .load = radeon_driver_load_kms,
1041     .open = radeon_driver_open_kms,
1042     .postclose = radeon_driver_postclose_kms,
1043     diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
1044     index e85c554eeaa9..2bb0187c5bc7 100644
1045     --- a/drivers/gpu/drm/radeon/radeon_kms.c
1046     +++ b/drivers/gpu/drm/radeon/radeon_kms.c
1047     @@ -31,6 +31,7 @@
1048     #include <linux/uaccess.h>
1049     #include <linux/vga_switcheroo.h>
1050    
1051     +#include <drm/drm_agpsupport.h>
1052     #include <drm/drm_fb_helper.h>
1053     #include <drm/drm_file.h>
1054     #include <drm/drm_ioctl.h>
1055     @@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev)
1056     radeon_modeset_fini(rdev);
1057     radeon_device_fini(rdev);
1058    
1059     + if (dev->agp)
1060     + arch_phys_wc_del(dev->agp->agp_mtrr);
1061     + kfree(dev->agp);
1062     + dev->agp = NULL;
1063     +
1064     done_free:
1065     kfree(rdev);
1066     dev->dev_private = NULL;
1067     diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
1068     index ae79a7c66737..fa704153cb00 100644
1069     --- a/drivers/hid/hid-alps.c
1070     +++ b/drivers/hid/hid-alps.c
1071     @@ -730,7 +730,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
1072     if (data->has_sp) {
1073     input2 = input_allocate_device();
1074     if (!input2) {
1075     - input_free_device(input2);
1076     + ret = -ENOMEM;
1077     goto exit;
1078     }
1079    
1080     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1081     index 851fe54ea59e..359616e3efbb 100644
1082     --- a/drivers/hid/hid-core.c
1083     +++ b/drivers/hid/hid-core.c
1084     @@ -1741,7 +1741,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1085    
1086     rsize = ((report->size - 1) >> 3) + 1;
1087    
1088     - if (rsize > HID_MAX_BUFFER_SIZE)
1089     + if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
1090     + rsize = HID_MAX_BUFFER_SIZE - 1;
1091     + else if (rsize > HID_MAX_BUFFER_SIZE)
1092     rsize = HID_MAX_BUFFER_SIZE;
1093    
1094     if (csize < rsize) {
1095     diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
1096     index c436e12feb23..6c55682c5974 100644
1097     --- a/drivers/hid/hid-ite.c
1098     +++ b/drivers/hid/hid-ite.c
1099     @@ -41,8 +41,9 @@ static const struct hid_device_id ite_devices[] = {
1100     { HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
1101     { HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
1102     /* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
1103     - { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
1104     - USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
1105     + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
1106     + USB_VENDOR_ID_SYNAPTICS,
1107     + USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
1108     { }
1109     };
1110     MODULE_DEVICE_TABLE(hid, ite_devices);
1111     diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
1112     index c879b214a479..35b1fa6d962e 100644
1113     --- a/drivers/hid/usbhid/hiddev.c
1114     +++ b/drivers/hid/usbhid/hiddev.c
1115     @@ -941,9 +941,9 @@ void hiddev_disconnect(struct hid_device *hid)
1116     hiddev->exist = 0;
1117    
1118     if (hiddev->open) {
1119     - mutex_unlock(&hiddev->existancelock);
1120     hid_hw_close(hiddev->hid);
1121     wake_up_interruptible(&hiddev->wait);
1122     + mutex_unlock(&hiddev->existancelock);
1123     } else {
1124     mutex_unlock(&hiddev->existancelock);
1125     kfree(hiddev);
1126     diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
1127     index 5255d3755411..1de23b4f3809 100644
1128     --- a/drivers/i2c/busses/i2c-altera.c
1129     +++ b/drivers/i2c/busses/i2c-altera.c
1130     @@ -171,7 +171,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
1131     /* SCL Low Time */
1132     writel(t_low, idev->base + ALTR_I2C_SCL_LOW);
1133     /* SDA Hold Time, 300ns */
1134     - writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD);
1135     + writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD);
1136    
1137     /* Mask all master interrupt bits */
1138     altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
1139     diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
1140     index 25dcd73acd63..8f0e1f802f2d 100644
1141     --- a/drivers/i2c/busses/i2c-jz4780.c
1142     +++ b/drivers/i2c/busses/i2c-jz4780.c
1143     @@ -73,25 +73,6 @@
1144     #define JZ4780_I2C_STA_TFNF BIT(1)
1145     #define JZ4780_I2C_STA_ACT BIT(0)
1146    
1147     -static const char * const jz4780_i2c_abrt_src[] = {
1148     - "ABRT_7B_ADDR_NOACK",
1149     - "ABRT_10ADDR1_NOACK",
1150     - "ABRT_10ADDR2_NOACK",
1151     - "ABRT_XDATA_NOACK",
1152     - "ABRT_GCALL_NOACK",
1153     - "ABRT_GCALL_READ",
1154     - "ABRT_HS_ACKD",
1155     - "SBYTE_ACKDET",
1156     - "ABRT_HS_NORSTRT",
1157     - "SBYTE_NORSTRT",
1158     - "ABRT_10B_RD_NORSTRT",
1159     - "ABRT_MASTER_DIS",
1160     - "ARB_LOST",
1161     - "SLVFLUSH_TXFIFO",
1162     - "SLV_ARBLOST",
1163     - "SLVRD_INTX",
1164     -};
1165     -
1166     #define JZ4780_I2C_INTST_IGC BIT(11)
1167     #define JZ4780_I2C_INTST_ISTT BIT(10)
1168     #define JZ4780_I2C_INTST_ISTP BIT(9)
1169     @@ -529,21 +510,8 @@ done:
1170    
1171     static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
1172     {
1173     - int i;
1174     -
1175     - dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
1176     - dev_err(&i2c->adap.dev, "device addr=%x\n",
1177     - jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
1178     - dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
1179     - i2c->cmd, i2c->cmd_buf[i2c->cmd]);
1180     - dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
1181     - i2c->cmd, i2c->data_buf[i2c->cmd]);
1182     -
1183     - for (i = 0; i < 16; i++) {
1184     - if (src & BIT(i))
1185     - dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
1186     - i, jz4780_i2c_abrt_src[i]);
1187     - }
1188     + dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
1189     + src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
1190     }
1191    
1192     static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
1193     diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
1194     index 96d1302abde1..e36d31569081 100644
1195     --- a/drivers/infiniband/hw/hns/hns_roce_device.h
1196     +++ b/drivers/infiniband/hw/hns/hns_roce_device.h
1197     @@ -425,7 +425,7 @@ struct hns_roce_mr_table {
1198     struct hns_roce_wq {
1199     u64 *wrid; /* Work request ID */
1200     spinlock_t lock;
1201     - int wqe_cnt; /* WQE num */
1202     + u32 wqe_cnt; /* WQE num */
1203     u32 max_post;
1204     int max_gs;
1205     int offset;
1206     @@ -658,7 +658,6 @@ struct hns_roce_qp {
1207     u8 sdb_en;
1208     u32 doorbell_qpn;
1209     u32 sq_signal_bits;
1210     - u32 sq_next_wqe;
1211     struct hns_roce_wq sq;
1212    
1213     struct ib_umem *umem;
1214     diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1215     index 5f74bf55f471..a79fa67df871 100644
1216     --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1217     +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1218     @@ -74,8 +74,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1219     unsigned long flags = 0;
1220     void *wqe = NULL;
1221     __le32 doorbell[2];
1222     + u32 wqe_idx = 0;
1223     int nreq = 0;
1224     - u32 ind = 0;
1225     int ret = 0;
1226     u8 *smac;
1227     int loopback;
1228     @@ -88,7 +88,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1229     }
1230    
1231     spin_lock_irqsave(&qp->sq.lock, flags);
1232     - ind = qp->sq_next_wqe;
1233     +
1234     for (nreq = 0; wr; ++nreq, wr = wr->next) {
1235     if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1236     ret = -ENOMEM;
1237     @@ -96,6 +96,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1238     goto out;
1239     }
1240    
1241     + wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
1242     +
1243     if (unlikely(wr->num_sge > qp->sq.max_gs)) {
1244     dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
1245     wr->num_sge, qp->sq.max_gs);
1246     @@ -104,9 +106,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1247     goto out;
1248     }
1249    
1250     - wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
1251     - qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
1252     - wr->wr_id;
1253     + wqe = get_send_wqe(qp, wqe_idx);
1254     + qp->sq.wrid[wqe_idx] = wr->wr_id;
1255    
1256     /* Corresponding to the RC and RD type wqe process separately */
1257     if (ibqp->qp_type == IB_QPT_GSI) {
1258     @@ -210,7 +211,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1259     cpu_to_le32((wr->sg_list[1].addr) >> 32);
1260     ud_sq_wqe->l_key1 =
1261     cpu_to_le32(wr->sg_list[1].lkey);
1262     - ind++;
1263     } else if (ibqp->qp_type == IB_QPT_RC) {
1264     u32 tmp_len = 0;
1265    
1266     @@ -308,7 +308,6 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
1267     ctrl->flag |= cpu_to_le32(wr->num_sge <<
1268     HNS_ROCE_WQE_SGE_NUM_BIT);
1269     }
1270     - ind++;
1271     }
1272     }
1273    
1274     @@ -336,7 +335,6 @@ out:
1275     doorbell[1] = sq_db.u32_8;
1276    
1277     hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
1278     - qp->sq_next_wqe = ind;
1279     }
1280    
1281     spin_unlock_irqrestore(&qp->sq.lock, flags);
1282     @@ -348,12 +346,6 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1283     const struct ib_recv_wr *wr,
1284     const struct ib_recv_wr **bad_wr)
1285     {
1286     - int ret = 0;
1287     - int nreq = 0;
1288     - int ind = 0;
1289     - int i = 0;
1290     - u32 reg_val;
1291     - unsigned long flags = 0;
1292     struct hns_roce_rq_wqe_ctrl *ctrl = NULL;
1293     struct hns_roce_wqe_data_seg *scat = NULL;
1294     struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
1295     @@ -361,9 +353,14 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1296     struct device *dev = &hr_dev->pdev->dev;
1297     struct hns_roce_rq_db rq_db;
1298     __le32 doorbell[2] = {0};
1299     + unsigned long flags = 0;
1300     + unsigned int wqe_idx;
1301     + int ret = 0;
1302     + int nreq = 0;
1303     + int i = 0;
1304     + u32 reg_val;
1305    
1306     spin_lock_irqsave(&hr_qp->rq.lock, flags);
1307     - ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
1308    
1309     for (nreq = 0; wr; ++nreq, wr = wr->next) {
1310     if (hns_roce_wq_overflow(&hr_qp->rq, nreq,
1311     @@ -373,6 +370,8 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1312     goto out;
1313     }
1314    
1315     + wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
1316     +
1317     if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
1318     dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
1319     wr->num_sge, hr_qp->rq.max_gs);
1320     @@ -381,7 +380,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1321     goto out;
1322     }
1323    
1324     - ctrl = get_recv_wqe(hr_qp, ind);
1325     + ctrl = get_recv_wqe(hr_qp, wqe_idx);
1326    
1327     roce_set_field(ctrl->rwqe_byte_12,
1328     RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M,
1329     @@ -393,9 +392,7 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp,
1330     for (i = 0; i < wr->num_sge; i++)
1331     set_data_seg(scat + i, wr->sg_list + i);
1332    
1333     - hr_qp->rq.wrid[ind] = wr->wr_id;
1334     -
1335     - ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
1336     + hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
1337     }
1338    
1339     out:
1340     @@ -2702,7 +2699,6 @@ static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1341     hr_qp->rq.tail = 0;
1342     hr_qp->sq.head = 0;
1343     hr_qp->sq.tail = 0;
1344     - hr_qp->sq_next_wqe = 0;
1345     }
1346    
1347     kfree(context);
1348     @@ -3316,7 +3312,6 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
1349     hr_qp->rq.tail = 0;
1350     hr_qp->sq.head = 0;
1351     hr_qp->sq.tail = 0;
1352     - hr_qp->sq_next_wqe = 0;
1353     }
1354     out:
1355     kfree(context);
1356     diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1357     index 79294f278b26..4540b00ccee9 100644
1358     --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1359     +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
1360     @@ -110,7 +110,7 @@ static void set_atomic_seg(struct hns_roce_wqe_atomic_seg *aseg,
1361     }
1362    
1363     static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
1364     - unsigned int *sge_ind)
1365     + unsigned int *sge_ind, int valid_num_sge)
1366     {
1367     struct hns_roce_v2_wqe_data_seg *dseg;
1368     struct ib_sge *sg;
1369     @@ -123,7 +123,7 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
1370    
1371     if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC)
1372     num_in_wqe = HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE;
1373     - extend_sge_num = wr->num_sge - num_in_wqe;
1374     + extend_sge_num = valid_num_sge - num_in_wqe;
1375     sg = wr->sg_list + num_in_wqe;
1376     shift = qp->hr_buf.page_shift;
1377    
1378     @@ -159,14 +159,16 @@ static void set_extend_sge(struct hns_roce_qp *qp, const struct ib_send_wr *wr,
1379     static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1380     struct hns_roce_v2_rc_send_wqe *rc_sq_wqe,
1381     void *wqe, unsigned int *sge_ind,
1382     + int valid_num_sge,
1383     const struct ib_send_wr **bad_wr)
1384     {
1385     struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
1386     struct hns_roce_v2_wqe_data_seg *dseg = wqe;
1387     struct hns_roce_qp *qp = to_hr_qp(ibqp);
1388     + int j = 0;
1389     int i;
1390    
1391     - if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
1392     + if (wr->send_flags & IB_SEND_INLINE && valid_num_sge) {
1393     if (le32_to_cpu(rc_sq_wqe->msg_len) >
1394     hr_dev->caps.max_sq_inline) {
1395     *bad_wr = wr;
1396     @@ -190,7 +192,7 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1397     roce_set_bit(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_INLINE_S,
1398     1);
1399     } else {
1400     - if (wr->num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
1401     + if (valid_num_sge <= HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE) {
1402     for (i = 0; i < wr->num_sge; i++) {
1403     if (likely(wr->sg_list[i].length)) {
1404     set_data_seg_v2(dseg, wr->sg_list + i);
1405     @@ -203,19 +205,21 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
1406     V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
1407     (*sge_ind) & (qp->sge.sge_cnt - 1));
1408    
1409     - for (i = 0; i < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
1410     + for (i = 0; i < wr->num_sge &&
1411     + j < HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE; i++) {
1412     if (likely(wr->sg_list[i].length)) {
1413     set_data_seg_v2(dseg, wr->sg_list + i);
1414     dseg++;
1415     + j++;
1416     }
1417     }
1418    
1419     - set_extend_sge(qp, wr, sge_ind);
1420     + set_extend_sge(qp, wr, sge_ind, valid_num_sge);
1421     }
1422    
1423     roce_set_field(rc_sq_wqe->byte_16,
1424     V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
1425     - V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, wr->num_sge);
1426     + V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S, valid_num_sge);
1427     }
1428    
1429     return 0;
1430     @@ -239,10 +243,11 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1431     struct device *dev = hr_dev->dev;
1432     struct hns_roce_v2_db sq_db;
1433     struct ib_qp_attr attr;
1434     - unsigned int sge_ind;
1435     unsigned int owner_bit;
1436     + unsigned int sge_idx;
1437     + unsigned int wqe_idx;
1438     unsigned long flags;
1439     - unsigned int ind;
1440     + int valid_num_sge;
1441     void *wqe = NULL;
1442     bool loopback;
1443     int attr_mask;
1444     @@ -269,8 +274,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1445     }
1446    
1447     spin_lock_irqsave(&qp->sq.lock, flags);
1448     - ind = qp->sq_next_wqe;
1449     - sge_ind = qp->next_sge;
1450     + sge_idx = qp->next_sge;
1451    
1452     for (nreq = 0; wr; ++nreq, wr = wr->next) {
1453     if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
1454     @@ -279,6 +283,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1455     goto out;
1456     }
1457    
1458     + wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1);
1459     +
1460     if (unlikely(wr->num_sge > qp->sq.max_gs)) {
1461     dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n",
1462     wr->num_sge, qp->sq.max_gs);
1463     @@ -287,14 +293,20 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1464     goto out;
1465     }
1466    
1467     - wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
1468     - qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
1469     - wr->wr_id;
1470     -
1471     + wqe = get_send_wqe(qp, wqe_idx);
1472     + qp->sq.wrid[wqe_idx] = wr->wr_id;
1473     owner_bit =
1474     ~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
1475     + valid_num_sge = 0;
1476     tmp_len = 0;
1477    
1478     + for (i = 0; i < wr->num_sge; i++) {
1479     + if (likely(wr->sg_list[i].length)) {
1480     + tmp_len += wr->sg_list[i].length;
1481     + valid_num_sge++;
1482     + }
1483     + }
1484     +
1485     /* Corresponding to the QP type, wqe process separately */
1486     if (ibqp->qp_type == IB_QPT_GSI) {
1487     ud_sq_wqe = wqe;
1488     @@ -330,9 +342,6 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1489     V2_UD_SEND_WQE_BYTE_4_OPCODE_S,
1490     HNS_ROCE_V2_WQE_OP_SEND);
1491    
1492     - for (i = 0; i < wr->num_sge; i++)
1493     - tmp_len += wr->sg_list[i].length;
1494     -
1495     ud_sq_wqe->msg_len =
1496     cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
1497    
1498     @@ -368,12 +377,12 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1499     roce_set_field(ud_sq_wqe->byte_16,
1500     V2_UD_SEND_WQE_BYTE_16_SGE_NUM_M,
1501     V2_UD_SEND_WQE_BYTE_16_SGE_NUM_S,
1502     - wr->num_sge);
1503     + valid_num_sge);
1504    
1505     roce_set_field(ud_sq_wqe->byte_20,
1506     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M,
1507     V2_UD_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_S,
1508     - sge_ind & (qp->sge.sge_cnt - 1));
1509     + sge_idx & (qp->sge.sge_cnt - 1));
1510    
1511     roce_set_field(ud_sq_wqe->byte_24,
1512     V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
1513     @@ -423,13 +432,10 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1514     memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0],
1515     GID_LEN_V2);
1516    
1517     - set_extend_sge(qp, wr, &sge_ind);
1518     - ind++;
1519     + set_extend_sge(qp, wr, &sge_idx, valid_num_sge);
1520     } else if (ibqp->qp_type == IB_QPT_RC) {
1521     rc_sq_wqe = wqe;
1522     memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
1523     - for (i = 0; i < wr->num_sge; i++)
1524     - tmp_len += wr->sg_list[i].length;
1525    
1526     rc_sq_wqe->msg_len =
1527     cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
1528     @@ -550,15 +556,14 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp,
1529     roce_set_field(rc_sq_wqe->byte_16,
1530     V2_RC_SEND_WQE_BYTE_16_SGE_NUM_M,
1531     V2_RC_SEND_WQE_BYTE_16_SGE_NUM_S,
1532     - wr->num_sge);
1533     + valid_num_sge);
1534     } else if (wr->opcode != IB_WR_REG_MR) {
1535     ret = set_rwqe_data_seg(ibqp, wr, rc_sq_wqe,
1536     - wqe, &sge_ind, bad_wr);
1537     + wqe, &sge_idx,
1538     + valid_num_sge, bad_wr);
1539     if (ret)
1540     goto out;
1541     }
1542     -
1543     - ind++;
1544     } else {
1545     dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
1546     spin_unlock_irqrestore(&qp->sq.lock, flags);
1547     @@ -588,8 +593,7 @@ out:
1548    
1549     hns_roce_write64(hr_dev, (__le32 *)&sq_db, qp->sq.db_reg_l);
1550    
1551     - qp->sq_next_wqe = ind;
1552     - qp->next_sge = sge_ind;
1553     + qp->next_sge = sge_idx;
1554    
1555     if (qp->state == IB_QPS_ERR) {
1556     attr_mask = IB_QP_STATE;
1557     @@ -623,13 +627,12 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1558     unsigned long flags;
1559     void *wqe = NULL;
1560     int attr_mask;
1561     + u32 wqe_idx;
1562     int ret = 0;
1563     int nreq;
1564     - int ind;
1565     int i;
1566    
1567     spin_lock_irqsave(&hr_qp->rq.lock, flags);
1568     - ind = hr_qp->rq.head & (hr_qp->rq.wqe_cnt - 1);
1569    
1570     if (hr_qp->state == IB_QPS_RESET) {
1571     spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
1572     @@ -645,6 +648,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1573     goto out;
1574     }
1575    
1576     + wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
1577     +
1578     if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
1579     dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n",
1580     wr->num_sge, hr_qp->rq.max_gs);
1581     @@ -653,7 +658,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1582     goto out;
1583     }
1584    
1585     - wqe = get_recv_wqe(hr_qp, ind);
1586     + wqe = get_recv_wqe(hr_qp, wqe_idx);
1587     dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
1588     for (i = 0; i < wr->num_sge; i++) {
1589     if (!wr->sg_list[i].length)
1590     @@ -669,8 +674,8 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1591    
1592     /* rq support inline data */
1593     if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_RQ_INLINE) {
1594     - sge_list = hr_qp->rq_inl_buf.wqe_list[ind].sg_list;
1595     - hr_qp->rq_inl_buf.wqe_list[ind].sge_cnt =
1596     + sge_list = hr_qp->rq_inl_buf.wqe_list[wqe_idx].sg_list;
1597     + hr_qp->rq_inl_buf.wqe_list[wqe_idx].sge_cnt =
1598     (u32)wr->num_sge;
1599     for (i = 0; i < wr->num_sge; i++) {
1600     sge_list[i].addr =
1601     @@ -679,9 +684,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
1602     }
1603     }
1604    
1605     - hr_qp->rq.wrid[ind] = wr->wr_id;
1606     -
1607     - ind = (ind + 1) & (hr_qp->rq.wqe_cnt - 1);
1608     + hr_qp->rq.wrid[wqe_idx] = wr->wr_id;
1609     }
1610    
1611     out:
1612     @@ -4465,7 +4468,6 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
1613     hr_qp->rq.tail = 0;
1614     hr_qp->sq.head = 0;
1615     hr_qp->sq.tail = 0;
1616     - hr_qp->sq_next_wqe = 0;
1617     hr_qp->next_sge = 0;
1618     if (hr_qp->rq.wqe_cnt)
1619     *hr_qp->rdb.db_record = 0;
1620     diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
1621     index 0454561718d9..31aa41d85ccf 100644
1622     --- a/drivers/infiniband/sw/siw/siw_cm.c
1623     +++ b/drivers/infiniband/sw/siw/siw_cm.c
1624     @@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
1625     read_lock(&sk->sk_callback_lock);
1626    
1627     cep = sk_to_cep(sk);
1628     - if (!cep) {
1629     - WARN_ON(1);
1630     + if (!cep)
1631     goto out;
1632     - }
1633     +
1634     siw_dbg_cep(cep, "state: %d\n", cep->state);
1635    
1636     switch (cep->state) {
1637     diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
1638     index 8c744578122a..a0d87ed9da69 100644
1639     --- a/drivers/macintosh/therm_windtunnel.c
1640     +++ b/drivers/macintosh/therm_windtunnel.c
1641     @@ -300,9 +300,11 @@ static int control_loop(void *dummy)
1642     /* i2c probing and setup */
1643     /************************************************************************/
1644    
1645     -static int
1646     -do_attach( struct i2c_adapter *adapter )
1647     +static void do_attach(struct i2c_adapter *adapter)
1648     {
1649     + struct i2c_board_info info = { };
1650     + struct device_node *np;
1651     +
1652     /* scan 0x48-0x4f (DS1775) and 0x2c-2x2f (ADM1030) */
1653     static const unsigned short scan_ds1775[] = {
1654     0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
1655     @@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter )
1656     I2C_CLIENT_END
1657     };
1658    
1659     - if( strncmp(adapter->name, "uni-n", 5) )
1660     - return 0;
1661     -
1662     - if( !x.running ) {
1663     - struct i2c_board_info info;
1664     + if (x.running || strncmp(adapter->name, "uni-n", 5))
1665     + return;
1666    
1667     - memset(&info, 0, sizeof(struct i2c_board_info));
1668     - strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
1669     + np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
1670     + if (np) {
1671     + of_node_put(np);
1672     + } else {
1673     + strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
1674     i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
1675     + }
1676    
1677     - strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
1678     + np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
1679     + if (np) {
1680     + of_node_put(np);
1681     + } else {
1682     + strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
1683     i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
1684     -
1685     - if( x.thermostat && x.fan ) {
1686     - x.running = 1;
1687     - x.poll_task = kthread_run(control_loop, NULL, "g4fand");
1688     - }
1689     }
1690     - return 0;
1691     }
1692    
1693     static int
1694     @@ -404,8 +405,8 @@ out:
1695     enum chip { ds1775, adm1030 };
1696    
1697     static const struct i2c_device_id therm_windtunnel_id[] = {
1698     - { "therm_ds1775", ds1775 },
1699     - { "therm_adm1030", adm1030 },
1700     + { "MAC,ds1775", ds1775 },
1701     + { "MAC,adm1030", adm1030 },
1702     { }
1703     };
1704     MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
1705     @@ -414,6 +415,7 @@ static int
1706     do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
1707     {
1708     struct i2c_adapter *adapter = cl->adapter;
1709     + int ret = 0;
1710    
1711     if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
1712     | I2C_FUNC_SMBUS_WRITE_BYTE) )
1713     @@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
1714    
1715     switch (id->driver_data) {
1716     case adm1030:
1717     - return attach_fan( cl );
1718     + ret = attach_fan(cl);
1719     + break;
1720     case ds1775:
1721     - return attach_thermostat(cl);
1722     + ret = attach_thermostat(cl);
1723     + break;
1724     }
1725     - return 0;
1726     +
1727     + if (!x.running && x.thermostat && x.fan) {
1728     + x.running = 1;
1729     + x.poll_task = kthread_run(control_loop, NULL, "g4fand");
1730     + }
1731     +
1732     + return ret;
1733     }
1734    
1735     static struct i2c_driver g4fan_driver = {
1736     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
1737     index 7dcd709f4ac3..703c5c2c80e5 100644
1738     --- a/drivers/net/bonding/bond_main.c
1739     +++ b/drivers/net/bonding/bond_main.c
1740     @@ -3436,6 +3436,47 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
1741     }
1742     }
1743    
1744     +#ifdef CONFIG_LOCKDEP
1745     +static int bond_get_lowest_level_rcu(struct net_device *dev)
1746     +{
1747     + struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
1748     + struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
1749     + int cur = 0, max = 0;
1750     +
1751     + now = dev;
1752     + iter = &dev->adj_list.lower;
1753     +
1754     + while (1) {
1755     + next = NULL;
1756     + while (1) {
1757     + ldev = netdev_next_lower_dev_rcu(now, &iter);
1758     + if (!ldev)
1759     + break;
1760     +
1761     + next = ldev;
1762     + niter = &ldev->adj_list.lower;
1763     + dev_stack[cur] = now;
1764     + iter_stack[cur++] = iter;
1765     + if (max <= cur)
1766     + max = cur;
1767     + break;
1768     + }
1769     +
1770     + if (!next) {
1771     + if (!cur)
1772     + return max;
1773     + next = dev_stack[--cur];
1774     + niter = iter_stack[cur];
1775     + }
1776     +
1777     + now = next;
1778     + iter = niter;
1779     + }
1780     +
1781     + return max;
1782     +}
1783     +#endif
1784     +
1785     static void bond_get_stats(struct net_device *bond_dev,
1786     struct rtnl_link_stats64 *stats)
1787     {
1788     @@ -3443,11 +3484,17 @@ static void bond_get_stats(struct net_device *bond_dev,
1789     struct rtnl_link_stats64 temp;
1790     struct list_head *iter;
1791     struct slave *slave;
1792     + int nest_level = 0;
1793    
1794     - spin_lock(&bond->stats_lock);
1795     - memcpy(stats, &bond->bond_stats, sizeof(*stats));
1796    
1797     rcu_read_lock();
1798     +#ifdef CONFIG_LOCKDEP
1799     + nest_level = bond_get_lowest_level_rcu(bond_dev);
1800     +#endif
1801     +
1802     + spin_lock_nested(&bond->stats_lock, nest_level);
1803     + memcpy(stats, &bond->bond_stats, sizeof(*stats));
1804     +
1805     bond_for_each_slave_rcu(bond, slave, iter) {
1806     const struct rtnl_link_stats64 *new =
1807     dev_get_stats(slave->dev, &temp);
1808     @@ -3457,10 +3504,10 @@ static void bond_get_stats(struct net_device *bond_dev,
1809     /* save off the slave stats for the next run */
1810     memcpy(&slave->slave_stats, new, sizeof(*new));
1811     }
1812     - rcu_read_unlock();
1813    
1814     memcpy(&bond->bond_stats, stats, sizeof(*stats));
1815     spin_unlock(&bond->stats_lock);
1816     + rcu_read_unlock();
1817     }
1818    
1819     static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
1820     @@ -3550,6 +3597,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
1821     case BOND_RELEASE_OLD:
1822     case SIOCBONDRELEASE:
1823     res = bond_release(bond_dev, slave_dev);
1824     + if (!res)
1825     + netdev_update_lockdep_key(slave_dev);
1826     break;
1827     case BOND_SETHWADDR_OLD:
1828     case SIOCBONDSETHWADDR:
1829     diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
1830     index ddb3916d3506..215c10923289 100644
1831     --- a/drivers/net/bonding/bond_options.c
1832     +++ b/drivers/net/bonding/bond_options.c
1833     @@ -1398,6 +1398,8 @@ static int bond_option_slaves_set(struct bonding *bond,
1834     case '-':
1835     slave_dbg(bond->dev, dev, "Releasing interface\n");
1836     ret = bond_release(bond->dev, dev);
1837     + if (!ret)
1838     + netdev_update_lockdep_key(dev);
1839     break;
1840    
1841     default:
1842     diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1843     index 7ed667b304d1..d618650533b6 100644
1844     --- a/drivers/net/dsa/b53/b53_common.c
1845     +++ b/drivers/net/dsa/b53/b53_common.c
1846     @@ -1353,6 +1353,9 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
1847    
1848     b53_get_vlan_entry(dev, vid, vl);
1849    
1850     + if (vid == 0 && vid == b53_default_pvid(dev))
1851     + untagged = true;
1852     +
1853     vl->members |= BIT(port);
1854     if (untagged && !dsa_is_cpu_port(ds, port))
1855     vl->untag |= BIT(port);
1856     diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
1857     index ea62604fdf8c..48de4bee209e 100644
1858     --- a/drivers/net/ethernet/amazon/ena/ena_com.c
1859     +++ b/drivers/net/ethernet/amazon/ena/ena_com.c
1860     @@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
1861     static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
1862     u16 command_id, bool capture)
1863     {
1864     + if (unlikely(!queue->comp_ctx)) {
1865     + pr_err("Completion context is NULL\n");
1866     + return NULL;
1867     + }
1868     +
1869     if (unlikely(command_id >= queue->q_depth)) {
1870     pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
1871     command_id, queue->q_depth);
1872     @@ -1041,9 +1046,41 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1873     feature_ver);
1874     }
1875    
1876     +static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
1877     +{
1878     + struct ena_admin_feature_rss_flow_hash_control *hash_key =
1879     + (ena_dev->rss).hash_key;
1880     +
1881     + netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
1882     + /* The key is stored in the device in u32 array
1883     + * as well as the API requires the key to be passed in this
1884     + * format. Thus the size of our array should be divided by 4
1885     + */
1886     + hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
1887     +}
1888     +
1889     +int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
1890     +{
1891     + return ena_dev->rss.hash_func;
1892     +}
1893     +
1894     static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1895     {
1896     struct ena_rss *rss = &ena_dev->rss;
1897     + struct ena_admin_feature_rss_flow_hash_control *hash_key;
1898     + struct ena_admin_get_feat_resp get_resp;
1899     + int rc;
1900     +
1901     + hash_key = (ena_dev->rss).hash_key;
1902     +
1903     + rc = ena_com_get_feature_ex(ena_dev, &get_resp,
1904     + ENA_ADMIN_RSS_HASH_FUNCTION,
1905     + ena_dev->rss.hash_key_dma_addr,
1906     + sizeof(ena_dev->rss.hash_key), 0);
1907     + if (unlikely(rc)) {
1908     + hash_key = NULL;
1909     + return -EOPNOTSUPP;
1910     + }
1911    
1912     rss->hash_key =
1913     dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1914     @@ -1254,30 +1291,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1915     return 0;
1916     }
1917    
1918     -static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1919     -{
1920     - u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1921     - struct ena_rss *rss = &ena_dev->rss;
1922     - u8 idx;
1923     - u16 i;
1924     -
1925     - for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1926     - dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1927     -
1928     - for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1929     - if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1930     - return -EINVAL;
1931     - idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1932     -
1933     - if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1934     - return -EINVAL;
1935     -
1936     - rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1937     - }
1938     -
1939     - return 0;
1940     -}
1941     -
1942     static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1943     u16 intr_delay_resolution)
1944     {
1945     @@ -2297,15 +2310,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
1946    
1947     switch (func) {
1948     case ENA_ADMIN_TOEPLITZ:
1949     - if (key_len > sizeof(hash_key->key)) {
1950     - pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
1951     - key_len, sizeof(hash_key->key));
1952     - return -EINVAL;
1953     + if (key) {
1954     + if (key_len != sizeof(hash_key->key)) {
1955     + pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
1956     + key_len, sizeof(hash_key->key));
1957     + return -EINVAL;
1958     + }
1959     + memcpy(hash_key->key, key, key_len);
1960     + rss->hash_init_val = init_val;
1961     + hash_key->keys_num = key_len >> 2;
1962     }
1963     -
1964     - memcpy(hash_key->key, key, key_len);
1965     - rss->hash_init_val = init_val;
1966     - hash_key->keys_num = key_len >> 2;
1967     break;
1968     case ENA_ADMIN_CRC32:
1969     rss->hash_init_val = init_val;
1970     @@ -2342,7 +2356,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
1971     if (unlikely(rc))
1972     return rc;
1973    
1974     - rss->hash_func = get_resp.u.flow_hash_func.selected_func;
1975     + /* ffs() returns 1 in case the lsb is set */
1976     + rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
1977     + if (rss->hash_func)
1978     + rss->hash_func--;
1979     +
1980     if (func)
1981     *func = rss->hash_func;
1982    
1983     @@ -2606,10 +2624,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
1984     if (!ind_tbl)
1985     return 0;
1986    
1987     - rc = ena_com_ind_tbl_convert_from_device(ena_dev);
1988     - if (unlikely(rc))
1989     - return rc;
1990     -
1991     for (i = 0; i < (1 << rss->tbl_log_size); i++)
1992     ind_tbl[i] = rss->host_rss_ind_tbl[i];
1993    
1994     @@ -2626,9 +2640,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
1995     if (unlikely(rc))
1996     goto err_indr_tbl;
1997    
1998     + /* The following function might return unsupported in case the
1999     + * device doesn't support setting the key / hash function. We can safely
2000     + * ignore this error and have indirection table support only.
2001     + */
2002     rc = ena_com_hash_key_allocate(ena_dev);
2003     - if (unlikely(rc))
2004     + if (unlikely(rc) && rc != -EOPNOTSUPP)
2005     goto err_hash_key;
2006     + else if (rc != -EOPNOTSUPP)
2007     + ena_com_hash_key_fill_default_key(ena_dev);
2008    
2009     rc = ena_com_hash_ctrl_init(ena_dev);
2010     if (unlikely(rc))
2011     diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
2012     index 0ce37d54ed10..469f298199a7 100644
2013     --- a/drivers/net/ethernet/amazon/ena/ena_com.h
2014     +++ b/drivers/net/ethernet/amazon/ena/ena_com.h
2015     @@ -44,6 +44,7 @@
2016     #include <linux/spinlock.h>
2017     #include <linux/types.h>
2018     #include <linux/wait.h>
2019     +#include <linux/netdevice.h>
2020    
2021     #include "ena_common_defs.h"
2022     #include "ena_admin_defs.h"
2023     @@ -655,6 +656,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
2024     */
2025     void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
2026    
2027     +/* ena_com_get_current_hash_function - Get RSS hash function
2028     + * @ena_dev: ENA communication layer struct
2029     + *
2030     + * Return the current hash function.
2031     + * @return: 0 or one of the ena_admin_hash_functions values.
2032     + */
2033     +int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
2034     +
2035     /* ena_com_fill_hash_function - Fill RSS hash function
2036     * @ena_dev: ENA communication layer struct
2037     * @func: The hash function (Toeplitz or crc)
2038     diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
2039     index 8c1c73b0ced7..ae631b8770fc 100644
2040     --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
2041     +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
2042     @@ -636,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
2043     return ENA_HASH_KEY_SIZE;
2044     }
2045    
2046     +static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
2047     +{
2048     + struct ena_com_dev *ena_dev = adapter->ena_dev;
2049     + int i, rc;
2050     +
2051     + if (!indir)
2052     + return 0;
2053     +
2054     + rc = ena_com_indirect_table_get(ena_dev, indir);
2055     + if (rc)
2056     + return rc;
2057     +
2058     + /* Our internal representation of the indices is: even indices
2059     + * for Tx and uneven indices for Rx. We need to convert the Rx
2060     + * indices to be consecutive
2061     + */
2062     + for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
2063     + indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
2064     +
2065     + return rc;
2066     +}
2067     +
2068     static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2069     u8 *hfunc)
2070     {
2071     @@ -644,11 +666,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2072     u8 func;
2073     int rc;
2074    
2075     - rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
2076     + rc = ena_indirection_table_get(adapter, indir);
2077     if (rc)
2078     return rc;
2079    
2080     + /* We call this function in order to check if the device
2081     + * supports getting/setting the hash function.
2082     + */
2083     rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
2084     +
2085     + if (rc) {
2086     + if (rc == -EOPNOTSUPP) {
2087     + key = NULL;
2088     + hfunc = NULL;
2089     + rc = 0;
2090     + }
2091     +
2092     + return rc;
2093     + }
2094     +
2095     if (rc)
2096     return rc;
2097    
2098     @@ -657,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
2099     func = ETH_RSS_HASH_TOP;
2100     break;
2101     case ENA_ADMIN_CRC32:
2102     - func = ETH_RSS_HASH_XOR;
2103     + func = ETH_RSS_HASH_CRC32;
2104     break;
2105     default:
2106     netif_err(adapter, drv, netdev,
2107     @@ -700,10 +736,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
2108     }
2109    
2110     switch (hfunc) {
2111     + case ETH_RSS_HASH_NO_CHANGE:
2112     + func = ena_com_get_current_hash_function(ena_dev);
2113     + break;
2114     case ETH_RSS_HASH_TOP:
2115     func = ENA_ADMIN_TOEPLITZ;
2116     break;
2117     - case ETH_RSS_HASH_XOR:
2118     + case ETH_RSS_HASH_CRC32:
2119     func = ENA_ADMIN_CRC32;
2120     break;
2121     default:
2122     @@ -805,6 +844,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
2123     .get_channels = ena_get_channels,
2124     .get_tunable = ena_get_tunable,
2125     .set_tunable = ena_set_tunable,
2126     + .get_ts_info = ethtool_op_get_ts_info,
2127     };
2128    
2129     void ena_set_ethtool_ops(struct net_device *netdev)
2130     diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2131     index b4a145220aba..f0cddf250cfd 100644
2132     --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
2133     +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2134     @@ -3035,8 +3035,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
2135     if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
2136     return;
2137    
2138     - keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
2139     - adapter->keep_alive_timeout);
2140     + keep_alive_expired = adapter->last_keep_alive_jiffies +
2141     + adapter->keep_alive_timeout;
2142     if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
2143     netif_err(adapter, drv, adapter->netdev,
2144     "Keep alive watchdog timeout.\n");
2145     @@ -3138,7 +3138,7 @@ static void ena_timer_service(struct timer_list *t)
2146     }
2147    
2148     /* Reset the timer */
2149     - mod_timer(&adapter->timer_service, jiffies + HZ);
2150     + mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
2151     }
2152    
2153     static int ena_calc_io_queue_num(struct pci_dev *pdev,
2154     diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
2155     index 72ee51a82ec7..dc02950a96b8 100644
2156     --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
2157     +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
2158     @@ -127,6 +127,8 @@
2159    
2160     #define ENA_IO_TXQ_IDX(q) (2 * (q))
2161     #define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
2162     +#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
2163     +#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
2164    
2165     #define ENA_MGMNT_IRQ_IDX 0
2166     #define ENA_IO_IRQ_FIRST_IDX 1
2167     diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
2168     index d8612131c55e..cc8031ae9aa3 100644
2169     --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
2170     +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
2171     @@ -2020,7 +2020,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
2172     int ret;
2173    
2174     ndev = alloc_etherdev_mqs(sizeof(struct xgene_enet_pdata),
2175     - XGENE_NUM_RX_RING, XGENE_NUM_TX_RING);
2176     + XGENE_NUM_TX_RING, XGENE_NUM_RX_RING);
2177     if (!ndev)
2178     return -ENOMEM;
2179    
2180     diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
2181     index aee827f07c16..01af0f028693 100644
2182     --- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
2183     +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
2184     @@ -158,7 +158,7 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
2185     }
2186    
2187     if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2188     - (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
2189     + (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
2190     aq_nic->active_vlans))) {
2191     netdev_err(aq_nic->ndev,
2192     "ethtool: unknown vlan-id specified");
2193     diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2194     index 137c1de4c6ec..12949f1ec1ea 100644
2195     --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2196     +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
2197     @@ -467,8 +467,10 @@ static unsigned int aq_nic_map_skb(struct aq_nic_s *self,
2198     dx_buff->len,
2199     DMA_TO_DEVICE);
2200    
2201     - if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
2202     + if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
2203     + ret = 0;
2204     goto exit;
2205     + }
2206    
2207     first = dx_buff;
2208     dx_buff->len_pkt = skb->len;
2209     @@ -598,10 +600,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
2210     if (likely(frags)) {
2211     err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
2212     ring, frags);
2213     - if (err >= 0) {
2214     - ++ring->stats.tx.packets;
2215     - ring->stats.tx.bytes += skb->len;
2216     - }
2217     } else {
2218     err = NETDEV_TX_BUSY;
2219     }
2220     diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2221     index 76bdbe1596d6..03821b46a8cb 100644
2222     --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2223     +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
2224     @@ -243,9 +243,12 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
2225     }
2226     }
2227    
2228     - if (unlikely(buff->is_eop))
2229     - dev_kfree_skb_any(buff->skb);
2230     + if (unlikely(buff->is_eop)) {
2231     + ++self->stats.rx.packets;
2232     + self->stats.tx.bytes += buff->skb->len;
2233    
2234     + dev_kfree_skb_any(buff->skb);
2235     + }
2236     buff->pa = 0U;
2237     buff->eop_index = 0xffffU;
2238     self->sw_head = aq_ring_next_dx(self, self->sw_head);
2239     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2240     index 68618891b0e4..374e11a91790 100644
2241     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2242     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2243     @@ -11712,6 +11712,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2244     if (version_printed++ == 0)
2245     pr_info("%s", version);
2246    
2247     + /* Clear any pending DMA transactions from crash kernel
2248     + * while loading driver in capture kernel.
2249     + */
2250     + if (is_kdump_kernel()) {
2251     + pci_clear_master(pdev);
2252     + pcie_flr(pdev);
2253     + }
2254     +
2255     max_irqs = bnxt_get_max_irq(pdev);
2256     dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
2257     if (!dev)
2258     @@ -11908,10 +11916,10 @@ static void bnxt_shutdown(struct pci_dev *pdev)
2259     dev_close(dev);
2260    
2261     bnxt_ulp_shutdown(bp);
2262     + bnxt_clear_int_mode(bp);
2263     + pci_disable_device(pdev);
2264    
2265     if (system_state == SYSTEM_POWER_OFF) {
2266     - bnxt_clear_int_mode(bp);
2267     - pci_disable_device(pdev);
2268     pci_wake_from_d3(pdev, bp->wol);
2269     pci_set_power_state(pdev, PCI_D3hot);
2270     }
2271     diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2272     index 95a94507cec1..234c13ebbc41 100644
2273     --- a/drivers/net/ethernet/cadence/macb_main.c
2274     +++ b/drivers/net/ethernet/cadence/macb_main.c
2275     @@ -3690,6 +3690,10 @@ static int at91ether_open(struct net_device *dev)
2276     u32 ctl;
2277     int ret;
2278    
2279     + ret = pm_runtime_get_sync(&lp->pdev->dev);
2280     + if (ret < 0)
2281     + return ret;
2282     +
2283     /* Clear internal statistics */
2284     ctl = macb_readl(lp, NCR);
2285     macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
2286     @@ -3750,7 +3754,7 @@ static int at91ether_close(struct net_device *dev)
2287     q->rx_buffers, q->rx_buffers_dma);
2288     q->rx_buffers = NULL;
2289    
2290     - return 0;
2291     + return pm_runtime_put(&lp->pdev->dev);
2292     }
2293    
2294     /* Transmit packet */
2295     diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2296     index 162881005a6d..c01cf8ef69df 100644
2297     --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2298     +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
2299     @@ -6003,6 +6003,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
2300     static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
2301     struct hclge_fd_rule_tuples *tuples)
2302     {
2303     +#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
2304     +#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
2305     +
2306     tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
2307     tuples->ip_proto = fkeys->basic.ip_proto;
2308     tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
2309     @@ -6011,12 +6014,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
2310     tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
2311     tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
2312     } else {
2313     - memcpy(tuples->src_ip,
2314     - fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
2315     - sizeof(tuples->src_ip));
2316     - memcpy(tuples->dst_ip,
2317     - fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
2318     - sizeof(tuples->dst_ip));
2319     + int i;
2320     +
2321     + for (i = 0; i < IPV6_SIZE; i++) {
2322     + tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
2323     + tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
2324     + }
2325     }
2326     }
2327    
2328     @@ -9437,6 +9440,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
2329     return ret;
2330     }
2331    
2332     + ret = init_mgr_tbl(hdev);
2333     + if (ret) {
2334     + dev_err(&pdev->dev,
2335     + "failed to reinit manager table, ret = %d\n", ret);
2336     + return ret;
2337     + }
2338     +
2339     ret = hclge_init_fd_config(hdev);
2340     if (ret) {
2341     dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
2342     diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2343     index 3515ace0f020..38042d610f82 100644
2344     --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2345     +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
2346     @@ -2363,7 +2363,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
2347     goto error_param;
2348     }
2349    
2350     - if (i40e_vc_validate_vqs_bitmaps(vqs)) {
2351     + if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2352     aq_ret = I40E_ERR_PARAM;
2353     goto error_param;
2354     }
2355     @@ -2425,7 +2425,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
2356     goto error_param;
2357     }
2358    
2359     - if (i40e_vc_validate_vqs_bitmaps(vqs)) {
2360     + if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
2361     aq_ret = I40E_ERR_PARAM;
2362     goto error_param;
2363     }
2364     diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
2365     index 3a6b3950eb0e..171f0b625407 100644
2366     --- a/drivers/net/ethernet/intel/ice/ice_common.c
2367     +++ b/drivers/net/ethernet/intel/ice/ice_common.c
2368     @@ -934,7 +934,7 @@ void ice_deinit_hw(struct ice_hw *hw)
2369     */
2370     enum ice_status ice_check_reset(struct ice_hw *hw)
2371     {
2372     - u32 cnt, reg = 0, grst_delay;
2373     + u32 cnt, reg = 0, grst_delay, uld_mask;
2374    
2375     /* Poll for Device Active state in case a recent CORER, GLOBR,
2376     * or EMPR has occurred. The grst delay value is in 100ms units.
2377     @@ -956,13 +956,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
2378     return ICE_ERR_RESET_FAILED;
2379     }
2380    
2381     -#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
2382     - GLNVM_ULD_GLOBR_DONE_M)
2383     +#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
2384     + GLNVM_ULD_PCIER_DONE_1_M |\
2385     + GLNVM_ULD_CORER_DONE_M |\
2386     + GLNVM_ULD_GLOBR_DONE_M |\
2387     + GLNVM_ULD_POR_DONE_M |\
2388     + GLNVM_ULD_POR_DONE_1_M |\
2389     + GLNVM_ULD_PCIER_DONE_2_M)
2390     +
2391     + uld_mask = ICE_RESET_DONE_MASK;
2392    
2393     /* Device is Active; check Global Reset processes are done */
2394     for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
2395     - reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
2396     - if (reg == ICE_RESET_DONE_MASK) {
2397     + reg = rd32(hw, GLNVM_ULD) & uld_mask;
2398     + if (reg == uld_mask) {
2399     ice_debug(hw, ICE_DBG_INIT,
2400     "Global reset processes done. %d\n", cnt);
2401     break;
2402     diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
2403     index 152fbd556e9b..9138b19de87e 100644
2404     --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
2405     +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
2406     @@ -273,8 +273,14 @@
2407     #define GLNVM_GENS_SR_SIZE_S 5
2408     #define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
2409     #define GLNVM_ULD 0x000B6008
2410     +#define GLNVM_ULD_PCIER_DONE_M BIT(0)
2411     +#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
2412     #define GLNVM_ULD_CORER_DONE_M BIT(3)
2413     #define GLNVM_ULD_GLOBR_DONE_M BIT(4)
2414     +#define GLNVM_ULD_POR_DONE_M BIT(5)
2415     +#define GLNVM_ULD_POR_DONE_1_M BIT(8)
2416     +#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
2417     +#define GLNVM_ULD_PE_DONE_M BIT(10)
2418     #define GLPCI_CNF2 0x000BE004
2419     #define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
2420     #define PF_FUNC_RID 0x0009E880
2421     diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
2422     index aac115136720..337156232501 100644
2423     --- a/drivers/net/ethernet/mscc/ocelot_board.c
2424     +++ b/drivers/net/ethernet/mscc/ocelot_board.c
2425     @@ -112,6 +112,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
2426     if (err != 4)
2427     break;
2428    
2429     + /* At this point the IFH was read correctly, so it is safe to
2430     + * presume that there is no error. The err needs to be reset
2431     + * otherwise a frame could come in CPU queue between the while
2432     + * condition and the check for error later on. And in that case
2433     + * the new frame is just removed and not processed.
2434     + */
2435     + err = 0;
2436     +
2437     ocelot_parse_ifh(ifh, &info);
2438    
2439     dev = ocelot->ports[info.port]->dev;
2440     diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
2441     index c303a92d5b06..1f27f9866b80 100644
2442     --- a/drivers/net/ethernet/qlogic/qede/qede.h
2443     +++ b/drivers/net/ethernet/qlogic/qede/qede.h
2444     @@ -163,6 +163,8 @@ struct qede_rdma_dev {
2445     struct list_head entry;
2446     struct list_head rdma_event_list;
2447     struct workqueue_struct *rdma_wq;
2448     + struct kref refcnt;
2449     + struct completion event_comp;
2450     bool exp_recovery;
2451     };
2452    
2453     diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
2454     index ffabc2d2f082..2d873ae8a234 100644
2455     --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
2456     +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
2457     @@ -59,6 +59,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev)
2458     static int qede_rdma_create_wq(struct qede_dev *edev)
2459     {
2460     INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
2461     + kref_init(&edev->rdma_info.refcnt);
2462     + init_completion(&edev->rdma_info.event_comp);
2463     +
2464     edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
2465     if (!edev->rdma_info.rdma_wq) {
2466     DP_NOTICE(edev, "qedr: Could not create workqueue\n");
2467     @@ -83,8 +86,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev)
2468     }
2469     }
2470    
2471     +static void qede_rdma_complete_event(struct kref *ref)
2472     +{
2473     + struct qede_rdma_dev *rdma_dev =
2474     + container_of(ref, struct qede_rdma_dev, refcnt);
2475     +
2476     + /* no more events will be added after this */
2477     + complete(&rdma_dev->event_comp);
2478     +}
2479     +
2480     static void qede_rdma_destroy_wq(struct qede_dev *edev)
2481     {
2482     + /* Avoid race with add_event flow, make sure it finishes before
2483     + * we start accessing the list and cleaning up the work
2484     + */
2485     + kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
2486     + wait_for_completion(&edev->rdma_info.event_comp);
2487     +
2488     qede_rdma_cleanup_event(edev);
2489     destroy_workqueue(edev->rdma_info.rdma_wq);
2490     }
2491     @@ -310,15 +328,24 @@ static void qede_rdma_add_event(struct qede_dev *edev,
2492     if (!edev->rdma_info.qedr_dev)
2493     return;
2494    
2495     + /* We don't want the cleanup flow to start while we're allocating and
2496     + * scheduling the work
2497     + */
2498     + if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
2499     + return; /* already being destroyed */
2500     +
2501     event_node = qede_rdma_get_free_event_node(edev);
2502     if (!event_node)
2503     - return;
2504     + goto out;
2505    
2506     event_node->event = event;
2507     event_node->ptr = edev;
2508    
2509     INIT_WORK(&event_node->work, qede_rdma_handle_event);
2510     queue_work(edev->rdma_info.rdma_wq, &event_node->work);
2511     +
2512     +out:
2513     + kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
2514     }
2515    
2516     void qede_rdma_dev_event_open(struct qede_dev *edev)
2517     diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
2518     index eab83e71567a..6c0732fc8c25 100644
2519     --- a/drivers/net/hyperv/netvsc.c
2520     +++ b/drivers/net/hyperv/netvsc.c
2521     @@ -99,7 +99,7 @@ static struct netvsc_device *alloc_net_device(void)
2522    
2523     init_waitqueue_head(&net_device->wait_drain);
2524     net_device->destroy = false;
2525     - net_device->tx_disable = false;
2526     + net_device->tx_disable = true;
2527    
2528     net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
2529     net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
2530     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
2531     index 0dee358864f3..ca16ae8c8332 100644
2532     --- a/drivers/net/hyperv/netvsc_drv.c
2533     +++ b/drivers/net/hyperv/netvsc_drv.c
2534     @@ -973,6 +973,7 @@ static int netvsc_attach(struct net_device *ndev,
2535     }
2536    
2537     /* In any case device is now ready */
2538     + nvdev->tx_disable = false;
2539     netif_device_attach(ndev);
2540    
2541     /* Note: enable and attach happen when sub-channels setup */
2542     @@ -2350,6 +2351,8 @@ static int netvsc_probe(struct hv_device *dev,
2543     else
2544     net->max_mtu = ETH_DATA_LEN;
2545    
2546     + nvdev->tx_disable = false;
2547     +
2548     ret = register_netdevice(net);
2549     if (ret != 0) {
2550     pr_err("Unable to register netdev.\n");
2551     diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
2552     index 7e9975d25066..f1ded03f0229 100644
2553     --- a/drivers/net/phy/mdio-bcm-iproc.c
2554     +++ b/drivers/net/phy/mdio-bcm-iproc.c
2555     @@ -178,6 +178,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
2556     return 0;
2557     }
2558    
2559     +#ifdef CONFIG_PM_SLEEP
2560     +int iproc_mdio_resume(struct device *dev)
2561     +{
2562     + struct platform_device *pdev = to_platform_device(dev);
2563     + struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
2564     +
2565     + /* restore the mii clock configuration */
2566     + iproc_mdio_config_clk(priv->base);
2567     +
2568     + return 0;
2569     +}
2570     +
2571     +static const struct dev_pm_ops iproc_mdio_pm_ops = {
2572     + .resume = iproc_mdio_resume
2573     +};
2574     +#endif /* CONFIG_PM_SLEEP */
2575     +
2576     static const struct of_device_id iproc_mdio_of_match[] = {
2577     { .compatible = "brcm,iproc-mdio", },
2578     { /* sentinel */ },
2579     @@ -188,6 +205,9 @@ static struct platform_driver iproc_mdio_driver = {
2580     .driver = {
2581     .name = "iproc-mdio",
2582     .of_match_table = iproc_mdio_of_match,
2583     +#ifdef CONFIG_PM_SLEEP
2584     + .pm = &iproc_mdio_pm_ops,
2585     +#endif
2586     },
2587     .probe = iproc_mdio_probe,
2588     .remove = iproc_mdio_remove,
2589     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2590     index 9485c8d1de8a..3b7a3b8a5e06 100644
2591     --- a/drivers/net/usb/qmi_wwan.c
2592     +++ b/drivers/net/usb/qmi_wwan.c
2593     @@ -61,7 +61,6 @@ enum qmi_wwan_flags {
2594    
2595     enum qmi_wwan_quirks {
2596     QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
2597     - QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
2598     };
2599    
2600     struct qmimux_hdr {
2601     @@ -916,16 +915,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
2602     .data = QMI_WWAN_QUIRK_DTR,
2603     };
2604    
2605     -static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
2606     - .description = "WWAN/QMI device",
2607     - .flags = FLAG_WWAN | FLAG_SEND_ZLP,
2608     - .bind = qmi_wwan_bind,
2609     - .unbind = qmi_wwan_unbind,
2610     - .manage_power = qmi_wwan_manage_power,
2611     - .rx_fixup = qmi_wwan_rx_fixup,
2612     - .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
2613     -};
2614     -
2615     #define HUAWEI_VENDOR_ID 0x12D1
2616    
2617     /* map QMI/wwan function by a fixed interface number */
2618     @@ -946,14 +935,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
2619     #define QMI_GOBI_DEVICE(vend, prod) \
2620     QMI_FIXED_INTF(vend, prod, 0)
2621    
2622     -/* Quectel does not use fixed interface numbers on at least some of their
2623     - * devices. We need to check the number of endpoints to ensure that we bind to
2624     - * the correct interface.
2625     +/* Many devices have QMI and DIAG functions which are distinguishable
2626     + * from other vendor specific functions by class, subclass and
2627     + * protocol all being 0xff. The DIAG function has exactly 2 endpoints
2628     + * and is silently rejected when probed.
2629     + *
2630     + * This makes it possible to match dynamically numbered QMI functions
2631     + * as seen on e.g. many Quectel modems.
2632     */
2633     -#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
2634     +#define QMI_MATCH_FF_FF_FF(vend, prod) \
2635     USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
2636     USB_SUBCLASS_VENDOR_SPEC, 0xff), \
2637     - .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
2638     + .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
2639    
2640     static const struct usb_device_id products[] = {
2641     /* 1. CDC ECM like devices match on the control interface */
2642     @@ -1059,10 +1052,10 @@ static const struct usb_device_id products[] = {
2643     USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
2644     .driver_info = (unsigned long)&qmi_wwan_info,
2645     },
2646     - {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
2647     - {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
2648     - {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
2649     - {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
2650     + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
2651     + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
2652     + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
2653     + {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
2654    
2655     /* 3. Combined interface devices matching on interface number */
2656     {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
2657     @@ -1363,6 +1356,7 @@ static const struct usb_device_id products[] = {
2658     {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
2659     {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
2660     {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
2661     + {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
2662     {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
2663     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
2664     {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
2665     @@ -1454,7 +1448,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
2666     {
2667     struct usb_device_id *id = (struct usb_device_id *)prod;
2668     struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
2669     - const struct driver_info *info;
2670    
2671     /* Workaround to enable dynamic IDs. This disables usbnet
2672     * blacklisting functionality. Which, if required, can be
2673     @@ -1490,12 +1483,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
2674     * different. Ignore the current interface if the number of endpoints
2675     * equals the number for the diag interface (two).
2676     */
2677     - info = (void *)id->driver_info;
2678     -
2679     - if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
2680     - if (desc->bNumEndpoints == 2)
2681     - return -ENODEV;
2682     - }
2683     + if (desc->bNumEndpoints == 2)
2684     + return -ENODEV;
2685    
2686     return usbnet_probe(intf, id);
2687     }
2688     diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
2689     index 547ff3c578ee..fa5634af40f7 100644
2690     --- a/drivers/net/wireless/marvell/mwifiex/main.h
2691     +++ b/drivers/net/wireless/marvell/mwifiex/main.h
2692     @@ -1295,19 +1295,6 @@ mwifiex_copy_rates(u8 *dest, u32 pos, u8 *src, int len)
2693     return pos;
2694     }
2695    
2696     -/* This function return interface number with the same bss_type.
2697     - */
2698     -static inline u8
2699     -mwifiex_get_intf_num(struct mwifiex_adapter *adapter, u8 bss_type)
2700     -{
2701     - u8 i, num = 0;
2702     -
2703     - for (i = 0; i < adapter->priv_num; i++)
2704     - if (adapter->priv[i] && adapter->priv[i]->bss_type == bss_type)
2705     - num++;
2706     - return num;
2707     -}
2708     -
2709     /*
2710     * This function returns the correct private structure pointer based
2711     * upon the BSS type and BSS number.
2712     diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
2713     index 7caf1d26124a..f8f282ce39bd 100644
2714     --- a/drivers/net/wireless/marvell/mwifiex/tdls.c
2715     +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
2716     @@ -894,7 +894,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2717     u8 *peer, *pos, *end;
2718     u8 i, action, basic;
2719     u16 cap = 0;
2720     - int ie_len = 0;
2721     + int ies_len = 0;
2722    
2723     if (len < (sizeof(struct ethhdr) + 3))
2724     return;
2725     @@ -916,7 +916,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2726     pos = buf + sizeof(struct ethhdr) + 4;
2727     /* payload 1+ category 1 + action 1 + dialog 1 */
2728     cap = get_unaligned_le16(pos);
2729     - ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
2730     + ies_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
2731     pos += 2;
2732     break;
2733    
2734     @@ -926,7 +926,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2735     /* payload 1+ category 1 + action 1 + dialog 1 + status code 2*/
2736     pos = buf + sizeof(struct ethhdr) + 6;
2737     cap = get_unaligned_le16(pos);
2738     - ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
2739     + ies_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
2740     pos += 2;
2741     break;
2742    
2743     @@ -934,7 +934,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2744     if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
2745     return;
2746     pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
2747     - ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
2748     + ies_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
2749     break;
2750     default:
2751     mwifiex_dbg(priv->adapter, ERROR, "Unknown TDLS frame type.\n");
2752     @@ -947,33 +947,33 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2753    
2754     sta_ptr->tdls_cap.capab = cpu_to_le16(cap);
2755    
2756     - for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
2757     - if (pos + 2 + pos[1] > end)
2758     + for (end = pos + ies_len; pos + 1 < end; pos += 2 + pos[1]) {
2759     + u8 ie_len = pos[1];
2760     +
2761     + if (pos + 2 + ie_len > end)
2762     break;
2763    
2764     switch (*pos) {
2765     case WLAN_EID_SUPP_RATES:
2766     - if (pos[1] > 32)
2767     + if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
2768     return;
2769     - sta_ptr->tdls_cap.rates_len = pos[1];
2770     - for (i = 0; i < pos[1]; i++)
2771     + sta_ptr->tdls_cap.rates_len = ie_len;
2772     + for (i = 0; i < ie_len; i++)
2773     sta_ptr->tdls_cap.rates[i] = pos[i + 2];
2774     break;
2775    
2776     case WLAN_EID_EXT_SUPP_RATES:
2777     - if (pos[1] > 32)
2778     + if (ie_len > sizeof(sta_ptr->tdls_cap.rates))
2779     return;
2780     basic = sta_ptr->tdls_cap.rates_len;
2781     - if (pos[1] > 32 - basic)
2782     + if (ie_len > sizeof(sta_ptr->tdls_cap.rates) - basic)
2783     return;
2784     - for (i = 0; i < pos[1]; i++)
2785     + for (i = 0; i < ie_len; i++)
2786     sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
2787     - sta_ptr->tdls_cap.rates_len += pos[1];
2788     + sta_ptr->tdls_cap.rates_len += ie_len;
2789     break;
2790     case WLAN_EID_HT_CAPABILITY:
2791     - if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
2792     - return;
2793     - if (pos[1] != sizeof(struct ieee80211_ht_cap))
2794     + if (ie_len != sizeof(struct ieee80211_ht_cap))
2795     return;
2796     /* copy the ie's value into ht_capb*/
2797     memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
2798     @@ -981,59 +981,45 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2799     sta_ptr->is_11n_enabled = 1;
2800     break;
2801     case WLAN_EID_HT_OPERATION:
2802     - if (pos > end -
2803     - sizeof(struct ieee80211_ht_operation) - 2)
2804     - return;
2805     - if (pos[1] != sizeof(struct ieee80211_ht_operation))
2806     + if (ie_len != sizeof(struct ieee80211_ht_operation))
2807     return;
2808     /* copy the ie's value into ht_oper*/
2809     memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
2810     sizeof(struct ieee80211_ht_operation));
2811     break;
2812     case WLAN_EID_BSS_COEX_2040:
2813     - if (pos > end - 3)
2814     - return;
2815     - if (pos[1] != 1)
2816     + if (ie_len != sizeof(pos[2]))
2817     return;
2818     sta_ptr->tdls_cap.coex_2040 = pos[2];
2819     break;
2820     case WLAN_EID_EXT_CAPABILITY:
2821     - if (pos > end - sizeof(struct ieee_types_header))
2822     - return;
2823     - if (pos[1] < sizeof(struct ieee_types_header))
2824     + if (ie_len < sizeof(struct ieee_types_header))
2825     return;
2826     - if (pos[1] > 8)
2827     + if (ie_len > 8)
2828     return;
2829     memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
2830     sizeof(struct ieee_types_header) +
2831     - min_t(u8, pos[1], 8));
2832     + min_t(u8, ie_len, 8));
2833     break;
2834     case WLAN_EID_RSN:
2835     - if (pos > end - sizeof(struct ieee_types_header))
2836     + if (ie_len < sizeof(struct ieee_types_header))
2837     return;
2838     - if (pos[1] < sizeof(struct ieee_types_header))
2839     - return;
2840     - if (pos[1] > IEEE_MAX_IE_SIZE -
2841     + if (ie_len > IEEE_MAX_IE_SIZE -
2842     sizeof(struct ieee_types_header))
2843     return;
2844     memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
2845     sizeof(struct ieee_types_header) +
2846     - min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
2847     + min_t(u8, ie_len, IEEE_MAX_IE_SIZE -
2848     sizeof(struct ieee_types_header)));
2849     break;
2850     case WLAN_EID_QOS_CAPA:
2851     - if (pos > end - 3)
2852     - return;
2853     - if (pos[1] != 1)
2854     + if (ie_len != sizeof(pos[2]))
2855     return;
2856     sta_ptr->tdls_cap.qos_info = pos[2];
2857     break;
2858     case WLAN_EID_VHT_OPERATION:
2859     if (priv->adapter->is_hw_11ac_capable) {
2860     - if (pos > end -
2861     - sizeof(struct ieee80211_vht_operation) - 2)
2862     - return;
2863     - if (pos[1] !=
2864     + if (ie_len !=
2865     sizeof(struct ieee80211_vht_operation))
2866     return;
2867     /* copy the ie's value into vhtoper*/
2868     @@ -1043,10 +1029,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2869     break;
2870     case WLAN_EID_VHT_CAPABILITY:
2871     if (priv->adapter->is_hw_11ac_capable) {
2872     - if (pos > end -
2873     - sizeof(struct ieee80211_vht_cap) - 2)
2874     - return;
2875     - if (pos[1] != sizeof(struct ieee80211_vht_cap))
2876     + if (ie_len != sizeof(struct ieee80211_vht_cap))
2877     return;
2878     /* copy the ie's value into vhtcap*/
2879     memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
2880     @@ -1056,9 +1039,7 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
2881     break;
2882     case WLAN_EID_AID:
2883     if (priv->adapter->is_hw_11ac_capable) {
2884     - if (pos > end - 4)
2885     - return;
2886     - if (pos[1] != 2)
2887     + if (ie_len != sizeof(u16))
2888     return;
2889     sta_ptr->tdls_cap.aid =
2890     get_unaligned_le16((pos + 2));
2891     diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
2892     index 720c89d6066e..4ac8cb262559 100644
2893     --- a/drivers/nfc/pn544/i2c.c
2894     +++ b/drivers/nfc/pn544/i2c.c
2895     @@ -225,6 +225,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
2896    
2897     out:
2898     gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity);
2899     + usleep_range(10000, 15000);
2900     }
2901    
2902     static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
2903     diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2904     index e703827d27e9..7dacfd102a99 100644
2905     --- a/drivers/nvme/host/core.c
2906     +++ b/drivers/nvme/host/core.c
2907     @@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
2908     * nvme_reset_wq - hosts nvme reset works
2909     * nvme_delete_wq - hosts nvme delete works
2910     *
2911     - * nvme_wq will host works such are scan, aen handling, fw activation,
2912     - * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
2913     + * nvme_wq will host works such as scan, aen handling, fw activation,
2914     + * keep-alive, periodic reconnects etc. nvme_reset_wq
2915     * runs reset works which also flush works hosted on nvme_wq for
2916     * serialization purposes. nvme_delete_wq host controller deletion
2917     * works which flush reset works for serialization.
2918     @@ -972,7 +972,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
2919     startka = true;
2920     spin_unlock_irqrestore(&ctrl->lock, flags);
2921     if (startka)
2922     - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
2923     + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
2924     }
2925    
2926     static int nvme_keep_alive(struct nvme_ctrl *ctrl)
2927     @@ -1002,7 +1002,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
2928     dev_dbg(ctrl->device,
2929     "reschedule traffic based keep-alive timer\n");
2930     ctrl->comp_seen = false;
2931     - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
2932     + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
2933     return;
2934     }
2935    
2936     @@ -1019,7 +1019,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
2937     if (unlikely(ctrl->kato == 0))
2938     return;
2939    
2940     - schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
2941     + queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
2942     }
2943    
2944     void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
2945     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2946     index f34a56d588d3..570c75c92e29 100644
2947     --- a/drivers/nvme/host/pci.c
2948     +++ b/drivers/nvme/host/pci.c
2949     @@ -1084,9 +1084,9 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
2950    
2951     spin_lock(&nvmeq->cq_poll_lock);
2952     found = nvme_process_cq(nvmeq, &start, &end, -1);
2953     + nvme_complete_cqes(nvmeq, start, end);
2954     spin_unlock(&nvmeq->cq_poll_lock);
2955    
2956     - nvme_complete_cqes(nvmeq, start, end);
2957     return found;
2958     }
2959    
2960     @@ -1407,6 +1407,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
2961     nvme_poll_irqdisable(nvmeq, -1);
2962     }
2963    
2964     +/*
2965     + * Called only on a device that has been disabled and after all other threads
2966     + * that can check this device's completion queues have synced. This is the
2967     + * last chance for the driver to see a natural completion before
2968     + * nvme_cancel_request() terminates all incomplete requests.
2969     + */
2970     +static void nvme_reap_pending_cqes(struct nvme_dev *dev)
2971     +{
2972     + u16 start, end;
2973     + int i;
2974     +
2975     + for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
2976     + nvme_process_cq(&dev->queues[i], &start, &end, -1);
2977     + nvme_complete_cqes(&dev->queues[i], start, end);
2978     + }
2979     +}
2980     +
2981     static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
2982     int entry_size)
2983     {
2984     @@ -2241,11 +2258,6 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
2985     if (timeout == 0)
2986     return false;
2987    
2988     - /* handle any remaining CQEs */
2989     - if (opcode == nvme_admin_delete_cq &&
2990     - !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
2991     - nvme_poll_irqdisable(nvmeq, -1);
2992     -
2993     sent--;
2994     if (nr_queues)
2995     goto retry;
2996     @@ -2434,6 +2446,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
2997     nvme_suspend_io_queues(dev);
2998     nvme_suspend_queue(&dev->queues[0]);
2999     nvme_pci_disable(dev);
3000     + nvme_reap_pending_cqes(dev);
3001    
3002     blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
3003     blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
3004     diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
3005     index cb4c3000a57e..4ff51da3b13f 100644
3006     --- a/drivers/nvme/host/rdma.c
3007     +++ b/drivers/nvme/host/rdma.c
3008     @@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
3009     if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
3010     return;
3011    
3012     - queue_work(nvme_wq, &ctrl->err_work);
3013     + queue_work(nvme_reset_wq, &ctrl->err_work);
3014     }
3015    
3016     static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
3017     diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
3018     index 7544be84ab35..244984420b41 100644
3019     --- a/drivers/nvme/host/tcp.c
3020     +++ b/drivers/nvme/host/tcp.c
3021     @@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
3022     if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
3023     return;
3024    
3025     - queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
3026     + queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
3027     }
3028    
3029     static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
3030     @@ -1054,7 +1054,12 @@ static void nvme_tcp_io_work(struct work_struct *w)
3031     } else if (unlikely(result < 0)) {
3032     dev_err(queue->ctrl->ctrl.device,
3033     "failed to send request %d\n", result);
3034     - if (result != -EPIPE)
3035     +
3036     + /*
3037     + * Fail the request unless peer closed the connection,
3038     + * in which case error recovery flow will complete all.
3039     + */
3040     + if ((result != -EPIPE) && (result != -ECONNRESET))
3041     nvme_tcp_fail_request(queue->request);
3042     nvme_tcp_done_send_req(queue);
3043     return;
3044     diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
3045     index 191f410cf35c..2f8787276d9b 100644
3046     --- a/drivers/perf/arm_smmuv3_pmu.c
3047     +++ b/drivers/perf/arm_smmuv3_pmu.c
3048     @@ -772,7 +772,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
3049     smmu_pmu->reloc_base = smmu_pmu->reg_base;
3050     }
3051    
3052     - irq = platform_get_irq(pdev, 0);
3053     + irq = platform_get_irq_optional(pdev, 0);
3054     if (irq > 0)
3055     smmu_pmu->irq = irq;
3056    
3057     diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c
3058     index e36fcad668a6..88a3c5690fea 100644
3059     --- a/drivers/pwm/pwm-omap-dmtimer.c
3060     +++ b/drivers/pwm/pwm-omap-dmtimer.c
3061     @@ -256,7 +256,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
3062     if (!timer_pdev) {
3063     dev_err(&pdev->dev, "Unable to find Timer pdev\n");
3064     ret = -ENODEV;
3065     - goto put;
3066     + goto err_find_timer_pdev;
3067     }
3068    
3069     timer_pdata = dev_get_platdata(&timer_pdev->dev);
3070     @@ -264,7 +264,7 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
3071     dev_dbg(&pdev->dev,
3072     "dmtimer pdata structure NULL, deferring probe\n");
3073     ret = -EPROBE_DEFER;
3074     - goto put;
3075     + goto err_platdata;
3076     }
3077    
3078     pdata = timer_pdata->timer_ops;
3079     @@ -283,19 +283,19 @@ static int pwm_omap_dmtimer_probe(struct platform_device *pdev)
3080     !pdata->write_counter) {
3081     dev_err(&pdev->dev, "Incomplete dmtimer pdata structure\n");
3082     ret = -EINVAL;
3083     - goto put;
3084     + goto err_platdata;
3085     }
3086    
3087     if (!of_get_property(timer, "ti,timer-pwm", NULL)) {
3088     dev_err(&pdev->dev, "Missing ti,timer-pwm capability\n");
3089     ret = -ENODEV;
3090     - goto put;
3091     + goto err_timer_property;
3092     }
3093    
3094     dm_timer = pdata->request_by_node(timer);
3095     if (!dm_timer) {
3096     ret = -EPROBE_DEFER;
3097     - goto put;
3098     + goto err_request_timer;
3099     }
3100    
3101     omap = devm_kzalloc(&pdev->dev, sizeof(*omap), GFP_KERNEL);
3102     @@ -352,7 +352,14 @@ err_pwmchip_add:
3103     err_alloc_omap:
3104    
3105     pdata->free(dm_timer);
3106     -put:
3107     +err_request_timer:
3108     +
3109     +err_timer_property:
3110     +err_platdata:
3111     +
3112     + put_device(&timer_pdev->dev);
3113     +err_find_timer_pdev:
3114     +
3115     of_node_put(timer);
3116    
3117     return ret;
3118     @@ -372,6 +379,8 @@ static int pwm_omap_dmtimer_remove(struct platform_device *pdev)
3119    
3120     omap->pdata->free(omap->dm_timer);
3121    
3122     + put_device(&omap->dm_timer_pdev->dev);
3123     +
3124     mutex_destroy(&omap->mutex);
3125    
3126     return 0;
3127     diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
3128     index bb35ba4a8d24..4348fdff1c61 100644
3129     --- a/drivers/s390/crypto/ap_bus.h
3130     +++ b/drivers/s390/crypto/ap_bus.h
3131     @@ -162,7 +162,7 @@ struct ap_card {
3132     unsigned int functions; /* AP device function bitfield. */
3133     int queue_depth; /* AP queue depth.*/
3134     int id; /* AP card number. */
3135     - atomic_t total_request_count; /* # requests ever for this AP device.*/
3136     + atomic64_t total_request_count; /* # requests ever for this AP device.*/
3137     };
3138    
3139     #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
3140     @@ -179,7 +179,7 @@ struct ap_queue {
3141     enum ap_state state; /* State of the AP device. */
3142     int pendingq_count; /* # requests on pendingq list. */
3143     int requestq_count; /* # requests on requestq list. */
3144     - int total_request_count; /* # requests ever for this AP device.*/
3145     + u64 total_request_count; /* # requests ever for this AP device.*/
3146     int request_timeout; /* Request timeout in jiffies. */
3147     struct timer_list timeout; /* Timer for request timeouts. */
3148     struct list_head pendingq; /* List of message sent to AP queue. */
3149     diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
3150     index 63b4cc6cd7e5..e85bfca1ed16 100644
3151     --- a/drivers/s390/crypto/ap_card.c
3152     +++ b/drivers/s390/crypto/ap_card.c
3153     @@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
3154     char *buf)
3155     {
3156     struct ap_card *ac = to_ap_card(dev);
3157     - unsigned int req_cnt;
3158     + u64 req_cnt;
3159    
3160     req_cnt = 0;
3161     spin_lock_bh(&ap_list_lock);
3162     - req_cnt = atomic_read(&ac->total_request_count);
3163     + req_cnt = atomic64_read(&ac->total_request_count);
3164     spin_unlock_bh(&ap_list_lock);
3165     - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
3166     + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
3167     }
3168    
3169     static ssize_t request_count_store(struct device *dev,
3170     @@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
3171     for_each_ap_queue(aq, ac)
3172     aq->total_request_count = 0;
3173     spin_unlock_bh(&ap_list_lock);
3174     - atomic_set(&ac->total_request_count, 0);
3175     + atomic64_set(&ac->total_request_count, 0);
3176    
3177     return count;
3178     }
3179     diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
3180     index 37c3bdc3642d..a317ab484932 100644
3181     --- a/drivers/s390/crypto/ap_queue.c
3182     +++ b/drivers/s390/crypto/ap_queue.c
3183     @@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
3184     char *buf)
3185     {
3186     struct ap_queue *aq = to_ap_queue(dev);
3187     - unsigned int req_cnt;
3188     + u64 req_cnt;
3189    
3190     spin_lock_bh(&aq->lock);
3191     req_cnt = aq->total_request_count;
3192     spin_unlock_bh(&aq->lock);
3193     - return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
3194     + return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
3195     }
3196    
3197     static ssize_t request_count_store(struct device *dev,
3198     @@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
3199     list_add_tail(&ap_msg->list, &aq->requestq);
3200     aq->requestq_count++;
3201     aq->total_request_count++;
3202     - atomic_inc(&aq->card->total_request_count);
3203     + atomic64_inc(&aq->card->total_request_count);
3204     /* Send/receive as many request from the queue as possible. */
3205     ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
3206     spin_unlock_bh(&aq->lock);
3207     diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
3208     index 9157e728a362..7fa0262e91af 100644
3209     --- a/drivers/s390/crypto/zcrypt_api.c
3210     +++ b/drivers/s390/crypto/zcrypt_api.c
3211     @@ -605,8 +605,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
3212     weight += atomic_read(&zc->load);
3213     pref_weight += atomic_read(&pref_zc->load);
3214     if (weight == pref_weight)
3215     - return atomic_read(&zc->card->total_request_count) >
3216     - atomic_read(&pref_zc->card->total_request_count);
3217     + return atomic64_read(&zc->card->total_request_count) >
3218     + atomic64_read(&pref_zc->card->total_request_count);
3219     return weight > pref_weight;
3220     }
3221    
3222     @@ -1216,11 +1216,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
3223     spin_unlock(&zcrypt_list_lock);
3224     }
3225    
3226     -static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
3227     +static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
3228     {
3229     struct zcrypt_card *zc;
3230     struct zcrypt_queue *zq;
3231     int card;
3232     + u64 cnt;
3233    
3234     memset(reqcnt, 0, sizeof(int) * max_adapters);
3235     spin_lock(&zcrypt_list_lock);
3236     @@ -1232,8 +1233,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
3237     || card >= max_adapters)
3238     continue;
3239     spin_lock(&zq->queue->lock);
3240     - reqcnt[card] = zq->queue->total_request_count;
3241     + cnt = zq->queue->total_request_count;
3242     spin_unlock(&zq->queue->lock);
3243     + reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
3244     }
3245     }
3246     local_bh_enable();
3247     @@ -1411,9 +1413,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
3248     return 0;
3249     }
3250     case ZCRYPT_PERDEV_REQCNT: {
3251     - int *reqcnt;
3252     + u32 *reqcnt;
3253    
3254     - reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
3255     + reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
3256     if (!reqcnt)
3257     return -ENOMEM;
3258     zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
3259     @@ -1470,7 +1472,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
3260     }
3261     case Z90STAT_PERDEV_REQCNT: {
3262     /* the old ioctl supports only 64 adapters */
3263     - int reqcnt[MAX_ZDEV_CARDIDS];
3264     + u32 reqcnt[MAX_ZDEV_CARDIDS];
3265    
3266     zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
3267     if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
3268     diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
3269     index 59e220749ad1..92bace3b28fd 100644
3270     --- a/drivers/s390/net/qeth_l2_main.c
3271     +++ b/drivers/s390/net/qeth_l2_main.c
3272     @@ -1846,15 +1846,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
3273    
3274     QETH_CARD_TEXT(card, 2, "vniccsch");
3275    
3276     - /* do not change anything if BridgePort is enabled */
3277     - if (qeth_bridgeport_is_in_use(card))
3278     - return -EBUSY;
3279     -
3280     /* check if characteristic and enable/disable are supported */
3281     if (!(card->options.vnicc.sup_chars & vnicc) ||
3282     !(card->options.vnicc.set_char_sup & vnicc))
3283     return -EOPNOTSUPP;
3284    
3285     + if (qeth_bridgeport_is_in_use(card))
3286     + return -EBUSY;
3287     +
3288     /* set enable/disable command and store wanted characteristic */
3289     if (state) {
3290     cmd = IPA_VNICC_ENABLE;
3291     @@ -1900,14 +1899,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
3292    
3293     QETH_CARD_TEXT(card, 2, "vniccgch");
3294    
3295     - /* do not get anything if BridgePort is enabled */
3296     - if (qeth_bridgeport_is_in_use(card))
3297     - return -EBUSY;
3298     -
3299     /* check if characteristic is supported */
3300     if (!(card->options.vnicc.sup_chars & vnicc))
3301     return -EOPNOTSUPP;
3302    
3303     + if (qeth_bridgeport_is_in_use(card))
3304     + return -EBUSY;
3305     +
3306     /* if card is ready, query current VNICC state */
3307     if (qeth_card_hw_is_reachable(card))
3308     rc = qeth_l2_vnicc_query_chars(card);
3309     @@ -1925,15 +1923,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
3310    
3311     QETH_CARD_TEXT(card, 2, "vniccsto");
3312    
3313     - /* do not change anything if BridgePort is enabled */
3314     - if (qeth_bridgeport_is_in_use(card))
3315     - return -EBUSY;
3316     -
3317     /* check if characteristic and set_timeout are supported */
3318     if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
3319     !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
3320     return -EOPNOTSUPP;
3321    
3322     + if (qeth_bridgeport_is_in_use(card))
3323     + return -EBUSY;
3324     +
3325     /* do we need to do anything? */
3326     if (card->options.vnicc.learning_timeout == timeout)
3327     return rc;
3328     @@ -1962,14 +1959,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
3329    
3330     QETH_CARD_TEXT(card, 2, "vniccgto");
3331    
3332     - /* do not get anything if BridgePort is enabled */
3333     - if (qeth_bridgeport_is_in_use(card))
3334     - return -EBUSY;
3335     -
3336     /* check if characteristic and get_timeout are supported */
3337     if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
3338     !(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
3339     return -EOPNOTSUPP;
3340     +
3341     + if (qeth_bridgeport_is_in_use(card))
3342     + return -EBUSY;
3343     +
3344     /* if card is ready, get timeout. Otherwise, just return stored value */
3345     *timeout = card->options.vnicc.learning_timeout;
3346     if (qeth_card_hw_is_reachable(card))
3347     diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
3348     index be9424a87173..9c3ef0a02fd4 100644
3349     --- a/drivers/soc/tegra/fuse/fuse-tegra30.c
3350     +++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
3351     @@ -35,7 +35,8 @@
3352     defined(CONFIG_ARCH_TEGRA_124_SOC) || \
3353     defined(CONFIG_ARCH_TEGRA_132_SOC) || \
3354     defined(CONFIG_ARCH_TEGRA_210_SOC) || \
3355     - defined(CONFIG_ARCH_TEGRA_186_SOC)
3356     + defined(CONFIG_ARCH_TEGRA_186_SOC) || \
3357     + defined(CONFIG_ARCH_TEGRA_194_SOC)
3358     static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
3359     {
3360     if (WARN_ON(!fuse->base))
3361     diff --git a/drivers/thermal/broadcom/brcmstb_thermal.c b/drivers/thermal/broadcom/brcmstb_thermal.c
3362     index 5825ac581f56..680f1a070606 100644
3363     --- a/drivers/thermal/broadcom/brcmstb_thermal.c
3364     +++ b/drivers/thermal/broadcom/brcmstb_thermal.c
3365     @@ -49,7 +49,7 @@
3366     #define AVS_TMON_TP_TEST_ENABLE 0x20
3367    
3368     /* Default coefficients */
3369     -#define AVS_TMON_TEMP_SLOPE -487
3370     +#define AVS_TMON_TEMP_SLOPE 487
3371     #define AVS_TMON_TEMP_OFFSET 410040
3372    
3373     /* HW related temperature constants */
3374     @@ -108,23 +108,12 @@ struct brcmstb_thermal_priv {
3375     struct thermal_zone_device *thermal;
3376     };
3377    
3378     -static void avs_tmon_get_coeffs(struct thermal_zone_device *tz, int *slope,
3379     - int *offset)
3380     -{
3381     - *slope = thermal_zone_get_slope(tz);
3382     - *offset = thermal_zone_get_offset(tz);
3383     -}
3384     -
3385     /* Convert a HW code to a temperature reading (millidegree celsius) */
3386     static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
3387     u32 code)
3388     {
3389     - const int val = code & AVS_TMON_TEMP_MASK;
3390     - int slope, offset;
3391     -
3392     - avs_tmon_get_coeffs(tz, &slope, &offset);
3393     -
3394     - return slope * val + offset;
3395     + return (AVS_TMON_TEMP_OFFSET -
3396     + (int)((code & AVS_TMON_TEMP_MAX) * AVS_TMON_TEMP_SLOPE));
3397     }
3398    
3399     /*
3400     @@ -136,20 +125,18 @@ static inline int avs_tmon_code_to_temp(struct thermal_zone_device *tz,
3401     static inline u32 avs_tmon_temp_to_code(struct thermal_zone_device *tz,
3402     int temp, bool low)
3403     {
3404     - int slope, offset;
3405     -
3406     if (temp < AVS_TMON_TEMP_MIN)
3407     - return AVS_TMON_TEMP_MAX; /* Maximum code value */
3408     -
3409     - avs_tmon_get_coeffs(tz, &slope, &offset);
3410     + return AVS_TMON_TEMP_MAX; /* Maximum code value */
3411    
3412     - if (temp >= offset)
3413     + if (temp >= AVS_TMON_TEMP_OFFSET)
3414     return 0; /* Minimum code value */
3415    
3416     if (low)
3417     - return (u32)(DIV_ROUND_UP(offset - temp, abs(slope)));
3418     + return (u32)(DIV_ROUND_UP(AVS_TMON_TEMP_OFFSET - temp,
3419     + AVS_TMON_TEMP_SLOPE));
3420     else
3421     - return (u32)((offset - temp) / abs(slope));
3422     + return (u32)((AVS_TMON_TEMP_OFFSET - temp) /
3423     + AVS_TMON_TEMP_SLOPE);
3424     }
3425    
3426     static int brcmstb_get_temp(void *data, int *temp)
3427     diff --git a/drivers/thermal/db8500_thermal.c b/drivers/thermal/db8500_thermal.c
3428     index 372dbbaaafb8..21d4d6e6409a 100644
3429     --- a/drivers/thermal/db8500_thermal.c
3430     +++ b/drivers/thermal/db8500_thermal.c
3431     @@ -152,8 +152,8 @@ static irqreturn_t prcmu_high_irq_handler(int irq, void *irq_data)
3432     db8500_thermal_update_config(th, idx, THERMAL_TREND_RAISING,
3433     next_low, next_high);
3434    
3435     - dev_info(&th->tz->device,
3436     - "PRCMU set max %ld, min %ld\n", next_high, next_low);
3437     + dev_dbg(&th->tz->device,
3438     + "PRCMU set max %ld, min %ld\n", next_high, next_low);
3439     } else if (idx == num_points - 1)
3440     /* So we roof out 1 degree over the max point */
3441     th->interpolated_temp = db8500_thermal_points[idx] + 1;
3442     diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
3443     index 1a2dd53caade..b53b6528d6ce 100644
3444     --- a/drivers/vhost/net.c
3445     +++ b/drivers/vhost/net.c
3446     @@ -1414,10 +1414,6 @@ static int vhost_net_release(struct inode *inode, struct file *f)
3447    
3448     static struct socket *get_raw_socket(int fd)
3449     {
3450     - struct {
3451     - struct sockaddr_ll sa;
3452     - char buf[MAX_ADDR_LEN];
3453     - } uaddr;
3454     int r;
3455     struct socket *sock = sockfd_lookup(fd, &r);
3456    
3457     @@ -1430,11 +1426,7 @@ static struct socket *get_raw_socket(int fd)
3458     goto err;
3459     }
3460    
3461     - r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
3462     - if (r < 0)
3463     - goto err;
3464     -
3465     - if (uaddr.sa.sll_family != AF_PACKET) {
3466     + if (sock->sk->sk_family != AF_PACKET) {
3467     r = -EPFNOSUPPORT;
3468     goto err;
3469     }
3470     diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
3471     index e7cf41aa26c3..1ce39de917f0 100644
3472     --- a/drivers/watchdog/wdat_wdt.c
3473     +++ b/drivers/watchdog/wdat_wdt.c
3474     @@ -389,7 +389,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
3475    
3476     memset(&r, 0, sizeof(r));
3477     r.start = gas->address;
3478     - r.end = r.start + gas->access_width - 1;
3479     + r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
3480     if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
3481     r.flags = IORESOURCE_MEM;
3482     } else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
3483     diff --git a/fs/ceph/file.c b/fs/ceph/file.c
3484     index 11929d2bb594..cd09e63d682b 100644
3485     --- a/fs/ceph/file.c
3486     +++ b/fs/ceph/file.c
3487     @@ -1418,6 +1418,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
3488     struct ceph_cap_flush *prealloc_cf;
3489     ssize_t count, written = 0;
3490     int err, want, got;
3491     + bool direct_lock = false;
3492     loff_t pos;
3493     loff_t limit = max(i_size_read(inode), fsc->max_file_size);
3494    
3495     @@ -1428,8 +1429,11 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
3496     if (!prealloc_cf)
3497     return -ENOMEM;
3498    
3499     + if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
3500     + direct_lock = true;
3501     +
3502     retry_snap:
3503     - if (iocb->ki_flags & IOCB_DIRECT)
3504     + if (direct_lock)
3505     ceph_start_io_direct(inode);
3506     else
3507     ceph_start_io_write(inode);
3508     @@ -1519,14 +1523,15 @@ retry_snap:
3509    
3510     /* we might need to revert back to that point */
3511     data = *from;
3512     - if (iocb->ki_flags & IOCB_DIRECT) {
3513     + if (iocb->ki_flags & IOCB_DIRECT)
3514     written = ceph_direct_read_write(iocb, &data, snapc,
3515     &prealloc_cf);
3516     - ceph_end_io_direct(inode);
3517     - } else {
3518     + else
3519     written = ceph_sync_write(iocb, &data, pos, snapc);
3520     + if (direct_lock)
3521     + ceph_end_io_direct(inode);
3522     + else
3523     ceph_end_io_write(inode);
3524     - }
3525     if (written > 0)
3526     iov_iter_advance(from, written);
3527     ceph_put_snap_context(snapc);
3528     @@ -1577,7 +1582,7 @@ retry_snap:
3529    
3530     goto out_unlocked;
3531     out:
3532     - if (iocb->ki_flags & IOCB_DIRECT)
3533     + if (direct_lock)
3534     ceph_end_io_direct(inode);
3535     else
3536     ceph_end_io_write(inode);
3537     diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
3538     index f842944a5c76..1619af216677 100644
3539     --- a/fs/cifs/cifsacl.c
3540     +++ b/fs/cifs/cifsacl.c
3541     @@ -603,7 +603,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
3542     ((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
3543     *pmode |= (S_IXUGO & (*pbits_to_set));
3544    
3545     - cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
3546     + cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
3547     return;
3548     }
3549    
3550     @@ -632,7 +632,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
3551     if (mode & S_IXUGO)
3552     *pace_flags |= SET_FILE_EXEC_RIGHTS;
3553    
3554     - cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
3555     + cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
3556     mode, *pace_flags);
3557     return;
3558     }
3559     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3560     index 5d3c867bdc80..bcda48c03882 100644
3561     --- a/fs/cifs/connect.c
3562     +++ b/fs/cifs/connect.c
3563     @@ -4094,7 +4094,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
3564     cifs_sb->mnt_gid = pvolume_info->linux_gid;
3565     cifs_sb->mnt_file_mode = pvolume_info->file_mode;
3566     cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
3567     - cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
3568     + cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
3569     cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
3570    
3571     cifs_sb->actimeo = pvolume_info->actimeo;
3572     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3573     index ed59e4a8db59..aafcd79c4772 100644
3574     --- a/fs/cifs/inode.c
3575     +++ b/fs/cifs/inode.c
3576     @@ -1586,7 +1586,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
3577     struct TCP_Server_Info *server;
3578     char *full_path;
3579    
3580     - cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
3581     + cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
3582     mode, inode);
3583    
3584     cifs_sb = CIFS_SB(inode->i_sb);
3585     diff --git a/fs/dax.c b/fs/dax.c
3586     index 2cc43cd914eb..cc56313c6b3b 100644
3587     --- a/fs/dax.c
3588     +++ b/fs/dax.c
3589     @@ -1207,6 +1207,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
3590     lockdep_assert_held(&inode->i_rwsem);
3591     }
3592    
3593     + if (iocb->ki_flags & IOCB_NOWAIT)
3594     + flags |= IOMAP_NOWAIT;
3595     +
3596     while (iov_iter_count(iter)) {
3597     ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
3598     iter, dax_iomap_actor);
3599     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
3600     index 3ca604807839..8bd806a03a90 100644
3601     --- a/fs/ext4/super.c
3602     +++ b/fs/ext4/super.c
3603     @@ -2343,7 +2343,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
3604     {
3605     struct ext4_sb_info *sbi = EXT4_SB(sb);
3606     struct flex_groups **old_groups, **new_groups;
3607     - int size, i;
3608     + int size, i, j;
3609    
3610     if (!sbi->s_log_groups_per_flex)
3611     return 0;
3612     @@ -2364,8 +2364,8 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
3613     sizeof(struct flex_groups)),
3614     GFP_KERNEL);
3615     if (!new_groups[i]) {
3616     - for (i--; i >= sbi->s_flex_groups_allocated; i--)
3617     - kvfree(new_groups[i]);
3618     + for (j = sbi->s_flex_groups_allocated; j < i; j++)
3619     + kvfree(new_groups[j]);
3620     kvfree(new_groups);
3621     ext4_msg(sb, KERN_ERR,
3622     "not enough memory for %d flex groups", size);
3623     diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
3624     index 5d6fd940aab2..ec9a1f9ce2dd 100644
3625     --- a/fs/f2fs/data.c
3626     +++ b/fs/f2fs/data.c
3627     @@ -3030,7 +3030,8 @@ int f2fs_migrate_page(struct address_space *mapping,
3628    
3629     #ifdef CONFIG_SWAP
3630     /* Copied from generic_swapfile_activate() to check any holes */
3631     -static int check_swap_activate(struct file *swap_file, unsigned int max)
3632     +static int check_swap_activate(struct swap_info_struct *sis,
3633     + struct file *swap_file, sector_t *span)
3634     {
3635     struct address_space *mapping = swap_file->f_mapping;
3636     struct inode *inode = mapping->host;
3637     @@ -3041,6 +3042,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
3638     sector_t last_block;
3639     sector_t lowest_block = -1;
3640     sector_t highest_block = 0;
3641     + int nr_extents = 0;
3642     + int ret;
3643    
3644     blkbits = inode->i_blkbits;
3645     blocks_per_page = PAGE_SIZE >> blkbits;
3646     @@ -3052,7 +3055,8 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
3647     probe_block = 0;
3648     page_no = 0;
3649     last_block = i_size_read(inode) >> blkbits;
3650     - while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
3651     + while ((probe_block + blocks_per_page) <= last_block &&
3652     + page_no < sis->max) {
3653     unsigned block_in_page;
3654     sector_t first_block;
3655    
3656     @@ -3092,13 +3096,27 @@ static int check_swap_activate(struct file *swap_file, unsigned int max)
3657     highest_block = first_block;
3658     }
3659    
3660     + /*
3661     + * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
3662     + */
3663     + ret = add_swap_extent(sis, page_no, 1, first_block);
3664     + if (ret < 0)
3665     + goto out;
3666     + nr_extents += ret;
3667     page_no++;
3668     probe_block += blocks_per_page;
3669     reprobe:
3670     continue;
3671     }
3672     - return 0;
3673     -
3674     + ret = nr_extents;
3675     + *span = 1 + highest_block - lowest_block;
3676     + if (page_no == 0)
3677     + page_no = 1; /* force Empty message */
3678     + sis->max = page_no;
3679     + sis->pages = page_no - 1;
3680     + sis->highest_bit = page_no - 1;
3681     +out:
3682     + return ret;
3683     bad_bmap:
3684     pr_err("swapon: swapfile has holes\n");
3685     return -EINVAL;
3686     @@ -3120,14 +3138,14 @@ static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3687     if (ret)
3688     return ret;
3689    
3690     - ret = check_swap_activate(file, sis->max);
3691     - if (ret)
3692     + ret = check_swap_activate(sis, file, span);
3693     + if (ret < 0)
3694     return ret;
3695    
3696     set_inode_flag(inode, FI_PIN_FILE);
3697     f2fs_precache_extents(inode);
3698     f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3699     - return 0;
3700     + return ret;
3701     }
3702    
3703     static void f2fs_swap_deactivate(struct file *file)
3704     diff --git a/fs/io_uring.c b/fs/io_uring.c
3705     index ed9a551882cf..e37b84146453 100644
3706     --- a/fs/io_uring.c
3707     +++ b/fs/io_uring.c
3708     @@ -71,6 +71,7 @@
3709     #include <linux/sizes.h>
3710     #include <linux/hugetlb.h>
3711     #include <linux/highmem.h>
3712     +#include <linux/fs_struct.h>
3713    
3714     #include <uapi/linux/io_uring.h>
3715    
3716     @@ -334,6 +335,8 @@ struct io_kiocb {
3717     u32 result;
3718     u32 sequence;
3719    
3720     + struct fs_struct *fs;
3721     +
3722     struct work_struct work;
3723     };
3724    
3725     @@ -651,6 +654,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
3726     /* one is dropped after submission, the other at completion */
3727     refcount_set(&req->refs, 2);
3728     req->result = 0;
3729     + req->fs = NULL;
3730     return req;
3731     out:
3732     percpu_ref_put(&ctx->refs);
3733     @@ -1653,6 +1657,11 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
3734     else if (force_nonblock)
3735     flags |= MSG_DONTWAIT;
3736    
3737     +#ifdef CONFIG_COMPAT
3738     + if (req->ctx->compat)
3739     + flags |= MSG_CMSG_COMPAT;
3740     +#endif
3741     +
3742     msg = (struct user_msghdr __user *) (unsigned long)
3743     READ_ONCE(sqe->addr);
3744    
3745     @@ -1663,6 +1672,16 @@ static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
3746     ret = -EINTR;
3747     }
3748    
3749     + if (req->fs) {
3750     + struct fs_struct *fs = req->fs;
3751     +
3752     + spin_lock(&req->fs->lock);
3753     + if (--fs->users)
3754     + fs = NULL;
3755     + spin_unlock(&req->fs->lock);
3756     + if (fs)
3757     + free_fs_struct(fs);
3758     + }
3759     io_cqring_add_event(req->ctx, sqe->user_data, ret);
3760     io_put_req(req);
3761     return 0;
3762     @@ -2159,6 +2178,7 @@ static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
3763     static void io_sq_wq_submit_work(struct work_struct *work)
3764     {
3765     struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3766     + struct fs_struct *old_fs_struct = current->fs;
3767     struct io_ring_ctx *ctx = req->ctx;
3768     struct mm_struct *cur_mm = NULL;
3769     struct async_list *async_list;
3770     @@ -2178,6 +2198,15 @@ restart:
3771     /* Ensure we clear previously set non-block flag */
3772     req->rw.ki_flags &= ~IOCB_NOWAIT;
3773    
3774     + if (req->fs != current->fs && current->fs != old_fs_struct) {
3775     + task_lock(current);
3776     + if (req->fs)
3777     + current->fs = req->fs;
3778     + else
3779     + current->fs = old_fs_struct;
3780     + task_unlock(current);
3781     + }
3782     +
3783     ret = 0;
3784     if (io_sqe_needs_user(sqe) && !cur_mm) {
3785     if (!mmget_not_zero(ctx->sqo_mm)) {
3786     @@ -2276,6 +2305,11 @@ out:
3787     mmput(cur_mm);
3788     }
3789     revert_creds(old_cred);
3790     + if (old_fs_struct) {
3791     + task_lock(current);
3792     + current->fs = old_fs_struct;
3793     + task_unlock(current);
3794     + }
3795     }
3796    
3797     /*
3798     @@ -2503,6 +2537,23 @@ err:
3799    
3800     req->user_data = s->sqe->user_data;
3801    
3802     +#if defined(CONFIG_NET)
3803     + switch (READ_ONCE(s->sqe->opcode)) {
3804     + case IORING_OP_SENDMSG:
3805     + case IORING_OP_RECVMSG:
3806     + spin_lock(&current->fs->lock);
3807     + if (!current->fs->in_exec) {
3808     + req->fs = current->fs;
3809     + req->fs->users++;
3810     + }
3811     + spin_unlock(&current->fs->lock);
3812     + if (!req->fs) {
3813     + ret = -EAGAIN;
3814     + goto err_req;
3815     + }
3816     + }
3817     +#endif
3818     +
3819     /*
3820     * If we already have a head request, queue this one for async
3821     * submittal once the head completes. If we don't have a head but
3822     diff --git a/fs/namei.c b/fs/namei.c
3823     index bd1c0ca4151c..5b5759d70822 100644
3824     --- a/fs/namei.c
3825     +++ b/fs/namei.c
3826     @@ -1360,7 +1360,7 @@ static int follow_dotdot_rcu(struct nameidata *nd)
3827     nd->path.dentry = parent;
3828     nd->seq = seq;
3829     if (unlikely(!path_connected(&nd->path)))
3830     - return -ENOENT;
3831     + return -ECHILD;
3832     break;
3833     } else {
3834     struct mount *mnt = real_mount(nd->path.mnt);
3835     diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
3836     index 339663d04bf8..54f1c1f626fc 100644
3837     --- a/fs/nfs/nfs4file.c
3838     +++ b/fs/nfs/nfs4file.c
3839     @@ -86,7 +86,6 @@ nfs4_file_open(struct inode *inode, struct file *filp)
3840     if (inode != d_inode(dentry))
3841     goto out_drop;
3842    
3843     - nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
3844     nfs_file_set_open_context(filp, ctx);
3845     nfs_fscache_open_file(inode, filp);
3846     err = 0;
3847     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3848     index f808fb34b110..6b29703d2fe1 100644
3849     --- a/fs/nfs/nfs4proc.c
3850     +++ b/fs/nfs/nfs4proc.c
3851     @@ -2962,10 +2962,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3852     struct dentry *dentry;
3853     struct nfs4_state *state;
3854     fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
3855     + struct inode *dir = d_inode(opendata->dir);
3856     + unsigned long dir_verifier;
3857     unsigned int seq;
3858     int ret;
3859    
3860     seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
3861     + dir_verifier = nfs_save_change_attribute(dir);
3862    
3863     ret = _nfs4_proc_open(opendata, ctx);
3864     if (ret != 0)
3865     @@ -2993,8 +2996,19 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
3866     dput(ctx->dentry);
3867     ctx->dentry = dentry = alias;
3868     }
3869     - nfs_set_verifier(dentry,
3870     - nfs_save_change_attribute(d_inode(opendata->dir)));
3871     + }
3872     +
3873     + switch(opendata->o_arg.claim) {
3874     + default:
3875     + break;
3876     + case NFS4_OPEN_CLAIM_NULL:
3877     + case NFS4_OPEN_CLAIM_DELEGATE_CUR:
3878     + case NFS4_OPEN_CLAIM_DELEGATE_PREV:
3879     + if (!opendata->rpc_done)
3880     + break;
3881     + if (opendata->o_res.delegation_type != 0)
3882     + dir_verifier = nfs_save_change_attribute(dir);
3883     + nfs_set_verifier(dentry, dir_verifier);
3884     }
3885    
3886     /* Parse layoutget results before we check for access */
3887     diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
3888     index 54d6db61106f..edf43ddd7dce 100644
3889     --- a/fs/ubifs/orphan.c
3890     +++ b/fs/ubifs/orphan.c
3891     @@ -129,7 +129,7 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
3892     static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
3893     {
3894     if (orph->del) {
3895     - dbg_gen("deleted twice ino %lu", orph->inum);
3896     + dbg_gen("deleted twice ino %lu", (unsigned long)orph->inum);
3897     return;
3898     }
3899    
3900     @@ -137,7 +137,7 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
3901     orph->del = 1;
3902     orph->dnext = c->orph_dnext;
3903     c->orph_dnext = orph;
3904     - dbg_gen("delete later ino %lu", orph->inum);
3905     + dbg_gen("delete later ino %lu", (unsigned long)orph->inum);
3906     return;
3907     }
3908    
3909     diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h
3910     index 94badfa1743e..91c2cb14276e 100644
3911     --- a/fs/xfs/libxfs/xfs_attr.h
3912     +++ b/fs/xfs/libxfs/xfs_attr.h
3913     @@ -26,7 +26,7 @@ struct xfs_attr_list_context;
3914     *========================================================================*/
3915    
3916    
3917     -#define ATTR_DONTFOLLOW 0x0001 /* -- unused, from IRIX -- */
3918     +#define ATTR_DONTFOLLOW 0x0001 /* -- ignored, from IRIX -- */
3919     #define ATTR_ROOT 0x0002 /* use attrs in root (trusted) namespace */
3920     #define ATTR_TRUST 0x0004 /* -- unused, from IRIX -- */
3921     #define ATTR_SECURE 0x0008 /* use attrs in security namespace */
3922     @@ -37,7 +37,10 @@ struct xfs_attr_list_context;
3923     #define ATTR_KERNOVAL 0x2000 /* [kernel] get attr size only, not value */
3924    
3925     #define ATTR_INCOMPLETE 0x4000 /* [kernel] return INCOMPLETE attr keys */
3926     -#define ATTR_ALLOC 0x8000 /* allocate xattr buffer on demand */
3927     +#define ATTR_ALLOC 0x8000 /* [kernel] allocate xattr buffer on demand */
3928     +
3929     +#define ATTR_KERNEL_FLAGS \
3930     + (ATTR_KERNOTIME | ATTR_KERNOVAL | ATTR_INCOMPLETE | ATTR_ALLOC)
3931    
3932     #define XFS_ATTR_FLAGS \
3933     { ATTR_DONTFOLLOW, "DONTFOLLOW" }, \
3934     diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
3935     index d58f0d6a699e..2a1909397cb4 100644
3936     --- a/fs/xfs/xfs_ioctl.c
3937     +++ b/fs/xfs/xfs_ioctl.c
3938     @@ -536,6 +536,8 @@ xfs_attrmulti_by_handle(
3939    
3940     error = 0;
3941     for (i = 0; i < am_hreq.opcount; i++) {
3942     + ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
3943     +
3944     ops[i].am_error = strncpy_from_user((char *)attr_name,
3945     ops[i].am_attrname, MAXNAMELEN);
3946     if (ops[i].am_error == 0 || ops[i].am_error == MAXNAMELEN)
3947     diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
3948     index 1e08bf79b478..e61cc41189f8 100644
3949     --- a/fs/xfs/xfs_ioctl32.c
3950     +++ b/fs/xfs/xfs_ioctl32.c
3951     @@ -450,6 +450,8 @@ xfs_compat_attrmulti_by_handle(
3952    
3953     error = 0;
3954     for (i = 0; i < am_hreq.opcount; i++) {
3955     + ops[i].am_flags &= ~ATTR_KERNEL_FLAGS;
3956     +
3957     ops[i].am_error = strncpy_from_user((char *)attr_name,
3958     compat_ptr(ops[i].am_attrname),
3959     MAXNAMELEN);
3960     diff --git a/include/Kbuild b/include/Kbuild
3961     deleted file mode 100644
3962     index ffba79483cc5..000000000000
3963     --- a/include/Kbuild
3964     +++ /dev/null
3965     @@ -1,1185 +0,0 @@
3966     -# SPDX-License-Identifier: GPL-2.0-only
3967     -
3968     -# Add header-test-$(CONFIG_...) guard to headers that are only compiled
3969     -# for particular architectures.
3970     -#
3971     -# Headers listed in header-test- are excluded from the test coverage.
3972     -# Many headers are excluded for now because they fail to build. Please
3973     -# consider to fix headers first before adding new ones to the blacklist.
3974     -#
3975     -# Sorted alphabetically.
3976     -header-test- += acpi/acbuffer.h
3977     -header-test- += acpi/acpi.h
3978     -header-test- += acpi/acpi_bus.h
3979     -header-test- += acpi/acpi_drivers.h
3980     -header-test- += acpi/acpi_io.h
3981     -header-test- += acpi/acpi_lpat.h
3982     -header-test- += acpi/acpiosxf.h
3983     -header-test- += acpi/acpixf.h
3984     -header-test- += acpi/acrestyp.h
3985     -header-test- += acpi/actbl.h
3986     -header-test- += acpi/actbl1.h
3987     -header-test- += acpi/actbl2.h
3988     -header-test- += acpi/actbl3.h
3989     -header-test- += acpi/actypes.h
3990     -header-test- += acpi/battery.h
3991     -header-test- += acpi/cppc_acpi.h
3992     -header-test- += acpi/nfit.h
3993     -header-test- += acpi/platform/acenv.h
3994     -header-test- += acpi/platform/acenvex.h
3995     -header-test- += acpi/platform/acintel.h
3996     -header-test- += acpi/platform/aclinux.h
3997     -header-test- += acpi/platform/aclinuxex.h
3998     -header-test- += acpi/processor.h
3999     -header-test-$(CONFIG_X86) += clocksource/hyperv_timer.h
4000     -header-test- += clocksource/timer-sp804.h
4001     -header-test- += crypto/cast_common.h
4002     -header-test- += crypto/internal/cryptouser.h
4003     -header-test- += crypto/pkcs7.h
4004     -header-test- += crypto/poly1305.h
4005     -header-test- += crypto/sha3.h
4006     -header-test- += drm/ati_pcigart.h
4007     -header-test- += drm/bridge/dw_hdmi.h
4008     -header-test- += drm/bridge/dw_mipi_dsi.h
4009     -header-test- += drm/drm_audio_component.h
4010     -header-test- += drm/drm_auth.h
4011     -header-test- += drm/drm_debugfs.h
4012     -header-test- += drm/drm_debugfs_crc.h
4013     -header-test- += drm/drm_displayid.h
4014     -header-test- += drm/drm_encoder_slave.h
4015     -header-test- += drm/drm_fb_cma_helper.h
4016     -header-test- += drm/drm_fb_helper.h
4017     -header-test- += drm/drm_fixed.h
4018     -header-test- += drm/drm_format_helper.h
4019     -header-test- += drm/drm_lease.h
4020     -header-test- += drm/drm_legacy.h
4021     -header-test- += drm/drm_panel.h
4022     -header-test- += drm/drm_plane_helper.h
4023     -header-test- += drm/drm_rect.h
4024     -header-test- += drm/i915_component.h
4025     -header-test- += drm/intel-gtt.h
4026     -header-test- += drm/tinydrm/tinydrm-helpers.h
4027     -header-test- += drm/ttm/ttm_debug.h
4028     -header-test- += keys/asymmetric-parser.h
4029     -header-test- += keys/asymmetric-subtype.h
4030     -header-test- += keys/asymmetric-type.h
4031     -header-test- += keys/big_key-type.h
4032     -header-test- += keys/request_key_auth-type.h
4033     -header-test- += keys/trusted.h
4034     -header-test- += kvm/arm_arch_timer.h
4035     -header-test- += kvm/arm_pmu.h
4036     -header-test-$(CONFIG_ARM) += kvm/arm_psci.h
4037     -header-test-$(CONFIG_ARM64) += kvm/arm_psci.h
4038     -header-test- += kvm/arm_vgic.h
4039     -header-test- += linux/8250_pci.h
4040     -header-test- += linux/a.out.h
4041     -header-test- += linux/adxl.h
4042     -header-test- += linux/agpgart.h
4043     -header-test- += linux/alcor_pci.h
4044     -header-test- += linux/amba/clcd.h
4045     -header-test- += linux/amba/pl080.h
4046     -header-test- += linux/amd-iommu.h
4047     -header-test-$(CONFIG_ARM) += linux/arm-cci.h
4048     -header-test-$(CONFIG_ARM64) += linux/arm-cci.h
4049     -header-test- += linux/arm_sdei.h
4050     -header-test- += linux/asn1_decoder.h
4051     -header-test- += linux/ata_platform.h
4052     -header-test- += linux/ath9k_platform.h
4053     -header-test- += linux/atm_tcp.h
4054     -header-test- += linux/atomic-fallback.h
4055     -header-test- += linux/avf/virtchnl.h
4056     -header-test- += linux/bcm47xx_sprom.h
4057     -header-test- += linux/bcma/bcma_driver_gmac_cmn.h
4058     -header-test- += linux/bcma/bcma_driver_mips.h
4059     -header-test- += linux/bcma/bcma_driver_pci.h
4060     -header-test- += linux/bcma/bcma_driver_pcie2.h
4061     -header-test- += linux/bit_spinlock.h
4062     -header-test- += linux/blk-mq-rdma.h
4063     -header-test- += linux/blk-mq.h
4064     -header-test- += linux/blktrace_api.h
4065     -header-test- += linux/blockgroup_lock.h
4066     -header-test- += linux/bma150.h
4067     -header-test- += linux/bpf_lirc.h
4068     -header-test- += linux/bpf_types.h
4069     -header-test- += linux/bsg-lib.h
4070     -header-test- += linux/bsg.h
4071     -header-test- += linux/btf.h
4072     -header-test- += linux/btree-128.h
4073     -header-test- += linux/btree-type.h
4074     -header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h
4075     -header-test- += linux/byteorder/generic.h
4076     -header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h
4077     -header-test- += linux/c2port.h
4078     -header-test- += linux/can/dev/peak_canfd.h
4079     -header-test- += linux/can/platform/cc770.h
4080     -header-test- += linux/can/platform/sja1000.h
4081     -header-test- += linux/ceph/ceph_features.h
4082     -header-test- += linux/ceph/ceph_frag.h
4083     -header-test- += linux/ceph/ceph_fs.h
4084     -header-test- += linux/ceph/debugfs.h
4085     -header-test- += linux/ceph/msgr.h
4086     -header-test- += linux/ceph/rados.h
4087     -header-test- += linux/cgroup_subsys.h
4088     -header-test- += linux/clk/sunxi-ng.h
4089     -header-test- += linux/clk/ti.h
4090     -header-test- += linux/cn_proc.h
4091     -header-test- += linux/coda_psdev.h
4092     -header-test- += linux/compaction.h
4093     -header-test- += linux/console_struct.h
4094     -header-test- += linux/count_zeros.h
4095     -header-test- += linux/cs5535.h
4096     -header-test- += linux/cuda.h
4097     -header-test- += linux/cyclades.h
4098     -header-test- += linux/dcookies.h
4099     -header-test- += linux/delayacct.h
4100     -header-test- += linux/delayed_call.h
4101     -header-test- += linux/device-mapper.h
4102     -header-test- += linux/devpts_fs.h
4103     -header-test- += linux/dio.h
4104     -header-test- += linux/dirent.h
4105     -header-test- += linux/dlm_plock.h
4106     -header-test- += linux/dm-dirty-log.h
4107     -header-test- += linux/dm-region-hash.h
4108     -header-test- += linux/dma-debug.h
4109     -header-test- += linux/dma/mmp-pdma.h
4110     -header-test- += linux/dma/sprd-dma.h
4111     -header-test- += linux/dns_resolver.h
4112     -header-test- += linux/drbd_genl.h
4113     -header-test- += linux/drbd_genl_api.h
4114     -header-test- += linux/dw_apb_timer.h
4115     -header-test- += linux/dynamic_debug.h
4116     -header-test- += linux/dynamic_queue_limits.h
4117     -header-test- += linux/ecryptfs.h
4118     -header-test- += linux/edma.h
4119     -header-test- += linux/eeprom_93cx6.h
4120     -header-test- += linux/efs_vh.h
4121     -header-test- += linux/elevator.h
4122     -header-test- += linux/elfcore-compat.h
4123     -header-test- += linux/error-injection.h
4124     -header-test- += linux/errseq.h
4125     -header-test- += linux/eventpoll.h
4126     -header-test- += linux/ext2_fs.h
4127     -header-test- += linux/f75375s.h
4128     -header-test- += linux/falloc.h
4129     -header-test- += linux/fault-inject.h
4130     -header-test- += linux/fbcon.h
4131     -header-test- += linux/firmware/intel/stratix10-svc-client.h
4132     -header-test- += linux/firmware/meson/meson_sm.h
4133     -header-test- += linux/firmware/trusted_foundations.h
4134     -header-test- += linux/firmware/xlnx-zynqmp.h
4135     -header-test- += linux/fixp-arith.h
4136     -header-test- += linux/flat.h
4137     -header-test- += linux/fs_types.h
4138     -header-test- += linux/fs_uart_pd.h
4139     -header-test- += linux/fsi-occ.h
4140     -header-test- += linux/fsi-sbefifo.h
4141     -header-test- += linux/fsl/bestcomm/ata.h
4142     -header-test- += linux/fsl/bestcomm/bestcomm.h
4143     -header-test- += linux/fsl/bestcomm/bestcomm_priv.h
4144     -header-test- += linux/fsl/bestcomm/fec.h
4145     -header-test- += linux/fsl/bestcomm/gen_bd.h
4146     -header-test- += linux/fsl/bestcomm/sram.h
4147     -header-test- += linux/fsl_hypervisor.h
4148     -header-test- += linux/fsldma.h
4149     -header-test- += linux/ftrace_irq.h
4150     -header-test- += linux/gameport.h
4151     -header-test- += linux/genl_magic_func.h
4152     -header-test- += linux/genl_magic_struct.h
4153     -header-test- += linux/gpio/aspeed.h
4154     -header-test- += linux/gpio/gpio-reg.h
4155     -header-test- += linux/hid-debug.h
4156     -header-test- += linux/hiddev.h
4157     -header-test- += linux/hippidevice.h
4158     -header-test- += linux/hmm.h
4159     -header-test- += linux/hp_sdc.h
4160     -header-test- += linux/huge_mm.h
4161     -header-test- += linux/hugetlb_cgroup.h
4162     -header-test- += linux/hugetlb_inline.h
4163     -header-test- += linux/hwmon-vid.h
4164     -header-test- += linux/hyperv.h
4165     -header-test- += linux/i2c-algo-pca.h
4166     -header-test- += linux/i2c-algo-pcf.h
4167     -header-test- += linux/i3c/ccc.h
4168     -header-test- += linux/i3c/device.h
4169     -header-test- += linux/i3c/master.h
4170     -header-test- += linux/i8042.h
4171     -header-test- += linux/ide.h
4172     -header-test- += linux/idle_inject.h
4173     -header-test- += linux/if_frad.h
4174     -header-test- += linux/if_rmnet.h
4175     -header-test- += linux/if_tap.h
4176     -header-test- += linux/iio/accel/kxcjk_1013.h
4177     -header-test- += linux/iio/adc/ad_sigma_delta.h
4178     -header-test- += linux/iio/buffer-dma.h
4179     -header-test- += linux/iio/buffer_impl.h
4180     -header-test- += linux/iio/common/st_sensors.h
4181     -header-test- += linux/iio/common/st_sensors_i2c.h
4182     -header-test- += linux/iio/common/st_sensors_spi.h
4183     -header-test- += linux/iio/dac/ad5421.h
4184     -header-test- += linux/iio/dac/ad5504.h
4185     -header-test- += linux/iio/dac/ad5791.h
4186     -header-test- += linux/iio/dac/max517.h
4187     -header-test- += linux/iio/dac/mcp4725.h
4188     -header-test- += linux/iio/frequency/ad9523.h
4189     -header-test- += linux/iio/frequency/adf4350.h
4190     -header-test- += linux/iio/hw-consumer.h
4191     -header-test- += linux/iio/imu/adis.h
4192     -header-test- += linux/iio/sysfs.h
4193     -header-test- += linux/iio/timer/stm32-timer-trigger.h
4194     -header-test- += linux/iio/trigger.h
4195     -header-test- += linux/iio/triggered_event.h
4196     -header-test- += linux/imx-media.h
4197     -header-test- += linux/inet_diag.h
4198     -header-test- += linux/init_ohci1394_dma.h
4199     -header-test- += linux/initrd.h
4200     -header-test- += linux/input/adp5589.h
4201     -header-test- += linux/input/bu21013.h
4202     -header-test- += linux/input/cma3000.h
4203     -header-test- += linux/input/kxtj9.h
4204     -header-test- += linux/input/lm8333.h
4205     -header-test- += linux/input/sparse-keymap.h
4206     -header-test- += linux/input/touchscreen.h
4207     -header-test- += linux/input/tps6507x-ts.h
4208     -header-test-$(CONFIG_X86) += linux/intel-iommu.h
4209     -header-test- += linux/intel-ish-client-if.h
4210     -header-test- += linux/intel-pti.h
4211     -header-test- += linux/intel-svm.h
4212     -header-test- += linux/interconnect-provider.h
4213     -header-test- += linux/ioc3.h
4214     -header-test-$(CONFIG_BLOCK) += linux/iomap.h
4215     -header-test- += linux/ipack.h
4216     -header-test- += linux/irq_cpustat.h
4217     -header-test- += linux/irq_poll.h
4218     -header-test- += linux/irqchip/arm-gic-v3.h
4219     -header-test- += linux/irqchip/arm-gic-v4.h
4220     -header-test- += linux/irqchip/irq-madera.h
4221     -header-test- += linux/irqchip/irq-sa11x0.h
4222     -header-test- += linux/irqchip/mxs.h
4223     -header-test- += linux/irqchip/versatile-fpga.h
4224     -header-test- += linux/irqdesc.h
4225     -header-test- += linux/irqflags.h
4226     -header-test- += linux/iscsi_boot_sysfs.h
4227     -header-test- += linux/isdn/capiutil.h
4228     -header-test- += linux/isdn/hdlc.h
4229     -header-test- += linux/isdn_ppp.h
4230     -header-test- += linux/jbd2.h
4231     -header-test- += linux/jump_label.h
4232     -header-test- += linux/jump_label_ratelimit.h
4233     -header-test- += linux/jz4740-adc.h
4234     -header-test- += linux/kasan.h
4235     -header-test- += linux/kcore.h
4236     -header-test- += linux/kdev_t.h
4237     -header-test- += linux/kernelcapi.h
4238     -header-test- += linux/khugepaged.h
4239     -header-test- += linux/kobj_map.h
4240     -header-test- += linux/kobject_ns.h
4241     -header-test- += linux/kvm_host.h
4242     -header-test- += linux/kvm_irqfd.h
4243     -header-test- += linux/kvm_para.h
4244     -header-test- += linux/lantiq.h
4245     -header-test- += linux/lapb.h
4246     -header-test- += linux/latencytop.h
4247     -header-test- += linux/led-lm3530.h
4248     -header-test- += linux/leds-bd2802.h
4249     -header-test- += linux/leds-lp3944.h
4250     -header-test- += linux/leds-lp3952.h
4251     -header-test- += linux/leds_pwm.h
4252     -header-test- += linux/libata.h
4253     -header-test- += linux/license.h
4254     -header-test- += linux/lightnvm.h
4255     -header-test- += linux/lis3lv02d.h
4256     -header-test- += linux/list_bl.h
4257     -header-test- += linux/list_lru.h
4258     -header-test- += linux/list_nulls.h
4259     -header-test- += linux/lockd/share.h
4260     -header-test- += linux/lzo.h
4261     -header-test- += linux/mailbox/zynqmp-ipi-message.h
4262     -header-test- += linux/maple.h
4263     -header-test- += linux/mbcache.h
4264     -header-test- += linux/mbus.h
4265     -header-test- += linux/mc146818rtc.h
4266     -header-test- += linux/mc6821.h
4267     -header-test- += linux/mdev.h
4268     -header-test- += linux/mem_encrypt.h
4269     -header-test- += linux/memfd.h
4270     -header-test- += linux/mfd/88pm80x.h
4271     -header-test- += linux/mfd/88pm860x.h
4272     -header-test- += linux/mfd/abx500/ab8500-bm.h
4273     -header-test- += linux/mfd/abx500/ab8500-gpadc.h
4274     -header-test- += linux/mfd/adp5520.h
4275     -header-test- += linux/mfd/arizona/pdata.h
4276     -header-test- += linux/mfd/as3711.h
4277     -header-test- += linux/mfd/as3722.h
4278     -header-test- += linux/mfd/da903x.h
4279     -header-test- += linux/mfd/da9055/pdata.h
4280     -header-test- += linux/mfd/db8500-prcmu.h
4281     -header-test- += linux/mfd/dbx500-prcmu.h
4282     -header-test- += linux/mfd/dln2.h
4283     -header-test- += linux/mfd/dm355evm_msp.h
4284     -header-test- += linux/mfd/ds1wm.h
4285     -header-test- += linux/mfd/ezx-pcap.h
4286     -header-test- += linux/mfd/intel_msic.h
4287     -header-test- += linux/mfd/janz.h
4288     -header-test- += linux/mfd/kempld.h
4289     -header-test- += linux/mfd/lm3533.h
4290     -header-test- += linux/mfd/lp8788-isink.h
4291     -header-test- += linux/mfd/lpc_ich.h
4292     -header-test- += linux/mfd/max77693.h
4293     -header-test- += linux/mfd/max8998-private.h
4294     -header-test- += linux/mfd/menelaus.h
4295     -header-test- += linux/mfd/mt6397/core.h
4296     -header-test- += linux/mfd/palmas.h
4297     -header-test- += linux/mfd/pcf50633/backlight.h
4298     -header-test- += linux/mfd/rc5t583.h
4299     -header-test- += linux/mfd/retu.h
4300     -header-test- += linux/mfd/samsung/core.h
4301     -header-test- += linux/mfd/si476x-platform.h
4302     -header-test- += linux/mfd/si476x-reports.h
4303     -header-test- += linux/mfd/sky81452.h
4304     -header-test- += linux/mfd/smsc.h
4305     -header-test- += linux/mfd/sta2x11-mfd.h
4306     -header-test- += linux/mfd/stmfx.h
4307     -header-test- += linux/mfd/tc3589x.h
4308     -header-test- += linux/mfd/tc6387xb.h
4309     -header-test- += linux/mfd/tc6393xb.h
4310     -header-test- += linux/mfd/tps65090.h
4311     -header-test- += linux/mfd/tps6586x.h
4312     -header-test- += linux/mfd/tps65910.h
4313     -header-test- += linux/mfd/tps80031.h
4314     -header-test- += linux/mfd/ucb1x00.h
4315     -header-test- += linux/mfd/viperboard.h
4316     -header-test- += linux/mfd/wm831x/core.h
4317     -header-test- += linux/mfd/wm831x/otp.h
4318     -header-test- += linux/mfd/wm831x/pdata.h
4319     -header-test- += linux/mfd/wm8994/core.h
4320     -header-test- += linux/mfd/wm8994/pdata.h
4321     -header-test- += linux/mlx4/doorbell.h
4322     -header-test- += linux/mlx4/srq.h
4323     -header-test- += linux/mlx5/doorbell.h
4324     -header-test- += linux/mlx5/eq.h
4325     -header-test- += linux/mlx5/fs_helpers.h
4326     -header-test- += linux/mlx5/mlx5_ifc.h
4327     -header-test- += linux/mlx5/mlx5_ifc_fpga.h
4328     -header-test- += linux/mm-arch-hooks.h
4329     -header-test- += linux/mm_inline.h
4330     -header-test- += linux/mmu_context.h
4331     -header-test- += linux/mpage.h
4332     -header-test- += linux/mtd/bbm.h
4333     -header-test- += linux/mtd/cfi.h
4334     -header-test- += linux/mtd/doc2000.h
4335     -header-test- += linux/mtd/flashchip.h
4336     -header-test- += linux/mtd/ftl.h
4337     -header-test- += linux/mtd/gen_probe.h
4338     -header-test- += linux/mtd/jedec.h
4339     -header-test- += linux/mtd/nand_bch.h
4340     -header-test- += linux/mtd/nand_ecc.h
4341     -header-test- += linux/mtd/ndfc.h
4342     -header-test- += linux/mtd/onenand.h
4343     -header-test- += linux/mtd/pismo.h
4344     -header-test- += linux/mtd/plat-ram.h
4345     -header-test- += linux/mtd/spi-nor.h
4346     -header-test- += linux/mv643xx.h
4347     -header-test- += linux/mv643xx_eth.h
4348     -header-test- += linux/mvebu-pmsu.h
4349     -header-test- += linux/mxm-wmi.h
4350     -header-test- += linux/n_r3964.h
4351     -header-test- += linux/ndctl.h
4352     -header-test- += linux/nfs.h
4353     -header-test- += linux/nfs_fs_i.h
4354     -header-test- += linux/nfs_fs_sb.h
4355     -header-test- += linux/nfs_page.h
4356     -header-test- += linux/nfs_xdr.h
4357     -header-test- += linux/nfsacl.h
4358     -header-test- += linux/nl802154.h
4359     -header-test- += linux/ns_common.h
4360     -header-test- += linux/nsc_gpio.h
4361     -header-test- += linux/ntb_transport.h
4362     -header-test- += linux/nubus.h
4363     -header-test- += linux/nvme-fc-driver.h
4364     -header-test- += linux/nvme-fc.h
4365     -header-test- += linux/nvme-rdma.h
4366     -header-test- += linux/nvram.h
4367     -header-test- += linux/objagg.h
4368     -header-test- += linux/of_clk.h
4369     -header-test- += linux/of_net.h
4370     -header-test- += linux/of_pdt.h
4371     -header-test- += linux/olpc-ec.h
4372     -header-test- += linux/omap-dma.h
4373     -header-test- += linux/omap-dmaengine.h
4374     -header-test- += linux/omap-gpmc.h
4375     -header-test- += linux/omap-iommu.h
4376     -header-test- += linux/omap-mailbox.h
4377     -header-test- += linux/once.h
4378     -header-test- += linux/osq_lock.h
4379     -header-test- += linux/overflow.h
4380     -header-test- += linux/page-flags-layout.h
4381     -header-test- += linux/page-isolation.h
4382     -header-test- += linux/page_ext.h
4383     -header-test- += linux/page_owner.h
4384     -header-test- += linux/parport_pc.h
4385     -header-test- += linux/parser.h
4386     -header-test- += linux/pci-acpi.h
4387     -header-test- += linux/pci-dma-compat.h
4388     -header-test- += linux/pci_hotplug.h
4389     -header-test- += linux/pda_power.h
4390     -header-test- += linux/perf/arm_pmu.h
4391     -header-test- += linux/perf_regs.h
4392     -header-test- += linux/phy/omap_control_phy.h
4393     -header-test- += linux/phy/tegra/xusb.h
4394     -header-test- += linux/phy/ulpi_phy.h
4395     -header-test- += linux/phy_fixed.h
4396     -header-test- += linux/pipe_fs_i.h
4397     -header-test- += linux/pktcdvd.h
4398     -header-test- += linux/pl320-ipc.h
4399     -header-test- += linux/pl353-smc.h
4400     -header-test- += linux/platform_data/ad5449.h
4401     -header-test- += linux/platform_data/ad5755.h
4402     -header-test- += linux/platform_data/ad7266.h
4403     -header-test- += linux/platform_data/ad7291.h
4404     -header-test- += linux/platform_data/ad7298.h
4405     -header-test- += linux/platform_data/ad7303.h
4406     -header-test- += linux/platform_data/ad7791.h
4407     -header-test- += linux/platform_data/ad7793.h
4408     -header-test- += linux/platform_data/ad7887.h
4409     -header-test- += linux/platform_data/adau17x1.h
4410     -header-test- += linux/platform_data/adp8870.h
4411     -header-test- += linux/platform_data/ads1015.h
4412     -header-test- += linux/platform_data/ads7828.h
4413     -header-test- += linux/platform_data/apds990x.h
4414     -header-test- += linux/platform_data/arm-ux500-pm.h
4415     -header-test- += linux/platform_data/asoc-s3c.h
4416     -header-test- += linux/platform_data/at91_adc.h
4417     -header-test- += linux/platform_data/ata-pxa.h
4418     -header-test- += linux/platform_data/atmel.h
4419     -header-test- += linux/platform_data/bh1770glc.h
4420     -header-test- += linux/platform_data/brcmfmac.h
4421     -header-test- += linux/platform_data/cros_ec_commands.h
4422     -header-test- += linux/platform_data/clk-u300.h
4423     -header-test- += linux/platform_data/cyttsp4.h
4424     -header-test- += linux/platform_data/dma-coh901318.h
4425     -header-test- += linux/platform_data/dma-imx-sdma.h
4426     -header-test- += linux/platform_data/dma-mcf-edma.h
4427     -header-test- += linux/platform_data/dma-s3c24xx.h
4428     -header-test- += linux/platform_data/dmtimer-omap.h
4429     -header-test- += linux/platform_data/dsa.h
4430     -header-test- += linux/platform_data/edma.h
4431     -header-test- += linux/platform_data/elm.h
4432     -header-test- += linux/platform_data/emif_plat.h
4433     -header-test- += linux/platform_data/fsa9480.h
4434     -header-test- += linux/platform_data/g762.h
4435     -header-test- += linux/platform_data/gpio-ath79.h
4436     -header-test- += linux/platform_data/gpio-davinci.h
4437     -header-test- += linux/platform_data/gpio-dwapb.h
4438     -header-test- += linux/platform_data/gpio-htc-egpio.h
4439     -header-test- += linux/platform_data/gpmc-omap.h
4440     -header-test- += linux/platform_data/hsmmc-omap.h
4441     -header-test- += linux/platform_data/hwmon-s3c.h
4442     -header-test- += linux/platform_data/i2c-davinci.h
4443     -header-test- += linux/platform_data/i2c-imx.h
4444     -header-test- += linux/platform_data/i2c-mux-reg.h
4445     -header-test- += linux/platform_data/i2c-ocores.h
4446     -header-test- += linux/platform_data/i2c-xiic.h
4447     -header-test- += linux/platform_data/intel-spi.h
4448     -header-test- += linux/platform_data/invensense_mpu6050.h
4449     -header-test- += linux/platform_data/irda-pxaficp.h
4450     -header-test- += linux/platform_data/irda-sa11x0.h
4451     -header-test- += linux/platform_data/itco_wdt.h
4452     -header-test- += linux/platform_data/jz4740/jz4740_nand.h
4453     -header-test- += linux/platform_data/keyboard-pxa930_rotary.h
4454     -header-test- += linux/platform_data/keypad-omap.h
4455     -header-test- += linux/platform_data/leds-lp55xx.h
4456     -header-test- += linux/platform_data/leds-omap.h
4457     -header-test- += linux/platform_data/lp855x.h
4458     -header-test- += linux/platform_data/lp8727.h
4459     -header-test- += linux/platform_data/max197.h
4460     -header-test- += linux/platform_data/max3421-hcd.h
4461     -header-test- += linux/platform_data/max732x.h
4462     -header-test- += linux/platform_data/mcs.h
4463     -header-test- += linux/platform_data/mdio-bcm-unimac.h
4464     -header-test- += linux/platform_data/mdio-gpio.h
4465     -header-test- += linux/platform_data/media/si4713.h
4466     -header-test- += linux/platform_data/mlxreg.h
4467     -header-test- += linux/platform_data/mmc-omap.h
4468     -header-test- += linux/platform_data/mmc-sdhci-s3c.h
4469     -header-test- += linux/platform_data/mmp_audio.h
4470     -header-test- += linux/platform_data/mtd-orion_nand.h
4471     -header-test- += linux/platform_data/mv88e6xxx.h
4472     -header-test- += linux/platform_data/net-cw1200.h
4473     -header-test- += linux/platform_data/omap-twl4030.h
4474     -header-test- += linux/platform_data/omapdss.h
4475     -header-test- += linux/platform_data/pcf857x.h
4476     -header-test- += linux/platform_data/pixcir_i2c_ts.h
4477     -header-test- += linux/platform_data/pwm_omap_dmtimer.h
4478     -header-test- += linux/platform_data/pxa2xx_udc.h
4479     -header-test- += linux/platform_data/pxa_sdhci.h
4480     -header-test- += linux/platform_data/remoteproc-omap.h
4481     -header-test- += linux/platform_data/sa11x0-serial.h
4482     -header-test- += linux/platform_data/sc18is602.h
4483     -header-test- += linux/platform_data/sdhci-pic32.h
4484     -header-test- += linux/platform_data/serial-sccnxp.h
4485     -header-test- += linux/platform_data/sht3x.h
4486     -header-test- += linux/platform_data/shtc1.h
4487     -header-test- += linux/platform_data/si5351.h
4488     -header-test- += linux/platform_data/sky81452-backlight.h
4489     -header-test- += linux/platform_data/spi-davinci.h
4490     -header-test- += linux/platform_data/spi-ep93xx.h
4491     -header-test- += linux/platform_data/spi-mt65xx.h
4492     -header-test- += linux/platform_data/st_sensors_pdata.h
4493     -header-test- += linux/platform_data/ti-sysc.h
4494     -header-test- += linux/platform_data/timer-ixp4xx.h
4495     -header-test- += linux/platform_data/touchscreen-s3c2410.h
4496     -header-test- += linux/platform_data/tsc2007.h
4497     -header-test- += linux/platform_data/tsl2772.h
4498     -header-test- += linux/platform_data/uio_pruss.h
4499     -header-test- += linux/platform_data/usb-davinci.h
4500     -header-test- += linux/platform_data/usb-ehci-mxc.h
4501     -header-test- += linux/platform_data/usb-ehci-orion.h
4502     -header-test- += linux/platform_data/usb-mx2.h
4503     -header-test- += linux/platform_data/usb-ohci-s3c2410.h
4504     -header-test- += linux/platform_data/usb-omap.h
4505     -header-test- += linux/platform_data/usb-s3c2410_udc.h
4506     -header-test- += linux/platform_data/usb3503.h
4507     -header-test- += linux/platform_data/ux500_wdt.h
4508     -header-test- += linux/platform_data/video-clcd-versatile.h
4509     -header-test- += linux/platform_data/video-imxfb.h
4510     -header-test- += linux/platform_data/video-pxafb.h
4511     -header-test- += linux/platform_data/video_s3c.h
4512     -header-test- += linux/platform_data/voltage-omap.h
4513     -header-test- += linux/platform_data/x86/apple.h
4514     -header-test- += linux/platform_data/x86/clk-pmc-atom.h
4515     -header-test- += linux/platform_data/x86/pmc_atom.h
4516     -header-test- += linux/platform_data/xtalk-bridge.h
4517     -header-test- += linux/pm2301_charger.h
4518     -header-test- += linux/pm_wakeirq.h
4519     -header-test- += linux/pm_wakeup.h
4520     -header-test- += linux/pmbus.h
4521     -header-test- += linux/pmu.h
4522     -header-test- += linux/posix_acl.h
4523     -header-test- += linux/posix_acl_xattr.h
4524     -header-test- += linux/power/ab8500.h
4525     -header-test- += linux/power/bq27xxx_battery.h
4526     -header-test- += linux/power/generic-adc-battery.h
4527     -header-test- += linux/power/jz4740-battery.h
4528     -header-test- += linux/power/max17042_battery.h
4529     -header-test- += linux/power/max8903_charger.h
4530     -header-test- += linux/ppp-comp.h
4531     -header-test- += linux/pps-gpio.h
4532     -header-test- += linux/pr.h
4533     -header-test- += linux/proc_ns.h
4534     -header-test- += linux/processor.h
4535     -header-test- += linux/psi.h
4536     -header-test- += linux/psp-sev.h
4537     -header-test- += linux/pstore.h
4538     -header-test- += linux/ptr_ring.h
4539     -header-test- += linux/ptrace.h
4540     -header-test- += linux/qcom-geni-se.h
4541     -header-test- += linux/qed/eth_common.h
4542     -header-test- += linux/qed/fcoe_common.h
4543     -header-test- += linux/qed/iscsi_common.h
4544     -header-test- += linux/qed/iwarp_common.h
4545     -header-test- += linux/qed/qed_eth_if.h
4546     -header-test- += linux/qed/qed_fcoe_if.h
4547     -header-test- += linux/qed/rdma_common.h
4548     -header-test- += linux/qed/storage_common.h
4549     -header-test- += linux/qed/tcp_common.h
4550     -header-test- += linux/qnx6_fs.h
4551     -header-test- += linux/quicklist.h
4552     -header-test- += linux/ramfs.h
4553     -header-test- += linux/range.h
4554     -header-test- += linux/rcu_node_tree.h
4555     -header-test- += linux/rculist_bl.h
4556     -header-test- += linux/rculist_nulls.h
4557     -header-test- += linux/rcutiny.h
4558     -header-test- += linux/rcutree.h
4559     -header-test- += linux/reboot-mode.h
4560     -header-test- += linux/regulator/fixed.h
4561     -header-test- += linux/regulator/gpio-regulator.h
4562     -header-test- += linux/regulator/max8973-regulator.h
4563     -header-test- += linux/regulator/of_regulator.h
4564     -header-test- += linux/regulator/tps51632-regulator.h
4565     -header-test- += linux/regulator/tps62360.h
4566     -header-test- += linux/regulator/tps6507x.h
4567     -header-test- += linux/regulator/userspace-consumer.h
4568     -header-test- += linux/remoteproc/st_slim_rproc.h
4569     -header-test- += linux/reset/socfpga.h
4570     -header-test- += linux/reset/sunxi.h
4571     -header-test- += linux/rtc/m48t59.h
4572     -header-test- += linux/rtc/rtc-omap.h
4573     -header-test- += linux/rtc/sirfsoc_rtciobrg.h
4574     -header-test- += linux/rwlock.h
4575     -header-test- += linux/rwlock_types.h
4576     -header-test- += linux/scc.h
4577     -header-test- += linux/sched/deadline.h
4578     -header-test- += linux/sched/smt.h
4579     -header-test- += linux/sched/sysctl.h
4580     -header-test- += linux/sched_clock.h
4581     -header-test- += linux/scpi_protocol.h
4582     -header-test- += linux/scx200_gpio.h
4583     -header-test- += linux/seccomp.h
4584     -header-test- += linux/sed-opal.h
4585     -header-test- += linux/seg6_iptunnel.h
4586     -header-test- += linux/selection.h
4587     -header-test- += linux/set_memory.h
4588     -header-test- += linux/shrinker.h
4589     -header-test- += linux/sirfsoc_dma.h
4590     -header-test- += linux/skb_array.h
4591     -header-test- += linux/slab_def.h
4592     -header-test- += linux/slub_def.h
4593     -header-test- += linux/sm501.h
4594     -header-test- += linux/smc91x.h
4595     -header-test- += linux/static_key.h
4596     -header-test- += linux/soc/actions/owl-sps.h
4597     -header-test- += linux/soc/amlogic/meson-canvas.h
4598     -header-test- += linux/soc/brcmstb/brcmstb.h
4599     -header-test- += linux/soc/ixp4xx/npe.h
4600     -header-test- += linux/soc/mediatek/infracfg.h
4601     -header-test- += linux/soc/qcom/smd-rpm.h
4602     -header-test- += linux/soc/qcom/smem.h
4603     -header-test- += linux/soc/qcom/smem_state.h
4604     -header-test- += linux/soc/qcom/wcnss_ctrl.h
4605     -header-test- += linux/soc/renesas/rcar-rst.h
4606     -header-test- += linux/soc/samsung/exynos-pmu.h
4607     -header-test- += linux/soc/sunxi/sunxi_sram.h
4608     -header-test- += linux/soc/ti/ti-msgmgr.h
4609     -header-test- += linux/soc/ti/ti_sci_inta_msi.h
4610     -header-test- += linux/soc/ti/ti_sci_protocol.h
4611     -header-test- += linux/soundwire/sdw.h
4612     -header-test- += linux/soundwire/sdw_intel.h
4613     -header-test- += linux/soundwire/sdw_type.h
4614     -header-test- += linux/spi/ad7877.h
4615     -header-test- += linux/spi/ads7846.h
4616     -header-test- += linux/spi/at86rf230.h
4617     -header-test- += linux/spi/ds1305.h
4618     -header-test- += linux/spi/libertas_spi.h
4619     -header-test- += linux/spi/lms283gf05.h
4620     -header-test- += linux/spi/max7301.h
4621     -header-test- += linux/spi/mcp23s08.h
4622     -header-test- += linux/spi/rspi.h
4623     -header-test- += linux/spi/s3c24xx.h
4624     -header-test- += linux/spi/sh_msiof.h
4625     -header-test- += linux/spi/spi-fsl-dspi.h
4626     -header-test- += linux/spi/spi_bitbang.h
4627     -header-test- += linux/spi/spi_gpio.h
4628     -header-test- += linux/spi/xilinx_spi.h
4629     -header-test- += linux/spinlock_api_smp.h
4630     -header-test- += linux/spinlock_api_up.h
4631     -header-test- += linux/spinlock_types.h
4632     -header-test- += linux/splice.h
4633     -header-test- += linux/sram.h
4634     -header-test- += linux/srcutiny.h
4635     -header-test- += linux/srcutree.h
4636     -header-test- += linux/ssb/ssb_driver_chipcommon.h
4637     -header-test- += linux/ssb/ssb_driver_extif.h
4638     -header-test- += linux/ssb/ssb_driver_mips.h
4639     -header-test- += linux/ssb/ssb_driver_pci.h
4640     -header-test- += linux/ssbi.h
4641     -header-test- += linux/stackdepot.h
4642     -header-test- += linux/stmp3xxx_rtc_wdt.h
4643     -header-test- += linux/string_helpers.h
4644     -header-test- += linux/sungem_phy.h
4645     -header-test- += linux/sunrpc/msg_prot.h
4646     -header-test- += linux/sunrpc/rpc_pipe_fs.h
4647     -header-test- += linux/sunrpc/xprtmultipath.h
4648     -header-test- += linux/sunrpc/xprtsock.h
4649     -header-test- += linux/sunxi-rsb.h
4650     -header-test- += linux/svga.h
4651     -header-test- += linux/sw842.h
4652     -header-test- += linux/swapfile.h
4653     -header-test- += linux/swapops.h
4654     -header-test- += linux/swiotlb.h
4655     -header-test- += linux/sysv_fs.h
4656     -header-test- += linux/t10-pi.h
4657     -header-test- += linux/task_io_accounting.h
4658     -header-test- += linux/tick.h
4659     -header-test- += linux/timb_dma.h
4660     -header-test- += linux/timekeeping.h
4661     -header-test- += linux/timekeeping32.h
4662     -header-test- += linux/ts-nbus.h
4663     -header-test- += linux/tsacct_kern.h
4664     -header-test- += linux/tty_flip.h
4665     -header-test- += linux/tty_ldisc.h
4666     -header-test- += linux/ucb1400.h
4667     -header-test- += linux/usb/association.h
4668     -header-test- += linux/usb/cdc-wdm.h
4669     -header-test- += linux/usb/cdc_ncm.h
4670     -header-test- += linux/usb/ezusb.h
4671     -header-test- += linux/usb/gadget_configfs.h
4672     -header-test- += linux/usb/gpio_vbus.h
4673     -header-test- += linux/usb/hcd.h
4674     -header-test- += linux/usb/iowarrior.h
4675     -header-test- += linux/usb/irda.h
4676     -header-test- += linux/usb/isp116x.h
4677     -header-test- += linux/usb/isp1362.h
4678     -header-test- += linux/usb/musb.h
4679     -header-test- += linux/usb/net2280.h
4680     -header-test- += linux/usb/ohci_pdriver.h
4681     -header-test- += linux/usb/otg-fsm.h
4682     -header-test- += linux/usb/pd_ado.h
4683     -header-test- += linux/usb/r8a66597.h
4684     -header-test- += linux/usb/rndis_host.h
4685     -header-test- += linux/usb/serial.h
4686     -header-test- += linux/usb/sl811.h
4687     -header-test- += linux/usb/storage.h
4688     -header-test- += linux/usb/uas.h
4689     -header-test- += linux/usb/usb338x.h
4690     -header-test- += linux/usb/usbnet.h
4691     -header-test- += linux/usb/wusb-wa.h
4692     -header-test- += linux/usb/xhci-dbgp.h
4693     -header-test- += linux/usb_usual.h
4694     -header-test- += linux/user-return-notifier.h
4695     -header-test- += linux/userfaultfd_k.h
4696     -header-test- += linux/verification.h
4697     -header-test- += linux/vgaarb.h
4698     -header-test- += linux/via_core.h
4699     -header-test- += linux/via_i2c.h
4700     -header-test- += linux/virtio_byteorder.h
4701     -header-test- += linux/virtio_ring.h
4702     -header-test- += linux/visorbus.h
4703     -header-test- += linux/vme.h
4704     -header-test- += linux/vmstat.h
4705     -header-test- += linux/vmw_vmci_api.h
4706     -header-test- += linux/vmw_vmci_defs.h
4707     -header-test- += linux/vringh.h
4708     -header-test- += linux/vt_buffer.h
4709     -header-test- += linux/zorro.h
4710     -header-test- += linux/zpool.h
4711     -header-test- += math-emu/double.h
4712     -header-test- += math-emu/op-common.h
4713     -header-test- += math-emu/quad.h
4714     -header-test- += math-emu/single.h
4715     -header-test- += math-emu/soft-fp.h
4716     -header-test- += media/davinci/dm355_ccdc.h
4717     -header-test- += media/davinci/dm644x_ccdc.h
4718     -header-test- += media/davinci/isif.h
4719     -header-test- += media/davinci/vpbe_osd.h
4720     -header-test- += media/davinci/vpbe_types.h
4721     -header-test- += media/davinci/vpif_types.h
4722     -header-test- += media/demux.h
4723     -header-test- += media/drv-intf/soc_mediabus.h
4724     -header-test- += media/dvb_net.h
4725     -header-test- += media/fwht-ctrls.h
4726     -header-test- += media/i2c/ad9389b.h
4727     -header-test- += media/i2c/adv7343.h
4728     -header-test- += media/i2c/adv7511.h
4729     -header-test- += media/i2c/adv7842.h
4730     -header-test- += media/i2c/m5mols.h
4731     -header-test- += media/i2c/mt9m032.h
4732     -header-test- += media/i2c/mt9t112.h
4733     -header-test- += media/i2c/mt9v032.h
4734     -header-test- += media/i2c/ov2659.h
4735     -header-test- += media/i2c/ov7670.h
4736     -header-test- += media/i2c/rj54n1cb0c.h
4737     -header-test- += media/i2c/saa6588.h
4738     -header-test- += media/i2c/saa7115.h
4739     -header-test- += media/i2c/sr030pc30.h
4740     -header-test- += media/i2c/tc358743.h
4741     -header-test- += media/i2c/tda1997x.h
4742     -header-test- += media/i2c/ths7303.h
4743     -header-test- += media/i2c/tvaudio.h
4744     -header-test- += media/i2c/tvp514x.h
4745     -header-test- += media/i2c/tvp7002.h
4746     -header-test- += media/i2c/wm8775.h
4747     -header-test- += media/imx.h
4748     -header-test- += media/media-dev-allocator.h
4749     -header-test- += media/mpeg2-ctrls.h
4750     -header-test- += media/rcar-fcp.h
4751     -header-test- += media/tuner-types.h
4752     -header-test- += media/tveeprom.h
4753     -header-test- += media/v4l2-flash-led-class.h
4754     -header-test- += misc/altera.h
4755     -header-test- += misc/cxl-base.h
4756     -header-test- += misc/cxllib.h
4757     -header-test- += net/9p/9p.h
4758     -header-test- += net/9p/client.h
4759     -header-test- += net/9p/transport.h
4760     -header-test- += net/af_vsock.h
4761     -header-test- += net/ax88796.h
4762     -header-test- += net/bluetooth/hci.h
4763     -header-test- += net/bluetooth/hci_core.h
4764     -header-test- += net/bluetooth/hci_mon.h
4765     -header-test- += net/bluetooth/hci_sock.h
4766     -header-test- += net/bluetooth/l2cap.h
4767     -header-test- += net/bluetooth/mgmt.h
4768     -header-test- += net/bluetooth/rfcomm.h
4769     -header-test- += net/bluetooth/sco.h
4770     -header-test- += net/bond_options.h
4771     -header-test- += net/caif/cfsrvl.h
4772     -header-test- += net/codel_impl.h
4773     -header-test- += net/codel_qdisc.h
4774     -header-test- += net/compat.h
4775     -header-test- += net/datalink.h
4776     -header-test- += net/dcbevent.h
4777     -header-test- += net/dcbnl.h
4778     -header-test- += net/dn_dev.h
4779     -header-test- += net/dn_fib.h
4780     -header-test- += net/dn_neigh.h
4781     -header-test- += net/dn_nsp.h
4782     -header-test- += net/dn_route.h
4783     -header-test- += net/erspan.h
4784     -header-test- += net/esp.h
4785     -header-test- += net/ethoc.h
4786     -header-test- += net/firewire.h
4787     -header-test- += net/flow_offload.h
4788     -header-test- += net/fq.h
4789     -header-test- += net/fq_impl.h
4790     -header-test- += net/garp.h
4791     -header-test- += net/gtp.h
4792     -header-test- += net/gue.h
4793     -header-test- += net/hwbm.h
4794     -header-test- += net/ila.h
4795     -header-test- += net/inet6_connection_sock.h
4796     -header-test- += net/inet_common.h
4797     -header-test- += net/inet_frag.h
4798     -header-test- += net/ip6_route.h
4799     -header-test- += net/ip_vs.h
4800     -header-test- += net/ipcomp.h
4801     -header-test- += net/ipconfig.h
4802     -header-test- += net/iucv/af_iucv.h
4803     -header-test- += net/iucv/iucv.h
4804     -header-test- += net/lapb.h
4805     -header-test- += net/llc_c_ac.h
4806     -header-test- += net/llc_c_st.h
4807     -header-test- += net/llc_s_ac.h
4808     -header-test- += net/llc_s_ev.h
4809     -header-test- += net/llc_s_st.h
4810     -header-test- += net/mpls_iptunnel.h
4811     -header-test- += net/mrp.h
4812     -header-test- += net/ncsi.h
4813     -header-test- += net/netevent.h
4814     -header-test- += net/netns/can.h
4815     -header-test- += net/netns/generic.h
4816     -header-test- += net/netns/ieee802154_6lowpan.h
4817     -header-test- += net/netns/ipv4.h
4818     -header-test- += net/netns/ipv6.h
4819     -header-test- += net/netns/mpls.h
4820     -header-test- += net/netns/nftables.h
4821     -header-test- += net/netns/sctp.h
4822     -header-test- += net/netrom.h
4823     -header-test- += net/p8022.h
4824     -header-test- += net/phonet/pep.h
4825     -header-test- += net/phonet/phonet.h
4826     -header-test- += net/phonet/pn_dev.h
4827     -header-test- += net/pptp.h
4828     -header-test- += net/psample.h
4829     -header-test- += net/psnap.h
4830     -header-test- += net/regulatory.h
4831     -header-test- += net/rose.h
4832     -header-test- += net/sctp/auth.h
4833     -header-test- += net/sctp/stream_interleave.h
4834     -header-test- += net/sctp/stream_sched.h
4835     -header-test- += net/sctp/tsnmap.h
4836     -header-test- += net/sctp/ulpevent.h
4837     -header-test- += net/sctp/ulpqueue.h
4838     -header-test- += net/secure_seq.h
4839     -header-test- += net/smc.h
4840     -header-test- += net/stp.h
4841     -header-test- += net/transp_v6.h
4842     -header-test- += net/tun_proto.h
4843     -header-test- += net/udplite.h
4844     -header-test- += net/xdp.h
4845     -header-test- += net/xdp_priv.h
4846     -header-test- += pcmcia/cistpl.h
4847     -header-test- += pcmcia/ds.h
4848     -header-test- += rdma/tid_rdma_defs.h
4849     -header-test- += scsi/fc/fc_encaps.h
4850     -header-test- += scsi/fc/fc_fc2.h
4851     -header-test- += scsi/fc/fc_fcoe.h
4852     -header-test- += scsi/fc/fc_fip.h
4853     -header-test- += scsi/fc_encode.h
4854     -header-test- += scsi/fc_frame.h
4855     -header-test- += scsi/iser.h
4856     -header-test- += scsi/libfc.h
4857     -header-test- += scsi/libfcoe.h
4858     -header-test- += scsi/libsas.h
4859     -header-test- += scsi/sas_ata.h
4860     -header-test- += scsi/scsi_cmnd.h
4861     -header-test- += scsi/scsi_dbg.h
4862     -header-test- += scsi/scsi_device.h
4863     -header-test- += scsi/scsi_dh.h
4864     -header-test- += scsi/scsi_eh.h
4865     -header-test- += scsi/scsi_host.h
4866     -header-test- += scsi/scsi_ioctl.h
4867     -header-test- += scsi/scsi_request.h
4868     -header-test- += scsi/scsi_tcq.h
4869     -header-test- += scsi/scsi_transport.h
4870     -header-test- += scsi/scsi_transport_fc.h
4871     -header-test- += scsi/scsi_transport_sas.h
4872     -header-test- += scsi/scsi_transport_spi.h
4873     -header-test- += scsi/scsi_transport_srp.h
4874     -header-test- += scsi/scsicam.h
4875     -header-test- += scsi/sg.h
4876     -header-test- += soc/arc/aux.h
4877     -header-test- += soc/arc/mcip.h
4878     -header-test- += soc/arc/timers.h
4879     -header-test- += soc/brcmstb/common.h
4880     -header-test- += soc/fsl/bman.h
4881     -header-test- += soc/fsl/qe/qe.h
4882     -header-test- += soc/fsl/qe/qe_ic.h
4883     -header-test- += soc/fsl/qe/qe_tdm.h
4884     -header-test- += soc/fsl/qe/ucc.h
4885     -header-test- += soc/fsl/qe/ucc_fast.h
4886     -header-test- += soc/fsl/qe/ucc_slow.h
4887     -header-test- += soc/fsl/qman.h
4888     -header-test- += soc/nps/common.h
4889     -header-test-$(CONFIG_ARC) += soc/nps/mtm.h
4890     -header-test- += soc/qcom/cmd-db.h
4891     -header-test- += soc/qcom/rpmh.h
4892     -header-test- += soc/qcom/tcs.h
4893     -header-test- += soc/tegra/ahb.h
4894     -header-test- += soc/tegra/bpmp-abi.h
4895     -header-test- += soc/tegra/common.h
4896     -header-test- += soc/tegra/flowctrl.h
4897     -header-test- += soc/tegra/fuse.h
4898     -header-test- += soc/tegra/mc.h
4899     -header-test- += sound/ac97/compat.h
4900     -header-test- += sound/aci.h
4901     -header-test- += sound/ad1843.h
4902     -header-test- += sound/adau1373.h
4903     -header-test- += sound/ak4113.h
4904     -header-test- += sound/ak4114.h
4905     -header-test- += sound/ak4117.h
4906     -header-test- += sound/cs35l33.h
4907     -header-test- += sound/cs35l34.h
4908     -header-test- += sound/cs35l35.h
4909     -header-test- += sound/cs35l36.h
4910     -header-test- += sound/cs4271.h
4911     -header-test- += sound/cs42l52.h
4912     -header-test- += sound/cs8427.h
4913     -header-test- += sound/da7218.h
4914     -header-test- += sound/da7219-aad.h
4915     -header-test- += sound/da7219.h
4916     -header-test- += sound/da9055.h
4917     -header-test- += sound/emu8000.h
4918     -header-test- += sound/emux_synth.h
4919     -header-test- += sound/hda_component.h
4920     -header-test- += sound/hda_hwdep.h
4921     -header-test- += sound/hda_i915.h
4922     -header-test- += sound/hwdep.h
4923     -header-test- += sound/i2c.h
4924     -header-test- += sound/l3.h
4925     -header-test- += sound/max98088.h
4926     -header-test- += sound/max98095.h
4927     -header-test- += sound/mixer_oss.h
4928     -header-test- += sound/omap-hdmi-audio.h
4929     -header-test- += sound/pcm_drm_eld.h
4930     -header-test- += sound/pcm_iec958.h
4931     -header-test- += sound/pcm_oss.h
4932     -header-test- += sound/pxa2xx-lib.h
4933     -header-test- += sound/rt286.h
4934     -header-test- += sound/rt298.h
4935     -header-test- += sound/rt5645.h
4936     -header-test- += sound/rt5659.h
4937     -header-test- += sound/rt5660.h
4938     -header-test- += sound/rt5665.h
4939     -header-test- += sound/rt5670.h
4940     -header-test- += sound/s3c24xx_uda134x.h
4941     -header-test- += sound/seq_device.h
4942     -header-test- += sound/seq_kernel.h
4943     -header-test- += sound/seq_midi_emul.h
4944     -header-test- += sound/seq_oss.h
4945     -header-test- += sound/soc-acpi-intel-match.h
4946     -header-test- += sound/soc-dai.h
4947     -header-test- += sound/soc-dapm.h
4948     -header-test- += sound/soc-dpcm.h
4949     -header-test- += sound/sof/control.h
4950     -header-test- += sound/sof/dai-intel.h
4951     -header-test- += sound/sof/dai.h
4952     -header-test- += sound/sof/header.h
4953     -header-test- += sound/sof/info.h
4954     -header-test- += sound/sof/pm.h
4955     -header-test- += sound/sof/stream.h
4956     -header-test- += sound/sof/topology.h
4957     -header-test- += sound/sof/trace.h
4958     -header-test- += sound/sof/xtensa.h
4959     -header-test- += sound/spear_spdif.h
4960     -header-test- += sound/sta32x.h
4961     -header-test- += sound/sta350.h
4962     -header-test- += sound/tea6330t.h
4963     -header-test- += sound/tlv320aic32x4.h
4964     -header-test- += sound/tlv320dac33-plat.h
4965     -header-test- += sound/uda134x.h
4966     -header-test- += sound/wavefront.h
4967     -header-test- += sound/wm8903.h
4968     -header-test- += sound/wm8904.h
4969     -header-test- += sound/wm8960.h
4970     -header-test- += sound/wm8962.h
4971     -header-test- += sound/wm8993.h
4972     -header-test- += sound/wm8996.h
4973     -header-test- += sound/wm9081.h
4974     -header-test- += sound/wm9090.h
4975     -header-test- += target/iscsi/iscsi_target_stat.h
4976     -header-test- += trace/bpf_probe.h
4977     -header-test- += trace/events/9p.h
4978     -header-test- += trace/events/afs.h
4979     -header-test- += trace/events/asoc.h
4980     -header-test- += trace/events/bcache.h
4981     -header-test- += trace/events/block.h
4982     -header-test- += trace/events/cachefiles.h
4983     -header-test- += trace/events/cgroup.h
4984     -header-test- += trace/events/clk.h
4985     -header-test- += trace/events/cma.h
4986     -header-test- += trace/events/ext4.h
4987     -header-test- += trace/events/f2fs.h
4988     -header-test- += trace/events/fs_dax.h
4989     -header-test- += trace/events/fscache.h
4990     -header-test- += trace/events/fsi.h
4991     -header-test- += trace/events/fsi_master_ast_cf.h
4992     -header-test- += trace/events/fsi_master_gpio.h
4993     -header-test- += trace/events/huge_memory.h
4994     -header-test- += trace/events/ib_mad.h
4995     -header-test- += trace/events/ib_umad.h
4996     -header-test- += trace/events/iscsi.h
4997     -header-test- += trace/events/jbd2.h
4998     -header-test- += trace/events/kvm.h
4999     -header-test- += trace/events/kyber.h
5000     -header-test- += trace/events/libata.h
5001     -header-test- += trace/events/mce.h
5002     -header-test- += trace/events/mdio.h
5003     -header-test- += trace/events/migrate.h
5004     -header-test- += trace/events/mmflags.h
5005     -header-test- += trace/events/nbd.h
5006     -header-test- += trace/events/nilfs2.h
5007     -header-test- += trace/events/pwc.h
5008     -header-test- += trace/events/rdma.h
5009     -header-test- += trace/events/rpcgss.h
5010     -header-test- += trace/events/rpcrdma.h
5011     -header-test- += trace/events/rxrpc.h
5012     -header-test- += trace/events/scsi.h
5013     -header-test- += trace/events/siox.h
5014     -header-test- += trace/events/spi.h
5015     -header-test- += trace/events/swiotlb.h
5016     -header-test- += trace/events/syscalls.h
5017     -header-test- += trace/events/target.h
5018     -header-test- += trace/events/thermal_power_allocator.h
5019     -header-test- += trace/events/timer.h
5020     -header-test- += trace/events/wbt.h
5021     -header-test- += trace/events/xen.h
5022     -header-test- += trace/perf.h
5023     -header-test- += trace/trace_events.h
5024     -header-test- += uapi/drm/vmwgfx_drm.h
5025     -header-test- += uapi/linux/a.out.h
5026     -header-test- += uapi/linux/coda.h
5027     -header-test- += uapi/linux/coda_psdev.h
5028     -header-test- += uapi/linux/errqueue.h
5029     -header-test- += uapi/linux/eventpoll.h
5030     -header-test- += uapi/linux/hdlc/ioctl.h
5031     -header-test- += uapi/linux/input.h
5032     -header-test- += uapi/linux/kvm.h
5033     -header-test- += uapi/linux/kvm_para.h
5034     -header-test- += uapi/linux/lightnvm.h
5035     -header-test- += uapi/linux/mic_common.h
5036     -header-test- += uapi/linux/mman.h
5037     -header-test- += uapi/linux/nilfs2_ondisk.h
5038     -header-test- += uapi/linux/patchkey.h
5039     -header-test- += uapi/linux/ptrace.h
5040     -header-test- += uapi/linux/scc.h
5041     -header-test- += uapi/linux/seg6_iptunnel.h
5042     -header-test- += uapi/linux/smc_diag.h
5043     -header-test- += uapi/linux/timex.h
5044     -header-test- += uapi/linux/videodev2.h
5045     -header-test- += uapi/scsi/scsi_bsg_fc.h
5046     -header-test- += uapi/sound/asound.h
5047     -header-test- += uapi/sound/sof/eq.h
5048     -header-test- += uapi/sound/sof/fw.h
5049     -header-test- += uapi/sound/sof/header.h
5050     -header-test- += uapi/sound/sof/manifest.h
5051     -header-test- += uapi/sound/sof/trace.h
5052     -header-test- += uapi/xen/evtchn.h
5053     -header-test- += uapi/xen/gntdev.h
5054     -header-test- += uapi/xen/privcmd.h
5055     -header-test- += vdso/vsyscall.h
5056     -header-test- += video/broadsheetfb.h
5057     -header-test- += video/cvisionppc.h
5058     -header-test- += video/gbe.h
5059     -header-test- += video/kyro.h
5060     -header-test- += video/maxinefb.h
5061     -header-test- += video/metronomefb.h
5062     -header-test- += video/neomagic.h
5063     -header-test- += video/of_display_timing.h
5064     -header-test- += video/omapvrfb.h
5065     -header-test- += video/s1d13xxxfb.h
5066     -header-test- += video/sstfb.h
5067     -header-test- += video/tgafb.h
5068     -header-test- += video/udlfb.h
5069     -header-test- += video/uvesafb.h
5070     -header-test- += video/vga.h
5071     -header-test- += video/w100fb.h
5072     -header-test- += xen/acpi.h
5073     -header-test- += xen/arm/hypercall.h
5074     -header-test- += xen/arm/page-coherent.h
5075     -header-test- += xen/arm/page.h
5076     -header-test- += xen/balloon.h
5077     -header-test- += xen/events.h
5078     -header-test- += xen/features.h
5079     -header-test- += xen/grant_table.h
5080     -header-test- += xen/hvm.h
5081     -header-test- += xen/interface/callback.h
5082     -header-test- += xen/interface/event_channel.h
5083     -header-test- += xen/interface/grant_table.h
5084     -header-test- += xen/interface/hvm/dm_op.h
5085     -header-test- += xen/interface/hvm/hvm_op.h
5086     -header-test- += xen/interface/hvm/hvm_vcpu.h
5087     -header-test- += xen/interface/hvm/params.h
5088     -header-test- += xen/interface/hvm/start_info.h
5089     -header-test- += xen/interface/io/9pfs.h
5090     -header-test- += xen/interface/io/blkif.h
5091     -header-test- += xen/interface/io/console.h
5092     -header-test- += xen/interface/io/displif.h
5093     -header-test- += xen/interface/io/fbif.h
5094     -header-test- += xen/interface/io/kbdif.h
5095     -header-test- += xen/interface/io/netif.h
5096     -header-test- += xen/interface/io/pciif.h
5097     -header-test- += xen/interface/io/protocols.h
5098     -header-test- += xen/interface/io/pvcalls.h
5099     -header-test- += xen/interface/io/ring.h
5100     -header-test- += xen/interface/io/sndif.h
5101     -header-test- += xen/interface/io/tpmif.h
5102     -header-test- += xen/interface/io/vscsiif.h
5103     -header-test- += xen/interface/io/xs_wire.h
5104     -header-test- += xen/interface/memory.h
5105     -header-test- += xen/interface/nmi.h
5106     -header-test- += xen/interface/physdev.h
5107     -header-test- += xen/interface/platform.h
5108     -header-test- += xen/interface/sched.h
5109     -header-test- += xen/interface/vcpu.h
5110     -header-test- += xen/interface/version.h
5111     -header-test- += xen/interface/xen-mca.h
5112     -header-test- += xen/interface/xen.h
5113     -header-test- += xen/interface/xenpmu.h
5114     -header-test- += xen/mem-reservation.h
5115     -header-test- += xen/page.h
5116     -header-test- += xen/platform_pci.h
5117     -header-test- += xen/swiotlb-xen.h
5118     -header-test- += xen/xen-front-pgdir-shbuf.h
5119     -header-test- += xen/xen-ops.h
5120     -header-test- += xen/xen.h
5121     -header-test- += xen/xenbus.h
5122     -
5123     -# Do not include directly
5124     -header-test- += linux/compiler-clang.h
5125     -header-test- += linux/compiler-gcc.h
5126     -header-test- += linux/patchkey.h
5127     -header-test- += linux/rwlock_api_smp.h
5128     -header-test- += linux/spinlock_types_up.h
5129     -header-test- += linux/spinlock_up.h
5130     -header-test- += linux/wimax/debug.h
5131     -header-test- += rdma/uverbs_named_ioctl.h
5132     -
5133     -# asm-generic/*.h is used by asm/*.h, and should not be included directly
5134     -header-test- += asm-generic/% uapi/asm-generic/%
5135     -
5136     -# Timestamp files touched by Kconfig
5137     -header-test- += config/%
5138     -
5139     -# Timestamp files touched by scripts/adjust_autoksyms.sh
5140     -header-test- += ksym/%
5141     -
5142     -# You could compile-test these, but maybe not so useful...
5143     -header-test- += dt-bindings/%
5144     -
5145     -# Do not test generated headers. Stale headers are often left over when you
5146     -# traverse the git history without cleaning.
5147     -header-test- += generated/%
5148     -
5149     -# The rest are compile-tested
5150     -header-test-pattern-y += */*.h */*/*.h */*/*/*.h */*/*/*/*.h
5151     diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
5152     index 2f3f28c7cea3..9373662cdb44 100644
5153     --- a/include/acpi/actypes.h
5154     +++ b/include/acpi/actypes.h
5155     @@ -532,11 +532,12 @@ typedef u64 acpi_integer;
5156     strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE)
5157    
5158     /*
5159     - * Algorithm to obtain access bit width.
5160     + * Algorithm to obtain access bit or byte width.
5161     * Can be used with access_width of struct acpi_generic_address and access_size of
5162     * struct acpi_resource_generic_register.
5163     */
5164     #define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
5165     +#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
5166    
5167     /*******************************************************************************
5168     *
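
The hunk above pairs the existing ACPI_ACCESS_BIT_WIDTH() with a new ACPI_ACCESS_BYTE_WIDTH(), so callers can translate a GAS access_size encoding into either unit. A minimal standalone C sketch of what both macros evaluate to for the defined encodings 1..4 (macro bodies copied from the hunk; the surrounding program is illustrative only):

    #include <stdio.h>

    #define ACPI_ACCESS_BIT_WIDTH(size)  (1 << ((size) + 2))
    #define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))

    int main(void)
    {
        /* ACPI GAS access_size encodings: 1 = byte, 2 = word, 3 = dword, 4 = qword */
        for (int size = 1; size <= 4; size++)
            printf("access_size %d -> %2d bits / %d bytes\n", size,
                   ACPI_ACCESS_BIT_WIDTH(size), ACPI_ACCESS_BYTE_WIDTH(size));
        return 0;
    }
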
5169     diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
5170     index ce4103208619..cec543d9e87b 100644
5171     --- a/include/asm-generic/vdso/vsyscall.h
5172     +++ b/include/asm-generic/vdso/vsyscall.h
5173     @@ -12,9 +12,9 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
5174     #endif /* __arch_get_k_vdso_data */
5175    
5176     #ifndef __arch_update_vdso_data
5177     -static __always_inline int __arch_update_vdso_data(void)
5178     +static __always_inline bool __arch_update_vdso_data(void)
5179     {
5180     - return 0;
5181     + return true;
5182     }
5183     #endif /* __arch_update_vdso_data */
5184    
5185     diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
5186     index 51ccb4b8770a..bff1def62eed 100644
5187     --- a/include/linux/blkdev.h
5188     +++ b/include/linux/blkdev.h
5189     @@ -531,7 +531,7 @@ struct request_queue {
5190     unsigned int sg_reserved_size;
5191     int node;
5192     #ifdef CONFIG_BLK_DEV_IO_TRACE
5193     - struct blk_trace *blk_trace;
5194     + struct blk_trace __rcu *blk_trace;
5195     struct mutex blk_trace_mutex;
5196     #endif
5197     /*
5198     diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
5199     index 7bb2d8de9f30..3b6ff5902edc 100644
5200     --- a/include/linux/blktrace_api.h
5201     +++ b/include/linux/blktrace_api.h
5202     @@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
5203     **/
5204     #define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
5205     do { \
5206     - struct blk_trace *bt = (q)->blk_trace; \
5207     + struct blk_trace *bt; \
5208     + \
5209     + rcu_read_lock(); \
5210     + bt = rcu_dereference((q)->blk_trace); \
5211     if (unlikely(bt)) \
5212     __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
5213     + rcu_read_unlock(); \
5214     } while (0)
5215     #define blk_add_trace_msg(q, fmt, ...) \
5216     blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
5217     @@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
5218    
5219     static inline bool blk_trace_note_message_enabled(struct request_queue *q)
5220     {
5221     - struct blk_trace *bt = q->blk_trace;
5222     - if (likely(!bt))
5223     - return false;
5224     - return bt->act_mask & BLK_TC_NOTIFY;
5225     + struct blk_trace *bt;
5226     + bool ret;
5227     +
5228     + rcu_read_lock();
5229     + bt = rcu_dereference(q->blk_trace);
5230     + ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
5231     + rcu_read_unlock();
5232     + return ret;
5233     }
5234    
5235     extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
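
These reader-side hunks, together with the __rcu annotation added to request_queue::blk_trace above, assume the writer follows the matching discipline: publish the new pointer with rcu_assign_pointer() and wait a grace period before freeing the old object. A generic writer-side sketch of that pattern (kernel-style C with a made-up structure and lock; not code taken from this patch):

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct cfg { int val; };              /* hypothetical RCU-protected object */
    static struct cfg __rcu *active_cfg;
    static DEFINE_MUTEX(cfg_lock);        /* serializes writers only */

    static void replace_cfg(struct cfg *new)
    {
        struct cfg *old;

        mutex_lock(&cfg_lock);
        old = rcu_dereference_protected(active_cfg,
                                        lockdep_is_held(&cfg_lock));
        rcu_assign_pointer(active_cfg, new);  /* readers now see the new object */
        mutex_unlock(&cfg_lock);

        synchronize_rcu();                    /* wait out readers still using old */
        kfree(old);                           /* safe: no reader can reach it */
    }
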
5236     diff --git a/include/linux/hid.h b/include/linux/hid.h
5237     index cd41f209043f..875f71132b14 100644
5238     --- a/include/linux/hid.h
5239     +++ b/include/linux/hid.h
5240     @@ -492,7 +492,7 @@ struct hid_report_enum {
5241     };
5242    
5243     #define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
5244     -#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
5245     +#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
5246     #define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
5247     #define HID_OUTPUT_FIFO_SIZE 64
5248    
5249     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
5250     index f8fde9fa479c..b580a35f50ea 100644
5251     --- a/include/linux/netdevice.h
5252     +++ b/include/linux/netdevice.h
5253     @@ -72,6 +72,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
5254     #define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
5255     #define NET_RX_DROP 1 /* packet dropped */
5256    
5257     +#define MAX_NEST_DEV 8
5258     +
5259     /*
5260     * Transmit return codes: transmit return codes originate from three different
5261     * namespaces:
5262     @@ -4294,11 +4296,8 @@ void *netdev_lower_get_next(struct net_device *dev,
5263     ldev; \
5264     ldev = netdev_lower_get_next(dev, &(iter)))
5265    
5266     -struct net_device *netdev_all_lower_get_next(struct net_device *dev,
5267     +struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
5268     struct list_head **iter);
5269     -struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
5270     - struct list_head **iter);
5271     -
5272     int netdev_walk_all_lower_dev(struct net_device *dev,
5273     int (*fn)(struct net_device *lower_dev,
5274     void *data),
5275     diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
5276     index 77336f4c4b1c..32658749e9db 100644
5277     --- a/include/linux/netfilter/ipset/ip_set.h
5278     +++ b/include/linux/netfilter/ipset/ip_set.h
5279     @@ -121,6 +121,7 @@ struct ip_set_ext {
5280     u32 timeout;
5281     u8 packets_op;
5282     u8 bytes_op;
5283     + bool target;
5284     };
5285    
5286     struct ip_set;
5287     @@ -187,6 +188,14 @@ struct ip_set_type_variant {
5288     /* Return true if "b" set is the same as "a"
5289     * according to the create set parameters */
5290     bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
5291     + /* Region-locking is used */
5292     + bool region_lock;
5293     +};
5294     +
5295     +struct ip_set_region {
5296     + spinlock_t lock; /* Region lock */
5297     + size_t ext_size; /* Size of the dynamic extensions */
5298     + u32 elements; /* Number of elements vs timeout */
5299     };
5300    
5301     /* The core set type structure */
5302     @@ -681,7 +690,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
5303     }
5304    
5305     #define IP_SET_INIT_KEXT(skb, opt, set) \
5306     - { .bytes = (skb)->len, .packets = 1, \
5307     + { .bytes = (skb)->len, .packets = 1, .target = true,\
5308     .timeout = ip_set_adt_opt_timeout(opt, set) }
5309    
5310     #define IP_SET_INIT_UEXT(set) \
5311     diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
5312     index 1abe91ff6e4a..6d67e9a5af6b 100644
5313     --- a/include/linux/sched/nohz.h
5314     +++ b/include/linux/sched/nohz.h
5315     @@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { }
5316    
5317     #ifdef CONFIG_NO_HZ_COMMON
5318     void calc_load_nohz_start(void);
5319     +void calc_load_nohz_remote(struct rq *rq);
5320     void calc_load_nohz_stop(void);
5321     #else
5322     static inline void calc_load_nohz_start(void) { }
5323     +static inline void calc_load_nohz_remote(struct rq *rq) { }
5324     static inline void calc_load_nohz_stop(void) { }
5325     #endif /* CONFIG_NO_HZ_COMMON */
5326    
5327     diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
5328     index e5fc8db1f783..78f6437cbc3a 100644
5329     --- a/include/net/flow_dissector.h
5330     +++ b/include/net/flow_dissector.h
5331     @@ -5,6 +5,7 @@
5332     #include <linux/types.h>
5333     #include <linux/in6.h>
5334     #include <linux/siphash.h>
5335     +#include <linux/string.h>
5336     #include <uapi/linux/if_ether.h>
5337    
5338     /**
5339     @@ -338,4 +339,12 @@ struct bpf_flow_dissector {
5340     void *data_end;
5341     };
5342    
5343     +static inline void
5344     +flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
5345     + struct flow_dissector_key_basic *key_basic)
5346     +{
5347     + memset(key_control, 0, sizeof(*key_control));
5348     + memset(key_basic, 0, sizeof(*key_basic));
5349     +}
5350     +
5351     #endif
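
The helper added above gives callers one obvious way to zero both key structures before dissection instead of open-coding (or forgetting) the memsets. A minimal usage sketch (kernel-style C; the surrounding function is hypothetical):

    #include <net/flow_dissector.h>

    static void example_dissect_prep(void)
    {
        struct flow_dissector_key_control key_control;
        struct flow_dissector_key_basic key_basic;

        /* zero both keys so padding and unused fields never carry stack
         * garbage into later comparisons or hashing */
        flow_dissector_init_keys(&key_control, &key_basic);

        /* ... fill in the fields of interest and run the dissector ... */
    }
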
5352     diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h
5353     index 5f72af35b3ed..ad22079125bf 100644
5354     --- a/include/uapi/linux/usb/charger.h
5355     +++ b/include/uapi/linux/usb/charger.h
5356     @@ -14,18 +14,18 @@
5357     * ACA (Accessory Charger Adapters)
5358     */
5359     enum usb_charger_type {
5360     - UNKNOWN_TYPE,
5361     - SDP_TYPE,
5362     - DCP_TYPE,
5363     - CDP_TYPE,
5364     - ACA_TYPE,
5365     + UNKNOWN_TYPE = 0,
5366     + SDP_TYPE = 1,
5367     + DCP_TYPE = 2,
5368     + CDP_TYPE = 3,
5369     + ACA_TYPE = 4,
5370     };
5371    
5372     /* USB charger state */
5373     enum usb_charger_state {
5374     - USB_CHARGER_DEFAULT,
5375     - USB_CHARGER_PRESENT,
5376     - USB_CHARGER_ABSENT,
5377     + USB_CHARGER_DEFAULT = 0,
5378     + USB_CHARGER_PRESENT = 1,
5379     + USB_CHARGER_ABSENT = 2,
5380     };
5381    
5382     #endif /* _UAPI__LINUX_USB_CHARGER_H */
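
Because include/uapi/linux/usb/charger.h is exported to userspace, the numeric values of these enumerators are ABI; spelling them out means a later edit that inserts or reorders enumerators can no longer change the numbers silently. A hypothetical userspace check, assuming the sanitized headers from 'make headers_install' are on the include path:

    #include <linux/usb/charger.h>

    /* explicit initializers pin the values both sides already rely on */
    _Static_assert(SDP_TYPE == 1, "charger type values are part of the ABI");
    _Static_assert(USB_CHARGER_ABSENT == 2, "charger state values are part of the ABI");

    int main(void)
    {
        return 0;
    }
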
5383     diff --git a/init/Kconfig b/init/Kconfig
5384     index 0328b53d09ad..0bffc8fdbf3d 100644
5385     --- a/init/Kconfig
5386     +++ b/init/Kconfig
5387     @@ -105,29 +105,9 @@ config COMPILE_TEST
5388     here. If you are a user/distributor, say N here to exclude useless
5389     drivers to be distributed.
5390    
5391     -config HEADER_TEST
5392     - bool "Compile test headers that should be standalone compilable"
5393     - help
5394     - Compile test headers listed in header-test-y target to ensure they are
5395     - self-contained, i.e. compilable as standalone units.
5396     -
5397     - If you are a developer or tester and want to ensure the requested
5398     - headers are self-contained, say Y here. Otherwise, choose N.
5399     -
5400     -config KERNEL_HEADER_TEST
5401     - bool "Compile test kernel headers"
5402     - depends on HEADER_TEST
5403     - help
5404     - Headers in include/ are used to build external moduls.
5405     - Compile test them to ensure they are self-contained, i.e.
5406     - compilable as standalone units.
5407     -
5408     - If you are a developer or tester and want to ensure the headers
5409     - in include/ are self-contained, say Y here. Otherwise, choose N.
5410     -
5411     config UAPI_HEADER_TEST
5412     bool "Compile test UAPI headers"
5413     - depends on HEADER_TEST && HEADERS_INSTALL && CC_CAN_LINK
5414     + depends on HEADERS_INSTALL && CC_CAN_LINK
5415     help
5416     Compile test headers exported to user-space to ensure they are
5417     self-contained, i.e. compilable as standalone units.
5418     diff --git a/kernel/audit.c b/kernel/audit.c
5419     index da8dc0db5bd3..dfc45063cb56 100644
5420     --- a/kernel/audit.c
5421     +++ b/kernel/audit.c
5422     @@ -1100,13 +1100,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
5423     audit_log_end(ab);
5424     }
5425    
5426     -static int audit_set_feature(struct sk_buff *skb)
5427     +static int audit_set_feature(struct audit_features *uaf)
5428     {
5429     - struct audit_features *uaf;
5430     int i;
5431    
5432     BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
5433     - uaf = nlmsg_data(nlmsg_hdr(skb));
5434    
5435     /* if there is ever a version 2 we should handle that here */
5436    
5437     @@ -1174,6 +1172,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5438     {
5439     u32 seq;
5440     void *data;
5441     + int data_len;
5442     int err;
5443     struct audit_buffer *ab;
5444     u16 msg_type = nlh->nlmsg_type;
5445     @@ -1187,6 +1186,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5446    
5447     seq = nlh->nlmsg_seq;
5448     data = nlmsg_data(nlh);
5449     + data_len = nlmsg_len(nlh);
5450    
5451     switch (msg_type) {
5452     case AUDIT_GET: {
5453     @@ -1210,7 +1210,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5454     struct audit_status s;
5455     memset(&s, 0, sizeof(s));
5456     /* guard against past and future API changes */
5457     - memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
5458     + memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
5459     if (s.mask & AUDIT_STATUS_ENABLED) {
5460     err = audit_set_enabled(s.enabled);
5461     if (err < 0)
5462     @@ -1314,7 +1314,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5463     return err;
5464     break;
5465     case AUDIT_SET_FEATURE:
5466     - err = audit_set_feature(skb);
5467     + if (data_len < sizeof(struct audit_features))
5468     + return -EINVAL;
5469     + err = audit_set_feature(data);
5470     if (err)
5471     return err;
5472     break;
5473     @@ -1326,6 +1328,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5474    
5475     err = audit_filter(msg_type, AUDIT_FILTER_USER);
5476     if (err == 1) { /* match or error */
5477     + char *str = data;
5478     +
5479     err = 0;
5480     if (msg_type == AUDIT_USER_TTY) {
5481     err = tty_audit_push();
5482     @@ -1333,26 +1337,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5483     break;
5484     }
5485     audit_log_user_recv_msg(&ab, msg_type);
5486     - if (msg_type != AUDIT_USER_TTY)
5487     + if (msg_type != AUDIT_USER_TTY) {
5488     + /* ensure NULL termination */
5489     + str[data_len - 1] = '\0';
5490     audit_log_format(ab, " msg='%.*s'",
5491     AUDIT_MESSAGE_TEXT_MAX,
5492     - (char *)data);
5493     - else {
5494     - int size;
5495     -
5496     + str);
5497     + } else {
5498     audit_log_format(ab, " data=");
5499     - size = nlmsg_len(nlh);
5500     - if (size > 0 &&
5501     - ((unsigned char *)data)[size - 1] == '\0')
5502     - size--;
5503     - audit_log_n_untrustedstring(ab, data, size);
5504     + if (data_len > 0 && str[data_len - 1] == '\0')
5505     + data_len--;
5506     + audit_log_n_untrustedstring(ab, str, data_len);
5507     }
5508     audit_log_end(ab);
5509     }
5510     break;
5511     case AUDIT_ADD_RULE:
5512     case AUDIT_DEL_RULE:
5513     - if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
5514     + if (data_len < sizeof(struct audit_rule_data))
5515     return -EINVAL;
5516     if (audit_enabled == AUDIT_LOCKED) {
5517     audit_log_common_recv_msg(audit_context(), &ab,
5518     @@ -1364,7 +1366,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5519     audit_log_end(ab);
5520     return -EPERM;
5521     }
5522     - err = audit_rule_change(msg_type, seq, data, nlmsg_len(nlh));
5523     + err = audit_rule_change(msg_type, seq, data, data_len);
5524     break;
5525     case AUDIT_LIST_RULES:
5526     err = audit_list_rules_send(skb, seq);
5527     @@ -1379,7 +1381,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5528     case AUDIT_MAKE_EQUIV: {
5529     void *bufp = data;
5530     u32 sizes[2];
5531     - size_t msglen = nlmsg_len(nlh);
5532     + size_t msglen = data_len;
5533     char *old, *new;
5534    
5535     err = -EINVAL;
5536     @@ -1455,7 +1457,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
5537    
5538     memset(&s, 0, sizeof(s));
5539     /* guard against past and future API changes */
5540     - memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
5541     + memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
5542     /* check if new data is valid */
5543     if ((s.enabled != 0 && s.enabled != 1) ||
5544     (s.log_passwd != 0 && s.log_passwd != 1))
5545     diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
5546     index b0126e9c0743..026e34da4ace 100644
5547     --- a/kernel/auditfilter.c
5548     +++ b/kernel/auditfilter.c
5549     @@ -456,6 +456,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5550     bufp = data->buf;
5551     for (i = 0; i < data->field_count; i++) {
5552     struct audit_field *f = &entry->rule.fields[i];
5553     + u32 f_val;
5554    
5555     err = -EINVAL;
5556    
5557     @@ -464,12 +465,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5558     goto exit_free;
5559    
5560     f->type = data->fields[i];
5561     - f->val = data->values[i];
5562     + f_val = data->values[i];
5563    
5564     /* Support legacy tests for a valid loginuid */
5565     - if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
5566     + if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
5567     f->type = AUDIT_LOGINUID_SET;
5568     - f->val = 0;
5569     + f_val = 0;
5570     entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
5571     }
5572    
5573     @@ -485,7 +486,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5574     case AUDIT_SUID:
5575     case AUDIT_FSUID:
5576     case AUDIT_OBJ_UID:
5577     - f->uid = make_kuid(current_user_ns(), f->val);
5578     + f->uid = make_kuid(current_user_ns(), f_val);
5579     if (!uid_valid(f->uid))
5580     goto exit_free;
5581     break;
5582     @@ -494,11 +495,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5583     case AUDIT_SGID:
5584     case AUDIT_FSGID:
5585     case AUDIT_OBJ_GID:
5586     - f->gid = make_kgid(current_user_ns(), f->val);
5587     + f->gid = make_kgid(current_user_ns(), f_val);
5588     if (!gid_valid(f->gid))
5589     goto exit_free;
5590     break;
5591     case AUDIT_ARCH:
5592     + f->val = f_val;
5593     entry->rule.arch_f = f;
5594     break;
5595     case AUDIT_SUBJ_USER:
5596     @@ -511,11 +513,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5597     case AUDIT_OBJ_TYPE:
5598     case AUDIT_OBJ_LEV_LOW:
5599     case AUDIT_OBJ_LEV_HIGH:
5600     - str = audit_unpack_string(&bufp, &remain, f->val);
5601     - if (IS_ERR(str))
5602     + str = audit_unpack_string(&bufp, &remain, f_val);
5603     + if (IS_ERR(str)) {
5604     + err = PTR_ERR(str);
5605     goto exit_free;
5606     - entry->rule.buflen += f->val;
5607     -
5608     + }
5609     + entry->rule.buflen += f_val;
5610     + f->lsm_str = str;
5611     err = security_audit_rule_init(f->type, f->op, str,
5612     (void **)&f->lsm_rule);
5613     /* Keep currently invalid fields around in case they
5614     @@ -524,68 +528,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
5615     pr_warn("audit rule for LSM \'%s\' is invalid\n",
5616     str);
5617     err = 0;
5618     - }
5619     - if (err) {
5620     - kfree(str);
5621     + } else if (err)
5622     goto exit_free;
5623     - } else
5624     - f->lsm_str = str;
5625     break;
5626     case AUDIT_WATCH:
5627     - str = audit_unpack_string(&bufp, &remain, f->val);
5628     - if (IS_ERR(str))
5629     + str = audit_unpack_string(&bufp, &remain, f_val);
5630     + if (IS_ERR(str)) {
5631     + err = PTR_ERR(str);
5632     goto exit_free;
5633     - entry->rule.buflen += f->val;
5634     -
5635     - err = audit_to_watch(&entry->rule, str, f->val, f->op);
5636     + }
5637     + err = audit_to_watch(&entry->rule, str, f_val, f->op);
5638     if (err) {
5639     kfree(str);
5640     goto exit_free;
5641     }
5642     + entry->rule.buflen += f_val;
5643     break;
5644     case AUDIT_DIR:
5645     - str = audit_unpack_string(&bufp, &remain, f->val);
5646     - if (IS_ERR(str))
5647     + str = audit_unpack_string(&bufp, &remain, f_val);
5648     + if (IS_ERR(str)) {
5649     + err = PTR_ERR(str);
5650     goto exit_free;
5651     - entry->rule.buflen += f->val;
5652     -
5653     + }
5654     err = audit_make_tree(&entry->rule, str, f->op);
5655     kfree(str);
5656     if (err)
5657     goto exit_free;
5658     + entry->rule.buflen += f_val;
5659     break;
5660     case AUDIT_INODE:
5661     + f->val = f_val;
5662     err = audit_to_inode(&entry->rule, f);
5663     if (err)
5664     goto exit_free;
5665     break;
5666     case AUDIT_FILTERKEY:
5667     - if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
5668     + if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
5669     goto exit_free;
5670     - str = audit_unpack_string(&bufp, &remain, f->val);
5671     - if (IS_ERR(str))
5672     + str = audit_unpack_string(&bufp, &remain, f_val);
5673     + if (IS_ERR(str)) {
5674     + err = PTR_ERR(str);
5675     goto exit_free;
5676     - entry->rule.buflen += f->val;
5677     + }
5678     + entry->rule.buflen += f_val;
5679     entry->rule.filterkey = str;
5680     break;
5681     case AUDIT_EXE:
5682     - if (entry->rule.exe || f->val > PATH_MAX)
5683     + if (entry->rule.exe || f_val > PATH_MAX)
5684     goto exit_free;
5685     - str = audit_unpack_string(&bufp, &remain, f->val);
5686     + str = audit_unpack_string(&bufp, &remain, f_val);
5687     if (IS_ERR(str)) {
5688     err = PTR_ERR(str);
5689     goto exit_free;
5690     }
5691     - entry->rule.buflen += f->val;
5692     -
5693     - audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
5694     + audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
5695     if (IS_ERR(audit_mark)) {
5696     kfree(str);
5697     err = PTR_ERR(audit_mark);
5698     goto exit_free;
5699     }
5700     + entry->rule.buflen += f_val;
5701     entry->rule.exe = audit_mark;
5702     break;
5703     + default:
5704     + f->val = f_val;
5705     + break;
5706     }
5707     }
5708    
5709     diff --git a/kernel/kprobes.c b/kernel/kprobes.c
5710     index 53534aa258a6..34e28b236d68 100644
5711     --- a/kernel/kprobes.c
5712     +++ b/kernel/kprobes.c
5713     @@ -510,6 +510,8 @@ static void do_unoptimize_kprobes(void)
5714     arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
5715     /* Loop free_list for disarming */
5716     list_for_each_entry_safe(op, tmp, &freeing_list, list) {
5717     + /* Switching from detour code to origin */
5718     + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
5719     /* Disarm probes if marked disabled */
5720     if (kprobe_disabled(&op->kp))
5721     arch_disarm_kprobe(&op->kp);
5722     @@ -649,6 +651,7 @@ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
5723     {
5724     lockdep_assert_cpus_held();
5725     arch_unoptimize_kprobe(op);
5726     + op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
5727     if (kprobe_disabled(&op->kp))
5728     arch_disarm_kprobe(&op->kp);
5729     }
5730     @@ -676,7 +679,6 @@ static void unoptimize_kprobe(struct kprobe *p, bool force)
5731     return;
5732     }
5733    
5734     - op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
5735     if (!list_empty(&op->list)) {
5736     /* Dequeue from the optimization queue */
5737     list_del_init(&op->list);
5738     diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
5739     index dadb7b7fba37..9bb6d2497b04 100644
5740     --- a/kernel/locking/lockdep_proc.c
5741     +++ b/kernel/locking/lockdep_proc.c
5742     @@ -286,9 +286,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
5743     seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
5744     nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
5745     #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
5746     - seq_printf(m, " number of stack traces: %llu\n",
5747     + seq_printf(m, " number of stack traces: %11llu\n",
5748     lockdep_stack_trace_count());
5749     - seq_printf(m, " number of stack hash chains: %llu\n",
5750     + seq_printf(m, " number of stack hash chains: %11llu\n",
5751     lockdep_stack_hash_count());
5752     #endif
5753     seq_printf(m, " combined max dependencies: %11u\n",
5754     diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
5755     index f504ac831779..df90d4d7ad2e 100644
5756     --- a/kernel/rcu/tree_exp.h
5757     +++ b/kernel/rcu/tree_exp.h
5758     @@ -540,14 +540,13 @@ static void rcu_exp_wait_wake(unsigned long s)
5759     struct rcu_node *rnp;
5760    
5761     synchronize_sched_expedited_wait();
5762     - rcu_exp_gp_seq_end();
5763     - trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
5764    
5765     - /*
5766     - * Switch over to wakeup mode, allowing the next GP, but -only- the
5767     - * next GP, to proceed.
5768     - */
5769     + // Switch over to wakeup mode, allowing the next GP to proceed.
5770     + // End the previous grace period only after acquiring the mutex
5771     + // to ensure that only one GP runs concurrently with wakeups.
5772     mutex_lock(&rcu_state.exp_wake_mutex);
5773     + rcu_exp_gp_seq_end();
5774     + trace_rcu_exp_grace_period(rcu_state.name, s, TPS("end"));
5775    
5776     rcu_for_each_node_breadth_first(rnp) {
5777     if (ULONG_CMP_LT(READ_ONCE(rnp->exp_seq_rq), s)) {
5778     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5779     index e6c65725b7ce..e921126aec84 100644
5780     --- a/kernel/sched/core.c
5781     +++ b/kernel/sched/core.c
5782     @@ -3668,28 +3668,32 @@ static void sched_tick_remote(struct work_struct *work)
5783     * statistics and checks timeslices in a time-independent way, regardless
5784     * of when exactly it is running.
5785     */
5786     - if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
5787     + if (!tick_nohz_tick_stopped_cpu(cpu))
5788     goto out_requeue;
5789    
5790     rq_lock_irq(rq, &rf);
5791     curr = rq->curr;
5792     - if (is_idle_task(curr) || cpu_is_offline(cpu))
5793     + if (cpu_is_offline(cpu))
5794     goto out_unlock;
5795    
5796     + curr = rq->curr;
5797     update_rq_clock(rq);
5798     - delta = rq_clock_task(rq) - curr->se.exec_start;
5799    
5800     - /*
5801     - * Make sure the next tick runs within a reasonable
5802     - * amount of time.
5803     - */
5804     - WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5805     + if (!is_idle_task(curr)) {
5806     + /*
5807     + * Make sure the next tick runs within a reasonable
5808     + * amount of time.
5809     + */
5810     + delta = rq_clock_task(rq) - curr->se.exec_start;
5811     + WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
5812     + }
5813     curr->sched_class->task_tick(rq, curr, 0);
5814    
5815     + calc_load_nohz_remote(rq);
5816     out_unlock:
5817     rq_unlock_irq(rq, &rf);
5818     -
5819     out_requeue:
5820     +
5821     /*
5822     * Run the remote tick once per second (1Hz). This arbitrary
5823     * frequency is large enough to avoid overload but short enough
5824     @@ -7054,8 +7058,15 @@ void sched_move_task(struct task_struct *tsk)
5825    
5826     if (queued)
5827     enqueue_task(rq, tsk, queue_flags);
5828     - if (running)
5829     + if (running) {
5830     set_next_task(rq, tsk);
5831     + /*
5832     + * After changing group, the running task may have joined a
5833     + * throttled one but it's still the running task. Trigger a
5834     + * resched to make sure that task can still run.
5835     + */
5836     + resched_curr(rq);
5837     + }
5838    
5839     task_rq_unlock(rq, tsk, &rf);
5840     }
5841     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
5842     index c87a798d1456..f32ce3a359fa 100644
5843     --- a/kernel/sched/fair.c
5844     +++ b/kernel/sched/fair.c
5845     @@ -5933,6 +5933,7 @@ static inline int select_idle_smt(struct task_struct *p, int target)
5846     */
5847     static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
5848     {
5849     + struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
5850     struct sched_domain *this_sd;
5851     u64 avg_cost, avg_idle;
5852     u64 time, cost;
5853     @@ -5964,11 +5965,11 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
5854    
5855     time = cpu_clock(this);
5856    
5857     - for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
5858     + cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
5859     +
5860     + for_each_cpu_wrap(cpu, cpus, target) {
5861     if (!--nr)
5862     return si_cpu;
5863     - if (!cpumask_test_cpu(cpu, p->cpus_ptr))
5864     - continue;
5865     if (available_idle_cpu(cpu))
5866     break;
5867     if (si_cpu == -1 && sched_idle_cpu(cpu))
5868     diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
5869     index 28a516575c18..de22da666ac7 100644
5870     --- a/kernel/sched/loadavg.c
5871     +++ b/kernel/sched/loadavg.c
5872     @@ -231,16 +231,11 @@ static inline int calc_load_read_idx(void)
5873     return calc_load_idx & 1;
5874     }
5875    
5876     -void calc_load_nohz_start(void)
5877     +static void calc_load_nohz_fold(struct rq *rq)
5878     {
5879     - struct rq *this_rq = this_rq();
5880     long delta;
5881    
5882     - /*
5883     - * We're going into NO_HZ mode, if there's any pending delta, fold it
5884     - * into the pending NO_HZ delta.
5885     - */
5886     - delta = calc_load_fold_active(this_rq, 0);
5887     + delta = calc_load_fold_active(rq, 0);
5888     if (delta) {
5889     int idx = calc_load_write_idx();
5890    
5891     @@ -248,6 +243,24 @@ void calc_load_nohz_start(void)
5892     }
5893     }
5894    
5895     +void calc_load_nohz_start(void)
5896     +{
5897     + /*
5898     + * We're going into NO_HZ mode, if there's any pending delta, fold it
5899     + * into the pending NO_HZ delta.
5900     + */
5901     + calc_load_nohz_fold(this_rq());
5902     +}
5903     +
5904     +/*
5905     + * Keep track of the load for NOHZ_FULL, must be called between
5906     + * calc_load_nohz_{start,stop}().
5907     + */
5908     +void calc_load_nohz_remote(struct rq *rq)
5909     +{
5910     + calc_load_nohz_fold(rq);
5911     +}
5912     +
5913     void calc_load_nohz_stop(void)
5914     {
5915     struct rq *this_rq = this_rq();
5916     @@ -268,7 +281,7 @@ void calc_load_nohz_stop(void)
5917     this_rq->calc_load_update += LOAD_FREQ;
5918     }
5919    
5920     -static long calc_load_nohz_fold(void)
5921     +static long calc_load_nohz_read(void)
5922     {
5923     int idx = calc_load_read_idx();
5924     long delta = 0;
5925     @@ -323,7 +336,7 @@ static void calc_global_nohz(void)
5926     }
5927     #else /* !CONFIG_NO_HZ_COMMON */
5928    
5929     -static inline long calc_load_nohz_fold(void) { return 0; }
5930     +static inline long calc_load_nohz_read(void) { return 0; }
5931     static inline void calc_global_nohz(void) { }
5932    
5933     #endif /* CONFIG_NO_HZ_COMMON */
5934     @@ -346,7 +359,7 @@ void calc_global_load(unsigned long ticks)
5935     /*
5936     * Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
5937     */
5938     - delta = calc_load_nohz_fold();
5939     + delta = calc_load_nohz_read();
5940     if (delta)
5941     atomic_long_add(delta, &calc_load_tasks);
5942    
5943     diff --git a/kernel/time/vsyscall.c b/kernel/time/vsyscall.c
5944     index 5ee0f7709410..9577c89179cd 100644
5945     --- a/kernel/time/vsyscall.c
5946     +++ b/kernel/time/vsyscall.c
5947     @@ -28,11 +28,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
5948     vdata[CS_RAW].mult = tk->tkr_raw.mult;
5949     vdata[CS_RAW].shift = tk->tkr_raw.shift;
5950    
5951     - /* CLOCK_REALTIME */
5952     - vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
5953     - vdso_ts->sec = tk->xtime_sec;
5954     - vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
5955     -
5956     /* CLOCK_MONOTONIC */
5957     vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC];
5958     vdso_ts->sec = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
5959     @@ -70,12 +65,6 @@ static inline void update_vdso_data(struct vdso_data *vdata,
5960     vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_TAI];
5961     vdso_ts->sec = tk->xtime_sec + (s64)tk->tai_offset;
5962     vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
5963     -
5964     - /*
5965     - * Read without the seqlock held by clock_getres().
5966     - * Note: No need to have a second copy.
5967     - */
5968     - WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
5969     }
5970    
5971     void update_vsyscall(struct timekeeper *tk)
5972     @@ -84,20 +73,17 @@ void update_vsyscall(struct timekeeper *tk)
5973     struct vdso_timestamp *vdso_ts;
5974     u64 nsec;
5975    
5976     - if (__arch_update_vdso_data()) {
5977     - /*
5978     - * Some architectures might want to skip the update of the
5979     - * data page.
5980     - */
5981     - return;
5982     - }
5983     -
5984     /* copy vsyscall data */
5985     vdso_write_begin(vdata);
5986    
5987     vdata[CS_HRES_COARSE].clock_mode = __arch_get_clock_mode(tk);
5988     vdata[CS_RAW].clock_mode = __arch_get_clock_mode(tk);
5989    
5990     + /* CLOCK_REALTIME also required for time() */
5991     + vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME];
5992     + vdso_ts->sec = tk->xtime_sec;
5993     + vdso_ts->nsec = tk->tkr_mono.xtime_nsec;
5994     +
5995     /* CLOCK_REALTIME_COARSE */
5996     vdso_ts = &vdata[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
5997     vdso_ts->sec = tk->xtime_sec;
5998     @@ -110,7 +96,18 @@ void update_vsyscall(struct timekeeper *tk)
5999     nsec = nsec + tk->wall_to_monotonic.tv_nsec;
6000     vdso_ts->sec += __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
6001    
6002     - update_vdso_data(vdata, tk);
6003     + /*
6004     + * Read without the seqlock held by clock_getres().
6005     + * Note: No need to have a second copy.
6006     + */
6007     + WRITE_ONCE(vdata[CS_HRES_COARSE].hrtimer_res, hrtimer_resolution);
6008     +
6009     + /*
6010     + * Architectures can opt out of updating the high resolution part
6011     + * of the VDSO.
6012     + */
6013     + if (__arch_update_vdso_data())
6014     + update_vdso_data(vdata, tk);
6015    
6016     __arch_update_vsyscall(vdata, tk);
6017    
6018     diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
6019     index 2d6e93ab0478..4b2ad374167b 100644
6020     --- a/kernel/trace/blktrace.c
6021     +++ b/kernel/trace/blktrace.c
6022     @@ -336,6 +336,7 @@ static void put_probe_ref(void)
6023    
6024     static void blk_trace_cleanup(struct blk_trace *bt)
6025     {
6026     + synchronize_rcu();
6027     blk_trace_free(bt);
6028     put_probe_ref();
6029     }
6030     @@ -630,8 +631,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
6031     static int __blk_trace_startstop(struct request_queue *q, int start)
6032     {
6033     int ret;
6034     - struct blk_trace *bt = q->blk_trace;
6035     + struct blk_trace *bt;
6036    
6037     + bt = rcu_dereference_protected(q->blk_trace,
6038     + lockdep_is_held(&q->blk_trace_mutex));
6039     if (bt == NULL)
6040     return -EINVAL;
6041    
6042     @@ -741,8 +744,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
6043     void blk_trace_shutdown(struct request_queue *q)
6044     {
6045     mutex_lock(&q->blk_trace_mutex);
6046     -
6047     - if (q->blk_trace) {
6048     + if (rcu_dereference_protected(q->blk_trace,
6049     + lockdep_is_held(&q->blk_trace_mutex))) {
6050     __blk_trace_startstop(q, 0);
6051     __blk_trace_remove(q);
6052     }
6053     @@ -754,8 +757,10 @@ void blk_trace_shutdown(struct request_queue *q)
6054     static union kernfs_node_id *
6055     blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
6056     {
6057     - struct blk_trace *bt = q->blk_trace;
6058     + struct blk_trace *bt;
6059    
6060     + /* We don't use the 'bt' value here except as an optimization... */
6061     + bt = rcu_dereference_protected(q->blk_trace, 1);
6062     if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
6063     return NULL;
6064    
6065     @@ -800,10 +805,14 @@ static void blk_add_trace_rq(struct request *rq, int error,
6066     unsigned int nr_bytes, u32 what,
6067     union kernfs_node_id *cgid)
6068     {
6069     - struct blk_trace *bt = rq->q->blk_trace;
6070     + struct blk_trace *bt;
6071    
6072     - if (likely(!bt))
6073     + rcu_read_lock();
6074     + bt = rcu_dereference(rq->q->blk_trace);
6075     + if (likely(!bt)) {
6076     + rcu_read_unlock();
6077     return;
6078     + }
6079    
6080     if (blk_rq_is_passthrough(rq))
6081     what |= BLK_TC_ACT(BLK_TC_PC);
6082     @@ -812,6 +821,7 @@ static void blk_add_trace_rq(struct request *rq, int error,
6083    
6084     __blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
6085     rq->cmd_flags, what, error, 0, NULL, cgid);
6086     + rcu_read_unlock();
6087     }
6088    
6089     static void blk_add_trace_rq_insert(void *ignore,
6090     @@ -857,14 +867,19 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
6091     static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
6092     u32 what, int error)
6093     {
6094     - struct blk_trace *bt = q->blk_trace;
6095     + struct blk_trace *bt;
6096    
6097     - if (likely(!bt))
6098     + rcu_read_lock();
6099     + bt = rcu_dereference(q->blk_trace);
6100     + if (likely(!bt)) {
6101     + rcu_read_unlock();
6102     return;
6103     + }
6104    
6105     __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
6106     bio_op(bio), bio->bi_opf, what, error, 0, NULL,
6107     blk_trace_bio_get_cgid(q, bio));
6108     + rcu_read_unlock();
6109     }
6110    
6111     static void blk_add_trace_bio_bounce(void *ignore,
6112     @@ -909,11 +924,14 @@ static void blk_add_trace_getrq(void *ignore,
6113     if (bio)
6114     blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
6115     else {
6116     - struct blk_trace *bt = q->blk_trace;
6117     + struct blk_trace *bt;
6118    
6119     + rcu_read_lock();
6120     + bt = rcu_dereference(q->blk_trace);
6121     if (bt)
6122     __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
6123     NULL, NULL);
6124     + rcu_read_unlock();
6125     }
6126     }
6127    
6128     @@ -925,27 +943,35 @@ static void blk_add_trace_sleeprq(void *ignore,
6129     if (bio)
6130     blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
6131     else {
6132     - struct blk_trace *bt = q->blk_trace;
6133     + struct blk_trace *bt;
6134    
6135     + rcu_read_lock();
6136     + bt = rcu_dereference(q->blk_trace);
6137     if (bt)
6138     __blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
6139     0, 0, NULL, NULL);
6140     + rcu_read_unlock();
6141     }
6142     }
6143    
6144     static void blk_add_trace_plug(void *ignore, struct request_queue *q)
6145     {
6146     - struct blk_trace *bt = q->blk_trace;
6147     + struct blk_trace *bt;
6148    
6149     + rcu_read_lock();
6150     + bt = rcu_dereference(q->blk_trace);
6151     if (bt)
6152     __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
6153     + rcu_read_unlock();
6154     }
6155    
6156     static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
6157     unsigned int depth, bool explicit)
6158     {
6159     - struct blk_trace *bt = q->blk_trace;
6160     + struct blk_trace *bt;
6161    
6162     + rcu_read_lock();
6163     + bt = rcu_dereference(q->blk_trace);
6164     if (bt) {
6165     __be64 rpdu = cpu_to_be64(depth);
6166     u32 what;
6167     @@ -957,14 +983,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
6168    
6169     __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
6170     }
6171     + rcu_read_unlock();
6172     }
6173    
6174     static void blk_add_trace_split(void *ignore,
6175     struct request_queue *q, struct bio *bio,
6176     unsigned int pdu)
6177     {
6178     - struct blk_trace *bt = q->blk_trace;
6179     + struct blk_trace *bt;
6180    
6181     + rcu_read_lock();
6182     + bt = rcu_dereference(q->blk_trace);
6183     if (bt) {
6184     __be64 rpdu = cpu_to_be64(pdu);
6185    
6186     @@ -973,6 +1002,7 @@ static void blk_add_trace_split(void *ignore,
6187     BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
6188     &rpdu, blk_trace_bio_get_cgid(q, bio));
6189     }
6190     + rcu_read_unlock();
6191     }
6192    
6193     /**
6194     @@ -992,11 +1022,15 @@ static void blk_add_trace_bio_remap(void *ignore,
6195     struct request_queue *q, struct bio *bio,
6196     dev_t dev, sector_t from)
6197     {
6198     - struct blk_trace *bt = q->blk_trace;
6199     + struct blk_trace *bt;
6200     struct blk_io_trace_remap r;
6201    
6202     - if (likely(!bt))
6203     + rcu_read_lock();
6204     + bt = rcu_dereference(q->blk_trace);
6205     + if (likely(!bt)) {
6206     + rcu_read_unlock();
6207     return;
6208     + }
6209    
6210     r.device_from = cpu_to_be32(dev);
6211     r.device_to = cpu_to_be32(bio_dev(bio));
6212     @@ -1005,6 +1039,7 @@ static void blk_add_trace_bio_remap(void *ignore,
6213     __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
6214     bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
6215     sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
6216     + rcu_read_unlock();
6217     }
6218    
6219     /**
6220     @@ -1025,11 +1060,15 @@ static void blk_add_trace_rq_remap(void *ignore,
6221     struct request *rq, dev_t dev,
6222     sector_t from)
6223     {
6224     - struct blk_trace *bt = q->blk_trace;
6225     + struct blk_trace *bt;
6226     struct blk_io_trace_remap r;
6227    
6228     - if (likely(!bt))
6229     + rcu_read_lock();
6230     + bt = rcu_dereference(q->blk_trace);
6231     + if (likely(!bt)) {
6232     + rcu_read_unlock();
6233     return;
6234     + }
6235    
6236     r.device_from = cpu_to_be32(dev);
6237     r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
6238     @@ -1038,6 +1077,7 @@ static void blk_add_trace_rq_remap(void *ignore,
6239     __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
6240     rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
6241     sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
6242     + rcu_read_unlock();
6243     }
6244    
6245     /**
6246     @@ -1055,14 +1095,19 @@ void blk_add_driver_data(struct request_queue *q,
6247     struct request *rq,
6248     void *data, size_t len)
6249     {
6250     - struct blk_trace *bt = q->blk_trace;
6251     + struct blk_trace *bt;
6252    
6253     - if (likely(!bt))
6254     + rcu_read_lock();
6255     + bt = rcu_dereference(q->blk_trace);
6256     + if (likely(!bt)) {
6257     + rcu_read_unlock();
6258     return;
6259     + }
6260    
6261     __blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
6262     BLK_TA_DRV_DATA, 0, len, data,
6263     blk_trace_request_get_cgid(q, rq));
6264     + rcu_read_unlock();
6265     }
6266     EXPORT_SYMBOL_GPL(blk_add_driver_data);
6267    
6268     @@ -1589,6 +1634,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
6269     return -EINVAL;
6270    
6271     put_probe_ref();
6272     + synchronize_rcu();
6273     blk_trace_free(bt);
6274     return 0;
6275     }
6276     @@ -1750,6 +1796,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
6277     struct hd_struct *p = dev_to_part(dev);
6278     struct request_queue *q;
6279     struct block_device *bdev;
6280     + struct blk_trace *bt;
6281     ssize_t ret = -ENXIO;
6282    
6283     bdev = bdget(part_devt(p));
6284     @@ -1762,21 +1809,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
6285    
6286     mutex_lock(&q->blk_trace_mutex);
6287    
6288     + bt = rcu_dereference_protected(q->blk_trace,
6289     + lockdep_is_held(&q->blk_trace_mutex));
6290     if (attr == &dev_attr_enable) {
6291     - ret = sprintf(buf, "%u\n", !!q->blk_trace);
6292     + ret = sprintf(buf, "%u\n", !!bt);
6293     goto out_unlock_bdev;
6294     }
6295    
6296     - if (q->blk_trace == NULL)
6297     + if (bt == NULL)
6298     ret = sprintf(buf, "disabled\n");
6299     else if (attr == &dev_attr_act_mask)
6300     - ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
6301     + ret = blk_trace_mask2str(buf, bt->act_mask);
6302     else if (attr == &dev_attr_pid)
6303     - ret = sprintf(buf, "%u\n", q->blk_trace->pid);
6304     + ret = sprintf(buf, "%u\n", bt->pid);
6305     else if (attr == &dev_attr_start_lba)
6306     - ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
6307     + ret = sprintf(buf, "%llu\n", bt->start_lba);
6308     else if (attr == &dev_attr_end_lba)
6309     - ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
6310     + ret = sprintf(buf, "%llu\n", bt->end_lba);
6311    
6312     out_unlock_bdev:
6313     mutex_unlock(&q->blk_trace_mutex);
6314     @@ -1793,6 +1842,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
6315     struct block_device *bdev;
6316     struct request_queue *q;
6317     struct hd_struct *p;
6318     + struct blk_trace *bt;
6319     u64 value;
6320     ssize_t ret = -EINVAL;
6321    
6322     @@ -1823,8 +1873,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
6323    
6324     mutex_lock(&q->blk_trace_mutex);
6325    
6326     + bt = rcu_dereference_protected(q->blk_trace,
6327     + lockdep_is_held(&q->blk_trace_mutex));
6328     if (attr == &dev_attr_enable) {
6329     - if (!!value == !!q->blk_trace) {
6330     + if (!!value == !!bt) {
6331     ret = 0;
6332     goto out_unlock_bdev;
6333     }
6334     @@ -1836,18 +1888,18 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
6335     }
6336    
6337     ret = 0;
6338     - if (q->blk_trace == NULL)
6339     + if (bt == NULL)
6340     ret = blk_trace_setup_queue(q, bdev);
6341    
6342     if (ret == 0) {
6343     if (attr == &dev_attr_act_mask)
6344     - q->blk_trace->act_mask = value;
6345     + bt->act_mask = value;
6346     else if (attr == &dev_attr_pid)
6347     - q->blk_trace->pid = value;
6348     + bt->pid = value;
6349     else if (attr == &dev_attr_start_lba)
6350     - q->blk_trace->start_lba = value;
6351     + bt->start_lba = value;
6352     else if (attr == &dev_attr_end_lba)
6353     - q->blk_trace->end_lba = value;
6354     + bt->end_lba = value;
6355     }
6356    
6357     out_unlock_bdev:
6358     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
6359     index 341aab32c946..c6ccaf6c62f7 100644
6360     --- a/kernel/trace/trace.c
6361     +++ b/kernel/trace/trace.c
6362     @@ -1743,6 +1743,7 @@ static __init int init_trace_selftests(void)
6363    
6364     pr_info("Running postponed tracer tests:\n");
6365    
6366     + tracing_selftest_running = true;
6367     list_for_each_entry_safe(p, n, &postponed_selftests, list) {
6368     /* This loop can take minutes when sanitizers are enabled, so
6369     * lets make sure we allow RCU processing.
6370     @@ -1765,6 +1766,7 @@ static __init int init_trace_selftests(void)
6371     list_del(&p->list);
6372     kfree(p);
6373     }
6374     + tracing_selftest_running = false;
6375    
6376     out:
6377     mutex_unlock(&trace_types_lock);
6378     diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
6379     index 93d97f9b0157..f61d834e02fe 100644
6380     --- a/lib/Kconfig.debug
6381     +++ b/lib/Kconfig.debug
6382     @@ -299,17 +299,6 @@ config HEADERS_INSTALL
6383     user-space program samples. It is also needed by some features such
6384     as uapi header sanity checks.
6385    
6386     -config HEADERS_CHECK
6387     - bool "Run sanity checks on uapi headers when building 'all'"
6388     - depends on HEADERS_INSTALL
6389     - help
6390     - This option will run basic sanity checks on uapi headers when
6391     - building the 'all' target, for example, ensure that they do not
6392     - attempt to include files which were not exported, etc.
6393     -
6394     - If you're making modifications to header files which are
6395     - relevant for userspace, say 'Y'.
6396     -
6397     config OPTIMIZE_INLINING
6398     def_bool y
6399     help
6400     diff --git a/mm/debug.c b/mm/debug.c
6401     index 0461df1207cb..6a52316af839 100644
6402     --- a/mm/debug.c
6403     +++ b/mm/debug.c
6404     @@ -47,6 +47,7 @@ void __dump_page(struct page *page, const char *reason)
6405     struct address_space *mapping;
6406     bool page_poisoned = PagePoisoned(page);
6407     int mapcount;
6408     + char *type = "";
6409    
6410     /*
6411     * If struct page is poisoned don't access Page*() functions as that
6412     @@ -78,9 +79,9 @@ void __dump_page(struct page *page, const char *reason)
6413     page, page_ref_count(page), mapcount,
6414     page->mapping, page_to_pgoff(page));
6415     if (PageKsm(page))
6416     - pr_warn("ksm flags: %#lx(%pGp)\n", page->flags, &page->flags);
6417     + type = "ksm ";
6418     else if (PageAnon(page))
6419     - pr_warn("anon flags: %#lx(%pGp)\n", page->flags, &page->flags);
6420     + type = "anon ";
6421     else if (mapping) {
6422     if (mapping->host && mapping->host->i_dentry.first) {
6423     struct dentry *dentry;
6424     @@ -88,10 +89,11 @@ void __dump_page(struct page *page, const char *reason)
6425     pr_warn("%ps name:\"%pd\"\n", mapping->a_ops, dentry);
6426     } else
6427     pr_warn("%ps\n", mapping->a_ops);
6428     - pr_warn("flags: %#lx(%pGp)\n", page->flags, &page->flags);
6429     }
6430     BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
6431    
6432     + pr_warn("%sflags: %#lx(%pGp)\n", type, page->flags, &page->flags);
6433     +
6434     hex_only:
6435     print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
6436     sizeof(unsigned long), page,
6437     diff --git a/mm/gup.c b/mm/gup.c
6438     index 8f236a335ae9..745b4036cdfd 100644
6439     --- a/mm/gup.c
6440     +++ b/mm/gup.c
6441     @@ -2401,7 +2401,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages,
6442     unsigned long addr, len, end;
6443     int nr = 0, ret = 0;
6444    
6445     - if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM)))
6446     + if (WARN_ON_ONCE(gup_flags & ~(FOLL_WRITE | FOLL_LONGTERM |
6447     + FOLL_FORCE)))
6448     return -EINVAL;
6449    
6450     start = untagged_addr(start) & PAGE_MASK;
6451     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
6452     index 1de7f53621a0..6c9689281c07 100644
6453     --- a/mm/huge_memory.c
6454     +++ b/mm/huge_memory.c
6455     @@ -177,16 +177,13 @@ static ssize_t enabled_store(struct kobject *kobj,
6456     {
6457     ssize_t ret = count;
6458    
6459     - if (!memcmp("always", buf,
6460     - min(sizeof("always")-1, count))) {
6461     + if (sysfs_streq(buf, "always")) {
6462     clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
6463     set_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
6464     - } else if (!memcmp("madvise", buf,
6465     - min(sizeof("madvise")-1, count))) {
6466     + } else if (sysfs_streq(buf, "madvise")) {
6467     clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
6468     set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
6469     - } else if (!memcmp("never", buf,
6470     - min(sizeof("never")-1, count))) {
6471     + } else if (sysfs_streq(buf, "never")) {
6472     clear_bit(TRANSPARENT_HUGEPAGE_FLAG, &transparent_hugepage_flags);
6473     clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, &transparent_hugepage_flags);
6474     } else
6475     @@ -250,32 +247,27 @@ static ssize_t defrag_store(struct kobject *kobj,
6476     struct kobj_attribute *attr,
6477     const char *buf, size_t count)
6478     {
6479     - if (!memcmp("always", buf,
6480     - min(sizeof("always")-1, count))) {
6481     + if (sysfs_streq(buf, "always")) {
6482     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
6483     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
6484     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
6485     set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
6486     - } else if (!memcmp("defer+madvise", buf,
6487     - min(sizeof("defer+madvise")-1, count))) {
6488     + } else if (sysfs_streq(buf, "defer+madvise")) {
6489     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
6490     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
6491     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
6492     set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
6493     - } else if (!memcmp("defer", buf,
6494     - min(sizeof("defer")-1, count))) {
6495     + } else if (sysfs_streq(buf, "defer")) {
6496     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
6497     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
6498     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
6499     set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
6500     - } else if (!memcmp("madvise", buf,
6501     - min(sizeof("madvise")-1, count))) {
6502     + } else if (sysfs_streq(buf, "madvise")) {
6503     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
6504     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
6505     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
6506     set_bit(TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG, &transparent_hugepage_flags);
6507     - } else if (!memcmp("never", buf,
6508     - min(sizeof("never")-1, count))) {
6509     + } else if (sysfs_streq(buf, "never")) {
6510     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG, &transparent_hugepage_flags);
6511     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG, &transparent_hugepage_flags);
6512     clear_bit(TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG, &transparent_hugepage_flags);
6513     @@ -2712,7 +2704,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
6514     unsigned long flags;
6515     pgoff_t end;
6516    
6517     - VM_BUG_ON_PAGE(is_huge_zero_page(page), page);
6518     + VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
6519     VM_BUG_ON_PAGE(!PageLocked(page), page);
6520     VM_BUG_ON_PAGE(!PageCompound(page), page);
6521    
6522     diff --git a/net/core/dev.c b/net/core/dev.c
6523     index a7e2e57af63a..db8c229e0f4a 100644
6524     --- a/net/core/dev.c
6525     +++ b/net/core/dev.c
6526     @@ -146,7 +146,6 @@
6527     #include "net-sysfs.h"
6528    
6529     #define MAX_GRO_SKBS 8
6530     -#define MAX_NEST_DEV 8
6531    
6532     /* This should be increased if a protocol with a bigger head is added. */
6533     #define GRO_MAX_HEAD (MAX_HEADER + 128)
6534     @@ -3386,26 +3385,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
6535     qdisc_calculate_pkt_len(skb, q);
6536    
6537     if (q->flags & TCQ_F_NOLOCK) {
6538     - if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
6539     - qdisc_run_begin(q)) {
6540     - if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
6541     - &q->state))) {
6542     - __qdisc_drop(skb, &to_free);
6543     - rc = NET_XMIT_DROP;
6544     - goto end_run;
6545     - }
6546     - qdisc_bstats_cpu_update(q, skb);
6547     -
6548     - rc = NET_XMIT_SUCCESS;
6549     - if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
6550     - __qdisc_run(q);
6551     -
6552     -end_run:
6553     - qdisc_run_end(q);
6554     - } else {
6555     - rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
6556     - qdisc_run(q);
6557     - }
6558     + rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
6559     + qdisc_run(q);
6560    
6561     if (unlikely(to_free))
6562     kfree_skb_list(to_free);
6563     @@ -6932,8 +6913,8 @@ static int __netdev_walk_all_lower_dev(struct net_device *dev,
6564     return 0;
6565     }
6566    
6567     -static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6568     - struct list_head **iter)
6569     +struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6570     + struct list_head **iter)
6571     {
6572     struct netdev_adjacent *lower;
6573    
6574     @@ -6945,6 +6926,7 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
6575    
6576     return lower->dev;
6577     }
6578     +EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
6579    
6580     static u8 __netdev_upper_depth(struct net_device *dev)
6581     {
6582     diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
6583     index dd220ce7ca7a..bb11fc87bbae 100644
6584     --- a/net/core/fib_rules.c
6585     +++ b/net/core/fib_rules.c
6586     @@ -967,7 +967,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
6587    
6588     frh = nlmsg_data(nlh);
6589     frh->family = ops->family;
6590     - frh->table = rule->table;
6591     + frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
6592     if (nla_put_u32(skb, FRA_TABLE, rule->table))
6593     goto nla_put_failure;
6594     if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
6595     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
6596     index 7ae7065758bd..f3b7cb725c1b 100644
6597     --- a/net/ipv4/udp.c
6598     +++ b/net/ipv4/udp.c
6599     @@ -1856,8 +1856,12 @@ int __udp_disconnect(struct sock *sk, int flags)
6600     inet->inet_dport = 0;
6601     sock_rps_reset_rxhash(sk);
6602     sk->sk_bound_dev_if = 0;
6603     - if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
6604     + if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
6605     inet_reset_saddr(sk);
6606     + if (sk->sk_prot->rehash &&
6607     + (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
6608     + sk->sk_prot->rehash(sk);
6609     + }
6610    
6611     if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
6612     sk->sk_prot->unhash(sk);
6613     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
6614     index 6e2af411cd9c..c75274e0745c 100644
6615     --- a/net/ipv6/ip6_fib.c
6616     +++ b/net/ipv6/ip6_fib.c
6617     @@ -1050,8 +1050,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
6618     found++;
6619     break;
6620     }
6621     - if (rt_can_ecmp)
6622     - fallback_ins = fallback_ins ?: ins;
6623     + fallback_ins = fallback_ins ?: ins;
6624     goto next_iter;
6625     }
6626    
6627     @@ -1094,7 +1093,9 @@ next_iter:
6628     }
6629    
6630     if (fallback_ins && !found) {
6631     - /* No ECMP-able route found, replace first non-ECMP one */
6632     + /* No matching route with same ecmp-able-ness found, replace
6633     + * first matching route
6634     + */
6635     ins = fallback_ins;
6636     iter = rcu_dereference_protected(*ins,
6637     lockdep_is_held(&rt->fib6_table->tb6_lock));
6638     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
6639     index e4ed9c7b43b0..894c7370c1bd 100644
6640     --- a/net/ipv6/route.c
6641     +++ b/net/ipv6/route.c
6642     @@ -5155,6 +5155,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
6643     */
6644     cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
6645     NLM_F_REPLACE);
6646     + cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
6647     nhn++;
6648     }
6649    
6650     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
6651     index 1e3b9d34aaa4..c7d8044ff0fa 100644
6652     --- a/net/mac80211/mlme.c
6653     +++ b/net/mac80211/mlme.c
6654     @@ -2959,7 +2959,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
6655     (auth_transaction == 2 &&
6656     ifmgd->auth_data->expected_transaction == 2)) {
6657     if (!ieee80211_mark_sta_auth(sdata, bssid))
6658     - goto out_err;
6659     + return; /* ignore frame -- wait for timeout */
6660     } else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
6661     auth_transaction == 2) {
6662     sdata_info(sdata, "SAE peer confirmed\n");
6663     @@ -2967,10 +2967,6 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
6664     }
6665    
6666     cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
6667     - return;
6668     - out_err:
6669     - mutex_unlock(&sdata->local->sta_mtx);
6670     - /* ignore frame -- wait for timeout */
6671     }
6672    
6673     #define case_WLAN(type) \
6674     diff --git a/net/mac80211/util.c b/net/mac80211/util.c
6675     index 32a7a53833c0..decd46b38393 100644
6676     --- a/net/mac80211/util.c
6677     +++ b/net/mac80211/util.c
6678     @@ -1063,16 +1063,22 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
6679     elem_parse_failed = true;
6680     break;
6681     case WLAN_EID_VHT_OPERATION:
6682     - if (elen >= sizeof(struct ieee80211_vht_operation))
6683     + if (elen >= sizeof(struct ieee80211_vht_operation)) {
6684     elems->vht_operation = (void *)pos;
6685     - else
6686     - elem_parse_failed = true;
6687     + if (calc_crc)
6688     + crc = crc32_be(crc, pos - 2, elen + 2);
6689     + break;
6690     + }
6691     + elem_parse_failed = true;
6692     break;
6693     case WLAN_EID_OPMODE_NOTIF:
6694     - if (elen > 0)
6695     + if (elen > 0) {
6696     elems->opmode_notif = pos;
6697     - else
6698     - elem_parse_failed = true;
6699     + if (calc_crc)
6700     + crc = crc32_be(crc, pos - 2, elen + 2);
6701     + break;
6702     + }
6703     + elem_parse_failed = true;
6704     break;
6705     case WLAN_EID_MESH_ID:
6706     elems->mesh_id = pos;
6707     @@ -2987,10 +2993,22 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
6708     int cf0, cf1;
6709     int ccfs0, ccfs1, ccfs2;
6710     int ccf0, ccf1;
6711     + u32 vht_cap;
6712     + bool support_80_80 = false;
6713     + bool support_160 = false;
6714    
6715     if (!oper || !htop)
6716     return false;
6717    
6718     + vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap;
6719     + support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
6720     + IEEE80211_VHT_CAP_EXT_NSS_BW_MASK));
6721     + support_80_80 = ((vht_cap &
6722     + IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
6723     + (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
6724     + vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
6725     + ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >>
6726     + IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1));
6727     ccfs0 = oper->center_freq_seg0_idx;
6728     ccfs1 = oper->center_freq_seg1_idx;
6729     ccfs2 = (le16_to_cpu(htop->operation_mode) &
6730     @@ -3018,10 +3036,10 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
6731     unsigned int diff;
6732    
6733     diff = abs(ccf1 - ccf0);
6734     - if (diff == 8) {
6735     + if ((diff == 8) && support_160) {
6736     new.width = NL80211_CHAN_WIDTH_160;
6737     new.center_freq1 = cf1;
6738     - } else if (diff > 8) {
6739     + } else if ((diff > 8) && support_80_80) {
6740     new.width = NL80211_CHAN_WIDTH_80P80;
6741     new.center_freq2 = cf1;
6742     }
6743     diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
6744     index a9df9dac57b2..75da200aa5d8 100644
6745     --- a/net/netfilter/ipset/ip_set_core.c
6746     +++ b/net/netfilter/ipset/ip_set_core.c
6747     @@ -557,6 +557,20 @@ ip_set_rcu_get(struct net *net, ip_set_id_t index)
6748     return set;
6749     }
6750    
6751     +static inline void
6752     +ip_set_lock(struct ip_set *set)
6753     +{
6754     + if (!set->variant->region_lock)
6755     + spin_lock_bh(&set->lock);
6756     +}
6757     +
6758     +static inline void
6759     +ip_set_unlock(struct ip_set *set)
6760     +{
6761     + if (!set->variant->region_lock)
6762     + spin_unlock_bh(&set->lock);
6763     +}
6764     +
6765     int
6766     ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
6767     const struct xt_action_param *par, struct ip_set_adt_opt *opt)
6768     @@ -578,9 +592,9 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
6769     if (ret == -EAGAIN) {
6770     /* Type requests element to be completed */
6771     pr_debug("element must be completed, ADD is triggered\n");
6772     - spin_lock_bh(&set->lock);
6773     + ip_set_lock(set);
6774     set->variant->kadt(set, skb, par, IPSET_ADD, opt);
6775     - spin_unlock_bh(&set->lock);
6776     + ip_set_unlock(set);
6777     ret = 1;
6778     } else {
6779     /* --return-nomatch: invert matched element */
6780     @@ -609,9 +623,9 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
6781     !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
6782     return -IPSET_ERR_TYPE_MISMATCH;
6783    
6784     - spin_lock_bh(&set->lock);
6785     + ip_set_lock(set);
6786     ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
6787     - spin_unlock_bh(&set->lock);
6788     + ip_set_unlock(set);
6789    
6790     return ret;
6791     }
6792     @@ -631,9 +645,9 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
6793     !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
6794     return -IPSET_ERR_TYPE_MISMATCH;
6795    
6796     - spin_lock_bh(&set->lock);
6797     + ip_set_lock(set);
6798     ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
6799     - spin_unlock_bh(&set->lock);
6800     + ip_set_unlock(set);
6801    
6802     return ret;
6803     }
6804     @@ -1098,9 +1112,9 @@ ip_set_flush_set(struct ip_set *set)
6805     {
6806     pr_debug("set: %s\n", set->name);
6807    
6808     - spin_lock_bh(&set->lock);
6809     + ip_set_lock(set);
6810     set->variant->flush(set);
6811     - spin_unlock_bh(&set->lock);
6812     + ip_set_unlock(set);
6813     }
6814    
6815     static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
6816     @@ -1523,9 +1537,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
6817     bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
6818    
6819     do {
6820     - spin_lock_bh(&set->lock);
6821     + ip_set_lock(set);
6822     ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
6823     - spin_unlock_bh(&set->lock);
6824     + ip_set_unlock(set);
6825     retried = true;
6826     } while (ret == -EAGAIN &&
6827     set->variant->resize &&
6828     diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
6829     index d098d87bc331..2389c9f89e48 100644
6830     --- a/net/netfilter/ipset/ip_set_hash_gen.h
6831     +++ b/net/netfilter/ipset/ip_set_hash_gen.h
6832     @@ -7,13 +7,21 @@
6833     #include <linux/rcupdate.h>
6834     #include <linux/jhash.h>
6835     #include <linux/types.h>
6836     +#include <linux/netfilter/nfnetlink.h>
6837     #include <linux/netfilter/ipset/ip_set.h>
6838    
6839     -#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
6840     -#define ipset_dereference_protected(p, set) \
6841     - __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock))
6842     -
6843     -#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
6844     +#define __ipset_dereference(p) \
6845     + rcu_dereference_protected(p, 1)
6846     +#define ipset_dereference_nfnl(p) \
6847     + rcu_dereference_protected(p, \
6848     + lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
6849     +#define ipset_dereference_set(p, set) \
6850     + rcu_dereference_protected(p, \
6851     + lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
6852     + lockdep_is_held(&(set)->lock))
6853     +#define ipset_dereference_bh_nfnl(p) \
6854     + rcu_dereference_bh_check(p, \
6855     + lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
6856    
6857     /* Hashing which uses arrays to resolve clashing. The hash table is resized
6858     * (doubled) when searching becomes too long.
6859     @@ -72,11 +80,35 @@ struct hbucket {
6860     __aligned(__alignof__(u64));
6861     };
6862    
6863     +/* Region size for locking == 2^HTABLE_REGION_BITS */
6864     +#define HTABLE_REGION_BITS 10
6865     +#define ahash_numof_locks(htable_bits) \
6866     + ((htable_bits) < HTABLE_REGION_BITS ? 1 \
6867     + : jhash_size((htable_bits) - HTABLE_REGION_BITS))
6868     +#define ahash_sizeof_regions(htable_bits) \
6869     + (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
6870     +#define ahash_region(n, htable_bits) \
6871     + ((n) % ahash_numof_locks(htable_bits))
6872     +#define ahash_bucket_start(h, htable_bits) \
6873     + ((htable_bits) < HTABLE_REGION_BITS ? 0 \
6874     + : (h) * jhash_size(HTABLE_REGION_BITS))
6875     +#define ahash_bucket_end(h, htable_bits) \
6876     + ((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits) \
6877     + : ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
6878     +
6879     +struct htable_gc {
6880     + struct delayed_work dwork;
6881     + struct ip_set *set; /* Set the gc belongs to */
6882     + u32 region; /* Last gc run position */
6883     +};
6884     +
6885     /* The hash table: the table size stored here in order to make resizing easy */
6886     struct htable {
6887     atomic_t ref; /* References for resizing */
6888     - atomic_t uref; /* References for dumping */
6889     + atomic_t uref; /* References for dumping and gc */
6890     u8 htable_bits; /* size of hash table == 2^htable_bits */
6891     + u32 maxelem; /* Maxelem per region */
6892     + struct ip_set_region *hregion; /* Region locks and ext sizes */
6893     struct hbucket __rcu *bucket[0]; /* hashtable buckets */
6894     };
6895    
6896     @@ -162,6 +194,10 @@ htable_bits(u32 hashsize)
6897     #define NLEN 0
6898     #endif /* IP_SET_HASH_WITH_NETS */
6899    
6900     +#define SET_ELEM_EXPIRED(set, d) \
6901     + (SET_WITH_TIMEOUT(set) && \
6902     + ip_set_timeout_expired(ext_timeout(d, set)))
6903     +
6904     #endif /* _IP_SET_HASH_GEN_H */
6905    
6906     #ifndef MTYPE
6907     @@ -205,10 +241,12 @@ htable_bits(u32 hashsize)
6908     #undef mtype_test_cidrs
6909     #undef mtype_test
6910     #undef mtype_uref
6911     -#undef mtype_expire
6912     #undef mtype_resize
6913     +#undef mtype_ext_size
6914     +#undef mtype_resize_ad
6915     #undef mtype_head
6916     #undef mtype_list
6917     +#undef mtype_gc_do
6918     #undef mtype_gc
6919     #undef mtype_gc_init
6920     #undef mtype_variant
6921     @@ -247,10 +285,12 @@ htable_bits(u32 hashsize)
6922     #define mtype_test_cidrs IPSET_TOKEN(MTYPE, _test_cidrs)
6923     #define mtype_test IPSET_TOKEN(MTYPE, _test)
6924     #define mtype_uref IPSET_TOKEN(MTYPE, _uref)
6925     -#define mtype_expire IPSET_TOKEN(MTYPE, _expire)
6926     #define mtype_resize IPSET_TOKEN(MTYPE, _resize)
6927     +#define mtype_ext_size IPSET_TOKEN(MTYPE, _ext_size)
6928     +#define mtype_resize_ad IPSET_TOKEN(MTYPE, _resize_ad)
6929     #define mtype_head IPSET_TOKEN(MTYPE, _head)
6930     #define mtype_list IPSET_TOKEN(MTYPE, _list)
6931     +#define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do)
6932     #define mtype_gc IPSET_TOKEN(MTYPE, _gc)
6933     #define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
6934     #define mtype_variant IPSET_TOKEN(MTYPE, _variant)
6935     @@ -275,8 +315,7 @@ htable_bits(u32 hashsize)
6936     /* The generic hash structure */
6937     struct htype {
6938     struct htable __rcu *table; /* the hash table */
6939     - struct timer_list gc; /* garbage collection when timeout enabled */
6940     - struct ip_set *set; /* attached to this ip_set */
6941     + struct htable_gc gc; /* gc workqueue */
6942     u32 maxelem; /* max elements in the hash */
6943     u32 initval; /* random jhash init value */
6944     #ifdef IP_SET_HASH_WITH_MARKMASK
6945     @@ -288,21 +327,33 @@ struct htype {
6946     #ifdef IP_SET_HASH_WITH_NETMASK
6947     u8 netmask; /* netmask value for subnets to store */
6948     #endif
6949     + struct list_head ad; /* Resize add|del backlist */
6950     struct mtype_elem next; /* temporary storage for uadd */
6951     #ifdef IP_SET_HASH_WITH_NETS
6952     struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
6953     #endif
6954     };
6955    
6956     +/* ADD|DEL entries saved during resize */
6957     +struct mtype_resize_ad {
6958     + struct list_head list;
6959     + enum ipset_adt ad; /* ADD|DEL element */
6960     + struct mtype_elem d; /* Element value */
6961     + struct ip_set_ext ext; /* Extensions for ADD */
6962     + struct ip_set_ext mext; /* Target extensions for ADD */
6963     + u32 flags; /* Flags for ADD */
6964     +};
6965     +
6966     #ifdef IP_SET_HASH_WITH_NETS
6967     /* Network cidr size book keeping when the hash stores different
6968     * sized networks. cidr == real cidr + 1 to support /0.
6969     */
6970     static void
6971     -mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
6972     +mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
6973     {
6974     int i, j;
6975    
6976     + spin_lock_bh(&set->lock);
6977     /* Add in increasing prefix order, so larger cidr first */
6978     for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
6979     if (j != -1) {
6980     @@ -311,7 +362,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
6981     j = i;
6982     } else if (h->nets[i].cidr[n] == cidr) {
6983     h->nets[CIDR_POS(cidr)].nets[n]++;
6984     - return;
6985     + goto unlock;
6986     }
6987     }
6988     if (j != -1) {
6989     @@ -320,24 +371,29 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
6990     }
6991     h->nets[i].cidr[n] = cidr;
6992     h->nets[CIDR_POS(cidr)].nets[n] = 1;
6993     +unlock:
6994     + spin_unlock_bh(&set->lock);
6995     }
6996    
6997     static void
6998     -mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
6999     +mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
7000     {
7001     u8 i, j, net_end = NLEN - 1;
7002    
7003     + spin_lock_bh(&set->lock);
7004     for (i = 0; i < NLEN; i++) {
7005     if (h->nets[i].cidr[n] != cidr)
7006     continue;
7007     h->nets[CIDR_POS(cidr)].nets[n]--;
7008     if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
7009     - return;
7010     + goto unlock;
7011     for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
7012     h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
7013     h->nets[j].cidr[n] = 0;
7014     - return;
7015     + goto unlock;
7016     }
7017     +unlock:
7018     + spin_unlock_bh(&set->lock);
7019     }
7020     #endif
7021    
7022     @@ -345,7 +401,7 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
7023     static size_t
7024     mtype_ahash_memsize(const struct htype *h, const struct htable *t)
7025     {
7026     - return sizeof(*h) + sizeof(*t);
7027     + return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
7028     }
7029    
7030     /* Get the ith element from the array block n */
7031     @@ -369,24 +425,29 @@ mtype_flush(struct ip_set *set)
7032     struct htype *h = set->data;
7033     struct htable *t;
7034     struct hbucket *n;
7035     - u32 i;
7036     -
7037     - t = ipset_dereference_protected(h->table, set);
7038     - for (i = 0; i < jhash_size(t->htable_bits); i++) {
7039     - n = __ipset_dereference_protected(hbucket(t, i), 1);
7040     - if (!n)
7041     - continue;
7042     - if (set->extensions & IPSET_EXT_DESTROY)
7043     - mtype_ext_cleanup(set, n);
7044     - /* FIXME: use slab cache */
7045     - rcu_assign_pointer(hbucket(t, i), NULL);
7046     - kfree_rcu(n, rcu);
7047     + u32 r, i;
7048     +
7049     + t = ipset_dereference_nfnl(h->table);
7050     + for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
7051     + spin_lock_bh(&t->hregion[r].lock);
7052     + for (i = ahash_bucket_start(r, t->htable_bits);
7053     + i < ahash_bucket_end(r, t->htable_bits); i++) {
7054     + n = __ipset_dereference(hbucket(t, i));
7055     + if (!n)
7056     + continue;
7057     + if (set->extensions & IPSET_EXT_DESTROY)
7058     + mtype_ext_cleanup(set, n);
7059     + /* FIXME: use slab cache */
7060     + rcu_assign_pointer(hbucket(t, i), NULL);
7061     + kfree_rcu(n, rcu);
7062     + }
7063     + t->hregion[r].ext_size = 0;
7064     + t->hregion[r].elements = 0;
7065     + spin_unlock_bh(&t->hregion[r].lock);
7066     }
7067     #ifdef IP_SET_HASH_WITH_NETS
7068     memset(h->nets, 0, sizeof(h->nets));
7069     #endif
7070     - set->elements = 0;
7071     - set->ext_size = 0;
7072     }
7073    
7074     /* Destroy the hashtable part of the set */
7075     @@ -397,7 +458,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
7076     u32 i;
7077    
7078     for (i = 0; i < jhash_size(t->htable_bits); i++) {
7079     - n = __ipset_dereference_protected(hbucket(t, i), 1);
7080     + n = __ipset_dereference(hbucket(t, i));
7081     if (!n)
7082     continue;
7083     if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
7084     @@ -406,6 +467,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
7085     kfree(n);
7086     }
7087    
7088     + ip_set_free(t->hregion);
7089     ip_set_free(t);
7090     }
7091    
7092     @@ -414,28 +476,21 @@ static void
7093     mtype_destroy(struct ip_set *set)
7094     {
7095     struct htype *h = set->data;
7096     + struct list_head *l, *lt;
7097    
7098     if (SET_WITH_TIMEOUT(set))
7099     - del_timer_sync(&h->gc);
7100     + cancel_delayed_work_sync(&h->gc.dwork);
7101    
7102     - mtype_ahash_destroy(set,
7103     - __ipset_dereference_protected(h->table, 1), true);
7104     + mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
7105     + list_for_each_safe(l, lt, &h->ad) {
7106     + list_del(l);
7107     + kfree(l);
7108     + }
7109     kfree(h);
7110    
7111     set->data = NULL;
7112     }
7113    
7114     -static void
7115     -mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
7116     -{
7117     - struct htype *h = set->data;
7118     -
7119     - timer_setup(&h->gc, gc, 0);
7120     - mod_timer(&h->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
7121     - pr_debug("gc initialized, run in every %u\n",
7122     - IPSET_GC_PERIOD(set->timeout));
7123     -}
7124     -
7125     static bool
7126     mtype_same_set(const struct ip_set *a, const struct ip_set *b)
7127     {
7128     @@ -454,11 +509,9 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
7129     a->extensions == b->extensions;
7130     }
7131    
7132     -/* Delete expired elements from the hashtable */
7133     static void
7134     -mtype_expire(struct ip_set *set, struct htype *h)
7135     +mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
7136     {
7137     - struct htable *t;
7138     struct hbucket *n, *tmp;
7139     struct mtype_elem *data;
7140     u32 i, j, d;
7141     @@ -466,10 +519,12 @@ mtype_expire(struct ip_set *set, struct htype *h)
7142     #ifdef IP_SET_HASH_WITH_NETS
7143     u8 k;
7144     #endif
7145     + u8 htable_bits = t->htable_bits;
7146    
7147     - t = ipset_dereference_protected(h->table, set);
7148     - for (i = 0; i < jhash_size(t->htable_bits); i++) {
7149     - n = __ipset_dereference_protected(hbucket(t, i), 1);
7150     + spin_lock_bh(&t->hregion[r].lock);
7151     + for (i = ahash_bucket_start(r, htable_bits);
7152     + i < ahash_bucket_end(r, htable_bits); i++) {
7153     + n = __ipset_dereference(hbucket(t, i));
7154     if (!n)
7155     continue;
7156     for (j = 0, d = 0; j < n->pos; j++) {
7157     @@ -485,58 +540,100 @@ mtype_expire(struct ip_set *set, struct htype *h)
7158     smp_mb__after_atomic();
7159     #ifdef IP_SET_HASH_WITH_NETS
7160     for (k = 0; k < IPSET_NET_COUNT; k++)
7161     - mtype_del_cidr(h,
7162     + mtype_del_cidr(set, h,
7163     NCIDR_PUT(DCIDR_GET(data->cidr, k)),
7164     k);
7165     #endif
7166     + t->hregion[r].elements--;
7167     ip_set_ext_destroy(set, data);
7168     - set->elements--;
7169     d++;
7170     }
7171     if (d >= AHASH_INIT_SIZE) {
7172     if (d >= n->size) {
7173     + t->hregion[r].ext_size -=
7174     + ext_size(n->size, dsize);
7175     rcu_assign_pointer(hbucket(t, i), NULL);
7176     kfree_rcu(n, rcu);
7177     continue;
7178     }
7179     tmp = kzalloc(sizeof(*tmp) +
7180     - (n->size - AHASH_INIT_SIZE) * dsize,
7181     - GFP_ATOMIC);
7182     + (n->size - AHASH_INIT_SIZE) * dsize,
7183     + GFP_ATOMIC);
7184     if (!tmp)
7185     - /* Still try to delete expired elements */
7186     + /* Still try to delete expired elements. */
7187     continue;
7188     tmp->size = n->size - AHASH_INIT_SIZE;
7189     for (j = 0, d = 0; j < n->pos; j++) {
7190     if (!test_bit(j, n->used))
7191     continue;
7192     data = ahash_data(n, j, dsize);
7193     - memcpy(tmp->value + d * dsize, data, dsize);
7194     + memcpy(tmp->value + d * dsize,
7195     + data, dsize);
7196     set_bit(d, tmp->used);
7197     d++;
7198     }
7199     tmp->pos = d;
7200     - set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
7201     + t->hregion[r].ext_size -=
7202     + ext_size(AHASH_INIT_SIZE, dsize);
7203     rcu_assign_pointer(hbucket(t, i), tmp);
7204     kfree_rcu(n, rcu);
7205     }
7206     }
7207     + spin_unlock_bh(&t->hregion[r].lock);
7208     }
7209    
7210     static void
7211     -mtype_gc(struct timer_list *t)
7212     +mtype_gc(struct work_struct *work)
7213     {
7214     - struct htype *h = from_timer(h, t, gc);
7215     - struct ip_set *set = h->set;
7216     + struct htable_gc *gc;
7217     + struct ip_set *set;
7218     + struct htype *h;
7219     + struct htable *t;
7220     + u32 r, numof_locks;
7221     + unsigned int next_run;
7222     +
7223     + gc = container_of(work, struct htable_gc, dwork.work);
7224     + set = gc->set;
7225     + h = set->data;
7226    
7227     - pr_debug("called\n");
7228     spin_lock_bh(&set->lock);
7229     - mtype_expire(set, h);
7230     + t = ipset_dereference_set(h->table, set);
7231     + atomic_inc(&t->uref);
7232     + numof_locks = ahash_numof_locks(t->htable_bits);
7233     + r = gc->region++;
7234     + if (r >= numof_locks) {
7235     + r = gc->region = 0;
7236     + }
7237     + next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
7238     + if (next_run < HZ/10)
7239     + next_run = HZ/10;
7240     spin_unlock_bh(&set->lock);
7241    
7242     - h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
7243     - add_timer(&h->gc);
7244     + mtype_gc_do(set, h, t, r);
7245     +
7246     + if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
7247     + pr_debug("Table destroy after resize by expire: %p\n", t);
7248     + mtype_ahash_destroy(set, t, false);
7249     + }
7250     +
7251     + queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
7252     +
7253     +}
7254     +
7255     +static void
7256     +mtype_gc_init(struct htable_gc *gc)
7257     +{
7258     + INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
7259     + queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
7260     }
7261    
7262     +static int
7263     +mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7264     + struct ip_set_ext *mext, u32 flags);
7265     +static int
7266     +mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7267     + struct ip_set_ext *mext, u32 flags);
7268     +
7269     /* Resize a hash: create a new hash table with doubling the hashsize
7270     * and inserting the elements to it. Repeat until we succeed or
7271     * fail due to memory pressures.
7272     @@ -547,7 +644,7 @@ mtype_resize(struct ip_set *set, bool retried)
7273     struct htype *h = set->data;
7274     struct htable *t, *orig;
7275     u8 htable_bits;
7276     - size_t extsize, dsize = set->dsize;
7277     + size_t dsize = set->dsize;
7278     #ifdef IP_SET_HASH_WITH_NETS
7279     u8 flags;
7280     struct mtype_elem *tmp;
7281     @@ -555,7 +652,9 @@ mtype_resize(struct ip_set *set, bool retried)
7282     struct mtype_elem *data;
7283     struct mtype_elem *d;
7284     struct hbucket *n, *m;
7285     - u32 i, j, key;
7286     + struct list_head *l, *lt;
7287     + struct mtype_resize_ad *x;
7288     + u32 i, j, r, nr, key;
7289     int ret;
7290    
7291     #ifdef IP_SET_HASH_WITH_NETS
7292     @@ -563,10 +662,8 @@ mtype_resize(struct ip_set *set, bool retried)
7293     if (!tmp)
7294     return -ENOMEM;
7295     #endif
7296     - rcu_read_lock_bh();
7297     - orig = rcu_dereference_bh_nfnl(h->table);
7298     + orig = ipset_dereference_bh_nfnl(h->table);
7299     htable_bits = orig->htable_bits;
7300     - rcu_read_unlock_bh();
7301    
7302     retry:
7303     ret = 0;
7304     @@ -583,88 +680,124 @@ retry:
7305     ret = -ENOMEM;
7306     goto out;
7307     }
7308     + t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
7309     + if (!t->hregion) {
7310     + kfree(t);
7311     + ret = -ENOMEM;
7312     + goto out;
7313     + }
7314     t->htable_bits = htable_bits;
7315     + t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
7316     + for (i = 0; i < ahash_numof_locks(htable_bits); i++)
7317     + spin_lock_init(&t->hregion[i].lock);
7318    
7319     - spin_lock_bh(&set->lock);
7320     - orig = __ipset_dereference_protected(h->table, 1);
7321     - /* There can't be another parallel resizing, but dumping is possible */
7322     + /* There can't be another parallel resizing,
7323     + * but dumping, gc, kernel side add/del are possible
7324     + */
7325     + orig = ipset_dereference_bh_nfnl(h->table);
7326     atomic_set(&orig->ref, 1);
7327     atomic_inc(&orig->uref);
7328     - extsize = 0;
7329     pr_debug("attempt to resize set %s from %u to %u, t %p\n",
7330     set->name, orig->htable_bits, htable_bits, orig);
7331     - for (i = 0; i < jhash_size(orig->htable_bits); i++) {
7332     - n = __ipset_dereference_protected(hbucket(orig, i), 1);
7333     - if (!n)
7334     - continue;
7335     - for (j = 0; j < n->pos; j++) {
7336     - if (!test_bit(j, n->used))
7337     + for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
7338     + /* Expire may replace a hbucket with another one */
7339     + rcu_read_lock_bh();
7340     + for (i = ahash_bucket_start(r, orig->htable_bits);
7341     + i < ahash_bucket_end(r, orig->htable_bits); i++) {
7342     + n = __ipset_dereference(hbucket(orig, i));
7343     + if (!n)
7344     continue;
7345     - data = ahash_data(n, j, dsize);
7346     + for (j = 0; j < n->pos; j++) {
7347     + if (!test_bit(j, n->used))
7348     + continue;
7349     + data = ahash_data(n, j, dsize);
7350     + if (SET_ELEM_EXPIRED(set, data))
7351     + continue;
7352     #ifdef IP_SET_HASH_WITH_NETS
7353     - /* We have readers running parallel with us,
7354     - * so the live data cannot be modified.
7355     - */
7356     - flags = 0;
7357     - memcpy(tmp, data, dsize);
7358     - data = tmp;
7359     - mtype_data_reset_flags(data, &flags);
7360     + /* We have readers running parallel with us,
7361     + * so the live data cannot be modified.
7362     + */
7363     + flags = 0;
7364     + memcpy(tmp, data, dsize);
7365     + data = tmp;
7366     + mtype_data_reset_flags(data, &flags);
7367     #endif
7368     - key = HKEY(data, h->initval, htable_bits);
7369     - m = __ipset_dereference_protected(hbucket(t, key), 1);
7370     - if (!m) {
7371     - m = kzalloc(sizeof(*m) +
7372     + key = HKEY(data, h->initval, htable_bits);
7373     + m = __ipset_dereference(hbucket(t, key));
7374     + nr = ahash_region(key, htable_bits);
7375     + if (!m) {
7376     + m = kzalloc(sizeof(*m) +
7377     AHASH_INIT_SIZE * dsize,
7378     GFP_ATOMIC);
7379     - if (!m) {
7380     - ret = -ENOMEM;
7381     - goto cleanup;
7382     - }
7383     - m->size = AHASH_INIT_SIZE;
7384     - extsize += ext_size(AHASH_INIT_SIZE, dsize);
7385     - RCU_INIT_POINTER(hbucket(t, key), m);
7386     - } else if (m->pos >= m->size) {
7387     - struct hbucket *ht;
7388     -
7389     - if (m->size >= AHASH_MAX(h)) {
7390     - ret = -EAGAIN;
7391     - } else {
7392     - ht = kzalloc(sizeof(*ht) +
7393     + if (!m) {
7394     + ret = -ENOMEM;
7395     + goto cleanup;
7396     + }
7397     + m->size = AHASH_INIT_SIZE;
7398     + t->hregion[nr].ext_size +=
7399     + ext_size(AHASH_INIT_SIZE,
7400     + dsize);
7401     + RCU_INIT_POINTER(hbucket(t, key), m);
7402     + } else if (m->pos >= m->size) {
7403     + struct hbucket *ht;
7404     +
7405     + if (m->size >= AHASH_MAX(h)) {
7406     + ret = -EAGAIN;
7407     + } else {
7408     + ht = kzalloc(sizeof(*ht) +
7409     (m->size + AHASH_INIT_SIZE)
7410     * dsize,
7411     GFP_ATOMIC);
7412     - if (!ht)
7413     - ret = -ENOMEM;
7414     + if (!ht)
7415     + ret = -ENOMEM;
7416     + }
7417     + if (ret < 0)
7418     + goto cleanup;
7419     + memcpy(ht, m, sizeof(struct hbucket) +
7420     + m->size * dsize);
7421     + ht->size = m->size + AHASH_INIT_SIZE;
7422     + t->hregion[nr].ext_size +=
7423     + ext_size(AHASH_INIT_SIZE,
7424     + dsize);
7425     + kfree(m);
7426     + m = ht;
7427     + RCU_INIT_POINTER(hbucket(t, key), ht);
7428     }
7429     - if (ret < 0)
7430     - goto cleanup;
7431     - memcpy(ht, m, sizeof(struct hbucket) +
7432     - m->size * dsize);
7433     - ht->size = m->size + AHASH_INIT_SIZE;
7434     - extsize += ext_size(AHASH_INIT_SIZE, dsize);
7435     - kfree(m);
7436     - m = ht;
7437     - RCU_INIT_POINTER(hbucket(t, key), ht);
7438     - }
7439     - d = ahash_data(m, m->pos, dsize);
7440     - memcpy(d, data, dsize);
7441     - set_bit(m->pos++, m->used);
7442     + d = ahash_data(m, m->pos, dsize);
7443     + memcpy(d, data, dsize);
7444     + set_bit(m->pos++, m->used);
7445     + t->hregion[nr].elements++;
7446     #ifdef IP_SET_HASH_WITH_NETS
7447     - mtype_data_reset_flags(d, &flags);
7448     + mtype_data_reset_flags(d, &flags);
7449     #endif
7450     + }
7451     }
7452     + rcu_read_unlock_bh();
7453     }
7454     - rcu_assign_pointer(h->table, t);
7455     - set->ext_size = extsize;
7456    
7457     - spin_unlock_bh(&set->lock);
7458     + /* There can't be any other writer. */
7459     + rcu_assign_pointer(h->table, t);
7460    
7461     /* Give time to other readers of the set */
7462     synchronize_rcu();
7463    
7464     pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
7465     orig->htable_bits, orig, t->htable_bits, t);
7466     - /* If there's nobody else dumping the table, destroy it */
7467     + /* Add/delete elements processed by the SET target during resize.
7468     + * Kernel-side add cannot trigger a resize and userspace actions
7469     + * are serialized by the mutex.
7470     + */
7471     + list_for_each_safe(l, lt, &h->ad) {
7472     + x = list_entry(l, struct mtype_resize_ad, list);
7473     + if (x->ad == IPSET_ADD) {
7474     + mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
7475     + } else {
7476     + mtype_del(set, &x->d, NULL, NULL, 0);
7477     + }
7478     + list_del(l);
7479     + kfree(l);
7480     + }
7481     + /* If there's nobody else using the table, destroy it */
7482     if (atomic_dec_and_test(&orig->uref)) {
7483     pr_debug("Table destroy by resize %p\n", orig);
7484     mtype_ahash_destroy(set, orig, false);
7485     @@ -677,15 +810,44 @@ out:
7486     return ret;
7487    
7488     cleanup:
7489     + rcu_read_unlock_bh();
7490     atomic_set(&orig->ref, 0);
7491     atomic_dec(&orig->uref);
7492     - spin_unlock_bh(&set->lock);
7493     mtype_ahash_destroy(set, t, false);
7494     if (ret == -EAGAIN)
7495     goto retry;
7496     goto out;
7497     }
7498    
7499     +/* Get the current number of elements and ext_size in the set */
7500     +static void
7501     +mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
7502     +{
7503     + struct htype *h = set->data;
7504     + const struct htable *t;
7505     + u32 i, j, r;
7506     + struct hbucket *n;
7507     + struct mtype_elem *data;
7508     +
7509     + t = rcu_dereference_bh(h->table);
7510     + for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
7511     + for (i = ahash_bucket_start(r, t->htable_bits);
7512     + i < ahash_bucket_end(r, t->htable_bits); i++) {
7513     + n = rcu_dereference_bh(hbucket(t, i));
7514     + if (!n)
7515     + continue;
7516     + for (j = 0; j < n->pos; j++) {
7517     + if (!test_bit(j, n->used))
7518     + continue;
7519     + data = ahash_data(n, j, set->dsize);
7520     + if (!SET_ELEM_EXPIRED(set, data))
7521     + (*elements)++;
7522     + }
7523     + }
7524     + *ext_size += t->hregion[r].ext_size;
7525     + }
7526     +}
7527     +
7528     /* Add an element to a hash and update the internal counters when succeeded,
7529     * otherwise report the proper error code.
7530     */
7531     @@ -698,32 +860,49 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7532     const struct mtype_elem *d = value;
7533     struct mtype_elem *data;
7534     struct hbucket *n, *old = ERR_PTR(-ENOENT);
7535     - int i, j = -1;
7536     + int i, j = -1, ret;
7537     bool flag_exist = flags & IPSET_FLAG_EXIST;
7538     bool deleted = false, forceadd = false, reuse = false;
7539     - u32 key, multi = 0;
7540     + u32 r, key, multi = 0, elements, maxelem;
7541    
7542     - if (set->elements >= h->maxelem) {
7543     - if (SET_WITH_TIMEOUT(set))
7544     - /* FIXME: when set is full, we slow down here */
7545     - mtype_expire(set, h);
7546     - if (set->elements >= h->maxelem && SET_WITH_FORCEADD(set))
7547     + rcu_read_lock_bh();
7548     + t = rcu_dereference_bh(h->table);
7549     + key = HKEY(value, h->initval, t->htable_bits);
7550     + r = ahash_region(key, t->htable_bits);
7551     + atomic_inc(&t->uref);
7552     + elements = t->hregion[r].elements;
7553     + maxelem = t->maxelem;
7554     + if (elements >= maxelem) {
7555     + u32 e;
7556     + if (SET_WITH_TIMEOUT(set)) {
7557     + rcu_read_unlock_bh();
7558     + mtype_gc_do(set, h, t, r);
7559     + rcu_read_lock_bh();
7560     + }
7561     + maxelem = h->maxelem;
7562     + elements = 0;
7563     + for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
7564     + elements += t->hregion[e].elements;
7565     + if (elements >= maxelem && SET_WITH_FORCEADD(set))
7566     forceadd = true;
7567     }
7568     + rcu_read_unlock_bh();
7569    
7570     - t = ipset_dereference_protected(h->table, set);
7571     - key = HKEY(value, h->initval, t->htable_bits);
7572     - n = __ipset_dereference_protected(hbucket(t, key), 1);
7573     + spin_lock_bh(&t->hregion[r].lock);
7574     + n = rcu_dereference_bh(hbucket(t, key));
7575     if (!n) {
7576     - if (forceadd || set->elements >= h->maxelem)
7577     + if (forceadd || elements >= maxelem)
7578     goto set_full;
7579     old = NULL;
7580     n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
7581     GFP_ATOMIC);
7582     - if (!n)
7583     - return -ENOMEM;
7584     + if (!n) {
7585     + ret = -ENOMEM;
7586     + goto unlock;
7587     + }
7588     n->size = AHASH_INIT_SIZE;
7589     - set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
7590     + t->hregion[r].ext_size +=
7591     + ext_size(AHASH_INIT_SIZE, set->dsize);
7592     goto copy_elem;
7593     }
7594     for (i = 0; i < n->pos; i++) {
7595     @@ -737,38 +916,37 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7596     }
7597     data = ahash_data(n, i, set->dsize);
7598     if (mtype_data_equal(data, d, &multi)) {
7599     - if (flag_exist ||
7600     - (SET_WITH_TIMEOUT(set) &&
7601     - ip_set_timeout_expired(ext_timeout(data, set)))) {
7602     + if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
7603     /* Just the extensions could be overwritten */
7604     j = i;
7605     goto overwrite_extensions;
7606     }
7607     - return -IPSET_ERR_EXIST;
7608     + ret = -IPSET_ERR_EXIST;
7609     + goto unlock;
7610     }
7611     /* Reuse first timed out entry */
7612     - if (SET_WITH_TIMEOUT(set) &&
7613     - ip_set_timeout_expired(ext_timeout(data, set)) &&
7614     - j == -1) {
7615     + if (SET_ELEM_EXPIRED(set, data) && j == -1) {
7616     j = i;
7617     reuse = true;
7618     }
7619     }
7620     if (reuse || forceadd) {
7621     + if (j == -1)
7622     + j = 0;
7623     data = ahash_data(n, j, set->dsize);
7624     if (!deleted) {
7625     #ifdef IP_SET_HASH_WITH_NETS
7626     for (i = 0; i < IPSET_NET_COUNT; i++)
7627     - mtype_del_cidr(h,
7628     + mtype_del_cidr(set, h,
7629     NCIDR_PUT(DCIDR_GET(data->cidr, i)),
7630     i);
7631     #endif
7632     ip_set_ext_destroy(set, data);
7633     - set->elements--;
7634     + t->hregion[r].elements--;
7635     }
7636     goto copy_data;
7637     }
7638     - if (set->elements >= h->maxelem)
7639     + if (elements >= maxelem)
7640     goto set_full;
7641     /* Create a new slot */
7642     if (n->pos >= n->size) {
7643     @@ -776,28 +954,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7644     if (n->size >= AHASH_MAX(h)) {
7645     /* Trigger rehashing */
7646     mtype_data_next(&h->next, d);
7647     - return -EAGAIN;
7648     + ret = -EAGAIN;
7649     + goto resize;
7650     }
7651     old = n;
7652     n = kzalloc(sizeof(*n) +
7653     (old->size + AHASH_INIT_SIZE) * set->dsize,
7654     GFP_ATOMIC);
7655     - if (!n)
7656     - return -ENOMEM;
7657     + if (!n) {
7658     + ret = -ENOMEM;
7659     + goto unlock;
7660     + }
7661     memcpy(n, old, sizeof(struct hbucket) +
7662     old->size * set->dsize);
7663     n->size = old->size + AHASH_INIT_SIZE;
7664     - set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
7665     + t->hregion[r].ext_size +=
7666     + ext_size(AHASH_INIT_SIZE, set->dsize);
7667     }
7668    
7669     copy_elem:
7670     j = n->pos++;
7671     data = ahash_data(n, j, set->dsize);
7672     copy_data:
7673     - set->elements++;
7674     + t->hregion[r].elements++;
7675     #ifdef IP_SET_HASH_WITH_NETS
7676     for (i = 0; i < IPSET_NET_COUNT; i++)
7677     - mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
7678     + mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
7679     #endif
7680     memcpy(data, d, sizeof(struct mtype_elem));
7681     overwrite_extensions:
7682     @@ -820,13 +1002,41 @@ overwrite_extensions:
7683     if (old)
7684     kfree_rcu(old, rcu);
7685     }
7686     + ret = 0;
7687     +resize:
7688     + spin_unlock_bh(&t->hregion[r].lock);
7689     + if (atomic_read(&t->ref) && ext->target) {
7690     + /* Resize is in process and kernel side add, save values */
7691     + struct mtype_resize_ad *x;
7692     +
7693     + x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
7694     + if (!x)
7695     + /* Don't bother */
7696     + goto out;
7697     + x->ad = IPSET_ADD;
7698     + memcpy(&x->d, value, sizeof(struct mtype_elem));
7699     + memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
7700     + memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
7701     + x->flags = flags;
7702     + spin_lock_bh(&set->lock);
7703     + list_add_tail(&x->list, &h->ad);
7704     + spin_unlock_bh(&set->lock);
7705     + }
7706     + goto out;
7707    
7708     - return 0;
7709     set_full:
7710     if (net_ratelimit())
7711     pr_warn("Set %s is full, maxelem %u reached\n",
7712     - set->name, h->maxelem);
7713     - return -IPSET_ERR_HASH_FULL;
7714     + set->name, maxelem);
7715     + ret = -IPSET_ERR_HASH_FULL;
7716     +unlock:
7717     + spin_unlock_bh(&t->hregion[r].lock);
7718     +out:
7719     + if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
7720     + pr_debug("Table destroy after resize by add: %p\n", t);
7721     + mtype_ahash_destroy(set, t, false);
7722     + }
7723     + return ret;
7724     }
7725    
7726     /* Delete an element from the hash and free up space if possible.
7727     @@ -840,13 +1050,23 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7728     const struct mtype_elem *d = value;
7729     struct mtype_elem *data;
7730     struct hbucket *n;
7731     - int i, j, k, ret = -IPSET_ERR_EXIST;
7732     + struct mtype_resize_ad *x = NULL;
7733     + int i, j, k, r, ret = -IPSET_ERR_EXIST;
7734     u32 key, multi = 0;
7735     size_t dsize = set->dsize;
7736    
7737     - t = ipset_dereference_protected(h->table, set);
7738     + /* Userspace add and resize are excluded by the mutex.
7739     + * Kernelspace add does not trigger a resize.
7740     + */
7741     + rcu_read_lock_bh();
7742     + t = rcu_dereference_bh(h->table);
7743     key = HKEY(value, h->initval, t->htable_bits);
7744     - n = __ipset_dereference_protected(hbucket(t, key), 1);
7745     + r = ahash_region(key, t->htable_bits);
7746     + atomic_inc(&t->uref);
7747     + rcu_read_unlock_bh();
7748     +
7749     + spin_lock_bh(&t->hregion[r].lock);
7750     + n = rcu_dereference_bh(hbucket(t, key));
7751     if (!n)
7752     goto out;
7753     for (i = 0, k = 0; i < n->pos; i++) {
7754     @@ -857,8 +1077,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7755     data = ahash_data(n, i, dsize);
7756     if (!mtype_data_equal(data, d, &multi))
7757     continue;
7758     - if (SET_WITH_TIMEOUT(set) &&
7759     - ip_set_timeout_expired(ext_timeout(data, set)))
7760     + if (SET_ELEM_EXPIRED(set, data))
7761     goto out;
7762    
7763     ret = 0;
7764     @@ -866,20 +1085,33 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7765     smp_mb__after_atomic();
7766     if (i + 1 == n->pos)
7767     n->pos--;
7768     - set->elements--;
7769     + t->hregion[r].elements--;
7770     #ifdef IP_SET_HASH_WITH_NETS
7771     for (j = 0; j < IPSET_NET_COUNT; j++)
7772     - mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)),
7773     - j);
7774     + mtype_del_cidr(set, h,
7775     + NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
7776     #endif
7777     ip_set_ext_destroy(set, data);
7778    
7779     + if (atomic_read(&t->ref) && ext->target) {
7780     + /* Resize is in process and kernel side del,
7781     + * save values
7782     + */
7783     + x = kzalloc(sizeof(struct mtype_resize_ad),
7784     + GFP_ATOMIC);
7785     + if (x) {
7786     + x->ad = IPSET_DEL;
7787     + memcpy(&x->d, value,
7788     + sizeof(struct mtype_elem));
7789     + x->flags = flags;
7790     + }
7791     + }
7792     for (; i < n->pos; i++) {
7793     if (!test_bit(i, n->used))
7794     k++;
7795     }
7796     if (n->pos == 0 && k == 0) {
7797     - set->ext_size -= ext_size(n->size, dsize);
7798     + t->hregion[r].ext_size -= ext_size(n->size, dsize);
7799     rcu_assign_pointer(hbucket(t, key), NULL);
7800     kfree_rcu(n, rcu);
7801     } else if (k >= AHASH_INIT_SIZE) {
7802     @@ -898,7 +1130,8 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7803     k++;
7804     }
7805     tmp->pos = k;
7806     - set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
7807     + t->hregion[r].ext_size -=
7808     + ext_size(AHASH_INIT_SIZE, dsize);
7809     rcu_assign_pointer(hbucket(t, key), tmp);
7810     kfree_rcu(n, rcu);
7811     }
7812     @@ -906,6 +1139,16 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7813     }
7814    
7815     out:
7816     + spin_unlock_bh(&t->hregion[r].lock);
7817     + if (x) {
7818     + spin_lock_bh(&set->lock);
7819     + list_add(&x->list, &h->ad);
7820     + spin_unlock_bh(&set->lock);
7821     + }
7822     + if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
7823     + pr_debug("Table destroy after resize by del: %p\n", t);
7824     + mtype_ahash_destroy(set, t, false);
7825     + }
7826     return ret;
7827     }
7828    
7829     @@ -991,6 +1234,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7830     int i, ret = 0;
7831     u32 key, multi = 0;
7832    
7833     + rcu_read_lock_bh();
7834     t = rcu_dereference_bh(h->table);
7835     #ifdef IP_SET_HASH_WITH_NETS
7836     /* If we test an IP address and not a network address,
7837     @@ -1022,6 +1266,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
7838     goto out;
7839     }
7840     out:
7841     + rcu_read_unlock_bh();
7842     return ret;
7843     }
7844    
7845     @@ -1033,23 +1278,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
7846     const struct htable *t;
7847     struct nlattr *nested;
7848     size_t memsize;
7849     + u32 elements = 0;
7850     + size_t ext_size = 0;
7851     u8 htable_bits;
7852    
7853     - /* If any members have expired, set->elements will be wrong
7854     - * mytype_expire function will update it with the right count.
7855     - * we do not hold set->lock here, so grab it first.
7856     - * set->elements can still be incorrect in the case of a huge set,
7857     - * because elements might time out during the listing.
7858     - */
7859     - if (SET_WITH_TIMEOUT(set)) {
7860     - spin_lock_bh(&set->lock);
7861     - mtype_expire(set, h);
7862     - spin_unlock_bh(&set->lock);
7863     - }
7864     -
7865     rcu_read_lock_bh();
7866     - t = rcu_dereference_bh_nfnl(h->table);
7867     - memsize = mtype_ahash_memsize(h, t) + set->ext_size;
7868     + t = rcu_dereference_bh(h->table);
7869     + mtype_ext_size(set, &elements, &ext_size);
7870     + memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
7871     htable_bits = t->htable_bits;
7872     rcu_read_unlock_bh();
7873    
7874     @@ -1071,7 +1307,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
7875     #endif
7876     if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
7877     nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
7878     - nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
7879     + nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
7880     goto nla_put_failure;
7881     if (unlikely(ip_set_put_flags(skb, set)))
7882     goto nla_put_failure;
7883     @@ -1091,15 +1327,15 @@ mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
7884    
7885     if (start) {
7886     rcu_read_lock_bh();
7887     - t = rcu_dereference_bh_nfnl(h->table);
7888     + t = ipset_dereference_bh_nfnl(h->table);
7889     atomic_inc(&t->uref);
7890     cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
7891     rcu_read_unlock_bh();
7892     } else if (cb->args[IPSET_CB_PRIVATE]) {
7893     t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
7894     if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
7895     - /* Resizing didn't destroy the hash table */
7896     - pr_debug("Table destroy by dump: %p\n", t);
7897     + pr_debug("Table destroy after resize "
7898     + "by dump: %p\n", t);
7899     mtype_ahash_destroy(set, t, false);
7900     }
7901     cb->args[IPSET_CB_PRIVATE] = 0;
7902     @@ -1141,8 +1377,7 @@ mtype_list(const struct ip_set *set,
7903     if (!test_bit(i, n->used))
7904     continue;
7905     e = ahash_data(n, i, set->dsize);
7906     - if (SET_WITH_TIMEOUT(set) &&
7907     - ip_set_timeout_expired(ext_timeout(e, set)))
7908     + if (SET_ELEM_EXPIRED(set, e))
7909     continue;
7910     pr_debug("list hash %lu hbucket %p i %u, data %p\n",
7911     cb->args[IPSET_CB_ARG0], n, i, e);
7912     @@ -1208,6 +1443,7 @@ static const struct ip_set_type_variant mtype_variant = {
7913     .uref = mtype_uref,
7914     .resize = mtype_resize,
7915     .same_set = mtype_same_set,
7916     + .region_lock = true,
7917     };
7918    
7919     #ifdef IP_SET_EMIT_CREATE
7920     @@ -1226,6 +1462,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7921     size_t hsize;
7922     struct htype *h;
7923     struct htable *t;
7924     + u32 i;
7925    
7926     pr_debug("Create set %s with family %s\n",
7927     set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
7928     @@ -1294,6 +1531,15 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7929     kfree(h);
7930     return -ENOMEM;
7931     }
7932     + t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
7933     + if (!t->hregion) {
7934     + kfree(t);
7935     + kfree(h);
7936     + return -ENOMEM;
7937     + }
7938     + h->gc.set = set;
7939     + for (i = 0; i < ahash_numof_locks(hbits); i++)
7940     + spin_lock_init(&t->hregion[i].lock);
7941     h->maxelem = maxelem;
7942     #ifdef IP_SET_HASH_WITH_NETMASK
7943     h->netmask = netmask;
7944     @@ -1304,9 +1550,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7945     get_random_bytes(&h->initval, sizeof(h->initval));
7946    
7947     t->htable_bits = hbits;
7948     + t->maxelem = h->maxelem / ahash_numof_locks(hbits);
7949     RCU_INIT_POINTER(h->table, t);
7950    
7951     - h->set = set;
7952     + INIT_LIST_HEAD(&h->ad);
7953     set->data = h;
7954     #ifndef IP_SET_PROTO_UNDEF
7955     if (set->family == NFPROTO_IPV4) {
7956     @@ -1329,12 +1576,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
7957     #ifndef IP_SET_PROTO_UNDEF
7958     if (set->family == NFPROTO_IPV4)
7959     #endif
7960     - IPSET_TOKEN(HTYPE, 4_gc_init)(set,
7961     - IPSET_TOKEN(HTYPE, 4_gc));
7962     + IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
7963     #ifndef IP_SET_PROTO_UNDEF
7964     else
7965     - IPSET_TOKEN(HTYPE, 6_gc_init)(set,
7966     - IPSET_TOKEN(HTYPE, 6_gc));
7967     + IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
7968     #endif
7969     }
7970     pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
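For context, the region-locking arithmetic introduced above (HTABLE_REGION_BITS and the ahash_* macros) is easiest to see with concrete numbers. The small user-space sketch below mirrors those macros, assuming jhash_size(n) is 1U << n as in <linux/jhash.h>: a table with htable_bits = 14 (16384 buckets) gets 16 lock regions of 1024 consecutive buckets each, and a bucket index is mapped to a lock by a simple modulo.

    /* Illustrative user-space mirror of the region macros above; not kernel code. */
    #include <stdio.h>

    #define HTABLE_REGION_BITS 10
    #define jhash_size(n)               ((unsigned int)1 << (n))
    #define ahash_numof_locks(bits)     ((bits) < HTABLE_REGION_BITS ? 1 \
    	: jhash_size((bits) - HTABLE_REGION_BITS))
    #define ahash_region(n, bits)       ((n) % ahash_numof_locks(bits))
    #define ahash_bucket_start(h, bits) ((bits) < HTABLE_REGION_BITS ? 0 \
    	: (h) * jhash_size(HTABLE_REGION_BITS))
    #define ahash_bucket_end(h, bits)   ((bits) < HTABLE_REGION_BITS ? jhash_size(bits) \
    	: ((h) + 1) * jhash_size(HTABLE_REGION_BITS))

    int main(void)
    {
    	unsigned int bits = 14;	/* 2^14 = 16384 buckets */
    	unsigned int h;

    	printf("lock regions: %u\n", ahash_numof_locks(bits));	/* 16 */
    	for (h = 0; h < ahash_numof_locks(bits); h++)
    		printf("region %2u covers buckets [%u, %u)\n", h,
    		       ahash_bucket_start(h, bits),
    		       ahash_bucket_end(h, bits));
    	printf("bucket 12345 uses lock region %u\n",
    	       ahash_region(12345u, bits));	/* 12345 % 16 == 9 */
    	return 0;
    }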
7971     diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
7972     index f8d2919cf9fd..037e8fce9b30 100644
7973     --- a/net/netfilter/nft_tunnel.c
7974     +++ b/net/netfilter/nft_tunnel.c
7975     @@ -505,8 +505,8 @@ static int nft_tunnel_opts_dump(struct sk_buff *skb,
7976     static int nft_tunnel_ports_dump(struct sk_buff *skb,
7977     struct ip_tunnel_info *info)
7978     {
7979     - if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, htons(info->key.tp_src)) < 0 ||
7980     - nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, htons(info->key.tp_dst)) < 0)
7981     + if (nla_put_be16(skb, NFTA_TUNNEL_KEY_SPORT, info->key.tp_src) < 0 ||
7982     + nla_put_be16(skb, NFTA_TUNNEL_KEY_DPORT, info->key.tp_dst) < 0)
7983     return -1;
7984    
7985     return 0;
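The nft_tunnel change above removes a double byte swap: info->key.tp_src and tp_dst are already stored in network byte order (__be16), so passing them through htons() again flipped the port bytes on little-endian hosts before nla_put_be16() copied them out. A hypothetical user-space illustration of the symptom:

    /* Illustration only: why htons() on an already big-endian value is wrong. */
    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
    	uint16_t tp_src = htons(4789);	/* field kept in network byte order */

    	/* fixed code: emit the __be16 field as-is */
    	printf("decoded port: %u\n", ntohs(tp_src));		/* 4789 */
    	/* old code: a second htons() before emitting */
    	printf("decoded port: %u\n", ntohs(htons(tp_src)));	/* 46354 on little-endian */
    	return 0;
    }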
7986     diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
7987     index 6520d9ec1297..1b68a131083c 100644
7988     --- a/net/netfilter/xt_hashlimit.c
7989     +++ b/net/netfilter/xt_hashlimit.c
7990     @@ -36,6 +36,7 @@
7991     #include <linux/netfilter_ipv6/ip6_tables.h>
7992     #include <linux/mutex.h>
7993     #include <linux/kernel.h>
7994     +#include <linux/refcount.h>
7995     #include <uapi/linux/netfilter/xt_hashlimit.h>
7996    
7997     #define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
7998     @@ -114,7 +115,7 @@ struct dsthash_ent {
7999    
8000     struct xt_hashlimit_htable {
8001     struct hlist_node node; /* global list of all htables */
8002     - int use;
8003     + refcount_t use;
8004     u_int8_t family;
8005     bool rnd_initialized;
8006    
8007     @@ -315,7 +316,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
8008     for (i = 0; i < hinfo->cfg.size; i++)
8009     INIT_HLIST_HEAD(&hinfo->hash[i]);
8010    
8011     - hinfo->use = 1;
8012     + refcount_set(&hinfo->use, 1);
8013     hinfo->count = 0;
8014     hinfo->family = family;
8015     hinfo->rnd_initialized = false;
8016     @@ -434,7 +435,7 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
8017     hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
8018     if (!strcmp(name, hinfo->name) &&
8019     hinfo->family == family) {
8020     - hinfo->use++;
8021     + refcount_inc(&hinfo->use);
8022     return hinfo;
8023     }
8024     }
8025     @@ -443,12 +444,11 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
8026    
8027     static void htable_put(struct xt_hashlimit_htable *hinfo)
8028     {
8029     - mutex_lock(&hashlimit_mutex);
8030     - if (--hinfo->use == 0) {
8031     + if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
8032     hlist_del(&hinfo->node);
8033     + mutex_unlock(&hashlimit_mutex);
8034     htable_destroy(hinfo);
8035     }
8036     - mutex_unlock(&hashlimit_mutex);
8037     }
8038    
8039     /* The algorithm used is the Simple Token Bucket Filter (TBF)
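The htable_put() rewrite above leans on refcount_dec_and_mutex_lock(): the reference count is dropped atomically, and the mutex is taken (and returned held) only when the count actually reaches zero, instead of serializing every put on hashlimit_mutex. A hedged sketch of the same pattern for a hypothetical object (the "foo" names are illustrative, not from the patch):

    #include <linux/list.h>
    #include <linux/mutex.h>
    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct foo {
    	struct hlist_node node;	/* on a global list guarded by foo_mutex */
    	refcount_t use;
    };

    static DEFINE_MUTEX(foo_mutex);

    static void foo_put(struct foo *f)
    {
    	/* Fast path: drop the reference without touching the mutex. */
    	if (!refcount_dec_and_mutex_lock(&f->use, &foo_mutex))
    		return;
    	/* Count hit zero: foo_mutex is held here, so unlink, then free. */
    	hlist_del(&f->node);
    	mutex_unlock(&foo_mutex);
    	kfree(f);
    }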
8040     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
8041     index 90b2ab9dd449..e64b8784d487 100644
8042     --- a/net/netlink/af_netlink.c
8043     +++ b/net/netlink/af_netlink.c
8044     @@ -1014,7 +1014,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
8045     if (nlk->netlink_bind && groups) {
8046     int group;
8047    
8048     - for (group = 0; group < nlk->ngroups; group++) {
8049     + /* nl_groups is a u32, so cap the maximum groups we can bind */
8050     + for (group = 0; group < BITS_PER_TYPE(u32); group++) {
8051     if (!test_bit(group, &groups))
8052     continue;
8053     err = nlk->netlink_bind(net, group + 1);
8054     @@ -1033,7 +1034,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
8055     netlink_insert(sk, nladdr->nl_pid) :
8056     netlink_autobind(sock);
8057     if (err) {
8058     - netlink_undo_bind(nlk->ngroups, groups, sk);
8059     + netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
8060     goto unlock;
8061     }
8062     }
8063     diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
8064     index 5eceeee593cf..1d270540e74d 100644
8065     --- a/net/sched/cls_flower.c
8066     +++ b/net/sched/cls_flower.c
8067     @@ -303,6 +303,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
8068     struct cls_fl_filter *f;
8069    
8070     list_for_each_entry_rcu(mask, &head->masks, list) {
8071     + flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
8072     fl_clear_masked_range(&skb_key, mask);
8073    
8074     skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
8075     diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
8076     index 4ab8208a2dd4..c6d83a64eac3 100644
8077     --- a/net/sctp/sm_statefuns.c
8078     +++ b/net/sctp/sm_statefuns.c
8079     @@ -170,6 +170,16 @@ static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
8080     return true;
8081     }
8082    
8083     +/* Check for format error in an ABORT chunk */
8084     +static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
8085     +{
8086     + struct sctp_errhdr *err;
8087     +
8088     + sctp_walk_errors(err, chunk->chunk_hdr);
8089     +
8090     + return (void *)err == (void *)chunk->chunk_end;
8091     +}
8092     +
8093     /**********************************************************
8094     * These are the state functions for handling chunk events.
8095     **********************************************************/
8096     @@ -2255,6 +2265,9 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
8097     sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
8098     return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
8099    
8100     + if (!sctp_err_chunk_valid(chunk))
8101     + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
8102     +
8103     return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
8104     }
8105    
8106     @@ -2298,6 +2311,9 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
8107     sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
8108     return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
8109    
8110     + if (!sctp_err_chunk_valid(chunk))
8111     + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
8112     +
8113     /* Stop the T2-shutdown timer. */
8114     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
8115     SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
8116     @@ -2565,6 +2581,9 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
8117     sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
8118     return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
8119    
8120     + if (!sctp_err_chunk_valid(chunk))
8121     + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
8122     +
8123     return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
8124     }
8125    
8126     @@ -2582,16 +2601,8 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
8127    
8128     /* See if we have an error cause code in the chunk. */
8129     len = ntohs(chunk->chunk_hdr->length);
8130     - if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
8131     - struct sctp_errhdr *err;
8132     -
8133     - sctp_walk_errors(err, chunk->chunk_hdr);
8134     - if ((void *)err != (void *)chunk->chunk_end)
8135     - return sctp_sf_pdiscard(net, ep, asoc, type, arg,
8136     - commands);
8137     -
8138     + if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
8139     error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
8140     - }
8141    
8142     sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
8143     /* ASSOC_FAILED will DELETE_TCB. */
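The new sctp_err_chunk_valid() helper above accepts an ABORT chunk only when sctp_walk_errors() steps through every error-cause TLV and lands exactly on chunk->chunk_end; a truncated or over-long cause header makes the walk stop early, so the chunk is discarded before __sctp_sf_do_9_1_abort() reads the first cause. A hypothetical user-space sketch of that "walk must end exactly at the end" idea (field names illustrative; byte order ignored for brevity):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct errhdr {			/* stand-in for struct sctp_errhdr */
    	uint16_t cause;
    	uint16_t length;	/* cause length, header included */
    };

    static bool errors_valid(const uint8_t *buf, size_t len)
    {
    	const uint8_t *p = buf, *end = buf + len;

    	while (p + sizeof(struct errhdr) <= end) {
    		const struct errhdr *e = (const struct errhdr *)p;
    		size_t step = ((size_t)e->length + 3) & ~(size_t)3;	/* causes are 4-byte padded */

    		if (e->length < sizeof(struct errhdr) || step > (size_t)(end - p))
    			return false;	/* malformed cause: give up */
    		p += step;
    	}
    	return p == end;	/* valid only if the walk ends exactly at the end */
    }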
8144     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
8145     index 6a6d3b2aa5a9..dc09a72f8110 100644
8146     --- a/net/smc/af_smc.c
8147     +++ b/net/smc/af_smc.c
8148     @@ -467,6 +467,8 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
8149     if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
8150     smc->clcsock->file = smc->sk.sk_socket->file;
8151     smc->clcsock->file->private_data = smc->clcsock;
8152     + smc->clcsock->wq.fasync_list =
8153     + smc->sk.sk_socket->wq.fasync_list;
8154     }
8155     }
8156    
8157     diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
8158     index 49bcebff6378..aee9ccfa99c2 100644
8159     --- a/net/smc/smc_clc.c
8160     +++ b/net/smc/smc_clc.c
8161     @@ -372,7 +372,9 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
8162     dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
8163     dclc.hdr.version = SMC_CLC_V1;
8164     dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
8165     - memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
8166     + if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
8167     + memcpy(dclc.id_for_peer, local_systemid,
8168     + sizeof(local_systemid));
8169     dclc.peer_diagnosis = htonl(peer_diag_info);
8170     memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
8171    
8172     diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
8173     index 3f5209e2d4ee..1adeb1c0473b 100644
8174     --- a/net/tls/tls_device.c
8175     +++ b/net/tls/tls_device.c
8176     @@ -581,7 +581,7 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
8177     u32 seq, u64 *p_record_sn)
8178     {
8179     u64 record_sn = context->hint_record_sn;
8180     - struct tls_record_info *info;
8181     + struct tls_record_info *info, *last;
8182    
8183     info = context->retransmit_hint;
8184     if (!info ||
8185     @@ -593,6 +593,24 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
8186     struct tls_record_info, list);
8187     if (!info)
8188     return NULL;
8189     + /* send the start_marker record if seq number is before the
8190     + * tls offload start marker sequence number. This record is
8191     + * required to handle TCP packets which are before TLS offload
8192     + * started.
8193     + * And if it's not start marker, look if this seq number
8194     + * belongs to the list.
8195     + */
8196     + if (likely(!tls_record_is_start_marker(info))) {
8197     + /* we have the first record, get the last record to see
8198     + * if this seq number belongs to the list.
8199     + */
8200     + last = list_last_entry(&context->records_list,
8201     + struct tls_record_info, list);
8202     +
8203     + if (!between(seq, tls_record_start_seq(info),
8204     + last->end_seq))
8205     + return NULL;
8206     + }
8207     record_sn = context->unacked_record_sn;
8208     }
8209    
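The tls_get_record() change above makes the lookup fail fast when the requested TCP sequence number is not covered by the offload records list, instead of walking the list and possibly returning a wrong record. The between() helper it uses is assumed here to be the wraparound-safe sequence comparison from include/net/tcp.h (seq3 - seq2 >= seq1 - seq2); a small illustration across a 32-bit sequence wrap:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shape of between(): true iff seq2 <= seq1 <= seq3 modulo 2^32. */
    static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
    {
    	return seq3 - seq2 >= seq1 - seq2;
    }

    int main(void)
    {
    	uint32_t first = 0xfffffff0u;	/* first record starts just before the wrap */
    	uint32_t last  = 0x00000100u;	/* last record ends just after the wrap */

    	printf("%d\n", between(0xfffffffau, first, last));	/* 1: covered */
    	printf("%d\n", between(0x00000042u, first, last));	/* 1: covered, after wrap */
    	printf("%d\n", between(0x00001000u, first, last));	/* 0: past the list */
    	return 0;
    }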
8210     diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
8211     index a9c0f368db5d..24e18405cdb4 100644
8212     --- a/net/wireless/ethtool.c
8213     +++ b/net/wireless/ethtool.c
8214     @@ -7,9 +7,13 @@
8215     void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
8216     {
8217     struct wireless_dev *wdev = dev->ieee80211_ptr;
8218     + struct device *pdev = wiphy_dev(wdev->wiphy);
8219    
8220     - strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
8221     - sizeof(info->driver));
8222     + if (pdev->driver)
8223     + strlcpy(info->driver, pdev->driver->name,
8224     + sizeof(info->driver));
8225     + else
8226     + strlcpy(info->driver, "N/A", sizeof(info->driver));
8227    
8228     strlcpy(info->version, init_utsname()->release, sizeof(info->version));
8229    
8230     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
8231     index c74646b7a751..17514744af9e 100644
8232     --- a/net/wireless/nl80211.c
8233     +++ b/net/wireless/nl80211.c
8234     @@ -437,6 +437,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
8235     [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
8236     [NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
8237     [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
8238     + [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
8239     [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
8240     [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
8241     [NL80211_ATTR_PID] = { .type = NLA_U32 },
8242     @@ -4794,8 +4795,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
8243     err = nl80211_parse_he_obss_pd(
8244     info->attrs[NL80211_ATTR_HE_OBSS_PD],
8245     &params.he_obss_pd);
8246     - if (err)
8247     - return err;
8248     + goto out;
8249     }
8250    
8251     nl80211_calculate_ap_params(&params);
8252     @@ -4817,6 +4817,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
8253     }
8254     wdev_unlock(wdev);
8255    
8256     +out:
8257     kfree(params.acl);
8258    
8259     return err;
8260     diff --git a/scripts/Makefile.build b/scripts/Makefile.build
8261     index a9e47953ca53..24a33c01bbf7 100644
8262     --- a/scripts/Makefile.build
8263     +++ b/scripts/Makefile.build
8264     @@ -283,15 +283,6 @@ quiet_cmd_cc_lst_c = MKLST $@
8265     $(obj)/%.lst: $(src)/%.c FORCE
8266     $(call if_changed_dep,cc_lst_c)
8267    
8268     -# header test (header-test-y, header-test-m target)
8269     -# ---------------------------------------------------------------------------
8270     -
8271     -quiet_cmd_cc_s_h = CC $@
8272     - cmd_cc_s_h = $(CC) $(c_flags) -S -o $@ -x c /dev/null -include $<
8273     -
8274     -$(obj)/%.h.s: $(src)/%.h FORCE
8275     - $(call if_changed_dep,cc_s_h)
8276     -
8277     # Compile assembler sources (.S)
8278     # ---------------------------------------------------------------------------
8279    
8280     diff --git a/scripts/Makefile.headersinst b/scripts/Makefile.headersinst
8281     index 1b405a7ed14f..708fbd08a2c5 100644
8282     --- a/scripts/Makefile.headersinst
8283     +++ b/scripts/Makefile.headersinst
8284     @@ -56,9 +56,6 @@ new-dirs := $(filter-out $(existing-dirs), $(wanted-dirs))
8285     $(if $(new-dirs), $(shell mkdir -p $(new-dirs)))
8286    
8287     # Rules
8288     -
8289     -ifndef HDRCHECK
8290     -
8291     quiet_cmd_install = HDRINST $@
8292     cmd_install = $(CONFIG_SHELL) $(srctree)/scripts/headers_install.sh $< $@
8293    
8294     @@ -81,21 +78,6 @@ existing-headers := $(filter $(old-headers), $(all-headers))
8295    
8296     -include $(foreach f,$(existing-headers),$(dir $(f)).$(notdir $(f)).cmd)
8297    
8298     -else
8299     -
8300     -quiet_cmd_check = HDRCHK $<
8301     - cmd_check = $(PERL) $(srctree)/scripts/headers_check.pl $(dst) $(SRCARCH) $<; touch $@
8302     -
8303     -check-files := $(addsuffix .chk, $(all-headers))
8304     -
8305     -$(check-files): $(dst)/%.chk : $(dst)/% $(srctree)/scripts/headers_check.pl
8306     - $(call cmd,check)
8307     -
8308     -__headers: $(check-files)
8309     - @:
8310     -
8311     -endif
8312     -
8313     PHONY += FORCE
8314     FORCE:
8315    
8316     diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
8317     index 179d55af5852..a66fc0acad1e 100644
8318     --- a/scripts/Makefile.lib
8319     +++ b/scripts/Makefile.lib
8320     @@ -65,20 +65,6 @@ extra-y += $(patsubst %.dtb,%.dt.yaml, $(dtb-y))
8321     extra-$(CONFIG_OF_ALL_DTBS) += $(patsubst %.dtb,%.dt.yaml, $(dtb-))
8322     endif
8323    
8324     -# Test self-contained headers
8325     -
8326     -# Wildcard searches in $(srctree)/$(src)/, but not in $(objtree)/$(obj)/.
8327     -# Stale generated headers are often left over, so pattern matching should
8328     -# be avoided. Please notice $(srctree)/$(src)/ and $(objtree)/$(obj) point
8329     -# to the same location for in-tree building. So, header-test-pattern-y should
8330     -# be used with care.
8331     -header-test-y += $(filter-out $(header-test-), \
8332     - $(patsubst $(srctree)/$(src)/%, %, \
8333     - $(wildcard $(addprefix $(srctree)/$(src)/, \
8334     - $(header-test-pattern-y)))))
8335     -
8336     -extra-$(CONFIG_HEADER_TEST) += $(addsuffix .s, $(header-test-y) $(header-test-m))
8337     -
8338     # Add subdir path
8339    
8340     extra-y := $(addprefix $(obj)/,$(extra-y))
8341     @@ -305,13 +291,13 @@ DT_TMP_SCHEMA := $(objtree)/$(DT_BINDING_DIR)/processed-schema.yaml
8342     quiet_cmd_dtb_check = CHECK $@
8343     cmd_dtb_check = $(DT_CHECKER) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@ ;
8344    
8345     -define rule_dtc_dt_yaml
8346     +define rule_dtc
8347     $(call cmd_and_fixdep,dtc,yaml)
8348     $(call cmd,dtb_check)
8349     endef
8350    
8351     $(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
8352     - $(call if_changed_rule,dtc_dt_yaml)
8353     + $(call if_changed_rule,dtc)
8354    
8355     dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
8356    
8357     diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
8358     index 5380aca2b351..ee9aec5e98f0 100644
8359     --- a/security/integrity/ima/ima_policy.c
8360     +++ b/security/integrity/ima/ima_policy.c
8361     @@ -263,7 +263,7 @@ static void ima_lsm_free_rule(struct ima_rule_entry *entry)
8362     static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
8363     {
8364     struct ima_rule_entry *nentry;
8365     - int i, result;
8366     + int i;
8367    
8368     nentry = kmalloc(sizeof(*nentry), GFP_KERNEL);
8369     if (!nentry)
8370     @@ -277,7 +277,7 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
8371     memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm));
8372    
8373     for (i = 0; i < MAX_LSM_RULES; i++) {
8374     - if (!entry->lsm[i].rule)
8375     + if (!entry->lsm[i].args_p)
8376     continue;
8377    
8378     nentry->lsm[i].type = entry->lsm[i].type;
8379     @@ -286,13 +286,13 @@ static struct ima_rule_entry *ima_lsm_copy_rule(struct ima_rule_entry *entry)
8380     if (!nentry->lsm[i].args_p)
8381     goto out_err;
8382    
8383     - result = security_filter_rule_init(nentry->lsm[i].type,
8384     - Audit_equal,
8385     - nentry->lsm[i].args_p,
8386     - &nentry->lsm[i].rule);
8387     - if (result == -EINVAL)
8388     - pr_warn("ima: rule for LSM \'%d\' is undefined\n",
8389     - entry->lsm[i].type);
8390     + security_filter_rule_init(nentry->lsm[i].type,
8391     + Audit_equal,
8392     + nentry->lsm[i].args_p,
8393     + &nentry->lsm[i].rule);
8394     + if (!nentry->lsm[i].rule)
8395     + pr_warn("rule for LSM \'%s\' is undefined\n",
8396     + (char *)entry->lsm[i].args_p);
8397     }
8398     return nentry;
8399    
8400     @@ -329,7 +329,7 @@ static void ima_lsm_update_rules(void)
8401     list_for_each_entry_safe(entry, e, &ima_policy_rules, list) {
8402     needs_update = 0;
8403     for (i = 0; i < MAX_LSM_RULES; i++) {
8404     - if (entry->lsm[i].rule) {
8405     + if (entry->lsm[i].args_p) {
8406     needs_update = 1;
8407     break;
8408     }
8409     @@ -339,8 +339,7 @@ static void ima_lsm_update_rules(void)
8410    
8411     result = ima_lsm_update_rule(entry);
8412     if (result) {
8413     - pr_err("ima: lsm rule update error %d\n",
8414     - result);
8415     + pr_err("lsm rule update error %d\n", result);
8416     return;
8417     }
8418     }
8419     @@ -357,7 +356,7 @@ int ima_lsm_policy_change(struct notifier_block *nb, unsigned long event,
8420     }
8421    
8422     /**
8423     - * ima_match_rules - determine whether an inode matches the measure rule.
8424     + * ima_match_rules - determine whether an inode matches the policy rule.
8425     * @rule: a pointer to a rule
8426     * @inode: a pointer to an inode
8427     * @cred: a pointer to a credentials structure for user validation
8428     @@ -415,9 +414,12 @@ static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode,
8429     int rc = 0;
8430     u32 osid;
8431    
8432     - if (!rule->lsm[i].rule)
8433     - continue;
8434     -
8435     + if (!rule->lsm[i].rule) {
8436     + if (!rule->lsm[i].args_p)
8437     + continue;
8438     + else
8439     + return false;
8440     + }
8441     switch (i) {
8442     case LSM_OBJ_USER:
8443     case LSM_OBJ_ROLE:
8444     @@ -822,8 +824,14 @@ static int ima_lsm_rule_init(struct ima_rule_entry *entry,
8445     entry->lsm[lsm_rule].args_p,
8446     &entry->lsm[lsm_rule].rule);
8447     if (!entry->lsm[lsm_rule].rule) {
8448     - kfree(entry->lsm[lsm_rule].args_p);
8449     - return -EINVAL;
8450     + pr_warn("rule for LSM \'%s\' is undefined\n",
8451     + (char *)entry->lsm[lsm_rule].args_p);
8452     +
8453     + if (ima_rules == &ima_default_rules) {
8454     + kfree(entry->lsm[lsm_rule].args_p);
8455     + result = -EINVAL;
8456     + } else
8457     + result = 0;
8458     }
8459    
8460     return result;
8461     diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
8462     index 7a7187e069b4..88c3df24b748 100644
8463     --- a/tools/perf/ui/browsers/hists.c
8464     +++ b/tools/perf/ui/browsers/hists.c
8465     @@ -3054,6 +3054,7 @@ static int perf_evsel__hists_browse(struct evsel *evsel, int nr_events,
8466    
8467     continue;
8468     }
8469     + actions->ms.map = map;
8470     top = pstack__peek(browser->pstack);
8471     if (top == &browser->hists->dso_filter) {
8472     /*
8473     diff --git a/tools/perf/ui/gtk/Build b/tools/perf/ui/gtk/Build
8474     index ec22e899a224..9b5d5cbb7af7 100644
8475     --- a/tools/perf/ui/gtk/Build
8476     +++ b/tools/perf/ui/gtk/Build
8477     @@ -7,3 +7,8 @@ gtk-y += util.o
8478     gtk-y += helpline.o
8479     gtk-y += progress.o
8480     gtk-y += annotate.o
8481     +gtk-y += zalloc.o
8482     +
8483     +$(OUTPUT)ui/gtk/zalloc.o: ../lib/zalloc.c FORCE
8484     + $(call rule_mkdir)
8485     + $(call if_changed_dep,cc_o_c)
8486     diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
8487     index cd1f5b3a7774..d6e106fbce11 100644
8488     --- a/tools/testing/selftests/ftrace/Makefile
8489     +++ b/tools/testing/selftests/ftrace/Makefile
8490     @@ -2,7 +2,7 @@
8491     all:
8492    
8493     TEST_PROGS := ftracetest
8494     -TEST_FILES := test.d
8495     +TEST_FILES := test.d settings
8496     EXTRA_CLEAN := $(OUTPUT)/logs/*
8497    
8498     include ../lib.mk
8499     diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
8500     index fd405402c3ff..485696a01989 100644
8501     --- a/tools/testing/selftests/livepatch/Makefile
8502     +++ b/tools/testing/selftests/livepatch/Makefile
8503     @@ -6,4 +6,6 @@ TEST_PROGS := \
8504     test-callbacks.sh \
8505     test-shadow-vars.sh
8506    
8507     +TEST_FILES := settings
8508     +
8509     include ../lib.mk
8510     diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
8511     index 76c1897e6352..474638ef2697 100755
8512     --- a/tools/testing/selftests/net/fib_tests.sh
8513     +++ b/tools/testing/selftests/net/fib_tests.sh
8514     @@ -910,6 +910,12 @@ ipv6_rt_replace_mpath()
8515     check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
8516     log_test $? 0 "Multipath with single path via multipath attribute"
8517    
8518     + # multipath with dev-only
8519     + add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
8520     + run_cmd "$IP -6 ro replace 2001:db8:104::/64 dev veth1"
8521     + check_route6 "2001:db8:104::/64 dev veth1 metric 1024"
8522     + log_test $? 0 "Multipath with dev-only"
8523     +
8524     # route replace fails - invalid nexthop 1
8525     add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
8526     run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
8527     diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
8528     index d6469535630a..f1053630bb6f 100644
8529     --- a/tools/testing/selftests/rseq/Makefile
8530     +++ b/tools/testing/selftests/rseq/Makefile
8531     @@ -19,6 +19,8 @@ TEST_GEN_PROGS_EXTENDED = librseq.so
8532    
8533     TEST_PROGS = run_param_test.sh
8534    
8535     +TEST_FILES := settings
8536     +
8537     include ../lib.mk
8538    
8539     $(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h
8540     diff --git a/tools/testing/selftests/rtc/Makefile b/tools/testing/selftests/rtc/Makefile
8541     index de9c8566672a..90fa1a346908 100644
8542     --- a/tools/testing/selftests/rtc/Makefile
8543     +++ b/tools/testing/selftests/rtc/Makefile
8544     @@ -6,4 +6,6 @@ TEST_GEN_PROGS = rtctest
8545    
8546     TEST_GEN_PROGS_EXTENDED = setdate
8547    
8548     +TEST_FILES := settings
8549     +
8550     include ../lib.mk
8551     diff --git a/usr/include/Makefile b/usr/include/Makefile
8552     index 57b20f7b6729..47cb91d3a51d 100644
8553     --- a/usr/include/Makefile
8554     +++ b/usr/include/Makefile
8555     @@ -99,9 +99,16 @@ endif
8556     # asm-generic/*.h is used by asm/*.h, and should not be included directly
8557     header-test- += asm-generic/%
8558    
8559     -# The rest are compile-tested
8560     -header-test-y += $(filter-out $(header-test-), \
8561     - $(patsubst $(obj)/%,%, $(wildcard \
8562     - $(addprefix $(obj)/, *.h */*.h */*/*.h */*/*/*.h))))
8563     +extra-y := $(patsubst $(obj)/%.h,%.hdrtest, $(shell find $(obj) -name '*.h'))
8564     +
8565     +quiet_cmd_hdrtest = HDRTEST $<
8566     + cmd_hdrtest = \
8567     + $(CC) $(c_flags) -S -o /dev/null -x c /dev/null \
8568     + $(if $(filter-out $(header-test-), $*.h), -include $<); \
8569     + $(PERL) $(srctree)/scripts/headers_check.pl $(obj) $(SRCARCH) $<; \
8570     + touch $@
8571     +
8572     +$(obj)/%.hdrtest: $(obj)/%.h FORCE
8573     + $(call if_changed_dep,hdrtest)
8574    
8575     clean-files += $(filter-out Makefile, $(notdir $(wildcard $(obj)/*)))
8576     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
8577     index b5ea1bafe513..03c681568ab1 100644
8578     --- a/virt/kvm/kvm_main.c
8579     +++ b/virt/kvm/kvm_main.c
8580     @@ -2275,12 +2275,12 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
8581     if (slots->generation != ghc->generation)
8582     __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
8583    
8584     - if (unlikely(!ghc->memslot))
8585     - return kvm_write_guest(kvm, gpa, data, len);
8586     -
8587     if (kvm_is_error_hva(ghc->hva))
8588     return -EFAULT;
8589    
8590     + if (unlikely(!ghc->memslot))
8591     + return kvm_write_guest(kvm, gpa, data, len);
8592     +
8593     r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
8594     if (r)
8595     return -EFAULT;
8596     @@ -2308,12 +2308,12 @@ int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
8597     if (slots->generation != ghc->generation)
8598     __kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len);
8599    
8600     - if (unlikely(!ghc->memslot))
8601     - return kvm_read_guest(kvm, ghc->gpa, data, len);
8602     -
8603     if (kvm_is_error_hva(ghc->hva))
8604     return -EFAULT;
8605    
8606     + if (unlikely(!ghc->memslot))
8607     + return kvm_read_guest(kvm, ghc->gpa, data, len);
8608     +
8609     r = __copy_from_user(data, (void __user *)ghc->hva, len);
8610     if (r)
8611     return -EFAULT;