Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0153-4.14.54-all-fixes.patch

Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 95576 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/devicetree/bindings/net/dsa/b53.txt b/Documentation/devicetree/bindings/net/dsa/b53.txt
2     index 8acf51a4dfa8..47a6a7fe0b86 100644
3     --- a/Documentation/devicetree/bindings/net/dsa/b53.txt
4     +++ b/Documentation/devicetree/bindings/net/dsa/b53.txt
5     @@ -10,6 +10,7 @@ Required properties:
6     "brcm,bcm53128"
7     "brcm,bcm5365"
8     "brcm,bcm5395"
9     + "brcm,bcm5389"
10     "brcm,bcm5397"
11     "brcm,bcm5398"
12    
13     diff --git a/Makefile b/Makefile
14     index fb66998408f4..de0955d8dfa3 100644
15     --- a/Makefile
16     +++ b/Makefile
17     @@ -1,7 +1,7 @@
18     # SPDX-License-Identifier: GPL-2.0
19     VERSION = 4
20     PATCHLEVEL = 14
21     -SUBLEVEL = 53
22     +SUBLEVEL = 54
23     EXTRAVERSION =
24     NAME = Petit Gorille
25    
26     diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
27     index 90a741732f60..4747ede61acd 100644
28     --- a/arch/arm/boot/dts/imx6q.dtsi
29     +++ b/arch/arm/boot/dts/imx6q.dtsi
30     @@ -96,7 +96,7 @@
31     clocks = <&clks IMX6Q_CLK_ECSPI5>,
32     <&clks IMX6Q_CLK_ECSPI5>;
33     clock-names = "ipg", "per";
34     - dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
35     + dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
36     dma-names = "rx", "tx";
37     status = "disabled";
38     };
39     diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
40     index db78d353bab1..191e86c62037 100644
41     --- a/drivers/acpi/osl.c
42     +++ b/drivers/acpi/osl.c
43     @@ -45,6 +45,8 @@
44     #include <linux/uaccess.h>
45     #include <linux/io-64-nonatomic-lo-hi.h>
46    
47     +#include "acpica/accommon.h"
48     +#include "acpica/acnamesp.h"
49     #include "internal.h"
50    
51     #define _COMPONENT ACPI_OS_SERVICES
52     @@ -1477,6 +1479,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
53     }
54     EXPORT_SYMBOL(acpi_check_region);
55    
56     +static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
57     + void *_res, void **return_value)
58     +{
59     + struct acpi_mem_space_context **mem_ctx;
60     + union acpi_operand_object *handler_obj;
61     + union acpi_operand_object *region_obj2;
62     + union acpi_operand_object *region_obj;
63     + struct resource *res = _res;
64     + acpi_status status;
65     +
66     + region_obj = acpi_ns_get_attached_object(handle);
67     + if (!region_obj)
68     + return AE_OK;
69     +
70     + handler_obj = region_obj->region.handler;
71     + if (!handler_obj)
72     + return AE_OK;
73     +
74     + if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
75     + return AE_OK;
76     +
77     + if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
78     + return AE_OK;
79     +
80     + region_obj2 = acpi_ns_get_secondary_object(region_obj);
81     + if (!region_obj2)
82     + return AE_OK;
83     +
84     + mem_ctx = (void *)&region_obj2->extra.region_context;
85     +
86     + if (!(mem_ctx[0]->address >= res->start &&
87     + mem_ctx[0]->address < res->end))
88     + return AE_OK;
89     +
90     + status = handler_obj->address_space.setup(region_obj,
91     + ACPI_REGION_DEACTIVATE,
92     + NULL, (void **)mem_ctx);
93     + if (ACPI_SUCCESS(status))
94     + region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
95     +
96     + return status;
97     +}
98     +
99     +/**
100     + * acpi_release_memory - Release any mappings done to a memory region
101     + * @handle: Handle to namespace node
102     + * @res: Memory resource
103     + * @level: A level that terminates the search
104     + *
105     + * Walks through @handle and unmaps all SystemMemory Operation Regions that
106     + * overlap with @res and that have already been activated (mapped).
107     + *
108     + * This is a helper that allows drivers to place special requirements on memory
109     + * region that may overlap with operation regions, primarily allowing them to
110     + * safely map the region as non-cached memory.
111     + *
112     + * The unmapped Operation Regions will be automatically remapped next time they
113     + * are called, so the drivers do not need to do anything else.
114     + */
115     +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
116     + u32 level)
117     +{
118     + if (!(res->flags & IORESOURCE_MEM))
119     + return AE_TYPE;
120     +
121     + return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
122     + acpi_deactivate_mem_region, NULL, res, NULL);
123     +}
124     +EXPORT_SYMBOL_GPL(acpi_release_memory);
125     +
126     /*
127     * Let drivers know whether the resource checks are effective
128     */
129     diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
130     index 9c9a22958717..a8d2eb0ceb8d 100644
131     --- a/drivers/atm/zatm.c
132     +++ b/drivers/atm/zatm.c
133     @@ -1151,8 +1151,8 @@ static void eprom_get_byte(struct zatm_dev *zatm_dev, unsigned char *byte,
134     }
135    
136    
137     -static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
138     - int offset, int swap)
139     +static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
140     + int swap)
141     {
142     unsigned char buf[ZEPROM_SIZE];
143     struct zatm_dev *zatm_dev;
144     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
145     index 4d08957d2108..1360a24d2ede 100644
146     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
147     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
148     @@ -747,8 +747,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
149     }
150     if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
151     adev->vram_pin_size += amdgpu_bo_size(bo);
152     - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
153     - adev->invisible_pin_size += amdgpu_bo_size(bo);
154     + adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
155     } else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
156     adev->gart_pin_size += amdgpu_bo_size(bo);
157     }
158     @@ -786,8 +785,7 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
159    
160     if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
161     adev->vram_pin_size -= amdgpu_bo_size(bo);
162     - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
163     - adev->invisible_pin_size -= amdgpu_bo_size(bo);
164     + adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
165     } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
166     adev->gart_pin_size -= amdgpu_bo_size(bo);
167     }
168     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
169     index 43093bffa2cf..557829a84778 100644
170     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
171     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
172     @@ -64,6 +64,7 @@ extern const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func;
173     bool amdgpu_gtt_mgr_is_allocated(struct ttm_mem_reg *mem);
174     uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
175    
176     +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
177     uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
178     uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
179    
180     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
181     index 041e0121590c..308a9755eae3 100644
182     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
183     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
184     @@ -85,6 +85,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
185     }
186    
187     hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
188     + adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
189     family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
190     version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
191     version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
192     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
193     index 26e900627971..86d8a961518e 100644
194     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
195     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
196     @@ -101,6 +101,22 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
197     adev->mc.visible_vram_size : end) - start;
198     }
199    
200     +/**
201     + * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
202     + *
203     + * @bo: &amdgpu_bo buffer object (must be in VRAM)
204     + *
205     + * Returns:
206     + * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
207     + */
208     +u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
209     +{
210     + if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
211     + return amdgpu_bo_size(bo);
212     +
213     + return 0;
214     +}
215     +
216     /**
217     * amdgpu_vram_mgr_new - allocate new ranges
218     *
219     @@ -140,7 +156,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
220     num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
221     }
222    
223     - nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
224     + nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
225     + GFP_KERNEL | __GFP_ZERO);
226     if (!nodes)
227     return -ENOMEM;
228    
229     @@ -195,7 +212,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
230     drm_mm_remove_node(&nodes[i]);
231     spin_unlock(&mgr->lock);
232    
233     - kfree(nodes);
234     + kvfree(nodes);
235     return r == -ENOSPC ? 0 : r;
236     }
237    
238     @@ -234,7 +251,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
239     atomic64_sub(usage, &mgr->usage);
240     atomic64_sub(vis_usage, &mgr->vis_usage);
241    
242     - kfree(mem->mm_node);
243     + kvfree(mem->mm_node);
244     mem->mm_node = NULL;
245     }
246    
247     diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
248     index cf81065e3c5a..5183b46563f6 100644
249     --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
250     +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
251     @@ -467,8 +467,8 @@ static int vce_v3_0_hw_init(void *handle)
252     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
253    
254     vce_v3_0_override_vce_clock_gating(adev, true);
255     - if (!(adev->flags & AMD_IS_APU))
256     - amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
257     +
258     + amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
259    
260     for (i = 0; i < adev->vce.num_rings; i++)
261     adev->vce.ring[i].ready = false;
262     diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
263     index 4968b6bb9466..0327e0a6802b 100644
264     --- a/drivers/gpu/drm/amd/amdgpu/vi.c
265     +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
266     @@ -729,33 +729,59 @@ static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
267     return r;
268    
269     tmp = RREG32_SMC(cntl_reg);
270     - tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
271     - CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
272     +
273     + if (adev->flags & AMD_IS_APU)
274     + tmp &= ~CG_DCLK_CNTL__DCLK_DIVIDER_MASK;
275     + else
276     + tmp &= ~(CG_DCLK_CNTL__DCLK_DIR_CNTL_EN_MASK |
277     + CG_DCLK_CNTL__DCLK_DIVIDER_MASK);
278     tmp |= dividers.post_divider;
279     WREG32_SMC(cntl_reg, tmp);
280    
281     for (i = 0; i < 100; i++) {
282     - if (RREG32_SMC(status_reg) & CG_DCLK_STATUS__DCLK_STATUS_MASK)
283     - break;
284     + tmp = RREG32_SMC(status_reg);
285     + if (adev->flags & AMD_IS_APU) {
286     + if (tmp & 0x10000)
287     + break;
288     + } else {
289     + if (tmp & CG_DCLK_STATUS__DCLK_STATUS_MASK)
290     + break;
291     + }
292     mdelay(10);
293     }
294     if (i == 100)
295     return -ETIMEDOUT;
296     -
297     return 0;
298     }
299    
300     +#define ixGNB_CLK1_DFS_CNTL 0xD82200F0
301     +#define ixGNB_CLK1_STATUS 0xD822010C
302     +#define ixGNB_CLK2_DFS_CNTL 0xD8220110
303     +#define ixGNB_CLK2_STATUS 0xD822012C
304     +#define ixGNB_CLK3_DFS_CNTL 0xD8220130
305     +#define ixGNB_CLK3_STATUS 0xD822014C
306     +
307     static int vi_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
308     {
309     int r;
310    
311     - r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
312     - if (r)
313     - return r;
314     + if (adev->flags & AMD_IS_APU) {
315     + r = vi_set_uvd_clock(adev, vclk, ixGNB_CLK2_DFS_CNTL, ixGNB_CLK2_STATUS);
316     + if (r)
317     + return r;
318    
319     - r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
320     - if (r)
321     - return r;
322     + r = vi_set_uvd_clock(adev, dclk, ixGNB_CLK1_DFS_CNTL, ixGNB_CLK1_STATUS);
323     + if (r)
324     + return r;
325     + } else {
326     + r = vi_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
327     + if (r)
328     + return r;
329     +
330     + r = vi_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
331     + if (r)
332     + return r;
333     + }
334    
335     return 0;
336     }
337     @@ -765,6 +791,22 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
338     int r, i;
339     struct atom_clock_dividers dividers;
340     u32 tmp;
341     + u32 reg_ctrl;
342     + u32 reg_status;
343     + u32 status_mask;
344     + u32 reg_mask;
345     +
346     + if (adev->flags & AMD_IS_APU) {
347     + reg_ctrl = ixGNB_CLK3_DFS_CNTL;
348     + reg_status = ixGNB_CLK3_STATUS;
349     + status_mask = 0x00010000;
350     + reg_mask = CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
351     + } else {
352     + reg_ctrl = ixCG_ECLK_CNTL;
353     + reg_status = ixCG_ECLK_STATUS;
354     + status_mask = CG_ECLK_STATUS__ECLK_STATUS_MASK;
355     + reg_mask = CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK | CG_ECLK_CNTL__ECLK_DIVIDER_MASK;
356     + }
357    
358     r = amdgpu_atombios_get_clock_dividers(adev,
359     COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
360     @@ -773,24 +815,25 @@ static int vi_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
361     return r;
362    
363     for (i = 0; i < 100; i++) {
364     - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
365     + if (RREG32_SMC(reg_status) & status_mask)
366     break;
367     mdelay(10);
368     }
369     +
370     if (i == 100)
371     return -ETIMEDOUT;
372    
373     - tmp = RREG32_SMC(ixCG_ECLK_CNTL);
374     - tmp &= ~(CG_ECLK_CNTL__ECLK_DIR_CNTL_EN_MASK |
375     - CG_ECLK_CNTL__ECLK_DIVIDER_MASK);
376     + tmp = RREG32_SMC(reg_ctrl);
377     + tmp &= ~reg_mask;
378     tmp |= dividers.post_divider;
379     - WREG32_SMC(ixCG_ECLK_CNTL, tmp);
380     + WREG32_SMC(reg_ctrl, tmp);
381    
382     for (i = 0; i < 100; i++) {
383     - if (RREG32_SMC(ixCG_ECLK_STATUS) & CG_ECLK_STATUS__ECLK_STATUS_MASK)
384     + if (RREG32_SMC(reg_status) & status_mask)
385     break;
386     mdelay(10);
387     }
388     +
389     if (i == 100)
390     return -ETIMEDOUT;
391    
392     diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
393     index 703c2d13603f..eb7c4cf19bf6 100644
394     --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
395     +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
396     @@ -889,7 +889,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane,
397     drm_object_attach_property(&plane->base.base,
398     props->alpha, 255);
399    
400     - if (desc->layout.xstride && desc->layout.pstride) {
401     + if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
402     int ret;
403    
404     ret = drm_plane_create_rotation_property(&plane->base,
405     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
406     index 61a2203b75df..be813b2738c1 100644
407     --- a/drivers/gpu/drm/i915/i915_reg.h
408     +++ b/drivers/gpu/drm/i915/i915_reg.h
409     @@ -2484,12 +2484,17 @@ enum i915_power_well_id {
410     #define _3D_CHICKEN _MMIO(0x2084)
411     #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
412     #define _3D_CHICKEN2 _MMIO(0x208c)
413     +
414     +#define FF_SLICE_CHICKEN _MMIO(0x2088)
415     +#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
416     +
417     /* Disables pipelining of read flushes past the SF-WIZ interface.
418     * Required on all Ironlake steppings according to the B-Spec, but the
419     * particular danger of not doing so is not specified.
420     */
421     # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
422     #define _3D_CHICKEN3 _MMIO(0x2090)
423     +#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
424     #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
425     #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
426     #define _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(x) ((x)<<1) /* gen8+ */
427     diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
428     index 6f972e6ec663..d638b641b760 100644
429     --- a/drivers/gpu/drm/i915/intel_lrc.c
430     +++ b/drivers/gpu/drm/i915/intel_lrc.c
431     @@ -1067,11 +1067,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
432     /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
433     batch = gen8_emit_flush_coherentl3_wa(engine, batch);
434    
435     + *batch++ = MI_LOAD_REGISTER_IMM(3);
436     +
437     /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
438     - *batch++ = MI_LOAD_REGISTER_IMM(1);
439     *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
440     *batch++ = _MASKED_BIT_DISABLE(
441     GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
442     +
443     + /* BSpec: 11391 */
444     + *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
445     + *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
446     +
447     + /* BSpec: 11299 */
448     + *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
449     + *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
450     +
451     *batch++ = MI_NOOP;
452    
453     /* WaClearSlmSpaceAtContextSwitch:kbl */
454     diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
455     index 9a9214ae0fb5..573bab222123 100644
456     --- a/drivers/gpu/drm/qxl/qxl_display.c
457     +++ b/drivers/gpu/drm/qxl/qxl_display.c
458     @@ -630,7 +630,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
459     struct qxl_cursor_cmd *cmd;
460     struct qxl_cursor *cursor;
461     struct drm_gem_object *obj;
462     - struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
463     + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
464     int ret;
465     void *user_ptr;
466     int size = 64*64*4;
467     @@ -684,7 +684,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
468     cursor_bo, 0);
469     cmd->type = QXL_CURSOR_SET;
470    
471     - qxl_bo_unref(&qcrtc->cursor_bo);
472     + old_cursor_bo = qcrtc->cursor_bo;
473     qcrtc->cursor_bo = cursor_bo;
474     cursor_bo = NULL;
475     } else {
476     @@ -704,6 +704,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
477     qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
478     qxl_release_fence_buffer_objects(release);
479    
480     + if (old_cursor_bo)
481     + qxl_bo_unref(&old_cursor_bo);
482     +
483     qxl_bo_unref(&cursor_bo);
484    
485     return;
486     diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
487     index 33834db7c0a0..38a2ac24428e 100644
488     --- a/drivers/md/dm-raid.c
489     +++ b/drivers/md/dm-raid.c
490     @@ -3637,8 +3637,11 @@ static void raid_postsuspend(struct dm_target *ti)
491     {
492     struct raid_set *rs = ti->private;
493    
494     - if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
495     + if (!test_and_set_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
496     + mddev_lock_nointr(&rs->md);
497     mddev_suspend(&rs->md);
498     + mddev_unlock(&rs->md);
499     + }
500    
501     rs->md.ro = 1;
502     }
503     @@ -3898,8 +3901,11 @@ static void raid_resume(struct dm_target *ti)
504     if (!(rs->ctr_flags & RESUME_STAY_FROZEN_FLAGS))
505     clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
506    
507     - if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags))
508     + if (test_and_clear_bit(RT_FLAG_RS_SUSPENDED, &rs->runtime_flags)) {
509     + mddev_lock_nointr(mddev);
510     mddev_resume(mddev);
511     + mddev_unlock(mddev);
512     + }
513     }
514    
515     static struct target_type raid_target = {
516     diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
517     index 03082e17c65c..72ce0bccc865 100644
518     --- a/drivers/md/md-cluster.c
519     +++ b/drivers/md/md-cluster.c
520     @@ -442,10 +442,11 @@ static void __remove_suspend_info(struct md_cluster_info *cinfo, int slot)
521     static void remove_suspend_info(struct mddev *mddev, int slot)
522     {
523     struct md_cluster_info *cinfo = mddev->cluster_info;
524     + mddev->pers->quiesce(mddev, 1);
525     spin_lock_irq(&cinfo->suspend_lock);
526     __remove_suspend_info(cinfo, slot);
527     spin_unlock_irq(&cinfo->suspend_lock);
528     - mddev->pers->quiesce(mddev, 2);
529     + mddev->pers->quiesce(mddev, 0);
530     }
531    
532    
533     @@ -492,13 +493,12 @@ static void process_suspend_info(struct mddev *mddev,
534     s->lo = lo;
535     s->hi = hi;
536     mddev->pers->quiesce(mddev, 1);
537     - mddev->pers->quiesce(mddev, 0);
538     spin_lock_irq(&cinfo->suspend_lock);
539     /* Remove existing entry (if exists) before adding */
540     __remove_suspend_info(cinfo, slot);
541     list_add(&s->list, &cinfo->suspend_list);
542     spin_unlock_irq(&cinfo->suspend_lock);
543     - mddev->pers->quiesce(mddev, 2);
544     + mddev->pers->quiesce(mddev, 0);
545     }
546    
547     static void process_add_new_disk(struct mddev *mddev, struct cluster_msg *cmsg)
548     diff --git a/drivers/md/md.c b/drivers/md/md.c
549     index 7143c8b9284b..11a67eac55b1 100644
550     --- a/drivers/md/md.c
551     +++ b/drivers/md/md.c
552     @@ -266,16 +266,31 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
553     * call has finished, the bio has been linked into some internal structure
554     * and so is visible to ->quiesce(), so we don't need the refcount any more.
555     */
556     +static bool is_suspended(struct mddev *mddev, struct bio *bio)
557     +{
558     + if (mddev->suspended)
559     + return true;
560     + if (bio_data_dir(bio) != WRITE)
561     + return false;
562     + if (mddev->suspend_lo >= mddev->suspend_hi)
563     + return false;
564     + if (bio->bi_iter.bi_sector >= mddev->suspend_hi)
565     + return false;
566     + if (bio_end_sector(bio) < mddev->suspend_lo)
567     + return false;
568     + return true;
569     +}
570     +
571     void md_handle_request(struct mddev *mddev, struct bio *bio)
572     {
573     check_suspended:
574     rcu_read_lock();
575     - if (mddev->suspended) {
576     + if (is_suspended(mddev, bio)) {
577     DEFINE_WAIT(__wait);
578     for (;;) {
579     prepare_to_wait(&mddev->sb_wait, &__wait,
580     TASK_UNINTERRUPTIBLE);
581     - if (!mddev->suspended)
582     + if (!is_suspended(mddev, bio))
583     break;
584     rcu_read_unlock();
585     schedule();
586     @@ -344,12 +359,17 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
587     void mddev_suspend(struct mddev *mddev)
588     {
589     WARN_ON_ONCE(mddev->thread && current == mddev->thread->tsk);
590     + lockdep_assert_held(&mddev->reconfig_mutex);
591     if (mddev->suspended++)
592     return;
593     synchronize_rcu();
594     wake_up(&mddev->sb_wait);
595     + set_bit(MD_ALLOW_SB_UPDATE, &mddev->flags);
596     + smp_mb__after_atomic();
597     wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
598     mddev->pers->quiesce(mddev, 1);
599     + clear_bit_unlock(MD_ALLOW_SB_UPDATE, &mddev->flags);
600     + wait_event(mddev->sb_wait, !test_bit(MD_UPDATING_SB, &mddev->flags));
601    
602     del_timer_sync(&mddev->safemode_timer);
603     }
604     @@ -357,6 +377,7 @@ EXPORT_SYMBOL_GPL(mddev_suspend);
605    
606     void mddev_resume(struct mddev *mddev)
607     {
608     + lockdep_assert_held(&mddev->reconfig_mutex);
609     if (--mddev->suspended)
610     return;
611     wake_up(&mddev->sb_wait);
612     @@ -663,6 +684,7 @@ void mddev_unlock(struct mddev *mddev)
613     */
614     spin_lock(&pers_lock);
615     md_wakeup_thread(mddev->thread);
616     + wake_up(&mddev->sb_wait);
617     spin_unlock(&pers_lock);
618     }
619     EXPORT_SYMBOL_GPL(mddev_unlock);
620     @@ -4828,7 +4850,7 @@ suspend_lo_show(struct mddev *mddev, char *page)
621     static ssize_t
622     suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
623     {
624     - unsigned long long old, new;
625     + unsigned long long new;
626     int err;
627    
628     err = kstrtoull(buf, 10, &new);
629     @@ -4844,16 +4866,10 @@ suspend_lo_store(struct mddev *mddev, const char *buf, size_t len)
630     if (mddev->pers == NULL ||
631     mddev->pers->quiesce == NULL)
632     goto unlock;
633     - old = mddev->suspend_lo;
634     + mddev_suspend(mddev);
635     mddev->suspend_lo = new;
636     - if (new >= old)
637     - /* Shrinking suspended region */
638     - mddev->pers->quiesce(mddev, 2);
639     - else {
640     - /* Expanding suspended region - need to wait */
641     - mddev->pers->quiesce(mddev, 1);
642     - mddev->pers->quiesce(mddev, 0);
643     - }
644     + mddev_resume(mddev);
645     +
646     err = 0;
647     unlock:
648     mddev_unlock(mddev);
649     @@ -4871,7 +4887,7 @@ suspend_hi_show(struct mddev *mddev, char *page)
650     static ssize_t
651     suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
652     {
653     - unsigned long long old, new;
654     + unsigned long long new;
655     int err;
656    
657     err = kstrtoull(buf, 10, &new);
658     @@ -4884,19 +4900,13 @@ suspend_hi_store(struct mddev *mddev, const char *buf, size_t len)
659     if (err)
660     return err;
661     err = -EINVAL;
662     - if (mddev->pers == NULL ||
663     - mddev->pers->quiesce == NULL)
664     + if (mddev->pers == NULL)
665     goto unlock;
666     - old = mddev->suspend_hi;
667     +
668     + mddev_suspend(mddev);
669     mddev->suspend_hi = new;
670     - if (new <= old)
671     - /* Shrinking suspended region */
672     - mddev->pers->quiesce(mddev, 2);
673     - else {
674     - /* Expanding suspended region - need to wait */
675     - mddev->pers->quiesce(mddev, 1);
676     - mddev->pers->quiesce(mddev, 0);
677     - }
678     + mddev_resume(mddev);
679     +
680     err = 0;
681     unlock:
682     mddev_unlock(mddev);
683     @@ -6642,22 +6652,26 @@ static int set_bitmap_file(struct mddev *mddev, int fd)
684     return -ENOENT; /* cannot remove what isn't there */
685     err = 0;
686     if (mddev->pers) {
687     - mddev->pers->quiesce(mddev, 1);
688     if (fd >= 0) {
689     struct bitmap *bitmap;
690    
691     bitmap = bitmap_create(mddev, -1);
692     + mddev_suspend(mddev);
693     if (!IS_ERR(bitmap)) {
694     mddev->bitmap = bitmap;
695     err = bitmap_load(mddev);
696     } else
697     err = PTR_ERR(bitmap);
698     - }
699     - if (fd < 0 || err) {
700     + if (err) {
701     + bitmap_destroy(mddev);
702     + fd = -1;
703     + }
704     + mddev_resume(mddev);
705     + } else if (fd < 0) {
706     + mddev_suspend(mddev);
707     bitmap_destroy(mddev);
708     - fd = -1; /* make sure to put the file */
709     + mddev_resume(mddev);
710     }
711     - mddev->pers->quiesce(mddev, 0);
712     }
713     if (fd < 0) {
714     struct file *f = mddev->bitmap_info.file;
715     @@ -6941,8 +6955,8 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
716     mddev->bitmap_info.default_offset;
717     mddev->bitmap_info.space =
718     mddev->bitmap_info.default_space;
719     - mddev->pers->quiesce(mddev, 1);
720     bitmap = bitmap_create(mddev, -1);
721     + mddev_suspend(mddev);
722     if (!IS_ERR(bitmap)) {
723     mddev->bitmap = bitmap;
724     rv = bitmap_load(mddev);
725     @@ -6950,7 +6964,7 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
726     rv = PTR_ERR(bitmap);
727     if (rv)
728     bitmap_destroy(mddev);
729     - mddev->pers->quiesce(mddev, 0);
730     + mddev_resume(mddev);
731     } else {
732     /* remove the bitmap */
733     if (!mddev->bitmap) {
734     @@ -6973,9 +6987,9 @@ static int update_array_info(struct mddev *mddev, mdu_array_info_t *info)
735     mddev->bitmap_info.nodes = 0;
736     md_cluster_ops->leave(mddev);
737     }
738     - mddev->pers->quiesce(mddev, 1);
739     + mddev_suspend(mddev);
740     bitmap_destroy(mddev);
741     - mddev->pers->quiesce(mddev, 0);
742     + mddev_resume(mddev);
743     mddev->bitmap_info.offset = 0;
744     }
745     }
746     @@ -8858,6 +8872,16 @@ void md_check_recovery(struct mddev *mddev)
747     unlock:
748     wake_up(&mddev->sb_wait);
749     mddev_unlock(mddev);
750     + } else if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags) && mddev->sb_flags) {
751     + /* Write superblock - thread that called mddev_suspend()
752     + * holds reconfig_mutex for us.
753     + */
754     + set_bit(MD_UPDATING_SB, &mddev->flags);
755     + smp_mb__after_atomic();
756     + if (test_bit(MD_ALLOW_SB_UPDATE, &mddev->flags))
757     + md_update_sb(mddev, 0);
758     + clear_bit_unlock(MD_UPDATING_SB, &mddev->flags);
759     + wake_up(&mddev->sb_wait);
760     }
761     }
762     EXPORT_SYMBOL(md_check_recovery);
763     diff --git a/drivers/md/md.h b/drivers/md/md.h
764     index 9b0a896890ef..11696aba94e3 100644
765     --- a/drivers/md/md.h
766     +++ b/drivers/md/md.h
767     @@ -237,6 +237,12 @@ enum mddev_flags {
768     */
769     MD_HAS_PPL, /* The raid array has PPL feature set */
770     MD_HAS_MULTIPLE_PPLS, /* The raid array has multiple PPLs feature set */
771     + MD_ALLOW_SB_UPDATE, /* md_check_recovery is allowed to update
772     + * the metadata without taking reconfig_mutex.
773     + */
774     + MD_UPDATING_SB, /* md_check_recovery is updating the metadata
775     + * without explicitly holding reconfig_mutex.
776     + */
777     };
778    
779     enum mddev_sb_flags {
780     @@ -540,12 +546,11 @@ struct md_personality
781     int (*check_reshape) (struct mddev *mddev);
782     int (*start_reshape) (struct mddev *mddev);
783     void (*finish_reshape) (struct mddev *mddev);
784     - /* quiesce moves between quiescence states
785     - * 0 - fully active
786     - * 1 - no new requests allowed
787     - * others - reserved
788     + /* quiesce suspends or resumes internal processing.
789     + * 1 - stop new actions and wait for action io to complete
790     + * 0 - return to normal behaviour
791     */
792     - void (*quiesce) (struct mddev *mddev, int state);
793     + void (*quiesce) (struct mddev *mddev, int quiesce);
794     /* takeover is used to transition an array from one
795     * personality to another. The new personality must be able
796     * to handle the data in the current layout.
797     diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
798     index 5a00fc118470..5ecba9eef441 100644
799     --- a/drivers/md/raid0.c
800     +++ b/drivers/md/raid0.c
801     @@ -768,7 +768,7 @@ static void *raid0_takeover(struct mddev *mddev)
802     return ERR_PTR(-EINVAL);
803     }
804    
805     -static void raid0_quiesce(struct mddev *mddev, int state)
806     +static void raid0_quiesce(struct mddev *mddev, int quiesce)
807     {
808     }
809    
810     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
811     index e4e01d3bab81..029ecba60727 100644
812     --- a/drivers/md/raid1.c
813     +++ b/drivers/md/raid1.c
814     @@ -1298,11 +1298,9 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
815     */
816    
817    
818     - if ((bio_end_sector(bio) > mddev->suspend_lo &&
819     - bio->bi_iter.bi_sector < mddev->suspend_hi) ||
820     - (mddev_is_clustered(mddev) &&
821     + if (mddev_is_clustered(mddev) &&
822     md_cluster_ops->area_resyncing(mddev, WRITE,
823     - bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
824     + bio->bi_iter.bi_sector, bio_end_sector(bio))) {
825    
826     /*
827     * As the suspend_* range is controlled by userspace, we want
828     @@ -1313,12 +1311,10 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
829     sigset_t full, old;
830     prepare_to_wait(&conf->wait_barrier,
831     &w, TASK_INTERRUPTIBLE);
832     - if ((bio_end_sector(bio) <= mddev->suspend_lo ||
833     - bio->bi_iter.bi_sector >= mddev->suspend_hi) &&
834     - (!mddev_is_clustered(mddev) ||
835     - !md_cluster_ops->area_resyncing(mddev, WRITE,
836     + if (!mddev_is_clustered(mddev) ||
837     + !md_cluster_ops->area_resyncing(mddev, WRITE,
838     bio->bi_iter.bi_sector,
839     - bio_end_sector(bio))))
840     + bio_end_sector(bio)))
841     break;
842     sigfillset(&full);
843     sigprocmask(SIG_BLOCK, &full, &old);
844     @@ -3280,21 +3276,14 @@ static int raid1_reshape(struct mddev *mddev)
845     return 0;
846     }
847    
848     -static void raid1_quiesce(struct mddev *mddev, int state)
849     +static void raid1_quiesce(struct mddev *mddev, int quiesce)
850     {
851     struct r1conf *conf = mddev->private;
852    
853     - switch(state) {
854     - case 2: /* wake for suspend */
855     - wake_up(&conf->wait_barrier);
856     - break;
857     - case 1:
858     + if (quiesce)
859     freeze_array(conf, 0);
860     - break;
861     - case 0:
862     + else
863     unfreeze_array(conf);
864     - break;
865     - }
866     }
867    
868     static void *raid1_takeover(struct mddev *mddev)
869     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
870     index 5fb31ef52945..b20c23f970f4 100644
871     --- a/drivers/md/raid10.c
872     +++ b/drivers/md/raid10.c
873     @@ -3838,18 +3838,14 @@ static void raid10_free(struct mddev *mddev, void *priv)
874     kfree(conf);
875     }
876    
877     -static void raid10_quiesce(struct mddev *mddev, int state)
878     +static void raid10_quiesce(struct mddev *mddev, int quiesce)
879     {
880     struct r10conf *conf = mddev->private;
881    
882     - switch(state) {
883     - case 1:
884     + if (quiesce)
885     raise_barrier(conf, 0);
886     - break;
887     - case 0:
888     + else
889     lower_barrier(conf);
890     - break;
891     - }
892     }
893    
894     static int raid10_resize(struct mddev *mddev, sector_t sectors)
895     diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
896     index 9a340728b846..0d535b40cb3b 100644
897     --- a/drivers/md/raid5-cache.c
898     +++ b/drivers/md/raid5-cache.c
899     @@ -693,6 +693,8 @@ static void r5c_disable_writeback_async(struct work_struct *work)
900     struct r5l_log *log = container_of(work, struct r5l_log,
901     disable_writeback_work);
902     struct mddev *mddev = log->rdev->mddev;
903     + struct r5conf *conf = mddev->private;
904     + int locked = 0;
905    
906     if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
907     return;
908     @@ -701,11 +703,15 @@ static void r5c_disable_writeback_async(struct work_struct *work)
909    
910     /* wait superblock change before suspend */
911     wait_event(mddev->sb_wait,
912     - !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
913     -
914     - mddev_suspend(mddev);
915     - log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
916     - mddev_resume(mddev);
917     + conf->log == NULL ||
918     + (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
919     + (locked = mddev_trylock(mddev))));
920     + if (locked) {
921     + mddev_suspend(mddev);
922     + log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
923     + mddev_resume(mddev);
924     + mddev_unlock(mddev);
925     + }
926     }
927    
928     static void r5l_submit_current_io(struct r5l_log *log)
929     @@ -1583,21 +1589,21 @@ void r5l_wake_reclaim(struct r5l_log *log, sector_t space)
930     md_wakeup_thread(log->reclaim_thread);
931     }
932    
933     -void r5l_quiesce(struct r5l_log *log, int state)
934     +void r5l_quiesce(struct r5l_log *log, int quiesce)
935     {
936     struct mddev *mddev;
937     - if (!log || state == 2)
938     + if (!log)
939     return;
940     - if (state == 0)
941     - kthread_unpark(log->reclaim_thread->tsk);
942     - else if (state == 1) {
943     +
944     + if (quiesce) {
945     /* make sure r5l_write_super_and_discard_space exits */
946     mddev = log->rdev->mddev;
947     wake_up(&mddev->sb_wait);
948     kthread_park(log->reclaim_thread->tsk);
949     r5l_wake_reclaim(log, MaxSector);
950     r5l_do_reclaim(log);
951     - }
952     + } else
953     + kthread_unpark(log->reclaim_thread->tsk);
954     }
955    
956     bool r5l_log_disk_error(struct r5conf *conf)
957     @@ -3161,6 +3167,8 @@ void r5l_exit_log(struct r5conf *conf)
958     conf->log = NULL;
959     synchronize_rcu();
960    
961     + /* Ensure disable_writeback_work wakes up and exits */
962     + wake_up(&conf->mddev->sb_wait);
963     flush_work(&log->disable_writeback_work);
964     md_unregister_thread(&log->reclaim_thread);
965     mempool_destroy(log->meta_pool);
966     diff --git a/drivers/md/raid5-log.h b/drivers/md/raid5-log.h
967     index 7f9ad5f7cda0..284578b0a349 100644
968     --- a/drivers/md/raid5-log.h
969     +++ b/drivers/md/raid5-log.h
970     @@ -9,7 +9,7 @@ extern void r5l_write_stripe_run(struct r5l_log *log);
971     extern void r5l_flush_stripe_to_raid(struct r5l_log *log);
972     extern void r5l_stripe_write_finished(struct stripe_head *sh);
973     extern int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio);
974     -extern void r5l_quiesce(struct r5l_log *log, int state);
975     +extern void r5l_quiesce(struct r5l_log *log, int quiesce);
976     extern bool r5l_log_disk_error(struct r5conf *conf);
977     extern bool r5c_is_writeback(struct r5l_log *log);
978     extern int
979     diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
980     index de1ef6264ee7..07ca2fd10189 100644
981     --- a/drivers/md/raid5.c
982     +++ b/drivers/md/raid5.c
983     @@ -5686,28 +5686,6 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
984     goto retry;
985     }
986    
987     - if (rw == WRITE &&
988     - logical_sector >= mddev->suspend_lo &&
989     - logical_sector < mddev->suspend_hi) {
990     - raid5_release_stripe(sh);
991     - /* As the suspend_* range is controlled by
992     - * userspace, we want an interruptible
993     - * wait.
994     - */
995     - prepare_to_wait(&conf->wait_for_overlap,
996     - &w, TASK_INTERRUPTIBLE);
997     - if (logical_sector >= mddev->suspend_lo &&
998     - logical_sector < mddev->suspend_hi) {
999     - sigset_t full, old;
1000     - sigfillset(&full);
1001     - sigprocmask(SIG_BLOCK, &full, &old);
1002     - schedule();
1003     - sigprocmask(SIG_SETMASK, &old, NULL);
1004     - do_prepare = true;
1005     - }
1006     - goto retry;
1007     - }
1008     -
1009     if (test_bit(STRIPE_EXPANDING, &sh->state) ||
1010     !add_stripe_bio(sh, bi, dd_idx, rw, previous)) {
1011     /* Stripe is busy expanding or
1012     @@ -8025,16 +8003,12 @@ static void raid5_finish_reshape(struct mddev *mddev)
1013     }
1014     }
1015    
1016     -static void raid5_quiesce(struct mddev *mddev, int state)
1017     +static void raid5_quiesce(struct mddev *mddev, int quiesce)
1018     {
1019     struct r5conf *conf = mddev->private;
1020    
1021     - switch(state) {
1022     - case 2: /* resume for a suspend */
1023     - wake_up(&conf->wait_for_overlap);
1024     - break;
1025     -
1026     - case 1: /* stop all writes */
1027     + if (quiesce) {
1028     + /* stop all writes */
1029     lock_all_device_hash_locks_irq(conf);
1030     /* '2' tells resync/reshape to pause so that all
1031     * active stripes can drain
1032     @@ -8050,17 +8024,15 @@ static void raid5_quiesce(struct mddev *mddev, int state)
1033     unlock_all_device_hash_locks_irq(conf);
1034     /* allow reshape to continue */
1035     wake_up(&conf->wait_for_overlap);
1036     - break;
1037     -
1038     - case 0: /* re-enable writes */
1039     + } else {
1040     + /* re-enable writes */
1041     lock_all_device_hash_locks_irq(conf);
1042     conf->quiesce = 0;
1043     wake_up(&conf->wait_for_quiescent);
1044     wake_up(&conf->wait_for_overlap);
1045     unlock_all_device_hash_locks_irq(conf);
1046     - break;
1047     }
1048     - r5l_quiesce(conf->log, state);
1049     + r5l_quiesce(conf->log, quiesce);
1050     }
1051    
1052     static void *raid45_takeover_raid0(struct mddev *mddev, int level)
1053     diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
1054     index 528e04f96c13..d410de331854 100644
1055     --- a/drivers/mtd/nand/nand_base.c
1056     +++ b/drivers/mtd/nand/nand_base.c
1057     @@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
1058    
1059     for (; page < page_end; page++) {
1060     res = chip->ecc.read_oob(mtd, chip, page);
1061     - if (res)
1062     + if (res < 0)
1063     return res;
1064    
1065     bad = chip->oob_poi[chip->badblockpos];
1066     diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
1067     index 274f3679f33d..acf64d4cd94c 100644
1068     --- a/drivers/net/dsa/b53/b53_common.c
1069     +++ b/drivers/net/dsa/b53/b53_common.c
1070     @@ -1549,6 +1549,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
1071     .cpu_port = B53_CPU_PORT_25,
1072     .duplex_reg = B53_DUPLEX_STAT_FE,
1073     },
1074     + {
1075     + .chip_id = BCM5389_DEVICE_ID,
1076     + .dev_name = "BCM5389",
1077     + .vlans = 4096,
1078     + .enabled_ports = 0x1f,
1079     + .arl_entries = 4,
1080     + .cpu_port = B53_CPU_PORT,
1081     + .vta_regs = B53_VTA_REGS,
1082     + .duplex_reg = B53_DUPLEX_STAT_GE,
1083     + .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
1084     + .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
1085     + },
1086     {
1087     .chip_id = BCM5395_DEVICE_ID,
1088     .dev_name = "BCM5395",
1089     @@ -1872,6 +1884,7 @@ int b53_switch_detect(struct b53_device *dev)
1090     else
1091     dev->chip_id = BCM5365_DEVICE_ID;
1092     break;
1093     + case BCM5389_DEVICE_ID:
1094     case BCM5395_DEVICE_ID:
1095     case BCM5397_DEVICE_ID:
1096     case BCM5398_DEVICE_ID:
1097     diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
1098     index fa7556f5d4fb..a533a90e3904 100644
1099     --- a/drivers/net/dsa/b53/b53_mdio.c
1100     +++ b/drivers/net/dsa/b53/b53_mdio.c
1101     @@ -285,6 +285,7 @@ static const struct b53_io_ops b53_mdio_ops = {
1102     #define B53_BRCM_OUI_1 0x0143bc00
1103     #define B53_BRCM_OUI_2 0x03625c00
1104     #define B53_BRCM_OUI_3 0x00406000
1105     +#define B53_BRCM_OUI_4 0x01410c00
1106    
1107     static int b53_mdio_probe(struct mdio_device *mdiodev)
1108     {
1109     @@ -311,7 +312,8 @@ static int b53_mdio_probe(struct mdio_device *mdiodev)
1110     */
1111     if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
1112     (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
1113     - (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
1114     + (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
1115     + (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
1116     dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
1117     return -ENODEV;
1118     }
1119     @@ -360,6 +362,7 @@ static const struct of_device_id b53_of_match[] = {
1120     { .compatible = "brcm,bcm53125" },
1121     { .compatible = "brcm,bcm53128" },
1122     { .compatible = "brcm,bcm5365" },
1123     + { .compatible = "brcm,bcm5389" },
1124     { .compatible = "brcm,bcm5395" },
1125     { .compatible = "brcm,bcm5397" },
1126     { .compatible = "brcm,bcm5398" },
1127     diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
1128     index 01bd8cbe9a3f..6b9e39ddaec1 100644
1129     --- a/drivers/net/dsa/b53/b53_priv.h
1130     +++ b/drivers/net/dsa/b53/b53_priv.h
1131     @@ -48,6 +48,7 @@ struct b53_io_ops {
1132     enum {
1133     BCM5325_DEVICE_ID = 0x25,
1134     BCM5365_DEVICE_ID = 0x65,
1135     + BCM5389_DEVICE_ID = 0x89,
1136     BCM5395_DEVICE_ID = 0x95,
1137     BCM5397_DEVICE_ID = 0x97,
1138     BCM5398_DEVICE_ID = 0x98,
1139     diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
1140     index 612c7a44b26c..23821540ab07 100644
1141     --- a/drivers/net/ethernet/natsemi/sonic.c
1142     +++ b/drivers/net/ethernet/natsemi/sonic.c
1143     @@ -71,7 +71,7 @@ static int sonic_open(struct net_device *dev)
1144     for (i = 0; i < SONIC_NUM_RRS; i++) {
1145     dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
1146     SONIC_RBSIZE, DMA_FROM_DEVICE);
1147     - if (!laddr) {
1148     + if (dma_mapping_error(lp->device, laddr)) {
1149     while(i > 0) { /* free any that were mapped successfully */
1150     i--;
1151     dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
1152     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1153     index 8e06f308ce44..b23ee948e7c9 100644
1154     --- a/drivers/net/usb/qmi_wwan.c
1155     +++ b/drivers/net/usb/qmi_wwan.c
1156     @@ -1103,6 +1103,7 @@ static const struct usb_device_id products[] = {
1157     {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
1158     {QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)}, /* YUGA CLM920-NC5 */
1159     {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
1160     + {QMI_FIXED_INTF(0x0846, 0x68d3, 8)}, /* Netgear Aircard 779S */
1161     {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */
1162     {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)}, /* Huawei E1820 */
1163     {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */
1164     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
1165     index 12a9b86d71ea..dffa697d71e0 100644
1166     --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
1167     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
1168     @@ -1499,14 +1499,13 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1169     struct iwl_trans *trans)
1170     {
1171     struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1172     - int max_irqs, num_irqs, i, ret, nr_online_cpus;
1173     + int max_irqs, num_irqs, i, ret;
1174     u16 pci_cmd;
1175    
1176     if (!trans->cfg->mq_rx_supported)
1177     goto enable_msi;
1178    
1179     - nr_online_cpus = num_online_cpus();
1180     - max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
1181     + max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
1182     for (i = 0; i < max_irqs; i++)
1183     trans_pcie->msix_entries[i].entry = i;
1184    
1185     @@ -1532,16 +1531,17 @@ static void iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
1186     * Two interrupts less: non rx causes shared with FBQ and RSS.
1187     * More than two interrupts: we will use fewer RSS queues.
1188     */
1189     - if (num_irqs <= nr_online_cpus) {
1190     + if (num_irqs <= max_irqs - 2) {
1191     trans_pcie->trans->num_rx_queues = num_irqs + 1;
1192     trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
1193     IWL_SHARED_IRQ_FIRST_RSS;
1194     - } else if (num_irqs == nr_online_cpus + 1) {
1195     + } else if (num_irqs == max_irqs - 1) {
1196     trans_pcie->trans->num_rx_queues = num_irqs;
1197     trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
1198     } else {
1199     trans_pcie->trans->num_rx_queues = num_irqs - 1;
1200     }
1201     + WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
1202    
1203     trans_pcie->alloc_vecs = num_irqs;
1204     trans_pcie->msix_enabled = true;
1205     diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
1206     index 48e1541dc8d4..7440f650e81a 100644
1207     --- a/drivers/platform/x86/asus-wmi.c
1208     +++ b/drivers/platform/x86/asus-wmi.c
1209     @@ -161,6 +161,16 @@ MODULE_LICENSE("GPL");
1210    
1211     static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };
1212    
1213     +static bool ashs_present(void)
1214     +{
1215     + int i = 0;
1216     + while (ashs_ids[i]) {
1217     + if (acpi_dev_found(ashs_ids[i++]))
1218     + return true;
1219     + }
1220     + return false;
1221     +}
1222     +
1223     struct bios_args {
1224     u32 arg0;
1225     u32 arg1;
1226     @@ -962,6 +972,9 @@ static int asus_new_rfkill(struct asus_wmi *asus,
1227    
1228     static void asus_wmi_rfkill_exit(struct asus_wmi *asus)
1229     {
1230     + if (asus->driver->wlan_ctrl_by_user && ashs_present())
1231     + return;
1232     +
1233     asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P5");
1234     asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P6");
1235     asus_unregister_rfkill_notifier(asus, "\\_SB.PCI0.P0P7");
1236     @@ -2058,16 +2071,6 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
1237     return 0;
1238     }
1239    
1240     -static bool ashs_present(void)
1241     -{
1242     - int i = 0;
1243     - while (ashs_ids[i]) {
1244     - if (acpi_dev_found(ashs_ids[i++]))
1245     - return true;
1246     - }
1247     - return false;
1248     -}
1249     -
1250     /*
1251     * WMI Driver
1252     */
1253     diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
1254     index e67c1d8a193d..d072f84a8535 100644
1255     --- a/drivers/s390/block/dasd.c
1256     +++ b/drivers/s390/block/dasd.c
1257     @@ -3049,7 +3049,8 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
1258     cqr->callback_data = req;
1259     cqr->status = DASD_CQR_FILLED;
1260     cqr->dq = dq;
1261     - req->completion_data = cqr;
1262     + *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
1263     +
1264     blk_mq_start_request(req);
1265     spin_lock(&block->queue_lock);
1266     list_add_tail(&cqr->blocklist, &block->ccw_queue);
1267     @@ -3073,12 +3074,13 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
1268     */
1269     enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
1270     {
1271     - struct dasd_ccw_req *cqr = req->completion_data;
1272     struct dasd_block *block = req->q->queuedata;
1273     struct dasd_device *device;
1274     + struct dasd_ccw_req *cqr;
1275     unsigned long flags;
1276     int rc = 0;
1277    
1278     + cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
1279     if (!cqr)
1280     return BLK_EH_NOT_HANDLED;
1281    
1282     @@ -3184,6 +3186,7 @@ static int dasd_alloc_queue(struct dasd_block *block)
1283     int rc;
1284    
1285     block->tag_set.ops = &dasd_mq_ops;
1286     + block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
1287     block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
1288     block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
1289     block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1290     diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
1291     index 91faa7f035b9..babbd94c32d9 100644
1292     --- a/drivers/staging/android/ion/ion_heap.c
1293     +++ b/drivers/staging/android/ion/ion_heap.c
1294     @@ -38,7 +38,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
1295     struct page **tmp = pages;
1296    
1297     if (!pages)
1298     - return NULL;
1299     + return ERR_PTR(-ENOMEM);
1300    
1301     if (buffer->flags & ION_FLAG_CACHED)
1302     pgprot = PAGE_KERNEL;
1303     diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
1304     index 1c70541a1467..0475f9685a41 100644
1305     --- a/drivers/tty/n_tty.c
1306     +++ b/drivers/tty/n_tty.c
1307     @@ -126,6 +126,8 @@ struct n_tty_data {
1308     struct mutex output_lock;
1309     };
1310    
1311     +#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
1312     +
1313     static inline size_t read_cnt(struct n_tty_data *ldata)
1314     {
1315     return ldata->read_head - ldata->read_tail;
1316     @@ -143,6 +145,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
1317    
1318     static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
1319     {
1320     + smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
1321     return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
1322     }
1323    
1324     @@ -318,9 +321,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
1325     static void reset_buffer_flags(struct n_tty_data *ldata)
1326     {
1327     ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
1328     - ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
1329     ldata->commit_head = 0;
1330     - ldata->echo_mark = 0;
1331     ldata->line_start = 0;
1332    
1333     ldata->erasing = 0;
1334     @@ -619,12 +620,19 @@ static size_t __process_echoes(struct tty_struct *tty)
1335     old_space = space = tty_write_room(tty);
1336    
1337     tail = ldata->echo_tail;
1338     - while (ldata->echo_commit != tail) {
1339     + while (MASK(ldata->echo_commit) != MASK(tail)) {
1340     c = echo_buf(ldata, tail);
1341     if (c == ECHO_OP_START) {
1342     unsigned char op;
1343     int no_space_left = 0;
1344    
1345     + /*
1346     + * Since add_echo_byte() is called without holding
1347     + * output_lock, we might see only portion of multi-byte
1348     + * operation.
1349     + */
1350     + if (MASK(ldata->echo_commit) == MASK(tail + 1))
1351     + goto not_yet_stored;
1352     /*
1353     * If the buffer byte is the start of a multi-byte
1354     * operation, get the next byte, which is either the
1355     @@ -636,6 +644,8 @@ static size_t __process_echoes(struct tty_struct *tty)
1356     unsigned int num_chars, num_bs;
1357    
1358     case ECHO_OP_ERASE_TAB:
1359     + if (MASK(ldata->echo_commit) == MASK(tail + 2))
1360     + goto not_yet_stored;
1361     num_chars = echo_buf(ldata, tail + 2);
1362    
1363     /*
1364     @@ -730,7 +740,8 @@ static size_t __process_echoes(struct tty_struct *tty)
1365     /* If the echo buffer is nearly full (so that the possibility exists
1366     * of echo overrun before the next commit), then discard enough
1367     * data at the tail to prevent a subsequent overrun */
1368     - while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
1369     + while (ldata->echo_commit > tail &&
1370     + ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
1371     if (echo_buf(ldata, tail) == ECHO_OP_START) {
1372     if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
1373     tail += 3;
1374     @@ -740,6 +751,7 @@ static size_t __process_echoes(struct tty_struct *tty)
1375     tail++;
1376     }
1377    
1378     + not_yet_stored:
1379     ldata->echo_tail = tail;
1380     return old_space - space;
1381     }
1382     @@ -750,6 +762,7 @@ static void commit_echoes(struct tty_struct *tty)
1383     size_t nr, old, echoed;
1384     size_t head;
1385    
1386     + mutex_lock(&ldata->output_lock);
1387     head = ldata->echo_head;
1388     ldata->echo_mark = head;
1389     old = ldata->echo_commit - ldata->echo_tail;
1390     @@ -758,10 +771,12 @@ static void commit_echoes(struct tty_struct *tty)
1391     * is over the threshold (and try again each time another
1392     * block is accumulated) */
1393     nr = head - ldata->echo_tail;
1394     - if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
1395     + if (nr < ECHO_COMMIT_WATERMARK ||
1396     + (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
1397     + mutex_unlock(&ldata->output_lock);
1398     return;
1399     + }
1400    
1401     - mutex_lock(&ldata->output_lock);
1402     ldata->echo_commit = head;
1403     echoed = __process_echoes(tty);
1404     mutex_unlock(&ldata->output_lock);
1405     @@ -812,7 +827,9 @@ static void flush_echoes(struct tty_struct *tty)
1406    
1407     static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
1408     {
1409     - *echo_buf_addr(ldata, ldata->echo_head++) = c;
1410     + *echo_buf_addr(ldata, ldata->echo_head) = c;
1411     + smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
1412     + ldata->echo_head++;
1413     }
1414    
1415     /**
1416     @@ -980,14 +997,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1417     }
1418    
1419     seen_alnums = 0;
1420     - while (ldata->read_head != ldata->canon_head) {
1421     + while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
1422     head = ldata->read_head;
1423    
1424     /* erase a single possibly multibyte character */
1425     do {
1426     head--;
1427     c = read_buf(ldata, head);
1428     - } while (is_continuation(c, tty) && head != ldata->canon_head);
1429     + } while (is_continuation(c, tty) &&
1430     + MASK(head) != MASK(ldata->canon_head));
1431    
1432     /* do not partially erase */
1433     if (is_continuation(c, tty))
1434     @@ -1029,7 +1047,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1435     * This info is used to go back the correct
1436     * number of columns.
1437     */
1438     - while (tail != ldata->canon_head) {
1439     + while (MASK(tail) != MASK(ldata->canon_head)) {
1440     tail--;
1441     c = read_buf(ldata, tail);
1442     if (c == '\t') {
1443     @@ -1304,7 +1322,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
1444     finish_erasing(ldata);
1445     echo_char(c, tty);
1446     echo_char_raw('\n', ldata);
1447     - while (tail != ldata->read_head) {
1448     + while (MASK(tail) != MASK(ldata->read_head)) {
1449     echo_char(read_buf(ldata, tail), tty);
1450     tail++;
1451     }
1452     @@ -1880,30 +1898,21 @@ static int n_tty_open(struct tty_struct *tty)
1453     struct n_tty_data *ldata;
1454    
1455     /* Currently a malloc failure here can panic */
1456     - ldata = vmalloc(sizeof(*ldata));
1457     + ldata = vzalloc(sizeof(*ldata));
1458     if (!ldata)
1459     - goto err;
1460     + return -ENOMEM;
1461    
1462     ldata->overrun_time = jiffies;
1463     mutex_init(&ldata->atomic_read_lock);
1464     mutex_init(&ldata->output_lock);
1465    
1466     tty->disc_data = ldata;
1467     - reset_buffer_flags(tty->disc_data);
1468     - ldata->column = 0;
1469     - ldata->canon_column = 0;
1470     - ldata->num_overrun = 0;
1471     - ldata->no_room = 0;
1472     - ldata->lnext = 0;
1473     tty->closing = 0;
1474     /* indicate buffer work may resume */
1475     clear_bit(TTY_LDISC_HALTED, &tty->flags);
1476     n_tty_set_termios(tty, NULL);
1477     tty_unthrottle(tty);
1478     -
1479     return 0;
1480     -err:
1481     - return -ENOMEM;
1482     }
1483    
1484     static inline int input_available_p(struct tty_struct *tty, int poll)
1485     @@ -2413,7 +2422,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
1486     tail = ldata->read_tail;
1487     nr = head - tail;
1488     /* Skip EOF-chars.. */
1489     - while (head != tail) {
1490     + while (MASK(head) != MASK(tail)) {
1491     if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
1492     read_buf(ldata, tail) == __DISABLED_CHAR)
1493     nr--;
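
The n_tty hunks above rely on two idioms worth spelling out: the head/tail counters are free-running values that are reduced modulo the power-of-two buffer size only at comparison and access time via MASK(), and add_echo_byte() now publishes the byte with a write barrier before advancing the head, so a lock-free reader never consumes an unwritten slot. A minimal userspace sketch of the same scheme, using C11 atomics in place of smp_wmb()/smp_rmb(); the struct, size, and function names here are illustrative, not the kernel's:

    #include <stdatomic.h>
    #include <stddef.h>

    #define BUF_SIZE 4096u                  /* power of two, like N_TTY_BUF_SIZE */
    #define MASK(x)  ((x) & (BUF_SIZE - 1)) /* reduce a free-running counter */

    struct ring {
        unsigned char buf[BUF_SIZE];
        _Atomic size_t head;                /* producer-owned, free-running */
        size_t tail;                        /* consumer-owned, free-running */
    };

    /* Producer, mirroring add_echo_byte(): store the byte first, then
     * publish the new head (release pairs with the consumer's acquire). */
    static void put(struct ring *r, unsigned char c)
    {
        size_t h = atomic_load_explicit(&r->head, memory_order_relaxed);

        r->buf[MASK(h)] = c;
        atomic_store_explicit(&r->head, h + 1, memory_order_release);
    }

    /* Consumer: MASK() both sides of the comparison so independently
     * wrapped counters still compare correctly. Assumes the producer
     * never laps the consumer by a full buffer, which n_tty enforces by
     * discarding at ECHO_DISCARD_WATERMARK. */
    static int get(struct ring *r, unsigned char *c)
    {
        size_t h = atomic_load_explicit(&r->head, memory_order_acquire);

        if (MASK(h) == MASK(r->tail))
            return 0;                       /* nothing published yet */
        *c = r->buf[MASK(r->tail)];
        r->tail++;
        return 1;
    }

The MASK(ldata->echo_commit) == MASK(tail + 1) and == MASK(tail + 2) bail-outs in __process_echoes() are the same idea applied to a multi-byte record: if only the ECHO_OP_START prefix has been published so far, the reader leaves the record for the next pass instead of consuming garbage.
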
1494     diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
1495     index 97db76afced2..ae2564ecddcd 100644
1496     --- a/drivers/tty/serdev/core.c
1497     +++ b/drivers/tty/serdev/core.c
1498     @@ -482,6 +482,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
1499     static void __exit serdev_exit(void)
1500     {
1501     bus_unregister(&serdev_bus_type);
1502     + ida_destroy(&ctrl_ida);
1503     }
1504     module_exit(serdev_exit);
1505    
1506     diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1507     index 0d814a87acb2..4986b4aebe80 100644
1508     --- a/drivers/tty/serial/8250/8250_pci.c
1509     +++ b/drivers/tty/serial/8250/8250_pci.c
1510     @@ -3345,9 +3345,7 @@ static const struct pci_device_id blacklist[] = {
1511     /* multi-io cards handled by parport_serial */
1512     { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
1513     { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
1514     - { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
1515     { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
1516     - { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
1517    
1518     /* Moxa Smartio MUE boards handled by 8250_moxa */
1519     { PCI_VDEVICE(MOXA, 0x1024), },
1520     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
1521     index de67abbda921..e77421e7bf46 100644
1522     --- a/drivers/tty/vt/vt.c
1523     +++ b/drivers/tty/vt/vt.c
1524     @@ -782,7 +782,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
1525     if (!*vc->vc_uni_pagedir_loc)
1526     con_set_default_unimap(vc);
1527    
1528     - vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
1529     + vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
1530     if (!vc->vc_screenbuf)
1531     goto err_free;
1532    
1533     @@ -869,7 +869,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
1534    
1535     if (new_screen_size > (4 << 20))
1536     return -EINVAL;
1537     - newscreen = kmalloc(new_screen_size, GFP_USER);
1538     + newscreen = kzalloc(new_screen_size, GFP_USER);
1539     if (!newscreen)
1540     return -ENOMEM;
1541    
1542     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1543     index 22952d70b981..3b9aadd007f5 100644
1544     --- a/drivers/usb/class/cdc-acm.c
1545     +++ b/drivers/usb/class/cdc-acm.c
1546     @@ -1771,6 +1771,9 @@ static const struct usb_device_id acm_ids[] = {
1547     { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
1548     .driver_info = SINGLE_RX_URB,
1549     },
1550     + { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
1551     + .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1552     + },
1553     { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
1554     .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1555     },
1556     diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
1557     index 3ae8b1bbaa55..7f51a77bc5cc 100644
1558     --- a/drivers/usb/dwc2/hcd_queue.c
1559     +++ b/drivers/usb/dwc2/hcd_queue.c
1560     @@ -379,7 +379,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
1561     /* Get the map and adjust if this is a multi_tt hub */
1562     map = qh->dwc_tt->periodic_bitmaps;
1563     if (qh->dwc_tt->usb_tt->multi)
1564     - map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
1565     + map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
1566    
1567     return map;
1568     }
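
The dwc2 fix above is a plain off-by-one: USB hub ports are numbered starting at 1, while the per-port bitmap array is indexed from 0, so indexing with ttport itself skipped the first port's bitmap and read one element past the last. A hypothetical helper showing the intended mapping:

    #include <stddef.h>

    /* Hub ports are numbered 1..N; array slots are 0..N-1, so port 1
     * must land on slot 0. (Names here are illustrative.) */
    static unsigned long *port_bitmap(unsigned long *maps,
                                      size_t elems_per_port,
                                      unsigned int ttport)
    {
        return maps + elems_per_port * (ttport - 1);
    }
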
1569     diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
1570     index efd7e4882d66..00b710016d21 100644
1571     --- a/drivers/usb/host/xhci-mem.c
1572     +++ b/drivers/usb/host/xhci-mem.c
1573     @@ -891,12 +891,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
1574    
1575     dev = xhci->devs[slot_id];
1576    
1577     - trace_xhci_free_virt_device(dev);
1578     -
1579     xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
1580     if (!dev)
1581     return;
1582    
1583     + trace_xhci_free_virt_device(dev);
1584     +
1585     if (dev->tt_info)
1586     old_active_eps = dev->tt_info->active_eps;
1587    
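
The xhci-mem.c reorder above fixes a NULL dereference: trace_xhci_free_virt_device() reads fields out of the vdev (hence the dedicated event class added to xhci-trace.h below, which no longer touches udev), so it must run after the NULL check on dev, not before. A sketch of the shape of the fix, with a stand-in tracepoint; all names here are illustrative:

    #include <stdio.h>

    struct virt_dev { int slot_id; };

    /* Stand-in for trace_xhci_free_virt_device(): it dereferences its
     * argument, so calling it with NULL would crash. */
    static void trace_free(const struct virt_dev *dev)
    {
        printf("free slot %d\n", dev->slot_id);
    }

    static void free_virt_device(struct virt_dev **devs, int slot_id)
    {
        struct virt_dev *dev = devs[slot_id];

        devs[slot_id] = NULL;   /* clearing the table needs no dereference */
        if (!dev)
            return;             /* bail out before touching *dev */

        trace_free(dev);        /* now provably non-NULL, as in the hunk */
        /* ...the rest of the teardown... */
    }
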
1588     diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
1589     index f20753b99624..02a1164ca599 100644
1590     --- a/drivers/usb/host/xhci-trace.h
1591     +++ b/drivers/usb/host/xhci-trace.h
1592     @@ -158,6 +158,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_queue_trb,
1593     TP_ARGS(ring, trb)
1594     );
1595    
1596     +DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
1597     + TP_PROTO(struct xhci_virt_device *vdev),
1598     + TP_ARGS(vdev),
1599     + TP_STRUCT__entry(
1600     + __field(void *, vdev)
1601     + __field(unsigned long long, out_ctx)
1602     + __field(unsigned long long, in_ctx)
1603     + __field(u8, fake_port)
1604     + __field(u8, real_port)
1605     + __field(u16, current_mel)
1606     +
1607     + ),
1608     + TP_fast_assign(
1609     + __entry->vdev = vdev;
1610     + __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
1611     + __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
1612     + __entry->fake_port = (u8) vdev->fake_port;
1613     + __entry->real_port = (u8) vdev->real_port;
1614     + __entry->current_mel = (u16) vdev->current_mel;
1615     + ),
1616     + TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
1617     + __entry->vdev, __entry->in_ctx, __entry->out_ctx,
1618     + __entry->fake_port, __entry->real_port, __entry->current_mel
1619     + )
1620     +);
1621     +
1622     +DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
1623     + TP_PROTO(struct xhci_virt_device *vdev),
1624     + TP_ARGS(vdev)
1625     +);
1626     +
1627     DECLARE_EVENT_CLASS(xhci_log_virt_dev,
1628     TP_PROTO(struct xhci_virt_device *vdev),
1629     TP_ARGS(vdev),
1630     @@ -195,11 +226,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
1631     TP_ARGS(vdev)
1632     );
1633    
1634     -DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
1635     - TP_PROTO(struct xhci_virt_device *vdev),
1636     - TP_ARGS(vdev)
1637     -);
1638     -
1639     DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
1640     TP_PROTO(struct xhci_virt_device *vdev),
1641     TP_ARGS(vdev)
1642     diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1643     index d0f00274d16c..142a83e5974c 100644
1644     --- a/drivers/usb/serial/cp210x.c
1645     +++ b/drivers/usb/serial/cp210x.c
1646     @@ -98,6 +98,9 @@ static const struct usb_device_id id_table[] = {
1647     { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
1648     { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
1649     { USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
1650     + { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
1651     + { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
1652     + { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
1653     { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
1654     { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
1655     { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
1656     @@ -115,6 +118,9 @@ static const struct usb_device_id id_table[] = {
1657     { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
1658     { USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
1659     { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
1660     + { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
1661     + { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
1662     + { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
1663     { USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
1664     { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
1665     { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
1666     @@ -127,7 +133,9 @@ static const struct usb_device_id id_table[] = {
1667     { USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
1668     { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
1669     { USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
1670     + { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
1671     { USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
1672     + { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
1673     { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
1674     { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
1675     { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
1676     @@ -137,17 +145,23 @@ static const struct usb_device_id id_table[] = {
1677     { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
1678     { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
1679     { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
1680     + { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
1681     + { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
1682     { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
1683     { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
1684     { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
1685     { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
1686     + { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
1687     { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
1688     { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
1689     { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
1690     { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
1691     { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
1692     + { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
1693     { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
1694     { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
1695     + { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
1696     + { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
1697     { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
1698     { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
1699     { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
1700     diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
1701     index dd24c5c1534d..251f5d66651e 100644
1702     --- a/drivers/usb/typec/ucsi/ucsi.c
1703     +++ b/drivers/usb/typec/ucsi/ucsi.c
1704     @@ -346,6 +346,19 @@ static void ucsi_connector_change(struct work_struct *work)
1705     }
1706    
1707     if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
1708     + typec_set_pwr_role(con->port, con->status.pwr_dir);
1709     +
1710     + switch (con->status.partner_type) {
1711     + case UCSI_CONSTAT_PARTNER_TYPE_UFP:
1712     + typec_set_data_role(con->port, TYPEC_HOST);
1713     + break;
1714     + case UCSI_CONSTAT_PARTNER_TYPE_DFP:
1715     + typec_set_data_role(con->port, TYPEC_DEVICE);
1716     + break;
1717     + default:
1718     + break;
1719     + }
1720     +
1721     if (con->status.connected)
1722     ucsi_register_partner(con);
1723     else
1724     diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
1725     index cabd47612b0a..494d2a49203a 100644
1726     --- a/drivers/usb/typec/ucsi/ucsi_acpi.c
1727     +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
1728     @@ -82,6 +82,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
1729     return -ENODEV;
1730     }
1731    
1732     + /* This will make sure we can use ioremap_nocache() */
1733     + status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
1734     + if (ACPI_FAILURE(status))
1735     + return -ENOMEM;
1736     +
1737     /*
1738     * NOTE: The memory region for the data structures is used also in an
1739     * operation region, which means ACPI has already reserved it. Therefore
1740     diff --git a/fs/afs/security.c b/fs/afs/security.c
1741     index faca66227ecf..859096e25f2c 100644
1742     --- a/fs/afs/security.c
1743     +++ b/fs/afs/security.c
1744     @@ -323,18 +323,14 @@ int afs_permission(struct inode *inode, int mask)
1745     mask, access, S_ISDIR(inode->i_mode) ? "dir" : "file");
1746    
1747     if (S_ISDIR(inode->i_mode)) {
1748     - if (mask & MAY_EXEC) {
1749     + if (mask & (MAY_EXEC | MAY_READ | MAY_CHDIR)) {
1750     if (!(access & AFS_ACE_LOOKUP))
1751     goto permission_denied;
1752     - } else if (mask & MAY_READ) {
1753     - if (!(access & AFS_ACE_LOOKUP))
1754     - goto permission_denied;
1755     - } else if (mask & MAY_WRITE) {
1756     + }
1757     + if (mask & MAY_WRITE) {
1758     if (!(access & (AFS_ACE_DELETE | /* rmdir, unlink, rename from */
1759     AFS_ACE_INSERT))) /* create, mkdir, symlink, rename to */
1760     goto permission_denied;
1761     - } else {
1762     - BUG();
1763     }
1764     } else {
1765     if (!(access & AFS_ACE_LOOKUP))
1766     diff --git a/fs/inode.c b/fs/inode.c
1767     index d1e35b53bb23..e07b3e1f5970 100644
1768     --- a/fs/inode.c
1769     +++ b/fs/inode.c
1770     @@ -177,6 +177,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
1771     mapping->a_ops = &empty_aops;
1772     mapping->host = inode;
1773     mapping->flags = 0;
1774     + mapping->wb_err = 0;
1775     atomic_set(&mapping->i_mmap_writable, 0);
1776     mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
1777     mapping->private_data = NULL;
1778     diff --git a/include/linux/acpi.h b/include/linux/acpi.h
1779     index 502af53ec012..13c105121a18 100644
1780     --- a/include/linux/acpi.h
1781     +++ b/include/linux/acpi.h
1782     @@ -441,6 +441,9 @@ int acpi_check_resource_conflict(const struct resource *res);
1783     int acpi_check_region(resource_size_t start, resource_size_t n,
1784     const char *name);
1785    
1786     +acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
1787     + u32 level);
1788     +
1789     int acpi_resources_are_enforced(void);
1790    
1791     #ifdef CONFIG_HIBERNATION
1792     diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
1793     index 079c69cae2f6..59a4f50ffe8d 100644
1794     --- a/include/net/netfilter/nf_tables.h
1795     +++ b/include/net/netfilter/nf_tables.h
1796     @@ -177,6 +177,7 @@ struct nft_data_desc {
1797     int nft_data_init(const struct nft_ctx *ctx,
1798     struct nft_data *data, unsigned int size,
1799     struct nft_data_desc *desc, const struct nlattr *nla);
1800     +void nft_data_hold(const struct nft_data *data, enum nft_data_types type);
1801     void nft_data_release(const struct nft_data *data, enum nft_data_types type);
1802     int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
1803     enum nft_data_types type, unsigned int len);
1804     @@ -731,6 +732,10 @@ struct nft_expr_ops {
1805     int (*init)(const struct nft_ctx *ctx,
1806     const struct nft_expr *expr,
1807     const struct nlattr * const tb[]);
1808     + void (*activate)(const struct nft_ctx *ctx,
1809     + const struct nft_expr *expr);
1810     + void (*deactivate)(const struct nft_ctx *ctx,
1811     + const struct nft_expr *expr);
1812     void (*destroy)(const struct nft_ctx *ctx,
1813     const struct nft_expr *expr);
1814     int (*dump)(struct sk_buff *skb,
1815     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1816     index f287dcbe8cb2..31615d1ae44c 100644
1817     --- a/kernel/sched/core.c
1818     +++ b/kernel/sched/core.c
1819     @@ -894,6 +894,33 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
1820     }
1821    
1822     #ifdef CONFIG_SMP
1823     +
1824     +static inline bool is_per_cpu_kthread(struct task_struct *p)
1825     +{
1826     + if (!(p->flags & PF_KTHREAD))
1827     + return false;
1828     +
1829     + if (p->nr_cpus_allowed != 1)
1830     + return false;
1831     +
1832     + return true;
1833     +}
1834     +
1835     +/*
1836     + * Per-CPU kthreads are allowed to run on !active && online CPUs, see
1837     + * __set_cpus_allowed_ptr() and select_fallback_rq().
1838     + */
1839     +static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
1840     +{
1841     + if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
1842     + return false;
1843     +
1844     + if (is_per_cpu_kthread(p))
1845     + return cpu_online(cpu);
1846     +
1847     + return cpu_active(cpu);
1848     +}
1849     +
1850     /*
1851     * This is how migration works:
1852     *
1853     @@ -951,16 +978,8 @@ struct migration_arg {
1854     static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
1855     struct task_struct *p, int dest_cpu)
1856     {
1857     - if (p->flags & PF_KTHREAD) {
1858     - if (unlikely(!cpu_online(dest_cpu)))
1859     - return rq;
1860     - } else {
1861     - if (unlikely(!cpu_active(dest_cpu)))
1862     - return rq;
1863     - }
1864     -
1865     /* Affinity changed (again). */
1866     - if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
1867     + if (!is_cpu_allowed(p, dest_cpu))
1868     return rq;
1869    
1870     update_rq_clock(rq);
1871     @@ -1489,10 +1508,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
1872     for (;;) {
1873     /* Any allowed, online CPU? */
1874     for_each_cpu(dest_cpu, &p->cpus_allowed) {
1875     - if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
1876     - continue;
1877     - if (!cpu_online(dest_cpu))
1878     + if (!is_cpu_allowed(p, dest_cpu))
1879     continue;
1880     +
1881     goto out;
1882     }
1883    
1884     @@ -1555,8 +1573,7 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
1885     * [ this allows ->select_task() to simply return task_cpu(p) and
1886     * not worry about this generic constraint ]
1887     */
1888     - if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
1889     - !cpu_online(cpu)))
1890     + if (unlikely(!is_cpu_allowed(p, cpu)))
1891     cpu = select_fallback_rq(task_cpu(p), p);
1892    
1893     return cpu;
1894     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1895     index 5b8cd359c4c0..e27fb6e97d18 100644
1896     --- a/net/bridge/netfilter/ebtables.c
1897     +++ b/net/bridge/netfilter/ebtables.c
1898     @@ -1950,7 +1950,8 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1899     int off, pad = 0;
1900     unsigned int size_kern, match_size = mwt->match_size;
1901    
1902     - strlcpy(name, mwt->u.name, sizeof(name));
1903     + if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
1904     + return -EINVAL;
1905    
1906     if (state->buf_kern_start)
1907     dst = state->buf_kern_start + state->buf_kern_offset;
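
Both the ebtables hunk above and the ipvs change further down swap strlcpy() for strscpy() for the same reason: strlcpy() returns the length of the source and silently truncates, so an overlong (or non-NUL-terminated) name from userspace is copied truncated and accepted, whereas strscpy() returns -E2BIG on truncation and lets the caller reject the input with -EINVAL. A userspace sketch of the contract the callers now rely on (the kernel implementation is more careful and faster):

    #include <errno.h>
    #include <string.h>
    #include <sys/types.h>

    /* strscpy()-like: copy at most size-1 bytes, always NUL-terminate,
     * return the number of bytes copied or -E2BIG on truncation. */
    static ssize_t strscpy_like(char *dst, const char *src, size_t size)
    {
        size_t len;

        if (size == 0)
            return -E2BIG;

        len = strnlen(src, size);
        if (len == size) {              /* src did not fit: refuse */
            memcpy(dst, src, size - 1);
            dst[size - 1] = '\0';
            return -E2BIG;
        }
        memcpy(dst, src, len + 1);      /* includes the terminator */
        return (ssize_t)len;
    }

Note that the `<= 0` test in the ipvs hunk additionally rejects an empty interface name, which strscpy() reports as 0 bytes copied.
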
1908     diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
1909     index b12e61b7b16c..1c4a5de3f301 100644
1910     --- a/net/ipv6/netfilter/ip6t_rpfilter.c
1911     +++ b/net/ipv6/netfilter/ip6t_rpfilter.c
1912     @@ -48,10 +48,8 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
1913     }
1914    
1915     fl6.flowi6_mark = flags & XT_RPFILTER_VALID_MARK ? skb->mark : 0;
1916     - if ((flags & XT_RPFILTER_LOOSE) == 0) {
1917     + if ((flags & XT_RPFILTER_LOOSE) == 0)
1918     fl6.flowi6_oif = dev->ifindex;
1919     - lookup_flags |= RT6_LOOKUP_F_IFACE;
1920     - }
1921    
1922     rt = (void *) ip6_route_lookup(net, &fl6, lookup_flags);
1923     if (rt->dst.error)
1924     diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
1925     index 54b5899543ef..fd9a45cbd709 100644
1926     --- a/net/ipv6/netfilter/nft_fib_ipv6.c
1927     +++ b/net/ipv6/netfilter/nft_fib_ipv6.c
1928     @@ -182,7 +182,6 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
1929     }
1930    
1931     *dest = 0;
1932     - again:
1933     rt = (void *)ip6_route_lookup(nft_net(pkt), &fl6, lookup_flags);
1934     if (rt->dst.error)
1935     goto put_rt_err;
1936     @@ -191,15 +190,8 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
1937     if (rt->rt6i_flags & (RTF_REJECT | RTF_ANYCAST | RTF_LOCAL))
1938     goto put_rt_err;
1939    
1940     - if (oif && oif != rt->rt6i_idev->dev) {
1941     - /* multipath route? Try again with F_IFACE */
1942     - if ((lookup_flags & RT6_LOOKUP_F_IFACE) == 0) {
1943     - lookup_flags |= RT6_LOOKUP_F_IFACE;
1944     - fl6.flowi6_oif = oif->ifindex;
1945     - ip6_rt_put(rt);
1946     - goto again;
1947     - }
1948     - }
1949     + if (oif && oif != rt->rt6i_idev->dev)
1950     + goto put_rt_err;
1951    
1952     switch (priv->result) {
1953     case NFT_FIB_RESULT_OIF:
1954     diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
1955     index 17e95a0386b3..d6b012295b45 100644
1956     --- a/net/ipv6/xfrm6_policy.c
1957     +++ b/net/ipv6/xfrm6_policy.c
1958     @@ -123,7 +123,7 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
1959     struct flowi6 *fl6 = &fl->u.ip6;
1960     int onlyproto = 0;
1961     const struct ipv6hdr *hdr = ipv6_hdr(skb);
1962     - u16 offset = sizeof(*hdr);
1963     + u32 offset = sizeof(*hdr);
1964     struct ipv6_opt_hdr *exthdr;
1965     const unsigned char *nh = skb_network_header(skb);
1966     u16 nhoff = IP6CB(skb)->nhoff;
1967     diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
1968     index e8f1556fa446..327ebe786eeb 100644
1969     --- a/net/netfilter/ipvs/ip_vs_ctl.c
1970     +++ b/net/netfilter/ipvs/ip_vs_ctl.c
1971     @@ -2384,8 +2384,10 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1972     struct ipvs_sync_daemon_cfg cfg;
1973    
1974     memset(&cfg, 0, sizeof(cfg));
1975     - strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
1976     - sizeof(cfg.mcast_ifn));
1977     + ret = -EINVAL;
1978     + if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
1979     + sizeof(cfg.mcast_ifn)) <= 0)
1980     + goto out_dec;
1981     cfg.syncid = dm->syncid;
1982     ret = start_sync_thread(ipvs, &cfg, dm->state);
1983     } else {
1984     @@ -2423,12 +2425,19 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
1985     }
1986     }
1987    
1988     + if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
1989     + strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
1990     + IP_VS_SCHEDNAME_MAXLEN) {
1991     + ret = -EINVAL;
1992     + goto out_unlock;
1993     + }
1994     +
1995     /* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
1996     if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
1997     usvc.protocol != IPPROTO_SCTP) {
1998     - pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
1999     + pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
2000     usvc.protocol, &usvc.addr.ip,
2001     - ntohs(usvc.port), usvc.sched_name);
2002     + ntohs(usvc.port));
2003     ret = -EFAULT;
2004     goto out_unlock;
2005     }
2006     @@ -2850,7 +2859,7 @@ static const struct nla_policy ip_vs_cmd_policy[IPVS_CMD_ATTR_MAX + 1] = {
2007     static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
2008     [IPVS_DAEMON_ATTR_STATE] = { .type = NLA_U32 },
2009     [IPVS_DAEMON_ATTR_MCAST_IFN] = { .type = NLA_NUL_STRING,
2010     - .len = IP_VS_IFNAME_MAXLEN },
2011     + .len = IP_VS_IFNAME_MAXLEN - 1 },
2012     [IPVS_DAEMON_ATTR_SYNC_ID] = { .type = NLA_U32 },
2013     [IPVS_DAEMON_ATTR_SYNC_MAXLEN] = { .type = NLA_U16 },
2014     [IPVS_DAEMON_ATTR_MCAST_GROUP] = { .type = NLA_U32 },
2015     @@ -2868,7 +2877,7 @@ static const struct nla_policy ip_vs_svc_policy[IPVS_SVC_ATTR_MAX + 1] = {
2016     [IPVS_SVC_ATTR_PORT] = { .type = NLA_U16 },
2017     [IPVS_SVC_ATTR_FWMARK] = { .type = NLA_U32 },
2018     [IPVS_SVC_ATTR_SCHED_NAME] = { .type = NLA_NUL_STRING,
2019     - .len = IP_VS_SCHEDNAME_MAXLEN },
2020     + .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
2021     [IPVS_SVC_ATTR_PE_NAME] = { .type = NLA_NUL_STRING,
2022     .len = IP_VS_PENAME_MAXLEN },
2023     [IPVS_SVC_ATTR_FLAGS] = { .type = NLA_BINARY,
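
The two policy changes above tighten string validation: for NLA_NUL_STRING, .len bounds the string length not counting the terminating NUL, so a destination buffer of MAXLEN bytes must advertise .len = MAXLEN - 1; the old value allowed a name that filled the whole buffer and left no room for the terminator. A runnable illustration of the arithmetic (the value 16 for IP_VS_IFNAME_MAXLEN is assumed for the example):

    #include <assert.h>
    #include <string.h>

    #define IP_VS_IFNAME_MAXLEN 16          /* assumed buffer size */

    int main(void)
    {
        char ifname[IP_VS_IFNAME_MAXLEN];
        const char *longest_ok = "123456789012345";  /* 15 characters */

        /* MAXLEN - 1 characters plus the NUL exactly fill the buffer;
         * one more character would need MAXLEN + 1 bytes. */
        assert(strlen(longest_ok) == IP_VS_IFNAME_MAXLEN - 1);
        memcpy(ifname, longest_ok, strlen(longest_ok) + 1);
        return ifname[IP_VS_IFNAME_MAXLEN - 1] == '\0' ? 0 : 1;
    }
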
2024     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
2025     index cf30c440f7a7..85b549e84104 100644
2026     --- a/net/netfilter/nf_tables_api.c
2027     +++ b/net/netfilter/nf_tables_api.c
2028     @@ -220,6 +220,34 @@ static int nft_delchain(struct nft_ctx *ctx)
2029     return err;
2030     }
2031    
2032     +static void nft_rule_expr_activate(const struct nft_ctx *ctx,
2033     + struct nft_rule *rule)
2034     +{
2035     + struct nft_expr *expr;
2036     +
2037     + expr = nft_expr_first(rule);
2038     + while (expr != nft_expr_last(rule) && expr->ops) {
2039     + if (expr->ops->activate)
2040     + expr->ops->activate(ctx, expr);
2041     +
2042     + expr = nft_expr_next(expr);
2043     + }
2044     +}
2045     +
2046     +static void nft_rule_expr_deactivate(const struct nft_ctx *ctx,
2047     + struct nft_rule *rule)
2048     +{
2049     + struct nft_expr *expr;
2050     +
2051     + expr = nft_expr_first(rule);
2052     + while (expr != nft_expr_last(rule) && expr->ops) {
2053     + if (expr->ops->deactivate)
2054     + expr->ops->deactivate(ctx, expr);
2055     +
2056     + expr = nft_expr_next(expr);
2057     + }
2058     +}
2059     +
2060     static int
2061     nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule)
2062     {
2063     @@ -265,6 +293,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule)
2064     nft_trans_destroy(trans);
2065     return err;
2066     }
2067     + nft_rule_expr_deactivate(ctx, rule);
2068    
2069     return 0;
2070     }
2071     @@ -1237,8 +1266,10 @@ static void nft_chain_stats_replace(struct nft_base_chain *chain,
2072     rcu_assign_pointer(chain->stats, newstats);
2073     synchronize_rcu();
2074     free_percpu(oldstats);
2075     - } else
2076     + } else {
2077     rcu_assign_pointer(chain->stats, newstats);
2078     + static_branch_inc(&nft_counters_enabled);
2079     + }
2080     }
2081    
2082     static void nf_tables_chain_destroy(struct nft_chain *chain)
2083     @@ -1947,6 +1978,7 @@ static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
2084     [NFTA_RULE_POSITION] = { .type = NLA_U64 },
2085     [NFTA_RULE_USERDATA] = { .type = NLA_BINARY,
2086     .len = NFT_USERDATA_MAXLEN },
2087     + [NFTA_RULE_ID] = { .type = NLA_U32 },
2088     };
2089    
2090     static int nf_tables_fill_rule_info(struct sk_buff *skb, struct net *net,
2091     @@ -2218,6 +2250,13 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
2092     kfree(rule);
2093     }
2094    
2095     +static void nf_tables_rule_release(const struct nft_ctx *ctx,
2096     + struct nft_rule *rule)
2097     +{
2098     + nft_rule_expr_deactivate(ctx, rule);
2099     + nf_tables_rule_destroy(ctx, rule);
2100     +}
2101     +
2102     #define NFT_RULE_MAXEXPRS 128
2103    
2104     static struct nft_expr_info *info;
2105     @@ -2385,7 +2424,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
2106     return 0;
2107    
2108     err2:
2109     - nf_tables_rule_destroy(&ctx, rule);
2110     + nf_tables_rule_release(&ctx, rule);
2111     err1:
2112     for (i = 0; i < n; i++) {
2113     if (info[i].ops != NULL)
2114     @@ -3374,6 +3413,8 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
2115     [NFTA_SET_ELEM_TIMEOUT] = { .type = NLA_U64 },
2116     [NFTA_SET_ELEM_USERDATA] = { .type = NLA_BINARY,
2117     .len = NFT_USERDATA_MAXLEN },
2118     + [NFTA_SET_ELEM_EXPR] = { .type = NLA_NESTED },
2119     + [NFTA_SET_ELEM_OBJREF] = { .type = NLA_STRING },
2120     };
2121    
2122     static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
2123     @@ -3961,8 +4002,10 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
2124     if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA) ^
2125     nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) ||
2126     nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF) ^
2127     - nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF))
2128     - return -EBUSY;
2129     + nft_set_ext_exists(ext2, NFT_SET_EXT_OBJREF)) {
2130     + err = -EBUSY;
2131     + goto err5;
2132     + }
2133     if ((nft_set_ext_exists(ext, NFT_SET_EXT_DATA) &&
2134     nft_set_ext_exists(ext2, NFT_SET_EXT_DATA) &&
2135     memcmp(nft_set_ext_data(ext),
2136     @@ -4054,7 +4097,7 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
2137     * NFT_GOTO verdicts. This function must be called on active data objects
2138     * from the second phase of the commit protocol.
2139     */
2140     -static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
2141     +void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
2142     {
2143     if (type == NFT_DATA_VERDICT) {
2144     switch (data->verdict.code) {
2145     @@ -4571,7 +4614,7 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
2146     if (idx > s_idx)
2147     memset(&cb->args[1], 0,
2148     sizeof(cb->args) - sizeof(cb->args[0]));
2149     - if (filter && filter->table[0] &&
2150     + if (filter && filter->table &&
2151     strcmp(filter->table, table->name))
2152     goto cont;
2153     if (filter &&
2154     @@ -5221,10 +5264,12 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
2155     case NFT_MSG_NEWRULE:
2156     trans->ctx.chain->use--;
2157     list_del_rcu(&nft_trans_rule(trans)->list);
2158     + nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans));
2159     break;
2160     case NFT_MSG_DELRULE:
2161     trans->ctx.chain->use++;
2162     nft_clear(trans->ctx.net, nft_trans_rule(trans));
2163     + nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans));
2164     nft_trans_destroy(trans);
2165     break;
2166     case NFT_MSG_NEWSET:
2167     @@ -5798,7 +5843,7 @@ int __nft_release_basechain(struct nft_ctx *ctx)
2168     list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) {
2169     list_del(&rule->list);
2170     ctx->chain->use--;
2171     - nf_tables_rule_destroy(ctx, rule);
2172     + nf_tables_rule_release(ctx, rule);
2173     }
2174     list_del(&ctx->chain->list);
2175     ctx->table->use--;
2176     @@ -5832,7 +5877,7 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi)
2177     list_for_each_entry_safe(rule, nr, &chain->rules, list) {
2178     list_del(&rule->list);
2179     chain->use--;
2180     - nf_tables_rule_destroy(&ctx, rule);
2181     + nf_tables_rule_release(&ctx, rule);
2182     }
2183     }
2184     list_for_each_entry_safe(set, ns, &table->sets, list) {
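
The new activate/deactivate expression ops threaded through the hunks above exist so rule deletion can be rolled back. Deleting a rule inside a netlink transaction now only deactivates its expressions, dropping references such as the chain-use counts held by jump verdicts; the rule itself is destroyed at commit time. If the transaction aborts instead, ->activate (nft_data_hold() for immediates) re-acquires those references. A minimal sketch of the two-phase shape, with illustrative names:

    #include <stdbool.h>

    struct obj { int refs; };                 /* e.g. a jump target chain */

    static void deactivate(struct obj *o) { o->refs--; } /* prepare delete */
    static void activate(struct obj *o)   { o->refs++; } /* undo on abort  */
    static void destroy(struct obj *o)    { (void)o;   } /* commit: free   */

    static void delete_rule(struct obj *target, bool commit)
    {
        deactivate(target);         /* nft_rule_expr_deactivate() analogue */
        if (commit)
            destroy(target);        /* references were already dropped */
        else
            activate(target);       /* nft_rule_expr_activate() analogue */
    }

    int main(void)
    {
        struct obj chain = { .refs = 1 };

        delete_rule(&chain, false);   /* abort: refs back to 1 */
        delete_rule(&chain, true);    /* commit: refs drop to 0 */
        return chain.refs;
    }

This is also why the error and teardown paths switch from nf_tables_rule_destroy() to nf_tables_rule_release(): a rule whose expressions were initialized must be deactivated before it is destroyed, or the references taken at init time leak.
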
2185     diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
2186     index dfd0bf3810d2..32b7896929f3 100644
2187     --- a/net/netfilter/nf_tables_core.c
2188     +++ b/net/netfilter/nf_tables_core.c
2189     @@ -119,14 +119,21 @@ DEFINE_STATIC_KEY_FALSE(nft_counters_enabled);
2190     static noinline void nft_update_chain_stats(const struct nft_chain *chain,
2191     const struct nft_pktinfo *pkt)
2192     {
2193     + struct nft_base_chain *base_chain;
2194     struct nft_stats *stats;
2195    
2196     + base_chain = nft_base_chain(chain);
2197     + if (!base_chain->stats)
2198     + return;
2199     +
2200     local_bh_disable();
2201     - stats = this_cpu_ptr(rcu_dereference(nft_base_chain(chain)->stats));
2202     - u64_stats_update_begin(&stats->syncp);
2203     - stats->pkts++;
2204     - stats->bytes += pkt->skb->len;
2205     - u64_stats_update_end(&stats->syncp);
2206     + stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
2207     + if (stats) {
2208     + u64_stats_update_begin(&stats->syncp);
2209     + stats->pkts++;
2210     + stats->bytes += pkt->skb->len;
2211     + u64_stats_update_end(&stats->syncp);
2212     + }
2213     local_bh_enable();
2214     }
2215    
2216     @@ -201,7 +208,8 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv)
2217    
2218     switch (regs.verdict.code) {
2219     case NFT_JUMP:
2220     - BUG_ON(stackptr >= NFT_JUMP_STACK_SIZE);
2221     + if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE))
2222     + return NF_DROP;
2223     jumpstack[stackptr].chain = chain;
2224     jumpstack[stackptr].rule = rule;
2225     jumpstack[stackptr].rulenum = rulenum;
2226     diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
2227     index b89f4f65b2a0..3bd637eadc42 100644
2228     --- a/net/netfilter/nft_compat.c
2229     +++ b/net/netfilter/nft_compat.c
2230     @@ -27,14 +27,31 @@ struct nft_xt {
2231     struct list_head head;
2232     struct nft_expr_ops ops;
2233     unsigned int refcnt;
2234     +
2235     + /* Unlike other expressions, ops doesn't have static storage duration.
2236     + * nft core assumes they do. We use kfree_rcu so that nft core
2237     + * can check expr->ops->size even after nft_compat->destroy() frees
2238     + * the nft_xt struct that holds the ops structure.
2239     + */
2240     + struct rcu_head rcu_head;
2241     +};
2242     +
2243     + /* Used for matches where *info is larger than X bytes */
2244     +#define NFT_MATCH_LARGE_THRESH 192
2245     +
2246     +struct nft_xt_match_priv {
2247     + void *info;
2248     };
2249    
2250     -static void nft_xt_put(struct nft_xt *xt)
2251     +static bool nft_xt_put(struct nft_xt *xt)
2252     {
2253     if (--xt->refcnt == 0) {
2254     list_del(&xt->head);
2255     - kfree(xt);
2256     + kfree_rcu(xt, rcu_head);
2257     + return true;
2258     }
2259     +
2260     + return false;
2261     }
2262    
2263     static int nft_compat_chain_validate_dependency(const char *tablename,
2264     @@ -226,6 +243,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
2265     struct xt_target *target = expr->ops->data;
2266     struct xt_tgchk_param par;
2267     size_t size = XT_ALIGN(nla_len(tb[NFTA_TARGET_INFO]));
2268     + struct nft_xt *nft_xt;
2269     u16 proto = 0;
2270     bool inv = false;
2271     union nft_entry e = {};
2272     @@ -236,25 +254,22 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
2273     if (ctx->nla[NFTA_RULE_COMPAT]) {
2274     ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
2275     if (ret < 0)
2276     - goto err;
2277     + return ret;
2278     }
2279    
2280     nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
2281    
2282     ret = xt_check_target(&par, size, proto, inv);
2283     if (ret < 0)
2284     - goto err;
2285     + return ret;
2286    
2287     /* The standard target cannot be used */
2288     - if (target->target == NULL) {
2289     - ret = -EINVAL;
2290     - goto err;
2291     - }
2292     + if (!target->target)
2293     + return -EINVAL;
2294    
2295     + nft_xt = container_of(expr->ops, struct nft_xt, ops);
2296     + nft_xt->refcnt++;
2297     return 0;
2298     -err:
2299     - module_put(target->me);
2300     - return ret;
2301     }
2302    
2303     static void
2304     @@ -271,8 +286,8 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
2305     if (par.target->destroy != NULL)
2306     par.target->destroy(&par);
2307    
2308     - nft_xt_put(container_of(expr->ops, struct nft_xt, ops));
2309     - module_put(target->me);
2310     + if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
2311     + module_put(target->me);
2312     }
2313    
2314     static int nft_target_dump(struct sk_buff *skb, const struct nft_expr *expr)
2315     @@ -316,11 +331,11 @@ static int nft_target_validate(const struct nft_ctx *ctx,
2316     return 0;
2317     }
2318    
2319     -static void nft_match_eval(const struct nft_expr *expr,
2320     - struct nft_regs *regs,
2321     - const struct nft_pktinfo *pkt)
2322     +static void __nft_match_eval(const struct nft_expr *expr,
2323     + struct nft_regs *regs,
2324     + const struct nft_pktinfo *pkt,
2325     + void *info)
2326     {
2327     - void *info = nft_expr_priv(expr);
2328     struct xt_match *match = expr->ops->data;
2329     struct sk_buff *skb = pkt->skb;
2330     bool ret;
2331     @@ -344,6 +359,22 @@ static void nft_match_eval(const struct nft_expr *expr,
2332     }
2333     }
2334    
2335     +static void nft_match_large_eval(const struct nft_expr *expr,
2336     + struct nft_regs *regs,
2337     + const struct nft_pktinfo *pkt)
2338     +{
2339     + struct nft_xt_match_priv *priv = nft_expr_priv(expr);
2340     +
2341     + __nft_match_eval(expr, regs, pkt, priv->info);
2342     +}
2343     +
2344     +static void nft_match_eval(const struct nft_expr *expr,
2345     + struct nft_regs *regs,
2346     + const struct nft_pktinfo *pkt)
2347     +{
2348     + __nft_match_eval(expr, regs, pkt, nft_expr_priv(expr));
2349     +}
2350     +
2351     static const struct nla_policy nft_match_policy[NFTA_MATCH_MAX + 1] = {
2352     [NFTA_MATCH_NAME] = { .type = NLA_NUL_STRING },
2353     [NFTA_MATCH_REV] = { .type = NLA_U32 },
2354     @@ -404,13 +435,14 @@ static void match_compat_from_user(struct xt_match *m, void *in, void *out)
2355     }
2356    
2357     static int
2358     -nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
2359     - const struct nlattr * const tb[])
2360     +__nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
2361     + const struct nlattr * const tb[],
2362     + void *info)
2363     {
2364     - void *info = nft_expr_priv(expr);
2365     struct xt_match *match = expr->ops->data;
2366     struct xt_mtchk_param par;
2367     size_t size = XT_ALIGN(nla_len(tb[NFTA_MATCH_INFO]));
2368     + struct nft_xt *nft_xt;
2369     u16 proto = 0;
2370     bool inv = false;
2371     union nft_entry e = {};
2372     @@ -421,26 +453,50 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
2373     if (ctx->nla[NFTA_RULE_COMPAT]) {
2374     ret = nft_parse_compat(ctx->nla[NFTA_RULE_COMPAT], &proto, &inv);
2375     if (ret < 0)
2376     - goto err;
2377     + return ret;
2378     }
2379    
2380     nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
2381    
2382     ret = xt_check_match(&par, size, proto, inv);
2383     if (ret < 0)
2384     - goto err;
2385     + return ret;
2386    
2387     + nft_xt = container_of(expr->ops, struct nft_xt, ops);
2388     + nft_xt->refcnt++;
2389     return 0;
2390     -err:
2391     - module_put(match->me);
2392     +}
2393     +
2394     +static int
2395     +nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
2396     + const struct nlattr * const tb[])
2397     +{
2398     + return __nft_match_init(ctx, expr, tb, nft_expr_priv(expr));
2399     +}
2400     +
2401     +static int
2402     +nft_match_large_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
2403     + const struct nlattr * const tb[])
2404     +{
2405     + struct nft_xt_match_priv *priv = nft_expr_priv(expr);
2406     + struct xt_match *m = expr->ops->data;
2407     + int ret;
2408     +
2409     + priv->info = kmalloc(XT_ALIGN(m->matchsize), GFP_KERNEL);
2410     + if (!priv->info)
2411     + return -ENOMEM;
2412     +
2413     + ret = __nft_match_init(ctx, expr, tb, priv->info);
2414     + if (ret)
2415     + kfree(priv->info);
2416     return ret;
2417     }
2418    
2419     static void
2420     -nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
2421     +__nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr,
2422     + void *info)
2423     {
2424     struct xt_match *match = expr->ops->data;
2425     - void *info = nft_expr_priv(expr);
2426     struct xt_mtdtor_param par;
2427    
2428     par.net = ctx->net;
2429     @@ -450,13 +506,28 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
2430     if (par.match->destroy != NULL)
2431     par.match->destroy(&par);
2432    
2433     - nft_xt_put(container_of(expr->ops, struct nft_xt, ops));
2434     - module_put(match->me);
2435     + if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops)))
2436     + module_put(match->me);
2437     }
2438    
2439     -static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
2440     +static void
2441     +nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
2442     +{
2443     + __nft_match_destroy(ctx, expr, nft_expr_priv(expr));
2444     +}
2445     +
2446     +static void
2447     +nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
2448     +{
2449     + struct nft_xt_match_priv *priv = nft_expr_priv(expr);
2450     +
2451     + __nft_match_destroy(ctx, expr, priv->info);
2452     + kfree(priv->info);
2453     +}
2454     +
2455     +static int __nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr,
2456     + void *info)
2457     {
2458     - void *info = nft_expr_priv(expr);
2459     struct xt_match *match = expr->ops->data;
2460    
2461     if (nla_put_string(skb, NFTA_MATCH_NAME, match->name) ||
2462     @@ -470,6 +541,18 @@ static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
2463     return -1;
2464     }
2465    
2466     +static int nft_match_dump(struct sk_buff *skb, const struct nft_expr *expr)
2467     +{
2468     + return __nft_match_dump(skb, expr, nft_expr_priv(expr));
2469     +}
2470     +
2471     +static int nft_match_large_dump(struct sk_buff *skb, const struct nft_expr *e)
2472     +{
2473     + struct nft_xt_match_priv *priv = nft_expr_priv(e);
2474     +
2475     + return __nft_match_dump(skb, e, priv->info);
2476     +}
2477     +
2478     static int nft_match_validate(const struct nft_ctx *ctx,
2479     const struct nft_expr *expr,
2480     const struct nft_data **data)
2481     @@ -637,6 +720,7 @@ nft_match_select_ops(const struct nft_ctx *ctx,
2482     {
2483     struct nft_xt *nft_match;
2484     struct xt_match *match;
2485     + unsigned int matchsize;
2486     char *mt_name;
2487     u32 rev, family;
2488     int err;
2489     @@ -654,13 +738,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
2490     list_for_each_entry(nft_match, &nft_match_list, head) {
2491     struct xt_match *match = nft_match->ops.data;
2492    
2493     - if (nft_match_cmp(match, mt_name, rev, family)) {
2494     - if (!try_module_get(match->me))
2495     - return ERR_PTR(-ENOENT);
2496     -
2497     - nft_match->refcnt++;
2498     + if (nft_match_cmp(match, mt_name, rev, family))
2499     return &nft_match->ops;
2500     - }
2501     }
2502    
2503     match = xt_request_find_match(family, mt_name, rev);
2504     @@ -679,9 +758,8 @@ nft_match_select_ops(const struct nft_ctx *ctx,
2505     goto err;
2506     }
2507    
2508     - nft_match->refcnt = 1;
2509     + nft_match->refcnt = 0;
2510     nft_match->ops.type = &nft_match_type;
2511     - nft_match->ops.size = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
2512     nft_match->ops.eval = nft_match_eval;
2513     nft_match->ops.init = nft_match_init;
2514     nft_match->ops.destroy = nft_match_destroy;
2515     @@ -689,6 +767,18 @@ nft_match_select_ops(const struct nft_ctx *ctx,
2516     nft_match->ops.validate = nft_match_validate;
2517     nft_match->ops.data = match;
2518    
2519     + matchsize = NFT_EXPR_SIZE(XT_ALIGN(match->matchsize));
2520     + if (matchsize > NFT_MATCH_LARGE_THRESH) {
2521     + matchsize = NFT_EXPR_SIZE(sizeof(struct nft_xt_match_priv));
2522     +
2523     + nft_match->ops.eval = nft_match_large_eval;
2524     + nft_match->ops.init = nft_match_large_init;
2525     + nft_match->ops.destroy = nft_match_large_destroy;
2526     + nft_match->ops.dump = nft_match_large_dump;
2527     + }
2528     +
2529     + nft_match->ops.size = matchsize;
2530     +
2531     list_add(&nft_match->head, &nft_match_list);
2532    
2533     return &nft_match->ops;
2534     @@ -739,13 +829,8 @@ nft_target_select_ops(const struct nft_ctx *ctx,
2535     list_for_each_entry(nft_target, &nft_target_list, head) {
2536     struct xt_target *target = nft_target->ops.data;
2537    
2538     - if (nft_target_cmp(target, tg_name, rev, family)) {
2539     - if (!try_module_get(target->me))
2540     - return ERR_PTR(-ENOENT);
2541     -
2542     - nft_target->refcnt++;
2543     + if (nft_target_cmp(target, tg_name, rev, family))
2544     return &nft_target->ops;
2545     - }
2546     }
2547    
2548     target = xt_request_find_target(family, tg_name, rev);
2549     @@ -764,7 +849,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
2550     goto err;
2551     }
2552    
2553     - nft_target->refcnt = 1;
2554     + nft_target->refcnt = 0;
2555     nft_target->ops.type = &nft_target_type;
2556     nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
2557     nft_target->ops.init = nft_target_init;
2558     @@ -825,6 +910,32 @@ static int __init nft_compat_module_init(void)
2559    
2560     static void __exit nft_compat_module_exit(void)
2561     {
2562     + struct nft_xt *xt, *next;
2563     +
2564     + /* The lists should be empty here; they can be non-empty only if
2565     + * an error caused an nft_xt expr to not be fully initialized and
2566     + * no one else requested the same expression later.
2567     + *
2568     + * In that case, the lists contain 0-refcount entries that still
2569     + * hold a module reference.
2570     + */
2571     + list_for_each_entry_safe(xt, next, &nft_target_list, head) {
2572     + struct xt_target *target = xt->ops.data;
2573     +
2574     + if (WARN_ON_ONCE(xt->refcnt))
2575     + continue;
2576     + module_put(target->me);
2577     + kfree(xt);
2578     + }
2579     +
2580     + list_for_each_entry_safe(xt, next, &nft_match_list, head) {
2581     + struct xt_match *match = xt->ops.data;
2582     +
2583     + if (WARN_ON_ONCE(xt->refcnt))
2584     + continue;
2585     + module_put(match->me);
2586     + kfree(xt);
2587     + }
2588     nfnetlink_subsys_unregister(&nfnl_compat_subsys);
2589     nft_unregister_expr(&nft_target_type);
2590     nft_unregister_expr(&nft_match_type);
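
Two related lifetime fixes run through the nft_compat hunks above. First, expr->ops lives inside the heap-allocated struct nft_xt while the nft core assumes ops have static storage, so the struct is now freed with kfree_rcu() and readers still holding an ops pointer stay valid until a grace period passes. Second, the module reference and the refcount were unbalanced: lookups that reused a cached nft_xt took a module reference per rule, but destroy only dropped one when the entry died; now ->init takes the refcount and module_put() happens only when nft_xt_put() actually frees the entry. A compressed sketch of the fixed pattern (userspace free() stands in for kfree_rcu(); names are illustrative):

    #include <stdbool.h>
    #include <stdlib.h>

    struct xt_like {
        unsigned int refcnt;
        /* struct rcu_head rcu_head;  -- kernel: freed via kfree_rcu() */
    };

    /* Returns true only when the object was actually freed, so the
     * caller can pair exactly one module_put() with it. */
    static bool put_xt(struct xt_like *xt)
    {
        if (--xt->refcnt == 0) {
            free(xt);               /* kernel: kfree_rcu(xt, rcu_head) */
            return true;
        }
        return false;
    }
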
2591     diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
2592     index 4717d7796927..aa87ff8beae8 100644
2593     --- a/net/netfilter/nft_immediate.c
2594     +++ b/net/netfilter/nft_immediate.c
2595     @@ -69,8 +69,16 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
2596     return err;
2597     }
2598    
2599     -static void nft_immediate_destroy(const struct nft_ctx *ctx,
2600     - const struct nft_expr *expr)
2601     +static void nft_immediate_activate(const struct nft_ctx *ctx,
2602     + const struct nft_expr *expr)
2603     +{
2604     + const struct nft_immediate_expr *priv = nft_expr_priv(expr);
2605     +
2606     + return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg));
2607     +}
2608     +
2609     +static void nft_immediate_deactivate(const struct nft_ctx *ctx,
2610     + const struct nft_expr *expr)
2611     {
2612     const struct nft_immediate_expr *priv = nft_expr_priv(expr);
2613    
2614     @@ -108,7 +116,8 @@ static const struct nft_expr_ops nft_imm_ops = {
2615     .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)),
2616     .eval = nft_immediate_eval,
2617     .init = nft_immediate_init,
2618     - .destroy = nft_immediate_destroy,
2619     + .activate = nft_immediate_activate,
2620     + .deactivate = nft_immediate_deactivate,
2621     .dump = nft_immediate_dump,
2622     .validate = nft_immediate_validate,
2623     };
2624     diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c
2625     index a9fc298ef4c3..72f13a1144dd 100644
2626     --- a/net/netfilter/nft_limit.c
2627     +++ b/net/netfilter/nft_limit.c
2628     @@ -51,10 +51,13 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)
2629     return !limit->invert;
2630     }
2631    
2632     +/* Use same default as in iptables. */
2633     +#define NFT_LIMIT_PKT_BURST_DEFAULT 5
2634     +
2635     static int nft_limit_init(struct nft_limit *limit,
2636     - const struct nlattr * const tb[])
2637     + const struct nlattr * const tb[], bool pkts)
2638     {
2639     - u64 unit;
2640     + u64 unit, tokens;
2641    
2642     if (tb[NFTA_LIMIT_RATE] == NULL ||
2643     tb[NFTA_LIMIT_UNIT] == NULL)
2644     @@ -68,18 +71,25 @@ static int nft_limit_init(struct nft_limit *limit,
2645    
2646     if (tb[NFTA_LIMIT_BURST])
2647     limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
2648     - else
2649     - limit->burst = 0;
2650     +
2651     + if (pkts && limit->burst == 0)
2652     + limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
2653    
2654     if (limit->rate + limit->burst < limit->rate)
2655     return -EOVERFLOW;
2656    
2657     - /* The token bucket size limits the number of tokens can be
2658     - * accumulated. tokens_max specifies the bucket size.
2659     - * tokens_max = unit * (rate + burst) / rate.
2660     - */
2661     - limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
2662     - limit->rate);
2663     + if (pkts) {
2664     + tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
2665     + } else {
2666     + /* The token bucket size limits the number of tokens that can be
2667     + * accumulated. tokens_max specifies the bucket size.
2668     + * tokens_max = unit * (rate + burst) / rate.
2669     + */
2670     + tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
2671     + limit->rate);
2672     + }
2673     +
2674     + limit->tokens = tokens;
2675     limit->tokens_max = limit->tokens;
2676    
2677     if (tb[NFTA_LIMIT_FLAGS]) {
2678     @@ -144,7 +154,7 @@ static int nft_limit_pkts_init(const struct nft_ctx *ctx,
2679     struct nft_limit_pkts *priv = nft_expr_priv(expr);
2680     int err;
2681    
2682     - err = nft_limit_init(&priv->limit, tb);
2683     + err = nft_limit_init(&priv->limit, tb, true);
2684     if (err < 0)
2685     return err;
2686    
2687     @@ -185,7 +195,7 @@ static int nft_limit_bytes_init(const struct nft_ctx *ctx,
2688     {
2689     struct nft_limit *priv = nft_expr_priv(expr);
2690    
2691     - return nft_limit_init(priv, tb);
2692     + return nft_limit_init(priv, tb, false);
2693     }
2694    
2695     static int nft_limit_bytes_dump(struct sk_buff *skb,
2696     @@ -246,7 +256,7 @@ static int nft_limit_obj_pkts_init(const struct nft_ctx *ctx,
2697     struct nft_limit_pkts *priv = nft_obj_data(obj);
2698     int err;
2699    
2700     - err = nft_limit_init(&priv->limit, tb);
2701     + err = nft_limit_init(&priv->limit, tb, true);
2702     if (err < 0)
2703     return err;
2704    
2705     @@ -289,7 +299,7 @@ static int nft_limit_obj_bytes_init(const struct nft_ctx *ctx,
2706     {
2707     struct nft_limit *priv = nft_obj_data(obj);
2708    
2709     - return nft_limit_init(priv, tb);
2710     + return nft_limit_init(priv, tb, false);
2711     }
2712    
2713     static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
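
The nft_limit change above splits the bucket sizing by mode. For byte limits the old formula stays: tokens_max = unit * (rate + burst) / rate. For packet limits, burst now defaults to 5 (matching iptables) and the bucket holds burst packet-slots of unit/rate nanoseconds each: tokens = (nsecs / rate) * burst. A worked example with illustrative numbers:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t nsecs = 1000000000ull;     /* unit: 1 second, in ns */
        uint64_t rate  = 10;                /* 10 per second */
        uint64_t burst = 5;                 /* packet-mode default */

        uint64_t pkt_tokens  = nsecs / rate * burst;          /* 500000000  */
        uint64_t byte_tokens = nsecs * (rate + burst) / rate; /* 1500000000 */

        /* Five 100 ms packet slots vs. 1.5 s worth of byte budget. */
        printf("%" PRIu64 " %" PRIu64 "\n", pkt_tokens, byte_tokens);
        return 0;
    }
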
2714     diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
2715     index 5a60eb23a7ed..c71184d4eac1 100644
2716     --- a/net/netfilter/nft_meta.c
2717     +++ b/net/netfilter/nft_meta.c
2718     @@ -229,7 +229,7 @@ void nft_meta_set_eval(const struct nft_expr *expr,
2719     struct sk_buff *skb = pkt->skb;
2720     u32 *sreg = &regs->data[meta->sreg];
2721     u32 value = *sreg;
2722     - u8 pkt_type;
2723     + u8 value8;
2724    
2725     switch (meta->key) {
2726     case NFT_META_MARK:
2727     @@ -239,15 +239,17 @@ void nft_meta_set_eval(const struct nft_expr *expr,
2728     skb->priority = value;
2729     break;
2730     case NFT_META_PKTTYPE:
2731     - pkt_type = nft_reg_load8(sreg);
2732     + value8 = nft_reg_load8(sreg);
2733    
2734     - if (skb->pkt_type != pkt_type &&
2735     - skb_pkt_type_ok(pkt_type) &&
2736     + if (skb->pkt_type != value8 &&
2737     + skb_pkt_type_ok(value8) &&
2738     skb_pkt_type_ok(skb->pkt_type))
2739     - skb->pkt_type = pkt_type;
2740     + skb->pkt_type = value8;
2741     break;
2742     case NFT_META_NFTRACE:
2743     - skb->nf_trace = !!value;
2744     + value8 = nft_reg_load8(sreg);
2745     +
2746     + skb->nf_trace = !!value8;
2747     break;
2748     default:
2749     WARN_ON(1);
2750     diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
2751     index a59db7c45a65..81ede20f49d7 100644
2752     --- a/tools/perf/tests/topology.c
2753     +++ b/tools/perf/tests/topology.c
2754     @@ -66,6 +66,27 @@ static int check_cpu_topology(char *path, struct cpu_map *map)
2755     session = perf_session__new(&file, false, NULL);
2756     TEST_ASSERT_VAL("can't get session", session);
2757    
2758     + /* On platforms with large numbers of CPUs, process_cpu_topology()
2759     + * might issue an error while reading the perf.data file section
2760     + * HEADER_CPU_TOPOLOGY and the cpu_topology_map pointed to by member
2761     + * cpu is a NULL pointer.
2762     + * Example: On s390
2763     + * CPU 0 is on core_id 0 and physical_package_id 6
2764     + * CPU 1 is on core_id 1 and physical_package_id 3
2765     + *
2766     + * Core_id and physical_package_id are platform and architecture
2767     + * dependent and might have higher numbers than the CPU id.
2768     + * This actually depends on the configuration.
2769     + *
2770     + * In this case process_cpu_topology() prints error message:
2771     + * "socket_id number is too big. You may need to upgrade the
2772     + * perf tool."
2773     + *
2774     + * This is the reason why this test might be skipped.
2775     + */
2776     + if (!session->header.env.cpu)
2777     + return TEST_SKIP;
2778     +
2779     for (i = 0; i < session->header.env.nr_cpus_avail; i++) {
2780     if (!cpu_map__has(map, i))
2781     continue;
2782     @@ -91,7 +112,7 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
2783     {
2784     char path[PATH_MAX];
2785     struct cpu_map *map;
2786     - int ret = -1;
2787     + int ret = TEST_FAIL;
2788    
2789     TEST_ASSERT_VAL("can't get templ file", !get_temp(path));
2790    
2791     @@ -106,12 +127,9 @@ int test__session_topology(struct test *test __maybe_unused, int subtest __maybe
2792     goto free_path;
2793     }
2794    
2795     - if (check_cpu_topology(path, map))
2796     - goto free_map;
2797     - ret = 0;
2798     -
2799     -free_map:
2800     + ret = check_cpu_topology(path, map);
2801     cpu_map__put(map);
2802     +
2803     free_path:
2804     unlink(path);
2805     return ret;
2806     diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
2807     index 72c107fcbc5a..c02d2cfd3aea 100644
2808     --- a/tools/perf/util/bpf-loader.c
2809     +++ b/tools/perf/util/bpf-loader.c
2810     @@ -66,7 +66,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
2811     }
2812    
2813     obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
2814     - if (IS_ERR(obj)) {
2815     + if (IS_ERR_OR_NULL(obj)) {
2816     pr_debug("bpf: failed to load buffer\n");
2817     return ERR_PTR(-EINVAL);
2818     }
2819     @@ -102,14 +102,14 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
2820     pr_debug("bpf: successful builtin compilation\n");
2821     obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
2822    
2823     - if (!IS_ERR(obj) && llvm_param.dump_obj)
2824     + if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
2825     llvm__dump_obj(filename, obj_buf, obj_buf_sz);
2826    
2827     free(obj_buf);
2828     } else
2829     obj = bpf_object__open(filename);
2830    
2831     - if (IS_ERR(obj)) {
2832     + if (IS_ERR_OR_NULL(obj)) {
2833     pr_debug("bpf: failed to load %s\n", filename);
2834     return obj;
2835     }
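
The bpf-loader hunks above widen the error check because bpf_object__open_buffer() and bpf_object__open() can return NULL as well as an ERR_PTR-encoded errno; IS_ERR() alone lets a NULL through to be dereferenced later. A userspace restatement of the convention the new check relies on:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* ERR_PTR convention: errors live in the last page of the address
     * space; NULL is neither a valid object nor an encoded error. */
    static bool is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }

    static bool is_err_or_null(const void *p)
    {
        return !p || is_err(p);     /* the case the hunks start handling */
    }
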