Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0190-5.4.91-all-fixes.patch

Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 79225 bytes
-sync kernel patches
1 diff --git a/Makefile b/Makefile
2 index 5c9d680b7ce51..a5edbd4f34145 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 90
10 +SUBLEVEL = 91
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 @@ -480,7 +480,7 @@ KBUILD_AFLAGS := -D__ASSEMBLY__ -fno-PIE
15 KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
16 -fno-strict-aliasing -fno-common -fshort-wchar -fno-PIE \
17 -Werror=implicit-function-declaration -Werror=implicit-int \
18 - -Wno-format-security \
19 + -Werror=return-type -Wno-format-security \
20 -std=gnu89
21 KBUILD_CPPFLAGS := -D__KERNEL__
22 KBUILD_AFLAGS_KERNEL :=
23 diff --git a/arch/arc/Makefile b/arch/arc/Makefile
24 index f1c44cccf8d6c..6f05e509889f6 100644
25 --- a/arch/arc/Makefile
26 +++ b/arch/arc/Makefile
27 @@ -90,16 +90,22 @@ libs-y += arch/arc/lib/ $(LIBGCC)
28
29 boot := arch/arc/boot
30
31 -#default target for make without any arguments.
32 -KBUILD_IMAGE := $(boot)/bootpImage
33 -
34 -all: bootpImage
35 -bootpImage: vmlinux
36 -
37 -boot_targets += uImage uImage.bin uImage.gz
38 +boot_targets := uImage.bin uImage.gz uImage.lzma
39
40 +PHONY += $(boot_targets)
41 $(boot_targets): vmlinux
42 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
43
44 +uimage-default-y := uImage.bin
45 +uimage-default-$(CONFIG_KERNEL_GZIP) := uImage.gz
46 +uimage-default-$(CONFIG_KERNEL_LZMA) := uImage.lzma
47 +
48 +PHONY += uImage
49 +uImage: $(uimage-default-y)
50 + @ln -sf $< $(boot)/uImage
51 + @$(kecho) ' Image $(boot)/uImage is ready'
52 +
53 +CLEAN_FILES += $(boot)/uImage
54 +
55 archclean:
56 $(Q)$(MAKE) $(clean)=$(boot)
57 diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
58 index 538b92f4dd253..3b1f8a69a89ef 100644
59 --- a/arch/arc/boot/Makefile
60 +++ b/arch/arc/boot/Makefile
61 @@ -1,5 +1,5 @@
62 # SPDX-License-Identifier: GPL-2.0
63 -targets := vmlinux.bin vmlinux.bin.gz uImage
64 +targets := vmlinux.bin vmlinux.bin.gz
65
66 # uImage build relies on mkimage being availble on your host for ARC target
67 # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
68 @@ -13,11 +13,6 @@ LINUX_START_TEXT = $$(readelf -h vmlinux | \
69 UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE)
70 UIMAGE_ENTRYADDR = $(LINUX_START_TEXT)
71
72 -suffix-y := bin
73 -suffix-$(CONFIG_KERNEL_GZIP) := gz
74 -suffix-$(CONFIG_KERNEL_LZMA) := lzma
75 -
76 -targets += uImage
77 targets += uImage.bin
78 targets += uImage.gz
79 targets += uImage.lzma
80 @@ -42,7 +37,3 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
81
82 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
83 $(call if_changed,uimage,lzma)
84 -
85 -$(obj)/uImage: $(obj)/uImage.$(suffix-y)
86 - @ln -sf $(notdir $<) $@
87 - @echo ' Image $@ is ready'
88 diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
89 index 0a32e8cfd074d..bcd1920ae75a3 100644
90 --- a/arch/arc/include/asm/page.h
91 +++ b/arch/arc/include/asm/page.h
92 @@ -10,6 +10,7 @@
93 #ifndef __ASSEMBLY__
94
95 #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
96 +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
97 #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
98
99 struct vm_area_struct;
100 diff --git a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
101 index 5ae8607883395..3fcc86d7b735f 100644
102 --- a/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
103 +++ b/arch/arm/boot/dts/picoxcell-pc3x2.dtsi
104 @@ -45,18 +45,21 @@
105 emac: gem@30000 {
106 compatible = "cadence,gem";
107 reg = <0x30000 0x10000>;
108 + interrupt-parent = <&vic0>;
109 interrupts = <31>;
110 };
111
112 dmac1: dmac@40000 {
113 compatible = "snps,dw-dmac";
114 reg = <0x40000 0x10000>;
115 + interrupt-parent = <&vic0>;
116 interrupts = <25>;
117 };
118
119 dmac2: dmac@50000 {
120 compatible = "snps,dw-dmac";
121 reg = <0x50000 0x10000>;
122 + interrupt-parent = <&vic0>;
123 interrupts = <26>;
124 };
125
126 @@ -234,6 +237,7 @@
127 axi2pico@c0000000 {
128 compatible = "picochip,axi2pico-pc3x2";
129 reg = <0xc0000000 0x10000>;
130 + interrupt-parent = <&vic0>;
131 interrupts = <13 14 15 16 17 18 19 20 21>;
132 };
133 };
134 diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
135 index 88f5d637b1c49..a52e929381ea4 100644
136 --- a/arch/mips/boot/compressed/decompress.c
137 +++ b/arch/mips/boot/compressed/decompress.c
138 @@ -13,6 +13,7 @@
139 #include <linux/libfdt.h>
140
141 #include <asm/addrspace.h>
142 +#include <asm/unaligned.h>
143
144 /*
145 * These two variables specify the free mem region
146 @@ -113,7 +114,7 @@ void decompress_kernel(unsigned long boot_heap_start)
147 dtb_size = fdt_totalsize((void *)&__appended_dtb);
148
149 /* last four bytes is always image size in little endian */
150 - image_size = le32_to_cpup((void *)&__image_end - 4);
151 + image_size = get_unaligned_le32((void *)&__image_end - 4);
152
153 /* copy dtb to where the booted kernel will expect it */
154 memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
155 diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
156 index 3d80a51256de6..dab8febb57419 100644
157 --- a/arch/mips/kernel/relocate.c
158 +++ b/arch/mips/kernel/relocate.c
159 @@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
160 static inline __init unsigned long rotate_xor(unsigned long hash,
161 const void *area, size_t size)
162 {
163 - size_t i;
164 - unsigned long *ptr = (unsigned long *)area;
165 + const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
166 + size_t diff, i;
167 +
168 + diff = (void *)ptr - area;
169 + if (unlikely(size < diff + sizeof(hash)))
170 + return hash;
171 +
172 + size = ALIGN_DOWN(size - diff, sizeof(hash));
173
174 for (i = 0; i < size / sizeof(hash); i++) {
175 /* Rotate by odd number of bits and XOR. */
176 diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
177 index 09d5deea747f2..f80a67c092b63 100644
178 --- a/arch/mips/lib/uncached.c
179 +++ b/arch/mips/lib/uncached.c
180 @@ -37,10 +37,12 @@
181 */
182 unsigned long run_uncached(void *func)
183 {
184 - register long sp __asm__("$sp");
185 register long ret __asm__("$2");
186 long lfunc = (long)func, ufunc;
187 long usp;
188 + long sp;
189 +
190 + __asm__("move %0, $sp" : "=r" (sp));
191
192 if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
193 usp = CKSEG1ADDR(sp);
194 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
195 index c4785a456dedc..504fd61592405 100644
196 --- a/arch/mips/mm/c-r4k.c
197 +++ b/arch/mips/mm/c-r4k.c
198 @@ -1576,7 +1576,7 @@ static void __init loongson2_sc_init(void)
199 c->options |= MIPS_CPU_INCLUSIVE_CACHES;
200 }
201
202 -static void __init loongson3_sc_init(void)
203 +static void loongson3_sc_init(void)
204 {
205 struct cpuinfo_mips *c = &current_cpu_data;
206 unsigned int config2, lsize;
207 diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
208 index dbdbfe5d84086..e67374268b42d 100644
209 --- a/arch/mips/mm/sc-mips.c
210 +++ b/arch/mips/mm/sc-mips.c
211 @@ -147,7 +147,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
212 return 1;
213 }
214
215 -static int __init mips_sc_probe_cm3(void)
216 +static int mips_sc_probe_cm3(void)
217 {
218 struct cpuinfo_mips *c = &current_cpu_data;
219 unsigned long cfg = read_gcr_l2_config();
220 @@ -181,7 +181,7 @@ static int __init mips_sc_probe_cm3(void)
221 return 0;
222 }
223
224 -static inline int __init mips_sc_probe(void)
225 +static inline int mips_sc_probe(void)
226 {
227 struct cpuinfo_mips *c = &current_cpu_data;
228 unsigned int config1, config2;
229 diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
230 index 5208ba49c89a9..2c87350c1fb09 100644
231 --- a/arch/x86/hyperv/mmu.c
232 +++ b/arch/x86/hyperv/mmu.c
233 @@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
234 if (!hv_hypercall_pg)
235 goto do_native;
236
237 - if (cpumask_empty(cpus))
238 - return;
239 -
240 local_irq_save(flags);
241
242 + /*
243 + * Only check the mask _after_ interrupt has been disabled to avoid the
244 + * mask changing under our feet.
245 + */
246 + if (cpumask_empty(cpus)) {
247 + local_irq_restore(flags);
248 + return;
249 + }
250 +
251 flush_pcpu = (struct hv_tlb_flush **)
252 this_cpu_ptr(hyperv_pcpu_input_arg);
253
254 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
255 index ba32adaeefdd0..7d19aae015aeb 100644
256 --- a/block/bfq-iosched.c
257 +++ b/block/bfq-iosched.c
258 @@ -6320,13 +6320,13 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
259 * limit 'something'.
260 */
261 /* no more than 50% of tags for async I/O */
262 - bfqd->word_depths[0][0] = max((1U << bt->sb.shift) >> 1, 1U);
263 + bfqd->word_depths[0][0] = max(bt->sb.depth >> 1, 1U);
264 /*
265 * no more than 75% of tags for sync writes (25% extra tags
266 * w.r.t. async I/O, to prevent async I/O from starving sync
267 * writes)
268 */
269 - bfqd->word_depths[0][1] = max(((1U << bt->sb.shift) * 3) >> 2, 1U);
270 + bfqd->word_depths[0][1] = max((bt->sb.depth * 3) >> 2, 1U);
271
272 /*
273 * In-word depths in case some bfq_queue is being weight-
274 @@ -6336,9 +6336,9 @@ static unsigned int bfq_update_depths(struct bfq_data *bfqd,
275 * shortage.
276 */
277 /* no more than ~18% of tags for async I/O */
278 - bfqd->word_depths[1][0] = max(((1U << bt->sb.shift) * 3) >> 4, 1U);
279 + bfqd->word_depths[1][0] = max((bt->sb.depth * 3) >> 4, 1U);
280 /* no more than ~37% of tags for sync writes (~20% extra tags) */
281 - bfqd->word_depths[1][1] = max(((1U << bt->sb.shift) * 6) >> 4, 1U);
282 + bfqd->word_depths[1][1] = max((bt->sb.depth * 6) >> 4, 1U);
283
284 for (i = 0; i < 2; i++)
285 for (j = 0; j < 2; j++)
286 diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
287 index afe6636f9ad39..1db2e1bb72ba6 100644
288 --- a/drivers/acpi/internal.h
289 +++ b/drivers/acpi/internal.h
290 @@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
291 extern struct list_head acpi_bus_id_list;
292
293 struct acpi_device_bus_id {
294 - char bus_id[15];
295 + const char *bus_id;
296 unsigned int instance_no;
297 struct list_head node;
298 };
299 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
300 index 2527938a30b54..6c8c9509e03d1 100644
301 --- a/drivers/acpi/scan.c
302 +++ b/drivers/acpi/scan.c
303 @@ -487,6 +487,7 @@ static void acpi_device_del(struct acpi_device *device)
304 acpi_device_bus_id->instance_no--;
305 else {
306 list_del(&acpi_device_bus_id->node);
307 + kfree_const(acpi_device_bus_id->bus_id);
308 kfree(acpi_device_bus_id);
309 }
310 break;
311 @@ -675,7 +676,14 @@ int acpi_device_add(struct acpi_device *device,
312 }
313 if (!found) {
314 acpi_device_bus_id = new_bus_id;
315 - strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
316 + acpi_device_bus_id->bus_id =
317 + kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
318 + if (!acpi_device_bus_id->bus_id) {
319 + pr_err(PREFIX "Memory allocation error for bus id\n");
320 + result = -ENOMEM;
321 + goto err_free_new_bus_id;
322 + }
323 +
324 acpi_device_bus_id->instance_no = 0;
325 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
326 }
327 @@ -710,6 +718,11 @@ int acpi_device_add(struct acpi_device *device,
328 if (device->parent)
329 list_del(&device->node);
330 list_del(&device->wakeup_list);
331 +
332 + err_free_new_bus_id:
333 + if (!found)
334 + kfree(new_bus_id);
335 +
336 mutex_unlock(&acpi_device_lock);
337
338 err_detach:
339 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
340 index 29141bff4b572..3b3fc9a426e91 100644
341 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
342 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
343 @@ -2057,11 +2057,11 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
344 if (adev->gmc.xgmi.num_physical_nodes > 1)
345 amdgpu_xgmi_remove_device(adev);
346
347 - amdgpu_amdkfd_device_fini(adev);
348 -
349 amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
350 amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
351
352 + amdgpu_amdkfd_device_fini(adev);
353 +
354 /* need to disable SMC first */
355 for (i = 0; i < adev->num_ip_blocks; i++) {
356 if (!adev->ip_blocks[i].status.hw)
357 diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
358 index bc14e9c0285a0..23edc1b8e43fa 100644
359 --- a/drivers/gpu/drm/i915/display/intel_panel.c
360 +++ b/drivers/gpu/drm/i915/display/intel_panel.c
361 @@ -1603,20 +1603,21 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
362 val = pch_get_backlight(connector);
363 else
364 val = lpt_get_backlight(connector);
365 - val = intel_panel_compute_brightness(connector, val);
366 - panel->backlight.level = clamp(val, panel->backlight.min,
367 - panel->backlight.max);
368
369 if (cpu_mode) {
370 DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
371
372 /* Write converted CPU PWM value to PCH override register */
373 - lpt_set_backlight(connector->base.state, panel->backlight.level);
374 + lpt_set_backlight(connector->base.state, val);
375 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
376
377 I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
378 }
379
380 + val = intel_panel_compute_brightness(connector, val);
381 + panel->backlight.level = clamp(val, panel->backlight.min,
382 + panel->backlight.max);
383 +
384 return 0;
385 }
386
387 diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
388 index a71b22bdd95b5..7f329d8118a46 100644
389 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c
390 +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
391 @@ -797,10 +797,20 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
392 if (intel_dsi->gpio_panel)
393 gpiod_set_value_cansleep(intel_dsi->gpio_panel, 1);
394 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON);
395 - intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
396
397 - /* Deassert reset */
398 - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
399 + /*
400 + * Give the panel time to power-on and then deassert its reset.
401 + * Depending on the VBT MIPI sequences version the deassert-seq
402 + * may contain the necessary delay, intel_dsi_msleep() will skip
403 + * the delay in that case. If there is no deassert-seq, then an
404 + * unconditional msleep is used to give the panel time to power-on.
405 + */
406 + if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) {
407 + intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay);
408 + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET);
409 + } else {
410 + msleep(intel_dsi->panel_on_delay);
411 + }
412
413 if (IS_GEMINILAKE(dev_priv)) {
414 glk_cold_boot = glk_dsi_enable_io(encoder);
415 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
416 index 108632a1f2438..8d9d86c76a4e9 100644
417 --- a/drivers/gpu/drm/msm/msm_drv.c
418 +++ b/drivers/gpu/drm/msm/msm_drv.c
419 @@ -432,14 +432,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
420
421 drm_mode_config_init(ddev);
422
423 - /* Bind all our sub-components: */
424 - ret = component_bind_all(dev, ddev);
425 + ret = msm_init_vram(ddev);
426 if (ret)
427 goto err_destroy_mdss;
428
429 - ret = msm_init_vram(ddev);
430 + /* Bind all our sub-components: */
431 + ret = component_bind_all(dev, ddev);
432 if (ret)
433 - goto err_msm_uninit;
434 + goto err_destroy_mdss;
435
436 if (!dev->dma_parms) {
437 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
438 diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
439 index c88ce77fe6763..df6f042fb605e 100644
440 --- a/drivers/hwmon/pwm-fan.c
441 +++ b/drivers/hwmon/pwm-fan.c
442 @@ -330,8 +330,18 @@ static int pwm_fan_probe(struct platform_device *pdev)
443
444 ctx->pwm_value = MAX_PWM;
445
446 - /* Set duty cycle to maximum allowed and enable PWM output */
447 pwm_init_state(ctx->pwm, &state);
448 + /*
449 + * __set_pwm assumes that MAX_PWM * (period - 1) fits into an unsigned
450 + * long. Check this here to prevent the fan running at a too low
451 + * frequency.
452 + */
453 + if (state.period > ULONG_MAX / MAX_PWM + 1) {
454 + dev_err(dev, "Configured period too big\n");
455 + return -EINVAL;
456 + }
457 +
458 + /* Set duty cycle to maximum allowed and enable PWM output */
459 state.duty_cycle = ctx->pwm->args.period - 1;
460 state.enabled = true;
461
462 diff --git a/drivers/infiniband/core/restrack.c b/drivers/infiniband/core/restrack.c
463 index a07665f7ef8ce..f1b4db80913f7 100644
464 --- a/drivers/infiniband/core/restrack.c
465 +++ b/drivers/infiniband/core/restrack.c
466 @@ -234,6 +234,7 @@ static void rdma_restrack_add(struct rdma_restrack_entry *res)
467 } else {
468 ret = xa_alloc_cyclic(&rt->xa, &res->id, res, xa_limit_32b,
469 &rt->next_id, GFP_KERNEL);
470 + ret = (ret < 0) ? ret : 0;
471 }
472
473 if (!ret)
474 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
475 index 40c1a05c2445d..c9e583c05ef27 100644
476 --- a/drivers/infiniband/hw/mlx5/main.c
477 +++ b/drivers/infiniband/hw/mlx5/main.c
478 @@ -6173,7 +6173,7 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
479
480 err = set_has_smi_cap(dev);
481 if (err)
482 - return err;
483 + goto err_mp;
484
485 if (!mlx5_core_mp_enabled(mdev)) {
486 for (i = 1; i <= dev->num_ports; i++) {
487 @@ -6626,7 +6626,7 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
488
489 err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true);
490 if (err)
491 - mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
492 + mlx5_free_bfreg(dev->mdev, &dev->bfreg);
493
494 return err;
495 }
496 diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
497 index e8267e5907722..55bd8873da466 100644
498 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
499 +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
500 @@ -442,9 +442,9 @@ static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
501 pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
502 __func__, dev->id, pd->id);
503 }
504 - kfree(uctx->cntxt_pd);
505 uctx->cntxt_pd = NULL;
506 _ocrdma_dealloc_pd(dev, pd);
507 + kfree(pd);
508 }
509
510 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
511 diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
512 index 556b8e44a51c4..a102a5d8769f2 100644
513 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
514 +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c
515 @@ -214,6 +214,7 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
516
517 }
518 usnic_uiom_free_dev_list(dev_list);
519 + dev_list = NULL;
520 }
521
522 /* Try to find resources on an unused vf */
523 @@ -239,6 +240,8 @@ find_free_vf_and_create_qp_grp(struct usnic_ib_dev *us_ibdev,
524 qp_grp_check:
525 if (IS_ERR_OR_NULL(qp_grp)) {
526 usnic_err("Failed to allocate qp_grp\n");
527 + if (usnic_ib_share_vf)
528 + usnic_uiom_free_dev_list(dev_list);
529 return ERR_PTR(qp_grp ? PTR_ERR(qp_grp) : -ENOMEM);
530 }
531 return qp_grp;
532 diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
533 index ec69a99b99bab..a3739f626629c 100644
534 --- a/drivers/iommu/intel-svm.c
535 +++ b/drivers/iommu/intel-svm.c
536 @@ -99,8 +99,10 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
537 return 0;
538 }
539
540 -static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
541 - unsigned long address, unsigned long pages, int ih)
542 +static void __flush_svm_range_dev(struct intel_svm *svm,
543 + struct intel_svm_dev *sdev,
544 + unsigned long address,
545 + unsigned long pages, int ih)
546 {
547 struct qi_desc desc;
548
549 @@ -151,6 +153,22 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
550 }
551 }
552
553 +static void intel_flush_svm_range_dev(struct intel_svm *svm,
554 + struct intel_svm_dev *sdev,
555 + unsigned long address,
556 + unsigned long pages, int ih)
557 +{
558 + unsigned long shift = ilog2(__roundup_pow_of_two(pages));
559 + unsigned long align = (1ULL << (VTD_PAGE_SHIFT + shift));
560 + unsigned long start = ALIGN_DOWN(address, align);
561 + unsigned long end = ALIGN(address + (pages << VTD_PAGE_SHIFT), align);
562 +
563 + while (start < end) {
564 + __flush_svm_range_dev(svm, sdev, start, align >> VTD_PAGE_SHIFT, ih);
565 + start += align;
566 + }
567 +}
568 +
569 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
570 unsigned long pages, int ih)
571 {
572 diff --git a/drivers/isdn/mISDN/Kconfig b/drivers/isdn/mISDN/Kconfig
573 index 26cf0ac9c4ad0..c9a53c2224728 100644
574 --- a/drivers/isdn/mISDN/Kconfig
575 +++ b/drivers/isdn/mISDN/Kconfig
576 @@ -13,6 +13,7 @@ if MISDN != n
577 config MISDN_DSP
578 tristate "Digital Audio Processing of transparent data"
579 depends on MISDN
580 + select BITREVERSE
581 help
582 Enable support for digital audio processing capability.
583
584 diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
585 index 2d519c2235626..a9529dc2b26e6 100644
586 --- a/drivers/md/dm-bufio.c
587 +++ b/drivers/md/dm-bufio.c
588 @@ -1446,6 +1446,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
589 }
590 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
591
592 +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
593 +{
594 + return c->dm_io;
595 +}
596 +EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
597 +
598 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
599 {
600 return b->block;
601 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
602 index d6edfe84e7490..25efe382e78fa 100644
603 --- a/drivers/md/dm-integrity.c
604 +++ b/drivers/md/dm-integrity.c
605 @@ -1343,12 +1343,52 @@ static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, se
606 return 0;
607 }
608
609 -static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
610 +struct flush_request {
611 + struct dm_io_request io_req;
612 + struct dm_io_region io_reg;
613 + struct dm_integrity_c *ic;
614 + struct completion comp;
615 +};
616 +
617 +static void flush_notify(unsigned long error, void *fr_)
618 +{
619 + struct flush_request *fr = fr_;
620 + if (unlikely(error != 0))
621 + dm_integrity_io_error(fr->ic, "flusing disk cache", -EIO);
622 + complete(&fr->comp);
623 +}
624 +
625 +static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
626 {
627 int r;
628 +
629 + struct flush_request fr;
630 +
631 + if (!ic->meta_dev)
632 + flush_data = false;
633 + if (flush_data) {
634 + fr.io_req.bi_op = REQ_OP_WRITE,
635 + fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
636 + fr.io_req.mem.type = DM_IO_KMEM,
637 + fr.io_req.mem.ptr.addr = NULL,
638 + fr.io_req.notify.fn = flush_notify,
639 + fr.io_req.notify.context = &fr;
640 + fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio),
641 + fr.io_reg.bdev = ic->dev->bdev,
642 + fr.io_reg.sector = 0,
643 + fr.io_reg.count = 0,
644 + fr.ic = ic;
645 + init_completion(&fr.comp);
646 + r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
647 + BUG_ON(r);
648 + }
649 +
650 r = dm_bufio_write_dirty_buffers(ic->bufio);
651 if (unlikely(r))
652 dm_integrity_io_error(ic, "writing tags", r);
653 +
654 + if (flush_data)
655 + wait_for_completion(&fr.comp);
656 }
657
658 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
659 @@ -2077,7 +2117,7 @@ static void integrity_commit(struct work_struct *w)
660 flushes = bio_list_get(&ic->flush_bio_list);
661 if (unlikely(ic->mode != 'J')) {
662 spin_unlock_irq(&ic->endio_wait.lock);
663 - dm_integrity_flush_buffers(ic);
664 + dm_integrity_flush_buffers(ic, true);
665 goto release_flush_bios;
666 }
667
668 @@ -2287,7 +2327,7 @@ skip_io:
669 complete_journal_op(&comp);
670 wait_for_completion_io(&comp.comp);
671
672 - dm_integrity_flush_buffers(ic);
673 + dm_integrity_flush_buffers(ic, true);
674 }
675
676 static void integrity_writer(struct work_struct *w)
677 @@ -2329,7 +2369,7 @@ static void recalc_write_super(struct dm_integrity_c *ic)
678 {
679 int r;
680
681 - dm_integrity_flush_buffers(ic);
682 + dm_integrity_flush_buffers(ic, false);
683 if (dm_integrity_failed(ic))
684 return;
685
686 @@ -2532,7 +2572,7 @@ static void bitmap_flush_work(struct work_struct *work)
687 unsigned long limit;
688 struct bio *bio;
689
690 - dm_integrity_flush_buffers(ic);
691 + dm_integrity_flush_buffers(ic, false);
692
693 range.logical_sector = 0;
694 range.n_sectors = ic->provided_data_sectors;
695 @@ -2541,7 +2581,7 @@ static void bitmap_flush_work(struct work_struct *work)
696 add_new_range_and_wait(ic, &range);
697 spin_unlock_irq(&ic->endio_wait.lock);
698
699 - dm_integrity_flush_buffers(ic);
700 + dm_integrity_flush_buffers(ic, true);
701 if (ic->meta_dev)
702 blkdev_issue_flush(ic->dev->bdev, GFP_NOIO, NULL);
703
704 @@ -2812,11 +2852,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
705 if (ic->meta_dev)
706 queue_work(ic->writer_wq, &ic->writer_work);
707 drain_workqueue(ic->writer_wq);
708 - dm_integrity_flush_buffers(ic);
709 + dm_integrity_flush_buffers(ic, true);
710 }
711
712 if (ic->mode == 'B') {
713 - dm_integrity_flush_buffers(ic);
714 + dm_integrity_flush_buffers(ic, true);
715 #if 1
716 /* set to 0 to test bitmap replay code */
717 init_journal(ic, 0, ic->journal_sections, 0);
718 @@ -3585,7 +3625,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
719 unsigned extra_args;
720 struct dm_arg_set as;
721 static const struct dm_arg _args[] = {
722 - {0, 9, "Invalid number of feature args"},
723 + {0, 15, "Invalid number of feature args"},
724 };
725 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
726 bool should_write_sb;
727 diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
728 index b0aa595e4375d..70210a7e4bc80 100644
729 --- a/drivers/md/dm-raid.c
730 +++ b/drivers/md/dm-raid.c
731 @@ -3744,10 +3744,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
732 blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs));
733
734 /*
735 - * RAID1 and RAID10 personalities require bio splitting,
736 - * RAID0/4/5/6 don't and process large discard bios properly.
737 + * RAID0 and RAID10 personalities require bio splitting,
738 + * RAID1/4/5/6 don't and process large discard bios properly.
739 */
740 - if (rs_is_raid1(rs) || rs_is_raid10(rs)) {
741 + if (rs_is_raid0(rs) || rs_is_raid10(rs)) {
742 limits->discard_granularity = chunk_size_bytes;
743 limits->max_discard_sectors = rs->md.chunk_sectors;
744 }
745 diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
746 index 4fb1a40e68a08..0164c9ca984ba 100644
747 --- a/drivers/md/dm-snap.c
748 +++ b/drivers/md/dm-snap.c
749 @@ -141,6 +141,11 @@ struct dm_snapshot {
750 * for them to be committed.
751 */
752 struct bio_list bios_queued_during_merge;
753 +
754 + /*
755 + * Flush data after merge.
756 + */
757 + struct bio flush_bio;
758 };
759
760 /*
761 @@ -1121,6 +1126,17 @@ shut:
762
763 static void error_bios(struct bio *bio);
764
765 +static int flush_data(struct dm_snapshot *s)
766 +{
767 + struct bio *flush_bio = &s->flush_bio;
768 +
769 + bio_reset(flush_bio);
770 + bio_set_dev(flush_bio, s->origin->bdev);
771 + flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
772 +
773 + return submit_bio_wait(flush_bio);
774 +}
775 +
776 static void merge_callback(int read_err, unsigned long write_err, void *context)
777 {
778 struct dm_snapshot *s = context;
779 @@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context)
780 goto shut;
781 }
782
783 + if (flush_data(s) < 0) {
784 + DMERR("Flush after merge failed: shutting down merge");
785 + goto shut;
786 + }
787 +
788 if (s->store->type->commit_merge(s->store,
789 s->num_merging_chunks) < 0) {
790 DMERR("Write error in exception store: shutting down merge");
791 @@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
792 s->first_merging_chunk = 0;
793 s->num_merging_chunks = 0;
794 bio_list_init(&s->bios_queued_during_merge);
795 + bio_init(&s->flush_bio, NULL, 0);
796
797 /* Allocate hash table for COW data */
798 if (init_hash_tables(s)) {
799 @@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti)
800
801 dm_exception_store_destroy(s->store);
802
803 + bio_uninit(&s->flush_bio);
804 +
805 dm_put_device(ti, s->cow);
806
807 dm_put_device(ti, s->origin);
808 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
809 index bf3c2a1159e68..c6ce42daff27b 100644
810 --- a/drivers/md/dm.c
811 +++ b/drivers/md/dm.c
812 @@ -548,7 +548,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
813 * subset of the parent bdev; require extra privileges.
814 */
815 if (!capable(CAP_SYS_RAWIO)) {
816 - DMWARN_LIMIT(
817 + DMDEBUG_LIMIT(
818 "%s: sending ioctl %x to DM device without required privilege.",
819 current->comm, cmd);
820 r = -ENOIOCTLCMD;
821 diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
822 index 3eeb1920ddb43..3486bf33474d9 100644
823 --- a/drivers/misc/habanalabs/device.c
824 +++ b/drivers/misc/habanalabs/device.c
825 @@ -959,6 +959,7 @@ again:
826 GFP_KERNEL);
827 if (!hdev->kernel_ctx) {
828 rc = -ENOMEM;
829 + hl_mmu_fini(hdev);
830 goto out_err;
831 }
832
833 @@ -970,6 +971,7 @@ again:
834 "failed to init kernel ctx in hard reset\n");
835 kfree(hdev->kernel_ctx);
836 hdev->kernel_ctx = NULL;
837 + hl_mmu_fini(hdev);
838 goto out_err;
839 }
840 }
841 diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c
842 index 8c342fb499ca6..ae50bd55f30af 100644
843 --- a/drivers/misc/habanalabs/habanalabs_drv.c
844 +++ b/drivers/misc/habanalabs/habanalabs_drv.c
845 @@ -443,6 +443,7 @@ static struct pci_driver hl_pci_driver = {
846 .id_table = ids,
847 .probe = hl_pci_probe,
848 .remove = hl_pci_remove,
849 + .shutdown = hl_pci_remove,
850 .driver.pm = &hl_pm_ops,
851 };
852
853 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
854 index 30816ec4fa915..13ef6a9afaa09 100644
855 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
856 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
857 @@ -216,8 +216,12 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp)
858
859 int bnxt_get_ulp_stat_ctxs(struct bnxt *bp)
860 {
861 - if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
862 - return BNXT_MIN_ROCE_STAT_CTXS;
863 + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) {
864 + struct bnxt_en_dev *edev = bp->edev;
865 +
866 + if (edev->ulp_tbl[BNXT_ROCE_ULP].msix_requested)
867 + return BNXT_MIN_ROCE_STAT_CTXS;
868 + }
869
870 return 0;
871 }
872 diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
873 index c8e5d889bd81f..21de56345503f 100644
874 --- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
875 +++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
876 @@ -223,3 +223,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
877 };
878
879 module_platform_driver(fs_enet_bb_mdio_driver);
880 +MODULE_LICENSE("GPL");
881 diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
882 index 1582d82483eca..4e6a9c5d8af55 100644
883 --- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
884 +++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
885 @@ -224,3 +224,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
886 };
887
888 module_platform_driver(fs_enet_fec_mdio_driver);
889 +MODULE_LICENSE("GPL");
890 diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
891 index a86a42131fc71..b00fbef612cfe 100644
892 --- a/drivers/net/ethernet/freescale/ucc_geth.h
893 +++ b/drivers/net/ethernet/freescale/ucc_geth.h
894 @@ -576,7 +576,14 @@ struct ucc_geth_tx_global_pram {
895 u32 vtagtable[0x8]; /* 8 4-byte VLAN tags */
896 u32 tqptr; /* a base pointer to the Tx Queues Memory
897 Region */
898 - u8 res2[0x80 - 0x74];
899 + u8 res2[0x78 - 0x74];
900 + u64 snums_en;
901 + u32 l2l3baseptr; /* top byte consists of a few other bit fields */
902 +
903 + u16 mtu[8];
904 + u8 res3[0xa8 - 0x94];
905 + u32 wrrtablebase; /* top byte is reserved */
906 + u8 res4[0xc0 - 0xac];
907 } __packed;
908
909 /* structure representing Extended Filtering Global Parameters in PRAM */
910 diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
911 index 9bb37ac99a108..8325f6d65dccc 100644
912 --- a/drivers/net/usb/cdc_ether.c
913 +++ b/drivers/net/usb/cdc_ether.c
914 @@ -787,6 +787,13 @@ static const struct usb_device_id products[] = {
915 .driver_info = 0,
916 },
917
918 +/* Lenovo Powered USB-C Travel Hub (4X90S92381, based on Realtek RTL8153) */
919 +{
920 + USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x721e, USB_CLASS_COMM,
921 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
922 + .driver_info = 0,
923 +},
924 +
925 /* ThinkPad USB-C Dock Gen 2 (based on Realtek RTL8153) */
926 {
927 USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0xa387, USB_CLASS_COMM,
928 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
929 index cd1a07175e111..22f093797f417 100644
930 --- a/drivers/net/usb/r8152.c
931 +++ b/drivers/net/usb/r8152.c
932 @@ -5845,6 +5845,7 @@ static const struct usb_device_id rtl8152_table[] = {
933 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
934 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x720c)},
935 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)},
936 + {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x721e)},
937 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0xa387)},
938 {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
939 {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
940 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
941 index 9b1fc8633cfe1..ef93bd3ed339c 100644
942 --- a/drivers/nvme/host/pci.c
943 +++ b/drivers/nvme/host/pci.c
944 @@ -3145,7 +3145,8 @@ static const struct pci_device_id nvme_id_table[] = {
945 { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
946 .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
947 { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */
948 - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
949 + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY |
950 + NVME_QUIRK_IGNORE_DEV_SUBNQN, },
951 { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */
952 .driver_data = NVME_QUIRK_LIGHTNVM, },
953 { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */
954 diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
955 index a31c6e1f6063a..a554021e1ab92 100644
956 --- a/drivers/nvme/host/tcp.c
957 +++ b/drivers/nvme/host/tcp.c
958 @@ -186,7 +186,7 @@ static inline size_t nvme_tcp_req_cur_offset(struct nvme_tcp_request *req)
959
960 static inline size_t nvme_tcp_req_cur_length(struct nvme_tcp_request *req)
961 {
962 - return min_t(size_t, req->iter.bvec->bv_len - req->iter.iov_offset,
963 + return min_t(size_t, iov_iter_single_seg_count(&req->iter),
964 req->pdu_len - req->pdu_sent);
965 }
966
967 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
968 index b5314164479e9..50e2007092bc0 100644
969 --- a/drivers/nvme/target/rdma.c
970 +++ b/drivers/nvme/target/rdma.c
971 @@ -1351,6 +1351,16 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
972 spin_lock_irqsave(&queue->state_lock, flags);
973 switch (queue->state) {
974 case NVMET_RDMA_Q_CONNECTING:
975 + while (!list_empty(&queue->rsp_wait_list)) {
976 + struct nvmet_rdma_rsp *rsp;
977 +
978 + rsp = list_first_entry(&queue->rsp_wait_list,
979 + struct nvmet_rdma_rsp,
980 + wait_list);
981 + list_del(&rsp->wait_list);
982 + nvmet_rdma_put_rsp(rsp);
983 + }
984 + fallthrough;
985 case NVMET_RDMA_Q_LIVE:
986 queue->state = NVMET_RDMA_Q_DISCONNECTING;
987 disconnect = true;
988 diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c
989 index bdab46a5c4617..6c431456d2983 100644
990 --- a/drivers/regulator/bd718x7-regulator.c
991 +++ b/drivers/regulator/bd718x7-regulator.c
992 @@ -15,6 +15,36 @@
993 #include <linux/regulator/of_regulator.h>
994 #include <linux/slab.h>
995
996 +/* Typical regulator startup times as per data sheet in uS */
997 +#define BD71847_BUCK1_STARTUP_TIME 144
998 +#define BD71847_BUCK2_STARTUP_TIME 162
999 +#define BD71847_BUCK3_STARTUP_TIME 162
1000 +#define BD71847_BUCK4_STARTUP_TIME 240
1001 +#define BD71847_BUCK5_STARTUP_TIME 270
1002 +#define BD71847_BUCK6_STARTUP_TIME 200
1003 +#define BD71847_LDO1_STARTUP_TIME 440
1004 +#define BD71847_LDO2_STARTUP_TIME 370
1005 +#define BD71847_LDO3_STARTUP_TIME 310
1006 +#define BD71847_LDO4_STARTUP_TIME 400
1007 +#define BD71847_LDO5_STARTUP_TIME 530
1008 +#define BD71847_LDO6_STARTUP_TIME 400
1009 +
1010 +#define BD71837_BUCK1_STARTUP_TIME 160
1011 +#define BD71837_BUCK2_STARTUP_TIME 180
1012 +#define BD71837_BUCK3_STARTUP_TIME 180
1013 +#define BD71837_BUCK4_STARTUP_TIME 180
1014 +#define BD71837_BUCK5_STARTUP_TIME 160
1015 +#define BD71837_BUCK6_STARTUP_TIME 240
1016 +#define BD71837_BUCK7_STARTUP_TIME 220
1017 +#define BD71837_BUCK8_STARTUP_TIME 200
1018 +#define BD71837_LDO1_STARTUP_TIME 440
1019 +#define BD71837_LDO2_STARTUP_TIME 370
1020 +#define BD71837_LDO3_STARTUP_TIME 310
1021 +#define BD71837_LDO4_STARTUP_TIME 400
1022 +#define BD71837_LDO5_STARTUP_TIME 310
1023 +#define BD71837_LDO6_STARTUP_TIME 400
1024 +#define BD71837_LDO7_STARTUP_TIME 530
1025 +
1026 /*
1027 * BUCK1/2/3/4
1028 * BUCK1RAMPRATE[1:0] BUCK1 DVS ramp rate setting
1029 @@ -495,6 +525,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1030 .vsel_mask = DVS_BUCK_RUN_MASK,
1031 .enable_reg = BD718XX_REG_BUCK1_CTRL,
1032 .enable_mask = BD718XX_BUCK_EN,
1033 + .enable_time = BD71847_BUCK1_STARTUP_TIME,
1034 .owner = THIS_MODULE,
1035 .of_parse_cb = buck1_set_hw_dvs_levels,
1036 },
1037 @@ -519,6 +550,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1038 .vsel_mask = DVS_BUCK_RUN_MASK,
1039 .enable_reg = BD718XX_REG_BUCK2_CTRL,
1040 .enable_mask = BD718XX_BUCK_EN,
1041 + .enable_time = BD71847_BUCK2_STARTUP_TIME,
1042 .owner = THIS_MODULE,
1043 .of_parse_cb = buck2_set_hw_dvs_levels,
1044 },
1045 @@ -547,6 +579,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1046 .linear_range_selectors = bd71847_buck3_volt_range_sel,
1047 .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
1048 .enable_mask = BD718XX_BUCK_EN,
1049 + .enable_time = BD71847_BUCK3_STARTUP_TIME,
1050 .owner = THIS_MODULE,
1051 },
1052 .init = {
1053 @@ -574,6 +607,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1054 .vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
1055 .linear_range_selectors = bd71847_buck4_volt_range_sel,
1056 .enable_mask = BD718XX_BUCK_EN,
1057 + .enable_time = BD71847_BUCK4_STARTUP_TIME,
1058 .owner = THIS_MODULE,
1059 },
1060 .init = {
1061 @@ -596,6 +630,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1062 .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
1063 .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
1064 .enable_mask = BD718XX_BUCK_EN,
1065 + .enable_time = BD71847_BUCK5_STARTUP_TIME,
1066 .owner = THIS_MODULE,
1067 },
1068 .init = {
1069 @@ -620,6 +655,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1070 .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
1071 .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
1072 .enable_mask = BD718XX_BUCK_EN,
1073 + .enable_time = BD71847_BUCK6_STARTUP_TIME,
1074 .owner = THIS_MODULE,
1075 },
1076 .init = {
1077 @@ -646,6 +682,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1078 .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
1079 .enable_reg = BD718XX_REG_LDO1_VOLT,
1080 .enable_mask = BD718XX_LDO_EN,
1081 + .enable_time = BD71847_LDO1_STARTUP_TIME,
1082 .owner = THIS_MODULE,
1083 },
1084 .init = {
1085 @@ -668,6 +705,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1086 .n_voltages = ARRAY_SIZE(ldo_2_volts),
1087 .enable_reg = BD718XX_REG_LDO2_VOLT,
1088 .enable_mask = BD718XX_LDO_EN,
1089 + .enable_time = BD71847_LDO2_STARTUP_TIME,
1090 .owner = THIS_MODULE,
1091 },
1092 .init = {
1093 @@ -691,6 +729,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1094 .vsel_mask = BD718XX_LDO3_MASK,
1095 .enable_reg = BD718XX_REG_LDO3_VOLT,
1096 .enable_mask = BD718XX_LDO_EN,
1097 + .enable_time = BD71847_LDO3_STARTUP_TIME,
1098 .owner = THIS_MODULE,
1099 },
1100 .init = {
1101 @@ -714,6 +753,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1102 .vsel_mask = BD718XX_LDO4_MASK,
1103 .enable_reg = BD718XX_REG_LDO4_VOLT,
1104 .enable_mask = BD718XX_LDO_EN,
1105 + .enable_time = BD71847_LDO4_STARTUP_TIME,
1106 .owner = THIS_MODULE,
1107 },
1108 .init = {
1109 @@ -740,6 +780,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1110 .linear_range_selectors = bd71847_ldo5_volt_range_sel,
1111 .enable_reg = BD718XX_REG_LDO5_VOLT,
1112 .enable_mask = BD718XX_LDO_EN,
1113 + .enable_time = BD71847_LDO5_STARTUP_TIME,
1114 .owner = THIS_MODULE,
1115 },
1116 .init = {
1117 @@ -765,6 +806,7 @@ static const struct bd718xx_regulator_data bd71847_regulators[] = {
1118 .vsel_mask = BD718XX_LDO6_MASK,
1119 .enable_reg = BD718XX_REG_LDO6_VOLT,
1120 .enable_mask = BD718XX_LDO_EN,
1121 + .enable_time = BD71847_LDO6_STARTUP_TIME,
1122 .owner = THIS_MODULE,
1123 },
1124 .init = {
1125 @@ -791,6 +833,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1126 .vsel_mask = DVS_BUCK_RUN_MASK,
1127 .enable_reg = BD718XX_REG_BUCK1_CTRL,
1128 .enable_mask = BD718XX_BUCK_EN,
1129 + .enable_time = BD71837_BUCK1_STARTUP_TIME,
1130 .owner = THIS_MODULE,
1131 .of_parse_cb = buck1_set_hw_dvs_levels,
1132 },
1133 @@ -815,6 +858,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1134 .vsel_mask = DVS_BUCK_RUN_MASK,
1135 .enable_reg = BD718XX_REG_BUCK2_CTRL,
1136 .enable_mask = BD718XX_BUCK_EN,
1137 + .enable_time = BD71837_BUCK2_STARTUP_TIME,
1138 .owner = THIS_MODULE,
1139 .of_parse_cb = buck2_set_hw_dvs_levels,
1140 },
1141 @@ -839,6 +883,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1142 .vsel_mask = DVS_BUCK_RUN_MASK,
1143 .enable_reg = BD71837_REG_BUCK3_CTRL,
1144 .enable_mask = BD718XX_BUCK_EN,
1145 + .enable_time = BD71837_BUCK3_STARTUP_TIME,
1146 .owner = THIS_MODULE,
1147 .of_parse_cb = buck3_set_hw_dvs_levels,
1148 },
1149 @@ -863,6 +908,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1150 .vsel_mask = DVS_BUCK_RUN_MASK,
1151 .enable_reg = BD71837_REG_BUCK4_CTRL,
1152 .enable_mask = BD718XX_BUCK_EN,
1153 + .enable_time = BD71837_BUCK4_STARTUP_TIME,
1154 .owner = THIS_MODULE,
1155 .of_parse_cb = buck4_set_hw_dvs_levels,
1156 },
1157 @@ -891,6 +937,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1158 .linear_range_selectors = bd71837_buck5_volt_range_sel,
1159 .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
1160 .enable_mask = BD718XX_BUCK_EN,
1161 + .enable_time = BD71837_BUCK5_STARTUP_TIME,
1162 .owner = THIS_MODULE,
1163 },
1164 .init = {
1165 @@ -915,6 +962,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1166 .vsel_mask = BD71837_BUCK6_MASK,
1167 .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
1168 .enable_mask = BD718XX_BUCK_EN,
1169 + .enable_time = BD71837_BUCK6_STARTUP_TIME,
1170 .owner = THIS_MODULE,
1171 },
1172 .init = {
1173 @@ -937,6 +985,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1174 .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
1175 .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
1176 .enable_mask = BD718XX_BUCK_EN,
1177 + .enable_time = BD71837_BUCK7_STARTUP_TIME,
1178 .owner = THIS_MODULE,
1179 },
1180 .init = {
1181 @@ -961,6 +1010,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1182 .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
1183 .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
1184 .enable_mask = BD718XX_BUCK_EN,
1185 + .enable_time = BD71837_BUCK8_STARTUP_TIME,
1186 .owner = THIS_MODULE,
1187 },
1188 .init = {
1189 @@ -987,6 +1037,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1190 .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
1191 .enable_reg = BD718XX_REG_LDO1_VOLT,
1192 .enable_mask = BD718XX_LDO_EN,
1193 + .enable_time = BD71837_LDO1_STARTUP_TIME,
1194 .owner = THIS_MODULE,
1195 },
1196 .init = {
1197 @@ -1009,6 +1060,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1198 .n_voltages = ARRAY_SIZE(ldo_2_volts),
1199 .enable_reg = BD718XX_REG_LDO2_VOLT,
1200 .enable_mask = BD718XX_LDO_EN,
1201 + .enable_time = BD71837_LDO2_STARTUP_TIME,
1202 .owner = THIS_MODULE,
1203 },
1204 .init = {
1205 @@ -1032,6 +1084,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1206 .vsel_mask = BD718XX_LDO3_MASK,
1207 .enable_reg = BD718XX_REG_LDO3_VOLT,
1208 .enable_mask = BD718XX_LDO_EN,
1209 + .enable_time = BD71837_LDO3_STARTUP_TIME,
1210 .owner = THIS_MODULE,
1211 },
1212 .init = {
1213 @@ -1055,6 +1108,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1214 .vsel_mask = BD718XX_LDO4_MASK,
1215 .enable_reg = BD718XX_REG_LDO4_VOLT,
1216 .enable_mask = BD718XX_LDO_EN,
1217 + .enable_time = BD71837_LDO4_STARTUP_TIME,
1218 .owner = THIS_MODULE,
1219 },
1220 .init = {
1221 @@ -1080,6 +1134,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1222 .vsel_mask = BD71837_LDO5_MASK,
1223 .enable_reg = BD718XX_REG_LDO5_VOLT,
1224 .enable_mask = BD718XX_LDO_EN,
1225 + .enable_time = BD71837_LDO5_STARTUP_TIME,
1226 .owner = THIS_MODULE,
1227 },
1228 .init = {
1229 @@ -1107,6 +1162,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1230 .vsel_mask = BD718XX_LDO6_MASK,
1231 .enable_reg = BD718XX_REG_LDO6_VOLT,
1232 .enable_mask = BD718XX_LDO_EN,
1233 + .enable_time = BD71837_LDO6_STARTUP_TIME,
1234 .owner = THIS_MODULE,
1235 },
1236 .init = {
1237 @@ -1132,6 +1188,7 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = {
1238 .vsel_mask = BD71837_LDO7_MASK,
1239 .enable_reg = BD71837_REG_LDO7_VOLT,
1240 .enable_mask = BD718XX_LDO_EN,
1241 + .enable_time = BD71837_LDO7_STARTUP_TIME,
1242 .owner = THIS_MODULE,
1243 },
1244 .init = {
1245 diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
1246 index 187690fd1a5bd..60d375e9c3c7c 100644
1247 --- a/drivers/usb/typec/altmodes/Kconfig
1248 +++ b/drivers/usb/typec/altmodes/Kconfig
1249 @@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
1250 to enable support for VirtualLink devices with NVIDIA GPUs.
1251
1252 To compile this driver as a module, choose M here: the
1253 - module will be called typec_displayport.
1254 + module will be called typec_nvidia.
1255
1256 endmenu
1257 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
1258 index 60c21cfb19480..95205bde240f7 100644
1259 --- a/fs/btrfs/extent_io.c
1260 +++ b/fs/btrfs/extent_io.c
1261 @@ -647,9 +647,7 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
1262
1263 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
1264 {
1265 - struct inode *inode = tree->private_data;
1266 -
1267 - btrfs_panic(btrfs_sb(inode->i_sb), err,
1268 + btrfs_panic(tree->fs_info, err,
1269 "locking error: extent tree was modified by another thread while locked");
1270 }
1271
1272 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
1273 index d9246fb8cea65..cd8e81c02f63f 100644
1274 --- a/fs/btrfs/qgroup.c
1275 +++ b/fs/btrfs/qgroup.c
1276 @@ -3144,6 +3144,12 @@ out:
1277 return ret;
1278 }
1279
1280 +static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
1281 +{
1282 + return btrfs_fs_closing(fs_info) ||
1283 + test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
1284 +}
1285 +
1286 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
1287 {
1288 struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
1289 @@ -3152,6 +3158,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
1290 struct btrfs_trans_handle *trans = NULL;
1291 int err = -ENOMEM;
1292 int ret = 0;
1293 + bool stopped = false;
1294
1295 path = btrfs_alloc_path();
1296 if (!path)
1297 @@ -3164,7 +3171,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
1298 path->skip_locking = 1;
1299
1300 err = 0;
1301 - while (!err && !btrfs_fs_closing(fs_info)) {
1302 + while (!err && !(stopped = rescan_should_stop(fs_info))) {
1303 trans = btrfs_start_transaction(fs_info->fs_root, 0);
1304 if (IS_ERR(trans)) {
1305 err = PTR_ERR(trans);
1306 @@ -3207,7 +3214,7 @@ out:
1307 }
1308
1309 mutex_lock(&fs_info->qgroup_rescan_lock);
1310 - if (!btrfs_fs_closing(fs_info))
1311 + if (!stopped)
1312 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
1313 if (trans) {
1314 ret = update_qgroup_status_item(trans);
1315 @@ -3226,7 +3233,7 @@ out:
1316
1317 btrfs_end_transaction(trans);
1318
1319 - if (btrfs_fs_closing(fs_info)) {
1320 + if (stopped) {
1321 btrfs_info(fs_info, "qgroup scan paused");
1322 } else if (err >= 0) {
1323 btrfs_info(fs_info, "qgroup scan completed%s",
1324 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1325 index 6a2ae208ff80a..1a69bdb96fb2a 100644
1326 --- a/fs/btrfs/super.c
1327 +++ b/fs/btrfs/super.c
1328 @@ -1792,6 +1792,14 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1329 btrfs_scrub_cancel(fs_info);
1330 btrfs_pause_balance(fs_info);
1331
1332 + /*
1333 + * Pause the qgroup rescan worker if it is running. We don't want
1334 + * it to be still running after we are in RO mode, as after that,
1335 + * by the time we unmount, it might have left a transaction open,
1336 + * so we would leak the transaction and/or crash.
1337 + */
1338 + btrfs_qgroup_wait_for_completion(fs_info, false);
1339 +
1340 ret = btrfs_commit_super(fs_info);
1341 if (ret)
1342 goto restore;
1343 diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
1344 index 9feb8a1793efb..7d06842a3d747 100644
1345 --- a/fs/btrfs/tree-checker.c
1346 +++ b/fs/btrfs/tree-checker.c
1347 @@ -571,6 +571,7 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
1348 {
1349 struct btrfs_fs_info *fs_info = leaf->fs_info;
1350 u64 length;
1351 + u64 chunk_end;
1352 u64 stripe_len;
1353 u16 num_stripes;
1354 u16 sub_stripes;
1355 @@ -625,6 +626,12 @@ int btrfs_check_chunk_valid(struct extent_buffer *leaf,
1356 "invalid chunk length, have %llu", length);
1357 return -EUCLEAN;
1358 }
1359 + if (unlikely(check_add_overflow(logical, length, &chunk_end))) {
1360 + chunk_err(leaf, chunk, logical,
1361 +"invalid chunk logical start and length, have logical start %llu length %llu",
1362 + logical, length);
1363 + return -EUCLEAN;
1364 + }
1365 if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) {
1366 chunk_err(leaf, chunk, logical,
1367 "invalid chunk stripe length: %llu",
1368 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
1369 index 7ff05c06f2a4c..be06b26d6ca03 100644
1370 --- a/fs/cifs/smb2pdu.c
1371 +++ b/fs/cifs/smb2pdu.c
1372 @@ -2945,8 +2945,8 @@ SMB2_close_free(struct smb_rqst *rqst)
1373 }
1374
1375 int
1376 -SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
1377 - u64 persistent_fid, u64 volatile_fid, int flags)
1378 +SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
1379 + u64 persistent_fid, u64 volatile_fid)
1380 {
1381 struct smb_rqst rqst;
1382 struct smb2_close_rsp *rsp = NULL;
1383 @@ -2955,6 +2955,7 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
1384 struct kvec rsp_iov;
1385 int resp_buftype = CIFS_NO_BUFFER;
1386 int rc = 0;
1387 + int flags = 0;
1388
1389 cifs_dbg(FYI, "Close\n");
1390
1391 @@ -2993,27 +2994,17 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
1392 close_exit:
1393 SMB2_close_free(&rqst);
1394 free_rsp_buf(resp_buftype, rsp);
1395 - return rc;
1396 -}
1397 -
1398 -int
1399 -SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
1400 - u64 persistent_fid, u64 volatile_fid)
1401 -{
1402 - int rc;
1403 - int tmp_rc;
1404 -
1405 - rc = SMB2_close_flags(xid, tcon, persistent_fid, volatile_fid, 0);
1406
1407 /* retry close in a worker thread if this one is interrupted */
1408 - if (rc == -EINTR) {
1409 + if (is_interrupt_error(rc)) {
1410 + int tmp_rc;
1411 +
1412 tmp_rc = smb2_handle_cancelled_close(tcon, persistent_fid,
1413 volatile_fid);
1414 if (tmp_rc)
1415 cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
1416 persistent_fid, tmp_rc);
1417 }
1418 -
1419 return rc;
1420 }
1421
1422 diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
1423 index 2a12a2fa38a22..57f7075a35871 100644
1424 --- a/fs/cifs/smb2proto.h
1425 +++ b/fs/cifs/smb2proto.h
1426 @@ -156,8 +156,6 @@ extern int SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
1427
1428 extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
1429 u64 persistent_file_id, u64 volatile_file_id);
1430 -extern int SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
1431 - u64 persistent_fid, u64 volatile_fid, int flags);
1432 extern int SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
1433 u64 persistent_file_id, u64 volatile_file_id);
1434 extern void SMB2_close_free(struct smb_rqst *rqst);
1435 diff --git a/fs/ext4/file.c b/fs/ext4/file.c
1436 index fd7ce3573a00a..1513e90fb6d2f 100644
1437 --- a/fs/ext4/file.c
1438 +++ b/fs/ext4/file.c
1439 @@ -432,7 +432,7 @@ static int ext4_sample_last_mounted(struct super_block *sb,
1440 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1441 if (err)
1442 goto out_journal;
1443 - strlcpy(sbi->s_es->s_last_mounted, cp,
1444 + strncpy(sbi->s_es->s_last_mounted, cp,
1445 sizeof(sbi->s_es->s_last_mounted));
1446 ext4_handle_dirty_super(handle, sb);
1447 out_journal:
1448 diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
1449 index 0b7f316fd30f4..ba13fbb443d58 100644
1450 --- a/fs/ext4/ioctl.c
1451 +++ b/fs/ext4/ioctl.c
1452 @@ -1160,7 +1160,10 @@ resizefs_out:
1453 err = ext4_journal_get_write_access(handle, sbi->s_sbh);
1454 if (err)
1455 goto pwsalt_err_journal;
1456 + lock_buffer(sbi->s_sbh);
1457 generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
1458 + ext4_superblock_csum_set(sb);
1459 + unlock_buffer(sbi->s_sbh);
1460 err = ext4_handle_dirty_metadata(handle, NULL,
1461 sbi->s_sbh);
1462 pwsalt_err_journal:
1463 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1464 index 59038e361337c..f05ec9bfbf4fd 100644
1465 --- a/fs/ext4/namei.c
1466 +++ b/fs/ext4/namei.c
1467 @@ -3544,8 +3544,6 @@ static int ext4_setent(handle_t *handle, struct ext4_renament *ent,
1468 return retval;
1469 }
1470 }
1471 - brelse(ent->bh);
1472 - ent->bh = NULL;
1473
1474 return 0;
1475 }
1476 @@ -3745,6 +3743,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1477 }
1478 }
1479
1480 + old_file_type = old.de->file_type;
1481 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
1482 ext4_handle_sync(handle);
1483
1484 @@ -3772,7 +3771,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1485 force_reread = (new.dir->i_ino == old.dir->i_ino &&
1486 ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
1487
1488 - old_file_type = old.de->file_type;
1489 if (whiteout) {
1490 /*
1491 * Do this before adding a new entry, so the old entry is sure
1492 @@ -3844,15 +3842,19 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
1493 retval = 0;
1494
1495 end_rename:
1496 - brelse(old.dir_bh);
1497 - brelse(old.bh);
1498 - brelse(new.bh);
1499 if (whiteout) {
1500 - if (retval)
1501 + if (retval) {
1502 + ext4_setent(handle, &old,
1503 + old.inode->i_ino, old_file_type);
1504 drop_nlink(whiteout);
1505 + }
1506 unlock_new_inode(whiteout);
1507 iput(whiteout);
1508 +
1509 }
1510 + brelse(old.dir_bh);
1511 + brelse(old.bh);
1512 + brelse(new.bh);
1513 if (handle)
1514 ext4_journal_stop(handle);
1515 return retval;
1516 diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
1517 index 447a3c17fa8e6..9e717796e57b7 100644
1518 --- a/fs/nfs/internal.h
1519 +++ b/fs/nfs/internal.h
1520 @@ -569,12 +569,14 @@ extern void nfs4_test_session_trunk(struct rpc_clnt *clnt,
1521
1522 static inline struct inode *nfs_igrab_and_active(struct inode *inode)
1523 {
1524 - inode = igrab(inode);
1525 - if (inode != NULL && !nfs_sb_active(inode->i_sb)) {
1526 - iput(inode);
1527 - inode = NULL;
1528 + struct super_block *sb = inode->i_sb;
1529 +
1530 + if (sb && nfs_sb_active(sb)) {
1531 + if (igrab(inode))
1532 + return inode;
1533 + nfs_sb_deactive(sb);
1534 }
1535 - return inode;
1536 + return NULL;
1537 }
1538
1539 static inline void nfs_iput_and_deactive(struct inode *inode)
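
The rewritten nfs_igrab_and_active() above takes the superblock reference first, then the inode reference, and rolls the superblock reference back if igrab() fails, so callers end up holding either both references or neither. A tiny sketch of that acquire-then-rollback shape, with made-up grab_a()/grab_b() helpers rather than the NFS ones:

/* Sketch: take reference A, then reference B; if B cannot be taken, drop A
 * again, so the caller ends up holding both references or none.
 * grab_a()/grab_b()/struct resource are stand-ins, not the NFS helpers. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct resource {
    int a_refs;
    int b_refs;
    bool dying;         /* makes grab_b() fail, like a dying inode */
};

static bool grab_a(struct resource *r) { r->a_refs++; return true; }
static void drop_a(struct resource *r) { r->a_refs--; }

static bool grab_b(struct resource *r)
{
    if (r->dying)
        return false;
    r->b_refs++;
    return true;
}

static struct resource *acquire_both(struct resource *r)
{
    if (grab_a(r)) {            /* nfs_sb_active(sb)     */
        if (grab_b(r))          /* igrab(inode)          */
            return r;           /* caller owns both refs */
        drop_a(r);              /* nfs_sb_deactive(sb)   */
    }
    return NULL;                /* caller owns nothing   */
}

int main(void)
{
    struct resource r = { 0, 0, false };

    printf("%s\n", acquire_both(&r) ? "got both" : "got nothing");
    r.dying = true;
    printf("%s\n", acquire_both(&r) ? "got both" : "got nothing");
    printf("a_refs=%d b_refs=%d\n", r.a_refs, r.b_refs);   /* 1 and 1 */
    return 0;
}
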
1540 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
1541 index 8598eba3fc234..30e44b33040a4 100644
1542 --- a/fs/nfs/nfs4proc.c
1543 +++ b/fs/nfs/nfs4proc.c
1544 @@ -3488,10 +3488,8 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
1545 trace_nfs4_close(state, &calldata->arg, &calldata->res, task->tk_status);
1546
1547 /* Handle Layoutreturn errors */
1548 - if (pnfs_roc_done(task, calldata->inode,
1549 - &calldata->arg.lr_args,
1550 - &calldata->res.lr_res,
1551 - &calldata->res.lr_ret) == -EAGAIN)
1552 + if (pnfs_roc_done(task, &calldata->arg.lr_args, &calldata->res.lr_res,
1553 + &calldata->res.lr_ret) == -EAGAIN)
1554 goto out_restart;
1555
1556 /* hmm. we are done with the inode, and in the process of freeing
1557 @@ -6238,10 +6236,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata)
1558 trace_nfs4_delegreturn_exit(&data->args, &data->res, task->tk_status);
1559
1560 /* Handle Layoutreturn errors */
1561 - if (pnfs_roc_done(task, data->inode,
1562 - &data->args.lr_args,
1563 - &data->res.lr_res,
1564 - &data->res.lr_ret) == -EAGAIN)
1565 + if (pnfs_roc_done(task, &data->args.lr_args, &data->res.lr_res,
1566 + &data->res.lr_ret) == -EAGAIN)
1567 goto out_restart;
1568
1569 switch (task->tk_status) {
1570 @@ -6290,10 +6286,10 @@ static void nfs4_delegreturn_release(void *calldata)
1571 struct nfs4_delegreturndata *data = calldata;
1572 struct inode *inode = data->inode;
1573
1574 + if (data->lr.roc)
1575 + pnfs_roc_release(&data->lr.arg, &data->lr.res,
1576 + data->res.lr_ret);
1577 if (inode) {
1578 - if (data->lr.roc)
1579 - pnfs_roc_release(&data->lr.arg, &data->lr.res,
1580 - data->res.lr_ret);
1581 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
1582 nfs_iput_and_deactive(inode);
1583 }
1584 @@ -6368,16 +6364,14 @@ static int _nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred,
1585 nfs_fattr_init(data->res.fattr);
1586 data->timestamp = jiffies;
1587 data->rpc_status = 0;
1588 - data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res, cred);
1589 data->inode = nfs_igrab_and_active(inode);
1590 - if (data->inode) {
1591 + if (data->inode || issync) {
1592 + data->lr.roc = pnfs_roc(inode, &data->lr.arg, &data->lr.res,
1593 + cred);
1594 if (data->lr.roc) {
1595 data->args.lr_args = &data->lr.arg;
1596 data->res.lr_res = &data->lr.res;
1597 }
1598 - } else if (data->lr.roc) {
1599 - pnfs_roc_release(&data->lr.arg, &data->lr.res, 0);
1600 - data->lr.roc = false;
1601 }
1602
1603 task_setup_data.callback_data = data;
1604 @@ -6959,9 +6953,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
1605 data->arg.new_lock_owner, ret);
1606 } else
1607 data->cancelled = true;
1608 + trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
1609 rpc_put_task(task);
1610 dprintk("%s: done, ret = %d!\n", __func__, ret);
1611 - trace_nfs4_set_lock(fl, state, &data->res.stateid, cmd, ret);
1612 return ret;
1613 }
1614
1615 diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
1616 index 9fd115c4d0a2f..4232f956bdac0 100644
1617 --- a/fs/nfs/pnfs.c
1618 +++ b/fs/nfs/pnfs.c
1619 @@ -1475,10 +1475,8 @@ out_noroc:
1620 return false;
1621 }
1622
1623 -int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
1624 - struct nfs4_layoutreturn_args **argpp,
1625 - struct nfs4_layoutreturn_res **respp,
1626 - int *ret)
1627 +int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
1628 + struct nfs4_layoutreturn_res **respp, int *ret)
1629 {
1630 struct nfs4_layoutreturn_args *arg = *argpp;
1631 int retval = -EAGAIN;
1632 @@ -1511,7 +1509,7 @@ int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
1633 return 0;
1634 case -NFS4ERR_OLD_STATEID:
1635 if (!nfs4_layout_refresh_old_stateid(&arg->stateid,
1636 - &arg->range, inode))
1637 + &arg->range, arg->inode))
1638 break;
1639 *ret = -NFS4ERR_NOMATCHING_LAYOUT;
1640 return -EAGAIN;
1641 @@ -1526,12 +1524,18 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
1642 int ret)
1643 {
1644 struct pnfs_layout_hdr *lo = args->layout;
1645 + struct inode *inode = args->inode;
1646 const nfs4_stateid *arg_stateid = NULL;
1647 const nfs4_stateid *res_stateid = NULL;
1648 struct nfs4_xdr_opaque_data *ld_private = args->ld_private;
1649
1650 switch (ret) {
1651 case -NFS4ERR_NOMATCHING_LAYOUT:
1652 + spin_lock(&inode->i_lock);
1653 + if (pnfs_layout_is_valid(lo) &&
1654 + nfs4_stateid_match_other(&args->stateid, &lo->plh_stateid))
1655 + pnfs_set_plh_return_info(lo, args->range.iomode, 0);
1656 + spin_unlock(&inode->i_lock);
1657 break;
1658 case 0:
1659 if (res->lrs_present)
1660 @@ -1982,6 +1986,27 @@ lookup_again:
1661 goto lookup_again;
1662 }
1663
1664 + /*
1665 + * Because we free lsegs when sending LAYOUTRETURN, we need to wait
1666 + * for LAYOUTRETURN.
1667 + */
1668 + if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1669 + spin_unlock(&ino->i_lock);
1670 + dprintk("%s wait for layoutreturn\n", __func__);
1671 + lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
1672 + if (!IS_ERR(lseg)) {
1673 + pnfs_put_layout_hdr(lo);
1674 + dprintk("%s retrying\n", __func__);
1675 + trace_pnfs_update_layout(ino, pos, count, iomode, lo,
1676 + lseg,
1677 + PNFS_UPDATE_LAYOUT_RETRY);
1678 + goto lookup_again;
1679 + }
1680 + trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1681 + PNFS_UPDATE_LAYOUT_RETURN);
1682 + goto out_put_layout_hdr;
1683 + }
1684 +
1685 lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
1686 if (lseg) {
1687 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1688 @@ -2035,28 +2060,6 @@ lookup_again:
1689 nfs4_stateid_copy(&stateid, &lo->plh_stateid);
1690 }
1691
1692 - /*
1693 - * Because we free lsegs before sending LAYOUTRETURN, we need to wait
1694 - * for LAYOUTRETURN even if first is true.
1695 - */
1696 - if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1697 - spin_unlock(&ino->i_lock);
1698 - dprintk("%s wait for layoutreturn\n", __func__);
1699 - lseg = ERR_PTR(pnfs_prepare_to_retry_layoutget(lo));
1700 - if (!IS_ERR(lseg)) {
1701 - if (first)
1702 - pnfs_clear_first_layoutget(lo);
1703 - pnfs_put_layout_hdr(lo);
1704 - dprintk("%s retrying\n", __func__);
1705 - trace_pnfs_update_layout(ino, pos, count, iomode, lo,
1706 - lseg, PNFS_UPDATE_LAYOUT_RETRY);
1707 - goto lookup_again;
1708 - }
1709 - trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1710 - PNFS_UPDATE_LAYOUT_RETURN);
1711 - goto out_put_layout_hdr;
1712 - }
1713 -
1714 if (pnfs_layoutgets_blocked(lo)) {
1715 trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1716 PNFS_UPDATE_LAYOUT_BLOCKED);
1717 @@ -2212,6 +2215,7 @@ static void _lgopen_prepare_attached(struct nfs4_opendata *data,
1718 &rng, GFP_KERNEL);
1719 if (!lgp) {
1720 pnfs_clear_first_layoutget(lo);
1721 + nfs_layoutget_end(lo);
1722 pnfs_put_layout_hdr(lo);
1723 return;
1724 }
1725 diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
1726 index 63da33a92d831..3d55edd6b25ad 100644
1727 --- a/fs/nfs/pnfs.h
1728 +++ b/fs/nfs/pnfs.h
1729 @@ -283,10 +283,8 @@ bool pnfs_roc(struct inode *ino,
1730 struct nfs4_layoutreturn_args *args,
1731 struct nfs4_layoutreturn_res *res,
1732 const struct cred *cred);
1733 -int pnfs_roc_done(struct rpc_task *task, struct inode *inode,
1734 - struct nfs4_layoutreturn_args **argpp,
1735 - struct nfs4_layoutreturn_res **respp,
1736 - int *ret);
1737 +int pnfs_roc_done(struct rpc_task *task, struct nfs4_layoutreturn_args **argpp,
1738 + struct nfs4_layoutreturn_res **respp, int *ret);
1739 void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
1740 struct nfs4_layoutreturn_res *res,
1741 int ret);
1742 @@ -711,7 +709,7 @@ pnfs_roc(struct inode *ino,
1743 }
1744
1745 static inline int
1746 -pnfs_roc_done(struct rpc_task *task, struct inode *inode,
1747 +pnfs_roc_done(struct rpc_task *task,
1748 struct nfs4_layoutreturn_args **argpp,
1749 struct nfs4_layoutreturn_res **respp,
1750 int *ret)
1751 diff --git a/include/linux/acpi.h b/include/linux/acpi.h
1752 index ce29a014e591c..dd6170357ec72 100644
1753 --- a/include/linux/acpi.h
1754 +++ b/include/linux/acpi.h
1755 @@ -837,6 +837,13 @@ static inline int acpi_device_modalias(struct device *dev,
1756 return -ENODEV;
1757 }
1758
1759 +static inline struct platform_device *
1760 +acpi_create_platform_device(struct acpi_device *adev,
1761 + struct property_entry *properties)
1762 +{
1763 + return NULL;
1764 +}
1765 +
1766 static inline bool acpi_dma_supported(struct acpi_device *adev)
1767 {
1768 return false;
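
The acpi_create_platform_device() stub added above follows the usual pattern for config-gated kernel APIs: when the option is compiled out, the header still provides a static inline that returns a "not available" value, so callers build unchanged instead of hitting a link error. A minimal sketch of that pattern, using a made-up FEATURE_FOO option and foo_create() helper:

/* Sketch: config-gated API with an inline stub for the "disabled" case.
 * FEATURE_FOO, foo_create() and struct foo are illustrative names. */
#include <stdio.h>
#include <stdlib.h>

struct foo { int id; };

#ifdef FEATURE_FOO
/* Real implementation, built only when the feature is enabled. */
static struct foo *foo_create(int id)
{
    struct foo *f = malloc(sizeof(*f));

    if (f)
        f->id = id;
    return f;
}
#else
/* Stub: callers compile unchanged and simply see "no device". */
static inline struct foo *foo_create(int id)
{
    (void)id;
    return NULL;
}
#endif

int main(void)
{
    struct foo *f = foo_create(42);

    printf("foo %s\n", f ? "created" : "not available");
    free(f);
    return 0;
}
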
1769 diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
1770 index 3c8b7d274bd9b..45ba37aaf6b78 100644
1771 --- a/include/linux/dm-bufio.h
1772 +++ b/include/linux/dm-bufio.h
1773 @@ -138,6 +138,7 @@ void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
1774
1775 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c);
1776 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c);
1777 +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c);
1778 sector_t dm_bufio_get_block_number(struct dm_buffer *b);
1779 void *dm_bufio_get_block_data(struct dm_buffer *b);
1780 void *dm_bufio_get_aux_data(struct dm_buffer *b);
1781 diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
1782 index f3f2fc8ad81aa..9fa01dad655b4 100644
1783 --- a/kernel/trace/Kconfig
1784 +++ b/kernel/trace/Kconfig
1785 @@ -478,7 +478,7 @@ config KPROBE_EVENTS
1786 config KPROBE_EVENTS_ON_NOTRACE
1787 bool "Do NOT protect notrace function from kprobe events"
1788 depends on KPROBE_EVENTS
1789 - depends on KPROBES_ON_FTRACE
1790 + depends on DYNAMIC_FTRACE
1791 default n
1792 help
1793 This is only for the developers who want to debug ftrace itself
1794 diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
1795 index 26de9c6549566..1074a69beff3f 100644
1796 --- a/kernel/trace/trace_kprobe.c
1797 +++ b/kernel/trace/trace_kprobe.c
1798 @@ -433,7 +433,7 @@ static int disable_trace_kprobe(struct trace_event_call *call,
1799 return 0;
1800 }
1801
1802 -#if defined(CONFIG_KPROBES_ON_FTRACE) && \
1803 +#if defined(CONFIG_DYNAMIC_FTRACE) && \
1804 !defined(CONFIG_KPROBE_EVENTS_ON_NOTRACE)
1805 static bool __within_notrace_func(unsigned long addr)
1806 {
1807 diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
1808 index 0083b5cc646c9..d4d56ca6eafce 100644
1809 --- a/lib/raid6/Makefile
1810 +++ b/lib/raid6/Makefile
1811 @@ -48,7 +48,7 @@ endif
1812 endif
1813
1814 quiet_cmd_unroll = UNROLL $@
1815 - cmd_unroll = $(AWK) -f$(srctree)/$(src)/unroll.awk -vN=$* < $< > $@
1816 + cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
1817
1818 targets += int1.c int2.c int4.c int8.c int16.c int32.c
1819 $(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
1820 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1821 index 62ec514dae658..3bc33fa838177 100644
1822 --- a/mm/hugetlb.c
1823 +++ b/mm/hugetlb.c
1824 @@ -4036,7 +4036,7 @@ retry:
1825 * So we need to block hugepage fault by PG_hwpoison bit check.
1826 */
1827 if (unlikely(PageHWPoison(page))) {
1828 - ret = VM_FAULT_HWPOISON |
1829 + ret = VM_FAULT_HWPOISON_LARGE |
1830 VM_FAULT_SET_HINDEX(hstate_index(h));
1831 goto backout_unlocked;
1832 }
1833 diff --git a/mm/slub.c b/mm/slub.c
1834 index f41414571c9eb..8b3ef45a0f103 100644
1835 --- a/mm/slub.c
1836 +++ b/mm/slub.c
1837 @@ -1887,7 +1887,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1838
1839 t = acquire_slab(s, n, page, object == NULL, &objects);
1840 if (!t)
1841 - break;
1842 + continue; /* cmpxchg raced */
1843
1844 available += objects;
1845 if (!object) {
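
The one-line mm/slub.c change above turns a failed acquire_slab() (its cmpxchg lost a race with another CPU) from aborting the partial-list scan into skipping just that page. A small C11-atomics sketch of the same idea, claiming slots from an array instead of slabs from a list; claim_slot() and the slot array are illustrative, not SLUB structures.

/* Sketch: claim the first free slot with compare-and-swap; if the CAS
 * fails, another thread raced us for *that* slot, so continue scanning
 * instead of giving up. Toy example, not the SLUB data structures. */
#include <stdatomic.h>
#include <stdio.h>

#define NSLOTS 8

static atomic_int slots[NSLOTS];    /* 0 = free, 1 = claimed */

static int claim_slot(void)
{
    for (int i = 0; i < NSLOTS; i++) {
        int expected = 0;

        /* CAS: only succeeds if the slot is still free. */
        if (atomic_compare_exchange_strong(&slots[i], &expected, 1))
            return i;
        /* CAS raced with another thread: keep scanning ("continue"),
         * don't abort the whole search ("break"). */
        continue;
    }
    return -1;                      /* nothing left to claim */
}

int main(void)
{
    printf("claimed slot %d\n", claim_slot());
    printf("claimed slot %d\n", claim_slot());
    return 0;
}
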
1846 diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
1847 index 1a58cfdb862d6..500de37858ac8 100644
1848 --- a/net/netfilter/ipset/ip_set_hash_gen.h
1849 +++ b/net/netfilter/ipset/ip_set_hash_gen.h
1850 @@ -630,7 +630,7 @@ mtype_resize(struct ip_set *set, bool retried)
1851 struct htype *h = set->data;
1852 struct htable *t, *orig;
1853 u8 htable_bits;
1854 - size_t dsize = set->dsize;
1855 + size_t hsize, dsize = set->dsize;
1856 #ifdef IP_SET_HASH_WITH_NETS
1857 u8 flags;
1858 struct mtype_elem *tmp;
1859 @@ -654,14 +654,12 @@ mtype_resize(struct ip_set *set, bool retried)
1860 retry:
1861 ret = 0;
1862 htable_bits++;
1863 - if (!htable_bits) {
1864 - /* In case we have plenty of memory :-) */
1865 - pr_warn("Cannot increase the hashsize of set %s further\n",
1866 - set->name);
1867 - ret = -IPSET_ERR_HASH_FULL;
1868 - goto out;
1869 - }
1870 - t = ip_set_alloc(htable_size(htable_bits));
1871 + if (!htable_bits)
1872 + goto hbwarn;
1873 + hsize = htable_size(htable_bits);
1874 + if (!hsize)
1875 + goto hbwarn;
1876 + t = ip_set_alloc(hsize);
1877 if (!t) {
1878 ret = -ENOMEM;
1879 goto out;
1880 @@ -803,6 +801,12 @@ cleanup:
1881 if (ret == -EAGAIN)
1882 goto retry;
1883 goto out;
1884 +
1885 +hbwarn:
1886 + /* In case we have plenty of memory :-) */
1887 + pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
1888 + ret = -IPSET_ERR_HASH_FULL;
1889 + goto out;
1890 }
1891
1892 /* Get the current number of elements and ext_size in the set */
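
In the ipset resize path above, a zero return from htable_size() is now treated like overflowing htable_bits: the set simply cannot be grown further. Returning 0 is the usual sentinel for "this size computation would overflow"; a small sketch of such an overflow-checked size helper (table_size() and HDR_SIZE are illustrative, not the ipset macros):

/* Sketch: compute header + nmemb * elem_size, returning 0 whenever any step
 * would overflow size_t, so callers can treat 0 as "too big to grow". */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE ((size_t)64)

static size_t table_size(uint8_t bits, size_t elem_size)
{
    size_t nmemb;

    if (elem_size == 0 || bits >= sizeof(size_t) * 8)
        return 0;           /* 1 << bits would overflow */
    nmemb = (size_t)1 << bits;

    if (nmemb > (SIZE_MAX - HDR_SIZE) / elem_size)
        return 0;           /* nmemb * elem_size + HDR_SIZE would overflow */
    return HDR_SIZE + nmemb * elem_size;
}

int main(void)
{
    printf("%zu\n", table_size(10, 32));    /* 64 + 1024 * 32 = 32832 */
    printf("%zu\n", table_size(62, 32));    /* 0: does not fit        */
    return 0;
}
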
1893 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
1894 index 4912069627b65..dc57f530df9db 100644
1895 --- a/net/netfilter/nf_conntrack_standalone.c
1896 +++ b/net/netfilter/nf_conntrack_standalone.c
1897 @@ -521,6 +521,9 @@ nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
1898 {
1899 int ret;
1900
1901 + /* module_param hashsize could have changed value */
1902 + nf_conntrack_htable_size_user = nf_conntrack_htable_size;
1903 +
1904 ret = proc_dointvec(table, write, buffer, lenp, ppos);
1905 if (ret < 0 || !write)
1906 return ret;
1907 diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
1908 index bfc555fcbc729..89b58aa890a74 100644
1909 --- a/net/netfilter/nf_nat_core.c
1910 +++ b/net/netfilter/nf_nat_core.c
1911 @@ -1174,6 +1174,7 @@ static int __init nf_nat_init(void)
1912 ret = register_pernet_subsys(&nat_net_ops);
1913 if (ret < 0) {
1914 nf_ct_extend_unregister(&nat_extend);
1915 + kvfree(nf_nat_bysource);
1916 return ret;
1917 }
1918
1919 diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
1920 index 0e3e0ff805812..bbe03b9a03b12 100644
1921 --- a/net/netfilter/nft_compat.c
1922 +++ b/net/netfilter/nft_compat.c
1923 @@ -27,8 +27,6 @@ struct nft_xt_match_priv {
1924 void *info;
1925 };
1926
1927 -static refcount_t nft_compat_pending_destroy = REFCOUNT_INIT(1);
1928 -
1929 static int nft_compat_chain_validate_dependency(const struct nft_ctx *ctx,
1930 const char *tablename)
1931 {
1932 @@ -215,6 +213,17 @@ static int nft_parse_compat(const struct nlattr *attr, u16 *proto, bool *inv)
1933 return 0;
1934 }
1935
1936 +static void nft_compat_wait_for_destructors(void)
1937 +{
1938 + /* xtables matches or targets can have side effects, e.g.
1939 + * creation/destruction of /proc files.
1940 + * The xt ->destroy functions are run asynchronously from
1941 + * work queue. If we have pending invocations we thus
1942 + * need to wait for those to finish.
1943 + */
1944 + nf_tables_trans_destroy_flush_work();
1945 +}
1946 +
1947 static int
1948 nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
1949 const struct nlattr * const tb[])
1950 @@ -238,14 +247,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
1951
1952 nft_target_set_tgchk_param(&par, ctx, target, info, &e, proto, inv);
1953
1954 - /* xtables matches or targets can have side effects, e.g.
1955 - * creation/destruction of /proc files.
1956 - * The xt ->destroy functions are run asynchronously from
1957 - * work queue. If we have pending invocations we thus
1958 - * need to wait for those to finish.
1959 - */
1960 - if (refcount_read(&nft_compat_pending_destroy) > 1)
1961 - nf_tables_trans_destroy_flush_work();
1962 + nft_compat_wait_for_destructors();
1963
1964 ret = xt_check_target(&par, size, proto, inv);
1965 if (ret < 0)
1966 @@ -260,7 +262,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
1967
1968 static void __nft_mt_tg_destroy(struct module *me, const struct nft_expr *expr)
1969 {
1970 - refcount_dec(&nft_compat_pending_destroy);
1971 module_put(me);
1972 kfree(expr->ops);
1973 }
1974 @@ -468,6 +469,8 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
1975
1976 nft_match_set_mtchk_param(&par, ctx, match, info, &e, proto, inv);
1977
1978 + nft_compat_wait_for_destructors();
1979 +
1980 return xt_check_match(&par, size, proto, inv);
1981 }
1982
1983 @@ -716,14 +719,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = {
1984
1985 static struct nft_expr_type nft_match_type;
1986
1987 -static void nft_mt_tg_deactivate(const struct nft_ctx *ctx,
1988 - const struct nft_expr *expr,
1989 - enum nft_trans_phase phase)
1990 -{
1991 - if (phase == NFT_TRANS_COMMIT)
1992 - refcount_inc(&nft_compat_pending_destroy);
1993 -}
1994 -
1995 static const struct nft_expr_ops *
1996 nft_match_select_ops(const struct nft_ctx *ctx,
1997 const struct nlattr * const tb[])
1998 @@ -762,7 +757,6 @@ nft_match_select_ops(const struct nft_ctx *ctx,
1999 ops->type = &nft_match_type;
2000 ops->eval = nft_match_eval;
2001 ops->init = nft_match_init;
2002 - ops->deactivate = nft_mt_tg_deactivate,
2003 ops->destroy = nft_match_destroy;
2004 ops->dump = nft_match_dump;
2005 ops->validate = nft_match_validate;
2006 @@ -853,7 +847,6 @@ nft_target_select_ops(const struct nft_ctx *ctx,
2007 ops->size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize));
2008 ops->init = nft_target_init;
2009 ops->destroy = nft_target_destroy;
2010 - ops->deactivate = nft_mt_tg_deactivate,
2011 ops->dump = nft_target_dump;
2012 ops->validate = nft_target_validate;
2013 ops->data = target;
2014 @@ -917,8 +910,6 @@ static void __exit nft_compat_module_exit(void)
2015 nfnetlink_subsys_unregister(&nfnl_compat_subsys);
2016 nft_unregister_expr(&nft_target_type);
2017 nft_unregister_expr(&nft_match_type);
2018 -
2019 - WARN_ON_ONCE(refcount_read(&nft_compat_pending_destroy) != 1);
2020 }
2021
2022 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT);
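
The nft_compat rework above drops the pending-destroy refcount and instead always flushes the transaction destroy work before initializing a new match or target, because the xtables ->destroy callbacks run asynchronously and may still be tearing down state (such as /proc entries) that the new expression would recreate. A minimal pthread sketch of "flush the asynchronous destructors before constructing"; the queue and function names are illustrative, not nf_tables internals.

/* Sketch: destructors run asynchronously; before constructing a new object
 * that may clash with one still being torn down, wait for all queued
 * destructors to finish. Names are illustrative, not nf_tables. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  idle = PTHREAD_COND_INITIALIZER;
static int pending_destroys;

static void *async_destroy(void *arg)
{
    (void)arg;
    usleep(10000);              /* simulate slow teardown (/proc removal etc.) */
    pthread_mutex_lock(&lock);
    if (--pending_destroys == 0)
        pthread_cond_broadcast(&idle);
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void queue_destroy(pthread_t *t)
{
    pthread_mutex_lock(&lock);
    pending_destroys++;
    pthread_mutex_unlock(&lock);
    pthread_create(t, NULL, async_destroy, NULL);
}

static void flush_destroys(void)    /* cf. nf_tables_trans_destroy_flush_work() */
{
    pthread_mutex_lock(&lock);
    while (pending_destroys > 0)
        pthread_cond_wait(&idle, &lock);
    pthread_mutex_unlock(&lock);
}

static void construct(void)
{
    flush_destroys();               /* cf. nft_compat_wait_for_destructors() */
    puts("safe to create: no destructor still running");
}

int main(void)
{
    pthread_t t;

    queue_destroy(&t);
    construct();
    pthread_join(t, NULL);
    return 0;
}
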
2023 diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
2024 index 105d17af4abcc..0d4a2bb09589c 100644
2025 --- a/net/sunrpc/addr.c
2026 +++ b/net/sunrpc/addr.c
2027 @@ -185,7 +185,7 @@ static int rpc_parse_scope_id(struct net *net, const char *buf,
2028 scope_id = dev->ifindex;
2029 dev_put(dev);
2030 } else {
2031 - if (kstrtou32(p, 10, &scope_id) == 0) {
2032 + if (kstrtou32(p, 10, &scope_id) != 0) {
2033 kfree(p);
2034 return 0;
2035 }
2036 diff --git a/security/lsm_audit.c b/security/lsm_audit.c
2037 index e40874373f2b4..d025f575a9e30 100644
2038 --- a/security/lsm_audit.c
2039 +++ b/security/lsm_audit.c
2040 @@ -274,7 +274,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
2041 struct inode *inode;
2042
2043 audit_log_format(ab, " name=");
2044 + spin_lock(&a->u.dentry->d_lock);
2045 audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
2046 + spin_unlock(&a->u.dentry->d_lock);
2047
2048 inode = d_backing_inode(a->u.dentry);
2049 if (inode) {
2050 @@ -292,8 +294,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
2051 dentry = d_find_alias(inode);
2052 if (dentry) {
2053 audit_log_format(ab, " name=");
2054 - audit_log_untrustedstring(ab,
2055 - dentry->d_name.name);
2056 + spin_lock(&dentry->d_lock);
2057 + audit_log_untrustedstring(ab, dentry->d_name.name);
2058 + spin_unlock(&dentry->d_lock);
2059 dput(dentry);
2060 }
2061 audit_log_format(ab, " dev=");
2062 diff --git a/sound/firewire/fireface/ff-transaction.c b/sound/firewire/fireface/ff-transaction.c
2063 index 7f82762ccc8c8..ee7122c461d46 100644
2064 --- a/sound/firewire/fireface/ff-transaction.c
2065 +++ b/sound/firewire/fireface/ff-transaction.c
2066 @@ -88,7 +88,7 @@ static void transmit_midi_msg(struct snd_ff *ff, unsigned int port)
2067
2068 /* Set interval to next transaction. */
2069 ff->next_ktime[port] = ktime_add_ns(ktime_get(),
2070 - ff->rx_bytes[port] * 8 * NSEC_PER_SEC / 31250);
2071 + ff->rx_bytes[port] * 8 * (NSEC_PER_SEC / 31250));
2072
2073 if (quad_count == 1)
2074 tcode = TCODE_WRITE_QUADLET_REQUEST;
2075 diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
2076 index 90288b4b46379..a073cece4a7d5 100644
2077 --- a/sound/firewire/tascam/tascam-transaction.c
2078 +++ b/sound/firewire/tascam/tascam-transaction.c
2079 @@ -209,7 +209,7 @@ static void midi_port_work(struct work_struct *work)
2080
2081 /* Set interval to next transaction. */
2082 port->next_ktime = ktime_add_ns(ktime_get(),
2083 - port->consume_bytes * 8 * NSEC_PER_SEC / 31250);
2084 + port->consume_bytes * 8 * (NSEC_PER_SEC / 31250));
2085
2086 /* Start this transaction. */
2087 port->idling = false;
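
Both firewire MIDI hunks above reorder the byte-time calculation so the constant NSEC_PER_SEC / 31250 (exactly 32000 ns per bit at MIDI's 31250 baud) is folded first; with the old ordering, bytes * 8 * NSEC_PER_SEC can overflow a 32-bit type long before the trailing division brings the value back into range. A quick demonstration in plain C, using uint32_t deliberately so the wraparound is visible:

/* Sketch: why "bytes * 8 * (NSEC_PER_SEC / 31250)" is safer than
 * "bytes * 8 * NSEC_PER_SEC / 31250" when the math may be 32-bit. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000u

int main(void)
{
    uint32_t bytes = 3;             /* a small MIDI message */

    /* Old ordering: the intermediate 3 * 8 * 1e9 = 24e9 wraps in 32 bits. */
    uint32_t bad  = bytes * 8 * NSEC_PER_SEC / 31250;

    /* New ordering: 1e9 / 31250 = 32000 exactly, so the product stays small. */
    uint32_t good = bytes * 8 * (NSEC_PER_SEC / 31250);

    printf("old order: %" PRIu32 " ns (wrapped)\n", bad);
    printf("new order: %" PRIu32 " ns\n", good);
    return 0;
}
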
2088 diff --git a/sound/soc/intel/skylake/cnl-sst.c b/sound/soc/intel/skylake/cnl-sst.c
2089 index c6abcd5aa67b9..e808f62960ba7 100644
2090 --- a/sound/soc/intel/skylake/cnl-sst.c
2091 +++ b/sound/soc/intel/skylake/cnl-sst.c
2092 @@ -224,6 +224,7 @@ static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
2093 "dsp boot timeout, status=%#x error=%#x\n",
2094 sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
2095 sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
2096 + ret = -ETIMEDOUT;
2097 goto err;
2098 }
2099 } else {
2100 diff --git a/sound/soc/meson/axg-tdm-interface.c b/sound/soc/meson/axg-tdm-interface.c
2101 index e25336f739123..f5a431b8de6c3 100644
2102 --- a/sound/soc/meson/axg-tdm-interface.c
2103 +++ b/sound/soc/meson/axg-tdm-interface.c
2104 @@ -467,8 +467,20 @@ static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component,
2105 return ret;
2106 }
2107
2108 +static const struct snd_soc_dapm_widget axg_tdm_iface_dapm_widgets[] = {
2109 + SND_SOC_DAPM_SIGGEN("Playback Signal"),
2110 +};
2111 +
2112 +static const struct snd_soc_dapm_route axg_tdm_iface_dapm_routes[] = {
2113 + { "Loopback", NULL, "Playback Signal" },
2114 +};
2115 +
2116 static const struct snd_soc_component_driver axg_tdm_iface_component_drv = {
2117 - .set_bias_level = axg_tdm_iface_set_bias_level,
2118 + .dapm_widgets = axg_tdm_iface_dapm_widgets,
2119 + .num_dapm_widgets = ARRAY_SIZE(axg_tdm_iface_dapm_widgets),
2120 + .dapm_routes = axg_tdm_iface_dapm_routes,
2121 + .num_dapm_routes = ARRAY_SIZE(axg_tdm_iface_dapm_routes),
2122 + .set_bias_level = axg_tdm_iface_set_bias_level,
2123 };
2124
2125 static const struct of_device_id axg_tdm_iface_of_match[] = {
2126 diff --git a/sound/soc/meson/axg-tdmin.c b/sound/soc/meson/axg-tdmin.c
2127 index 88ed95ae886bb..b4faf9d5c1aad 100644
2128 --- a/sound/soc/meson/axg-tdmin.c
2129 +++ b/sound/soc/meson/axg-tdmin.c
2130 @@ -224,15 +224,6 @@ static const struct axg_tdm_formatter_ops axg_tdmin_ops = {
2131 };
2132
2133 static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
2134 - .component_drv = &axg_tdmin_component_drv,
2135 - .regmap_cfg = &axg_tdmin_regmap_cfg,
2136 - .ops = &axg_tdmin_ops,
2137 - .quirks = &(const struct axg_tdm_formatter_hw) {
2138 - .skew_offset = 2,
2139 - },
2140 -};
2141 -
2142 -static const struct axg_tdm_formatter_driver g12a_tdmin_drv = {
2143 .component_drv = &axg_tdmin_component_drv,
2144 .regmap_cfg = &axg_tdmin_regmap_cfg,
2145 .ops = &axg_tdmin_ops,
2146 @@ -247,10 +238,10 @@ static const struct of_device_id axg_tdmin_of_match[] = {
2147 .data = &axg_tdmin_drv,
2148 }, {
2149 .compatible = "amlogic,g12a-tdmin",
2150 - .data = &g12a_tdmin_drv,
2151 + .data = &axg_tdmin_drv,
2152 }, {
2153 .compatible = "amlogic,sm1-tdmin",
2154 - .data = &g12a_tdmin_drv,
2155 + .data = &axg_tdmin_drv,
2156 }, {}
2157 };
2158 MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
2159 diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
2160 index 06aa393797497..7c4d5963692dd 100644
2161 --- a/sound/soc/soc-dapm.c
2162 +++ b/sound/soc/soc-dapm.c
2163 @@ -2484,6 +2484,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
2164 enum snd_soc_dapm_direction dir;
2165
2166 list_del(&w->list);
2167 + list_del(&w->dirty);
2168 /*
2169 * remove source and sink paths associated to this widget.
2170 * While removing the path, remove reference to it from both
2171 diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
2172 index ea277ce63a463..767fe1bfd922c 100644
2173 --- a/tools/perf/util/machine.c
2174 +++ b/tools/perf/util/machine.c
2175 @@ -2587,7 +2587,7 @@ int machines__for_each_thread(struct machines *machines,
2176
2177 pid_t machine__get_current_tid(struct machine *machine, int cpu)
2178 {
2179 - int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2180 + int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
2181
2182 if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
2183 return -1;
2184 @@ -2599,7 +2599,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2185 pid_t tid)
2186 {
2187 struct thread *thread;
2188 - int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2189 + int nr_cpus = min(machine->env->nr_cpus_avail, MAX_NR_CPUS);
2190
2191 if (cpu < 0)
2192 return -EINVAL;
2193 diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
2194 index ff524a3fc5003..b21a4b1918db5 100644
2195 --- a/tools/perf/util/session.c
2196 +++ b/tools/perf/util/session.c
2197 @@ -2314,7 +2314,7 @@ int perf_session__cpu_bitmap(struct perf_session *session,
2198 {
2199 int i, err = -1;
2200 struct perf_cpu_map *map;
2201 - int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
2202 + int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2203
2204 for (i = 0; i < PERF_TYPE_MAX; ++i) {
2205 struct evsel *evsel;
2206 diff --git a/tools/testing/selftests/net/udpgro.sh b/tools/testing/selftests/net/udpgro.sh
2207 index ac2a30be9b325..f8a19f548ae9d 100755
2208 --- a/tools/testing/selftests/net/udpgro.sh
2209 +++ b/tools/testing/selftests/net/udpgro.sh
2210 @@ -5,6 +5,14 @@
2211
2212 readonly PEER_NS="ns-peer-$(mktemp -u XXXXXX)"
2213
2214 +# set global exit status, but never reset nonzero one.
2215 +check_err()
2216 +{
2217 + if [ $ret -eq 0 ]; then
2218 + ret=$1
2219 + fi
2220 +}
2221 +
2222 cleanup() {
2223 local -r jobs="$(jobs -p)"
2224 local -r ns="$(ip netns list|grep $PEER_NS)"
2225 @@ -44,7 +52,9 @@ run_one() {
2226 # Hack: let bg programs complete the startup
2227 sleep 0.1
2228 ./udpgso_bench_tx ${tx_args}
2229 + ret=$?
2230 wait $(jobs -p)
2231 + return $ret
2232 }
2233
2234 run_test() {
2235 @@ -87,8 +97,10 @@ run_one_nat() {
2236
2237 sleep 0.1
2238 ./udpgso_bench_tx ${tx_args}
2239 + ret=$?
2240 kill -INT $pid
2241 wait $(jobs -p)
2242 + return $ret
2243 }
2244
2245 run_one_2sock() {
2246 @@ -110,7 +122,9 @@ run_one_2sock() {
2247 sleep 0.1
2248 # first UDP GSO socket should be closed at this point
2249 ./udpgso_bench_tx ${tx_args}
2250 + ret=$?
2251 wait $(jobs -p)
2252 + return $ret
2253 }
2254
2255 run_nat_test() {
2256 @@ -131,36 +145,54 @@ run_all() {
2257 local -r core_args="-l 4"
2258 local -r ipv4_args="${core_args} -4 -D 192.168.1.1"
2259 local -r ipv6_args="${core_args} -6 -D 2001:db8::1"
2260 + ret=0
2261
2262 echo "ipv4"
2263 run_test "no GRO" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400"
2264 + check_err $?
2265
2266 # explicitly check we are not receiving UDP_SEGMENT cmsg (-S -1)
2267 # when GRO does not take place
2268 run_test "no GRO chk cmsg" "${ipv4_args} -M 10 -s 1400" "-4 -n 10 -l 1400 -S -1"
2269 + check_err $?
2270
2271 # the GSO packets are aggregated because:
2272 # * veth schedule napi after each xmit
2273 # * segmentation happens in BH context, veth napi poll is delayed after
2274 # the transmission of the last segment
2275 run_test "GRO" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720"
2276 + check_err $?
2277 run_test "GRO chk cmsg" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
2278 + check_err $?
2279 run_test "GRO with custom segment size" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720"
2280 + check_err $?
2281 run_test "GRO with custom segment size cmsg" "${ipv4_args} -M 1 -s 14720 -S 500 " "-4 -n 1 -l 14720 -S 500"
2282 + check_err $?
2283
2284 run_nat_test "bad GRO lookup" "${ipv4_args} -M 1 -s 14720 -S 0" "-n 10 -l 1472"
2285 + check_err $?
2286 run_2sock_test "multiple GRO socks" "${ipv4_args} -M 1 -s 14720 -S 0 " "-4 -n 1 -l 14720 -S 1472"
2287 + check_err $?
2288
2289 echo "ipv6"
2290 run_test "no GRO" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400"
2291 + check_err $?
2292 run_test "no GRO chk cmsg" "${ipv6_args} -M 10 -s 1400" "-n 10 -l 1400 -S -1"
2293 + check_err $?
2294 run_test "GRO" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520"
2295 + check_err $?
2296 run_test "GRO chk cmsg" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 1 -l 14520 -S 1452"
2297 + check_err $?
2298 run_test "GRO with custom segment size" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520"
2299 + check_err $?
2300 run_test "GRO with custom segment size cmsg" "${ipv6_args} -M 1 -s 14520 -S 500" "-n 1 -l 14520 -S 500"
2301 + check_err $?
2302
2303 run_nat_test "bad GRO lookup" "${ipv6_args} -M 1 -s 14520 -S 0" "-n 10 -l 1452"
2304 + check_err $?
2305 run_2sock_test "multiple GRO socks" "${ipv6_args} -M 1 -s 14520 -S 0 " "-n 1 -l 14520 -S 1452"
2306 + check_err $?
2307 + return $ret
2308 }
2309
2310 if [ ! -f ../bpf/xdp_dummy.o ]; then
2311 @@ -180,3 +212,5 @@ elif [[ $1 == "__subprocess_2sock" ]]; then
2312 shift
2313 run_one_2sock $@
2314 fi
2315 +
2316 +exit $?