Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0128-5.4.29-all-fixes.patch



Revision 3509
Mon May 11 14:36:33 2020 UTC by niro
File size: 199020 bytes
-linux-5.4.29
1 diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
2 index 299c0dcd67db..1316f0aec0cf 100644
3 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt
4 +++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
5 @@ -110,6 +110,13 @@ PROPERTIES
6 Usage: required
7 Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
8
9 +- fsl,erratum-a050385
10 + Usage: optional
11 + Value type: boolean
12 + Definition: A boolean property. Indicates the presence of
13 + erratum A050385, where DMA transactions that are split can
14 + result in an FMan lock.
15 +
16 =============================================================================
17 FMan MURAM Node
18
19 diff --git a/Makefile b/Makefile
20 index b015cc894123..8cb72071a842 100644
21 --- a/Makefile
22 +++ b/Makefile
23 @@ -1,7 +1,7 @@
24 # SPDX-License-Identifier: GPL-2.0
25 VERSION = 5
26 PATCHLEVEL = 4
27 -SUBLEVEL = 28
28 +SUBLEVEL = 29
29 EXTRAVERSION =
30 NAME = Kleptomaniac Octopus
31
32 diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
33 index 6481d2b7d6b6..c6be65249f42 100644
34 --- a/arch/arm/boot/dts/dra7.dtsi
35 +++ b/arch/arm/boot/dts/dra7.dtsi
36 @@ -148,6 +148,7 @@
37 #address-cells = <1>;
38 #size-cells = <1>;
39 ranges = <0x0 0x0 0x0 0xc0000000>;
40 + dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
41 ti,hwmods = "l3_main_1", "l3_main_2";
42 reg = <0x0 0x44000000 0x0 0x1000000>,
43 <0x0 0x45000000 0x0 0x1000>;
44 diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
45 index 1fb7937638f0..041646fabb2d 100644
46 --- a/arch/arm/boot/dts/omap5.dtsi
47 +++ b/arch/arm/boot/dts/omap5.dtsi
48 @@ -143,6 +143,7 @@
49 #address-cells = <1>;
50 #size-cells = <1>;
51 ranges = <0 0 0 0xc0000000>;
52 + dma-ranges = <0x80000000 0x0 0x80000000 0x80000000>;
53 ti,hwmods = "l3_main_1", "l3_main_2", "l3_main_3";
54 reg = <0 0x44000000 0 0x2000>,
55 <0 0x44800000 0 0x3000>,
56 diff --git a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
57 index 3bec3e0a81b2..397140454132 100644
58 --- a/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
59 +++ b/arch/arm/boot/dts/sun8i-a83t-tbs-a711.dts
60 @@ -482,7 +482,8 @@
61 };
62
63 &usbphy {
64 - usb0_id_det-gpios = <&pio 7 11 GPIO_ACTIVE_HIGH>; /* PH11 */
65 + usb0_id_det-gpios = <&pio 7 11 (GPIO_ACTIVE_HIGH | GPIO_PULL_UP)>; /* PH11 */
66 + usb0_vbus_power-supply = <&usb_power_supply>;
67 usb0_vbus-supply = <&reg_drivevbus>;
68 usb1_vbus-supply = <&reg_vmain>;
69 usb2_vbus-supply = <&reg_vmain>;
70 diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
71 index 6082ae022136..d237162a8744 100644
72 --- a/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
73 +++ b/arch/arm64/boot/dts/freescale/fsl-ls1043-post.dtsi
74 @@ -20,6 +20,8 @@
75 };
76
77 &fman0 {
78 + fsl,erratum-a050385;
79 +
80 /* these aliases provide the FMan ports mapping */
81 enet0: ethernet@e0000 {
82 };
83 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
84 index 2db3b7c4de16..a353f88d299d 100644
85 --- a/arch/x86/mm/ioremap.c
86 +++ b/arch/x86/mm/ioremap.c
87 @@ -115,6 +115,9 @@ static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *des
88 if (!sev_active())
89 return;
90
91 + if (!IS_ENABLED(CONFIG_EFI))
92 + return;
93 +
94 if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
95 desc->flags |= IORES_MAP_ENCRYPTED;
96 }
97 diff --git a/arch/x86/net/bpf_jit_comp32.c b/arch/x86/net/bpf_jit_comp32.c
98 index 393d251798c0..4d2a7a764602 100644
99 --- a/arch/x86/net/bpf_jit_comp32.c
100 +++ b/arch/x86/net/bpf_jit_comp32.c
101 @@ -2039,10 +2039,12 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
102 }
103 /* and dreg_lo,sreg_lo */
104 EMIT2(0x23, add_2reg(0xC0, sreg_lo, dreg_lo));
105 - /* and dreg_hi,sreg_hi */
106 - EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
107 - /* or dreg_lo,dreg_hi */
108 - EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
109 + if (is_jmp64) {
110 + /* and dreg_hi,sreg_hi */
111 + EMIT2(0x23, add_2reg(0xC0, sreg_hi, dreg_hi));
112 + /* or dreg_lo,dreg_hi */
113 + EMIT2(0x09, add_2reg(0xC0, dreg_lo, dreg_hi));
114 + }
115 goto emit_cond_jmp;
116 }
117 case BPF_JMP | BPF_JSET | BPF_K:
118 diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
119 index 827530dae682..ce59a3f32eac 100644
120 --- a/drivers/acpi/sleep.c
121 +++ b/drivers/acpi/sleep.c
122 @@ -977,6 +977,16 @@ static int acpi_s2idle_prepare_late(void)
123 return 0;
124 }
125
126 +static void acpi_s2idle_sync(void)
127 +{
128 + /*
129 + * The EC driver uses the system workqueue and an additional special
130 + * one, so those need to be flushed too.
131 + */
132 + acpi_ec_flush_work();
133 + acpi_os_wait_events_complete(); /* synchronize Notify handling */
134 +}
135 +
136 static bool acpi_s2idle_wake(void)
137 {
138 if (!acpi_sci_irq_valid())
139 @@ -1021,13 +1031,8 @@ static bool acpi_s2idle_wake(void)
140 * should be missed by canceling the wakeup here.
141 */
142 pm_system_cancel_wakeup();
143 - /*
144 - * The EC driver uses the system workqueue and an additional
145 - * special one, so those need to be flushed too.
146 - */
147 - acpi_os_wait_events_complete(); /* synchronize EC GPE processing */
148 - acpi_ec_flush_work();
149 - acpi_os_wait_events_complete(); /* synchronize Notify handling */
150 +
151 + acpi_s2idle_sync();
152
153 /*
154 * The SCI is in the "suspended" state now and it cannot produce
155 @@ -1055,6 +1060,13 @@ static void acpi_s2idle_restore_early(void)
156
157 static void acpi_s2idle_restore(void)
158 {
159 + /*
160 + * Drain pending events before restoring the working-state configuration
161 + * of GPEs.
162 + */
163 + acpi_os_wait_events_complete(); /* synchronize GPE processing */
164 + acpi_s2idle_sync();
165 +
166 s2idle_wakeup = false;
167
168 acpi_enable_all_runtime_gpes();
169 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
170 index 1787e3ad9c44..d33528033042 100644
171 --- a/drivers/ata/ahci.c
172 +++ b/drivers/ata/ahci.c
173 @@ -393,6 +393,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
174 { PCI_VDEVICE(INTEL, 0xa252), board_ahci }, /* Lewisburg RAID*/
175 { PCI_VDEVICE(INTEL, 0xa256), board_ahci }, /* Lewisburg RAID*/
176 { PCI_VDEVICE(INTEL, 0xa356), board_ahci }, /* Cannon Lake PCH-H RAID */
177 + { PCI_VDEVICE(INTEL, 0x06d7), board_ahci }, /* Comet Lake-H RAID */
178 { PCI_VDEVICE(INTEL, 0x0f22), board_ahci_mobile }, /* Bay Trail AHCI */
179 { PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
180 { PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
181 diff --git a/drivers/base/memory.c b/drivers/base/memory.c
182 index 84c4e1f72cbd..5a8c430fb8ff 100644
183 --- a/drivers/base/memory.c
184 +++ b/drivers/base/memory.c
185 @@ -114,30 +114,13 @@ static ssize_t phys_index_show(struct device *dev,
186 }
187
188 /*
189 - * Show whether the memory block is likely to be offlineable (or is already
190 - * offline). Once offline, the memory block could be removed. The return
191 - * value does, however, not indicate that there is a way to remove the
192 - * memory block.
193 + * Legacy interface that we cannot remove. Always indicate "removable"
194 + * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
195 */
196 static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
197 char *buf)
198 {
199 - struct memory_block *mem = to_memory_block(dev);
200 - unsigned long pfn;
201 - int ret = 1, i;
202 -
203 - if (mem->state != MEM_ONLINE)
204 - goto out;
205 -
206 - for (i = 0; i < sections_per_block; i++) {
207 - if (!present_section_nr(mem->start_section_nr + i))
208 - continue;
209 - pfn = section_nr_to_pfn(mem->start_section_nr + i);
210 - ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
211 - }
212 -
213 -out:
214 - return sprintf(buf, "%d\n", ret);
215 + return sprintf(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
216 }
217
218 /*
219 diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
220 index 2317d4e3daaf..36933e2b3b0d 100644
221 --- a/drivers/clocksource/hyperv_timer.c
222 +++ b/drivers/clocksource/hyperv_timer.c
223 @@ -233,7 +233,8 @@ static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
224
225 static u64 read_hv_sched_clock_tsc(void)
226 {
227 - return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
228 + return (read_hv_clock_tsc(NULL) - hv_sched_clock_offset) *
229 + (NSEC_PER_SEC / HV_CLOCK_HZ);
230 }
231
232 static struct clocksource hyperv_cs_tsc = {
233 @@ -258,7 +259,8 @@ static u64 notrace read_hv_clock_msr(struct clocksource *arg)
234
235 static u64 read_hv_sched_clock_msr(void)
236 {
237 - return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
238 + return (read_hv_clock_msr(NULL) - hv_sched_clock_offset) *
239 + (NSEC_PER_SEC / HV_CLOCK_HZ);
240 }
241
242 static struct clocksource hyperv_cs_msr = {
243 diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
244 index 1f98e988c0d3..a3fb450a9ca8 100644
245 --- a/drivers/gpio/gpiolib-acpi.c
246 +++ b/drivers/gpio/gpiolib-acpi.c
247 @@ -21,18 +21,21 @@
248 #include "gpiolib.h"
249 #include "gpiolib-acpi.h"
250
251 -#define QUIRK_NO_EDGE_EVENTS_ON_BOOT 0x01l
252 -#define QUIRK_NO_WAKEUP 0x02l
253 -
254 static int run_edge_events_on_boot = -1;
255 module_param(run_edge_events_on_boot, int, 0444);
256 MODULE_PARM_DESC(run_edge_events_on_boot,
257 "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
258
259 -static int honor_wakeup = -1;
260 -module_param(honor_wakeup, int, 0444);
261 -MODULE_PARM_DESC(honor_wakeup,
262 - "Honor the ACPI wake-capable flag: 0=no, 1=yes, -1=auto");
263 +static char *ignore_wake;
264 +module_param(ignore_wake, charp, 0444);
265 +MODULE_PARM_DESC(ignore_wake,
266 + "controller@pin combos on which to ignore the ACPI wake flag "
267 + "ignore_wake=controller@pin[,controller@pin[,...]]");
268 +
269 +struct acpi_gpiolib_dmi_quirk {
270 + bool no_edge_events_on_boot;
271 + char *ignore_wake;
272 +};
273
274 /**
275 * struct acpi_gpio_event - ACPI GPIO event handler data
276 @@ -202,6 +205,57 @@ static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
277 acpi_gpiochip_request_irq(acpi_gpio, event);
278 }
279
280 +static bool acpi_gpio_in_ignore_list(const char *controller_in, int pin_in)
281 +{
282 + const char *controller, *pin_str;
283 + int len, pin;
284 + char *endp;
285 +
286 + controller = ignore_wake;
287 + while (controller) {
288 + pin_str = strchr(controller, '@');
289 + if (!pin_str)
290 + goto err;
291 +
292 + len = pin_str - controller;
293 + if (len == strlen(controller_in) &&
294 + strncmp(controller, controller_in, len) == 0) {
295 + pin = simple_strtoul(pin_str + 1, &endp, 10);
296 + if (*endp != 0 && *endp != ',')
297 + goto err;
298 +
299 + if (pin == pin_in)
300 + return true;
301 + }
302 +
303 + controller = strchr(controller, ',');
304 + if (controller)
305 + controller++;
306 + }
307 +
308 + return false;
309 +err:
310 + pr_err_once("Error invalid value for gpiolib_acpi.ignore_wake: %s\n",
311 + ignore_wake);
312 + return false;
313 +}
314 +
315 +static bool acpi_gpio_irq_is_wake(struct device *parent,
316 + struct acpi_resource_gpio *agpio)
317 +{
318 + int pin = agpio->pin_table[0];
319 +
320 + if (agpio->wake_capable != ACPI_WAKE_CAPABLE)
321 + return false;
322 +
323 + if (acpi_gpio_in_ignore_list(dev_name(parent), pin)) {
324 + dev_info(parent, "Ignoring wakeup on pin %d\n", pin);
325 + return false;
326 + }
327 +
328 + return true;
329 +}
330 +
331 static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
332 void *context)
333 {
334 @@ -282,7 +336,7 @@ static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
335 event->handle = evt_handle;
336 event->handler = handler;
337 event->irq = irq;
338 - event->irq_is_wake = honor_wakeup && agpio->wake_capable == ACPI_WAKE_CAPABLE;
339 + event->irq_is_wake = acpi_gpio_irq_is_wake(chip->parent, agpio);
340 event->pin = pin;
341 event->desc = desc;
342
343 @@ -1321,7 +1375,9 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
344 DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
345 DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
346 },
347 - .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
348 + .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
349 + .no_edge_events_on_boot = true,
350 + },
351 },
352 {
353 /*
354 @@ -1334,16 +1390,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
355 DMI_MATCH(DMI_SYS_VENDOR, "Wortmann_AG"),
356 DMI_MATCH(DMI_PRODUCT_NAME, "TERRA_PAD_1061"),
357 },
358 - .driver_data = (void *)QUIRK_NO_EDGE_EVENTS_ON_BOOT,
359 + .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
360 + .no_edge_events_on_boot = true,
361 + },
362 },
363 {
364 /*
365 - * Various HP X2 10 Cherry Trail models use an external
366 - * embedded-controller connected via I2C + an ACPI GPIO
367 - * event handler. The embedded controller generates various
368 - * spurious wakeup events when suspended. So disable wakeup
369 - * for its handler (it uses the only ACPI GPIO event handler).
370 - * This breaks wakeup when opening the lid, the user needs
371 + * HP X2 10 models with Cherry Trail SoC + TI PMIC use an
372 + * external embedded-controller connected via I2C + an ACPI GPIO
373 + * event handler on INT33FF:01 pin 0, causing spurious wakeups.
374 + * When suspending by closing the LID, the power to the USB
375 + * keyboard is turned off, causing INT0002 ACPI events to
376 + * trigger once the XHCI controller notices the keyboard is
377 + * gone. So INT0002 events cause spurious wakeups too. Ignoring
378 + * EC wakes breaks wakeup when opening the lid, the user needs
379 * to press the power-button to wakeup the system. The
380 * alternative is suspend simply not working, which is worse.
381 */
382 @@ -1351,33 +1411,46 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] = {
383 DMI_MATCH(DMI_SYS_VENDOR, "HP"),
384 DMI_MATCH(DMI_PRODUCT_NAME, "HP x2 Detachable 10-p0XX"),
385 },
386 - .driver_data = (void *)QUIRK_NO_WAKEUP,
387 + .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
388 + .ignore_wake = "INT33FF:01@0,INT0002:00@2",
389 + },
390 + },
391 + {
392 + /*
393 + * HP X2 10 models with Bay Trail SoC + AXP288 PMIC use an
394 + * external embedded-controller connected via I2C + an ACPI GPIO
395 + * event handler on INT33FC:02 pin 28, causing spurious wakeups.
396 + */
397 + .matches = {
398 + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
399 + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"),
400 + DMI_MATCH(DMI_BOARD_NAME, "815D"),
401 + },
402 + .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
403 + .ignore_wake = "INT33FC:02@28",
404 + },
405 },
406 {} /* Terminating entry */
407 };
408
409 static int acpi_gpio_setup_params(void)
410 {
411 + const struct acpi_gpiolib_dmi_quirk *quirk = NULL;
412 const struct dmi_system_id *id;
413 - long quirks = 0;
414
415 id = dmi_first_match(gpiolib_acpi_quirks);
416 if (id)
417 - quirks = (long)id->driver_data;
418 + quirk = id->driver_data;
419
420 if (run_edge_events_on_boot < 0) {
421 - if (quirks & QUIRK_NO_EDGE_EVENTS_ON_BOOT)
422 + if (quirk && quirk->no_edge_events_on_boot)
423 run_edge_events_on_boot = 0;
424 else
425 run_edge_events_on_boot = 1;
426 }
427
428 - if (honor_wakeup < 0) {
429 - if (quirks & QUIRK_NO_WAKEUP)
430 - honor_wakeup = 0;
431 - else
432 - honor_wakeup = 1;
433 - }
434 + if (ignore_wake == NULL && quirk && quirk->ignore_wake)
435 + ignore_wake = quirk->ignore_wake;
436
437 return 0;
438 }
439 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
440 index 484fa6560adc..a8cf55eb54d8 100644
441 --- a/drivers/gpio/gpiolib.c
442 +++ b/drivers/gpio/gpiolib.c
443 @@ -2194,9 +2194,16 @@ static void gpiochip_irq_disable(struct irq_data *d)
444 {
445 struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
446
447 + /*
448 + * Since we override .irq_disable() we need to mimic the
449 + * behaviour of __irq_disable() in irq/chip.c.
450 + * First call .irq_disable() if it exists, else mimic the
451 + * behaviour of mask_irq() which calls .irq_mask() if
452 + * it exists.
453 + */
454 if (chip->irq.irq_disable)
455 chip->irq.irq_disable(d);
456 - else
457 + else if (chip->irq.chip->irq_mask)
458 chip->irq.chip->irq_mask(d);
459 gpiochip_disable_irq(chip, d->hwirq);
460 }
461 diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
462 index 80934ca17260..c086262cc181 100644
463 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c
464 +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
465 @@ -84,6 +84,13 @@
466 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
467 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
468 #define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
469 +
470 +/* for Vega20/arcturus register offset change */
471 +#define mmROM_INDEX_VG20 0x00e4
472 +#define mmROM_INDEX_VG20_BASE_IDX 0
473 +#define mmROM_DATA_VG20 0x00e5
474 +#define mmROM_DATA_VG20_BASE_IDX 0
475 +
476 /*
477 * Indirect registers accessor
478 */
479 @@ -304,6 +311,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
480 {
481 u32 *dw_ptr;
482 u32 i, length_dw;
483 + uint32_t rom_index_offset;
484 + uint32_t rom_data_offset;
485
486 if (bios == NULL)
487 return false;
488 @@ -316,11 +325,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
489 dw_ptr = (u32 *)bios;
490 length_dw = ALIGN(length_bytes, 4) / 4;
491
492 + switch (adev->asic_type) {
493 + case CHIP_VEGA20:
494 + case CHIP_ARCTURUS:
495 + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
496 + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
497 + break;
498 + default:
499 + rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
500 + rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
501 + break;
502 + }
503 +
504 /* set rom index to 0 */
505 - WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
506 + WREG32(rom_index_offset, 0);
507 /* read out the rom data */
508 for (i = 0; i < length_dw; i++)
509 - dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
510 + dw_ptr[i] = RREG32(rom_data_offset);
511
512 return true;
513 }
514 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
515 index 3b7769a3e67e..c13dce760098 100644
516 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
517 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
518 @@ -269,6 +269,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
519 .use_urgent_burst_bw = 0
520 };
521
522 +struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
523 + .clock_limits = {
524 + {
525 + .state = 0,
526 + .dcfclk_mhz = 560.0,
527 + .fabricclk_mhz = 560.0,
528 + .dispclk_mhz = 513.0,
529 + .dppclk_mhz = 513.0,
530 + .phyclk_mhz = 540.0,
531 + .socclk_mhz = 560.0,
532 + .dscclk_mhz = 171.0,
533 + .dram_speed_mts = 8960.0,
534 + },
535 + {
536 + .state = 1,
537 + .dcfclk_mhz = 694.0,
538 + .fabricclk_mhz = 694.0,
539 + .dispclk_mhz = 642.0,
540 + .dppclk_mhz = 642.0,
541 + .phyclk_mhz = 600.0,
542 + .socclk_mhz = 694.0,
543 + .dscclk_mhz = 214.0,
544 + .dram_speed_mts = 11104.0,
545 + },
546 + {
547 + .state = 2,
548 + .dcfclk_mhz = 875.0,
549 + .fabricclk_mhz = 875.0,
550 + .dispclk_mhz = 734.0,
551 + .dppclk_mhz = 734.0,
552 + .phyclk_mhz = 810.0,
553 + .socclk_mhz = 875.0,
554 + .dscclk_mhz = 245.0,
555 + .dram_speed_mts = 14000.0,
556 + },
557 + {
558 + .state = 3,
559 + .dcfclk_mhz = 1000.0,
560 + .fabricclk_mhz = 1000.0,
561 + .dispclk_mhz = 1100.0,
562 + .dppclk_mhz = 1100.0,
563 + .phyclk_mhz = 810.0,
564 + .socclk_mhz = 1000.0,
565 + .dscclk_mhz = 367.0,
566 + .dram_speed_mts = 16000.0,
567 + },
568 + {
569 + .state = 4,
570 + .dcfclk_mhz = 1200.0,
571 + .fabricclk_mhz = 1200.0,
572 + .dispclk_mhz = 1284.0,
573 + .dppclk_mhz = 1284.0,
574 + .phyclk_mhz = 810.0,
575 + .socclk_mhz = 1200.0,
576 + .dscclk_mhz = 428.0,
577 + .dram_speed_mts = 16000.0,
578 + },
579 + /*Extra state, no dispclk ramping*/
580 + {
581 + .state = 5,
582 + .dcfclk_mhz = 1200.0,
583 + .fabricclk_mhz = 1200.0,
584 + .dispclk_mhz = 1284.0,
585 + .dppclk_mhz = 1284.0,
586 + .phyclk_mhz = 810.0,
587 + .socclk_mhz = 1200.0,
588 + .dscclk_mhz = 428.0,
589 + .dram_speed_mts = 16000.0,
590 + },
591 + },
592 + .num_states = 5,
593 + .sr_exit_time_us = 8.6,
594 + .sr_enter_plus_exit_time_us = 10.9,
595 + .urgent_latency_us = 4.0,
596 + .urgent_latency_pixel_data_only_us = 4.0,
597 + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
598 + .urgent_latency_vm_data_only_us = 4.0,
599 + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
600 + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
601 + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
602 + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
603 + .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
604 + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
605 + .max_avg_sdp_bw_use_normal_percent = 40.0,
606 + .max_avg_dram_bw_use_normal_percent = 40.0,
607 + .writeback_latency_us = 12.0,
608 + .ideal_dram_bw_after_urgent_percent = 40.0,
609 + .max_request_size_bytes = 256,
610 + .dram_channel_width_bytes = 2,
611 + .fabric_datapath_to_dcn_data_return_bytes = 64,
612 + .dcn_downspread_percent = 0.5,
613 + .downspread_percent = 0.38,
614 + .dram_page_open_time_ns = 50.0,
615 + .dram_rw_turnaround_time_ns = 17.5,
616 + .dram_return_buffer_per_channel_bytes = 8192,
617 + .round_trip_ping_latency_dcfclk_cycles = 131,
618 + .urgent_out_of_order_return_per_channel_bytes = 256,
619 + .channel_interleave_bytes = 256,
620 + .num_banks = 8,
621 + .num_chans = 8,
622 + .vmm_page_size_bytes = 4096,
623 + .dram_clock_change_latency_us = 404.0,
624 + .dummy_pstate_latency_us = 5.0,
625 + .writeback_dram_clock_change_latency_us = 23.0,
626 + .return_bus_width_bytes = 64,
627 + .dispclk_dppclk_vco_speed_mhz = 3850,
628 + .xfc_bus_transport_time_us = 20,
629 + .xfc_xbuf_latency_tolerance_us = 4,
630 + .use_urgent_burst_bw = 0
631 +};
632 +
633 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
634
635 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
636 @@ -3135,6 +3246,9 @@ static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_s
637 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
638 uint32_t hw_internal_rev)
639 {
640 + if (ASICREV_IS_NAVI14_M(hw_internal_rev))
641 + return &dcn2_0_nv14_soc;
642 +
643 if (ASICREV_IS_NAVI12_P(hw_internal_rev))
644 return &dcn2_0_nv12_soc;
645
646 diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
647 index 2d5cbfda3ca7..9c262daf5816 100644
648 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
649 +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
650 @@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
651 struct decon_context {
652 struct device *dev;
653 struct drm_device *drm_dev;
654 + void *dma_priv;
655 struct exynos_drm_crtc *crtc;
656 struct exynos_drm_plane planes[WINDOWS_NR];
657 struct exynos_drm_plane_config configs[WINDOWS_NR];
658 @@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
659
660 decon_clear_channels(ctx->crtc);
661
662 - return exynos_drm_register_dma(drm_dev, dev);
663 + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
664 }
665
666 static void decon_unbind(struct device *dev, struct device *master, void *data)
667 @@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
668 decon_disable(ctx->crtc);
669
670 /* detach this sub driver from iommu mapping if supported. */
671 - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
672 + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
673 }
674
675 static const struct component_ops decon_component_ops = {
676 diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
677 index f0640950bd46..6fd40410dfd2 100644
678 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
679 +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
680 @@ -40,6 +40,7 @@
681 struct decon_context {
682 struct device *dev;
683 struct drm_device *drm_dev;
684 + void *dma_priv;
685 struct exynos_drm_crtc *crtc;
686 struct exynos_drm_plane planes[WINDOWS_NR];
687 struct exynos_drm_plane_config configs[WINDOWS_NR];
688 @@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
689
690 decon_clear_channels(ctx->crtc);
691
692 - return exynos_drm_register_dma(drm_dev, ctx->dev);
693 + return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
694 }
695
696 static void decon_ctx_remove(struct decon_context *ctx)
697 {
698 /* detach this sub driver from iommu mapping if supported. */
699 - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
700 + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
701 }
702
703 static u32 decon_calc_clkdiv(struct decon_context *ctx,
704 diff --git a/drivers/gpu/drm/exynos/exynos_drm_dma.c b/drivers/gpu/drm/exynos/exynos_drm_dma.c
705 index 9ebc02768847..619f81435c1b 100644
706 --- a/drivers/gpu/drm/exynos/exynos_drm_dma.c
707 +++ b/drivers/gpu/drm/exynos/exynos_drm_dma.c
708 @@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
709 * mapping.
710 */
711 static int drm_iommu_attach_device(struct drm_device *drm_dev,
712 - struct device *subdrv_dev)
713 + struct device *subdrv_dev, void **dma_priv)
714 {
715 struct exynos_drm_private *priv = drm_dev->dev_private;
716 int ret;
717 @@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
718 return ret;
719
720 if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
721 - if (to_dma_iommu_mapping(subdrv_dev))
722 + /*
723 + * Keep the original DMA mapping of the sub-device and
724 + * restore it on Exynos DRM detach, otherwise the DMA
725 + * framework considers it as IOMMU-less during the next
726 + * probe (in case of deferred probe or modular build)
727 + */
728 + *dma_priv = to_dma_iommu_mapping(subdrv_dev);
729 + if (*dma_priv)
730 arm_iommu_detach_device(subdrv_dev);
731
732 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
733 @@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
734 * mapping
735 */
736 static void drm_iommu_detach_device(struct drm_device *drm_dev,
737 - struct device *subdrv_dev)
738 + struct device *subdrv_dev, void **dma_priv)
739 {
740 struct exynos_drm_private *priv = drm_dev->dev_private;
741
742 - if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
743 + if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
744 arm_iommu_detach_device(subdrv_dev);
745 - else if (IS_ENABLED(CONFIG_IOMMU_DMA))
746 + arm_iommu_attach_device(subdrv_dev, *dma_priv);
747 + } else if (IS_ENABLED(CONFIG_IOMMU_DMA))
748 iommu_detach_device(priv->mapping, subdrv_dev);
749
750 clear_dma_max_seg_size(subdrv_dev);
751 }
752
753 -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
754 +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
755 + void **dma_priv)
756 {
757 struct exynos_drm_private *priv = drm->dev_private;
758
759 @@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
760 priv->mapping = mapping;
761 }
762
763 - return drm_iommu_attach_device(drm, dev);
764 + return drm_iommu_attach_device(drm, dev, dma_priv);
765 }
766
767 -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
768 +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
769 + void **dma_priv)
770 {
771 if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
772 - drm_iommu_detach_device(drm, dev);
773 + drm_iommu_detach_device(drm, dev, dma_priv);
774 }
775
776 void exynos_drm_cleanup_dma(struct drm_device *drm)
777 diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
778 index d4014ba592fd..735f436c857c 100644
779 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
780 +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
781 @@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
782 return priv->mapping ? true : false;
783 }
784
785 -int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
786 -void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
787 +int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
788 + void **dma_priv);
789 +void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
790 + void **dma_priv);
791 void exynos_drm_cleanup_dma(struct drm_device *drm);
792
793 #ifdef CONFIG_DRM_EXYNOS_DPI
794 diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
795 index 8ea2e1d77802..29ab8be8604c 100644
796 --- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
797 +++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
798 @@ -97,6 +97,7 @@ struct fimc_scaler {
799 struct fimc_context {
800 struct exynos_drm_ipp ipp;
801 struct drm_device *drm_dev;
802 + void *dma_priv;
803 struct device *dev;
804 struct exynos_drm_ipp_task *task;
805 struct exynos_drm_ipp_formats *formats;
806 @@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
807
808 ctx->drm_dev = drm_dev;
809 ipp->drm_dev = drm_dev;
810 - exynos_drm_register_dma(drm_dev, dev);
811 + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
812
813 exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
814 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
815 @@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
816 struct exynos_drm_ipp *ipp = &ctx->ipp;
817
818 exynos_drm_ipp_unregister(dev, ipp);
819 - exynos_drm_unregister_dma(drm_dev, dev);
820 + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
821 }
822
823 static const struct component_ops fimc_component_ops = {
824 diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
825 index 8d0a929104e5..34e6b22173fa 100644
826 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
827 +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
828 @@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
829 struct fimd_context {
830 struct device *dev;
831 struct drm_device *drm_dev;
832 + void *dma_priv;
833 struct exynos_drm_crtc *crtc;
834 struct exynos_drm_plane planes[WINDOWS_NR];
835 struct exynos_drm_plane_config configs[WINDOWS_NR];
836 @@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
837 if (is_drm_iommu_supported(drm_dev))
838 fimd_clear_channels(ctx->crtc);
839
840 - return exynos_drm_register_dma(drm_dev, dev);
841 + return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
842 }
843
844 static void fimd_unbind(struct device *dev, struct device *master,
845 @@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
846
847 fimd_disable(ctx->crtc);
848
849 - exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
850 + exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
851
852 if (ctx->encoder)
853 exynos_dpi_remove(ctx->encoder);
854 diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
855 index 2a3382d43bc9..fcee33a43aca 100644
856 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
857 +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
858 @@ -232,6 +232,7 @@ struct g2d_runqueue_node {
859
860 struct g2d_data {
861 struct device *dev;
862 + void *dma_priv;
863 struct clk *gate_clk;
864 void __iomem *regs;
865 int irq;
866 @@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
867 return ret;
868 }
869
870 - ret = exynos_drm_register_dma(drm_dev, dev);
871 + ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
872 if (ret < 0) {
873 dev_err(dev, "failed to enable iommu.\n");
874 g2d_fini_cmdlist(g2d);
875 @@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
876 priv->g2d_dev = NULL;
877
878 cancel_work_sync(&g2d->runqueue_work);
879 - exynos_drm_unregister_dma(g2d->drm_dev, dev);
880 + exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
881 }
882
883 static const struct component_ops g2d_component_ops = {
884 diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
885 index 88b6fcaa20be..45e9aee8366a 100644
886 --- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
887 +++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
888 @@ -97,6 +97,7 @@ struct gsc_scaler {
889 struct gsc_context {
890 struct exynos_drm_ipp ipp;
891 struct drm_device *drm_dev;
892 + void *dma_priv;
893 struct device *dev;
894 struct exynos_drm_ipp_task *task;
895 struct exynos_drm_ipp_formats *formats;
896 @@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
897
898 ctx->drm_dev = drm_dev;
899 ctx->drm_dev = drm_dev;
900 - exynos_drm_register_dma(drm_dev, dev);
901 + exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
902
903 exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
904 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
905 @@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
906 struct exynos_drm_ipp *ipp = &ctx->ipp;
907
908 exynos_drm_ipp_unregister(dev, ipp);
909 - exynos_drm_unregister_dma(drm_dev, dev);
910 + exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
911 }
912
913 static const struct component_ops gsc_component_ops = {
914 diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
915 index b98482990d1a..dafa87b82052 100644
916 --- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
917 +++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
918 @@ -56,6 +56,7 @@ struct rot_variant {
919 struct rot_context {
920 struct exynos_drm_ipp ipp;
921 struct drm_device *drm_dev;
922 + void *dma_priv;
923 struct device *dev;
924 void __iomem *regs;
925 struct clk *clock;
926 @@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
927
928 rot->drm_dev = drm_dev;
929 ipp->drm_dev = drm_dev;
930 - exynos_drm_register_dma(drm_dev, dev);
931 + exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
932
933 exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
934 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
935 @@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
936 struct exynos_drm_ipp *ipp = &rot->ipp;
937
938 exynos_drm_ipp_unregister(dev, ipp);
939 - exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
940 + exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
941 }
942
943 static const struct component_ops rotator_component_ops = {
944 diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
945 index 497973e9b2c5..93c43c8d914e 100644
946 --- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
947 +++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
948 @@ -39,6 +39,7 @@ struct scaler_data {
949 struct scaler_context {
950 struct exynos_drm_ipp ipp;
951 struct drm_device *drm_dev;
952 + void *dma_priv;
953 struct device *dev;
954 void __iomem *regs;
955 struct clk *clock[SCALER_MAX_CLK];
956 @@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
957
958 scaler->drm_dev = drm_dev;
959 ipp->drm_dev = drm_dev;
960 - exynos_drm_register_dma(drm_dev, dev);
961 + exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
962
963 exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
964 DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
965 @@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
966 struct exynos_drm_ipp *ipp = &scaler->ipp;
967
968 exynos_drm_ipp_unregister(dev, ipp);
969 - exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
970 + exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
971 + &scaler->dma_priv);
972 }
973
974 static const struct component_ops scaler_component_ops = {
975 diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
976 index 7b24338fad3c..22f494145411 100644
977 --- a/drivers/gpu/drm/exynos/exynos_mixer.c
978 +++ b/drivers/gpu/drm/exynos/exynos_mixer.c
979 @@ -94,6 +94,7 @@ struct mixer_context {
980 struct platform_device *pdev;
981 struct device *dev;
982 struct drm_device *drm_dev;
983 + void *dma_priv;
984 struct exynos_drm_crtc *crtc;
985 struct exynos_drm_plane planes[MIXER_WIN_NR];
986 unsigned long flags;
987 @@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
988 }
989 }
990
991 - return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
992 + return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
993 + &mixer_ctx->dma_priv);
994 }
995
996 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
997 {
998 - exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
999 + exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
1000 + &mixer_ctx->dma_priv);
1001 }
1002
1003 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
1004 diff --git a/drivers/i2c/busses/i2c-hix5hd2.c b/drivers/i2c/busses/i2c-hix5hd2.c
1005 index 8497c7a95dd4..224f830f77f9 100644
1006 --- a/drivers/i2c/busses/i2c-hix5hd2.c
1007 +++ b/drivers/i2c/busses/i2c-hix5hd2.c
1008 @@ -477,6 +477,7 @@ static int hix5hd2_i2c_remove(struct platform_device *pdev)
1009 i2c_del_adapter(&priv->adap);
1010 pm_runtime_disable(priv->dev);
1011 pm_runtime_set_suspended(priv->dev);
1012 + clk_disable_unprepare(priv->clk);
1013
1014 return 0;
1015 }
1016 diff --git a/drivers/i2c/busses/i2c-nvidia-gpu.c b/drivers/i2c/busses/i2c-nvidia-gpu.c
1017 index 5a1235fd86bb..32cd62188a3d 100644
1018 --- a/drivers/i2c/busses/i2c-nvidia-gpu.c
1019 +++ b/drivers/i2c/busses/i2c-nvidia-gpu.c
1020 @@ -8,6 +8,7 @@
1021 #include <linux/delay.h>
1022 #include <linux/i2c.h>
1023 #include <linux/interrupt.h>
1024 +#include <linux/iopoll.h>
1025 #include <linux/module.h>
1026 #include <linux/pci.h>
1027 #include <linux/platform_device.h>
1028 @@ -75,20 +76,15 @@ static void gpu_enable_i2c_bus(struct gpu_i2c_dev *i2cd)
1029
1030 static int gpu_i2c_check_status(struct gpu_i2c_dev *i2cd)
1031 {
1032 - unsigned long target = jiffies + msecs_to_jiffies(1000);
1033 u32 val;
1034 + int ret;
1035
1036 - do {
1037 - val = readl(i2cd->regs + I2C_MST_CNTL);
1038 - if (!(val & I2C_MST_CNTL_CYCLE_TRIGGER))
1039 - break;
1040 - if ((val & I2C_MST_CNTL_STATUS) !=
1041 - I2C_MST_CNTL_STATUS_BUS_BUSY)
1042 - break;
1043 - usleep_range(500, 600);
1044 - } while (time_is_after_jiffies(target));
1045 -
1046 - if (time_is_before_jiffies(target)) {
1047 + ret = readl_poll_timeout(i2cd->regs + I2C_MST_CNTL, val,
1048 + !(val & I2C_MST_CNTL_CYCLE_TRIGGER) ||
1049 + (val & I2C_MST_CNTL_STATUS) != I2C_MST_CNTL_STATUS_BUS_BUSY,
1050 + 500, 1000 * USEC_PER_MSEC);
1051 +
1052 + if (ret) {
1053 dev_err(i2cd->dev, "i2c timeout error %x\n", val);
1054 return -ETIMEDOUT;
1055 }
1056 diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
1057 index 2a770b8dca00..10ae6c6eab0a 100644
1058 --- a/drivers/infiniband/core/device.c
1059 +++ b/drivers/infiniband/core/device.c
1060 @@ -899,7 +899,9 @@ static int add_one_compat_dev(struct ib_device *device,
1061 cdev->dev.parent = device->dev.parent;
1062 rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
1063 cdev->dev.release = compatdev_release;
1064 - dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
1065 + ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
1066 + if (ret)
1067 + goto add_err;
1068
1069 ret = device_add(&cdev->dev);
1070 if (ret)
1071 diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
1072 index ad4301ecfa59..ef4b0c7061e4 100644
1073 --- a/drivers/infiniband/core/nldev.c
1074 +++ b/drivers/infiniband/core/nldev.c
1075 @@ -863,6 +863,10 @@ static int nldev_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
1076
1077 nla_strlcpy(name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
1078 IB_DEVICE_NAME_MAX);
1079 + if (strlen(name) == 0) {
1080 + err = -EINVAL;
1081 + goto done;
1082 + }
1083 err = ib_device_rename(device, name);
1084 goto done;
1085 }
1086 @@ -1468,7 +1472,7 @@ static int nldev_newlink(struct sk_buff *skb, struct nlmsghdr *nlh,
1087
1088 nla_strlcpy(ibdev_name, tb[RDMA_NLDEV_ATTR_DEV_NAME],
1089 sizeof(ibdev_name));
1090 - if (strchr(ibdev_name, '%'))
1091 + if (strchr(ibdev_name, '%') || strlen(ibdev_name) == 0)
1092 return -EINVAL;
1093
1094 nla_strlcpy(type, tb[RDMA_NLDEV_ATTR_LINK_TYPE], sizeof(type));
1095 diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
1096 index 2d5608315dc8..75e7ec017836 100644
1097 --- a/drivers/infiniband/core/security.c
1098 +++ b/drivers/infiniband/core/security.c
1099 @@ -349,16 +349,11 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
1100 else if (qp_pps)
1101 new_pps->main.pkey_index = qp_pps->main.pkey_index;
1102
1103 - if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
1104 + if (((qp_attr_mask & IB_QP_PKEY_INDEX) &&
1105 + (qp_attr_mask & IB_QP_PORT)) ||
1106 + (qp_pps && qp_pps->main.state != IB_PORT_PKEY_NOT_VALID))
1107 new_pps->main.state = IB_PORT_PKEY_VALID;
1108
1109 - if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
1110 - new_pps->main.port_num = qp_pps->main.port_num;
1111 - new_pps->main.pkey_index = qp_pps->main.pkey_index;
1112 - if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
1113 - new_pps->main.state = IB_PORT_PKEY_VALID;
1114 - }
1115 -
1116 if (qp_attr_mask & IB_QP_ALT_PATH) {
1117 new_pps->alt.port_num = qp_attr->alt_port_num;
1118 new_pps->alt.pkey_index = qp_attr->alt_pkey_index;
1119 diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
1120 index 1235ffb2389b..da229eab5903 100644
1121 --- a/drivers/infiniband/core/user_mad.c
1122 +++ b/drivers/infiniband/core/user_mad.c
1123 @@ -1129,17 +1129,30 @@ static const struct file_operations umad_sm_fops = {
1124 .llseek = no_llseek,
1125 };
1126
1127 +static struct ib_umad_port *get_port(struct ib_device *ibdev,
1128 + struct ib_umad_device *umad_dev,
1129 + unsigned int port)
1130 +{
1131 + if (!umad_dev)
1132 + return ERR_PTR(-EOPNOTSUPP);
1133 + if (!rdma_is_port_valid(ibdev, port))
1134 + return ERR_PTR(-EINVAL);
1135 + if (!rdma_cap_ib_mad(ibdev, port))
1136 + return ERR_PTR(-EOPNOTSUPP);
1137 +
1138 + return &umad_dev->ports[port - rdma_start_port(ibdev)];
1139 +}
1140 +
1141 static int ib_umad_get_nl_info(struct ib_device *ibdev, void *client_data,
1142 struct ib_client_nl_info *res)
1143 {
1144 - struct ib_umad_device *umad_dev = client_data;
1145 + struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
1146
1147 - if (!rdma_is_port_valid(ibdev, res->port))
1148 - return -EINVAL;
1149 + if (IS_ERR(port))
1150 + return PTR_ERR(port);
1151
1152 res->abi = IB_USER_MAD_ABI_VERSION;
1153 - res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].dev;
1154 -
1155 + res->cdev = &port->dev;
1156 return 0;
1157 }
1158
1159 @@ -1154,15 +1167,13 @@ MODULE_ALIAS_RDMA_CLIENT("umad");
1160 static int ib_issm_get_nl_info(struct ib_device *ibdev, void *client_data,
1161 struct ib_client_nl_info *res)
1162 {
1163 - struct ib_umad_device *umad_dev =
1164 - ib_get_client_data(ibdev, &umad_client);
1165 + struct ib_umad_port *port = get_port(ibdev, client_data, res->port);
1166
1167 - if (!rdma_is_port_valid(ibdev, res->port))
1168 - return -EINVAL;
1169 + if (IS_ERR(port))
1170 + return PTR_ERR(port);
1171
1172 res->abi = IB_USER_MAD_ABI_VERSION;
1173 - res->cdev = &umad_dev->ports[res->port - rdma_start_port(ibdev)].sm_dev;
1174 -
1175 + res->cdev = &port->sm_dev;
1176 return 0;
1177 }
1178
1179 diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
1180 index 45f48cde6b9d..ff664355de55 100644
1181 --- a/drivers/infiniband/hw/mlx5/cq.c
1182 +++ b/drivers/infiniband/hw/mlx5/cq.c
1183 @@ -330,6 +330,22 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev,
1184 dump_cqe(dev, cqe);
1185 }
1186
1187 +static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
1188 + u16 tail, u16 head)
1189 +{
1190 + u16 idx;
1191 +
1192 + do {
1193 + idx = tail & (qp->sq.wqe_cnt - 1);
1194 + if (idx == head)
1195 + break;
1196 +
1197 + tail = qp->sq.w_list[idx].next;
1198 + } while (1);
1199 + tail = qp->sq.w_list[idx].next;
1200 + qp->sq.last_poll = tail;
1201 +}
1202 +
1203 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
1204 {
1205 mlx5_frag_buf_free(dev->mdev, &buf->frag_buf);
1206 @@ -368,7 +384,7 @@ static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
1207 }
1208
1209 static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
1210 - int *npolled, int is_send)
1211 + int *npolled, bool is_send)
1212 {
1213 struct mlx5_ib_wq *wq;
1214 unsigned int cur;
1215 @@ -383,10 +399,16 @@ static void sw_comp(struct mlx5_ib_qp *qp, int num_entries, struct ib_wc *wc,
1216 return;
1217
1218 for (i = 0; i < cur && np < num_entries; i++) {
1219 - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
1220 + unsigned int idx;
1221 +
1222 + idx = (is_send) ? wq->last_poll : wq->tail;
1223 + idx &= (wq->wqe_cnt - 1);
1224 + wc->wr_id = wq->wrid[idx];
1225 wc->status = IB_WC_WR_FLUSH_ERR;
1226 wc->vendor_err = MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
1227 wq->tail++;
1228 + if (is_send)
1229 + wq->last_poll = wq->w_list[idx].next;
1230 np++;
1231 wc->qp = &qp->ibqp;
1232 wc++;
1233 @@ -476,6 +498,7 @@ repoll:
1234 wqe_ctr = be16_to_cpu(cqe64->wqe_counter);
1235 idx = wqe_ctr & (wq->wqe_cnt - 1);
1236 handle_good_req(wc, cqe64, wq, idx);
1237 + handle_atomics(*cur_qp, cqe64, wq->last_poll, idx);
1238 wc->wr_id = wq->wrid[idx];
1239 wq->tail = wq->wqe_head[idx] + 1;
1240 wc->status = IB_WC_SUCCESS;
1241 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1242 index 9a918db9e8db..0a160fd1383a 100644
1243 --- a/drivers/infiniband/hw/mlx5/main.c
1244 +++ b/drivers/infiniband/hw/mlx5/main.c
1245 @@ -5638,9 +5638,10 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
1246 const struct mlx5_ib_counters *cnts =
1247 get_counters(dev, counter->port - 1);
1248
1249 - /* Q counters are in the beginning of all counters */
1250 return rdma_alloc_hw_stats_struct(cnts->names,
1251 - cnts->num_q_counters,
1252 + cnts->num_q_counters +
1253 + cnts->num_cong_counters +
1254 + cnts->num_ext_ppcnt_counters,
1255 RDMA_HW_STATS_DEFAULT_LIFESPAN);
1256 }
1257
1258 diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
1259 index 1a98ee2e01c4..a9ce46c4c1ae 100644
1260 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
1261 +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
1262 @@ -283,6 +283,7 @@ struct mlx5_ib_wq {
1263 unsigned head;
1264 unsigned tail;
1265 u16 cur_post;
1266 + u16 last_poll;
1267 void *cur_edge;
1268 };
1269
1270 diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
1271 index 0865373bd12d..881decb1309a 100644
1272 --- a/drivers/infiniband/hw/mlx5/qp.c
1273 +++ b/drivers/infiniband/hw/mlx5/qp.c
1274 @@ -3725,6 +3725,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1275 qp->sq.cur_post = 0;
1276 if (qp->sq.wqe_cnt)
1277 qp->sq.cur_edge = get_sq_edge(&qp->sq, 0);
1278 + qp->sq.last_poll = 0;
1279 qp->db.db[MLX5_RCV_DBR] = 0;
1280 qp->db.db[MLX5_SND_DBR] = 0;
1281 }
1282 @@ -6131,6 +6132,10 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
1283 if (udata->outlen && udata->outlen < min_resp_len)
1284 return ERR_PTR(-EINVAL);
1285
1286 + if (!capable(CAP_SYS_RAWIO) &&
1287 + init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP)
1288 + return ERR_PTR(-EPERM);
1289 +
1290 dev = to_mdev(pd->device);
1291 switch (init_attr->wq_type) {
1292 case IB_WQT_RQ:
1293 diff --git a/drivers/infiniband/sw/rdmavt/cq.c b/drivers/infiniband/sw/rdmavt/cq.c
1294 index a85571a4cf57..0fee3c87776b 100644
1295 --- a/drivers/infiniband/sw/rdmavt/cq.c
1296 +++ b/drivers/infiniband/sw/rdmavt/cq.c
1297 @@ -327,7 +327,7 @@ void rvt_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
1298 if (cq->ip)
1299 kref_put(&cq->ip->ref, rvt_release_mmap_info);
1300 else
1301 - vfree(cq->queue);
1302 + vfree(cq->kqueue);
1303 }
1304
1305 /**
1306 diff --git a/drivers/input/input.c b/drivers/input/input.c
1307 index ee6c3234df36..e2eb9b9b8363 100644
1308 --- a/drivers/input/input.c
1309 +++ b/drivers/input/input.c
1310 @@ -190,6 +190,7 @@ static void input_repeat_key(struct timer_list *t)
1311 input_value_sync
1312 };
1313
1314 + input_set_timestamp(dev, ktime_get());
1315 input_pass_values(dev, vals, ARRAY_SIZE(vals));
1316
1317 if (dev->rep[REP_PERIOD])
1318 diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
1319 index 2c666fb34625..4d2036209b45 100644
1320 --- a/drivers/input/mouse/synaptics.c
1321 +++ b/drivers/input/mouse/synaptics.c
1322 @@ -186,6 +186,7 @@ static const char * const smbus_pnp_ids[] = {
1323 "SYN3052", /* HP EliteBook 840 G4 */
1324 "SYN3221", /* HP 15-ay000 */
1325 "SYN323d", /* HP Spectre X360 13-w013dx */
1326 + "SYN3257", /* HP Envy 13-ad105ng */
1327 NULL
1328 };
1329
1330 diff --git a/drivers/input/touchscreen/raydium_i2c_ts.c b/drivers/input/touchscreen/raydium_i2c_ts.c
1331 index 6ed9f22e6401..fe245439adee 100644
1332 --- a/drivers/input/touchscreen/raydium_i2c_ts.c
1333 +++ b/drivers/input/touchscreen/raydium_i2c_ts.c
1334 @@ -432,7 +432,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
1335 return 0;
1336 }
1337
1338 -static bool raydium_i2c_boot_trigger(struct i2c_client *client)
1339 +static int raydium_i2c_boot_trigger(struct i2c_client *client)
1340 {
1341 static const u8 cmd[7][6] = {
1342 { 0x08, 0x0C, 0x09, 0x00, 0x50, 0xD7 },
1343 @@ -457,10 +457,10 @@ static bool raydium_i2c_boot_trigger(struct i2c_client *client)
1344 }
1345 }
1346
1347 - return false;
1348 + return 0;
1349 }
1350
1351 -static bool raydium_i2c_fw_trigger(struct i2c_client *client)
1352 +static int raydium_i2c_fw_trigger(struct i2c_client *client)
1353 {
1354 static const u8 cmd[5][11] = {
1355 { 0, 0x09, 0x71, 0x0C, 0x09, 0x00, 0x50, 0xD7, 0, 0, 0 },
1356 @@ -483,7 +483,7 @@ static bool raydium_i2c_fw_trigger(struct i2c_client *client)
1357 }
1358 }
1359
1360 - return false;
1361 + return 0;
1362 }
1363
1364 static int raydium_i2c_check_path(struct i2c_client *client)
1365 diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
1366 index 6a9a1b987520..9e393b9c5091 100644
1367 --- a/drivers/iommu/dmar.c
1368 +++ b/drivers/iommu/dmar.c
1369 @@ -371,7 +371,8 @@ dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
1370 {
1371 struct dmar_drhd_unit *dmaru;
1372
1373 - list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
1374 + list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
1375 + dmar_rcu_check())
1376 if (dmaru->segment == drhd->segment &&
1377 dmaru->reg_base_addr == drhd->address)
1378 return dmaru;
1379 diff --git a/drivers/iommu/intel-iommu-debugfs.c b/drivers/iommu/intel-iommu-debugfs.c
1380 index 471f05d452e0..bdf095e9dbe0 100644
1381 --- a/drivers/iommu/intel-iommu-debugfs.c
1382 +++ b/drivers/iommu/intel-iommu-debugfs.c
1383 @@ -32,38 +32,42 @@ struct iommu_regset {
1384
1385 #define IOMMU_REGSET_ENTRY(_reg_) \
1386 { DMAR_##_reg_##_REG, __stringify(_reg_) }
1387 -static const struct iommu_regset iommu_regs[] = {
1388 +
1389 +static const struct iommu_regset iommu_regs_32[] = {
1390 IOMMU_REGSET_ENTRY(VER),
1391 - IOMMU_REGSET_ENTRY(CAP),
1392 - IOMMU_REGSET_ENTRY(ECAP),
1393 IOMMU_REGSET_ENTRY(GCMD),
1394 IOMMU_REGSET_ENTRY(GSTS),
1395 - IOMMU_REGSET_ENTRY(RTADDR),
1396 - IOMMU_REGSET_ENTRY(CCMD),
1397 IOMMU_REGSET_ENTRY(FSTS),
1398 IOMMU_REGSET_ENTRY(FECTL),
1399 IOMMU_REGSET_ENTRY(FEDATA),
1400 IOMMU_REGSET_ENTRY(FEADDR),
1401 IOMMU_REGSET_ENTRY(FEUADDR),
1402 - IOMMU_REGSET_ENTRY(AFLOG),
1403 IOMMU_REGSET_ENTRY(PMEN),
1404 IOMMU_REGSET_ENTRY(PLMBASE),
1405 IOMMU_REGSET_ENTRY(PLMLIMIT),
1406 + IOMMU_REGSET_ENTRY(ICS),
1407 + IOMMU_REGSET_ENTRY(PRS),
1408 + IOMMU_REGSET_ENTRY(PECTL),
1409 + IOMMU_REGSET_ENTRY(PEDATA),
1410 + IOMMU_REGSET_ENTRY(PEADDR),
1411 + IOMMU_REGSET_ENTRY(PEUADDR),
1412 +};
1413 +
1414 +static const struct iommu_regset iommu_regs_64[] = {
1415 + IOMMU_REGSET_ENTRY(CAP),
1416 + IOMMU_REGSET_ENTRY(ECAP),
1417 + IOMMU_REGSET_ENTRY(RTADDR),
1418 + IOMMU_REGSET_ENTRY(CCMD),
1419 + IOMMU_REGSET_ENTRY(AFLOG),
1420 IOMMU_REGSET_ENTRY(PHMBASE),
1421 IOMMU_REGSET_ENTRY(PHMLIMIT),
1422 IOMMU_REGSET_ENTRY(IQH),
1423 IOMMU_REGSET_ENTRY(IQT),
1424 IOMMU_REGSET_ENTRY(IQA),
1425 - IOMMU_REGSET_ENTRY(ICS),
1426 IOMMU_REGSET_ENTRY(IRTA),
1427 IOMMU_REGSET_ENTRY(PQH),
1428 IOMMU_REGSET_ENTRY(PQT),
1429 IOMMU_REGSET_ENTRY(PQA),
1430 - IOMMU_REGSET_ENTRY(PRS),
1431 - IOMMU_REGSET_ENTRY(PECTL),
1432 - IOMMU_REGSET_ENTRY(PEDATA),
1433 - IOMMU_REGSET_ENTRY(PEADDR),
1434 - IOMMU_REGSET_ENTRY(PEUADDR),
1435 IOMMU_REGSET_ENTRY(MTRRCAP),
1436 IOMMU_REGSET_ENTRY(MTRRDEF),
1437 IOMMU_REGSET_ENTRY(MTRR_FIX64K_00000),
1438 @@ -126,10 +130,16 @@ static int iommu_regset_show(struct seq_file *m, void *unused)
1439 * by adding the offset to the pointer (virtual address).
1440 */
1441 raw_spin_lock_irqsave(&iommu->register_lock, flag);
1442 - for (i = 0 ; i < ARRAY_SIZE(iommu_regs); i++) {
1443 - value = dmar_readq(iommu->reg + iommu_regs[i].offset);
1444 + for (i = 0 ; i < ARRAY_SIZE(iommu_regs_32); i++) {
1445 + value = dmar_readl(iommu->reg + iommu_regs_32[i].offset);
1446 + seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
1447 + iommu_regs_32[i].regs, iommu_regs_32[i].offset,
1448 + value);
1449 + }
1450 + for (i = 0 ; i < ARRAY_SIZE(iommu_regs_64); i++) {
1451 + value = dmar_readq(iommu->reg + iommu_regs_64[i].offset);
1452 seq_printf(m, "%-16s\t0x%02x\t\t0x%016llx\n",
1453 - iommu_regs[i].regs, iommu_regs[i].offset,
1454 + iommu_regs_64[i].regs, iommu_regs_64[i].offset,
1455 value);
1456 }
1457 raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
1458 @@ -271,9 +281,16 @@ static int dmar_translation_struct_show(struct seq_file *m, void *unused)
1459 {
1460 struct dmar_drhd_unit *drhd;
1461 struct intel_iommu *iommu;
1462 + u32 sts;
1463
1464 rcu_read_lock();
1465 for_each_active_iommu(iommu, drhd) {
1466 + sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
1467 + if (!(sts & DMA_GSTS_TES)) {
1468 + seq_printf(m, "DMA Remapping is not enabled on %s\n",
1469 + iommu->name);
1470 + continue;
1471 + }
1472 root_tbl_walk(m, iommu);
1473 seq_putc(m, '\n');
1474 }
1475 @@ -343,6 +360,7 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
1476 struct dmar_drhd_unit *drhd;
1477 struct intel_iommu *iommu;
1478 u64 irta;
1479 + u32 sts;
1480
1481 rcu_read_lock();
1482 for_each_active_iommu(iommu, drhd) {
1483 @@ -352,7 +370,8 @@ static int ir_translation_struct_show(struct seq_file *m, void *unused)
1484 seq_printf(m, "Remapped Interrupt supported on IOMMU: %s\n",
1485 iommu->name);
1486
1487 - if (iommu->ir_table) {
1488 + sts = dmar_readl(iommu->reg + DMAR_GSTS_REG);
1489 + if (iommu->ir_table && (sts & DMA_GSTS_IRES)) {
1490 irta = virt_to_phys(iommu->ir_table->base);
1491 seq_printf(m, " IR table address:%llx\n", irta);
1492 ir_tbl_remap_entry_show(m, iommu);
1493 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1494 index 1c2b3e78056f..9d47b227e557 100644
1495 --- a/drivers/iommu/intel-iommu.c
1496 +++ b/drivers/iommu/intel-iommu.c
1497 @@ -4961,6 +4961,9 @@ int __init intel_iommu_init(void)
1498
1499 down_write(&dmar_global_lock);
1500
1501 + if (!no_iommu)
1502 + intel_iommu_debugfs_init();
1503 +
1504 if (no_iommu || dmar_disabled) {
1505 /*
1506 * We exit the function here to ensure IOMMU's remapping and
1507 @@ -5056,7 +5059,6 @@ int __init intel_iommu_init(void)
1508 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
1509
1510 intel_iommu_enabled = 1;
1511 - intel_iommu_debugfs_init();
1512
1513 return 0;
1514
1515 diff --git a/drivers/media/usb/b2c2/flexcop-usb.c b/drivers/media/usb/b2c2/flexcop-usb.c
1516 index 039963a7765b..198ddfb8d2b1 100644
1517 --- a/drivers/media/usb/b2c2/flexcop-usb.c
1518 +++ b/drivers/media/usb/b2c2/flexcop-usb.c
1519 @@ -511,6 +511,9 @@ static int flexcop_usb_init(struct flexcop_usb *fc_usb)
1520 return ret;
1521 }
1522
1523 + if (fc_usb->uintf->cur_altsetting->desc.bNumEndpoints < 1)
1524 + return -ENODEV;
1525 +
1526 switch (fc_usb->udev->speed) {
1527 case USB_SPEED_LOW:
1528 err("cannot handle USB speed because it is too slow.");
1529 @@ -544,9 +547,6 @@ static int flexcop_usb_probe(struct usb_interface *intf,
1530 struct flexcop_device *fc = NULL;
1531 int ret;
1532
1533 - if (intf->cur_altsetting->desc.bNumEndpoints < 1)
1534 - return -ENODEV;
1535 -
1536 if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_usb))) == NULL) {
1537 err("out of memory\n");
1538 return -ENOMEM;
1539 diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
1540 index e53c58ab6488..ef62dd6c5ae4 100644
1541 --- a/drivers/media/usb/dvb-usb/dib0700_core.c
1542 +++ b/drivers/media/usb/dvb-usb/dib0700_core.c
1543 @@ -818,7 +818,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
1544
1545 /* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
1546
1547 - if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
1548 + if (intf->cur_altsetting->desc.bNumEndpoints < rc_ep + 1)
1549 return -ENODEV;
1550
1551 purb = usb_alloc_urb(0, GFP_KERNEL);
1552 @@ -838,7 +838,7 @@ int dib0700_rc_setup(struct dvb_usb_device *d, struct usb_interface *intf)
1553 * Some devices like the Hauppauge NovaTD model 52009 use an interrupt
1554 * endpoint, while others use a bulk one.
1555 */
1556 - e = &intf->altsetting[0].endpoint[rc_ep].desc;
1557 + e = &intf->cur_altsetting->endpoint[rc_ep].desc;
1558 if (usb_endpoint_dir_in(e)) {
1559 if (usb_endpoint_xfer_bulk(e)) {
1560 pipe = usb_rcvbulkpipe(d->udev, rc_ep);
1561 diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
1562 index f417dfc0b872..0afe70a3f9a2 100644
1563 --- a/drivers/media/usb/gspca/ov519.c
1564 +++ b/drivers/media/usb/gspca/ov519.c
1565 @@ -3477,6 +3477,11 @@ static void ov511_mode_init_regs(struct sd *sd)
1566 return;
1567 }
1568
1569 + if (alt->desc.bNumEndpoints < 1) {
1570 + sd->gspca_dev.usb_err = -ENODEV;
1571 + return;
1572 + }
1573 +
1574 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1575 reg_w(sd, R51x_FIFO_PSIZE, packet_size >> 5);
1576
1577 @@ -3603,6 +3608,11 @@ static void ov518_mode_init_regs(struct sd *sd)
1578 return;
1579 }
1580
1581 + if (alt->desc.bNumEndpoints < 1) {
1582 + sd->gspca_dev.usb_err = -ENODEV;
1583 + return;
1584 + }
1585 +
1586 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1587 ov518_reg_w32(sd, R51x_FIFO_PSIZE, packet_size & ~7, 2);
1588
1589 diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx.c b/drivers/media/usb/gspca/stv06xx/stv06xx.c
1590 index 79653d409951..95673fc0a99c 100644
1591 --- a/drivers/media/usb/gspca/stv06xx/stv06xx.c
1592 +++ b/drivers/media/usb/gspca/stv06xx/stv06xx.c
1593 @@ -282,6 +282,9 @@ static int stv06xx_start(struct gspca_dev *gspca_dev)
1594 return -EIO;
1595 }
1596
1597 + if (alt->desc.bNumEndpoints < 1)
1598 + return -ENODEV;
1599 +
1600 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1601 err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size);
1602 if (err < 0)
1603 @@ -306,11 +309,21 @@ out:
1604
1605 static int stv06xx_isoc_init(struct gspca_dev *gspca_dev)
1606 {
1607 + struct usb_interface_cache *intfc;
1608 struct usb_host_interface *alt;
1609 struct sd *sd = (struct sd *) gspca_dev;
1610
1611 + intfc = gspca_dev->dev->actconfig->intf_cache[0];
1612 +
1613 + if (intfc->num_altsetting < 2)
1614 + return -ENODEV;
1615 +
1616 + alt = &intfc->altsetting[1];
1617 +
1618 + if (alt->desc.bNumEndpoints < 1)
1619 + return -ENODEV;
1620 +
1621 /* Start isoc bandwidth "negotiation" at max isoc bandwidth */
1622 - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1623 alt->endpoint[0].desc.wMaxPacketSize =
1624 cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]);
1625
1626 @@ -323,6 +336,10 @@ static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev)
1627 struct usb_host_interface *alt;
1628 struct sd *sd = (struct sd *) gspca_dev;
1629
1630 + /*
1631 + * Existence of altsetting and endpoint was verified in
1632 + * stv06xx_isoc_init()
1633 + */
1634 alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1635 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1636 min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode];
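
The stv06xx, ov519, xirlink_cit, pb0100, flexcop and dib0700 hunks above all apply one defensive pattern: check num_altsetting and bNumEndpoints before touching altsetting[1] or endpoint[0], because a malformed or malicious USB device can report fewer descriptors than the driver expects. A minimal standalone C sketch of the pattern; the structs are local stand-ins, not the kernel's usb_interface_cache/usb_host_interface types:

#include <stdio.h>

struct alt_model  { int bNumEndpoints; int wMaxPacketSize[4]; };
struct intf_model { int num_altsetting; struct alt_model *altsetting; };

/* Mirrors the shape of stv06xx_isoc_init()/sd_isoc_init() above. */
static int isoc_init(const struct intf_model *intfc)
{
	if (intfc->num_altsetting < 2)
		return -1;                 /* altsetting 1 does not exist */
	const struct alt_model *alt = &intfc->altsetting[1];
	if (alt->bNumEndpoints < 1)
		return -1;                 /* endpoint[0] would be out of bounds */
	return alt->wMaxPacketSize[0];     /* only now is this safe to read */
}

int main(void)
{
	struct alt_model alts[2] = {
		{ .bNumEndpoints = 1, .wMaxPacketSize = { 1023 } },
		{ .bNumEndpoints = 0 },    /* hostile device: no endpoints */
	};
	struct intf_model intfc = { .num_altsetting = 2, .altsetting = alts };
	printf("result: %d\n", isoc_init(&intfc));  /* -1: rejected safely */
	return 0;
}
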
1637 diff --git a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
1638 index 6d1007715ff7..ae382b3b5f7f 100644
1639 --- a/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
1640 +++ b/drivers/media/usb/gspca/stv06xx/stv06xx_pb0100.c
1641 @@ -185,6 +185,10 @@ static int pb0100_start(struct sd *sd)
1642 alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt);
1643 if (!alt)
1644 return -ENODEV;
1645 +
1646 + if (alt->desc.bNumEndpoints < 1)
1647 + return -ENODEV;
1648 +
1649 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1650
1651 /* If we don't have enough bandwidth use a lower framerate */
1652 diff --git a/drivers/media/usb/gspca/xirlink_cit.c b/drivers/media/usb/gspca/xirlink_cit.c
1653 index 934a90bd78c2..c579b100f066 100644
1654 --- a/drivers/media/usb/gspca/xirlink_cit.c
1655 +++ b/drivers/media/usb/gspca/xirlink_cit.c
1656 @@ -1442,6 +1442,9 @@ static int cit_get_packet_size(struct gspca_dev *gspca_dev)
1657 return -EIO;
1658 }
1659
1660 + if (alt->desc.bNumEndpoints < 1)
1661 + return -ENODEV;
1662 +
1663 return le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1664 }
1665
1666 @@ -2626,6 +2629,7 @@ static int sd_start(struct gspca_dev *gspca_dev)
1667
1668 static int sd_isoc_init(struct gspca_dev *gspca_dev)
1669 {
1670 + struct usb_interface_cache *intfc;
1671 struct usb_host_interface *alt;
1672 int max_packet_size;
1673
1674 @@ -2641,8 +2645,17 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
1675 break;
1676 }
1677
1678 + intfc = gspca_dev->dev->actconfig->intf_cache[0];
1679 +
1680 + if (intfc->num_altsetting < 2)
1681 + return -ENODEV;
1682 +
1683 + alt = &intfc->altsetting[1];
1684 +
1685 + if (alt->desc.bNumEndpoints < 1)
1686 + return -ENODEV;
1687 +
1688 /* Start isoc bandwidth "negotiation" at max isoc bandwidth */
1689 - alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1690 alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(max_packet_size);
1691
1692 return 0;
1693 @@ -2665,6 +2678,9 @@ static int sd_isoc_nego(struct gspca_dev *gspca_dev)
1694 break;
1695 }
1696
1697 + /*
1698 + * Existence of altsetting and endpoint was verified in sd_isoc_init()
1699 + */
1700 alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1];
1701 packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
1702 if (packet_size <= min_packet_size)
1703 diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
1704 index 5095c380b2c1..ee9c656d121f 100644
1705 --- a/drivers/media/usb/usbtv/usbtv-core.c
1706 +++ b/drivers/media/usb/usbtv/usbtv-core.c
1707 @@ -56,7 +56,7 @@ int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
1708
1709 ret = usb_control_msg(usbtv->udev, pipe, USBTV_REQUEST_REG,
1710 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1711 - value, index, NULL, 0, 0);
1712 + value, index, NULL, 0, USB_CTRL_GET_TIMEOUT);
1713 if (ret < 0)
1714 return ret;
1715 }
1716 diff --git a/drivers/media/usb/usbtv/usbtv-video.c b/drivers/media/usb/usbtv/usbtv-video.c
1717 index 3d9284a09ee5..b249f037900c 100644
1718 --- a/drivers/media/usb/usbtv/usbtv-video.c
1719 +++ b/drivers/media/usb/usbtv/usbtv-video.c
1720 @@ -800,7 +800,8 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
1721 ret = usb_control_msg(usbtv->udev,
1722 usb_rcvctrlpipe(usbtv->udev, 0), USBTV_CONTROL_REG,
1723 USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1724 - 0, USBTV_BASE + 0x0244, (void *)data, 3, 0);
1725 + 0, USBTV_BASE + 0x0244, (void *)data, 3,
1726 + USB_CTRL_GET_TIMEOUT);
1727 if (ret < 0)
1728 goto error;
1729 }
1730 @@ -851,7 +852,7 @@ static int usbtv_s_ctrl(struct v4l2_ctrl *ctrl)
1731 ret = usb_control_msg(usbtv->udev, usb_sndctrlpipe(usbtv->udev, 0),
1732 USBTV_CONTROL_REG,
1733 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
1734 - 0, index, (void *)data, size, 0);
1735 + 0, index, (void *)data, size, USB_CTRL_SET_TIMEOUT);
1736
1737 error:
1738 if (ret < 0)
1739 diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
1740 index 63d6b147b21e..41da73ce2e98 100644
1741 --- a/drivers/media/v4l2-core/v4l2-device.c
1742 +++ b/drivers/media/v4l2-core/v4l2-device.c
1743 @@ -179,6 +179,7 @@ static void v4l2_subdev_release(struct v4l2_subdev *sd)
1744
1745 if (sd->internal_ops && sd->internal_ops->release)
1746 sd->internal_ops->release(sd);
1747 + sd->devnode = NULL;
1748 module_put(owner);
1749 }
1750
1751 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
1752 index abf8f5eb0a1c..26644b7ec13e 100644
1753 --- a/drivers/mmc/core/core.c
1754 +++ b/drivers/mmc/core/core.c
1755 @@ -1732,8 +1732,11 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
1756 * the erase operation does not exceed the max_busy_timeout, we should
1757 * use R1B response. Or we need to prevent the host from doing hw busy
1758 * detection, which is done by converting to a R1 response instead.
1759 + * Note, some hosts require R1B, which also means they are on their own
1760 + * when it comes to dealing with the busy timeout.
1761 */
1762 - if (card->host->max_busy_timeout &&
1763 + if (!(card->host->caps & MMC_CAP_NEED_RSP_BUSY) &&
1764 + card->host->max_busy_timeout &&
1765 busy_timeout > card->host->max_busy_timeout) {
1766 cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
1767 } else {
1768 diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
1769 index c8804895595f..b7159e243323 100644
1770 --- a/drivers/mmc/core/mmc.c
1771 +++ b/drivers/mmc/core/mmc.c
1772 @@ -1911,9 +1911,12 @@ static int mmc_sleep(struct mmc_host *host)
1773 * If the max_busy_timeout of the host is specified, validate it against
1774 * the sleep cmd timeout. A failure means we need to prevent the host
1775 * from doing hw busy detection, which is done by converting to a R1
1776 - * response instead of a R1B.
1777 + * response instead of a R1B. Note, some hosts require R1B, which also
1778 + * means they are on their own when it comes to dealing with the busy
1779 + * timeout.
1780 */
1781 - if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1782 + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && host->max_busy_timeout &&
1783 + (timeout_ms > host->max_busy_timeout)) {
1784 cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1785 } else {
1786 cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1787 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
1788 index 09113b9ad679..18a7afb2a5b2 100644
1789 --- a/drivers/mmc/core/mmc_ops.c
1790 +++ b/drivers/mmc/core/mmc_ops.c
1791 @@ -538,10 +538,12 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
1792 * If the cmd timeout and the max_busy_timeout of the host are both
1793 * specified, let's validate them. A failure means we need to prevent
1794 * the host from doing hw busy detection, which is done by converting
1795 - * to a R1 response instead of a R1B.
1796 + * to a R1 response instead of a R1B. Note, some hosts require R1B,
1797 + * which also means they are on their own when it comes to dealing with
1798 + * the busy timeout.
1799 */
1800 - if (timeout_ms && host->max_busy_timeout &&
1801 - (timeout_ms > host->max_busy_timeout))
1802 + if (!(host->caps & MMC_CAP_NEED_RSP_BUSY) && timeout_ms &&
1803 + host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
1804 use_r1b_resp = false;
1805
1806 cmd.opcode = MMC_SWITCH;
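
The three mmc/core hunks above share one decision rule: keep the R1B response (hardware busy detection) unless the expected busy period exceeds what the host controller can wait out, and never downgrade to R1 on hosts that declare MMC_CAP_NEED_RSP_BUSY. A standalone sketch of that rule, using a stand-in struct and cap-bit value rather than the real <linux/mmc/host.h> definitions:

#include <stdbool.h>
#include <stdio.h>

#define MMC_CAP_NEED_RSP_BUSY (1u << 0)    /* stand-in bit value */

struct host_model {
	unsigned int caps;
	unsigned int max_busy_timeout;     /* ms; 0 = no hw limit known */
};

static bool use_r1b_resp(const struct host_model *host, unsigned int timeout_ms)
{
	/* Hosts that insist on R1B keep it regardless of the timeout;
	 * they then own the handling of long busy periods. */
	if (host->caps & MMC_CAP_NEED_RSP_BUSY)
		return true;
	/* Otherwise fall back to R1 when the command could stay busy
	 * longer than the controller can wait in hardware. */
	if (timeout_ms && host->max_busy_timeout &&
	    timeout_ms > host->max_busy_timeout)
		return false;
	return true;
}

int main(void)
{
	struct host_model omap    = { .caps = MMC_CAP_NEED_RSP_BUSY,
				      .max_busy_timeout = 2000 };
	struct host_model generic = { .caps = 0, .max_busy_timeout = 2000 };

	printf("omap, 5s op:    R1B=%d\n", use_r1b_resp(&omap, 5000));    /* 1 */
	printf("generic, 5s op: R1B=%d\n", use_r1b_resp(&generic, 5000)); /* 0 */
	return 0;
}
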
1807 diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
1808 index 083e7e053c95..d3135249b2e4 100644
1809 --- a/drivers/mmc/host/sdhci-omap.c
1810 +++ b/drivers/mmc/host/sdhci-omap.c
1811 @@ -1134,6 +1134,9 @@ static int sdhci_omap_probe(struct platform_device *pdev)
1812 host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
1813 host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
1814
1815 + /* R1B responses are required to properly manage HW busy detection. */
1816 + mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
1817 +
1818 ret = sdhci_setup_host(host);
1819 if (ret)
1820 goto err_put_sync;
1821 diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
1822 index 403ac44a7378..a25c3a4d3f6c 100644
1823 --- a/drivers/mmc/host/sdhci-tegra.c
1824 +++ b/drivers/mmc/host/sdhci-tegra.c
1825 @@ -1552,6 +1552,9 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
1826 if (tegra_host->soc_data->nvquirks & NVQUIRK_ENABLE_DDR50)
1827 host->mmc->caps |= MMC_CAP_1_8V_DDR;
1828
1829 + /* R1B responses are required to properly manage HW busy detection. */
1830 + host->mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
1831 +
1832 tegra_sdhci_parse_dt(host);
1833
1834 tegra_host->power_gpio = devm_gpiod_get_optional(&pdev->dev, "power",
1835 diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
1836 index df1c7989e13d..df3cd2589bcf 100644
1837 --- a/drivers/net/Kconfig
1838 +++ b/drivers/net/Kconfig
1839 @@ -106,6 +106,7 @@ config NET_FC
1840 config IFB
1841 tristate "Intermediate Functional Block support"
1842 depends on NET_CLS_ACT
1843 + select NET_REDIRECT
1844 ---help---
1845 This is an intermediate driver that allows sharing of
1846 resources.
1847 diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
1848 index 2f5c287eac95..a3664281a33f 100644
1849 --- a/drivers/net/can/slcan.c
1850 +++ b/drivers/net/can/slcan.c
1851 @@ -625,7 +625,10 @@ err_free_chan:
1852 tty->disc_data = NULL;
1853 clear_bit(SLF_INUSE, &sl->flags);
1854 slc_free_netdev(sl->dev);
1855 + /* do not call free_netdev before rtnl_unlock */
1856 + rtnl_unlock();
1857 free_netdev(sl->dev);
1858 + return err;
1859
1860 err_exit:
1861 rtnl_unlock();
1862 diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
1863 index 1d8d36de4d20..e0e932f0aed1 100644
1864 --- a/drivers/net/dsa/mt7530.c
1865 +++ b/drivers/net/dsa/mt7530.c
1866 @@ -566,7 +566,7 @@ mt7530_mib_reset(struct dsa_switch *ds)
1867 static void
1868 mt7530_port_set_status(struct mt7530_priv *priv, int port, int enable)
1869 {
1870 - u32 mask = PMCR_TX_EN | PMCR_RX_EN;
1871 + u32 mask = PMCR_TX_EN | PMCR_RX_EN | PMCR_FORCE_LNK;
1872
1873 if (enable)
1874 mt7530_set(priv, MT7530_PMCR_P(port), mask);
1875 @@ -1439,7 +1439,7 @@ static void mt7530_phylink_mac_config(struct dsa_switch *ds, int port,
1876 mcr_new &= ~(PMCR_FORCE_SPEED_1000 | PMCR_FORCE_SPEED_100 |
1877 PMCR_FORCE_FDX | PMCR_TX_FC_EN | PMCR_RX_FC_EN);
1878 mcr_new |= PMCR_IFG_XMIT(1) | PMCR_MAC_MODE | PMCR_BACKOFF_EN |
1879 - PMCR_BACKPR_EN | PMCR_FORCE_MODE | PMCR_FORCE_LNK;
1880 + PMCR_BACKPR_EN | PMCR_FORCE_MODE;
1881
1882 /* Are we connected to external phy */
1883 if (port == 5 && dsa_is_user_port(ds, 5))
1884 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1885 index f0cddf250cfd..26325f7b3c1f 100644
1886 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
1887 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
1888 @@ -3652,13 +3652,15 @@ err_disable_device:
1889
1890 /*****************************************************************************/
1891
1892 -/* ena_remove - Device Removal Routine
1893 +/* __ena_shutoff - Helper used in both PCI remove/shutdown routines
1894 * @pdev: PCI device information struct
1895 + * @shutdown: Is it a shutdown operation? If false, it is a removal operation.
1896 *
1897 - * ena_remove is called by the PCI subsystem to alert the driver
1898 - * that it should release a PCI device.
1899 + * __ena_shutoff is a helper routine that does the real work on shutdown and
1900 + * removal paths; the difference between those paths is with regards to whether
1901 + * dettach or unregister the netdevice.
1902 */
1903 -static void ena_remove(struct pci_dev *pdev)
1904 +static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
1905 {
1906 struct ena_adapter *adapter = pci_get_drvdata(pdev);
1907 struct ena_com_dev *ena_dev;
1908 @@ -3677,13 +3679,17 @@ static void ena_remove(struct pci_dev *pdev)
1909
1910 cancel_work_sync(&adapter->reset_task);
1911
1912 - rtnl_lock();
1913 + rtnl_lock(); /* lock released inside the if-else block below */
1914 ena_destroy_device(adapter, true);
1915 - rtnl_unlock();
1916 -
1917 - unregister_netdev(netdev);
1918 -
1919 - free_netdev(netdev);
1920 + if (shutdown) {
1921 + netif_device_detach(netdev);
1922 + dev_close(netdev);
1923 + rtnl_unlock();
1924 + } else {
1925 + rtnl_unlock();
1926 + unregister_netdev(netdev);
1927 + free_netdev(netdev);
1928 + }
1929
1930 ena_com_rss_destroy(ena_dev);
1931
1932 @@ -3698,6 +3704,30 @@ static void ena_remove(struct pci_dev *pdev)
1933 vfree(ena_dev);
1934 }
1935
1936 +/* ena_remove - Device Removal Routine
1937 + * @pdev: PCI device information struct
1938 + *
1939 + * ena_remove is called by the PCI subsystem to alert the driver
1940 + * that it should release a PCI device.
1941 + */
1942 +
1943 +static void ena_remove(struct pci_dev *pdev)
1944 +{
1945 + __ena_shutoff(pdev, false);
1946 +}
1947 +
1948 +/* ena_shutdown - Device Shutdown Routine
1949 + * @pdev: PCI device information struct
1950 + *
1951 + * ena_shutdown is called by the PCI subsystem to alert the driver that
1952 + * a shutdown/reboot (or kexec) is happening and device must be disabled.
1953 + */
1954 +
1955 +static void ena_shutdown(struct pci_dev *pdev)
1956 +{
1957 + __ena_shutoff(pdev, true);
1958 +}
1959 +
1960 #ifdef CONFIG_PM
1961 /* ena_suspend - PM suspend callback
1962 * @pdev: PCI device information struct
1963 @@ -3747,6 +3777,7 @@ static struct pci_driver ena_pci_driver = {
1964 .id_table = ena_pci_tbl,
1965 .probe = ena_probe,
1966 .remove = ena_remove,
1967 + .shutdown = ena_shutdown,
1968 #ifdef CONFIG_PM
1969 .suspend = ena_suspend,
1970 .resume = ena_resume,
1971 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1972 index 57c88e157f86..6862594b49ab 100644
1973 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1974 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1975 @@ -6863,12 +6863,12 @@ skip_rdma:
1976 }
1977 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
1978 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
1979 - if (rc)
1980 + if (rc) {
1981 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
1982 rc);
1983 - else
1984 - ctx->flags |= BNXT_CTX_FLAG_INITED;
1985 -
1986 + return rc;
1987 + }
1988 + ctx->flags |= BNXT_CTX_FLAG_INITED;
1989 return 0;
1990 }
1991
1992 @@ -7387,14 +7387,22 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
1993 pri2cos = &resp2->pri0_cos_queue_id;
1994 for (i = 0; i < 8; i++) {
1995 u8 queue_id = pri2cos[i];
1996 + u8 queue_idx;
1997
1998 + /* Per port queue IDs start from 0, 10, 20, etc */
1999 + queue_idx = queue_id % 10;
2000 + if (queue_idx > BNXT_MAX_QUEUE) {
2001 + bp->pri2cos_valid = false;
2002 + goto qstats_done;
2003 + }
2004 for (j = 0; j < bp->max_q; j++) {
2005 if (bp->q_ids[j] == queue_id)
2006 - bp->pri2cos[i] = j;
2007 + bp->pri2cos_idx[i] = queue_idx;
2008 }
2009 }
2010 bp->pri2cos_valid = 1;
2011 }
2012 +qstats_done:
2013 mutex_unlock(&bp->hwrm_cmd_lock);
2014 return rc;
2015 }
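
The queue_idx guard above relies on the firmware numbering per-port CoS queues as 0..7, 10..17, 20..27, and so on, so queue_id % 10 recovers the per-port index, and anything above BNXT_MAX_QUEUE marks the mapping invalid. A tiny standalone model (the constant is a stand-in, not the driver header value):

#include <stdio.h>

#define BNXT_MAX_QUEUE 8    /* stand-in for the driver constant */

int main(void)
{
	unsigned char ids[] = { 0, 3, 10, 14, 27, 9 };
	for (unsigned int i = 0; i < sizeof(ids); i++) {
		unsigned char idx = ids[i] % 10;   /* strip the per-port base */
		if (idx > BNXT_MAX_QUEUE)
			printf("queue_id %2u -> invalid (idx %u)\n", ids[i], idx);
		else
			printf("queue_id %2u -> per-port idx %u\n", ids[i], idx);
	}
	return 0;
}
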
2016 @@ -11595,6 +11603,10 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
2017 bp->rx_nr_rings++;
2018 bp->cp_nr_rings++;
2019 }
2020 + if (rc) {
2021 + bp->tx_nr_rings = 0;
2022 + bp->rx_nr_rings = 0;
2023 + }
2024 return rc;
2025 }
2026
2027 @@ -11887,12 +11899,12 @@ init_err_cleanup:
2028 init_err_pci_clean:
2029 bnxt_free_hwrm_short_cmd_req(bp);
2030 bnxt_free_hwrm_resources(bp);
2031 - bnxt_free_ctx_mem(bp);
2032 - kfree(bp->ctx);
2033 - bp->ctx = NULL;
2034 kfree(bp->fw_health);
2035 bp->fw_health = NULL;
2036 bnxt_cleanup_pci(bp);
2037 + bnxt_free_ctx_mem(bp);
2038 + kfree(bp->ctx);
2039 + bp->ctx = NULL;
2040
2041 init_err_free:
2042 free_netdev(dev);
2043 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2044 index 2e6ad53fdc75..cda7ba31095a 100644
2045 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2046 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
2047 @@ -1688,7 +1688,7 @@ struct bnxt {
2048 u16 fw_rx_stats_ext_size;
2049 u16 fw_tx_stats_ext_size;
2050 u16 hw_ring_stats_size;
2051 - u8 pri2cos[8];
2052 + u8 pri2cos_idx[8];
2053 u8 pri2cos_valid;
2054
2055 u16 hwrm_max_req_len;
2056 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
2057 index fb6f30d0d1d0..b1511bcffb1b 100644
2058 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
2059 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
2060 @@ -479,24 +479,26 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
2061 {
2062 struct bnxt *bp = netdev_priv(dev);
2063 struct ieee_ets *my_ets = bp->ieee_ets;
2064 + int rc;
2065
2066 ets->ets_cap = bp->max_tc;
2067
2068 if (!my_ets) {
2069 - int rc;
2070 -
2071 if (bp->dcbx_cap & DCB_CAP_DCBX_HOST)
2072 return 0;
2073
2074 my_ets = kzalloc(sizeof(*my_ets), GFP_KERNEL);
2075 if (!my_ets)
2076 - return 0;
2077 + return -ENOMEM;
2078 rc = bnxt_hwrm_queue_cos2bw_qcfg(bp, my_ets);
2079 if (rc)
2080 - return 0;
2081 + goto error;
2082 rc = bnxt_hwrm_queue_pri2cos_qcfg(bp, my_ets);
2083 if (rc)
2084 - return 0;
2085 + goto error;
2086 +
2087 + /* cache result */
2088 + bp->ieee_ets = my_ets;
2089 }
2090
2091 ets->cbs = my_ets->cbs;
2092 @@ -505,6 +507,9 @@ static int bnxt_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
2093 memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa));
2094 memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc));
2095 return 0;
2096 +error:
2097 + kfree(my_ets);
2098 + return rc;
2099 }
2100
2101 static int bnxt_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
2102 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2103 index cfa647d5b44d..fb1ab58da9fa 100644
2104 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2105 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2106 @@ -589,25 +589,25 @@ skip_ring_stats:
2107 if (bp->pri2cos_valid) {
2108 for (i = 0; i < 8; i++, j++) {
2109 long n = bnxt_rx_bytes_pri_arr[i].base_off +
2110 - bp->pri2cos[i];
2111 + bp->pri2cos_idx[i];
2112
2113 buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
2114 }
2115 for (i = 0; i < 8; i++, j++) {
2116 long n = bnxt_rx_pkts_pri_arr[i].base_off +
2117 - bp->pri2cos[i];
2118 + bp->pri2cos_idx[i];
2119
2120 buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
2121 }
2122 for (i = 0; i < 8; i++, j++) {
2123 long n = bnxt_tx_bytes_pri_arr[i].base_off +
2124 - bp->pri2cos[i];
2125 + bp->pri2cos_idx[i];
2126
2127 buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
2128 }
2129 for (i = 0; i < 8; i++, j++) {
2130 long n = bnxt_tx_pkts_pri_arr[i].base_off +
2131 - bp->pri2cos[i];
2132 + bp->pri2cos_idx[i];
2133
2134 buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
2135 }
2136 diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
2137 index 928bfea5457b..3a45ac8f0e01 100644
2138 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
2139 +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
2140 @@ -1324,8 +1324,9 @@ static inline void t6_fill_tnl_lso(struct sk_buff *skb,
2141 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
2142 int maxreclaim)
2143 {
2144 + unsigned int reclaimed, hw_cidx;
2145 struct sge_txq *q = &eq->q;
2146 - unsigned int reclaimed;
2147 + int hw_in_use;
2148
2149 if (!q->in_use || !__netif_tx_trylock(eq->txq))
2150 return 0;
2151 @@ -1333,12 +1334,17 @@ int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq,
2152 /* Reclaim pending completed TX Descriptors. */
2153 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true);
2154
2155 + hw_cidx = ntohs(READ_ONCE(q->stat->cidx));
2156 + hw_in_use = q->pidx - hw_cidx;
2157 + if (hw_in_use < 0)
2158 + hw_in_use += q->size;
2159 +
2160 /* If the TX Queue is currently stopped and there's now more than half
2161 * the queue available, restart it. Otherwise bail out, since the rest
2162 * of what we want to do here involves possibly shipping any currently
2163 * buffered Coalesced TX Work Request.
2164 */
2165 - if (netif_tx_queue_stopped(eq->txq) && txq_avail(q) > (q->size / 2)) {
2166 + if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) {
2167 netif_tx_wake_queue(eq->txq);
2168 eq->q.restarts++;
2169 }
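
The hw_in_use computation above is standard ring-buffer occupancy arithmetic: the distance from the hardware consumer index to the producer index, corrected for wraparound by adding the ring size when the difference goes negative. A standalone model:

#include <stdio.h>

static int hw_in_use(int pidx, int hw_cidx, int q_size)
{
	int in_use = pidx - hw_cidx;
	if (in_use < 0)
		in_use += q_size;   /* producer has wrapped past the end */
	return in_use;
}

int main(void)
{
	printf("%d\n", hw_in_use(100, 40, 1024));   /* 60 in flight */
	printf("%d\n", hw_in_use(10, 1000, 1024));  /* wrapped: 34 in flight */
	return 0;
}
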
2170 @@ -1469,16 +1475,7 @@ out_free: dev_kfree_skb_any(skb);
2171 * has opened up.
2172 */
2173 eth_txq_stop(q);
2174 -
2175 - /* If we're using the SGE Doorbell Queue Timer facility, we
2176 - * don't need to ask the Firmware to send us Egress Queue CIDX
2177 - * Updates: the Hardware will do this automatically. And
2178 - * since we send the Ingress Queue CIDX Updates to the
2179 - * corresponding Ethernet Response Queue, we'll get them very
2180 - * quickly.
2181 - */
2182 - if (!q->dbqt)
2183 - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2184 + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2185 }
2186
2187 wr = (void *)&q->q.desc[q->q.pidx];
2188 @@ -1792,16 +1789,7 @@ static netdev_tx_t cxgb4_vf_eth_xmit(struct sk_buff *skb,
2189 * has opened up.
2190 */
2191 eth_txq_stop(txq);
2192 -
2193 - /* If we're using the SGE Doorbell Queue Timer facility, we
2194 - * don't need to ask the Firmware to send us Egress Queue CIDX
2195 - * Updates: the Hardware will do this automatically. And
2196 - * since we send the Ingress Queue CIDX Updates to the
2197 - * corresponding Ethernet Response Queue, we'll get them very
2198 - * quickly.
2199 - */
2200 - if (!txq->dbqt)
2201 - wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2202 + wr_mid |= FW_WR_EQUEQ_F | FW_WR_EQUIQ_F;
2203 }
2204
2205 /* Start filling in our Work Request. Note that we do _not_ handle
2206 @@ -2924,26 +2912,6 @@ static void t4_tx_completion_handler(struct sge_rspq *rspq,
2207 }
2208
2209 txq = &s->ethtxq[pi->first_qset + rspq->idx];
2210 -
2211 - /* We've got the Hardware Consumer Index Update in the Egress Update
2212 - * message. If we're using the SGE Doorbell Queue Timer mechanism,
2213 - * these Egress Update messages will be our sole CIDX Updates we get
2214 - * since we don't want to chew up PCIe bandwidth for both Ingress
2215 - * Messages and Status Page writes. However, The code which manages
2216 - * reclaiming successfully DMA'ed TX Work Requests uses the CIDX value
2217 - * stored in the Status Page at the end of the TX Queue. It's easiest
2218 - * to simply copy the CIDX Update value from the Egress Update message
2219 - * to the Status Page. Also note that no Endian issues need to be
2220 - * considered here since both are Big Endian and we're just copying
2221 - * bytes consistently ...
2222 - */
2223 - if (txq->dbqt) {
2224 - struct cpl_sge_egr_update *egr;
2225 -
2226 - egr = (struct cpl_sge_egr_update *)rsp;
2227 - WRITE_ONCE(txq->q.stat->cidx, egr->cidx);
2228 - }
2229 -
2230 t4_sge_eth_txq_egress_update(adapter, txq, -1);
2231 }
2232
2233 diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2234 index e130233b5085..00c4beb760c3 100644
2235 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2236 +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
2237 @@ -2770,9 +2770,7 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
2238 headroom = (u16)(bl->priv_data_size + DPAA_PARSE_RESULTS_SIZE +
2239 DPAA_TIME_STAMP_SIZE + DPAA_HASH_RESULTS_SIZE);
2240
2241 - return DPAA_FD_DATA_ALIGNMENT ? ALIGN(headroom,
2242 - DPAA_FD_DATA_ALIGNMENT) :
2243 - headroom;
2244 + return ALIGN(headroom, DPAA_FD_DATA_ALIGNMENT);
2245 }
2246
2247 static int dpaa_eth_probe(struct platform_device *pdev)
2248 diff --git a/drivers/net/ethernet/freescale/fman/Kconfig b/drivers/net/ethernet/freescale/fman/Kconfig
2249 index 0139cb9042ec..34150182cc35 100644
2250 --- a/drivers/net/ethernet/freescale/fman/Kconfig
2251 +++ b/drivers/net/ethernet/freescale/fman/Kconfig
2252 @@ -8,3 +8,31 @@ config FSL_FMAN
2253 help
2254 Freescale Data-Path Acceleration Architecture Frame Manager
2255 (FMan) support
2256 +
2257 +config DPAA_ERRATUM_A050385
2258 + bool
2259 + depends on ARM64 && FSL_DPAA
2260 + default y
2261 + help
2262 + DPAA FMan erratum A050385 software workaround implementation:
2263 + align buffers, data start, and SG fragment length to avoid FMan
2264 + DMA splits.
2265 + FMan DMA reads or writes under heavy traffic load may cause an
2266 + FMan internal resource leak, stopping further packet processing.
2267 + The FMan internal queue can overflow when the FMan splits single
2268 + read or write transactions into multiple smaller transactions
2269 + such that more than 17 AXI transactions are in flight from the
2270 + FMan to the interconnect. When the FMan internal queue overflows,
2271 + it can stall further packet processing. The issue can occur with
2272 + any one of the following three conditions:
2273 + 1. An FMan AXI transaction crosses a 4K address boundary
2274 + (Erratum A010022)
2275 + 2. The FMan DMA address for an AXI transaction is not 16-byte
2276 + aligned, i.e. the last 4 bits of the address are non-zero
2277 + 3. Scatter-Gather (SG) frames have more than one SG buffer in
2278 + the SG list, and any buffer except the last in the SG list
2279 + has a data size that is not a multiple of 16 bytes, i.e.,
2280 + other than 16, 32, 48, 64, etc.
2281 + With any one of the above three conditions present, there is a
2282 + likelihood of stalled FMan packet processing, especially under
2283 + stress with multiple ports injecting line-rate traffic.
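
For illustration, the three trigger conditions translate directly into address/length checks. A hedged standalone sketch with illustrative values only; the actual workaround lives in the dpaa_eth driver and is more involved than this:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool crosses_4k(uint64_t addr, uint64_t len)
{
	return (addr >> 12) != ((addr + len - 1) >> 12);   /* condition 1 */
}

static bool misaligned_16(uint64_t addr)
{
	return addr & 0xf;                                 /* condition 2 */
}

static bool bad_sg_len(uint64_t len, bool is_last)
{
	return !is_last && (len & 0xf);                    /* condition 3 */
}

int main(void)
{
	printf("crosses 4K: %d\n", crosses_4k(0x1ff0, 0x40));  /* 1 */
	printf("misaligned: %d\n", misaligned_16(0x2008));     /* 1 */
	printf("bad SG len: %d\n", bad_sg_len(100, false));    /* 1 */
	return 0;
}
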
2284 diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
2285 index 210749bf1eac..4c2fa13a7dd7 100644
2286 --- a/drivers/net/ethernet/freescale/fman/fman.c
2287 +++ b/drivers/net/ethernet/freescale/fman/fman.c
2288 @@ -1,5 +1,6 @@
2289 /*
2290 * Copyright 2008-2015 Freescale Semiconductor Inc.
2291 + * Copyright 2020 NXP
2292 *
2293 * Redistribution and use in source and binary forms, with or without
2294 * modification, are permitted provided that the following conditions are met:
2295 @@ -566,6 +567,10 @@ struct fman_cfg {
2296 u32 qmi_def_tnums_thresh;
2297 };
2298
2299 +#ifdef CONFIG_DPAA_ERRATUM_A050385
2300 +static bool fman_has_err_a050385;
2301 +#endif
2302 +
2303 static irqreturn_t fman_exceptions(struct fman *fman,
2304 enum fman_exceptions exception)
2305 {
2306 @@ -2514,6 +2519,14 @@ struct fman *fman_bind(struct device *fm_dev)
2307 }
2308 EXPORT_SYMBOL(fman_bind);
2309
2310 +#ifdef CONFIG_DPAA_ERRATUM_A050385
2311 +bool fman_has_errata_a050385(void)
2312 +{
2313 + return fman_has_err_a050385;
2314 +}
2315 +EXPORT_SYMBOL(fman_has_errata_a050385);
2316 +#endif
2317 +
2318 static irqreturn_t fman_err_irq(int irq, void *handle)
2319 {
2320 struct fman *fman = (struct fman *)handle;
2321 @@ -2841,6 +2854,11 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
2322 goto fman_free;
2323 }
2324
2325 +#ifdef CONFIG_DPAA_ERRATUM_A050385
2326 + fman_has_err_a050385 =
2327 + of_property_read_bool(fm_node, "fsl,erratum-a050385");
2328 +#endif
2329 +
2330 return fman;
2331
2332 fman_node_put:
2333 diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
2334 index 935c317fa696..f2ede1360f03 100644
2335 --- a/drivers/net/ethernet/freescale/fman/fman.h
2336 +++ b/drivers/net/ethernet/freescale/fman/fman.h
2337 @@ -1,5 +1,6 @@
2338 /*
2339 * Copyright 2008-2015 Freescale Semiconductor Inc.
2340 + * Copyright 2020 NXP
2341 *
2342 * Redistribution and use in source and binary forms, with or without
2343 * modification, are permitted provided that the following conditions are met:
2344 @@ -398,6 +399,10 @@ u16 fman_get_max_frm(void);
2345
2346 int fman_get_rx_extra_headroom(void);
2347
2348 +#ifdef CONFIG_DPAA_ERRATUM_A050385
2349 +bool fman_has_errata_a050385(void);
2350 +#endif
2351 +
2352 struct fman *fman_bind(struct device *dev);
2353
2354 #endif /* __FM_H */
2355 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2356 index 0c8d2269bc46..403e0f089f2a 100644
2357 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2358 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
2359 @@ -1596,7 +1596,7 @@ static int hns3_setup_tc(struct net_device *netdev, void *type_data)
2360 netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
2361
2362 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
2363 - kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
2364 + kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
2365 }
2366
2367 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
2368 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
2369 index 6b1a81df1465..a10ae28ebc8a 100644
2370 --- a/drivers/net/ethernet/marvell/mvneta.c
2371 +++ b/drivers/net/ethernet/marvell/mvneta.c
2372 @@ -2804,11 +2804,10 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
2373 /* For the case where the last mvneta_poll did not process all
2374 * RX packets
2375 */
2376 - rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2377 -
2378 cause_rx_tx |= pp->neta_armada3700 ? pp->cause_rx_tx :
2379 port->cause_rx_tx;
2380
2381 + rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
2382 if (rx_queue) {
2383 rx_queue = rx_queue - 1;
2384 if (pp->bm_priv)
2385 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
2386 index 9c8427698238..55ceabf077b2 100644
2387 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
2388 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
2389 @@ -371,6 +371,7 @@ enum {
2390
2391 struct mlx5e_sq_wqe_info {
2392 u8 opcode;
2393 + u8 num_wqebbs;
2394
2395 /* Auxiliary data for different opcodes. */
2396 union {
2397 @@ -1058,6 +1059,7 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state);
2398 void mlx5e_activate_rq(struct mlx5e_rq *rq);
2399 void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
2400 void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
2401 +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq);
2402 void mlx5e_activate_icosq(struct mlx5e_icosq *icosq);
2403 void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq);
2404
2405 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
2406 index d3693fa547ac..e54f70d9af22 100644
2407 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
2408 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
2409 @@ -10,8 +10,7 @@
2410
2411 static inline bool cqe_syndrome_needs_recover(u8 syndrome)
2412 {
2413 - return syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR ||
2414 - syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
2415 + return syndrome == MLX5_CQE_SYNDROME_LOCAL_QP_OP_ERR ||
2416 syndrome == MLX5_CQE_SYNDROME_LOCAL_PROT_ERR ||
2417 syndrome == MLX5_CQE_SYNDROME_WR_FLUSH_ERR;
2418 }
2419 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
2420 index b860569d4247..9fa4b98001d5 100644
2421 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
2422 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
2423 @@ -90,7 +90,7 @@ static int mlx5e_rx_reporter_err_icosq_cqe_recover(void *ctx)
2424 goto out;
2425
2426 mlx5e_reset_icosq_cc_pc(icosq);
2427 - mlx5e_free_rx_descs(rq);
2428 + mlx5e_free_rx_in_progress_descs(rq);
2429 clear_bit(MLX5E_SQ_STATE_RECOVERING, &icosq->state);
2430 mlx5e_activate_icosq(icosq);
2431 mlx5e_activate_rq(rq);
2432 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2433 index a226277b0980..f07b1399744e 100644
2434 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2435 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
2436 @@ -181,10 +181,12 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
2437
2438 static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
2439 {
2440 - if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
2441 + if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
2442 mlx5_wq_ll_reset(&rq->mpwqe.wq);
2443 - else
2444 + rq->mpwqe.actual_wq_head = 0;
2445 + } else {
2446 mlx5_wq_cyc_reset(&rq->wqe.wq);
2447 + }
2448 }
2449
2450 /* SW parser related functions */
2451 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2452 index e5e91cbcbc31..ee7c753e9ea0 100644
2453 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2454 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2455 @@ -824,6 +824,29 @@ int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
2456 return -ETIMEDOUT;
2457 }
2458
2459 +void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
2460 +{
2461 + struct mlx5_wq_ll *wq;
2462 + u16 head;
2463 + int i;
2464 +
2465 + if (rq->wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
2466 + return;
2467 +
2468 + wq = &rq->mpwqe.wq;
2469 + head = wq->head;
2470 +
2471 + /* Outstanding UMR WQEs (in progress) start at wq->head */
2472 + for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
2473 + rq->dealloc_wqe(rq, head);
2474 + head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
2475 + }
2476 +
2477 + rq->mpwqe.actual_wq_head = wq->head;
2478 + rq->mpwqe.umr_in_progress = 0;
2479 + rq->mpwqe.umr_completed = 0;
2480 +}
2481 +
2482 void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
2483 {
2484 __be16 wqe_ix_be;
2485 @@ -831,14 +854,8 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
2486
2487 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
2488 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
2489 - u16 head = wq->head;
2490 - int i;
2491
2492 - /* Outstanding UMR WQEs (in progress) start at wq->head */
2493 - for (i = 0; i < rq->mpwqe.umr_in_progress; i++) {
2494 - rq->dealloc_wqe(rq, head);
2495 - head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
2496 - }
2497 + mlx5e_free_rx_in_progress_descs(rq);
2498
2499 while (!mlx5_wq_ll_is_empty(wq)) {
2500 struct mlx5e_rx_wqe_ll *wqe;
2501 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2502 index 82cffb3a9964..1d295a7afc8c 100644
2503 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2504 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
2505 @@ -477,6 +477,7 @@ static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq,
2506 /* fill sq frag edge with nops to avoid wqe wrapping two pages */
2507 for (; wi < edge_wi; wi++) {
2508 wi->opcode = MLX5_OPCODE_NOP;
2509 + wi->num_wqebbs = 1;
2510 mlx5e_post_nop(wq, sq->sqn, &sq->pc);
2511 }
2512 }
2513 @@ -525,6 +526,7 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
2514 umr_wqe->uctrl.xlt_offset = cpu_to_be16(xlt_offset);
2515
2516 sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_UMR;
2517 + sq->db.ico_wqe[pi].num_wqebbs = MLX5E_UMR_WQEBBS;
2518 sq->db.ico_wqe[pi].umr.rq = rq;
2519 sq->pc += MLX5E_UMR_WQEBBS;
2520
2521 @@ -628,17 +630,14 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
2522
2523 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
2524 wi = &sq->db.ico_wqe[ci];
2525 + sqcc += wi->num_wqebbs;
2526
2527 - if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
2528 - sqcc += MLX5E_UMR_WQEBBS;
2529 + if (likely(wi->opcode == MLX5_OPCODE_UMR))
2530 wi->umr.rq->mpwqe.umr_completed++;
2531 - } else if (likely(wi->opcode == MLX5_OPCODE_NOP)) {
2532 - sqcc++;
2533 - } else {
2534 + else if (unlikely(wi->opcode != MLX5_OPCODE_NOP))
2535 netdev_WARN_ONCE(cq->channel->netdev,
2536 "Bad OPCODE in ICOSQ WQE info: 0x%x\n",
2537 wi->opcode);
2538 - }
2539
2540 } while (!last_wqe);
2541
2542 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
2543 index 257a7c9f7a14..800d34ed8a96 100644
2544 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
2545 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
2546 @@ -78,6 +78,7 @@ void mlx5e_trigger_irq(struct mlx5e_icosq *sq)
2547 u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
2548
2549 sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
2550 + sq->db.ico_wqe[pi].num_wqebbs = 1;
2551 nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
2552 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
2553 }
2554 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
2555 index 004c56c2fc0c..b2dfa2b5366f 100644
2556 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
2557 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
2558 @@ -930,7 +930,6 @@ static int dr_actions_l2_rewrite(struct mlx5dr_domain *dmn,
2559
2560 action->rewrite.data = (void *)ops;
2561 action->rewrite.num_of_actions = i;
2562 - action->rewrite.chunk->byte_size = i * sizeof(*ops);
2563
2564 ret = mlx5dr_send_postsend_action(dmn, action);
2565 if (ret) {
2566 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2567 index c7f10d4f8f8d..095ec7b1399d 100644
2568 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2569 +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
2570 @@ -558,7 +558,8 @@ int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
2571 int ret;
2572
2573 send_info.write.addr = (uintptr_t)action->rewrite.data;
2574 - send_info.write.length = action->rewrite.chunk->byte_size;
2575 + send_info.write.length = action->rewrite.num_of_actions *
2576 + DR_MODIFY_ACTION_SIZE;
2577 send_info.write.lkey = 0;
2578 send_info.remote_addr = action->rewrite.chunk->mr_addr;
2579 send_info.rkey = action->rewrite.chunk->rkey;
2580 diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2581 index 615455a21567..f3d1f9411d10 100644
2582 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
2583 +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
2584 @@ -1318,36 +1318,64 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
2585 mbox->mapaddr);
2586 }
2587
2588 -static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
2589 - const struct pci_device_id *id)
2590 +static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci,
2591 + const struct pci_device_id *id,
2592 + u32 *p_sys_status)
2593 {
2594 unsigned long end;
2595 - char mrsr_pl[MLXSW_REG_MRSR_LEN];
2596 - int err;
2597 + u32 val;
2598
2599 - mlxsw_reg_mrsr_pack(mrsr_pl);
2600 - err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
2601 - if (err)
2602 - return err;
2603 if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
2604 msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
2605 return 0;
2606 }
2607
2608 - /* We must wait for the HW to become responsive once again. */
2609 + /* We must wait for the HW to become responsive. */
2610 msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);
2611
2612 end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
2613 do {
2614 - u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
2615 -
2616 + val = mlxsw_pci_read32(mlxsw_pci, FW_READY);
2617 if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
2618 return 0;
2619 cond_resched();
2620 } while (time_before(jiffies, end));
2621 +
2622 + *p_sys_status = val & MLXSW_PCI_FW_READY_MASK;
2623 +
2624 return -EBUSY;
2625 }
2626
2627 +static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
2628 + const struct pci_device_id *id)
2629 +{
2630 + struct pci_dev *pdev = mlxsw_pci->pdev;
2631 + char mrsr_pl[MLXSW_REG_MRSR_LEN];
2632 + u32 sys_status;
2633 + int err;
2634 +
2635 + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
2636 + if (err) {
2637 + dev_err(&pdev->dev, "Failed to reach system ready status before reset. Status is 0x%x\n",
2638 + sys_status);
2639 + return err;
2640 + }
2641 +
2642 + mlxsw_reg_mrsr_pack(mrsr_pl);
2643 + err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
2644 + if (err)
2645 + return err;
2646 +
2647 + err = mlxsw_pci_sys_ready_wait(mlxsw_pci, id, &sys_status);
2648 + if (err) {
2649 + dev_err(&pdev->dev, "Failed to reach system ready status after reset. Status is 0x%x\n",
2650 + sys_status);
2651 + return err;
2652 + }
2653 +
2654 + return 0;
2655 +}
2656 +
2657 static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
2658 {
2659 int err;
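
mlxsw_pci_sys_ready_wait above is a poll-until-ready loop: sample the FW_READY status until it reports the expected magic value or a deadline passes, and hand back the last observed value for the error message. A standalone sketch of the pattern with a faked register read and stand-in magic/mask values:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define READY_MAGIC 0x5e
#define READY_MASK  0xff

static uint32_t read_fw_ready(void)
{
	static int calls;
	return ++calls > 3 ? READY_MAGIC : 0;   /* becomes ready on 4th read */
}

static int wait_sys_ready(int timeout_ms, uint32_t *p_status)
{
	struct timespec start, now;
	uint32_t val;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		val = read_fw_ready();
		if ((val & READY_MASK) == READY_MAGIC)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000 +
		 (now.tv_nsec - start.tv_nsec) / 1000000 < timeout_ms);

	*p_status = val & READY_MASK;   /* report what we last saw */
	return -1;
}

int main(void)
{
	uint32_t status = 0;
	printf("ready: %d\n", wait_sys_ready(100, &status));
	return 0;
}
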
2660 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
2661 index 54275624718b..336e5ecc68f8 100644
2662 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
2663 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
2664 @@ -637,12 +637,12 @@ static int mlxsw_sp_mr_vif_resolve(struct mlxsw_sp_mr_table *mr_table,
2665 return 0;
2666
2667 err_erif_unresolve:
2668 - list_for_each_entry_from_reverse(erve, &mr_vif->route_evif_list,
2669 - vif_node)
2670 + list_for_each_entry_continue_reverse(erve, &mr_vif->route_evif_list,
2671 + vif_node)
2672 mlxsw_sp_mr_route_evif_unresolve(mr_table, erve);
2673 err_irif_unresolve:
2674 - list_for_each_entry_from_reverse(irve, &mr_vif->route_ivif_list,
2675 - vif_node)
2676 + list_for_each_entry_continue_reverse(irve, &mr_vif->route_ivif_list,
2677 + vif_node)
2678 mlxsw_sp_mr_route_ivif_unresolve(mr_table, irve);
2679 mr_vif->rif = NULL;
2680 return err;
2681 diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
2682 index a2cef6a004e7..5ebfc3e66331 100644
2683 --- a/drivers/net/ethernet/realtek/r8169_main.c
2684 +++ b/drivers/net/ethernet/realtek/r8169_main.c
2685 @@ -6812,7 +6812,7 @@ static int rtl_alloc_irq(struct rtl8169_private *tp)
2686 RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
2687 rtl_lock_config_regs(tp);
2688 /* fall through */
2689 - case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_24:
2690 + case RTL_GIGA_MAC_VER_07 ... RTL_GIGA_MAC_VER_17:
2691 flags = PCI_IRQ_LEGACY;
2692 break;
2693 default:
2694 @@ -6903,6 +6903,13 @@ static int r8169_mdio_register(struct rtl8169_private *tp)
2695 if (!tp->phydev) {
2696 mdiobus_unregister(new_bus);
2697 return -ENODEV;
2698 + } else if (!tp->phydev->drv) {
2699 + /* Most chip versions fail with the genphy driver.
2700 + * Therefore ensure that the dedicated PHY driver is loaded.
2701 + */
2702 + dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
2703 + mdiobus_unregister(new_bus);
2704 + return -EUNATCH;
2705 }
2706
2707 /* PHY will be woken up in rtl_open() */
2708 @@ -7064,15 +7071,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2709 int chipset, region;
2710 int jumbo_max, rc;
2711
2712 - /* Some tools for creating an initramfs don't consider softdeps, then
2713 - * r8169.ko may be in initramfs, but realtek.ko not. Then the generic
2714 - * PHY driver is used that doesn't work with most chip versions.
2715 - */
2716 - if (!driver_find("RTL8201CP Ethernet", &mdio_bus_type)) {
2717 - dev_err(&pdev->dev, "realtek.ko not loaded, maybe it needs to be added to initramfs?\n");
2718 - return -ENOENT;
2719 - }
2720 -
2721 dev = devm_alloc_etherdev(&pdev->dev, sizeof (*tp));
2722 if (!dev)
2723 return -ENOMEM;
2724 diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
2725 index c56fcbb37066..38767d797914 100644
2726 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
2727 +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
2728 @@ -2279,7 +2279,7 @@ static int __init sxgbe_cmdline_opt(char *str)
2729 if (!str || !*str)
2730 return -EINVAL;
2731 while ((opt = strsep(&str, ",")) != NULL) {
2732 - if (!strncmp(opt, "eee_timer:", 6)) {
2733 + if (!strncmp(opt, "eee_timer:", 10)) {
2734 if (kstrtoint(opt + 10, 0, &eee_timer))
2735 goto err;
2736 }
2737 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2738 index e2e469c37a4d..9f9aaa47a8dc 100644
2739 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2740 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2741 @@ -1411,7 +1411,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
2742
2743 ret = rk_gmac_clk_init(plat_dat);
2744 if (ret)
2745 - return ret;
2746 + goto err_remove_config_dt;
2747
2748 ret = rk_gmac_powerup(plat_dat->bsp_priv);
2749 if (ret)
2750 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
2751 index 5c6b7fc04ea6..730ab57201bd 100644
2752 --- a/drivers/net/geneve.c
2753 +++ b/drivers/net/geneve.c
2754 @@ -1845,8 +1845,6 @@ static void geneve_destroy_tunnels(struct net *net, struct list_head *head)
2755 if (!net_eq(dev_net(geneve->dev), net))
2756 unregister_netdevice_queue(geneve->dev, head);
2757 }
2758 -
2759 - WARN_ON_ONCE(!list_empty(&gn->sock_list));
2760 }
2761
2762 static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
2763 @@ -1861,6 +1859,12 @@ static void __net_exit geneve_exit_batch_net(struct list_head *net_list)
2764 /* unregister the devices gathered above */
2765 unregister_netdevice_many(&list);
2766 rtnl_unlock();
2767 +
2768 + list_for_each_entry(net, net_list, exit_list) {
2769 + const struct geneve_net *gn = net_generic(net, geneve_net_id);
2770 +
2771 + WARN_ON_ONCE(!list_empty(&gn->sock_list));
2772 + }
2773 }
2774
2775 static struct pernet_operations geneve_net_ops = {
2776 diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
2777 index 242b9b0943f8..7fe306e76281 100644
2778 --- a/drivers/net/ifb.c
2779 +++ b/drivers/net/ifb.c
2780 @@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
2781 }
2782
2783 while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
2784 - skb->tc_redirected = 0;
2785 + skb->redirected = 0;
2786 skb->tc_skip_classify = 1;
2787
2788 u64_stats_update_begin(&txp->tsync);
2789 @@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
2790 rcu_read_unlock();
2791 skb->skb_iif = txp->dev->ifindex;
2792
2793 - if (!skb->tc_from_ingress) {
2794 + if (!skb->from_ingress) {
2795 dev_queue_xmit(skb);
2796 } else {
2797 skb_pull_rcsum(skb, skb->mac_len);
2798 @@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
2799 txp->rx_bytes += skb->len;
2800 u64_stats_update_end(&txp->rsync);
2801
2802 - if (!skb->tc_redirected || !skb->skb_iif) {
2803 + if (!skb->redirected || !skb->skb_iif) {
2804 dev_kfree_skb(skb);
2805 dev->stats.rx_dropped++;
2806 return NETDEV_TX_OK;
2807 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
2808 index 6497a5c45220..32c627702ac5 100644
2809 --- a/drivers/net/macsec.c
2810 +++ b/drivers/net/macsec.c
2811 @@ -16,6 +16,7 @@
2812 #include <net/genetlink.h>
2813 #include <net/sock.h>
2814 #include <net/gro_cells.h>
2815 +#include <linux/if_arp.h>
2816
2817 #include <uapi/linux/if_macsec.h>
2818
2819 @@ -3236,6 +3237,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
2820 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
2821 if (!real_dev)
2822 return -ENODEV;
2823 + if (real_dev->type != ARPHRD_ETHER)
2824 + return -EINVAL;
2825
2826 dev->priv_flags |= IFF_MACSEC;
2827
2828 diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
2829 index 52e80434e45e..31a559513362 100644
2830 --- a/drivers/net/phy/dp83867.c
2831 +++ b/drivers/net/phy/dp83867.c
2832 @@ -25,7 +25,8 @@
2833 #define DP83867_CFG3 0x1e
2834
2835 /* Extended Registers */
2836 -#define DP83867_CFG4 0x0031
2837 +#define DP83867_FLD_THR_CFG 0x002e
2838 +#define DP83867_CFG4 0x0031
2839 #define DP83867_CFG4_SGMII_ANEG_MASK (BIT(5) | BIT(6))
2840 #define DP83867_CFG4_SGMII_ANEG_TIMER_11MS (3 << 5)
2841 #define DP83867_CFG4_SGMII_ANEG_TIMER_800US (2 << 5)
2842 @@ -74,6 +75,7 @@
2843 #define DP83867_STRAP_STS2_CLK_SKEW_RX_MASK GENMASK(2, 0)
2844 #define DP83867_STRAP_STS2_CLK_SKEW_RX_SHIFT 0
2845 #define DP83867_STRAP_STS2_CLK_SKEW_NONE BIT(2)
2846 +#define DP83867_STRAP_STS2_STRAP_FLD BIT(10)
2847
2848 /* PHY CTRL bits */
2849 #define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
2850 @@ -103,6 +105,9 @@
2851 /* CFG4 bits */
2852 #define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
2853
2854 +/* FLD_THR_CFG */
2855 +#define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7
2856 +
2857 enum {
2858 DP83867_PORT_MIRROING_KEEP,
2859 DP83867_PORT_MIRROING_EN,
2860 @@ -318,6 +323,20 @@ static int dp83867_config_init(struct phy_device *phydev)
2861 phy_clear_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
2862 BIT(7));
2863
2864 + bs = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_STRAP_STS2);
2865 + if (bs & DP83867_STRAP_STS2_STRAP_FLD) {
2866 + /* When using strap to enable FLD, the ENERGY_LOST_FLD_THR will
2867 + * be set to 0x2. This may cause the PHY link to be unstable -
2868 + * the default value 0x1 needs to be restored.
2869 + */
2870 + ret = phy_modify_mmd(phydev, DP83867_DEVADDR,
2871 + DP83867_FLD_THR_CFG,
2872 + DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK,
2873 + 0x1);
2874 + if (ret)
2875 + return ret;
2876 + }
2877 +
2878 if (phy_interface_is_rgmii(phydev)) {
2879 val = phy_read(phydev, MII_DP83867_PHYCTRL);
2880 if (val < 0)
2881 diff --git a/drivers/net/phy/mdio-bcm-unimac.c b/drivers/net/phy/mdio-bcm-unimac.c
2882 index 4a28fb29adaa..fbd36891ee64 100644
2883 --- a/drivers/net/phy/mdio-bcm-unimac.c
2884 +++ b/drivers/net/phy/mdio-bcm-unimac.c
2885 @@ -242,11 +242,9 @@ static int unimac_mdio_probe(struct platform_device *pdev)
2886 return -ENOMEM;
2887 }
2888
2889 - priv->clk = devm_clk_get(&pdev->dev, NULL);
2890 - if (PTR_ERR(priv->clk) == -EPROBE_DEFER)
2891 + priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
2892 + if (IS_ERR(priv->clk))
2893 return PTR_ERR(priv->clk);
2894 - else
2895 - priv->clk = NULL;
2896
2897 ret = clk_prepare_enable(priv->clk);
2898 if (ret)
2899 diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
2900 index 88d409e48c1f..aad6809ebe39 100644
2901 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c
2902 +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
2903 @@ -288,8 +288,13 @@ static int mdio_mux_iproc_suspend(struct device *dev)
2904 static int mdio_mux_iproc_resume(struct device *dev)
2905 {
2906 struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
2907 + int rc;
2908
2909 - clk_prepare_enable(md->core_clk);
2910 + rc = clk_prepare_enable(md->core_clk);
2911 + if (rc) {
2912 + dev_err(md->dev, "failed to enable core clk\n");
2913 + return rc;
2914 + }
2915 mdio_mux_iproc_config(md);
2916
2917 return 0;
2918 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
2919 index 5754bb6ca0ee..6c738a271257 100644
2920 --- a/drivers/net/usb/qmi_wwan.c
2921 +++ b/drivers/net/usb/qmi_wwan.c
2922 @@ -1210,6 +1210,7 @@ static const struct usb_device_id products[] = {
2923 {QMI_FIXED_INTF(0x1435, 0xd182, 5)}, /* Wistron NeWeb D18 */
2924 {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */
2925 {QMI_QUIRK_SET_DTR(0x1508, 0x1001, 4)}, /* Fibocom NL668 series */
2926 + {QMI_FIXED_INTF(0x1690, 0x7588, 4)}, /* ASKEY WWHC050 */
2927 {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */
2928 {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */
2929 {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */
2930 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2931 index 283dfeb406ad..93690f77ec9c 100644
2932 --- a/drivers/net/vxlan.c
2933 +++ b/drivers/net/vxlan.c
2934 @@ -2779,10 +2779,19 @@ static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan,
2935 /* Setup stats when device is created */
2936 static int vxlan_init(struct net_device *dev)
2937 {
2938 + struct vxlan_dev *vxlan = netdev_priv(dev);
2939 + int err;
2940 +
2941 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
2942 if (!dev->tstats)
2943 return -ENOMEM;
2944
2945 + err = gro_cells_init(&vxlan->gro_cells, dev);
2946 + if (err) {
2947 + free_percpu(dev->tstats);
2948 + return err;
2949 + }
2950 +
2951 return 0;
2952 }
2953
2954 @@ -3043,8 +3052,6 @@ static void vxlan_setup(struct net_device *dev)
2955
2956 vxlan->dev = dev;
2957
2958 - gro_cells_init(&vxlan->gro_cells, dev);
2959 -
2960 for (h = 0; h < FDB_HASH_SIZE; ++h) {
2961 spin_lock_init(&vxlan->hash_lock[h]);
2962 INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
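
Moving gro_cells_init() from vxlan_setup() into vxlan_init() matters because the .ndo_init hook can return an error and must unwind the earlier tstats allocation on failure, whereas the setup callback returns void and would swallow the failure. A standalone model of that unwind pattern, with plain malloc standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>

struct dev_model { void *tstats; void *gro_cells; };

static int dev_init(struct dev_model *d)
{
	d->tstats = malloc(64);
	if (!d->tstats)
		return -1;
	d->gro_cells = malloc(64);
	if (!d->gro_cells) {            /* unwind the first allocation */
		free(d->tstats);
		d->tstats = NULL;
		return -1;
	}
	return 0;                       /* both resources live, or neither */
}

int main(void)
{
	struct dev_model d = { 0 };
	printf("init: %d\n", dev_init(&d));
	free(d.gro_cells);
	free(d.tstats);
	return 0;
}
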
2963 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2964 index c59cbb8cbdd7..c54fe6650018 100644
2965 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2966 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2967 @@ -1181,7 +1181,7 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
2968
2969 static int iwl_mvm_ppag_init(struct iwl_mvm *mvm)
2970 {
2971 - return -ENOENT;
2972 + return 0;
2973 }
2974 #endif /* CONFIG_ACPI */
2975
2976 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
2977 index 917729807514..e17f70b4d199 100644
2978 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
2979 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.h
2980 @@ -561,6 +561,7 @@ static inline void clear_pci_tx_desc_content(__le32 *__pdesc, int _size)
2981 rxmcs == DESC92C_RATE11M)
2982
2983 struct phy_status_rpt {
2984 + u8 padding[2];
2985 u8 ch_corr[2];
2986 u8 cck_sig_qual_ofdm_pwdb_all;
2987 u8 cck_agc_rpt_ofdm_cfosho_a;
2988 diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c
2989 index 0cc9ac856fe2..ed2123129e0e 100644
2990 --- a/drivers/nfc/fdp/fdp.c
2991 +++ b/drivers/nfc/fdp/fdp.c
2992 @@ -184,7 +184,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
2993 const struct firmware *fw;
2994 struct sk_buff *skb;
2995 unsigned long len;
2996 - u8 max_size, payload_size;
2997 + int max_size, payload_size;
2998 int rc = 0;
2999
3000 if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) ||
3001 @@ -207,8 +207,7 @@ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type)
3002
3003 while (len) {
3004
3005 - payload_size = min_t(unsigned long, (unsigned long) max_size,
3006 - len);
3007 + payload_size = min_t(unsigned long, max_size, len);
3008
3009 skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size),
3010 GFP_KERNEL);
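
Storing the queried size in a u8 both truncated large values and turned a negative errno into a large positive count, so no later "is this an error" test could ever fire; int preserves range and sign. A runnable userspace illustration of that hazard (query_max_payload() is a stand-in for the NCI helper):

    #include <stdio.h>

    /* Stand-in for an API returning a payload size or a negative errno. */
    static int query_max_payload(void)
    {
        return -22;     /* -EINVAL */
    }

    int main(void)
    {
        unsigned char as_u8 = query_max_payload();  /* -22 becomes 234 */
        int as_int = query_max_payload();

        printf("u8:  %u  (error sign lost)\n", (unsigned)as_u8);
        printf("int: %d (error preserved)\n", as_int);
        return 0;
    }
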
3011 diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
3012 index bd6129db6417..c34a6df712ad 100644
3013 --- a/drivers/of/of_mdio.c
3014 +++ b/drivers/of/of_mdio.c
3015 @@ -268,6 +268,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
3016 rc = of_mdiobus_register_phy(mdio, child, addr);
3017 if (rc && rc != -ENODEV)
3018 goto unregister;
3019 + break;
3020 }
3021 }
3022 }
3023 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
3024 index b727d1e34523..fe70e9875bde 100644
3025 --- a/drivers/s390/net/qeth_core_main.c
3026 +++ b/drivers/s390/net/qeth_core_main.c
3027 @@ -1244,7 +1244,6 @@ static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
3028 if (count == 1)
3029 dev_info(&card->gdev->dev, "Priority Queueing not supported\n");
3030
3031 - card->qdio.default_out_queue = single ? 0 : QETH_DEFAULT_QUEUE;
3032 card->qdio.no_out_queues = count;
3033 return 0;
3034 }
3035 @@ -2634,12 +2633,12 @@ static int qeth_init_input_buffer(struct qeth_card *card,
3036 buf->rx_skb = netdev_alloc_skb(card->dev,
3037 QETH_RX_PULL_LEN + ETH_HLEN);
3038 if (!buf->rx_skb)
3039 - return 1;
3040 + return -ENOMEM;
3041 }
3042
3043 pool_entry = qeth_find_free_buffer_pool_entry(card);
3044 if (!pool_entry)
3045 - return 1;
3046 + return -ENOBUFS;
3047
3048 /*
3049 * since the buffer is accessed only from the input_tasklet
3050 @@ -2671,10 +2670,15 @@ int qeth_init_qdio_queues(struct qeth_card *card)
3051 /* inbound queue */
3052 qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
3053 memset(&card->rx, 0, sizeof(struct qeth_rx));
3054 +
3055 qeth_initialize_working_pool_list(card);
3056 /*give only as many buffers to hardware as we have buffer pool entries*/
3057 - for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
3058 - qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3059 + for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
3060 + rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
3061 + if (rc)
3062 + return rc;
3063 + }
3064 +
3065 card->qdio.in_q->next_buf_to_init =
3066 card->qdio.in_buf_pool.buf_count - 1;
3067 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
3068 diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
3069 index 079c04bc448a..7a57b61f0340 100644
3070 --- a/drivers/scsi/ipr.c
3071 +++ b/drivers/scsi/ipr.c
3072 @@ -9947,6 +9947,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
3073 ioa_cfg->max_devs_supported = ipr_max_devs;
3074
3075 if (ioa_cfg->sis64) {
3076 + host->max_channel = IPR_MAX_SIS64_BUSES;
3077 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
3078 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
3079 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
3080 @@ -9955,6 +9956,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
3081 + ((sizeof(struct ipr_config_table_entry64)
3082 * ioa_cfg->max_devs_supported)));
3083 } else {
3084 + host->max_channel = IPR_VSET_BUS;
3085 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
3086 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
3087 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
3088 @@ -9964,7 +9966,6 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
3089 * ioa_cfg->max_devs_supported)));
3090 }
3091
3092 - host->max_channel = IPR_VSET_BUS;
3093 host->unique_id = host->host_no;
3094 host->max_cmd_len = IPR_MAX_CDB_LEN;
3095 host->can_queue = ioa_cfg->max_cmds;
3096 diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
3097 index a67baeb36d1f..b97aa9ac2ffe 100644
3098 --- a/drivers/scsi/ipr.h
3099 +++ b/drivers/scsi/ipr.h
3100 @@ -1300,6 +1300,7 @@ struct ipr_resource_entry {
3101 #define IPR_ARRAY_VIRTUAL_BUS 0x1
3102 #define IPR_VSET_VIRTUAL_BUS 0x2
3103 #define IPR_IOAFP_VIRTUAL_BUS 0x3
3104 +#define IPR_MAX_SIS64_BUSES 0x4
3105
3106 #define IPR_GET_RES_PHYS_LOC(res) \
3107 (((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
3108 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
3109 index ac2e88ec1190..6a2f8bacface 100644
3110 --- a/drivers/scsi/sd.c
3111 +++ b/drivers/scsi/sd.c
3112 @@ -3171,9 +3171,11 @@ static int sd_revalidate_disk(struct gendisk *disk)
3113 if (sd_validate_opt_xfer_size(sdkp, dev_max)) {
3114 q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3115 rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
3116 - } else
3117 + } else {
3118 + q->limits.io_opt = 0;
3119 rw_max = min_not_zero(logical_to_sectors(sdp, dev_max),
3120 (sector_t)BLK_DEF_MAX_SECTORS);
3121 + }
3122
3123 /* Do not exceed controller limit */
3124 rw_max = min(rw_max, queue_max_hw_sectors(q));
3125 diff --git a/drivers/staging/kpc2000/kpc2000/core.c b/drivers/staging/kpc2000/kpc2000/core.c
3126 index 0a23727d0dc3..871441658f0e 100644
3127 --- a/drivers/staging/kpc2000/kpc2000/core.c
3128 +++ b/drivers/staging/kpc2000/kpc2000/core.c
3129 @@ -110,10 +110,10 @@ static ssize_t cpld_reconfigure(struct device *dev,
3130 const char *buf, size_t count)
3131 {
3132 struct kp2000_device *pcard = dev_get_drvdata(dev);
3133 - long wr_val;
3134 + unsigned long wr_val;
3135 int rv;
3136
3137 - rv = kstrtol(buf, 0, &wr_val);
3138 + rv = kstrtoul(buf, 0, &wr_val);
3139 if (rv < 0)
3140 return rv;
3141 if (wr_val > 7)
3142 diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3143 index 845c8817281c..f7f09c0d273f 100644
3144 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3145 +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
3146 @@ -32,6 +32,7 @@ static const struct usb_device_id rtw_usb_id_tbl[] = {
3147 /****** 8188EUS ********/
3148 {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */
3149 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
3150 + {USB_DEVICE(0x0B05, 0x18F0)}, /* ASUS USB-N10 Nano B1 */
3151 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
3152 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
3153 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
3154 diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
3155 index 28d372a0663a..e29c14e0ed49 100644
3156 --- a/drivers/staging/wlan-ng/hfa384x_usb.c
3157 +++ b/drivers/staging/wlan-ng/hfa384x_usb.c
3158 @@ -3374,6 +3374,8 @@ static void hfa384x_int_rxmonitor(struct wlandevice *wlandev,
3159 WLAN_HDR_A4_LEN + WLAN_DATA_MAXLEN + WLAN_CRC_LEN)) {
3160 pr_debug("overlen frm: len=%zd\n",
3161 skblen - sizeof(struct p80211_caphdr));
3162 +
3163 + return;
3164 }
3165
3166 skb = dev_alloc_skb(skblen);
3167 diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
3168 index b5ba176004c1..d8d86761b790 100644
3169 --- a/drivers/staging/wlan-ng/prism2usb.c
3170 +++ b/drivers/staging/wlan-ng/prism2usb.c
3171 @@ -180,6 +180,7 @@ static void prism2sta_disconnect_usb(struct usb_interface *interface)
3172
3173 cancel_work_sync(&hw->link_bh);
3174 cancel_work_sync(&hw->commsqual_bh);
3175 + cancel_work_sync(&hw->usb_work);
3176
3177 /* Now we complete any outstanding commands
3178 * and tell everyone who is waiting for their
3179 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3180 index 47f09a6ce7bd..84d6f7df09a4 100644
3181 --- a/drivers/usb/class/cdc-acm.c
3182 +++ b/drivers/usb/class/cdc-acm.c
3183 @@ -923,16 +923,16 @@ static int set_serial_info(struct tty_struct *tty, struct serial_struct *ss)
3184
3185 mutex_lock(&acm->port.mutex);
3186
3187 - if ((ss->close_delay != old_close_delay) ||
3188 - (ss->closing_wait != old_closing_wait)) {
3189 - if (!capable(CAP_SYS_ADMIN))
3190 + if (!capable(CAP_SYS_ADMIN)) {
3191 + if ((ss->close_delay != old_close_delay) ||
3192 + (ss->closing_wait != old_closing_wait))
3193 retval = -EPERM;
3194 - else {
3195 - acm->port.close_delay = close_delay;
3196 - acm->port.closing_wait = closing_wait;
3197 - }
3198 - } else
3199 - retval = -EOPNOTSUPP;
3200 + else
3201 + retval = -EOPNOTSUPP;
3202 + } else {
3203 + acm->port.close_delay = close_delay;
3204 + acm->port.closing_wait = closing_wait;
3205 + }
3206
3207 mutex_unlock(&acm->port.mutex);
3208 return retval;
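
The old branch order tested "did the values change" before "is the caller privileged", so even a CAP_SYS_ADMIN caller that wrote back the current close_delay/closing_wait got -EOPNOTSUPP. After the swap, privileged callers always get the update applied, while unprivileged callers keep the old results (-EPERM for a change, -EOPNOTSUPP otherwise). A runnable model of the fixed decision order (capable() is reduced to a boolean parameter):

    #include <stdbool.h>
    #include <stdio.h>

    #define EPERM           1
    #define EOPNOTSUPP      95

    static int set_serial_info(bool admin, bool changed)
    {
        if (!admin)
            return changed ? -EPERM : -EOPNOTSUPP;
        /* a real driver would store the new values here */
        return 0;
    }

    int main(void)
    {
        printf("admin, unchanged: %d\n", set_serial_info(true, false));
        printf("user,  unchanged: %d\n", set_serial_info(false, false));
        printf("user,  changed:   %d\n", set_serial_info(false, true));
        return 0;
    }
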
3209 diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
3210 index 5a44b70372d9..fa9922c0c910 100644
3211 --- a/drivers/usb/musb/musb_host.c
3212 +++ b/drivers/usb/musb/musb_host.c
3213 @@ -1462,10 +1462,7 @@ done:
3214 * We need to map sg if the transfer_buffer is
3215 * NULL.
3216 */
3217 - if (!urb->transfer_buffer)
3218 - qh->use_sg = true;
3219 -
3220 - if (qh->use_sg) {
3221 + if (!urb->transfer_buffer) {
3222 /* sg_miter_start is already done in musb_ep_program */
3223 if (!sg_miter_next(&qh->sg_miter)) {
3224 dev_err(musb->controller, "error: sg list empty\n");
3225 @@ -1473,9 +1470,8 @@ done:
3226 status = -EINVAL;
3227 goto done;
3228 }
3229 - urb->transfer_buffer = qh->sg_miter.addr;
3230 length = min_t(u32, length, qh->sg_miter.length);
3231 - musb_write_fifo(hw_ep, length, urb->transfer_buffer);
3232 + musb_write_fifo(hw_ep, length, qh->sg_miter.addr);
3233 qh->sg_miter.consumed = length;
3234 sg_miter_stop(&qh->sg_miter);
3235 } else {
3236 @@ -1484,11 +1480,6 @@ done:
3237
3238 qh->segsize = length;
3239
3240 - if (qh->use_sg) {
3241 - if (offset + length >= urb->transfer_buffer_length)
3242 - qh->use_sg = false;
3243 - }
3244 -
3245 musb_ep_select(mbase, epnum);
3246 musb_writew(epio, MUSB_TXCSR,
3247 MUSB_TXCSR_H_WZC_BITS | MUSB_TXCSR_TXPKTRDY);
3248 @@ -2003,8 +1994,10 @@ finish:
3249 urb->actual_length += xfer_len;
3250 qh->offset += xfer_len;
3251 if (done) {
3252 - if (qh->use_sg)
3253 + if (qh->use_sg) {
3254 qh->use_sg = false;
3255 + urb->transfer_buffer = NULL;
3256 + }
3257
3258 if (urb->status == -EINPROGRESS)
3259 urb->status = status;
3260 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
3261 index 5737add6a2a4..4cca0b836f43 100644
3262 --- a/drivers/usb/serial/io_edgeport.c
3263 +++ b/drivers/usb/serial/io_edgeport.c
3264 @@ -710,7 +710,7 @@ static void edge_interrupt_callback(struct urb *urb)
3265 /* grab the txcredits for the ports if available */
3266 position = 2;
3267 portNumber = 0;
3268 - while ((position < length) &&
3269 + while ((position < length - 1) &&
3270 (portNumber < edge_serial->serial->num_ports)) {
3271 txCredits = data[position] | (data[position+1] << 8);
3272 if (txCredits) {
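
Each credit record consumes two bytes, data[position] and data[position + 1], so the loop guard must keep position + 1 inside the buffer; "position < length" alone allowed a one-byte overread when the interrupt packet had an odd length. A runnable illustration of the corrected bound:

    #include <stdio.h>

    int main(void)
    {
        unsigned char data[5] = { 0, 0, 0x10, 0x00, 0x20 }; /* odd size */
        int length = sizeof(data);
        int position = 2;

        /* "length - 1" guarantees data[position + 1] is in bounds */
        while (position < length - 1) {
            int credits = data[position] | (data[position + 1] << 8);

            printf("credits at %d: %d\n", position, credits);
            position += 2;
        }
        return 0;
    }
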
3273 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3274 index 0b5dcf973d94..8bfffca3e4ae 100644
3275 --- a/drivers/usb/serial/option.c
3276 +++ b/drivers/usb/serial/option.c
3277 @@ -1992,8 +1992,14 @@ static const struct usb_device_id option_ids[] = {
3278 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
3279 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
3280 { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
3281 + { USB_DEVICE_INTERFACE_CLASS(0x1435, 0xd191, 0xff), /* Wistron Neweb D19Q1 */
3282 + .driver_info = RSVD(1) | RSVD(4) },
3283 + { USB_DEVICE_INTERFACE_CLASS(0x1690, 0x7588, 0xff), /* ASKEY WWHC050 */
3284 + .driver_info = RSVD(1) | RSVD(4) },
3285 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2031, 0xff), /* Olicard 600 */
3286 .driver_info = RSVD(4) },
3287 + { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2033, 0xff), /* BroadMobi BM806U */
3288 + .driver_info = RSVD(4) },
3289 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x2060, 0xff), /* BroadMobi BM818 */
3290 .driver_info = RSVD(4) },
3291 { USB_DEVICE_INTERFACE_CLASS(0x2020, 0x4000, 0xff) }, /* OLICARD300 - MT6225 */
3292 diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
3293 index b86195e4dc6c..b378cd780ed5 100644
3294 --- a/fs/afs/cmservice.c
3295 +++ b/fs/afs/cmservice.c
3296 @@ -243,6 +243,17 @@ static void afs_cm_destructor(struct afs_call *call)
3297 call->buffer = NULL;
3298 }
3299
3300 +/*
3301 + * Abort a service call from within an action function.
3302 + */
3303 +static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error,
3304 + const char *why)
3305 +{
3306 + rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
3307 + abort_code, error, why);
3308 + afs_set_call_complete(call, error, 0);
3309 +}
3310 +
3311 /*
3312 * The server supplied a list of callbacks that it wanted to break.
3313 */
3314 @@ -510,8 +521,7 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
3315 if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
3316 afs_send_empty_reply(call);
3317 else
3318 - rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
3319 - 1, 1, "K-1");
3320 + afs_abort_service_call(call, 1, 1, "K-1");
3321
3322 afs_put_call(call);
3323 _leave("");
3324 diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c
3325 index cfe62b154f68..e1b9ed679045 100644
3326 --- a/fs/afs/fs_probe.c
3327 +++ b/fs/afs/fs_probe.c
3328 @@ -145,6 +145,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
3329 read_lock(&server->fs_lock);
3330 ac.alist = rcu_dereference_protected(server->addresses,
3331 lockdep_is_held(&server->fs_lock));
3332 + afs_get_addrlist(ac.alist);
3333 read_unlock(&server->fs_lock);
3334
3335 atomic_set(&server->probe_outstanding, ac.alist->nr_addrs);
3336 @@ -163,6 +164,7 @@ static int afs_do_probe_fileserver(struct afs_net *net,
3337
3338 if (!in_progress)
3339 afs_fs_probe_done(server);
3340 + afs_put_addrlist(ac.alist);
3341 return in_progress;
3342 }
3343
3344 diff --git a/fs/afs/internal.h b/fs/afs/internal.h
3345 index 759e0578012c..d5efb1debebf 100644
3346 --- a/fs/afs/internal.h
3347 +++ b/fs/afs/internal.h
3348 @@ -154,7 +154,7 @@ struct afs_call {
3349 };
3350 unsigned char unmarshall; /* unmarshalling phase */
3351 unsigned char addr_ix; /* Address in ->alist */
3352 - bool incoming; /* T if incoming call */
3353 + bool drop_ref; /* T if need to drop ref for incoming call */
3354 bool send_pages; /* T if data from mapping should be sent */
3355 bool need_attention; /* T if RxRPC poked us */
3356 bool async; /* T if asynchronous */
3357 @@ -1203,8 +1203,16 @@ static inline void afs_set_call_complete(struct afs_call *call,
3358 ok = true;
3359 }
3360 spin_unlock_bh(&call->state_lock);
3361 - if (ok)
3362 + if (ok) {
3363 trace_afs_call_done(call);
3364 +
3365 + /* Asynchronous calls have two refs to release - one from the alloc and
3366 + * one queued with the work item - and we can't just deallocate the
3367 + * call because the work item may be queued again.
3368 + */
3369 + if (call->drop_ref)
3370 + afs_put_call(call);
3371 + }
3372 }
3373
3374 /*
3375 diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
3376 index 61498d9f06ef..ef1d09f8920b 100644
3377 --- a/fs/afs/rxrpc.c
3378 +++ b/fs/afs/rxrpc.c
3379 @@ -18,7 +18,6 @@ struct workqueue_struct *afs_async_calls;
3380
3381 static void afs_wake_up_call_waiter(struct sock *, struct rxrpc_call *, unsigned long);
3382 static void afs_wake_up_async_call(struct sock *, struct rxrpc_call *, unsigned long);
3383 -static void afs_delete_async_call(struct work_struct *);
3384 static void afs_process_async_call(struct work_struct *);
3385 static void afs_rx_new_call(struct sock *, struct rxrpc_call *, unsigned long);
3386 static void afs_rx_discard_new_call(struct rxrpc_call *, unsigned long);
3387 @@ -169,7 +168,7 @@ void afs_put_call(struct afs_call *call)
3388 int n = atomic_dec_return(&call->usage);
3389 int o = atomic_read(&net->nr_outstanding_calls);
3390
3391 - trace_afs_call(call, afs_call_trace_put, n + 1, o,
3392 + trace_afs_call(call, afs_call_trace_put, n, o,
3393 __builtin_return_address(0));
3394
3395 ASSERTCMP(n, >=, 0);
3396 @@ -402,8 +401,10 @@ void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp)
3397 /* If the call is going to be asynchronous, we need an extra ref for
3398 * the call to hold itself so the caller need not hang on to its ref.
3399 */
3400 - if (call->async)
3401 + if (call->async) {
3402 afs_get_call(call, afs_call_trace_get);
3403 + call->drop_ref = true;
3404 + }
3405
3406 /* create a call */
3407 rxcall = rxrpc_kernel_begin_call(call->net->socket, srx, call->key,
3408 @@ -584,8 +585,6 @@ static void afs_deliver_to_call(struct afs_call *call)
3409 done:
3410 if (call->type->done)
3411 call->type->done(call);
3412 - if (state == AFS_CALL_COMPLETE && call->incoming)
3413 - afs_put_call(call);
3414 out:
3415 _leave("");
3416 return;
3417 @@ -604,11 +603,7 @@ call_complete:
3418 long afs_wait_for_call_to_complete(struct afs_call *call,
3419 struct afs_addr_cursor *ac)
3420 {
3421 - signed long rtt2, timeout;
3422 long ret;
3423 - bool stalled = false;
3424 - u64 rtt;
3425 - u32 life, last_life;
3426 bool rxrpc_complete = false;
3427
3428 DECLARE_WAITQUEUE(myself, current);
3429 @@ -619,14 +614,6 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
3430 if (ret < 0)
3431 goto out;
3432
3433 - rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall);
3434 - rtt2 = nsecs_to_jiffies64(rtt) * 2;
3435 - if (rtt2 < 2)
3436 - rtt2 = 2;
3437 -
3438 - timeout = rtt2;
3439 - rxrpc_kernel_check_life(call->net->socket, call->rxcall, &last_life);
3440 -
3441 add_wait_queue(&call->waitq, &myself);
3442 for (;;) {
3443 set_current_state(TASK_UNINTERRUPTIBLE);
3444 @@ -637,37 +624,19 @@ long afs_wait_for_call_to_complete(struct afs_call *call,
3445 call->need_attention = false;
3446 __set_current_state(TASK_RUNNING);
3447 afs_deliver_to_call(call);
3448 - timeout = rtt2;
3449 continue;
3450 }
3451
3452 if (afs_check_call_state(call, AFS_CALL_COMPLETE))
3453 break;
3454
3455 - if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall, &life)) {
3456 + if (!rxrpc_kernel_check_life(call->net->socket, call->rxcall)) {
3457 /* rxrpc terminated the call. */
3458 rxrpc_complete = true;
3459 break;
3460 }
3461
3462 - if (call->intr && timeout == 0 &&
3463 - life == last_life && signal_pending(current)) {
3464 - if (stalled)
3465 - break;
3466 - __set_current_state(TASK_RUNNING);
3467 - rxrpc_kernel_probe_life(call->net->socket, call->rxcall);
3468 - timeout = rtt2;
3469 - stalled = true;
3470 - continue;
3471 - }
3472 -
3473 - if (life != last_life) {
3474 - timeout = rtt2;
3475 - last_life = life;
3476 - stalled = false;
3477 - }
3478 -
3479 - timeout = schedule_timeout(timeout);
3480 + schedule();
3481 }
3482
3483 remove_wait_queue(&call->waitq, &myself);
3484 @@ -735,7 +704,7 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
3485
3486 u = atomic_fetch_add_unless(&call->usage, 1, 0);
3487 if (u != 0) {
3488 - trace_afs_call(call, afs_call_trace_wake, u,
3489 + trace_afs_call(call, afs_call_trace_wake, u + 1,
3490 atomic_read(&call->net->nr_outstanding_calls),
3491 __builtin_return_address(0));
3492
3493 @@ -744,21 +713,6 @@ static void afs_wake_up_async_call(struct sock *sk, struct rxrpc_call *rxcall,
3494 }
3495 }
3496
3497 -/*
3498 - * Delete an asynchronous call. The work item carries a ref to the call struct
3499 - * that we need to release.
3500 - */
3501 -static void afs_delete_async_call(struct work_struct *work)
3502 -{
3503 - struct afs_call *call = container_of(work, struct afs_call, async_work);
3504 -
3505 - _enter("");
3506 -
3507 - afs_put_call(call);
3508 -
3509 - _leave("");
3510 -}
3511 -
3512 /*
3513 * Perform I/O processing on an asynchronous call. The work item carries a ref
3514 * to the call struct that we either need to release or to pass on.
3515 @@ -774,16 +728,6 @@ static void afs_process_async_call(struct work_struct *work)
3516 afs_deliver_to_call(call);
3517 }
3518
3519 - if (call->state == AFS_CALL_COMPLETE) {
3520 - /* We have two refs to release - one from the alloc and one
3521 - * queued with the work item - and we can't just deallocate the
3522 - * call because the work item may be queued again.
3523 - */
3524 - call->async_work.func = afs_delete_async_call;
3525 - if (!queue_work(afs_async_calls, &call->async_work))
3526 - afs_put_call(call);
3527 - }
3528 -
3529 afs_put_call(call);
3530 _leave("");
3531 }
3532 @@ -810,6 +754,7 @@ void afs_charge_preallocation(struct work_struct *work)
3533 if (!call)
3534 break;
3535
3536 + call->drop_ref = true;
3537 call->async = true;
3538 call->state = AFS_CALL_SV_AWAIT_OP_ID;
3539 init_waitqueue_head(&call->waitq);
3540 diff --git a/fs/ceph/file.c b/fs/ceph/file.c
3541 index cd09e63d682b..ce54a1b12819 100644
3542 --- a/fs/ceph/file.c
3543 +++ b/fs/ceph/file.c
3544 @@ -1415,10 +1415,13 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
3545 struct inode *inode = file_inode(file);
3546 struct ceph_inode_info *ci = ceph_inode(inode);
3547 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
3548 + struct ceph_osd_client *osdc = &fsc->client->osdc;
3549 struct ceph_cap_flush *prealloc_cf;
3550 ssize_t count, written = 0;
3551 int err, want, got;
3552 bool direct_lock = false;
3553 + u32 map_flags;
3554 + u64 pool_flags;
3555 loff_t pos;
3556 loff_t limit = max(i_size_read(inode), fsc->max_file_size);
3557
3558 @@ -1481,8 +1484,12 @@ retry_snap:
3559 goto out;
3560 }
3561
3562 - /* FIXME: not complete since it doesn't account for being at quota */
3563 - if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
3564 + down_read(&osdc->lock);
3565 + map_flags = osdc->osdmap->flags;
3566 + pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
3567 + up_read(&osdc->lock);
3568 + if ((map_flags & CEPH_OSDMAP_FULL) ||
3569 + (pool_flags & CEPH_POOL_FLAG_FULL)) {
3570 err = -ENOSPC;
3571 goto out;
3572 }
3573 @@ -1575,7 +1582,8 @@ retry_snap:
3574 }
3575
3576 if (written >= 0) {
3577 - if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
3578 + if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
3579 + (pool_flags & CEPH_POOL_FLAG_NEARFULL))
3580 iocb->ki_flags |= IOCB_DSYNC;
3581 written = generic_write_sync(iocb, written);
3582 }
3583 diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
3584 index ccfcc66aaf44..923be9399b21 100644
3585 --- a/fs/ceph/snap.c
3586 +++ b/fs/ceph/snap.c
3587 @@ -1155,5 +1155,6 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
3588 pr_err("snapid map %llx -> %x still in use\n",
3589 sm->snap, sm->dev);
3590 }
3591 + kfree(sm);
3592 }
3593 }
3594 diff --git a/fs/libfs.c b/fs/libfs.c
3595 index 1463b038ffc4..5fd9cc0e2ac9 100644
3596 --- a/fs/libfs.c
3597 +++ b/fs/libfs.c
3598 @@ -821,7 +821,7 @@ int simple_attr_open(struct inode *inode, struct file *file,
3599 {
3600 struct simple_attr *attr;
3601
3602 - attr = kmalloc(sizeof(*attr), GFP_KERNEL);
3603 + attr = kzalloc(sizeof(*attr), GFP_KERNEL);
3604 if (!attr)
3605 return -ENOMEM;
3606
3607 @@ -861,9 +861,11 @@ ssize_t simple_attr_read(struct file *file, char __user *buf,
3608 if (ret)
3609 return ret;
3610
3611 - if (*ppos) { /* continued read */
3612 + if (*ppos && attr->get_buf[0]) {
3613 + /* continued read */
3614 size = strlen(attr->get_buf);
3615 - } else { /* first read */
3616 + } else {
3617 + /* first read */
3618 u64 val;
3619 ret = attr->get(attr->data, &val);
3620 if (ret)
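
With kmalloc(), a pread() at a non-zero offset on a freshly opened file took the "continued read" branch and ran strlen() over an uninitialized get_buf. Zero-filling the allocation and additionally checking get_buf[0] means a non-zero *ppos without a prior read now falls through to the "first read" path and formats the value first. A small userspace model of the guarded cache (buf stands in for attr->get_buf):

    #include <stdio.h>
    #include <string.h>

    static char buf[24];    /* zeroed, like the kzalloc'd attr->get_buf */

    static size_t attr_read(long *ppos)
    {
        size_t size;

        if (*ppos && buf[0]) {  /* continued read: cache is valid */
            size = strlen(buf);
        } else {                /* first read, even at *ppos != 0 */
            snprintf(buf, sizeof(buf), "%d\n", 42);
            size = strlen(buf);
        }
        return size > (size_t)*ppos ? size - (size_t)*ppos : 0;
    }

    int main(void)
    {
        long pos = 1;   /* pread() at offset 1, no read before it */

        printf("bytes available: %zu\n", attr_read(&pos));
        return 0;
    }
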
3621 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
3622 index 30838304a0bf..a05f77f9c21e 100644
3623 --- a/fs/nfs/client.c
3624 +++ b/fs/nfs/client.c
3625 @@ -153,6 +153,7 @@ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
3626 if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
3627 goto error_0;
3628
3629 + clp->cl_minorversion = cl_init->minorversion;
3630 clp->cl_nfs_mod = cl_init->nfs_mod;
3631 if (!try_module_get(clp->cl_nfs_mod->owner))
3632 goto error_dealloc;
3633 diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
3634 index 3800ab6f08fa..a6dcc2151e77 100644
3635 --- a/fs/nfs/fscache.c
3636 +++ b/fs/nfs/fscache.c
3637 @@ -31,6 +31,7 @@ static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
3638 struct nfs_server_key {
3639 struct {
3640 uint16_t nfsversion; /* NFS protocol version */
3641 + uint32_t minorversion; /* NFSv4 minor version */
3642 uint16_t family; /* address family */
3643 __be16 port; /* IP port */
3644 } hdr;
3645 @@ -55,6 +56,7 @@ void nfs_fscache_get_client_cookie(struct nfs_client *clp)
3646
3647 memset(&key, 0, sizeof(key));
3648 key.hdr.nfsversion = clp->rpc_ops->version;
3649 + key.hdr.minorversion = clp->cl_minorversion;
3650 key.hdr.family = clp->cl_addr.ss_family;
3651
3652 switch (clp->cl_addr.ss_family) {
3653 diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
3654 index da6204025a2d..914feab64702 100644
3655 --- a/fs/nfs/nfs4client.c
3656 +++ b/fs/nfs/nfs4client.c
3657 @@ -216,7 +216,6 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init)
3658 INIT_LIST_HEAD(&clp->cl_ds_clients);
3659 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
3660 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
3661 - clp->cl_minorversion = cl_init->minorversion;
3662 clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
3663 clp->cl_mig_gen = 1;
3664 #if IS_ENABLED(CONFIG_NFS_V4_1)
3665 diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
3666 index e081b56f1c1d..5e601975745f 100644
3667 --- a/include/linux/ceph/osdmap.h
3668 +++ b/include/linux/ceph/osdmap.h
3669 @@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
3670 #define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
3671 together */
3672 #define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
3673 +#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota,
3674 + will set FULL too */
3675 +#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */
3676
3677 struct ceph_pg_pool_info {
3678 struct rb_node node;
3679 @@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
3680
3681 extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
3682 extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
3683 +u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
3684
3685 #endif
3686 diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
3687 index 3eb0e55665b4..c004bced9b91 100644
3688 --- a/include/linux/ceph/rados.h
3689 +++ b/include/linux/ceph/rados.h
3690 @@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
3691 /*
3692 * osd map flag bits
3693 */
3694 -#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
3695 -#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
3696 +#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC),
3697 + not set since ~luminous */
3698 +#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC),
3699 + not set since ~luminous */
3700 #define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
3701 #define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
3702 #define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
3703 diff --git a/include/linux/dmar.h b/include/linux/dmar.h
3704 index d3ea390336f3..f397e52c2d9d 100644
3705 --- a/include/linux/dmar.h
3706 +++ b/include/linux/dmar.h
3707 @@ -74,11 +74,13 @@ extern struct list_head dmar_drhd_units;
3708 dmar_rcu_check())
3709
3710 #define for_each_active_drhd_unit(drhd) \
3711 - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
3712 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
3713 + dmar_rcu_check()) \
3714 if (drhd->ignored) {} else
3715
3716 #define for_each_active_iommu(i, drhd) \
3717 - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
3718 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
3719 + dmar_rcu_check()) \
3720 if (i=drhd->iommu, drhd->ignored) {} else
3721
3722 #define for_each_iommu(i, drhd) \
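
Since the RCU-list lockdep rework that landed in this kernel generation, list_for_each_entry_rcu() accepts an optional lockdep expression describing the non-RCU way the list may legally be traversed; passing dmar_rcu_check() here silences false-positive RCU-lockdep splats for callers that hold the DMAR lock instead of rcu_read_lock(). A generic kernel-style sketch of the pattern (all names hypothetical):

    #include <linux/mutex.h>
    #include <linux/rculist.h>

    static DEFINE_MUTEX(example_lock);
    static LIST_HEAD(example_list);

    struct example_entry {
        struct list_head list;
        int value;
    };

    /* Legal under either rcu_read_lock() or example_lock */
    static int example_sum(void)
    {
        struct example_entry *e;
        int sum = 0;

        list_for_each_entry_rcu(e, &example_list, list,
                                lockdep_is_held(&example_lock))
            sum += e->value;
        return sum;
    }
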
3723 diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
3724 index 0aa803c451a3..c620d9139c28 100644
3725 --- a/include/linux/dsa/8021q.h
3726 +++ b/include/linux/dsa/8021q.h
3727 @@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid);
3728
3729 int dsa_8021q_rx_source_port(u16 vid);
3730
3731 -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
3732 -
3733 #else
3734
3735 int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
3736 @@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid)
3737 return 0;
3738 }
3739
3740 -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
3741 -{
3742 - return NULL;
3743 -}
3744 -
3745 #endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
3746
3747 #endif /* _NET_DSA_8021Q_H */
3748 diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
3749 index 7d3f2ced92d1..73c66a3a33ae 100644
3750 --- a/include/linux/ieee80211.h
3751 +++ b/include/linux/ieee80211.h
3752 @@ -2102,14 +2102,14 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
3753 {
3754 struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
3755 u8 spr_len = sizeof(struct ieee80211_he_spr);
3756 - u32 he_spr_params;
3757 + u8 he_spr_params;
3758
3759 /* Make sure the input is not NULL */
3760 if (!he_spr_ie)
3761 return 0;
3762
3763 /* Calc required length */
3764 - he_spr_params = le32_to_cpu(he_spr->he_sr_control);
3765 + he_spr_params = he_spr->he_sr_control;
3766 if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
3767 spr_len++;
3768 if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
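
he_sr_control is a single byte in struct ieee80211_he_spr, so loading it through le32_to_cpu() read three bytes beyond the field and, on big-endian systems, tested bits from the wrong bytes entirely; a plain u8 load is sufficient. A runnable illustration of how the wide read pulls in neighbouring data (the struct layout is a simplified stand-in):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct spr {                    /* simplified stand-in layout */
        uint8_t he_sr_control;      /* the flags live only here */
        uint8_t following[3];       /* whatever comes next in the element */
    };

    int main(void)
    {
        struct spr s = { .he_sr_control = 0x00,
                         .following = { 0x04, 0x00, 0x00 } };
        uint32_t wide;

        memcpy(&wide, &s, sizeof(wide));    /* the old 32-bit read */
        printf("32-bit read: %#010x (neighbour bytes leak in)\n",
               (unsigned)wide);
        printf(" 8-bit read: %#04x\n", s.he_sr_control);
        return 0;
    }
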
3769 diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
3770 index 6d8bf4bdf240..1e5dad8b8e59 100644
3771 --- a/include/linux/intel-iommu.h
3772 +++ b/include/linux/intel-iommu.h
3773 @@ -120,6 +120,8 @@
3774
3775 #define dmar_readq(a) readq(a)
3776 #define dmar_writeq(a,v) writeq(v,a)
3777 +#define dmar_readl(a) readl(a)
3778 +#define dmar_writel(a, v) writel(v, a)
3779
3780 #define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
3781 #define DMAR_VER_MINOR(v) ((v) & 0x0f)
3782 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
3783 index ae703ea3ef48..8faca7b52543 100644
3784 --- a/include/linux/memcontrol.h
3785 +++ b/include/linux/memcontrol.h
3786 @@ -705,6 +705,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
3787 void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
3788 int val);
3789 void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
3790 +void mod_memcg_obj_state(void *p, int idx, int val);
3791
3792 static inline void mod_lruvec_state(struct lruvec *lruvec,
3793 enum node_stat_item idx, int val)
3794 @@ -1128,6 +1129,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
3795 __mod_node_page_state(page_pgdat(page), idx, val);
3796 }
3797
3798 +static inline void mod_memcg_obj_state(void *p, int idx, int val)
3799 +{
3800 +}
3801 +
3802 static inline
3803 unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
3804 gfp_t gfp_mask,
3805 @@ -1432,6 +1437,8 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
3806 return memcg ? memcg->kmemcg_id : -1;
3807 }
3808
3809 +struct mem_cgroup *mem_cgroup_from_obj(void *p);
3810 +
3811 #else
3812
3813 static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
3814 @@ -1473,6 +1480,11 @@ static inline void memcg_put_cache_ids(void)
3815 {
3816 }
3817
3818 +static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
3819 +{
3820 + return NULL;
3821 +}
3822 +
3823 #endif /* CONFIG_MEMCG_KMEM */
3824
3825 #endif /* _LINUX_MEMCONTROL_H */
3826 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
3827 index ba703384bea0..4c5eb3aa8e72 100644
3828 --- a/include/linux/mmc/host.h
3829 +++ b/include/linux/mmc/host.h
3830 @@ -333,6 +333,7 @@ struct mmc_host {
3831 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
3832 MMC_CAP_UHS_DDR50)
3833 #define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */
3834 +#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */
3835 #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
3836 #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
3837 #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
3838 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
3839 index 6ae88b0c1c31..955e1370f033 100644
3840 --- a/include/linux/skbuff.h
3841 +++ b/include/linux/skbuff.h
3842 @@ -634,8 +634,8 @@ typedef unsigned char *sk_buff_data_t;
3843 * @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
3844 * @tc_skip_classify: do not classify packet. set by IFB device
3845 * @tc_at_ingress: used within tc_classify to distinguish in/egress
3846 - * @tc_redirected: packet was redirected by a tc action
3847 - * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
3848 + * @redirected: packet was redirected by packet classifier
3849 + * @from_ingress: packet was redirected from the ingress path
3850 * @peeked: this packet has been seen already, so stats have been
3851 * done for it, don't do them again
3852 * @nf_trace: netfilter packet trace flag
3853 @@ -816,8 +816,10 @@ struct sk_buff {
3854 #ifdef CONFIG_NET_CLS_ACT
3855 __u8 tc_skip_classify:1;
3856 __u8 tc_at_ingress:1;
3857 - __u8 tc_redirected:1;
3858 - __u8 tc_from_ingress:1;
3859 +#endif
3860 +#ifdef CONFIG_NET_REDIRECT
3861 + __u8 redirected:1;
3862 + __u8 from_ingress:1;
3863 #endif
3864 #ifdef CONFIG_TLS_DEVICE
3865 __u8 decrypted:1;
3866 @@ -4514,5 +4516,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
3867 return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
3868 }
3869
3870 +static inline bool skb_is_redirected(const struct sk_buff *skb)
3871 +{
3872 +#ifdef CONFIG_NET_REDIRECT
3873 + return skb->redirected;
3874 +#else
3875 + return false;
3876 +#endif
3877 +}
3878 +
3879 +static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
3880 +{
3881 +#ifdef CONFIG_NET_REDIRECT
3882 + skb->redirected = 1;
3883 + skb->from_ingress = from_ingress;
3884 + if (skb->from_ingress)
3885 + skb->tstamp = 0;
3886 +#endif
3887 +}
3888 +
3889 +static inline void skb_reset_redirect(struct sk_buff *skb)
3890 +{
3891 +#ifdef CONFIG_NET_REDIRECT
3892 + skb->redirected = 0;
3893 +#endif
3894 +}
3895 +
3896 #endif /* __KERNEL__ */
3897 #endif /* _LINUX_SKBUFF_H */
3898 diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
3899 index 1abae3c340a5..299240df79e4 100644
3900 --- a/include/net/af_rxrpc.h
3901 +++ b/include/net/af_rxrpc.h
3902 @@ -58,9 +58,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
3903 rxrpc_user_attach_call_t, unsigned long, gfp_t,
3904 unsigned int);
3905 void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
3906 -bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
3907 - u32 *);
3908 -void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
3909 +bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
3910 u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
3911 bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
3912 ktime_t *);
3913 diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
3914 index d334e4609dd4..9fb7cf1cdf36 100644
3915 --- a/include/net/sch_generic.h
3916 +++ b/include/net/sch_generic.h
3917 @@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
3918 const struct qdisc_size_table *stab);
3919 int skb_do_redirect(struct sk_buff *);
3920
3921 -static inline void skb_reset_tc(struct sk_buff *skb)
3922 -{
3923 -#ifdef CONFIG_NET_CLS_ACT
3924 - skb->tc_redirected = 0;
3925 -#endif
3926 -}
3927 -
3928 -static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
3929 -{
3930 -#ifdef CONFIG_NET_CLS_ACT
3931 - return skb->tc_redirected;
3932 -#else
3933 - return false;
3934 -#endif
3935 -}
3936 -
3937 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
3938 {
3939 #ifdef CONFIG_NET_CLS_ACT
3940 diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
3941 index 564ba1b5cf57..c612cabbc378 100644
3942 --- a/include/trace/events/afs.h
3943 +++ b/include/trace/events/afs.h
3944 @@ -233,7 +233,7 @@ enum afs_cb_break_reason {
3945 EM(afs_call_trace_get, "GET ") \
3946 EM(afs_call_trace_put, "PUT ") \
3947 EM(afs_call_trace_wake, "WAKE ") \
3948 - E_(afs_call_trace_work, "WORK ")
3949 + E_(afs_call_trace_work, "QUEUE")
3950
3951 #define afs_server_traces \
3952 EM(afs_server_trace_alloc, "ALLOC ") \
3953 diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
3954 index 50e991952c97..ed2a96f43ce4 100644
3955 --- a/include/uapi/linux/serio.h
3956 +++ b/include/uapi/linux/serio.h
3957 @@ -9,7 +9,7 @@
3958 #ifndef _UAPI_SERIO_H
3959 #define _UAPI_SERIO_H
3960
3961 -
3962 +#include <linux/const.h>
3963 #include <linux/ioctl.h>
3964
3965 #define SPIOCSTYPE _IOW('q', 0x01, unsigned long)
3966 @@ -18,10 +18,10 @@
3967 /*
3968 * bit masks for use in "interrupt" flags (3rd argument)
3969 */
3970 -#define SERIO_TIMEOUT BIT(0)
3971 -#define SERIO_PARITY BIT(1)
3972 -#define SERIO_FRAME BIT(2)
3973 -#define SERIO_OOB_DATA BIT(3)
3974 +#define SERIO_TIMEOUT _BITUL(0)
3975 +#define SERIO_PARITY _BITUL(1)
3976 +#define SERIO_FRAME _BITUL(2)
3977 +#define SERIO_OOB_DATA _BITUL(3)
3978
3979 /*
3980 * Serio types
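
BIT() comes from the kernel-internal <linux/bits.h> and is not exported to userspace, so a uapi header that uses it breaks userspace builds; _BITUL() from <linux/const.h> is the uapi-safe equivalent, which is why the include is added alongside the substitution. The header should now compile in a plain userspace program, e.g.:

    /* Builds with nothing but the installed uapi headers. */
    #include <linux/serio.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long flags = SERIO_TIMEOUT | SERIO_PARITY;

        printf("flags = %#lx\n", flags);    /* 0x3 */
        return 0;
    }
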
3981 diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
3982 index 29c7c06c6bd6..b774e2210f7d 100644
3983 --- a/kernel/bpf/btf.c
3984 +++ b/kernel/bpf/btf.c
3985 @@ -2309,7 +2309,7 @@ static int btf_enum_check_member(struct btf_verifier_env *env,
3986
3987 struct_size = struct_type->size;
3988 bytes_offset = BITS_ROUNDDOWN_BYTES(struct_bits_off);
3989 - if (struct_size - bytes_offset < sizeof(int)) {
3990 + if (struct_size - bytes_offset < member_type->size) {
3991 btf_verifier_log_member(env, struct_type, member,
3992 "Member exceeds struct_size");
3993 return -EINVAL;
3994 diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
3995 index 8bd69062fbe5..869e2e1860e8 100644
3996 --- a/kernel/bpf/cgroup.c
3997 +++ b/kernel/bpf/cgroup.c
3998 @@ -228,6 +228,9 @@ cleanup:
3999 for (i = 0; i < NR; i++)
4000 bpf_prog_array_free(arrays[i]);
4001
4002 + for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
4003 + cgroup_bpf_put(p);
4004 +
4005 percpu_ref_exit(&cgrp->bpf.refcnt);
4006
4007 return -ENOMEM;
4008 @@ -300,8 +303,8 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
4009 {
4010 struct list_head *progs = &cgrp->bpf.progs[type];
4011 struct bpf_prog *old_prog = NULL;
4012 - struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
4013 - *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
4014 + struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
4015 + struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
4016 enum bpf_cgroup_storage_type stype;
4017 struct bpf_prog_list *pl;
4018 bool pl_was_allocated;
4019 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
4020 index b2817d0929b3..a0b76b360d6f 100644
4021 --- a/kernel/bpf/verifier.c
4022 +++ b/kernel/bpf/verifier.c
4023 @@ -979,17 +979,6 @@ static void __reg_bound_offset(struct bpf_reg_state *reg)
4024 reg->umax_value));
4025 }
4026
4027 -static void __reg_bound_offset32(struct bpf_reg_state *reg)
4028 -{
4029 - u64 mask = 0xffffFFFF;
4030 - struct tnum range = tnum_range(reg->umin_value & mask,
4031 - reg->umax_value & mask);
4032 - struct tnum lo32 = tnum_cast(reg->var_off, 4);
4033 - struct tnum hi32 = tnum_lshift(tnum_rshift(reg->var_off, 32), 32);
4034 -
4035 - reg->var_off = tnum_or(hi32, tnum_intersect(lo32, range));
4036 -}
4037 -
4038 /* Reset the min/max bounds of a register */
4039 static void __mark_reg_unbounded(struct bpf_reg_state *reg)
4040 {
4041 @@ -5452,10 +5441,6 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
4042 /* We might have learned some bits from the bounds. */
4043 __reg_bound_offset(false_reg);
4044 __reg_bound_offset(true_reg);
4045 - if (is_jmp32) {
4046 - __reg_bound_offset32(false_reg);
4047 - __reg_bound_offset32(true_reg);
4048 - }
4049 /* Intersecting with the old var_off might have improved our bounds
4050 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4051 * then new var_off is (0; 0x7f...fc) which improves our umax.
4052 @@ -5565,10 +5550,6 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
4053 /* We might have learned some bits from the bounds. */
4054 __reg_bound_offset(false_reg);
4055 __reg_bound_offset(true_reg);
4056 - if (is_jmp32) {
4057 - __reg_bound_offset32(false_reg);
4058 - __reg_bound_offset32(true_reg);
4059 - }
4060 /* Intersecting with the old var_off might have improved our bounds
4061 * slightly. e.g. if umax was 0x7f...f and var_off was (0; 0xf...fc),
4062 * then new var_off is (0; 0x7f...fc) which improves our umax.
4063 diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
4064 index 7f83f4121d8d..f684c82efc2e 100644
4065 --- a/kernel/cgroup/cgroup-v1.c
4066 +++ b/kernel/cgroup/cgroup-v1.c
4067 @@ -473,6 +473,7 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
4068 */
4069 p++;
4070 if (p >= end) {
4071 + (*pos)++;
4072 return NULL;
4073 } else {
4074 *pos = *p;
4075 @@ -783,7 +784,7 @@ void cgroup1_release_agent(struct work_struct *work)
4076
4077 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
4078 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
4079 - if (!pathbuf || !agentbuf)
4080 + if (!pathbuf || !agentbuf || !strlen(agentbuf))
4081 goto out;
4082
4083 spin_lock_irq(&css_set_lock);
4084 diff --git a/kernel/fork.c b/kernel/fork.c
4085 index 755d8160e001..27c0ef30002e 100644
4086 --- a/kernel/fork.c
4087 +++ b/kernel/fork.c
4088 @@ -394,8 +394,8 @@ static void account_kernel_stack(struct task_struct *tsk, int account)
4089 mod_zone_page_state(page_zone(first_page), NR_KERNEL_STACK_KB,
4090 THREAD_SIZE / 1024 * account);
4091
4092 - mod_memcg_page_state(first_page, MEMCG_KERNEL_STACK_KB,
4093 - account * (THREAD_SIZE / 1024));
4094 + mod_memcg_obj_state(stack, MEMCG_KERNEL_STACK_KB,
4095 + account * (THREAD_SIZE / 1024));
4096 }
4097 }
4098
4099 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
4100 index 55b080101a20..b304c17d53a3 100644
4101 --- a/kernel/irq/manage.c
4102 +++ b/kernel/irq/manage.c
4103 @@ -284,7 +284,11 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
4104
4105 if (desc->affinity_notify) {
4106 kref_get(&desc->affinity_notify->kref);
4107 - schedule_work(&desc->affinity_notify->work);
4108 + if (!schedule_work(&desc->affinity_notify->work)) {
4109 + /* Work was already scheduled, drop our extra ref */
4110 + kref_put(&desc->affinity_notify->kref,
4111 + desc->affinity_notify->release);
4112 + }
4113 }
4114 irqd_set(data, IRQD_AFFINITY_SET);
4115
4116 @@ -384,7 +388,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
4117 raw_spin_unlock_irqrestore(&desc->lock, flags);
4118
4119 if (old_notify) {
4120 - cancel_work_sync(&old_notify->work);
4121 + if (cancel_work_sync(&old_notify->work)) {
4122 + /* Pending work had a ref, put that one too */
4123 + kref_put(&old_notify->kref, old_notify->release);
4124 + }
4125 kref_put(&old_notify->kref, old_notify->release);
4126 }
4127
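
kref_get() takes a reference on behalf of the work item, but schedule_work() returns false when the work is already pending, and that pending instance already owns its reference; likewise cancel_work_sync() returns true when it killed a queued work whose reference the worker will now never drop. Both sites must put the orphaned reference or the notifier leaks. A runnable model of the "one reference per queued instance" rule (the workqueue is reduced to a boolean):

    #include <stdbool.h>
    #include <stdio.h>

    static int refs = 1;            /* initial reference */
    static bool pending;

    static void get(void) { refs++; }
    static void put(void) { if (--refs == 0) printf("released\n"); }

    static bool schedule_work_(void)    /* false if already pending */
    {
        if (pending)
            return false;
        pending = true;
        return true;
    }

    static void notify(void)
    {
        get();
        if (!schedule_work_())
            put();  /* pending work already holds its reference */
    }

    int main(void)
    {
        notify();           /* queued, refs == 2 */
        notify();           /* already pending, refs stays 2 */
        pending = false;    /* worker runs... */
        put();              /* ...and drops the work's reference */
        put();              /* drop the initial reference */
        return 0;
    }
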
4128 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
4129 index d804efb372e2..5d0575d633d2 100644
4130 --- a/mm/memcontrol.c
4131 +++ b/mm/memcontrol.c
4132 @@ -786,6 +786,17 @@ void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val)
4133 rcu_read_unlock();
4134 }
4135
4136 +void mod_memcg_obj_state(void *p, int idx, int val)
4137 +{
4138 + struct mem_cgroup *memcg;
4139 +
4140 + rcu_read_lock();
4141 + memcg = mem_cgroup_from_obj(p);
4142 + if (memcg)
4143 + mod_memcg_state(memcg, idx, val);
4144 + rcu_read_unlock();
4145 +}
4146 +
4147 /**
4148 * __count_memcg_events - account VM events in a cgroup
4149 * @memcg: the memory cgroup
4150 @@ -2778,6 +2789,33 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg,
4151 }
4152
4153 #ifdef CONFIG_MEMCG_KMEM
4154 +/*
4155 + * Returns a pointer to the memory cgroup to which the kernel object is charged.
4156 + *
4157 + * The caller must ensure the memcg lifetime, e.g. by taking rcu_read_lock(),
4158 + * cgroup_mutex, etc.
4159 + */
4160 +struct mem_cgroup *mem_cgroup_from_obj(void *p)
4161 +{
4162 + struct page *page;
4163 +
4164 + if (mem_cgroup_disabled())
4165 + return NULL;
4166 +
4167 + page = virt_to_head_page(p);
4168 +
4169 + /*
4170 + * Slab pages don't have page->mem_cgroup set because corresponding
4171 + * kmem caches can be reparented during the lifetime. That's why
4172 + * memcg_from_slab_page() should be used instead.
4173 + */
4174 + if (PageSlab(page))
4175 + return memcg_from_slab_page(page);
4176 +
4177 + /* All other pages use page->mem_cgroup */
4178 + return page->mem_cgroup;
4179 +}
4180 +
4181 static int memcg_alloc_cache_id(void)
4182 {
4183 int id, size;
4184 diff --git a/mm/sparse.c b/mm/sparse.c
4185 index a18ad9390d9f..78bbecd904c3 100644
4186 --- a/mm/sparse.c
4187 +++ b/mm/sparse.c
4188 @@ -789,6 +789,12 @@ static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
4189 ms->usage = NULL;
4190 }
4191 memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
4192 + /*
4193 + * Mark the section invalid so that valid_section()
4194 + * return false. This prevents code from dereferencing
4195 + * ms->usage array.
4196 + */
4197 + ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
4198 }
4199
4200 if (section_is_early && memmap)
4201 diff --git a/mm/swapfile.c b/mm/swapfile.c
4202 index dab43523afdd..891a3ef48651 100644
4203 --- a/mm/swapfile.c
4204 +++ b/mm/swapfile.c
4205 @@ -2892,10 +2892,6 @@ static int claim_swapfile(struct swap_info_struct *p, struct inode *inode)
4206 p->bdev = inode->i_sb->s_bdev;
4207 }
4208
4209 - inode_lock(inode);
4210 - if (IS_SWAPFILE(inode))
4211 - return -EBUSY;
4212 -
4213 return 0;
4214 }
4215
4216 @@ -3150,17 +3146,22 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4217 mapping = swap_file->f_mapping;
4218 inode = mapping->host;
4219
4220 - /* If S_ISREG(inode->i_mode) will do inode_lock(inode); */
4221 error = claim_swapfile(p, inode);
4222 if (unlikely(error))
4223 goto bad_swap;
4224
4225 + inode_lock(inode);
4226 + if (IS_SWAPFILE(inode)) {
4227 + error = -EBUSY;
4228 + goto bad_swap_unlock_inode;
4229 + }
4230 +
4231 /*
4232 * Read the swap header.
4233 */
4234 if (!mapping->a_ops->readpage) {
4235 error = -EINVAL;
4236 - goto bad_swap;
4237 + goto bad_swap_unlock_inode;
4238 }
4239 page = read_mapping_page(mapping, 0, swap_file);
4240 if (IS_ERR(page)) {
4241 @@ -3172,14 +3173,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4242 maxpages = read_swap_header(p, swap_header, inode);
4243 if (unlikely(!maxpages)) {
4244 error = -EINVAL;
4245 - goto bad_swap;
4246 + goto bad_swap_unlock_inode;
4247 }
4248
4249 /* OK, set up the swap map and apply the bad block list */
4250 swap_map = vzalloc(maxpages);
4251 if (!swap_map) {
4252 error = -ENOMEM;
4253 - goto bad_swap;
4254 + goto bad_swap_unlock_inode;
4255 }
4256
4257 if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
4258 @@ -3204,7 +3205,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4259 GFP_KERNEL);
4260 if (!cluster_info) {
4261 error = -ENOMEM;
4262 - goto bad_swap;
4263 + goto bad_swap_unlock_inode;
4264 }
4265
4266 for (ci = 0; ci < nr_cluster; ci++)
4267 @@ -3213,7 +3214,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4268 p->percpu_cluster = alloc_percpu(struct percpu_cluster);
4269 if (!p->percpu_cluster) {
4270 error = -ENOMEM;
4271 - goto bad_swap;
4272 + goto bad_swap_unlock_inode;
4273 }
4274 for_each_possible_cpu(cpu) {
4275 struct percpu_cluster *cluster;
4276 @@ -3227,13 +3228,13 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4277
4278 error = swap_cgroup_swapon(p->type, maxpages);
4279 if (error)
4280 - goto bad_swap;
4281 + goto bad_swap_unlock_inode;
4282
4283 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map,
4284 cluster_info, maxpages, &span);
4285 if (unlikely(nr_extents < 0)) {
4286 error = nr_extents;
4287 - goto bad_swap;
4288 + goto bad_swap_unlock_inode;
4289 }
4290 /* frontswap enabled? set up bit-per-page map for frontswap */
4291 if (IS_ENABLED(CONFIG_FRONTSWAP))
4292 @@ -3273,7 +3274,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4293
4294 error = init_swap_address_space(p->type, maxpages);
4295 if (error)
4296 - goto bad_swap;
4297 + goto bad_swap_unlock_inode;
4298
4299 /*
4300 * Flush any pending IO and dirty mappings before we start using this
4301 @@ -3283,7 +3284,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4302 error = inode_drain_writes(inode);
4303 if (error) {
4304 inode->i_flags &= ~S_SWAPFILE;
4305 - goto bad_swap;
4306 + goto bad_swap_unlock_inode;
4307 }
4308
4309 mutex_lock(&swapon_mutex);
4310 @@ -3308,6 +3309,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
4311
4312 error = 0;
4313 goto out;
4314 +bad_swap_unlock_inode:
4315 + inode_unlock(inode);
4316 bad_swap:
4317 free_percpu(p->percpu_cluster);
4318 p->percpu_cluster = NULL;
4319 @@ -3315,6 +3318,7 @@ bad_swap:
4320 set_blocksize(p->bdev, p->old_block_size);
4321 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
4322 }
4323 + inode = NULL;
4324 destroy_swap_extents(p);
4325 swap_cgroup_swapoff(p->type);
4326 spin_lock(&swap_lock);
4327 @@ -3326,13 +3330,8 @@ bad_swap:
4328 kvfree(frontswap_map);
4329 if (inced_nr_rotate_swap)
4330 atomic_dec(&nr_rotate_swap);
4331 - if (swap_file) {
4332 - if (inode) {
4333 - inode_unlock(inode);
4334 - inode = NULL;
4335 - }
4336 + if (swap_file)
4337 filp_close(swap_file, NULL);
4338 - }
4339 out:
4340 if (page && !IS_ERR(page)) {
4341 kunmap(page);
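
claim_swapfile() used to take inode_lock() itself and could return -EBUSY with the lock still held, while later failure handling in swapon() had to track whether the inode was locked at all. Hoisting the lock into swapon() and funnelling every post-lock failure through the single bad_swap_unlock_inode label means the lock is released exactly once on all paths. A minimal runnable sketch of that centralized-unwind shape:

    #include <errno.h>
    #include <stdio.h>

    static void lock(void)   { puts("lock"); }
    static void unlock(void) { puts("unlock"); }

    static int setup(int fail_step)
    {
        int err = 0;

        lock();
        if (fail_step == 1) { err = -EBUSY;  goto out_unlock; }
        if (fail_step == 2) { err = -ENOMEM; goto out_unlock; }
        unlock();   /* success path releases too, exactly once */
        return 0;

    out_unlock:
        unlock();
        return err;
    }

    int main(void)
    {
        printf("ok   -> %d\n", setup(0));
        printf("busy -> %d\n", setup(1));
        return 0;
    }
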
4342 diff --git a/net/Kconfig b/net/Kconfig
4343 index 3101bfcbdd7a..0b2fecc83452 100644
4344 --- a/net/Kconfig
4345 +++ b/net/Kconfig
4346 @@ -52,6 +52,9 @@ config NET_INGRESS
4347 config NET_EGRESS
4348 bool
4349
4350 +config NET_REDIRECT
4351 + bool
4352 +
4353 config SKB_EXTENSIONS
4354 bool
4355
4356 diff --git a/net/bpfilter/main.c b/net/bpfilter/main.c
4357 index 77396a098fbe..efea4874743e 100644
4358 --- a/net/bpfilter/main.c
4359 +++ b/net/bpfilter/main.c
4360 @@ -10,7 +10,7 @@
4361 #include <asm/unistd.h>
4362 #include "msgfmt.h"
4363
4364 -int debug_fd;
4365 +FILE *debug_f;
4366
4367 static int handle_get_cmd(struct mbox_request *cmd)
4368 {
4369 @@ -35,9 +35,10 @@ static void loop(void)
4370 struct mbox_reply reply;
4371 int n;
4372
4373 + fprintf(debug_f, "testing the buffer\n");
4374 n = read(0, &req, sizeof(req));
4375 if (n != sizeof(req)) {
4376 - dprintf(debug_fd, "invalid request %d\n", n);
4377 + fprintf(debug_f, "invalid request %d\n", n);
4378 return;
4379 }
4380
4381 @@ -47,7 +48,7 @@ static void loop(void)
4382
4383 n = write(1, &reply, sizeof(reply));
4384 if (n != sizeof(reply)) {
4385 - dprintf(debug_fd, "reply failed %d\n", n);
4386 + fprintf(debug_f, "reply failed %d\n", n);
4387 return;
4388 }
4389 }
4390 @@ -55,9 +56,10 @@ static void loop(void)
4391
4392 int main(void)
4393 {
4394 - debug_fd = open("/dev/kmsg", 00000002);
4395 - dprintf(debug_fd, "Started bpfilter\n");
4396 + debug_f = fopen("/dev/kmsg", "w");
4397 + setvbuf(debug_f, 0, _IOLBF, 0);
4398 + fprintf(debug_f, "Started bpfilter\n");
4399 loop();
4400 - close(debug_fd);
4401 + fclose(debug_f);
4402 return 0;
4403 }
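
/dev/kmsg treats every write() as one log record, so each message must reach the file as a single write; a line-buffered FILE (setvbuf with _IOLBF) gives that guarantee for every newline-terminated fprintf(), which is presumably why the raw-fd dprintf() calls are replaced wholesale. A runnable standalone version of the same setup:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/dev/kmsg", "w");  /* needs privileges */

        if (!f)
            return 1;
        setvbuf(f, NULL, _IOLBF, 0);    /* flush one record per '\n' */
        fprintf(f, "example: exactly one write per line\n");
        fclose(f);
        return 0;
    }
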
4404 diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
4405 index 4e0de14f80bb..2a6e63a8edbe 100644
4406 --- a/net/ceph/osdmap.c
4407 +++ b/net/ceph/osdmap.c
4408 @@ -710,6 +710,15 @@ int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
4409 }
4410 EXPORT_SYMBOL(ceph_pg_poolid_by_name);
4411
4412 +u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
4413 +{
4414 + struct ceph_pg_pool_info *pi;
4415 +
4416 + pi = __lookup_pg_pool(&map->pg_pools, id);
4417 + return pi ? pi->flags : 0;
4418 +}
4419 +EXPORT_SYMBOL(ceph_pg_pool_flags);
4420 +
4421 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
4422 {
4423 rb_erase(&pi->node, root);
4424 diff --git a/net/core/dev.c b/net/core/dev.c
4425 index db8c229e0f4a..931dfdcbabf1 100644
4426 --- a/net/core/dev.c
4427 +++ b/net/core/dev.c
4428 @@ -4237,7 +4237,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
4429 /* Reinjected packets coming from act_mirred or similar should
4430 * not get XDP generic processing.
4431 */
4432 - if (skb_is_tc_redirected(skb))
4433 + if (skb_is_redirected(skb))
4434 return XDP_PASS;
4435
4436 /* XDP packets must be linear and must have sufficient headroom
4437 @@ -4786,7 +4786,7 @@ skip_taps:
4438 goto out;
4439 }
4440 #endif
4441 - skb_reset_tc(skb);
4442 + skb_reset_redirect(skb);
4443 skip_classify:
4444 if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
4445 goto drop;
4446 diff --git a/net/core/pktgen.c b/net/core/pktgen.c
4447 index 48b1e429857c..cb3b565ff5ad 100644
4448 --- a/net/core/pktgen.c
4449 +++ b/net/core/pktgen.c
4450 @@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
4451 /* skb was 'freed' by stack, so clean few
4452 * bits and reuse it
4453 */
4454 - skb_reset_tc(skb);
4455 + skb_reset_redirect(skb);
4456 } while (--burst > 0);
4457 goto out; /* Skips xmit_mode M_START_XMIT */
4458 } else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
4459 diff --git a/net/core/sock_map.c b/net/core/sock_map.c
4460 index 405397801bb0..8291568b707f 100644
4461 --- a/net/core/sock_map.c
4462 +++ b/net/core/sock_map.c
4463 @@ -233,8 +233,11 @@ static void sock_map_free(struct bpf_map *map)
4464 struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
4465 int i;
4466
4467 + /* After the sync no updates or deletes will be in-flight so it
4468 + * is safe to walk map and remove entries without risking a race
4469 + * in EEXIST update case.
4470 + */
4471 synchronize_rcu();
4472 - raw_spin_lock_bh(&stab->lock);
4473 for (i = 0; i < stab->map.max_entries; i++) {
4474 struct sock **psk = &stab->sks[i];
4475 struct sock *sk;
4476 @@ -248,7 +251,6 @@ static void sock_map_free(struct bpf_map *map)
4477 release_sock(sk);
4478 }
4479 }
4480 - raw_spin_unlock_bh(&stab->lock);
4481
4482 /* wait for psock readers accessing its map link */
4483 synchronize_rcu();
4484 @@ -863,10 +865,13 @@ static void sock_hash_free(struct bpf_map *map)
4485 struct hlist_node *node;
4486 int i;
4487
4488 + /* After the sync no updates or deletes will be in-flight so it
4489 + * is safe to walk map and remove entries without risking a race
4490 + * in EEXIST update case.
4491 + */
4492 synchronize_rcu();
4493 for (i = 0; i < htab->buckets_num; i++) {
4494 bucket = sock_hash_select_bucket(htab, i);
4495 - raw_spin_lock_bh(&bucket->lock);
4496 hlist_for_each_entry_safe(elem, node, &bucket->head, node) {
4497 hlist_del_rcu(&elem->node);
4498 lock_sock(elem->sk);
4499 @@ -875,7 +880,6 @@ static void sock_hash_free(struct bpf_map *map)
4500 rcu_read_unlock();
4501 release_sock(elem->sk);
4502 }
4503 - raw_spin_unlock_bh(&bucket->lock);
4504 }
4505
4506 /* wait for psock readers accessing its map link */
4507 diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
4508 index 9e5a883a9f0c..ebe73848d1cf 100644
4509 --- a/net/dsa/tag_8021q.c
4510 +++ b/net/dsa/tag_8021q.c
4511 @@ -299,49 +299,6 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev,
4512 }
4513 EXPORT_SYMBOL_GPL(dsa_8021q_xmit);
4514
4515 -/* In the DSA packet_type handler, skb->data points in the middle of the VLAN
4516 - * tag, after tpid and before tci. This is because so far, ETH_HLEN
4517 - * (DMAC, SMAC, EtherType) bytes were pulled.
4518 - * There are 2 bytes of VLAN tag left in skb->data, and upper
4519 - * layers expect the 'real' EtherType to be consumed as well.
4520 - * Coincidentally, a VLAN header is also of the same size as
4521 - * the number of bytes that need to be pulled.
4522 - *
4523 - * skb_mac_header skb->data
4524 - * | |
4525 - * v v
4526 - * | | | | | | | | | | | | | | | | | | |
4527 - * +-----------------------+-----------------------+-------+-------+-------+
4528 - * | Destination MAC | Source MAC | TPID | TCI | EType |
4529 - * +-----------------------+-----------------------+-------+-------+-------+
4530 - * ^ | |
4531 - * |<--VLAN_HLEN-->to <---VLAN_HLEN--->
4532 - * from |
4533 - * >>>>>>> v
4534 - * >>>>>>> | | | | | | | | | | | | | | |
4535 - * >>>>>>> +-----------------------+-----------------------+-------+
4536 - * >>>>>>> | Destination MAC | Source MAC | EType |
4537 - * +-----------------------+-----------------------+-------+
4538 - * ^ ^
4539 - * (now part of | |
4540 - * skb->head) skb_mac_header skb->data
4541 - */
4542 -struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
4543 -{
4544 - u8 *from = skb_mac_header(skb);
4545 - u8 *dest = from + VLAN_HLEN;
4546 -
4547 - memmove(dest, from, ETH_HLEN - VLAN_HLEN);
4548 - skb_pull(skb, VLAN_HLEN);
4549 - skb_push(skb, ETH_HLEN);
4550 - skb_reset_mac_header(skb);
4551 - skb_reset_mac_len(skb);
4552 - skb_pull_rcsum(skb, ETH_HLEN);
4553 -
4554 - return skb;
4555 -}
4556 -EXPORT_SYMBOL_GPL(dsa_8021q_remove_header);
4557 -
4558 static const struct dsa_device_ops dsa_8021q_netdev_ops = {
4559 .name = "8021q",
4560 .proto = DSA_TAG_PROTO_8021Q,
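
The dsa_8021q_remove_header() function removed above (its ASCII diagram documents the buffer layout) stripped the VLAN tag by sliding the two MAC addresses forward over the TPID/TCI. The buffer manipulation in isolation, as a self-contained sketch with no skb bookkeeping:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_HLEN  14                     /* DMAC + SMAC + EtherType */
#define VLAN_HLEN  4                     /* TPID + TCI */

/* frame points at the DMAC; returns the new start of the frame */
static uint8_t *strip_vlan(uint8_t *frame)
{
        /* Slide DMAC + SMAC (ETH_HLEN - VLAN_HLEN = 12 bytes) forward
         * by VLAN_HLEN so they overwrite the tag; the inner EtherType
         * stays where it is. */
        memmove(frame + VLAN_HLEN, frame, ETH_HLEN - VLAN_HLEN);
        return frame + VLAN_HLEN;
}

int main(void)
{
        uint8_t f[ETH_HLEN + VLAN_HLEN] = {
                1, 2, 3, 4, 5, 6,  7, 8, 9, 10, 11, 12, /* DMAC, SMAC */
                0x81, 0x00, 0x00, 0x05,  /* TPID 0x8100, TCI (VID 5) */
                0x08, 0x00,              /* inner EtherType: IPv4 */
        };
        uint8_t *p = strip_vlan(f);
        printf("EtherType after strip: %02x%02x\n", p[12], p[13]);
        return 0;
}
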
4561 diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
4562 index 9c3114179690..9169b63a89e3 100644
4563 --- a/net/dsa/tag_brcm.c
4564 +++ b/net/dsa/tag_brcm.c
4565 @@ -140,6 +140,8 @@ static struct sk_buff *brcm_tag_rcv_ll(struct sk_buff *skb,
4566 /* Remove Broadcom tag and update checksum */
4567 skb_pull_rcsum(skb, BRCM_TAG_LEN);
4568
4569 + skb->offload_fwd_mark = 1;
4570 +
4571 return skb;
4572 }
4573 #endif
4574 diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c
4575 index 63ef2a14c934..12f3ce52e62e 100644
4576 --- a/net/dsa/tag_sja1105.c
4577 +++ b/net/dsa/tag_sja1105.c
4578 @@ -238,14 +238,14 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
4579 {
4580 struct sja1105_meta meta = {0};
4581 int source_port, switch_id;
4582 - struct vlan_ethhdr *hdr;
4583 + struct ethhdr *hdr;
4584 u16 tpid, vid, tci;
4585 bool is_link_local;
4586 bool is_tagged;
4587 bool is_meta;
4588
4589 - hdr = vlan_eth_hdr(skb);
4590 - tpid = ntohs(hdr->h_vlan_proto);
4591 + hdr = eth_hdr(skb);
4592 + tpid = ntohs(hdr->h_proto);
4593 is_tagged = (tpid == ETH_P_SJA1105);
4594 is_link_local = sja1105_is_link_local(skb);
4595 is_meta = sja1105_is_meta_frame(skb);
4596 @@ -254,7 +254,12 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
4597
4598 if (is_tagged) {
4599 /* Normal traffic path. */
4600 - tci = ntohs(hdr->h_vlan_TCI);
4601 + skb_push_rcsum(skb, ETH_HLEN);
4602 + __skb_vlan_pop(skb, &tci);
4603 + skb_pull_rcsum(skb, ETH_HLEN);
4604 + skb_reset_network_header(skb);
4605 + skb_reset_transport_header(skb);
4606 +
4607 vid = tci & VLAN_VID_MASK;
4608 source_port = dsa_8021q_rx_source_port(vid);
4609 switch_id = dsa_8021q_rx_switch_id(vid);
4610 @@ -283,12 +288,6 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb,
4611 return NULL;
4612 }
4613
4614 - /* Delete/overwrite fake VLAN header, DSA expects to not find
4615 - * it there, see dsa_switch_rcv: skb_push(skb, ETH_HLEN).
4616 - */
4617 - if (is_tagged)
4618 - skb = dsa_8021q_remove_header(skb);
4619 -
4620 return sja1105_rcv_meta_state_machine(skb, &meta, is_link_local,
4621 is_meta);
4622 }
4623 diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
4624 index 27dc65d7de67..002f341f3564 100644
4625 --- a/net/hsr/hsr_framereg.c
4626 +++ b/net/hsr/hsr_framereg.c
4627 @@ -482,12 +482,9 @@ int hsr_get_node_data(struct hsr_priv *hsr,
4628 struct hsr_port *port;
4629 unsigned long tdiff;
4630
4631 - rcu_read_lock();
4632 node = find_node_by_addr_A(&hsr->node_db, addr);
4633 - if (!node) {
4634 - rcu_read_unlock();
4635 - return -ENOENT; /* No such entry */
4636 - }
4637 + if (!node)
4638 + return -ENOENT;
4639
4640 ether_addr_copy(addr_b, node->macaddress_B);
4641
4642 @@ -522,7 +519,5 @@ int hsr_get_node_data(struct hsr_priv *hsr,
4643 *addr_b_ifindex = -1;
4644 }
4645
4646 - rcu_read_unlock();
4647 -
4648 return 0;
4649 }
4650 diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c
4651 index 8dc0547f01d0..fae21c863b1f 100644
4652 --- a/net/hsr/hsr_netlink.c
4653 +++ b/net/hsr/hsr_netlink.c
4654 @@ -251,15 +251,16 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
4655 if (!na)
4656 goto invalid;
4657
4658 - hsr_dev = __dev_get_by_index(genl_info_net(info),
4659 - nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4660 + rcu_read_lock();
4661 + hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
4662 + nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4663 if (!hsr_dev)
4664 - goto invalid;
4665 + goto rcu_unlock;
4666 if (!is_hsr_master(hsr_dev))
4667 - goto invalid;
4668 + goto rcu_unlock;
4669
4670 /* Send reply */
4671 - skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4672 + skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
4673 if (!skb_out) {
4674 res = -ENOMEM;
4675 goto fail;
4676 @@ -313,12 +314,10 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
4677 res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
4678 if (res < 0)
4679 goto nla_put_failure;
4680 - rcu_read_lock();
4681 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
4682 if (port)
4683 res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
4684 port->dev->ifindex);
4685 - rcu_read_unlock();
4686 if (res < 0)
4687 goto nla_put_failure;
4688
4689 @@ -328,20 +327,22 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
4690 res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
4691 if (res < 0)
4692 goto nla_put_failure;
4693 - rcu_read_lock();
4694 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
4695 if (port)
4696 res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
4697 port->dev->ifindex);
4698 - rcu_read_unlock();
4699 if (res < 0)
4700 goto nla_put_failure;
4701
4702 + rcu_read_unlock();
4703 +
4704 genlmsg_end(skb_out, msg_head);
4705 genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
4706
4707 return 0;
4708
4709 +rcu_unlock:
4710 + rcu_read_unlock();
4711 invalid:
4712 netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
4713 return 0;
4714 @@ -351,6 +352,7 @@ nla_put_failure:
4715 /* Fall through */
4716
4717 fail:
4718 + rcu_read_unlock();
4719 return res;
4720 }
4721
4722 @@ -358,16 +360,14 @@ fail:
4723 */
4724 static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4725 {
4726 - /* For receiving */
4727 - struct nlattr *na;
4728 + unsigned char addr[ETH_ALEN];
4729 struct net_device *hsr_dev;
4730 -
4731 - /* For sending */
4732 struct sk_buff *skb_out;
4733 - void *msg_head;
4734 struct hsr_priv *hsr;
4735 - void *pos;
4736 - unsigned char addr[ETH_ALEN];
4737 + bool restart = false;
4738 + struct nlattr *na;
4739 + void *pos = NULL;
4740 + void *msg_head;
4741 int res;
4742
4743 if (!info)
4744 @@ -377,15 +377,17 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4745 if (!na)
4746 goto invalid;
4747
4748 - hsr_dev = __dev_get_by_index(genl_info_net(info),
4749 - nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4750 + rcu_read_lock();
4751 + hsr_dev = dev_get_by_index_rcu(genl_info_net(info),
4752 + nla_get_u32(info->attrs[HSR_A_IFINDEX]));
4753 if (!hsr_dev)
4754 - goto invalid;
4755 + goto rcu_unlock;
4756 if (!is_hsr_master(hsr_dev))
4757 - goto invalid;
4758 + goto rcu_unlock;
4759
4760 +restart:
4761 /* Send reply */
4762 - skb_out = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
4763 + skb_out = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC);
4764 if (!skb_out) {
4765 res = -ENOMEM;
4766 goto fail;
4767 @@ -399,18 +401,26 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4768 goto nla_put_failure;
4769 }
4770
4771 - res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
4772 - if (res < 0)
4773 - goto nla_put_failure;
4774 + if (!restart) {
4775 + res = nla_put_u32(skb_out, HSR_A_IFINDEX, hsr_dev->ifindex);
4776 + if (res < 0)
4777 + goto nla_put_failure;
4778 + }
4779
4780 hsr = netdev_priv(hsr_dev);
4781
4782 - rcu_read_lock();
4783 - pos = hsr_get_next_node(hsr, NULL, addr);
4784 + if (!pos)
4785 + pos = hsr_get_next_node(hsr, NULL, addr);
4786 while (pos) {
4787 res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
4788 if (res < 0) {
4789 - rcu_read_unlock();
4790 + if (res == -EMSGSIZE) {
4791 + genlmsg_end(skb_out, msg_head);
4792 + genlmsg_unicast(genl_info_net(info), skb_out,
4793 + info->snd_portid);
4794 + restart = true;
4795 + goto restart;
4796 + }
4797 goto nla_put_failure;
4798 }
4799 pos = hsr_get_next_node(hsr, pos, addr);
4800 @@ -422,15 +432,18 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
4801
4802 return 0;
4803
4804 +rcu_unlock:
4805 + rcu_read_unlock();
4806 invalid:
4807 netlink_ack(skb_in, nlmsg_hdr(skb_in), -EINVAL, NULL);
4808 return 0;
4809
4810 nla_put_failure:
4811 - kfree_skb(skb_out);
4812 + nlmsg_free(skb_out);
4813 /* Fall through */
4814
4815 fail:
4816 + rcu_read_unlock();
4817 return res;
4818 }
4819
4820 @@ -457,6 +470,7 @@ static struct genl_family hsr_genl_family __ro_after_init = {
4821 .version = 1,
4822 .maxattr = HSR_A_MAX,
4823 .policy = hsr_genl_policy,
4824 + .netnsok = true,
4825 .module = THIS_MODULE,
4826 .ops = hsr_ops,
4827 .n_ops = ARRAY_SIZE(hsr_ops),
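
hsr_get_node_list() now holds rcu_read_lock() for the whole dump, which forces GFP_ATOMIC allocations and adds a restart path: when nla_put() returns -EMSGSIZE, the partial message is sent and the walk resumes from the saved cursor into a fresh skb. The flush-and-continue shape of that loop, as a plain C sketch with illustrative names:

#include <stdio.h>
#include <string.h>

#define BUF_SZ 16

static void flush(const char *buf, int len)
{
        printf("chunk: %.*s\n", len, buf);       /* genlmsg_unicast() here */
}

int main(void)
{
        const char *items[] = { "alpha", "bravo", "charlie", "delta" };
        char buf[BUF_SZ];
        int used = 0;

        for (int i = 0; i < 4; i++) {
                int need = (int)strlen(items[i]);
                if (used + need > BUF_SZ) {      /* the -EMSGSIZE case */
                        flush(buf, used);        /* send what fits ... */
                        used = 0;                /* ... restart fresh, same cursor */
                }
                memcpy(buf + used, items[i], need);
                used += need;
        }
        if (used)
                flush(buf, used);
        return 0;
}
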
4828 diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
4829 index fbfd0db182b7..a9104d42aafb 100644
4830 --- a/net/hsr/hsr_slave.c
4831 +++ b/net/hsr/hsr_slave.c
4832 @@ -145,16 +145,16 @@ int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
4833 if (!port)
4834 return -ENOMEM;
4835
4836 + port->hsr = hsr;
4837 + port->dev = dev;
4838 + port->type = type;
4839 +
4840 if (type != HSR_PT_MASTER) {
4841 res = hsr_portdev_setup(dev, port);
4842 if (res)
4843 goto fail_dev_setup;
4844 }
4845
4846 - port->hsr = hsr;
4847 - port->dev = dev;
4848 - port->type = type;
4849 -
4850 list_add_tail_rcu(&port->port_list, &hsr->ports);
4851 synchronize_rcu();
4852
4853 diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
4854 index 03381f3e12ba..a926de2e42b5 100644
4855 --- a/net/ipv4/Kconfig
4856 +++ b/net/ipv4/Kconfig
4857 @@ -303,6 +303,7 @@ config SYN_COOKIES
4858
4859 config NET_IPVTI
4860 tristate "Virtual (secure) IP: tunneling"
4861 + depends on IPV6 || IPV6=n
4862 select INET_TUNNEL
4863 select NET_IP_TUNNEL
4864 select XFRM
4865 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
4866 index 71c78d223dfd..48bf3b9be475 100644
4867 --- a/net/ipv4/fib_frontend.c
4868 +++ b/net/ipv4/fib_frontend.c
4869 @@ -1007,7 +1007,9 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
4870 return -ENOENT;
4871 }
4872
4873 + rcu_read_lock();
4874 err = fib_table_dump(tb, skb, cb, &filter);
4875 + rcu_read_unlock();
4876 return skb->len ? : err;
4877 }
4878
4879 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
4880 index 10636fb6093e..85ba1453ba5c 100644
4881 --- a/net/ipv4/ip_gre.c
4882 +++ b/net/ipv4/ip_gre.c
4883 @@ -1149,6 +1149,24 @@ static int ipgre_netlink_parms(struct net_device *dev,
4884 if (data[IFLA_GRE_FWMARK])
4885 *fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);
4886
4887 + return 0;
4888 +}
4889 +
4890 +static int erspan_netlink_parms(struct net_device *dev,
4891 + struct nlattr *data[],
4892 + struct nlattr *tb[],
4893 + struct ip_tunnel_parm *parms,
4894 + __u32 *fwmark)
4895 +{
4896 + struct ip_tunnel *t = netdev_priv(dev);
4897 + int err;
4898 +
4899 + err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
4900 + if (err)
4901 + return err;
4902 + if (!data)
4903 + return 0;
4904 +
4905 if (data[IFLA_GRE_ERSPAN_VER]) {
4906 t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
4907
4908 @@ -1272,45 +1290,70 @@ static void ipgre_tap_setup(struct net_device *dev)
4909 ip_tunnel_setup(dev, gre_tap_net_id);
4910 }
4911
4912 -static int ipgre_newlink(struct net *src_net, struct net_device *dev,
4913 - struct nlattr *tb[], struct nlattr *data[],
4914 - struct netlink_ext_ack *extack)
4915 +static int
4916 +ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
4917 {
4918 - struct ip_tunnel_parm p;
4919 struct ip_tunnel_encap ipencap;
4920 - __u32 fwmark = 0;
4921 - int err;
4922
4923 if (ipgre_netlink_encap_parms(data, &ipencap)) {
4924 struct ip_tunnel *t = netdev_priv(dev);
4925 - err = ip_tunnel_encap_setup(t, &ipencap);
4926 + int err = ip_tunnel_encap_setup(t, &ipencap);
4927
4928 if (err < 0)
4929 return err;
4930 }
4931
4932 + return 0;
4933 +}
4934 +
4935 +static int ipgre_newlink(struct net *src_net, struct net_device *dev,
4936 + struct nlattr *tb[], struct nlattr *data[],
4937 + struct netlink_ext_ack *extack)
4938 +{
4939 + struct ip_tunnel_parm p;
4940 + __u32 fwmark = 0;
4941 + int err;
4942 +
4943 + err = ipgre_newlink_encap_setup(dev, data);
4944 + if (err)
4945 + return err;
4946 +
4947 err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
4948 if (err < 0)
4949 return err;
4950 return ip_tunnel_newlink(dev, tb, &p, fwmark);
4951 }
4952
4953 +static int erspan_newlink(struct net *src_net, struct net_device *dev,
4954 + struct nlattr *tb[], struct nlattr *data[],
4955 + struct netlink_ext_ack *extack)
4956 +{
4957 + struct ip_tunnel_parm p;
4958 + __u32 fwmark = 0;
4959 + int err;
4960 +
4961 + err = ipgre_newlink_encap_setup(dev, data);
4962 + if (err)
4963 + return err;
4964 +
4965 + err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
4966 + if (err)
4967 + return err;
4968 + return ip_tunnel_newlink(dev, tb, &p, fwmark);
4969 +}
4970 +
4971 static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
4972 struct nlattr *data[],
4973 struct netlink_ext_ack *extack)
4974 {
4975 struct ip_tunnel *t = netdev_priv(dev);
4976 - struct ip_tunnel_encap ipencap;
4977 __u32 fwmark = t->fwmark;
4978 struct ip_tunnel_parm p;
4979 int err;
4980
4981 - if (ipgre_netlink_encap_parms(data, &ipencap)) {
4982 - err = ip_tunnel_encap_setup(t, &ipencap);
4983 -
4984 - if (err < 0)
4985 - return err;
4986 - }
4987 + err = ipgre_newlink_encap_setup(dev, data);
4988 + if (err)
4989 + return err;
4990
4991 err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
4992 if (err < 0)
4993 @@ -1323,8 +1366,34 @@ static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
4994 t->parms.i_flags = p.i_flags;
4995 t->parms.o_flags = p.o_flags;
4996
4997 - if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
4998 - ipgre_link_update(dev, !tb[IFLA_MTU]);
4999 + ipgre_link_update(dev, !tb[IFLA_MTU]);
5000 +
5001 + return 0;
5002 +}
5003 +
5004 +static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
5005 + struct nlattr *data[],
5006 + struct netlink_ext_ack *extack)
5007 +{
5008 + struct ip_tunnel *t = netdev_priv(dev);
5009 + __u32 fwmark = t->fwmark;
5010 + struct ip_tunnel_parm p;
5011 + int err;
5012 +
5013 + err = ipgre_newlink_encap_setup(dev, data);
5014 + if (err)
5015 + return err;
5016 +
5017 + err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
5018 + if (err < 0)
5019 + return err;
5020 +
5021 + err = ip_tunnel_changelink(dev, tb, &p, fwmark);
5022 + if (err < 0)
5023 + return err;
5024 +
5025 + t->parms.i_flags = p.i_flags;
5026 + t->parms.o_flags = p.o_flags;
5027
5028 return 0;
5029 }
5030 @@ -1515,8 +1584,8 @@ static struct rtnl_link_ops erspan_link_ops __read_mostly = {
5031 .priv_size = sizeof(struct ip_tunnel),
5032 .setup = erspan_setup,
5033 .validate = erspan_validate,
5034 - .newlink = ipgre_newlink,
5035 - .changelink = ipgre_changelink,
5036 + .newlink = erspan_newlink,
5037 + .changelink = erspan_changelink,
5038 .dellink = ip_tunnel_dellink,
5039 .get_size = ipgre_get_size,
5040 .fill_info = ipgre_fill_info,
5041 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
5042 index 79eef5db336a..8ecaf0f26973 100644
5043 --- a/net/ipv4/ip_vti.c
5044 +++ b/net/ipv4/ip_vti.c
5045 @@ -187,17 +187,39 @@ static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
5046 int mtu;
5047
5048 if (!dst) {
5049 - struct rtable *rt;
5050 -
5051 - fl->u.ip4.flowi4_oif = dev->ifindex;
5052 - fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
5053 - rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
5054 - if (IS_ERR(rt)) {
5055 + switch (skb->protocol) {
5056 + case htons(ETH_P_IP): {
5057 + struct rtable *rt;
5058 +
5059 + fl->u.ip4.flowi4_oif = dev->ifindex;
5060 + fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
5061 + rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
5062 + if (IS_ERR(rt)) {
5063 + dev->stats.tx_carrier_errors++;
5064 + goto tx_error_icmp;
5065 + }
5066 + dst = &rt->dst;
5067 + skb_dst_set(skb, dst);
5068 + break;
5069 + }
5070 +#if IS_ENABLED(CONFIG_IPV6)
5071 + case htons(ETH_P_IPV6):
5072 + fl->u.ip6.flowi6_oif = dev->ifindex;
5073 + fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
5074 + dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
5075 + if (dst->error) {
5076 + dst_release(dst);
5077 + dst = NULL;
5078 + dev->stats.tx_carrier_errors++;
5079 + goto tx_error_icmp;
5080 + }
5081 + skb_dst_set(skb, dst);
5082 + break;
5083 +#endif
5084 + default:
5085 dev->stats.tx_carrier_errors++;
5086 goto tx_error_icmp;
5087 }
5088 - dst = &rt->dst;
5089 - skb_dst_set(skb, dst);
5090 }
5091
5092 dst_hold(dst);
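
vti_xmit() now dispatches on skb->protocol, which is stored in network byte order; the kernel can write case htons(ETH_P_IP): because its htons() folds to a compile-time constant there. In portable C, htons() is not guaranteed to be a constant expression, so an equivalent sketch uses if/else (constants and strings below are illustrative):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP   0x0800
#define ETH_P_IPV6 0x86DD

static const char *classify(uint16_t proto_be)
{
        if (proto_be == htons(ETH_P_IP))
                return "IPv4 route lookup";
        if (proto_be == htons(ETH_P_IPV6))
                return "IPv6 route lookup";
        return "unsupported: tx_carrier_errors++";
}

int main(void)
{
        printf("%s\n", classify(htons(ETH_P_IP)));
        printf("%s\n", classify(htons(0x0806)));   /* ARP: error path */
        return 0;
}
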
5093 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
5094 index deb466fc3d1f..e378ff17f8c6 100644
5095 --- a/net/ipv4/tcp.c
5096 +++ b/net/ipv4/tcp.c
5097 @@ -2943,8 +2943,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
5098 err = -EPERM;
5099 else if (tp->repair_queue == TCP_SEND_QUEUE)
5100 WRITE_ONCE(tp->write_seq, val);
5101 - else if (tp->repair_queue == TCP_RECV_QUEUE)
5102 + else if (tp->repair_queue == TCP_RECV_QUEUE) {
5103 WRITE_ONCE(tp->rcv_nxt, val);
5104 + WRITE_ONCE(tp->copied_seq, val);
5105 + }
5106 else
5107 err = -EINVAL;
5108 break;
5109 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
5110 index 660b24fe041e..c8d03c1b4c6b 100644
5111 --- a/net/ipv4/tcp_output.c
5112 +++ b/net/ipv4/tcp_output.c
5113 @@ -1048,6 +1048,10 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
5114
5115 if (unlikely(!skb))
5116 return -ENOBUFS;
5117 +	/* retransmit skbs might have a non-zero value in skb->dev
5118 +	 * because skb->dev is aliased with skb->rbnode.rb_left
5119 +	 */
5120 + skb->dev = NULL;
5121 }
5122
5123 inet = inet_sk(sk);
5124 @@ -2976,8 +2980,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
5125
5126 tcp_skb_tsorted_save(skb) {
5127 nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
5128 - err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
5129 - -ENOBUFS;
5130 + if (nskb) {
5131 + nskb->dev = NULL;
5132 + err = tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC);
5133 + } else {
5134 + err = -ENOBUFS;
5135 + }
5136 } tcp_skb_tsorted_restore(skb);
5137
5138 if (!err) {
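
The skb->dev = NULL writes added above exist because, per the new comment, the dev pointer in struct sk_buff shares a union with the rb_node that the TCP retransmit queue uses, so a retransmitted skb can reach the xmit path carrying rbtree linkage bytes where a device pointer is expected. A stand-alone demonstration of that aliasing (field layout deliberately simplified, not the real struct sk_buff):

#include <stdint.h>
#include <stdio.h>

struct rb_node_demo { void *parent_color, *rb_right, *rb_left; };

struct skb_demo {
        union {
                struct rb_node_demo rbnode;            /* rbtree view */
                struct { void *next, *prev, *dev; };   /* list/xmit view */
        };
};

int main(void)
{
        struct skb_demo skb = { 0 };

        skb.rbnode.rb_left = (void *)(uintptr_t)0xdeadbeef; /* tree linkage ... */
        /* ... reads back as a bogus device pointer in the other view: */
        printf("skb.dev = %p\n", skb.dev);
        skb.dev = NULL;   /* the added fix: clear before the driver sees it */
        return 0;
}
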
5139 diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
5140 index 524006aa0d78..cc6180e08a4f 100644
5141 --- a/net/ipv6/ip6_vti.c
5142 +++ b/net/ipv6/ip6_vti.c
5143 @@ -311,7 +311,7 @@ static int vti6_rcv(struct sk_buff *skb)
5144
5145 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
5146 rcu_read_unlock();
5147 - return 0;
5148 + goto discard;
5149 }
5150
5151 ipv6h = ipv6_hdr(skb);
5152 @@ -450,15 +450,33 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
5153 int mtu;
5154
5155 if (!dst) {
5156 - fl->u.ip6.flowi6_oif = dev->ifindex;
5157 - fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
5158 - dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
5159 - if (dst->error) {
5160 - dst_release(dst);
5161 - dst = NULL;
5162 + switch (skb->protocol) {
5163 + case htons(ETH_P_IP): {
5164 + struct rtable *rt;
5165 +
5166 + fl->u.ip4.flowi4_oif = dev->ifindex;
5167 + fl->u.ip4.flowi4_flags |= FLOWI_FLAG_ANYSRC;
5168 + rt = __ip_route_output_key(dev_net(dev), &fl->u.ip4);
5169 + if (IS_ERR(rt))
5170 + goto tx_err_link_failure;
5171 + dst = &rt->dst;
5172 + skb_dst_set(skb, dst);
5173 + break;
5174 + }
5175 + case htons(ETH_P_IPV6):
5176 + fl->u.ip6.flowi6_oif = dev->ifindex;
5177 + fl->u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC;
5178 + dst = ip6_route_output(dev_net(dev), NULL, &fl->u.ip6);
5179 + if (dst->error) {
5180 + dst_release(dst);
5181 + dst = NULL;
5182 + goto tx_err_link_failure;
5183 + }
5184 + skb_dst_set(skb, dst);
5185 + break;
5186 + default:
5187 goto tx_err_link_failure;
5188 }
5189 - skb_dst_set(skb, dst);
5190 }
5191
5192 dst_hold(dst);
5193 diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
5194 index c8ad20c28c43..70ea4cc126d1 100644
5195 --- a/net/mac80211/debugfs_sta.c
5196 +++ b/net/mac80211/debugfs_sta.c
5197 @@ -5,7 +5,7 @@
5198 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5199 * Copyright 2013-2014 Intel Mobile Communications GmbH
5200 * Copyright(c) 2016 Intel Deutschland GmbH
5201 - * Copyright (C) 2018 - 2019 Intel Corporation
5202 + * Copyright (C) 2018 - 2020 Intel Corporation
5203 */
5204
5205 #include <linux/debugfs.h>
5206 @@ -78,6 +78,7 @@ static const char * const sta_flag_names[] = {
5207 FLAG(MPSP_OWNER),
5208 FLAG(MPSP_RECIPIENT),
5209 FLAG(PS_DELIVER),
5210 + FLAG(USES_ENCRYPTION),
5211 #undef FLAG
5212 };
5213
5214 diff --git a/net/mac80211/key.c b/net/mac80211/key.c
5215 index 0f889b919b06..efc1acc6543c 100644
5216 --- a/net/mac80211/key.c
5217 +++ b/net/mac80211/key.c
5218 @@ -6,7 +6,7 @@
5219 * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net>
5220 * Copyright 2013-2014 Intel Mobile Communications GmbH
5221 * Copyright 2015-2017 Intel Deutschland GmbH
5222 - * Copyright 2018-2019 Intel Corporation
5223 + * Copyright 2018-2020 Intel Corporation
5224 */
5225
5226 #include <linux/if_ether.h>
5227 @@ -262,22 +262,29 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
5228 sta ? sta->sta.addr : bcast_addr, ret);
5229 }
5230
5231 -int ieee80211_set_tx_key(struct ieee80211_key *key)
5232 +static int _ieee80211_set_tx_key(struct ieee80211_key *key, bool force)
5233 {
5234 struct sta_info *sta = key->sta;
5235 struct ieee80211_local *local = key->local;
5236
5237 assert_key_lock(local);
5238
5239 + set_sta_flag(sta, WLAN_STA_USES_ENCRYPTION);
5240 +
5241 sta->ptk_idx = key->conf.keyidx;
5242
5243 - if (!ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
5244 + if (force || !ieee80211_hw_check(&local->hw, AMPDU_KEYBORDER_SUPPORT))
5245 clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
5246 ieee80211_check_fast_xmit(sta);
5247
5248 return 0;
5249 }
5250
5251 +int ieee80211_set_tx_key(struct ieee80211_key *key)
5252 +{
5253 + return _ieee80211_set_tx_key(key, false);
5254 +}
5255 +
5256 static void ieee80211_pairwise_rekey(struct ieee80211_key *old,
5257 struct ieee80211_key *new)
5258 {
5259 @@ -441,11 +448,8 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
5260 if (pairwise) {
5261 rcu_assign_pointer(sta->ptk[idx], new);
5262 if (new &&
5263 - !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX)) {
5264 - sta->ptk_idx = idx;
5265 - clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
5266 - ieee80211_check_fast_xmit(sta);
5267 - }
5268 + !(new->conf.flags & IEEE80211_KEY_FLAG_NO_AUTO_TX))
5269 + _ieee80211_set_tx_key(new, true);
5270 } else {
5271 rcu_assign_pointer(sta->gtk[idx], new);
5272 }
5273 diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
5274 index d69983370381..38a0383dfbcf 100644
5275 --- a/net/mac80211/mesh_hwmp.c
5276 +++ b/net/mac80211/mesh_hwmp.c
5277 @@ -1152,7 +1152,8 @@ int mesh_nexthop_resolve(struct ieee80211_sub_if_data *sdata,
5278 }
5279 }
5280
5281 - if (!(mpath->flags & MESH_PATH_RESOLVING))
5282 + if (!(mpath->flags & MESH_PATH_RESOLVING) &&
5283 + mesh_path_sel_is_hwmp(sdata))
5284 mesh_queue_preq(mpath, PREQ_Q_F_START);
5285
5286 if (skb_queue_len(&mpath->frame_queue) >= MESH_FRAME_QUEUE_LEN)
5287 diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
5288 index 8d3a2389b055..21b1422b1b1c 100644
5289 --- a/net/mac80211/sta_info.c
5290 +++ b/net/mac80211/sta_info.c
5291 @@ -4,7 +4,7 @@
5292 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5293 * Copyright 2013-2014 Intel Mobile Communications GmbH
5294 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
5295 - * Copyright (C) 2018-2019 Intel Corporation
5296 + * Copyright (C) 2018-2020 Intel Corporation
5297 */
5298
5299 #include <linux/module.h>
5300 @@ -1032,6 +1032,11 @@ static void __sta_info_destroy_part2(struct sta_info *sta)
5301 might_sleep();
5302 lockdep_assert_held(&local->sta_mtx);
5303
5304 + while (sta->sta_state == IEEE80211_STA_AUTHORIZED) {
5305 + ret = sta_info_move_state(sta, IEEE80211_STA_ASSOC);
5306 + WARN_ON_ONCE(ret);
5307 + }
5308 +
5309 /* now keys can no longer be reached */
5310 ieee80211_free_sta_keys(local, sta);
5311
5312 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
5313 index 369c2dddce52..be1d9dfa760d 100644
5314 --- a/net/mac80211/sta_info.h
5315 +++ b/net/mac80211/sta_info.h
5316 @@ -98,6 +98,7 @@ enum ieee80211_sta_info_flags {
5317 WLAN_STA_MPSP_OWNER,
5318 WLAN_STA_MPSP_RECIPIENT,
5319 WLAN_STA_PS_DELIVER,
5320 + WLAN_STA_USES_ENCRYPTION,
5321
5322 NUM_WLAN_STA_FLAGS,
5323 };
5324 diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5325 index cbd273c0b275..c8fc29f0efcf 100644
5326 --- a/net/mac80211/tx.c
5327 +++ b/net/mac80211/tx.c
5328 @@ -5,7 +5,7 @@
5329 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
5330 * Copyright 2007 Johannes Berg <johannes@sipsolutions.net>
5331 * Copyright 2013-2014 Intel Mobile Communications GmbH
5332 - * Copyright (C) 2018 Intel Corporation
5333 + * Copyright (C) 2018, 2020 Intel Corporation
5334 *
5335 * Transmit and frame generation functions.
5336 */
5337 @@ -590,10 +590,13 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
5338 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
5339 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
5340
5341 - if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT))
5342 + if (unlikely(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) {
5343 tx->key = NULL;
5344 - else if (tx->sta &&
5345 - (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
5346 + return TX_CONTINUE;
5347 + }
5348 +
5349 + if (tx->sta &&
5350 + (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx])))
5351 tx->key = key;
5352 else if (ieee80211_is_group_privacy_action(tx->skb) &&
5353 (key = rcu_dereference(tx->sdata->default_multicast_key)))
5354 @@ -654,6 +657,9 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
5355 if (!skip_hw && tx->key &&
5356 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
5357 info->control.hw_key = &tx->key->conf;
5358 + } else if (!ieee80211_is_mgmt(hdr->frame_control) && tx->sta &&
5359 + test_sta_flag(tx->sta, WLAN_STA_USES_ENCRYPTION)) {
5360 + return TX_DROP;
5361 }
5362
5363 return TX_CONTINUE;
5364 @@ -5061,6 +5067,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
5365 struct ieee80211_local *local = sdata->local;
5366 struct sk_buff *skb;
5367 struct ethhdr *ehdr;
5368 + u32 ctrl_flags = 0;
5369 u32 flags;
5370
5371 /* Only accept CONTROL_PORT_PROTOCOL configured in CONNECT/ASSOCIATE
5372 @@ -5070,6 +5077,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
5373 proto != cpu_to_be16(ETH_P_PREAUTH))
5374 return -EINVAL;
5375
5376 + if (proto == sdata->control_port_protocol)
5377 + ctrl_flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO;
5378 +
5379 if (unencrypted)
5380 flags = IEEE80211_TX_INTFL_DONT_ENCRYPT;
5381 else
5382 @@ -5095,7 +5105,7 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
5383 skb_reset_mac_header(skb);
5384
5385 local_bh_disable();
5386 - __ieee80211_subif_start_xmit(skb, skb->dev, flags, 0);
5387 + __ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags);
5388 local_bh_enable();
5389
5390 return 0;
5391 diff --git a/net/netfilter/nf_flow_table_ip.c b/net/netfilter/nf_flow_table_ip.c
5392 index b9e7dd6e60ce..e92aa6b7eb80 100644
5393 --- a/net/netfilter/nf_flow_table_ip.c
5394 +++ b/net/netfilter/nf_flow_table_ip.c
5395 @@ -189,6 +189,7 @@ static int nf_flow_tuple_ip(struct sk_buff *skb, const struct net_device *dev,
5396 if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
5397 return -1;
5398
5399 + iph = ip_hdr(skb);
5400 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
5401
5402 tuple->src_v4.s_addr = iph->saddr;
5403 @@ -449,6 +450,7 @@ static int nf_flow_tuple_ipv6(struct sk_buff *skb, const struct net_device *dev,
5404 if (!pskb_may_pull(skb, thoff + sizeof(*ports)))
5405 return -1;
5406
5407 + ip6h = ipv6_hdr(skb);
5408 ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
5409
5410 tuple->src_v6 = ip6h->saddr;
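
Both hunks above re-fetch the header pointer because pskb_may_pull() may reallocate the skb head, leaving any previously cached ip_hdr()/ipv6_hdr() value dangling. The same class of bug in plain C, with realloc() playing the role of the pull:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *buf = malloc(4);
        if (!buf)
                return 1;
        memcpy(buf, "abc", 4);
        char *hdr = buf;               /* cached pointer into the buffer */

        buf = realloc(buf, 1 << 20);   /* may move the allocation */
        if (!buf)
                return 1;
        /* 'hdr' may now dangle; dereferencing it is undefined behaviour.
         * The fix mirrors the patch: re-derive the pointer afterwards. */
        hdr = buf;
        printf("%s\n", hdr);
        free(buf);
        return 0;
}
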
5411 diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
5412 index aba11c2333f3..3087e23297db 100644
5413 --- a/net/netfilter/nft_fwd_netdev.c
5414 +++ b/net/netfilter/nft_fwd_netdev.c
5415 @@ -28,6 +28,9 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
5416 struct nft_fwd_netdev *priv = nft_expr_priv(expr);
5417 int oif = regs->data[priv->sreg_dev];
5418
5419 + /* This is used by ifb only. */
5420 + skb_set_redirected(pkt->skb, true);
5421 +
5422 nf_fwd_netdev_egress(pkt, oif);
5423 regs->verdict.code = NF_STOLEN;
5424 }
5425 @@ -190,6 +193,13 @@ nla_put_failure:
5426 return -1;
5427 }
5428
5429 +static int nft_fwd_validate(const struct nft_ctx *ctx,
5430 + const struct nft_expr *expr,
5431 + const struct nft_data **data)
5432 +{
5433 + return nft_chain_validate_hooks(ctx->chain, (1 << NF_NETDEV_INGRESS));
5434 +}
5435 +
5436 static struct nft_expr_type nft_fwd_netdev_type;
5437 static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
5438 .type = &nft_fwd_netdev_type,
5439 @@ -197,6 +207,7 @@ static const struct nft_expr_ops nft_fwd_neigh_netdev_ops = {
5440 .eval = nft_fwd_neigh_eval,
5441 .init = nft_fwd_neigh_init,
5442 .dump = nft_fwd_neigh_dump,
5443 + .validate = nft_fwd_validate,
5444 };
5445
5446 static const struct nft_expr_ops nft_fwd_netdev_ops = {
5447 @@ -205,6 +216,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
5448 .eval = nft_fwd_netdev_eval,
5449 .init = nft_fwd_netdev_init,
5450 .dump = nft_fwd_netdev_dump,
5451 + .validate = nft_fwd_validate,
5452 .offload = nft_fwd_netdev_offload,
5453 };
5454
5455 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5456 index 20edb7c25e22..1d63ab3a878a 100644
5457 --- a/net/packet/af_packet.c
5458 +++ b/net/packet/af_packet.c
5459 @@ -2172,6 +2172,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
5460 struct timespec ts;
5461 __u32 ts_status;
5462 bool is_drop_n_account = false;
5463 + unsigned int slot_id = 0;
5464 bool do_vnet = false;
5465
5466 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
5467 @@ -2274,6 +2275,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
5468 if (!h.raw)
5469 goto drop_n_account;
5470
5471 + if (po->tp_version <= TPACKET_V2) {
5472 + slot_id = po->rx_ring.head;
5473 + if (test_bit(slot_id, po->rx_ring.rx_owner_map))
5474 + goto drop_n_account;
5475 + __set_bit(slot_id, po->rx_ring.rx_owner_map);
5476 + }
5477 +
5478 if (do_vnet &&
5479 virtio_net_hdr_from_skb(skb, h.raw + macoff -
5480 sizeof(struct virtio_net_hdr),
5481 @@ -2379,7 +2387,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
5482 #endif
5483
5484 if (po->tp_version <= TPACKET_V2) {
5485 + spin_lock(&sk->sk_receive_queue.lock);
5486 __packet_set_status(po, h.raw, status);
5487 + __clear_bit(slot_id, po->rx_ring.rx_owner_map);
5488 + spin_unlock(&sk->sk_receive_queue.lock);
5489 sk->sk_data_ready(sk);
5490 } else {
5491 prb_clear_blk_fill_status(&po->rx_ring);
5492 @@ -4276,6 +4287,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5493 {
5494 struct pgv *pg_vec = NULL;
5495 struct packet_sock *po = pkt_sk(sk);
5496 + unsigned long *rx_owner_map = NULL;
5497 int was_running, order = 0;
5498 struct packet_ring_buffer *rb;
5499 struct sk_buff_head *rb_queue;
5500 @@ -4361,6 +4373,12 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5501 }
5502 break;
5503 default:
5504 + if (!tx_ring) {
5505 + rx_owner_map = bitmap_alloc(req->tp_frame_nr,
5506 + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
5507 + if (!rx_owner_map)
5508 + goto out_free_pg_vec;
5509 + }
5510 break;
5511 }
5512 }
5513 @@ -4390,6 +4408,8 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5514 err = 0;
5515 spin_lock_bh(&rb_queue->lock);
5516 swap(rb->pg_vec, pg_vec);
5517 + if (po->tp_version <= TPACKET_V2)
5518 + swap(rb->rx_owner_map, rx_owner_map);
5519 rb->frame_max = (req->tp_frame_nr - 1);
5520 rb->head = 0;
5521 rb->frame_size = req->tp_frame_size;
5522 @@ -4421,6 +4441,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
5523 }
5524
5525 out_free_pg_vec:
5526 + bitmap_free(rx_owner_map);
5527 if (pg_vec)
5528 free_pg_vec(pg_vec, order, req->tp_block_nr);
5529 out:
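
The new rx_owner_map is a bitmap with one bit per TPACKET_V1/V2 ring slot: the bit is set when the kernel claims a slot and cleared together with the status write, under the receive-queue lock, so a slot whose status userspace has flipped back cannot be refilled while a previous fill is still in flight. A minimal single-threaded sketch of the claim/complete protocol (locking omitted; names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static unsigned long owner_map;        /* one bit per ring slot */

static bool claim(unsigned int slot)
{
        if (owner_map & (1UL << slot))
                return false;          /* still owned: drop_n_account */
        owner_map |= 1UL << slot;
        return true;
}

static void complete(unsigned int slot)
{
        /* in the kernel this clear happens together with the status
         * write, under sk->sk_receive_queue.lock */
        owner_map &= ~(1UL << slot);
}

int main(void)
{
        printf("claim 3: %d\n", claim(3));   /* 1 */
        printf("claim 3: %d\n", claim(3));   /* 0: would drop */
        complete(3);
        printf("claim 3: %d\n", claim(3));   /* 1 again */
        return 0;
}
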
5530 diff --git a/net/packet/internal.h b/net/packet/internal.h
5531 index 82fb2b10f790..907f4cd2a718 100644
5532 --- a/net/packet/internal.h
5533 +++ b/net/packet/internal.h
5534 @@ -70,7 +70,10 @@ struct packet_ring_buffer {
5535
5536 unsigned int __percpu *pending_refcnt;
5537
5538 - struct tpacket_kbdq_core prb_bdqc;
5539 + union {
5540 + unsigned long *rx_owner_map;
5541 + struct tpacket_kbdq_core prb_bdqc;
5542 + };
5543 };
5544
5545 extern struct mutex fanout_mutex;
5546 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
5547 index 4a6ca9723a12..a293238fe1e7 100644
5548 --- a/net/rxrpc/af_rxrpc.c
5549 +++ b/net/rxrpc/af_rxrpc.c
5550 @@ -371,44 +371,17 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
5551 * rxrpc_kernel_check_life - Check to see whether a call is still alive
5552 * @sock: The socket the call is on
5553 * @call: The call to check
5554 - * @_life: Where to store the life value
5555 *
5556 - * Allow a kernel service to find out whether a call is still alive - ie. we're
5557 - * getting ACKs from the server. Passes back in *_life a number representing
5558 - * the life state which can be compared to that returned by a previous call and
5559 - * return true if the call is still alive.
5560 - *
5561 - * If the life state stalls, rxrpc_kernel_probe_life() should be called and
5562 - * then 2RTT waited.
5563 + * Allow a kernel service to find out whether a call is still alive -
4564 + * ie. whether it has yet to complete.
5565 */
5566 bool rxrpc_kernel_check_life(const struct socket *sock,
5567 - const struct rxrpc_call *call,
5568 - u32 *_life)
5569 + const struct rxrpc_call *call)
5570 {
5571 - *_life = call->acks_latest;
5572 return call->state != RXRPC_CALL_COMPLETE;
5573 }
5574 EXPORT_SYMBOL(rxrpc_kernel_check_life);
5575
5576 -/**
5577 - * rxrpc_kernel_probe_life - Poke the peer to see if it's still alive
5578 - * @sock: The socket the call is on
5579 - * @call: The call to check
5580 - *
5581 - * In conjunction with rxrpc_kernel_check_life(), allow a kernel service to
5582 - * find out whether a call is still alive by pinging it. This should cause the
5583 - * life state to be bumped in about 2*RTT.
5584 - *
5585 - * The must be called in TASK_RUNNING state on pain of might_sleep() objecting.
5586 - */
5587 -void rxrpc_kernel_probe_life(struct socket *sock, struct rxrpc_call *call)
5588 -{
5589 - rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
5590 - rxrpc_propose_ack_ping_for_check_life);
5591 - rxrpc_send_ack_packet(call, true, NULL);
5592 -}
5593 -EXPORT_SYMBOL(rxrpc_kernel_probe_life);
5594 -
5595 /**
5596 * rxrpc_kernel_get_epoch - Retrieve the epoch value from a call.
5597 * @sock: The socket the call is on
5598 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
5599 index 7d730c438404..394d18857979 100644
5600 --- a/net/rxrpc/ar-internal.h
5601 +++ b/net/rxrpc/ar-internal.h
5602 @@ -675,7 +675,6 @@ struct rxrpc_call {
5603
5604 /* transmission-phase ACK management */
5605 ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
5606 - rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
5607 rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
5608 rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
5609 rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
5610 diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
5611 index ef10fbf71b15..69e09d69c896 100644
5612 --- a/net/rxrpc/input.c
5613 +++ b/net/rxrpc/input.c
5614 @@ -882,7 +882,6 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
5615 before(prev_pkt, call->ackr_prev_seq))
5616 goto out;
5617 call->acks_latest_ts = skb->tstamp;
5618 - call->acks_latest = sp->hdr.serial;
5619
5620 call->ackr_first_seq = first_soft_ack;
5621 call->ackr_prev_seq = prev_pkt;
5622 diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
5623 index f3232a00970f..0586546c20d7 100644
5624 --- a/net/sched/act_ct.c
5625 +++ b/net/sched/act_ct.c
5626 @@ -739,7 +739,7 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
5627 if (goto_ch)
5628 tcf_chain_put_by_act(goto_ch);
5629 if (params)
5630 - kfree_rcu(params, rcu);
5631 + call_rcu(&params->rcu, tcf_ct_params_free);
5632 if (res == ACT_P_CREATED)
5633 tcf_idr_insert(tn, *a);
5634
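
The act_ct error path switches from kfree_rcu() to call_rcu() because tcf_ct_params_free() has work to do beyond freeing the outer struct (it drops the nested conntrack state), and kfree_rcu() can only kfree the object itself. A userspace sketch of call_rcu with a deep-free callback, assuming liburcu (gcc demo.c -lurcu); the struct and field names are illustrative:

#include <urcu.h>
#include <stdlib.h>

struct params {
        char *nested;                  /* kfree_rcu() would leak this */
        struct rcu_head rcu;
};

static void params_free(struct rcu_head *head)
{
        struct params *p = caa_container_of(head, struct params, rcu);

        free(p->nested);               /* drop nested state first ... */
        free(p);                       /* ... then the object itself */
}

int main(void)
{
        struct params *p;

        rcu_register_thread();
        p = calloc(1, sizeof(*p));
        p->nested = malloc(16);
        call_rcu(&p->rcu, params_free);
        rcu_barrier();                 /* let the callback run before exit */
        rcu_unregister_thread();
        return 0;
}
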
5635 diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
5636 index f0df0d90b8bd..27f624971121 100644
5637 --- a/net/sched/act_mirred.c
5638 +++ b/net/sched/act_mirred.c
5639 @@ -284,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
5640
5641 /* mirror is always swallowed */
5642 if (is_redirect) {
5643 - skb2->tc_redirected = 1;
5644 - skb2->tc_from_ingress = skb2->tc_at_ingress;
5645 - if (skb2->tc_from_ingress)
5646 - skb2->tstamp = 0;
5647 + skb_set_redirected(skb2, skb2->tc_at_ingress);
5648 +
5649 /* let's the caller reinsert the packet, if possible */
5650 if (use_reinsert) {
5651 res->ingress = want_ingress;
5652 diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
5653 index 6f8786b06bde..5efa3e7ace15 100644
5654 --- a/net/sched/cls_route.c
5655 +++ b/net/sched/cls_route.c
5656 @@ -534,8 +534,8 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
5657 fp = &b->ht[h];
5658 for (pfp = rtnl_dereference(*fp); pfp;
5659 fp = &pfp->next, pfp = rtnl_dereference(*fp)) {
5660 - if (pfp == f) {
5661 - *fp = f->next;
5662 + if (pfp == fold) {
5663 + rcu_assign_pointer(*fp, fold->next);
5664 break;
5665 }
5666 }
5667 diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
5668 index 09b7dc5fe7e0..9904299424a1 100644
5669 --- a/net/sched/cls_tcindex.c
5670 +++ b/net/sched/cls_tcindex.c
5671 @@ -261,8 +261,10 @@ static void tcindex_partial_destroy_work(struct work_struct *work)
5672 struct tcindex_data,
5673 rwork);
5674
5675 + rtnl_lock();
5676 kfree(p->perfect);
5677 kfree(p);
5678 + rtnl_unlock();
5679 }
5680
5681 static void tcindex_free_perfect_hash(struct tcindex_data *cp)
5682 @@ -357,6 +359,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base,
5683
5684 if (tcindex_alloc_perfect_hash(net, cp) < 0)
5685 goto errout;
5686 + cp->alloc_hash = cp->hash;
5687 for (i = 0; i < min(cp->hash, p->hash); i++)
5688 cp->perfect[i].res = p->perfect[i].res;
5689 balloc = 1;
5690 diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
5691 index b2905b03a432..2eaac2ff380f 100644
5692 --- a/net/sched/sch_cbs.c
5693 +++ b/net/sched/sch_cbs.c
5694 @@ -181,6 +181,11 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
5695 s64 credits;
5696 int len;
5697
5698 + /* The previous packet is still being sent */
5699 + if (now < q->last) {
5700 + qdisc_watchdog_schedule_ns(&q->watchdog, q->last);
5701 + return NULL;
5702 + }
5703 if (q->credits < 0) {
5704 credits = timediff_to_credits(now - q->last, q->idleslope);
5705
5706 @@ -212,7 +217,12 @@ static struct sk_buff *cbs_dequeue_soft(struct Qdisc *sch)
5707 credits += q->credits;
5708
5709 q->credits = max_t(s64, credits, q->locredit);
5710 - q->last = now;
5711 +	/* Estimate, in ns, of when the last byte of the packet is transmitted */
5712 + if (unlikely(atomic64_read(&q->port_rate) == 0))
5713 + q->last = now;
5714 + else
5715 + q->last = now + div64_s64(len * NSEC_PER_SEC,
5716 + atomic64_read(&q->port_rate));
5717
5718 return skb;
5719 }
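
The cbs change stops timestamping q->last at dequeue and instead books it as the estimated instant the last byte of the packet leaves the wire: last = now + len * NSEC_PER_SEC / port_rate, where a port_rate in bytes per second is what makes the quotient come out in nanoseconds. A worked example of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

int main(void)
{
        int64_t port_rate = 100000000LL / 8;   /* 100 Mbit/s in bytes/s */
        int64_t len = 1500;                    /* packet length, bytes */
        int64_t tx_ns = len * NSEC_PER_SEC / port_rate;

        /* 1500 * 1e9 / 12.5e6 = 120000 ns on the wire */
        printf("last byte leaves at now + %lld ns\n", (long long)tx_ns);
        return 0;
}
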
5720 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
5721 index 321c132747ce..dbb6a14968ef 100644
5722 --- a/net/wireless/nl80211.c
5723 +++ b/net/wireless/nl80211.c
5724 @@ -16407,7 +16407,7 @@ void cfg80211_sta_opmode_change_notify(struct net_device *dev, const u8 *mac,
5725 goto nla_put_failure;
5726
5727 if ((sta_opmode->changed & STA_OPMODE_MAX_BW_CHANGED) &&
5728 - nla_put_u8(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
5729 + nla_put_u32(msg, NL80211_ATTR_CHANNEL_WIDTH, sta_opmode->bw))
5730 goto nla_put_failure;
5731
5732 if ((sta_opmode->changed & STA_OPMODE_N_SS_CHANGED) &&
5733 diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
5734 index 189ef15acbbc..64486ad81341 100644
5735 --- a/net/xfrm/xfrm_device.c
5736 +++ b/net/xfrm/xfrm_device.c
5737 @@ -390,6 +390,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
5738 return xfrm_dev_feat_change(dev);
5739
5740 case NETDEV_DOWN:
5741 + case NETDEV_UNREGISTER:
5742 return xfrm_dev_down(dev);
5743 }
5744 return NOTIFY_DONE;
5745 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
5746 index f2d1e573ea55..264cf05a4eaa 100644
5747 --- a/net/xfrm/xfrm_policy.c
5748 +++ b/net/xfrm/xfrm_policy.c
5749 @@ -431,7 +431,9 @@ EXPORT_SYMBOL(xfrm_policy_destroy);
5750
5751 static void xfrm_policy_kill(struct xfrm_policy *policy)
5752 {
5753 + write_lock_bh(&policy->lock);
5754 policy->walk.dead = 1;
5755 + write_unlock_bh(&policy->lock);
5756
5757 atomic_inc(&policy->genid);
5758
5759 diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
5760 index b88ba45ff1ac..e6cfaa680ef3 100644
5761 --- a/net/xfrm/xfrm_user.c
5762 +++ b/net/xfrm/xfrm_user.c
5763 @@ -110,7 +110,8 @@ static inline int verify_sec_ctx_len(struct nlattr **attrs)
5764 return 0;
5765
5766 uctx = nla_data(rt);
5767 - if (uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
5768 + if (uctx->len > nla_len(rt) ||
5769 + uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
5770 return -EINVAL;
5771
5772 return 0;
5773 @@ -2273,6 +2274,9 @@ static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
5774 xfrm_mark_get(attrs, &mark);
5775
5776 err = verify_newpolicy_info(&ua->policy);
5777 + if (err)
5778 + goto free_state;
5779 + err = verify_sec_ctx_len(attrs);
5780 if (err)
5781 goto free_state;
5782
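
The verify_sec_ctx_len() fix adds the missing upper bound: the attribute's self-described length must not exceed the payload actually received, on top of the existing internal-consistency check. The generic shape of that two-sided validation, with a simplified stand-in for struct xfrm_user_sec_ctx:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified stand-in; the real struct has more header fields */
struct sec_ctx { uint16_t len; uint16_t ctx_len; /* ctx bytes follow */ };

static int verify(const void *payload, size_t payload_len)
{
        struct sec_ctx c;

        if (payload_len < sizeof(c))
                return -1;
        memcpy(&c, payload, sizeof(c));
        if (c.len > payload_len ||               /* the added bound */
            c.len != sizeof(c) + c.ctx_len)      /* the existing check */
                return -1;
        return 0;
}

int main(void)
{
        struct { struct sec_ctx c; char body[4]; } msg = {
                { sizeof(msg), 4 }, "abcd"
        };
        printf("consistent: %d\n", verify(&msg, sizeof(msg)));      /*  0 */
        printf("truncated:  %d\n", verify(&msg, sizeof(msg) - 2));  /* -1 */
        return 0;
}
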
5783 diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
5784 index 5c6c3fd557d7..b3b7270300de 100644
5785 --- a/scripts/dtc/dtc-lexer.l
5786 +++ b/scripts/dtc/dtc-lexer.l
5787 @@ -23,7 +23,6 @@ LINECOMMENT "//".*\n
5788 #include "srcpos.h"
5789 #include "dtc-parser.tab.h"
5790
5791 -YYLTYPE yylloc;
5792 extern bool treesource_error;
5793
5794 /* CAUTION: this will stop working if we ever use yyless() or yyunput() */
5795 diff --git a/tools/perf/Makefile b/tools/perf/Makefile
5796 index 7902a5681fc8..b8fc7d972be9 100644
5797 --- a/tools/perf/Makefile
5798 +++ b/tools/perf/Makefile
5799 @@ -35,7 +35,7 @@ endif
5800 # Only pass canonical directory names as the output directory:
5801 #
5802 ifneq ($(O),)
5803 - FULL_O := $(shell readlink -f $(O) || echo $(O))
5804 + FULL_O := $(shell cd $(PWD); readlink -f $(O) || echo $(O))
5805 endif
5806
5807 #
5808 diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
5809 index b659466ea498..bf50f464234f 100644
5810 --- a/tools/perf/util/probe-file.c
5811 +++ b/tools/perf/util/probe-file.c
5812 @@ -206,6 +206,9 @@ static struct strlist *__probe_file__get_namelist(int fd, bool include_group)
5813 } else
5814 ret = strlist__add(sl, tev.event);
5815 clear_probe_trace_event(&tev);
5816 +	/* Skip if there is a same-name multi-probe event in the list */
5817 + if (ret == -EEXIST)
5818 + ret = 0;
5819 if (ret < 0)
5820 break;
5821 }
5822 diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
5823 index 9ecea45da4ca..aaf3b24fffa4 100644
5824 --- a/tools/perf/util/probe-finder.c
5825 +++ b/tools/perf/util/probe-finder.c
5826 @@ -615,14 +615,19 @@ static int convert_to_trace_point(Dwarf_Die *sp_die, Dwfl_Module *mod,
5827 return -EINVAL;
5828 }
5829
5830 - /* Try to get actual symbol name from symtab */
5831 - symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
5832 + if (dwarf_entrypc(sp_die, &eaddr) == 0) {
5833 + /* If the DIE has entrypc, use it. */
5834 + symbol = dwarf_diename(sp_die);
5835 + } else {
5836 + /* Try to get actual symbol name and address from symtab */
5837 + symbol = dwfl_module_addrsym(mod, paddr, &sym, NULL);
5838 + eaddr = sym.st_value;
5839 + }
5840 if (!symbol) {
5841 pr_warning("Failed to find symbol at 0x%lx\n",
5842 (unsigned long)paddr);
5843 return -ENOENT;
5844 }
5845 - eaddr = sym.st_value;
5846
5847 tp->offset = (unsigned long)(paddr - eaddr);
5848 tp->address = (unsigned long)paddr;
5849 diff --git a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
5850 index 3f893b99b337..555cb338a71a 100644
5851 --- a/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
5852 +++ b/tools/power/cpupower/utils/idle_monitor/amd_fam14h_idle.c
5853 @@ -82,7 +82,7 @@ static struct pci_access *pci_acc;
5854 static struct pci_dev *amd_fam14h_pci_dev;
5855 static int nbp1_entered;
5856
5857 -struct timespec start_time;
5858 +static struct timespec start_time;
5859 static unsigned long long timediff;
5860
5861 #ifdef DEBUG
5862 diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5863 index f634aeb65c5f..7fb4f7a291ad 100644
5864 --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5865 +++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5866 @@ -19,7 +19,7 @@ struct cpuidle_monitor cpuidle_sysfs_monitor;
5867
5868 static unsigned long long **previous_count;
5869 static unsigned long long **current_count;
5870 -struct timespec start_time;
5871 +static struct timespec start_time;
5872 static unsigned long long timediff;
5873
5874 static int cpuidle_get_count_percent(unsigned int id, double *percent,
5875 diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
5876 index d3c3e6e7aa26..3d54fd433626 100644
5877 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
5878 +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.c
5879 @@ -27,6 +27,8 @@ struct cpuidle_monitor *all_monitors[] = {
5880 0
5881 };
5882
5883 +int cpu_count;
5884 +
5885 static struct cpuidle_monitor *monitors[MONITORS_MAX];
5886 static unsigned int avail_monitors;
5887
5888 diff --git a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
5889 index a2d901d3bfaf..eafef38f1982 100644
5890 --- a/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
5891 +++ b/tools/power/cpupower/utils/idle_monitor/cpupower-monitor.h
5892 @@ -25,7 +25,7 @@
5893 #endif
5894 #define CSTATE_DESC_LEN 60
5895
5896 -int cpu_count;
5897 +extern int cpu_count;
5898
5899 /* Hard to define the right names ...: */
5900 enum power_range_e {
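
The cpupower hunks (start_time and timediff made static, cpu_count moved from a plain definition in the header to extern plus a single definition in a .c file) fix multiple-definition errors of the kind newer toolchains, with gcc 10's -fno-common default, turn into hard link failures; the dtc-lexer.l hunk earlier removes a duplicate yylloc for the same reason. The pattern, sketched with file boundaries shown as comments rather than as one compilable unit:

/* cpupower-monitor.h: headers declare, they do not define */
extern int cpu_count;

/* cpupower-monitor.c: exactly one translation unit defines it */
int cpu_count;

/* idle_monitor/*.c: file-private globals become static instead, so
 * each file keeps its own copy and nothing collides at link time */
static unsigned long long timediff;
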
5901 diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include
5902 index ded7a950dc40..6d2f3a1b2249 100644
5903 --- a/tools/scripts/Makefile.include
5904 +++ b/tools/scripts/Makefile.include
5905 @@ -1,8 +1,8 @@
5906 # SPDX-License-Identifier: GPL-2.0
5907 ifneq ($(O),)
5908 ifeq ($(origin O), command line)
5909 - dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
5910 - ABSOLUTE_O := $(shell cd $(O) ; pwd)
5911 + dummy := $(if $(shell cd $(PWD); test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
5912 + ABSOLUTE_O := $(shell cd $(PWD); cd $(O) ; pwd)
5913 OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/)
5914 COMMAND_O := O=$(ABSOLUTE_O)
5915 ifeq ($(objtree),)