Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0135-4.9.36-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 211990 bytes
-added kernel-alx-legacy pkg
1 diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
2 index c010fafc66a8..c7194e87d5f4 100644
3 --- a/Documentation/devicetree/bindings/net/mediatek-net.txt
4 +++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
5 @@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
6 * Ethernet controller node
7
8 Required properties:
9 -- compatible: Should be "mediatek,mt7623-eth"
10 +- compatible: Should be "mediatek,mt2701-eth"
11 - reg: Address and length of the register set for the device
12 - interrupts: Should contain the three frame engines interrupts in numeric
13 order. These are fe_int0, fe_int1 and fe_int2.
14 diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
15 index bc1c3c8bf8fa..62bdc5f2bf16 100644
16 --- a/Documentation/devicetree/bindings/net/phy.txt
17 +++ b/Documentation/devicetree/bindings/net/phy.txt
18 @@ -35,6 +35,15 @@ Optional Properties:
19 - broken-turn-around: If set, indicates the PHY device does not correctly
20 release the turn around line low at the end of a MDIO transaction.
21
22 +- eee-broken-100tx:
23 +- eee-broken-1000t:
24 +- eee-broken-10gt:
25 +- eee-broken-1000kx:
26 +- eee-broken-10gkx4:
27 +- eee-broken-10gkr:
28 + Mark the corresponding energy efficient ethernet mode as broken and
29 + request the ethernet to stop advertising it.
30 +
31 Example:
32
33 ethernet-phy@0 {
34 diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
35 index 5d21141a68b5..75bcaa355880 100644
36 --- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
37 +++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
38 @@ -3,9 +3,11 @@
39 Required properties:
40 - reg - The ID number for the phy, usually a small integer
41 - ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
42 - for applicable values
43 + for applicable values. Required only if interface type is
44 + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
45 - ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
46 - for applicable values
47 + for applicable values. Required only if interface type is
48 + PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
49 - ti,fifo-depth - Transmitt FIFO depth- see dt-bindings/net/ti-dp83867.h
50 for applicable values
51
52 diff --git a/Makefile b/Makefile
53 index 0a8d47465f97..4263dca12f07 100644
54 --- a/Makefile
55 +++ b/Makefile
56 @@ -1,6 +1,6 @@
57 VERSION = 4
58 PATCHLEVEL = 9
59 -SUBLEVEL = 35
60 +SUBLEVEL = 36
61 EXTRAVERSION =
62 NAME = Roaring Lionus
63
64 diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
65 index 8f9a69ca818c..efe53998c961 100644
66 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
67 +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi
68 @@ -121,7 +121,7 @@
69 &i2c3 {
70 clock-frequency = <400000>;
71 at24@50 {
72 - compatible = "at24,24c02";
73 + compatible = "atmel,24c64";
74 readonly;
75 reg = <0x50>;
76 };
77 diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
78 index e920dd83e443..f989145480c8 100644
79 --- a/arch/arm/mach-omap2/omap_device.c
80 +++ b/arch/arm/mach-omap2/omap_device.c
81 @@ -222,6 +222,14 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
82 dev_err(dev, "failed to idle\n");
83 }
84 break;
85 + case BUS_NOTIFY_BIND_DRIVER:
86 + od = to_omap_device(pdev);
87 + if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
88 + pm_runtime_status_suspended(dev)) {
89 + od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
90 + pm_runtime_set_active(dev);
91 + }
92 + break;
93 case BUS_NOTIFY_ADD_DEVICE:
94 if (pdev->dev.of_node)
95 omap_device_build_from_dt(pdev);
96 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
97 index 5cbfd9f86412..f7c741358f37 100644
98 --- a/arch/arm/mm/mmu.c
99 +++ b/arch/arm/mm/mmu.c
100 @@ -1211,15 +1211,15 @@ void __init adjust_lowmem_bounds(void)
101
102 high_memory = __va(arm_lowmem_limit - 1) + 1;
103
104 + if (!memblock_limit)
105 + memblock_limit = arm_lowmem_limit;
106 +
107 /*
108 * Round the memblock limit down to a pmd size. This
109 * helps to ensure that we will allocate memory from the
110 * last full pmd, which should be mapped.
111 */
112 - if (memblock_limit)
113 - memblock_limit = round_down(memblock_limit, PMD_SIZE);
114 - if (!memblock_limit)
115 - memblock_limit = arm_lowmem_limit;
116 + memblock_limit = round_down(memblock_limit, PMD_SIZE);
117
118 if (!IS_ENABLED(CONFIG_HIGHMEM) || cache_is_vipt_aliasing()) {
119 if (memblock_end_of_DRAM() > arm_lowmem_limit) {
120 diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
121 index e6e3491d48a5..f150a4c63efe 100644
122 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
123 +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
124 @@ -85,6 +85,18 @@
125 status = "okay";
126 pinctrl-0 = <&eth_pins>;
127 pinctrl-names = "default";
128 + phy-handle = <&eth_phy0>;
129 +
130 + mdio {
131 + compatible = "snps,dwmac-mdio";
132 + #address-cells = <1>;
133 + #size-cells = <0>;
134 +
135 + eth_phy0: ethernet-phy@0 {
136 + reg = <0>;
137 + eee-broken-1000t;
138 + };
139 + };
140 };
141
142 &ir {
143 diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
144 index e517088d635f..de04879bc8b8 100644
145 --- a/arch/arm64/include/asm/acpi.h
146 +++ b/arch/arm64/include/asm/acpi.h
147 @@ -22,9 +22,9 @@
148 #define ACPI_MADT_GICC_LENGTH \
149 (acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
150
151 -#define BAD_MADT_GICC_ENTRY(entry, end) \
152 - (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
153 - (entry)->header.length != ACPI_MADT_GICC_LENGTH)
154 +#define BAD_MADT_GICC_ENTRY(entry, end) \
155 + (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
156 + (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
157
158 /* Basic configuration for ACPI */
159 #ifdef CONFIG_ACPI
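
The BAD_MADT_GICC_ENTRY rework above changes the validation order: the entry's self-reported length is compared against the fixed per-revision size first, and only that trusted constant is then used to bound-check against end. The old form derived the bound from sizeof(*entry), which describes the host structure rather than what the firmware actually wrote. A minimal userspace sketch of the same validation order (the record layout and EXPECTED_LEN are illustrative stand-ins, not the ACPI definitions):

#include <stdint.h>
#include <stdio.h>

#define EXPECTED_LEN 80u	/* stand-in for ACPI_MADT_GICC_LENGTH */

struct record_header {
	uint8_t type;
	uint8_t length;		/* firmware-reported size of the whole record */
};

/* Reject a record unless it exists, its self-reported length matches the
 * size this table revision requires, and that many bytes fit before 'end'.
 * Checking the length field before the bound means the bound is never
 * derived from a host sizeof() that may disagree with the firmware. */
static int record_is_bad(const struct record_header *rec, uintptr_t end)
{
	return !rec ||
	       rec->length != EXPECTED_LEN ||
	       (uintptr_t)rec + EXPECTED_LEN > end;
}

int main(void)
{
	uint8_t table[80] = { 0x0b, 80 };	/* type, length */
	const struct record_header *rec = (const void *)table;

	printf("bad=%d\n", record_is_bad(rec, (uintptr_t)(table + sizeof(table))));
	return 0;
}
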
160 diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
161 index 28bfe6132eb6..851290d2bfe3 100644
162 --- a/arch/arm64/include/asm/assembler.h
163 +++ b/arch/arm64/include/asm/assembler.h
164 @@ -155,22 +155,25 @@ lr .req x30 // link register
165
166 /*
167 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
168 - * <symbol> is within the range +/- 4 GB of the PC.
169 + * <symbol> is within the range +/- 4 GB of the PC when running
170 + * in core kernel context. In module context, a movz/movk sequence
171 + * is used, since modules may be loaded far away from the kernel
172 + * when KASLR is in effect.
173 */
174 /*
175 * @dst: destination register (64 bit wide)
176 * @sym: name of the symbol
177 - * @tmp: optional scratch register to be used if <dst> == sp, which
178 - * is not allowed in an adrp instruction
179 */
180 - .macro adr_l, dst, sym, tmp=
181 - .ifb \tmp
182 + .macro adr_l, dst, sym
183 +#ifndef MODULE
184 adrp \dst, \sym
185 add \dst, \dst, :lo12:\sym
186 - .else
187 - adrp \tmp, \sym
188 - add \dst, \tmp, :lo12:\sym
189 - .endif
190 +#else
191 + movz \dst, #:abs_g3:\sym
192 + movk \dst, #:abs_g2_nc:\sym
193 + movk \dst, #:abs_g1_nc:\sym
194 + movk \dst, #:abs_g0_nc:\sym
195 +#endif
196 .endm
197
198 /*
199 @@ -181,6 +184,7 @@ lr .req x30 // link register
200 * the address
201 */
202 .macro ldr_l, dst, sym, tmp=
203 +#ifndef MODULE
204 .ifb \tmp
205 adrp \dst, \sym
206 ldr \dst, [\dst, :lo12:\sym]
207 @@ -188,6 +192,15 @@ lr .req x30 // link register
208 adrp \tmp, \sym
209 ldr \dst, [\tmp, :lo12:\sym]
210 .endif
211 +#else
212 + .ifb \tmp
213 + adr_l \dst, \sym
214 + ldr \dst, [\dst]
215 + .else
216 + adr_l \tmp, \sym
217 + ldr \dst, [\tmp]
218 + .endif
219 +#endif
220 .endm
221
222 /*
223 @@ -197,8 +210,13 @@ lr .req x30 // link register
224 * while <src> needs to be preserved.
225 */
226 .macro str_l, src, sym, tmp
227 +#ifndef MODULE
228 adrp \tmp, \sym
229 str \src, [\tmp, :lo12:\sym]
230 +#else
231 + adr_l \tmp, \sym
232 + str \src, [\tmp]
233 +#endif
234 .endm
235
236 /*
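
The module branch above materialises the symbol's absolute address with one movz plus three movk instructions, 16 bits at a time, because the adrp-based PC-relative form only reaches +/- 4 GB and a KASLR'd module can be loaded farther than that from its target. A small C sketch of the half-word decomposition that the :abs_g3 through :abs_g0_nc relocations select (the address is hypothetical):

#include <stdint.h>
#include <stdio.h>

/* movz writes bits 63:48 and zeroes the rest; each movk then patches one
 * lower 16-bit group without disturbing the others. */
static void show_movz_movk(uint64_t addr)
{
	printf("movz dst, #0x%04x, lsl #48\n", (unsigned)(addr >> 48) & 0xffff);
	printf("movk dst, #0x%04x, lsl #32\n", (unsigned)(addr >> 32) & 0xffff);
	printf("movk dst, #0x%04x, lsl #16\n", (unsigned)(addr >> 16) & 0xffff);
	printf("movk dst, #0x%04x\n",          (unsigned)(addr & 0xffff));
}

int main(void)
{
	show_movz_movk(0xffff000008081000ull);	/* hypothetical module symbol */
	return 0;
}
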
237 diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
238 index 8507703dabe4..a70f7d3361c4 100644
239 --- a/arch/arm64/kernel/smp.c
240 +++ b/arch/arm64/kernel/smp.c
241 @@ -934,7 +934,7 @@ static bool have_cpu_die(void)
242 #ifdef CONFIG_HOTPLUG_CPU
243 int any_cpu = raw_smp_processor_id();
244
245 - if (cpu_ops[any_cpu]->cpu_die)
246 + if (cpu_ops[any_cpu] && cpu_ops[any_cpu]->cpu_die)
247 return true;
248 #endif
249 return false;
250 diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
251 index 7791840cf22c..db07793f7b43 100644
252 --- a/arch/mips/kernel/entry.S
253 +++ b/arch/mips/kernel/entry.S
254 @@ -11,6 +11,7 @@
255 #include <asm/asm.h>
256 #include <asm/asmmacro.h>
257 #include <asm/compiler.h>
258 +#include <asm/irqflags.h>
259 #include <asm/regdef.h>
260 #include <asm/mipsregs.h>
261 #include <asm/stackframe.h>
262 @@ -137,6 +138,7 @@ work_pending:
263 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
264 beqz t0, work_notifysig
265 work_resched:
266 + TRACE_IRQS_OFF
267 jal schedule
268
269 local_irq_disable # make sure need_resched and
270 @@ -173,6 +175,7 @@ syscall_exit_work:
271 beqz t0, work_pending # trace bit set?
272 local_irq_enable # could let syscall_trace_leave()
273 # call schedule() instead
274 + TRACE_IRQS_ON
275 move a0, sp
276 jal syscall_trace_leave
277 b resume_userspace
278 diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
279 index cf052204eb0a..d1bb506adc10 100644
280 --- a/arch/mips/kernel/head.S
281 +++ b/arch/mips/kernel/head.S
282 @@ -106,8 +106,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
283 beq t0, t1, dtb_found
284 #endif
285 li t1, -2
286 - beq a0, t1, dtb_found
287 move t2, a1
288 + beq a0, t1, dtb_found
289
290 li t2, 0
291 dtb_found:
292 diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
293 index 7cf653e21423..60c4d4599639 100644
294 --- a/arch/mips/kernel/pm-cps.c
295 +++ b/arch/mips/kernel/pm-cps.c
296 @@ -56,7 +56,6 @@ DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
297 * state. Actually per-core rather than per-CPU.
298 */
299 static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
300 -static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
301
302 /* Indicates online CPUs coupled with the current CPU */
303 static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
304 @@ -642,7 +641,6 @@ static int cps_pm_online_cpu(unsigned int cpu)
305 {
306 enum cps_pm_state state;
307 unsigned core = cpu_data[cpu].core;
308 - unsigned dlinesz = cpu_data[cpu].dcache.linesz;
309 void *entry_fn, *core_rc;
310
311 for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
312 @@ -662,16 +660,11 @@ static int cps_pm_online_cpu(unsigned int cpu)
313 }
314
315 if (!per_cpu(ready_count, core)) {
316 - core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
317 + core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
318 if (!core_rc) {
319 pr_err("Failed allocate core %u ready_count\n", core);
320 return -ENOMEM;
321 }
322 - per_cpu(ready_count_alloc, core) = core_rc;
323 -
324 - /* Ensure ready_count is aligned to a cacheline boundary */
325 - core_rc += dlinesz - 1;
326 - core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
327 per_cpu(ready_count, core) = core_rc;
328 }
329
330 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
331 index ec87ef93267b..b0b29cb6f3d8 100644
332 --- a/arch/mips/kernel/traps.c
333 +++ b/arch/mips/kernel/traps.c
334 @@ -199,6 +199,8 @@ void show_stack(struct task_struct *task, unsigned long *sp)
335 {
336 struct pt_regs regs;
337 mm_segment_t old_fs = get_fs();
338 +
339 + regs.cp0_status = KSU_KERNEL;
340 if (sp) {
341 regs.regs[29] = (unsigned long)sp;
342 regs.regs[31] = 0;
343 diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
344 index f25731627d7f..e5bfbf62827a 100644
345 --- a/arch/powerpc/kernel/eeh.c
346 +++ b/arch/powerpc/kernel/eeh.c
347 @@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
348 *
349 * For pHyp, we have to enable IO for log retrieval. Otherwise,
350 * 0xFF's is always returned from PCI config space.
351 + *
352 + * When the @severity is EEH_LOG_PERM, the PE is going to be
353 + * removed. Prior to that, the drivers for devices included in
354 + * the PE will be closed. The drivers rely on working IO path
355 + * to bring the devices to quiet state. Otherwise, PCI traffic
356 + * from those devices after they are removed is likely to cause
357 + * another unexpected EEH error.
358 */
359 if (!(pe->type & EEH_PE_PHB)) {
360 - if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
361 + if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
362 + severity == EEH_LOG_PERM)
363 eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
364
365 /*
366 diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
367 index d7697ab802f6..8e136b88cdf4 100644
368 --- a/arch/s390/include/asm/ctl_reg.h
369 +++ b/arch/s390/include/asm/ctl_reg.h
370 @@ -15,7 +15,9 @@
371 BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
372 asm volatile( \
373 " lctlg %1,%2,%0\n" \
374 - : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
375 + : \
376 + : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
377 + : "memory"); \
378 }
379
380 #define __ctl_store(array, low, high) { \
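
The "memory" clobber added above tells the compiler that the lctlg instruction may read or invalidate any memory, so it must not cache values in registers across the asm statement or reorder independent loads and stores around it. A generic illustration of the same constraint (plain GCC inline asm, not s390-specific):

/* An empty asm with a "memory" clobber is the classic compiler barrier:
 * the compiler flushes memory values it was holding in registers before
 * the statement and will not sink or hoist memory accesses across it. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int data;
static int ready;

void publish(int value)
{
	data = value;
	barrier();	/* keep the store to 'data' ahead of the flag */
	ready = 1;
}
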
381 diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
382 index e1b1ce63a328..5cbf03c14981 100644
383 --- a/arch/sparc/kernel/irq_64.c
384 +++ b/arch/sparc/kernel/irq_64.c
385 @@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)
386 unsigned long order = get_order(size);
387 unsigned long p;
388
389 - p = __get_free_pages(GFP_KERNEL, order);
390 + p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
391 if (!p) {
392 prom_printf("SUN4V: Error, cannot allocate queue.\n");
393 prom_halt();
394 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
395 index 496fa926e1e0..d44fb806bbd7 100644
396 --- a/arch/sparc/kernel/traps_64.c
397 +++ b/arch/sparc/kernel/traps_64.c
398 @@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)
399 atomic_inc(&sun4v_resum_oflow_cnt);
400 }
401
402 +/* Given a set of registers, get the virtual address that was being accessed
403 + * by the faulting instruction at tpc.
404 + */
405 +static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
406 +{
407 + unsigned int insn;
408 +
409 + if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
410 + return compute_effective_address(regs, insn,
411 + (insn >> 25) & 0x1f);
412 + }
413 + return 0;
414 +}
415 +
416 +/* Attempt to handle non-resumable errors generated from userspace.
417 + * Returns true if the signal was handled, false otherwise.
418 + */
419 +bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
420 + struct sun4v_error_entry *ent) {
421 +
422 + unsigned int attrs = ent->err_attrs;
423 +
424 + if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
425 + unsigned long addr = ent->err_raddr;
426 + siginfo_t info;
427 +
428 + if (addr == ~(u64)0) {
429 + /* This seems highly unlikely to ever occur */
430 + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
431 + } else {
432 + unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
433 + PAGE_SIZE);
434 +
435 + /* Break the unfortunate news. */
436 + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
437 + addr);
438 + pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu pages.\n",
439 + page_cnt);
440 +
441 + while (page_cnt-- > 0) {
442 + if (pfn_valid(addr >> PAGE_SHIFT))
443 + get_page(pfn_to_page(addr >> PAGE_SHIFT));
444 + addr += PAGE_SIZE;
445 + }
446 + }
447 + info.si_signo = SIGKILL;
448 + info.si_errno = 0;
449 + info.si_trapno = 0;
450 + force_sig_info(info.si_signo, &info, current);
451 +
452 + return true;
453 + }
454 + if (attrs & SUN4V_ERR_ATTRS_PIO) {
455 + siginfo_t info;
456 +
457 + info.si_signo = SIGBUS;
458 + info.si_code = BUS_ADRERR;
459 + info.si_addr = (void __user *)sun4v_get_vaddr(regs);
460 + force_sig_info(info.si_signo, &info, current);
461 +
462 + return true;
463 + }
464 +
465 + /* Default to doing nothing */
466 + return false;
467 +}
468 +
469 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
470 * Log the event, clear the first word of the entry, and die.
471 */
472 @@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
473
474 put_cpu();
475
476 + if (!(regs->tstate & TSTATE_PRIV) &&
477 + sun4v_nonresum_error_user_handled(regs, &local_copy)) {
478 + /* DON'T PANIC: This userspace error was handled. */
479 + return;
480 + }
481 +
482 #ifdef CONFIG_PCI
483 /* Check for the special PCI poke sequence. */
484 if (pci_poke_in_progress && pci_poke_cpu == cpu) {
485 diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
486 index a66854d99ee1..6de58f1bd7ec 100644
487 --- a/arch/x86/boot/compressed/kaslr.c
488 +++ b/arch/x86/boot/compressed/kaslr.c
489 @@ -430,9 +430,6 @@ void choose_random_location(unsigned long input,
490 {
491 unsigned long random_addr, min_addr;
492
493 - /* By default, keep output position unchanged. */
494 - *virt_addr = *output;
495 -
496 if (cmdline_find_option_bool("nokaslr")) {
497 warn("KASLR disabled: 'nokaslr' on cmdline.");
498 return;
499 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
500 index b3c5a5f030ce..c945acd8fa33 100644
501 --- a/arch/x86/boot/compressed/misc.c
502 +++ b/arch/x86/boot/compressed/misc.c
503 @@ -338,7 +338,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
504 unsigned long output_len)
505 {
506 const unsigned long kernel_total_size = VO__end - VO__text;
507 - unsigned long virt_addr = (unsigned long)output;
508 + unsigned long virt_addr = LOAD_PHYSICAL_ADDR;
509
510 /* Retain x86 boot parameters pointer passed from startup_32/64. */
511 boot_params = rmode;
512 @@ -397,7 +397,7 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap,
513 #ifndef CONFIG_RELOCATABLE
514 if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
515 error("Destination address does not match LOAD_PHYSICAL_ADDR");
516 - if ((unsigned long)output != virt_addr)
517 + if (virt_addr != LOAD_PHYSICAL_ADDR)
518 error("Destination virtual address changed when not relocatable");
519 #endif
520
521 diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
522 index 1c8355eadbd1..766a5211f827 100644
523 --- a/arch/x86/boot/compressed/misc.h
524 +++ b/arch/x86/boot/compressed/misc.h
525 @@ -81,8 +81,6 @@ static inline void choose_random_location(unsigned long input,
526 unsigned long output_size,
527 unsigned long *virt_addr)
528 {
529 - /* No change from existing output location. */
530 - *virt_addr = *output;
531 }
532 #endif
533
534 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
535 index 38623e219816..9604b2574d6c 100644
536 --- a/arch/x86/events/core.c
537 +++ b/arch/x86/events/core.c
538 @@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
539
540 if (event->attr.precise_ip > precise)
541 return -EOPNOTSUPP;
542 +
543 + /* There's no sense in having PEBS for non sampling events: */
544 + if (!is_sampling_event(event))
545 + return -EINVAL;
546 }
547 /*
548 * check that PEBS LBR correction does not conflict with
549 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
550 index 3bdb917716b1..f0f197f459b5 100644
551 --- a/arch/x86/events/intel/core.c
552 +++ b/arch/x86/events/intel/core.c
553 @@ -3164,13 +3164,16 @@ static void intel_pmu_cpu_starting(int cpu)
554
555 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
556 for_each_cpu(i, topology_sibling_cpumask(cpu)) {
557 + struct cpu_hw_events *sibling;
558 struct intel_excl_cntrs *c;
559
560 - c = per_cpu(cpu_hw_events, i).excl_cntrs;
561 + sibling = &per_cpu(cpu_hw_events, i);
562 + c = sibling->excl_cntrs;
563 if (c && c->core_id == core_id) {
564 cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
565 cpuc->excl_cntrs = c;
566 - cpuc->excl_thread_id = 1;
567 + if (!sibling->excl_thread_id)
568 + cpuc->excl_thread_id = 1;
569 break;
570 }
571 }
572 @@ -3975,7 +3978,7 @@ __init int intel_pmu_init(void)
573 x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
574 x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
575 }
576 - x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
577 + x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
578
579 if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
580 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
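
The intel_ctrl fix above is the classic 32-bit shift pitfall: the literal 1 is an int, so (1 << num_counters) - 1 is undefined behaviour once a PMU reports 32 or more counters, and the enable mask silently loses its upper bits. A standalone sketch of the corrected mask construction:

#include <stdint.h>
#include <stdio.h>

/* Build an n-bit all-ones mask.  With a plain 'int' literal the shift is
 * undefined at n == 32; 1ULL keeps the arithmetic in 64 bits, which is
 * well-defined for any n up to 63. */
static uint64_t counter_mask(unsigned int n)
{
	return (1ULL << n) - 1;
}

int main(void)
{
	printf("32 counters -> mask %#llx\n",
	       (unsigned long long)counter_mask(32));
	return 0;
}

The same 1ull promotion appears again below in the kvm pmu_intel.c hunk for global_ctrl.
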
581 diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
582 index 272427700d48..afe8024e9e95 100644
583 --- a/arch/x86/events/intel/uncore_snbep.c
584 +++ b/arch/x86/events/intel/uncore_snbep.c
585 @@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
586
587 void hswep_uncore_cpu_init(void)
588 {
589 - int pkg = topology_phys_to_logical_pkg(0);
590 + int pkg = boot_cpu_data.logical_proc_id;
591
592 if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
593 hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
594 diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
595 index e9cd7befcb76..19d14ac23ef9 100644
596 --- a/arch/x86/include/asm/kvm_emulate.h
597 +++ b/arch/x86/include/asm/kvm_emulate.h
598 @@ -221,6 +221,9 @@ struct x86_emulate_ops {
599 void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
600 u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
601 void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
602 +
603 + unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
604 + void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
605 };
606
607 typedef u32 __attribute__((vector_size(16))) sse128_t;
608 @@ -290,7 +293,6 @@ struct x86_emulate_ctxt {
609
610 /* interruptibility state, as a result of execution of STI or MOV SS */
611 int interruptibility;
612 - int emul_flags;
613
614 bool perm_ok; /* do not check permissions if true */
615 bool ud; /* inject an #UD if host doesn't support insn */
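
Dropping the cached emul_flags field in favour of get_hflags/set_hflags callbacks means the emulator always reads and writes the vCPU's live hflags instead of a snapshot that instructions such as RSM can leave stale (the em_rsm hunk below updates the flags through the same callbacks). A reduced sketch of the accessor pattern, with hypothetical struct names mirroring the shape of the change:

#include <stdio.h>

struct vcpu { unsigned int hflags; };

struct emul_ops {
	unsigned int (*get_hflags)(struct vcpu *v);
	void (*set_hflags)(struct vcpu *v, unsigned int hflags);
};

static unsigned int vcpu_get_hflags(struct vcpu *v)
{
	return v->hflags;	/* always the live value, never a copy */
}

static void vcpu_set_hflags(struct vcpu *v, unsigned int hflags)
{
	v->hflags = hflags;
}

static const struct emul_ops ops = { vcpu_get_hflags, vcpu_set_hflags };

int main(void)
{
	struct vcpu v = { .hflags = 0x4 };	/* hypothetical SMM bit */

	/* Mirrors em_rsm(): clear a mode bit through the callbacks. */
	ops.set_hflags(&v, ops.get_hflags(&v) & ~0x4u);
	printf("hflags now %#x\n", v.hflags);
	return 0;
}
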
616 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
617 index eea88fe5d969..6e57edf33d75 100644
618 --- a/arch/x86/kernel/tsc.c
619 +++ b/arch/x86/kernel/tsc.c
620 @@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
621 crystal_khz = 24000; /* 24.0 MHz */
622 break;
623 case INTEL_FAM6_SKYLAKE_X:
624 + case INTEL_FAM6_ATOM_DENVERTON:
625 crystal_khz = 25000; /* 25.0 MHz */
626 break;
627 case INTEL_FAM6_ATOM_GOLDMONT:
628 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
629 index 9f676adcdfc2..de36660751b5 100644
630 --- a/arch/x86/kvm/emulate.c
631 +++ b/arch/x86/kvm/emulate.c
632 @@ -2543,7 +2543,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
633 u64 smbase;
634 int ret;
635
636 - if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
637 + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
638 return emulate_ud(ctxt);
639
640 /*
641 @@ -2592,11 +2592,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
642 return X86EMUL_UNHANDLEABLE;
643 }
644
645 - if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
646 + if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
647 ctxt->ops->set_nmi_mask(ctxt, false);
648
649 - ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
650 - ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
651 + ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
652 + ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
653 return X86EMUL_CONTINUE;
654 }
655
656 @@ -5312,6 +5312,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
657 const struct x86_emulate_ops *ops = ctxt->ops;
658 int rc = X86EMUL_CONTINUE;
659 int saved_dst_type = ctxt->dst.type;
660 + unsigned emul_flags;
661
662 ctxt->mem_read.pos = 0;
663
664 @@ -5326,6 +5327,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
665 goto done;
666 }
667
668 + emul_flags = ctxt->ops->get_hflags(ctxt);
669 if (unlikely(ctxt->d &
670 (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
671 if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
672 @@ -5359,7 +5361,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
673 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
674 }
675
676 - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
677 + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
678 rc = emulator_check_intercept(ctxt, ctxt->intercept,
679 X86_ICPT_PRE_EXCEPT);
680 if (rc != X86EMUL_CONTINUE)
681 @@ -5388,7 +5390,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
682 goto done;
683 }
684
685 - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
686 + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
687 rc = emulator_check_intercept(ctxt, ctxt->intercept,
688 X86_ICPT_POST_EXCEPT);
689 if (rc != X86EMUL_CONTINUE)
690 @@ -5442,7 +5444,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
691
692 special_insn:
693
694 - if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
695 + if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
696 rc = emulator_check_intercept(ctxt, ctxt->intercept,
697 X86_ICPT_POST_MEMACCESS);
698 if (rc != X86EMUL_CONTINUE)
699 diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
700 index 9d4a8504a95a..5ab4a364348e 100644
701 --- a/arch/x86/kvm/pmu_intel.c
702 +++ b/arch/x86/kvm/pmu_intel.c
703 @@ -294,7 +294,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
704 ((u64)1 << edx.split.bit_width_fixed) - 1;
705 }
706
707 - pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
708 + pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
709 (((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
710 pmu->global_ctrl_mask = ~pmu->global_ctrl;
711
712 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
713 index 89b98e07211f..04e6bbbd8736 100644
714 --- a/arch/x86/kvm/vmx.c
715 +++ b/arch/x86/kvm/vmx.c
716 @@ -2455,7 +2455,7 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
717 if (!(vmcs12->exception_bitmap & (1u << nr)))
718 return 0;
719
720 - nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
721 + nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
722 vmcs_read32(VM_EXIT_INTR_INFO),
723 vmcs_readl(EXIT_QUALIFICATION));
724 return 1;
725 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
726 index 62cde4f67c72..3dbcb09c19cf 100644
727 --- a/arch/x86/kvm/x86.c
728 +++ b/arch/x86/kvm/x86.c
729 @@ -4999,6 +4999,8 @@ static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
730
731 if (var.unusable) {
732 memset(desc, 0, sizeof(*desc));
733 + if (base3)
734 + *base3 = 0;
735 return false;
736 }
737
738 @@ -5154,6 +5156,16 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
739 kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
740 }
741
742 +static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
743 +{
744 + return emul_to_vcpu(ctxt)->arch.hflags;
745 +}
746 +
747 +static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
748 +{
749 + kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
750 +}
751 +
752 static const struct x86_emulate_ops emulate_ops = {
753 .read_gpr = emulator_read_gpr,
754 .write_gpr = emulator_write_gpr,
755 @@ -5193,6 +5205,8 @@ static const struct x86_emulate_ops emulate_ops = {
756 .intercept = emulator_intercept,
757 .get_cpuid = emulator_get_cpuid,
758 .set_nmi_mask = emulator_set_nmi_mask,
759 + .get_hflags = emulator_get_hflags,
760 + .set_hflags = emulator_set_hflags,
761 };
762
763 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
764 @@ -5245,7 +5259,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
765 BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
766 BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
767 BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
768 - ctxt->emul_flags = vcpu->arch.hflags;
769
770 init_decode_cache(ctxt);
771 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
772 @@ -5636,8 +5649,6 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
773 unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
774 toggle_interruptibility(vcpu, ctxt->interruptibility);
775 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
776 - if (vcpu->arch.hflags != ctxt->emul_flags)
777 - kvm_set_hflags(vcpu, ctxt->emul_flags);
778 kvm_rip_write(vcpu, ctxt->eip);
779 if (r == EMULATE_DONE)
780 kvm_vcpu_check_singlestep(vcpu, rflags, &r);
781 @@ -6111,7 +6122,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
782
783 kvm_x86_ops->patch_hypercall(vcpu, instruction);
784
785 - return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
786 + return emulator_write_emulated(ctxt, rip, instruction, 3,
787 + &ctxt->exception);
788 }
789
790 static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
791 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
792 index 14b9dd71d9e8..9a324fc8bed8 100644
793 --- a/arch/x86/mm/init_64.c
794 +++ b/arch/x86/mm/init_64.c
795 @@ -94,10 +94,10 @@ __setup("noexec32=", nonx32_setup);
796 */
797 void sync_global_pgds(unsigned long start, unsigned long end, int removed)
798 {
799 - unsigned long address;
800 + unsigned long addr;
801
802 - for (address = start; address <= end; address += PGDIR_SIZE) {
803 - const pgd_t *pgd_ref = pgd_offset_k(address);
804 + for (addr = start; addr <= end; addr = ALIGN(addr + 1, PGDIR_SIZE)) {
805 + const pgd_t *pgd_ref = pgd_offset_k(addr);
806 struct page *page;
807
808 /*
809 @@ -113,7 +113,7 @@ void sync_global_pgds(unsigned long start, unsigned long end, int removed)
810 pgd_t *pgd;
811 spinlock_t *pgt_lock;
812
813 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
814 + pgd = (pgd_t *)page_address(page) + pgd_index(addr);
815 /* the pgt_lock only for Xen */
816 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
817 spin_lock(pgt_lock);
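
Stepping with addr = ALIGN(addr + 1, PGDIR_SIZE) instead of addr += PGDIR_SIZE snaps the walk to the next PGD boundary, so an unaligned start still visits every PGD slot covering [start, end] exactly once rather than sampling one unaligned address per stride. A toy-scale sketch of the iteration (small constants for readability, not the real x86-64 sizes):

#include <stdint.h>
#include <stdio.h>

#define SLOT_SIZE 0x1000u	/* toy stand-in for PGDIR_SIZE */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
	uint64_t start = 0x1800;	/* deliberately not slot-aligned */
	uint64_t end   = 0x4000;
	uint64_t addr;

	/* Visits slots 1, 2, 3 and 4: one iteration per covered slot. */
	for (addr = start; addr <= end; addr = ALIGN(addr + 1, SLOT_SIZE))
		printf("sync slot %llu (addr %#llx)\n",
		       (unsigned long long)(addr / SLOT_SIZE),
		       (unsigned long long)addr);
	return 0;
}
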
818 diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
819 index e4f800999b32..a75103e7f963 100644
820 --- a/arch/x86/mm/mpx.c
821 +++ b/arch/x86/mm/mpx.c
822 @@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
823 * We were not able to extract an address from the instruction,
824 * probably because there was something invalid in it.
825 */
826 - if (info->si_addr == (void *)-1) {
827 + if (info->si_addr == (void __user *)-1) {
828 err = -EINVAL;
829 goto err_out;
830 }
831 @@ -525,15 +525,7 @@ int mpx_handle_bd_fault(void)
832 if (!kernel_managing_mpx_tables(current->mm))
833 return -EINVAL;
834
835 - if (do_mpx_bt_fault()) {
836 - force_sig(SIGSEGV, current);
837 - /*
838 - * The force_sig() is essentially "handling" this
839 - * exception, so we do not pass up the error
840 - * from do_mpx_bt_fault().
841 - */
842 - }
843 - return 0;
844 + return do_mpx_bt_fault();
845 }
846
847 /*
848 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
849 index a7655f6caf7d..75fb01109f94 100644
850 --- a/arch/x86/mm/tlb.c
851 +++ b/arch/x86/mm/tlb.c
852 @@ -263,8 +263,6 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
853 {
854 struct flush_tlb_info info;
855
856 - if (end == 0)
857 - end = start + PAGE_SIZE;
858 info.flush_mm = mm;
859 info.flush_start = start;
860 info.flush_end = end;
861 @@ -393,7 +391,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
862 }
863
864 if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
865 - flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
866 + flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
867
868 preempt_enable();
869 }
870 diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
871 index c42202d63567..d6eaaa25d1cc 100644
872 --- a/drivers/block/xen-blkback/blkback.c
873 +++ b/drivers/block/xen-blkback/blkback.c
874 @@ -609,8 +609,6 @@ int xen_blkif_schedule(void *arg)
875 unsigned long timeout;
876 int ret;
877
878 - xen_blkif_get(blkif);
879 -
880 set_freezable();
881 while (!kthread_should_stop()) {
882 if (try_to_freeze())
883 @@ -665,7 +663,6 @@ int xen_blkif_schedule(void *arg)
884 print_stats(ring);
885
886 ring->xenblkd = NULL;
887 - xen_blkif_put(blkif);
888
889 return 0;
890 }
891 diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
892 index 9b69fe410c08..5dfe6e8af140 100644
893 --- a/drivers/block/xen-blkback/xenbus.c
894 +++ b/drivers/block/xen-blkback/xenbus.c
895 @@ -255,7 +255,6 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
896 if (ring->xenblkd) {
897 kthread_stop(ring->xenblkd);
898 wake_up(&ring->shutdown_wq);
899 - ring->xenblkd = NULL;
900 }
901
902 /* The above kthread_stop() guarantees that at this point we
903 @@ -316,8 +315,10 @@ static int xen_blkif_disconnect(struct xen_blkif *blkif)
904 static void xen_blkif_free(struct xen_blkif *blkif)
905 {
906
907 - xen_blkif_disconnect(blkif);
908 + WARN_ON(xen_blkif_disconnect(blkif));
909 xen_vbd_free(&blkif->vbd);
910 + kfree(blkif->be->mode);
911 + kfree(blkif->be);
912
913 /* Make sure everything is drained before shutting down */
914 kmem_cache_free(xen_blkif_cachep, blkif);
915 @@ -512,8 +513,6 @@ static int xen_blkbk_remove(struct xenbus_device *dev)
916
917 /* Put the reference we set in xen_blkif_alloc(). */
918 xen_blkif_put(be->blkif);
919 - kfree(be->mode);
920 - kfree(be);
921 return 0;
922 }
923
924 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
925 index 471a301d63e3..8f890c1aca57 100644
926 --- a/drivers/char/virtio_console.c
927 +++ b/drivers/char/virtio_console.c
928 @@ -1870,7 +1870,7 @@ static void config_work_handler(struct work_struct *work)
929 {
930 struct ports_device *portdev;
931
932 - portdev = container_of(work, struct ports_device, control_work);
933 + portdev = container_of(work, struct ports_device, config_work);
934 if (!use_multiport(portdev)) {
935 struct virtio_device *vdev;
936 struct port *port;
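
The one-line virtio_console fix above is a container_of() bug: the macro subtracts the offset of whichever member is named, so naming control_work while actually holding a pointer to config_work rewinds by the wrong offset and produces a corrupted ports_device pointer. A self-contained demonstration, with ints standing in for the two work_struct members:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ports_device {
	int dummy;
	int config_work;	/* stand-in for the config work_struct */
	int control_work;	/* stand-in for the control work_struct */
};

int main(void)
{
	struct ports_device pd = { 0 };
	int *work = &pd.config_work;	/* handler was queued on config_work */

	struct ports_device *ok  = container_of(work, struct ports_device, config_work);
	struct ports_device *bad = container_of(work, struct ports_device, control_work);

	/* Naming the wrong member compiles cleanly but points into the
	 * wrong place, exactly the bug in config_work_handler(). */
	printf("ok == &pd: %d, bad == &pd: %d\n", ok == &pd, bad == &pd);
	return 0;
}
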
937 diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
938 index 2a3e9d8e88b0..96d37175d0ad 100644
939 --- a/drivers/clk/clk-scpi.c
940 +++ b/drivers/clk/clk-scpi.c
941 @@ -290,13 +290,15 @@ static int scpi_clocks_probe(struct platform_device *pdev)
942 of_node_put(child);
943 return ret;
944 }
945 - }
946 - /* Add the virtual cpufreq device */
947 - cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
948 - -1, NULL, 0);
949 - if (IS_ERR(cpufreq_dev))
950 - pr_warn("unable to register cpufreq device");
951
952 + if (match->data != &scpi_dvfs_ops)
953 + continue;
954 + /* Add the virtual cpufreq device if it's DVFS clock provider */
955 + cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
956 + -1, NULL, 0);
957 + if (IS_ERR(cpufreq_dev))
958 + pr_warn("unable to register cpufreq device");
959 + }
960 return 0;
961 }
962
963 diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
964 index d6d425773fa4..5b2db3c6568f 100644
965 --- a/drivers/cpufreq/s3c2416-cpufreq.c
966 +++ b/drivers/cpufreq/s3c2416-cpufreq.c
967 @@ -400,7 +400,6 @@ static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
968 rate = clk_get_rate(s3c_freq->hclk);
969 if (rate < 133 * 1000 * 1000) {
970 pr_err("cpufreq: HCLK not at 133MHz\n");
971 - clk_put(s3c_freq->hclk);
972 ret = -EINVAL;
973 goto err_armclk;
974 }
975 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
976 index 92159313361b..f2bb5122d2c2 100644
977 --- a/drivers/gpio/gpiolib.c
978 +++ b/drivers/gpio/gpiolib.c
979 @@ -707,7 +707,8 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
980
981 ge.timestamp = ktime_get_real_ns();
982
983 - if (le->eflags & GPIOEVENT_REQUEST_BOTH_EDGES) {
984 + if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE
985 + && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) {
986 int level = gpiod_get_value_cansleep(le->desc);
987
988 if (level)
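
The gpiolib change above fixes a mask test: GPIOEVENT_REQUEST_BOTH_EDGES is the OR of the rising and falling bits, so eflags & GPIOEVENT_REQUEST_BOTH_EDGES is true whenever either edge was requested, and single-edge events wrongly took the both-edges path. Testing each bit separately, as the patch does, or comparing against the full mask fixes it; a minimal sketch:

#include <stdio.h>

#define RISING_EDGE	0x1
#define FALLING_EDGE	0x2
#define BOTH_EDGES	(RISING_EDGE | FALLING_EDGE)

int main(void)
{
	unsigned int eflags = RISING_EDGE;	/* caller asked for one edge */

	/* Non-zero for either bit, so the old test misfires: */
	printf("buggy both-edges test: %d\n", (eflags & BOTH_EDGES) != 0);
	/* True only when both bits are set, as in the patch: */
	printf("fixed both-edges test: %d\n",
	       (eflags & RISING_EDGE) && (eflags & FALLING_EDGE));
	return 0;
}
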
989 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
990 index 82dc8d20e28a..bfb4b91869e7 100644
991 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
992 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
993 @@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
994 }
995 break;
996 }
997 +
998 + if (!(*out_ring && (*out_ring)->adev)) {
999 + DRM_ERROR("Ring %d is not initialized on IP %d\n",
1000 + ring, ip_type);
1001 + return -EINVAL;
1002 + }
1003 +
1004 return 0;
1005 }
1006
1007 diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1008 index 6f3c89178b6a..4cb347e88cf0 100644
1009 --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1010 +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1011 @@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
1012 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
1013 MODULE_FIRMWARE("radeon/hainan_smc.bin");
1014 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
1015 +MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
1016
1017 union power_info {
1018 struct _ATOM_POWERPLAY_INFO info;
1019 @@ -7721,10 +7722,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
1020 ((adev->pdev->device == 0x6660) ||
1021 (adev->pdev->device == 0x6663) ||
1022 (adev->pdev->device == 0x6665) ||
1023 - (adev->pdev->device == 0x6667))) ||
1024 - ((adev->pdev->revision == 0xc3) &&
1025 - (adev->pdev->device == 0x6665)))
1026 + (adev->pdev->device == 0x6667))))
1027 chip_name = "hainan_k";
1028 + else if ((adev->pdev->revision == 0xc3) &&
1029 + (adev->pdev->device == 0x6665))
1030 + chip_name = "banks_k_2";
1031 else
1032 chip_name = "hainan";
1033 break;
1034 diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
1035 index 6feed726e299..50f0cf2788b7 100644
1036 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
1037 +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
1038 @@ -43,9 +43,13 @@
1039
1040 #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
1041 #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
1042 +#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
1043 +
1044 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
1045 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
1046 #define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
1047 +#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
1048 +
1049 #define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
1050
1051 #define VCE_V3_0_FW_SIZE (384 * 1024)
1052 @@ -54,6 +58,9 @@
1053
1054 #define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
1055
1056 +#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
1057 + | GRBM_GFX_INDEX__VCE_ALL_PIPE)
1058 +
1059 static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
1060 static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
1061 static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
1062 @@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
1063 if (adev->vce.harvest_config & (1 << idx))
1064 continue;
1065
1066 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
1067 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
1068 vce_v3_0_mc_resume(adev, idx);
1069 WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
1070
1071 @@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
1072 }
1073 }
1074
1075 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
1076 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
1077 mutex_unlock(&adev->grbm_idx_mutex);
1078
1079 return 0;
1080 @@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
1081 if (adev->vce.harvest_config & (1 << idx))
1082 continue;
1083
1084 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
1085 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
1086
1087 if (adev->asic_type >= CHIP_STONEY)
1088 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
1089 @@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
1090 vce_v3_0_set_vce_sw_clock_gating(adev, false);
1091 }
1092
1093 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
1094 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
1095 mutex_unlock(&adev->grbm_idx_mutex);
1096
1097 return 0;
1098 @@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
1099 * VCE team suggest use bit 3--bit 6 for busy status check
1100 */
1101 mutex_lock(&adev->grbm_idx_mutex);
1102 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
1103 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
1104 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
1105 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
1106 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
1107 }
1108 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
1109 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
1110 if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
1111 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
1112 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
1113 }
1114 - WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
1115 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
1116 mutex_unlock(&adev->grbm_idx_mutex);
1117
1118 if (srbm_soft_reset) {
1119 @@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
1120 if (adev->vce.harvest_config & (1 << i))
1121 continue;
1122
1123 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
1124 + WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
1125
1126 if (enable) {
1127 /* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
1128 @@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
1129 vce_v3_0_set_vce_sw_clock_gating(adev, enable);
1130 }
1131
1132 - WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
1133 + WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
1134 mutex_unlock(&adev->grbm_idx_mutex);
1135
1136 return 0;
1137 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
1138 index 2028980f1ed4..5b261c1dcc9f 100644
1139 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
1140 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
1141 @@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1142 cgs_set_clockgating_state(
1143 hwmgr->device,
1144 AMD_IP_BLOCK_TYPE_VCE,
1145 - AMD_CG_STATE_UNGATE);
1146 + AMD_CG_STATE_GATE);
1147 cgs_set_powergating_state(
1148 hwmgr->device,
1149 AMD_IP_BLOCK_TYPE_VCE,
1150 @@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
1151 cgs_set_clockgating_state(
1152 hwmgr->device,
1153 AMD_IP_BLOCK_TYPE_VCE,
1154 - AMD_PG_STATE_GATE);
1155 + AMD_PG_STATE_UNGATE);
1156 cz_dpm_update_vce_dpm(hwmgr);
1157 cz_enable_disable_vce_dpm(hwmgr, true);
1158 return 0;
1159 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
1160 index 960424913496..189ec94c6ff9 100644
1161 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
1162 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
1163 @@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
1164 cz_hwmgr->vce_dpm.hard_min_clk,
1165 PPSMC_MSG_SetEclkHardMin));
1166 } else {
1167 - /*EPR# 419220 -HW limitation to to */
1168 - cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
1169 - smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1170 - PPSMC_MSG_SetEclkHardMin,
1171 - cz_get_eclk_level(hwmgr,
1172 - cz_hwmgr->vce_dpm.hard_min_clk,
1173 - PPSMC_MSG_SetEclkHardMin));
1174 -
1175 + /*Program HardMin based on the vce_arbiter.ecclk */
1176 + if (hwmgr->vce_arbiter.ecclk == 0) {
1177 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1178 + PPSMC_MSG_SetEclkHardMin, 0);
1179 + /* disable ECLK DPM 0. Otherwise VCE could hang if
1180 + * switching SCLK from DPM 0 to 6/7 */
1181 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1182 + PPSMC_MSG_SetEclkSoftMin, 1);
1183 + } else {
1184 + cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
1185 + smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
1186 + PPSMC_MSG_SetEclkHardMin,
1187 + cz_get_eclk_level(hwmgr,
1188 + cz_hwmgr->vce_dpm.hard_min_clk,
1189 + PPSMC_MSG_SetEclkHardMin));
1190 + }
1191 }
1192 return 0;
1193 }
1194 diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
1195 index 7abda94fc2cf..3bedcf7ddd2a 100644
1196 --- a/drivers/gpu/drm/ast/ast_drv.h
1197 +++ b/drivers/gpu/drm/ast/ast_drv.h
1198 @@ -113,7 +113,11 @@ struct ast_private {
1199 struct ttm_bo_kmap_obj cache_kmap;
1200 int next_cursor;
1201 bool support_wide_screen;
1202 - bool DisableP2A;
1203 + enum {
1204 + ast_use_p2a,
1205 + ast_use_dt,
1206 + ast_use_defaults
1207 + } config_mode;
1208
1209 enum ast_tx_chip tx_chip_type;
1210 u8 dp501_maxclk;
1211 diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
1212 index 533e762d036d..fb9976254224 100644
1213 --- a/drivers/gpu/drm/ast/ast_main.c
1214 +++ b/drivers/gpu/drm/ast/ast_main.c
1215 @@ -62,13 +62,84 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
1216 return ret;
1217 }
1218
1219 +static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
1220 +{
1221 + struct device_node *np = dev->pdev->dev.of_node;
1222 + struct ast_private *ast = dev->dev_private;
1223 + uint32_t data, jregd0, jregd1;
1224 +
1225 + /* Defaults */
1226 + ast->config_mode = ast_use_defaults;
1227 + *scu_rev = 0xffffffff;
1228 +
1229 + /* Check if we have device-tree properties */
1230 + if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
1231 + scu_rev)) {
1232 + /* We do, disable P2A access */
1233 + ast->config_mode = ast_use_dt;
1234 + DRM_INFO("Using device-tree for configuration\n");
1235 + return;
1236 + }
1237 +
1238 + /* Not all families have a P2A bridge */
1239 + if (dev->pdev->device != PCI_CHIP_AST2000)
1240 + return;
1241 +
1242 + /*
1243 + * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
1244 + * is disabled. We force using P2A if VGA only mode bit
1245 + * is set D[7]
1246 + */
1247 + jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
1248 + jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
1249 + if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
1250 + /* Double check it's actually working */
1251 + data = ast_read32(ast, 0xf004);
1252 + if (data != 0xFFFFFFFF) {
1253 + /* P2A works, grab silicon revision */
1254 + ast->config_mode = ast_use_p2a;
1255 +
1256 + DRM_INFO("Using P2A bridge for configuration\n");
1257 +
1258 + /* Read SCU7c (silicon revision register) */
1259 + ast_write32(ast, 0xf004, 0x1e6e0000);
1260 + ast_write32(ast, 0xf000, 0x1);
1261 + *scu_rev = ast_read32(ast, 0x1207c);
1262 + return;
1263 + }
1264 + }
1265 +
1266 + /* We have a P2A bridge but it's disabled */
1267 + DRM_INFO("P2A bridge disabled, using default configuration\n");
1268 +}
1269
1270 static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1271 {
1272 struct ast_private *ast = dev->dev_private;
1273 - uint32_t data, jreg;
1274 + uint32_t jreg, scu_rev;
1275 +
1276 + /*
1277 + * If VGA isn't enabled, we need to enable now or subsequent
1278 + * access to the scratch registers will fail. We also inform
1279 + * our caller that it needs to POST the chip
1280 + * (Assumption: VGA not enabled -> need to POST)
1281 + */
1282 + if (!ast_is_vga_enabled(dev)) {
1283 + ast_enable_vga(dev);
1284 + DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
1285 + *need_post = true;
1286 + } else
1287 + *need_post = false;
1288 +
1289 +
1290 + /* Enable extended register access */
1291 + ast_enable_mmio(dev);
1292 ast_open_key(ast);
1293
1294 + /* Find out whether P2A works or whether to use device-tree */
1295 + ast_detect_config_mode(dev, &scu_rev);
1296 +
1297 + /* Identify chipset */
1298 if (dev->pdev->device == PCI_CHIP_AST1180) {
1299 ast->chip = AST1100;
1300 DRM_INFO("AST 1180 detected\n");
1301 @@ -80,12 +151,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1302 ast->chip = AST2300;
1303 DRM_INFO("AST 2300 detected\n");
1304 } else if (dev->pdev->revision >= 0x10) {
1305 - uint32_t data;
1306 - ast_write32(ast, 0xf004, 0x1e6e0000);
1307 - ast_write32(ast, 0xf000, 0x1);
1308 -
1309 - data = ast_read32(ast, 0x1207c);
1310 - switch (data & 0x0300) {
1311 + switch (scu_rev & 0x0300) {
1312 case 0x0200:
1313 ast->chip = AST1100;
1314 DRM_INFO("AST 1100 detected\n");
1315 @@ -110,26 +176,6 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1316 }
1317 }
1318
1319 - /*
1320 - * If VGA isn't enabled, we need to enable now or subsequent
1321 - * access to the scratch registers will fail. We also inform
1322 - * our caller that it needs to POST the chip
1323 - * (Assumption: VGA not enabled -> need to POST)
1324 - */
1325 - if (!ast_is_vga_enabled(dev)) {
1326 - ast_enable_vga(dev);
1327 - ast_enable_mmio(dev);
1328 - DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
1329 - *need_post = true;
1330 - } else
1331 - *need_post = false;
1332 -
1333 - /* Check P2A Access */
1334 - ast->DisableP2A = true;
1335 - data = ast_read32(ast, 0xf004);
1336 - if (data != 0xFFFFFFFF)
1337 - ast->DisableP2A = false;
1338 -
1339 /* Check if we support wide screen */
1340 switch (ast->chip) {
1341 case AST1180:
1342 @@ -146,17 +192,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1343 ast->support_wide_screen = true;
1344 else {
1345 ast->support_wide_screen = false;
1346 - if (ast->DisableP2A == false) {
1347 - /* Read SCU7c (silicon revision register) */
1348 - ast_write32(ast, 0xf004, 0x1e6e0000);
1349 - ast_write32(ast, 0xf000, 0x1);
1350 - data = ast_read32(ast, 0x1207c);
1351 - data &= 0x300;
1352 - if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
1353 - ast->support_wide_screen = true;
1354 - if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
1355 - ast->support_wide_screen = true;
1356 - }
1357 + if (ast->chip == AST2300 &&
1358 + (scu_rev & 0x300) == 0x0) /* ast1300 */
1359 + ast->support_wide_screen = true;
1360 + if (ast->chip == AST2400 &&
1361 + (scu_rev & 0x300) == 0x100) /* ast1400 */
1362 + ast->support_wide_screen = true;
1363 }
1364 break;
1365 }
1366 @@ -220,85 +261,102 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
1367
1368 static int ast_get_dram_info(struct drm_device *dev)
1369 {
1370 + struct device_node *np = dev->pdev->dev.of_node;
1371 struct ast_private *ast = dev->dev_private;
1372 - uint32_t data, data2;
1373 - uint32_t denum, num, div, ref_pll;
1374 + uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
1375 + uint32_t denum, num, div, ref_pll, dsel;
1376
1377 - if (ast->DisableP2A)
1378 - {
1379 + switch (ast->config_mode) {
1380 + case ast_use_dt:
1381 + /*
1382 + * If some properties are missing, use reasonable
1383 + * defaults for AST2400
1384 + */
1385 + if (of_property_read_u32(np, "aspeed,mcr-configuration",
1386 + &mcr_cfg))
1387 + mcr_cfg = 0x00000577;
1388 + if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
1389 + &mcr_scu_mpll))
1390 + mcr_scu_mpll = 0x000050C0;
1391 + if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
1392 + &mcr_scu_strap))
1393 + mcr_scu_strap = 0;
1394 + break;
1395 + case ast_use_p2a:
1396 + ast_write32(ast, 0xf004, 0x1e6e0000);
1397 + ast_write32(ast, 0xf000, 0x1);
1398 + mcr_cfg = ast_read32(ast, 0x10004);
1399 + mcr_scu_mpll = ast_read32(ast, 0x10120);
1400 + mcr_scu_strap = ast_read32(ast, 0x10170);
1401 + break;
1402 + case ast_use_defaults:
1403 + default:
1404 ast->dram_bus_width = 16;
1405 ast->dram_type = AST_DRAM_1Gx16;
1406 ast->mclk = 396;
1407 + return 0;
1408 }
1409 - else
1410 - {
1411 - ast_write32(ast, 0xf004, 0x1e6e0000);
1412 - ast_write32(ast, 0xf000, 0x1);
1413 - data = ast_read32(ast, 0x10004);
1414 -
1415 - if (data & 0x40)
1416 - ast->dram_bus_width = 16;
1417 - else
1418 - ast->dram_bus_width = 32;
1419
1420 - if (ast->chip == AST2300 || ast->chip == AST2400) {
1421 - switch (data & 0x03) {
1422 - case 0:
1423 - ast->dram_type = AST_DRAM_512Mx16;
1424 - break;
1425 - default:
1426 - case 1:
1427 - ast->dram_type = AST_DRAM_1Gx16;
1428 - break;
1429 - case 2:
1430 - ast->dram_type = AST_DRAM_2Gx16;
1431 - break;
1432 - case 3:
1433 - ast->dram_type = AST_DRAM_4Gx16;
1434 - break;
1435 - }
1436 - } else {
1437 - switch (data & 0x0c) {
1438 - case 0:
1439 - case 4:
1440 - ast->dram_type = AST_DRAM_512Mx16;
1441 - break;
1442 - case 8:
1443 - if (data & 0x40)
1444 - ast->dram_type = AST_DRAM_1Gx16;
1445 - else
1446 - ast->dram_type = AST_DRAM_512Mx32;
1447 - break;
1448 - case 0xc:
1449 - ast->dram_type = AST_DRAM_1Gx32;
1450 - break;
1451 - }
1452 - }
1453 + if (mcr_cfg & 0x40)
1454 + ast->dram_bus_width = 16;
1455 + else
1456 + ast->dram_bus_width = 32;
1457
1458 - data = ast_read32(ast, 0x10120);
1459 - data2 = ast_read32(ast, 0x10170);
1460 - if (data2 & 0x2000)
1461 - ref_pll = 14318;
1462 - else
1463 - ref_pll = 12000;
1464 -
1465 - denum = data & 0x1f;
1466 - num = (data & 0x3fe0) >> 5;
1467 - data = (data & 0xc000) >> 14;
1468 - switch (data) {
1469 - case 3:
1470 - div = 0x4;
1471 + if (ast->chip == AST2300 || ast->chip == AST2400) {
1472 + switch (mcr_cfg & 0x03) {
1473 + case 0:
1474 + ast->dram_type = AST_DRAM_512Mx16;
1475 break;
1476 - case 2:
1477 + default:
1478 case 1:
1479 - div = 0x2;
1480 + ast->dram_type = AST_DRAM_1Gx16;
1481 break;
1482 - default:
1483 - div = 0x1;
1484 + case 2:
1485 + ast->dram_type = AST_DRAM_2Gx16;
1486 + break;
1487 + case 3:
1488 + ast->dram_type = AST_DRAM_4Gx16;
1489 + break;
1490 + }
1491 + } else {
1492 + switch (mcr_cfg & 0x0c) {
1493 + case 0:
1494 + case 4:
1495 + ast->dram_type = AST_DRAM_512Mx16;
1496 + break;
1497 + case 8:
1498 + if (mcr_cfg & 0x40)
1499 + ast->dram_type = AST_DRAM_1Gx16;
1500 + else
1501 + ast->dram_type = AST_DRAM_512Mx32;
1502 + break;
1503 + case 0xc:
1504 + ast->dram_type = AST_DRAM_1Gx32;
1505 break;
1506 }
1507 - ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
1508 }
1509 +
1510 + if (mcr_scu_strap & 0x2000)
1511 + ref_pll = 14318;
1512 + else
1513 + ref_pll = 12000;
1514 +
1515 + denum = mcr_scu_mpll & 0x1f;
1516 + num = (mcr_scu_mpll & 0x3fe0) >> 5;
1517 + dsel = (mcr_scu_mpll & 0xc000) >> 14;
1518 + switch (dsel) {
1519 + case 3:
1520 + div = 0x4;
1521 + break;
1522 + case 2:
1523 + case 1:
1524 + div = 0x2;
1525 + break;
1526 + default:
1527 + div = 0x1;
1528 + break;
1529 + }
1530 + ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
1531 return 0;
1532 }
1533
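
To make the MCLK computation above concrete, here is the field extraction applied to 0x000050C0, the AST2400 default MPLL value used by the device-tree fallback path. This is a worked example only, evaluating the driver's expression verbatim with its left-to-right associativity:

#include <stdio.h>

int main(void)
{
	unsigned int mcr_scu_mpll = 0x000050C0;	/* AST2400 default above */
	unsigned int ref_pll = 12000;		/* strap bit 13 clear */

	unsigned int denum = mcr_scu_mpll & 0x1f;		/* 0   */
	unsigned int num   = (mcr_scu_mpll & 0x3fe0) >> 5;	/* 134 */
	unsigned int dsel  = (mcr_scu_mpll & 0xc000) >> 14;	/* 1   */
	unsigned int div   = (dsel == 3) ? 4 : (dsel >= 1) ? 2 : 1;

	/* Same expression as ast_get_dram_info(), evaluated left to right:
	 * 12000 * 136 / 2 * 2000 = 1632000000 */
	unsigned int mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);

	printf("mclk = %u\n", mclk);
	return 0;
}
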
1534 diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
1535 index 270e8fb2803f..c7c58becb25d 100644
1536 --- a/drivers/gpu/drm/ast/ast_post.c
1537 +++ b/drivers/gpu/drm/ast/ast_post.c
1538 @@ -375,17 +375,14 @@ void ast_post_gpu(struct drm_device *dev)
1539 ast_enable_mmio(dev);
1540 ast_set_def_ext_reg(dev);
1541
1542 - if (ast->DisableP2A == false)
1543 - {
1544 + if (ast->config_mode == ast_use_p2a) {
1545 if (ast->chip == AST2300 || ast->chip == AST2400)
1546 ast_init_dram_2300(dev);
1547 else
1548 ast_init_dram_reg(dev);
1549
1550 ast_init_3rdtx(dev);
1551 - }
1552 - else
1553 - {
1554 + } else {
1555 if (ast->tx_chip_type != AST_TX_NONE)
1556 ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
1557 }
1558 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
1559 index 169ac96e8f08..fe0e85b41310 100644
1560 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
1561 +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
1562 @@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
1563 struct list_head list;
1564 bool found;
1565
1566 + /*
1567 + * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
1568 + * drm_mm into giving out a low IOVA after address space
1569 + * rollover. This needs a proper fix.
1570 + */
1571 ret = drm_mm_insert_node_in_range(&mmu->mm, node,
1572 size, 0, mmu->last_iova, ~0UL,
1573 - DRM_MM_SEARCH_DEFAULT);
1574 + mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
1575
1576 if (ret != -ENOSPC)
1577 break;
1578 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1579 index 3333e8a45933..b75d809c292e 100644
1580 --- a/drivers/gpu/drm/radeon/si.c
1581 +++ b/drivers/gpu/drm/radeon/si.c
1582 @@ -115,6 +115,8 @@ MODULE_FIRMWARE("radeon/hainan_rlc.bin");
1583 MODULE_FIRMWARE("radeon/hainan_smc.bin");
1584 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
1585
1586 +MODULE_FIRMWARE("radeon/si58_mc.bin");
1587 +
1588 static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
1589 static void si_pcie_gen3_enable(struct radeon_device *rdev);
1590 static void si_program_aspm(struct radeon_device *rdev);
1591 @@ -1650,6 +1652,7 @@ static int si_init_microcode(struct radeon_device *rdev)
1592 int err;
1593 int new_fw = 0;
1594 bool new_smc = false;
1595 + bool si58_fw = false;
1596
1597 DRM_DEBUG("\n");
1598
1599 @@ -1742,6 +1745,10 @@ static int si_init_microcode(struct radeon_device *rdev)
1600 default: BUG();
1601 }
1602
1603 + /* this memory configuration requires special firmware */
1604 + if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
1605 + si58_fw = true;
1606 +
1607 DRM_INFO("Loading %s Microcode\n", new_chip_name);
1608
1609 snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
1610 @@ -1845,7 +1852,10 @@ static int si_init_microcode(struct radeon_device *rdev)
1611 }
1612 }
1613
1614 - snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1615 + if (si58_fw)
1616 + snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
1617 + else
1618 + snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
1619 err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
1620 if (err) {
1621 snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
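The firmware selection above keys off bits [31:24] of MC_SEQ_MISC0: boards reading 0x58 there need the dedicated si58_mc.bin image instead of the per-chip MC firmware. A small standalone sketch of that decision follows; pick_mc_firmware() and the register value are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void pick_mc_firmware(uint32_t mc_seq_misc0, const char *chip,
			     char *buf, size_t len)
{
	/* boards whose MC_SEQ_MISC0[31:24] reads 0x58 need si58_mc.bin */
	bool si58_fw = ((mc_seq_misc0 & 0xff000000) >> 24) == 0x58;

	if (si58_fw)
		snprintf(buf, len, "radeon/si58_mc.bin");
	else
		snprintf(buf, len, "radeon/%s_mc.bin", chip);
}

int main(void)
{
	char fw_name[64];

	pick_mc_firmware(0x58000000, "TAHITI", fw_name, sizeof(fw_name));
	printf("%s\n", fw_name);	/* radeon/si58_mc.bin */
	return 0;
}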
1622 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
1623 index 13db8a2851ed..1f013d45c9e9 100644
1624 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
1625 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
1626 @@ -321,6 +321,7 @@ void vmw_cmdbuf_res_man_destroy(struct vmw_cmdbuf_res_manager *man)
1627 list_for_each_entry_safe(entry, next, &man->list, head)
1628 vmw_cmdbuf_res_free(man, entry);
1629
1630 + drm_ht_remove(&man->resources);
1631 kfree(man);
1632 }
1633
1634 diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
1635 index b1bce804fe97..8008e06b7efe 100644
1636 --- a/drivers/hid/i2c-hid/i2c-hid.c
1637 +++ b/drivers/hid/i2c-hid/i2c-hid.c
1638 @@ -427,6 +427,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
1639 if (ret)
1640 goto out_unlock;
1641
1642 + /*
1643 + * The HID over I2C specification states that if a DEVICE needs time
1644 + * after the PWR_ON request, it should utilise CLOCK stretching.
1645 + * However, it has been observed that the Windows driver provides a
1646 + * 1ms sleep between the PWR_ON and RESET requests and that some devices
1647 + * rely on this.
1648 + */
1649 + usleep_range(1000, 5000);
1650 +
1651 i2c_hid_dbg(ihid, "resetting...\n");
1652
1653 ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
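A standalone sketch of the sequence the hunk above enforces; set_power_on() and issue_reset() are hypothetical stand-ins for the driver's SET_POWER and RESET commands, and plain usleep() stands in for usleep_range(1000, 5000).

#include <stdio.h>
#include <unistd.h>

static int set_power_on(void) { puts("PWR_ON"); return 0; }
static int issue_reset(void)  { puts("RESET");  return 0; }

int main(void)
{
	if (set_power_on())
		return 1;
	/* mirror the 1 ms grace period added above: the spec allows clock
	 * stretching instead, but some devices expect the sleep */
	usleep(1000);
	return issue_reset();
}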
1654 diff --git a/drivers/i2c/busses/i2c-brcmstb.c b/drivers/i2c/busses/i2c-brcmstb.c
1655 index 0652281662a8..78792b4d6437 100644
1656 --- a/drivers/i2c/busses/i2c-brcmstb.c
1657 +++ b/drivers/i2c/busses/i2c-brcmstb.c
1658 @@ -465,6 +465,7 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
1659 u8 *tmp_buf;
1660 int len = 0;
1661 int xfersz = brcmstb_i2c_get_xfersz(dev);
1662 + u32 cond, cond_per_msg;
1663
1664 if (dev->is_suspended)
1665 return -EBUSY;
1666 @@ -481,10 +482,11 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
1667 pmsg->buf ? pmsg->buf[0] : '0', pmsg->len);
1668
1669 if (i < (num - 1) && (msgs[i + 1].flags & I2C_M_NOSTART))
1670 - brcmstb_set_i2c_start_stop(dev, ~(COND_START_STOP));
1671 + cond = ~COND_START_STOP;
1672 else
1673 - brcmstb_set_i2c_start_stop(dev,
1674 - COND_RESTART | COND_NOSTOP);
1675 + cond = COND_RESTART | COND_NOSTOP;
1676 +
1677 + brcmstb_set_i2c_start_stop(dev, cond);
1678
1679 /* Send slave address */
1680 if (!(pmsg->flags & I2C_M_NOSTART)) {
1681 @@ -497,13 +499,24 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
1682 }
1683 }
1684
1685 + cond_per_msg = cond;
1686 +
1687 /* Perform data transfer */
1688 while (len) {
1689 bytes_to_xfer = min(len, xfersz);
1690
1691 - if (len <= xfersz && i == (num - 1))
1692 - brcmstb_set_i2c_start_stop(dev,
1693 - ~(COND_START_STOP));
1694 + if (len <= xfersz) {
1695 + if (i == (num - 1))
1696 + cond_per_msg = cond_per_msg &
1697 + ~(COND_RESTART | COND_NOSTOP);
1698 + else
1699 + cond_per_msg = cond;
1700 + } else {
1701 + cond_per_msg = (cond_per_msg & ~COND_RESTART) |
1702 + COND_NOSTOP;
1703 + }
1704 +
1705 + brcmstb_set_i2c_start_stop(dev, cond_per_msg);
1706
1707 rc = brcmstb_i2c_xfer_bsc_data(dev, tmp_buf,
1708 bytes_to_xfer, pmsg);
1709 @@ -512,6 +525,8 @@ static int brcmstb_i2c_xfer(struct i2c_adapter *adapter,
1710
1711 len -= bytes_to_xfer;
1712 tmp_buf += bytes_to_xfer;
1713 +
1714 + cond_per_msg = COND_NOSTART | COND_NOSTOP;
1715 }
1716 }
1717
1718 diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1719 index 71232e5fabf6..20ec34761b39 100644
1720 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1721 +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
1722 @@ -1267,6 +1267,7 @@ void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
1723 u32 doorbell[2];
1724
1725 doorbell[0] = cons_index & ((hr_cq->cq_depth << 1) - 1);
1726 + doorbell[1] = 0;
1727 roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1);
1728 roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M,
1729 ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3);
1730 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1731 index 11a13b5be73a..41800b6d492e 100644
1732 --- a/drivers/iommu/amd_iommu.c
1733 +++ b/drivers/iommu/amd_iommu.c
1734 @@ -3857,11 +3857,9 @@ static void irte_ga_prepare(void *entry,
1735 u8 vector, u32 dest_apicid, int devid)
1736 {
1737 struct irte_ga *irte = (struct irte_ga *) entry;
1738 - struct iommu_dev_data *dev_data = search_dev_data(devid);
1739
1740 irte->lo.val = 0;
1741 irte->hi.val = 0;
1742 - irte->lo.fields_remap.guest_mode = dev_data ? dev_data->use_vapic : 0;
1743 irte->lo.fields_remap.int_type = delivery_mode;
1744 irte->lo.fields_remap.dm = dest_mode;
1745 irte->hi.fields.vector = vector;
1746 @@ -3917,10 +3915,10 @@ static void irte_ga_set_affinity(void *entry, u16 devid, u16 index,
1747 struct irte_ga *irte = (struct irte_ga *) entry;
1748 struct iommu_dev_data *dev_data = search_dev_data(devid);
1749
1750 - if (!dev_data || !dev_data->use_vapic) {
1751 + if (!dev_data || !dev_data->use_vapic ||
1752 + !irte->lo.fields_remap.guest_mode) {
1753 irte->hi.fields.vector = vector;
1754 irte->lo.fields_remap.destination = dest_apicid;
1755 - irte->lo.fields_remap.guest_mode = 0;
1756 modify_irte_ga(devid, index, irte, NULL);
1757 }
1758 }
1759 diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
1760 index f8ed8c95b685..a0b4ac64b9ff 100644
1761 --- a/drivers/iommu/amd_iommu_v2.c
1762 +++ b/drivers/iommu/amd_iommu_v2.c
1763 @@ -695,9 +695,9 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
1764
1765 out_unregister:
1766 mmu_notifier_unregister(&pasid_state->mn, mm);
1767 + mmput(mm);
1768
1769 out_free:
1770 - mmput(mm);
1771 free_pasid_state(pasid_state);
1772
1773 out:
1774 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
1775 index c5ab8667e6f2..1520e7f02c2f 100644
1776 --- a/drivers/iommu/dma-iommu.c
1777 +++ b/drivers/iommu/dma-iommu.c
1778 @@ -112,8 +112,7 @@ static void iova_reserve_pci_windows(struct pci_dev *dev,
1779 unsigned long lo, hi;
1780
1781 resource_list_for_each_entry(window, &bridge->windows) {
1782 - if (resource_type(window->res) != IORESOURCE_MEM &&
1783 - resource_type(window->res) != IORESOURCE_IO)
1784 + if (resource_type(window->res) != IORESOURCE_MEM)
1785 continue;
1786
1787 lo = iova_pfn(iovad, window->res->start - window->offset);
1788 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1789 index 87fcbf71b85a..002f8a421efa 100644
1790 --- a/drivers/iommu/intel-iommu.c
1791 +++ b/drivers/iommu/intel-iommu.c
1792 @@ -1144,7 +1144,7 @@ static void dma_pte_free_level(struct dmar_domain *domain, int level,
1793 if (!dma_pte_present(pte) || dma_pte_superpage(pte))
1794 goto next;
1795
1796 - level_pfn = pfn & level_mask(level - 1);
1797 + level_pfn = pfn & level_mask(level);
1798 level_pte = phys_to_virt(dma_pte_addr(pte));
1799
1800 if (level > 2)
1801 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
1802 index 9a2f1960873b..87d3060f8609 100644
1803 --- a/drivers/iommu/iommu.c
1804 +++ b/drivers/iommu/iommu.c
1805 @@ -383,36 +383,30 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
1806 device->dev = dev;
1807
1808 ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
1809 - if (ret) {
1810 - kfree(device);
1811 - return ret;
1812 - }
1813 + if (ret)
1814 + goto err_free_device;
1815
1816 device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
1817 rename:
1818 if (!device->name) {
1819 - sysfs_remove_link(&dev->kobj, "iommu_group");
1820 - kfree(device);
1821 - return -ENOMEM;
1822 + ret = -ENOMEM;
1823 + goto err_remove_link;
1824 }
1825
1826 ret = sysfs_create_link_nowarn(group->devices_kobj,
1827 &dev->kobj, device->name);
1828 if (ret) {
1829 - kfree(device->name);
1830 if (ret == -EEXIST && i >= 0) {
1831 /*
1832 * Account for the slim chance of collision
1833 * and append an instance to the name.
1834 */
1835 + kfree(device->name);
1836 device->name = kasprintf(GFP_KERNEL, "%s.%d",
1837 kobject_name(&dev->kobj), i++);
1838 goto rename;
1839 }
1840 -
1841 - sysfs_remove_link(&dev->kobj, "iommu_group");
1842 - kfree(device);
1843 - return ret;
1844 + goto err_free_name;
1845 }
1846
1847 kobject_get(group->devices_kobj);
1848 @@ -424,8 +418,10 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
1849 mutex_lock(&group->mutex);
1850 list_add_tail(&device->list, &group->devices);
1851 if (group->domain)
1852 - __iommu_attach_device(group->domain, dev);
1853 + ret = __iommu_attach_device(group->domain, dev);
1854 mutex_unlock(&group->mutex);
1855 + if (ret)
1856 + goto err_put_group;
1857
1858 /* Notify any listeners about change to group. */
1859 blocking_notifier_call_chain(&group->notifier,
1860 @@ -436,6 +432,21 @@ int iommu_group_add_device(struct iommu_group *group, struct device *dev)
1861 pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
1862
1863 return 0;
1864 +
1865 +err_put_group:
1866 + mutex_lock(&group->mutex);
1867 + list_del(&device->list);
1868 + mutex_unlock(&group->mutex);
1869 + dev->iommu_group = NULL;
1870 + kobject_put(group->devices_kobj);
1871 +err_free_name:
1872 + kfree(device->name);
1873 +err_remove_link:
1874 + sysfs_remove_link(&dev->kobj, "iommu_group");
1875 +err_free_device:
1876 + kfree(device);
1877 + pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
1878 + return ret;
1879 }
1880 EXPORT_SYMBOL_GPL(iommu_group_add_device);
1881
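The rewrite above converts the duplicated cleanup into the usual goto unwind ladder: each failure jumps to a label that undoes, in reverse order, exactly the steps that already succeeded. A minimal userspace illustration of the pattern, with a hypothetical make_thing() and struct thing:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct thing {
	char *name;
};

static struct thing *make_thing(const char *n)
{
	struct thing *t = calloc(1, sizeof(*t));

	if (!t)
		goto err_out;
	t->name = strdup(n);
	if (!t->name)
		goto err_free_thing;
	return t;

err_free_thing:
	free(t);
err_out:
	fprintf(stderr, "make_thing failed\n");
	return NULL;
}

int main(void)
{
	struct thing *t = make_thing("demo");

	if (!t)
		return 1;
	printf("%s\n", t->name);
	free(t->name);
	free(t);
	return 0;
}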
1882 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1883 index be869a990e38..0b678b5da4c4 100644
1884 --- a/drivers/md/dm-thin.c
1885 +++ b/drivers/md/dm-thin.c
1886 @@ -1095,6 +1095,19 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
1887 return;
1888 }
1889
1890 + /*
1891 + * Increment the unmapped blocks. This prevents a race between the
1892 + * passdown io and reallocation of freed blocks.
1893 + */
1894 + r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
1895 + if (r) {
1896 + metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
1897 + bio_io_error(m->bio);
1898 + cell_defer_no_holder(tc, m->cell);
1899 + mempool_free(m, pool->mapping_pool);
1900 + return;
1901 + }
1902 +
1903 discard_parent = bio_alloc(GFP_NOIO, 1);
1904 if (!discard_parent) {
1905 DMWARN("%s: unable to allocate top level discard bio for passdown. Skipping passdown.",
1906 @@ -1115,19 +1128,6 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
1907 end_discard(&op, r);
1908 }
1909 }
1910 -
1911 - /*
1912 - * Increment the unmapped blocks. This prevents a race between the
1913 - * passdown io and reallocation of freed blocks.
1914 - */
1915 - r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
1916 - if (r) {
1917 - metadata_operation_failed(pool, "dm_pool_inc_data_range", r);
1918 - bio_io_error(m->bio);
1919 - cell_defer_no_holder(tc, m->cell);
1920 - mempool_free(m, pool->mapping_pool);
1921 - return;
1922 - }
1923 }
1924
1925 static void process_prepared_discard_passdown_pt2(struct dm_thin_new_mapping *m)
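A toy model of the reordering above: the reference that pins the data range is taken before the asynchronous passdown is issued, so the blocks cannot be freed and reallocated while the discard is in flight. pin_range() and issue_passdown() are stand-ins, not the dm-thin API.

#include <stdio.h>

static int range_refcount;

static int pin_range(void)
{
	range_refcount++;	/* a real implementation could fail here */
	return 0;
}

static void issue_passdown(void)
{
	printf("passdown issued, range pinned (refcount=%d)\n",
	       range_refcount);
}

int main(void)
{
	if (pin_range())	/* failing before issue leaves nothing in flight */
		return 1;
	issue_passdown();
	return 0;
}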
1926 diff --git a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c
1927 index 9d2424bfdbf5..d9fab2222eb3 100644
1928 --- a/drivers/mtd/nand/brcmnand/brcmnand.c
1929 +++ b/drivers/mtd/nand/brcmnand/brcmnand.c
1930 @@ -101,6 +101,9 @@ struct brcm_nand_dma_desc {
1931 #define BRCMNAND_MIN_BLOCKSIZE (8 * 1024)
1932 #define BRCMNAND_MIN_DEVSIZE (4ULL * 1024 * 1024)
1933
1934 +#define NAND_CTRL_RDY (INTFC_CTLR_READY | INTFC_FLASH_READY)
1935 +#define NAND_POLL_STATUS_TIMEOUT_MS 100
1936 +
1937 /* Controller feature flags */
1938 enum {
1939 BRCMNAND_HAS_1K_SECTORS = BIT(0),
1940 @@ -765,6 +768,31 @@ enum {
1941 CS_SELECT_AUTO_DEVICE_ID_CFG = BIT(30),
1942 };
1943
1944 +static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl,
1945 + u32 mask, u32 expected_val,
1946 + unsigned long timeout_ms)
1947 +{
1948 + unsigned long limit;
1949 + u32 val;
1950 +
1951 + if (!timeout_ms)
1952 + timeout_ms = NAND_POLL_STATUS_TIMEOUT_MS;
1953 +
1954 + limit = jiffies + msecs_to_jiffies(timeout_ms);
1955 + do {
1956 + val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
1957 + if ((val & mask) == expected_val)
1958 + return 0;
1959 +
1960 + cpu_relax();
1961 + } while (time_after(limit, jiffies));
1962 +
1963 + dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n",
1964 + expected_val, val & mask);
1965 +
1966 + return -ETIMEDOUT;
1967 +}
1968 +
1969 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en)
1970 {
1971 u32 val = en ? CS_SELECT_NAND_WP : 0;
1972 @@ -1024,12 +1052,39 @@ static void brcmnand_wp(struct mtd_info *mtd, int wp)
1973
1974 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) {
1975 static int old_wp = -1;
1976 + int ret;
1977
1978 if (old_wp != wp) {
1979 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off");
1980 old_wp = wp;
1981 }
1982 +
1983 + /*
1984 + * make sure the ctrl/flash are ready before and after
1985 + * changing the state of the #WP pin
1986 + */
1987 + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY |
1988 + NAND_STATUS_READY,
1989 + NAND_CTRL_RDY |
1990 + NAND_STATUS_READY, 0);
1991 + if (ret)
1992 + return;
1993 +
1994 brcmnand_set_wp(ctrl, wp);
1995 + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
1996 + /* NAND_STATUS_WP 0x00 = protected, 0x80 = not protected */
1997 + ret = bcmnand_ctrl_poll_status(ctrl,
1998 + NAND_CTRL_RDY |
1999 + NAND_STATUS_READY |
2000 + NAND_STATUS_WP,
2001 + NAND_CTRL_RDY |
2002 + NAND_STATUS_READY |
2003 + (wp ? 0 : NAND_STATUS_WP), 0);
2004 +
2005 + if (ret)
2006 + dev_err_ratelimited(&host->pdev->dev,
2007 + "nand #WP expected %s\n",
2008 + wp ? "on" : "off");
2009 }
2010 }
2011
2012 @@ -1157,15 +1212,15 @@ static irqreturn_t brcmnand_dma_irq(int irq, void *data)
2013 static void brcmnand_send_cmd(struct brcmnand_host *host, int cmd)
2014 {
2015 struct brcmnand_controller *ctrl = host->ctrl;
2016 - u32 intfc;
2017 + int ret;
2018
2019 dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd,
2020 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS));
2021 BUG_ON(ctrl->cmd_pending != 0);
2022 ctrl->cmd_pending = cmd;
2023
2024 - intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS);
2025 - WARN_ON(!(intfc & INTFC_CTLR_READY));
2026 + ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0);
2027 + WARN_ON(ret);
2028
2029 mb(); /* flush previous writes */
2030 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START,
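bcmnand_ctrl_poll_status() above is a bounded busy-wait: read the status register, compare the masked bits, and give up after a deadline. A userspace analogue using a monotonic clock, with a hypothetical read_status() stub in place of the register read:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t read_status(void)
{
	return 0xc0000000;	/* pretend both ready bits are already set */
}

static int poll_status(uint32_t mask, uint32_t expected,
		       unsigned int timeout_ms)
{
	struct timespec now, limit;

	clock_gettime(CLOCK_MONOTONIC, &limit);
	limit.tv_sec += timeout_ms / 1000;
	limit.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (limit.tv_nsec >= 1000000000L) {
		limit.tv_sec++;
		limit.tv_nsec -= 1000000000L;
	}

	do {
		if ((read_status() & mask) == expected)
			return 0;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec < limit.tv_sec ||
		 (now.tv_sec == limit.tv_sec && now.tv_nsec < limit.tv_nsec));

	return -1;	/* timed out, like -ETIMEDOUT above */
}

int main(void)
{
	printf("poll: %d\n", poll_status(0xc0000000, 0xc0000000, 100));
	return 0;
}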
2031 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
2032 index ca106d4275cc..3424435a39dd 100644
2033 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
2034 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
2035 @@ -2825,8 +2825,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
2036
2037 /* Flush Tx queues */
2038 ret = xgbe_flush_tx_queues(pdata);
2039 - if (ret)
2040 + if (ret) {
2041 + netdev_err(pdata->netdev, "error flushing TX queues\n");
2042 return ret;
2043 + }
2044
2045 /*
2046 * Initialize DMA related features
2047 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2048 index 0f0f30149e5a..1e4e8b245cd5 100644
2049 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2050 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
2051 @@ -877,7 +877,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
2052
2053 DBGPR("-->xgbe_start\n");
2054
2055 - hw_if->init(pdata);
2056 + ret = hw_if->init(pdata);
2057 + if (ret)
2058 + return ret;
2059
2060 ret = phy_if->phy_start(pdata);
2061 if (ret)
2062 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2063 index 5cc0f8cfec87..20e569bd978a 100644
2064 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2065 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
2066 @@ -1097,7 +1097,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
2067 {
2068 #ifdef CONFIG_INET
2069 struct tcphdr *th;
2070 - int len, nw_off, tcp_opt_len;
2071 + int len, nw_off, tcp_opt_len = 0;
2072
2073 if (tcp_ts)
2074 tcp_opt_len = 12;
2075 diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
2076 index 679679a4ccb2..e858b1af788d 100644
2077 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
2078 +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
2079 @@ -48,8 +48,9 @@ struct lmac {
2080 struct bgx {
2081 u8 bgx_id;
2082 struct lmac lmac[MAX_LMAC_PER_BGX];
2083 - int lmac_count;
2084 + u8 lmac_count;
2085 u8 max_lmac;
2086 + u8 acpi_lmac_idx;
2087 void __iomem *reg_base;
2088 struct pci_dev *pdev;
2089 bool is_dlm;
2090 @@ -1159,13 +1160,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
2091 if (acpi_bus_get_device(handle, &adev))
2092 goto out;
2093
2094 - acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
2095 + acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
2096
2097 - SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
2098 + SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
2099
2100 - bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
2101 + bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
2102 + bgx->acpi_lmac_idx++; /* move to next LMAC */
2103 out:
2104 - bgx->lmac_count++;
2105 return AE_OK;
2106 }
2107
2108 diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
2109 index 0e74529a4209..30e855004c57 100644
2110 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c
2111 +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
2112 @@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
2113 err:
2114 mutex_unlock(&adapter->mcc_lock);
2115
2116 - if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
2117 + if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
2118 status = -EPERM;
2119
2120 return status;
2121 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
2122 index 9711ca4510fa..b3c9cbef766e 100644
2123 --- a/drivers/net/ethernet/emulex/benet/be_main.c
2124 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
2125 @@ -319,6 +319,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
2126 if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
2127 return 0;
2128
2129 + /* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
2130 + * address
2131 + */
2132 + if (BEx_chip(adapter) && be_virtfn(adapter) &&
2133 + !check_privilege(adapter, BE_PRIV_FILTMGMT))
2134 + return -EPERM;
2135 +
2136 /* if device is not running, copy MAC to netdev->dev_addr */
2137 if (!netif_running(netdev))
2138 goto done;
2139 @@ -3630,7 +3637,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
2140
2141 static void be_disable_if_filters(struct be_adapter *adapter)
2142 {
2143 - be_dev_mac_del(adapter, adapter->pmac_id[0]);
2144 + /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
2145 + if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
2146 + check_privilege(adapter, BE_PRIV_FILTMGMT))
2147 + be_dev_mac_del(adapter, adapter->pmac_id[0]);
2148 +
2149 be_clear_uc_list(adapter);
2150 be_clear_mc_list(adapter);
2151
2152 @@ -3783,8 +3794,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
2153 if (status)
2154 return status;
2155
2156 - /* For BE3 VFs, the PF programs the initial MAC address */
2157 - if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
2158 + /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
2159 + if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
2160 + check_privilege(adapter, BE_PRIV_FILTMGMT)) {
2161 status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
2162 if (status)
2163 return status;
2164 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
2165 index d391beebe591..3f4e71148808 100644
2166 --- a/drivers/net/ethernet/freescale/gianfar.c
2167 +++ b/drivers/net/ethernet/freescale/gianfar.c
2168 @@ -2951,7 +2951,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
2169 }
2170
2171 /* try reuse page */
2172 - if (unlikely(page_count(page) != 1))
2173 + if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2174 return false;
2175
2176 /* change offset to the other half */
2177 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
2178 index 03dca732e4c6..b375ae9f98ef 100644
2179 --- a/drivers/net/ethernet/ibm/ibmveth.c
2180 +++ b/drivers/net/ethernet/ibm/ibmveth.c
2181 @@ -1604,8 +1604,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
2182 netdev->netdev_ops = &ibmveth_netdev_ops;
2183 netdev->ethtool_ops = &netdev_ethtool_ops;
2184 SET_NETDEV_DEV(netdev, &dev->dev);
2185 - netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2186 - NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2187 + netdev->hw_features = NETIF_F_SG;
2188 + if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
2189 + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2190 + NETIF_F_RXCSUM;
2191 + }
2192
2193 netdev->features |= netdev->hw_features;
2194
2195 diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
2196 index 1799fe1415df..c051987aab83 100644
2197 --- a/drivers/net/ethernet/korina.c
2198 +++ b/drivers/net/ethernet/korina.c
2199 @@ -900,10 +900,10 @@ static void korina_restart_task(struct work_struct *work)
2200 DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
2201 &lp->rx_dma_regs->dmasm);
2202
2203 - korina_free_ring(dev);
2204 -
2205 napi_disable(&lp->napi);
2206
2207 + korina_free_ring(dev);
2208 +
2209 if (korina_init(dev) < 0) {
2210 printk(KERN_ERR "%s: cannot restart device\n", dev->name);
2211 return;
2212 @@ -1064,12 +1064,12 @@ static int korina_close(struct net_device *dev)
2213 tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
2214 writel(tmp, &lp->rx_dma_regs->dmasm);
2215
2216 - korina_free_ring(dev);
2217 -
2218 napi_disable(&lp->napi);
2219
2220 cancel_work_sync(&lp->restart_task);
2221
2222 + korina_free_ring(dev);
2223 +
2224 free_irq(lp->rx_irq, dev);
2225 free_irq(lp->tx_irq, dev);
2226 free_irq(lp->ovr_irq, dev);
2227 diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
2228 index cd3638e6fe25..0509996957d9 100644
2229 --- a/drivers/net/ethernet/mellanox/mlx4/eq.c
2230 +++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
2231 @@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2232 break;
2233
2234 case MLX4_EVENT_TYPE_SRQ_LIMIT:
2235 - mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
2236 - __func__);
2237 + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
2238 + __func__, be32_to_cpu(eqe->event.srq.srqn),
2239 + eq->eqn);
2240 case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
2241 if (mlx4_is_master(dev)) {
2242 /* forward only to slave owning the SRQ */
2243 @@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
2244 eq->eqn, eq->cons_index, ret);
2245 break;
2246 }
2247 - mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
2248 - __func__, slave,
2249 - be32_to_cpu(eqe->event.srq.srqn),
2250 - eqe->type, eqe->subtype);
2251 + if (eqe->type ==
2252 + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
2253 + mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
2254 + __func__, slave,
2255 + be32_to_cpu(eqe->event.srq.srqn),
2256 + eqe->type, eqe->subtype);
2257
2258 if (!ret && slave != dev->caps.function) {
2259 - mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
2260 - __func__, eqe->type,
2261 - eqe->subtype, slave);
2262 + if (eqe->type ==
2263 + MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
2264 + mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
2265 + __func__, eqe->type,
2266 + eqe->subtype, slave);
2267 mlx4_slave_event(dev, slave, eqe);
2268 break;
2269 }
2270 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
2271 index 21ce0b701143..6180ad45be18 100644
2272 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
2273 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
2274 @@ -283,13 +283,15 @@ struct mlx5e_dma_info {
2275
2276 struct mlx5e_rx_am_stats {
2277 int ppms; /* packets per msec */
2278 + int bpms; /* bytes per msec */
2279 int epms; /* events per msec */
2280 };
2281
2282 struct mlx5e_rx_am_sample {
2283 - ktime_t time;
2284 - unsigned int pkt_ctr;
2285 - u16 event_ctr;
2286 + ktime_t time;
2287 + u32 pkt_ctr;
2288 + u32 byte_ctr;
2289 + u16 event_ctr;
2290 };
2291
2292 struct mlx5e_rx_am { /* Adaptive Moderation */
2293 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2294 index 3744e2f79ecf..da1d73fe1a81 100644
2295 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2296 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
2297 @@ -1183,11 +1183,11 @@ static int mlx5e_get_ts_info(struct net_device *dev,
2298 SOF_TIMESTAMPING_RX_HARDWARE |
2299 SOF_TIMESTAMPING_RAW_HARDWARE;
2300
2301 - info->tx_types = (BIT(1) << HWTSTAMP_TX_OFF) |
2302 - (BIT(1) << HWTSTAMP_TX_ON);
2303 + info->tx_types = BIT(HWTSTAMP_TX_OFF) |
2304 + BIT(HWTSTAMP_TX_ON);
2305
2306 - info->rx_filters = (BIT(1) << HWTSTAMP_FILTER_NONE) |
2307 - (BIT(1) << HWTSTAMP_FILTER_ALL);
2308 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
2309 + BIT(HWTSTAMP_FILTER_ALL);
2310
2311 return 0;
2312 }
2313 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2314 index d4fa851ced2a..ea582342dd8f 100644
2315 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2316 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
2317 @@ -3846,7 +3846,8 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
2318 return netdev;
2319
2320 err_cleanup_nic:
2321 - profile->cleanup(priv);
2322 + if (profile->cleanup)
2323 + profile->cleanup(priv);
2324 free_netdev(netdev);
2325
2326 return NULL;
2327 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
2328 index cbfac06b7ffd..23ccec4cb7f5 100644
2329 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
2330 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
2331 @@ -183,28 +183,27 @@ static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
2332 mlx5e_am_step(am);
2333 }
2334
2335 +#define IS_SIGNIFICANT_DIFF(val, ref) \
2336 + (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
2337 +
2338 static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
2339 struct mlx5e_rx_am_stats *prev)
2340 {
2341 - int diff;
2342 -
2343 - if (!prev->ppms)
2344 - return curr->ppms ? MLX5E_AM_STATS_BETTER :
2345 + if (!prev->bpms)
2346 + return curr->bpms ? MLX5E_AM_STATS_BETTER :
2347 MLX5E_AM_STATS_SAME;
2348
2349 - diff = curr->ppms - prev->ppms;
2350 - if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
2351 - return (diff > 0) ? MLX5E_AM_STATS_BETTER :
2352 - MLX5E_AM_STATS_WORSE;
2353 + if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
2354 + return (curr->bpms > prev->bpms) ? MLX5E_AM_STATS_BETTER :
2355 + MLX5E_AM_STATS_WORSE;
2356
2357 - if (!prev->epms)
2358 - return curr->epms ? MLX5E_AM_STATS_WORSE :
2359 - MLX5E_AM_STATS_SAME;
2360 + if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
2361 + return (curr->ppms > prev->ppms) ? MLX5E_AM_STATS_BETTER :
2362 + MLX5E_AM_STATS_WORSE;
2363
2364 - diff = curr->epms - prev->epms;
2365 - if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
2366 - return (diff < 0) ? MLX5E_AM_STATS_BETTER :
2367 - MLX5E_AM_STATS_WORSE;
2368 + if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
2369 + return (curr->epms < prev->epms) ? MLX5E_AM_STATS_BETTER :
2370 + MLX5E_AM_STATS_WORSE;
2371
2372 return MLX5E_AM_STATS_SAME;
2373 }
2374 @@ -266,10 +265,13 @@ static void mlx5e_am_sample(struct mlx5e_rq *rq,
2375 {
2376 s->time = ktime_get();
2377 s->pkt_ctr = rq->stats.packets;
2378 + s->byte_ctr = rq->stats.bytes;
2379 s->event_ctr = rq->cq.event_ctr;
2380 }
2381
2382 #define MLX5E_AM_NEVENTS 64
2383 +#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
2384 +#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
2385
2386 static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
2387 struct mlx5e_rx_am_sample *end,
2388 @@ -277,13 +279,17 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
2389 {
2390 /* u32 holds up to 71 minutes, should be enough */
2391 u32 delta_us = ktime_us_delta(end->time, start->time);
2392 - unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
2393 + u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
2394 + u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
2395 + start->byte_ctr);
2396
2397 if (!delta_us)
2398 return;
2399
2400 - curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
2401 - curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
2402 + curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
2403 + curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
2404 + curr_stats->epms = DIV_ROUND_UP(MLX5E_AM_NEVENTS * USEC_PER_MSEC,
2405 + delta_us);
2406 }
2407
2408 void mlx5e_rx_am_work(struct work_struct *work)
2409 @@ -308,7 +314,8 @@ void mlx5e_rx_am(struct mlx5e_rq *rq)
2410
2411 switch (am->state) {
2412 case MLX5E_AM_MEASURE_IN_PROGRESS:
2413 - nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
2414 + nevents = BIT_GAP(BITS_PER_TYPE(u16), rq->cq.event_ctr,
2415 + am->start_sample.event_ctr);
2416 if (nevents < MLX5E_AM_NEVENTS)
2417 break;
2418 mlx5e_am_sample(rq, &end_sample);
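BIT_GAP above computes the forward distance between two counter snapshots modulo 2^bits, so a counter that wrapped between samples still yields the right delta. A standalone check of the arithmetic; BIT_ULL and BITS_PER_BYTE are replaced by a plain shift and 8 to keep it self-contained.

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_TYPE(type)	(sizeof(type) * 8)
#define BIT_GAP(bits, end, start) \
	((((end) - (start)) + (1ULL << (bits))) & ((1ULL << (bits)) - 1))

int main(void)
{
	uint16_t start = 0xfff0, end = 0x0010;	/* wrapped past 0xffff */

	/* naive subtraction goes negative after integer promotion; BIT_GAP
	 * folds the difference back into the counter's modulus */
	printf("gap = %llu\n",
	       (unsigned long long)BIT_GAP(BITS_PER_TYPE(uint16_t),
					   end, start));	/* 32 */
	return 0;
}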
2419 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2420 index d776db79e325..5bea0bf2b484 100644
2421 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
2422 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
2423 @@ -155,8 +155,9 @@ static struct mlx5_profile profile[] = {
2424 },
2425 };
2426
2427 -#define FW_INIT_TIMEOUT_MILI 2000
2428 -#define FW_INIT_WAIT_MS 2
2429 +#define FW_INIT_TIMEOUT_MILI 2000
2430 +#define FW_INIT_WAIT_MS 2
2431 +#define FW_PRE_INIT_TIMEOUT_MILI 10000
2432
2433 static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
2434 {
2435 @@ -956,6 +957,15 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
2436 */
2437 dev->state = MLX5_DEVICE_STATE_UP;
2438
2439 + /* wait for the firmware to accept the initialization segment's configuration
2440 + */
2441 + err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
2442 + if (err) {
2443 + dev_err(&dev->pdev->dev, "Firmware over %d ms in pre-initializing state, aborting\n",
2444 + FW_PRE_INIT_TIMEOUT_MILI);
2445 + goto out;
2446 + }
2447 +
2448 err = mlx5_cmd_init(dev);
2449 if (err) {
2450 dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
2451 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2452 index 690563099313..9e31a3390154 100644
2453 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2454 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
2455 @@ -1178,7 +1178,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
2456
2457 static int
2458 mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
2459 - struct mlxsw_sp_nexthop_group *nh_grp)
2460 + struct mlxsw_sp_nexthop_group *nh_grp,
2461 + bool reallocate)
2462 {
2463 u32 adj_index = nh_grp->adj_index; /* base */
2464 struct mlxsw_sp_nexthop *nh;
2465 @@ -1193,7 +1194,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
2466 continue;
2467 }
2468
2469 - if (nh->update) {
2470 + if (nh->update || reallocate) {
2471 err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
2472 adj_index, nh);
2473 if (err)
2474 @@ -1254,7 +1255,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2475 /* Nothing was added or removed, so no need to reallocate. Just
2476 * update MAC on existing adjacency indexes.
2477 */
2478 - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
2479 + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
2480 + false);
2481 if (err) {
2482 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2483 goto set_trap;
2484 @@ -1282,7 +1284,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
2485 nh_grp->adj_index_valid = 1;
2486 nh_grp->adj_index = adj_index;
2487 nh_grp->ecmp_size = ecmp_size;
2488 - err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
2489 + err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
2490 if (err) {
2491 dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
2492 goto set_trap;
2493 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
2494 index 510ff62584d6..11623aad0e8e 100644
2495 --- a/drivers/net/ethernet/renesas/ravb_main.c
2496 +++ b/drivers/net/ethernet/renesas/ravb_main.c
2497 @@ -229,18 +229,6 @@ static void ravb_ring_free(struct net_device *ndev, int q)
2498 int ring_size;
2499 int i;
2500
2501 - /* Free RX skb ringbuffer */
2502 - if (priv->rx_skb[q]) {
2503 - for (i = 0; i < priv->num_rx_ring[q]; i++)
2504 - dev_kfree_skb(priv->rx_skb[q][i]);
2505 - }
2506 - kfree(priv->rx_skb[q]);
2507 - priv->rx_skb[q] = NULL;
2508 -
2509 - /* Free aligned TX buffers */
2510 - kfree(priv->tx_align[q]);
2511 - priv->tx_align[q] = NULL;
2512 -
2513 if (priv->rx_ring[q]) {
2514 for (i = 0; i < priv->num_rx_ring[q]; i++) {
2515 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
2516 @@ -269,6 +257,18 @@ static void ravb_ring_free(struct net_device *ndev, int q)
2517 priv->tx_ring[q] = NULL;
2518 }
2519
2520 + /* Free RX skb ringbuffer */
2521 + if (priv->rx_skb[q]) {
2522 + for (i = 0; i < priv->num_rx_ring[q]; i++)
2523 + dev_kfree_skb(priv->rx_skb[q][i]);
2524 + }
2525 + kfree(priv->rx_skb[q]);
2526 + priv->rx_skb[q] = NULL;
2527 +
2528 + /* Free aligned TX buffers */
2529 + kfree(priv->tx_align[q]);
2530 + priv->tx_align[q] = NULL;
2531 +
2532 /* Free TX skb ringbuffer.
2533 * SKBs are freed by ravb_tx_free() call above.
2534 */
2535 diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
2536 index 1a7092602aec..1bfb21465ace 100644
2537 --- a/drivers/net/ethernet/sfc/falcon.c
2538 +++ b/drivers/net/ethernet/sfc/falcon.c
2539 @@ -2801,6 +2801,11 @@ const struct efx_nic_type falcon_a1_nic_type = {
2540 .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
2541 .offload_features = NETIF_F_IP_CSUM,
2542 .mcdi_max_ver = -1,
2543 +#ifdef CONFIG_SFC_SRIOV
2544 + .vswitching_probe = efx_port_dummy_op_int,
2545 + .vswitching_restore = efx_port_dummy_op_int,
2546 + .vswitching_remove = efx_port_dummy_op_void,
2547 +#endif
2548 };
2549
2550 const struct efx_nic_type falcon_b0_nic_type = {
2551 @@ -2902,4 +2907,9 @@ const struct efx_nic_type falcon_b0_nic_type = {
2552 .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
2553 .mcdi_max_ver = -1,
2554 .max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
2555 +#ifdef CONFIG_SFC_SRIOV
2556 + .vswitching_probe = efx_port_dummy_op_int,
2557 + .vswitching_restore = efx_port_dummy_op_int,
2558 + .vswitching_remove = efx_port_dummy_op_void,
2559 +#endif
2560 };
2561 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2562 index ac3d39c69509..890e4b083f4f 100644
2563 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2564 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
2565 @@ -346,6 +346,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
2566 if (of_phy_is_fixed_link(np))
2567 of_phy_deregister_fixed_link(np);
2568 of_node_put(plat->phy_node);
2569 + of_node_put(plat->mdio_node);
2570 }
2571 #else
2572 struct plat_stmmacenet_data *
2573 diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
2574 index 800b39f06279..a10d0e7fc5f7 100644
2575 --- a/drivers/net/phy/dp83848.c
2576 +++ b/drivers/net/phy/dp83848.c
2577 @@ -17,6 +17,7 @@
2578 #include <linux/phy.h>
2579
2580 #define TI_DP83848C_PHY_ID 0x20005ca0
2581 +#define TI_DP83620_PHY_ID 0x20005ce0
2582 #define NS_DP83848C_PHY_ID 0x20005c90
2583 #define TLK10X_PHY_ID 0x2000a210
2584 #define TI_DP83822_PHY_ID 0x2000a240
2585 @@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
2586 static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
2587 { TI_DP83848C_PHY_ID, 0xfffffff0 },
2588 { NS_DP83848C_PHY_ID, 0xfffffff0 },
2589 + { TI_DP83620_PHY_ID, 0xfffffff0 },
2590 { TLK10X_PHY_ID, 0xfffffff0 },
2591 { TI_DP83822_PHY_ID, 0xfffffff0 },
2592 { }
2593 @@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
2594 static struct phy_driver dp83848_driver[] = {
2595 DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
2596 DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
2597 + DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
2598 DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
2599 DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
2600 };
2601 diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
2602 index 91177a4a32ad..4cad95552cf1 100644
2603 --- a/drivers/net/phy/dp83867.c
2604 +++ b/drivers/net/phy/dp83867.c
2605 @@ -113,12 +113,16 @@ static int dp83867_of_init(struct phy_device *phydev)
2606
2607 ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
2608 &dp83867->rx_id_delay);
2609 - if (ret)
2610 + if (ret &&
2611 + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
2612 + phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
2613 return ret;
2614
2615 ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
2616 &dp83867->tx_id_delay);
2617 - if (ret)
2618 + if (ret &&
2619 + (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
2620 + phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
2621 return ret;
2622
2623 return of_property_read_u32(of_node, "ti,fifo-depth",
2624 diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
2625 index 2f70f80de27f..c60c147708c4 100644
2626 --- a/drivers/net/phy/marvell.c
2627 +++ b/drivers/net/phy/marvell.c
2628 @@ -1200,7 +1200,8 @@ static int marvell_read_status(struct phy_device *phydev)
2629 int err;
2630
2631 /* Check the fiber mode first */
2632 - if (phydev->supported & SUPPORTED_FIBRE) {
2633 + if (phydev->supported & SUPPORTED_FIBRE &&
2634 + phydev->interface != PHY_INTERFACE_MODE_SGMII) {
2635 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
2636 if (err < 0)
2637 goto error;
2638 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
2639 index a9be26f1f677..edd30ebbf275 100644
2640 --- a/drivers/net/phy/phy.c
2641 +++ b/drivers/net/phy/phy.c
2642 @@ -1384,6 +1384,9 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
2643 {
2644 int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
2645
2646 + /* Mask prohibited EEE modes */
2647 + val &= ~phydev->eee_broken_modes;
2648 +
2649 phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, val);
2650
2651 return 0;
2652 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2653 index 14d57d0d1c04..32b555a72e13 100644
2654 --- a/drivers/net/phy/phy_device.c
2655 +++ b/drivers/net/phy/phy_device.c
2656 @@ -1146,6 +1146,43 @@ static int genphy_config_advert(struct phy_device *phydev)
2657 }
2658
2659 /**
2660 + * genphy_config_eee_advert - disable unwanted eee mode advertisement
2661 + * @phydev: target phy_device struct
2662 + *
2663 + * Description: Writes MDIO_AN_EEE_ADV after disabling unsupported energy
2664 + * efficient ethernet modes. Returns 0 if the PHY's advertisement hasn't
2665 + * changed, and 1 if it has changed.
2666 + */
2667 +static int genphy_config_eee_advert(struct phy_device *phydev)
2668 +{
2669 + int broken = phydev->eee_broken_modes;
2670 + int old_adv, adv;
2671 +
2672 + /* Nothing to disable */
2673 + if (!broken)
2674 + return 0;
2675 +
2676 + /* If the following call fails, we assume that EEE is not
2677 + * supported by the phy. If we read 0, EEE is not advertised.
2678 + * In both cases, we don't need to continue.
2679 + */
2680 + adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN);
2681 + if (adv <= 0)
2682 + return 0;
2683 +
2684 + old_adv = adv;
2685 + adv &= ~broken;
2686 +
2687 + /* Advertising remains unchanged with the broken mask */
2688 + if (old_adv == adv)
2689 + return 0;
2690 +
2691 + phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN, adv);
2692 +
2693 + return 1;
2694 +}
2695 +
2696 +/**
2697 * genphy_setup_forced - configures/forces speed/duplex from @phydev
2698 * @phydev: target phy_device struct
2699 *
2700 @@ -1203,15 +1240,20 @@ EXPORT_SYMBOL(genphy_restart_aneg);
2701 */
2702 int genphy_config_aneg(struct phy_device *phydev)
2703 {
2704 - int result;
2705 + int err, changed;
2706 +
2707 + changed = genphy_config_eee_advert(phydev);
2708
2709 if (AUTONEG_ENABLE != phydev->autoneg)
2710 return genphy_setup_forced(phydev);
2711
2712 - result = genphy_config_advert(phydev);
2713 - if (result < 0) /* error */
2714 - return result;
2715 - if (result == 0) {
2716 + err = genphy_config_advert(phydev);
2717 + if (err < 0) /* error */
2718 + return err;
2719 +
2720 + changed |= err;
2721 +
2722 + if (changed == 0) {
2723 /* Advertisement hasn't changed, but maybe aneg was never on to
2724 * begin with? Or maybe phy was isolated?
2725 */
2726 @@ -1221,16 +1263,16 @@ int genphy_config_aneg(struct phy_device *phydev)
2727 return ctl;
2728
2729 if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
2730 - result = 1; /* do restart aneg */
2731 + changed = 1; /* do restart aneg */
2732 }
2733
2734 /* Only restart aneg if we are advertising something different
2735 * than we were before.
2736 */
2737 - if (result > 0)
2738 - result = genphy_restart_aneg(phydev);
2739 + if (changed > 0)
2740 + return genphy_restart_aneg(phydev);
2741
2742 - return result;
2743 + return 0;
2744 }
2745 EXPORT_SYMBOL(genphy_config_aneg);
2746
2747 @@ -1588,6 +1630,33 @@ static void of_set_phy_supported(struct phy_device *phydev)
2748 __set_phy_supported(phydev, max_speed);
2749 }
2750
2751 +static void of_set_phy_eee_broken(struct phy_device *phydev)
2752 +{
2753 + struct device_node *node = phydev->mdio.dev.of_node;
2754 + u32 broken = 0;
2755 +
2756 + if (!IS_ENABLED(CONFIG_OF_MDIO))
2757 + return;
2758 +
2759 + if (!node)
2760 + return;
2761 +
2762 + if (of_property_read_bool(node, "eee-broken-100tx"))
2763 + broken |= MDIO_EEE_100TX;
2764 + if (of_property_read_bool(node, "eee-broken-1000t"))
2765 + broken |= MDIO_EEE_1000T;
2766 + if (of_property_read_bool(node, "eee-broken-10gt"))
2767 + broken |= MDIO_EEE_10GT;
2768 + if (of_property_read_bool(node, "eee-broken-1000kx"))
2769 + broken |= MDIO_EEE_1000KX;
2770 + if (of_property_read_bool(node, "eee-broken-10gkx4"))
2771 + broken |= MDIO_EEE_10GKX4;
2772 + if (of_property_read_bool(node, "eee-broken-10gkr"))
2773 + broken |= MDIO_EEE_10GKR;
2774 +
2775 + phydev->eee_broken_modes = broken;
2776 +}
2777 +
2778 /**
2779 * phy_probe - probe and init a PHY device
2780 * @dev: device to probe and init
2781 @@ -1625,6 +1694,11 @@ static int phy_probe(struct device *dev)
2782 of_set_phy_supported(phydev);
2783 phydev->advertising = phydev->supported;
2784
2785 + /* Get the EEE modes we want to prohibit. We will ask
2786 + * the PHY to stop advertising these modes later on.
2787 + */
2788 + of_set_phy_eee_broken(phydev);
2789 +
2790 /* Set the state to READY by default */
2791 phydev->state = PHY_READY;
2792
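genphy_config_eee_advert() above reads the current EEE advertisement, clears the quirk bits, and only writes back (and reports a change) when the mask actually removed something. The same logic as a standalone function; mask_eee_advert() and the bit values are illustrative, the real ones come from mdio.h.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define EEE_100TX	(1u << 1)
#define EEE_1000T	(1u << 2)

static int mask_eee_advert(uint32_t *adv, uint32_t broken)
{
	uint32_t old_adv = *adv;

	if (!broken || !*adv)	/* nothing to disable, or EEE not advertised */
		return 0;

	*adv &= ~broken;
	return *adv != old_adv;	/* 1: advertisement changed, restart aneg */
}

int main(void)
{
	uint32_t adv = EEE_100TX | EEE_1000T;

	printf("changed=%d adv=%#" PRIx32 "\n",
	       mask_eee_advert(&adv, EEE_1000T), adv);
	return 0;
}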
2793 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
2794 index 7ca99899972e..1568aedddfc9 100644
2795 --- a/drivers/net/virtio_net.c
2796 +++ b/drivers/net/virtio_net.c
2797 @@ -47,8 +47,16 @@ module_param(gso, bool, 0444);
2798 */
2799 DECLARE_EWMA(pkt_len, 1, 64)
2800
2801 +/* With mergeable buffers we align buffer address and use the low bits to
2802 + * encode its true size. Buffer size is up to 1 page so we need to align to
2803 + * square root of page size to ensure we reserve enough bits to encode the true
2804 + * size.
2805 + */
2806 +#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
2807 +
2808 /* Minimum alignment for mergeable packet buffers. */
2809 -#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
2810 +#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
2811 + 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
2812
2813 #define VIRTNET_DRIVER_VERSION "1.0.0"
2814
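A standalone model of the encoding the comment above describes: the buffer address is aligned so its low bits are free, and the true length, in units of the minimum alignment, is stored in them. encode() and decoded_len() are illustrative, not the driver's helpers; PAGE_SHIFT 12 assumes 4 KiB pages, giving a 64-byte minimum alignment and lengths up to one page.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define MIN_ALIGN_SHIFT		((PAGE_SHIFT + 1) / 2)
#define MIN_ALIGN		(1u << MIN_ALIGN_SHIFT)

static uintptr_t encode(uintptr_t addr, unsigned int len)
{
	/* addr must be MIN_ALIGN-aligned, len a multiple of MIN_ALIGN */
	return addr | (len / MIN_ALIGN - 1);
}

static unsigned int decoded_len(uintptr_t ctx)
{
	return ((ctx & (MIN_ALIGN - 1)) + 1) * MIN_ALIGN;
}

int main(void)
{
	uintptr_t ctx = encode(0x10000, 1536);

	printf("ctx=%#lx len=%u\n", (unsigned long)ctx, decoded_len(ctx));
	return 0;
}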
2815 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
2816 index ee02605a0f89..642df9391193 100644
2817 --- a/drivers/net/vrf.c
2818 +++ b/drivers/net/vrf.c
2819 @@ -36,12 +36,14 @@
2820 #include <net/addrconf.h>
2821 #include <net/l3mdev.h>
2822 #include <net/fib_rules.h>
2823 +#include <net/netns/generic.h>
2824
2825 #define DRV_NAME "vrf"
2826 #define DRV_VERSION "1.0"
2827
2828 #define FIB_RULE_PREF 1000 /* default preference for FIB rules */
2829 -static bool add_fib_rules = true;
2830 +
2831 +static unsigned int vrf_net_id;
2832
2833 struct net_vrf {
2834 struct rtable __rcu *rth;
2835 @@ -1237,6 +1239,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
2836 struct nlattr *tb[], struct nlattr *data[])
2837 {
2838 struct net_vrf *vrf = netdev_priv(dev);
2839 + bool *add_fib_rules;
2840 + struct net *net;
2841 int err;
2842
2843 if (!data || !data[IFLA_VRF_TABLE])
2844 @@ -1252,13 +1256,15 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
2845 if (err)
2846 goto out;
2847
2848 - if (add_fib_rules) {
2849 + net = dev_net(dev);
2850 + add_fib_rules = net_generic(net, vrf_net_id);
2851 + if (*add_fib_rules) {
2852 err = vrf_add_fib_rules(dev);
2853 if (err) {
2854 unregister_netdevice(dev);
2855 goto out;
2856 }
2857 - add_fib_rules = false;
2858 + *add_fib_rules = false;
2859 }
2860
2861 out:
2862 @@ -1341,16 +1347,38 @@ static struct notifier_block vrf_notifier_block __read_mostly = {
2863 .notifier_call = vrf_device_event,
2864 };
2865
2866 +/* Initialize per network namespace state */
2867 +static int __net_init vrf_netns_init(struct net *net)
2868 +{
2869 + bool *add_fib_rules = net_generic(net, vrf_net_id);
2870 +
2871 + *add_fib_rules = true;
2872 +
2873 + return 0;
2874 +}
2875 +
2876 +static struct pernet_operations vrf_net_ops __net_initdata = {
2877 + .init = vrf_netns_init,
2878 + .id = &vrf_net_id,
2879 + .size = sizeof(bool),
2880 +};
2881 +
2882 static int __init vrf_init_module(void)
2883 {
2884 int rc;
2885
2886 register_netdevice_notifier(&vrf_notifier_block);
2887
2888 - rc = rtnl_link_register(&vrf_link_ops);
2889 + rc = register_pernet_subsys(&vrf_net_ops);
2890 if (rc < 0)
2891 goto error;
2892
2893 + rc = rtnl_link_register(&vrf_link_ops);
2894 + if (rc < 0) {
2895 + unregister_pernet_subsys(&vrf_net_ops);
2896 + goto error;
2897 + }
2898 +
2899 return 0;
2900
2901 error:
2902 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
2903 index 55c4408892be..963e5339a4d7 100644
2904 --- a/drivers/net/vxlan.c
2905 +++ b/drivers/net/vxlan.c
2906 @@ -2285,7 +2285,7 @@ static void vxlan_cleanup(unsigned long arg)
2907 = container_of(p, struct vxlan_fdb, hlist);
2908 unsigned long timeout;
2909
2910 - if (f->state & NUD_PERMANENT)
2911 + if (f->state & (NUD_PERMANENT | NUD_NOARP))
2912 continue;
2913
2914 timeout = f->used + vxlan->cfg.age_interval * HZ;
2915 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2916 index 78d9966a3957..0f5dde1f2248 100644
2917 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2918 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2919 @@ -5913,7 +5913,6 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
2920 u32 i, j;
2921 u32 total;
2922 u32 chaninfo;
2923 - u32 index;
2924
2925 pbuf = kzalloc(BRCMF_DCMD_MEDLEN, GFP_KERNEL);
2926
2927 @@ -5961,33 +5960,36 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
2928 ch.bw == BRCMU_CHAN_BW_80)
2929 continue;
2930
2931 - channel = band->channels;
2932 - index = band->n_channels;
2933 + channel = NULL;
2934 for (j = 0; j < band->n_channels; j++) {
2935 - if (channel[j].hw_value == ch.control_ch_num) {
2936 - index = j;
2937 + if (band->channels[j].hw_value == ch.control_ch_num) {
2938 + channel = &band->channels[j];
2939 break;
2940 }
2941 }
2942 - channel[index].center_freq =
2943 - ieee80211_channel_to_frequency(ch.control_ch_num,
2944 - band->band);
2945 - channel[index].hw_value = ch.control_ch_num;
2946 + if (!channel) {
2947 + /* It seems the firmware supports a channel we never
2948 + * considered. Something new in the IEEE standard?
2949 + */
2950 + brcmf_err("Ignoring unexpected firmware channel %d\n",
2951 + ch.control_ch_num);
2952 + continue;
2953 + }
2954
2955 /* assuming the chanspecs order is HT20,
2956 * HT40 upper, HT40 lower, and VHT80.
2957 */
2958 if (ch.bw == BRCMU_CHAN_BW_80) {
2959 - channel[index].flags &= ~IEEE80211_CHAN_NO_80MHZ;
2960 + channel->flags &= ~IEEE80211_CHAN_NO_80MHZ;
2961 } else if (ch.bw == BRCMU_CHAN_BW_40) {
2962 - brcmf_update_bw40_channel_flag(&channel[index], &ch);
2963 + brcmf_update_bw40_channel_flag(channel, &ch);
2964 } else {
2965 /* enable the channel and disable other bandwidths
2966 * for now as mentioned order assure they are enabled
2967 * for subsequent chanspecs.
2968 */
2969 - channel[index].flags = IEEE80211_CHAN_NO_HT40 |
2970 - IEEE80211_CHAN_NO_80MHZ;
2971 + channel->flags = IEEE80211_CHAN_NO_HT40 |
2972 + IEEE80211_CHAN_NO_80MHZ;
2973 ch.bw = BRCMU_CHAN_BW_20;
2974 cfg->d11inf.encchspec(&ch);
2975 chaninfo = ch.chspec;
2976 @@ -5995,11 +5997,11 @@ static int brcmf_construct_chaninfo(struct brcmf_cfg80211_info *cfg,
2977 &chaninfo);
2978 if (!err) {
2979 if (chaninfo & WL_CHAN_RADAR)
2980 - channel[index].flags |=
2981 + channel->flags |=
2982 (IEEE80211_CHAN_RADAR |
2983 IEEE80211_CHAN_NO_IR);
2984 if (chaninfo & WL_CHAN_PASSIVE)
2985 - channel[index].flags |=
2986 + channel->flags |=
2987 IEEE80211_CHAN_NO_IR;
2988 }
2989 }
2990 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
2991 index 63a051be832e..bec7d9c46087 100644
2992 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
2993 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c
2994 @@ -843,8 +843,10 @@ static void iwl_mvm_thermal_zone_unregister(struct iwl_mvm *mvm)
2995 return;
2996
2997 IWL_DEBUG_TEMP(mvm, "Thermal zone device unregister\n");
2998 - thermal_zone_device_unregister(mvm->tz_device.tzone);
2999 - mvm->tz_device.tzone = NULL;
3000 + if (mvm->tz_device.tzone) {
3001 + thermal_zone_device_unregister(mvm->tz_device.tzone);
3002 + mvm->tz_device.tzone = NULL;
3003 + }
3004 }
3005
3006 static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
3007 @@ -853,8 +855,10 @@ static void iwl_mvm_cooling_device_unregister(struct iwl_mvm *mvm)
3008 return;
3009
3010 IWL_DEBUG_TEMP(mvm, "Cooling device unregister\n");
3011 - thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
3012 - mvm->cooling_dev.cdev = NULL;
3013 + if (mvm->cooling_dev.cdev) {
3014 + thermal_cooling_device_unregister(mvm->cooling_dev.cdev);
3015 + mvm->cooling_dev.cdev = NULL;
3016 + }
3017 }
3018 #endif /* CONFIG_THERMAL */
3019
3020 diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
3021 index 74dc2bf71428..b009d7966b46 100644
3022 --- a/drivers/net/xen-netback/interface.c
3023 +++ b/drivers/net/xen-netback/interface.c
3024 @@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
3025 {
3026 struct xenvif *vif = netdev_priv(dev);
3027 struct xenvif_queue *queue = NULL;
3028 - unsigned int num_queues = vif->num_queues;
3029 unsigned long rx_bytes = 0;
3030 unsigned long rx_packets = 0;
3031 unsigned long tx_bytes = 0;
3032 unsigned long tx_packets = 0;
3033 unsigned int index;
3034
3035 + spin_lock(&vif->lock);
3036 if (vif->queues == NULL)
3037 goto out;
3038
3039 /* Aggregate tx and rx stats from each queue */
3040 - for (index = 0; index < num_queues; ++index) {
3041 + for (index = 0; index < vif->num_queues; ++index) {
3042 queue = &vif->queues[index];
3043 rx_bytes += queue->stats.rx_bytes;
3044 rx_packets += queue->stats.rx_packets;
3045 @@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
3046 }
3047
3048 out:
3049 + spin_unlock(&vif->lock);
3050 +
3051 vif->dev->stats.rx_bytes = rx_bytes;
3052 vif->dev->stats.rx_packets = rx_packets;
3053 vif->dev->stats.tx_bytes = tx_bytes;
3054 diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
3055 index 8674e188b697..b44f37fff890 100644
3056 --- a/drivers/net/xen-netback/xenbus.c
3057 +++ b/drivers/net/xen-netback/xenbus.c
3058 @@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
3059 static void backend_disconnect(struct backend_info *be)
3060 {
3061 if (be->vif) {
3062 + unsigned int queue_index;
3063 +
3064 xen_unregister_watchers(be->vif);
3065 #ifdef CONFIG_DEBUG_FS
3066 xenvif_debugfs_delif(be->vif);
3067 #endif /* CONFIG_DEBUG_FS */
3068 xenvif_disconnect_data(be->vif);
3069 + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
3070 + xenvif_deinit_queue(&be->vif->queues[queue_index]);
3071 +
3072 + spin_lock(&be->vif->lock);
3073 + vfree(be->vif->queues);
3074 + be->vif->num_queues = 0;
3075 + be->vif->queues = NULL;
3076 + spin_unlock(&be->vif->lock);
3077 +
3078 xenvif_disconnect_ctrl(be->vif);
3079 }
3080 }
3081 @@ -1040,6 +1051,8 @@ static void connect(struct backend_info *be)
3082 err:
3083 if (be->vif->num_queues > 0)
3084 xenvif_disconnect_data(be->vif); /* Clean up existing queues */
3085 + for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
3086 + xenvif_deinit_queue(&be->vif->queues[queue_index]);
3087 vfree(be->vif->queues);
3088 be->vif->queues = NULL;
3089 be->vif->num_queues = 0;
3090 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
3091 index 0cdcb2169083..599cf5090030 100644
3092 --- a/drivers/net/xen-netfront.c
3093 +++ b/drivers/net/xen-netfront.c
3094 @@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
3095 queue->rx.req_prod_pvt = req_prod;
3096
3097 /* Not enough requests? Try again later. */
3098 - if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
3099 + if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
3100 mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
3101 return;
3102 }
3103 diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
3104 index b4808590870c..3456f532077c 100644
3105 --- a/drivers/nvdimm/pmem.c
3106 +++ b/drivers/nvdimm/pmem.c
3107 @@ -87,7 +87,9 @@ static int read_pmem(struct page *page, unsigned int off,
3108
3109 rc = memcpy_from_pmem(mem + off, pmem_addr, len);
3110 kunmap_atomic(mem);
3111 - return rc;
3112 + if (rc)
3113 + return -EIO;
3114 + return 0;
3115 }
3116
3117 static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
3118 diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
3119 index 01443762e570..b40a074822cf 100644
3120 --- a/drivers/pinctrl/intel/pinctrl-intel.c
3121 +++ b/drivers/pinctrl/intel/pinctrl-intel.c
3122 @@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
3123 return 0;
3124 }
3125
3126 +static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
3127 +{
3128 + u32 value;
3129 +
3130 + value = readl(padcfg0);
3131 + if (input) {
3132 + value &= ~PADCFG0_GPIORXDIS;
3133 + value |= PADCFG0_GPIOTXDIS;
3134 + } else {
3135 + value &= ~PADCFG0_GPIOTXDIS;
3136 + value |= PADCFG0_GPIORXDIS;
3137 + }
3138 + writel(value, padcfg0);
3139 +}
3140 +
3141 static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
3142 struct pinctrl_gpio_range *range,
3143 unsigned pin)
3144 @@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
3145 /* Disable SCI/SMI/NMI generation */
3146 value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
3147 value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
3148 - /* Disable TX buffer and enable RX (this will be input) */
3149 - value &= ~PADCFG0_GPIORXDIS;
3150 - value |= PADCFG0_GPIOTXDIS;
3151 writel(value, padcfg0);
3152
3153 + /* Disable TX buffer and enable RX (this will be input) */
3154 + __intel_gpio_set_direction(padcfg0, true);
3155 +
3156 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
3157
3158 return 0;
3159 @@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
3160 struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
3161 void __iomem *padcfg0;
3162 unsigned long flags;
3163 - u32 value;
3164
3165 raw_spin_lock_irqsave(&pctrl->lock, flags);
3166
3167 padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
3168 -
3169 - value = readl(padcfg0);
3170 - if (input)
3171 - value |= PADCFG0_GPIOTXDIS;
3172 - else
3173 - value &= ~PADCFG0_GPIOTXDIS;
3174 - writel(value, padcfg0);
3175 + __intel_gpio_set_direction(padcfg0, input);
3176
3177 raw_spin_unlock_irqrestore(&pctrl->lock, flags);
3178
3179 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
3180 index a7614fc542b5..2f1615e00cb4 100644
3181 --- a/drivers/platform/x86/ideapad-laptop.c
3182 +++ b/drivers/platform/x86/ideapad-laptop.c
3183 @@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
3184 case 8:
3185 case 7:
3186 case 6:
3187 + case 1:
3188 ideapad_input_report(priv, vpc_bit);
3189 break;
3190 case 5:
3191 diff --git a/drivers/regulator/tps65086-regulator.c b/drivers/regulator/tps65086-regulator.c
3192 index caf174ffa316..6dbf3cf3951e 100644
3193 --- a/drivers/regulator/tps65086-regulator.c
3194 +++ b/drivers/regulator/tps65086-regulator.c
3195 @@ -156,19 +156,19 @@ static struct tps65086_regulator regulators[] = {
3196 VDOA23_VID_MASK, TPS65086_LDOA3CTRL, BIT(0),
3197 tps65086_ldoa23_ranges, 0, 0),
3198 TPS65086_SWITCH("SWA1", "swa1", SWA1, TPS65086_SWVTT_EN, BIT(5)),
3199 - TPS65086_SWITCH("SWB1", "swa2", SWB1, TPS65086_SWVTT_EN, BIT(6)),
3200 - TPS65086_SWITCH("SWB2", "swa3", SWB2, TPS65086_SWVTT_EN, BIT(7)),
3201 + TPS65086_SWITCH("SWB1", "swb1", SWB1, TPS65086_SWVTT_EN, BIT(6)),
3202 + TPS65086_SWITCH("SWB2", "swb2", SWB2, TPS65086_SWVTT_EN, BIT(7)),
3203 TPS65086_SWITCH("VTT", "vtt", VTT, TPS65086_SWVTT_EN, BIT(4)),
3204 };
3205
3206 -static int tps65086_of_parse_cb(struct device_node *dev,
3207 +static int tps65086_of_parse_cb(struct device_node *node,
3208 const struct regulator_desc *desc,
3209 struct regulator_config *config)
3210 {
3211 int ret;
3212
3213 /* Check for 25mV step mode */
3214 - if (of_property_read_bool(config->of_node, "ti,regulator-step-size-25mv")) {
3215 + if (of_property_read_bool(node, "ti,regulator-step-size-25mv")) {
3216 switch (desc->id) {
3217 case BUCK1:
3218 case BUCK2:
3219 @@ -192,7 +192,7 @@ static int tps65086_of_parse_cb(struct device_node *dev,
3220 }
3221
3222 /* Check for decay mode */
3223 - if (desc->id <= BUCK6 && of_property_read_bool(config->of_node, "ti,regulator-decay")) {
3224 + if (desc->id <= BUCK6 && of_property_read_bool(node, "ti,regulator-decay")) {
3225 ret = regmap_write_bits(config->regmap,
3226 regulators[desc->id].decay_reg,
3227 regulators[desc->id].decay_mask,
3228 diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
3229 index b7d54bfb1df9..7b696d108112 100644
3230 --- a/drivers/scsi/lpfc/lpfc_els.c
3231 +++ b/drivers/scsi/lpfc/lpfc_els.c
3232 @@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
3233 } else {
3234 buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
3235 lpfc_els_free_data(phba, buf_ptr1);
3236 + elsiocb->context2 = NULL;
3237 }
3238 }
3239
3240 if (elsiocb->context3) {
3241 buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
3242 lpfc_els_free_bpl(phba, buf_ptr);
3243 + elsiocb->context3 = NULL;
3244 }
3245 lpfc_sli_release_iocbq(phba, elsiocb);
3246 return 0;
3247 diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
3248 index 49b4c798de18..2d4f4b58dcfa 100644
3249 --- a/drivers/scsi/lpfc/lpfc_sli.c
3250 +++ b/drivers/scsi/lpfc/lpfc_sli.c
3251 @@ -5951,18 +5951,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
3252
3253 free_vfi_bmask:
3254 kfree(phba->sli4_hba.vfi_bmask);
3255 + phba->sli4_hba.vfi_bmask = NULL;
3256 free_xri_ids:
3257 kfree(phba->sli4_hba.xri_ids);
3258 + phba->sli4_hba.xri_ids = NULL;
3259 free_xri_bmask:
3260 kfree(phba->sli4_hba.xri_bmask);
3261 + phba->sli4_hba.xri_bmask = NULL;
3262 free_vpi_ids:
3263 kfree(phba->vpi_ids);
3264 + phba->vpi_ids = NULL;
3265 free_vpi_bmask:
3266 kfree(phba->vpi_bmask);
3267 + phba->vpi_bmask = NULL;
3268 free_rpi_ids:
3269 kfree(phba->sli4_hba.rpi_ids);
3270 + phba->sli4_hba.rpi_ids = NULL;
3271 free_rpi_bmask:
3272 kfree(phba->sli4_hba.rpi_bmask);
3273 + phba->sli4_hba.rpi_bmask = NULL;
3274 err_exit:
3275 return rc;
3276 }
3277 diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
3278 index 8e63a7b90277..91ec0689c714 100644
3279 --- a/drivers/scsi/qla2xxx/qla_def.h
3280 +++ b/drivers/scsi/qla2xxx/qla_def.h
3281 @@ -1555,7 +1555,8 @@ typedef struct {
3282 struct atio {
3283 uint8_t entry_type; /* Entry type. */
3284 uint8_t entry_count; /* Entry count. */
3285 - uint8_t data[58];
3286 + __le16 attr_n_length;
3287 + uint8_t data[56];
3288 uint32_t signature;
3289 #define ATIO_PROCESSED 0xDEADDEAD /* Signature */
3290 };
3291 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
3292 index 068c4e47fac9..bddaabb288d4 100644
3293 --- a/drivers/scsi/qla2xxx/qla_isr.c
3294 +++ b/drivers/scsi/qla2xxx/qla_isr.c
3295 @@ -2487,6 +2487,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
3296 if (pkt->entry_status & RF_BUSY)
3297 res = DID_BUS_BUSY << 16;
3298
3299 + if (pkt->entry_type == NOTIFY_ACK_TYPE &&
3300 + pkt->handle == QLA_TGT_SKIP_HANDLE)
3301 + return;
3302 +
3303 sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
3304 if (sp) {
3305 sp->done(ha, sp, res);
3306 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
3307 index feab7ea8e823..91f5f55a8a9b 100644
3308 --- a/drivers/scsi/qla2xxx/qla_target.c
3309 +++ b/drivers/scsi/qla2xxx/qla_target.c
3310 @@ -3067,7 +3067,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
3311
3312 pkt->entry_type = NOTIFY_ACK_TYPE;
3313 pkt->entry_count = 1;
3314 - pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3315 + pkt->handle = QLA_TGT_SKIP_HANDLE;
3316
3317 nack = (struct nack_to_isp *)pkt;
3318 nack->ox_id = ntfy->ox_id;
3319 @@ -6463,12 +6463,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
3320 if (!vha->flags.online)
3321 return;
3322
3323 - while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
3324 + while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
3325 + fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
3326 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
3327 cnt = pkt->u.raw.entry_count;
3328
3329 - qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
3330 - ha_locked);
3331 + if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
3332 + /*
3333 + * This packet is corrupted. The header + payload
3334 + * cannot be trusted. There is no point in passing
3335 + * it further up.
3336 + */
3337 + ql_log(ql_log_warn, vha, 0xffff,
3338 + "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
3339 + pkt->u.isp24.fcp_hdr.s_id,
3340 + be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
3341 + le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
3342 +
3343 + adjust_corrupted_atio(pkt);
3344 + qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
3345 + } else {
3346 + qlt_24xx_atio_pkt_all_vps(vha,
3347 + (struct atio_from_isp *)pkt, ha_locked);
3348 + }
3349
3350 for (i = 0; i < cnt; i++) {
3351 ha->tgt.atio_ring_index++;
3352 diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
3353 index f26c5f60eedd..0824a8164a24 100644
3354 --- a/drivers/scsi/qla2xxx/qla_target.h
3355 +++ b/drivers/scsi/qla2xxx/qla_target.h
3356 @@ -427,13 +427,33 @@ struct atio_from_isp {
3357 struct {
3358 uint8_t entry_type; /* Entry type. */
3359 uint8_t entry_count; /* Entry count. */
3360 - uint8_t data[58];
3361 + __le16 attr_n_length;
3362 +#define FCP_CMD_LENGTH_MASK 0x0fff
3363 +#define FCP_CMD_LENGTH_MIN 0x38
3364 + uint8_t data[56];
3365 uint32_t signature;
3366 #define ATIO_PROCESSED 0xDEADDEAD /* Signature */
3367 } raw;
3368 } u;
3369 } __packed;
3370
3371 +static inline int fcpcmd_is_corrupted(struct atio *atio)
3372 +{
3373 + if (atio->entry_type == ATIO_TYPE7 &&
3374 + (le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
3375 + FCP_CMD_LENGTH_MIN))
3376 + return 1;
3377 + else
3378 + return 0;
3379 +}
3380 +
3381 +/* adjust corrupted atio so we won't trip over the same entry again. */
3382 +static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
3383 +{
3384 + atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
3385 + atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
3386 +}
3387 +
3388 #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
3389
3390 /*
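The fcpcmd_is_corrupted() check (with the masking order corrected above) must byte-swap before masking. A self-contained user-space illustration of why the order matters on a big-endian host (values are hypothetical):

        #include <stdint.h>
        #include <stdio.h>

        static uint16_t swap16(uint16_t v) { return (uint16_t)((v << 8) | (v >> 8)); }

        int main(void)
        {
                /* attr_n_length = 0x1038 stored little-endian on the wire;
                 * a big-endian CPU loading the raw bytes sees 0x3810. */
                uint16_t raw  = 0x3810;
                uint16_t mask = 0x0fff;                /* FCP_CMD_LENGTH_MASK */

                printf("swap then mask: %#x\n", swap16(raw) & mask); /* 0x38: real length */
                printf("mask then swap: %#x\n", swap16(raw & mask)); /* 0x1008: garbage   */
                return 0;
        }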
3391 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
3392 index 931af0793951..13ac7e57a35d 100644
3393 --- a/drivers/scsi/sd.c
3394 +++ b/drivers/scsi/sd.c
3395 @@ -2572,7 +2572,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
3396 if (sdp->broken_fua) {
3397 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
3398 sdkp->DPOFUA = 0;
3399 - } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
3400 + } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
3401 + !sdkp->device->use_16_for_rw) {
3402 sd_first_printk(KERN_NOTICE, sdkp,
3403 "Uses READ/WRITE(6), disabling FUA\n");
3404 sdkp->DPOFUA = 0;
3405 diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
3406 index ec91bd07f00a..c680d7641311 100644
3407 --- a/drivers/scsi/virtio_scsi.c
3408 +++ b/drivers/scsi/virtio_scsi.c
3409 @@ -534,7 +534,9 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
3410 {
3411 struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
3412 struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
3413 + unsigned long flags;
3414 int req_size;
3415 + int ret;
3416
3417 BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
3418
3419 @@ -562,8 +564,15 @@ static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
3420 req_size = sizeof(cmd->req.cmd);
3421 }
3422
3423 - if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
3424 + ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
3425 + if (ret == -EIO) {
3426 + cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
3427 + spin_lock_irqsave(&req_vq->vq_lock, flags);
3428 + virtscsi_complete_cmd(vscsi, cmd);
3429 + spin_unlock_irqrestore(&req_vq->vq_lock, flags);
3430 + } else if (ret != 0) {
3431 return SCSI_MLQUEUE_HOST_BUSY;
3432 + }
3433 return 0;
3434 }
3435
3436 diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
3437 index d36c11b73a35..02fb96797ac8 100644
3438 --- a/drivers/spi/spi-davinci.c
3439 +++ b/drivers/spi/spi-davinci.c
3440 @@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
3441 buf = t->rx_buf;
3442 t->rx_dma = dma_map_single(&spi->dev, buf,
3443 t->len, DMA_FROM_DEVICE);
3444 - if (!t->rx_dma) {
3445 + if (dma_mapping_error(&spi->dev, t->rx_dma)) {
3446 ret = -EFAULT;
3447 goto err_rx_map;
3448 }
3449 @@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
3450 buf = (void *)t->tx_buf;
3451 t->tx_dma = dma_map_single(&spi->dev, buf,
3452 t->len, DMA_TO_DEVICE);
3453 - if (!t->tx_dma) {
3454 + if (dma_mapping_error(&spi->dev, t->tx_dma)) {
3455 ret = -EFAULT;
3456 goto err_tx_map;
3457 }
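The davinci change (with the stray "!" removed from the rx check above, matching the tx check) follows the standard rule: the value returned by dma_map_single() is an opaque bus address and may only be tested with dma_mapping_error(); "!addr" is wrong because 0 can be a valid mapping on some platforms while the error cookie is nonzero on others. The canonical pattern, as a sketch:

        dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, handle))
                return -ENOMEM;         /* never hand 'handle' to hardware */
        /* ... start DMA ... */
        dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);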
3458 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3459 index 24d4492d0168..6db80635ace8 100644
3460 --- a/drivers/spi/spi.c
3461 +++ b/drivers/spi/spi.c
3462 @@ -621,8 +621,10 @@ void spi_unregister_device(struct spi_device *spi)
3463 if (!spi)
3464 return;
3465
3466 - if (spi->dev.of_node)
3467 + if (spi->dev.of_node) {
3468 of_node_clear_flag(spi->dev.of_node, OF_POPULATED);
3469 + of_node_put(spi->dev.of_node);
3470 + }
3471 if (ACPI_COMPANION(&spi->dev))
3472 acpi_device_clear_enumerated(ACPI_COMPANION(&spi->dev));
3473 device_unregister(&spi->dev);
3474 @@ -797,12 +799,12 @@ static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
3475 if (master->dma_tx)
3476 tx_dev = master->dma_tx->device->dev;
3477 else
3478 - tx_dev = &master->dev;
3479 + tx_dev = master->dev.parent;
3480
3481 if (master->dma_rx)
3482 rx_dev = master->dma_rx->device->dev;
3483 else
3484 - rx_dev = &master->dev;
3485 + rx_dev = master->dev.parent;
3486
3487 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3488 if (!master->can_dma(master, msg->spi, xfer))
3489 @@ -844,12 +846,12 @@ static int __spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
3490 if (master->dma_tx)
3491 tx_dev = master->dma_tx->device->dev;
3492 else
3493 - tx_dev = &master->dev;
3494 + tx_dev = master->dev.parent;
3495
3496 if (master->dma_rx)
3497 rx_dev = master->dma_rx->device->dev;
3498 else
3499 - rx_dev = &master->dev;
3500 + rx_dev = master->dev.parent;
3501
3502 list_for_each_entry(xfer, &msg->transfers, transfer_list) {
3503 if (!master->can_dma(master, msg->spi, xfer))
3504 @@ -1589,11 +1591,13 @@ of_register_spi_device(struct spi_master *master, struct device_node *nc)
3505 if (rc) {
3506 dev_err(&master->dev, "spi_device register error %s\n",
3507 nc->full_name);
3508 - goto err_out;
3509 + goto err_of_node_put;
3510 }
3511
3512 return spi;
3513
3514 +err_of_node_put:
3515 + of_node_put(nc);
3516 err_out:
3517 spi_dev_put(spi);
3518 return ERR_PTR(rc);
3519 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
3520 index 24fbebc9b409..cfdd5c3da236 100644
3521 --- a/drivers/usb/dwc2/gadget.c
3522 +++ b/drivers/usb/dwc2/gadget.c
3523 @@ -2532,7 +2532,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
3524 /* keep other bits untouched (so e.g. forced modes are not lost) */
3525 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3526 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3527 - GUSBCFG_HNPCAP);
3528 + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3529
3530 /* set the PLL on, remove the HNP/SRP and set the PHY */
3531 val = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
3532 @@ -3403,7 +3403,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
3533 /* keep other bits untouched (so e.g. forced modes are not lost) */
3534 usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
3535 usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
3536 - GUSBCFG_HNPCAP);
3537 + GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
3538
3539 /* set the PLL on, remove the HNP/SRP and set the PHY */
3540 trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
3541 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
3542 index 04ffd7640c33..f9c99803a43d 100644
3543 --- a/drivers/usb/gadget/function/f_fs.c
3544 +++ b/drivers/usb/gadget/function/f_fs.c
3545 @@ -3688,6 +3688,7 @@ static void ffs_closed(struct ffs_data *ffs)
3546 {
3547 struct ffs_dev *ffs_obj;
3548 struct f_fs_opts *opts;
3549 + struct config_item *ci;
3550
3551 ENTER();
3552 ffs_dev_lock();
3553 @@ -3711,8 +3712,11 @@ static void ffs_closed(struct ffs_data *ffs)
3554 || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
3555 goto done;
3556
3557 - unregister_gadget_item(ffs_obj->opts->
3558 - func_inst.group.cg_item.ci_parent->ci_parent);
3559 + ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
3560 + ffs_dev_unlock();
3561 +
3562 + unregister_gadget_item(ci);
3563 + return;
3564 done:
3565 ffs_dev_unlock();
3566 }
3567 diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
3568 index 79ddcb05d126..85d3e648bdea 100644
3569 --- a/drivers/vfio/vfio_iommu_spapr_tce.c
3570 +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
3571 @@ -1292,6 +1292,10 @@ static int tce_iommu_attach_group(void *iommu_data,
3572 /* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
3573 iommu_group_id(iommu_group), iommu_group); */
3574 table_group = iommu_group_get_iommudata(iommu_group);
3575 + if (!table_group) {
3576 + ret = -ENODEV;
3577 + goto unlock_exit;
3578 + }
3579
3580 if (tce_groups_attached(container) && (!table_group->ops ||
3581 !table_group->ops->take_ownership ||
3582 diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
3583 index e0c98423f2c9..11a72bc2c71b 100644
3584 --- a/drivers/watchdog/bcm_kona_wdt.c
3585 +++ b/drivers/watchdog/bcm_kona_wdt.c
3586 @@ -304,6 +304,8 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
3587 if (!wdt)
3588 return -ENOMEM;
3589
3590 + spin_lock_init(&wdt->lock);
3591 +
3592 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3593 wdt->base = devm_ioremap_resource(dev, res);
3594 if (IS_ERR(wdt->base))
3595 @@ -316,7 +318,6 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev)
3596 return ret;
3597 }
3598
3599 - spin_lock_init(&wdt->lock);
3600 platform_set_drvdata(pdev, wdt);
3601 watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
3602 bcm_kona_wdt_wdd.parent = &pdev->dev;
3603 diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
3604 index 8e7a3d646531..679f79f68182 100644
3605 --- a/drivers/xen/swiotlb-xen.c
3606 +++ b/drivers/xen/swiotlb-xen.c
3607 @@ -409,9 +409,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
3608 if (map == SWIOTLB_MAP_ERROR)
3609 return DMA_ERROR_CODE;
3610
3611 + dev_addr = xen_phys_to_bus(map);
3612 xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
3613 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
3614 - dev_addr = xen_phys_to_bus(map);
3615
3616 /*
3617 * Ensure that the address returned is DMA'ble
3618 @@ -567,13 +567,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
3619 sg_dma_len(sgl) = 0;
3620 return 0;
3621 }
3622 + dev_addr = xen_phys_to_bus(map);
3623 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
3624 dev_addr,
3625 map & ~PAGE_MASK,
3626 sg->length,
3627 dir,
3628 attrs);
3629 - sg->dma_address = xen_phys_to_bus(map);
3630 + sg->dma_address = dev_addr;
3631 } else {
3632 /* we are not interested in the dma_addr returned by
3633 * xen_dma_map_page, only in the potential cache flushes executed
3634 diff --git a/fs/aio.c b/fs/aio.c
3635 index 428484f2f841..0fcb49ad67d4 100644
3636 --- a/fs/aio.c
3637 +++ b/fs/aio.c
3638 @@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
3639 * Tell lockdep we inherited freeze protection from submission
3640 * thread.
3641 */
3642 - __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
3643 + if (S_ISREG(file_inode(file)->i_mode))
3644 + __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
3645 file_end_write(file);
3646 }
3647
3648 @@ -1492,7 +1493,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
3649 * by telling it the lock got released so that it doesn't
3650 * complain about held lock when we return to userspace.
3651 */
3652 - __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
3653 + if (S_ISREG(file_inode(file)->i_mode))
3654 + __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
3655 }
3656 kfree(iovec);
3657 return ret;
3658 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
3659 index 2472af2798c7..cfd724f98332 100644
3660 --- a/fs/binfmt_elf.c
3661 +++ b/fs/binfmt_elf.c
3662 @@ -2296,6 +2296,7 @@ static int elf_core_dump(struct coredump_params *cprm)
3663 goto end_coredump;
3664 }
3665 }
3666 + dump_truncate(cprm);
3667
3668 if (!elf_core_write_extra_data(cprm))
3669 goto end_coredump;
3670 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3671 index bddbae796941..a2a014b19f18 100644
3672 --- a/fs/btrfs/inode.c
3673 +++ b/fs/btrfs/inode.c
3674 @@ -4480,8 +4480,19 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
3675 if (found_type > min_type) {
3676 del_item = 1;
3677 } else {
3678 - if (item_end < new_size)
3679 + if (item_end < new_size) {
3680 + /*
3681 + * With NO_HOLES mode, for the following mapping
3682 + *
3683 + * [0-4k][hole][8k-12k]
3684 + *
3685 + * if isize is truncated down to 6k, it would
3686 + * wrongly end up at 8k.
3687 + */
3688 + if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
3689 + last_size = new_size;
3690 break;
3691 + }
3692 if (found_key.offset >= new_size)
3693 del_item = 1;
3694 else
3695 @@ -7235,7 +7246,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
3696 struct extent_map *em = NULL;
3697 int ret;
3698
3699 - down_read(&BTRFS_I(inode)->dio_sem);
3700 if (type != BTRFS_ORDERED_NOCOW) {
3701 em = create_pinned_em(inode, start, len, orig_start,
3702 block_start, block_len, orig_block_len,
3703 @@ -7254,7 +7264,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
3704 em = ERR_PTR(ret);
3705 }
3706 out:
3707 - up_read(&BTRFS_I(inode)->dio_sem);
3708
3709 return em;
3710 }
3711 @@ -8707,6 +8716,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3712 dio_data.unsubmitted_oe_range_start = (u64)offset;
3713 dio_data.unsubmitted_oe_range_end = (u64)offset;
3714 current->journal_info = &dio_data;
3715 + down_read(&BTRFS_I(inode)->dio_sem);
3716 } else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
3717 &BTRFS_I(inode)->runtime_flags)) {
3718 inode_dio_end(inode);
3719 @@ -8719,6 +8729,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
3720 iter, btrfs_get_blocks_direct, NULL,
3721 btrfs_submit_direct, flags);
3722 if (iov_iter_rw(iter) == WRITE) {
3723 + up_read(&BTRFS_I(inode)->dio_sem);
3724 current->journal_info = NULL;
3725 if (ret < 0 && ret != -EIOCBQUEUED) {
3726 if (dio_data.reserve)
3727 diff --git a/fs/coredump.c b/fs/coredump.c
3728 index eb9c92c9b20f..4407e27beca9 100644
3729 --- a/fs/coredump.c
3730 +++ b/fs/coredump.c
3731 @@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align)
3732 return mod ? dump_skip(cprm, align - mod) : 1;
3733 }
3734 EXPORT_SYMBOL(dump_align);
3735 +
3736 +/*
3737 + * Ensures that file size is big enough to contain the current file
3738 + * position. This prevents gdb from complaining about a truncated file
3739 + * if the last "write" to the file was dump_skip.
3740 + */
3741 +void dump_truncate(struct coredump_params *cprm)
3742 +{
3743 + struct file *file = cprm->file;
3744 + loff_t offset;
3745 +
3746 + if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
3747 + offset = file->f_op->llseek(file, 0, SEEK_CUR);
3748 + if (i_size_read(file->f_mapping->host) < offset)
3749 + do_truncate(file->f_path.dentry, offset, 0, file);
3750 + }
3751 +}
3752 +EXPORT_SYMBOL(dump_truncate);
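dump_skip() on a seekable file advances the position without writing, leaving a sparse tail; if the dump ends on such a skip, i_size stays short of the final position and gdb reports a truncated core. A rough user-space analogue of what dump_truncate() does (illustrative only, not kernel code):

        #include <sys/stat.h>
        #include <unistd.h>

        static void truncate_to_pos(int fd)
        {
                off_t pos = lseek(fd, 0, SEEK_CUR);  /* current dump position */
                struct stat st;

                if (pos >= 0 && fstat(fd, &st) == 0 && st.st_size < pos)
                        ftruncate(fd, pos);          /* extend to cover the hole */
        }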
3753 diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
3754 index 484bebc20bca..0a2115084c3f 100644
3755 --- a/fs/nfs/callback.c
3756 +++ b/fs/nfs/callback.c
3757 @@ -279,7 +279,7 @@ static struct svc_serv *nfs_callback_create_svc(int minorversion)
3758 printk(KERN_WARNING "nfs_callback_create_svc: no kthread, %d users??\n",
3759 cb_info->users);
3760
3761 - serv = svc_create(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
3762 + serv = svc_create_pooled(&nfs4_callback_program, NFS4_CALLBACK_BUFSIZE, sv_ops);
3763 if (!serv) {
3764 printk(KERN_ERR "nfs_callback_create_svc: create service failed\n");
3765 return ERR_PTR(-ENOMEM);
3766 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3767 index fc9b04941739..401ea6e4cab8 100644
3768 --- a/fs/nfs/nfs4proc.c
3769 +++ b/fs/nfs/nfs4proc.c
3770 @@ -2343,8 +2343,6 @@ static int nfs4_opendata_access(struct rpc_cred *cred,
3771 if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
3772 return 0;
3773
3774 - /* even though OPEN succeeded, access is denied. Close the file */
3775 - nfs4_close_state(state, fmode);
3776 return -EACCES;
3777 }
3778
3779 @@ -8431,6 +8429,7 @@ static void nfs4_layoutget_release(void *calldata)
3780 size_t max_pages = max_response_pages(server);
3781
3782 dprintk("--> %s\n", __func__);
3783 + nfs4_sequence_free_slot(&lgp->res.seq_res);
3784 nfs4_free_pages(lgp->args.layout.pages, max_pages);
3785 pnfs_put_layout_hdr(NFS_I(inode)->layout);
3786 put_nfs_open_context(lgp->args.ctx);
3787 @@ -8505,7 +8504,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout, gfp_t gfp_flags)
3788 /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
3789 if (status == 0 && lgp->res.layoutp->len)
3790 lseg = pnfs_layout_process(lgp);
3791 - nfs4_sequence_free_slot(&lgp->res.seq_res);
3792 rpc_put_task(task);
3793 dprintk("<-- %s status=%d\n", __func__, status);
3794 if (status)
3795 diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
3796 index 636abcbd4650..5e8709aa1e7e 100644
3797 --- a/fs/ocfs2/cluster/heartbeat.c
3798 +++ b/fs/ocfs2/cluster/heartbeat.c
3799 @@ -2242,13 +2242,13 @@ static void o2hb_heartbeat_group_drop_item(struct config_group *group,
3800 spin_unlock(&o2hb_live_lock);
3801 }
3802
3803 -static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
3804 +static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
3805 char *page)
3806 {
3807 return sprintf(page, "%u\n", o2hb_dead_threshold);
3808 }
3809
3810 -static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
3811 +static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
3812 const char *page, size_t count)
3813 {
3814 unsigned long tmp;
3815 @@ -2297,11 +2297,11 @@ static ssize_t o2hb_heartbeat_group_mode_store(struct config_item *item,
3816
3817 }
3818
3819 -CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
3820 +CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
3821 CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
3822
3823 static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
3824 - &o2hb_heartbeat_group_attr_threshold,
3825 + &o2hb_heartbeat_group_attr_dead_threshold,
3826 &o2hb_heartbeat_group_attr_mode,
3827 NULL,
3828 };
3829 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
3830 index c201017b5730..97498be2ca2e 100644
3831 --- a/include/linux/bpf.h
3832 +++ b/include/linux/bpf.h
3833 @@ -243,6 +243,8 @@ struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref);
3834 void bpf_map_put_with_uref(struct bpf_map *map);
3835 void bpf_map_put(struct bpf_map *map);
3836 int bpf_map_precharge_memlock(u32 pages);
3837 +void *bpf_map_area_alloc(size_t size);
3838 +void bpf_map_area_free(void *base);
3839
3840 extern int sysctl_unprivileged_bpf_disabled;
3841
3842 diff --git a/include/linux/coredump.h b/include/linux/coredump.h
3843 index d016a121a8c4..28ffa94aed6b 100644
3844 --- a/include/linux/coredump.h
3845 +++ b/include/linux/coredump.h
3846 @@ -14,6 +14,7 @@ struct coredump_params;
3847 extern int dump_skip(struct coredump_params *cprm, size_t nr);
3848 extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
3849 extern int dump_align(struct coredump_params *cprm, int align);
3850 +extern void dump_truncate(struct coredump_params *cprm);
3851 #ifdef CONFIG_COREDUMP
3852 extern void do_coredump(const siginfo_t *siginfo);
3853 #else
3854 diff --git a/include/linux/phy.h b/include/linux/phy.h
3855 index bd22670e2182..6c9b1e0006ee 100644
3856 --- a/include/linux/phy.h
3857 +++ b/include/linux/phy.h
3858 @@ -401,6 +401,9 @@ struct phy_device {
3859 u32 advertising;
3860 u32 lp_advertising;
3861
3862 + /* Energy efficient ethernet modes which should be prohibited */
3863 + u32 eee_broken_modes;
3864 +
3865 int autoneg;
3866
3867 int link_timeout;
3868 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
3869 index 31947b9c21d6..835c30e491c8 100644
3870 --- a/include/net/xfrm.h
3871 +++ b/include/net/xfrm.h
3872 @@ -944,10 +944,6 @@ struct xfrm_dst {
3873 struct flow_cache_object flo;
3874 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
3875 int num_pols, num_xfrms;
3876 -#ifdef CONFIG_XFRM_SUB_POLICY
3877 - struct flowi *origin;
3878 - struct xfrm_selector *partner;
3879 -#endif
3880 u32 xfrm_genid;
3881 u32 policy_genid;
3882 u32 route_mtu_cached;
3883 @@ -963,12 +959,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
3884 dst_release(xdst->route);
3885 if (likely(xdst->u.dst.xfrm))
3886 xfrm_state_put(xdst->u.dst.xfrm);
3887 -#ifdef CONFIG_XFRM_SUB_POLICY
3888 - kfree(xdst->origin);
3889 - xdst->origin = NULL;
3890 - kfree(xdst->partner);
3891 - xdst->partner = NULL;
3892 -#endif
3893 }
3894 #endif
3895
3896 diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
3897 index 8e547231c1b7..5c22e8cab24b 100644
3898 --- a/include/uapi/linux/ethtool.h
3899 +++ b/include/uapi/linux/ethtool.h
3900 @@ -1368,6 +1368,8 @@ enum ethtool_link_mode_bit_indices {
3901 ETHTOOL_LINK_MODE_10000baseLR_Full_BIT = 44,
3902 ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT = 45,
3903 ETHTOOL_LINK_MODE_10000baseER_Full_BIT = 46,
3904 + ETHTOOL_LINK_MODE_2500baseT_Full_BIT = 47,
3905 + ETHTOOL_LINK_MODE_5000baseT_Full_BIT = 48,
3906
3907
3908 /* Last allowed bit for __ETHTOOL_LINK_MODE_LEGACY_MASK is bit
3909 @@ -1377,7 +1379,7 @@ enum ethtool_link_mode_bit_indices {
3910 */
3911
3912 __ETHTOOL_LINK_MODE_LAST
3913 - = ETHTOOL_LINK_MODE_10000baseER_Full_BIT,
3914 + = ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
3915 };
3916
3917 #define __ETHTOOL_LINK_MODE_LEGACY_MASK(base_name) \
3918 diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
3919 index a2ac051c342f..f3721e150d94 100644
3920 --- a/kernel/bpf/arraymap.c
3921 +++ b/kernel/bpf/arraymap.c
3922 @@ -11,7 +11,6 @@
3923 */
3924 #include <linux/bpf.h>
3925 #include <linux/err.h>
3926 -#include <linux/vmalloc.h>
3927 #include <linux/slab.h>
3928 #include <linux/mm.h>
3929 #include <linux/filter.h>
3930 @@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
3931 if (array_size >= U32_MAX - PAGE_SIZE)
3932 return ERR_PTR(-ENOMEM);
3933
3934 -
3935 /* allocate all map elements and zero-initialize them */
3936 - array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
3937 - if (!array) {
3938 - array = vzalloc(array_size);
3939 - if (!array)
3940 - return ERR_PTR(-ENOMEM);
3941 - }
3942 + array = bpf_map_area_alloc(array_size);
3943 + if (!array)
3944 + return ERR_PTR(-ENOMEM);
3945
3946 /* copy mandatory map attributes */
3947 array->map.map_type = attr->map_type;
3948 @@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
3949
3950 if (array_size >= U32_MAX - PAGE_SIZE ||
3951 elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
3952 - kvfree(array);
3953 + bpf_map_area_free(array);
3954 return ERR_PTR(-ENOMEM);
3955 }
3956 out:
3957 @@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
3958 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
3959 bpf_array_free_percpu(array);
3960
3961 - kvfree(array);
3962 + bpf_map_area_free(array);
3963 }
3964
3965 static const struct bpf_map_ops array_ops = {
3966 @@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
3967 /* make sure it's empty */
3968 for (i = 0; i < array->map.max_entries; i++)
3969 BUG_ON(array->ptrs[i] != NULL);
3970 - kvfree(array);
3971 +
3972 + bpf_map_area_free(array);
3973 }
3974
3975 static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
3976 diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
3977 index ad1bc67aff1b..ad2f0ed75471 100644
3978 --- a/kernel/bpf/hashtab.c
3979 +++ b/kernel/bpf/hashtab.c
3980 @@ -13,7 +13,6 @@
3981 #include <linux/bpf.h>
3982 #include <linux/jhash.h>
3983 #include <linux/filter.h>
3984 -#include <linux/vmalloc.h>
3985 #include "percpu_freelist.h"
3986
3987 struct bucket {
3988 @@ -84,14 +83,15 @@ static void htab_free_elems(struct bpf_htab *htab)
3989 free_percpu(pptr);
3990 }
3991 free_elems:
3992 - vfree(htab->elems);
3993 + bpf_map_area_free(htab->elems);
3994 }
3995
3996 static int prealloc_elems_and_freelist(struct bpf_htab *htab)
3997 {
3998 int err = -ENOMEM, i;
3999
4000 - htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
4001 + htab->elems = bpf_map_area_alloc(htab->elem_size *
4002 + htab->map.max_entries);
4003 if (!htab->elems)
4004 return -ENOMEM;
4005
4006 @@ -227,14 +227,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
4007 goto free_htab;
4008
4009 err = -ENOMEM;
4010 - htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
4011 - GFP_USER | __GFP_NOWARN);
4012 -
4013 - if (!htab->buckets) {
4014 - htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
4015 - if (!htab->buckets)
4016 - goto free_htab;
4017 - }
4018 + htab->buckets = bpf_map_area_alloc(htab->n_buckets *
4019 + sizeof(struct bucket));
4020 + if (!htab->buckets)
4021 + goto free_htab;
4022
4023 for (i = 0; i < htab->n_buckets; i++) {
4024 INIT_HLIST_HEAD(&htab->buckets[i].head);
4025 @@ -258,7 +254,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
4026 free_extra_elems:
4027 free_percpu(htab->extra_elems);
4028 free_buckets:
4029 - kvfree(htab->buckets);
4030 + bpf_map_area_free(htab->buckets);
4031 free_htab:
4032 kfree(htab);
4033 return ERR_PTR(err);
4034 @@ -715,7 +711,7 @@ static void htab_map_free(struct bpf_map *map)
4035 pcpu_freelist_destroy(&htab->freelist);
4036 }
4037 free_percpu(htab->extra_elems);
4038 - kvfree(htab->buckets);
4039 + bpf_map_area_free(htab->buckets);
4040 kfree(htab);
4041 }
4042
4043 diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
4044 index 732ae16d12b7..be8519148c25 100644
4045 --- a/kernel/bpf/stackmap.c
4046 +++ b/kernel/bpf/stackmap.c
4047 @@ -7,7 +7,6 @@
4048 #include <linux/bpf.h>
4049 #include <linux/jhash.h>
4050 #include <linux/filter.h>
4051 -#include <linux/vmalloc.h>
4052 #include <linux/stacktrace.h>
4053 #include <linux/perf_event.h>
4054 #include "percpu_freelist.h"
4055 @@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
4056 u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
4057 int err;
4058
4059 - smap->elems = vzalloc(elem_size * smap->map.max_entries);
4060 + smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
4061 if (!smap->elems)
4062 return -ENOMEM;
4063
4064 @@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
4065 return 0;
4066
4067 free_elems:
4068 - vfree(smap->elems);
4069 + bpf_map_area_free(smap->elems);
4070 return err;
4071 }
4072
4073 @@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
4074 if (cost >= U32_MAX - PAGE_SIZE)
4075 return ERR_PTR(-E2BIG);
4076
4077 - smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
4078 - if (!smap) {
4079 - smap = vzalloc(cost);
4080 - if (!smap)
4081 - return ERR_PTR(-ENOMEM);
4082 - }
4083 + smap = bpf_map_area_alloc(cost);
4084 + if (!smap)
4085 + return ERR_PTR(-ENOMEM);
4086
4087 err = -E2BIG;
4088 cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
4089 @@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
4090 put_buffers:
4091 put_callchain_buffers();
4092 free_smap:
4093 - kvfree(smap);
4094 + bpf_map_area_free(smap);
4095 return ERR_PTR(err);
4096 }
4097
4098 @@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
4099 /* wait for bpf programs to complete before freeing stack map */
4100 synchronize_rcu();
4101
4102 - vfree(smap->elems);
4103 + bpf_map_area_free(smap->elems);
4104 pcpu_freelist_destroy(&smap->freelist);
4105 - kvfree(smap);
4106 + bpf_map_area_free(smap);
4107 put_callchain_buffers();
4108 }
4109
4110 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4111 index 237f3d6a7ddc..72ea91df71c9 100644
4112 --- a/kernel/bpf/syscall.c
4113 +++ b/kernel/bpf/syscall.c
4114 @@ -12,6 +12,8 @@
4115 #include <linux/bpf.h>
4116 #include <linux/syscalls.h>
4117 #include <linux/slab.h>
4118 +#include <linux/vmalloc.h>
4119 +#include <linux/mmzone.h>
4120 #include <linux/anon_inodes.h>
4121 #include <linux/file.h>
4122 #include <linux/license.h>
4123 @@ -48,6 +50,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
4124 list_add(&tl->list_node, &bpf_map_types);
4125 }
4126
4127 +void *bpf_map_area_alloc(size_t size)
4128 +{
4129 + /* We definitely need __GFP_NORETRY, so the OOM killer doesn't
4130 + * trigger under memory pressure; we really just want to
4131 + * fail instead.
4132 + */
4133 + const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
4134 + void *area;
4135 +
4136 + if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
4137 + area = kmalloc(size, GFP_USER | flags);
4138 + if (area != NULL)
4139 + return area;
4140 + }
4141 +
4142 + return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
4143 + PAGE_KERNEL);
4144 +}
4145 +
4146 +void bpf_map_area_free(void *area)
4147 +{
4148 + kvfree(area);
4149 +}
4150 +
4151 int bpf_map_precharge_memlock(u32 pages)
4152 {
4153 struct user_struct *user = get_current_user();
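One consequence of the size check in bpf_map_area_alloc() worth spelling out: with 4 KiB pages and PAGE_ALLOC_COSTLY_ORDER == 3, the kmalloc fast path is only attempted for requests up to PAGE_SIZE << 3 = 32 KiB; anything larger goes straight to __vmalloc(), and __GFP_NORETRY keeps a failed physically-contiguous attempt from invoking the OOM killer before the vmalloc fallback runs.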
4154 diff --git a/kernel/events/core.c b/kernel/events/core.c
4155 index 11cc1d83c770..30ccc7029d18 100644
4156 --- a/kernel/events/core.c
4157 +++ b/kernel/events/core.c
4158 @@ -2272,7 +2272,7 @@ static int __perf_install_in_context(void *info)
4159 struct perf_event_context *ctx = event->ctx;
4160 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
4161 struct perf_event_context *task_ctx = cpuctx->task_ctx;
4162 - bool activate = true;
4163 + bool reprogram = true;
4164 int ret = 0;
4165
4166 raw_spin_lock(&cpuctx->ctx.lock);
4167 @@ -2280,27 +2280,26 @@ static int __perf_install_in_context(void *info)
4168 raw_spin_lock(&ctx->lock);
4169 task_ctx = ctx;
4170
4171 - /* If we're on the wrong CPU, try again */
4172 - if (task_cpu(ctx->task) != smp_processor_id()) {
4173 - ret = -ESRCH;
4174 - goto unlock;
4175 - }
4176 + reprogram = (ctx->task == current);
4177
4178 /*
4179 - * If we're on the right CPU, see if the task we target is
4180 - * current, if not we don't have to activate the ctx, a future
4181 - * context switch will do that for us.
4182 + * If the task is running, it must be running on this CPU,
4183 + * otherwise we cannot reprogram things.
4184 + *
4185 + * If it's not running, we don't care; ctx->lock will
4186 + * serialize against it becoming runnable.
4187 */
4188 - if (ctx->task != current)
4189 - activate = false;
4190 - else
4191 - WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
4192 + if (task_curr(ctx->task) && !reprogram) {
4193 + ret = -ESRCH;
4194 + goto unlock;
4195 + }
4196
4197 + WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
4198 } else if (task_ctx) {
4199 raw_spin_lock(&task_ctx->lock);
4200 }
4201
4202 - if (activate) {
4203 + if (reprogram) {
4204 ctx_sched_out(ctx, cpuctx, EVENT_TIME);
4205 add_event_to_ctx(event, ctx);
4206 ctx_resched(cpuctx, task_ctx);
4207 @@ -2351,13 +2350,36 @@ perf_install_in_context(struct perf_event_context *ctx,
4208 /*
4209 * Installing events is tricky because we cannot rely on ctx->is_active
4210 * to be set in case this is the nr_events 0 -> 1 transition.
4211 + *
4212 + * Instead we use task_curr(), which tells us if the task is running.
4213 + * However, since we use task_curr() outside of rq::lock, we can race
4214 + * against the actual state. This means the result can be wrong.
4215 + *
4216 + * If we get a false positive, we retry, this is harmless.
4217 + *
4218 + * If we get a false negative, things are complicated. If we are after
4219 + * perf_event_context_sched_in() ctx::lock will serialize us, and the
4220 + * value must be correct. If we're before, it doesn't matter since
4221 + * perf_event_context_sched_in() will program the counter.
4222 + *
4223 + * However, this hinges on the remote context switch having observed
4224 + * our task->perf_event_ctxp[] store, such that it will in fact take
4225 + * ctx::lock in perf_event_context_sched_in().
4226 + *
4227 + * We do this by task_function_call(), if the IPI fails to hit the task
4228 + * we know any future context switch of task must see the
4229 + * perf_event_ctpx[] store.
4230 */
4231 -again:
4232 +
4233 /*
4234 - * Cannot use task_function_call() because we need to run on the task's
4235 - * CPU regardless of whether its current or not.
4236 + * This smp_mb() orders the task->perf_event_ctxp[] store with the
4237 + * task_cpu() load, such that if the IPI then does not find the task
4238 + * running, a future context switch of that task must observe the
4239 + * store.
4240 */
4241 - if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
4242 + smp_mb();
4243 +again:
4244 + if (!task_function_call(task, __perf_install_in_context, event))
4245 return;
4246
4247 raw_spin_lock_irq(&ctx->lock);
4248 @@ -2371,12 +2393,16 @@ perf_install_in_context(struct perf_event_context *ctx,
4249 raw_spin_unlock_irq(&ctx->lock);
4250 return;
4251 }
4252 - raw_spin_unlock_irq(&ctx->lock);
4253 /*
4254 - * Since !ctx->is_active doesn't mean anything, we must IPI
4255 - * unconditionally.
4256 + * If the task is not running, ctx->lock will avoid it becoming so,
4257 + * thus we can safely install the event.
4258 */
4259 - goto again;
4260 + if (task_curr(task)) {
4261 + raw_spin_unlock_irq(&ctx->lock);
4262 + goto again;
4263 + }
4264 + add_event_to_ctx(event, ctx);
4265 + raw_spin_unlock_irq(&ctx->lock);
4266 }
4267
4268 /*
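The smp_mb() added above orders the installer's earlier task->perf_event_ctxp[] store against its task_cpu() load inside task_function_call(), pairing with the barriers implied by the remote CPU's context switch. Sketched as the classic publish/observe pattern (a paraphrase, not the literal code):

        /* installer                          remote context switch
         * ---------                          ---------------------
         * task->perf_event_ctxp[n] = ctx;    switches the task in
         * smp_mb();                          ...
         * IPI targets task_cpu(task);        if the IPI misses the task,
         *                                    any later sched-in must see
         *                                    the ctxp store and take
         *                                    ctx->lock in sched_in()
         */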
4269 diff --git a/kernel/panic.c b/kernel/panic.c
4270 index e6480e20379e..dbec387099b1 100644
4271 --- a/kernel/panic.c
4272 +++ b/kernel/panic.c
4273 @@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
4274 * Delay timeout seconds before rebooting the machine.
4275 * We can't use the "normal" timers since we just panicked.
4276 */
4277 - pr_emerg("Rebooting in %d seconds..", panic_timeout);
4278 + pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
4279
4280 for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
4281 touch_nmi_watchdog();
4282 diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
4283 index a2d6eb71f06b..ec91fcc09bfe 100644
4284 --- a/kernel/sched/loadavg.c
4285 +++ b/kernel/sched/loadavg.c
4286 @@ -201,8 +201,9 @@ void calc_load_exit_idle(void)
4287 struct rq *this_rq = this_rq();
4288
4289 /*
4290 - * If we're still before the sample window, we're done.
4291 + * If we're still before the pending sample window, we're done.
4292 */
4293 + this_rq->calc_load_update = calc_load_update;
4294 if (time_before(jiffies, this_rq->calc_load_update))
4295 return;
4296
4297 @@ -211,7 +212,6 @@ void calc_load_exit_idle(void)
4298 * accounted through the nohz accounting, so skip the entire deal and
4299 * sync up for the next window.
4300 */
4301 - this_rq->calc_load_update = calc_load_update;
4302 if (time_before(jiffies, this_rq->calc_load_update + 10))
4303 this_rq->calc_load_update += LOAD_FREQ;
4304 }
4305 diff --git a/lib/swiotlb.c b/lib/swiotlb.c
4306 index ad1d2962d129..b7812df04437 100644
4307 --- a/lib/swiotlb.c
4308 +++ b/lib/swiotlb.c
4309 @@ -456,11 +456,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
4310 : 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
4311
4312 /*
4313 - * For mappings greater than a page, we limit the stride (and
4314 - * hence alignment) to a page size.
4315 + * For mappings greater than or equal to a page, we limit the stride
4316 + * (and hence alignment) to a page size.
4317 */
4318 nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
4319 - if (size > PAGE_SIZE)
4320 + if (size >= PAGE_SIZE)
4321 stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
4322 else
4323 stride = 1;
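Worked numbers for the boundary case this swiotlb change fixes: with IO_TLB_SHIFT = 11 (2 KiB slots) and 4 KiB pages, a mapping of exactly PAGE_SIZE needs nslots = 2, and the new "size >= PAGE_SIZE" test gives stride = 1 << (12 - 11) = 2, so the slot search only considers page-aligned pairs; the old "size > PAGE_SIZE" test left stride = 1, allowing a full page to land at a 2 KiB-aligned offset that callers expecting page alignment would mishandle.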
4324 diff --git a/mm/huge_memory.c b/mm/huge_memory.c
4325 index d5b2b759f76f..e7d5db958538 100644
4326 --- a/mm/huge_memory.c
4327 +++ b/mm/huge_memory.c
4328 @@ -1227,8 +1227,11 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
4329 */
4330 if (unlikely(pmd_trans_migrating(*fe->pmd))) {
4331 page = pmd_page(*fe->pmd);
4332 + if (!get_page_unless_zero(page))
4333 + goto out_unlock;
4334 spin_unlock(fe->ptl);
4335 wait_on_page_locked(page);
4336 + put_page(page);
4337 goto out;
4338 }
4339
4340 @@ -1260,8 +1263,11 @@ int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t pmd)
4341
4342 /* Migration could have started since the pmd_trans_migrating check */
4343 if (!page_locked) {
4344 + if (!get_page_unless_zero(page))
4345 + goto out_unlock;
4346 spin_unlock(fe->ptl);
4347 wait_on_page_locked(page);
4348 + put_page(page);
4349 page_nid = -1;
4350 goto out;
4351 }
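Both huge_memory hunks apply the same rule: take your own reference on the page before dropping the lock that currently keeps it alive, because wait_on_page_locked() sleeps and the page could otherwise be freed (and reused) underneath the waiter. The shape of the pattern, as a sketch:

        if (!get_page_unless_zero(page))   /* refcount already zero: page is going away */
                goto out_unlock;
        spin_unlock(ptl);                  /* safe now: we hold our own reference */
        wait_on_page_locked(page);         /* may sleep */
        put_page(page);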
4352 diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
4353 index 454d6d7509ed..3405b4ee1757 100644
4354 --- a/mm/swap_cgroup.c
4355 +++ b/mm/swap_cgroup.c
4356 @@ -204,6 +204,8 @@ void swap_cgroup_swapoff(int type)
4357 struct page *page = map[i];
4358 if (page)
4359 __free_page(page);
4360 + if (!(i % SWAP_CLUSTER_MAX))
4361 + cond_resched();
4362 }
4363 vfree(map);
4364 }
4365 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
4366 index f2481cb4e6b2..195de42bea1f 100644
4367 --- a/mm/vmalloc.c
4368 +++ b/mm/vmalloc.c
4369 @@ -244,11 +244,21 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
4370 */
4371 VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
4372
4373 + /*
4374 + * Don't dereference bad PUD or PMD (below) entries. This will also
4375 + * identify huge mappings, which we may encounter on architectures
4376 + * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
4377 + * identified as vmalloc addresses by is_vmalloc_addr(), but are
4378 + * not [unambiguously] associated with a struct page, so there is
4379 + * no correct value to return for them.
4380 + */
4381 if (!pgd_none(*pgd)) {
4382 pud_t *pud = pud_offset(pgd, addr);
4383 - if (!pud_none(*pud)) {
4384 + WARN_ON_ONCE(pud_bad(*pud));
4385 + if (!pud_none(*pud) && !pud_bad(*pud)) {
4386 pmd_t *pmd = pmd_offset(pud, addr);
4387 - if (!pmd_none(*pmd)) {
4388 + WARN_ON_ONCE(pmd_bad(*pmd));
4389 + if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
4390 pte_t *ptep, pte;
4391
4392 ptep = pte_offset_map(pmd, addr);
4393 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
4394 index f2531ad66b68..8d213f974448 100644
4395 --- a/net/8021q/vlan.c
4396 +++ b/net/8021q/vlan.c
4397 @@ -277,7 +277,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
4398 return 0;
4399
4400 out_free_newdev:
4401 - free_netdev(new_dev);
4402 + if (new_dev->reg_state == NETREG_UNINITIALIZED)
4403 + free_netdev(new_dev);
4404 return err;
4405 }
4406
4407 diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
4408 index 59ce1fcc220c..71b6ab240dea 100644
4409 --- a/net/caif/cfpkt_skbuff.c
4410 +++ b/net/caif/cfpkt_skbuff.c
4411 @@ -81,11 +81,7 @@ static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
4412 {
4413 struct sk_buff *skb;
4414
4415 - if (likely(in_interrupt()))
4416 - skb = alloc_skb(len + pfx, GFP_ATOMIC);
4417 - else
4418 - skb = alloc_skb(len + pfx, GFP_KERNEL);
4419 -
4420 + skb = alloc_skb(len + pfx, GFP_ATOMIC);
4421 if (unlikely(skb == NULL))
4422 return NULL;
4423
4424 diff --git a/net/core/dev.c b/net/core/dev.c
4425 index 2e04fd188081..97f806116ae9 100644
4426 --- a/net/core/dev.c
4427 +++ b/net/core/dev.c
4428 @@ -1250,8 +1250,9 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
4429 if (!new_ifalias)
4430 return -ENOMEM;
4431 dev->ifalias = new_ifalias;
4432 + memcpy(dev->ifalias, alias, len);
4433 + dev->ifalias[len] = 0;
4434
4435 - strlcpy(dev->ifalias, alias, len+1);
4436 return len;
4437 }
4438
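The dev_set_alias() change is about the source, not the destination: strlcpy() keeps reading the source until it finds a NUL, and a netlink-supplied alias need not be NUL-terminated, so strlcpy(dst, alias, len + 1) could read past the attribute. Copying exactly len bytes and terminating by hand is bounded on both sides:

        memcpy(dev->ifalias, alias, len);  /* reads exactly len bytes */
        dev->ifalias[len] = 0;             /* terminate explicitly    */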
4439 diff --git a/net/core/dst.c b/net/core/dst.c
4440 index 656b70d39690..39cc11968cf9 100644
4441 --- a/net/core/dst.c
4442 +++ b/net/core/dst.c
4443 @@ -470,6 +470,20 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event,
4444 spin_lock_bh(&dst_garbage.lock);
4445 dst = dst_garbage.list;
4446 dst_garbage.list = NULL;
4447 + /* The code in dst_ifdown places a hold on the loopback device.
4448 + * If the gc entry processing is set to expire after a lengthy
4449 + * interval, this hold can cause netdev_wait_allrefs() to hang
4450 + * out and wait for a long time -- until the loopback
4451 + * interface is released. If we're really unlucky, it'll emit
4452 + * pr_emerg messages to console too. Reset the interval here,
4453 + * so dst cleanups occur in a more timely fashion.
4454 + */
4455 + if (dst_garbage.timer_inc > DST_GC_INC) {
4456 + dst_garbage.timer_inc = DST_GC_INC;
4457 + dst_garbage.timer_expires = DST_GC_MIN;
4458 + mod_delayed_work(system_wq, &dst_gc_work,
4459 + dst_garbage.timer_expires);
4460 + }
4461 spin_unlock_bh(&dst_garbage.lock);
4462
4463 if (last)
4464 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
4465 index 072c1f4998c9..e9989b835a66 100644
4466 --- a/net/core/ethtool.c
4467 +++ b/net/core/ethtool.c
4468 @@ -1704,7 +1704,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
4469 static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
4470 void __user *useraddr)
4471 {
4472 - struct ethtool_channels channels, max;
4473 + struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
4474 u32 max_rx_in_use = 0;
4475
4476 if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
4477 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
4478 index 1d9160794e55..9c6fd7f83a4a 100644
4479 --- a/net/core/rtnetlink.c
4480 +++ b/net/core/rtnetlink.c
4481 @@ -937,6 +937,7 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
4482 + nla_total_size(1) /* IFLA_LINKMODE */
4483 + nla_total_size(4) /* IFLA_CARRIER_CHANGES */
4484 + nla_total_size(4) /* IFLA_LINK_NETNSID */
4485 + + nla_total_size(4) /* IFLA_GROUP */
4486 + nla_total_size(ext_filter_mask
4487 & RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
4488 + rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
4489 @@ -1130,6 +1131,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
4490 struct ifla_vf_mac vf_mac;
4491 struct ifla_vf_info ivi;
4492
4493 + memset(&ivi, 0, sizeof(ivi));
4494 +
4495 /* Not all SR-IOV capable drivers support the
4496 * spoofcheck and "RSS query enable" query. Preset to
4497 * -1 so the user space tool can detect that the driver
4498 @@ -1138,7 +1141,6 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
4499 ivi.spoofchk = -1;
4500 ivi.rss_query_en = -1;
4501 ivi.trusted = -1;
4502 - memset(ivi.mac, 0, sizeof(ivi.mac));
4503 /* The default value for VF link state is "auto"
4504 * IFLA_VF_LINK_STATE_AUTO which equals zero
4505 */
4506 @@ -1464,6 +1466,7 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
4507 [IFLA_LINK_NETNSID] = { .type = NLA_S32 },
4508 [IFLA_PROTO_DOWN] = { .type = NLA_U8 },
4509 [IFLA_XDP] = { .type = NLA_NESTED },
4510 + [IFLA_GROUP] = { .type = NLA_U32 },
4511 };
4512
4513 static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
4514 diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
4515 index b1dc096d22f8..403593bd2b83 100644
4516 --- a/net/decnet/dn_route.c
4517 +++ b/net/decnet/dn_route.c
4518 @@ -188,12 +188,6 @@ static inline void dnrt_free(struct dn_route *rt)
4519 call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
4520 }
4521
4522 -static inline void dnrt_drop(struct dn_route *rt)
4523 -{
4524 - dst_release(&rt->dst);
4525 - call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
4526 -}
4527 -
4528 static void dn_dst_check_expire(unsigned long dummy)
4529 {
4530 int i;
4531 @@ -248,7 +242,7 @@ static int dn_dst_gc(struct dst_ops *ops)
4532 }
4533 *rtp = rt->dst.dn_next;
4534 rt->dst.dn_next = NULL;
4535 - dnrt_drop(rt);
4536 + dnrt_free(rt);
4537 break;
4538 }
4539 spin_unlock_bh(&dn_rt_hash_table[i].lock);
4540 @@ -350,7 +344,7 @@ static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_rou
4541 dst_use(&rth->dst, now);
4542 spin_unlock_bh(&dn_rt_hash_table[hash].lock);
4543
4544 - dnrt_drop(rt);
4545 + dst_free(&rt->dst);
4546 *rp = rth;
4547 return 0;
4548 }
4549 @@ -380,7 +374,7 @@ static void dn_run_flush(unsigned long dummy)
4550 for(; rt; rt = next) {
4551 next = rcu_dereference_raw(rt->dst.dn_next);
4552 RCU_INIT_POINTER(rt->dst.dn_next, NULL);
4553 - dst_free((struct dst_entry *)rt);
4554 + dnrt_free(rt);
4555 }
4556
4557 nothing_to_declare:
4558 @@ -1187,7 +1181,7 @@ static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *o
4559 if (dev_out->flags & IFF_LOOPBACK)
4560 flags |= RTCF_LOCAL;
4561
4562 - rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
4563 + rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
4564 if (rt == NULL)
4565 goto e_nobufs;
4566
4567 diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
4568 index 85f2fdc360c2..29246bc9a7b4 100644
4569 --- a/net/decnet/netfilter/dn_rtmsg.c
4570 +++ b/net/decnet/netfilter/dn_rtmsg.c
4571 @@ -102,7 +102,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
4572 {
4573 struct nlmsghdr *nlh = nlmsg_hdr(skb);
4574
4575 - if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
4576 + if (skb->len < sizeof(*nlh) ||
4577 + nlh->nlmsg_len < sizeof(*nlh) ||
4578 + skb->len < nlh->nlmsg_len)
4579 return;
4580
4581 if (!netlink_capable(skb, CAP_NET_ADMIN))
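The rewritten dnrmg check relies on && short-circuiting to make each read safe: the packet must be long enough to contain the header before nlmsg_len may be loaded, and nlmsg_len must itself be sane before it is trusted as a bound. Generic shape of the check (a sketch):

        struct nlmsghdr *nlh = nlmsg_hdr(skb);

        if (skb->len < sizeof(*nlh) ||       /* header fully present?     */
            nlh->nlmsg_len < sizeof(*nlh) || /* claimed length plausible? */
            skb->len < nlh->nlmsg_len)       /* claimed length within skb? */
                return;                      /* drop malformed message    */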
4582 diff --git a/net/dsa/slave.c b/net/dsa/slave.c
4583 index 3ff9d97cf56b..079d76bc204c 100644
4584 --- a/net/dsa/slave.c
4585 +++ b/net/dsa/slave.c
4586 @@ -1103,10 +1103,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
4587 /* Use already configured phy mode */
4588 if (p->phy_interface == PHY_INTERFACE_MODE_NA)
4589 p->phy_interface = p->phy->interface;
4590 - phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
4591 - p->phy_interface);
4592 -
4593 - return 0;
4594 + return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
4595 + p->phy_interface);
4596 }
4597
4598 static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
4599 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
4600 index 1bc623d7f754..19930da56b0a 100644
4601 --- a/net/ipv4/igmp.c
4602 +++ b/net/ipv4/igmp.c
4603 @@ -1112,6 +1112,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
4604 pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
4605 if (!pmc)
4606 return;
4607 + spin_lock_init(&pmc->lock);
4608 spin_lock_bh(&im->lock);
4609 pmc->interface = im->interface;
4610 in_dev_hold(in_dev);
4611 @@ -2071,21 +2072,26 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
4612
4613 static void ip_mc_clear_src(struct ip_mc_list *pmc)
4614 {
4615 - struct ip_sf_list *psf, *nextpsf;
4616 + struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
4617
4618 - for (psf = pmc->tomb; psf; psf = nextpsf) {
4619 + spin_lock_bh(&pmc->lock);
4620 + tomb = pmc->tomb;
4621 + pmc->tomb = NULL;
4622 + sources = pmc->sources;
4623 + pmc->sources = NULL;
4624 + pmc->sfmode = MCAST_EXCLUDE;
4625 + pmc->sfcount[MCAST_INCLUDE] = 0;
4626 + pmc->sfcount[MCAST_EXCLUDE] = 1;
4627 + spin_unlock_bh(&pmc->lock);
4628 +
4629 + for (psf = tomb; psf; psf = nextpsf) {
4630 nextpsf = psf->sf_next;
4631 kfree(psf);
4632 }
4633 - pmc->tomb = NULL;
4634 - for (psf = pmc->sources; psf; psf = nextpsf) {
4635 + for (psf = sources; psf; psf = nextpsf) {
4636 nextpsf = psf->sf_next;
4637 kfree(psf);
4638 }
4639 - pmc->sources = NULL;
4640 - pmc->sfmode = MCAST_EXCLUDE;
4641 - pmc->sfcount[MCAST_INCLUDE] = 0;
4642 - pmc->sfcount[MCAST_EXCLUDE] = 1;
4643 }
4644
4645 /* Join a multicast group
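The reworked ip_mc_clear_src() is the detach-then-free idiom: unlink both source lists and reset the filter state while holding pmc->lock, so concurrent readers of the pmc never observe a half-torn-down list, then kfree() the detached chains with the lock dropped. As a sketch:

        spin_lock_bh(&pmc->lock);
        list = pmc->sources;               /* detach under the lock */
        pmc->sources = NULL;
        spin_unlock_bh(&pmc->lock);

        while (list) {                     /* free outside the lock */
                next = list->sf_next;
                kfree(list);
                list = next;
        }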
4646 diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
4647 index b3cc1335adbc..c0cc6aa8cfaa 100644
4648 --- a/net/ipv4/netfilter.c
4649 +++ b/net/ipv4/netfilter.c
4650 @@ -23,7 +23,8 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
4651 struct rtable *rt;
4652 struct flowi4 fl4 = {};
4653 __be32 saddr = iph->saddr;
4654 - __u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
4655 + const struct sock *sk = skb_to_full_sk(skb);
4656 + __u8 flags = sk ? inet_sk_flowi_flags(sk) : 0;
4657 struct net_device *dev = skb_dst(skb)->dev;
4658 unsigned int hh_len;
4659
4660 @@ -40,7 +41,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
4661 fl4.daddr = iph->daddr;
4662 fl4.saddr = saddr;
4663 fl4.flowi4_tos = RT_TOS(iph->tos);
4664 - fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
4665 + fl4.flowi4_oif = sk ? sk->sk_bound_dev_if : 0;
4666 if (!fl4.flowi4_oif)
4667 fl4.flowi4_oif = l3mdev_master_ifindex(dev);
4668 fl4.flowi4_mark = skb->mark;
4669 @@ -61,7 +62,7 @@ int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned int addr_t
4670 xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
4671 struct dst_entry *dst = skb_dst(skb);
4672 skb_dst_set(skb, NULL);
4673 - dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0);
4674 + dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), sk, 0);
4675 if (IS_ERR(dst))
4676 return PTR_ERR(dst);
4677 skb_dst_set(skb, dst);
4678 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
4679 index 045738319e8b..b6f4c42cc8ce 100644
4680 --- a/net/ipv6/addrconf.c
4681 +++ b/net/ipv6/addrconf.c
4682 @@ -315,9 +315,9 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
4683 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
4684 unsigned long delay)
4685 {
4686 - if (!delayed_work_pending(&ifp->dad_work))
4687 - in6_ifa_hold(ifp);
4688 - mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
4689 + in6_ifa_hold(ifp);
4690 + if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
4691 + in6_ifa_put(ifp);
4692 }
4693
4694 static int snmp6_alloc_dev(struct inet6_dev *idev)
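
addrconf_mod_dad_work() previously paired its reference with a delayed_work_pending() test, but the test and the queueing were not atomic, so the reference count could be left wrong under a race. The fixed pattern holds unconditionally and drops the extra reference when mod_delayed_work() reports the work was already queued, meaning an earlier hold is still outstanding. The invariant in a compilable sketch (single-threaded stand-ins, names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct ifa {
    int refcnt;
    bool work_pending;           /* stand-in for the queued-work state */
};

static void hold(struct ifa *a) { a->refcnt++; }
static void put(struct ifa *a)  { a->refcnt--; }

/* Stand-in for mod_delayed_work(): true means the work was already
 * pending and has merely been re-timed, not newly queued. */
static bool mod_work(struct ifa *a)
{
    bool was_pending = a->work_pending;

    a->work_pending = true;
    return was_pending;
}

static void schedule_dad(struct ifa *a)
{
    hold(a);                     /* always pair a hold with the queueing */
    if (mod_work(a))
        put(a);                  /* already queued: its hold still stands */
}

int main(void)
{
    struct ifa a = { 1, false };

    schedule_dad(&a);            /* newly queued: refcnt 1 -> 2 */
    schedule_dad(&a);            /* re-armed: refcnt stays 2 */
    printf("refcnt=%d\n", a.refcnt);
    return 0;
}

Exactly one reference is held per queued work instance, however many times the timer is re-armed before it fires.
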
4695 diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
4696 index eea23b57c6a5..ec849d88a662 100644
4697 --- a/net/ipv6/fib6_rules.c
4698 +++ b/net/ipv6/fib6_rules.c
4699 @@ -32,7 +32,6 @@ struct fib6_rule {
4700 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
4701 int flags, pol_lookup_t lookup)
4702 {
4703 - struct rt6_info *rt;
4704 struct fib_lookup_arg arg = {
4705 .lookup_ptr = lookup,
4706 .flags = FIB_LOOKUP_NOREF,
4707 @@ -44,21 +43,11 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
4708 fib_rules_lookup(net->ipv6.fib6_rules_ops,
4709 flowi6_to_flowi(fl6), flags, &arg);
4710
4711 - rt = arg.result;
4712 + if (arg.result)
4713 + return arg.result;
4714
4715 - if (!rt) {
4716 - dst_hold(&net->ipv6.ip6_null_entry->dst);
4717 - return &net->ipv6.ip6_null_entry->dst;
4718 - }
4719 -
4720 - if (rt->rt6i_flags & RTF_REJECT &&
4721 - rt->dst.error == -EAGAIN) {
4722 - ip6_rt_put(rt);
4723 - rt = net->ipv6.ip6_null_entry;
4724 - dst_hold(&rt->dst);
4725 - }
4726 -
4727 - return &rt->dst;
4728 + dst_hold(&net->ipv6.ip6_null_entry->dst);
4729 + return &net->ipv6.ip6_null_entry->dst;
4730 }
4731
4732 static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
4733 @@ -121,7 +110,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
4734 flp6->saddr = saddr;
4735 }
4736 err = rt->dst.error;
4737 - goto out;
4738 + if (err != -EAGAIN)
4739 + goto out;
4740 }
4741 again:
4742 ip6_rt_put(rt);
4743 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
4744 index 8c88a37392d0..636d4d893085 100644
4745 --- a/net/ipv6/ip6_fib.c
4746 +++ b/net/ipv6/ip6_fib.c
4747 @@ -289,8 +289,7 @@ struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
4748 struct rt6_info *rt;
4749
4750 rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
4751 - if (rt->rt6i_flags & RTF_REJECT &&
4752 - rt->dst.error == -EAGAIN) {
4753 + if (rt->dst.error == -EAGAIN) {
4754 ip6_rt_put(rt);
4755 rt = net->ipv6.ip6_null_entry;
4756 dst_hold(&rt->dst);
4757 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
4758 index 0a5922055da2..d2844ee469cb 100644
4759 --- a/net/ipv6/ip6_gre.c
4760 +++ b/net/ipv6/ip6_gre.c
4761 @@ -542,11 +542,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
4762
4763 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
4764
4765 - dsfield = ipv4_get_dsfield(iph);
4766 -
4767 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
4768 - fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
4769 - & IPV6_TCLASS_MASK;
4770 + dsfield = ipv4_get_dsfield(iph);
4771 + else
4772 + dsfield = ip6_tclass(t->parms.flowinfo);
4773 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
4774 fl6.flowi6_mark = skb->mark;
4775
4776 @@ -582,6 +581,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
4777 return -1;
4778
4779 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
4780 + /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
4781 + ipv6h = ipv6_hdr(skb);
4782 +
4783 if (offset > 0) {
4784 struct ipv6_tlv_tnl_enc_lim *tel;
4785 tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
4786 @@ -596,9 +598,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
4787
4788 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
4789
4790 - dsfield = ipv6_get_dsfield(ipv6h);
4791 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
4792 - fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK);
4793 + dsfield = ipv6_get_dsfield(ipv6h);
4794 + else
4795 + dsfield = ip6_tclass(t->parms.flowinfo);
4796 +
4797 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
4798 fl6.flowlabel |= ip6_flowlabel(ipv6h);
4799 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
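
Besides the dsfield change, ip6gre_xmit_ipv6() gains a re-fetch of ipv6h right after ip6_tnl_parse_tlv_enc_lim() (ip6ip6_tnl_xmit() below gets the same treatment): that helper may reallocate skb->head, and any header pointer computed earlier would then dangle. The rule generalizes to any buffer that can move; a userspace illustration with realloc():

#include <stdlib.h>
#include <string.h>

struct pkt { char *head; size_t len; };

/* May grow the packet, the way ip6_tnl_parse_tlv_enc_lim() may grow
 * skb->head: every pointer into the old buffer is stale afterwards. */
static int parse_and_maybe_grow(struct pkt *p)
{
    char *nh = realloc(p->head, p->len + 64);

    if (!nh)
        return -1;
    p->head = nh;
    return 0;
}

int main(void)
{
    struct pkt p = { malloc(40), 40 };
    char *hdr;

    if (!p.head || parse_and_maybe_grow(&p) < 0)
        return 1;

    hdr = p.head;                /* re-fetch, as the fix re-runs
                                  * ipv6h = ipv6_hdr(skb) */
    memset(hdr, 0, p.len);
    free(p.head);
    return 0;
}
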
4800 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
4801 index 3ab32ac57ccd..fd649599620e 100644
4802 --- a/net/ipv6/ip6_output.c
4803 +++ b/net/ipv6/ip6_output.c
4804 @@ -1020,8 +1020,10 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
4805 }
4806 #endif
4807 if (ipv6_addr_v4mapped(&fl6->saddr) &&
4808 - !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
4809 - return -EAFNOSUPPORT;
4810 + !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
4811 + err = -EAFNOSUPPORT;
4812 + goto out_err_release;
4813 + }
4814
4815 return 0;
4816
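
The ip6_dst_lookup_tail() fix converts a direct return -EAFNOSUPPORT into a jump to the function's existing out_err_release label, because the direct return leaked the dst reference acquired earlier in the function. The shape of the idiom, as a standalone sketch (types and helpers invented for illustration):

#include <errno.h>
#include <stdlib.h>

struct dst { int refcnt; };

static struct dst *dst_get(void)
{
    struct dst *d = calloc(1, sizeof(*d));

    if (d)
        d->refcnt = 1;
    return d;
}

static void dst_release(struct dst *d)
{
    if (d && --d->refcnt == 0)
        free(d);
}

static int lookup(struct dst **out, int family_ok)
{
    struct dst *d = dst_get();
    int err;

    if (!d)
        return -ENOMEM;

    if (!family_ok) {
        err = -EAFNOSUPPORT;
        goto out_err_release;    /* never return directly once d is held */
    }

    *out = d;
    return 0;

out_err_release:
    dst_release(d);              /* the one place every error path exits */
    *out = NULL;
    return err;
}

int main(void)
{
    struct dst *d;

    return lookup(&d, 0) == -EAFNOSUPPORT ? 0 : 1;
}
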
4817 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
4818 index 116b4da06820..1fc9daa7b1d6 100644
4819 --- a/net/ipv6/ip6_tunnel.c
4820 +++ b/net/ipv6/ip6_tunnel.c
4821 @@ -1196,7 +1196,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
4822 skb_push(skb, sizeof(struct ipv6hdr));
4823 skb_reset_network_header(skb);
4824 ipv6h = ipv6_hdr(skb);
4825 - ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
4826 + ip6_flow_hdr(ipv6h, dsfield,
4827 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
4828 ipv6h->hop_limit = hop_limit;
4829 ipv6h->nexthdr = proto;
4830 @@ -1231,8 +1231,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4831 if (tproto != IPPROTO_IPIP && tproto != 0)
4832 return -1;
4833
4834 - dsfield = ipv4_get_dsfield(iph);
4835 -
4836 if (t->parms.collect_md) {
4837 struct ip_tunnel_info *tun_info;
4838 const struct ip_tunnel_key *key;
4839 @@ -1246,6 +1244,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4840 fl6.flowi6_proto = IPPROTO_IPIP;
4841 fl6.daddr = key->u.ipv6.dst;
4842 fl6.flowlabel = key->label;
4843 + dsfield = ip6_tclass(key->label);
4844 } else {
4845 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
4846 encap_limit = t->parms.encap_limit;
4847 @@ -1254,8 +1253,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4848 fl6.flowi6_proto = IPPROTO_IPIP;
4849
4850 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
4851 - fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT)
4852 - & IPV6_TCLASS_MASK;
4853 + dsfield = ipv4_get_dsfield(iph);
4854 + else
4855 + dsfield = ip6_tclass(t->parms.flowinfo);
4856 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
4857 fl6.flowi6_mark = skb->mark;
4858 }
4859 @@ -1263,6 +1263,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4860 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
4861 return -1;
4862
4863 + dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
4864 +
4865 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
4866
4867 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
4868 @@ -1296,8 +1298,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4869 ip6_tnl_addr_conflict(t, ipv6h))
4870 return -1;
4871
4872 - dsfield = ipv6_get_dsfield(ipv6h);
4873 -
4874 if (t->parms.collect_md) {
4875 struct ip_tunnel_info *tun_info;
4876 const struct ip_tunnel_key *key;
4877 @@ -1311,8 +1311,11 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4878 fl6.flowi6_proto = IPPROTO_IPV6;
4879 fl6.daddr = key->u.ipv6.dst;
4880 fl6.flowlabel = key->label;
4881 + dsfield = ip6_tclass(key->label);
4882 } else {
4883 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
4884 + /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
4885 + ipv6h = ipv6_hdr(skb);
4886 if (offset > 0) {
4887 struct ipv6_tlv_tnl_enc_lim *tel;
4888
4889 @@ -1331,7 +1334,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4890 fl6.flowi6_proto = IPPROTO_IPV6;
4891
4892 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
4893 - fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK);
4894 + dsfield = ipv6_get_dsfield(ipv6h);
4895 + else
4896 + dsfield = ip6_tclass(t->parms.flowinfo);
4897 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
4898 fl6.flowlabel |= ip6_flowlabel(ipv6h);
4899 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
4900 @@ -1341,6 +1346,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
4901 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
4902 return -1;
4903
4904 + dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
4905 +
4906 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
4907
4908 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
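
Taken together, the ip6_gre and ip6_tunnel hunks make the traffic-class handling uniform: dsfield comes either from the inner header (IP6_TNL_F_USE_ORIG_TCLASS) or from the configured flowinfo via ip6_tclass(), and the ip4ip6/ip6ip6 transmit paths then fold the inner packet's ECN bits in with INET_ECN_encapsulate() instead of ip6_tnl_xmit() zeroing the field. A userspace replica of that fold, re-derived here for illustration (outer DSCP preserved, inner ECT copied, inner CE mapped to ECT(0)):

#include <stdio.h>

#define ECN_MASK  0x03u
#define ECN_ECT_0 0x02u
#define ECN_CE    0x03u

static unsigned char ecn_encapsulate(unsigned char outer, unsigned char inner)
{
    unsigned char in_ecn = inner & ECN_MASK;

    outer &= (unsigned char)~ECN_MASK;           /* keep outer DSCP */
    outer |= (in_ecn == ECN_CE) ? ECN_ECT_0 : in_ecn;
    return outer;
}

int main(void)
{
    /* Outer dsfield from tunnel config (EF), inner packet marked CE:
     * the outer header goes out as EF + ECT(0), i.e. 0xba. */
    printf("0x%02x\n", ecn_encapsulate(0xb8, ECN_CE));
    return 0;
}
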
4909 diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
4910 index cc8e3ae9ca73..e88bcb8ff0fd 100644
4911 --- a/net/ipv6/proc.c
4912 +++ b/net/ipv6/proc.c
4913 @@ -219,7 +219,7 @@ static void snmp6_seq_show_item64(struct seq_file *seq, void __percpu *mib,
4914 u64 buff64[SNMP_MIB_MAX];
4915 int i;
4916
4917 - memset(buff64, 0, sizeof(unsigned long) * SNMP_MIB_MAX);
4918 + memset(buff64, 0, sizeof(u64) * SNMP_MIB_MAX);
4919
4920 snmp_get_cpu_field64_batch(buff64, itemlist, mib, syncpoff);
4921 for (i = 0; itemlist[i].name; i++)
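
The one-character snmp6 fix is a sizeof mismatch: buff64 is an array of u64, yet the memset cleared sizeof(unsigned long) * SNMP_MIB_MAX bytes, leaving the top half of every counter uninitialized on 32-bit builds. Sizing from the object itself removes the whole bug class; a minimal demonstration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MIB_MAX 8

int main(void)
{
    uint64_t buff64[MIB_MAX];

    /* Fragile: repeats a type name that can drift from the array's
     * real element type (the 32-bit bug fixed above). */
    memset(buff64, 0, sizeof(unsigned long) * MIB_MAX);

    /* Robust: derive the size from the object, not from a type. */
    memset(buff64, 0, sizeof(buff64));

    printf("%zu bytes cleared\n", sizeof(buff64));
    return 0;
}
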
4922 diff --git a/net/key/af_key.c b/net/key/af_key.c
4923 index f9c9ecb0cdd3..e67c28e614b9 100644
4924 --- a/net/key/af_key.c
4925 +++ b/net/key/af_key.c
4926 @@ -1135,6 +1135,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4927 goto out;
4928 }
4929
4930 + err = -ENOBUFS;
4931 key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
4932 if (sa->sadb_sa_auth) {
4933 int keysize = 0;
4934 @@ -1146,8 +1147,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4935 if (key)
4936 keysize = (key->sadb_key_bits + 7) / 8;
4937 x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
4938 - if (!x->aalg)
4939 + if (!x->aalg) {
4940 + err = -ENOMEM;
4941 goto out;
4942 + }
4943 strcpy(x->aalg->alg_name, a->name);
4944 x->aalg->alg_key_len = 0;
4945 if (key) {
4946 @@ -1166,8 +1169,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4947 goto out;
4948 }
4949 x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
4950 - if (!x->calg)
4951 + if (!x->calg) {
4952 + err = -ENOMEM;
4953 goto out;
4954 + }
4955 strcpy(x->calg->alg_name, a->name);
4956 x->props.calgo = sa->sadb_sa_encrypt;
4957 } else {
4958 @@ -1181,8 +1186,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4959 if (key)
4960 keysize = (key->sadb_key_bits + 7) / 8;
4961 x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
4962 - if (!x->ealg)
4963 + if (!x->ealg) {
4964 + err = -ENOMEM;
4965 goto out;
4966 + }
4967 strcpy(x->ealg->alg_name, a->name);
4968 x->ealg->alg_key_len = 0;
4969 if (key) {
4970 @@ -1227,8 +1234,10 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
4971 struct xfrm_encap_tmpl *natt;
4972
4973 x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
4974 - if (!x->encap)
4975 + if (!x->encap) {
4976 + err = -ENOMEM;
4977 goto out;
4978 + }
4979
4980 natt = x->encap;
4981 n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
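
Every af_key hunk above plugs the same hole: err kept whatever value the previous check left in it, so a failed kmalloc() could fall through the shared out label reporting success or a stale error to the caller. After the fix, each failure site names its own code before jumping. A condensed sketch of the convention:

#include <errno.h>
#include <stdlib.h>

struct state { void *aalg, *ealg; };

static int build_state(struct state *x)
{
    int err = -ENOBUFS;          /* pessimistic default for early jumps */

    x->aalg = malloc(32);
    if (!x->aalg) {
        err = -ENOMEM;           /* name the failure at the failure site */
        goto out;
    }

    x->ealg = malloc(32);
    if (!x->ealg) {
        err = -ENOMEM;
        goto out;
    }

    return 0;

out:
    free(x->aalg);               /* free(NULL) is a no-op */
    free(x->ealg);
    x->aalg = x->ealg = NULL;
    return err;
}

int main(void)
{
    struct state x = { 0, 0 };
    int rc = build_state(&x);

    free(x.aalg);
    free(x.ealg);
    return rc ? 1 : 0;
}
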
4982 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
4983 index e702cb95b89b..3bce65183c95 100644
4984 --- a/net/l2tp/l2tp_core.c
4985 +++ b/net/l2tp/l2tp_core.c
4986 @@ -278,6 +278,55 @@ struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunn
4987 }
4988 EXPORT_SYMBOL_GPL(l2tp_session_find);
4989
4990 +/* Like l2tp_session_find() but takes a reference on the returned session.
4991 + * Optionally calls session->ref() too if do_ref is true.
4992 + */
4993 +struct l2tp_session *l2tp_session_get(struct net *net,
4994 + struct l2tp_tunnel *tunnel,
4995 + u32 session_id, bool do_ref)
4996 +{
4997 + struct hlist_head *session_list;
4998 + struct l2tp_session *session;
4999 +
5000 + if (!tunnel) {
5001 + struct l2tp_net *pn = l2tp_pernet(net);
5002 +
5003 + session_list = l2tp_session_id_hash_2(pn, session_id);
5004 +
5005 + rcu_read_lock_bh();
5006 + hlist_for_each_entry_rcu(session, session_list, global_hlist) {
5007 + if (session->session_id == session_id) {
5008 + l2tp_session_inc_refcount(session);
5009 + if (do_ref && session->ref)
5010 + session->ref(session);
5011 + rcu_read_unlock_bh();
5012 +
5013 + return session;
5014 + }
5015 + }
5016 + rcu_read_unlock_bh();
5017 +
5018 + return NULL;
5019 + }
5020 +
5021 + session_list = l2tp_session_id_hash(tunnel, session_id);
5022 + read_lock_bh(&tunnel->hlist_lock);
5023 + hlist_for_each_entry(session, session_list, hlist) {
5024 + if (session->session_id == session_id) {
5025 + l2tp_session_inc_refcount(session);
5026 + if (do_ref && session->ref)
5027 + session->ref(session);
5028 + read_unlock_bh(&tunnel->hlist_lock);
5029 +
5030 + return session;
5031 + }
5032 + }
5033 + read_unlock_bh(&tunnel->hlist_lock);
5034 +
5035 + return NULL;
5036 +}
5037 +EXPORT_SYMBOL_GPL(l2tp_session_get);
5038 +
5039 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
5040 bool do_ref)
5041 {
5042 @@ -307,7 +356,8 @@ EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
5043 /* Lookup a session by interface name.
5044 * This is very inefficient but is only used by management interfaces.
5045 */
5046 -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
5047 +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
5048 + bool do_ref)
5049 {
5050 struct l2tp_net *pn = l2tp_pernet(net);
5051 int hash;
5052 @@ -317,7 +367,11 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
5053 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
5054 hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) {
5055 if (!strcmp(session->ifname, ifname)) {
5056 + l2tp_session_inc_refcount(session);
5057 + if (do_ref && session->ref)
5058 + session->ref(session);
5059 rcu_read_unlock_bh();
5060 +
5061 return session;
5062 }
5063 }
5064 @@ -327,7 +381,49 @@ struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
5065
5066 return NULL;
5067 }
5068 -EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
5069 +EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname);
5070 +
5071 +static int l2tp_session_add_to_tunnel(struct l2tp_tunnel *tunnel,
5072 + struct l2tp_session *session)
5073 +{
5074 + struct l2tp_session *session_walk;
5075 + struct hlist_head *g_head;
5076 + struct hlist_head *head;
5077 + struct l2tp_net *pn;
5078 +
5079 + head = l2tp_session_id_hash(tunnel, session->session_id);
5080 +
5081 + write_lock_bh(&tunnel->hlist_lock);
5082 + hlist_for_each_entry(session_walk, head, hlist)
5083 + if (session_walk->session_id == session->session_id)
5084 + goto exist;
5085 +
5086 + if (tunnel->version == L2TP_HDR_VER_3) {
5087 + pn = l2tp_pernet(tunnel->l2tp_net);
5088 + g_head = l2tp_session_id_hash_2(l2tp_pernet(tunnel->l2tp_net),
5089 + session->session_id);
5090 +
5091 + spin_lock_bh(&pn->l2tp_session_hlist_lock);
5092 + hlist_for_each_entry(session_walk, g_head, global_hlist)
5093 + if (session_walk->session_id == session->session_id)
5094 + goto exist_glob;
5095 +
5096 + hlist_add_head_rcu(&session->global_hlist, g_head);
5097 + spin_unlock_bh(&pn->l2tp_session_hlist_lock);
5098 + }
5099 +
5100 + hlist_add_head(&session->hlist, head);
5101 + write_unlock_bh(&tunnel->hlist_lock);
5102 +
5103 + return 0;
5104 +
5105 +exist_glob:
5106 + spin_unlock_bh(&pn->l2tp_session_hlist_lock);
5107 +exist:
5108 + write_unlock_bh(&tunnel->hlist_lock);
5109 +
5110 + return -EEXIST;
5111 +}
5112
5113 /* Lookup a tunnel by id
5114 */
5115 @@ -637,6 +733,9 @@ static int l2tp_recv_data_seq(struct l2tp_session *session, struct sk_buff *skb)
5116 * a data (not control) frame before coming here. Fields up to the
5117 * session-id have already been parsed and ptr points to the data
5118 * after the session-id.
5119 + *
5120 + * session->ref() must have been called prior to l2tp_recv_common().
5121 + * session->deref() will be called automatically after skb is processed.
5122 */
5123 void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
5124 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
5125 @@ -646,14 +745,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
5126 int offset;
5127 u32 ns, nr;
5128
5129 - /* The ref count is increased since we now hold a pointer to
5130 - * the session. Take care to decrement the refcnt when exiting
5131 - * this function from now on...
5132 - */
5133 - l2tp_session_inc_refcount(session);
5134 - if (session->ref)
5135 - (*session->ref)(session);
5136 -
5137 /* Parse and check optional cookie */
5138 if (session->peer_cookie_len > 0) {
5139 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
5140 @@ -806,8 +897,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
5141 /* Try to dequeue as many skbs from reorder_q as we can. */
5142 l2tp_recv_dequeue(session);
5143
5144 - l2tp_session_dec_refcount(session);
5145 -
5146 return;
5147
5148 discard:
5149 @@ -816,8 +905,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
5150
5151 if (session->deref)
5152 (*session->deref)(session);
5153 -
5154 - l2tp_session_dec_refcount(session);
5155 }
5156 EXPORT_SYMBOL(l2tp_recv_common);
5157
5158 @@ -924,8 +1011,14 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
5159 }
5160
5161 /* Find the session context */
5162 - session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
5163 + session = l2tp_session_get(tunnel->l2tp_net, tunnel, session_id, true);
5164 if (!session || !session->recv_skb) {
5165 + if (session) {
5166 + if (session->deref)
5167 + session->deref(session);
5168 + l2tp_session_dec_refcount(session);
5169 + }
5170 +
5171 /* Not found? Pass to userspace to deal with */
5172 l2tp_info(tunnel, L2TP_MSG_DATA,
5173 "%s: no session found (%u/%u). Passing up.\n",
5174 @@ -934,6 +1027,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
5175 }
5176
5177 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
5178 + l2tp_session_dec_refcount(session);
5179
5180 return 0;
5181
5182 @@ -1740,6 +1834,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
5183 struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
5184 {
5185 struct l2tp_session *session;
5186 + int err;
5187
5188 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
5189 if (session != NULL) {
5190 @@ -1795,6 +1890,13 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
5191
5192 l2tp_session_set_header_len(session, tunnel->version);
5193
5194 + err = l2tp_session_add_to_tunnel(tunnel, session);
5195 + if (err) {
5196 + kfree(session);
5197 +
5198 + return ERR_PTR(err);
5199 + }
5200 +
5201 /* Bump the reference count. The session context is deleted
5202 * only when this drops to zero.
5203 */
5204 @@ -1804,28 +1906,14 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
5205 /* Ensure tunnel socket isn't deleted */
5206 sock_hold(tunnel->sock);
5207
5208 - /* Add session to the tunnel's hash list */
5209 - write_lock_bh(&tunnel->hlist_lock);
5210 - hlist_add_head(&session->hlist,
5211 - l2tp_session_id_hash(tunnel, session_id));
5212 - write_unlock_bh(&tunnel->hlist_lock);
5213 -
5214 - /* And to the global session list if L2TPv3 */
5215 - if (tunnel->version != L2TP_HDR_VER_2) {
5216 - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
5217 -
5218 - spin_lock_bh(&pn->l2tp_session_hlist_lock);
5219 - hlist_add_head_rcu(&session->global_hlist,
5220 - l2tp_session_id_hash_2(pn, session_id));
5221 - spin_unlock_bh(&pn->l2tp_session_hlist_lock);
5222 - }
5223 -
5224 /* Ignore management session in session count value */
5225 if (session->session_id != 0)
5226 atomic_inc(&l2tp_session_count);
5227 +
5228 + return session;
5229 }
5230
5231 - return session;
5232 + return ERR_PTR(-ENOMEM);
5233 }
5234 EXPORT_SYMBOL_GPL(l2tp_session_create);
5235
5236 diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
5237 index e7233bad65e0..0095012509ac 100644
5238 --- a/net/l2tp/l2tp_core.h
5239 +++ b/net/l2tp/l2tp_core.h
5240 @@ -240,12 +240,16 @@ static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
5241 return tunnel;
5242 }
5243
5244 +struct l2tp_session *l2tp_session_get(struct net *net,
5245 + struct l2tp_tunnel *tunnel,
5246 + u32 session_id, bool do_ref);
5247 struct l2tp_session *l2tp_session_find(struct net *net,
5248 struct l2tp_tunnel *tunnel,
5249 u32 session_id);
5250 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
5251 bool do_ref);
5252 -struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
5253 +struct l2tp_session *l2tp_session_get_by_ifname(struct net *net, char *ifname,
5254 + bool do_ref);
5255 struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
5256 struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
5257
5258 diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
5259 index 965f7e344cef..eecc64e138de 100644
5260 --- a/net/l2tp/l2tp_eth.c
5261 +++ b/net/l2tp/l2tp_eth.c
5262 @@ -223,12 +223,6 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
5263 goto out;
5264 }
5265
5266 - session = l2tp_session_find(net, tunnel, session_id);
5267 - if (session) {
5268 - rc = -EEXIST;
5269 - goto out;
5270 - }
5271 -
5272 if (cfg->ifname) {
5273 dev = dev_get_by_name(net, cfg->ifname);
5274 if (dev) {
5275 @@ -242,8 +236,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
5276
5277 session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
5278 peer_session_id, cfg);
5279 - if (!session) {
5280 - rc = -ENOMEM;
5281 + if (IS_ERR(session)) {
5282 + rc = PTR_ERR(session);
5283 goto out;
5284 }
5285
5286 diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
5287 index 20669537816e..3468d5635d0a 100644
5288 --- a/net/l2tp/l2tp_ip.c
5289 +++ b/net/l2tp/l2tp_ip.c
5290 @@ -143,19 +143,19 @@ static int l2tp_ip_recv(struct sk_buff *skb)
5291 }
5292
5293 /* Ok, this is a data packet. Lookup the session. */
5294 - session = l2tp_session_find(net, NULL, session_id);
5295 - if (session == NULL)
5296 + session = l2tp_session_get(net, NULL, session_id, true);
5297 + if (!session)
5298 goto discard;
5299
5300 tunnel = session->tunnel;
5301 - if (tunnel == NULL)
5302 - goto discard;
5303 + if (!tunnel)
5304 + goto discard_sess;
5305
5306 /* Trace packet contents, if enabled */
5307 if (tunnel->debug & L2TP_MSG_DATA) {
5308 length = min(32u, skb->len);
5309 if (!pskb_may_pull(skb, length))
5310 - goto discard;
5311 + goto discard_sess;
5312
5313 /* Point to L2TP header */
5314 optr = ptr = skb->data;
5315 @@ -165,6 +165,7 @@ static int l2tp_ip_recv(struct sk_buff *skb)
5316 }
5317
5318 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
5319 + l2tp_session_dec_refcount(session);
5320
5321 return 0;
5322
5323 @@ -203,6 +204,12 @@ static int l2tp_ip_recv(struct sk_buff *skb)
5324
5325 return sk_receive_skb(sk, skb, 1);
5326
5327 +discard_sess:
5328 + if (session->deref)
5329 + session->deref(session);
5330 + l2tp_session_dec_refcount(session);
5331 + goto discard;
5332 +
5333 discard_put:
5334 sock_put(sk);
5335
5336 diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
5337 index a4b0c9232bf1..b10abef6b0a0 100644
5338 --- a/net/l2tp/l2tp_ip6.c
5339 +++ b/net/l2tp/l2tp_ip6.c
5340 @@ -156,19 +156,19 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
5341 }
5342
5343 /* Ok, this is a data packet. Lookup the session. */
5344 - session = l2tp_session_find(net, NULL, session_id);
5345 - if (session == NULL)
5346 + session = l2tp_session_get(net, NULL, session_id, true);
5347 + if (!session)
5348 goto discard;
5349
5350 tunnel = session->tunnel;
5351 - if (tunnel == NULL)
5352 - goto discard;
5353 + if (!tunnel)
5354 + goto discard_sess;
5355
5356 /* Trace packet contents, if enabled */
5357 if (tunnel->debug & L2TP_MSG_DATA) {
5358 length = min(32u, skb->len);
5359 if (!pskb_may_pull(skb, length))
5360 - goto discard;
5361 + goto discard_sess;
5362
5363 /* Point to L2TP header */
5364 optr = ptr = skb->data;
5365 @@ -179,6 +179,8 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
5366
5367 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len,
5368 tunnel->recv_payload_hook);
5369 + l2tp_session_dec_refcount(session);
5370 +
5371 return 0;
5372
5373 pass_up:
5374 @@ -216,6 +218,12 @@ static int l2tp_ip6_recv(struct sk_buff *skb)
5375
5376 return sk_receive_skb(sk, skb, 1);
5377
5378 +discard_sess:
5379 + if (session->deref)
5380 + session->deref(session);
5381 + l2tp_session_dec_refcount(session);
5382 + goto discard;
5383 +
5384 discard_put:
5385 sock_put(sk);
5386
5387 diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
5388 index 9f66272b163b..1ccd310d01a5 100644
5389 --- a/net/l2tp/l2tp_netlink.c
5390 +++ b/net/l2tp/l2tp_netlink.c
5391 @@ -55,7 +55,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq,
5392 /* Accessed under genl lock */
5393 static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
5394
5395 -static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
5396 +static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info,
5397 + bool do_ref)
5398 {
5399 u32 tunnel_id;
5400 u32 session_id;
5401 @@ -66,14 +67,15 @@ static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
5402
5403 if (info->attrs[L2TP_ATTR_IFNAME]) {
5404 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
5405 - session = l2tp_session_find_by_ifname(net, ifname);
5406 + session = l2tp_session_get_by_ifname(net, ifname, do_ref);
5407 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
5408 (info->attrs[L2TP_ATTR_CONN_ID])) {
5409 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
5410 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
5411 tunnel = l2tp_tunnel_find(net, tunnel_id);
5412 if (tunnel)
5413 - session = l2tp_session_find(net, tunnel, session_id);
5414 + session = l2tp_session_get(net, tunnel, session_id,
5415 + do_ref);
5416 }
5417
5418 return session;
5419 @@ -634,10 +636,12 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf
5420 session_id, peer_session_id, &cfg);
5421
5422 if (ret >= 0) {
5423 - session = l2tp_session_find(net, tunnel, session_id);
5424 - if (session)
5425 + session = l2tp_session_get(net, tunnel, session_id, false);
5426 + if (session) {
5427 ret = l2tp_session_notify(&l2tp_nl_family, info, session,
5428 L2TP_CMD_SESSION_CREATE);
5429 + l2tp_session_dec_refcount(session);
5430 + }
5431 }
5432
5433 out:
5434 @@ -650,7 +654,7 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
5435 struct l2tp_session *session;
5436 u16 pw_type;
5437
5438 - session = l2tp_nl_session_find(info);
5439 + session = l2tp_nl_session_get(info, true);
5440 if (session == NULL) {
5441 ret = -ENODEV;
5442 goto out;
5443 @@ -664,6 +668,10 @@ static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *inf
5444 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
5445 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
5446
5447 + if (session->deref)
5448 + session->deref(session);
5449 + l2tp_session_dec_refcount(session);
5450 +
5451 out:
5452 return ret;
5453 }
5454 @@ -673,7 +681,7 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
5455 int ret = 0;
5456 struct l2tp_session *session;
5457
5458 - session = l2tp_nl_session_find(info);
5459 + session = l2tp_nl_session_get(info, false);
5460 if (session == NULL) {
5461 ret = -ENODEV;
5462 goto out;
5463 @@ -708,6 +716,8 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
5464 ret = l2tp_session_notify(&l2tp_nl_family, info,
5465 session, L2TP_CMD_SESSION_MODIFY);
5466
5467 + l2tp_session_dec_refcount(session);
5468 +
5469 out:
5470 return ret;
5471 }
5472 @@ -803,29 +813,34 @@ static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
5473 struct sk_buff *msg;
5474 int ret;
5475
5476 - session = l2tp_nl_session_find(info);
5477 + session = l2tp_nl_session_get(info, false);
5478 if (session == NULL) {
5479 ret = -ENODEV;
5480 - goto out;
5481 + goto err;
5482 }
5483
5484 msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
5485 if (!msg) {
5486 ret = -ENOMEM;
5487 - goto out;
5488 + goto err_ref;
5489 }
5490
5491 ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
5492 0, session, L2TP_CMD_SESSION_GET);
5493 if (ret < 0)
5494 - goto err_out;
5495 + goto err_ref_msg;
5496
5497 - return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
5498 + ret = genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
5499
5500 -err_out:
5501 - nlmsg_free(msg);
5502 + l2tp_session_dec_refcount(session);
5503
5504 -out:
5505 + return ret;
5506 +
5507 +err_ref_msg:
5508 + nlmsg_free(msg);
5509 +err_ref:
5510 + l2tp_session_dec_refcount(session);
5511 +err:
5512 return ret;
5513 }
5514
5515 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
5516 index 1387f547a09e..1696f1fd5877 100644
5517 --- a/net/l2tp/l2tp_ppp.c
5518 +++ b/net/l2tp/l2tp_ppp.c
5519 @@ -583,6 +583,7 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
5520 int error = 0;
5521 u32 tunnel_id, peer_tunnel_id;
5522 u32 session_id, peer_session_id;
5523 + bool drop_refcnt = false;
5524 int ver = 2;
5525 int fd;
5526
5527 @@ -684,36 +685,36 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
5528 if (tunnel->peer_tunnel_id == 0)
5529 tunnel->peer_tunnel_id = peer_tunnel_id;
5530
5531 - /* Create session if it doesn't already exist. We handle the
5532 - * case where a session was previously created by the netlink
5533 - * interface by checking that the session doesn't already have
5534 - * a socket and its tunnel socket are what we expect. If any
5535 - * of those checks fail, return EEXIST to the caller.
5536 - */
5537 - session = l2tp_session_find(sock_net(sk), tunnel, session_id);
5538 - if (session == NULL) {
5539 - /* Default MTU must allow space for UDP/L2TP/PPP
5540 - * headers.
5541 + session = l2tp_session_get(sock_net(sk), tunnel, session_id, false);
5542 + if (session) {
5543 + drop_refcnt = true;
5544 + ps = l2tp_session_priv(session);
5545 +
5546 + /* Using a pre-existing session is fine as long as it hasn't
5547 + * been connected yet.
5548 */
5549 - cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
5550 + if (ps->sock) {
5551 + error = -EEXIST;
5552 + goto end;
5553 + }
5554
5555 - /* Allocate and initialize a new session context. */
5556 - session = l2tp_session_create(sizeof(struct pppol2tp_session),
5557 - tunnel, session_id,
5558 - peer_session_id, &cfg);
5559 - if (session == NULL) {
5560 - error = -ENOMEM;
5561 + /* consistency checks */
5562 + if (ps->tunnel_sock != tunnel->sock) {
5563 + error = -EEXIST;
5564 goto end;
5565 }
5566 } else {
5567 - ps = l2tp_session_priv(session);
5568 - error = -EEXIST;
5569 - if (ps->sock != NULL)
5570 - goto end;
5571 + /* Default MTU must allow space for UDP/L2TP/PPP headers */
5572 + cfg.mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
5573 + cfg.mru = cfg.mtu;
5574
5575 - /* consistency checks */
5576 - if (ps->tunnel_sock != tunnel->sock)
5577 + session = l2tp_session_create(sizeof(struct pppol2tp_session),
5578 + tunnel, session_id,
5579 + peer_session_id, &cfg);
5580 + if (IS_ERR(session)) {
5581 + error = PTR_ERR(session);
5582 goto end;
5583 + }
5584 }
5585
5586 /* Associate session with its PPPoL2TP socket */
5587 @@ -778,6 +779,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
5588 session->name);
5589
5590 end:
5591 + if (drop_refcnt)
5592 + l2tp_session_dec_refcount(session);
5593 release_sock(sk);
5594
5595 return error;
5596 @@ -805,12 +808,6 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
5597 if (tunnel->sock == NULL)
5598 goto out;
5599
5600 - /* Check that this session doesn't already exist */
5601 - error = -EEXIST;
5602 - session = l2tp_session_find(net, tunnel, session_id);
5603 - if (session != NULL)
5604 - goto out;
5605 -
5606 /* Default MTU values. */
5607 if (cfg->mtu == 0)
5608 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
5609 @@ -818,12 +815,13 @@ static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_i
5610 cfg->mru = cfg->mtu;
5611
5612 /* Allocate and initialize a new session context. */
5613 - error = -ENOMEM;
5614 session = l2tp_session_create(sizeof(struct pppol2tp_session),
5615 tunnel, session_id,
5616 peer_session_id, cfg);
5617 - if (session == NULL)
5618 + if (IS_ERR(session)) {
5619 + error = PTR_ERR(session);
5620 goto out;
5621 + }
5622
5623 ps = l2tp_session_priv(session);
5624 ps->tunnel_sock = tunnel->sock;
5625 @@ -1141,11 +1139,18 @@ static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
5626 if (stats.session_id != 0) {
5627 /* resend to session ioctl handler */
5628 struct l2tp_session *session =
5629 - l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
5630 - if (session != NULL)
5631 - err = pppol2tp_session_ioctl(session, cmd, arg);
5632 - else
5633 + l2tp_session_get(sock_net(sk), tunnel,
5634 + stats.session_id, true);
5635 +
5636 + if (session) {
5637 + err = pppol2tp_session_ioctl(session, cmd,
5638 + arg);
5639 + if (session->deref)
5640 + session->deref(session);
5641 + l2tp_session_dec_refcount(session);
5642 + } else {
5643 err = -EBADR;
5644 + }
5645 break;
5646 }
5647 #ifdef CONFIG_XFRM
5648 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
5649 index 8d7747e98fdb..37bec0f864b7 100644
5650 --- a/net/mac80211/iface.c
5651 +++ b/net/mac80211/iface.c
5652 @@ -6,6 +6,7 @@
5653 * Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
5654 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
5655 * Copyright 2013-2014 Intel Mobile Communications GmbH
5656 + * Copyright (c) 2016 Intel Deutschland GmbH
5657 *
5658 * This program is free software; you can redistribute it and/or modify
5659 * it under the terms of the GNU General Public License version 2 as
5660 @@ -1307,6 +1308,26 @@ static void ieee80211_iface_work(struct work_struct *work)
5661 } else if (ieee80211_is_action(mgmt->frame_control) &&
5662 mgmt->u.action.category == WLAN_CATEGORY_VHT) {
5663 switch (mgmt->u.action.u.vht_group_notif.action_code) {
5664 + case WLAN_VHT_ACTION_OPMODE_NOTIF: {
5665 + struct ieee80211_rx_status *status;
5666 + enum nl80211_band band;
5667 + u8 opmode;
5668 +
5669 + status = IEEE80211_SKB_RXCB(skb);
5670 + band = status->band;
5671 + opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
5672 +
5673 + mutex_lock(&local->sta_mtx);
5674 + sta = sta_info_get_bss(sdata, mgmt->sa);
5675 +
5676 + if (sta)
5677 + ieee80211_vht_handle_opmode(sdata, sta,
5678 + opmode,
5679 + band);
5680 +
5681 + mutex_unlock(&local->sta_mtx);
5682 + break;
5683 + }
5684 case WLAN_VHT_ACTION_GROUPID_MGMT:
5685 ieee80211_process_mu_groups(sdata, mgmt);
5686 break;
5687 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
5688 index 1075ac24c8c5..2bb6899854d4 100644
5689 --- a/net/mac80211/main.c
5690 +++ b/net/mac80211/main.c
5691 @@ -908,12 +908,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
5692 supp_ht = supp_ht || sband->ht_cap.ht_supported;
5693 supp_vht = supp_vht || sband->vht_cap.vht_supported;
5694
5695 - if (sband->ht_cap.ht_supported)
5696 - local->rx_chains =
5697 - max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
5698 - local->rx_chains);
5699 + if (!sband->ht_cap.ht_supported)
5700 + continue;
5701
5702 /* TODO: consider VHT for RX chains, hopefully it's the same */
5703 + local->rx_chains =
5704 + max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
5705 + local->rx_chains);
5706 +
5707 + /* no need to mask, SM_PS_DISABLED has all bits set */
5708 + sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
5709 + IEEE80211_HT_CAP_SM_PS_SHIFT;
5710 }
5711
5712 /* if low-level driver supports AP, we also support VLAN */
5713 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
5714 index c45a0fcfb3e7..439e597fd374 100644
5715 --- a/net/mac80211/rx.c
5716 +++ b/net/mac80211/rx.c
5717 @@ -2923,17 +2923,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
5718
5719 switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
5720 case WLAN_VHT_ACTION_OPMODE_NOTIF: {
5721 - u8 opmode;
5722 -
5723 /* verify opmode is present */
5724 if (len < IEEE80211_MIN_ACTION_SIZE + 2)
5725 goto invalid;
5726 -
5727 - opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
5728 -
5729 - ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
5730 - opmode, status->band);
5731 - goto handled;
5732 + goto queue;
5733 }
5734 case WLAN_VHT_ACTION_GROUPID_MGMT: {
5735 if (len < IEEE80211_MIN_ACTION_SIZE + 25)
5736 diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
5737 index 6832bf6ab69f..43e45bb660bc 100644
5738 --- a/net/mac80211/vht.c
5739 +++ b/net/mac80211/vht.c
5740 @@ -527,8 +527,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
5741
5742 u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
5743
5744 - if (changed > 0)
5745 + if (changed > 0) {
5746 + ieee80211_recalc_min_chandef(sdata);
5747 rate_control_rate_update(local, sband, sta, changed);
5748 + }
5749 }
5750
5751 void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
5752 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
5753 index 27540455dc62..04111c1c3988 100644
5754 --- a/net/netfilter/nf_conntrack_netlink.c
5755 +++ b/net/netfilter/nf_conntrack_netlink.c
5756 @@ -45,6 +45,8 @@
5757 #include <net/netfilter/nf_conntrack_zones.h>
5758 #include <net/netfilter/nf_conntrack_timestamp.h>
5759 #include <net/netfilter/nf_conntrack_labels.h>
5760 +#include <net/netfilter/nf_conntrack_seqadj.h>
5761 +#include <net/netfilter/nf_conntrack_synproxy.h>
5762 #ifdef CONFIG_NF_NAT_NEEDED
5763 #include <net/netfilter/nf_nat_core.h>
5764 #include <net/netfilter/nf_nat_l4proto.h>
5765 @@ -1800,6 +1802,8 @@ ctnetlink_create_conntrack(struct net *net,
5766 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
5767 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
5768 nf_ct_labels_ext_add(ct);
5769 + nfct_seqadj_ext_add(ct);
5770 + nfct_synproxy_ext_add(ct);
5771
5772 /* we must add conntrack extensions before confirmation. */
5773 ct->status |= IPS_CONFIRMED;
5774 diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
5775 index 872db2d0e2a9..119e51fdcebc 100644
5776 --- a/net/netfilter/xt_TCPMSS.c
5777 +++ b/net/netfilter/xt_TCPMSS.c
5778 @@ -104,7 +104,7 @@ tcpmss_mangle_packet(struct sk_buff *skb,
5779 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
5780 tcp_hdrlen = tcph->doff * 4;
5781
5782 - if (len < tcp_hdrlen)
5783 + if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
5784 return -1;
5785
5786 if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
5787 @@ -152,6 +152,10 @@ tcpmss_mangle_packet(struct sk_buff *skb,
5788 if (len > tcp_hdrlen)
5789 return 0;
5790
5791 + /* tcph->doff has 4 bits, do not wrap it to 0 */
5792 + if (tcp_hdrlen >= 15 * 4)
5793 + return 0;
5794 +
5795 /*
5796 * MSS Option not found ?! add it..
5797 */
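
tcpmss_mangle_packet() gains two bounds checks: the header length must be at least sizeof(struct tcphdr) before the options are parsed, and since tcph->doff is a 4-bit count of 32-bit words, a header already at the 60-byte maximum (doff == 15) must not be grown, as incrementing doff would wrap it to 0. The wrap is easy to reproduce with an equivalent 4-bit field:

#include <stdio.h>

struct hdr {
    unsigned int doff : 4;       /* header length in 32-bit words, max 15 */
};

int main(void)
{
    struct hdr h = { .doff = 15 };       /* 15 * 4 = 60-byte header */

    h.doff += 1;                         /* grow by one option word... */
    printf("doff=%u len=%u\n", h.doff, h.doff * 4u);   /* doff=0 len=0 */
    return 0;
}
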
5798 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
5799 index e1719c695174..9647e314d4fc 100644
5800 --- a/net/sctp/socket.c
5801 +++ b/net/sctp/socket.c
5802 @@ -239,7 +239,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
5803 union sctp_addr *laddr = (union sctp_addr *)addr;
5804 struct sctp_transport *transport;
5805
5806 - if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
5807 + if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
5808 return NULL;
5809
5810 addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
5811 @@ -4460,13 +4460,13 @@ int sctp_for_each_endpoint(int (*cb)(struct sctp_endpoint *, void *),
5812
5813 for (head = sctp_ep_hashtable; hash < sctp_ep_hashsize;
5814 hash++, head++) {
5815 - read_lock(&head->lock);
5816 + read_lock_bh(&head->lock);
5817 sctp_for_each_hentry(epb, &head->chain) {
5818 err = cb(sctp_ep(epb), p);
5819 if (err)
5820 break;
5821 }
5822 - read_unlock(&head->lock);
5823 + read_unlock_bh(&head->lock);
5824 }
5825
5826 return err;
5827 @@ -4506,9 +4506,8 @@ int sctp_for_each_transport(int (*cb)(struct sctp_transport *, void *),
5828 if (err)
5829 return err;
5830
5831 - sctp_transport_get_idx(net, &hti, pos);
5832 - obj = sctp_transport_get_next(net, &hti);
5833 - for (; obj && !IS_ERR(obj); obj = sctp_transport_get_next(net, &hti)) {
5834 + obj = sctp_transport_get_idx(net, &hti, pos + 1);
5835 + for (; !IS_ERR_OR_NULL(obj); obj = sctp_transport_get_next(net, &hti)) {
5836 struct sctp_transport *transport = obj;
5837
5838 if (!sctp_transport_hold(transport))
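
Three independent sctp fixes sit above: sctp_addr_id2transport() rejects a NULL af before af->sockaddr_len is touched, the endpoint walk takes the BH-safe read_lock_bh() so a softirq taking the same hash lock on this CPU cannot deadlock against it, and the transport walk resumes from pos + 1 with IS_ERR_OR_NULL() covering both terminations. The NULL-guard ordering relies on short-circuit evaluation; a stand-alone rendering (helper invented for illustration, nonzero meaning error as in sctp_verify_addr()):

#include <stddef.h>

struct af_ops { size_t sockaddr_len; };

static int verify_addr(size_t len) { return len == 0; }  /* stand-in */

static int transport_lookup_ok(const struct af_ops *af)
{
    /* `!af` must come first: || evaluates left to right and stops at
     * the first true operand, so af is never dereferenced when NULL. */
    if (!af || verify_addr(af->sockaddr_len))
        return 0;
    return 1;
}

int main(void)
{
    struct af_ops af = { sizeof(long) };

    return (transport_lookup_ok(NULL) == 0 &&
            transport_lookup_ok(&af) == 1) ? 0 : 1;
}
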
5839 diff --git a/net/tipc/discover.c b/net/tipc/discover.c
5840 index 6b109a808d4c..02462d67d191 100644
5841 --- a/net/tipc/discover.c
5842 +++ b/net/tipc/discover.c
5843 @@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
5844
5845 /* Send response, if necessary */
5846 if (respond && (mtyp == DSC_REQ_MSG)) {
5847 - rskb = tipc_buf_acquire(MAX_H_SIZE);
5848 + rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
5849 if (!rskb)
5850 return;
5851 tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
5852 @@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
5853 req = kmalloc(sizeof(*req), GFP_ATOMIC);
5854 if (!req)
5855 return -ENOMEM;
5856 - req->buf = tipc_buf_acquire(MAX_H_SIZE);
5857 + req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
5858 if (!req->buf) {
5859 kfree(req);
5860 return -ENOMEM;
5861 diff --git a/net/tipc/link.c b/net/tipc/link.c
5862 index bda89bf9f4ff..4e8647aef01c 100644
5863 --- a/net/tipc/link.c
5864 +++ b/net/tipc/link.c
5865 @@ -1395,7 +1395,7 @@ void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
5866 msg_set_seqno(hdr, seqno++);
5867 pktlen = msg_size(hdr);
5868 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
5869 - tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
5870 + tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
5871 if (!tnlskb) {
5872 pr_warn("%sunable to send packet\n", link_co_err);
5873 return;
5874 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
5875 index 17201aa8423d..56ea0adcd285 100644
5876 --- a/net/tipc/msg.c
5877 +++ b/net/tipc/msg.c
5878 @@ -58,12 +58,12 @@ static unsigned int align(unsigned int i)
5879 * NOTE: Headroom is reserved to allow prepending of a data link header.
5880 * There may also be unrequested tailroom present at the buffer's end.
5881 */
5882 -struct sk_buff *tipc_buf_acquire(u32 size)
5883 +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
5884 {
5885 struct sk_buff *skb;
5886 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
5887
5888 - skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
5889 + skb = alloc_skb_fclone(buf_size, gfp);
5890 if (skb) {
5891 skb_reserve(skb, BUF_HEADROOM);
5892 skb_put(skb, size);
5893 @@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
5894 struct tipc_msg *msg;
5895 struct sk_buff *buf;
5896
5897 - buf = tipc_buf_acquire(hdr_sz + data_sz);
5898 + buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
5899 if (unlikely(!buf))
5900 return NULL;
5901
5902 @@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
5903
5904 /* No fragmentation needed? */
5905 if (likely(msz <= pktmax)) {
5906 - skb = tipc_buf_acquire(msz);
5907 + skb = tipc_buf_acquire(msz, GFP_KERNEL);
5908 if (unlikely(!skb))
5909 return -ENOMEM;
5910 skb_orphan(skb);
5911 @@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
5912 msg_set_importance(&pkthdr, msg_importance(mhdr));
5913
5914 /* Prepare first fragment */
5915 - skb = tipc_buf_acquire(pktmax);
5916 + skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
5917 if (!skb)
5918 return -ENOMEM;
5919 skb_orphan(skb);
5920 @@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
5921 pktsz = drem + INT_H_SIZE;
5922 else
5923 pktsz = pktmax;
5924 - skb = tipc_buf_acquire(pktsz);
5925 + skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
5926 if (!skb) {
5927 rc = -ENOMEM;
5928 goto error;
5929 @@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
5930 if (msz > (max / 2))
5931 return false;
5932
5933 - _skb = tipc_buf_acquire(max);
5934 + _skb = tipc_buf_acquire(max, GFP_ATOMIC);
5935 if (!_skb)
5936 return false;
5937
5938 @@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
5939
5940 /* Never return SHORT header; expand by replacing buffer if necessary */
5941 if (msg_short(hdr)) {
5942 - *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
5943 + *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
5944 if (!*skb)
5945 goto exit;
5946 memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
5947 @@ -508,7 +508,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
5948 }
5949
5950 if (skb_cloned(_skb) &&
5951 - pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_KERNEL))
5952 + pskb_expand_head(_skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC))
5953 goto exit;
5954
5955 /* Now reverse the concerned fields */
5956 diff --git a/net/tipc/msg.h b/net/tipc/msg.h
5957 index 50a739860d37..6c0455caf302 100644
5958 --- a/net/tipc/msg.h
5959 +++ b/net/tipc/msg.h
5960 @@ -820,7 +820,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
5961 return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
5962 }
5963
5964 -struct sk_buff *tipc_buf_acquire(u32 size);
5965 +struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
5966 bool tipc_msg_validate(struct sk_buff *skb);
5967 bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
5968 void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
5969 diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
5970 index c1cfd92de17a..23f8899e0f8c 100644
5971 --- a/net/tipc/name_distr.c
5972 +++ b/net/tipc/name_distr.c
5973 @@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
5974 u32 dest)
5975 {
5976 struct tipc_net *tn = net_generic(net, tipc_net_id);
5977 - struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
5978 + struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
5979 struct tipc_msg *msg;
5980
5981 if (buf != NULL) {
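
All the tipc hunks thread one new parameter: tipc_buf_acquire() takes a gfp_t, so the message-build path running in process context can sleep with GFP_KERNEL while the discovery timer and link-tunnelling paths keep GFP_ATOMIC. The allocation context becomes the caller's declaration rather than the helper's worst-case guess; a sketch of the API shape (flag names borrowed from the kernel, bodies illustrative):

#include <stdlib.h>

typedef unsigned int gfp_t;
#define GFP_KERNEL 0x01u         /* caller may sleep and reclaim */
#define GFP_ATOMIC 0x02u         /* caller must not sleep (timer, softirq) */

/* The helper forwards the caller's context instead of hard-coding the
 * most restrictive mode for everyone. */
static void *buf_acquire(size_t size, gfp_t gfp)
{
    (void)gfp;                   /* a real kernel allocator honors this */
    return calloc(1, size);
}

static void *msg_build(size_t sz)    { return buf_acquire(sz, GFP_KERNEL); }
static void *disc_timeout(size_t sz) { return buf_acquire(sz, GFP_ATOMIC); }

int main(void)
{
    void *a = msg_build(64);
    void *b = disc_timeout(64);

    free(a);
    free(b);
    return 0;
}
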
5982 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
5983 index 2d03d5bcb5b9..915abe98174e 100644
5984 --- a/net/unix/af_unix.c
5985 +++ b/net/unix/af_unix.c
5986 @@ -998,7 +998,8 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
5987 struct path path = { NULL, NULL };
5988
5989 err = -EINVAL;
5990 - if (sunaddr->sun_family != AF_UNIX)
5991 + if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
5992 + sunaddr->sun_family != AF_UNIX)
5993 goto out;
5994
5995 if (addr_len == sizeof(short)) {
5996 @@ -1109,6 +1110,10 @@ static int unix_dgram_connect(struct socket *sock, struct sockaddr *addr,
5997 unsigned int hash;
5998 int err;
5999
6000 + err = -EINVAL;
6001 + if (alen < offsetofend(struct sockaddr, sa_family))
6002 + goto out;
6003 +
6004 if (addr->sa_family != AF_UNSPEC) {
6005 err = unix_mkname(sunaddr, alen, &hash);
6006 if (err < 0)
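
Both af_unix hunks validate the caller-supplied address length before sa_family/sun_family is read: a length below offsetofend(struct sockaddr_un, sun_family) means the family field is not fully inside the buffer the caller provided. offsetofend() is a thin macro over offsetof(); a userspace rendering matching the kernel macro of the same name:

#include <stddef.h>
#include <stdio.h>
#include <sys/un.h>

/* First byte *after* member m of type T. */
#define offsetofend(T, m) (offsetof(T, m) + sizeof(((T *)0)->m))

static int addr_len_ok(size_t addr_len)
{
    /* sun_family must lie entirely within the supplied bytes */
    return addr_len >= offsetofend(struct sockaddr_un, sun_family);
}

int main(void)
{
    printf("%d %d\n", addr_len_ok(1),
           addr_len_ok(sizeof(struct sockaddr_un)));    /* prints: 0 1 */
    return 0;
}
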
6007 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
6008 index e0437a7aa1a2..8da67f7c9c5a 100644
6009 --- a/net/xfrm/xfrm_policy.c
6010 +++ b/net/xfrm/xfrm_policy.c
6011 @@ -1808,43 +1808,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
6012 goto out;
6013 }
6014
6015 -#ifdef CONFIG_XFRM_SUB_POLICY
6016 -static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
6017 -{
6018 - if (!*target) {
6019 - *target = kmalloc(size, GFP_ATOMIC);
6020 - if (!*target)
6021 - return -ENOMEM;
6022 - }
6023 -
6024 - memcpy(*target, src, size);
6025 - return 0;
6026 -}
6027 -#endif
6028 -
6029 -static int xfrm_dst_update_parent(struct dst_entry *dst,
6030 - const struct xfrm_selector *sel)
6031 -{
6032 -#ifdef CONFIG_XFRM_SUB_POLICY
6033 - struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
6034 - return xfrm_dst_alloc_copy((void **)&(xdst->partner),
6035 - sel, sizeof(*sel));
6036 -#else
6037 - return 0;
6038 -#endif
6039 -}
6040 -
6041 -static int xfrm_dst_update_origin(struct dst_entry *dst,
6042 - const struct flowi *fl)
6043 -{
6044 -#ifdef CONFIG_XFRM_SUB_POLICY
6045 - struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
6046 - return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
6047 -#else
6048 - return 0;
6049 -#endif
6050 -}
6051 -
6052 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
6053 struct xfrm_policy **pols,
6054 int *num_pols, int *num_xfrms)
6055 @@ -1916,16 +1879,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
6056
6057 xdst = (struct xfrm_dst *)dst;
6058 xdst->num_xfrms = err;
6059 - if (num_pols > 1)
6060 - err = xfrm_dst_update_parent(dst, &pols[1]->selector);
6061 - else
6062 - err = xfrm_dst_update_origin(dst, fl);
6063 - if (unlikely(err)) {
6064 - dst_free(dst);
6065 - XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
6066 - return ERR_PTR(err);
6067 - }
6068 -
6069 xdst->num_pols = num_pols;
6070 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
6071 xdst->policy_genid = atomic_read(&pols[0]->genid);
6072 diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
6073 index 373fcad840ea..776dffa88aee 100644
6074 --- a/sound/pci/hda/hda_codec.h
6075 +++ b/sound/pci/hda/hda_codec.h
6076 @@ -294,6 +294,8 @@ struct hda_codec {
6077
6078 #define list_for_each_codec(c, bus) \
6079 list_for_each_entry(c, &(bus)->core.codec_list, core.list)
6080 +#define list_for_each_codec_safe(c, n, bus) \
6081 + list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
6082
6083 /* snd_hda_codec_read/write optional flags */
6084 #define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
6085 diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
6086 index 500878556578..0af1132a869e 100644
6087 --- a/sound/pci/hda/hda_controller.c
6088 +++ b/sound/pci/hda/hda_controller.c
6089 @@ -1333,8 +1333,12 @@ EXPORT_SYMBOL_GPL(azx_probe_codecs);
6090 /* configure each codec instance */
6091 int azx_codec_configure(struct azx *chip)
6092 {
6093 - struct hda_codec *codec;
6094 - list_for_each_codec(codec, &chip->bus) {
6095 + struct hda_codec *codec, *next;
6096 +
6097 + /* use _safe version here since snd_hda_codec_configure() deregisters
6098 + * the device upon error and deletes itself from the bus list.
6099 + */
6100 + list_for_each_codec_safe(codec, next, &chip->bus) {
6101 snd_hda_codec_configure(codec);
6102 }
6103 return 0;
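
list_for_each_codec_safe() is the standard _safe iteration trick: the next pointer is cached before the loop body runs, so the body may unlink and free the current entry, which is exactly what snd_hda_codec_configure() can do on error. The same idea on a plain singly-linked list:

#include <stdlib.h>

struct node { struct node *next; };

/* `n` is read before the body may invalidate `c`, mirroring what
 * list_for_each_entry_safe() does for kernel lists. */
static void free_all(struct node **head)
{
    struct node *c, *n;

    for (c = *head; c; c = n) {
        n = c->next;             /* saved first... */
        free(c);                 /* ...so destroying c is safe */
    }
    *head = NULL;
}

int main(void)
{
    struct node *head = calloc(1, sizeof(*head));

    if (head)
        head->next = calloc(1, sizeof(struct node));
    free_all(&head);
    return 0;
}
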
6104 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
6105 index e7c8f4f076d5..b0bd29003b5d 100644
6106 --- a/sound/pci/hda/hda_generic.c
6107 +++ b/sound/pci/hda/hda_generic.c
6108 @@ -3169,6 +3169,7 @@ static int check_dyn_adc_switch(struct hda_codec *codec)
6109 spec->input_paths[i][nums]);
6110 spec->input_paths[i][nums] =
6111 spec->input_paths[i][n];
6112 + spec->input_paths[i][n] = 0;
6113 }
6114 }
6115 nums++;
6116 diff --git a/tools/arch/x86/lib/memcpy_64.S b/tools/arch/x86/lib/memcpy_64.S
6117 index 49e6ebac7e73..98dcc112b363 100644
6118 --- a/tools/arch/x86/lib/memcpy_64.S
6119 +++ b/tools/arch/x86/lib/memcpy_64.S
6120 @@ -286,7 +286,7 @@ ENDPROC(memcpy_mcsafe_unrolled)
6121 _ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
6122 _ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
6123 _ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
6124 - _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
6125 + _ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
6126 _ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
6127 _ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
6128 _ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
6129 diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
6130 index 5e0dea2cdc01..039636ffb6c8 100644
6131 --- a/tools/objtool/arch/x86/decode.c
6132 +++ b/tools/objtool/arch/x86/decode.c
6133 @@ -150,9 +150,9 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
6134 *type = INSN_RETURN;
6135 break;
6136
6137 - case 0xc5: /* iret */
6138 case 0xca: /* retf */
6139 case 0xcb: /* retf */
6140 + case 0xcf: /* iret */
6141 *type = INSN_CONTEXT_SWITCH;
6142 break;
6143
6144 diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
6145 index e8a1f699058a..b8dadb050d2b 100644
6146 --- a/tools/objtool/builtin-check.c
6147 +++ b/tools/objtool/builtin-check.c
6148 @@ -757,11 +757,20 @@ static struct rela *find_switch_table(struct objtool_file *file,
6149 insn->jump_dest->offset > orig_insn->offset))
6150 break;
6151
6152 + /* look for a relocation which references .rodata */
6153 text_rela = find_rela_by_dest_range(insn->sec, insn->offset,
6154 insn->len);
6155 - if (text_rela && text_rela->sym == file->rodata->sym)
6156 - return find_rela_by_dest(file->rodata,
6157 - text_rela->addend);
6158 + if (!text_rela || text_rela->sym != file->rodata->sym)
6159 + continue;
6160 +
6161 + /*
6162 + * Make sure the .rodata address isn't associated with a
6163 + * symbol. gcc jump tables are anonymous data.
6164 + */
6165 + if (find_symbol_containing(file->rodata, text_rela->addend))
6166 + continue;
6167 +
6168 + return find_rela_by_dest(file->rodata, text_rela->addend);
6169 }
6170
6171 return NULL;
6172 diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
6173 index 0d7983ac63ef..d897702ce742 100644
6174 --- a/tools/objtool/elf.c
6175 +++ b/tools/objtool/elf.c
6176 @@ -85,6 +85,18 @@ struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset)
6177 return NULL;
6178 }
6179
6180 +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset)
6181 +{
6182 + struct symbol *sym;
6183 +
6184 + list_for_each_entry(sym, &sec->symbol_list, list)
6185 + if (sym->type != STT_SECTION &&
6186 + offset >= sym->offset && offset < sym->offset + sym->len)
6187 + return sym;
6188 +
6189 + return NULL;
6190 +}
6191 +
6192 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
6193 unsigned int len)
6194 {
6195 diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
6196 index aa1ff6596684..731973e1a3f5 100644
6197 --- a/tools/objtool/elf.h
6198 +++ b/tools/objtool/elf.h
6199 @@ -79,6 +79,7 @@ struct elf {
6200 struct elf *elf_open(const char *name);
6201 struct section *find_section_by_name(struct elf *elf, const char *name);
6202 struct symbol *find_symbol_by_offset(struct section *sec, unsigned long offset);
6203 +struct symbol *find_symbol_containing(struct section *sec, unsigned long offset);
6204 struct rela *find_rela_by_dest(struct section *sec, unsigned long offset);
6205 struct rela *find_rela_by_dest_range(struct section *sec, unsigned long offset,
6206 unsigned int len);
6207 diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
6208 index d281ae2b54e8..1d9c02bc00f1 100644
6209 --- a/tools/perf/util/probe-event.c
6210 +++ b/tools/perf/util/probe-event.c
6211 @@ -645,18 +645,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
6212 return ret;
6213 }
6214
6215 -static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
6216 - int ntevs, const char *module)
6217 +static int
6218 +post_process_module_probe_trace_events(struct probe_trace_event *tevs,
6219 + int ntevs, const char *module,
6220 + struct debuginfo *dinfo)
6221 {
6222 + Dwarf_Addr text_offs = 0;
6223 int i, ret = 0;
6224 char *mod_name = NULL;
6225 + struct map *map;
6226
6227 if (!module)
6228 return 0;
6229
6230 - mod_name = find_module_name(module);
6231 + map = get_target_map(module, false);
6232 + if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
6233 + pr_warning("Failed to get ELF symbols for %s\n", module);
6234 + return -EINVAL;
6235 + }
6236
6237 + mod_name = find_module_name(module);
6238 for (i = 0; i < ntevs; i++) {
6239 + ret = post_process_probe_trace_point(&tevs[i].point,
6240 + map, (unsigned long)text_offs);
6241 + if (ret < 0)
6242 + break;
6243 tevs[i].point.module =
6244 strdup(mod_name ? mod_name : module);
6245 if (!tevs[i].point.module) {
6246 @@ -666,6 +679,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
6247 }
6248
6249 free(mod_name);
6250 + map__put(map);
6251 +
6252 return ret;
6253 }
6254
6255 @@ -722,7 +737,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
6256 static int post_process_probe_trace_events(struct perf_probe_event *pev,
6257 struct probe_trace_event *tevs,
6258 int ntevs, const char *module,
6259 - bool uprobe)
6260 + bool uprobe, struct debuginfo *dinfo)
6261 {
6262 int ret;
6263
6264 @@ -730,7 +745,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
6265 ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
6266 else if (module)
6267 /* Currently ref_reloc_sym based probe is not for drivers */
6268 - ret = add_module_to_probe_trace_events(tevs, ntevs, module);
6269 + ret = post_process_module_probe_trace_events(tevs, ntevs,
6270 + module, dinfo);
6271 else
6272 ret = post_process_kernel_probe_trace_events(tevs, ntevs);
6273
6274 @@ -774,30 +790,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
6275 }
6276 }
6277
6278 - debuginfo__delete(dinfo);
6279 -
6280 if (ntevs > 0) { /* Succeeded to find trace events */
6281 pr_debug("Found %d probe_trace_events.\n", ntevs);
6282 ret = post_process_probe_trace_events(pev, *tevs, ntevs,
6283 - pev->target, pev->uprobes);
6284 + pev->target, pev->uprobes, dinfo);
6285 if (ret < 0 || ret == ntevs) {
6286 + pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
6287 clear_probe_trace_events(*tevs, ntevs);
6288 zfree(tevs);
6289 + ntevs = 0;
6290 }
6291 - if (ret != ntevs)
6292 - return ret < 0 ? ret : ntevs;
6293 - ntevs = 0;
6294 - /* Fall through */
6295 }
6296
6297 + debuginfo__delete(dinfo);
6298 +
6299 if (ntevs == 0) { /* No error but failed to find probe point. */
6300 pr_warning("Probe point '%s' not found.\n",
6301 synthesize_perf_probe_point(&pev->point));
6302 return -ENOENT;
6303 - }
6304 - /* Error path : ntevs < 0 */
6305 - pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
6306 - if (ntevs < 0) {
6307 + } else if (ntevs < 0) {
6308 + /* Error path : ntevs < 0 */
6309 + pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
6310 if (ntevs == -EBADF)
6311 pr_warning("Warning: No dwarf info found in the vmlinux - "
6312 "please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
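The probe-event.c hunks reorder teardown so the debuginfo handle (dinfo) stays alive until post-processing finishes: the renamed post_process_module_probe_trace_events() needs it to query the module's text offset, and each trace point is adjusted against the module map before the module name is attached. Note also that ntevs is reset to 0 when post-processing fails, so control falls into the "Probe point not found" path instead of returning a stale count. post_process_probe_trace_point() itself is added elsewhere in this patch; below is a hedged sketch of what that adjustment has to accomplish, where adjust_module_point and the map__find_symbol() lookup are illustrative stand-ins rather than perf's verbatim code:

/* Re-anchor a probe point: drop the ELF text bias, then express the
 * address as symbol + offset within the module map. */
static int adjust_module_point(struct probe_trace_point *tp,
			       struct map *map, unsigned long text_offs)
{
	unsigned long addr = tp->address - text_offs;
	struct symbol *sym = map__find_symbol(map, addr); /* assumed helper */

	if (!sym)
		return -ENOENT;
	tp->offset  = addr - sym->start;
	tp->address = addr;
	return 0;
}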
6313 diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
6314 index df4debe564da..0d9d6e0803b8 100644
6315 --- a/tools/perf/util/probe-finder.c
6316 +++ b/tools/perf/util/probe-finder.c
6317 @@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
6318 }
6319
6320 /* For the kernel module, we need a special code to get a DIE */
6321 -static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
6322 +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
6323 + bool adjust_offset)
6324 {
6325 int n, i;
6326 Elf32_Word shndx;
6327 @@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
6328 if (!shdr)
6329 return -ENOENT;
6330 *offs = shdr->sh_addr;
6331 + if (adjust_offset)
6332 + *offs -= shdr->sh_offset;
6333 }
6334 }
6335 return 0;
6336 @@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
6337 Dwarf_Addr _addr = 0, baseaddr = 0;
6338 const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
6339 int baseline = 0, lineno = 0, ret = 0;
6340 - bool reloc = false;
6341
6342 -retry:
6343 + /* We always need to relocate the address for aranges */
6344 + if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
6345 + addr += baseaddr;
6346 /* Find cu die */
6347 if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
6348 - if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
6349 - addr += baseaddr;
6350 - reloc = true;
6351 - goto retry;
6352 - }
6353 pr_warning("Failed to find debug information for address %lx\n",
6354 addr);
6355 ret = -EINVAL;
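debuginfo__get_text_offset() is promoted from a static helper to a shared interface and gains an adjust_offset flag: with false it returns the text section's link-time address (sh_addr), which debuginfo__find_probe_point() now adds unconditionally before dwarf_addrdie() in place of the old retry-on-failure loop; with true it returns sh_addr - sh_offset, the bias that the module post-processing above feeds into the map lookup. Both call shapes, as they appear in the patch itself:

	Dwarf_Addr baseaddr = 0, text_offs = 0;

	/* probe-finder.c: always relocate the address for aranges */
	if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
		addr += baseaddr;

	/* probe-event.c: file-offset-adjusted bias for module maps */
	if (debuginfo__get_text_offset(dinfo, &text_offs, true) < 0)
		return -EINVAL;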
6356 diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
6357 index f1d8558f498e..2956c5198652 100644
6358 --- a/tools/perf/util/probe-finder.h
6359 +++ b/tools/perf/util/probe-finder.h
6360 @@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
6361 int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
6362 struct perf_probe_point *ppt);
6363
6364 +int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
6365 + bool adjust_offset);
6366 +
6367 /* Find a line range */
6368 int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
6369