Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.17/0110-4.17.11-all-fixes.patch



Revision 3161
Tue Jul 31 06:32:31 2018 UTC by niro
File size: 113214 bytes
-linux-4.17.11
1 diff --git a/Makefile b/Makefile
2 index 0ab689c38e82..e2664c641109 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 17
9 -SUBLEVEL = 10
10 +SUBLEVEL = 11
11 EXTRAVERSION =
12 NAME = Merciless Moray
13
14 diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
15 index 10a405d593df..c782b10ddf50 100644
16 --- a/arch/mips/ath79/common.c
17 +++ b/arch/mips/ath79/common.c
18 @@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
19
20 void ath79_ddr_wb_flush(u32 reg)
21 {
22 - void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
23 + void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
24
25 /* Flush the DDR write buffer. */
26 __raw_writel(0x1, flush_reg);
27 diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
28 index 9632436d74d7..c2e94cf5ecda 100644
29 --- a/arch/mips/pci/pci.c
30 +++ b/arch/mips/pci/pci.c
31 @@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
32 phys_addr_t size = resource_size(rsrc);
33
34 *start = fixup_bigphys_addr(rsrc->start, size);
35 - *end = rsrc->start + size;
36 + *end = rsrc->start + size - 1;
37 }
38 diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
39 index 1835ca1505d6..7472ffa76fd0 100644
40 --- a/arch/powerpc/include/asm/mmu_context.h
41 +++ b/arch/powerpc/include/asm/mmu_context.h
42 @@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
43 extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
44 unsigned long ua, unsigned long entries);
45 extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
46 - unsigned long ua, unsigned long *hpa);
47 + unsigned long ua, unsigned int pageshift, unsigned long *hpa);
48 extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
49 - unsigned long ua, unsigned long *hpa);
50 + unsigned long ua, unsigned int pageshift, unsigned long *hpa);
51 extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
52 extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
53 #endif
54 diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
55 index 4dffa611376d..e14cec6bc339 100644
56 --- a/arch/powerpc/kvm/book3s_64_vio.c
57 +++ b/arch/powerpc/kvm/book3s_64_vio.c
58 @@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
59 /* This only handles v2 IOMMU type, v1 is handled via ioctl() */
60 return H_TOO_HARD;
61
62 - if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
63 + if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
64 return H_HARDWARE;
65
66 if (mm_iommu_mapped_inc(mem))
67 diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
68 index 6651f736a0b1..eeb9e6651cc4 100644
69 --- a/arch/powerpc/kvm/book3s_64_vio_hv.c
70 +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
71 @@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl,
72 if (!mem)
73 return H_TOO_HARD;
74
75 - if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
76 + if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
77 + &hpa)))
78 return H_HARDWARE;
79
80 pua = (void *) vmalloc_to_phys(pua);
81 @@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
82
83 mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
84 if (mem)
85 - prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
86 + prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
87 + IOMMU_PAGE_SHIFT_4K, &tces) == 0;
88 }
89
90 if (!prereg) {
91 diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
92 index 4c615fcb0cf0..4205ce92ee86 100644
93 --- a/arch/powerpc/mm/mmu_context_iommu.c
94 +++ b/arch/powerpc/mm/mmu_context_iommu.c
95 @@ -19,6 +19,7 @@
96 #include <linux/hugetlb.h>
97 #include <linux/swap.h>
98 #include <asm/mmu_context.h>
99 +#include <asm/pte-walk.h>
100
101 static DEFINE_MUTEX(mem_list_mutex);
102
103 @@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
104 struct rcu_head rcu;
105 unsigned long used;
106 atomic64_t mapped;
107 + unsigned int pageshift;
108 u64 ua; /* userspace address */
109 u64 entries; /* number of entries in hpas[] */
110 u64 *hpas; /* vmalloc'ed */
111 @@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
112 {
113 struct mm_iommu_table_group_mem_t *mem;
114 long i, j, ret = 0, locked_entries = 0;
115 + unsigned int pageshift;
116 + unsigned long flags;
117 struct page *page = NULL;
118
119 mutex_lock(&mem_list_mutex);
120 @@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
121 goto unlock_exit;
122 }
123
124 + /*
125 + * For a starting point for a maximum page size calculation
126 + * we use @ua and @entries natural alignment to allow IOMMU pages
127 + * smaller than huge pages but still bigger than PAGE_SIZE.
128 + */
129 + mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
130 mem->hpas = vzalloc(entries * sizeof(mem->hpas[0]));
131 if (!mem->hpas) {
132 kfree(mem);
133 @@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
134 }
135 }
136 populate:
137 + pageshift = PAGE_SHIFT;
138 + if (PageCompound(page)) {
139 + pte_t *pte;
140 + struct page *head = compound_head(page);
141 + unsigned int compshift = compound_order(head);
142 +
143 + local_irq_save(flags); /* disables as well */
144 + pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
145 + local_irq_restore(flags);
146 +
147 + /* Double check it is still the same pinned page */
148 + if (pte && pte_page(*pte) == head &&
149 + pageshift == compshift)
150 + pageshift = max_t(unsigned int, pageshift,
151 + PAGE_SHIFT);
152 + }
153 + mem->pageshift = min(mem->pageshift, pageshift);
154 mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
155 }
156
157 @@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
158 EXPORT_SYMBOL_GPL(mm_iommu_find);
159
160 long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
161 - unsigned long ua, unsigned long *hpa)
162 + unsigned long ua, unsigned int pageshift, unsigned long *hpa)
163 {
164 const long entry = (ua - mem->ua) >> PAGE_SHIFT;
165 u64 *va = &mem->hpas[entry];
166 @@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
167 if (entry >= mem->entries)
168 return -EFAULT;
169
170 + if (pageshift > mem->pageshift)
171 + return -EFAULT;
172 +
173 *hpa = *va | (ua & ~PAGE_MASK);
174
175 return 0;
176 @@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
177 EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
178
179 long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
180 - unsigned long ua, unsigned long *hpa)
181 + unsigned long ua, unsigned int pageshift, unsigned long *hpa)
182 {
183 const long entry = (ua - mem->ua) >> PAGE_SHIFT;
184 void *va = &mem->hpas[entry];
185 @@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
186 if (entry >= mem->entries)
187 return -EFAULT;
188
189 + if (pageshift > mem->pageshift)
190 + return -EFAULT;
191 +
192 pa = (void *) vmalloc_to_phys(va);
193 if (!pa)
194 return -EFAULT;
195 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
196 index fbc4d17e3ecc..ac01341f2d1f 100644
197 --- a/arch/x86/kvm/x86.c
198 +++ b/arch/x86/kvm/x86.c
199 @@ -1092,6 +1092,7 @@ static u32 msr_based_features[] = {
200
201 MSR_F10H_DECFG,
202 MSR_IA32_UCODE_REV,
203 + MSR_IA32_ARCH_CAPABILITIES,
204 };
205
206 static unsigned int num_msr_based_features;
207 @@ -1100,7 +1101,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
208 {
209 switch (msr->index) {
210 case MSR_IA32_UCODE_REV:
211 - rdmsrl(msr->index, msr->data);
212 + case MSR_IA32_ARCH_CAPABILITIES:
213 + rdmsrl_safe(msr->index, &msr->data);
214 break;
215 default:
216 if (kvm_x86_ops->get_msr_feature(msr))
217 diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S
218 index e1a5fbeae08d..5d7554c025fd 100644
219 --- a/arch/x86/xen/xen-pvh.S
220 +++ b/arch/x86/xen/xen-pvh.S
221 @@ -54,6 +54,9 @@
222 * charge of setting up it's own stack, GDT and IDT.
223 */
224
225 +#define PVH_GDT_ENTRY_CANARY 4
226 +#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8)
227 +
228 ENTRY(pvh_start_xen)
229 cld
230
231 @@ -98,6 +101,12 @@ ENTRY(pvh_start_xen)
232 /* 64-bit entry point. */
233 .code64
234 1:
235 + /* Set base address in stack canary descriptor. */
236 + mov $MSR_GS_BASE,%ecx
237 + mov $_pa(canary), %eax
238 + xor %edx, %edx
239 + wrmsr
240 +
241 call xen_prepare_pvh
242
243 /* startup_64 expects boot_params in %rsi. */
244 @@ -107,6 +116,17 @@ ENTRY(pvh_start_xen)
245
246 #else /* CONFIG_X86_64 */
247
248 + /* Set base address in stack canary descriptor. */
249 + movl $_pa(gdt_start),%eax
250 + movl $_pa(canary),%ecx
251 + movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax)
252 + shrl $16, %ecx
253 + movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax)
254 + movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax)
255 +
256 + mov $PVH_CANARY_SEL,%eax
257 + mov %eax,%gs
258 +
259 call mk_early_pgtbl_32
260
261 mov $_pa(initial_page_table), %eax
262 @@ -150,9 +170,13 @@ gdt_start:
263 .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */
264 #endif
265 .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */
266 + .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */
267 gdt_end:
268
269 - .balign 4
270 + .balign 16
271 +canary:
272 + .fill 48, 1, 0
273 +
274 early_stack:
275 .fill 256, 1, 0
276 early_stack_end:
277 diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
278 index bc5f05906bd1..ee840be150b5 100644
279 --- a/drivers/acpi/acpica/psloop.c
280 +++ b/drivers/acpi/acpica/psloop.c
281 @@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
282 status =
283 acpi_ps_create_op(walk_state, aml_op_start, &op);
284 if (ACPI_FAILURE(status)) {
285 + /*
286 + * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
287 + * executing it as a control method. However, if we encounter
288 + * an error while loading the table, we need to keep trying to
289 + * load the table rather than aborting the table load. Set the
290 + * status to AE_OK to proceed with the table load.
291 + */
292 + if ((walk_state->
293 + parse_flags & ACPI_PARSE_MODULE_LEVEL)
294 + && status == AE_ALREADY_EXISTS) {
295 + status = AE_OK;
296 + }
297 if (status == AE_CTRL_PARSE_CONTINUE) {
298 continue;
299 }
300 @@ -694,6 +706,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
301 acpi_ps_next_parse_state(walk_state, op, status);
302 if (status == AE_CTRL_PENDING) {
303 status = AE_OK;
304 + } else
305 + if ((walk_state->
306 + parse_flags & ACPI_PARSE_MODULE_LEVEL)
307 + && ACPI_FAILURE(status)) {
308 + /*
309 + * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
310 + * executing it as a control method. However, if we encounter
311 + * an error while loading the table, we need to keep trying to
312 + * load the table rather than aborting the table load. Set the
313 + * status to AE_OK to proceed with the table load. If we get a
314 + * failure at this point, it means that the dispatcher got an
315 + * error while processing Op (most likely an AML operand error.
316 + */
317 + status = AE_OK;
318 }
319 }
320
321 diff --git a/drivers/base/dd.c b/drivers/base/dd.c
322 index c9f54089429b..2cee8d0f3045 100644
323 --- a/drivers/base/dd.c
324 +++ b/drivers/base/dd.c
325 @@ -436,14 +436,6 @@ re_probe:
326 goto probe_failed;
327 }
328
329 - /*
330 - * Ensure devices are listed in devices_kset in correct order
331 - * It's important to move Dev to the end of devices_kset before
332 - * calling .probe, because it could be recursive and parent Dev
333 - * should always go first
334 - */
335 - devices_kset_move_last(dev);
336 -
337 if (dev->bus->probe) {
338 ret = dev->bus->probe(dev);
339 if (ret)
340 diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
341 index 2c23e7d7ba28..43e0c33ee648 100644
342 --- a/drivers/clk/clk-aspeed.c
343 +++ b/drivers/clk/clk-aspeed.c
344 @@ -22,7 +22,7 @@
345 #define ASPEED_MPLL_PARAM 0x20
346 #define ASPEED_HPLL_PARAM 0x24
347 #define AST2500_HPLL_BYPASS_EN BIT(20)
348 -#define AST2400_HPLL_STRAPPED BIT(18)
349 +#define AST2400_HPLL_PROGRAMMED BIT(18)
350 #define AST2400_HPLL_BYPASS_EN BIT(17)
351 #define ASPEED_MISC_CTRL 0x2c
352 #define UART_DIV13_EN BIT(12)
353 @@ -88,8 +88,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
354 [ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
355 [ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
356 [ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
357 - [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
358 - [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
359 + [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
360 + [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
361 [ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
362 [ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
363 [ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
364 @@ -530,29 +530,45 @@ builtin_platform_driver(aspeed_clk_driver);
365 static void __init aspeed_ast2400_cc(struct regmap *map)
366 {
367 struct clk_hw *hw;
368 - u32 val, freq, div;
369 + u32 val, div, clkin, hpll;
370 + const u16 hpll_rates[][4] = {
371 + {384, 360, 336, 408},
372 + {400, 375, 350, 425},
373 + };
374 + int rate;
375
376 /*
377 * CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
378 * strapping
379 */
380 regmap_read(map, ASPEED_STRAP, &val);
381 - if (val & CLKIN_25MHZ_EN)
382 - freq = 25000000;
383 - else if (val & AST2400_CLK_SOURCE_SEL)
384 - freq = 48000000;
385 - else
386 - freq = 24000000;
387 - hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
388 - pr_debug("clkin @%u MHz\n", freq / 1000000);
389 + rate = (val >> 8) & 3;
390 + if (val & CLKIN_25MHZ_EN) {
391 + clkin = 25000000;
392 + hpll = hpll_rates[1][rate];
393 + } else if (val & AST2400_CLK_SOURCE_SEL) {
394 + clkin = 48000000;
395 + hpll = hpll_rates[0][rate];
396 + } else {
397 + clkin = 24000000;
398 + hpll = hpll_rates[0][rate];
399 + }
400 + hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
401 + pr_debug("clkin @%u MHz\n", clkin / 1000000);
402
403 /*
404 * High-speed PLL clock derived from the crystal. This the CPU clock,
405 - * and we assume that it is enabled
406 + * and we assume that it is enabled. It can be configured through the
407 + * HPLL_PARAM register, or set to a specified frequency by strapping.
408 */
409 regmap_read(map, ASPEED_HPLL_PARAM, &val);
410 - WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
411 - aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
412 + if (val & AST2400_HPLL_PROGRAMMED)
413 + hw = aspeed_ast2400_calc_pll("hpll", val);
414 + else
415 + hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
416 + hpll * 1000000);
417 +
418 + aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
419
420 /*
421 * Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
422 diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
423 index b1e4d9557610..0e053c17d8ba 100644
424 --- a/drivers/clk/meson/gxbb.c
425 +++ b/drivers/clk/meson/gxbb.c
426 @@ -511,6 +511,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
427 .ops = &clk_regmap_gate_ops,
428 .parent_names = (const char *[]){ "fclk_div2_div" },
429 .num_parents = 1,
430 + .flags = CLK_IS_CRITICAL,
431 },
432 };
433
434 diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
435 index 87213ea7fc84..706dc80ad644 100644
436 --- a/drivers/clk/mvebu/armada-37xx-periph.c
437 +++ b/drivers/clk/mvebu/armada-37xx-periph.c
438 @@ -35,6 +35,7 @@
439 #define CLK_SEL 0x10
440 #define CLK_DIS 0x14
441
442 +#define ARMADA_37XX_DVFS_LOAD_1 1
443 #define LOAD_LEVEL_NR 4
444
445 #define ARMADA_37XX_NB_L0L1 0x18
446 @@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
447 return -EINVAL;
448 }
449
450 +/*
451 + * Switching the CPU from the L2 or L3 frequencies (300 and 200 Mhz
452 + * respectively) to L0 frequency (1.2 Ghz) requires a significant
453 + * amount of time to let VDD stabilize to the appropriate
454 + * voltage. This amount of time is large enough that it cannot be
455 + * covered by the hardware countdown register. Due to this, the CPU
456 + * might start operating at L0 before the voltage is stabilized,
457 + * leading to CPU stalls.
458 + *
459 + * To work around this problem, we prevent switching directly from the
460 + * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
461 + * frequency in-between. The sequence therefore becomes:
462 + * 1. First switch from L2/L3(200/300MHz) to L1(600MHZ)
463 + * 2. Sleep 20ms for stabling VDD voltage
464 + * 3. Then switch from L1(600MHZ) to L0(1200Mhz).
465 + */
466 +static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
467 +{
468 + unsigned int cur_level;
469 +
470 + if (rate != 1200 * 1000 * 1000)
471 + return;
472 +
473 + regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
474 + cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
475 + if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
476 + return;
477 +
478 + regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
479 + ARMADA_37XX_NB_CPU_LOAD_MASK,
480 + ARMADA_37XX_DVFS_LOAD_1);
481 + msleep(20);
482 +}
483 +
484 static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
485 unsigned long parent_rate)
486 {
487 @@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
488 */
489 reg = ARMADA_37XX_NB_CPU_LOAD;
490 mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
491 +
492 + clk_pm_cpu_set_rate_wa(rate, base);
493 +
494 regmap_update_bits(base, reg, mask, load_level);
495
496 return rate;
497 diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
498 index 501d2d290e9c..70dce544984e 100644
499 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
500 +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
501 @@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
502 nouveau_display(dev)->init = nv04_display_init;
503 nouveau_display(dev)->fini = nv04_display_fini;
504
505 + /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
506 + dev->driver->driver_features &= ~DRIVER_ATOMIC;
507 +
508 nouveau_hw_save_vga_fonts(dev, 1);
509
510 nv04_crtc_create(dev, 0);
511 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
512 index 0bffeb95b072..591d9c29ede7 100644
513 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c
514 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
515 @@ -79,6 +79,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
516 int nouveau_modeset = -1;
517 module_param_named(modeset, nouveau_modeset, int, 0400);
518
519 +MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
520 +static int nouveau_atomic = 0;
521 +module_param_named(atomic, nouveau_atomic, int, 0400);
522 +
523 MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
524 static int nouveau_runtime_pm = -1;
525 module_param_named(runpm, nouveau_runtime_pm, int, 0400);
526 @@ -501,6 +505,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
527
528 pci_set_master(pdev);
529
530 + if (nouveau_atomic)
531 + driver_pci.driver_features |= DRIVER_ATOMIC;
532 +
533 ret = drm_get_pci_dev(pdev, pent, &driver_pci);
534 if (ret) {
535 nvkm_device_del(&device);
536 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
537 index 2b3ccd850750..abe297fda046 100644
538 --- a/drivers/gpu/drm/nouveau/nv50_display.c
539 +++ b/drivers/gpu/drm/nouveau/nv50_display.c
540 @@ -4198,7 +4198,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
541 nv50_disp_atomic_commit_tail(state);
542
543 drm_for_each_crtc(crtc, dev) {
544 - if (crtc->state->enable) {
545 + if (crtc->state->active) {
546 if (!drm->have_disp_power_ref) {
547 drm->have_disp_power_ref = true;
548 return 0;
549 @@ -4441,10 +4441,6 @@ nv50_display_destroy(struct drm_device *dev)
550 kfree(disp);
551 }
552
553 -MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
554 -static int nouveau_atomic = 0;
555 -module_param_named(atomic, nouveau_atomic, int, 0400);
556 -
557 int
558 nv50_display_create(struct drm_device *dev)
559 {
560 @@ -4469,8 +4465,6 @@ nv50_display_create(struct drm_device *dev)
561 disp->disp = &nouveau_display(dev)->disp;
562 dev->mode_config.funcs = &nv50_disp_func;
563 dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
564 - if (nouveau_atomic)
565 - dev->driver->driver_features |= DRIVER_ATOMIC;
566
567 /* small shared memory area we use for notifiers and semaphores */
568 ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
569 diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
570 index b38798cc5288..f3a21343e636 100644
571 --- a/drivers/iommu/Kconfig
572 +++ b/drivers/iommu/Kconfig
573 @@ -142,7 +142,6 @@ config DMAR_TABLE
574 config INTEL_IOMMU
575 bool "Support for Intel IOMMU using DMA Remapping Devices"
576 depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
577 - select DMA_DIRECT_OPS
578 select IOMMU_API
579 select IOMMU_IOVA
580 select DMAR_TABLE
581 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
582 index 749d8f235346..6392a4964fc5 100644
583 --- a/drivers/iommu/intel-iommu.c
584 +++ b/drivers/iommu/intel-iommu.c
585 @@ -31,7 +31,6 @@
586 #include <linux/pci.h>
587 #include <linux/dmar.h>
588 #include <linux/dma-mapping.h>
589 -#include <linux/dma-direct.h>
590 #include <linux/mempool.h>
591 #include <linux/memory.h>
592 #include <linux/cpu.h>
593 @@ -3709,30 +3708,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
594 dma_addr_t *dma_handle, gfp_t flags,
595 unsigned long attrs)
596 {
597 - void *vaddr;
598 + struct page *page = NULL;
599 + int order;
600
601 - vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
602 - if (iommu_no_mapping(dev) || !vaddr)
603 - return vaddr;
604 + size = PAGE_ALIGN(size);
605 + order = get_order(size);
606
607 - *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
608 - PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
609 - dev->coherent_dma_mask);
610 - if (!*dma_handle)
611 - goto out_free_pages;
612 - return vaddr;
613 + if (!iommu_no_mapping(dev))
614 + flags &= ~(GFP_DMA | GFP_DMA32);
615 + else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
616 + if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
617 + flags |= GFP_DMA;
618 + else
619 + flags |= GFP_DMA32;
620 + }
621 +
622 + if (gfpflags_allow_blocking(flags)) {
623 + unsigned int count = size >> PAGE_SHIFT;
624 +
625 + page = dma_alloc_from_contiguous(dev, count, order, flags);
626 + if (page && iommu_no_mapping(dev) &&
627 + page_to_phys(page) + size > dev->coherent_dma_mask) {
628 + dma_release_from_contiguous(dev, page, count);
629 + page = NULL;
630 + }
631 + }
632 +
633 + if (!page)
634 + page = alloc_pages(flags, order);
635 + if (!page)
636 + return NULL;
637 + memset(page_address(page), 0, size);
638 +
639 + *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
640 + DMA_BIDIRECTIONAL,
641 + dev->coherent_dma_mask);
642 + if (*dma_handle)
643 + return page_address(page);
644 + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
645 + __free_pages(page, order);
646
647 -out_free_pages:
648 - dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
649 return NULL;
650 }
651
652 static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
653 dma_addr_t dma_handle, unsigned long attrs)
654 {
655 - if (!iommu_no_mapping(dev))
656 - intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
657 - dma_direct_free(dev, size, vaddr, dma_handle, attrs);
658 + int order;
659 + struct page *page = virt_to_page(vaddr);
660 +
661 + size = PAGE_ALIGN(size);
662 + order = get_order(size);
663 +
664 + intel_unmap(dev, dma_handle, size);
665 + if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
666 + __free_pages(page, order);
667 }
668
669 static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
670 diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
671 index b594bae1adbd..cdc72b7e3d26 100644
672 --- a/drivers/net/bonding/bond_options.c
673 +++ b/drivers/net/bonding/bond_options.c
674 @@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
675 static int bond_option_mode_set(struct bonding *bond,
676 const struct bond_opt_value *newval)
677 {
678 - if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
679 - netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
680 - newval->string);
681 - /* disable arp monitoring */
682 - bond->params.arp_interval = 0;
683 - /* set miimon to default value */
684 - bond->params.miimon = BOND_DEFAULT_MIIMON;
685 - netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
686 - bond->params.miimon);
687 + if (!bond_mode_uses_arp(newval->value)) {
688 + if (bond->params.arp_interval) {
689 + netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
690 + newval->string);
691 + /* disable arp monitoring */
692 + bond->params.arp_interval = 0;
693 + }
694 +
695 + if (!bond->params.miimon) {
696 + /* set miimon to default value */
697 + bond->params.miimon = BOND_DEFAULT_MIIMON;
698 + netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
699 + bond->params.miimon);
700 + }
701 }
702
703 if (newval->value == BOND_MODE_ALB)
704 diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
705 index b397a33f3d32..e2f965c2e3aa 100644
706 --- a/drivers/net/can/m_can/m_can.c
707 +++ b/drivers/net/can/m_can/m_can.c
708 @@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
709 int err;
710
711 err = pm_runtime_get_sync(priv->device);
712 - if (err)
713 + if (err < 0) {
714 pm_runtime_put_noidle(priv->device);
715 + return err;
716 + }
717
718 - return err;
719 + return 0;
720 }
721
722 static void m_can_clk_stop(struct m_can_priv *priv)
723 @@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
724
725 } else {
726 /* Version 3.1.x or 3.2.x */
727 - cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
728 + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
729 + CCCR_NISO);
730
731 /* Only 3.2.x has NISO Bit implemented */
732 if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
733 @@ -1687,8 +1690,6 @@ failed_ret:
734 return ret;
735 }
736
737 -/* TODO: runtime PM with power down or sleep mode */
738 -
739 static __maybe_unused int m_can_suspend(struct device *dev)
740 {
741 struct net_device *ndev = dev_get_drvdata(dev);
742 diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
743 index 3c51a884db87..fa689854f16b 100644
744 --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
745 +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
746 @@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
747 #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
748 #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
749
750 +#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
751 + ((u32)(y) << 16) | \
752 + ((u32)(z) << 8))
753 +
754 /* System Control Registers Bits */
755 #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
756 #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
757 @@ -783,6 +787,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
758 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
759 hw_ver_major, hw_ver_minor, hw_ver_sub);
760
761 +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
762 + /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
763 + * 64-bit logical addresses: this workaround forces usage of 32-bit
764 + * DMA addresses only when such a fw is detected.
765 + */
766 + if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
767 + PCIEFD_FW_VERSION(3, 3, 0)) {
768 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
769 + if (err)
770 + dev_warn(&pdev->dev,
771 + "warning: can't set DMA mask %llxh (err %d)\n",
772 + DMA_BIT_MASK(32), err);
773 + }
774 +#endif
775 +
776 /* stop system clock */
777 pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
778 PCIEFD_REG_SYS_CTL_CLR);
779 diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
780 index 89aec07c225f..5a24039733ef 100644
781 --- a/drivers/net/can/xilinx_can.c
782 +++ b/drivers/net/can/xilinx_can.c
783 @@ -2,6 +2,7 @@
784 *
785 * Copyright (C) 2012 - 2014 Xilinx, Inc.
786 * Copyright (C) 2009 PetaLogix. All rights reserved.
787 + * Copyright (C) 2017 Sandvik Mining and Construction Oy
788 *
789 * Description:
790 * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
791 @@ -25,8 +26,10 @@
792 #include <linux/module.h>
793 #include <linux/netdevice.h>
794 #include <linux/of.h>
795 +#include <linux/of_device.h>
796 #include <linux/platform_device.h>
797 #include <linux/skbuff.h>
798 +#include <linux/spinlock.h>
799 #include <linux/string.h>
800 #include <linux/types.h>
801 #include <linux/can/dev.h>
802 @@ -101,7 +104,7 @@ enum xcan_reg {
803 #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
804 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
805 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
806 - XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
807 + XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
808
809 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
810 #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
811 @@ -118,6 +121,7 @@ enum xcan_reg {
812 /**
813 * struct xcan_priv - This definition define CAN driver instance
814 * @can: CAN private data structure.
815 + * @tx_lock: Lock for synchronizing TX interrupt handling
816 * @tx_head: Tx CAN packets ready to send on the queue
817 * @tx_tail: Tx CAN packets successfully sended on the queue
818 * @tx_max: Maximum number packets the driver can send
819 @@ -132,6 +136,7 @@ enum xcan_reg {
820 */
821 struct xcan_priv {
822 struct can_priv can;
823 + spinlock_t tx_lock;
824 unsigned int tx_head;
825 unsigned int tx_tail;
826 unsigned int tx_max;
827 @@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
828 .brp_inc = 1,
829 };
830
831 +#define XCAN_CAP_WATERMARK 0x0001
832 +struct xcan_devtype_data {
833 + unsigned int caps;
834 +};
835 +
836 /**
837 * xcan_write_reg_le - Write a value to the device register little endian
838 * @priv: Driver private data structure
839 @@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
840 usleep_range(500, 10000);
841 }
842
843 + /* reset clears FIFOs */
844 + priv->tx_head = 0;
845 + priv->tx_tail = 0;
846 +
847 return 0;
848 }
849
850 @@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
851 struct net_device_stats *stats = &ndev->stats;
852 struct can_frame *cf = (struct can_frame *)skb->data;
853 u32 id, dlc, data[2] = {0, 0};
854 + unsigned long flags;
855
856 if (can_dropped_invalid_skb(ndev, skb))
857 return NETDEV_TX_OK;
858 @@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
859 data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
860
861 can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
862 +
863 + spin_lock_irqsave(&priv->tx_lock, flags);
864 +
865 priv->tx_head++;
866
867 /* Write the Frame to Xilinx CAN TX FIFO */
868 @@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
869 stats->tx_bytes += cf->can_dlc;
870 }
871
872 + /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
873 + if (priv->tx_max > 1)
874 + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
875 +
876 /* Check if the TX buffer is full */
877 if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
878 netif_stop_queue(ndev);
879
880 + spin_unlock_irqrestore(&priv->tx_lock, flags);
881 +
882 return NETDEV_TX_OK;
883 }
884
885 @@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
886 return 1;
887 }
888
889 +/**
890 + * xcan_current_error_state - Get current error state from HW
891 + * @ndev: Pointer to net_device structure
892 + *
893 + * Checks the current CAN error state from the HW. Note that this
894 + * only checks for ERROR_PASSIVE and ERROR_WARNING.
895 + *
896 + * Return:
897 + * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
898 + * otherwise.
899 + */
900 +static enum can_state xcan_current_error_state(struct net_device *ndev)
901 +{
902 + struct xcan_priv *priv = netdev_priv(ndev);
903 + u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
904 +
905 + if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
906 + return CAN_STATE_ERROR_PASSIVE;
907 + else if (status & XCAN_SR_ERRWRN_MASK)
908 + return CAN_STATE_ERROR_WARNING;
909 + else
910 + return CAN_STATE_ERROR_ACTIVE;
911 +}
912 +
913 +/**
914 + * xcan_set_error_state - Set new CAN error state
915 + * @ndev: Pointer to net_device structure
916 + * @new_state: The new CAN state to be set
917 + * @cf: Error frame to be populated or NULL
918 + *
919 + * Set new CAN error state for the device, updating statistics and
920 + * populating the error frame if given.
921 + */
922 +static void xcan_set_error_state(struct net_device *ndev,
923 + enum can_state new_state,
924 + struct can_frame *cf)
925 +{
926 + struct xcan_priv *priv = netdev_priv(ndev);
927 + u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
928 + u32 txerr = ecr & XCAN_ECR_TEC_MASK;
929 + u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
930 +
931 + priv->can.state = new_state;
932 +
933 + if (cf) {
934 + cf->can_id |= CAN_ERR_CRTL;
935 + cf->data[6] = txerr;
936 + cf->data[7] = rxerr;
937 + }
938 +
939 + switch (new_state) {
940 + case CAN_STATE_ERROR_PASSIVE:
941 + priv->can.can_stats.error_passive++;
942 + if (cf)
943 + cf->data[1] = (rxerr > 127) ?
944 + CAN_ERR_CRTL_RX_PASSIVE :
945 + CAN_ERR_CRTL_TX_PASSIVE;
946 + break;
947 + case CAN_STATE_ERROR_WARNING:
948 + priv->can.can_stats.error_warning++;
949 + if (cf)
950 + cf->data[1] |= (txerr > rxerr) ?
951 + CAN_ERR_CRTL_TX_WARNING :
952 + CAN_ERR_CRTL_RX_WARNING;
953 + break;
954 + case CAN_STATE_ERROR_ACTIVE:
955 + if (cf)
956 + cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
957 + break;
958 + default:
959 + /* non-ERROR states are handled elsewhere */
960 + WARN_ON(1);
961 + break;
962 + }
963 +}
964 +
965 +/**
966 + * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
967 + * @ndev: Pointer to net_device structure
968 + *
969 + * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
970 + * the performed RX/TX has caused it to drop to a lesser state and set
971 + * the interface state accordingly.
972 + */
973 +static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
974 +{
975 + struct xcan_priv *priv = netdev_priv(ndev);
976 + enum can_state old_state = priv->can.state;
977 + enum can_state new_state;
978 +
979 + /* changing error state due to successful frame RX/TX can only
980 + * occur from these states
981 + */
982 + if (old_state != CAN_STATE_ERROR_WARNING &&
983 + old_state != CAN_STATE_ERROR_PASSIVE)
984 + return;
985 +
986 + new_state = xcan_current_error_state(ndev);
987 +
988 + if (new_state != old_state) {
989 + struct sk_buff *skb;
990 + struct can_frame *cf;
991 +
992 + skb = alloc_can_err_skb(ndev, &cf);
993 +
994 + xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
995 +
996 + if (skb) {
997 + struct net_device_stats *stats = &ndev->stats;
998 +
999 + stats->rx_packets++;
1000 + stats->rx_bytes += cf->can_dlc;
1001 + netif_rx(skb);
1002 + }
1003 + }
1004 +}
1005 +
1006 /**
1007 * xcan_err_interrupt - error frame Isr
1008 * @ndev: net_device pointer
1009 @@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
1010 struct net_device_stats *stats = &ndev->stats;
1011 struct can_frame *cf;
1012 struct sk_buff *skb;
1013 - u32 err_status, status, txerr = 0, rxerr = 0;
1014 + u32 err_status;
1015
1016 skb = alloc_can_err_skb(ndev, &cf);
1017
1018 err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
1019 priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
1020 - txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
1021 - rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
1022 - XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
1023 - status = priv->read_reg(priv, XCAN_SR_OFFSET);
1024
1025 if (isr & XCAN_IXR_BSOFF_MASK) {
1026 priv->can.state = CAN_STATE_BUS_OFF;
1027 @@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
1028 can_bus_off(ndev);
1029 if (skb)
1030 cf->can_id |= CAN_ERR_BUSOFF;
1031 - } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
1032 - priv->can.state = CAN_STATE_ERROR_PASSIVE;
1033 - priv->can.can_stats.error_passive++;
1034 - if (skb) {
1035 - cf->can_id |= CAN_ERR_CRTL;
1036 - cf->data[1] = (rxerr > 127) ?
1037 - CAN_ERR_CRTL_RX_PASSIVE :
1038 - CAN_ERR_CRTL_TX_PASSIVE;
1039 - cf->data[6] = txerr;
1040 - cf->data[7] = rxerr;
1041 - }
1042 - } else if (status & XCAN_SR_ERRWRN_MASK) {
1043 - priv->can.state = CAN_STATE_ERROR_WARNING;
1044 - priv->can.can_stats.error_warning++;
1045 - if (skb) {
1046 - cf->can_id |= CAN_ERR_CRTL;
1047 - cf->data[1] |= (txerr > rxerr) ?
1048 - CAN_ERR_CRTL_TX_WARNING :
1049 - CAN_ERR_CRTL_RX_WARNING;
1050 - cf->data[6] = txerr;
1051 - cf->data[7] = rxerr;
1052 - }
1053 + } else {
1054 + enum can_state new_state = xcan_current_error_state(ndev);
1055 +
1056 + xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
1057 }
1058
1059 /* Check for Arbitration lost interrupt */
1060 @@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
1061 if (isr & XCAN_IXR_RXOFLW_MASK) {
1062 stats->rx_over_errors++;
1063 stats->rx_errors++;
1064 - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1065 if (skb) {
1066 cf->can_id |= CAN_ERR_CRTL;
1067 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
1068 @@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
1069
1070 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1071 while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
1072 - if (isr & XCAN_IXR_RXOK_MASK) {
1073 - priv->write_reg(priv, XCAN_ICR_OFFSET,
1074 - XCAN_IXR_RXOK_MASK);
1075 - work_done += xcan_rx(ndev);
1076 - } else {
1077 - priv->write_reg(priv, XCAN_ICR_OFFSET,
1078 - XCAN_IXR_RXNEMP_MASK);
1079 - break;
1080 - }
1081 + work_done += xcan_rx(ndev);
1082 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
1083 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1084 }
1085
1086 - if (work_done)
1087 + if (work_done) {
1088 can_led_event(ndev, CAN_LED_EVENT_RX);
1089 + xcan_update_error_state_after_rxtx(ndev);
1090 + }
1091
1092 if (work_done < quota) {
1093 napi_complete_done(napi, work_done);
1094 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1095 - ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
1096 + ier |= XCAN_IXR_RXNEMP_MASK;
1097 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1098 }
1099 return work_done;
1100 @@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
1101 {
1102 struct xcan_priv *priv = netdev_priv(ndev);
1103 struct net_device_stats *stats = &ndev->stats;
1104 + unsigned int frames_in_fifo;
1105 + int frames_sent = 1; /* TXOK => at least 1 frame was sent */
1106 + unsigned long flags;
1107 + int retries = 0;
1108 +
1109 + /* Synchronize with xmit as we need to know the exact number
1110 + * of frames in the FIFO to stay in sync due to the TXFEMP
1111 + * handling.
1112 + * This also prevents a race between netif_wake_queue() and
1113 + * netif_stop_queue().
1114 + */
1115 + spin_lock_irqsave(&priv->tx_lock, flags);
1116 +
1117 + frames_in_fifo = priv->tx_head - priv->tx_tail;
1118 +
1119 + if (WARN_ON_ONCE(frames_in_fifo == 0)) {
1120 + /* clear TXOK anyway to avoid getting back here */
1121 + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1122 + spin_unlock_irqrestore(&priv->tx_lock, flags);
1123 + return;
1124 + }
1125 +
1126 + /* Check if 2 frames were sent (TXOK only means that at least 1
1127 + * frame was sent).
1128 + */
1129 + if (frames_in_fifo > 1) {
1130 + WARN_ON(frames_in_fifo > priv->tx_max);
1131 +
1132 + /* Synchronize TXOK and isr so that after the loop:
1133 + * (1) isr variable is up-to-date at least up to TXOK clear
1134 + * time. This avoids us clearing a TXOK of a second frame
1135 + * but not noticing that the FIFO is now empty and thus
1136 + * marking only a single frame as sent.
1137 + * (2) No TXOK is left. Having one could mean leaving a
1138 + * stray TXOK as we might process the associated frame
1139 + * via TXFEMP handling as we read TXFEMP *after* TXOK
1140 + * clear to satisfy (1).
1141 + */
1142 + while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
1143 + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1144 + isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1145 + }
1146
1147 - while ((priv->tx_head - priv->tx_tail > 0) &&
1148 - (isr & XCAN_IXR_TXOK_MASK)) {
1149 + if (isr & XCAN_IXR_TXFEMP_MASK) {
1150 + /* nothing in FIFO anymore */
1151 + frames_sent = frames_in_fifo;
1152 + }
1153 + } else {
1154 + /* single frame in fifo, just clear TXOK */
1155 priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
1156 + }
1157 +
1158 + while (frames_sent--) {
1159 can_get_echo_skb(ndev, priv->tx_tail %
1160 priv->tx_max);
1161 priv->tx_tail++;
1162 stats->tx_packets++;
1163 - isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1164 }
1165 - can_led_event(ndev, CAN_LED_EVENT_TX);
1166 +
1167 netif_wake_queue(ndev);
1168 +
1169 + spin_unlock_irqrestore(&priv->tx_lock, flags);
1170 +
1171 + can_led_event(ndev, CAN_LED_EVENT_TX);
1172 + xcan_update_error_state_after_rxtx(ndev);
1173 }
1174
1175 /**
1176 @@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1177 struct net_device *ndev = (struct net_device *)dev_id;
1178 struct xcan_priv *priv = netdev_priv(ndev);
1179 u32 isr, ier;
1180 + u32 isr_errors;
1181
1182 /* Get the interrupt status from Xilinx CAN */
1183 isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1184 @@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1185 xcan_tx_interrupt(ndev, isr);
1186
1187 /* Check for the type of error interrupt and Processing it */
1188 - if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1189 - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
1190 - priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
1191 - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
1192 - XCAN_IXR_ARBLST_MASK));
1193 + isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
1194 + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
1195 + if (isr_errors) {
1196 + priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
1197 xcan_err_interrupt(ndev, isr);
1198 }
1199
1200 /* Check for the type of receive interrupt and Processing it */
1201 - if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
1202 + if (isr & XCAN_IXR_RXNEMP_MASK) {
1203 ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1204 - ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
1205 + ier &= ~XCAN_IXR_RXNEMP_MASK;
1206 priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1207 napi_schedule(&priv->napi);
1208 }
1209 @@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
1210 static void xcan_chip_stop(struct net_device *ndev)
1211 {
1212 struct xcan_priv *priv = netdev_priv(ndev);
1213 - u32 ier;
1214
1215 /* Disable interrupts and leave the can in configuration mode */
1216 - ier = priv->read_reg(priv, XCAN_IER_OFFSET);
1217 - ier &= ~XCAN_INTR_ALL;
1218 - priv->write_reg(priv, XCAN_IER_OFFSET, ier);
1219 - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1220 + set_reset_mode(ndev);
1221 priv->can.state = CAN_STATE_STOPPED;
1222 }
1223
1224 @@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
1225 */
1226 static int __maybe_unused xcan_suspend(struct device *dev)
1227 {
1228 - if (!device_may_wakeup(dev))
1229 - return pm_runtime_force_suspend(dev);
1230 + struct net_device *ndev = dev_get_drvdata(dev);
1231
1232 - return 0;
1233 + if (netif_running(ndev)) {
1234 + netif_stop_queue(ndev);
1235 + netif_device_detach(ndev);
1236 + xcan_chip_stop(ndev);
1237 + }
1238 +
1239 + return pm_runtime_force_suspend(dev);
1240 }
1241
1242 /**
1243 @@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
1244 */
1245 static int __maybe_unused xcan_resume(struct device *dev)
1246 {
1247 - if (!device_may_wakeup(dev))
1248 - return pm_runtime_force_resume(dev);
1249 + struct net_device *ndev = dev_get_drvdata(dev);
1250 + int ret;
1251
1252 - return 0;
1253 + ret = pm_runtime_force_resume(dev);
1254 + if (ret) {
1255 + dev_err(dev, "pm_runtime_force_resume failed on resume\n");
1256 + return ret;
1257 + }
1258 +
1259 + if (netif_running(ndev)) {
1260 + ret = xcan_chip_start(ndev);
1261 + if (ret) {
1262 + dev_err(dev, "xcan_chip_start failed on resume\n");
1263 + return ret;
1264 + }
1265 +
1266 + netif_device_attach(ndev);
1267 + netif_start_queue(ndev);
1268 + }
1269
1270 + return 0;
1271 }
1272
1273 /**
1274 @@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
1275 struct net_device *ndev = dev_get_drvdata(dev);
1276 struct xcan_priv *priv = netdev_priv(ndev);
1277
1278 - if (netif_running(ndev)) {
1279 - netif_stop_queue(ndev);
1280 - netif_device_detach(ndev);
1281 - }
1282 -
1283 - priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
1284 - priv->can.state = CAN_STATE_SLEEPING;
1285 -
1286 clk_disable_unprepare(priv->bus_clk);
1287 clk_disable_unprepare(priv->can_clk);
1288
1289 @@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1290 struct net_device *ndev = dev_get_drvdata(dev);
1291 struct xcan_priv *priv = netdev_priv(ndev);
1292 int ret;
1293 - u32 isr, status;
1294
1295 ret = clk_prepare_enable(priv->bus_clk);
1296 if (ret) {
1297 @@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
1298 return ret;
1299 }
1300
1301 - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
1302 - isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
1303 - status = priv->read_reg(priv, XCAN_SR_OFFSET);
1304 -
1305 - if (netif_running(ndev)) {
1306 - if (isr & XCAN_IXR_BSOFF_MASK) {
1307 - priv->can.state = CAN_STATE_BUS_OFF;
1308 - priv->write_reg(priv, XCAN_SRR_OFFSET,
1309 - XCAN_SRR_RESET_MASK);
1310 - } else if ((status & XCAN_SR_ESTAT_MASK) ==
1311 - XCAN_SR_ESTAT_MASK) {
1312 - priv->can.state = CAN_STATE_ERROR_PASSIVE;
1313 - } else if (status & XCAN_SR_ERRWRN_MASK) {
1314 - priv->can.state = CAN_STATE_ERROR_WARNING;
1315 - } else {
1316 - priv->can.state = CAN_STATE_ERROR_ACTIVE;
1317 - }
1318 - netif_device_attach(ndev);
1319 - netif_start_queue(ndev);
1320 - }
1321 -
1322 return 0;
1323 }
1324
1325 @@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
1326 SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
1327 };
1328
1329 +static const struct xcan_devtype_data xcan_zynq_data = {
1330 + .caps = XCAN_CAP_WATERMARK,
1331 +};
1332 +
1333 +/* Match table for OF platform binding */
1334 +static const struct of_device_id xcan_of_match[] = {
1335 + { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
1336 + { .compatible = "xlnx,axi-can-1.00.a", },
1337 + { /* end of list */ },
1338 +};
1339 +MODULE_DEVICE_TABLE(of, xcan_of_match);
1340 +
1341 /**
1342 * xcan_probe - Platform registration call
1343 * @pdev: Handle to the platform device structure
1344 @@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
1345 struct resource *res; /* IO mem resources */
1346 struct net_device *ndev;
1347 struct xcan_priv *priv;
1348 + const struct of_device_id *of_id;
1349 + int caps = 0;
1350 void __iomem *addr;
1351 - int ret, rx_max, tx_max;
1352 + int ret, rx_max, tx_max, tx_fifo_depth;
1353
1354 /* Get the virtual base address for the device */
1355 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1356 @@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
1357 goto err;
1358 }
1359
1360 - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
1361 + ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
1362 + &tx_fifo_depth);
1363 if (ret < 0)
1364 goto err;
1365
1366 @@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
1367 if (ret < 0)
1368 goto err;
1369
1370 + of_id = of_match_device(xcan_of_match, &pdev->dev);
1371 + if (of_id) {
1372 + const struct xcan_devtype_data *devtype_data = of_id->data;
1373 +
1374 + if (devtype_data)
1375 + caps = devtype_data->caps;
1376 + }
1377 +
1378 + /* There is no way to directly figure out how many frames have been
1379 + * sent when the TXOK interrupt is processed. If watermark programming
1380 + * is supported, we can have 2 frames in the FIFO and use TXFEMP
1381 + * to determine if 1 or 2 frames have been sent.
1382 + * Theoretically we should be able to use TXFWMEMP to determine up
1383 + * to 3 frames, but it seems that after putting a second frame in the
1384 + * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
1385 + * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
1386 + * sent), which is not a sensible state - possibly TXFWMEMP is not
1387 + * completely synchronized with the rest of the bits?
1388 + */
1389 + if (caps & XCAN_CAP_WATERMARK)
1390 + tx_max = min(tx_fifo_depth, 2);
1391 + else
1392 + tx_max = 1;
1393 +
1394 /* Create a CAN device instance */
1395 ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
1396 if (!ndev)
1397 @@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
1398 CAN_CTRLMODE_BERR_REPORTING;
1399 priv->reg_base = addr;
1400 priv->tx_max = tx_max;
1401 + spin_lock_init(&priv->tx_lock);
1402
1403 /* Get IRQ for the device */
1404 ndev->irq = platform_get_irq(pdev, 0);
1405 @@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
1406
1407 pm_runtime_put(&pdev->dev);
1408
1409 - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
1410 + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
1411 priv->reg_base, ndev->irq, priv->can.clock.freq,
1412 - priv->tx_max);
1413 + tx_fifo_depth, priv->tx_max);
1414
1415 return 0;
1416
1417 @@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
1418 return 0;
1419 }
1420
1421 -/* Match table for OF platform binding */
1422 -static const struct of_device_id xcan_of_match[] = {
1423 - { .compatible = "xlnx,zynq-can-1.0", },
1424 - { .compatible = "xlnx,axi-can-1.00.a", },
1425 - { /* end of list */ },
1426 -};
1427 -MODULE_DEVICE_TABLE(of, xcan_of_match);
1428 -
1429 static struct platform_driver xcan_driver = {
1430 .probe = xcan_probe,
1431 .remove = xcan_remove,
1432 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
1433 index 5b4374f21d76..04371b0bba80 100644
1434 --- a/drivers/net/dsa/mv88e6xxx/chip.c
1435 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
1436 @@ -341,6 +341,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
1437 .xlate = irq_domain_xlate_twocell,
1438 };
1439
1440 +/* To be called with reg_lock held */
1441 static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
1442 {
1443 int irq, virq;
1444 @@ -360,9 +361,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
1445
1446 static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
1447 {
1448 - mv88e6xxx_g1_irq_free_common(chip);
1449 -
1450 + /*
1451 + * free_irq must be called without reg_lock taken because the irq
1452 + * handler takes this lock, too.
1453 + */
1454 free_irq(chip->irq, chip);
1455 +
1456 + mutex_lock(&chip->reg_lock);
1457 + mv88e6xxx_g1_irq_free_common(chip);
1458 + mutex_unlock(&chip->reg_lock);
1459 }
1460
1461 static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
1462 @@ -467,10 +474,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
1463
1464 static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
1465 {
1466 - mv88e6xxx_g1_irq_free_common(chip);
1467 -
1468 kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
1469 kthread_destroy_worker(chip->kworker);
1470 +
1471 + mutex_lock(&chip->reg_lock);
1472 + mv88e6xxx_g1_irq_free_common(chip);
1473 + mutex_unlock(&chip->reg_lock);
1474 }
1475
1476 int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
1477 @@ -4286,12 +4295,10 @@ out_g2_irq:
1478 if (chip->info->g2_irqs > 0)
1479 mv88e6xxx_g2_irq_free(chip);
1480 out_g1_irq:
1481 - mutex_lock(&chip->reg_lock);
1482 if (chip->irq > 0)
1483 mv88e6xxx_g1_irq_free(chip);
1484 else
1485 mv88e6xxx_irq_poll_free(chip);
1486 - mutex_unlock(&chip->reg_lock);
1487 out:
1488 return err;
1489 }
1490 @@ -4316,12 +4323,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
1491 if (chip->info->g2_irqs > 0)
1492 mv88e6xxx_g2_irq_free(chip);
1493
1494 - mutex_lock(&chip->reg_lock);
1495 if (chip->irq > 0)
1496 mv88e6xxx_g1_irq_free(chip);
1497 else
1498 mv88e6xxx_irq_poll_free(chip);
1499 - mutex_unlock(&chip->reg_lock);
1500 }
1501
1502 static const struct of_device_id mv88e6xxx_of_match[] = {
1503 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
1504 index 9128858479c4..2353ec829c04 100644
1505 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
1506 +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
1507 @@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1508 txq->txq_stats.tx_busy++;
1509 u64_stats_update_end(&txq->txq_stats.syncp);
1510 err = NETDEV_TX_BUSY;
1511 + wqe_size = 0;
1512 goto flush_skbs;
1513 }
1514
1515 diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1516 index 29e50f787349..db63f0ec3d01 100644
1517 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1518 +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
1519 @@ -2956,7 +2956,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
1520 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
1521 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
1522 struct res_srq *srq;
1523 - int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
1524 + int local_qpn = vhcr->in_modifier & 0xffffff;
1525
1526 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
1527 if (err)
1528 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1529 index 30cad07be2b5..065ff87f0bef 100644
1530 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
1531 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
1532 @@ -1092,9 +1092,6 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
1533 int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
1534 struct ethtool_flash *flash);
1535
1536 -int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1537 - void *cb_priv);
1538 -
1539 /* mlx5e generic netdev management API */
1540 struct net_device*
1541 mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
1542 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
1543 index 610d485c4b03..dda281cff880 100644
1544 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
1545 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
1546 @@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
1547 HLIST_HEAD(del_list);
1548 spin_lock_bh(&priv->fs.arfs.arfs_lock);
1549 mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
1550 - if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
1551 - break;
1552 if (!work_pending(&arfs_rule->arfs_work) &&
1553 rps_may_expire_flow(priv->netdev,
1554 arfs_rule->rxq, arfs_rule->flow_id,
1555 arfs_rule->filter_id)) {
1556 hlist_del_init(&arfs_rule->hlist);
1557 hlist_add_head(&arfs_rule->hlist, &del_list);
1558 + if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
1559 + break;
1560 }
1561 }
1562 spin_unlock_bh(&priv->fs.arfs.arfs_lock);
1563 @@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
1564 skb->protocol != htons(ETH_P_IPV6))
1565 return -EPROTONOSUPPORT;
1566
1567 + if (skb->encapsulation)
1568 + return -EPROTONOSUPPORT;
1569 +
1570 arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
1571 if (!arfs_t)
1572 return -EPROTONOSUPPORT;
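Note on the en_arfs.c hunk: moving the quota check inside the expiry branch means only rules actually queued for deletion count against MLX5E_ARFS_EXPIRY_QUOTA, so a table dominated by still-active rules can no longer exhaust the quota before any stale rule is reached. A minimal standalone sketch of that loop shape, with invented types and a made-up quota value (not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define EXPIRY_QUOTA 2                         /* assumed, not the driver's value */

struct fake_rule { bool may_expire; };

int main(void)
{
	struct fake_rule rules[] = {
		{ false }, { false }, { false },   /* busy rules: skipped, quota untouched */
		{ true }, { true }, { true }, { true },
	};
	int i, quota = 0, collected = 0;

	for (i = 0; i < (int)(sizeof(rules) / sizeof(rules[0])); i++) {
		if (!rules[i].may_expire)
			continue;                  /* no longer consumes the quota */
		collected++;                       /* "move to del_list" in the real driver */
		if (quota++ > EXPIRY_QUOTA)
			break;
	}
	printf("collected %d expirable rules\n", collected);
	return 0;
}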
1573 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1574 index c641d5656b2d..0c6015ce85fd 100644
1575 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1576 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
1577 @@ -272,7 +272,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
1578 }
1579
1580 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
1581 - struct ieee_ets *ets)
1582 + struct ieee_ets *ets,
1583 + bool zero_sum_allowed)
1584 {
1585 bool have_ets_tc = false;
1586 int bw_sum = 0;
1587 @@ -297,8 +298,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
1588 }
1589
1590 if (have_ets_tc && bw_sum != 100) {
1591 - netdev_err(netdev,
1592 - "Failed to validate ETS: BW sum is illegal\n");
1593 + if (bw_sum || (!bw_sum && !zero_sum_allowed))
1594 + netdev_err(netdev,
1595 + "Failed to validate ETS: BW sum is illegal\n");
1596 return -EINVAL;
1597 }
1598 return 0;
1599 @@ -313,7 +315,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
1600 if (!MLX5_CAP_GEN(priv->mdev, ets))
1601 return -EOPNOTSUPP;
1602
1603 - err = mlx5e_dbcnl_validate_ets(netdev, ets);
1604 + err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
1605 if (err)
1606 return err;
1607
1608 @@ -613,12 +615,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
1609 ets.prio_tc[i]);
1610 }
1611
1612 - err = mlx5e_dbcnl_validate_ets(netdev, &ets);
1613 - if (err) {
1614 - netdev_err(netdev,
1615 - "%s, Failed to validate ETS: %d\n", __func__, err);
1616 + err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
1617 + if (err)
1618 goto out;
1619 - }
1620
1621 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
1622 if (err) {
1623 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1624 index d3a1a2281e77..fdf40812a2a9 100644
1625 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1626 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
1627 @@ -3093,22 +3093,23 @@ out:
1628
1629 #ifdef CONFIG_MLX5_ESWITCH
1630 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
1631 - struct tc_cls_flower_offload *cls_flower)
1632 + struct tc_cls_flower_offload *cls_flower,
1633 + int flags)
1634 {
1635 switch (cls_flower->command) {
1636 case TC_CLSFLOWER_REPLACE:
1637 - return mlx5e_configure_flower(priv, cls_flower);
1638 + return mlx5e_configure_flower(priv, cls_flower, flags);
1639 case TC_CLSFLOWER_DESTROY:
1640 - return mlx5e_delete_flower(priv, cls_flower);
1641 + return mlx5e_delete_flower(priv, cls_flower, flags);
1642 case TC_CLSFLOWER_STATS:
1643 - return mlx5e_stats_flower(priv, cls_flower);
1644 + return mlx5e_stats_flower(priv, cls_flower, flags);
1645 default:
1646 return -EOPNOTSUPP;
1647 }
1648 }
1649
1650 -int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1651 - void *cb_priv)
1652 +static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1653 + void *cb_priv)
1654 {
1655 struct mlx5e_priv *priv = cb_priv;
1656
1657 @@ -3117,7 +3118,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
1658
1659 switch (type) {
1660 case TC_SETUP_CLSFLOWER:
1661 - return mlx5e_setup_tc_cls_flower(priv, type_data);
1662 + return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
1663 default:
1664 return -EOPNOTSUPP;
1665 }
1666 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1667 index 286565862341..c88eb80278dd 100644
1668 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1669 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
1670 @@ -723,15 +723,31 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
1671
1672 static int
1673 mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
1674 - struct tc_cls_flower_offload *cls_flower)
1675 + struct tc_cls_flower_offload *cls_flower, int flags)
1676 {
1677 switch (cls_flower->command) {
1678 case TC_CLSFLOWER_REPLACE:
1679 - return mlx5e_configure_flower(priv, cls_flower);
1680 + return mlx5e_configure_flower(priv, cls_flower, flags);
1681 case TC_CLSFLOWER_DESTROY:
1682 - return mlx5e_delete_flower(priv, cls_flower);
1683 + return mlx5e_delete_flower(priv, cls_flower, flags);
1684 case TC_CLSFLOWER_STATS:
1685 - return mlx5e_stats_flower(priv, cls_flower);
1686 + return mlx5e_stats_flower(priv, cls_flower, flags);
1687 + default:
1688 + return -EOPNOTSUPP;
1689 + }
1690 +}
1691 +
1692 +static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
1693 + void *cb_priv)
1694 +{
1695 + struct mlx5e_priv *priv = cb_priv;
1696 +
1697 + if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
1698 + return -EOPNOTSUPP;
1699 +
1700 + switch (type) {
1701 + case TC_SETUP_CLSFLOWER:
1702 + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
1703 default:
1704 return -EOPNOTSUPP;
1705 }
1706 @@ -747,7 +763,7 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
1707
1708 switch (type) {
1709 case TC_SETUP_CLSFLOWER:
1710 - return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
1711 + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
1712 default:
1713 return -EOPNOTSUPP;
1714 }
1715 @@ -1111,7 +1127,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1716
1717 uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
1718 upriv = netdev_priv(uplink_rpriv->netdev);
1719 - err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
1720 + err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
1721 upriv);
1722 if (err)
1723 goto err_neigh_cleanup;
1724 @@ -1126,7 +1142,7 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
1725 return 0;
1726
1727 err_egdev_cleanup:
1728 - tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
1729 + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
1730 upriv);
1731
1732 err_neigh_cleanup:
1733 @@ -1155,7 +1171,7 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
1734 uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
1735 REP_ETH);
1736 upriv = netdev_priv(uplink_rpriv->netdev);
1737 - tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
1738 + tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
1739 upriv);
1740 mlx5e_rep_neigh_cleanup(rpriv);
1741 mlx5e_detach_netdev(priv);
1742 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1743 index b94276db3ce9..a0ba6cfc9092 100644
1744 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1745 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
1746 @@ -61,12 +61,16 @@ struct mlx5_nic_flow_attr {
1747 struct mlx5_flow_table *hairpin_ft;
1748 };
1749
1750 +#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
1751 +
1752 enum {
1753 - MLX5E_TC_FLOW_ESWITCH = BIT(0),
1754 - MLX5E_TC_FLOW_NIC = BIT(1),
1755 - MLX5E_TC_FLOW_OFFLOADED = BIT(2),
1756 - MLX5E_TC_FLOW_HAIRPIN = BIT(3),
1757 - MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(4),
1758 + MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
1759 + MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
1760 + MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
1761 + MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
1762 + MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
1763 + MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
1764 + MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
1765 };
1766
1767 struct mlx5e_tc_flow {
1768 @@ -1890,6 +1894,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
1769 else
1770 actions = flow->nic_attr->action;
1771
1772 + if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
1773 + !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
1774 + return false;
1775 +
1776 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1777 return modify_header_match_supported(&parse_attr->spec, exts);
1778
1779 @@ -2566,8 +2574,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1780 return err;
1781 }
1782
1783 +static void get_flags(int flags, u8 *flow_flags)
1784 +{
1785 + u8 __flow_flags = 0;
1786 +
1787 + if (flags & MLX5E_TC_INGRESS)
1788 + __flow_flags |= MLX5E_TC_FLOW_INGRESS;
1789 + if (flags & MLX5E_TC_EGRESS)
1790 + __flow_flags |= MLX5E_TC_FLOW_EGRESS;
1791 +
1792 + *flow_flags = __flow_flags;
1793 +}
1794 +
1795 int mlx5e_configure_flower(struct mlx5e_priv *priv,
1796 - struct tc_cls_flower_offload *f)
1797 + struct tc_cls_flower_offload *f, int flags)
1798 {
1799 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1800 struct mlx5e_tc_flow_parse_attr *parse_attr;
1801 @@ -2576,11 +2596,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
1802 int attr_size, err = 0;
1803 u8 flow_flags = 0;
1804
1805 + get_flags(flags, &flow_flags);
1806 +
1807 if (esw && esw->mode == SRIOV_OFFLOADS) {
1808 - flow_flags = MLX5E_TC_FLOW_ESWITCH;
1809 + flow_flags |= MLX5E_TC_FLOW_ESWITCH;
1810 attr_size = sizeof(struct mlx5_esw_flow_attr);
1811 } else {
1812 - flow_flags = MLX5E_TC_FLOW_NIC;
1813 + flow_flags |= MLX5E_TC_FLOW_NIC;
1814 attr_size = sizeof(struct mlx5_nic_flow_attr);
1815 }
1816
1817 @@ -2639,7 +2661,7 @@ err_free:
1818 }
1819
1820 int mlx5e_delete_flower(struct mlx5e_priv *priv,
1821 - struct tc_cls_flower_offload *f)
1822 + struct tc_cls_flower_offload *f, int flags)
1823 {
1824 struct mlx5e_tc_flow *flow;
1825 struct mlx5e_tc_table *tc = &priv->fs.tc;
1826 @@ -2659,7 +2681,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
1827 }
1828
1829 int mlx5e_stats_flower(struct mlx5e_priv *priv,
1830 - struct tc_cls_flower_offload *f)
1831 + struct tc_cls_flower_offload *f, int flags)
1832 {
1833 struct mlx5e_tc_table *tc = &priv->fs.tc;
1834 struct mlx5e_tc_flow *flow;
1835 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
1836 index c14c263a739b..2255345c2e18 100644
1837 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
1838 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
1839 @@ -38,16 +38,23 @@
1840 #define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
1841
1842 #ifdef CONFIG_MLX5_ESWITCH
1843 +
1844 +enum {
1845 + MLX5E_TC_INGRESS = BIT(0),
1846 + MLX5E_TC_EGRESS = BIT(1),
1847 + MLX5E_TC_LAST_EXPORTED_BIT = 1,
1848 +};
1849 +
1850 int mlx5e_tc_init(struct mlx5e_priv *priv);
1851 void mlx5e_tc_cleanup(struct mlx5e_priv *priv);
1852
1853 int mlx5e_configure_flower(struct mlx5e_priv *priv,
1854 - struct tc_cls_flower_offload *f);
1855 + struct tc_cls_flower_offload *f, int flags);
1856 int mlx5e_delete_flower(struct mlx5e_priv *priv,
1857 - struct tc_cls_flower_offload *f);
1858 + struct tc_cls_flower_offload *f, int flags);
1859
1860 int mlx5e_stats_flower(struct mlx5e_priv *priv,
1861 - struct tc_cls_flower_offload *f);
1862 + struct tc_cls_flower_offload *f, int flags);
1863
1864 struct mlx5e_encap_entry;
1865 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
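The flag layout introduced in en_tc.h/en_tc.c packs the exported direction bits (MLX5E_TC_INGRESS/MLX5E_TC_EGRESS) into the low bits of the per-flow flags and shifts the driver-internal flags up by MLX5E_TC_FLOW_BASE, so the two sets cannot collide. A compilable sketch of the same packing scheme, using stand-in names:

#include <stdio.h>

#define BIT(n) (1u << (n))

enum {					/* bits shared with the callers */
	TC_INGRESS           = BIT(0),
	TC_EGRESS            = BIT(1),
	TC_LAST_EXPORTED_BIT = 1,
};

#define FLOW_BASE (TC_LAST_EXPORTED_BIT + 1)

enum {					/* driver-internal flow flags */
	FLOW_INGRESS   = TC_INGRESS,
	FLOW_EGRESS    = TC_EGRESS,
	FLOW_ESWITCH   = BIT(FLOW_BASE),
	FLOW_NIC       = BIT(FLOW_BASE + 1),
	FLOW_OFFLOADED = BIT(FLOW_BASE + 2),
};

int main(void)
{
	unsigned int flags = TC_EGRESS;	/* what the egdev callback passes */
	unsigned int flow_flags = 0;

	if (flags & TC_INGRESS)
		flow_flags |= FLOW_INGRESS;
	if (flags & TC_EGRESS)
		flow_flags |= FLOW_EGRESS;
	flow_flags |= FLOW_ESWITCH;	/* internal bit, no overlap with the above */

	printf("flow_flags = 0x%x\n", flow_flags);
	return 0;
}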
1866 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1867 index c3a18ddf5dba..0a75e9d441e6 100644
1868 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1869 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
1870 @@ -2221,6 +2221,6 @@ free_out:
1871
1872 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
1873 {
1874 - return esw->mode;
1875 + return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
1876 }
1877 EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
1878 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1879 index 857035583ccd..c14e7fc11d8a 100644
1880 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1881 +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
1882 @@ -487,6 +487,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
1883 void mlx5_init_clock(struct mlx5_core_dev *mdev)
1884 {
1885 struct mlx5_clock *clock = &mdev->clock;
1886 + u64 overflow_cycles;
1887 u64 ns;
1888 u64 frac = 0;
1889 u32 dev_freq;
1890 @@ -510,10 +511,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
1891
1892 /* Calculate period in seconds to call the overflow watchdog - to make
1893 * sure counter is checked at least once every wrap around.
1894 + * The period is calculated as the minimum between max HW cycles count
1895 + * (The clock source mask) and max amount of cycles that can be
1896 + * multiplied by clock multiplier where the result doesn't exceed
1897 + * 64bits.
1898 */
1899 - ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
1900 + overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
1901 + overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
1902 +
1903 + ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
1904 frac, &frac);
1905 - do_div(ns, NSEC_PER_SEC / 2 / HZ);
1906 + do_div(ns, NSEC_PER_SEC / HZ);
1907 clock->overflow_period = ns;
1908
1909 mdev->clock_info_page = alloc_page(GFP_KERNEL);
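The clock.c hunk bounds the cycle delta between watchdog runs by two limits: the product with the clock multiplier must still fit in 64 bits, and the delta must stay within half the hardware counter width so a wrap is always detected. A standalone sketch of that arithmetic, with assumed mult/mask values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mult = 1ULL << 21;           /* assumed cyclecounter multiplier */
	uint64_t mask = (1ULL << 41) - 1;     /* assumed 41-bit free-running counter */
	uint64_t overflow_cycles;

	/* largest delta whose product with mult still fits in 63 bits ... */
	overflow_cycles = (~0ULL >> 1) / mult;
	/* ... clamped to half the counter range so the watchdog never misses a wrap */
	if (overflow_cycles > (mask >> 1))
		overflow_cycles = mask >> 1;

	printf("check the counter at least every %llu cycles\n",
	       (unsigned long long)overflow_cycles);
	return 0;
}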
1910 diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
1911 index ec524d97869d..5ef61132604e 100644
1912 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
1913 +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
1914 @@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
1915 payload.dst_ipv4 = flow->daddr;
1916
1917 /* If entry has expired send dst IP with all other fields 0. */
1918 - if (!(neigh->nud_state & NUD_VALID)) {
1919 + if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
1920 nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
1921 /* Trigger ARP to verify invalid neighbour state. */
1922 neigh_event_send(neigh, NULL);
1923 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
1924 index c7aac1fc99e8..764b25fa470c 100644
1925 --- a/drivers/net/ethernet/realtek/r8169.c
1926 +++ b/drivers/net/ethernet/realtek/r8169.c
1927 @@ -8272,8 +8272,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1928 return rc;
1929 }
1930
1931 - /* override BIOS settings, use userspace tools to enable WOL */
1932 - __rtl8169_set_wol(tp, 0);
1933 + tp->saved_wolopts = __rtl8169_get_wol(tp);
1934
1935 if (rtl_tbi_enabled(tp)) {
1936 tp->set_speed = rtl8169_set_speed_tbi;
1937 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1938 index 05c1e8ef15e6..69a8106b9b98 100644
1939 --- a/drivers/net/phy/phy.c
1940 +++ b/drivers/net/phy/phy.c
1941 @@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
1942 * negotiation may already be done and aneg interrupt may not be
1943 * generated.
1944 */
1945 - if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
1946 + if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
1947 err = phy_aneg_done(phydev);
1948 if (err > 0) {
1949 trigger = true;
1950 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
1951 index 4b170599fa5e..3b050817bbda 100644
1952 --- a/drivers/net/vxlan.c
1953 +++ b/drivers/net/vxlan.c
1954 @@ -636,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
1955 return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
1956 }
1957
1958 -/* Add new entry to forwarding table -- assumes lock held */
1959 +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
1960 + const u8 *mac, __u16 state,
1961 + __be32 src_vni, __u8 ndm_flags)
1962 +{
1963 + struct vxlan_fdb *f;
1964 +
1965 + f = kmalloc(sizeof(*f), GFP_ATOMIC);
1966 + if (!f)
1967 + return NULL;
1968 + f->state = state;
1969 + f->flags = ndm_flags;
1970 + f->updated = f->used = jiffies;
1971 + f->vni = src_vni;
1972 + INIT_LIST_HEAD(&f->remotes);
1973 + memcpy(f->eth_addr, mac, ETH_ALEN);
1974 +
1975 + return f;
1976 +}
1977 +
1978 static int vxlan_fdb_create(struct vxlan_dev *vxlan,
1979 + const u8 *mac, union vxlan_addr *ip,
1980 + __u16 state, __be16 port, __be32 src_vni,
1981 + __be32 vni, __u32 ifindex, __u8 ndm_flags,
1982 + struct vxlan_fdb **fdb)
1983 +{
1984 + struct vxlan_rdst *rd = NULL;
1985 + struct vxlan_fdb *f;
1986 + int rc;
1987 +
1988 + if (vxlan->cfg.addrmax &&
1989 + vxlan->addrcnt >= vxlan->cfg.addrmax)
1990 + return -ENOSPC;
1991 +
1992 + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
1993 + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
1994 + if (!f)
1995 + return -ENOMEM;
1996 +
1997 + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
1998 + if (rc < 0) {
1999 + kfree(f);
2000 + return rc;
2001 + }
2002 +
2003 + ++vxlan->addrcnt;
2004 + hlist_add_head_rcu(&f->hlist,
2005 + vxlan_fdb_head(vxlan, mac, src_vni));
2006 +
2007 + *fdb = f;
2008 +
2009 + return 0;
2010 +}
2011 +
2012 +/* Add new entry to forwarding table -- assumes lock held */
2013 +static int vxlan_fdb_update(struct vxlan_dev *vxlan,
2014 const u8 *mac, union vxlan_addr *ip,
2015 __u16 state, __u16 flags,
2016 __be16 port, __be32 src_vni, __be32 vni,
2017 @@ -687,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
2018 if (!(flags & NLM_F_CREATE))
2019 return -ENOENT;
2020
2021 - if (vxlan->cfg.addrmax &&
2022 - vxlan->addrcnt >= vxlan->cfg.addrmax)
2023 - return -ENOSPC;
2024 -
2025 /* Disallow replace to add a multicast entry */
2026 if ((flags & NLM_F_REPLACE) &&
2027 (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
2028 return -EOPNOTSUPP;
2029
2030 netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
2031 - f = kmalloc(sizeof(*f), GFP_ATOMIC);
2032 - if (!f)
2033 - return -ENOMEM;
2034 -
2035 - notify = 1;
2036 - f->state = state;
2037 - f->flags = ndm_flags;
2038 - f->updated = f->used = jiffies;
2039 - f->vni = src_vni;
2040 - INIT_LIST_HEAD(&f->remotes);
2041 - memcpy(f->eth_addr, mac, ETH_ALEN);
2042 -
2043 - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
2044 - if (rc < 0) {
2045 - kfree(f);
2046 + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
2047 + vni, ifindex, ndm_flags, &f);
2048 + if (rc < 0)
2049 return rc;
2050 - }
2051 -
2052 - ++vxlan->addrcnt;
2053 - hlist_add_head_rcu(&f->hlist,
2054 - vxlan_fdb_head(vxlan, mac, src_vni));
2055 + notify = 1;
2056 }
2057
2058 if (notify) {
2059 @@ -741,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
2060 kfree(f);
2061 }
2062
2063 -static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
2064 +static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
2065 + bool do_notify)
2066 {
2067 netdev_dbg(vxlan->dev,
2068 "delete %pM\n", f->eth_addr);
2069
2070 --vxlan->addrcnt;
2071 - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
2072 + if (do_notify)
2073 + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
2074
2075 hlist_del_rcu(&f->hlist);
2076 call_rcu(&f->rcu, vxlan_fdb_free);
2077 @@ -863,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
2078 return -EAFNOSUPPORT;
2079
2080 spin_lock_bh(&vxlan->hash_lock);
2081 - err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
2082 + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
2083 port, src_vni, vni, ifindex, ndm->ndm_flags);
2084 spin_unlock_bh(&vxlan->hash_lock);
2085
2086 @@ -897,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
2087 goto out;
2088 }
2089
2090 - vxlan_fdb_destroy(vxlan, f);
2091 + vxlan_fdb_destroy(vxlan, f, true);
2092
2093 out:
2094 return 0;
2095 @@ -1006,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
2096
2097 /* close off race between vxlan_flush and incoming packets */
2098 if (netif_running(dev))
2099 - vxlan_fdb_create(vxlan, src_mac, src_ip,
2100 + vxlan_fdb_update(vxlan, src_mac, src_ip,
2101 NUD_REACHABLE,
2102 NLM_F_EXCL|NLM_F_CREATE,
2103 vxlan->cfg.dst_port,
2104 @@ -2360,7 +2395,7 @@ static void vxlan_cleanup(struct timer_list *t)
2105 "garbage collect %pM\n",
2106 f->eth_addr);
2107 f->state = NUD_STALE;
2108 - vxlan_fdb_destroy(vxlan, f);
2109 + vxlan_fdb_destroy(vxlan, f, true);
2110 } else if (time_before(timeout, next_timer))
2111 next_timer = timeout;
2112 }
2113 @@ -2411,7 +2446,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
2114 spin_lock_bh(&vxlan->hash_lock);
2115 f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
2116 if (f)
2117 - vxlan_fdb_destroy(vxlan, f);
2118 + vxlan_fdb_destroy(vxlan, f, true);
2119 spin_unlock_bh(&vxlan->hash_lock);
2120 }
2121
2122 @@ -2465,7 +2500,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
2123 continue;
2124 /* the all_zeros_mac entry is deleted at vxlan_uninit */
2125 if (!is_zero_ether_addr(f->eth_addr))
2126 - vxlan_fdb_destroy(vxlan, f);
2127 + vxlan_fdb_destroy(vxlan, f, true);
2128 }
2129 }
2130 spin_unlock_bh(&vxlan->hash_lock);
2131 @@ -3155,6 +3190,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
2132 {
2133 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2134 struct vxlan_dev *vxlan = netdev_priv(dev);
2135 + struct vxlan_fdb *f = NULL;
2136 int err;
2137
2138 err = vxlan_dev_configure(net, dev, conf, false, extack);
2139 @@ -3168,24 +3204,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
2140 err = vxlan_fdb_create(vxlan, all_zeros_mac,
2141 &vxlan->default_dst.remote_ip,
2142 NUD_REACHABLE | NUD_PERMANENT,
2143 - NLM_F_EXCL | NLM_F_CREATE,
2144 vxlan->cfg.dst_port,
2145 vxlan->default_dst.remote_vni,
2146 vxlan->default_dst.remote_vni,
2147 vxlan->default_dst.remote_ifindex,
2148 - NTF_SELF);
2149 + NTF_SELF, &f);
2150 if (err)
2151 return err;
2152 }
2153
2154 err = register_netdevice(dev);
2155 + if (err)
2156 + goto errout;
2157 +
2158 + err = rtnl_configure_link(dev, NULL);
2159 if (err) {
2160 - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
2161 - return err;
2162 + unregister_netdevice(dev);
2163 + goto errout;
2164 }
2165
2166 + /* notify default fdb entry */
2167 + if (f)
2168 + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
2169 +
2170 list_add(&vxlan->next, &vn->vxlan_list);
2171 return 0;
2172 +errout:
2173 + if (f)
2174 + vxlan_fdb_destroy(vxlan, f, false);
2175 + return err;
2176 }
2177
2178 static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
2179 @@ -3414,6 +3461,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
2180 struct vxlan_rdst *dst = &vxlan->default_dst;
2181 struct vxlan_rdst old_dst;
2182 struct vxlan_config conf;
2183 + struct vxlan_fdb *f = NULL;
2184 int err;
2185
2186 err = vxlan_nl2conf(tb, data,
2187 @@ -3442,16 +3490,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
2188 err = vxlan_fdb_create(vxlan, all_zeros_mac,
2189 &dst->remote_ip,
2190 NUD_REACHABLE | NUD_PERMANENT,
2191 - NLM_F_CREATE | NLM_F_APPEND,
2192 vxlan->cfg.dst_port,
2193 dst->remote_vni,
2194 dst->remote_vni,
2195 dst->remote_ifindex,
2196 - NTF_SELF);
2197 + NTF_SELF, &f);
2198 if (err) {
2199 spin_unlock_bh(&vxlan->hash_lock);
2200 return err;
2201 }
2202 + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
2203 }
2204 spin_unlock_bh(&vxlan->hash_lock);
2205 }
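The vxlan rework splits entry creation (vxlan_fdb_create, no netlink notification) from the netlink-driven path (vxlan_fdb_update), so the default entry built during device setup is only announced once register_netdevice() and rtnl_configure_link() have succeeded, and is torn down silently on the error path. A minimal sketch of that ordering, with invented stand-ins for the kernel calls:

#include <stdbool.h>
#include <stdio.h>

struct fdb { bool in_table; };

static int fdb_create(struct fdb *f)  { f->in_table = true; return 0; }
static void fdb_destroy(struct fdb *f, bool do_notify)
{
	f->in_table = false;
	if (do_notify)
		printf("RTM_DELNEIGH\n");   /* only for entries already announced */
}
static void fdb_notify(void)          { printf("RTM_NEWNEIGH\n"); }
static int register_dev(void)         { return 0; }   /* pretend success */
static int configure_link(void)       { return -1; }  /* pretend failure */

int main(void)
{
	struct fdb f = { false };
	int err;

	err = fdb_create(&f);               /* table insert only, no notification */
	if (err)
		return 1;

	err = register_dev();
	if (err)
		goto errout;

	err = configure_link();
	if (err)
		goto errout;

	fdb_notify();                       /* announce only after full setup */
	return 0;

errout:
	if (f.in_table)
		fdb_destroy(&f, false);     /* silent teardown, nothing was announced */
	return 1;
}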
2206 diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
2207 index 673fdce25530..ff7832798a77 100644
2208 --- a/drivers/staging/rtl8188eu/Kconfig
2209 +++ b/drivers/staging/rtl8188eu/Kconfig
2210 @@ -7,7 +7,6 @@ config R8188EU
2211 select LIB80211
2212 select LIB80211_CRYPT_WEP
2213 select LIB80211_CRYPT_CCMP
2214 - select LIB80211_CRYPT_TKIP
2215 ---help---
2216 This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
2217 If built as a module, it will be called r8188eu.
2218 diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
2219 index 05936a45eb93..c6857a5be12a 100644
2220 --- a/drivers/staging/rtl8188eu/core/rtw_recv.c
2221 +++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
2222 @@ -23,7 +23,6 @@
2223 #include <mon.h>
2224 #include <wifi.h>
2225 #include <linux/vmalloc.h>
2226 -#include <net/lib80211.h>
2227
2228 #define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
2229 #define LLC_HEADER_SIZE 6 /* LLC Header Length */
2230 @@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
2231 static int recvframe_chkmic(struct adapter *adapter,
2232 struct recv_frame *precvframe)
2233 {
2234 - int res = _SUCCESS;
2235 - struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
2236 - struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta);
2237 + int i, res = _SUCCESS;
2238 + u32 datalen;
2239 + u8 miccode[8];
2240 + u8 bmic_err = false, brpt_micerror = true;
2241 + u8 *pframe, *payload, *pframemic;
2242 + u8 *mickey;
2243 + struct sta_info *stainfo;
2244 + struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
2245 + struct security_priv *psecuritypriv = &adapter->securitypriv;
2246 +
2247 + struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
2248 + struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
2249 +
2250 + stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
2251
2252 if (prxattrib->encrypt == _TKIP_) {
2253 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
2254 + ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
2255 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
2256 + ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
2257 + __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
2258 + prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
2259 +
2260 + /* calculate mic code */
2261 if (stainfo) {
2262 - int key_idx;
2263 - const int iv_len = 8, icv_len = 4, key_length = 32;
2264 - struct sk_buff *skb = precvframe->pkt;
2265 - u8 key[32], iv[8], icv[4], *pframe = skb->data;
2266 - void *crypto_private = NULL;
2267 - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
2268 - struct security_priv *psecuritypriv = &adapter->securitypriv;
2269 -
2270 if (IS_MCAST(prxattrib->ra)) {
2271 if (!psecuritypriv) {
2272 res = _FAIL;
2273 @@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
2274 DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
2275 goto exit;
2276 }
2277 - key_idx = prxattrib->key_index;
2278 - memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
2279 - memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
2280 + mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
2281 +
2282 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
2283 + ("\n %s: bcmc key\n", __func__));
2284 } else {
2285 - key_idx = 0;
2286 - memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
2287 - memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
2288 + mickey = &stainfo->dot11tkiprxmickey.skey[0];
2289 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
2290 + ("\n %s: unicast key\n", __func__));
2291 }
2292
2293 - if (!crypto_ops) {
2294 - res = _FAIL;
2295 - goto exit_lib80211_tkip;
2296 - }
2297 + /* icv_len included the mic code */
2298 + datalen = precvframe->pkt->len-prxattrib->hdrlen -
2299 + prxattrib->iv_len-prxattrib->icv_len-8;
2300 + pframe = precvframe->pkt->data;
2301 + payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
2302
2303 - memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
2304 - memcpy(icv, pframe + skb->len - icv_len, icv_len);
2305 - memmove(pframe + iv_len, pframe, prxattrib->hdrlen);
2306 + RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
2307 + rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
2308 + (unsigned char)prxattrib->priority); /* care the length of the data */
2309
2310 - skb_pull(skb, iv_len);
2311 - skb_trim(skb, skb->len - icv_len);
2312 + pframemic = payload+datalen;
2313
2314 - crypto_private = crypto_ops->init(key_idx);
2315 - if (!crypto_private) {
2316 - res = _FAIL;
2317 - goto exit_lib80211_tkip;
2318 - }
2319 - if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
2320 - res = _FAIL;
2321 - goto exit_lib80211_tkip;
2322 - }
2323 - if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
2324 - res = _FAIL;
2325 - goto exit_lib80211_tkip;
2326 + bmic_err = false;
2327 +
2328 + for (i = 0; i < 8; i++) {
2329 + if (miccode[i] != *(pframemic+i)) {
2330 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
2331 + ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
2332 + __func__, i, miccode[i], i, *(pframemic + i)));
2333 + bmic_err = true;
2334 + }
2335 }
2336
2337 - memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
2338 - skb_push(skb, iv_len);
2339 - skb_put(skb, icv_len);
2340 + if (bmic_err) {
2341 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
2342 + ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
2343 + *(pframemic-8), *(pframemic-7), *(pframemic-6),
2344 + *(pframemic-5), *(pframemic-4), *(pframemic-3),
2345 + *(pframemic-2), *(pframemic-1)));
2346 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
2347 + ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
2348 + *(pframemic-16), *(pframemic-15), *(pframemic-14),
2349 + *(pframemic-13), *(pframemic-12), *(pframemic-11),
2350 + *(pframemic-10), *(pframemic-9)));
2351 + {
2352 + uint i;
2353
2354 - memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
2355 - memcpy(pframe + skb->len - icv_len, icv, icv_len);
2356 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
2357 + ("\n ======demp packet (len=%d)======\n",
2358 + precvframe->pkt->len));
2359 + for (i = 0; i < precvframe->pkt->len; i += 8) {
2360 + RT_TRACE(_module_rtl871x_recv_c_,
2361 + _drv_err_,
2362 + ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
2363 + *(precvframe->pkt->data+i),
2364 + *(precvframe->pkt->data+i+1),
2365 + *(precvframe->pkt->data+i+2),
2366 + *(precvframe->pkt->data+i+3),
2367 + *(precvframe->pkt->data+i+4),
2368 + *(precvframe->pkt->data+i+5),
2369 + *(precvframe->pkt->data+i+6),
2370 + *(precvframe->pkt->data+i+7)));
2371 + }
2372 + RT_TRACE(_module_rtl871x_recv_c_,
2373 + _drv_err_,
2374 + ("\n ====== demp packet end [len=%d]======\n",
2375 + precvframe->pkt->len));
2376 + RT_TRACE(_module_rtl871x_recv_c_,
2377 + _drv_err_,
2378 + ("\n hrdlen=%d,\n",
2379 + prxattrib->hdrlen));
2380 + }
2381
2382 -exit_lib80211_tkip:
2383 - if (crypto_ops && crypto_private)
2384 - crypto_ops->deinit(crypto_private);
2385 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
2386 + ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
2387 + prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
2388 + prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
2389 +
2390 + /* double check key_index for some timing issue , */
2391 + /* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
2392 + if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
2393 + brpt_micerror = false;
2394 +
2395 + if ((prxattrib->bdecrypted) && (brpt_micerror)) {
2396 + rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
2397 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
2398 + DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
2399 + } else {
2400 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
2401 + DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
2402 + }
2403 + res = _FAIL;
2404 + } else {
2405 + /* mic checked ok */
2406 + if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
2407 + psecuritypriv->bcheck_grpkey = true;
2408 + RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
2409 + }
2410 + }
2411 } else {
2412 RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
2413 ("%s: rtw_get_stainfo==NULL!!!\n", __func__));
2414 }
2415 +
2416 + skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
2417 }
2418
2419 exit:
2420 +
2421 return res;
2422 }
2423
2424 diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
2425 index bfe0b217e679..67a2490f055e 100644
2426 --- a/drivers/staging/rtl8188eu/core/rtw_security.c
2427 +++ b/drivers/staging/rtl8188eu/core/rtw_security.c
2428 @@ -650,71 +650,71 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
2429 return res;
2430 }
2431
2432 +/* The hlen isn't include the IV */
2433 u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
2434 -{
2435 - struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
2436 - u32 res = _SUCCESS;
2437 +{ /* exclude ICV */
2438 + u16 pnl;
2439 + u32 pnh;
2440 + u8 rc4key[16];
2441 + u8 ttkey[16];
2442 + u8 crc[4];
2443 + struct arc4context mycontext;
2444 + int length;
2445 +
2446 + u8 *pframe, *payload, *iv, *prwskey;
2447 + union pn48 dot11txpn;
2448 + struct sta_info *stainfo;
2449 + struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
2450 + struct security_priv *psecuritypriv = &padapter->securitypriv;
2451 + u32 res = _SUCCESS;
2452 +
2453 +
2454 + pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
2455
2456 /* 4 start to decrypt recvframe */
2457 if (prxattrib->encrypt == _TKIP_) {
2458 - struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta);
2459 -
2460 + stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
2461 if (stainfo) {
2462 - int key_idx;
2463 - const int iv_len = 8, icv_len = 4, key_length = 32;
2464 - void *crypto_private = NULL;
2465 - struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
2466 - u8 key[32], iv[8], icv[4], *pframe = skb->data;
2467 - struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
2468 - struct security_priv *psecuritypriv = &padapter->securitypriv;
2469 -
2470 if (IS_MCAST(prxattrib->ra)) {
2471 if (!psecuritypriv->binstallGrpkey) {
2472 res = _FAIL;
2473 DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
2474 goto exit;
2475 }
2476 - key_idx = prxattrib->key_index;
2477 - memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
2478 - memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
2479 + prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
2480 } else {
2481 - key_idx = 0;
2482 - memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
2483 - memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
2484 + RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
2485 + prwskey = &stainfo->dot118021x_UncstKey.skey[0];
2486 }
2487
2488 - if (!crypto_ops) {
2489 - res = _FAIL;
2490 - goto exit_lib80211_tkip;
2491 - }
2492 + iv = pframe+prxattrib->hdrlen;
2493 + payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
2494 + length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
2495
2496 - memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
2497 - memcpy(icv, pframe + skb->len - icv_len, icv_len);
2498 + GET_TKIP_PN(iv, dot11txpn);
2499
2500 - crypto_private = crypto_ops->init(key_idx);
2501 - if (!crypto_private) {
2502 - res = _FAIL;
2503 - goto exit_lib80211_tkip;
2504 - }
2505 - if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
2506 - res = _FAIL;
2507 - goto exit_lib80211_tkip;
2508 - }
2509 - if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
2510 - res = _FAIL;
2511 - goto exit_lib80211_tkip;
2512 - }
2513 + pnl = (u16)(dot11txpn.val);
2514 + pnh = (u32)(dot11txpn.val>>16);
2515
2516 - memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
2517 - skb_push(skb, iv_len);
2518 - skb_put(skb, icv_len);
2519 + phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
2520 + phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
2521
2522 - memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
2523 - memcpy(pframe + skb->len - icv_len, icv, icv_len);
2524 + /* 4 decrypt payload include icv */
2525
2526 -exit_lib80211_tkip:
2527 - if (crypto_ops && crypto_private)
2528 - crypto_ops->deinit(crypto_private);
2529 + arcfour_init(&mycontext, rc4key, 16);
2530 + arcfour_encrypt(&mycontext, payload, payload, length);
2531 +
2532 + *((__le32 *)crc) = getcrc32(payload, length-4);
2533 +
2534 + if (crc[3] != payload[length-1] ||
2535 + crc[2] != payload[length-2] ||
2536 + crc[1] != payload[length-3] ||
2537 + crc[0] != payload[length-4]) {
2538 + RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
2539 + ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
2540 + &crc, &payload[length-4]));
2541 + res = _FAIL;
2542 + }
2543 } else {
2544 RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
2545 res = _FAIL;
2546 diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
2547 index 0a1a7c259ab0..2f8f4ed62e40 100644
2548 --- a/drivers/staging/speakup/speakup_soft.c
2549 +++ b/drivers/staging/speakup/speakup_soft.c
2550 @@ -197,11 +197,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
2551 int chars_sent = 0;
2552 char __user *cp;
2553 char *init;
2554 + size_t bytes_per_ch = unicode ? 3 : 1;
2555 u16 ch;
2556 int empty;
2557 unsigned long flags;
2558 DEFINE_WAIT(wait);
2559
2560 + if (count < bytes_per_ch)
2561 + return -EINVAL;
2562 +
2563 spin_lock_irqsave(&speakup_info.spinlock, flags);
2564 while (1) {
2565 prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
2566 @@ -227,7 +231,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
2567 init = get_initstring();
2568
2569 /* Keep 3 bytes available for a 16bit UTF-8-encoded character */
2570 - while (chars_sent <= count - 3) {
2571 + while (chars_sent <= count - bytes_per_ch) {
2572 if (speakup_info.flushing) {
2573 speakup_info.flushing = 0;
2574 ch = '\x18';
2575 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
2576 index 998b32d0167e..75c4623ad779 100644
2577 --- a/drivers/usb/class/cdc-acm.c
2578 +++ b/drivers/usb/class/cdc-acm.c
2579 @@ -1831,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
2580 { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
2581 .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
2582 },
2583 + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
2584 + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
2585 + },
2586
2587 { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
2588 .driver_info = CLEAR_HALT_CONDITIONS,
2589 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
2590 index e3bf65e213cd..40c2d9878190 100644
2591 --- a/drivers/usb/core/hub.c
2592 +++ b/drivers/usb/core/hub.c
2593 @@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
2594
2595 if (!udev || udev->state == USB_STATE_NOTATTACHED) {
2596 /* Tell hub_wq to disconnect the device or
2597 - * check for a new connection
2598 + * check for a new connection or over current condition.
2599 + * Based on USB2.0 Spec Section 11.12.5,
2600 + * C_PORT_OVER_CURRENT could be set while
2601 + * PORT_OVER_CURRENT is not. So check for any of them.
2602 */
2603 if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
2604 - (portstatus & USB_PORT_STAT_OVERCURRENT))
2605 + (portstatus & USB_PORT_STAT_OVERCURRENT) ||
2606 + (portchange & USB_PORT_STAT_C_OVERCURRENT))
2607 set_bit(port1, hub->change_bits);
2608
2609 } else if (portstatus & USB_PORT_STAT_ENABLE) {
2610 diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
2611 index c51b73b3e048..3a5f0005fae5 100644
2612 --- a/drivers/usb/dwc2/hcd.c
2613 +++ b/drivers/usb/dwc2/hcd.c
2614 @@ -2627,34 +2627,29 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
2615
2616 #define DWC2_USB_DMA_ALIGN 4
2617
2618 -struct dma_aligned_buffer {
2619 - void *kmalloc_ptr;
2620 - void *old_xfer_buffer;
2621 - u8 data[0];
2622 -};
2623 -
2624 static void dwc2_free_dma_aligned_buffer(struct urb *urb)
2625 {
2626 - struct dma_aligned_buffer *temp;
2627 + void *stored_xfer_buffer;
2628
2629 if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
2630 return;
2631
2632 - temp = container_of(urb->transfer_buffer,
2633 - struct dma_aligned_buffer, data);
2634 + /* Restore urb->transfer_buffer from the end of the allocated area */
2635 + memcpy(&stored_xfer_buffer, urb->transfer_buffer +
2636 + urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
2637
2638 if (usb_urb_dir_in(urb))
2639 - memcpy(temp->old_xfer_buffer, temp->data,
2640 + memcpy(stored_xfer_buffer, urb->transfer_buffer,
2641 urb->transfer_buffer_length);
2642 - urb->transfer_buffer = temp->old_xfer_buffer;
2643 - kfree(temp->kmalloc_ptr);
2644 + kfree(urb->transfer_buffer);
2645 + urb->transfer_buffer = stored_xfer_buffer;
2646
2647 urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
2648 }
2649
2650 static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2651 {
2652 - struct dma_aligned_buffer *temp, *kmalloc_ptr;
2653 + void *kmalloc_ptr;
2654 size_t kmalloc_size;
2655
2656 if (urb->num_sgs || urb->sg ||
2657 @@ -2662,22 +2657,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
2658 !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
2659 return 0;
2660
2661 - /* Allocate a buffer with enough padding for alignment */
2662 + /*
2663 + * Allocate a buffer with enough padding for original transfer_buffer
2664 + * pointer. This allocation is guaranteed to be aligned properly for
2665 + * DMA
2666 + */
2667 kmalloc_size = urb->transfer_buffer_length +
2668 - sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
2669 + sizeof(urb->transfer_buffer);
2670
2671 kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
2672 if (!kmalloc_ptr)
2673 return -ENOMEM;
2674
2675 - /* Position our struct dma_aligned_buffer such that data is aligned */
2676 - temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
2677 - temp->kmalloc_ptr = kmalloc_ptr;
2678 - temp->old_xfer_buffer = urb->transfer_buffer;
2679 + /*
2680 + * Position value of original urb->transfer_buffer pointer to the end
2681 + * of allocation for later referencing
2682 + */
2683 + memcpy(kmalloc_ptr + urb->transfer_buffer_length,
2684 + &urb->transfer_buffer, sizeof(urb->transfer_buffer));
2685 +
2686 if (usb_urb_dir_out(urb))
2687 - memcpy(temp->data, urb->transfer_buffer,
2688 + memcpy(kmalloc_ptr, urb->transfer_buffer,
2689 urb->transfer_buffer_length);
2690 - urb->transfer_buffer = temp->data;
2691 + urb->transfer_buffer = kmalloc_ptr;
2692
2693 urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
2694
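The dwc2 change drops the header struct in front of the bounce buffer (which broke the alignment that kmalloc() guarantees for the allocation itself) and instead stashes the original transfer_buffer pointer in the padding after the payload. A standalone sketch of that pointer-stash technique, using plain malloc() and an assumed transfer length:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 64;                           /* assumed transfer length */
	char *orig = malloc(len);                  /* stand-in for urb->transfer_buffer */
	char *bounce;

	if (!orig)
		return 1;
	memset(orig, 0xab, len);

	/* allocate payload plus room for one pointer; the allocation start is
	 * naturally aligned, unlike a buffer preceded by a header struct */
	bounce = malloc(len + sizeof(orig));
	if (!bounce)
		return 1;
	memcpy(bounce, orig, len);                 /* OUT-direction copy */
	memcpy(bounce + len, &orig, sizeof(orig)); /* stash the original pointer */

	/* ... DMA would happen on 'bounce' here ... */

	/* completion: recover the original pointer from the end of the area */
	char *stored;
	memcpy(&stored, bounce + len, sizeof(stored));
	memcpy(stored, bounce, len);               /* IN-direction copy-back */
	free(bounce);

	printf("restored buffer %p, first byte 0x%02x\n",
	       (void *)stored, (unsigned char)stored[0]);
	free(stored);
	return 0;
}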
2695 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
2696 index 63a7cb87514a..330c591fd7d6 100644
2697 --- a/drivers/usb/gadget/composite.c
2698 +++ b/drivers/usb/gadget/composite.c
2699 @@ -1816,7 +1816,6 @@ unknown:
2700 if (cdev->use_os_string && cdev->os_desc_config &&
2701 (ctrl->bRequestType & USB_TYPE_VENDOR) &&
2702 ctrl->bRequest == cdev->b_vendor_code) {
2703 - struct usb_request *req;
2704 struct usb_configuration *os_desc_cfg;
2705 u8 *buf;
2706 int interface;
2707 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
2708 index 0294e4f18873..7e57439ac282 100644
2709 --- a/drivers/usb/gadget/function/f_fs.c
2710 +++ b/drivers/usb/gadget/function/f_fs.c
2711 @@ -3242,7 +3242,7 @@ static int ffs_func_setup(struct usb_function *f,
2712 __ffs_event_add(ffs, FUNCTIONFS_SETUP);
2713 spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
2714
2715 - return USB_GADGET_DELAYED_STATUS;
2716 + return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
2717 }
2718
2719 static bool ffs_func_req_match(struct usb_function *f,
2720 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2721 index 61c3dc2f3be5..5fb4319d7fd1 100644
2722 --- a/drivers/usb/host/xhci.c
2723 +++ b/drivers/usb/host/xhci.c
2724 @@ -2981,6 +2981,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
2725 if (!list_empty(&ep->ring->td_list)) {
2726 dev_err(&udev->dev, "EP not empty, refuse reset\n");
2727 spin_unlock_irqrestore(&xhci->lock, flags);
2728 + xhci_free_command(xhci, cfg_cmd);
2729 goto cleanup;
2730 }
2731 xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
2732 diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
2733 index 2da5f054257a..7cd63b0c1a46 100644
2734 --- a/drivers/vfio/vfio_iommu_spapr_tce.c
2735 +++ b/drivers/vfio/vfio_iommu_spapr_tce.c
2736 @@ -467,7 +467,7 @@ static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
2737 if (!mem)
2738 return -EINVAL;
2739
2740 - ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
2741 + ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
2742 if (ret)
2743 return -EINVAL;
2744
2745 diff --git a/include/net/tcp.h b/include/net/tcp.h
2746 index 9c9b3768b350..9cf770150539 100644
2747 --- a/include/net/tcp.h
2748 +++ b/include/net/tcp.h
2749 @@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
2750 struct pipe_inode_info *pipe, size_t len,
2751 unsigned int flags);
2752
2753 +void tcp_enter_quickack_mode(struct sock *sk);
2754 static inline void tcp_dec_quickack_mode(struct sock *sk,
2755 const unsigned int pkts)
2756 {
2757 @@ -535,6 +536,7 @@ void tcp_send_fin(struct sock *sk);
2758 void tcp_send_active_reset(struct sock *sk, gfp_t priority);
2759 int tcp_send_synack(struct sock *);
2760 void tcp_push_one(struct sock *, unsigned int mss_now);
2761 +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
2762 void tcp_send_ack(struct sock *sk);
2763 void tcp_send_delayed_ack(struct sock *sk);
2764 void tcp_send_loss_probe(struct sock *sk);
2765 @@ -826,6 +828,11 @@ struct tcp_skb_cb {
2766 * as TCP moves IP6CB into a different location in skb->cb[]
2767 */
2768 static inline int tcp_v6_iif(const struct sk_buff *skb)
2769 +{
2770 + return TCP_SKB_CB(skb)->header.h6.iif;
2771 +}
2772 +
2773 +static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
2774 {
2775 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
2776
2777 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
2778 index 19f6ab5de6e1..3dab3c7b6831 100644
2779 --- a/net/core/rtnetlink.c
2780 +++ b/net/core/rtnetlink.c
2781 @@ -2749,9 +2749,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
2782 return err;
2783 }
2784
2785 - dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2786 -
2787 - __dev_notify_flags(dev, old_flags, ~0U);
2788 + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
2789 + __dev_notify_flags(dev, old_flags, 0U);
2790 + } else {
2791 + dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
2792 + __dev_notify_flags(dev, old_flags, ~0U);
2793 + }
2794 return 0;
2795 }
2796 EXPORT_SYMBOL(rtnl_configure_link);
2797 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
2798 index a84d69c047ac..b2d457df7d86 100644
2799 --- a/net/core/skbuff.c
2800 +++ b/net/core/skbuff.c
2801 @@ -3705,6 +3705,7 @@ normal:
2802 net_warn_ratelimited(
2803 "skb_segment: too many frags: %u %u\n",
2804 pos, mss);
2805 + err = -EINVAL;
2806 goto err;
2807 }
2808
2809 @@ -3738,11 +3739,10 @@ skip_fraglist:
2810
2811 perform_csum_check:
2812 if (!csum) {
2813 - if (skb_has_shared_frag(nskb)) {
2814 - err = __skb_linearize(nskb);
2815 - if (err)
2816 - goto err;
2817 - }
2818 + if (skb_has_shared_frag(nskb) &&
2819 + __skb_linearize(nskb))
2820 + goto err;
2821 +
2822 if (!nskb->remcsum_offload)
2823 nskb->ip_summed = CHECKSUM_NONE;
2824 SKB_GSO_CB(nskb)->csum =
2825 diff --git a/net/core/sock.c b/net/core/sock.c
2826 index 3b6d02854e57..f82843756534 100644
2827 --- a/net/core/sock.c
2828 +++ b/net/core/sock.c
2829 @@ -2270,9 +2270,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
2830 pfrag->offset += use;
2831
2832 sge = sg + sg_curr - 1;
2833 - if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
2834 - sg->offset + sg->length == orig_offset) {
2835 - sg->length += use;
2836 + if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
2837 + sge->offset + sge->length == orig_offset) {
2838 + sge->length += use;
2839 } else {
2840 sge = sg + sg_curr;
2841 sg_unmark_end(sge);
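The sk_alloc_sg() fix is a pointer mix-up: the coalescing test must look at the previous entry (sge = sg + sg_curr - 1), not at the first element of the array. A small sketch of the corrected test over a plain array of segments, with simplified fields:

#include <stdio.h>

struct seg { unsigned int page, offset, length; };

int main(void)
{
	struct seg sg[4] = {
		{ .page = 1, .offset = 0, .length = 100 },
		{ .page = 2, .offset = 0, .length = 100 },
	};
	int sg_curr = 2, first_coalesce = 0;
	unsigned int page = 2, orig_offset = 100, use = 50;  /* continues sg[1] */
	struct seg *sge = &sg[sg_curr - 1];  /* previous entry; the bug tested sg[0] */

	if (sg_curr > first_coalesce && sge->page == page &&
	    sge->offset + sge->length == orig_offset) {
		sge->length += use;                          /* extend sg[1] in place */
	} else {
		sg[sg_curr].page = page;
		sg[sg_curr].offset = orig_offset;
		sg[sg_curr].length = use;
		sg_curr++;
	}

	printf("entries=%d sg[1].length=%u\n", sg_curr, sg[1].length);
	return 0;
}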
2842 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
2843 index b26a81a7de42..4af0625344a0 100644
2844 --- a/net/ipv4/igmp.c
2845 +++ b/net/ipv4/igmp.c
2846 @@ -1201,8 +1201,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
2847 if (pmc) {
2848 im->interface = pmc->interface;
2849 im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
2850 - im->sfmode = pmc->sfmode;
2851 - if (pmc->sfmode == MCAST_INCLUDE) {
2852 + if (im->sfmode == MCAST_INCLUDE) {
2853 im->tomb = pmc->tomb;
2854 im->sources = pmc->sources;
2855 for (psf = im->sources; psf; psf = psf->sf_next)
2856 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
2857 index d54abc097800..267b69cfea71 100644
2858 --- a/net/ipv4/ip_output.c
2859 +++ b/net/ipv4/ip_output.c
2860 @@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
2861 to->dev = from->dev;
2862 to->mark = from->mark;
2863
2864 + skb_copy_hash(to, from);
2865 +
2866 /* Copy the flags to each fragment. */
2867 IPCB(to)->flags = IPCB(from)->flags;
2868
2869 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
2870 index 57bbb060faaf..7c14c7818ead 100644
2871 --- a/net/ipv4/ip_sockglue.c
2872 +++ b/net/ipv4/ip_sockglue.c
2873 @@ -148,15 +148,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
2874 {
2875 struct sockaddr_in sin;
2876 const struct iphdr *iph = ip_hdr(skb);
2877 - __be16 *ports = (__be16 *)skb_transport_header(skb);
2878 + __be16 *ports;
2879 + int end;
2880
2881 - if (skb_transport_offset(skb) + 4 > (int)skb->len)
2882 + end = skb_transport_offset(skb) + 4;
2883 + if (end > 0 && !pskb_may_pull(skb, end))
2884 return;
2885
2886 /* All current transport protocols have the port numbers in the
2887 * first four bytes of the transport header and this function is
2888 * written with this assumption in mind.
2889 */
2890 + ports = (__be16 *)skb_transport_header(skb);
2891
2892 sin.sin_family = AF_INET;
2893 sin.sin_addr.s_addr = iph->daddr;
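The ip_cmsg_recv_dstaddr() fix replaces a length check against skb->len with pskb_may_pull(), so the first four bytes of the transport header are guaranteed to be in the linear area before they are read as port numbers. Outside the kernel there is no paged skb data, but the shape of the check can be sketched with a simplified skb:

#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	unsigned char data[64];
	unsigned int head_len;        /* bytes available in the linear area */
	unsigned int transport_off;   /* offset of the transport header */
};

/* stand-in for pskb_may_pull(): true only if 'len' bytes are linear */
static bool may_pull(const struct fake_skb *skb, unsigned int len)
{
	return len <= skb->head_len;
}

int main(void)
{
	struct fake_skb skb = { .head_len = 24, .transport_off = 20 };
	unsigned int end = skb.transport_off + 4;
	const unsigned char *ports;

	skb.data[20] = 0x00;
	skb.data[21] = 0x35;                          /* source port 53 */

	if (end > 0 && !may_pull(&skb, end))          /* the corrected check */
		return 0;                             /* ports not addressable */

	ports = skb.data + skb.transport_off;         /* safe to read now */
	printf("source port %u\n", (ports[0] << 8) | ports[1]);
	return 0;
}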
2894 diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
2895 index 5f5e5936760e..c78fb53988a1 100644
2896 --- a/net/ipv4/tcp_dctcp.c
2897 +++ b/net/ipv4/tcp_dctcp.c
2898 @@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
2899 struct dctcp *ca = inet_csk_ca(sk);
2900 struct tcp_sock *tp = tcp_sk(sk);
2901
2902 - /* State has changed from CE=0 to CE=1 and delayed
2903 - * ACK has not sent yet.
2904 - */
2905 - if (!ca->ce_state && ca->delayed_ack_reserved) {
2906 - u32 tmp_rcv_nxt;
2907 -
2908 - /* Save current rcv_nxt. */
2909 - tmp_rcv_nxt = tp->rcv_nxt;
2910 -
2911 - /* Generate previous ack with CE=0. */
2912 - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
2913 - tp->rcv_nxt = ca->prior_rcv_nxt;
2914 -
2915 - tcp_send_ack(sk);
2916 -
2917 - /* Recover current rcv_nxt. */
2918 - tp->rcv_nxt = tmp_rcv_nxt;
2919 + if (!ca->ce_state) {
2920 + /* State has changed from CE=0 to CE=1, force an immediate
2921 + * ACK to reflect the new CE state. If an ACK was delayed,
2922 + * send that first to reflect the prior CE state.
2923 + */
2924 + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
2925 + __tcp_send_ack(sk, ca->prior_rcv_nxt);
2926 + tcp_enter_quickack_mode(sk);
2927 }
2928
2929 ca->prior_rcv_nxt = tp->rcv_nxt;
2930 @@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
2931 struct dctcp *ca = inet_csk_ca(sk);
2932 struct tcp_sock *tp = tcp_sk(sk);
2933
2934 - /* State has changed from CE=1 to CE=0 and delayed
2935 - * ACK has not sent yet.
2936 - */
2937 - if (ca->ce_state && ca->delayed_ack_reserved) {
2938 - u32 tmp_rcv_nxt;
2939 -
2940 - /* Save current rcv_nxt. */
2941 - tmp_rcv_nxt = tp->rcv_nxt;
2942 -
2943 - /* Generate previous ack with CE=1. */
2944 - tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
2945 - tp->rcv_nxt = ca->prior_rcv_nxt;
2946 -
2947 - tcp_send_ack(sk);
2948 -
2949 - /* Recover current rcv_nxt. */
2950 - tp->rcv_nxt = tmp_rcv_nxt;
2951 + if (ca->ce_state) {
2952 + /* State has changed from CE=1 to CE=0, force an immediate
2953 + * ACK to reflect the new CE state. If an ACK was delayed,
2954 + * send that first to reflect the prior CE state.
2955 + */
2956 + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
2957 + __tcp_send_ack(sk, ca->prior_rcv_nxt);
2958 + tcp_enter_quickack_mode(sk);
2959 }
2960
2961 ca->prior_rcv_nxt = tp->rcv_nxt;
2962 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2963 index 1f25ebab25d2..0f5e9510c3fa 100644
2964 --- a/net/ipv4/tcp_input.c
2965 +++ b/net/ipv4/tcp_input.c
2966 @@ -195,13 +195,14 @@ static void tcp_incr_quickack(struct sock *sk)
2967 icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
2968 }
2969
2970 -static void tcp_enter_quickack_mode(struct sock *sk)
2971 +void tcp_enter_quickack_mode(struct sock *sk)
2972 {
2973 struct inet_connection_sock *icsk = inet_csk(sk);
2974 tcp_incr_quickack(sk);
2975 icsk->icsk_ack.pingpong = 0;
2976 icsk->icsk_ack.ato = TCP_ATO_MIN;
2977 }
2978 +EXPORT_SYMBOL(tcp_enter_quickack_mode);
2979
2980 /* Send ACKs quickly, if "quick" count is not exhausted
2981 * and the session is not interactive.
2982 @@ -4298,6 +4299,23 @@ static bool tcp_try_coalesce(struct sock *sk,
2983 return true;
2984 }
2985
2986 +static bool tcp_ooo_try_coalesce(struct sock *sk,
2987 + struct sk_buff *to,
2988 + struct sk_buff *from,
2989 + bool *fragstolen)
2990 +{
2991 + bool res = tcp_try_coalesce(sk, to, from, fragstolen);
2992 +
2993 + /* In case tcp_drop() is called later, update to->gso_segs */
2994 + if (res) {
2995 + u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
2996 + max_t(u16, 1, skb_shinfo(from)->gso_segs);
2997 +
2998 + skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
2999 + }
3000 + return res;
3001 +}
3002 +
3003 static void tcp_drop(struct sock *sk, struct sk_buff *skb)
3004 {
3005 sk_drops_add(sk, skb);
3006 @@ -4421,8 +4439,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
3007 /* In the typical case, we are adding an skb to the end of the list.
3008 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
3009 */
3010 - if (tcp_try_coalesce(sk, tp->ooo_last_skb,
3011 - skb, &fragstolen)) {
3012 + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
3013 + skb, &fragstolen)) {
3014 coalesce_done:
3015 tcp_grow_window(sk, skb);
3016 kfree_skb_partial(skb, fragstolen);
3017 @@ -4450,7 +4468,7 @@ coalesce_done:
3018 /* All the bits are present. Drop. */
3019 NET_INC_STATS(sock_net(sk),
3020 LINUX_MIB_TCPOFOMERGE);
3021 - __kfree_skb(skb);
3022 + tcp_drop(sk, skb);
3023 skb = NULL;
3024 tcp_dsack_set(sk, seq, end_seq);
3025 goto add_sack;
3026 @@ -4469,11 +4487,11 @@ coalesce_done:
3027 TCP_SKB_CB(skb1)->end_seq);
3028 NET_INC_STATS(sock_net(sk),
3029 LINUX_MIB_TCPOFOMERGE);
3030 - __kfree_skb(skb1);
3031 + tcp_drop(sk, skb1);
3032 goto merge_right;
3033 }
3034 - } else if (tcp_try_coalesce(sk, skb1,
3035 - skb, &fragstolen)) {
3036 + } else if (tcp_ooo_try_coalesce(sk, skb1,
3037 + skb, &fragstolen)) {
3038 goto coalesce_done;
3039 }
3040 p = &parent->rb_right;
3041 @@ -4833,6 +4851,7 @@ end:
3042 static void tcp_collapse_ofo_queue(struct sock *sk)
3043 {
3044 struct tcp_sock *tp = tcp_sk(sk);
3045 + u32 range_truesize, sum_tiny = 0;
3046 struct sk_buff *skb, *head;
3047 u32 start, end;
3048
3049 @@ -4844,6 +4863,7 @@ new_range:
3050 }
3051 start = TCP_SKB_CB(skb)->seq;
3052 end = TCP_SKB_CB(skb)->end_seq;
3053 + range_truesize = skb->truesize;
3054
3055 for (head = skb;;) {
3056 skb = skb_rb_next(skb);
3057 @@ -4854,11 +4874,20 @@ new_range:
3058 if (!skb ||
3059 after(TCP_SKB_CB(skb)->seq, end) ||
3060 before(TCP_SKB_CB(skb)->end_seq, start)) {
3061 - tcp_collapse(sk, NULL, &tp->out_of_order_queue,
3062 - head, skb, start, end);
3063 + /* Do not attempt collapsing tiny skbs */
3064 + if (range_truesize != head->truesize ||
3065 + end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
3066 + tcp_collapse(sk, NULL, &tp->out_of_order_queue,
3067 + head, skb, start, end);
3068 + } else {
3069 + sum_tiny += range_truesize;
3070 + if (sum_tiny > sk->sk_rcvbuf >> 3)
3071 + return;
3072 + }
3073 goto new_range;
3074 }
3075
3076 + range_truesize += skb->truesize;
3077 if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
3078 start = TCP_SKB_CB(skb)->seq;
3079 if (after(TCP_SKB_CB(skb)->end_seq, end))
3080 @@ -4873,6 +4902,7 @@ new_range:
3081 * 2) not add too big latencies if thousands of packets sit there.
3082 * (But if application shrinks SO_RCVBUF, we could still end up
3083 * freeing whole queue here)
3084 + * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
3085 *
3086 * Return true if queue has shrunk.
3087 */
3088 @@ -4880,20 +4910,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
3089 {
3090 struct tcp_sock *tp = tcp_sk(sk);
3091 struct rb_node *node, *prev;
3092 + int goal;
3093
3094 if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
3095 return false;
3096
3097 NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
3098 + goal = sk->sk_rcvbuf >> 3;
3099 node = &tp->ooo_last_skb->rbnode;
3100 do {
3101 prev = rb_prev(node);
3102 rb_erase(node, &tp->out_of_order_queue);
3103 + goal -= rb_to_skb(node)->truesize;
3104 tcp_drop(sk, rb_to_skb(node));
3105 - sk_mem_reclaim(sk);
3106 - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
3107 - !tcp_under_memory_pressure(sk))
3108 - break;
3109 + if (!prev || goal <= 0) {
3110 + sk_mem_reclaim(sk);
3111 + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
3112 + !tcp_under_memory_pressure(sk))
3113 + break;
3114 + goal = sk->sk_rcvbuf >> 3;
3115 + }
3116 node = prev;
3117 } while (node);
3118 tp->ooo_last_skb = rb_to_skb(prev);
3119 @@ -4928,6 +4964,9 @@ static int tcp_prune_queue(struct sock *sk)
3120 else if (tcp_under_memory_pressure(sk))
3121 tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
3122
3123 + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
3124 + return 0;
3125 +
3126 tcp_collapse_ofo_queue(sk);
3127 if (!skb_queue_empty(&sk->sk_receive_queue))
3128 tcp_collapse(sk, &sk->sk_receive_queue, NULL,
3129 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
3130 index d07e34f8e309..3049d10a1476 100644
3131 --- a/net/ipv4/tcp_output.c
3132 +++ b/net/ipv4/tcp_output.c
3133 @@ -160,8 +160,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
3134 }
3135
3136 /* Account for an ACK we sent. */
3137 -static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
3138 +static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
3139 + u32 rcv_nxt)
3140 {
3141 + struct tcp_sock *tp = tcp_sk(sk);
3142 +
3143 + if (unlikely(rcv_nxt != tp->rcv_nxt))
3144 + return; /* Special ACK sent by DCTCP to reflect ECN */
3145 tcp_dec_quickack_mode(sk, pkts);
3146 inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
3147 }
3148 @@ -1031,8 +1036,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
3149 * We are working here with either a clone of the original
3150 * SKB, or a fresh unique copy made by the retransmit engine.
3151 */
3152 -static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
3153 - gfp_t gfp_mask)
3154 +static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
3155 + int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
3156 {
3157 const struct inet_connection_sock *icsk = inet_csk(sk);
3158 struct inet_sock *inet;
3159 @@ -1108,7 +1113,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
3160 th->source = inet->inet_sport;
3161 th->dest = inet->inet_dport;
3162 th->seq = htonl(tcb->seq);
3163 - th->ack_seq = htonl(tp->rcv_nxt);
3164 + th->ack_seq = htonl(rcv_nxt);
3165 *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
3166 tcb->tcp_flags);
3167
3168 @@ -1149,7 +1154,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
3169 icsk->icsk_af_ops->send_check(sk, skb);
3170
3171 if (likely(tcb->tcp_flags & TCPHDR_ACK))
3172 - tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
3173 + tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
3174
3175 if (skb->len != tcp_header_size) {
3176 tcp_event_data_sent(tp, sk);
3177 @@ -1186,6 +1191,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
3178 return err;
3179 }
3180
3181 +static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
3182 + gfp_t gfp_mask)
3183 +{
3184 + return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
3185 + tcp_sk(sk)->rcv_nxt);
3186 +}
3187 +
3188 /* This routine just queues the buffer for sending.
3189 *
3190 * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
3191 @@ -3583,7 +3595,7 @@ void tcp_send_delayed_ack(struct sock *sk)
3192 }
3193
3194 /* This routine sends an ack and also updates the window. */
3195 -void tcp_send_ack(struct sock *sk)
3196 +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
3197 {
3198 struct sk_buff *buff;
3199
3200 @@ -3618,9 +3630,14 @@ void tcp_send_ack(struct sock *sk)
3201 skb_set_tcp_pure_ack(buff);
3202
3203 /* Send it off, this clears delayed acks for us. */
3204 - tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
3205 + __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
3206 +}
3207 +EXPORT_SYMBOL_GPL(__tcp_send_ack);
3208 +
3209 +void tcp_send_ack(struct sock *sk)
3210 +{
3211 + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
3212 }
3213 -EXPORT_SYMBOL_GPL(tcp_send_ack);
3214
3215 /* This routine sends a packet with an out of date sequence
3216 * number. It assumes the other end will try to ack it.
3217 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
3218 index 2ee08b6a86a4..1a1f876f8e28 100644
3219 --- a/net/ipv6/datagram.c
3220 +++ b/net/ipv6/datagram.c
3221 @@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
3222 }
3223 if (np->rxopt.bits.rxorigdstaddr) {
3224 struct sockaddr_in6 sin6;
3225 - __be16 *ports = (__be16 *) skb_transport_header(skb);
3226 + __be16 *ports;
3227 + int end;
3228
3229 - if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
3230 + end = skb_transport_offset(skb) + 4;
3231 + if (end <= 0 || pskb_may_pull(skb, end)) {
3232 /* All current transport protocols have the port numbers in the
3233 * first four bytes of the transport header and this function is
3234 * written with this assumption in mind.
3235 */
3236 + ports = (__be16 *)skb_transport_header(skb);
3237
3238 sin6.sin6_family = AF_INET6;
3239 sin6.sin6_addr = ipv6_hdr(skb)->daddr;
3240 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
3241 index d8c4b6374377..ca893a798d8a 100644
3242 --- a/net/ipv6/icmp.c
3243 +++ b/net/ipv6/icmp.c
3244 @@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
3245
3246 /* for local traffic to local address, skb dev is the loopback
3247 * device. Check if there is a dst attached to the skb and if so
3248 - * get the real device index.
3249 + * get the real device index. Same is needed for replies to a link
3250 + * local address on a device enslaved to an L3 master device
3251 */
3252 - if (unlikely(iif == LOOPBACK_IFINDEX)) {
3253 + if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
3254 const struct rt6_info *rt6 = skb_rt6_info(skb);
3255
3256 if (rt6)
3257 diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
3258 index af49f6cb5d3e..8f4c596a683d 100644
3259 --- a/net/ipv6/ip6_output.c
3260 +++ b/net/ipv6/ip6_output.c
3261 @@ -596,6 +596,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
3262 to->dev = from->dev;
3263 to->mark = from->mark;
3264
3265 + skb_copy_hash(to, from);
3266 +
3267 #ifdef CONFIG_NET_SCHED
3268 to->tc_index = from->tc_index;
3269 #endif
3270 diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
3271 index 793159d77d8a..0604a737eecf 100644
3272 --- a/net/ipv6/mcast.c
3273 +++ b/net/ipv6/mcast.c
3274 @@ -771,8 +771,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
3275 if (pmc) {
3276 im->idev = pmc->idev;
3277 im->mca_crcount = idev->mc_qrv;
3278 - im->mca_sfmode = pmc->mca_sfmode;
3279 - if (pmc->mca_sfmode == MCAST_INCLUDE) {
3280 + if (im->mca_sfmode == MCAST_INCLUDE) {
3281 im->mca_tomb = pmc->mca_tomb;
3282 im->mca_sources = pmc->mca_sources;
3283 for (psf = im->mca_sources; psf; psf = psf->sf_next)
3284 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
3285 index 5d4eb9d2c3a7..1adf7eb80d03 100644
3286 --- a/net/ipv6/tcp_ipv6.c
3287 +++ b/net/ipv6/tcp_ipv6.c
3288 @@ -934,7 +934,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
3289 &tcp_hashinfo, NULL, 0,
3290 &ipv6h->saddr,
3291 th->source, &ipv6h->daddr,
3292 - ntohs(th->source), tcp_v6_iif(skb),
3293 + ntohs(th->source),
3294 + tcp_v6_iif_l3_slave(skb),
3295 tcp_v6_sdif(skb));
3296 if (!sk1)
3297 goto out;
3298 @@ -1605,7 +1606,8 @@ do_time_wait:
3299 skb, __tcp_hdrlen(th),
3300 &ipv6_hdr(skb)->saddr, th->source,
3301 &ipv6_hdr(skb)->daddr,
3302 - ntohs(th->dest), tcp_v6_iif(skb),
3303 + ntohs(th->dest),
3304 + tcp_v6_iif_l3_slave(skb),
3305 sdif);
3306 if (sk2) {
3307 struct inet_timewait_sock *tw = inet_twsk(sk);
3308 diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
3309 index 9a7f91232de8..60708a4ebed4 100644
3310 --- a/net/tls/tls_sw.c
3311 +++ b/net/tls/tls_sw.c
3312 @@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
3313 return NULL;
3314 }
3315
3316 + if (sk->sk_shutdown & RCV_SHUTDOWN)
3317 + return NULL;
3318 +
3319 if (sock_flag(sk, SOCK_DONE))
3320 return NULL;
3321