Contents of /trunk/kernel-alx/patches-4.14/0158-4.14.59-all-fixes.patch
Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 73835 bytes
-added up to patches-4.14.79
1 | diff --git a/Makefile b/Makefile |
2 | index ffc9b4e3867e..81b0e99dce80 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 4 |
8 | PATCHLEVEL = 14 |
9 | -SUBLEVEL = 58 |
10 | +SUBLEVEL = 59 |
11 | EXTRAVERSION = |
12 | NAME = Petit Gorille |
13 | |
14 | @@ -642,6 +642,7 @@ KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,) |
15 | KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation) |
16 | KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow) |
17 | KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context) |
18 | +KBUILD_CFLAGS += $(call cc-disable-warning, attribute-alias) |
19 | |
20 | ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE |
21 | KBUILD_CFLAGS += $(call cc-option,-Oz,-Os) |
22 | diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c |
23 | index 10a405d593df..c782b10ddf50 100644 |
24 | --- a/arch/mips/ath79/common.c |
25 | +++ b/arch/mips/ath79/common.c |
26 | @@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init); |
27 | |
28 | void ath79_ddr_wb_flush(u32 reg) |
29 | { |
30 | - void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg; |
31 | + void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4); |
32 | |
33 | /* Flush the DDR write buffer. */ |
34 | __raw_writel(0x1, flush_reg); |
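Context, not part of the patch: the fix above turns a register index into a byte offset. A minimal userspace sketch (illustrative names, not the kernel API) of why the index must be scaled by the 4-byte register width when the base pointer is byte-addressed:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t regs[4] = { 0 };        /* stand-in for the MMIO register block */
    uint8_t *base = (uint8_t *)regs; /* byte-addressed base, like void __iomem * */
    unsigned int reg = 2;            /* a register *index*, not a byte offset */

    /* base + reg would land inside register 0; scaling by the 4-byte
     * register width addresses the start of register 'reg'. */
    uint32_t *flush_reg = (uint32_t *)(base + reg * 4);
    *flush_reg = 0x1;

    printf("regs[2] = 0x%x\n", regs[2]); /* prints 0x1 */
    return 0;
}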
35 | diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c |
36 | index 9632436d74d7..c2e94cf5ecda 100644 |
37 | --- a/arch/mips/pci/pci.c |
38 | +++ b/arch/mips/pci/pci.c |
39 | @@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, |
40 | phys_addr_t size = resource_size(rsrc); |
41 | |
42 | *start = fixup_bigphys_addr(rsrc->start, size); |
43 | - *end = rsrc->start + size; |
44 | + *end = rsrc->start + size - 1; |
45 | } |
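Context, not part of the patch: kernel resource ranges use an inclusive end address, so the last valid byte is start + size - 1. A tiny sketch under that convention:

#include <stdio.h>

int main(void)
{
    unsigned long long start = 0x10000000ULL, size = 0x1000ULL;

    /* A resource of 'size' bytes occupies [start, start + size - 1];
     * start + size is the first byte of the *next* range, which is why
     * the un-fixed code reported an end one byte past the resource. */
    printf("end = %#llx\n", start + size - 1); /* end = 0x10000fff */
    return 0;
}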
46 | diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h |
47 | index 44fdf4786638..6f67ff5a5267 100644 |
48 | --- a/arch/powerpc/include/asm/mmu_context.h |
49 | +++ b/arch/powerpc/include/asm/mmu_context.h |
50 | @@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm( |
51 | extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, |
52 | unsigned long ua, unsigned long entries); |
53 | extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
54 | - unsigned long ua, unsigned long *hpa); |
55 | + unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
56 | extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
57 | - unsigned long ua, unsigned long *hpa); |
58 | + unsigned long ua, unsigned int pageshift, unsigned long *hpa); |
59 | extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem); |
60 | extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem); |
61 | #endif |
62 | diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c |
63 | index 4dffa611376d..e14cec6bc339 100644 |
64 | --- a/arch/powerpc/kvm/book3s_64_vio.c |
65 | +++ b/arch/powerpc/kvm/book3s_64_vio.c |
66 | @@ -433,7 +433,7 @@ long kvmppc_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, |
67 | /* This only handles v2 IOMMU type, v1 is handled via ioctl() */ |
68 | return H_TOO_HARD; |
69 | |
70 | - if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa))) |
71 | + if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa))) |
72 | return H_HARDWARE; |
73 | |
74 | if (mm_iommu_mapped_inc(mem)) |
75 | diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c |
76 | index c32e9bfe75b1..648cf6c01348 100644 |
77 | --- a/arch/powerpc/kvm/book3s_64_vio_hv.c |
78 | +++ b/arch/powerpc/kvm/book3s_64_vio_hv.c |
79 | @@ -262,7 +262,8 @@ static long kvmppc_rm_tce_iommu_map(struct kvm *kvm, struct iommu_table *tbl, |
80 | if (!mem) |
81 | return H_TOO_HARD; |
82 | |
83 | - if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa))) |
84 | + if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift, |
85 | + &hpa))) |
86 | return H_HARDWARE; |
87 | |
88 | pua = (void *) vmalloc_to_phys(pua); |
89 | @@ -431,7 +432,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu, |
90 | |
91 | mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K); |
92 | if (mem) |
93 | - prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0; |
94 | + prereg = mm_iommu_ua_to_hpa_rm(mem, ua, |
95 | + IOMMU_PAGE_SHIFT_4K, &tces) == 0; |
96 | } |
97 | |
98 | if (!prereg) { |
99 | diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c |
100 | index e0a2d8e806ed..816055927ee4 100644 |
101 | --- a/arch/powerpc/mm/mmu_context_iommu.c |
102 | +++ b/arch/powerpc/mm/mmu_context_iommu.c |
103 | @@ -19,6 +19,7 @@ |
104 | #include <linux/hugetlb.h> |
105 | #include <linux/swap.h> |
106 | #include <asm/mmu_context.h> |
107 | +#include <asm/pte-walk.h> |
108 | |
109 | static DEFINE_MUTEX(mem_list_mutex); |
110 | |
111 | @@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t { |
112 | struct rcu_head rcu; |
113 | unsigned long used; |
114 | atomic64_t mapped; |
115 | + unsigned int pageshift; |
116 | u64 ua; /* userspace address */ |
117 | u64 entries; /* number of entries in hpas[] */ |
118 | u64 *hpas; /* vmalloc'ed */ |
119 | @@ -126,6 +128,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, |
120 | { |
121 | struct mm_iommu_table_group_mem_t *mem; |
122 | long i, j, ret = 0, locked_entries = 0; |
123 | + unsigned int pageshift; |
124 | + unsigned long flags; |
125 | struct page *page = NULL; |
126 | |
127 | mutex_lock(&mem_list_mutex); |
128 | @@ -160,6 +164,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, |
129 | goto unlock_exit; |
130 | } |
131 | |
132 | + /* |
133 | + * As a starting point for the maximum page size calculation, |
134 | + * use the natural alignment of @ua and @entries to allow IOMMU pages |
135 | + * smaller than huge pages but still bigger than PAGE_SIZE. |
136 | + */ |
137 | + mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT)); |
138 | mem->hpas = vzalloc(entries * sizeof(mem->hpas[0])); |
139 | if (!mem->hpas) { |
140 | kfree(mem); |
141 | @@ -200,6 +210,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries, |
142 | } |
143 | } |
144 | populate: |
145 | + pageshift = PAGE_SHIFT; |
146 | + if (PageCompound(page)) { |
147 | + pte_t *pte; |
148 | + struct page *head = compound_head(page); |
149 | + unsigned int compshift = compound_order(head); |
150 | + |
151 | + local_irq_save(flags); /* disables as well */ |
152 | + pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift); |
153 | + local_irq_restore(flags); |
154 | + |
155 | + /* Double check it is still the same pinned page */ |
156 | + if (pte && pte_page(*pte) == head && |
157 | + pageshift == compshift) |
158 | + pageshift = max_t(unsigned int, pageshift, |
159 | + PAGE_SHIFT); |
160 | + } |
161 | + mem->pageshift = min(mem->pageshift, pageshift); |
162 | mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT; |
163 | } |
164 | |
165 | @@ -350,7 +377,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm, |
166 | EXPORT_SYMBOL_GPL(mm_iommu_find); |
167 | |
168 | long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
169 | - unsigned long ua, unsigned long *hpa) |
170 | + unsigned long ua, unsigned int pageshift, unsigned long *hpa) |
171 | { |
172 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; |
173 | u64 *va = &mem->hpas[entry]; |
174 | @@ -358,6 +385,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
175 | if (entry >= mem->entries) |
176 | return -EFAULT; |
177 | |
178 | + if (pageshift > mem->pageshift) |
179 | + return -EFAULT; |
180 | + |
181 | *hpa = *va | (ua & ~PAGE_MASK); |
182 | |
183 | return 0; |
184 | @@ -365,7 +395,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem, |
185 | EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa); |
186 | |
187 | long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
188 | - unsigned long ua, unsigned long *hpa) |
189 | + unsigned long ua, unsigned int pageshift, unsigned long *hpa) |
190 | { |
191 | const long entry = (ua - mem->ua) >> PAGE_SHIFT; |
192 | void *va = &mem->hpas[entry]; |
193 | @@ -374,6 +404,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem, |
194 | if (entry >= mem->entries) |
195 | return -EFAULT; |
196 | |
197 | + if (pageshift > mem->pageshift) |
198 | + return -EFAULT; |
199 | + |
200 | pa = (void *) vmalloc_to_phys(va); |
201 | if (!pa) |
202 | return -EFAULT; |
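For context (not part of the patch): the new mem->pageshift seed is the natural alignment of the region, the lowest set bit of ua | (entries << PAGE_SHIFT). A hedged userspace sketch, with __builtin_ctzll standing in for the kernel's __ffs():

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
    unsigned long long ua = 0x40000000ULL; /* region start, 1 GiB aligned */
    unsigned long long entries = 16;       /* 16 pages -> 64 KiB of length */

    /* The lowest set bit of (start | length) bounds the largest page size
     * that can map the region: here the 64 KiB length is the limit. */
    unsigned int pageshift = __builtin_ctzll(ua | (entries << PAGE_SHIFT));

    printf("max pageshift = %u\n", pageshift); /* max pageshift = 16 */
    return 0;
}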
203 | diff --git a/arch/x86/xen/xen-pvh.S b/arch/x86/xen/xen-pvh.S |
204 | index e1a5fbeae08d..5d7554c025fd 100644 |
205 | --- a/arch/x86/xen/xen-pvh.S |
206 | +++ b/arch/x86/xen/xen-pvh.S |
207 | @@ -54,6 +54,9 @@ |
209 | * charge of setting up its own stack, GDT and IDT. |
209 | */ |
210 | |
211 | +#define PVH_GDT_ENTRY_CANARY 4 |
212 | +#define PVH_CANARY_SEL (PVH_GDT_ENTRY_CANARY * 8) |
213 | + |
214 | ENTRY(pvh_start_xen) |
215 | cld |
216 | |
217 | @@ -98,6 +101,12 @@ ENTRY(pvh_start_xen) |
218 | /* 64-bit entry point. */ |
219 | .code64 |
220 | 1: |
221 | + /* Set base address in stack canary descriptor. */ |
222 | + mov $MSR_GS_BASE,%ecx |
223 | + mov $_pa(canary), %eax |
224 | + xor %edx, %edx |
225 | + wrmsr |
226 | + |
227 | call xen_prepare_pvh |
228 | |
229 | /* startup_64 expects boot_params in %rsi. */ |
230 | @@ -107,6 +116,17 @@ ENTRY(pvh_start_xen) |
231 | |
232 | #else /* CONFIG_X86_64 */ |
233 | |
234 | + /* Set base address in stack canary descriptor. */ |
235 | + movl $_pa(gdt_start),%eax |
236 | + movl $_pa(canary),%ecx |
237 | + movw %cx, (PVH_GDT_ENTRY_CANARY * 8) + 2(%eax) |
238 | + shrl $16, %ecx |
239 | + movb %cl, (PVH_GDT_ENTRY_CANARY * 8) + 4(%eax) |
240 | + movb %ch, (PVH_GDT_ENTRY_CANARY * 8) + 7(%eax) |
241 | + |
242 | + mov $PVH_CANARY_SEL,%eax |
243 | + mov %eax,%gs |
244 | + |
245 | call mk_early_pgtbl_32 |
246 | |
247 | mov $_pa(initial_page_table), %eax |
248 | @@ -150,9 +170,13 @@ gdt_start: |
249 | .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* __KERNEL_CS */ |
250 | #endif |
251 | .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* __KERNEL_DS */ |
252 | + .quad GDT_ENTRY(0x4090, 0, 0x18) /* PVH_CANARY_SEL */ |
253 | gdt_end: |
254 | |
255 | - .balign 4 |
256 | + .balign 16 |
257 | +canary: |
258 | + .fill 48, 1, 0 |
259 | + |
260 | early_stack: |
261 | .fill 256, 1, 0 |
262 | early_stack_end: |
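Context, not part of the patch: the 32-bit path above patches the canary segment's base into the GDT entry byte by byte, because an x86 descriptor splits the 32-bit base across bytes 2-4 and 7. A small C sketch of that packing:

#include <stdint.h>
#include <stdio.h>

static void set_desc_base(uint8_t *desc, uint32_t base)
{
    desc[2] = base & 0xff;         /* base[7:0]   */
    desc[3] = (base >> 8) & 0xff;  /* base[15:8]  */
    desc[4] = (base >> 16) & 0xff; /* base[23:16] */
    desc[7] = (base >> 24) & 0xff; /* base[31:24] */
}

int main(void)
{
    uint8_t entry[8] = { 0 };

    set_desc_base(entry, 0x12345678);
    printf("%02x %02x %02x %02x\n",
           entry[2], entry[3], entry[4], entry[7]); /* 78 56 34 12 */
    return 0;
}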
263 | diff --git a/drivers/base/dd.c b/drivers/base/dd.c |
264 | index ad44b40fe284..55fc31f6fe7f 100644 |
265 | --- a/drivers/base/dd.c |
266 | +++ b/drivers/base/dd.c |
267 | @@ -401,14 +401,6 @@ re_probe: |
268 | goto probe_failed; |
269 | } |
270 | |
271 | - /* |
272 | - * Ensure devices are listed in devices_kset in correct order |
273 | - * It's important to move Dev to the end of devices_kset before |
274 | - * calling .probe, because it could be recursive and parent Dev |
275 | - * should always go first |
276 | - */ |
277 | - devices_kset_move_last(dev); |
278 | - |
279 | if (dev->bus->probe) { |
280 | ret = dev->bus->probe(dev); |
281 | if (ret) |
282 | diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c |
283 | index 5b9d549aa791..e7926da59214 100644 |
284 | --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c |
285 | +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c |
286 | @@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev) |
287 | nouveau_display(dev)->init = nv04_display_init; |
288 | nouveau_display(dev)->fini = nv04_display_fini; |
289 | |
290 | + /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */ |
291 | + dev->driver->driver_features &= ~DRIVER_ATOMIC; |
292 | + |
293 | nouveau_hw_save_vga_fonts(dev, 1); |
294 | |
295 | nv04_crtc_create(dev, 0); |
296 | diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c |
297 | index 595630d1fb9e..362a34cb435d 100644 |
298 | --- a/drivers/gpu/drm/nouveau/nouveau_drm.c |
299 | +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c |
300 | @@ -79,6 +79,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, " |
301 | int nouveau_modeset = -1; |
302 | module_param_named(modeset, nouveau_modeset, int, 0400); |
303 | |
304 | +MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); |
305 | +static int nouveau_atomic = 0; |
306 | +module_param_named(atomic, nouveau_atomic, int, 0400); |
307 | + |
308 | MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)"); |
309 | static int nouveau_runtime_pm = -1; |
310 | module_param_named(runpm, nouveau_runtime_pm, int, 0400); |
311 | @@ -383,6 +387,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev, |
312 | |
313 | pci_set_master(pdev); |
314 | |
315 | + if (nouveau_atomic) |
316 | + driver_pci.driver_features |= DRIVER_ATOMIC; |
317 | + |
318 | ret = drm_get_pci_dev(pdev, pent, &driver_pci); |
319 | if (ret) { |
320 | nvkm_device_del(&device); |
321 | diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c |
322 | index a29474528e85..926ec51ba5be 100644 |
323 | --- a/drivers/gpu/drm/nouveau/nv50_display.c |
324 | +++ b/drivers/gpu/drm/nouveau/nv50_display.c |
325 | @@ -4150,7 +4150,7 @@ nv50_disp_atomic_commit(struct drm_device *dev, |
326 | nv50_disp_atomic_commit_tail(state); |
327 | |
328 | drm_for_each_crtc(crtc, dev) { |
329 | - if (crtc->state->enable) { |
330 | + if (crtc->state->active) { |
331 | if (!drm->have_disp_power_ref) { |
332 | drm->have_disp_power_ref = true; |
333 | return 0; |
334 | @@ -4398,10 +4398,6 @@ nv50_display_destroy(struct drm_device *dev) |
335 | kfree(disp); |
336 | } |
337 | |
338 | -MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)"); |
339 | -static int nouveau_atomic = 0; |
340 | -module_param_named(atomic, nouveau_atomic, int, 0400); |
341 | - |
342 | int |
343 | nv50_display_create(struct drm_device *dev) |
344 | { |
345 | @@ -4426,8 +4422,6 @@ nv50_display_create(struct drm_device *dev) |
346 | disp->disp = &nouveau_display(dev)->disp; |
347 | dev->mode_config.funcs = &nv50_disp_func; |
348 | dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP; |
349 | - if (nouveau_atomic) |
350 | - dev->driver->driver_features |= DRIVER_ATOMIC; |
351 | |
352 | /* small shared memory area we use for notifiers and semaphores */ |
353 | ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM, |
354 | diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c |
355 | index 61084ba69a99..3d154eb63dcf 100644 |
356 | --- a/drivers/net/bonding/bond_options.c |
357 | +++ b/drivers/net/bonding/bond_options.c |
358 | @@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option) |
359 | static int bond_option_mode_set(struct bonding *bond, |
360 | const struct bond_opt_value *newval) |
361 | { |
362 | - if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) { |
363 | - netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", |
364 | - newval->string); |
365 | - /* disable arp monitoring */ |
366 | - bond->params.arp_interval = 0; |
367 | - /* set miimon to default value */ |
368 | - bond->params.miimon = BOND_DEFAULT_MIIMON; |
369 | - netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", |
370 | - bond->params.miimon); |
371 | + if (!bond_mode_uses_arp(newval->value)) { |
372 | + if (bond->params.arp_interval) { |
373 | + netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n", |
374 | + newval->string); |
375 | + /* disable arp monitoring */ |
376 | + bond->params.arp_interval = 0; |
377 | + } |
378 | + |
379 | + if (!bond->params.miimon) { |
380 | + /* set miimon to default value */ |
381 | + bond->params.miimon = BOND_DEFAULT_MIIMON; |
382 | + netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n", |
383 | + bond->params.miimon); |
384 | + } |
385 | } |
386 | |
387 | if (newval->value == BOND_MODE_ALB) |
388 | diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c |
389 | index 5d4e61741476..ca3fa82316c2 100644 |
390 | --- a/drivers/net/can/m_can/m_can.c |
391 | +++ b/drivers/net/can/m_can/m_can.c |
392 | @@ -1073,7 +1073,8 @@ static void m_can_chip_config(struct net_device *dev) |
393 | |
394 | } else { |
395 | /* Version 3.1.x or 3.2.x */ |
396 | - cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE); |
397 | + cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE | |
398 | + CCCR_NISO); |
399 | |
400 | /* Only 3.2.x has NISO Bit implemented */ |
401 | if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) |
402 | diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c |
403 | index 3c51a884db87..fa689854f16b 100644 |
404 | --- a/drivers/net/can/peak_canfd/peak_pciefd_main.c |
405 | +++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c |
406 | @@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2"); |
407 | #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ |
408 | #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ |
409 | |
410 | +#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \ |
411 | + ((u32)(y) << 16) | \ |
412 | + ((u32)(z) << 8)) |
413 | + |
414 | /* System Control Registers Bits */ |
415 | #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ |
416 | #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ |
417 | @@ -783,6 +787,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev, |
418 | "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, |
419 | hw_ver_major, hw_ver_minor, hw_ver_sub); |
420 | |
421 | +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
422 | + /* The DMA logic in FW < v3.3.0 doesn't correctly handle a mix of 32-bit |
423 | + * and 64-bit logical addresses: this workaround forces usage of 32-bit |
424 | + * DMA addresses only when such a fw is detected. |
425 | + */ |
426 | + if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) < |
427 | + PCIEFD_FW_VERSION(3, 3, 0)) { |
428 | + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); |
429 | + if (err) |
430 | + dev_warn(&pdev->dev, |
431 | + "warning: can't set DMA mask %llxh (err %d)\n", |
432 | + DMA_BIT_MASK(32), err); |
433 | + } |
434 | +#endif |
435 | + |
436 | /* stop system clock */ |
437 | pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, |
438 | PCIEFD_REG_SYS_CTL_CLR); |
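Context, not part of the patch: packing major/minor/sub into one u32, as PCIEFD_FW_VERSION does above, makes the firmware check a single integer compare. An illustrative sketch:

#include <stdint.h>
#include <stdio.h>

#define FW_VERSION(x, y, z) (((uint32_t)(x) << 24) | \
                             ((uint32_t)(y) << 16) | \
                             ((uint32_t)(z) << 8))

int main(void)
{
    /* e.g. firmware 2.9.5 predates the 3.3.0 DMA fix */
    if (FW_VERSION(2, 9, 5) < FW_VERSION(3, 3, 0))
        printf("old firmware: restrict DMA to 32-bit addresses\n");
    return 0;
}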
439 | diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c |
440 | index 89aec07c225f..5a24039733ef 100644 |
441 | --- a/drivers/net/can/xilinx_can.c |
442 | +++ b/drivers/net/can/xilinx_can.c |
443 | @@ -2,6 +2,7 @@ |
444 | * |
445 | * Copyright (C) 2012 - 2014 Xilinx, Inc. |
446 | * Copyright (C) 2009 PetaLogix. All rights reserved. |
447 | + * Copyright (C) 2017 Sandvik Mining and Construction Oy |
448 | * |
449 | * Description: |
450 | * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. |
451 | @@ -25,8 +26,10 @@ |
452 | #include <linux/module.h> |
453 | #include <linux/netdevice.h> |
454 | #include <linux/of.h> |
455 | +#include <linux/of_device.h> |
456 | #include <linux/platform_device.h> |
457 | #include <linux/skbuff.h> |
458 | +#include <linux/spinlock.h> |
459 | #include <linux/string.h> |
460 | #include <linux/types.h> |
461 | #include <linux/can/dev.h> |
462 | @@ -101,7 +104,7 @@ enum xcan_reg { |
463 | #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ |
464 | XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ |
465 | XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ |
466 | - XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) |
467 | + XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK) |
468 | |
469 | /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ |
470 | #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ |
471 | @@ -118,6 +121,7 @@ enum xcan_reg { |
472 | /** |
474 | * struct xcan_priv - This definition defines the CAN driver instance |
474 | * @can: CAN private data structure. |
475 | + * @tx_lock: Lock for synchronizing TX interrupt handling |
476 | * @tx_head: Tx CAN packets ready to send on the queue |
478 | * @tx_tail: Tx CAN packets successfully sent on the queue |
479 | * @tx_max: Maximum number of packets the driver can send |
479 | @@ -132,6 +136,7 @@ enum xcan_reg { |
480 | */ |
481 | struct xcan_priv { |
482 | struct can_priv can; |
483 | + spinlock_t tx_lock; |
484 | unsigned int tx_head; |
485 | unsigned int tx_tail; |
486 | unsigned int tx_max; |
487 | @@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = { |
488 | .brp_inc = 1, |
489 | }; |
490 | |
491 | +#define XCAN_CAP_WATERMARK 0x0001 |
492 | +struct xcan_devtype_data { |
493 | + unsigned int caps; |
494 | +}; |
495 | + |
496 | /** |
497 | * xcan_write_reg_le - Write a value to the device register little endian |
498 | * @priv: Driver private data structure |
499 | @@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev) |
500 | usleep_range(500, 10000); |
501 | } |
502 | |
503 | + /* reset clears FIFOs */ |
504 | + priv->tx_head = 0; |
505 | + priv->tx_tail = 0; |
506 | + |
507 | return 0; |
508 | } |
509 | |
510 | @@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
511 | struct net_device_stats *stats = &ndev->stats; |
512 | struct can_frame *cf = (struct can_frame *)skb->data; |
513 | u32 id, dlc, data[2] = {0, 0}; |
514 | + unsigned long flags; |
515 | |
516 | if (can_dropped_invalid_skb(ndev, skb)) |
517 | return NETDEV_TX_OK; |
518 | @@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
519 | data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); |
520 | |
521 | can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); |
522 | + |
523 | + spin_lock_irqsave(&priv->tx_lock, flags); |
524 | + |
525 | priv->tx_head++; |
526 | |
527 | /* Write the Frame to Xilinx CAN TX FIFO */ |
528 | @@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) |
529 | stats->tx_bytes += cf->can_dlc; |
530 | } |
531 | |
532 | + /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ |
533 | + if (priv->tx_max > 1) |
534 | + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); |
535 | + |
536 | /* Check if the TX buffer is full */ |
537 | if ((priv->tx_head - priv->tx_tail) == priv->tx_max) |
538 | netif_stop_queue(ndev); |
539 | |
540 | + spin_unlock_irqrestore(&priv->tx_lock, flags); |
541 | + |
542 | return NETDEV_TX_OK; |
543 | } |
544 | |
545 | @@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev) |
546 | return 1; |
547 | } |
548 | |
549 | +/** |
550 | + * xcan_current_error_state - Get current error state from HW |
551 | + * @ndev: Pointer to net_device structure |
552 | + * |
553 | + * Checks the current CAN error state from the HW. Note that this |
554 | + * only checks for ERROR_PASSIVE and ERROR_WARNING. |
555 | + * |
556 | + * Return: |
557 | + * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE |
558 | + * otherwise. |
559 | + */ |
560 | +static enum can_state xcan_current_error_state(struct net_device *ndev) |
561 | +{ |
562 | + struct xcan_priv *priv = netdev_priv(ndev); |
563 | + u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); |
564 | + |
565 | + if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) |
566 | + return CAN_STATE_ERROR_PASSIVE; |
567 | + else if (status & XCAN_SR_ERRWRN_MASK) |
568 | + return CAN_STATE_ERROR_WARNING; |
569 | + else |
570 | + return CAN_STATE_ERROR_ACTIVE; |
571 | +} |
572 | + |
573 | +/** |
574 | + * xcan_set_error_state - Set new CAN error state |
575 | + * @ndev: Pointer to net_device structure |
576 | + * @new_state: The new CAN state to be set |
577 | + * @cf: Error frame to be populated or NULL |
578 | + * |
579 | + * Set new CAN error state for the device, updating statistics and |
580 | + * populating the error frame if given. |
581 | + */ |
582 | +static void xcan_set_error_state(struct net_device *ndev, |
583 | + enum can_state new_state, |
584 | + struct can_frame *cf) |
585 | +{ |
586 | + struct xcan_priv *priv = netdev_priv(ndev); |
587 | + u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); |
588 | + u32 txerr = ecr & XCAN_ECR_TEC_MASK; |
589 | + u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; |
590 | + |
591 | + priv->can.state = new_state; |
592 | + |
593 | + if (cf) { |
594 | + cf->can_id |= CAN_ERR_CRTL; |
595 | + cf->data[6] = txerr; |
596 | + cf->data[7] = rxerr; |
597 | + } |
598 | + |
599 | + switch (new_state) { |
600 | + case CAN_STATE_ERROR_PASSIVE: |
601 | + priv->can.can_stats.error_passive++; |
602 | + if (cf) |
603 | + cf->data[1] = (rxerr > 127) ? |
604 | + CAN_ERR_CRTL_RX_PASSIVE : |
605 | + CAN_ERR_CRTL_TX_PASSIVE; |
606 | + break; |
607 | + case CAN_STATE_ERROR_WARNING: |
608 | + priv->can.can_stats.error_warning++; |
609 | + if (cf) |
610 | + cf->data[1] |= (txerr > rxerr) ? |
611 | + CAN_ERR_CRTL_TX_WARNING : |
612 | + CAN_ERR_CRTL_RX_WARNING; |
613 | + break; |
614 | + case CAN_STATE_ERROR_ACTIVE: |
615 | + if (cf) |
616 | + cf->data[1] |= CAN_ERR_CRTL_ACTIVE; |
617 | + break; |
618 | + default: |
619 | + /* non-ERROR states are handled elsewhere */ |
620 | + WARN_ON(1); |
621 | + break; |
622 | + } |
623 | +} |
624 | + |
625 | +/** |
626 | + * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX |
627 | + * @ndev: Pointer to net_device structure |
628 | + * |
629 | + * If the device is in an ERROR-WARNING or ERROR-PASSIVE state, check if |
630 | + * the performed RX/TX has caused it to drop to a lesser state and set |
631 | + * the interface state accordingly. |
632 | + */ |
633 | +static void xcan_update_error_state_after_rxtx(struct net_device *ndev) |
634 | +{ |
635 | + struct xcan_priv *priv = netdev_priv(ndev); |
636 | + enum can_state old_state = priv->can.state; |
637 | + enum can_state new_state; |
638 | + |
639 | + /* changing error state due to successful frame RX/TX can only |
640 | + * occur from these states |
641 | + */ |
642 | + if (old_state != CAN_STATE_ERROR_WARNING && |
643 | + old_state != CAN_STATE_ERROR_PASSIVE) |
644 | + return; |
645 | + |
646 | + new_state = xcan_current_error_state(ndev); |
647 | + |
648 | + if (new_state != old_state) { |
649 | + struct sk_buff *skb; |
650 | + struct can_frame *cf; |
651 | + |
652 | + skb = alloc_can_err_skb(ndev, &cf); |
653 | + |
654 | + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); |
655 | + |
656 | + if (skb) { |
657 | + struct net_device_stats *stats = &ndev->stats; |
658 | + |
659 | + stats->rx_packets++; |
660 | + stats->rx_bytes += cf->can_dlc; |
661 | + netif_rx(skb); |
662 | + } |
663 | + } |
664 | +} |
665 | + |
666 | /** |
667 | * xcan_err_interrupt - error frame Isr |
668 | * @ndev: net_device pointer |
669 | @@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) |
670 | struct net_device_stats *stats = &ndev->stats; |
671 | struct can_frame *cf; |
672 | struct sk_buff *skb; |
673 | - u32 err_status, status, txerr = 0, rxerr = 0; |
674 | + u32 err_status; |
675 | |
676 | skb = alloc_can_err_skb(ndev, &cf); |
677 | |
678 | err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); |
679 | priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); |
680 | - txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; |
681 | - rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & |
682 | - XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); |
683 | - status = priv->read_reg(priv, XCAN_SR_OFFSET); |
684 | |
685 | if (isr & XCAN_IXR_BSOFF_MASK) { |
686 | priv->can.state = CAN_STATE_BUS_OFF; |
687 | @@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) |
688 | can_bus_off(ndev); |
689 | if (skb) |
690 | cf->can_id |= CAN_ERR_BUSOFF; |
691 | - } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { |
692 | - priv->can.state = CAN_STATE_ERROR_PASSIVE; |
693 | - priv->can.can_stats.error_passive++; |
694 | - if (skb) { |
695 | - cf->can_id |= CAN_ERR_CRTL; |
696 | - cf->data[1] = (rxerr > 127) ? |
697 | - CAN_ERR_CRTL_RX_PASSIVE : |
698 | - CAN_ERR_CRTL_TX_PASSIVE; |
699 | - cf->data[6] = txerr; |
700 | - cf->data[7] = rxerr; |
701 | - } |
702 | - } else if (status & XCAN_SR_ERRWRN_MASK) { |
703 | - priv->can.state = CAN_STATE_ERROR_WARNING; |
704 | - priv->can.can_stats.error_warning++; |
705 | - if (skb) { |
706 | - cf->can_id |= CAN_ERR_CRTL; |
707 | - cf->data[1] |= (txerr > rxerr) ? |
708 | - CAN_ERR_CRTL_TX_WARNING : |
709 | - CAN_ERR_CRTL_RX_WARNING; |
710 | - cf->data[6] = txerr; |
711 | - cf->data[7] = rxerr; |
712 | - } |
713 | + } else { |
714 | + enum can_state new_state = xcan_current_error_state(ndev); |
715 | + |
716 | + xcan_set_error_state(ndev, new_state, skb ? cf : NULL); |
717 | } |
718 | |
719 | /* Check for Arbitration lost interrupt */ |
720 | @@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) |
721 | if (isr & XCAN_IXR_RXOFLW_MASK) { |
722 | stats->rx_over_errors++; |
723 | stats->rx_errors++; |
724 | - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); |
725 | if (skb) { |
726 | cf->can_id |= CAN_ERR_CRTL; |
727 | cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; |
728 | @@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota) |
729 | |
730 | isr = priv->read_reg(priv, XCAN_ISR_OFFSET); |
731 | while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { |
732 | - if (isr & XCAN_IXR_RXOK_MASK) { |
733 | - priv->write_reg(priv, XCAN_ICR_OFFSET, |
734 | - XCAN_IXR_RXOK_MASK); |
735 | - work_done += xcan_rx(ndev); |
736 | - } else { |
737 | - priv->write_reg(priv, XCAN_ICR_OFFSET, |
738 | - XCAN_IXR_RXNEMP_MASK); |
739 | - break; |
740 | - } |
741 | + work_done += xcan_rx(ndev); |
742 | priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); |
743 | isr = priv->read_reg(priv, XCAN_ISR_OFFSET); |
744 | } |
745 | |
746 | - if (work_done) |
747 | + if (work_done) { |
748 | can_led_event(ndev, CAN_LED_EVENT_RX); |
749 | + xcan_update_error_state_after_rxtx(ndev); |
750 | + } |
751 | |
752 | if (work_done < quota) { |
753 | napi_complete_done(napi, work_done); |
754 | ier = priv->read_reg(priv, XCAN_IER_OFFSET); |
755 | - ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); |
756 | + ier |= XCAN_IXR_RXNEMP_MASK; |
757 | priv->write_reg(priv, XCAN_IER_OFFSET, ier); |
758 | } |
759 | return work_done; |
760 | @@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) |
761 | { |
762 | struct xcan_priv *priv = netdev_priv(ndev); |
763 | struct net_device_stats *stats = &ndev->stats; |
764 | + unsigned int frames_in_fifo; |
765 | + int frames_sent = 1; /* TXOK => at least 1 frame was sent */ |
766 | + unsigned long flags; |
767 | + int retries = 0; |
768 | + |
769 | + /* Synchronize with xmit as we need to know the exact number |
770 | + * of frames in the FIFO to stay in sync due to the TXFEMP |
771 | + * handling. |
772 | + * This also prevents a race between netif_wake_queue() and |
773 | + * netif_stop_queue(). |
774 | + */ |
775 | + spin_lock_irqsave(&priv->tx_lock, flags); |
776 | + |
777 | + frames_in_fifo = priv->tx_head - priv->tx_tail; |
778 | + |
779 | + if (WARN_ON_ONCE(frames_in_fifo == 0)) { |
780 | + /* clear TXOK anyway to avoid getting back here */ |
781 | + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); |
782 | + spin_unlock_irqrestore(&priv->tx_lock, flags); |
783 | + return; |
784 | + } |
785 | + |
786 | + /* Check if 2 frames were sent (TXOK only means that at least 1 |
787 | + * frame was sent). |
788 | + */ |
789 | + if (frames_in_fifo > 1) { |
790 | + WARN_ON(frames_in_fifo > priv->tx_max); |
791 | + |
792 | + /* Synchronize TXOK and isr so that after the loop: |
793 | + * (1) isr variable is up-to-date at least up to TXOK clear |
794 | + * time. This avoids us clearing a TXOK of a second frame |
795 | + * but not noticing that the FIFO is now empty and thus |
796 | + * marking only a single frame as sent. |
797 | + * (2) No TXOK is left. Having one could mean leaving a |
798 | + * stray TXOK as we might process the associated frame |
799 | + * via TXFEMP handling as we read TXFEMP *after* TXOK |
800 | + * clear to satisfy (1). |
801 | + */ |
802 | + while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { |
803 | + priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); |
804 | + isr = priv->read_reg(priv, XCAN_ISR_OFFSET); |
805 | + } |
806 | |
807 | - while ((priv->tx_head - priv->tx_tail > 0) && |
808 | - (isr & XCAN_IXR_TXOK_MASK)) { |
809 | + if (isr & XCAN_IXR_TXFEMP_MASK) { |
810 | + /* nothing in FIFO anymore */ |
811 | + frames_sent = frames_in_fifo; |
812 | + } |
813 | + } else { |
814 | + /* single frame in fifo, just clear TXOK */ |
815 | priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); |
816 | + } |
817 | + |
818 | + while (frames_sent--) { |
819 | can_get_echo_skb(ndev, priv->tx_tail % |
820 | priv->tx_max); |
821 | priv->tx_tail++; |
822 | stats->tx_packets++; |
823 | - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); |
824 | } |
825 | - can_led_event(ndev, CAN_LED_EVENT_TX); |
826 | + |
827 | netif_wake_queue(ndev); |
828 | + |
829 | + spin_unlock_irqrestore(&priv->tx_lock, flags); |
830 | + |
831 | + can_led_event(ndev, CAN_LED_EVENT_TX); |
832 | + xcan_update_error_state_after_rxtx(ndev); |
833 | } |
834 | |
835 | /** |
836 | @@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) |
837 | struct net_device *ndev = (struct net_device *)dev_id; |
838 | struct xcan_priv *priv = netdev_priv(ndev); |
839 | u32 isr, ier; |
840 | + u32 isr_errors; |
841 | |
842 | /* Get the interrupt status from Xilinx CAN */ |
843 | isr = priv->read_reg(priv, XCAN_ISR_OFFSET); |
844 | @@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) |
845 | xcan_tx_interrupt(ndev, isr); |
846 | |
847 | /* Check for the type of error interrupt and Processing it */ |
848 | - if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | |
849 | - XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { |
850 | - priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | |
851 | - XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | |
852 | - XCAN_IXR_ARBLST_MASK)); |
853 | + isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | |
854 | + XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK); |
855 | + if (isr_errors) { |
856 | + priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); |
857 | xcan_err_interrupt(ndev, isr); |
858 | } |
859 | |
860 | /* Check for the type of receive interrupt and Processing it */ |
861 | - if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { |
862 | + if (isr & XCAN_IXR_RXNEMP_MASK) { |
863 | ier = priv->read_reg(priv, XCAN_IER_OFFSET); |
864 | - ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); |
865 | + ier &= ~XCAN_IXR_RXNEMP_MASK; |
866 | priv->write_reg(priv, XCAN_IER_OFFSET, ier); |
867 | napi_schedule(&priv->napi); |
868 | } |
869 | @@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id) |
870 | static void xcan_chip_stop(struct net_device *ndev) |
871 | { |
872 | struct xcan_priv *priv = netdev_priv(ndev); |
873 | - u32 ier; |
874 | |
875 | /* Disable interrupts and leave the can in configuration mode */ |
876 | - ier = priv->read_reg(priv, XCAN_IER_OFFSET); |
877 | - ier &= ~XCAN_INTR_ALL; |
878 | - priv->write_reg(priv, XCAN_IER_OFFSET, ier); |
879 | - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); |
880 | + set_reset_mode(ndev); |
881 | priv->can.state = CAN_STATE_STOPPED; |
882 | } |
883 | |
884 | @@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = { |
885 | */ |
886 | static int __maybe_unused xcan_suspend(struct device *dev) |
887 | { |
888 | - if (!device_may_wakeup(dev)) |
889 | - return pm_runtime_force_suspend(dev); |
890 | + struct net_device *ndev = dev_get_drvdata(dev); |
891 | |
892 | - return 0; |
893 | + if (netif_running(ndev)) { |
894 | + netif_stop_queue(ndev); |
895 | + netif_device_detach(ndev); |
896 | + xcan_chip_stop(ndev); |
897 | + } |
898 | + |
899 | + return pm_runtime_force_suspend(dev); |
900 | } |
901 | |
902 | /** |
903 | @@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev) |
904 | */ |
905 | static int __maybe_unused xcan_resume(struct device *dev) |
906 | { |
907 | - if (!device_may_wakeup(dev)) |
908 | - return pm_runtime_force_resume(dev); |
909 | + struct net_device *ndev = dev_get_drvdata(dev); |
910 | + int ret; |
911 | |
912 | - return 0; |
913 | + ret = pm_runtime_force_resume(dev); |
914 | + if (ret) { |
915 | + dev_err(dev, "pm_runtime_force_resume failed on resume\n"); |
916 | + return ret; |
917 | + } |
918 | + |
919 | + if (netif_running(ndev)) { |
920 | + ret = xcan_chip_start(ndev); |
921 | + if (ret) { |
922 | + dev_err(dev, "xcan_chip_start failed on resume\n"); |
923 | + return ret; |
924 | + } |
925 | + |
926 | + netif_device_attach(ndev); |
927 | + netif_start_queue(ndev); |
928 | + } |
929 | |
930 | + return 0; |
931 | } |
932 | |
933 | /** |
934 | @@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev) |
935 | struct net_device *ndev = dev_get_drvdata(dev); |
936 | struct xcan_priv *priv = netdev_priv(ndev); |
937 | |
938 | - if (netif_running(ndev)) { |
939 | - netif_stop_queue(ndev); |
940 | - netif_device_detach(ndev); |
941 | - } |
942 | - |
943 | - priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK); |
944 | - priv->can.state = CAN_STATE_SLEEPING; |
945 | - |
946 | clk_disable_unprepare(priv->bus_clk); |
947 | clk_disable_unprepare(priv->can_clk); |
948 | |
949 | @@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) |
950 | struct net_device *ndev = dev_get_drvdata(dev); |
951 | struct xcan_priv *priv = netdev_priv(ndev); |
952 | int ret; |
953 | - u32 isr, status; |
954 | |
955 | ret = clk_prepare_enable(priv->bus_clk); |
956 | if (ret) { |
957 | @@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev) |
958 | return ret; |
959 | } |
960 | |
961 | - priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); |
962 | - isr = priv->read_reg(priv, XCAN_ISR_OFFSET); |
963 | - status = priv->read_reg(priv, XCAN_SR_OFFSET); |
964 | - |
965 | - if (netif_running(ndev)) { |
966 | - if (isr & XCAN_IXR_BSOFF_MASK) { |
967 | - priv->can.state = CAN_STATE_BUS_OFF; |
968 | - priv->write_reg(priv, XCAN_SRR_OFFSET, |
969 | - XCAN_SRR_RESET_MASK); |
970 | - } else if ((status & XCAN_SR_ESTAT_MASK) == |
971 | - XCAN_SR_ESTAT_MASK) { |
972 | - priv->can.state = CAN_STATE_ERROR_PASSIVE; |
973 | - } else if (status & XCAN_SR_ERRWRN_MASK) { |
974 | - priv->can.state = CAN_STATE_ERROR_WARNING; |
975 | - } else { |
976 | - priv->can.state = CAN_STATE_ERROR_ACTIVE; |
977 | - } |
978 | - netif_device_attach(ndev); |
979 | - netif_start_queue(ndev); |
980 | - } |
981 | - |
982 | return 0; |
983 | } |
984 | |
985 | @@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = { |
986 | SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) |
987 | }; |
988 | |
989 | +static const struct xcan_devtype_data xcan_zynq_data = { |
990 | + .caps = XCAN_CAP_WATERMARK, |
991 | +}; |
992 | + |
993 | +/* Match table for OF platform binding */ |
994 | +static const struct of_device_id xcan_of_match[] = { |
995 | + { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, |
996 | + { .compatible = "xlnx,axi-can-1.00.a", }, |
997 | + { /* end of list */ }, |
998 | +}; |
999 | +MODULE_DEVICE_TABLE(of, xcan_of_match); |
1000 | + |
1001 | /** |
1002 | * xcan_probe - Platform registration call |
1003 | * @pdev: Handle to the platform device structure |
1004 | @@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev) |
1005 | struct resource *res; /* IO mem resources */ |
1006 | struct net_device *ndev; |
1007 | struct xcan_priv *priv; |
1008 | + const struct of_device_id *of_id; |
1009 | + int caps = 0; |
1010 | void __iomem *addr; |
1011 | - int ret, rx_max, tx_max; |
1012 | + int ret, rx_max, tx_max, tx_fifo_depth; |
1013 | |
1014 | /* Get the virtual base address for the device */ |
1015 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1016 | @@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev) |
1017 | goto err; |
1018 | } |
1019 | |
1020 | - ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); |
1021 | + ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", |
1022 | + &tx_fifo_depth); |
1023 | if (ret < 0) |
1024 | goto err; |
1025 | |
1026 | @@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev) |
1027 | if (ret < 0) |
1028 | goto err; |
1029 | |
1030 | + of_id = of_match_device(xcan_of_match, &pdev->dev); |
1031 | + if (of_id) { |
1032 | + const struct xcan_devtype_data *devtype_data = of_id->data; |
1033 | + |
1034 | + if (devtype_data) |
1035 | + caps = devtype_data->caps; |
1036 | + } |
1037 | + |
1038 | + /* There is no way to directly figure out how many frames have been |
1039 | + * sent when the TXOK interrupt is processed. If watermark programming |
1040 | + * is supported, we can have 2 frames in the FIFO and use TXFEMP |
1041 | + * to determine if 1 or 2 frames have been sent. |
1042 | + * Theoretically we should be able to use TXFWMEMP to determine up |
1043 | + * to 3 frames, but it seems that after putting a second frame in the |
1044 | + * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less |
1045 | + * than 2 frames in FIFO) is set anyway with no TXOK (a frame was |
1046 | + * sent), which is not a sensible state - possibly TXFWMEMP is not |
1047 | + * completely synchronized with the rest of the bits? |
1048 | + */ |
1049 | + if (caps & XCAN_CAP_WATERMARK) |
1050 | + tx_max = min(tx_fifo_depth, 2); |
1051 | + else |
1052 | + tx_max = 1; |
1053 | + |
1054 | /* Create a CAN device instance */ |
1055 | ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); |
1056 | if (!ndev) |
1057 | @@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev) |
1058 | CAN_CTRLMODE_BERR_REPORTING; |
1059 | priv->reg_base = addr; |
1060 | priv->tx_max = tx_max; |
1061 | + spin_lock_init(&priv->tx_lock); |
1062 | |
1063 | /* Get IRQ for the device */ |
1064 | ndev->irq = platform_get_irq(pdev, 0); |
1065 | @@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev) |
1066 | |
1067 | pm_runtime_put(&pdev->dev); |
1068 | |
1069 | - netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", |
1070 | + netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n", |
1071 | priv->reg_base, ndev->irq, priv->can.clock.freq, |
1072 | - priv->tx_max); |
1073 | + tx_fifo_depth, priv->tx_max); |
1074 | |
1075 | return 0; |
1076 | |
1077 | @@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev) |
1078 | return 0; |
1079 | } |
1080 | |
1081 | -/* Match table for OF platform binding */ |
1082 | -static const struct of_device_id xcan_of_match[] = { |
1083 | - { .compatible = "xlnx,zynq-can-1.0", }, |
1084 | - { .compatible = "xlnx,axi-can-1.00.a", }, |
1085 | - { /* end of list */ }, |
1086 | -}; |
1087 | -MODULE_DEVICE_TABLE(of, xcan_of_match); |
1088 | - |
1089 | static struct platform_driver xcan_driver = { |
1090 | .probe = xcan_probe, |
1091 | .remove = xcan_remove, |
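Context, not part of the patch: the TX accounting above relies on free-running head/tail counters, where the in-flight count is the unsigned difference, correct even across wraparound. A minimal sketch:

#include <stdio.h>

int main(void)
{
    unsigned int tx_head = 0xffffffffu; /* frames queued, about to wrap */
    unsigned int tx_tail = 0xfffffffeu; /* frames completed */
    unsigned int tx_max = 2;

    tx_head++; /* queue one more frame: head wraps to 0 */

    /* Unsigned subtraction is well-defined modulo 2^32, so the
     * in-FIFO count stays correct through the wrap. */
    unsigned int frames_in_fifo = tx_head - tx_tail;
    printf("in FIFO: %u, stop queue: %d\n",
           frames_in_fifo, frames_in_fifo == tx_max); /* in FIFO: 2, stop queue: 1 */
    return 0;
}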
1092 | diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
1093 | index a069fcc823c3..b26da0952a4d 100644 |
1094 | --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
1095 | +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c |
1096 | @@ -2957,7 +2957,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave, |
1097 | u32 srqn = qp_get_srqn(qpc) & 0xffffff; |
1098 | int use_srq = (qp_get_srqn(qpc) >> 24) & 1; |
1099 | struct res_srq *srq; |
1100 | - int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff; |
1101 | + int local_qpn = vhcr->in_modifier & 0xffffff; |
1102 | |
1103 | err = adjust_qp_sched_queue(dev, slave, qpc, inbox); |
1104 | if (err) |
1105 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c |
1106 | index 12d3ced61114..e87923e046c9 100644 |
1107 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c |
1108 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c |
1109 | @@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) |
1110 | HLIST_HEAD(del_list); |
1111 | spin_lock_bh(&priv->fs.arfs.arfs_lock); |
1112 | mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) { |
1113 | - if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) |
1114 | - break; |
1115 | if (!work_pending(&arfs_rule->arfs_work) && |
1116 | rps_may_expire_flow(priv->netdev, |
1117 | arfs_rule->rxq, arfs_rule->flow_id, |
1118 | arfs_rule->filter_id)) { |
1119 | hlist_del_init(&arfs_rule->hlist); |
1120 | hlist_add_head(&arfs_rule->hlist, &del_list); |
1121 | + if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA) |
1122 | + break; |
1123 | } |
1124 | } |
1125 | spin_unlock_bh(&priv->fs.arfs.arfs_lock); |
1126 | @@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, |
1127 | skb->protocol != htons(ETH_P_IPV6)) |
1128 | return -EPROTONOSUPPORT; |
1129 | |
1130 | + if (skb->encapsulation) |
1131 | + return -EPROTONOSUPPORT; |
1132 | + |
1133 | arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol); |
1134 | if (!arfs_t) |
1135 | return -EPROTONOSUPPORT; |
1136 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c |
1137 | index 84dd63e74041..27040009d87a 100644 |
1138 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c |
1139 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c |
1140 | @@ -545,6 +545,7 @@ void mlx5e_pps_event_handler(struct mlx5e_priv *priv, |
1141 | void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
1142 | { |
1143 | struct mlx5e_tstamp *tstamp = &priv->tstamp; |
1144 | + u64 overflow_cycles; |
1145 | u64 ns; |
1146 | u64 frac = 0; |
1147 | u32 dev_freq; |
1148 | @@ -569,10 +570,17 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv) |
1149 | |
1150 | /* Calculate period in seconds to call the overflow watchdog - to make |
1151 | * sure counter is checked at least once every wrap around. |
1152 | + * The period is calculated as the minimum between max HW cycles count |
1153 | + * (The clock source mask) and max amount of cycles that can be |
1154 | + * multiplied by clock multiplier where the result doesn't exceed |
1155 | + * 64bits. |
1156 | */ |
1157 | - ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask, |
1158 | + overflow_cycles = div64_u64(~0ULL >> 1, tstamp->cycles.mult); |
1159 | + overflow_cycles = min(overflow_cycles, tstamp->cycles.mask >> 1); |
1160 | + |
1161 | + ns = cyclecounter_cyc2ns(&tstamp->cycles, overflow_cycles, |
1162 | frac, &frac); |
1163 | - do_div(ns, NSEC_PER_SEC / 2 / HZ); |
1164 | + do_div(ns, NSEC_PER_SEC / HZ); |
1165 | tstamp->overflow_period = ns; |
1166 | |
1167 | INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out); |
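Context, not part of the patch: the overflow period above is bounded by whichever comes first, the HW counter wrapping (cycles.mask) or cycles * mult overflowing 64 bits. A sketch with illustrative numbers:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t mask = (1ULL << 41) - 1; /* e.g. a 41-bit HW cycle counter */
    uint32_t mult = 1U << 22;         /* cyclecounter multiplier */

    /* Largest cycle count whose product with mult fits in 63 bits... */
    uint64_t overflow_cycles = (~0ULL >> 1) / mult;
    /* ...capped at half the HW counter range, as in the patch. */
    if (overflow_cycles > mask >> 1)
        overflow_cycles = mask >> 1;

    printf("check counter at least every %" PRIu64 " cycles\n",
           overflow_cycles);
    return 0;
}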
1168 | diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c |
1169 | index dba6d17ad885..47d2ef2fb9b3 100644 |
1170 | --- a/drivers/net/phy/phy.c |
1171 | +++ b/drivers/net/phy/phy.c |
1172 | @@ -511,7 +511,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync) |
1173 | * negotiation may already be done and aneg interrupt may not be |
1174 | * generated. |
1175 | */ |
1176 | - if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) { |
1177 | + if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) { |
1178 | err = phy_aneg_done(phydev); |
1179 | if (err > 0) { |
1180 | trigger = true; |
1181 | diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c |
1182 | index bbdb46916dc3..13d39a72fe0d 100644 |
1183 | --- a/drivers/net/vxlan.c |
1184 | +++ b/drivers/net/vxlan.c |
1185 | @@ -636,8 +636,61 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) |
1186 | return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr)); |
1187 | } |
1188 | |
1189 | -/* Add new entry to forwarding table -- assumes lock held */ |
1190 | +static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, |
1191 | + const u8 *mac, __u16 state, |
1192 | + __be32 src_vni, __u8 ndm_flags) |
1193 | +{ |
1194 | + struct vxlan_fdb *f; |
1195 | + |
1196 | + f = kmalloc(sizeof(*f), GFP_ATOMIC); |
1197 | + if (!f) |
1198 | + return NULL; |
1199 | + f->state = state; |
1200 | + f->flags = ndm_flags; |
1201 | + f->updated = f->used = jiffies; |
1202 | + f->vni = src_vni; |
1203 | + INIT_LIST_HEAD(&f->remotes); |
1204 | + memcpy(f->eth_addr, mac, ETH_ALEN); |
1205 | + |
1206 | + return f; |
1207 | +} |
1208 | + |
1209 | static int vxlan_fdb_create(struct vxlan_dev *vxlan, |
1210 | + const u8 *mac, union vxlan_addr *ip, |
1211 | + __u16 state, __be16 port, __be32 src_vni, |
1212 | + __be32 vni, __u32 ifindex, __u8 ndm_flags, |
1213 | + struct vxlan_fdb **fdb) |
1214 | +{ |
1215 | + struct vxlan_rdst *rd = NULL; |
1216 | + struct vxlan_fdb *f; |
1217 | + int rc; |
1218 | + |
1219 | + if (vxlan->cfg.addrmax && |
1220 | + vxlan->addrcnt >= vxlan->cfg.addrmax) |
1221 | + return -ENOSPC; |
1222 | + |
1223 | + netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); |
1224 | + f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags); |
1225 | + if (!f) |
1226 | + return -ENOMEM; |
1227 | + |
1228 | + rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); |
1229 | + if (rc < 0) { |
1230 | + kfree(f); |
1231 | + return rc; |
1232 | + } |
1233 | + |
1234 | + ++vxlan->addrcnt; |
1235 | + hlist_add_head_rcu(&f->hlist, |
1236 | + vxlan_fdb_head(vxlan, mac, src_vni)); |
1237 | + |
1238 | + *fdb = f; |
1239 | + |
1240 | + return 0; |
1241 | +} |
1242 | + |
1243 | +/* Add new entry to forwarding table -- assumes lock held */ |
1244 | +static int vxlan_fdb_update(struct vxlan_dev *vxlan, |
1245 | const u8 *mac, union vxlan_addr *ip, |
1246 | __u16 state, __u16 flags, |
1247 | __be16 port, __be32 src_vni, __be32 vni, |
1248 | @@ -687,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, |
1249 | if (!(flags & NLM_F_CREATE)) |
1250 | return -ENOENT; |
1251 | |
1252 | - if (vxlan->cfg.addrmax && |
1253 | - vxlan->addrcnt >= vxlan->cfg.addrmax) |
1254 | - return -ENOSPC; |
1255 | - |
1256 | /* Disallow replace to add a multicast entry */ |
1257 | if ((flags & NLM_F_REPLACE) && |
1258 | (is_multicast_ether_addr(mac) || is_zero_ether_addr(mac))) |
1259 | return -EOPNOTSUPP; |
1260 | |
1261 | netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip); |
1262 | - f = kmalloc(sizeof(*f), GFP_ATOMIC); |
1263 | - if (!f) |
1264 | - return -ENOMEM; |
1265 | - |
1266 | - notify = 1; |
1267 | - f->state = state; |
1268 | - f->flags = ndm_flags; |
1269 | - f->updated = f->used = jiffies; |
1270 | - f->vni = src_vni; |
1271 | - INIT_LIST_HEAD(&f->remotes); |
1272 | - memcpy(f->eth_addr, mac, ETH_ALEN); |
1273 | - |
1274 | - rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd); |
1275 | - if (rc < 0) { |
1276 | - kfree(f); |
1277 | + rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni, |
1278 | + vni, ifindex, ndm_flags, &f); |
1279 | + if (rc < 0) |
1280 | return rc; |
1281 | - } |
1282 | - |
1283 | - ++vxlan->addrcnt; |
1284 | - hlist_add_head_rcu(&f->hlist, |
1285 | - vxlan_fdb_head(vxlan, mac, src_vni)); |
1286 | + notify = 1; |
1287 | } |
1288 | |
1289 | if (notify) { |
1290 | @@ -741,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head) |
1291 | kfree(f); |
1292 | } |
1293 | |
1294 | -static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) |
1295 | +static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, |
1296 | + bool do_notify) |
1297 | { |
1298 | netdev_dbg(vxlan->dev, |
1299 | "delete %pM\n", f->eth_addr); |
1300 | |
1301 | --vxlan->addrcnt; |
1302 | - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); |
1303 | + if (do_notify) |
1304 | + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH); |
1305 | |
1306 | hlist_del_rcu(&f->hlist); |
1307 | call_rcu(&f->rcu, vxlan_fdb_free); |
1308 | @@ -863,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], |
1309 | return -EAFNOSUPPORT; |
1310 | |
1311 | spin_lock_bh(&vxlan->hash_lock); |
1312 | - err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags, |
1313 | + err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, |
1314 | port, src_vni, vni, ifindex, ndm->ndm_flags); |
1315 | spin_unlock_bh(&vxlan->hash_lock); |
1316 | |
1317 | @@ -897,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, |
1318 | goto out; |
1319 | } |
1320 | |
1321 | - vxlan_fdb_destroy(vxlan, f); |
1322 | + vxlan_fdb_destroy(vxlan, f, true); |
1323 | |
1324 | out: |
1325 | return 0; |
1326 | @@ -1006,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev, |
1327 | |
1328 | /* close off race between vxlan_flush and incoming packets */ |
1329 | if (netif_running(dev)) |
1330 | - vxlan_fdb_create(vxlan, src_mac, src_ip, |
1331 | + vxlan_fdb_update(vxlan, src_mac, src_ip, |
1332 | NUD_REACHABLE, |
1333 | NLM_F_EXCL|NLM_F_CREATE, |
1334 | vxlan->cfg.dst_port, |
1335 | @@ -2360,7 +2395,7 @@ static void vxlan_cleanup(unsigned long arg) |
1336 | "garbage collect %pM\n", |
1337 | f->eth_addr); |
1338 | f->state = NUD_STALE; |
1339 | - vxlan_fdb_destroy(vxlan, f); |
1340 | + vxlan_fdb_destroy(vxlan, f, true); |
1341 | } else if (time_before(timeout, next_timer)) |
1342 | next_timer = timeout; |
1343 | } |
1344 | @@ -2411,7 +2446,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) |
1345 | spin_lock_bh(&vxlan->hash_lock); |
1346 | f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); |
1347 | if (f) |
1348 | - vxlan_fdb_destroy(vxlan, f); |
1349 | + vxlan_fdb_destroy(vxlan, f, true); |
1350 | spin_unlock_bh(&vxlan->hash_lock); |
1351 | } |
1352 | |
1353 | @@ -2465,7 +2500,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all) |
1354 | continue; |
1355 | /* the all_zeros_mac entry is deleted at vxlan_uninit */ |
1356 | if (!is_zero_ether_addr(f->eth_addr)) |
1357 | - vxlan_fdb_destroy(vxlan, f); |
1358 | + vxlan_fdb_destroy(vxlan, f, true); |
1359 | } |
1360 | } |
1361 | spin_unlock_bh(&vxlan->hash_lock); |
1362 | @@ -3157,6 +3192,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, |
1363 | { |
1364 | struct vxlan_net *vn = net_generic(net, vxlan_net_id); |
1365 | struct vxlan_dev *vxlan = netdev_priv(dev); |
1366 | + struct vxlan_fdb *f = NULL; |
1367 | int err; |
1368 | |
1369 | err = vxlan_dev_configure(net, dev, conf, false, extack); |
1370 | @@ -3170,24 +3206,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, |
1371 | err = vxlan_fdb_create(vxlan, all_zeros_mac, |
1372 | &vxlan->default_dst.remote_ip, |
1373 | NUD_REACHABLE | NUD_PERMANENT, |
1374 | - NLM_F_EXCL | NLM_F_CREATE, |
1375 | vxlan->cfg.dst_port, |
1376 | vxlan->default_dst.remote_vni, |
1377 | vxlan->default_dst.remote_vni, |
1378 | vxlan->default_dst.remote_ifindex, |
1379 | - NTF_SELF); |
1380 | + NTF_SELF, &f); |
1381 | if (err) |
1382 | return err; |
1383 | } |
1384 | |
1385 | err = register_netdevice(dev); |
1386 | + if (err) |
1387 | + goto errout; |
1388 | + |
1389 | + err = rtnl_configure_link(dev, NULL); |
1390 | if (err) { |
1391 | - vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni); |
1392 | - return err; |
1393 | + unregister_netdevice(dev); |
1394 | + goto errout; |
1395 | } |
1396 | |
1397 | + /* notify default fdb entry */ |
1398 | + if (f) |
1399 | + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); |
1400 | + |
1401 | list_add(&vxlan->next, &vn->vxlan_list); |
1402 | return 0; |
1403 | +errout: |
1404 | + if (f) |
1405 | + vxlan_fdb_destroy(vxlan, f, false); |
1406 | + return err; |
1407 | } |
1408 | |
1409 | static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], |
1410 | @@ -3416,6 +3463,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], |
1411 | struct vxlan_rdst *dst = &vxlan->default_dst; |
1412 | struct vxlan_rdst old_dst; |
1413 | struct vxlan_config conf; |
1414 | + struct vxlan_fdb *f = NULL; |
1415 | int err; |
1416 | |
1417 | err = vxlan_nl2conf(tb, data, |
1418 | @@ -3444,16 +3492,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], |
1419 | err = vxlan_fdb_create(vxlan, all_zeros_mac, |
1420 | &dst->remote_ip, |
1421 | NUD_REACHABLE | NUD_PERMANENT, |
1422 | - NLM_F_CREATE | NLM_F_APPEND, |
1423 | vxlan->cfg.dst_port, |
1424 | dst->remote_vni, |
1425 | dst->remote_vni, |
1426 | dst->remote_ifindex, |
1427 | - NTF_SELF); |
1428 | + NTF_SELF, &f); |
1429 | if (err) { |
1430 | spin_unlock_bh(&vxlan->hash_lock); |
1431 | return err; |
1432 | } |
1433 | + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); |
1434 | } |
1435 | spin_unlock_bh(&vxlan->hash_lock); |
1436 | } |
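
The vxlan hunks above split FDB entry creation from netlink notification: __vxlan_dev_create() now creates the default all-zeros entry silently, registers and configures the device, and only then emits RTM_NEWNEIGH; on any failure the unannounced entry is destroyed with do_notify=false. A minimal user-space sketch of that create/commit/notify/rollback ordering (every name here is a hypothetical stand-in, not kernel API):

#include <stdio.h>
#include <stdlib.h>

struct fdb_entry { int announced; };

static struct fdb_entry *fdb_create(void)   /* quiet: no event emitted */
{
	return calloc(1, sizeof(struct fdb_entry));
}

static void fdb_destroy(struct fdb_entry *f, int do_notify)
{
	if (f && do_notify)
		printf("RTM_DELNEIGH\n"); /* only if userspace saw it */
	free(f);
}

static int register_device(void) { return 0; }  /* pretend: succeeds */
static int configure_link(void)  { return -1; } /* pretend: fails */

int main(void)
{
	struct fdb_entry *f = fdb_create();
	int err;

	err = register_device();
	if (err)
		goto errout;
	err = configure_link();
	if (err)
		goto errout;

	printf("RTM_NEWNEIGH\n");    /* announce only after full success */
	f->announced = 1;
	return 0;

errout:
	/* never announced, so tear it down without a delete event */
	fdb_destroy(f, f->announced);
	return err;
}

The point of the ordering is that userspace never observes a neighbour entry for a device whose creation subsequently fails.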
1437 | diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c |
1438 | index d99daf69e501..fe229d63deec 100644 |
1439 | --- a/drivers/staging/speakup/speakup_soft.c |
1440 | +++ b/drivers/staging/speakup/speakup_soft.c |
1441 | @@ -207,11 +207,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, |
1442 | int chars_sent = 0; |
1443 | char __user *cp; |
1444 | char *init; |
1445 | + size_t bytes_per_ch = unicode ? 3 : 1; |
1446 | u16 ch; |
1447 | int empty; |
1448 | unsigned long flags; |
1449 | DEFINE_WAIT(wait); |
1450 | |
1451 | + if (count < bytes_per_ch) |
1452 | + return -EINVAL; |
1453 | + |
1454 | spin_lock_irqsave(&speakup_info.spinlock, flags); |
1455 | while (1) { |
1456 | prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE); |
1457 | @@ -237,7 +241,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count, |
1458 | init = get_initstring(); |
1459 | |
1460 | /* Keep 3 bytes available for a 16bit UTF-8-encoded character */ |
1461 | - while (chars_sent <= count - 3) { |
1462 | + while (chars_sent <= count - bytes_per_ch) { |
1463 | if (speakup_info.flushing) { |
1464 | speakup_info.flushing = 0; |
1465 | ch = '\x18'; |
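
The softsynthx_read() fix above guards against a buffer smaller than one encoded character: count is a size_t, so the loop bound count - bytes_per_ch underflows to a huge value when count < bytes_per_ch, and the old count - 3 had the same problem for short reads. A stand-alone sketch of the underflow (hypothetical, outside the driver):

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t count = 2, bytes_per_ch = 3, chars_sent = 0;

	/* unsigned arithmetic: 2 - 3 wraps toward SIZE_MAX */
	if (chars_sent <= count - bytes_per_ch)
		printf("bound wrapped to %zu, loop would overrun\n",
		       count - bytes_per_ch);

	if (count < bytes_per_ch)     /* the added guard */
		printf("rejected short read with -EINVAL\n");
	return 0;
}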
1466 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c |
1467 | index 3b9aadd007f5..f2f31fc16f29 100644 |
1468 | --- a/drivers/usb/class/cdc-acm.c |
1469 | +++ b/drivers/usb/class/cdc-acm.c |
1470 | @@ -1844,6 +1844,9 @@ static const struct usb_device_id acm_ids[] = { |
1471 | { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ |
1472 | .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ |
1473 | }, |
1474 | + { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */ |
1475 | + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ |
1476 | + }, |
1477 | |
1478 | { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ |
1479 | .driver_info = CLEAR_HALT_CONDITIONS, |
1480 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
1481 | index e5f77e611451..a8bc48b26c23 100644 |
1482 | --- a/drivers/usb/core/hub.c |
1483 | +++ b/drivers/usb/core/hub.c |
1484 | @@ -1141,10 +1141,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) |
1485 | |
1486 | if (!udev || udev->state == USB_STATE_NOTATTACHED) { |
1487 | /* Tell hub_wq to disconnect the device or |
1488 | - * check for a new connection |
1489 | + * check for a new connection or over current condition. |
1490 | + * Based on USB2.0 Spec Section 11.12.5, |
1491 | + * C_PORT_OVER_CURRENT could be set while |
1492 | + * PORT_OVER_CURRENT is not. So check for any of them. |
1493 | */ |
1494 | if (udev || (portstatus & USB_PORT_STAT_CONNECTION) || |
1495 | - (portstatus & USB_PORT_STAT_OVERCURRENT)) |
1496 | + (portstatus & USB_PORT_STAT_OVERCURRENT) || |
1497 | + (portchange & USB_PORT_STAT_C_OVERCURRENT)) |
1498 | set_bit(port1, hub->change_bits); |
1499 | |
1500 | } else if (portstatus & USB_PORT_STAT_ENABLE) { |
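
The hub_activate() hunk follows the USB 2.0 rule quoted in the comment: the latched change bit C_PORT_OVER_CURRENT can still be set after the live PORT_OVER_CURRENT status has cleared again, so a transient event is only visible in the change word. A hedged sketch of testing both words (the bit values happen to match the spec's wPortStatus/wPortChange layout, but treat them as illustrative):

#include <stdio.h>

#define PORT_STAT_OVERCURRENT   0x0008   /* live status word */
#define PORT_STAT_C_OVERCURRENT 0x0008   /* latched change word */

static int needs_attention(unsigned portstatus, unsigned portchange)
{
	return (portstatus & PORT_STAT_OVERCURRENT) ||
	       (portchange & PORT_STAT_C_OVERCURRENT);
}

int main(void)
{
	/* transient over-current: already cleared, but still latched */
	printf("%d\n", needs_attention(0x0000, PORT_STAT_C_OVERCURRENT));
	return 0;
}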
1501 | diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c |
1502 | index 87484f71b2ab..46d3b0fc00c5 100644 |
1503 | --- a/drivers/usb/dwc2/hcd.c |
1504 | +++ b/drivers/usb/dwc2/hcd.c |
1505 | @@ -2606,34 +2606,29 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg, |
1506 | |
1507 | #define DWC2_USB_DMA_ALIGN 4 |
1508 | |
1509 | -struct dma_aligned_buffer { |
1510 | - void *kmalloc_ptr; |
1511 | - void *old_xfer_buffer; |
1512 | - u8 data[0]; |
1513 | -}; |
1514 | - |
1515 | static void dwc2_free_dma_aligned_buffer(struct urb *urb) |
1516 | { |
1517 | - struct dma_aligned_buffer *temp; |
1518 | + void *stored_xfer_buffer; |
1519 | |
1520 | if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER)) |
1521 | return; |
1522 | |
1523 | - temp = container_of(urb->transfer_buffer, |
1524 | - struct dma_aligned_buffer, data); |
1525 | + /* Restore urb->transfer_buffer from the end of the allocated area */ |
1526 | + memcpy(&stored_xfer_buffer, urb->transfer_buffer + |
1527 | + urb->transfer_buffer_length, sizeof(urb->transfer_buffer)); |
1528 | |
1529 | if (usb_urb_dir_in(urb)) |
1530 | - memcpy(temp->old_xfer_buffer, temp->data, |
1531 | + memcpy(stored_xfer_buffer, urb->transfer_buffer, |
1532 | urb->transfer_buffer_length); |
1533 | - urb->transfer_buffer = temp->old_xfer_buffer; |
1534 | - kfree(temp->kmalloc_ptr); |
1535 | + kfree(urb->transfer_buffer); |
1536 | + urb->transfer_buffer = stored_xfer_buffer; |
1537 | |
1538 | urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER; |
1539 | } |
1540 | |
1541 | static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) |
1542 | { |
1543 | - struct dma_aligned_buffer *temp, *kmalloc_ptr; |
1544 | + void *kmalloc_ptr; |
1545 | size_t kmalloc_size; |
1546 | |
1547 | if (urb->num_sgs || urb->sg || |
1548 | @@ -2641,22 +2636,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags) |
1549 | !((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1))) |
1550 | return 0; |
1551 | |
1552 | - /* Allocate a buffer with enough padding for alignment */ |
1553 | + /* |
1554 | + * Allocate a buffer with enough padding for original transfer_buffer |
1555 | + * pointer. This allocation is guaranteed to be aligned properly for |
1556 | + * DMA |
1557 | + */ |
1558 | kmalloc_size = urb->transfer_buffer_length + |
1559 | - sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1; |
1560 | + sizeof(urb->transfer_buffer); |
1561 | |
1562 | kmalloc_ptr = kmalloc(kmalloc_size, mem_flags); |
1563 | if (!kmalloc_ptr) |
1564 | return -ENOMEM; |
1565 | |
1566 | - /* Position our struct dma_aligned_buffer such that data is aligned */ |
1567 | - temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1; |
1568 | - temp->kmalloc_ptr = kmalloc_ptr; |
1569 | - temp->old_xfer_buffer = urb->transfer_buffer; |
1570 | + /* |
1571 | + * Position value of original urb->transfer_buffer pointer to the end |
1572 | + * of allocation for later referencing |
1573 | + */ |
1574 | + memcpy(kmalloc_ptr + urb->transfer_buffer_length, |
1575 | + &urb->transfer_buffer, sizeof(urb->transfer_buffer)); |
1576 | + |
1577 | if (usb_urb_dir_out(urb)) |
1578 | - memcpy(temp->data, urb->transfer_buffer, |
1579 | + memcpy(kmalloc_ptr, urb->transfer_buffer, |
1580 | urb->transfer_buffer_length); |
1581 | - urb->transfer_buffer = temp->data; |
1582 | + urb->transfer_buffer = kmalloc_ptr; |
1583 | |
1584 | urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER; |
1585 | |
1586 | diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c |
1587 | index 7b53ac548b1a..52e6897fa35a 100644 |
1588 | --- a/drivers/usb/gadget/function/f_fs.c |
1589 | +++ b/drivers/usb/gadget/function/f_fs.c |
1590 | @@ -3243,7 +3243,7 @@ static int ffs_func_setup(struct usb_function *f, |
1591 | __ffs_event_add(ffs, FUNCTIONFS_SETUP); |
1592 | spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); |
1593 | |
1594 | - return USB_GADGET_DELAYED_STATUS; |
1595 | + return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0; |
1596 | } |
1597 | |
1598 | static bool ffs_func_req_match(struct usb_function *f, |
1599 | diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c |
1600 | index b751dd60e41a..b4c68f3b82be 100644 |
1601 | --- a/drivers/vfio/vfio_iommu_spapr_tce.c |
1602 | +++ b/drivers/vfio/vfio_iommu_spapr_tce.c |
1603 | @@ -467,7 +467,7 @@ static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, |
1604 | if (!mem) |
1605 | return -EINVAL; |
1606 | |
1607 | - ret = mm_iommu_ua_to_hpa(mem, tce, phpa); |
1608 | + ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa); |
1609 | if (ret) |
1610 | return -EINVAL; |
1611 | |
1612 | diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c |
1613 | index 0480cd9a9e81..71b81980787f 100644 |
1614 | --- a/fs/cifs/smb2pdu.c |
1615 | +++ b/fs/cifs/smb2pdu.c |
1616 | @@ -338,10 +338,7 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon, |
1617 | return rc; |
1618 | |
1619 | /* BB eventually switch this to SMB2 specific small buf size */ |
1620 | - if (smb2_command == SMB2_SET_INFO) |
1621 | - *request_buf = cifs_buf_get(); |
1622 | - else |
1623 | - *request_buf = cifs_small_buf_get(); |
1624 | + *request_buf = cifs_small_buf_get(); |
1625 | if (*request_buf == NULL) { |
1626 | /* BB should we add a retry in here if not a writepage? */ |
1627 | return -ENOMEM; |
1628 | @@ -3171,7 +3168,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon, |
1629 | } |
1630 | |
1631 | rc = SendReceive2(xid, ses, iov, num, &resp_buftype, flags, &rsp_iov); |
1632 | - cifs_buf_release(req); |
1633 | + cifs_small_buf_release(req); |
1634 | rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base; |
1635 | |
1636 | if (rc != 0) |
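
The smb2pdu.c change makes SMB2_SET_INFO allocate from the small request buffer pool like every other command, and switches the release in send_set_info() to the matching cifs_small_buf_release(): a buffer must go back through the pool its get() came from. A toy sketch of why a mismatched pairing is visible (hypothetical pools tracked with depth counters):

#include <stdio.h>

static int small_pool = 4, big_pool = 4;

static void *small_get(void)       { small_pool--; return &small_pool; }
static void small_release(void *p) { (void)p; small_pool++; }
static void big_release(void *p)   { (void)p; big_pool++; }

int main(void)
{
	void *req = small_get();

	big_release(req);           /* old, mismatched pairing */
	/* one pool leaked a buffer, the other gained a phantom */
	printf("small=%d big=%d\n", small_pool, big_pool);

	req = small_get();
	small_release(req);         /* matched, as in the fix */
	printf("small=%d big=%d\n", small_pool, big_pool);
	return 0;
}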
1637 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
1638 | index 9cf971c68401..6dd77767fd5b 100644 |
1639 | --- a/include/linux/skbuff.h |
1640 | +++ b/include/linux/skbuff.h |
1641 | @@ -3167,6 +3167,8 @@ static inline int __skb_grow_rcsum(struct sk_buff *skb, unsigned int len) |
1642 | return __skb_grow(skb, len); |
1643 | } |
1644 | |
1645 | +#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode) |
1646 | + |
1647 | #define skb_queue_walk(queue, skb) \ |
1648 | for (skb = (queue)->next; \ |
1649 | skb != (struct sk_buff *)(queue); \ |
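
rb_to_skb() added above is rb_entry_safe() specialized for sk_buff: a NULL-tolerant container_of() from the embedded rbnode member back to the enclosing structure. A minimal sketch of the idiom (simplified: unlike the kernel macro, this one evaluates its pointer argument twice):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define entry_safe(ptr, type, member) \
	((ptr) ? container_of(ptr, type, member) : (type *)NULL)

struct node { int color; };
struct buff { int len; struct node rbnode; };

int main(void)
{
	struct buff b = { .len = 42 };
	struct node *n = &b.rbnode;

	printf("%d\n", entry_safe(n, struct buff, rbnode)->len);  /* 42 */
	printf("%p\n", (void *)entry_safe((struct node *)NULL,
					  struct buff, rbnode));   /* null */
	return 0;
}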
1650 | diff --git a/include/net/tcp.h b/include/net/tcp.h |
1651 | index fb653736f335..3173dd12b8cc 100644 |
1652 | --- a/include/net/tcp.h |
1653 | +++ b/include/net/tcp.h |
1654 | @@ -372,6 +372,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos, |
1655 | struct pipe_inode_info *pipe, size_t len, |
1656 | unsigned int flags); |
1657 | |
1658 | +void tcp_enter_quickack_mode(struct sock *sk); |
1659 | static inline void tcp_dec_quickack_mode(struct sock *sk, |
1660 | const unsigned int pkts) |
1661 | { |
1662 | @@ -560,6 +561,7 @@ void tcp_send_fin(struct sock *sk); |
1663 | void tcp_send_active_reset(struct sock *sk, gfp_t priority); |
1664 | int tcp_send_synack(struct sock *); |
1665 | void tcp_push_one(struct sock *, unsigned int mss_now); |
1666 | +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt); |
1667 | void tcp_send_ack(struct sock *sk); |
1668 | void tcp_send_delayed_ack(struct sock *sk); |
1669 | void tcp_send_loss_probe(struct sock *sk); |
1670 | @@ -857,6 +859,11 @@ struct tcp_skb_cb { |
1671 | * as TCP moves IP6CB into a different location in skb->cb[] |
1672 | */ |
1673 | static inline int tcp_v6_iif(const struct sk_buff *skb) |
1674 | +{ |
1675 | + return TCP_SKB_CB(skb)->header.h6.iif; |
1676 | +} |
1677 | + |
1678 | +static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb) |
1679 | { |
1680 | bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags); |
1681 | |
1682 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
1683 | index 4cfdad08aca0..efe396cc77b5 100644 |
1684 | --- a/net/core/rtnetlink.c |
1685 | +++ b/net/core/rtnetlink.c |
1686 | @@ -2402,9 +2402,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm) |
1687 | return err; |
1688 | } |
1689 | |
1690 | - dev->rtnl_link_state = RTNL_LINK_INITIALIZED; |
1691 | - |
1692 | - __dev_notify_flags(dev, old_flags, ~0U); |
1693 | + if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) { |
1694 | + __dev_notify_flags(dev, old_flags, 0U); |
1695 | + } else { |
1696 | + dev->rtnl_link_state = RTNL_LINK_INITIALIZED; |
1697 | + __dev_notify_flags(dev, old_flags, ~0U); |
1698 | + } |
1699 | return 0; |
1700 | } |
1701 | EXPORT_SYMBOL(rtnl_configure_link); |
1702 | diff --git a/net/core/skbuff.c b/net/core/skbuff.c |
1703 | index 23041b5c0b27..2e5eeba97de9 100644 |
1704 | --- a/net/core/skbuff.c |
1705 | +++ b/net/core/skbuff.c |
1706 | @@ -3675,6 +3675,7 @@ normal: |
1707 | net_warn_ratelimited( |
1708 | "skb_segment: too many frags: %u %u\n", |
1709 | pos, mss); |
1710 | + err = -EINVAL; |
1711 | goto err; |
1712 | } |
1713 | |
1714 | @@ -3713,11 +3714,10 @@ skip_fraglist: |
1715 | |
1716 | perform_csum_check: |
1717 | if (!csum) { |
1718 | - if (skb_has_shared_frag(nskb)) { |
1719 | - err = __skb_linearize(nskb); |
1720 | - if (err) |
1721 | - goto err; |
1722 | - } |
1723 | + if (skb_has_shared_frag(nskb) && |
1724 | + __skb_linearize(nskb)) |
1725 | + goto err; |
1726 | + |
1727 | if (!nskb->remcsum_offload) |
1728 | nskb->ip_summed = CHECKSUM_NONE; |
1729 | SKB_GSO_CB(nskb)->csum = |
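
In the skb_segment() hunks, the too-many-frags bailout now sets err = -EINVAL before jumping to the common error label; previously err could still hold 0 (or a stale value) from an earlier step, so the failure path might report success. A stand-alone sketch of the stale-error pitfall (hypothetical):

#include <stdio.h>

static int process(int fail_late)
{
	int err;

	err = 0;                 /* an earlier step succeeded */
	if (fail_late) {
		err = -22;       /* the added "err = -EINVAL;" */
		goto err;
	}
	return 0;
err:
	return err;              /* without the assignment: returns 0 */
}

int main(void)
{
	printf("%d\n", process(1));   /* -22, a real error code */
	return 0;
}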
1730 | diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c |
1731 | index fbeb35ad804b..502aae3e3ab8 100644 |
1732 | --- a/net/ipv4/igmp.c |
1733 | +++ b/net/ipv4/igmp.c |
1734 | @@ -1201,8 +1201,7 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) |
1735 | if (pmc) { |
1736 | im->interface = pmc->interface; |
1737 | im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
1738 | - im->sfmode = pmc->sfmode; |
1739 | - if (pmc->sfmode == MCAST_INCLUDE) { |
1740 | + if (im->sfmode == MCAST_INCLUDE) { |
1741 | im->tomb = pmc->tomb; |
1742 | im->sources = pmc->sources; |
1743 | for (psf = im->sources; psf; psf = psf->sf_next) |
1744 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
1745 | index 63d5d66e040a..e2dd325bed9b 100644 |
1746 | --- a/net/ipv4/ip_output.c |
1747 | +++ b/net/ipv4/ip_output.c |
1748 | @@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) |
1749 | to->dev = from->dev; |
1750 | to->mark = from->mark; |
1751 | |
1752 | + skb_copy_hash(to, from); |
1753 | + |
1754 | /* Copy the flags to each fragment. */ |
1755 | IPCB(to)->flags = IPCB(from)->flags; |
1756 | |
1757 | diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c |
1758 | index d07ba4d5917b..048d5f6dd320 100644 |
1759 | --- a/net/ipv4/ip_sockglue.c |
1760 | +++ b/net/ipv4/ip_sockglue.c |
1761 | @@ -148,15 +148,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb) |
1762 | { |
1763 | struct sockaddr_in sin; |
1764 | const struct iphdr *iph = ip_hdr(skb); |
1765 | - __be16 *ports = (__be16 *)skb_transport_header(skb); |
1766 | + __be16 *ports; |
1767 | + int end; |
1768 | |
1769 | - if (skb_transport_offset(skb) + 4 > (int)skb->len) |
1770 | + end = skb_transport_offset(skb) + 4; |
1771 | + if (end > 0 && !pskb_may_pull(skb, end)) |
1772 | return; |
1773 | |
1774 | /* All current transport protocols have the port numbers in the |
1775 | * first four bytes of the transport header and this function is |
1776 | * written with this assumption in mind. |
1777 | */ |
1778 | + ports = (__be16 *)skb_transport_header(skb); |
1779 | |
1780 | sin.sin_family = AF_INET; |
1781 | sin.sin_addr.s_addr = iph->daddr; |
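
The ip_cmsg_recv_dstaddr() fix replaces a length comparison against skb->len with pskb_may_pull(): skb->len also counts paged fragments, so the old test could pass while the four port bytes were not actually present in the linear header area that the ports pointer is about to read. Forming the pointer only after the pull succeeds is the load-bearing part. A user-space sketch of the pull-before-dereference pattern (hypothetical buffer type, not the kernel sk_buff):

#include <stdio.h>
#include <string.h>

struct buf {
	unsigned char linear[64];
	size_t linear_len;              /* bytes present in linear[] */
	const unsigned char *frag;      /* non-contiguous remainder */
	size_t frag_len;
};

static int may_pull(struct buf *b, size_t need)
{
	size_t miss;

	if (need <= b->linear_len)
		return 1;
	miss = need - b->linear_len;
	if (miss > b->frag_len || need > sizeof(b->linear))
		return 0;
	memcpy(b->linear + b->linear_len, b->frag, miss);  /* linearize */
	b->linear_len += miss;
	b->frag += miss;
	b->frag_len -= miss;
	return 1;
}

int main(void)
{
	static const unsigned char tail[4] = { 0x1f, 0x90, 0x00, 0x50 };
	struct buf b = { .linear_len = 0, .frag = tail, .frag_len = 4 };

	if (may_pull(&b, 4)) {          /* only now form the pointer */
		const unsigned char *ports = b.linear;

		printf("src port %u\n", (ports[0] << 8) | ports[1]);
	}
	return 0;
}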
1782 | diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c |
1783 | index 5f5e5936760e..c78fb53988a1 100644 |
1784 | --- a/net/ipv4/tcp_dctcp.c |
1785 | +++ b/net/ipv4/tcp_dctcp.c |
1786 | @@ -131,23 +131,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk) |
1787 | struct dctcp *ca = inet_csk_ca(sk); |
1788 | struct tcp_sock *tp = tcp_sk(sk); |
1789 | |
1790 | - /* State has changed from CE=0 to CE=1 and delayed |
1791 | - * ACK has not sent yet. |
1792 | - */ |
1793 | - if (!ca->ce_state && ca->delayed_ack_reserved) { |
1794 | - u32 tmp_rcv_nxt; |
1795 | - |
1796 | - /* Save current rcv_nxt. */ |
1797 | - tmp_rcv_nxt = tp->rcv_nxt; |
1798 | - |
1799 | - /* Generate previous ack with CE=0. */ |
1800 | - tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; |
1801 | - tp->rcv_nxt = ca->prior_rcv_nxt; |
1802 | - |
1803 | - tcp_send_ack(sk); |
1804 | - |
1805 | - /* Recover current rcv_nxt. */ |
1806 | - tp->rcv_nxt = tmp_rcv_nxt; |
1807 | + if (!ca->ce_state) { |
1808 | + /* State has changed from CE=0 to CE=1, force an immediate |
1809 | + * ACK to reflect the new CE state. If an ACK was delayed, |
1810 | + * send that first to reflect the prior CE state. |
1811 | + */ |
1812 | + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) |
1813 | + __tcp_send_ack(sk, ca->prior_rcv_nxt); |
1814 | + tcp_enter_quickack_mode(sk); |
1815 | } |
1816 | |
1817 | ca->prior_rcv_nxt = tp->rcv_nxt; |
1818 | @@ -161,23 +152,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk) |
1819 | struct dctcp *ca = inet_csk_ca(sk); |
1820 | struct tcp_sock *tp = tcp_sk(sk); |
1821 | |
1822 | - /* State has changed from CE=1 to CE=0 and delayed |
1823 | - * ACK has not sent yet. |
1824 | - */ |
1825 | - if (ca->ce_state && ca->delayed_ack_reserved) { |
1826 | - u32 tmp_rcv_nxt; |
1827 | - |
1828 | - /* Save current rcv_nxt. */ |
1829 | - tmp_rcv_nxt = tp->rcv_nxt; |
1830 | - |
1831 | - /* Generate previous ack with CE=1. */ |
1832 | - tp->ecn_flags |= TCP_ECN_DEMAND_CWR; |
1833 | - tp->rcv_nxt = ca->prior_rcv_nxt; |
1834 | - |
1835 | - tcp_send_ack(sk); |
1836 | - |
1837 | - /* Recover current rcv_nxt. */ |
1838 | - tp->rcv_nxt = tmp_rcv_nxt; |
1839 | + if (ca->ce_state) { |
1840 | + /* State has changed from CE=1 to CE=0, force an immediate |
1841 | + * ACK to reflect the new CE state. If an ACK was delayed, |
1842 | + * send that first to reflect the prior CE state. |
1843 | + */ |
1844 | + if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) |
1845 | + __tcp_send_ack(sk, ca->prior_rcv_nxt); |
1846 | + tcp_enter_quickack_mode(sk); |
1847 | } |
1848 | |
1849 | ca->prior_rcv_nxt = tp->rcv_nxt; |
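
The tcp_dctcp.c rework retires the save/overwrite/restore dance on tp->rcv_nxt, which raced with the rest of the stack. On a CE flip it now sends any pending delayed ACK for the prior data via __tcp_send_ack(sk, ca->prior_rcv_nxt), so that ACK's ECN echo describes the old CE state, and then enters quick-ACK mode so the new state is acknowledged immediately. A toy model of that transition logic (hypothetical, not the kernel state machine):

#include <stdio.h>

struct dctcp_model {
	int ce_state;            /* last CE mark seen */
	int ack_pending;         /* a delayed ACK timer is armed */
	unsigned prior_rcv_nxt;  /* rcv_nxt as of the last ACK sent */
	unsigned rcv_nxt;
};

static void on_segment(struct dctcp_model *m, int ce, unsigned len)
{
	if (ce != m->ce_state) {
		if (m->ack_pending) {
			printf("ACK %u (reflects old CE=%d)\n",
			       m->prior_rcv_nxt, m->ce_state);
			m->ack_pending = 0;
		}
		printf("quick-ACK mode on\n");
		m->ce_state = ce;
	}
	m->prior_rcv_nxt = m->rcv_nxt;
	m->rcv_nxt += len;
}

int main(void)
{
	struct dctcp_model m = { .ack_pending = 1, .prior_rcv_nxt = 1000,
				 .rcv_nxt = 2000 };

	on_segment(&m, 1, 1448);  /* CE 0 -> 1 with a delayed ACK armed */
	return 0;
}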
1850 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
1851 | index 5711b1b12d28..b86e7b8beb1d 100644 |
1852 | --- a/net/ipv4/tcp_input.c |
1853 | +++ b/net/ipv4/tcp_input.c |
1854 | @@ -209,13 +209,14 @@ static void tcp_incr_quickack(struct sock *sk) |
1855 | icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); |
1856 | } |
1857 | |
1858 | -static void tcp_enter_quickack_mode(struct sock *sk) |
1859 | +void tcp_enter_quickack_mode(struct sock *sk) |
1860 | { |
1861 | struct inet_connection_sock *icsk = inet_csk(sk); |
1862 | tcp_incr_quickack(sk); |
1863 | icsk->icsk_ack.pingpong = 0; |
1864 | icsk->icsk_ack.ato = TCP_ATO_MIN; |
1865 | } |
1866 | +EXPORT_SYMBOL(tcp_enter_quickack_mode); |
1867 | |
1868 | /* Send ACKs quickly, if "quick" count is not exhausted |
1869 | * and the session is not interactive. |
1870 | @@ -4331,6 +4332,23 @@ static bool tcp_try_coalesce(struct sock *sk, |
1871 | return true; |
1872 | } |
1873 | |
1874 | +static bool tcp_ooo_try_coalesce(struct sock *sk, |
1875 | + struct sk_buff *to, |
1876 | + struct sk_buff *from, |
1877 | + bool *fragstolen) |
1878 | +{ |
1879 | + bool res = tcp_try_coalesce(sk, OOO_QUEUE, to, from, fragstolen); |
1880 | + |
1881 | + /* In case tcp_drop() is called later, update to->gso_segs */ |
1882 | + if (res) { |
1883 | + u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) + |
1884 | + max_t(u16, 1, skb_shinfo(from)->gso_segs); |
1885 | + |
1886 | + skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF); |
1887 | + } |
1888 | + return res; |
1889 | +} |
1890 | + |
1891 | static void tcp_drop(struct sock *sk, struct sk_buff *skb) |
1892 | { |
1893 | sk_drops_add(sk, skb); |
1894 | @@ -4462,8 +4480,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb) |
1895 | /* In the typical case, we are adding an skb to the end of the list. |
1896 | * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup. |
1897 | */ |
1898 | - if (tcp_try_coalesce(sk, OOO_QUEUE, tp->ooo_last_skb, |
1899 | - skb, &fragstolen)) { |
1900 | + if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, |
1901 | + skb, &fragstolen)) { |
1902 | coalesce_done: |
1903 | tcp_grow_window(sk, skb); |
1904 | kfree_skb_partial(skb, fragstolen); |
1905 | @@ -4491,7 +4509,7 @@ coalesce_done: |
1906 | /* All the bits are present. Drop. */ |
1907 | NET_INC_STATS(sock_net(sk), |
1908 | LINUX_MIB_TCPOFOMERGE); |
1909 | - __kfree_skb(skb); |
1910 | + tcp_drop(sk, skb); |
1911 | skb = NULL; |
1912 | tcp_dsack_set(sk, seq, end_seq); |
1913 | goto add_sack; |
1914 | @@ -4510,11 +4528,11 @@ coalesce_done: |
1915 | TCP_SKB_CB(skb1)->end_seq); |
1916 | NET_INC_STATS(sock_net(sk), |
1917 | LINUX_MIB_TCPOFOMERGE); |
1918 | - __kfree_skb(skb1); |
1919 | + tcp_drop(sk, skb1); |
1920 | goto merge_right; |
1921 | } |
1922 | - } else if (tcp_try_coalesce(sk, OOO_QUEUE, skb1, |
1923 | - skb, &fragstolen)) { |
1924 | + } else if (tcp_ooo_try_coalesce(sk, skb1, |
1925 | + skb, &fragstolen)) { |
1926 | goto coalesce_done; |
1927 | } |
1928 | p = &parent->rb_right; |
1929 | @@ -4876,6 +4894,7 @@ end: |
1930 | static void tcp_collapse_ofo_queue(struct sock *sk) |
1931 | { |
1932 | struct tcp_sock *tp = tcp_sk(sk); |
1933 | + u32 range_truesize, sum_tiny = 0; |
1934 | struct sk_buff *skb, *head; |
1935 | struct rb_node *p; |
1936 | u32 start, end; |
1937 | @@ -4894,6 +4913,7 @@ new_range: |
1938 | } |
1939 | start = TCP_SKB_CB(skb)->seq; |
1940 | end = TCP_SKB_CB(skb)->end_seq; |
1941 | + range_truesize = skb->truesize; |
1942 | |
1943 | for (head = skb;;) { |
1944 | skb = tcp_skb_next(skb, NULL); |
1945 | @@ -4904,11 +4924,20 @@ new_range: |
1946 | if (!skb || |
1947 | after(TCP_SKB_CB(skb)->seq, end) || |
1948 | before(TCP_SKB_CB(skb)->end_seq, start)) { |
1949 | - tcp_collapse(sk, NULL, &tp->out_of_order_queue, |
1950 | - head, skb, start, end); |
1951 | + /* Do not attempt collapsing tiny skbs */ |
1952 | + if (range_truesize != head->truesize || |
1953 | + end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) { |
1954 | + tcp_collapse(sk, NULL, &tp->out_of_order_queue, |
1955 | + head, skb, start, end); |
1956 | + } else { |
1957 | + sum_tiny += range_truesize; |
1958 | + if (sum_tiny > sk->sk_rcvbuf >> 3) |
1959 | + return; |
1960 | + } |
1961 | goto new_range; |
1962 | } |
1963 | |
1964 | + range_truesize += skb->truesize; |
1965 | if (unlikely(before(TCP_SKB_CB(skb)->seq, start))) |
1966 | start = TCP_SKB_CB(skb)->seq; |
1967 | if (after(TCP_SKB_CB(skb)->end_seq, end)) |
1968 | @@ -4923,6 +4952,7 @@ new_range: |
1969 | * 2) not add too big latencies if thousands of packets sit there. |
1970 | * (But if application shrinks SO_RCVBUF, we could still end up |
1971 | * freeing whole queue here) |
1972 | + * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks. |
1973 | * |
1974 | * Return true if queue has shrunk. |
1975 | */ |
1976 | @@ -4930,20 +4960,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk) |
1977 | { |
1978 | struct tcp_sock *tp = tcp_sk(sk); |
1979 | struct rb_node *node, *prev; |
1980 | + int goal; |
1981 | |
1982 | if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) |
1983 | return false; |
1984 | |
1985 | NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED); |
1986 | + goal = sk->sk_rcvbuf >> 3; |
1987 | node = &tp->ooo_last_skb->rbnode; |
1988 | do { |
1989 | prev = rb_prev(node); |
1990 | rb_erase(node, &tp->out_of_order_queue); |
1991 | + goal -= rb_to_skb(node)->truesize; |
1992 | tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode)); |
1993 | - sk_mem_reclaim(sk); |
1994 | - if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && |
1995 | - !tcp_under_memory_pressure(sk)) |
1996 | - break; |
1997 | + if (!prev || goal <= 0) { |
1998 | + sk_mem_reclaim(sk); |
1999 | + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && |
2000 | + !tcp_under_memory_pressure(sk)) |
2001 | + break; |
2002 | + goal = sk->sk_rcvbuf >> 3; |
2003 | + } |
2004 | node = prev; |
2005 | } while (node); |
2006 | tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode); |
2007 | @@ -4978,6 +5014,9 @@ static int tcp_prune_queue(struct sock *sk) |
2008 | else if (tcp_under_memory_pressure(sk)) |
2009 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); |
2010 | |
2011 | + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) |
2012 | + return 0; |
2013 | + |
2014 | tcp_collapse_ofo_queue(sk); |
2015 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
2016 | tcp_collapse(sk, &sk->sk_receive_queue, NULL, |
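
Among the tcp_input.c hunks, tcp_ooo_try_coalesce() keeps gso_segs meaningful on merged out-of-order skbs so that a later tcp_drop() accounts correctly: each side contributes at least one segment and the 16-bit field saturates at 0xFFFF. The arithmetic in isolation (stand-alone demo, hypothetical helper name):

#include <stdio.h>
#include <stdint.h>

static uint16_t merge_gso_segs(uint16_t to, uint16_t from)
{
	/* max(1, to) + max(1, from), saturated to the u16 field */
	uint32_t sum = (uint32_t)(to ? to : 1) + (from ? from : 1);

	return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
}

int main(void)
{
	printf("%u\n", merge_gso_segs(0, 0));         /* 2: min 1 each */
	printf("%u\n", merge_gso_segs(40000, 40000)); /* 65535: clamped */
	return 0;
}

The same section also batches reclaim in tcp_prune_ofo_queue(): instead of calling sk_mem_reclaim() after every freed skb, it frees at least sk_rcvbuf/8 worth of truesize per pass, matching the new "drop at least 12.5%" comment.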
2017 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
2018 | index abae5196cd3a..3d8f6f342cb1 100644 |
2019 | --- a/net/ipv4/tcp_output.c |
2020 | +++ b/net/ipv4/tcp_output.c |
2021 | @@ -175,8 +175,13 @@ static void tcp_event_data_sent(struct tcp_sock *tp, |
2022 | } |
2023 | |
2024 | /* Account for an ACK we sent. */ |
2025 | -static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts) |
2026 | +static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts, |
2027 | + u32 rcv_nxt) |
2028 | { |
2029 | + struct tcp_sock *tp = tcp_sk(sk); |
2030 | + |
2031 | + if (unlikely(rcv_nxt != tp->rcv_nxt)) |
2032 | + return; /* Special ACK sent by DCTCP to reflect ECN */ |
2033 | tcp_dec_quickack_mode(sk, pkts); |
2034 | inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); |
2035 | } |
2036 | @@ -984,8 +989,8 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb) |
2037 | * We are working here with either a clone of the original |
2038 | * SKB, or a fresh unique copy made by the retransmit engine. |
2039 | */ |
2040 | -static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
2041 | - gfp_t gfp_mask) |
2042 | +static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, |
2043 | + int clone_it, gfp_t gfp_mask, u32 rcv_nxt) |
2044 | { |
2045 | const struct inet_connection_sock *icsk = inet_csk(sk); |
2046 | struct inet_sock *inet; |
2047 | @@ -1057,7 +1062,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
2048 | th->source = inet->inet_sport; |
2049 | th->dest = inet->inet_dport; |
2050 | th->seq = htonl(tcb->seq); |
2051 | - th->ack_seq = htonl(tp->rcv_nxt); |
2052 | + th->ack_seq = htonl(rcv_nxt); |
2053 | *(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | |
2054 | tcb->tcp_flags); |
2055 | |
2056 | @@ -1098,7 +1103,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
2057 | icsk->icsk_af_ops->send_check(sk, skb); |
2058 | |
2059 | if (likely(tcb->tcp_flags & TCPHDR_ACK)) |
2060 | - tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); |
2061 | + tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt); |
2062 | |
2063 | if (skb->len != tcp_header_size) { |
2064 | tcp_event_data_sent(tp, sk); |
2065 | @@ -1135,6 +1140,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
2066 | return err; |
2067 | } |
2068 | |
2069 | +static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, |
2070 | + gfp_t gfp_mask) |
2071 | +{ |
2072 | + return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask, |
2073 | + tcp_sk(sk)->rcv_nxt); |
2074 | +} |
2075 | + |
2076 | /* This routine just queues the buffer for sending. |
2077 | * |
2078 | * NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames, |
2079 | @@ -3551,7 +3563,7 @@ void tcp_send_delayed_ack(struct sock *sk) |
2080 | } |
2081 | |
2082 | /* This routine sends an ack and also updates the window. */ |
2083 | -void tcp_send_ack(struct sock *sk) |
2084 | +void __tcp_send_ack(struct sock *sk, u32 rcv_nxt) |
2085 | { |
2086 | struct sk_buff *buff; |
2087 | |
2088 | @@ -3586,9 +3598,14 @@ void tcp_send_ack(struct sock *sk) |
2089 | skb_set_tcp_pure_ack(buff); |
2090 | |
2091 | /* Send it off, this clears delayed acks for us. */ |
2092 | - tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0); |
2093 | + __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt); |
2094 | +} |
2095 | +EXPORT_SYMBOL_GPL(__tcp_send_ack); |
2096 | + |
2097 | +void tcp_send_ack(struct sock *sk) |
2098 | +{ |
2099 | + __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt); |
2100 | } |
2101 | -EXPORT_SYMBOL_GPL(tcp_send_ack); |
2102 | |
2103 | /* This routine sends a packet with an out of date sequence |
2104 | * number. It assumes the other end will try to ack it. |
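
The tcp_output.c changes thread rcv_nxt explicitly through the new __tcp_transmit_skb()/__tcp_send_ack() primitives, keeping the old entry points as thin wrappers; tcp_event_ack_sent() then ignores ACKs whose rcv_nxt is not current, so DCTCP's old-state ACK cannot clear the delayed-ACK timer that still covers the newest data. A sketch of that guard (hypothetical model, not kernel code):

#include <stdio.h>

struct sock_model {
	unsigned rcv_nxt;
	int dack_timer_armed;
};

static void event_ack_sent(struct sock_model *sk, unsigned acked_rcv_nxt)
{
	if (acked_rcv_nxt != sk->rcv_nxt)
		return;           /* special old-data ACK: keep timer */
	sk->dack_timer_armed = 0; /* normal ACK covers pending data */
}

int main(void)
{
	struct sock_model sk = { .rcv_nxt = 2000, .dack_timer_armed = 1 };

	event_ack_sent(&sk, 1000);  /* DCTCP-style ACK for prior data */
	printf("timer armed: %d\n", sk.dack_timer_armed);   /* still 1 */
	event_ack_sent(&sk, 2000);  /* regular ACK */
	printf("timer armed: %d\n", sk.dack_timer_armed);   /* now 0 */
	return 0;
}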
2105 | diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c |
2106 | index 453dc3726199..461825e0680f 100644 |
2107 | --- a/net/ipv6/datagram.c |
2108 | +++ b/net/ipv6/datagram.c |
2109 | @@ -708,13 +708,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg, |
2110 | } |
2111 | if (np->rxopt.bits.rxorigdstaddr) { |
2112 | struct sockaddr_in6 sin6; |
2113 | - __be16 *ports = (__be16 *) skb_transport_header(skb); |
2114 | + __be16 *ports; |
2115 | + int end; |
2116 | |
2117 | - if (skb_transport_offset(skb) + 4 <= (int)skb->len) { |
2118 | + end = skb_transport_offset(skb) + 4; |
2119 | + if (end <= 0 || pskb_may_pull(skb, end)) { |
2120 | /* All current transport protocols have the port numbers in the |
2121 | * first four bytes of the transport header and this function is |
2122 | * written with this assumption in mind. |
2123 | */ |
2124 | + ports = (__be16 *)skb_transport_header(skb); |
2125 | |
2126 | sin6.sin6_family = AF_INET6; |
2127 | sin6.sin6_addr = ipv6_hdr(skb)->daddr; |
2128 | diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c |
2129 | index 5acb54405b10..c5f2b17b7ee1 100644 |
2130 | --- a/net/ipv6/icmp.c |
2131 | +++ b/net/ipv6/icmp.c |
2132 | @@ -405,9 +405,10 @@ static int icmp6_iif(const struct sk_buff *skb) |
2133 | |
2134 | /* for local traffic to local address, skb dev is the loopback |
2135 | * device. Check if there is a dst attached to the skb and if so |
2136 | - * get the real device index. |
2137 | + * get the real device index. Same is needed for replies to a link |
2138 | + * local address on a device enslaved to an L3 master device |
2139 | */ |
2140 | - if (unlikely(iif == LOOPBACK_IFINDEX)) { |
2141 | + if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) { |
2142 | const struct rt6_info *rt6 = skb_rt6_info(skb); |
2143 | |
2144 | if (rt6) |
2145 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
2146 | index 32fcce711855..1da021527fcd 100644 |
2147 | --- a/net/ipv6/ip6_output.c |
2148 | +++ b/net/ipv6/ip6_output.c |
2149 | @@ -595,6 +595,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from) |
2150 | to->dev = from->dev; |
2151 | to->mark = from->mark; |
2152 | |
2153 | + skb_copy_hash(to, from); |
2154 | + |
2155 | #ifdef CONFIG_NET_SCHED |
2156 | to->tc_index = from->tc_index; |
2157 | #endif |
2158 | diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c |
2159 | index 9a38a2c641fa..6fd913d63835 100644 |
2160 | --- a/net/ipv6/mcast.c |
2161 | +++ b/net/ipv6/mcast.c |
2162 | @@ -771,8 +771,7 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im) |
2163 | if (pmc) { |
2164 | im->idev = pmc->idev; |
2165 | im->mca_crcount = idev->mc_qrv; |
2166 | - im->mca_sfmode = pmc->mca_sfmode; |
2167 | - if (pmc->mca_sfmode == MCAST_INCLUDE) { |
2168 | + if (im->mca_sfmode == MCAST_INCLUDE) { |
2169 | im->mca_tomb = pmc->mca_tomb; |
2170 | im->mca_sources = pmc->mca_sources; |
2171 | for (psf = im->mca_sources; psf; psf = psf->sf_next) |
2172 | diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c |
2173 | index 35e8aef9ceed..ba8586aadffa 100644 |
2174 | --- a/net/ipv6/tcp_ipv6.c |
2175 | +++ b/net/ipv6/tcp_ipv6.c |
2176 | @@ -918,7 +918,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) |
2177 | &tcp_hashinfo, NULL, 0, |
2178 | &ipv6h->saddr, |
2179 | th->source, &ipv6h->daddr, |
2180 | - ntohs(th->source), tcp_v6_iif(skb), |
2181 | + ntohs(th->source), |
2182 | + tcp_v6_iif_l3_slave(skb), |
2183 | tcp_v6_sdif(skb)); |
2184 | if (!sk1) |
2185 | goto out; |
2186 | @@ -1573,7 +1574,8 @@ do_time_wait: |
2187 | skb, __tcp_hdrlen(th), |
2188 | &ipv6_hdr(skb)->saddr, th->source, |
2189 | &ipv6_hdr(skb)->daddr, |
2190 | - ntohs(th->dest), tcp_v6_iif(skb), |
2191 | + ntohs(th->dest), |
2192 | + tcp_v6_iif_l3_slave(skb), |
2193 | sdif); |
2194 | if (sk2) { |
2195 | struct inet_timewait_sock *tw = inet_twsk(sk); |
2196 | diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c |
2197 | index 8ee4e667a414..fb79caf56d0e 100644 |
2198 | --- a/net/tls/tls_sw.c |
2199 | +++ b/net/tls/tls_sw.c |
2200 | @@ -135,9 +135,10 @@ static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg, |
2201 | pfrag->offset += use; |
2202 | |
2203 | sge = sg + num_elem - 1; |
2204 | - if (num_elem > first_coalesce && sg_page(sg) == pfrag->page && |
2205 | - sg->offset + sg->length == orig_offset) { |
2206 | - sg->length += use; |
2207 | + |
2208 | + if (num_elem > first_coalesce && sg_page(sge) == pfrag->page && |
2209 | + sge->offset + sge->length == orig_offset) { |
2210 | + sge->length += use; |
2211 | } else { |
2212 | sge++; |
2213 | sg_unmark_end(sge); |