Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0125-5.4.26-all-fixes.patch

Revision 3506
Mon May 11 14:36:28 2020 UTC by niro
File size: 140314 byte(s)
-linux-5.4.26
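
Note: the SUBLEVEL bump in the Makefile hunk below (25 -> 26) indicates this patch is intended to be applied on top of a vanilla linux-5.4.25 tree. A minimal, illustrative sketch of applying it with GNU patch via Python follows; the tree location and the local patch file name are assumptions, not part of this repository page.

    # Illustrative sketch only: apply this patch to an extracted linux-5.4.25 source tree.
    import subprocess
    from pathlib import Path

    kernel_tree = Path("linux-5.4.25")                 # assumed: vanilla 5.4.25 sources
    patch_file = Path("0125-5.4.26-all-fixes.patch")   # assumed: this file, saved locally

    with patch_file.open("rb") as fh:
        # Equivalent to running "patch -p1 < 0125-5.4.26-all-fixes.patch" inside the tree.
        subprocess.run(["patch", "-p1"], cwd=kernel_tree, stdin=fh, check=True)
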
1 diff --git a/Documentation/filesystems/porting.rst b/Documentation/filesystems/porting.rst
2 index f18506083ced..26c093969573 100644
3 --- a/Documentation/filesystems/porting.rst
4 +++ b/Documentation/filesystems/porting.rst
5 @@ -850,3 +850,11 @@ business doing so.
6 d_alloc_pseudo() is internal-only; uses outside of alloc_file_pseudo() are
7 very suspect (and won't work in modules). Such uses are very likely to
8 be misspelled d_alloc_anon().
9 +
10 +---
11 +
12 +**mandatory**
13 +
14 +[should've been added in 2016] stale comment in finish_open() nonwithstanding,
15 +failure exits in ->atomic_open() instances should *NOT* fput() the file,
16 +no matter what. Everything is handled by the caller.
17 diff --git a/Makefile b/Makefile
18 index 85e41313f078..2250b1bb8aa9 100644
19 --- a/Makefile
20 +++ b/Makefile
21 @@ -1,7 +1,7 @@
22 # SPDX-License-Identifier: GPL-2.0
23 VERSION = 5
24 PATCHLEVEL = 4
25 -SUBLEVEL = 25
26 +SUBLEVEL = 26
27 EXTRAVERSION =
28 NAME = Kleptomaniac Octopus
29
30 diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
31 index d9ee43c6b7db..fe19f1d412e7 100644
32 --- a/arch/arc/include/asm/linkage.h
33 +++ b/arch/arc/include/asm/linkage.h
34 @@ -29,6 +29,8 @@
35 .endm
36
37 #define ASM_NL ` /* use '`' to mark new line in macro */
38 +#define __ALIGN .align 4
39 +#define __ALIGN_STR __stringify(__ALIGN)
40
41 /* annotation for data we want in DCCM - if enabled in .config */
42 .macro ARCFP_DATA nm
43 diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
44 index a6ea07f2aa84..4d867a752f0e 100644
45 --- a/arch/x86/events/amd/uncore.c
46 +++ b/arch/x86/events/amd/uncore.c
47 @@ -190,15 +190,12 @@ static int amd_uncore_event_init(struct perf_event *event)
48
49 /*
50 * NB and Last level cache counters (MSRs) are shared across all cores
51 - * that share the same NB / Last level cache. Interrupts can be directed
52 - * to a single target core, however, event counts generated by processes
53 - * running on other cores cannot be masked out. So we do not support
54 - * sampling and per-thread events.
55 + * that share the same NB / Last level cache. On family 16h and below,
56 + * Interrupts can be directed to a single target core, however, event
57 + * counts generated by processes running on other cores cannot be masked
58 + * out. So we do not support sampling and per-thread events via
59 + * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
60 */
61 - if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
62 - return -EINVAL;
63 -
64 - /* and we do not enable counter overflow interrupts */
65 hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
66 hwc->idx = -1;
67
68 @@ -306,7 +303,7 @@ static struct pmu amd_nb_pmu = {
69 .start = amd_uncore_start,
70 .stop = amd_uncore_stop,
71 .read = amd_uncore_read,
72 - .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
73 + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
74 };
75
76 static struct pmu amd_llc_pmu = {
77 @@ -317,7 +314,7 @@ static struct pmu amd_llc_pmu = {
78 .start = amd_uncore_start,
79 .stop = amd_uncore_stop,
80 .read = amd_uncore_read,
81 - .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
82 + .capabilities = PERF_PMU_CAP_NO_EXCLUDE | PERF_PMU_CAP_NO_INTERRUPT,
83 };
84
85 static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
86 diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c
87 index 88cd9598fa57..f2350967a898 100644
88 --- a/arch/x86/kernel/cpu/mce/intel.c
89 +++ b/arch/x86/kernel/cpu/mce/intel.c
90 @@ -489,17 +489,18 @@ static void intel_ppin_init(struct cpuinfo_x86 *c)
91 return;
92
93 if ((val & 3UL) == 1UL) {
94 - /* PPIN available but disabled: */
95 + /* PPIN locked in disabled mode */
96 return;
97 }
98
99 - /* If PPIN is disabled, but not locked, try to enable: */
100 - if (!(val & 3UL)) {
101 + /* If PPIN is disabled, try to enable */
102 + if (!(val & 2UL)) {
103 wrmsrl_safe(MSR_PPIN_CTL, val | 2UL);
104 rdmsrl_safe(MSR_PPIN_CTL, &val);
105 }
106
107 - if ((val & 3UL) == 2UL)
108 + /* Is the enable bit set? */
109 + if (val & 2UL)
110 set_cpu_cap(c, X86_FEATURE_INTEL_PPIN);
111 }
112 }
113 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
114 index 37aa9ce29b33..128d3ad46e96 100644
115 --- a/arch/x86/kvm/emulate.c
116 +++ b/arch/x86/kvm/emulate.c
117 @@ -5197,6 +5197,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
118 ctxt->fetch.ptr = ctxt->fetch.data;
119 ctxt->fetch.end = ctxt->fetch.data + insn_len;
120 ctxt->opcode_len = 1;
121 + ctxt->intercept = x86_intercept_none;
122 if (insn_len > 0)
123 memcpy(ctxt->fetch.data, insn, insn_len);
124 else {
125 diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
126 index e1d8062ef12e..2b44554baf28 100644
127 --- a/arch/x86/kvm/vmx/nested.c
128 +++ b/arch/x86/kvm/vmx/nested.c
129 @@ -223,7 +223,7 @@ static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
130 return;
131
132 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
133 - vmx->nested.hv_evmcs_vmptr = -1ull;
134 + vmx->nested.hv_evmcs_vmptr = 0;
135 vmx->nested.hv_evmcs = NULL;
136 }
137
138 @@ -1828,7 +1828,8 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
139 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
140 return 1;
141
142 - if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
143 + if (unlikely(!vmx->nested.hv_evmcs ||
144 + evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
145 if (!vmx->nested.hv_evmcs)
146 vmx->nested.current_vmptr = -1ull;
147
148 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
149 index a39dcdb5ae34..2db3b7c4de16 100644
150 --- a/arch/x86/mm/ioremap.c
151 +++ b/arch/x86/mm/ioremap.c
152 @@ -106,6 +106,19 @@ static unsigned int __ioremap_check_encrypted(struct resource *res)
153 return 0;
154 }
155
156 +/*
157 + * The EFI runtime services data area is not covered by walk_mem_res(), but must
158 + * be mapped encrypted when SEV is active.
159 + */
160 +static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
161 +{
162 + if (!sev_active())
163 + return;
164 +
165 + if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA)
166 + desc->flags |= IORES_MAP_ENCRYPTED;
167 +}
168 +
169 static int __ioremap_collect_map_flags(struct resource *res, void *arg)
170 {
171 struct ioremap_desc *desc = arg;
172 @@ -124,6 +137,9 @@ static int __ioremap_collect_map_flags(struct resource *res, void *arg)
173 * To avoid multiple resource walks, this function walks resources marked as
174 * IORESOURCE_MEM and IORESOURCE_BUSY and looking for system RAM and/or a
175 * resource described not as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
176 + *
177 + * After that, deal with misc other ranges in __ioremap_check_other() which do
178 + * not fall into the above category.
179 */
180 static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
181 struct ioremap_desc *desc)
182 @@ -135,6 +151,8 @@ static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
183 memset(desc, 0, sizeof(struct ioremap_desc));
184
185 walk_mem_res(start, end, desc, __ioremap_collect_map_flags);
186 +
187 + __ioremap_check_other(addr, desc);
188 }
189
190 /*
191 diff --git a/block/blk-iocost.c b/block/blk-iocost.c
192 index 27ca68621137..9a599cc28c29 100644
193 --- a/block/blk-iocost.c
194 +++ b/block/blk-iocost.c
195 @@ -1318,7 +1318,7 @@ static bool iocg_is_idle(struct ioc_gq *iocg)
196 return false;
197
198 /* is something in flight? */
199 - if (atomic64_read(&iocg->done_vtime) < atomic64_read(&iocg->vtime))
200 + if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime))
201 return false;
202
203 return true;
204 diff --git a/drivers/base/platform.c b/drivers/base/platform.c
205 index 60386a32208f..604a461848c9 100644
206 --- a/drivers/base/platform.c
207 +++ b/drivers/base/platform.c
208 @@ -335,10 +335,10 @@ static void setup_pdev_dma_masks(struct platform_device *pdev)
209 {
210 if (!pdev->dev.coherent_dma_mask)
211 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
212 - if (!pdev->dma_mask)
213 - pdev->dma_mask = DMA_BIT_MASK(32);
214 - if (!pdev->dev.dma_mask)
215 - pdev->dev.dma_mask = &pdev->dma_mask;
216 + if (!pdev->dev.dma_mask) {
217 + pdev->platform_dma_mask = DMA_BIT_MASK(32);
218 + pdev->dev.dma_mask = &pdev->platform_dma_mask;
219 + }
220 };
221
222 /**
223 @@ -634,20 +634,8 @@ struct platform_device *platform_device_register_full(
224 pdev->dev.of_node_reused = pdevinfo->of_node_reused;
225
226 if (pdevinfo->dma_mask) {
227 - /*
228 - * This memory isn't freed when the device is put,
229 - * I don't have a nice idea for that though. Conceptually
230 - * dma_mask in struct device should not be a pointer.
231 - * See http://thread.gmane.org/gmane.linux.kernel.pci/9081
232 - */
233 - pdev->dev.dma_mask =
234 - kmalloc(sizeof(*pdev->dev.dma_mask), GFP_KERNEL);
235 - if (!pdev->dev.dma_mask)
236 - goto err;
237 -
238 - kmemleak_ignore(pdev->dev.dma_mask);
239 -
240 - *pdev->dev.dma_mask = pdevinfo->dma_mask;
241 + pdev->platform_dma_mask = pdevinfo->dma_mask;
242 + pdev->dev.dma_mask = &pdev->platform_dma_mask;
243 pdev->dev.coherent_dma_mask = pdevinfo->dma_mask;
244 }
245
246 @@ -672,7 +660,6 @@ struct platform_device *platform_device_register_full(
247 if (ret) {
248 err:
249 ACPI_COMPANION_SET(&pdev->dev, NULL);
250 - kfree(pdev->dev.dma_mask);
251 platform_device_put(pdev);
252 return ERR_PTR(ret);
253 }
254 diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
255 index 7ffd719d89de..c2ed3e9128e3 100644
256 --- a/drivers/block/virtio_blk.c
257 +++ b/drivers/block/virtio_blk.c
258 @@ -339,10 +339,12 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
259 err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
260 if (err) {
261 virtqueue_kick(vblk->vqs[qid].vq);
262 - blk_mq_stop_hw_queue(hctx);
263 + /* Don't stop the queue if -ENOMEM: we may have failed to
264 + * bounce the buffer due to global resource outage.
265 + */
266 + if (err == -ENOSPC)
267 + blk_mq_stop_hw_queue(hctx);
268 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
269 - /* Out of mem doesn't actually happen, since we fall back
270 - * to direct descriptors */
271 if (err == -ENOMEM || err == -ENOSPC)
272 return BLK_STS_DEV_RESOURCE;
273 return BLK_STS_IOERR;
274 diff --git a/drivers/char/ipmi/ipmi_si_platform.c b/drivers/char/ipmi/ipmi_si_platform.c
275 index c78127ccbc0d..638c693e17ad 100644
276 --- a/drivers/char/ipmi/ipmi_si_platform.c
277 +++ b/drivers/char/ipmi/ipmi_si_platform.c
278 @@ -194,7 +194,7 @@ static int platform_ipmi_probe(struct platform_device *pdev)
279 else
280 io.slave_addr = slave_addr;
281
282 - io.irq = platform_get_irq(pdev, 0);
283 + io.irq = platform_get_irq_optional(pdev, 0);
284 if (io.irq > 0)
285 io.irq_setup = ipmi_std_irq_setup;
286 else
287 @@ -378,7 +378,7 @@ static int acpi_ipmi_probe(struct platform_device *pdev)
288 io.irq = tmp;
289 io.irq_setup = acpi_gpe_irq_setup;
290 } else {
291 - int irq = platform_get_irq(pdev, 0);
292 + int irq = platform_get_irq_optional(pdev, 0);
293
294 if (irq > 0) {
295 io.irq = irq;
296 diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
297 index 7576450c8254..aff3dfb4d7ba 100644
298 --- a/drivers/firmware/efi/efivars.c
299 +++ b/drivers/firmware/efi/efivars.c
300 @@ -83,13 +83,16 @@ static ssize_t
301 efivar_attr_read(struct efivar_entry *entry, char *buf)
302 {
303 struct efi_variable *var = &entry->var;
304 + unsigned long size = sizeof(var->Data);
305 char *str = buf;
306 + int ret;
307
308 if (!entry || !buf)
309 return -EINVAL;
310
311 - var->DataSize = 1024;
312 - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
313 + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
314 + var->DataSize = size;
315 + if (ret)
316 return -EIO;
317
318 if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
319 @@ -116,13 +119,16 @@ static ssize_t
320 efivar_size_read(struct efivar_entry *entry, char *buf)
321 {
322 struct efi_variable *var = &entry->var;
323 + unsigned long size = sizeof(var->Data);
324 char *str = buf;
325 + int ret;
326
327 if (!entry || !buf)
328 return -EINVAL;
329
330 - var->DataSize = 1024;
331 - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
332 + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
333 + var->DataSize = size;
334 + if (ret)
335 return -EIO;
336
337 str += sprintf(str, "0x%lx\n", var->DataSize);
338 @@ -133,12 +139,15 @@ static ssize_t
339 efivar_data_read(struct efivar_entry *entry, char *buf)
340 {
341 struct efi_variable *var = &entry->var;
342 + unsigned long size = sizeof(var->Data);
343 + int ret;
344
345 if (!entry || !buf)
346 return -EINVAL;
347
348 - var->DataSize = 1024;
349 - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
350 + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
351 + var->DataSize = size;
352 + if (ret)
353 return -EIO;
354
355 memcpy(buf, var->Data, var->DataSize);
356 @@ -199,6 +208,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
357 u8 *data;
358 int err;
359
360 + if (!entry || !buf)
361 + return -EINVAL;
362 +
363 if (in_compat_syscall()) {
364 struct compat_efi_variable *compat;
365
366 @@ -250,14 +262,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
367 {
368 struct efi_variable *var = &entry->var;
369 struct compat_efi_variable *compat;
370 + unsigned long datasize = sizeof(var->Data);
371 size_t size;
372 + int ret;
373
374 if (!entry || !buf)
375 return 0;
376
377 - var->DataSize = 1024;
378 - if (efivar_entry_get(entry, &entry->var.Attributes,
379 - &entry->var.DataSize, entry->var.Data))
380 + ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
381 + var->DataSize = datasize;
382 + if (ret)
383 return -EIO;
384
385 if (in_compat_syscall()) {
386 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
387 index f2f40f05fa5c..c687432da426 100644
388 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
389 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
390 @@ -365,8 +365,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
391 router.ddc_valid = false;
392 router.cd_valid = false;
393 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
394 - uint8_t grph_obj_type=
395 - grph_obj_type =
396 + uint8_t grph_obj_type =
397 (le16_to_cpu(path->usGraphicObjIds[j]) &
398 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
399
400 diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
401 index e635e1e5f4d3..bd4e41380777 100644
402 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
403 +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
404 @@ -439,7 +439,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
405 if (unlikely(entry->flags & eb->invalid_flags))
406 return -EINVAL;
407
408 - if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
409 + if (unlikely(entry->alignment &&
410 + !is_power_of_2_u64(entry->alignment)))
411 return -EINVAL;
412
413 /*
414 diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
415 index e1c313da6c00..a62bdf9be682 100644
416 --- a/drivers/gpu/drm/i915/gvt/display.c
417 +++ b/drivers/gpu/drm/i915/gvt/display.c
418 @@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
419 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
420
421 /* TODO: add more platforms support */
422 - if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
423 + if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
424 + IS_COFFEELAKE(dev_priv)) {
425 if (connected) {
426 vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
427 SFUSE_STRAP_DDID_DETECTED;
428 diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
429 index b232965b45b5..32e57635709a 100644
430 --- a/drivers/gpu/drm/i915/gvt/vgpu.c
431 +++ b/drivers/gpu/drm/i915/gvt/vgpu.c
432 @@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
433 {
434 struct intel_gvt *gvt = vgpu->gvt;
435
436 - mutex_lock(&vgpu->vgpu_lock);
437 -
438 WARN(vgpu->active, "vGPU is still active!\n");
439
440 + /*
441 + * remove idr first so later clean can judge if need to stop
442 + * service if no active vgpu.
443 + */
444 + mutex_lock(&gvt->lock);
445 + idr_remove(&gvt->vgpu_idr, vgpu->id);
446 + mutex_unlock(&gvt->lock);
447 +
448 + mutex_lock(&vgpu->vgpu_lock);
449 intel_gvt_debugfs_remove_vgpu(vgpu);
450 intel_vgpu_clean_sched_policy(vgpu);
451 intel_vgpu_clean_submission(vgpu);
452 @@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
453 mutex_unlock(&vgpu->vgpu_lock);
454
455 mutex_lock(&gvt->lock);
456 - idr_remove(&gvt->vgpu_idr, vgpu->id);
457 if (idr_is_empty(&gvt->vgpu_idr))
458 intel_gvt_clean_irq(gvt);
459 intel_gvt_update_vgpu_types(gvt);
460 diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
461 index 1c5506822dc7..0d39038898d4 100644
462 --- a/drivers/gpu/drm/i915/i915_request.c
463 +++ b/drivers/gpu/drm/i915/i915_request.c
464 @@ -560,19 +560,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
465 return NOTIFY_DONE;
466 }
467
468 +static void irq_semaphore_cb(struct irq_work *wrk)
469 +{
470 + struct i915_request *rq =
471 + container_of(wrk, typeof(*rq), semaphore_work);
472 +
473 + i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
474 + i915_request_put(rq);
475 +}
476 +
477 static int __i915_sw_fence_call
478 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
479 {
480 - struct i915_request *request =
481 - container_of(fence, typeof(*request), semaphore);
482 + struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
483
484 switch (state) {
485 case FENCE_COMPLETE:
486 - i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
487 + if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
488 + i915_request_get(rq);
489 + init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
490 + irq_work_queue(&rq->semaphore_work);
491 + }
492 break;
493
494 case FENCE_FREE:
495 - i915_request_put(request);
496 + i915_request_put(rq);
497 break;
498 }
499
500 @@ -1215,9 +1227,9 @@ void __i915_request_queue(struct i915_request *rq,
501 * decide whether to preempt the entire chain so that it is ready to
502 * run at the earliest possible convenience.
503 */
504 - i915_sw_fence_commit(&rq->semaphore);
505 if (attr && rq->engine->schedule)
506 rq->engine->schedule(rq, attr);
507 + i915_sw_fence_commit(&rq->semaphore);
508 i915_sw_fence_commit(&rq->submit);
509 }
510
511 diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
512 index e4dd013761e8..3a3e7bbf19ff 100644
513 --- a/drivers/gpu/drm/i915/i915_request.h
514 +++ b/drivers/gpu/drm/i915/i915_request.h
515 @@ -26,6 +26,7 @@
516 #define I915_REQUEST_H
517
518 #include <linux/dma-fence.h>
519 +#include <linux/irq_work.h>
520 #include <linux/lockdep.h>
521
522 #include "gt/intel_context_types.h"
523 @@ -147,6 +148,7 @@ struct i915_request {
524 };
525 struct list_head execute_cb;
526 struct i915_sw_fence semaphore;
527 + struct irq_work semaphore_work;
528
529 /*
530 * A list of everyone we wait upon, and everyone who waits upon us.
531 diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
532 index 562f756da421..5b1622a40baa 100644
533 --- a/drivers/gpu/drm/i915/i915_utils.h
534 +++ b/drivers/gpu/drm/i915/i915_utils.h
535 @@ -233,6 +233,11 @@ static inline u64 ptr_to_u64(const void *ptr)
536 __idx; \
537 })
538
539 +static inline bool is_power_of_2_u64(u64 n)
540 +{
541 + return (n != 0 && ((n & (n - 1)) == 0));
542 +}
543 +
544 static inline void __list_del_many(struct list_head *head,
545 struct list_head *first)
546 {
547 diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
548 index 050adda7c1bd..05b35ac33ce3 100644
549 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c
550 +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
551 @@ -313,6 +313,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
552 pm_runtime_get_noresume(&pdev->dev);
553
554 i2c_del_adapter(&dev->adapter);
555 + devm_free_irq(&pdev->dev, dev->irq, dev);
556 pci_free_irq_vectors(pdev);
557 }
558
559 diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
560 index 3a9e840a3546..a4a6825c8758 100644
561 --- a/drivers/i2c/busses/i2c-gpio.c
562 +++ b/drivers/i2c/busses/i2c-gpio.c
563 @@ -348,7 +348,7 @@ static struct gpio_desc *i2c_gpio_get_desc(struct device *dev,
564 if (ret == -ENOENT)
565 retdesc = ERR_PTR(-EPROBE_DEFER);
566
567 - if (ret != -EPROBE_DEFER)
568 + if (PTR_ERR(retdesc) != -EPROBE_DEFER)
569 dev_err(dev, "error trying to get descriptor: %d\n", ret);
570
571 return retdesc;
572 diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c
573 index 62a1c92ab803..ce70b5288472 100644
574 --- a/drivers/i2c/i2c-core-acpi.c
575 +++ b/drivers/i2c/i2c-core-acpi.c
576 @@ -394,9 +394,17 @@ EXPORT_SYMBOL_GPL(i2c_acpi_find_adapter_by_handle);
577 static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
578 {
579 struct device *dev;
580 + struct i2c_client *client;
581
582 dev = bus_find_device_by_acpi_dev(&i2c_bus_type, adev);
583 - return dev ? i2c_verify_client(dev) : NULL;
584 + if (!dev)
585 + return NULL;
586 +
587 + client = i2c_verify_client(dev);
588 + if (!client)
589 + put_device(dev);
590 +
591 + return client;
592 }
593
594 static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value,
595 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
596 index 8bd5d608a82c..bc7771498342 100644
597 --- a/drivers/iommu/amd_iommu.c
598 +++ b/drivers/iommu/amd_iommu.c
599 @@ -4421,7 +4421,7 @@ int amd_iommu_activate_guest_mode(void *data)
600 entry->lo.fields_vapic.ga_tag = ir_data->ga_tag;
601
602 return modify_irte_ga(ir_data->irq_2_irte.devid,
603 - ir_data->irq_2_irte.index, entry, NULL);
604 + ir_data->irq_2_irte.index, entry, ir_data);
605 }
606 EXPORT_SYMBOL(amd_iommu_activate_guest_mode);
607
608 @@ -4447,7 +4447,7 @@ int amd_iommu_deactivate_guest_mode(void *data)
609 APICID_TO_IRTE_DEST_HI(cfg->dest_apicid);
610
611 return modify_irte_ga(ir_data->irq_2_irte.devid,
612 - ir_data->irq_2_irte.index, entry, NULL);
613 + ir_data->irq_2_irte.index, entry, ir_data);
614 }
615 EXPORT_SYMBOL(amd_iommu_deactivate_guest_mode);
616
617 diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
618 index c68a1f072c31..76bd2309e023 100644
619 --- a/drivers/iommu/dma-iommu.c
620 +++ b/drivers/iommu/dma-iommu.c
621 @@ -176,15 +176,15 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
622 start -= iova_offset(iovad, start);
623 num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);
624
625 - msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
626 - if (!msi_page)
627 - return -ENOMEM;
628 -
629 for (i = 0; i < num_pages; i++) {
630 - msi_page[i].phys = start;
631 - msi_page[i].iova = start;
632 - INIT_LIST_HEAD(&msi_page[i].list);
633 - list_add(&msi_page[i].list, &cookie->msi_page_list);
634 + msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
635 + if (!msi_page)
636 + return -ENOMEM;
637 +
638 + msi_page->phys = start;
639 + msi_page->iova = start;
640 + INIT_LIST_HEAD(&msi_page->list);
641 + list_add(&msi_page->list, &cookie->msi_page_list);
642 start += iovad->granule;
643 }
644
645 diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
646 index 7196cabafb25..6a9a1b987520 100644
647 --- a/drivers/iommu/dmar.c
648 +++ b/drivers/iommu/dmar.c
649 @@ -28,6 +28,7 @@
650 #include <linux/slab.h>
651 #include <linux/iommu.h>
652 #include <linux/numa.h>
653 +#include <linux/limits.h>
654 #include <asm/irq_remapping.h>
655 #include <asm/iommu_table.h>
656
657 @@ -128,6 +129,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
658
659 BUG_ON(dev->is_virtfn);
660
661 + /*
662 + * Ignore devices that have a domain number higher than what can
663 + * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
664 + */
665 + if (pci_domain_nr(dev->bus) > U16_MAX)
666 + return NULL;
667 +
668 /* Only generate path[] for device addition event */
669 if (event == BUS_NOTIFY_ADD_DEVICE)
670 for (tmp = dev; tmp; tmp = tmp->bus->self)
671 @@ -440,12 +448,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
672
673 /* Check for NUL termination within the designated length */
674 if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
675 - WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
676 + pr_warn(FW_BUG
677 "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
678 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
679 dmi_get_system_info(DMI_BIOS_VENDOR),
680 dmi_get_system_info(DMI_BIOS_VERSION),
681 dmi_get_system_info(DMI_PRODUCT_VERSION));
682 + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
683 return -EINVAL;
684 }
685 pr_info("ANDD device: %x name: %s\n", andd->device_number,
686 @@ -471,14 +480,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
687 return 0;
688 }
689 }
690 - WARN_TAINT(
691 - 1, TAINT_FIRMWARE_WORKAROUND,
692 + pr_warn(FW_BUG
693 "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
694 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
695 - drhd->reg_base_addr,
696 + rhsa->base_address,
697 dmi_get_system_info(DMI_BIOS_VENDOR),
698 dmi_get_system_info(DMI_BIOS_VERSION),
699 dmi_get_system_info(DMI_PRODUCT_VERSION));
700 + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
701
702 return 0;
703 }
704 @@ -827,14 +836,14 @@ int __init dmar_table_init(void)
705
706 static void warn_invalid_dmar(u64 addr, const char *message)
707 {
708 - WARN_TAINT_ONCE(
709 - 1, TAINT_FIRMWARE_WORKAROUND,
710 + pr_warn_once(FW_BUG
711 "Your BIOS is broken; DMAR reported at address %llx%s!\n"
712 "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
713 addr, message,
714 dmi_get_system_info(DMI_BIOS_VENDOR),
715 dmi_get_system_info(DMI_BIOS_VERSION),
716 dmi_get_system_info(DMI_PRODUCT_VERSION));
717 + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
718 }
719
720 static int __ref
721 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
722 index 760a242d0801..1c2b3e78056f 100644
723 --- a/drivers/iommu/intel-iommu.c
724 +++ b/drivers/iommu/intel-iommu.c
725 @@ -4129,10 +4129,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
726
727 /* we know that the this iommu should be at offset 0xa000 from vtbar */
728 drhd = dmar_find_matched_drhd_unit(pdev);
729 - if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
730 - TAINT_FIRMWARE_WORKAROUND,
731 - "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
732 + if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
733 + pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
734 + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
735 pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
736 + }
737 }
738 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
739
740 @@ -5023,6 +5024,7 @@ int __init intel_iommu_init(void)
741
742 init_iommu_pm_ops();
743
744 + down_read(&dmar_global_lock);
745 for_each_active_iommu(iommu, drhd) {
746 iommu_device_sysfs_add(&iommu->iommu, NULL,
747 intel_iommu_groups,
748 @@ -5030,6 +5032,7 @@ int __init intel_iommu_init(void)
749 iommu_device_set_ops(&iommu->iommu, &intel_iommu_ops);
750 iommu_device_register(&iommu->iommu);
751 }
752 + up_read(&dmar_global_lock);
753
754 bus_set_iommu(&pci_bus_type, &intel_iommu_ops);
755 if (si_domain && !hw_pass_through)
756 @@ -5040,7 +5043,6 @@ int __init intel_iommu_init(void)
757 down_read(&dmar_global_lock);
758 if (probe_acpi_namespace_devices())
759 pr_warn("ACPI name space devices didn't probe correctly\n");
760 - up_read(&dmar_global_lock);
761
762 /* Finally, we enable the DMA remapping hardware. */
763 for_each_iommu(iommu, drhd) {
764 @@ -5049,6 +5051,8 @@ int __init intel_iommu_init(void)
765
766 iommu_disable_protect_mem_regions(iommu);
767 }
768 + up_read(&dmar_global_lock);
769 +
770 pr_info("Intel(R) Virtualization Technology for Directed I/O\n");
771
772 intel_iommu_enabled = 1;
773 @@ -5523,8 +5527,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
774 u64 phys = 0;
775
776 pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
777 - if (pte)
778 - phys = dma_pte_addr(pte);
779 + if (pte && dma_pte_present(pte))
780 + phys = dma_pte_addr(pte) +
781 + (iova & (BIT_MASK(level_to_offset_bits(level) +
782 + VTD_PAGE_SHIFT) - 1));
783
784 return phys;
785 }
786 diff --git a/drivers/macintosh/windfarm_ad7417_sensor.c b/drivers/macintosh/windfarm_ad7417_sensor.c
787 index 125605987b44..e7dec328c7cf 100644
788 --- a/drivers/macintosh/windfarm_ad7417_sensor.c
789 +++ b/drivers/macintosh/windfarm_ad7417_sensor.c
790 @@ -312,9 +312,16 @@ static const struct i2c_device_id wf_ad7417_id[] = {
791 };
792 MODULE_DEVICE_TABLE(i2c, wf_ad7417_id);
793
794 +static const struct of_device_id wf_ad7417_of_id[] = {
795 + { .compatible = "ad7417", },
796 + { }
797 +};
798 +MODULE_DEVICE_TABLE(of, wf_ad7417_of_id);
799 +
800 static struct i2c_driver wf_ad7417_driver = {
801 .driver = {
802 .name = "wf_ad7417",
803 + .of_match_table = wf_ad7417_of_id,
804 },
805 .probe = wf_ad7417_probe,
806 .remove = wf_ad7417_remove,
807 diff --git a/drivers/macintosh/windfarm_fcu_controls.c b/drivers/macintosh/windfarm_fcu_controls.c
808 index 3c971297b6dc..89b48e8ac6ef 100644
809 --- a/drivers/macintosh/windfarm_fcu_controls.c
810 +++ b/drivers/macintosh/windfarm_fcu_controls.c
811 @@ -582,9 +582,16 @@ static const struct i2c_device_id wf_fcu_id[] = {
812 };
813 MODULE_DEVICE_TABLE(i2c, wf_fcu_id);
814
815 +static const struct of_device_id wf_fcu_of_id[] = {
816 + { .compatible = "fcu", },
817 + { }
818 +};
819 +MODULE_DEVICE_TABLE(of, wf_fcu_of_id);
820 +
821 static struct i2c_driver wf_fcu_driver = {
822 .driver = {
823 .name = "wf_fcu",
824 + .of_match_table = wf_fcu_of_id,
825 },
826 .probe = wf_fcu_probe,
827 .remove = wf_fcu_remove,
828 diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
829 index 282c28a17ea1..1e5fa09845e7 100644
830 --- a/drivers/macintosh/windfarm_lm75_sensor.c
831 +++ b/drivers/macintosh/windfarm_lm75_sensor.c
832 @@ -14,6 +14,7 @@
833 #include <linux/init.h>
834 #include <linux/wait.h>
835 #include <linux/i2c.h>
836 +#include <linux/of_device.h>
837 #include <asm/prom.h>
838 #include <asm/machdep.h>
839 #include <asm/io.h>
840 @@ -91,9 +92,14 @@ static int wf_lm75_probe(struct i2c_client *client,
841 const struct i2c_device_id *id)
842 {
843 struct wf_lm75_sensor *lm;
844 - int rc, ds1775 = id->driver_data;
845 + int rc, ds1775;
846 const char *name, *loc;
847
848 + if (id)
849 + ds1775 = id->driver_data;
850 + else
851 + ds1775 = !!of_device_get_match_data(&client->dev);
852 +
853 DBG("wf_lm75: creating %s device at address 0x%02x\n",
854 ds1775 ? "ds1775" : "lm75", client->addr);
855
856 @@ -164,9 +170,17 @@ static const struct i2c_device_id wf_lm75_id[] = {
857 };
858 MODULE_DEVICE_TABLE(i2c, wf_lm75_id);
859
860 +static const struct of_device_id wf_lm75_of_id[] = {
861 + { .compatible = "lm75", .data = (void *)0},
862 + { .compatible = "ds1775", .data = (void *)1 },
863 + { }
864 +};
865 +MODULE_DEVICE_TABLE(of, wf_lm75_of_id);
866 +
867 static struct i2c_driver wf_lm75_driver = {
868 .driver = {
869 .name = "wf_lm75",
870 + .of_match_table = wf_lm75_of_id,
871 },
872 .probe = wf_lm75_probe,
873 .remove = wf_lm75_remove,
874 diff --git a/drivers/macintosh/windfarm_lm87_sensor.c b/drivers/macintosh/windfarm_lm87_sensor.c
875 index e44525b19071..1a1f8f3f0abc 100644
876 --- a/drivers/macintosh/windfarm_lm87_sensor.c
877 +++ b/drivers/macintosh/windfarm_lm87_sensor.c
878 @@ -166,9 +166,16 @@ static const struct i2c_device_id wf_lm87_id[] = {
879 };
880 MODULE_DEVICE_TABLE(i2c, wf_lm87_id);
881
882 +static const struct of_device_id wf_lm87_of_id[] = {
883 + { .compatible = "lm87cimt", },
884 + { }
885 +};
886 +MODULE_DEVICE_TABLE(of, wf_lm87_of_id);
887 +
888 static struct i2c_driver wf_lm87_driver = {
889 .driver = {
890 .name = "wf_lm87",
891 + .of_match_table = wf_lm87_of_id,
892 },
893 .probe = wf_lm87_probe,
894 .remove = wf_lm87_remove,
895 diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
896 index e666cc020683..1e7b03d44ad9 100644
897 --- a/drivers/macintosh/windfarm_max6690_sensor.c
898 +++ b/drivers/macintosh/windfarm_max6690_sensor.c
899 @@ -120,9 +120,16 @@ static const struct i2c_device_id wf_max6690_id[] = {
900 };
901 MODULE_DEVICE_TABLE(i2c, wf_max6690_id);
902
903 +static const struct of_device_id wf_max6690_of_id[] = {
904 + { .compatible = "max6690", },
905 + { }
906 +};
907 +MODULE_DEVICE_TABLE(of, wf_max6690_of_id);
908 +
909 static struct i2c_driver wf_max6690_driver = {
910 .driver = {
911 .name = "wf_max6690",
912 + .of_match_table = wf_max6690_of_id,
913 },
914 .probe = wf_max6690_probe,
915 .remove = wf_max6690_remove,
916 diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
917 index c84ec49c3741..cb75dc035616 100644
918 --- a/drivers/macintosh/windfarm_smu_sat.c
919 +++ b/drivers/macintosh/windfarm_smu_sat.c
920 @@ -341,9 +341,16 @@ static const struct i2c_device_id wf_sat_id[] = {
921 };
922 MODULE_DEVICE_TABLE(i2c, wf_sat_id);
923
924 +static const struct of_device_id wf_sat_of_id[] = {
925 + { .compatible = "smu-sat", },
926 + { }
927 +};
928 +MODULE_DEVICE_TABLE(of, wf_sat_of_id);
929 +
930 static struct i2c_driver wf_sat_driver = {
931 .driver = {
932 .name = "wf_smu_sat",
933 + .of_match_table = wf_sat_of_id,
934 },
935 .probe = wf_sat_probe,
936 .remove = wf_sat_remove,
937 diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
938 index 5eea8d70a85d..ce15a05f23d4 100644
939 --- a/drivers/mmc/host/sdhci-pci-gli.c
940 +++ b/drivers/mmc/host/sdhci-pci-gli.c
941 @@ -262,10 +262,26 @@ static int gl9750_execute_tuning(struct sdhci_host *host, u32 opcode)
942 return 0;
943 }
944
945 +static void gli_pcie_enable_msi(struct sdhci_pci_slot *slot)
946 +{
947 + int ret;
948 +
949 + ret = pci_alloc_irq_vectors(slot->chip->pdev, 1, 1,
950 + PCI_IRQ_MSI | PCI_IRQ_MSIX);
951 + if (ret < 0) {
952 + pr_warn("%s: enable PCI MSI failed, error=%d\n",
953 + mmc_hostname(slot->host->mmc), ret);
954 + return;
955 + }
956 +
957 + slot->host->irq = pci_irq_vector(slot->chip->pdev, 0);
958 +}
959 +
960 static int gli_probe_slot_gl9750(struct sdhci_pci_slot *slot)
961 {
962 struct sdhci_host *host = slot->host;
963
964 + gli_pcie_enable_msi(slot);
965 slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
966 sdhci_enable_v4_mode(host);
967
968 @@ -276,6 +292,7 @@ static int gli_probe_slot_gl9755(struct sdhci_pci_slot *slot)
969 {
970 struct sdhci_host *host = slot->host;
971
972 + gli_pcie_enable_msi(slot);
973 slot->host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
974 sdhci_enable_v4_mode(host);
975
976 diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
977 index 1cc2cd894f87..c81698550e5a 100644
978 --- a/drivers/net/bonding/bond_alb.c
979 +++ b/drivers/net/bonding/bond_alb.c
980 @@ -50,11 +50,6 @@ struct arp_pkt {
981 };
982 #pragma pack()
983
984 -static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
985 -{
986 - return (struct arp_pkt *)skb_network_header(skb);
987 -}
988 -
989 /* Forward declaration */
990 static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
991 bool strict_match);
992 @@ -553,10 +548,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
993 spin_unlock(&bond->mode_lock);
994 }
995
996 -static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
997 +static struct slave *rlb_choose_channel(struct sk_buff *skb,
998 + struct bonding *bond,
999 + const struct arp_pkt *arp)
1000 {
1001 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
1002 - struct arp_pkt *arp = arp_pkt(skb);
1003 struct slave *assigned_slave, *curr_active_slave;
1004 struct rlb_client_info *client_info;
1005 u32 hash_index = 0;
1006 @@ -653,8 +649,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
1007 */
1008 static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
1009 {
1010 - struct arp_pkt *arp = arp_pkt(skb);
1011 struct slave *tx_slave = NULL;
1012 + struct arp_pkt *arp;
1013 +
1014 + if (!pskb_network_may_pull(skb, sizeof(*arp)))
1015 + return NULL;
1016 + arp = (struct arp_pkt *)skb_network_header(skb);
1017
1018 /* Don't modify or load balance ARPs that do not originate locally
1019 * (e.g.,arrive via a bridge).
1020 @@ -664,7 +664,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
1021
1022 if (arp->op_code == htons(ARPOP_REPLY)) {
1023 /* the arp must be sent on the selected rx channel */
1024 - tx_slave = rlb_choose_channel(skb, bond);
1025 + tx_slave = rlb_choose_channel(skb, bond, arp);
1026 if (tx_slave)
1027 bond_hw_addr_copy(arp->mac_src, tx_slave->dev->dev_addr,
1028 tx_slave->dev->addr_len);
1029 @@ -676,7 +676,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
1030 * When the arp reply is received the entry will be updated
1031 * with the correct unicast address of the client.
1032 */
1033 - tx_slave = rlb_choose_channel(skb, bond);
1034 + tx_slave = rlb_choose_channel(skb, bond, arp);
1035
1036 /* The ARP reply packets must be delayed so that
1037 * they can cancel out the influence of the ARP request.
1038 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
1039 index 1c88c361938c..3a33fb503400 100644
1040 --- a/drivers/net/can/dev.c
1041 +++ b/drivers/net/can/dev.c
1042 @@ -884,6 +884,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
1043 = { .len = sizeof(struct can_bittiming) },
1044 [IFLA_CAN_DATA_BITTIMING_CONST]
1045 = { .len = sizeof(struct can_bittiming_const) },
1046 + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
1047 };
1048
1049 static int can_validate(struct nlattr *tb[], struct nlattr *data[],
1050 diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
1051 index bdbb72fc20ed..6240976679e1 100644
1052 --- a/drivers/net/dsa/mv88e6xxx/global2.c
1053 +++ b/drivers/net/dsa/mv88e6xxx/global2.c
1054 @@ -1083,6 +1083,13 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
1055 {
1056 int err, irq, virq;
1057
1058 + chip->g2_irq.masked = ~0;
1059 + mv88e6xxx_reg_lock(chip);
1060 + err = mv88e6xxx_g2_int_mask(chip, ~chip->g2_irq.masked);
1061 + mv88e6xxx_reg_unlock(chip);
1062 + if (err)
1063 + return err;
1064 +
1065 chip->g2_irq.domain = irq_domain_add_simple(
1066 chip->dev->of_node, 16, 0, &mv88e6xxx_g2_irq_domain_ops, chip);
1067 if (!chip->g2_irq.domain)
1068 @@ -1092,7 +1099,6 @@ int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip)
1069 irq_create_mapping(chip->g2_irq.domain, irq);
1070
1071 chip->g2_irq.chip = mv88e6xxx_g2_irq_chip;
1072 - chip->g2_irq.masked = ~0;
1073
1074 chip->device_irq = irq_find_mapping(chip->g1_irq.domain,
1075 MV88E6XXX_G1_STS_IRQ_DEVICE);
1076 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
1077 index 4a27577e137b..ad86a186ddc5 100644
1078 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
1079 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
1080 @@ -2135,7 +2135,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
1081 return -ENOSPC;
1082
1083 index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
1084 - if (index > RXCHK_BRCM_TAG_MAX)
1085 + if (index >= RXCHK_BRCM_TAG_MAX)
1086 return -ENOSPC;
1087
1088 /* Location is the classification ID, and index is the position
1089 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1090 index 374e11a91790..57c88e157f86 100644
1091 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1092 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1093 @@ -10891,13 +10891,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
1094 struct bnxt *bp = netdev_priv(dev);
1095
1096 if (netif_running(dev))
1097 - bnxt_close_nic(bp, false, false);
1098 + bnxt_close_nic(bp, true, false);
1099
1100 dev->mtu = new_mtu;
1101 bnxt_set_ring_params(bp);
1102
1103 if (netif_running(dev))
1104 - return bnxt_open_nic(bp, false, false);
1105 + return bnxt_open_nic(bp, true, false);
1106
1107 return 0;
1108 }
1109 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1110 index ece70f61c89a..cfa647d5b44d 100644
1111 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1112 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
1113 @@ -2005,8 +2005,8 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
1114 struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
1115 struct hwrm_nvm_install_update_input install = {0};
1116 const struct firmware *fw;
1117 - int rc, hwrm_err = 0;
1118 u32 item_len;
1119 + int rc = 0;
1120 u16 index;
1121
1122 bnxt_hwrm_fw_set_time(bp);
1123 @@ -2050,15 +2050,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
1124 memcpy(kmem, fw->data, fw->size);
1125 modify.host_src_addr = cpu_to_le64(dma_handle);
1126
1127 - hwrm_err = hwrm_send_message(bp, &modify,
1128 - sizeof(modify),
1129 - FLASH_PACKAGE_TIMEOUT);
1130 + rc = hwrm_send_message(bp, &modify, sizeof(modify),
1131 + FLASH_PACKAGE_TIMEOUT);
1132 dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
1133 dma_handle);
1134 }
1135 }
1136 release_firmware(fw);
1137 - if (rc || hwrm_err)
1138 + if (rc)
1139 goto err_exit;
1140
1141 if ((install_type & 0xffff) == 0)
1142 @@ -2067,20 +2066,19 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
1143 install.install_type = cpu_to_le32(install_type);
1144
1145 mutex_lock(&bp->hwrm_cmd_lock);
1146 - hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
1147 - INSTALL_PACKAGE_TIMEOUT);
1148 - if (hwrm_err) {
1149 + rc = _hwrm_send_message(bp, &install, sizeof(install),
1150 + INSTALL_PACKAGE_TIMEOUT);
1151 + if (rc) {
1152 u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;
1153
1154 if (resp->error_code && error_code ==
1155 NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
1156 install.flags |= cpu_to_le16(
1157 NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
1158 - hwrm_err = _hwrm_send_message(bp, &install,
1159 - sizeof(install),
1160 - INSTALL_PACKAGE_TIMEOUT);
1161 + rc = _hwrm_send_message(bp, &install, sizeof(install),
1162 + INSTALL_PACKAGE_TIMEOUT);
1163 }
1164 - if (hwrm_err)
1165 + if (rc)
1166 goto flash_pkg_exit;
1167 }
1168
1169 @@ -2092,7 +2090,7 @@ static int bnxt_flash_package_from_file(struct net_device *dev,
1170 flash_pkg_exit:
1171 mutex_unlock(&bp->hwrm_cmd_lock);
1172 err_exit:
1173 - if (hwrm_err == -EACCES)
1174 + if (rc == -EACCES)
1175 bnxt_print_admin_err(bp);
1176 return rc;
1177 }
1178 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1179 index 8336f4cbaf95..3fc8a66e4f41 100644
1180 --- a/drivers/net/ethernet/freescale/fec_main.c
1181 +++ b/drivers/net/ethernet/freescale/fec_main.c
1182 @@ -2529,15 +2529,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
1183 return -EINVAL;
1184 }
1185
1186 - cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
1187 + cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
1188 if (cycle > 0xFFFF) {
1189 dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
1190 return -EINVAL;
1191 }
1192
1193 - cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
1194 + cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
1195 if (cycle > 0xFFFF) {
1196 - dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
1197 + dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
1198 return -EINVAL;
1199 }
1200
1201 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1202 index c01cf8ef69df..d4652dea4569 100644
1203 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1204 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
1205 @@ -2417,10 +2417,12 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
1206
1207 int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
1208 {
1209 + struct hclge_mac *mac = &hdev->hw.mac;
1210 int ret;
1211
1212 duplex = hclge_check_speed_dup(duplex, speed);
1213 - if (hdev->hw.mac.speed == speed && hdev->hw.mac.duplex == duplex)
1214 + if (!mac->support_autoneg && mac->speed == speed &&
1215 + mac->duplex == duplex)
1216 return 0;
1217
1218 ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
1219 diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
1220 index 2fef7402233e..82391abbd42b 100644
1221 --- a/drivers/net/ethernet/sfc/efx.c
1222 +++ b/drivers/net/ethernet/sfc/efx.c
1223 @@ -519,6 +519,7 @@ efx_copy_channel(const struct efx_channel *old_channel)
1224 if (tx_queue->channel)
1225 tx_queue->channel = channel;
1226 tx_queue->buffer = NULL;
1227 + tx_queue->cb_page = NULL;
1228 memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
1229 }
1230
1231 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
1232 index 3d69da112625..43a785f86c69 100644
1233 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
1234 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
1235 @@ -24,6 +24,7 @@
1236 static void dwmac1000_core_init(struct mac_device_info *hw,
1237 struct net_device *dev)
1238 {
1239 + struct stmmac_priv *priv = netdev_priv(dev);
1240 void __iomem *ioaddr = hw->pcsr;
1241 u32 value = readl(ioaddr + GMAC_CONTROL);
1242 int mtu = dev->mtu;
1243 @@ -35,7 +36,7 @@ static void dwmac1000_core_init(struct mac_device_info *hw,
1244 * Broadcom tags can look like invalid LLC/SNAP packets and cause the
1245 * hardware to truncate packets on reception.
1246 */
1247 - if (netdev_uses_dsa(dev))
1248 + if (netdev_uses_dsa(dev) || !priv->plat->enh_desc)
1249 value &= ~GMAC_CONTROL_ACS;
1250
1251 if (mtu > 1500)
1252 diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
1253 index 30cd0c4f0be0..8801d093135c 100644
1254 --- a/drivers/net/ipvlan/ipvlan_core.c
1255 +++ b/drivers/net/ipvlan/ipvlan_core.c
1256 @@ -293,6 +293,7 @@ void ipvlan_process_multicast(struct work_struct *work)
1257 }
1258 if (dev)
1259 dev_put(dev);
1260 + cond_resched();
1261 }
1262 }
1263
1264 @@ -498,19 +499,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
1265 struct ethhdr *ethh = eth_hdr(skb);
1266 int ret = NET_XMIT_DROP;
1267
1268 - /* In this mode we dont care about multicast and broadcast traffic */
1269 - if (is_multicast_ether_addr(ethh->h_dest)) {
1270 - pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
1271 - ntohs(skb->protocol));
1272 - kfree_skb(skb);
1273 - goto out;
1274 - }
1275 -
1276 /* The ipvlan is a pseudo-L2 device, so the packets that we receive
1277 * will have L2; which need to discarded and processed further
1278 * in the net-ns of the main-device.
1279 */
1280 if (skb_mac_header_was_set(skb)) {
1281 + /* In this mode we dont care about
1282 + * multicast and broadcast traffic */
1283 + if (is_multicast_ether_addr(ethh->h_dest)) {
1284 + pr_debug_ratelimited(
1285 + "Dropped {multi|broad}cast of type=[%x]\n",
1286 + ntohs(skb->protocol));
1287 + kfree_skb(skb);
1288 + goto out;
1289 + }
1290 +
1291 skb_pull(skb, sizeof(*ethh));
1292 skb->mac_header = (typeof(skb->mac_header))~0U;
1293 skb_reset_network_header(skb);
1294 diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
1295 index ba3dfac1d904..b805abc9ec3b 100644
1296 --- a/drivers/net/ipvlan/ipvlan_main.c
1297 +++ b/drivers/net/ipvlan/ipvlan_main.c
1298 @@ -164,7 +164,6 @@ static void ipvlan_uninit(struct net_device *dev)
1299 static int ipvlan_open(struct net_device *dev)
1300 {
1301 struct ipvl_dev *ipvlan = netdev_priv(dev);
1302 - struct net_device *phy_dev = ipvlan->phy_dev;
1303 struct ipvl_addr *addr;
1304
1305 if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
1306 @@ -178,7 +177,7 @@ static int ipvlan_open(struct net_device *dev)
1307 ipvlan_ht_addr_add(ipvlan, addr);
1308 rcu_read_unlock();
1309
1310 - return dev_uc_add(phy_dev, phy_dev->dev_addr);
1311 + return 0;
1312 }
1313
1314 static int ipvlan_stop(struct net_device *dev)
1315 @@ -190,8 +189,6 @@ static int ipvlan_stop(struct net_device *dev)
1316 dev_uc_unsync(phy_dev, dev);
1317 dev_mc_unsync(phy_dev, dev);
1318
1319 - dev_uc_del(phy_dev, phy_dev->dev_addr);
1320 -
1321 rcu_read_lock();
1322 list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
1323 ipvlan_ht_addr_del(addr);
1324 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
1325 index afd8b2a08245..6497a5c45220 100644
1326 --- a/drivers/net/macsec.c
1327 +++ b/drivers/net/macsec.c
1328 @@ -2882,6 +2882,11 @@ static void macsec_dev_set_rx_mode(struct net_device *dev)
1329 dev_uc_sync(real_dev, dev);
1330 }
1331
1332 +static sci_t dev_to_sci(struct net_device *dev, __be16 port)
1333 +{
1334 + return make_sci(dev->dev_addr, port);
1335 +}
1336 +
1337 static int macsec_set_mac_address(struct net_device *dev, void *p)
1338 {
1339 struct macsec_dev *macsec = macsec_priv(dev);
1340 @@ -2903,6 +2908,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
1341
1342 out:
1343 ether_addr_copy(dev->dev_addr, addr->sa_data);
1344 + macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
1345 return 0;
1346 }
1347
1348 @@ -2977,6 +2983,7 @@ static const struct device_type macsec_type = {
1349
1350 static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
1351 [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
1352 + [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
1353 [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
1354 [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
1355 [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
1356 @@ -3176,11 +3183,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
1357 return false;
1358 }
1359
1360 -static sci_t dev_to_sci(struct net_device *dev, __be16 port)
1361 -{
1362 - return make_sci(dev->dev_addr, port);
1363 -}
1364 -
1365 static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
1366 {
1367 struct macsec_dev *macsec = macsec_priv(dev);
1368 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
1369 index c5bf61565726..26f6be4796c7 100644
1370 --- a/drivers/net/macvlan.c
1371 +++ b/drivers/net/macvlan.c
1372 @@ -334,6 +334,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
1373 if (src)
1374 dev_put(src->dev);
1375 consume_skb(skb);
1376 +
1377 + cond_resched();
1378 }
1379 }
1380
1381 diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
1382 index 23f1958ba6ad..459fb2069c7e 100644
1383 --- a/drivers/net/phy/bcm63xx.c
1384 +++ b/drivers/net/phy/bcm63xx.c
1385 @@ -73,6 +73,7 @@ static struct phy_driver bcm63xx_driver[] = {
1386 /* same phy as above, with just a different OUI */
1387 .phy_id = 0x002bdc00,
1388 .phy_id_mask = 0xfffffc00,
1389 + .name = "Broadcom BCM63XX (2)",
1390 /* PHY_BASIC_FEATURES */
1391 .flags = PHY_IS_INTERNAL,
1392 .config_init = bcm63xx_config_init,
1393 diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
1394 index 105d389b58e7..ea890d802ffe 100644
1395 --- a/drivers/net/phy/phy.c
1396 +++ b/drivers/net/phy/phy.c
1397 @@ -761,7 +761,8 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat)
1398 phy_trigger_machine(phydev);
1399 }
1400
1401 - if (phy_clear_interrupt(phydev))
1402 + /* did_interrupt() may have cleared the interrupt already */
1403 + if (!phydev->drv->did_interrupt && phy_clear_interrupt(phydev))
1404 goto phy_err;
1405 return IRQ_HANDLED;
1406
1407 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
1408 index 2bf0fda209a8..0907c3d8d94a 100644
1409 --- a/drivers/net/phy/phy_device.c
1410 +++ b/drivers/net/phy/phy_device.c
1411 @@ -246,7 +246,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
1412 * MDIO bus driver and clock gated at this point.
1413 */
1414 if (!netdev)
1415 - return !phydev->suspended;
1416 + goto out;
1417
1418 if (netdev->wol_enabled)
1419 return false;
1420 @@ -266,7 +266,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
1421 if (device_may_wakeup(&netdev->dev))
1422 return false;
1423
1424 - return true;
1425 +out:
1426 + return !phydev->suspended;
1427 }
1428
1429 static int mdio_bus_phy_suspend(struct device *dev)
1430 @@ -284,6 +285,8 @@ static int mdio_bus_phy_suspend(struct device *dev)
1431 if (!mdio_bus_phy_may_suspend(phydev))
1432 return 0;
1433
1434 + phydev->suspended_by_mdio_bus = 1;
1435 +
1436 return phy_suspend(phydev);
1437 }
1438
1439 @@ -292,9 +295,11 @@ static int mdio_bus_phy_resume(struct device *dev)
1440 struct phy_device *phydev = to_phy_device(dev);
1441 int ret;
1442
1443 - if (!mdio_bus_phy_may_suspend(phydev))
1444 + if (!phydev->suspended_by_mdio_bus)
1445 goto no_resume;
1446
1447 + phydev->suspended_by_mdio_bus = 0;
1448 +
1449 ret = phy_resume(phydev);
1450 if (ret < 0)
1451 return ret;
1452 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
1453 index 58a69f830d29..f78ceba42e57 100644
1454 --- a/drivers/net/slip/slhc.c
1455 +++ b/drivers/net/slip/slhc.c
1456 @@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
1457 struct cstate *cs = lcs->next;
1458 unsigned long deltaS, deltaA;
1459 short changes = 0;
1460 - int hlen;
1461 + int nlen, hlen;
1462 unsigned char new_seq[16];
1463 unsigned char *cp = new_seq;
1464 struct iphdr *ip;
1465 @@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
1466 return isize;
1467
1468 ip = (struct iphdr *) icp;
1469 + if (ip->version != 4 || ip->ihl < 5)
1470 + return isize;
1471
1472 /* Bail if this packet isn't TCP, or is an IP fragment */
1473 if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
1474 @@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
1475 comp->sls_o_tcp++;
1476 return isize;
1477 }
1478 - /* Extract TCP header */
1479 + nlen = ip->ihl * 4;
1480 + if (isize < nlen + sizeof(*th))
1481 + return isize;
1482
1483 - th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
1484 - hlen = ip->ihl*4 + th->doff*4;
1485 + th = (struct tcphdr *)(icp + nlen);
1486 + if (th->doff < sizeof(struct tcphdr) / 4)
1487 + return isize;
1488 + hlen = nlen + th->doff * 4;
1489
1490 /* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
1491 * some other control bit is set). Also uncompressible if
1492 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
1493 index ca70a1d840eb..4004f98e50d9 100644
1494 --- a/drivers/net/team/team.c
1495 +++ b/drivers/net/team/team.c
1496 @@ -2240,6 +2240,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
1497 [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
1498 [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
1499 [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
1500 + [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
1501 + [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
1502 };
1503
1504 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
1505 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
1506 index 6912624eed4a..44ea5dcc43fd 100644
1507 --- a/drivers/net/usb/r8152.c
1508 +++ b/drivers/net/usb/r8152.c
1509 @@ -3006,6 +3006,8 @@ static u16 r8153_phy_status(struct r8152 *tp, u16 desired)
1510 }
1511
1512 msleep(20);
1513 + if (test_bit(RTL8152_UNPLUG, &tp->flags))
1514 + break;
1515 }
1516
1517 return data;
1518 @@ -4419,7 +4421,10 @@ static void r8153_init(struct r8152 *tp)
1519 if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
1520 AUTOLOAD_DONE)
1521 break;
1522 +
1523 msleep(20);
1524 + if (test_bit(RTL8152_UNPLUG, &tp->flags))
1525 + break;
1526 }
1527
1528 data = r8153_phy_status(tp, 0);
1529 @@ -4545,7 +4550,10 @@ static void r8153b_init(struct r8152 *tp)
1530 if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
1531 AUTOLOAD_DONE)
1532 break;
1533 +
1534 msleep(20);
1535 + if (test_bit(RTL8152_UNPLUG, &tp->flags))
1536 + break;
1537 }
1538
1539 data = r8153_phy_status(tp, 0);
1540 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
1541 index 945c1ea5cda8..ed367b0a185c 100644
1542 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
1543 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
1544 @@ -309,7 +309,8 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
1545 }
1546
1547 /* PHY_SKU section is mandatory in B0 */
1548 - if (!mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
1549 + if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
1550 + !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
1551 IWL_ERR(mvm,
1552 "Can't parse phy_sku in B0, empty sections\n");
1553 return NULL;
1554 diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
1555 index 8f69d00bd940..6249a46c1976 100644
1556 --- a/drivers/net/wireless/mediatek/mt76/dma.c
1557 +++ b/drivers/net/wireless/mediatek/mt76/dma.c
1558 @@ -448,10 +448,13 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
1559 struct page *page = virt_to_head_page(data);
1560 int offset = data - page_address(page);
1561 struct sk_buff *skb = q->rx_head;
1562 + struct skb_shared_info *shinfo = skb_shinfo(skb);
1563
1564 - offset += q->buf_offset;
1565 - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset, len,
1566 - q->buf_size);
1567 + if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
1568 + offset += q->buf_offset;
1569 + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
1570 + q->buf_size);
1571 + }
1572
1573 if (more)
1574 return;
1575 diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
1576 index 2bbd8ee93507..6381745e3bb1 100644
1577 --- a/drivers/pinctrl/core.c
1578 +++ b/drivers/pinctrl/core.c
1579 @@ -2025,7 +2025,6 @@ static int pinctrl_claim_hogs(struct pinctrl_dev *pctldev)
1580 return PTR_ERR(pctldev->p);
1581 }
1582
1583 - kref_get(&pctldev->p->users);
1584 pctldev->hog_default =
1585 pinctrl_lookup_state(pctldev->p, PINCTRL_STATE_DEFAULT);
1586 if (IS_ERR(pctldev->hog_default)) {
1587 diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c
1588 index 73bf1d9f9cc6..23cf04bdfc55 100644
1589 --- a/drivers/pinctrl/freescale/pinctrl-scu.c
1590 +++ b/drivers/pinctrl/freescale/pinctrl-scu.c
1591 @@ -23,12 +23,12 @@ struct imx_sc_msg_req_pad_set {
1592 struct imx_sc_rpc_msg hdr;
1593 u32 val;
1594 u16 pad;
1595 -} __packed;
1596 +} __packed __aligned(4);
1597
1598 struct imx_sc_msg_req_pad_get {
1599 struct imx_sc_rpc_msg hdr;
1600 u16 pad;
1601 -} __packed;
1602 +} __packed __aligned(4);
1603
1604 struct imx_sc_msg_resp_pad_get {
1605 struct imx_sc_rpc_msg hdr;
1606 diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
1607 index 72c5373c8dc1..e8d1f3050487 100644
1608 --- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
1609 +++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
1610 @@ -147,8 +147,8 @@ static const unsigned int sdio_d0_pins[] = { GPIOX_0 };
1611 static const unsigned int sdio_d1_pins[] = { GPIOX_1 };
1612 static const unsigned int sdio_d2_pins[] = { GPIOX_2 };
1613 static const unsigned int sdio_d3_pins[] = { GPIOX_3 };
1614 -static const unsigned int sdio_cmd_pins[] = { GPIOX_4 };
1615 -static const unsigned int sdio_clk_pins[] = { GPIOX_5 };
1616 +static const unsigned int sdio_clk_pins[] = { GPIOX_4 };
1617 +static const unsigned int sdio_cmd_pins[] = { GPIOX_5 };
1618 static const unsigned int sdio_irq_pins[] = { GPIOX_7 };
1619
1620 static const unsigned int nand_ce0_pins[] = { BOOT_8 };
1621 diff --git a/drivers/pinctrl/pinctrl-falcon.c b/drivers/pinctrl/pinctrl-falcon.c
1622 index a454f57c264e..62c02b969327 100644
1623 --- a/drivers/pinctrl/pinctrl-falcon.c
1624 +++ b/drivers/pinctrl/pinctrl-falcon.c
1625 @@ -451,7 +451,7 @@ static int pinctrl_falcon_probe(struct platform_device *pdev)
1626 falcon_info.clk[*bank] = clk_get(&ppdev->dev, NULL);
1627 if (IS_ERR(falcon_info.clk[*bank])) {
1628 dev_err(&ppdev->dev, "failed to get clock\n");
1629 - of_node_put(np)
1630 + of_node_put(np);
1631 return PTR_ERR(falcon_info.clk[*bank]);
1632 }
1633 falcon_info.membase[*bank] = devm_ioremap_resource(&pdev->dev,
1634 diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
1635 index 6cca72782af6..cf87eb27879f 100644
1636 --- a/drivers/s390/block/dasd.c
1637 +++ b/drivers/s390/block/dasd.c
1638 @@ -178,6 +178,8 @@ struct dasd_block *dasd_alloc_block(void)
1639 (unsigned long) block);
1640 INIT_LIST_HEAD(&block->ccw_queue);
1641 spin_lock_init(&block->queue_lock);
1642 + INIT_LIST_HEAD(&block->format_list);
1643 + spin_lock_init(&block->format_lock);
1644 timer_setup(&block->timer, dasd_block_timeout, 0);
1645 spin_lock_init(&block->profile.lock);
1646
1647 @@ -1779,20 +1781,26 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1648
1649 if (dasd_ese_needs_format(cqr->block, irb)) {
1650 if (rq_data_dir((struct request *)cqr->callback_data) == READ) {
1651 - device->discipline->ese_read(cqr);
1652 + device->discipline->ese_read(cqr, irb);
1653 cqr->status = DASD_CQR_SUCCESS;
1654 cqr->stopclk = now;
1655 dasd_device_clear_timer(device);
1656 dasd_schedule_device_bh(device);
1657 return;
1658 }
1659 - fcqr = device->discipline->ese_format(device, cqr);
1660 + fcqr = device->discipline->ese_format(device, cqr, irb);
1661 if (IS_ERR(fcqr)) {
1662 + if (PTR_ERR(fcqr) == -EINVAL) {
1663 + cqr->status = DASD_CQR_ERROR;
1664 + return;
1665 + }
1666 /*
1667 * If we can't format now, let the request go
1668 * one extra round. Maybe we can format later.
1669 */
1670 cqr->status = DASD_CQR_QUEUED;
1671 + dasd_schedule_device_bh(device);
1672 + return;
1673 } else {
1674 fcqr->status = DASD_CQR_QUEUED;
1675 cqr->status = DASD_CQR_QUEUED;
1676 @@ -2748,11 +2756,13 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1677 {
1678 struct request *req;
1679 blk_status_t error = BLK_STS_OK;
1680 + unsigned int proc_bytes;
1681 int status;
1682
1683 req = (struct request *) cqr->callback_data;
1684 dasd_profile_end(cqr->block, cqr, req);
1685
1686 + proc_bytes = cqr->proc_bytes;
1687 status = cqr->block->base->discipline->free_cp(cqr, req);
1688 if (status < 0)
1689 error = errno_to_blk_status(status);
1690 @@ -2783,7 +2793,18 @@ static void __dasd_cleanup_cqr(struct dasd_ccw_req *cqr)
1691 blk_mq_end_request(req, error);
1692 blk_mq_run_hw_queues(req->q, true);
1693 } else {
1694 - blk_mq_complete_request(req);
1695 + /*
1696 + * Partial completed requests can happen with ESE devices.
1697 + * During read we might have gotten a NRF error and have to
1698 + * complete a request partially.
1699 + */
1700 + if (proc_bytes) {
1701 + blk_update_request(req, BLK_STS_OK,
1702 + blk_rq_bytes(req) - proc_bytes);
1703 + blk_mq_requeue_request(req, true);
1704 + } else {
1705 + blk_mq_complete_request(req);
1706 + }
1707 }
1708 }
1709
1710 diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
1711 index a28b9ff82378..ad44d22e8859 100644
1712 --- a/drivers/s390/block/dasd_eckd.c
1713 +++ b/drivers/s390/block/dasd_eckd.c
1714 @@ -207,6 +207,45 @@ static void set_ch_t(struct ch_t *geo, __u32 cyl, __u8 head)
1715 geo->head |= head;
1716 }
1717
1718 +/*
1719 + * calculate failing track from sense data depending if
1720 + * it is an EAV device or not
1721 + */
1722 +static int dasd_eckd_track_from_irb(struct irb *irb, struct dasd_device *device,
1723 + sector_t *track)
1724 +{
1725 + struct dasd_eckd_private *private = device->private;
1726 + u8 *sense = NULL;
1727 + u32 cyl;
1728 + u8 head;
1729 +
1730 + sense = dasd_get_sense(irb);
1731 + if (!sense) {
1732 + DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1733 + "ESE error no sense data\n");
1734 + return -EINVAL;
1735 + }
1736 + if (!(sense[27] & DASD_SENSE_BIT_2)) {
1737 + DBF_DEV_EVENT(DBF_WARNING, device, "%s",
1738 + "ESE error no valid track data\n");
1739 + return -EINVAL;
1740 + }
1741 +
1742 + if (sense[27] & DASD_SENSE_BIT_3) {
1743 + /* enhanced addressing */
1744 + cyl = sense[30] << 20;
1745 + cyl |= (sense[31] & 0xF0) << 12;
1746 + cyl |= sense[28] << 8;
1747 + cyl |= sense[29];
1748 + } else {
1749 + cyl = sense[29] << 8;
1750 + cyl |= sense[30];
1751 + }
1752 + head = sense[31] & 0x0F;
1753 + *track = cyl * private->rdc_data.trk_per_cyl + head;
1754 + return 0;
1755 +}
1756 +
1757 static int set_timestamp(struct ccw1 *ccw, struct DE_eckd_data *data,
1758 struct dasd_device *device)
1759 {
1760 @@ -2986,6 +3025,37 @@ static int dasd_eckd_format_device(struct dasd_device *base,
1761 0, NULL);
1762 }
1763
1764 +static bool test_and_set_format_track(struct dasd_format_entry *to_format,
1765 + struct dasd_block *block)
1766 +{
1767 + struct dasd_format_entry *format;
1768 + unsigned long flags;
1769 + bool rc = false;
1770 +
1771 + spin_lock_irqsave(&block->format_lock, flags);
1772 + list_for_each_entry(format, &block->format_list, list) {
1773 + if (format->track == to_format->track) {
1774 + rc = true;
1775 + goto out;
1776 + }
1777 + }
1778 + list_add_tail(&to_format->list, &block->format_list);
1779 +
1780 +out:
1781 + spin_unlock_irqrestore(&block->format_lock, flags);
1782 + return rc;
1783 +}
1784 +
1785 +static void clear_format_track(struct dasd_format_entry *format,
1786 + struct dasd_block *block)
1787 +{
1788 + unsigned long flags;
1789 +
1790 + spin_lock_irqsave(&block->format_lock, flags);
1791 + list_del_init(&format->list);
1792 + spin_unlock_irqrestore(&block->format_lock, flags);
1793 +}
1794 +
1795 /*
1796 * Callback function to free ESE format requests.
1797 */
1798 @@ -2993,15 +3063,19 @@ static void dasd_eckd_ese_format_cb(struct dasd_ccw_req *cqr, void *data)
1799 {
1800 struct dasd_device *device = cqr->startdev;
1801 struct dasd_eckd_private *private = device->private;
1802 + struct dasd_format_entry *format = data;
1803
1804 + clear_format_track(format, cqr->basedev->block);
1805 private->count--;
1806 dasd_ffree_request(cqr, device);
1807 }
1808
1809 static struct dasd_ccw_req *
1810 -dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
1811 +dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr,
1812 + struct irb *irb)
1813 {
1814 struct dasd_eckd_private *private;
1815 + struct dasd_format_entry *format;
1816 struct format_data_t fdata;
1817 unsigned int recs_per_trk;
1818 struct dasd_ccw_req *fcqr;
1819 @@ -3011,23 +3085,39 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
1820 struct request *req;
1821 sector_t first_trk;
1822 sector_t last_trk;
1823 + sector_t curr_trk;
1824 int rc;
1825
1826 req = cqr->callback_data;
1827 - base = cqr->block->base;
1828 + block = cqr->block;
1829 + base = block->base;
1830 private = base->private;
1831 - block = base->block;
1832 blksize = block->bp_block;
1833 recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1834 + format = &startdev->format_entry;
1835
1836 first_trk = blk_rq_pos(req) >> block->s2b_shift;
1837 sector_div(first_trk, recs_per_trk);
1838 last_trk =
1839 (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
1840 sector_div(last_trk, recs_per_trk);
1841 + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
1842 + if (rc)
1843 + return ERR_PTR(rc);
1844
1845 - fdata.start_unit = first_trk;
1846 - fdata.stop_unit = last_trk;
1847 + if (curr_trk < first_trk || curr_trk > last_trk) {
1848 + DBF_DEV_EVENT(DBF_WARNING, startdev,
1849 + "ESE error track %llu not within range %llu - %llu\n",
1850 + curr_trk, first_trk, last_trk);
1851 + return ERR_PTR(-EINVAL);
1852 + }
1853 + format->track = curr_trk;
1854 + /* test if track is already in formatting by another thread */
1855 + if (test_and_set_format_track(format, block))
1856 + return ERR_PTR(-EEXIST);
1857 +
1858 + fdata.start_unit = curr_trk;
1859 + fdata.stop_unit = curr_trk;
1860 fdata.blksize = blksize;
1861 fdata.intensity = private->uses_cdl ? DASD_FMT_INT_COMPAT : 0;
1862
1863 @@ -3044,6 +3134,7 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
1864 return fcqr;
1865
1866 fcqr->callback = dasd_eckd_ese_format_cb;
1867 + fcqr->callback_data = (void *) format;
1868
1869 return fcqr;
1870 }
1871 @@ -3051,29 +3142,87 @@ dasd_eckd_ese_format(struct dasd_device *startdev, struct dasd_ccw_req *cqr)
1872 /*
1873 * When data is read from an unformatted area of an ESE volume, this function
1874 * returns zeroed data and thereby mimics a read of zero data.
1875 + *
1876 + * The first unformatted track is the one that got the NRF error, the address is
1877 + * encoded in the sense data.
1878 + *
1879 + * All tracks before have returned valid data and should not be touched.
1880 + * All tracks after the unformatted track might be formatted or not. This is
1881 + * currently not known, remember the processed data and return the remainder of
1882 + * the request to the blocklayer in __dasd_cleanup_cqr().
1883 */
1884 -static void dasd_eckd_ese_read(struct dasd_ccw_req *cqr)
1885 +static int dasd_eckd_ese_read(struct dasd_ccw_req *cqr, struct irb *irb)
1886 {
1887 + struct dasd_eckd_private *private;
1888 + sector_t first_trk, last_trk;
1889 + sector_t first_blk, last_blk;
1890 unsigned int blksize, off;
1891 + unsigned int recs_per_trk;
1892 struct dasd_device *base;
1893 struct req_iterator iter;
1894 + struct dasd_block *block;
1895 + unsigned int skip_block;
1896 + unsigned int blk_count;
1897 struct request *req;
1898 struct bio_vec bv;
1899 + sector_t curr_trk;
1900 + sector_t end_blk;
1901 char *dst;
1902 + int rc;
1903
1904 req = (struct request *) cqr->callback_data;
1905 base = cqr->block->base;
1906 blksize = base->block->bp_block;
1907 + block = cqr->block;
1908 + private = base->private;
1909 + skip_block = 0;
1910 + blk_count = 0;
1911 +
1912 + recs_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1913 + first_trk = first_blk = blk_rq_pos(req) >> block->s2b_shift;
1914 + sector_div(first_trk, recs_per_trk);
1915 + last_trk = last_blk =
1916 + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
1917 + sector_div(last_trk, recs_per_trk);
1918 + rc = dasd_eckd_track_from_irb(irb, base, &curr_trk);
1919 + if (rc)
1920 + return rc;
1921 +
1922 + /* sanity check if the current track from sense data is valid */
1923 + if (curr_trk < first_trk || curr_trk > last_trk) {
1924 + DBF_DEV_EVENT(DBF_WARNING, base,
1925 + "ESE error track %llu not within range %llu - %llu\n",
1926 + curr_trk, first_trk, last_trk);
1927 + return -EINVAL;
1928 + }
1929 +
1930 + /*
1931 + * if not the first track got the NRF error we have to skip over valid
1932 + * blocks
1933 + */
1934 + if (curr_trk != first_trk)
1935 + skip_block = curr_trk * recs_per_trk - first_blk;
1936 +
1937 + /* we have no information beyond the current track */
1938 + end_blk = (curr_trk + 1) * recs_per_trk;
1939
1940 rq_for_each_segment(bv, req, iter) {
1941 dst = page_address(bv.bv_page) + bv.bv_offset;
1942 for (off = 0; off < bv.bv_len; off += blksize) {
1943 - if (dst && rq_data_dir(req) == READ) {
1944 + if (first_blk + blk_count >= end_blk) {
1945 + cqr->proc_bytes = blk_count * blksize;
1946 + return 0;
1947 + }
1948 + if (dst && !skip_block) {
1949 dst += off;
1950 memset(dst, 0, blksize);
1951 + } else {
1952 + skip_block--;
1953 }
1954 + blk_count++;
1955 }
1956 }
1957 + return 0;
1958 }
1959
1960 /*
1961 diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
1962 index 91c9f9586e0f..fa552f9f1666 100644
1963 --- a/drivers/s390/block/dasd_int.h
1964 +++ b/drivers/s390/block/dasd_int.h
1965 @@ -187,6 +187,7 @@ struct dasd_ccw_req {
1966
1967 void (*callback)(struct dasd_ccw_req *, void *data);
1968 void *callback_data;
1969 + unsigned int proc_bytes; /* bytes for partial completion */
1970 };
1971
1972 /*
1973 @@ -387,8 +388,9 @@ struct dasd_discipline {
1974 int (*ext_pool_warn_thrshld)(struct dasd_device *);
1975 int (*ext_pool_oos)(struct dasd_device *);
1976 int (*ext_pool_exhaust)(struct dasd_device *, struct dasd_ccw_req *);
1977 - struct dasd_ccw_req *(*ese_format)(struct dasd_device *, struct dasd_ccw_req *);
1978 - void (*ese_read)(struct dasd_ccw_req *);
1979 + struct dasd_ccw_req *(*ese_format)(struct dasd_device *,
1980 + struct dasd_ccw_req *, struct irb *);
1981 + int (*ese_read)(struct dasd_ccw_req *, struct irb *);
1982 };
1983
1984 extern struct dasd_discipline *dasd_diag_discipline_pointer;
1985 @@ -474,6 +476,11 @@ struct dasd_profile {
1986 spinlock_t lock;
1987 };
1988
1989 +struct dasd_format_entry {
1990 + struct list_head list;
1991 + sector_t track;
1992 +};
1993 +
1994 struct dasd_device {
1995 /* Block device stuff. */
1996 struct dasd_block *block;
1997 @@ -539,6 +546,7 @@ struct dasd_device {
1998 struct dentry *debugfs_dentry;
1999 struct dentry *hosts_dentry;
2000 struct dasd_profile profile;
2001 + struct dasd_format_entry format_entry;
2002 };
2003
2004 struct dasd_block {
2005 @@ -564,6 +572,9 @@ struct dasd_block {
2006
2007 struct dentry *debugfs_dentry;
2008 struct dasd_profile profile;
2009 +
2010 + struct list_head format_list;
2011 + spinlock_t format_lock;
2012 };
2013
2014 struct dasd_attention_data {
2015 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
2016 index d2c4eb9efd70..7aaf150f89ba 100644
2017 --- a/drivers/virtio/virtio_balloon.c
2018 +++ b/drivers/virtio/virtio_balloon.c
2019 @@ -958,8 +958,8 @@ out_iput:
2020 iput(vb->vb_dev_info.inode);
2021 out_kern_unmount:
2022 kern_unmount(balloon_mnt);
2023 -#endif
2024 out_del_vqs:
2025 +#endif
2026 vdev->config->del_vqs(vdev);
2027 out_free_vb:
2028 kfree(vb);
2029 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
2030 index 867c7ebd3f10..58b96baa8d48 100644
2031 --- a/drivers/virtio/virtio_ring.c
2032 +++ b/drivers/virtio/virtio_ring.c
2033 @@ -2203,10 +2203,10 @@ void vring_del_virtqueue(struct virtqueue *_vq)
2034 vq->split.queue_size_in_bytes,
2035 vq->split.vring.desc,
2036 vq->split.queue_dma_addr);
2037 -
2038 - kfree(vq->split.desc_state);
2039 }
2040 }
2041 + if (!vq->packed_ring)
2042 + kfree(vq->split.desc_state);
2043 list_del(&_vq->list);
2044 kfree(vq);
2045 }
2046 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2047 index 7ce689d31aa2..5a35850ccb1a 100644
2048 --- a/fs/cifs/dir.c
2049 +++ b/fs/cifs/dir.c
2050 @@ -560,7 +560,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
2051 if (server->ops->close)
2052 server->ops->close(xid, tcon, &fid);
2053 cifs_del_pending_open(&open);
2054 - fput(file);
2055 rc = -ENOMEM;
2056 }
2057
2058 diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c
2059 index d71c2d6dd162..75898340eb46 100644
2060 --- a/fs/crypto/keysetup.c
2061 +++ b/fs/crypto/keysetup.c
2062 @@ -578,6 +578,15 @@ int fscrypt_drop_inode(struct inode *inode)
2063 return 0;
2064 mk = ci->ci_master_key->payload.data[0];
2065
2066 + /*
2067 + * With proper, non-racy use of FS_IOC_REMOVE_ENCRYPTION_KEY, all inodes
2068 + * protected by the key were cleaned by sync_filesystem(). But if
2069 + * userspace is still using the files, inodes can be dirtied between
2070 + * then and now. We mustn't lose any writes, so skip dirty inodes here.
2071 + */
2072 + if (inode->i_state & I_DIRTY_ALL)
2073 + return 0;
2074 +
2075 /*
2076 * Note: since we aren't holding ->mk_secret_sem, the result here can
2077 * immediately become outdated. But there's no correctness problem with
2078 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
2079 index ed1abc9e33cf..4f2e4f38feb8 100644
2080 --- a/fs/fuse/dev.c
2081 +++ b/fs/fuse/dev.c
2082 @@ -276,12 +276,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
2083 void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
2084 {
2085 struct fuse_iqueue *fiq = &fc->iq;
2086 - bool async;
2087
2088 if (test_and_set_bit(FR_FINISHED, &req->flags))
2089 goto put_request;
2090
2091 - async = req->args->end;
2092 /*
2093 * test_and_set_bit() implies smp_mb() between bit
2094 * changing and below intr_entry check. Pairs with
2095 @@ -324,7 +322,7 @@ void fuse_request_end(struct fuse_conn *fc, struct fuse_req *req)
2096 wake_up(&req->waitq);
2097 }
2098
2099 - if (async)
2100 + if (test_bit(FR_ASYNC, &req->flags))
2101 req->args->end(fc, req->args, req->out.h.error);
2102 put_request:
2103 fuse_put_request(fc, req);
2104 @@ -471,6 +469,8 @@ static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
2105 req->in.h.opcode = args->opcode;
2106 req->in.h.nodeid = args->nodeid;
2107 req->args = args;
2108 + if (args->end)
2109 + __set_bit(FR_ASYNC, &req->flags);
2110 }
2111
2112 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
2113 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
2114 index aa75e2305b75..ca344bf71404 100644
2115 --- a/fs/fuse/fuse_i.h
2116 +++ b/fs/fuse/fuse_i.h
2117 @@ -301,6 +301,7 @@ struct fuse_io_priv {
2118 * FR_SENT: request is in userspace, waiting for an answer
2119 * FR_FINISHED: request is finished
2120 * FR_PRIVATE: request is on private list
2121 + * FR_ASYNC: request is asynchronous
2122 */
2123 enum fuse_req_flag {
2124 FR_ISREPLY,
2125 @@ -314,6 +315,7 @@ enum fuse_req_flag {
2126 FR_SENT,
2127 FR_FINISHED,
2128 FR_PRIVATE,
2129 + FR_ASYNC,
2130 };
2131
2132 /**
2133 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
2134 index e1e18fb587eb..8466166f22e3 100644
2135 --- a/fs/gfs2/inode.c
2136 +++ b/fs/gfs2/inode.c
2137 @@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
2138 if (!(file->f_mode & FMODE_OPENED))
2139 return finish_no_open(file, d);
2140 dput(d);
2141 - return 0;
2142 + return excl && (flags & O_CREAT) ? -EEXIST : 0;
2143 }
2144
2145 BUG_ON(d != NULL);
2146 diff --git a/fs/open.c b/fs/open.c
2147 index b62f5c0923a8..dcbd01611237 100644
2148 --- a/fs/open.c
2149 +++ b/fs/open.c
2150 @@ -860,9 +860,6 @@ cleanup_file:
2151 * the return value of d_splice_alias(), then the caller needs to perform dput()
2152 * on it after finish_open().
2153 *
2154 - * On successful return @file is a fully instantiated open file. After this, if
2155 - * an error occurs in ->atomic_open(), it needs to clean up with fput().
2156 - *
2157 * Returns zero on success or -errno if the open failed.
2158 */
2159 int finish_open(struct file *file, struct dentry *dentry,
2160 diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
2161 index d7b201652f4c..0c7c750fc2c4 100644
2162 --- a/include/dt-bindings/clock/imx8mn-clock.h
2163 +++ b/include/dt-bindings/clock/imx8mn-clock.h
2164 @@ -122,8 +122,8 @@
2165 #define IMX8MN_CLK_I2C1 105
2166 #define IMX8MN_CLK_I2C2 106
2167 #define IMX8MN_CLK_I2C3 107
2168 -#define IMX8MN_CLK_I2C4 118
2169 -#define IMX8MN_CLK_UART1 119
2170 +#define IMX8MN_CLK_I2C4 108
2171 +#define IMX8MN_CLK_UART1 109
2172 #define IMX8MN_CLK_UART2 110
2173 #define IMX8MN_CLK_UART3 111
2174 #define IMX8MN_CLK_UART4 112
2175 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
2176 index 3ba3e6da13a6..57577075d204 100644
2177 --- a/include/linux/cgroup.h
2178 +++ b/include/linux/cgroup.h
2179 @@ -62,6 +62,7 @@ struct css_task_iter {
2180 struct list_head *mg_tasks_head;
2181 struct list_head *dying_tasks_head;
2182
2183 + struct list_head *cur_tasks_head;
2184 struct css_set *cur_cset;
2185 struct css_set *cur_dcset;
2186 struct task_struct *cur_task;
2187 diff --git a/include/linux/dmar.h b/include/linux/dmar.h
2188 index a7cf3599d9a1..d3ea390336f3 100644
2189 --- a/include/linux/dmar.h
2190 +++ b/include/linux/dmar.h
2191 @@ -69,8 +69,9 @@ struct dmar_pci_notify_info {
2192 extern struct rw_semaphore dmar_global_lock;
2193 extern struct list_head dmar_drhd_units;
2194
2195 -#define for_each_drhd_unit(drhd) \
2196 - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
2197 +#define for_each_drhd_unit(drhd) \
2198 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
2199 + dmar_rcu_check())
2200
2201 #define for_each_active_drhd_unit(drhd) \
2202 list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
2203 @@ -81,7 +82,8 @@ extern struct list_head dmar_drhd_units;
2204 if (i=drhd->iommu, drhd->ignored) {} else
2205
2206 #define for_each_iommu(i, drhd) \
2207 - list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
2208 + list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
2209 + dmar_rcu_check()) \
2210 if (i=drhd->iommu, 0) {} else
2211
2212 static inline bool dmar_rcu_check(void)
2213 diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
2214 index 39faaaf843e1..c91cf2dee12a 100644
2215 --- a/include/linux/inet_diag.h
2216 +++ b/include/linux/inet_diag.h
2217 @@ -2,15 +2,10 @@
2218 #ifndef _INET_DIAG_H_
2219 #define _INET_DIAG_H_ 1
2220
2221 +#include <net/netlink.h>
2222 #include <uapi/linux/inet_diag.h>
2223
2224 -struct net;
2225 -struct sock;
2226 struct inet_hashinfo;
2227 -struct nlattr;
2228 -struct nlmsghdr;
2229 -struct sk_buff;
2230 -struct netlink_callback;
2231
2232 struct inet_diag_handler {
2233 void (*dump)(struct sk_buff *skb,
2234 @@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
2235
2236 void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
2237
2238 +static inline size_t inet_diag_msg_attrs_size(void)
2239 +{
2240 + return nla_total_size(1) /* INET_DIAG_SHUTDOWN */
2241 + + nla_total_size(1) /* INET_DIAG_TOS */
2242 +#if IS_ENABLED(CONFIG_IPV6)
2243 + + nla_total_size(1) /* INET_DIAG_TCLASS */
2244 + + nla_total_size(1) /* INET_DIAG_SKV6ONLY */
2245 +#endif
2246 + + nla_total_size(4) /* INET_DIAG_MARK */
2247 + + nla_total_size(4); /* INET_DIAG_CLASS_ID */
2248 +}
2249 int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
2250 struct inet_diag_msg *r, int ext,
2251 struct user_namespace *user_ns, bool net_admin);
2252 diff --git a/include/linux/phy.h b/include/linux/phy.h
2253 index 3d5d53313e6c..80750783b5b0 100644
2254 --- a/include/linux/phy.h
2255 +++ b/include/linux/phy.h
2256 @@ -336,6 +336,7 @@ struct phy_c45_device_ids {
2257 * is_gigabit_capable: Set to true if PHY supports 1000Mbps
2258 * has_fixups: Set to true if this phy has fixups/quirks.
2259 * suspended: Set to true if this phy has been suspended successfully.
2260 + * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
2261 * sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
2262 * loopback_enabled: Set true if this phy has been loopbacked successfully.
2263 * state: state of the PHY for management purposes
2264 @@ -372,6 +373,7 @@ struct phy_device {
2265 unsigned is_gigabit_capable:1;
2266 unsigned has_fixups:1;
2267 unsigned suspended:1;
2268 + unsigned suspended_by_mdio_bus:1;
2269 unsigned sysfs_links:1;
2270 unsigned loopback_enabled:1;
2271
2272 @@ -524,6 +526,7 @@ struct phy_driver {
2273 /*
2274 * Checks if the PHY generated an interrupt.
2275 * For multi-PHY devices with shared PHY interrupt pin
2276 + * Set interrupt bits have to be cleared.
2277 */
2278 int (*did_interrupt)(struct phy_device *phydev);
2279
2280 diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
2281 index f2688404d1cd..569f446502be 100644
2282 --- a/include/linux/platform_device.h
2283 +++ b/include/linux/platform_device.h
2284 @@ -24,7 +24,7 @@ struct platform_device {
2285 int id;
2286 bool id_auto;
2287 struct device dev;
2288 - u64 dma_mask;
2289 + u64 platform_dma_mask;
2290 u32 num_resources;
2291 struct resource *resource;
2292
2293 diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
2294 index 20dcadd8eed9..7fed3193f81d 100644
2295 --- a/include/net/fib_rules.h
2296 +++ b/include/net/fib_rules.h
2297 @@ -108,6 +108,7 @@ struct fib_rule_notifier_info {
2298 [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
2299 [FRA_PRIORITY] = { .type = NLA_U32 }, \
2300 [FRA_FWMARK] = { .type = NLA_U32 }, \
2301 + [FRA_TUN_ID] = { .type = NLA_U64 }, \
2302 [FRA_FWMASK] = { .type = NLA_U32 }, \
2303 [FRA_TABLE] = { .type = NLA_U32 }, \
2304 [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
2305 diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
2306 index 595c52d59f31..7c9e97553a00 100644
2307 --- a/kernel/cgroup/cgroup.c
2308 +++ b/kernel/cgroup/cgroup.c
2309 @@ -4461,12 +4461,16 @@ static void css_task_iter_advance_css_set(struct css_task_iter *it)
2310 }
2311 } while (!css_set_populated(cset) && list_empty(&cset->dying_tasks));
2312
2313 - if (!list_empty(&cset->tasks))
2314 + if (!list_empty(&cset->tasks)) {
2315 it->task_pos = cset->tasks.next;
2316 - else if (!list_empty(&cset->mg_tasks))
2317 + it->cur_tasks_head = &cset->tasks;
2318 + } else if (!list_empty(&cset->mg_tasks)) {
2319 it->task_pos = cset->mg_tasks.next;
2320 - else
2321 + it->cur_tasks_head = &cset->mg_tasks;
2322 + } else {
2323 it->task_pos = cset->dying_tasks.next;
2324 + it->cur_tasks_head = &cset->dying_tasks;
2325 + }
2326
2327 it->tasks_head = &cset->tasks;
2328 it->mg_tasks_head = &cset->mg_tasks;
2329 @@ -4524,10 +4528,14 @@ repeat:
2330 else
2331 it->task_pos = it->task_pos->next;
2332
2333 - if (it->task_pos == it->tasks_head)
2334 + if (it->task_pos == it->tasks_head) {
2335 it->task_pos = it->mg_tasks_head->next;
2336 - if (it->task_pos == it->mg_tasks_head)
2337 + it->cur_tasks_head = it->mg_tasks_head;
2338 + }
2339 + if (it->task_pos == it->mg_tasks_head) {
2340 it->task_pos = it->dying_tasks_head->next;
2341 + it->cur_tasks_head = it->dying_tasks_head;
2342 + }
2343 if (it->task_pos == it->dying_tasks_head)
2344 css_task_iter_advance_css_set(it);
2345 } else {
2346 @@ -4546,11 +4554,12 @@ repeat:
2347 goto repeat;
2348
2349 /* and dying leaders w/o live member threads */
2350 - if (!atomic_read(&task->signal->live))
2351 + if (it->cur_tasks_head == it->dying_tasks_head &&
2352 + !atomic_read(&task->signal->live))
2353 goto repeat;
2354 } else {
2355 /* skip all dying ones */
2356 - if (task->flags & PF_EXITING)
2357 + if (it->cur_tasks_head == it->dying_tasks_head)
2358 goto repeat;
2359 }
2360 }
2361 @@ -4659,6 +4668,9 @@ static void *cgroup_procs_next(struct seq_file *s, void *v, loff_t *pos)
2362 struct kernfs_open_file *of = s->private;
2363 struct css_task_iter *it = of->priv;
2364
2365 + if (pos)
2366 + (*pos)++;
2367 +
2368 return css_task_iter_next(it);
2369 }
2370
2371 @@ -4674,7 +4686,7 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
2372 * from position 0, so we can simply keep iterating on !0 *pos.
2373 */
2374 if (!it) {
2375 - if (WARN_ON_ONCE((*pos)++))
2376 + if (WARN_ON_ONCE((*pos)))
2377 return ERR_PTR(-EINVAL);
2378
2379 it = kzalloc(sizeof(*it), GFP_KERNEL);
2380 @@ -4682,10 +4694,11 @@ static void *__cgroup_procs_start(struct seq_file *s, loff_t *pos,
2381 return ERR_PTR(-ENOMEM);
2382 of->priv = it;
2383 css_task_iter_start(&cgrp->self, iter_flags, it);
2384 - } else if (!(*pos)++) {
2385 + } else if (!(*pos)) {
2386 css_task_iter_end(it);
2387 css_task_iter_start(&cgrp->self, iter_flags, it);
2388 - }
2389 + } else
2390 + return it->cur_task;
2391
2392 return cgroup_procs_next(s, NULL, NULL);
2393 }
2394 @@ -6381,6 +6394,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
2395 return;
2396 }
2397
2398 + /* Don't associate the sock with unrelated interrupted task's cgroup. */
2399 + if (in_interrupt())
2400 + return;
2401 +
2402 rcu_read_lock();
2403
2404 while (true) {
2405 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2406 index e9c63b79e03f..1a0c224af6fb 100644
2407 --- a/kernel/workqueue.c
2408 +++ b/kernel/workqueue.c
2409 @@ -1417,14 +1417,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
2410 return;
2411 rcu_read_lock();
2412 retry:
2413 - if (req_cpu == WORK_CPU_UNBOUND)
2414 - cpu = wq_select_unbound_cpu(raw_smp_processor_id());
2415 -
2416 /* pwq which will be used unless @work is executing elsewhere */
2417 - if (!(wq->flags & WQ_UNBOUND))
2418 - pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
2419 - else
2420 + if (wq->flags & WQ_UNBOUND) {
2421 + if (req_cpu == WORK_CPU_UNBOUND)
2422 + cpu = wq_select_unbound_cpu(raw_smp_processor_id());
2423 pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
2424 + } else {
2425 + if (req_cpu == WORK_CPU_UNBOUND)
2426 + cpu = raw_smp_processor_id();
2427 + pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
2428 + }
2429
2430 /*
2431 * If @work was previously on a different pool, it might still be
2432 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2433 index ae9044bc9f80..23c99c6778d3 100644
2434 --- a/mm/memcontrol.c
2435 +++ b/mm/memcontrol.c
2436 @@ -6792,19 +6792,9 @@ void mem_cgroup_sk_alloc(struct sock *sk)
2437 if (!mem_cgroup_sockets_enabled)
2438 return;
2439
2440 - /*
2441 - * Socket cloning can throw us here with sk_memcg already
2442 - * filled. It won't however, necessarily happen from
2443 - * process context. So the test for root memcg given
2444 - * the current task's memcg won't help us in this case.
2445 - *
2446 - * Respecting the original socket's memcg is a better
2447 - * decision in this case.
2448 - */
2449 - if (sk->sk_memcg) {
2450 - css_get(&sk->sk_memcg->css);
2451 + /* Do not associate the sock with unrelated interrupted task's memcg. */
2452 + if (in_interrupt())
2453 return;
2454 - }
2455
2456 rcu_read_lock();
2457 memcg = mem_cgroup_from_task(current);
2458 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
2459 index 5b0b20e6da95..d88a4de02237 100644
2460 --- a/net/batman-adv/bat_iv_ogm.c
2461 +++ b/net/batman-adv/bat_iv_ogm.c
2462 @@ -789,6 +789,10 @@ static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
2463
2464 lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
2465
2466 + /* interface already disabled by batadv_iv_ogm_iface_disable */
2467 + if (!*ogm_buff)
2468 + return;
2469 +
2470 /* the interface gets activated here to avoid race conditions between
2471 * the moment of activating the interface in
2472 * hardif_activate_interface() where the originator mac is set and
2473 diff --git a/net/core/devlink.c b/net/core/devlink.c
2474 index 61bc67047f56..4c25f1aa2d37 100644
2475 --- a/net/core/devlink.c
2476 +++ b/net/core/devlink.c
2477 @@ -3222,34 +3222,41 @@ devlink_param_value_get_from_info(const struct devlink_param *param,
2478 struct genl_info *info,
2479 union devlink_param_value *value)
2480 {
2481 + struct nlattr *param_data;
2482 int len;
2483
2484 - if (param->type != DEVLINK_PARAM_TYPE_BOOL &&
2485 - !info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA])
2486 + param_data = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA];
2487 +
2488 + if (param->type != DEVLINK_PARAM_TYPE_BOOL && !param_data)
2489 return -EINVAL;
2490
2491 switch (param->type) {
2492 case DEVLINK_PARAM_TYPE_U8:
2493 - value->vu8 = nla_get_u8(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
2494 + if (nla_len(param_data) != sizeof(u8))
2495 + return -EINVAL;
2496 + value->vu8 = nla_get_u8(param_data);
2497 break;
2498 case DEVLINK_PARAM_TYPE_U16:
2499 - value->vu16 = nla_get_u16(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
2500 + if (nla_len(param_data) != sizeof(u16))
2501 + return -EINVAL;
2502 + value->vu16 = nla_get_u16(param_data);
2503 break;
2504 case DEVLINK_PARAM_TYPE_U32:
2505 - value->vu32 = nla_get_u32(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]);
2506 + if (nla_len(param_data) != sizeof(u32))
2507 + return -EINVAL;
2508 + value->vu32 = nla_get_u32(param_data);
2509 break;
2510 case DEVLINK_PARAM_TYPE_STRING:
2511 - len = strnlen(nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]),
2512 - nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
2513 - if (len == nla_len(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]) ||
2514 + len = strnlen(nla_data(param_data), nla_len(param_data));
2515 + if (len == nla_len(param_data) ||
2516 len >= __DEVLINK_PARAM_MAX_STRING_VALUE)
2517 return -EINVAL;
2518 - strcpy(value->vstr,
2519 - nla_data(info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA]));
2520 + strcpy(value->vstr, nla_data(param_data));
2521 break;
2522 case DEVLINK_PARAM_TYPE_BOOL:
2523 - value->vbool = info->attrs[DEVLINK_ATTR_PARAM_VALUE_DATA] ?
2524 - true : false;
2525 + if (param_data && nla_len(param_data))
2526 + return -EINVAL;
2527 + value->vbool = nla_get_flag(param_data);
2528 break;
2529 }
2530 return 0;
2531 @@ -5797,6 +5804,8 @@ static const struct nla_policy devlink_nl_policy[DEVLINK_ATTR_MAX + 1] = {
2532 [DEVLINK_ATTR_PARAM_VALUE_CMODE] = { .type = NLA_U8 },
2533 [DEVLINK_ATTR_REGION_NAME] = { .type = NLA_NUL_STRING },
2534 [DEVLINK_ATTR_REGION_SNAPSHOT_ID] = { .type = NLA_U32 },
2535 + [DEVLINK_ATTR_REGION_CHUNK_ADDR] = { .type = NLA_U64 },
2536 + [DEVLINK_ATTR_REGION_CHUNK_LEN] = { .type = NLA_U64 },
2537 [DEVLINK_ATTR_HEALTH_REPORTER_NAME] = { .type = NLA_NUL_STRING },
2538 [DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD] = { .type = NLA_U64 },
2539 [DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER] = { .type = NLA_U8 },
2540 diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
2541 index 0642f91c4038..b4c87fe31be2 100644
2542 --- a/net/core/netclassid_cgroup.c
2543 +++ b/net/core/netclassid_cgroup.c
2544 @@ -53,30 +53,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
2545 kfree(css_cls_state(css));
2546 }
2547
2548 +/*
2549 + * To avoid freezing of sockets creation for tasks with big number of threads
2550 + * and opened sockets lets release file_lock every 1000 iterated descriptors.
2551 + * New sockets will already have been created with new classid.
2552 + */
2553 +
2554 +struct update_classid_context {
2555 + u32 classid;
2556 + unsigned int batch;
2557 +};
2558 +
2559 +#define UPDATE_CLASSID_BATCH 1000
2560 +
2561 static int update_classid_sock(const void *v, struct file *file, unsigned n)
2562 {
2563 int err;
2564 + struct update_classid_context *ctx = (void *)v;
2565 struct socket *sock = sock_from_file(file, &err);
2566
2567 if (sock) {
2568 spin_lock(&cgroup_sk_update_lock);
2569 - sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
2570 - (unsigned long)v);
2571 + sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
2572 spin_unlock(&cgroup_sk_update_lock);
2573 }
2574 + if (--ctx->batch == 0) {
2575 + ctx->batch = UPDATE_CLASSID_BATCH;
2576 + return n + 1;
2577 + }
2578 return 0;
2579 }
2580
2581 +static void update_classid_task(struct task_struct *p, u32 classid)
2582 +{
2583 + struct update_classid_context ctx = {
2584 + .classid = classid,
2585 + .batch = UPDATE_CLASSID_BATCH
2586 + };
2587 + unsigned int fd = 0;
2588 +
2589 + do {
2590 + task_lock(p);
2591 + fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
2592 + task_unlock(p);
2593 + cond_resched();
2594 + } while (fd);
2595 +}
2596 +
2597 static void cgrp_attach(struct cgroup_taskset *tset)
2598 {
2599 struct cgroup_subsys_state *css;
2600 struct task_struct *p;
2601
2602 cgroup_taskset_for_each(p, css, tset) {
2603 - task_lock(p);
2604 - iterate_fd(p->files, 0, update_classid_sock,
2605 - (void *)(unsigned long)css_cls_state(css)->classid);
2606 - task_unlock(p);
2607 + update_classid_task(p, css_cls_state(css)->classid);
2608 }
2609 }
2610
2611 @@ -98,10 +128,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
2612
2613 css_task_iter_start(css, 0, &it);
2614 while ((p = css_task_iter_next(&it))) {
2615 - task_lock(p);
2616 - iterate_fd(p->files, 0, update_classid_sock,
2617 - (void *)(unsigned long)cs->classid);
2618 - task_unlock(p);
2619 + update_classid_task(p, cs->classid);
2620 cond_resched();
2621 }
2622 css_task_iter_end(&it);
2623 diff --git a/net/core/sock.c b/net/core/sock.c
2624 index b4d1112174c1..0adf7a9e5a90 100644
2625 --- a/net/core/sock.c
2626 +++ b/net/core/sock.c
2627 @@ -1832,7 +1832,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
2628 atomic_set(&newsk->sk_zckey, 0);
2629
2630 sock_reset_flag(newsk, SOCK_DONE);
2631 - mem_cgroup_sk_alloc(newsk);
2632 +
2633 + /* sk->sk_memcg will be populated at accept() time */
2634 + newsk->sk_memcg = NULL;
2635 +
2636 cgroup_sk_alloc(&newsk->sk_cgrp_data);
2637
2638 rcu_read_lock();
2639 diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
2640 index 12f8c7ee4dd8..bf9947c577b6 100644
2641 --- a/net/dsa/dsa_priv.h
2642 +++ b/net/dsa/dsa_priv.h
2643 @@ -128,7 +128,9 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
2644 /* port.c */
2645 int dsa_port_set_state(struct dsa_port *dp, u8 state,
2646 struct switchdev_trans *trans);
2647 +int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
2648 int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
2649 +void dsa_port_disable_rt(struct dsa_port *dp);
2650 void dsa_port_disable(struct dsa_port *dp);
2651 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
2652 void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
2653 diff --git a/net/dsa/port.c b/net/dsa/port.c
2654 index 9b54e5a76297..fa023af69bc4 100644
2655 --- a/net/dsa/port.c
2656 +++ b/net/dsa/port.c
2657 @@ -63,7 +63,7 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state)
2658 pr_err("DSA: failed to set STP state %u (%d)\n", state, err);
2659 }
2660
2661 -int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
2662 +int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy)
2663 {
2664 struct dsa_switch *ds = dp->ds;
2665 int port = dp->index;
2666 @@ -78,14 +78,31 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
2667 if (!dp->bridge_dev)
2668 dsa_port_set_state_now(dp, BR_STATE_FORWARDING);
2669
2670 + if (dp->pl)
2671 + phylink_start(dp->pl);
2672 +
2673 return 0;
2674 }
2675
2676 -void dsa_port_disable(struct dsa_port *dp)
2677 +int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy)
2678 +{
2679 + int err;
2680 +
2681 + rtnl_lock();
2682 + err = dsa_port_enable_rt(dp, phy);
2683 + rtnl_unlock();
2684 +
2685 + return err;
2686 +}
2687 +
2688 +void dsa_port_disable_rt(struct dsa_port *dp)
2689 {
2690 struct dsa_switch *ds = dp->ds;
2691 int port = dp->index;
2692
2693 + if (dp->pl)
2694 + phylink_stop(dp->pl);
2695 +
2696 if (!dp->bridge_dev)
2697 dsa_port_set_state_now(dp, BR_STATE_DISABLED);
2698
2699 @@ -93,6 +110,13 @@ void dsa_port_disable(struct dsa_port *dp)
2700 ds->ops->port_disable(ds, port);
2701 }
2702
2703 +void dsa_port_disable(struct dsa_port *dp)
2704 +{
2705 + rtnl_lock();
2706 + dsa_port_disable_rt(dp);
2707 + rtnl_unlock();
2708 +}
2709 +
2710 int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br)
2711 {
2712 struct dsa_notifier_bridge_info info = {
2713 @@ -615,10 +639,6 @@ static int dsa_port_phylink_register(struct dsa_port *dp)
2714 goto err_phy_connect;
2715 }
2716
2717 - rtnl_lock();
2718 - phylink_start(dp->pl);
2719 - rtnl_unlock();
2720 -
2721 return 0;
2722
2723 err_phy_connect:
2724 @@ -629,9 +649,14 @@ err_phy_connect:
2725 int dsa_port_link_register_of(struct dsa_port *dp)
2726 {
2727 struct dsa_switch *ds = dp->ds;
2728 + struct device_node *phy_np;
2729
2730 - if (!ds->ops->adjust_link)
2731 - return dsa_port_phylink_register(dp);
2732 + if (!ds->ops->adjust_link) {
2733 + phy_np = of_parse_phandle(dp->dn, "phy-handle", 0);
2734 + if (of_phy_is_fixed_link(dp->dn) || phy_np)
2735 + return dsa_port_phylink_register(dp);
2736 + return 0;
2737 + }
2738
2739 dev_warn(ds->dev,
2740 "Using legacy PHYLIB callbacks. Please migrate to PHYLINK!\n");
2741 @@ -646,11 +671,12 @@ void dsa_port_link_unregister_of(struct dsa_port *dp)
2742 {
2743 struct dsa_switch *ds = dp->ds;
2744
2745 - if (!ds->ops->adjust_link) {
2746 + if (!ds->ops->adjust_link && dp->pl) {
2747 rtnl_lock();
2748 phylink_disconnect_phy(dp->pl);
2749 rtnl_unlock();
2750 phylink_destroy(dp->pl);
2751 + dp->pl = NULL;
2752 return;
2753 }
2754
2755 diff --git a/net/dsa/slave.c b/net/dsa/slave.c
2756 index 028e65f4b5ba..23c2210fa7ec 100644
2757 --- a/net/dsa/slave.c
2758 +++ b/net/dsa/slave.c
2759 @@ -90,12 +90,10 @@ static int dsa_slave_open(struct net_device *dev)
2760 goto clear_allmulti;
2761 }
2762
2763 - err = dsa_port_enable(dp, dev->phydev);
2764 + err = dsa_port_enable_rt(dp, dev->phydev);
2765 if (err)
2766 goto clear_promisc;
2767
2768 - phylink_start(dp->pl);
2769 -
2770 return 0;
2771
2772 clear_promisc:
2773 @@ -119,9 +117,7 @@ static int dsa_slave_close(struct net_device *dev)
2774 cancel_work_sync(&dp->xmit_work);
2775 skb_queue_purge(&dp->xmit_queue);
2776
2777 - phylink_stop(dp->pl);
2778 -
2779 - dsa_port_disable(dp);
2780 + dsa_port_disable_rt(dp);
2781
2782 dev_mc_unsync(master, dev);
2783 dev_uc_unsync(master, dev);
2784 diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
2785 index 2c7a38d76a3a..0672b2f01586 100644
2786 --- a/net/ieee802154/nl_policy.c
2787 +++ b/net/ieee802154/nl_policy.c
2788 @@ -21,7 +21,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
2789 [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
2790 [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
2791 [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
2792 + [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
2793 + [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
2794 + [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
2795 + [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
2796 + [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
2797 [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
2798 + [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
2799 [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
2800 [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
2801 [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
2802 diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
2803 index 5fd6e8ed02b5..66fdbfe5447c 100644
2804 --- a/net/ipv4/gre_demux.c
2805 +++ b/net/ipv4/gre_demux.c
2806 @@ -56,7 +56,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
2807 }
2808 EXPORT_SYMBOL_GPL(gre_del_protocol);
2809
2810 -/* Fills in tpi and returns header length to be pulled. */
2811 +/* Fills in tpi and returns header length to be pulled.
2812 + * Note that caller must use pskb_may_pull() before pulling GRE header.
2813 + */
2814 int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2815 bool *csum_err, __be16 proto, int nhs)
2816 {
2817 @@ -110,8 +112,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2818 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
2819 */
2820 if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
2821 + u8 _val, *val;
2822 +
2823 + val = skb_header_pointer(skb, nhs + hdr_len,
2824 + sizeof(_val), &_val);
2825 + if (!val)
2826 + return -EINVAL;
2827 tpi->proto = proto;
2828 - if ((*(u8 *)options & 0xF0) != 0x40)
2829 + if ((*val & 0xF0) != 0x40)
2830 hdr_len += 4;
2831 }
2832 tpi->hdr_len = hdr_len;
2833 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
2834 index ac05e273bc66..b0010c710802 100644
2835 --- a/net/ipv4/inet_connection_sock.c
2836 +++ b/net/ipv4/inet_connection_sock.c
2837 @@ -482,8 +482,28 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
2838 }
2839 spin_unlock_bh(&queue->fastopenq.lock);
2840 }
2841 +
2842 out:
2843 release_sock(sk);
2844 + if (newsk && mem_cgroup_sockets_enabled) {
2845 + int amt;
2846 +
2847 + /* atomically get the memory usage, set and charge the
2848 + * newsk->sk_memcg.
2849 + */
2850 + lock_sock(newsk);
2851 +
2852 + /* The socket has not been accepted yet, no need to look at
2853 + * newsk->sk_wmem_queued.
2854 + */
2855 + amt = sk_mem_pages(newsk->sk_forward_alloc +
2856 + atomic_read(&newsk->sk_rmem_alloc));
2857 + mem_cgroup_sk_alloc(newsk);
2858 + if (newsk->sk_memcg && amt)
2859 + mem_cgroup_charge_skmem(newsk->sk_memcg, amt);
2860 +
2861 + release_sock(newsk);
2862 + }
2863 if (req)
2864 reqsk_put(req);
2865 return newsk;
2866 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
2867 index 6a4c82f96e78..5b68bdaa8bff 100644
2868 --- a/net/ipv4/inet_diag.c
2869 +++ b/net/ipv4/inet_diag.c
2870 @@ -100,13 +100,9 @@ static size_t inet_sk_attr_size(struct sock *sk,
2871 aux = handler->idiag_get_aux_size(sk, net_admin);
2872
2873 return nla_total_size(sizeof(struct tcp_info))
2874 - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
2875 - + nla_total_size(1) /* INET_DIAG_TOS */
2876 - + nla_total_size(1) /* INET_DIAG_TCLASS */
2877 - + nla_total_size(4) /* INET_DIAG_MARK */
2878 - + nla_total_size(4) /* INET_DIAG_CLASS_ID */
2879 - + nla_total_size(sizeof(struct inet_diag_meminfo))
2880 + nla_total_size(sizeof(struct inet_diag_msg))
2881 + + inet_diag_msg_attrs_size()
2882 + + nla_total_size(sizeof(struct inet_diag_meminfo))
2883 + nla_total_size(SK_MEMINFO_VARS * sizeof(u32))
2884 + nla_total_size(TCP_CA_NAME_MAX)
2885 + nla_total_size(sizeof(struct tcpvegas_info))
2886 @@ -147,6 +143,24 @@ int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
2887 if (net_admin && nla_put_u32(skb, INET_DIAG_MARK, sk->sk_mark))
2888 goto errout;
2889
2890 + if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
2891 + ext & (1 << (INET_DIAG_TCLASS - 1))) {
2892 + u32 classid = 0;
2893 +
2894 +#ifdef CONFIG_SOCK_CGROUP_DATA
2895 + classid = sock_cgroup_classid(&sk->sk_cgrp_data);
2896 +#endif
2897 + /* Fallback to socket priority if class id isn't set.
2898 + * Classful qdiscs use it as direct reference to class.
2899 + * For cgroup2 classid is always zero.
2900 + */
2901 + if (!classid)
2902 + classid = sk->sk_priority;
2903 +
2904 + if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
2905 + goto errout;
2906 + }
2907 +
2908 r->idiag_uid = from_kuid_munged(user_ns, sock_i_uid(sk));
2909 r->idiag_inode = sock_i_ino(sk);
2910
2911 @@ -284,24 +298,6 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
2912 goto errout;
2913 }
2914
2915 - if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) ||
2916 - ext & (1 << (INET_DIAG_TCLASS - 1))) {
2917 - u32 classid = 0;
2918 -
2919 -#ifdef CONFIG_SOCK_CGROUP_DATA
2920 - classid = sock_cgroup_classid(&sk->sk_cgrp_data);
2921 -#endif
2922 - /* Fallback to socket priority if class id isn't set.
2923 - * Classful qdiscs use it as direct reference to class.
2924 - * For cgroup2 classid is always zero.
2925 - */
2926 - if (!classid)
2927 - classid = sk->sk_priority;
2928 -
2929 - if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid))
2930 - goto errout;
2931 - }
2932 -
2933 out:
2934 nlmsg_end(skb, nlh);
2935 return 0;
2936 diff --git a/net/ipv4/raw_diag.c b/net/ipv4/raw_diag.c
2937 index e35736b99300..a93e7d1e1251 100644
2938 --- a/net/ipv4/raw_diag.c
2939 +++ b/net/ipv4/raw_diag.c
2940 @@ -100,8 +100,9 @@ static int raw_diag_dump_one(struct sk_buff *in_skb,
2941 if (IS_ERR(sk))
2942 return PTR_ERR(sk);
2943
2944 - rep = nlmsg_new(sizeof(struct inet_diag_msg) +
2945 - sizeof(struct inet_diag_meminfo) + 64,
2946 + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
2947 + inet_diag_msg_attrs_size() +
2948 + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
2949 GFP_KERNEL);
2950 if (!rep) {
2951 sock_put(sk);
2952 diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
2953 index 910555a4d9fe..dccd2286bc28 100644
2954 --- a/net/ipv4/udp_diag.c
2955 +++ b/net/ipv4/udp_diag.c
2956 @@ -64,8 +64,9 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
2957 goto out;
2958
2959 err = -ENOMEM;
2960 - rep = nlmsg_new(sizeof(struct inet_diag_msg) +
2961 - sizeof(struct inet_diag_meminfo) + 64,
2962 + rep = nlmsg_new(nla_total_size(sizeof(struct inet_diag_msg)) +
2963 + inet_diag_msg_attrs_size() +
2964 + nla_total_size(sizeof(struct inet_diag_meminfo)) + 64,
2965 GFP_KERNEL);
2966 if (!rep)
2967 goto out;
2968 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2969 index b11ccb53c7e0..d02ccd749a60 100644
2970 --- a/net/ipv6/addrconf.c
2971 +++ b/net/ipv6/addrconf.c
2972 @@ -1226,11 +1226,13 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
2973 }
2974
2975 static void
2976 -cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
2977 +cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
2978 + bool del_rt, bool del_peer)
2979 {
2980 struct fib6_info *f6i;
2981
2982 - f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
2983 + f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
2984 + ifp->prefix_len,
2985 ifp->idev->dev, 0, RTF_DEFAULT, true);
2986 if (f6i) {
2987 if (del_rt)
2988 @@ -1293,7 +1295,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
2989
2990 if (action != CLEANUP_PREFIX_RT_NOP) {
2991 cleanup_prefix_route(ifp, expires,
2992 - action == CLEANUP_PREFIX_RT_DEL);
2993 + action == CLEANUP_PREFIX_RT_DEL, false);
2994 }
2995
2996 /* clean up prefsrc entries */
2997 @@ -3345,6 +3347,10 @@ static void addrconf_dev_config(struct net_device *dev)
2998 (dev->type != ARPHRD_NONE) &&
2999 (dev->type != ARPHRD_RAWIP)) {
3000 /* Alas, we support only Ethernet autoconfiguration. */
3001 + idev = __in6_dev_get(dev);
3002 + if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3003 + dev->flags & IFF_MULTICAST)
3004 + ipv6_mc_up(idev);
3005 return;
3006 }
3007
3008 @@ -4586,12 +4592,14 @@ inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
3009 }
3010
3011 static int modify_prefix_route(struct inet6_ifaddr *ifp,
3012 - unsigned long expires, u32 flags)
3013 + unsigned long expires, u32 flags,
3014 + bool modify_peer)
3015 {
3016 struct fib6_info *f6i;
3017 u32 prio;
3018
3019 - f6i = addrconf_get_prefix_route(&ifp->addr, ifp->prefix_len,
3020 + f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
3021 + ifp->prefix_len,
3022 ifp->idev->dev, 0, RTF_DEFAULT, true);
3023 if (!f6i)
3024 return -ENOENT;
3025 @@ -4602,7 +4610,8 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
3026 ip6_del_rt(dev_net(ifp->idev->dev), f6i);
3027
3028 /* add new one */
3029 - addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3030 + addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
3031 + ifp->prefix_len,
3032 ifp->rt_priority, ifp->idev->dev,
3033 expires, flags, GFP_KERNEL);
3034 } else {
3035 @@ -4624,6 +4633,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
3036 unsigned long timeout;
3037 bool was_managetempaddr;
3038 bool had_prefixroute;
3039 + bool new_peer = false;
3040
3041 ASSERT_RTNL();
3042
3043 @@ -4655,6 +4665,13 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
3044 cfg->preferred_lft = timeout;
3045 }
3046
3047 + if (cfg->peer_pfx &&
3048 + memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
3049 + if (!ipv6_addr_any(&ifp->peer_addr))
3050 + cleanup_prefix_route(ifp, expires, true, true);
3051 + new_peer = true;
3052 + }
3053 +
3054 spin_lock_bh(&ifp->lock);
3055 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
3056 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
3057 @@ -4670,6 +4687,9 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
3058 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
3059 ifp->rt_priority = cfg->rt_priority;
3060
3061 + if (new_peer)
3062 + ifp->peer_addr = *cfg->peer_pfx;
3063 +
3064 spin_unlock_bh(&ifp->lock);
3065 if (!(ifp->flags&IFA_F_TENTATIVE))
3066 ipv6_ifa_notify(0, ifp);
3067 @@ -4678,7 +4698,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
3068 int rc = -ENOENT;
3069
3070 if (had_prefixroute)
3071 - rc = modify_prefix_route(ifp, expires, flags);
3072 + rc = modify_prefix_route(ifp, expires, flags, false);
3073
3074 /* prefix route could have been deleted; if so restore it */
3075 if (rc == -ENOENT) {
3076 @@ -4686,6 +4706,15 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
3077 ifp->rt_priority, ifp->idev->dev,
3078 expires, flags, GFP_KERNEL);
3079 }
3080 +
3081 + if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
3082 + rc = modify_prefix_route(ifp, expires, flags, true);
3083 +
3084 + if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
3085 + addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
3086 + ifp->rt_priority, ifp->idev->dev,
3087 + expires, flags, GFP_KERNEL);
3088 + }
3089 } else if (had_prefixroute) {
3090 enum cleanup_prefix_rt_t action;
3091 unsigned long rt_expires;
3092 @@ -4696,7 +4725,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, struct ifa6_config *cfg)
3093
3094 if (action != CLEANUP_PREFIX_RT_NOP) {
3095 cleanup_prefix_route(ifp, rt_expires,
3096 - action == CLEANUP_PREFIX_RT_DEL);
3097 + action == CLEANUP_PREFIX_RT_DEL, false);
3098 }
3099 }
3100
3101 @@ -5984,9 +6013,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
3102 if (ifp->idev->cnf.forwarding)
3103 addrconf_join_anycast(ifp);
3104 if (!ipv6_addr_any(&ifp->peer_addr))
3105 - addrconf_prefix_route(&ifp->peer_addr, 128, 0,
3106 - ifp->idev->dev, 0, 0,
3107 - GFP_ATOMIC);
3108 + addrconf_prefix_route(&ifp->peer_addr, 128,
3109 + ifp->rt_priority, ifp->idev->dev,
3110 + 0, 0, GFP_ATOMIC);
3111 break;
3112 case RTM_DELADDR:
3113 if (ifp->idev->cnf.forwarding)
3114 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
3115 index 79fc012dd2ca..debdaeba5d8c 100644
3116 --- a/net/ipv6/ipv6_sockglue.c
3117 +++ b/net/ipv6/ipv6_sockglue.c
3118 @@ -183,9 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
3119 retv = -EBUSY;
3120 break;
3121 }
3122 - } else if (sk->sk_protocol != IPPROTO_TCP)
3123 + } else if (sk->sk_protocol == IPPROTO_TCP) {
3124 + if (sk->sk_prot != &tcpv6_prot) {
3125 + retv = -EBUSY;
3126 + break;
3127 + }
3128 break;
3129 -
3130 + } else {
3131 + break;
3132 + }
3133 if (sk->sk_state != TCP_ESTABLISHED) {
3134 retv = -ENOTCONN;
3135 break;
3136 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
3137 index 410809c669e1..4912069627b6 100644
3138 --- a/net/netfilter/nf_conntrack_standalone.c
3139 +++ b/net/netfilter/nf_conntrack_standalone.c
3140 @@ -411,7 +411,7 @@ static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3141 *pos = cpu + 1;
3142 return per_cpu_ptr(net->ct.stat, cpu);
3143 }
3144 -
3145 + (*pos)++;
3146 return NULL;
3147 }
3148
3149 diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
3150 index b0930d4aba22..b9cbe1e2453e 100644
3151 --- a/net/netfilter/nf_synproxy_core.c
3152 +++ b/net/netfilter/nf_synproxy_core.c
3153 @@ -267,7 +267,7 @@ static void *synproxy_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3154 *pos = cpu + 1;
3155 return per_cpu_ptr(snet->stats, cpu);
3156 }
3157 -
3158 + (*pos)++;
3159 return NULL;
3160 }
3161
3162 diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
3163 index 23544842b692..068daff41f6e 100644
3164 --- a/net/netfilter/nf_tables_api.c
3165 +++ b/net/netfilter/nf_tables_api.c
3166 @@ -1309,6 +1309,11 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net,
3167 lockdep_commit_lock_is_held(net));
3168 if (nft_dump_stats(skb, stats))
3169 goto nla_put_failure;
3170 +
3171 + if ((chain->flags & NFT_CHAIN_HW_OFFLOAD) &&
3172 + nla_put_be32(skb, NFTA_CHAIN_FLAGS,
3173 + htonl(NFT_CHAIN_HW_OFFLOAD)))
3174 + goto nla_put_failure;
3175 }
3176
3177 if (nla_put_be32(skb, NFTA_CHAIN_USE, htonl(chain->use)))
3178 @@ -6970,13 +6975,8 @@ static void nf_tables_module_autoload(struct net *net)
3179 list_splice_init(&net->nft.module_list, &module_list);
3180 mutex_unlock(&net->nft.commit_mutex);
3181 list_for_each_entry_safe(req, next, &module_list, list) {
3182 - if (req->done) {
3183 - list_del(&req->list);
3184 - kfree(req);
3185 - } else {
3186 - request_module("%s", req->module);
3187 - req->done = true;
3188 - }
3189 + request_module("%s", req->module);
3190 + req->done = true;
3191 }
3192 mutex_lock(&net->nft.commit_mutex);
3193 list_splice(&module_list, &net->nft.module_list);
3194 @@ -7759,6 +7759,7 @@ static void __net_exit nf_tables_exit_net(struct net *net)
3195 __nft_release_tables(net);
3196 mutex_unlock(&net->nft.commit_mutex);
3197 WARN_ON_ONCE(!list_empty(&net->nft.tables));
3198 + WARN_ON_ONCE(!list_empty(&net->nft.module_list));
3199 }
3200
3201 static struct pernet_operations nf_tables_net_ops = {
3202 diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
3203 index 7525063c25f5..60838d5fb8e0 100644
3204 --- a/net/netfilter/nfnetlink_cthelper.c
3205 +++ b/net/netfilter/nfnetlink_cthelper.c
3206 @@ -742,6 +742,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
3207 [NFCTH_NAME] = { .type = NLA_NUL_STRING,
3208 .len = NF_CT_HELPER_NAME_LEN-1 },
3209 [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
3210 + [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
3211 + [NFCTH_STATUS] = { .type = NLA_U32, },
3212 };
3213
3214 static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
3215 diff --git a/net/netfilter/nft_chain_nat.c b/net/netfilter/nft_chain_nat.c
3216 index ff9ac8ae0031..eac4a901233f 100644
3217 --- a/net/netfilter/nft_chain_nat.c
3218 +++ b/net/netfilter/nft_chain_nat.c
3219 @@ -89,6 +89,7 @@ static const struct nft_chain_type nft_chain_nat_inet = {
3220 .name = "nat",
3221 .type = NFT_CHAIN_T_NAT,
3222 .family = NFPROTO_INET,
3223 + .owner = THIS_MODULE,
3224 .hook_mask = (1 << NF_INET_PRE_ROUTING) |
3225 (1 << NF_INET_LOCAL_IN) |
3226 (1 << NF_INET_LOCAL_OUT) |
3227 diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
3228 index 5cb2d8908d2a..0e3bfbc26e79 100644
3229 --- a/net/netfilter/nft_payload.c
3230 +++ b/net/netfilter/nft_payload.c
3231 @@ -121,6 +121,7 @@ static const struct nla_policy nft_payload_policy[NFTA_PAYLOAD_MAX + 1] = {
3232 [NFTA_PAYLOAD_LEN] = { .type = NLA_U32 },
3233 [NFTA_PAYLOAD_CSUM_TYPE] = { .type = NLA_U32 },
3234 [NFTA_PAYLOAD_CSUM_OFFSET] = { .type = NLA_U32 },
3235 + [NFTA_PAYLOAD_CSUM_FLAGS] = { .type = NLA_U32 },
3236 };
3237
3238 static int nft_payload_init(const struct nft_ctx *ctx,
3239 diff --git a/net/netfilter/nft_tunnel.c b/net/netfilter/nft_tunnel.c
3240 index 037e8fce9b30..1effd4878619 100644
3241 --- a/net/netfilter/nft_tunnel.c
3242 +++ b/net/netfilter/nft_tunnel.c
3243 @@ -339,6 +339,8 @@ static const struct nla_policy nft_tunnel_key_policy[NFTA_TUNNEL_KEY_MAX + 1] =
3244 [NFTA_TUNNEL_KEY_FLAGS] = { .type = NLA_U32, },
3245 [NFTA_TUNNEL_KEY_TOS] = { .type = NLA_U8, },
3246 [NFTA_TUNNEL_KEY_TTL] = { .type = NLA_U8, },
3247 + [NFTA_TUNNEL_KEY_SPORT] = { .type = NLA_U16, },
3248 + [NFTA_TUNNEL_KEY_DPORT] = { .type = NLA_U16, },
3249 [NFTA_TUNNEL_KEY_OPTS] = { .type = NLA_NESTED, },
3250 };
3251
3252 diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
3253 index ce70c2576bb2..44f971f31992 100644
3254 --- a/net/netfilter/x_tables.c
3255 +++ b/net/netfilter/x_tables.c
3256 @@ -1551,6 +1551,9 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
3257 uint8_t nfproto = (unsigned long)PDE_DATA(file_inode(seq->file));
3258 struct nf_mttg_trav *trav = seq->private;
3259
3260 + if (ppos != NULL)
3261 + ++(*ppos);
3262 +
3263 switch (trav->class) {
3264 case MTTG_TRAV_INIT:
3265 trav->class = MTTG_TRAV_NFP_UNSPEC;
3266 @@ -1576,9 +1579,6 @@ static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
3267 default:
3268 return NULL;
3269 }
3270 -
3271 - if (ppos != NULL)
3272 - ++*ppos;
3273 return trav;
3274 }
3275
3276 diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
3277 index 781e0b482189..6c2582a19766 100644
3278 --- a/net/netfilter/xt_recent.c
3279 +++ b/net/netfilter/xt_recent.c
3280 @@ -492,12 +492,12 @@ static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3281 const struct recent_entry *e = v;
3282 const struct list_head *head = e->list.next;
3283
3284 + (*pos)++;
3285 while (head == &t->iphash[st->bucket]) {
3286 if (++st->bucket >= ip_list_hash_size)
3287 return NULL;
3288 head = t->iphash[st->bucket].next;
3289 }
3290 - (*pos)++;
3291 return list_entry(head, struct recent_entry, list);
3292 }
3293
3294 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
3295 index e64b8784d487..c2a5174387ff 100644
3296 --- a/net/netlink/af_netlink.c
3297 +++ b/net/netlink/af_netlink.c
3298 @@ -2434,7 +2434,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
3299 in_skb->len))
3300 WARN_ON(nla_put_u32(skb, NLMSGERR_ATTR_OFFS,
3301 (u8 *)extack->bad_attr -
3302 - in_skb->data));
3303 + (u8 *)nlh));
3304 } else {
3305 if (extack->cookie_len)
3306 WARN_ON(nla_put(skb, NLMSGERR_ATTR_COOKIE,
3307 diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
3308 index 6f1b096e601c..43811b5219b5 100644
3309 --- a/net/nfc/hci/core.c
3310 +++ b/net/nfc/hci/core.c
3311 @@ -181,13 +181,20 @@ exit:
3312 void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
3313 struct sk_buff *skb)
3314 {
3315 - u8 gate = hdev->pipes[pipe].gate;
3316 u8 status = NFC_HCI_ANY_OK;
3317 struct hci_create_pipe_resp *create_info;
3318 struct hci_delete_pipe_noti *delete_info;
3319 struct hci_all_pipe_cleared_noti *cleared_info;
3320 + u8 gate;
3321
3322 - pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
3323 + pr_debug("from pipe %x cmd %x\n", pipe, cmd);
3324 +
3325 + if (pipe >= NFC_HCI_MAX_PIPES) {
3326 + status = NFC_HCI_ANY_E_NOK;
3327 + goto exit;
3328 + }
3329 +
3330 + gate = hdev->pipes[pipe].gate;
3331
3332 switch (cmd) {
3333 case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
3334 @@ -375,8 +382,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
3335 struct sk_buff *skb)
3336 {
3337 int r = 0;
3338 - u8 gate = hdev->pipes[pipe].gate;
3339 + u8 gate;
3340 +
3341 + if (pipe >= NFC_HCI_MAX_PIPES) {
3342 + pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
3343 + goto exit;
3344 + }
3345
3346 + gate = hdev->pipes[pipe].gate;
3347 if (gate == NFC_HCI_INVALID_GATE) {
3348 pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
3349 goto exit;
3350 diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
3351 index afde0d763039..1b261375722e 100644
3352 --- a/net/nfc/netlink.c
3353 +++ b/net/nfc/netlink.c
3354 @@ -32,6 +32,7 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
3355 [NFC_ATTR_DEVICE_NAME] = { .type = NLA_STRING,
3356 .len = NFC_DEVICE_NAME_MAXSIZE },
3357 [NFC_ATTR_PROTOCOLS] = { .type = NLA_U32 },
3358 + [NFC_ATTR_TARGET_INDEX] = { .type = NLA_U32 },
3359 [NFC_ATTR_COMM_MODE] = { .type = NLA_U8 },
3360 [NFC_ATTR_RF_MODE] = { .type = NLA_U8 },
3361 [NFC_ATTR_DEVICE_POWERED] = { .type = NLA_U8 },
3362 @@ -43,7 +44,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
3363 [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
3364 [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
3365 .len = NFC_FIRMWARE_NAME_MAXSIZE },
3366 + [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
3367 [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
3368 + [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
3369 + [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
3370 [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
3371
3372 };
3373 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
3374 index 118cd66b7516..20edb7c25e22 100644
3375 --- a/net/packet/af_packet.c
3376 +++ b/net/packet/af_packet.c
3377 @@ -2273,6 +2273,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
3378 TP_STATUS_KERNEL, (macoff+snaplen));
3379 if (!h.raw)
3380 goto drop_n_account;
3381 +
3382 + if (do_vnet &&
3383 + virtio_net_hdr_from_skb(skb, h.raw + macoff -
3384 + sizeof(struct virtio_net_hdr),
3385 + vio_le(), true, 0))
3386 + goto drop_n_account;
3387 +
3388 if (po->tp_version <= TPACKET_V2) {
3389 packet_increment_rx_head(po, &po->rx_ring);
3390 /*
3391 @@ -2285,12 +2292,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
3392 status |= TP_STATUS_LOSING;
3393 }
3394
3395 - if (do_vnet &&
3396 - virtio_net_hdr_from_skb(skb, h.raw + macoff -
3397 - sizeof(struct virtio_net_hdr),
3398 - vio_le(), true, 0))
3399 - goto drop_n_account;
3400 -
3401 po->stats.stats1.tp_packets++;
3402 if (copy_skb) {
3403 status |= TP_STATUS_COPY;
3404 diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
3405 index 712ad248d6a7..f757ea90aba6 100644
3406 --- a/net/sched/sch_fq.c
3407 +++ b/net/sched/sch_fq.c
3408 @@ -745,6 +745,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
3409 [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
3410 [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
3411 [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
3412 + [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
3413 [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
3414 [TCA_FQ_CE_THRESHOLD] = { .type = NLA_U32 },
3415 };
3416 diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
3417 index 660fc45ee40f..b1eb12d33b9a 100644
3418 --- a/net/sched/sch_taprio.c
3419 +++ b/net/sched/sch_taprio.c
3420 @@ -564,8 +564,10 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
3421 prio = skb->priority;
3422 tc = netdev_get_prio_tc_map(dev, prio);
3423
3424 - if (!(gate_mask & BIT(tc)))
3425 + if (!(gate_mask & BIT(tc))) {
3426 + skb = NULL;
3427 continue;
3428 + }
3429
3430 len = qdisc_pkt_len(skb);
3431 guard = ktime_add_ns(taprio_get_time(q),
3432 @@ -575,13 +577,17 @@ static struct sk_buff *taprio_dequeue_soft(struct Qdisc *sch)
3433 * guard band ...
3434 */
3435 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
3436 - ktime_after(guard, entry->close_time))
3437 + ktime_after(guard, entry->close_time)) {
3438 + skb = NULL;
3439 continue;
3440 + }
3441
3442 /* ... and no budget. */
3443 if (gate_mask != TAPRIO_ALL_GATES_OPEN &&
3444 - atomic_sub_return(len, &entry->budget) < 0)
3445 + atomic_sub_return(len, &entry->budget) < 0) {
3446 + skb = NULL;
3447 continue;
3448 + }
3449
3450 skb = child->ops->dequeue(child);
3451 if (unlikely(!skb))
3452 @@ -768,6 +774,7 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = {
3453 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 },
3454 [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 },
3455 [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 },
3456 + [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 },
3457 };
3458
3459 static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry,
3460 diff --git a/net/sctp/diag.c b/net/sctp/diag.c
3461 index 0851166b9175..ba9f64fdfd23 100644
3462 --- a/net/sctp/diag.c
3463 +++ b/net/sctp/diag.c
3464 @@ -237,15 +237,11 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc)
3465 addrcnt++;
3466
3467 return nla_total_size(sizeof(struct sctp_info))
3468 - + nla_total_size(1) /* INET_DIAG_SHUTDOWN */
3469 - + nla_total_size(1) /* INET_DIAG_TOS */
3470 - + nla_total_size(1) /* INET_DIAG_TCLASS */
3471 - + nla_total_size(4) /* INET_DIAG_MARK */
3472 - + nla_total_size(4) /* INET_DIAG_CLASS_ID */
3473 + nla_total_size(addrlen * asoc->peer.transport_count)
3474 + nla_total_size(addrlen * addrcnt)
3475 - + nla_total_size(sizeof(struct inet_diag_meminfo))
3476 + nla_total_size(sizeof(struct inet_diag_msg))
3477 + + inet_diag_msg_attrs_size()
3478 + + nla_total_size(sizeof(struct inet_diag_meminfo))
3479 + 64;
3480 }
3481
3482 diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
3483 index d14ca4af6f94..d74a71dff5b8 100644
3484 --- a/net/smc/smc_ib.c
3485 +++ b/net/smc/smc_ib.c
3486 @@ -560,12 +560,15 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
3487 struct smc_ib_device *smcibdev;
3488
3489 smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
3490 + if (!smcibdev || smcibdev->ibdev != ibdev)
3491 + return;
3492 ib_set_client_data(ibdev, &smc_ib_client, NULL);
3493 spin_lock(&smc_ib_devices.lock);
3494 list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
3495 spin_unlock(&smc_ib_devices.lock);
3496 smc_ib_cleanup_per_ibdev(smcibdev);
3497 ib_unregister_event_handler(&smcibdev->event_handler);
3498 + cancel_work_sync(&smcibdev->port_event_work);
3499 kfree(smcibdev);
3500 }
3501
3502 diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
3503 index d6165ad384c0..e9bbf4a00881 100644
3504 --- a/net/tipc/netlink.c
3505 +++ b/net/tipc/netlink.c
3506 @@ -111,6 +111,7 @@ const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
3507 [TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
3508 [TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
3509 [TIPC_NLA_PROP_WIN] = { .type = NLA_U32 },
3510 + [TIPC_NLA_PROP_MTU] = { .type = NLA_U32 },
3511 [TIPC_NLA_PROP_BROADCAST] = { .type = NLA_U32 },
3512 [TIPC_NLA_PROP_BROADCAST_RATIO] = { .type = NLA_U32 }
3513 };
3514 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3515 index 17514744af9e..321c132747ce 100644
3516 --- a/net/wireless/nl80211.c
3517 +++ b/net/wireless/nl80211.c
3518 @@ -469,6 +469,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3519 [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
3520 [NL80211_ATTR_STA_PLINK_STATE] =
3521 NLA_POLICY_MAX(NLA_U8, NUM_NL80211_PLINK_STATES - 1),
3522 + [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 },
3523 + [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG },
3524 [NL80211_ATTR_MESH_PEER_AID] =
3525 NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
3526 [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
3527 @@ -530,6 +532,8 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3528 [NL80211_ATTR_MDID] = { .type = NLA_U16 },
3529 [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
3530 .len = IEEE80211_MAX_DATA_LEN },
3531 + [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
3532 + [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
3533 [NL80211_ATTR_PEER_AID] =
3534 NLA_POLICY_RANGE(NLA_U16, 1, IEEE80211_MAX_AID),
3535 [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
3536 @@ -560,6 +564,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3537 NLA_POLICY_MAX(NLA_U8, IEEE80211_NUM_UPS - 1),
3538 [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
3539 [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
3540 + [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
3541 [NL80211_ATTR_MAC_MASK] = {
3542 .type = NLA_EXACT_LEN_WARN,
3543 .len = ETH_ALEN
3544 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3545 index b6b837a5bdaf..12858d95c2c8 100644
3546 --- a/sound/pci/hda/patch_realtek.c
3547 +++ b/sound/pci/hda/patch_realtek.c
3548 @@ -949,7 +949,7 @@ struct alc_codec_rename_pci_table {
3549 const char *name;
3550 };
3551
3552 -static struct alc_codec_rename_table rename_tbl[] = {
3553 +static const struct alc_codec_rename_table rename_tbl[] = {
3554 { 0x10ec0221, 0xf00f, 0x1003, "ALC231" },
3555 { 0x10ec0269, 0xfff0, 0x3010, "ALC277" },
3556 { 0x10ec0269, 0xf0f0, 0x2010, "ALC259" },
3557 @@ -970,7 +970,7 @@ static struct alc_codec_rename_table rename_tbl[] = {
3558 { } /* terminator */
3559 };
3560
3561 -static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
3562 +static const struct alc_codec_rename_pci_table rename_pci_tbl[] = {
3563 { 0x10ec0280, 0x1028, 0, "ALC3220" },
3564 { 0x10ec0282, 0x1028, 0, "ALC3221" },
3565 { 0x10ec0283, 0x1028, 0, "ALC3223" },
3566 @@ -3000,7 +3000,7 @@ static void alc269_shutup(struct hda_codec *codec)
3567 alc_shutup_pins(codec);
3568 }
3569
3570 -static struct coef_fw alc282_coefs[] = {
3571 +static const struct coef_fw alc282_coefs[] = {
3572 WRITE_COEF(0x03, 0x0002), /* Power Down Control */
3573 UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */
3574 WRITE_COEF(0x07, 0x0200), /* DMIC control */
3575 @@ -3112,7 +3112,7 @@ static void alc282_shutup(struct hda_codec *codec)
3576 alc_write_coef_idx(codec, 0x78, coef78);
3577 }
3578
3579 -static struct coef_fw alc283_coefs[] = {
3580 +static const struct coef_fw alc283_coefs[] = {
3581 WRITE_COEF(0x03, 0x0002), /* Power Down Control */
3582 UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */
3583 WRITE_COEF(0x07, 0x0200), /* DMIC control */
3584 @@ -4188,7 +4188,7 @@ static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
3585 }
3586 }
3587
3588 -static struct coef_fw alc225_pre_hsmode[] = {
3589 +static const struct coef_fw alc225_pre_hsmode[] = {
3590 UPDATE_COEF(0x4a, 1<<8, 0),
3591 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0),
3592 UPDATE_COEF(0x63, 3<<14, 3<<14),
3593 @@ -4201,7 +4201,7 @@ static struct coef_fw alc225_pre_hsmode[] = {
3594
3595 static void alc_headset_mode_unplugged(struct hda_codec *codec)
3596 {
3597 - static struct coef_fw coef0255[] = {
3598 + static const struct coef_fw coef0255[] = {
3599 WRITE_COEF(0x1b, 0x0c0b), /* LDO and MISC control */
3600 WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
3601 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
3602 @@ -4209,7 +4209,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3603 WRITE_COEFEX(0x57, 0x03, 0x8aa6), /* Direct Drive HP Amp control */
3604 {}
3605 };
3606 - static struct coef_fw coef0256[] = {
3607 + static const struct coef_fw coef0256[] = {
3608 WRITE_COEF(0x1b, 0x0c4b), /* LDO and MISC control */
3609 WRITE_COEF(0x45, 0xd089), /* UAJ function set to menual mode */
3610 WRITE_COEF(0x06, 0x6104), /* Set MIC2 Vref gate with HP */
3611 @@ -4217,7 +4217,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3612 UPDATE_COEFEX(0x57, 0x05, 1<<14, 0), /* Direct Drive HP Amp control(Set to verb control)*/
3613 {}
3614 };
3615 - static struct coef_fw coef0233[] = {
3616 + static const struct coef_fw coef0233[] = {
3617 WRITE_COEF(0x1b, 0x0c0b),
3618 WRITE_COEF(0x45, 0xc429),
3619 UPDATE_COEF(0x35, 0x4000, 0),
3620 @@ -4227,7 +4227,7 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3621 WRITE_COEF(0x32, 0x42a3),
3622 {}
3623 };
3624 - static struct coef_fw coef0288[] = {
3625 + static const struct coef_fw coef0288[] = {
3626 UPDATE_COEF(0x4f, 0xfcc0, 0xc400),
3627 UPDATE_COEF(0x50, 0x2000, 0x2000),
3628 UPDATE_COEF(0x56, 0x0006, 0x0006),
3629 @@ -4235,18 +4235,18 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3630 UPDATE_COEF(0x67, 0x2000, 0),
3631 {}
3632 };
3633 - static struct coef_fw coef0298[] = {
3634 + static const struct coef_fw coef0298[] = {
3635 UPDATE_COEF(0x19, 0x1300, 0x0300),
3636 {}
3637 };
3638 - static struct coef_fw coef0292[] = {
3639 + static const struct coef_fw coef0292[] = {
3640 WRITE_COEF(0x76, 0x000e),
3641 WRITE_COEF(0x6c, 0x2400),
3642 WRITE_COEF(0x18, 0x7308),
3643 WRITE_COEF(0x6b, 0xc429),
3644 {}
3645 };
3646 - static struct coef_fw coef0293[] = {
3647 + static const struct coef_fw coef0293[] = {
3648 UPDATE_COEF(0x10, 7<<8, 6<<8), /* SET Line1 JD to 0 */
3649 UPDATE_COEFEX(0x57, 0x05, 1<<15|1<<13, 0x0), /* SET charge pump by verb */
3650 UPDATE_COEFEX(0x57, 0x03, 1<<10, 1<<10), /* SET EN_OSW to 1 */
3651 @@ -4255,16 +4255,16 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3652 UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */
3653 {}
3654 };
3655 - static struct coef_fw coef0668[] = {
3656 + static const struct coef_fw coef0668[] = {
3657 WRITE_COEF(0x15, 0x0d40),
3658 WRITE_COEF(0xb7, 0x802b),
3659 {}
3660 };
3661 - static struct coef_fw coef0225[] = {
3662 + static const struct coef_fw coef0225[] = {
3663 UPDATE_COEF(0x63, 3<<14, 0),
3664 {}
3665 };
3666 - static struct coef_fw coef0274[] = {
3667 + static const struct coef_fw coef0274[] = {
3668 UPDATE_COEF(0x4a, 0x0100, 0),
3669 UPDATE_COEFEX(0x57, 0x05, 0x4000, 0),
3670 UPDATE_COEF(0x6b, 0xf000, 0x5000),
3671 @@ -4329,25 +4329,25 @@ static void alc_headset_mode_unplugged(struct hda_codec *codec)
3672 static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3673 hda_nid_t mic_pin)
3674 {
3675 - static struct coef_fw coef0255[] = {
3676 + static const struct coef_fw coef0255[] = {
3677 WRITE_COEFEX(0x57, 0x03, 0x8aa6),
3678 WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
3679 {}
3680 };
3681 - static struct coef_fw coef0256[] = {
3682 + static const struct coef_fw coef0256[] = {
3683 UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14), /* Direct Drive HP Amp control(Set to verb control)*/
3684 WRITE_COEFEX(0x57, 0x03, 0x09a3),
3685 WRITE_COEF(0x06, 0x6100), /* Set MIC2 Vref gate to normal */
3686 {}
3687 };
3688 - static struct coef_fw coef0233[] = {
3689 + static const struct coef_fw coef0233[] = {
3690 UPDATE_COEF(0x35, 0, 1<<14),
3691 WRITE_COEF(0x06, 0x2100),
3692 WRITE_COEF(0x1a, 0x0021),
3693 WRITE_COEF(0x26, 0x008c),
3694 {}
3695 };
3696 - static struct coef_fw coef0288[] = {
3697 + static const struct coef_fw coef0288[] = {
3698 UPDATE_COEF(0x4f, 0x00c0, 0),
3699 UPDATE_COEF(0x50, 0x2000, 0),
3700 UPDATE_COEF(0x56, 0x0006, 0),
3701 @@ -4356,30 +4356,30 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3702 UPDATE_COEF(0x67, 0x2000, 0x2000),
3703 {}
3704 };
3705 - static struct coef_fw coef0292[] = {
3706 + static const struct coef_fw coef0292[] = {
3707 WRITE_COEF(0x19, 0xa208),
3708 WRITE_COEF(0x2e, 0xacf0),
3709 {}
3710 };
3711 - static struct coef_fw coef0293[] = {
3712 + static const struct coef_fw coef0293[] = {
3713 UPDATE_COEFEX(0x57, 0x05, 0, 1<<15|1<<13), /* SET charge pump by verb */
3714 UPDATE_COEFEX(0x57, 0x03, 1<<10, 0), /* SET EN_OSW to 0 */
3715 UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */
3716 {}
3717 };
3718 - static struct coef_fw coef0688[] = {
3719 + static const struct coef_fw coef0688[] = {
3720 WRITE_COEF(0xb7, 0x802b),
3721 WRITE_COEF(0xb5, 0x1040),
3722 UPDATE_COEF(0xc3, 0, 1<<12),
3723 {}
3724 };
3725 - static struct coef_fw coef0225[] = {
3726 + static const struct coef_fw coef0225[] = {
3727 UPDATE_COEFEX(0x57, 0x05, 1<<14, 1<<14),
3728 UPDATE_COEF(0x4a, 3<<4, 2<<4),
3729 UPDATE_COEF(0x63, 3<<14, 0),
3730 {}
3731 };
3732 - static struct coef_fw coef0274[] = {
3733 + static const struct coef_fw coef0274[] = {
3734 UPDATE_COEFEX(0x57, 0x05, 0x4000, 0x4000),
3735 UPDATE_COEF(0x4a, 0x0010, 0),
3736 UPDATE_COEF(0x6b, 0xf000, 0),
3737 @@ -4465,7 +4465,7 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
3738
3739 static void alc_headset_mode_default(struct hda_codec *codec)
3740 {
3741 - static struct coef_fw coef0225[] = {
3742 + static const struct coef_fw coef0225[] = {
3743 UPDATE_COEF(0x45, 0x3f<<10, 0x30<<10),
3744 UPDATE_COEF(0x45, 0x3f<<10, 0x31<<10),
3745 UPDATE_COEF(0x49, 3<<8, 0<<8),
3746 @@ -4474,14 +4474,14 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3747 UPDATE_COEF(0x67, 0xf000, 0x3000),
3748 {}
3749 };
3750 - static struct coef_fw coef0255[] = {
3751 + static const struct coef_fw coef0255[] = {
3752 WRITE_COEF(0x45, 0xc089),
3753 WRITE_COEF(0x45, 0xc489),
3754 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
3755 WRITE_COEF(0x49, 0x0049),
3756 {}
3757 };
3758 - static struct coef_fw coef0256[] = {
3759 + static const struct coef_fw coef0256[] = {
3760 WRITE_COEF(0x45, 0xc489),
3761 WRITE_COEFEX(0x57, 0x03, 0x0da3),
3762 WRITE_COEF(0x49, 0x0049),
3763 @@ -4489,12 +4489,12 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3764 WRITE_COEF(0x06, 0x6100),
3765 {}
3766 };
3767 - static struct coef_fw coef0233[] = {
3768 + static const struct coef_fw coef0233[] = {
3769 WRITE_COEF(0x06, 0x2100),
3770 WRITE_COEF(0x32, 0x4ea3),
3771 {}
3772 };
3773 - static struct coef_fw coef0288[] = {
3774 + static const struct coef_fw coef0288[] = {
3775 UPDATE_COEF(0x4f, 0xfcc0, 0xc400), /* Set to TRS type */
3776 UPDATE_COEF(0x50, 0x2000, 0x2000),
3777 UPDATE_COEF(0x56, 0x0006, 0x0006),
3778 @@ -4502,26 +4502,26 @@ static void alc_headset_mode_default(struct hda_codec *codec)
3779 UPDATE_COEF(0x67, 0x2000, 0),
3780 {}
3781 };
3782 - static struct coef_fw coef0292[] = {
3783 + static const struct coef_fw coef0292[] = {
3784 WRITE_COEF(0x76, 0x000e),
3785 WRITE_COEF(0x6c, 0x2400),
3786 WRITE_COEF(0x6b, 0xc429),
3787 WRITE_COEF(0x18, 0x7308),
3788 {}
3789 };
3790 - static struct coef_fw coef0293[] = {
3791 + static const struct coef_fw coef0293[] = {
3792 UPDATE_COEF(0x4a, 0x000f, 0x000e), /* Combo Jack auto detect */
3793 WRITE_COEF(0x45, 0xC429), /* Set to TRS type */
3794 UPDATE_COEF(0x1a, 1<<3, 0), /* Combo JD gating without LINE1-VREFO */
3795 {}
3796 };
3797 - static struct coef_fw coef0688[] = {
3798 + static const struct coef_fw coef0688[] = {
3799 WRITE_COEF(0x11, 0x0041),
3800 WRITE_COEF(0x15, 0x0d40),
3801 WRITE_COEF(0xb7, 0x802b),
3802 {}
3803 };
3804 - static struct coef_fw coef0274[] = {
3805 + static const struct coef_fw coef0274[] = {
3806 WRITE_COEF(0x45, 0x4289),
3807 UPDATE_COEF(0x4a, 0x0010, 0x0010),
3808 UPDATE_COEF(0x6b, 0x0f00, 0),
3809 @@ -4584,53 +4584,53 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3810 {
3811 int val;
3812
3813 - static struct coef_fw coef0255[] = {
3814 + static const struct coef_fw coef0255[] = {
3815 WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
3816 WRITE_COEF(0x1b, 0x0c2b),
3817 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
3818 {}
3819 };
3820 - static struct coef_fw coef0256[] = {
3821 + static const struct coef_fw coef0256[] = {
3822 WRITE_COEF(0x45, 0xd489), /* Set to CTIA type */
3823 WRITE_COEF(0x1b, 0x0e6b),
3824 {}
3825 };
3826 - static struct coef_fw coef0233[] = {
3827 + static const struct coef_fw coef0233[] = {
3828 WRITE_COEF(0x45, 0xd429),
3829 WRITE_COEF(0x1b, 0x0c2b),
3830 WRITE_COEF(0x32, 0x4ea3),
3831 {}
3832 };
3833 - static struct coef_fw coef0288[] = {
3834 + static const struct coef_fw coef0288[] = {
3835 UPDATE_COEF(0x50, 0x2000, 0x2000),
3836 UPDATE_COEF(0x56, 0x0006, 0x0006),
3837 UPDATE_COEF(0x66, 0x0008, 0),
3838 UPDATE_COEF(0x67, 0x2000, 0),
3839 {}
3840 };
3841 - static struct coef_fw coef0292[] = {
3842 + static const struct coef_fw coef0292[] = {
3843 WRITE_COEF(0x6b, 0xd429),
3844 WRITE_COEF(0x76, 0x0008),
3845 WRITE_COEF(0x18, 0x7388),
3846 {}
3847 };
3848 - static struct coef_fw coef0293[] = {
3849 + static const struct coef_fw coef0293[] = {
3850 WRITE_COEF(0x45, 0xd429), /* Set to ctia type */
3851 UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */
3852 {}
3853 };
3854 - static struct coef_fw coef0688[] = {
3855 + static const struct coef_fw coef0688[] = {
3856 WRITE_COEF(0x11, 0x0001),
3857 WRITE_COEF(0x15, 0x0d60),
3858 WRITE_COEF(0xc3, 0x0000),
3859 {}
3860 };
3861 - static struct coef_fw coef0225_1[] = {
3862 + static const struct coef_fw coef0225_1[] = {
3863 UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
3864 UPDATE_COEF(0x63, 3<<14, 2<<14),
3865 {}
3866 };
3867 - static struct coef_fw coef0225_2[] = {
3868 + static const struct coef_fw coef0225_2[] = {
3869 UPDATE_COEF(0x45, 0x3f<<10, 0x35<<10),
3870 UPDATE_COEF(0x63, 3<<14, 1<<14),
3871 {}
3872 @@ -4702,48 +4702,48 @@ static void alc_headset_mode_ctia(struct hda_codec *codec)
3873 /* Nokia type */
3874 static void alc_headset_mode_omtp(struct hda_codec *codec)
3875 {
3876 - static struct coef_fw coef0255[] = {
3877 + static const struct coef_fw coef0255[] = {
3878 WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
3879 WRITE_COEF(0x1b, 0x0c2b),
3880 WRITE_COEFEX(0x57, 0x03, 0x8ea6),
3881 {}
3882 };
3883 - static struct coef_fw coef0256[] = {
3884 + static const struct coef_fw coef0256[] = {
3885 WRITE_COEF(0x45, 0xe489), /* Set to OMTP Type */
3886 WRITE_COEF(0x1b, 0x0e6b),
3887 {}
3888 };
3889 - static struct coef_fw coef0233[] = {
3890 + static const struct coef_fw coef0233[] = {
3891 WRITE_COEF(0x45, 0xe429),
3892 WRITE_COEF(0x1b, 0x0c2b),
3893 WRITE_COEF(0x32, 0x4ea3),
3894 {}
3895 };
3896 - static struct coef_fw coef0288[] = {
3897 + static const struct coef_fw coef0288[] = {
3898 UPDATE_COEF(0x50, 0x2000, 0x2000),
3899 UPDATE_COEF(0x56, 0x0006, 0x0006),
3900 UPDATE_COEF(0x66, 0x0008, 0),
3901 UPDATE_COEF(0x67, 0x2000, 0),
3902 {}
3903 };
3904 - static struct coef_fw coef0292[] = {
3905 + static const struct coef_fw coef0292[] = {
3906 WRITE_COEF(0x6b, 0xe429),
3907 WRITE_COEF(0x76, 0x0008),
3908 WRITE_COEF(0x18, 0x7388),
3909 {}
3910 };
3911 - static struct coef_fw coef0293[] = {
3912 + static const struct coef_fw coef0293[] = {
3913 WRITE_COEF(0x45, 0xe429), /* Set to omtp type */
3914 UPDATE_COEF(0x10, 7<<8, 7<<8), /* SET Line1 JD to 1 */
3915 {}
3916 };
3917 - static struct coef_fw coef0688[] = {
3918 + static const struct coef_fw coef0688[] = {
3919 WRITE_COEF(0x11, 0x0001),
3920 WRITE_COEF(0x15, 0x0d50),
3921 WRITE_COEF(0xc3, 0x0000),
3922 {}
3923 };
3924 - static struct coef_fw coef0225[] = {
3925 + static const struct coef_fw coef0225[] = {
3926 UPDATE_COEF(0x45, 0x3f<<10, 0x39<<10),
3927 UPDATE_COEF(0x63, 3<<14, 2<<14),
3928 {}
3929 @@ -4803,17 +4803,17 @@ static void alc_determine_headset_type(struct hda_codec *codec)
3930 int val;
3931 bool is_ctia = false;
3932 struct alc_spec *spec = codec->spec;
3933 - static struct coef_fw coef0255[] = {
3934 + static const struct coef_fw coef0255[] = {
3935 WRITE_COEF(0x45, 0xd089), /* combo jack auto switch control(Check type)*/
3936 WRITE_COEF(0x49, 0x0149), /* combo jack auto switch control(Vref
3937 conteol) */
3938 {}
3939 };
3940 - static struct coef_fw coef0288[] = {
3941 + static const struct coef_fw coef0288[] = {
3942 UPDATE_COEF(0x4f, 0xfcc0, 0xd400), /* Check Type */
3943 {}
3944 };
3945 - static struct coef_fw coef0298[] = {
3946 + static const struct coef_fw coef0298[] = {
3947 UPDATE_COEF(0x50, 0x2000, 0x2000),
3948 UPDATE_COEF(0x56, 0x0006, 0x0006),
3949 UPDATE_COEF(0x66, 0x0008, 0),
3950 @@ -4821,19 +4821,19 @@ static void alc_determine_headset_type(struct hda_codec *codec)
3951 UPDATE_COEF(0x19, 0x1300, 0x1300),
3952 {}
3953 };
3954 - static struct coef_fw coef0293[] = {
3955 + static const struct coef_fw coef0293[] = {
3956 UPDATE_COEF(0x4a, 0x000f, 0x0008), /* Combo Jack auto detect */
3957 WRITE_COEF(0x45, 0xD429), /* Set to ctia type */
3958 {}
3959 };
3960 - static struct coef_fw coef0688[] = {
3961 + static const struct coef_fw coef0688[] = {
3962 WRITE_COEF(0x11, 0x0001),
3963 WRITE_COEF(0xb7, 0x802b),
3964 WRITE_COEF(0x15, 0x0d60),
3965 WRITE_COEF(0xc3, 0x0c00),
3966 {}
3967 };
3968 - static struct coef_fw coef0274[] = {
3969 + static const struct coef_fw coef0274[] = {
3970 UPDATE_COEF(0x4a, 0x0010, 0),
3971 UPDATE_COEF(0x4a, 0x8000, 0),
3972 WRITE_COEF(0x45, 0xd289),
3973 @@ -5120,7 +5120,7 @@ static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec,
3974 static void alc255_set_default_jack_type(struct hda_codec *codec)
3975 {
3976 /* Set to iphone type */
3977 - static struct coef_fw alc255fw[] = {
3978 + static const struct coef_fw alc255fw[] = {
3979 WRITE_COEF(0x1b, 0x880b),
3980 WRITE_COEF(0x45, 0xd089),
3981 WRITE_COEF(0x1b, 0x080b),
3982 @@ -5128,7 +5128,7 @@ static void alc255_set_default_jack_type(struct hda_codec *codec)
3983 WRITE_COEF(0x1b, 0x0c0b),
3984 {}
3985 };
3986 - static struct coef_fw alc256fw[] = {
3987 + static const struct coef_fw alc256fw[] = {
3988 WRITE_COEF(0x1b, 0x884b),
3989 WRITE_COEF(0x45, 0xd089),
3990 WRITE_COEF(0x1b, 0x084b),
3991 @@ -8542,7 +8542,30 @@ static void alc662_fixup_aspire_ethos_hp(struct hda_codec *codec,
3992 }
3993 }
3994
3995 -static struct coef_fw alc668_coefs[] = {
3996 +static void alc671_fixup_hp_headset_mic2(struct hda_codec *codec,
3997 + const struct hda_fixup *fix, int action)
3998 +{
3999 + struct alc_spec *spec = codec->spec;
4000 +
4001 + static const struct hda_pintbl pincfgs[] = {
4002 + { 0x19, 0x02a11040 }, /* use as headset mic, with its own jack detect */
4003 + { 0x1b, 0x0181304f },
4004 + { }
4005 + };
4006 +
4007 + switch (action) {
4008 + case HDA_FIXUP_ACT_PRE_PROBE:
4009 + spec->gen.mixer_nid = 0;
4010 + spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
4011 + snd_hda_apply_pincfgs(codec, pincfgs);
4012 + break;
4013 + case HDA_FIXUP_ACT_INIT:
4014 + alc_write_coef_idx(codec, 0x19, 0xa054);
4015 + break;
4016 + }
4017 +}
4018 +
4019 +static const struct coef_fw alc668_coefs[] = {
4020 WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
4021 WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
4022 WRITE_COEF(0x08, 0x0031), WRITE_COEF(0x0a, 0x0060), WRITE_COEF(0x0b, 0x0),
4023 @@ -8615,6 +8638,7 @@ enum {
4024 ALC662_FIXUP_LENOVO_MULTI_CODECS,
4025 ALC669_FIXUP_ACER_ASPIRE_ETHOS,
4026 ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET,
4027 + ALC671_FIXUP_HP_HEADSET_MIC2,
4028 };
4029
4030 static const struct hda_fixup alc662_fixups[] = {
4031 @@ -8956,6 +8980,10 @@ static const struct hda_fixup alc662_fixups[] = {
4032 .chained = true,
4033 .chain_id = ALC669_FIXUP_ACER_ASPIRE_ETHOS_HEADSET
4034 },
4035 + [ALC671_FIXUP_HP_HEADSET_MIC2] = {
4036 + .type = HDA_FIXUP_FUNC,
4037 + .v.func = alc671_fixup_hp_headset_mic2,
4038 + },
4039 };
4040
4041 static const struct snd_pci_quirk alc662_fixup_tbl[] = {
4042 @@ -9138,6 +9166,23 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
4043 {0x12, 0x90a60130},
4044 {0x14, 0x90170110},
4045 {0x15, 0x0321101f}),
4046 + SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
4047 + {0x14, 0x01014010},
4048 + {0x17, 0x90170150},
4049 + {0x19, 0x02a11060},
4050 + {0x1b, 0x01813030},
4051 + {0x21, 0x02211020}),
4052 + SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
4053 + {0x14, 0x01014010},
4054 + {0x18, 0x01a19040},
4055 + {0x1b, 0x01813030},
4056 + {0x21, 0x02211020}),
4057 + SND_HDA_PIN_QUIRK(0x10ec0671, 0x103c, "HP cPC", ALC671_FIXUP_HP_HEADSET_MIC2,
4058 + {0x14, 0x01014020},
4059 + {0x17, 0x90170110},
4060 + {0x18, 0x01a19050},
4061 + {0x1b, 0x01813040},
4062 + {0x21, 0x02211030}),
4063 {}
4064 };
4065
4066 diff --git a/tools/perf/bench/futex-wake.c b/tools/perf/bench/futex-wake.c
4067 index df810096abfe..58906e9499bb 100644
4068 --- a/tools/perf/bench/futex-wake.c
4069 +++ b/tools/perf/bench/futex-wake.c
4070 @@ -43,7 +43,7 @@ static bool done = false, silent = false, fshared = false;
4071 static pthread_mutex_t thread_lock;
4072 static pthread_cond_t thread_parent, thread_worker;
4073 static struct stats waketime_stats, wakeup_stats;
4074 -static unsigned int ncpus, threads_starting, nthreads = 0;
4075 +static unsigned int threads_starting, nthreads = 0;
4076 static int futex_flag = 0;
4077
4078 static const struct option options[] = {
4079 @@ -141,7 +141,7 @@ int bench_futex_wake(int argc, const char **argv)
4080 sigaction(SIGINT, &act, NULL);
4081
4082 if (!nthreads)
4083 - nthreads = ncpus;
4084 + nthreads = cpu->nr;
4085
4086 worker = calloc(nthreads, sizeof(*worker));
4087 if (!worker)
4088 diff --git a/tools/testing/ktest/ktest.pl b/tools/testing/ktest/ktest.pl
4089 index 220d04f958a6..42b6cd41d2ea 100755
4090 --- a/tools/testing/ktest/ktest.pl
4091 +++ b/tools/testing/ktest/ktest.pl
4092 @@ -1383,7 +1383,7 @@ sub reboot {
4093
4094 } else {
4095 # Make sure everything has been written to disk
4096 - run_ssh("sync");
4097 + run_ssh("sync", 10);
4098
4099 if (defined($time)) {
4100 start_monitor;
4101 diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
4102 index 474638ef2697..09854f8a0b57 100755
4103 --- a/tools/testing/selftests/net/fib_tests.sh
4104 +++ b/tools/testing/selftests/net/fib_tests.sh
4105 @@ -1041,6 +1041,27 @@ ipv6_addr_metric_test()
4106 fi
4107 log_test $rc 0 "Prefix route with metric on link up"
4108
4109 + # verify peer metric added correctly
4110 + set -e
4111 + run_cmd "$IP -6 addr flush dev dummy2"
4112 + run_cmd "$IP -6 addr add dev dummy2 2001:db8:104::1 peer 2001:db8:104::2 metric 260"
4113 + set +e
4114 +
4115 + check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 260"
4116 + log_test $? 0 "Set metric with peer route on local side"
4117 + log_test $? 0 "User specified metric on local address"
4118 + check_route6 "2001:db8:104::2 dev dummy2 proto kernel metric 260"
4119 + log_test $? 0 "Set metric with peer route on peer side"
4120 +
4121 + set -e
4122 + run_cmd "$IP -6 addr change dev dummy2 2001:db8:104::1 peer 2001:db8:104::3 metric 261"
4123 + set +e
4124 +
4125 + check_route6 "2001:db8:104::1 dev dummy2 proto kernel metric 261"
4126 + log_test $? 0 "Modify metric and peer address on local side"
4127 + check_route6 "2001:db8:104::3 dev dummy2 proto kernel metric 261"
4128 + log_test $? 0 "Modify metric and peer address on peer side"
4129 +
4130 $IP li del dummy1
4131 $IP li del dummy2
4132 cleanup
4133 @@ -1457,13 +1478,20 @@ ipv4_addr_metric_test()
4134
4135 run_cmd "$IP addr flush dev dummy2"
4136 run_cmd "$IP addr add dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 260"
4137 - run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.2 metric 261"
4138 rc=$?
4139 if [ $rc -eq 0 ]; then
4140 - check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
4141 + check_route "172.16.104.2 dev dummy2 proto kernel scope link src 172.16.104.1 metric 260"
4142 + rc=$?
4143 + fi
4144 + log_test $rc 0 "Set metric of address with peer route"
4145 +
4146 + run_cmd "$IP addr change dev dummy2 172.16.104.1/32 peer 172.16.104.3 metric 261"
4147 + rc=$?
4148 + if [ $rc -eq 0 ]; then
4149 + check_route "172.16.104.3 dev dummy2 proto kernel scope link src 172.16.104.1 metric 261"
4150 rc=$?
4151 fi
4152 - log_test $rc 0 "Modify metric of address with peer route"
4153 + log_test $rc 0 "Modify metric and peer address for peer route"
4154
4155 $IP li del dummy1
4156 $IP li del dummy2