Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0316-4.9.217-all-fixes.patch



Revision 3591
Thu Aug 13 10:21:33 2020 UTC by niro
File size: 105735 bytes
linux-217
1 niro 3591 diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
2     index bdd025ceb763..85ed3450099a 100644
3     --- a/Documentation/filesystems/porting
4     +++ b/Documentation/filesystems/porting
5     @@ -596,3 +596,10 @@ in your dentry operations instead.
6     [mandatory]
7     ->rename() has an added flags argument. Any flags not handled by the
8     filesystem should result in EINVAL being returned.
9     +--
10     +[mandatory]
11     +
12     + [should've been added in 2016] stale comment in finish_open()
13     + nonwithstanding, failure exits in ->atomic_open() instances should
14     + *NOT* fput() the file, no matter what. Everything is handled by the
15     + caller.
16     diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
17     index b2d2f4539a3f..e05d65d6fcb6 100644
18     --- a/Documentation/kernel-parameters.txt
19     +++ b/Documentation/kernel-parameters.txt
20     @@ -335,6 +335,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
21     dynamic table installation which will install SSDT
22     tables to /sys/firmware/acpi/tables/dynamic.
23    
24     + acpi_no_watchdog [HW,ACPI,WDT]
25     + Ignore the ACPI-based watchdog interface (WDAT) and let
26     + a native driver control the watchdog device instead.
27     +
28     acpi_rsdp= [ACPI,EFI,KEXEC]
29     Pass the RSDP address to the kernel, mostly used
30     on machines running EFI runtime service to boot the
31     diff --git a/Makefile b/Makefile
32     index f0290097784a..96b230200cbe 100644
33     --- a/Makefile
34     +++ b/Makefile
35     @@ -1,6 +1,6 @@
36     VERSION = 4
37     PATCHLEVEL = 9
38     -SUBLEVEL = 216
39     +SUBLEVEL = 217
40     EXTRAVERSION =
41     NAME = Roaring Lionus
42    
43     diff --git a/arch/arc/include/asm/linkage.h b/arch/arc/include/asm/linkage.h
44     index b29f1a9fd6f7..07c8e1a6c56e 100644
45     --- a/arch/arc/include/asm/linkage.h
46     +++ b/arch/arc/include/asm/linkage.h
47     @@ -14,6 +14,8 @@
48     #ifdef __ASSEMBLY__
49    
50     #define ASM_NL ` /* use '`' to mark new line in macro */
51     +#define __ALIGN .align 4
52     +#define __ALIGN_STR __stringify(__ALIGN)
53    
54     /* annotation for data we want in DCCM - if enabled in .config */
55     .macro ARCFP_DATA nm
56     diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
57     index 890439737374..bf6e45dec017 100644
58     --- a/arch/arm/kernel/vdso.c
59     +++ b/arch/arm/kernel/vdso.c
60     @@ -85,6 +85,8 @@ static bool __init cntvct_functional(void)
61     * this.
62     */
63     np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
64     + if (!np)
65     + np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
66     if (!np)
67     goto out_put;
68    
69     diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
70     index 6709a8d33963..f1e34f16cfab 100644
71     --- a/arch/arm/lib/copy_from_user.S
72     +++ b/arch/arm/lib/copy_from_user.S
73     @@ -100,7 +100,7 @@ ENTRY(arm_copy_from_user)
74    
75     ENDPROC(arm_copy_from_user)
76    
77     - .pushsection .fixup,"ax"
78     + .pushsection .text.fixup,"ax"
79     .align 0
80     copy_abort_preamble
81     ldmfd sp!, {r1, r2, r3}
82     diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
83     index c16c99bc2a10..6bfb9a68134c 100644
84     --- a/arch/x86/events/amd/uncore.c
85     +++ b/arch/x86/events/amd/uncore.c
86     @@ -185,20 +185,18 @@ static int amd_uncore_event_init(struct perf_event *event)
87    
88     /*
89     * NB and Last level cache counters (MSRs) are shared across all cores
90     - * that share the same NB / Last level cache. Interrupts can be directed
91     - * to a single target core, however, event counts generated by processes
92     - * running on other cores cannot be masked out. So we do not support
93     - * sampling and per-thread events.
94     + * that share the same NB / Last level cache. On family 16h and below,
95     + * Interrupts can be directed to a single target core, however, event
96     + * counts generated by processes running on other cores cannot be masked
97     + * out. So we do not support sampling and per-thread events via
98     + * CAP_NO_INTERRUPT, and we do not enable counter overflow interrupts:
99     */
100     - if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
101     - return -EINVAL;
102    
103     /* NB and Last level cache counters do not have usr/os/guest/host bits */
104     if (event->attr.exclude_user || event->attr.exclude_kernel ||
105     event->attr.exclude_host || event->attr.exclude_guest)
106     return -EINVAL;
107    
108     - /* and we do not enable counter overflow interrupts */
109     hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
110     hwc->idx = -1;
111    
112     @@ -275,6 +273,7 @@ static struct pmu amd_nb_pmu = {
113     .start = amd_uncore_start,
114     .stop = amd_uncore_stop,
115     .read = amd_uncore_read,
116     + .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
117     };
118    
119     static struct pmu amd_llc_pmu = {
120     @@ -287,6 +286,7 @@ static struct pmu amd_llc_pmu = {
121     .start = amd_uncore_start,
122     .stop = amd_uncore_stop,
123     .read = amd_uncore_read,
124     + .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
125     };
126    
127     static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
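
The two .capabilities additions above take over from the deleted -EINVAL check: once a PMU advertises PERF_PMU_CAP_NO_INTERRUPT, the perf core itself refuses sampling events for it, so the policy lives in one place instead of being open-coded per uncore driver. Roughly, and from memory of kernel/events/core.c (the exact error value and placement are an assumption here), the core-side check amounts to:

	/* during event creation, after the event's PMU has been resolved */
	if (is_sampling_event(event) &&
	    (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT))
		return -EOPNOTSUPP;	/* PMU cannot raise overflow interrupts */
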
128     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
129     index e9c7090858d6..da3cd734dee1 100644
130     --- a/arch/x86/kvm/emulate.c
131     +++ b/arch/x86/kvm/emulate.c
132     @@ -5022,6 +5022,7 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
133     ctxt->fetch.ptr = ctxt->fetch.data;
134     ctxt->fetch.end = ctxt->fetch.data + insn_len;
135     ctxt->opcode_len = 1;
136     + ctxt->intercept = x86_intercept_none;
137     if (insn_len > 0)
138     memcpy(ctxt->fetch.data, insn, insn_len);
139     else {
140     diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
141     index 7ef0a0e105e1..4296f4932294 100644
142     --- a/drivers/acpi/acpi_watchdog.c
143     +++ b/drivers/acpi/acpi_watchdog.c
144     @@ -58,12 +58,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
145     }
146     #endif
147    
148     +static bool acpi_no_watchdog;
149     +
150     static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
151     {
152     const struct acpi_table_wdat *wdat = NULL;
153     acpi_status status;
154    
155     - if (acpi_disabled)
156     + if (acpi_disabled || acpi_no_watchdog)
157     return NULL;
158    
159     status = acpi_get_table(ACPI_SIG_WDAT, 0,
160     @@ -91,6 +93,14 @@ bool acpi_has_watchdog(void)
161     }
162     EXPORT_SYMBOL_GPL(acpi_has_watchdog);
163    
164     +/* ACPI watchdog can be disabled on boot command line */
165     +static int __init disable_acpi_watchdog(char *str)
166     +{
167     + acpi_no_watchdog = true;
168     + return 1;
169     +}
170     +__setup("acpi_no_watchdog", disable_acpi_watchdog);
171     +
172     void __init acpi_watchdog_init(void)
173     {
174     const struct acpi_wdat_entry *entries;
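
The __setup() handler above ties the documented acpi_no_watchdog parameter to acpi_watchdog_get_wdat(): with the flag set the WDAT table is never fetched, acpi_has_watchdog() reports false, and the wdat_wdt platform device is never created, which leaves the hardware free for a native driver (for example iTCO_wdt on many Intel boards; the right driver is board specific). Usage is a single token appended to the kernel command line:

	acpi_no_watchdog
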
175     diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
176     index 44ef1d66caa6..f287eec36b28 100644
177     --- a/drivers/block/virtio_blk.c
178     +++ b/drivers/block/virtio_blk.c
179     @@ -215,10 +215,12 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
180     err = __virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
181     if (err) {
182     virtqueue_kick(vblk->vqs[qid].vq);
183     - blk_mq_stop_hw_queue(hctx);
184     + /* Don't stop the queue if -ENOMEM: we may have failed to
185     + * bounce the buffer due to global resource outage.
186     + */
187     + if (err == -ENOSPC)
188     + blk_mq_stop_hw_queue(hctx);
189     spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
190     - /* Out of mem doesn't actually happen, since we fall back
191     - * to direct descriptors */
192     if (err == -ENOMEM || err == -ENOSPC)
193     return BLK_MQ_RQ_QUEUE_BUSY;
194     return BLK_MQ_RQ_QUEUE_ERROR;
195     diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
196     index 3e626fd9bd4e..1c65f5ac4368 100644
197     --- a/drivers/firmware/efi/efivars.c
198     +++ b/drivers/firmware/efi/efivars.c
199     @@ -139,13 +139,16 @@ static ssize_t
200     efivar_attr_read(struct efivar_entry *entry, char *buf)
201     {
202     struct efi_variable *var = &entry->var;
203     + unsigned long size = sizeof(var->Data);
204     char *str = buf;
205     + int ret;
206    
207     if (!entry || !buf)
208     return -EINVAL;
209    
210     - var->DataSize = 1024;
211     - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
212     + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
213     + var->DataSize = size;
214     + if (ret)
215     return -EIO;
216    
217     if (var->Attributes & EFI_VARIABLE_NON_VOLATILE)
218     @@ -172,13 +175,16 @@ static ssize_t
219     efivar_size_read(struct efivar_entry *entry, char *buf)
220     {
221     struct efi_variable *var = &entry->var;
222     + unsigned long size = sizeof(var->Data);
223     char *str = buf;
224     + int ret;
225    
226     if (!entry || !buf)
227     return -EINVAL;
228    
229     - var->DataSize = 1024;
230     - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
231     + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
232     + var->DataSize = size;
233     + if (ret)
234     return -EIO;
235    
236     str += sprintf(str, "0x%lx\n", var->DataSize);
237     @@ -189,12 +195,15 @@ static ssize_t
238     efivar_data_read(struct efivar_entry *entry, char *buf)
239     {
240     struct efi_variable *var = &entry->var;
241     + unsigned long size = sizeof(var->Data);
242     + int ret;
243    
244     if (!entry || !buf)
245     return -EINVAL;
246    
247     - var->DataSize = 1024;
248     - if (efivar_entry_get(entry, &var->Attributes, &var->DataSize, var->Data))
249     + ret = efivar_entry_get(entry, &var->Attributes, &size, var->Data);
250     + var->DataSize = size;
251     + if (ret)
252     return -EIO;
253    
254     memcpy(buf, var->Data, var->DataSize);
255     @@ -263,6 +272,9 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
256     u8 *data;
257     int err;
258    
259     + if (!entry || !buf)
260     + return -EINVAL;
261     +
262     if (is_compat()) {
263     struct compat_efi_variable *compat;
264    
265     @@ -314,14 +326,16 @@ efivar_show_raw(struct efivar_entry *entry, char *buf)
266     {
267     struct efi_variable *var = &entry->var;
268     struct compat_efi_variable *compat;
269     + unsigned long datasize = sizeof(var->Data);
270     size_t size;
271     + int ret;
272    
273     if (!entry || !buf)
274     return 0;
275    
276     - var->DataSize = 1024;
277     - if (efivar_entry_get(entry, &entry->var.Attributes,
278     - &entry->var.DataSize, entry->var.Data))
279     + ret = efivar_entry_get(entry, &var->Attributes, &datasize, var->Data);
280     + var->DataSize = datasize;
281     + if (ret)
282     return -EIO;
283    
284     if (is_compat()) {
285     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
286     index ac8885562919..0c2ed1254585 100644
287     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
288     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
289     @@ -363,8 +363,7 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
290     router.ddc_valid = false;
291     router.cd_valid = false;
292     for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
293     - uint8_t grph_obj_type=
294     - grph_obj_type =
295     + uint8_t grph_obj_type =
296     (le16_to_cpu(path->usGraphicObjIds[j]) &
297     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
298    
299     diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
300     index 31c087e1746d..197eb75d10ef 100644
301     --- a/drivers/hid/hid-apple.c
302     +++ b/drivers/hid/hid-apple.c
303     @@ -341,7 +341,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
304     unsigned long **bit, int *max)
305     {
306     if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
307     - usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
308     + usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
309     + usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
310     /* The fn key on Apple USB keyboards */
311     set_bit(EV_REP, hi->input->evbit);
312     hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
313     diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
314     index 10af8585c820..95052373a828 100644
315     --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
316     +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
317     @@ -341,6 +341,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
318     },
319     .driver_data = (void *)&sipodev_desc
320     },
321     + {
322     + .ident = "Trekstor SURFBOOK E11B",
323     + .matches = {
324     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
325     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"),
326     + },
327     + .driver_data = (void *)&sipodev_desc
328     + },
329     {
330     .ident = "Direkt-Tek DTLAPY116-2",
331     .matches = {
332     diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
333     index d51734e0c350..977070ce4fe9 100644
334     --- a/drivers/iommu/dmar.c
335     +++ b/drivers/iommu/dmar.c
336     @@ -39,6 +39,7 @@
337     #include <linux/dmi.h>
338     #include <linux/slab.h>
339     #include <linux/iommu.h>
340     +#include <linux/limits.h>
341     #include <asm/irq_remapping.h>
342     #include <asm/iommu_table.h>
343    
344     @@ -138,6 +139,13 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
345    
346     BUG_ON(dev->is_virtfn);
347    
348     + /*
349     + * Ignore devices that have a domain number higher than what can
350     + * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
351     + */
352     + if (pci_domain_nr(dev->bus) > U16_MAX)
353     + return NULL;
354     +
355     /* Only generate path[] for device addition event */
356     if (event == BUS_NOTIFY_ADD_DEVICE)
357     for (tmp = dev; tmp; tmp = tmp->bus->self)
358     @@ -450,12 +458,13 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
359    
360     /* Check for NUL termination within the designated length */
361     if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
362     - WARN_TAINT(1, TAINT_FIRMWARE_WORKAROUND,
363     + pr_warn(FW_BUG
364     "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
365     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
366     dmi_get_system_info(DMI_BIOS_VENDOR),
367     dmi_get_system_info(DMI_BIOS_VERSION),
368     dmi_get_system_info(DMI_PRODUCT_VERSION));
369     + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
370     return -EINVAL;
371     }
372     pr_info("ANDD device: %x name: %s\n", andd->device_number,
373     @@ -481,14 +490,14 @@ static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
374     return 0;
375     }
376     }
377     - WARN_TAINT(
378     - 1, TAINT_FIRMWARE_WORKAROUND,
379     + pr_warn(FW_BUG
380     "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
381     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
382     - drhd->reg_base_addr,
383     + rhsa->base_address,
384     dmi_get_system_info(DMI_BIOS_VENDOR),
385     dmi_get_system_info(DMI_BIOS_VERSION),
386     dmi_get_system_info(DMI_PRODUCT_VERSION));
387     + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
388    
389     return 0;
390     }
391     @@ -834,14 +843,14 @@ int __init dmar_table_init(void)
392    
393     static void warn_invalid_dmar(u64 addr, const char *message)
394     {
395     - WARN_TAINT_ONCE(
396     - 1, TAINT_FIRMWARE_WORKAROUND,
397     + pr_warn_once(FW_BUG
398     "Your BIOS is broken; DMAR reported at address %llx%s!\n"
399     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
400     addr, message,
401     dmi_get_system_info(DMI_BIOS_VENDOR),
402     dmi_get_system_info(DMI_BIOS_VERSION),
403     dmi_get_system_info(DMI_PRODUCT_VERSION));
404     + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
405     }
406    
407     static int __ref
408     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
409     index 5c6e0a9fd2f3..593a4bfcba42 100644
410     --- a/drivers/iommu/intel-iommu.c
411     +++ b/drivers/iommu/intel-iommu.c
412     @@ -4085,10 +4085,11 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev)
413    
414     /* we know that the this iommu should be at offset 0xa000 from vtbar */
415     drhd = dmar_find_matched_drhd_unit(pdev);
416     - if (WARN_TAINT_ONCE(!drhd || drhd->reg_base_addr - vtbar != 0xa000,
417     - TAINT_FIRMWARE_WORKAROUND,
418     - "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"))
419     + if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) {
420     + pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n");
421     + add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
422     pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
423     + }
424     }
425     DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu);
426    
427     @@ -5192,8 +5193,10 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
428     u64 phys = 0;
429    
430     pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, &level);
431     - if (pte)
432     - phys = dma_pte_addr(pte);
433     + if (pte && dma_pte_present(pte))
434     + phys = dma_pte_addr(pte) +
435     + (iova & (BIT_MASK(level_to_offset_bits(level) +
436     + VTD_PAGE_SHIFT) - 1));
437    
438     return phys;
439     }
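
The iova_to_phys fix above does two things: it ignores PTEs that are not present, and it adds the IOVA's offset within the mapped page to the page base from dma_pte_addr(), so callers get an exact physical address even for 2 MiB or 1 GiB superpage mappings. Assuming the mainline constants (LEVEL_STRIDE = 9, VTD_PAGE_SHIFT = 12), the per-level offset mask works out as in this small stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT	12	/* 4 KiB base page */
#define LEVEL_STRIDE	9	/* address bits resolved per page-table level */

/* Offset bits below a PTE found at 'level' (1 = 4K, 2 = 2M, 3 = 1G). */
static unsigned int level_to_offset_bits(int level)
{
	return (level - 1) * LEVEL_STRIDE;
}

int main(void)
{
	uint64_t iova = 0x12345678ULL;

	for (int level = 1; level <= 3; level++) {
		uint64_t mask = (1ULL << (level_to_offset_bits(level) +
					  VTD_PAGE_SHIFT)) - 1;
		printf("level %d: mask 0x%llx, offset 0x%llx\n", level,
		       (unsigned long long)mask,
		       (unsigned long long)(iova & mask));
	}
	return 0;
}

For level 1 that is the familiar 0xfff page offset; levels 2 and 3 widen the mask to 0x1fffff and 0x3fffffff respectively.
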
440     diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
441     index 9834d28d52e8..1f8fbd7776fb 100644
442     --- a/drivers/net/bonding/bond_alb.c
443     +++ b/drivers/net/bonding/bond_alb.c
444     @@ -71,11 +71,6 @@ struct arp_pkt {
445     };
446     #pragma pack()
447    
448     -static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb)
449     -{
450     - return (struct arp_pkt *)skb_network_header(skb);
451     -}
452     -
453     /* Forward declaration */
454     static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
455     bool strict_match);
456     @@ -574,10 +569,11 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
457     spin_unlock(&bond->mode_lock);
458     }
459    
460     -static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bond)
461     +static struct slave *rlb_choose_channel(struct sk_buff *skb,
462     + struct bonding *bond,
463     + const struct arp_pkt *arp)
464     {
465     struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
466     - struct arp_pkt *arp = arp_pkt(skb);
467     struct slave *assigned_slave, *curr_active_slave;
468     struct rlb_client_info *client_info;
469     u32 hash_index = 0;
470     @@ -674,8 +670,12 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
471     */
472     static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
473     {
474     - struct arp_pkt *arp = arp_pkt(skb);
475     struct slave *tx_slave = NULL;
476     + struct arp_pkt *arp;
477     +
478     + if (!pskb_network_may_pull(skb, sizeof(*arp)))
479     + return NULL;
480     + arp = (struct arp_pkt *)skb_network_header(skb);
481    
482     /* Don't modify or load balance ARPs that do not originate locally
483     * (e.g.,arrive via a bridge).
484     @@ -685,7 +685,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
485    
486     if (arp->op_code == htons(ARPOP_REPLY)) {
487     /* the arp must be sent on the selected rx channel */
488     - tx_slave = rlb_choose_channel(skb, bond);
489     + tx_slave = rlb_choose_channel(skb, bond, arp);
490     if (tx_slave)
491     ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
492     netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
493     @@ -695,7 +695,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
494     * When the arp reply is received the entry will be updated
495     * with the correct unicast address of the client.
496     */
497     - rlb_choose_channel(skb, bond);
498     + rlb_choose_channel(skb, bond, arp);
499    
500     /* The ARP reply packets must be delayed so that
501     * they can cancel out the influence of the ARP request.
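
The rlb_arp_xmit() change follows a general rule for parsing packet headers: never cast skb_network_header() to a protocol structure until pskb_network_may_pull() has confirmed that many bytes are present in the linear area. A minimal sketch of the pattern, using a hypothetical header type:

#include <linux/skbuff.h>

struct example_hdr {		/* hypothetical on-wire header */
	__be16 op;
	__be32 addr;
} __packed;

/* Return the header, or NULL if the packet is too short to contain one. */
static struct example_hdr *example_parse(struct sk_buff *skb)
{
	if (!pskb_network_may_pull(skb, sizeof(struct example_hdr)))
		return NULL;	/* short packet, or pull failed */

	return (struct example_hdr *)skb_network_header(skb);
}

The pointer is taken only after the check because a pull can reallocate the skb's data area, which is also why the patch drops the old arp_pkt() helper and casts the header afresh after the check.
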
502     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
503     index fbe3c2c114f9..736e550163e1 100644
504     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
505     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
506     @@ -6439,13 +6439,13 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
507     return -EINVAL;
508    
509     if (netif_running(dev))
510     - bnxt_close_nic(bp, false, false);
511     + bnxt_close_nic(bp, true, false);
512    
513     dev->mtu = new_mtu;
514     bnxt_set_ring_params(bp);
515    
516     if (netif_running(dev))
517     - return bnxt_open_nic(bp, false, false);
518     + return bnxt_open_nic(bp, true, false);
519    
520     return 0;
521     }
522     diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
523     index 1b07c6216e2a..8df32398d343 100644
524     --- a/drivers/net/ethernet/freescale/fec_main.c
525     +++ b/drivers/net/ethernet/freescale/fec_main.c
526     @@ -2470,15 +2470,15 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
527     return -EINVAL;
528     }
529    
530     - cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
531     + cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
532     if (cycle > 0xFFFF) {
533     pr_err("Rx coalesced usec exceed hardware limitation\n");
534     return -EINVAL;
535     }
536    
537     - cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
538     + cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
539     if (cycle > 0xFFFF) {
540     - pr_err("Rx coalesced usec exceed hardware limitation\n");
541     + pr_err("Tx coalesced usec exceed hardware limitation\n");
542     return -EINVAL;
543     }
544    
545     diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
546     index d94e151cff12..d4747caf1e7c 100644
547     --- a/drivers/net/ethernet/micrel/ks8851_mll.c
548     +++ b/drivers/net/ethernet/micrel/ks8851_mll.c
549     @@ -831,14 +831,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
550     {
551     struct net_device *netdev = pw;
552     struct ks_net *ks = netdev_priv(netdev);
553     + unsigned long flags;
554     u16 status;
555    
556     + spin_lock_irqsave(&ks->statelock, flags);
557     /*this should be the first in IRQ handler */
558     ks_save_cmd_reg(ks);
559    
560     status = ks_rdreg16(ks, KS_ISR);
561     if (unlikely(!status)) {
562     ks_restore_cmd_reg(ks);
563     + spin_unlock_irqrestore(&ks->statelock, flags);
564     return IRQ_NONE;
565     }
566    
567     @@ -864,6 +867,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
568     ks->netdev->stats.rx_over_errors++;
569     /* this should be the last in IRQ handler*/
570     ks_restore_cmd_reg(ks);
571     + spin_unlock_irqrestore(&ks->statelock, flags);
572     return IRQ_HANDLED;
573     }
574    
575     @@ -933,6 +937,7 @@ static int ks_net_stop(struct net_device *netdev)
576    
577     /* shutdown RX/TX QMU */
578     ks_disable_qmu(ks);
579     + ks_disable_int(ks);
580    
581     /* set powermode to soft power down to save power */
582     ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
583     @@ -989,10 +994,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
584     {
585     netdev_tx_t retv = NETDEV_TX_OK;
586     struct ks_net *ks = netdev_priv(netdev);
587     + unsigned long flags;
588    
589     - disable_irq(netdev->irq);
590     - ks_disable_int(ks);
591     - spin_lock(&ks->statelock);
592     + spin_lock_irqsave(&ks->statelock, flags);
593    
594     /* Extra space are required:
595     * 4 byte for alignment, 4 for status/length, 4 for CRC
596     @@ -1006,9 +1010,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
597     dev_kfree_skb(skb);
598     } else
599     retv = NETDEV_TX_BUSY;
600     - spin_unlock(&ks->statelock);
601     - ks_enable_int(ks);
602     - enable_irq(netdev->irq);
603     + spin_unlock_irqrestore(&ks->statelock, flags);
604     return retv;
605     }
606    
607     diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
608     index c747ab652665..6c0982a39486 100644
609     --- a/drivers/net/ipvlan/ipvlan_core.c
610     +++ b/drivers/net/ipvlan/ipvlan_core.c
611     @@ -251,6 +251,7 @@ acct:
612     } else {
613     kfree_skb(skb);
614     }
615     + cond_resched();
616     }
617     }
618    
619     @@ -443,19 +444,21 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
620     struct ethhdr *ethh = eth_hdr(skb);
621     int ret = NET_XMIT_DROP;
622    
623     - /* In this mode we dont care about multicast and broadcast traffic */
624     - if (is_multicast_ether_addr(ethh->h_dest)) {
625     - pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
626     - ntohs(skb->protocol));
627     - kfree_skb(skb);
628     - goto out;
629     - }
630     -
631     /* The ipvlan is a pseudo-L2 device, so the packets that we receive
632     * will have L2; which need to discarded and processed further
633     * in the net-ns of the main-device.
634     */
635     if (skb_mac_header_was_set(skb)) {
636     + /* In this mode we dont care about
637     + * multicast and broadcast traffic */
638     + if (is_multicast_ether_addr(ethh->h_dest)) {
639     + pr_debug_ratelimited(
640     + "Dropped {multi|broad}cast of type=[%x]\n",
641     + ntohs(skb->protocol));
642     + kfree_skb(skb);
643     + goto out;
644     + }
645     +
646     skb_pull(skb, sizeof(*ethh));
647     skb->mac_header = (typeof(skb->mac_header))~0U;
648     skb_reset_network_header(skb);
649     diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
650     index 72fb55ca27f3..72f37e546ed2 100644
651     --- a/drivers/net/ipvlan/ipvlan_main.c
652     +++ b/drivers/net/ipvlan/ipvlan_main.c
653     @@ -217,7 +217,6 @@ static void ipvlan_uninit(struct net_device *dev)
654     static int ipvlan_open(struct net_device *dev)
655     {
656     struct ipvl_dev *ipvlan = netdev_priv(dev);
657     - struct net_device *phy_dev = ipvlan->phy_dev;
658     struct ipvl_addr *addr;
659    
660     if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
661     @@ -229,7 +228,7 @@ static int ipvlan_open(struct net_device *dev)
662     list_for_each_entry(addr, &ipvlan->addrs, anode)
663     ipvlan_ht_addr_add(ipvlan, addr);
664    
665     - return dev_uc_add(phy_dev, phy_dev->dev_addr);
666     + return 0;
667     }
668    
669     static int ipvlan_stop(struct net_device *dev)
670     @@ -241,8 +240,6 @@ static int ipvlan_stop(struct net_device *dev)
671     dev_uc_unsync(phy_dev, dev);
672     dev_mc_unsync(phy_dev, dev);
673    
674     - dev_uc_del(phy_dev, phy_dev->dev_addr);
675     -
676     list_for_each_entry(addr, &ipvlan->addrs, anode)
677     ipvlan_ht_addr_del(addr);
678    
679     diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
680     index a48ed0873cc7..8c64b06cb98c 100644
681     --- a/drivers/net/macsec.c
682     +++ b/drivers/net/macsec.c
683     @@ -2871,6 +2871,11 @@ static void macsec_dev_set_rx_mode(struct net_device *dev)
684     dev_uc_sync(real_dev, dev);
685     }
686    
687     +static sci_t dev_to_sci(struct net_device *dev, __be16 port)
688     +{
689     + return make_sci(dev->dev_addr, port);
690     +}
691     +
692     static int macsec_set_mac_address(struct net_device *dev, void *p)
693     {
694     struct macsec_dev *macsec = macsec_priv(dev);
695     @@ -2892,6 +2897,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
696    
697     out:
698     ether_addr_copy(dev->dev_addr, addr->sa_data);
699     + macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
700     return 0;
701     }
702    
703     @@ -2976,6 +2982,7 @@ static const struct device_type macsec_type = {
704    
705     static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
706     [IFLA_MACSEC_SCI] = { .type = NLA_U64 },
707     + [IFLA_MACSEC_PORT] = { .type = NLA_U16 },
708     [IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
709     [IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
710     [IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
711     @@ -3160,11 +3167,6 @@ static bool sci_exists(struct net_device *dev, sci_t sci)
712     return false;
713     }
714    
715     -static sci_t dev_to_sci(struct net_device *dev, __be16 port)
716     -{
717     - return make_sci(dev->dev_addr, port);
718     -}
719     -
720     static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
721     {
722     struct macsec_dev *macsec = macsec_priv(dev);
723     diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
724     index e2b3d3c4d4df..294881621430 100644
725     --- a/drivers/net/macvlan.c
726     +++ b/drivers/net/macvlan.c
727     @@ -309,6 +309,8 @@ static void macvlan_process_broadcast(struct work_struct *w)
728     if (src)
729     dev_put(src->dev);
730     kfree_skb(skb);
731     +
732     + cond_resched();
733     }
734     }
735    
736     diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
737     index 487d0372a444..2f5587306022 100644
738     --- a/drivers/net/phy/phy_device.c
739     +++ b/drivers/net/phy/phy_device.c
740     @@ -80,7 +80,7 @@ static LIST_HEAD(phy_fixup_list);
741     static DEFINE_MUTEX(phy_fixup_lock);
742    
743     #ifdef CONFIG_PM
744     -static bool mdio_bus_phy_may_suspend(struct phy_device *phydev, bool suspend)
745     +static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
746     {
747     struct device_driver *drv = phydev->mdio.dev.driver;
748     struct phy_driver *phydrv = to_phy_driver(drv);
749     @@ -92,11 +92,10 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev, bool suspend)
750     /* PHY not attached? May suspend if the PHY has not already been
751     * suspended as part of a prior call to phy_disconnect() ->
752     * phy_detach() -> phy_suspend() because the parent netdev might be the
753     - * MDIO bus driver and clock gated at this point. Also may resume if
754     - * PHY is not attached.
755     + * MDIO bus driver and clock gated at this point.
756     */
757     if (!netdev)
758     - return suspend ? !phydev->suspended : phydev->suspended;
759     + goto out;
760    
761     /* Don't suspend PHY if the attached netdev parent may wakeup.
762     * The parent may point to a PCI device, as in tg3 driver.
763     @@ -111,7 +110,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev, bool suspend)
764     if (device_may_wakeup(&netdev->dev))
765     return false;
766    
767     - return true;
768     +out:
769     + return !phydev->suspended;
770     }
771    
772     static int mdio_bus_phy_suspend(struct device *dev)
773     @@ -126,9 +126,11 @@ static int mdio_bus_phy_suspend(struct device *dev)
774     if (phydev->attached_dev && phydev->adjust_link)
775     phy_stop_machine(phydev);
776    
777     - if (!mdio_bus_phy_may_suspend(phydev, true))
778     + if (!mdio_bus_phy_may_suspend(phydev))
779     return 0;
780    
781     + phydev->suspended_by_mdio_bus = true;
782     +
783     return phy_suspend(phydev);
784     }
785    
786     @@ -137,9 +139,11 @@ static int mdio_bus_phy_resume(struct device *dev)
787     struct phy_device *phydev = to_phy_device(dev);
788     int ret;
789    
790     - if (!mdio_bus_phy_may_suspend(phydev, false))
791     + if (!phydev->suspended_by_mdio_bus)
792     goto no_resume;
793    
794     + phydev->suspended_by_mdio_bus = false;
795     +
796     ret = phy_resume(phydev);
797     if (ret < 0)
798     return ret;
799     diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
800     index ddceed3c5a4a..a516470da015 100644
801     --- a/drivers/net/slip/slhc.c
802     +++ b/drivers/net/slip/slhc.c
803     @@ -232,7 +232,7 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
804     register struct cstate *cs = lcs->next;
805     register unsigned long deltaS, deltaA;
806     register short changes = 0;
807     - int hlen;
808     + int nlen, hlen;
809     unsigned char new_seq[16];
810     register unsigned char *cp = new_seq;
811     struct iphdr *ip;
812     @@ -248,6 +248,8 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
813     return isize;
814    
815     ip = (struct iphdr *) icp;
816     + if (ip->version != 4 || ip->ihl < 5)
817     + return isize;
818    
819     /* Bail if this packet isn't TCP, or is an IP fragment */
820     if (ip->protocol != IPPROTO_TCP || (ntohs(ip->frag_off) & 0x3fff)) {
821     @@ -258,10 +260,14 @@ slhc_compress(struct slcompress *comp, unsigned char *icp, int isize,
822     comp->sls_o_tcp++;
823     return isize;
824     }
825     - /* Extract TCP header */
826     + nlen = ip->ihl * 4;
827     + if (isize < nlen + sizeof(*th))
828     + return isize;
829    
830     - th = (struct tcphdr *)(((unsigned char *)ip) + ip->ihl*4);
831     - hlen = ip->ihl*4 + th->doff*4;
832     + th = (struct tcphdr *)(icp + nlen);
833     + if (th->doff < sizeof(struct tcphdr) / 4)
834     + return isize;
835     + hlen = nlen + th->doff * 4;
836    
837     /* Bail if the TCP packet isn't `compressible' (i.e., ACK isn't set or
838     * some other control bit is set). Also uncompressible if
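
The slhc_compress() hunk is defensive parsing: before ip->ihl and th->doff are trusted, the function now verifies that the buffer really contains an IPv4 header of the claimed length plus at least a minimal TCP header. The same checks, plus a final whole-header bound added here for completeness, fit in a small stand-alone validator:

#include <stdbool.h>
#include <stddef.h>
#include <linux/ip.h>
#include <linux/tcp.h>

/*
 * Return true if 'buf' of 'len' bytes starts with an IPv4 + TCP header pair
 * whose self-declared lengths fit inside the buffer.
 */
static bool tcpip_headers_sane(const unsigned char *buf, size_t len)
{
	const struct iphdr *ip = (const struct iphdr *)buf;
	const struct tcphdr *th;
	size_t nlen, hlen;

	if (len < sizeof(*ip))
		return false;
	if (ip->version != 4 || ip->ihl < 5)		/* not IPv4, or bogus IHL */
		return false;

	nlen = ip->ihl * 4;				/* IP header bytes */
	if (len < nlen + sizeof(*th))
		return false;

	th = (const struct tcphdr *)(buf + nlen);
	if (th->doff < sizeof(struct tcphdr) / 4)	/* bogus data offset */
		return false;

	hlen = nlen + th->doff * 4;			/* total header bytes */
	return len >= hlen;
}
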
839     diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
840     index fd2573cca803..d0c18e3557f1 100644
841     --- a/drivers/net/team/team.c
842     +++ b/drivers/net/team/team.c
843     @@ -2216,6 +2216,8 @@ team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
844     [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
845     [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
846     [TEAM_ATTR_OPTION_DATA] = { .type = NLA_BINARY },
847     + [TEAM_ATTR_OPTION_PORT_IFINDEX] = { .type = NLA_U32 },
848     + [TEAM_ATTR_OPTION_ARRAY_INDEX] = { .type = NLA_U32 },
849     };
850    
851     static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
852     diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
853     index ba7cfc089516..6e74965d26a0 100644
854     --- a/drivers/net/usb/r8152.c
855     +++ b/drivers/net/usb/r8152.c
856     @@ -3423,7 +3423,10 @@ static void r8153_init(struct r8152 *tp)
857     if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
858     AUTOLOAD_DONE)
859     break;
860     +
861     msleep(20);
862     + if (test_bit(RTL8152_UNPLUG, &tp->flags))
863     + break;
864     }
865    
866     for (i = 0; i < 500; i++) {
867     @@ -3447,7 +3450,10 @@ static void r8153_init(struct r8152 *tp)
868     ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
869     if (ocp_data == PHY_STAT_LAN_ON)
870     break;
871     +
872     msleep(20);
873     + if (test_bit(RTL8152_UNPLUG, &tp->flags))
874     + break;
875     }
876    
877     usb_disable_lpm(tp->udev);
878     diff --git a/drivers/net/wireless/marvell/mwifiex/tdls.c b/drivers/net/wireless/marvell/mwifiex/tdls.c
879     index df9704de0715..c6fc09d17462 100644
880     --- a/drivers/net/wireless/marvell/mwifiex/tdls.c
881     +++ b/drivers/net/wireless/marvell/mwifiex/tdls.c
882     @@ -917,59 +917,117 @@ void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
883    
884     switch (*pos) {
885     case WLAN_EID_SUPP_RATES:
886     + if (pos[1] > 32)
887     + return;
888     sta_ptr->tdls_cap.rates_len = pos[1];
889     for (i = 0; i < pos[1]; i++)
890     sta_ptr->tdls_cap.rates[i] = pos[i + 2];
891     break;
892    
893     case WLAN_EID_EXT_SUPP_RATES:
894     + if (pos[1] > 32)
895     + return;
896     basic = sta_ptr->tdls_cap.rates_len;
897     + if (pos[1] > 32 - basic)
898     + return;
899     for (i = 0; i < pos[1]; i++)
900     sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
901     sta_ptr->tdls_cap.rates_len += pos[1];
902     break;
903     case WLAN_EID_HT_CAPABILITY:
904     - memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
905     + if (pos > end - sizeof(struct ieee80211_ht_cap) - 2)
906     + return;
907     + if (pos[1] != sizeof(struct ieee80211_ht_cap))
908     + return;
909     + /* copy the ie's value into ht_capb*/
910     + memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos + 2,
911     sizeof(struct ieee80211_ht_cap));
912     sta_ptr->is_11n_enabled = 1;
913     break;
914     case WLAN_EID_HT_OPERATION:
915     - memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
916     + if (pos > end -
917     + sizeof(struct ieee80211_ht_operation) - 2)
918     + return;
919     + if (pos[1] != sizeof(struct ieee80211_ht_operation))
920     + return;
921     + /* copy the ie's value into ht_oper*/
922     + memcpy(&sta_ptr->tdls_cap.ht_oper, pos + 2,
923     sizeof(struct ieee80211_ht_operation));
924     break;
925     case WLAN_EID_BSS_COEX_2040:
926     + if (pos > end - 3)
927     + return;
928     + if (pos[1] != 1)
929     + return;
930     sta_ptr->tdls_cap.coex_2040 = pos[2];
931     break;
932     case WLAN_EID_EXT_CAPABILITY:
933     + if (pos > end - sizeof(struct ieee_types_header))
934     + return;
935     + if (pos[1] < sizeof(struct ieee_types_header))
936     + return;
937     + if (pos[1] > 8)
938     + return;
939     memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
940     sizeof(struct ieee_types_header) +
941     min_t(u8, pos[1], 8));
942     break;
943     case WLAN_EID_RSN:
944     + if (pos > end - sizeof(struct ieee_types_header))
945     + return;
946     + if (pos[1] < sizeof(struct ieee_types_header))
947     + return;
948     + if (pos[1] > IEEE_MAX_IE_SIZE -
949     + sizeof(struct ieee_types_header))
950     + return;
951     memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
952     sizeof(struct ieee_types_header) +
953     min_t(u8, pos[1], IEEE_MAX_IE_SIZE -
954     sizeof(struct ieee_types_header)));
955     break;
956     case WLAN_EID_QOS_CAPA:
957     + if (pos > end - 3)
958     + return;
959     + if (pos[1] != 1)
960     + return;
961     sta_ptr->tdls_cap.qos_info = pos[2];
962     break;
963     case WLAN_EID_VHT_OPERATION:
964     - if (priv->adapter->is_hw_11ac_capable)
965     - memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
966     + if (priv->adapter->is_hw_11ac_capable) {
967     + if (pos > end -
968     + sizeof(struct ieee80211_vht_operation) - 2)
969     + return;
970     + if (pos[1] !=
971     + sizeof(struct ieee80211_vht_operation))
972     + return;
973     + /* copy the ie's value into vhtoper*/
974     + memcpy(&sta_ptr->tdls_cap.vhtoper, pos + 2,
975     sizeof(struct ieee80211_vht_operation));
976     + }
977     break;
978     case WLAN_EID_VHT_CAPABILITY:
979     if (priv->adapter->is_hw_11ac_capable) {
980     - memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
981     + if (pos > end -
982     + sizeof(struct ieee80211_vht_cap) - 2)
983     + return;
984     + if (pos[1] != sizeof(struct ieee80211_vht_cap))
985     + return;
986     + /* copy the ie's value into vhtcap*/
987     + memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos + 2,
988     sizeof(struct ieee80211_vht_cap));
989     sta_ptr->is_11ac_enabled = 1;
990     }
991     break;
992     case WLAN_EID_AID:
993     - if (priv->adapter->is_hw_11ac_capable)
994     + if (priv->adapter->is_hw_11ac_capable) {
995     + if (pos > end - 4)
996     + return;
997     + if (pos[1] != 2)
998     + return;
999     sta_ptr->tdls_cap.aid =
1000     le16_to_cpu(*(__le16 *)(pos + 2));
1001     + }
1002     + break;
1003     default:
1004     break;
1005     }
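
Every added check above enforces the same rule for information elements: an IE is a (type, length, value) triple, so both the element's declared length and the space left in the frame must be validated before the value is copied into a fixed-size field. A compact stand-alone sketch of such a walk, with hypothetical element IDs and destination sizes:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy the value of the first element with id 'want' into dst (dst_len bytes).
 * Element layout: [ id (1 byte) | len (1 byte) | len bytes of value ].
 */
static bool find_ie(const uint8_t *buf, size_t buf_len, uint8_t want,
		    uint8_t *dst, size_t dst_len)
{
	size_t off = 0;

	while (off + 2 <= buf_len) {			/* room for id + len */
		uint8_t id  = buf[off];
		uint8_t len = buf[off + 1];

		if (off + 2 + len > buf_len)		/* element overruns frame */
			return false;
		if (id == want) {
			if (len != dst_len)		/* unexpected element size */
				return false;
			memcpy(dst, &buf[off + 2], len);
			return true;
		}
		off += 2 + len;
	}
	return false;
}
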
1006     diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
1007     index d6475dcce9df..0262c8f7e7c7 100644
1008     --- a/fs/cifs/dir.c
1009     +++ b/fs/cifs/dir.c
1010     @@ -551,7 +551,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
1011     if (server->ops->close)
1012     server->ops->close(xid, tcon, &fid);
1013     cifs_del_pending_open(&open);
1014     - fput(file);
1015     rc = -ENOMEM;
1016     }
1017    
1018     diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
1019     index bd6202b70447..daad7b04f88c 100644
1020     --- a/fs/gfs2/inode.c
1021     +++ b/fs/gfs2/inode.c
1022     @@ -1248,7 +1248,7 @@ static int gfs2_atomic_open(struct inode *dir, struct dentry *dentry,
1023     if (!(*opened & FILE_OPENED))
1024     return finish_no_open(file, d);
1025     dput(d);
1026     - return 0;
1027     + return excl && (flags & O_CREAT) ? -EEXIST : 0;
1028     }
1029    
1030     BUG_ON(d != NULL);
1031     diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
1032     index 04dd0652bb5c..8de458d64134 100644
1033     --- a/fs/jbd2/transaction.c
1034     +++ b/fs/jbd2/transaction.c
1035     @@ -1037,8 +1037,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
1036     /* For undo access buffer must have data copied */
1037     if (undo && !jh->b_committed_data)
1038     goto out;
1039     - if (jh->b_transaction != handle->h_transaction &&
1040     - jh->b_next_transaction != handle->h_transaction)
1041     + if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
1042     + READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
1043     goto out;
1044     /*
1045     * There are two reasons for the barrier here:
1046     @@ -2448,8 +2448,8 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
1047     * our jh reference and thus __jbd2_journal_file_buffer() must not
1048     * take a new one.
1049     */
1050     - jh->b_transaction = jh->b_next_transaction;
1051     - jh->b_next_transaction = NULL;
1052     + WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
1053     + WRITE_ONCE(jh->b_next_transaction, NULL);
1054     if (buffer_freed(bh))
1055     jlist = BJ_Forget;
1056     else if (jh->b_modified)
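
The jbd2 change pairs WRITE_ONCE() at the site that retargets jh->b_transaction with READ_ONCE() at the lockless fast-path check in jbd2_write_access_granted(), so the compiler can neither tear the pointer updates nor re-read them. A minimal sketch of the pairing with generic names (the real writer additionally runs under the journal's state locks):

#include <linux/compiler.h>
#include <linux/types.h>

struct owner;

struct item {
	struct owner *cur;
	struct owner *next;
};

/* Writer: retarget the item, called with the item's lock held. */
static void item_promote(struct item *it)
{
	WRITE_ONCE(it->cur, it->next);
	WRITE_ONCE(it->next, NULL);
}

/* Lockless reader: each pointer is read exactly once, untorn. */
static bool item_owned_by(struct item *it, struct owner *o)
{
	return READ_ONCE(it->cur) == o || READ_ONCE(it->next) == o;
}
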
1057     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1058     index c2665d920cf8..2517fcd423b6 100644
1059     --- a/fs/nfs/dir.c
1060     +++ b/fs/nfs/dir.c
1061     @@ -678,8 +678,6 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page,
1062     goto out_label_free;
1063     }
1064    
1065     - array = kmap(page);
1066     -
1067     status = nfs_readdir_alloc_pages(pages, array_size);
1068     if (status < 0)
1069     goto out_release_array;
1070     diff --git a/fs/open.c b/fs/open.c
1071     index 8db6e3a5fc10..e17cc79bd88a 100644
1072     --- a/fs/open.c
1073     +++ b/fs/open.c
1074     @@ -824,9 +824,6 @@ cleanup_file:
1075     * the return value of d_splice_alias(), then the caller needs to perform dput()
1076     * on it after finish_open().
1077     *
1078     - * On successful return @file is a fully instantiated open file. After this, if
1079     - * an error occurs in ->atomic_open(), it needs to clean up with fput().
1080     - *
1081     * Returns zero on success or -errno if the open failed.
1082     */
1083     int finish_open(struct file *file, struct dentry *dentry,
1084     diff --git a/include/linux/phy.h b/include/linux/phy.h
1085     index 867110c9d707..8eafced47540 100644
1086     --- a/include/linux/phy.h
1087     +++ b/include/linux/phy.h
1088     @@ -333,6 +333,7 @@ struct phy_c45_device_ids {
1089     * is_pseudo_fixed_link: Set to true if this phy is an Ethernet switch, etc.
1090     * has_fixups: Set to true if this phy has fixups/quirks.
1091     * suspended: Set to true if this phy has been suspended successfully.
1092     + * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
1093     * state: state of the PHY for management purposes
1094     * dev_flags: Device-specific flags used by the PHY driver.
1095     * link_timeout: The number of timer firings to wait before the
1096     @@ -369,6 +370,7 @@ struct phy_device {
1097     bool is_pseudo_fixed_link;
1098     bool has_fixups;
1099     bool suspended;
1100     + bool suspended_by_mdio_bus;
1101    
1102     enum phy_state state;
1103    
1104     diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
1105     index 456e4a6006ab..0b0ad792dd5c 100644
1106     --- a/include/net/fib_rules.h
1107     +++ b/include/net/fib_rules.h
1108     @@ -87,6 +87,7 @@ struct fib_rules_ops {
1109     [FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
1110     [FRA_PRIORITY] = { .type = NLA_U32 }, \
1111     [FRA_FWMARK] = { .type = NLA_U32 }, \
1112     + [FRA_TUN_ID] = { .type = NLA_U64 }, \
1113     [FRA_FWMASK] = { .type = NLA_U32 }, \
1114     [FRA_TABLE] = { .type = NLA_U32 }, \
1115     [FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
1116     diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1117     index bb0cf1caf1cd..2d7a4fc42a88 100644
1118     --- a/kernel/cgroup.c
1119     +++ b/kernel/cgroup.c
1120     @@ -6335,6 +6335,10 @@ void cgroup_sk_alloc(struct sock_cgroup_data *skcd)
1121     return;
1122     }
1123    
1124     + /* Don't associate the sock with unrelated interrupted task's cgroup. */
1125     + if (in_interrupt())
1126     + return;
1127     +
1128     rcu_read_lock();
1129    
1130     while (true) {
1131     diff --git a/kernel/signal.c b/kernel/signal.c
1132     index 57fadbe69c2e..d90ccbeb909d 100644
1133     --- a/kernel/signal.c
1134     +++ b/kernel/signal.c
1135     @@ -373,27 +373,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
1136     {
1137     struct sigqueue *q = NULL;
1138     struct user_struct *user;
1139     + int sigpending;
1140    
1141     /*
1142     * Protect access to @t credentials. This can go away when all
1143     * callers hold rcu read lock.
1144     + *
1145     + * NOTE! A pending signal will hold on to the user refcount,
1146     + * and we get/put the refcount only when the sigpending count
1147     + * changes from/to zero.
1148     */
1149     rcu_read_lock();
1150     - user = get_uid(__task_cred(t)->user);
1151     - atomic_inc(&user->sigpending);
1152     + user = __task_cred(t)->user;
1153     + sigpending = atomic_inc_return(&user->sigpending);
1154     + if (sigpending == 1)
1155     + get_uid(user);
1156     rcu_read_unlock();
1157    
1158     - if (override_rlimit ||
1159     - atomic_read(&user->sigpending) <=
1160     - task_rlimit(t, RLIMIT_SIGPENDING)) {
1161     + if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
1162     q = kmem_cache_alloc(sigqueue_cachep, flags);
1163     } else {
1164     print_dropped_signal(sig);
1165     }
1166    
1167     if (unlikely(q == NULL)) {
1168     - atomic_dec(&user->sigpending);
1169     - free_uid(user);
1170     + if (atomic_dec_and_test(&user->sigpending))
1171     + free_uid(user);
1172     } else {
1173     INIT_LIST_HEAD(&q->list);
1174     q->flags = 0;
1175     @@ -407,8 +412,8 @@ static void __sigqueue_free(struct sigqueue *q)
1176     {
1177     if (q->flags & SIGQUEUE_PREALLOC)
1178     return;
1179     - atomic_dec(&q->user->sigpending);
1180     - free_uid(q->user);
1181     + if (atomic_dec_and_test(&q->user->sigpending))
1182     + free_uid(q->user);
1183     kmem_cache_free(sigqueue_cachep, q);
1184     }
1185    
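
The sigqueue change takes a uid reference only on the 0 to 1 transition of user->sigpending and drops it only on the 1 to 0 transition, rather than one get_uid()/free_uid() pair per queued signal. The counting idea generalizes to any object that should pin its parent only while work is outstanding; a minimal sketch with hypothetical names (the real code additionally relies on RCU and the signal locks to order these transitions):

#include <linux/atomic.h>
#include <linux/slab.h>

struct parent {
	atomic_t refcount;	/* lifetime of the parent object */
	atomic_t pending;	/* number of outstanding child items */
};

static void parent_get(struct parent *p)
{
	atomic_inc(&p->refcount);
}

static void parent_put(struct parent *p)
{
	if (atomic_dec_and_test(&p->refcount))
		kfree(p);	/* last reference gone */
}

/* One parent reference covers the whole group of pending items. */
static void pending_add(struct parent *p)
{
	if (atomic_inc_return(&p->pending) == 1)
		parent_get(p);
}

static void pending_remove(struct parent *p)
{
	if (atomic_dec_and_test(&p->pending))
		parent_put(p);
}
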
1186     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1187     index 7d970b565c4d..00c295d3104b 100644
1188     --- a/kernel/workqueue.c
1189     +++ b/kernel/workqueue.c
1190     @@ -1384,14 +1384,16 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
1191     WARN_ON_ONCE(!is_chained_work(wq)))
1192     return;
1193     retry:
1194     - if (req_cpu == WORK_CPU_UNBOUND)
1195     - cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1196     -
1197     /* pwq which will be used unless @work is executing elsewhere */
1198     - if (!(wq->flags & WQ_UNBOUND))
1199     - pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1200     - else
1201     + if (wq->flags & WQ_UNBOUND) {
1202     + if (req_cpu == WORK_CPU_UNBOUND)
1203     + cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1204     pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1205     + } else {
1206     + if (req_cpu == WORK_CPU_UNBOUND)
1207     + cpu = raw_smp_processor_id();
1208     + pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1209     + }
1210    
1211     /*
1212     * If @work was previously on a different pool, it might still be
1213     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1214     index 0f8422239dea..b85a1c040bc9 100644
1215     --- a/mm/memcontrol.c
1216     +++ b/mm/memcontrol.c
1217     @@ -5726,6 +5726,10 @@ void mem_cgroup_sk_alloc(struct sock *sk)
1218     return;
1219     }
1220    
1221     + /* Do not associate the sock with unrelated interrupted task's memcg. */
1222     + if (in_interrupt())
1223     + return;
1224     +
1225     rcu_read_lock();
1226     memcg = mem_cgroup_from_task(current);
1227     if (memcg == root_mem_cgroup)
1228     diff --git a/mm/slub.c b/mm/slub.c
1229     index fa6d62d559eb..4a5b2a0f9360 100644
1230     --- a/mm/slub.c
1231     +++ b/mm/slub.c
1232     @@ -3114,6 +3114,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
1233     void *object = c->freelist;
1234    
1235     if (unlikely(!object)) {
1236     + /*
1237     + * We may have removed an object from c->freelist using
1238     + * the fastpath in the previous iteration; in that case,
1239     + * c->tid has not been bumped yet.
1240     + * Since ___slab_alloc() may reenable interrupts while
1241     + * allocating memory, we should bump c->tid now.
1242     + */
1243     + c->tid = next_tid(c->tid);
1244     +
1245     /*
1246     * Invoking slow path likely have side-effect
1247     * of re-populating per CPU c->freelist
1248     diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
1249     index 780700fcbe63..2b663622bdb4 100644
1250     --- a/net/batman-adv/bat_iv_ogm.c
1251     +++ b/net/batman-adv/bat_iv_ogm.c
1252     @@ -34,6 +34,7 @@
1253     #include <linux/kref.h>
1254     #include <linux/list.h>
1255     #include <linux/lockdep.h>
1256     +#include <linux/mutex.h>
1257     #include <linux/netdevice.h>
1258     #include <linux/netlink.h>
1259     #include <linux/pkt_sched.h>
1260     @@ -149,7 +150,7 @@ static void batadv_iv_ogm_orig_free(struct batadv_orig_node *orig_node)
1261     * Return: 0 on success, a negative error code otherwise.
1262     */
1263     static int batadv_iv_ogm_orig_add_if(struct batadv_orig_node *orig_node,
1264     - int max_if_num)
1265     + unsigned int max_if_num)
1266     {
1267     void *data_ptr;
1268     size_t old_size;
1269     @@ -193,7 +194,8 @@ unlock:
1270     */
1271     static void
1272     batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
1273     - int max_if_num, int del_if_num)
1274     + unsigned int max_if_num,
1275     + unsigned int del_if_num)
1276     {
1277     size_t chunk_size;
1278     size_t if_offset;
1279     @@ -231,7 +233,8 @@ batadv_iv_ogm_drop_bcast_own_entry(struct batadv_orig_node *orig_node,
1280     */
1281     static void
1282     batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
1283     - int max_if_num, int del_if_num)
1284     + unsigned int max_if_num,
1285     + unsigned int del_if_num)
1286     {
1287     size_t if_offset;
1288     void *data_ptr;
1289     @@ -268,7 +271,8 @@ batadv_iv_ogm_drop_bcast_own_sum_entry(struct batadv_orig_node *orig_node,
1290     * Return: 0 on success, a negative error code otherwise.
1291     */
1292     static int batadv_iv_ogm_orig_del_if(struct batadv_orig_node *orig_node,
1293     - int max_if_num, int del_if_num)
1294     + unsigned int max_if_num,
1295     + unsigned int del_if_num)
1296     {
1297     spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
1298    
1299     @@ -302,7 +306,8 @@ static struct batadv_orig_node *
1300     batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const u8 *addr)
1301     {
1302     struct batadv_orig_node *orig_node;
1303     - int size, hash_added;
1304     + int hash_added;
1305     + size_t size;
1306    
1307     orig_node = batadv_orig_hash_find(bat_priv, addr);
1308     if (orig_node)
1309     @@ -366,14 +371,18 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
1310     unsigned char *ogm_buff;
1311     u32 random_seqno;
1312    
1313     + mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
1314     +
1315     /* randomize initial seqno to avoid collision */
1316     get_random_bytes(&random_seqno, sizeof(random_seqno));
1317     atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
1318    
1319     hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
1320     ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
1321     - if (!ogm_buff)
1322     + if (!ogm_buff) {
1323     + mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
1324     return -ENOMEM;
1325     + }
1326    
1327     hard_iface->bat_iv.ogm_buff = ogm_buff;
1328    
1329     @@ -385,35 +394,59 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
1330     batadv_ogm_packet->reserved = 0;
1331     batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
1332    
1333     + mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
1334     +
1335     return 0;
1336     }
1337    
1338     static void batadv_iv_ogm_iface_disable(struct batadv_hard_iface *hard_iface)
1339     {
1340     + mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
1341     +
1342     kfree(hard_iface->bat_iv.ogm_buff);
1343     hard_iface->bat_iv.ogm_buff = NULL;
1344     +
1345     + mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
1346     }
1347    
1348     static void batadv_iv_ogm_iface_update_mac(struct batadv_hard_iface *hard_iface)
1349     {
1350     struct batadv_ogm_packet *batadv_ogm_packet;
1351     - unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
1352     + void *ogm_buff;
1353    
1354     - batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
1355     + mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
1356     +
1357     + ogm_buff = hard_iface->bat_iv.ogm_buff;
1358     + if (!ogm_buff)
1359     + goto unlock;
1360     +
1361     + batadv_ogm_packet = ogm_buff;
1362     ether_addr_copy(batadv_ogm_packet->orig,
1363     hard_iface->net_dev->dev_addr);
1364     ether_addr_copy(batadv_ogm_packet->prev_sender,
1365     hard_iface->net_dev->dev_addr);
1366     +
1367     +unlock:
1368     + mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
1369     }
1370    
1371     static void
1372     batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
1373     {
1374     struct batadv_ogm_packet *batadv_ogm_packet;
1375     - unsigned char *ogm_buff = hard_iface->bat_iv.ogm_buff;
1376     + void *ogm_buff;
1377    
1378     - batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
1379     + mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
1380     +
1381     + ogm_buff = hard_iface->bat_iv.ogm_buff;
1382     + if (!ogm_buff)
1383     + goto unlock;
1384     +
1385     + batadv_ogm_packet = ogm_buff;
1386     batadv_ogm_packet->ttl = BATADV_TTL;
1387     +
1388     +unlock:
1389     + mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
1390     }
1391    
1392     /* when do we schedule our own ogm to be sent */
1393     @@ -898,7 +931,7 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
1394     u32 i;
1395     size_t word_index;
1396     u8 *w;
1397     - int if_num;
1398     + unsigned int if_num;
1399    
1400     for (i = 0; i < hash->size; i++) {
1401     head = &hash->table[i];
1402     @@ -919,7 +952,11 @@ batadv_iv_ogm_slide_own_bcast_window(struct batadv_hard_iface *hard_iface)
1403     }
1404     }
1405    
1406     -static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
1407     +/**
1408     + * batadv_iv_ogm_schedule_buff() - schedule submission of hardif ogm buffer
1409     + * @hard_iface: interface whose ogm buffer should be transmitted
1410     + */
1411     +static void batadv_iv_ogm_schedule_buff(struct batadv_hard_iface *hard_iface)
1412     {
1413     struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1414     unsigned char **ogm_buff = &hard_iface->bat_iv.ogm_buff;
1415     @@ -930,8 +967,10 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
1416     u16 tvlv_len = 0;
1417     unsigned long send_time;
1418    
1419     - if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
1420     - (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
1421     + lockdep_assert_held(&hard_iface->bat_iv.ogm_buff_mutex);
1422     +
1423     + /* interface already disabled by batadv_iv_ogm_iface_disable */
1424     + if (!*ogm_buff)
1425     return;
1426    
1427     /* the interface gets activated here to avoid race conditions between
1428     @@ -1000,6 +1039,17 @@ out:
1429     batadv_hardif_put(primary_if);
1430     }
1431    
1432     +static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
1433     +{
1434     + if (hard_iface->if_status == BATADV_IF_NOT_IN_USE ||
1435     + hard_iface->if_status == BATADV_IF_TO_BE_REMOVED)
1436     + return;
1437     +
1438     + mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);
1439     + batadv_iv_ogm_schedule_buff(hard_iface);
1440     + mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);
1441     +}
1442     +
1443     /**
1444     * batadv_iv_ogm_orig_update - use OGM to update corresponding data in an
1445     * originator
1446     @@ -1028,7 +1078,7 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
1447     struct batadv_neigh_node *tmp_neigh_node = NULL;
1448     struct batadv_neigh_node *router = NULL;
1449     struct batadv_orig_node *orig_node_tmp;
1450     - int if_num;
1451     + unsigned int if_num;
1452     u8 sum_orig, sum_neigh;
1453     u8 *neigh_addr;
1454     u8 tq_avg;
1455     @@ -1186,7 +1236,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
1456     u8 total_count;
1457     u8 orig_eq_count, neigh_rq_count, neigh_rq_inv, tq_own;
1458     unsigned int neigh_rq_inv_cube, neigh_rq_max_cube;
1459     - int if_num;
1460     + unsigned int if_num;
1461     unsigned int tq_asym_penalty, inv_asym_penalty;
1462     unsigned int combined_tq;
1463     unsigned int tq_iface_penalty;
1464     @@ -1227,7 +1277,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
1465     orig_node->last_seen = jiffies;
1466    
1467     /* find packet count of corresponding one hop neighbor */
1468     - spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock);
1469     + spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
1470     if_num = if_incoming->if_num;
1471     orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num];
1472     neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing);
1473     @@ -1237,7 +1287,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node,
1474     } else {
1475     neigh_rq_count = 0;
1476     }
1477     - spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock);
1478     + spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock);
1479    
1480     /* pay attention to not get a value bigger than 100 % */
1481     if (orig_eq_count > neigh_rq_count)
1482     @@ -1705,9 +1755,9 @@ static void batadv_iv_ogm_process(const struct sk_buff *skb, int ogm_offset,
1483    
1484     if (is_my_orig) {
1485     unsigned long *word;
1486     - int offset;
1487     + size_t offset;
1488     s32 bit_pos;
1489     - s16 if_num;
1490     + unsigned int if_num;
1491     u8 *weight;
1492    
1493     orig_neigh_node = batadv_iv_ogm_orig_get(bat_priv,
1494     @@ -2473,12 +2523,22 @@ batadv_iv_ogm_neigh_is_sob(struct batadv_neigh_node *neigh1,
1495     return ret;
1496     }
1497    
1498     -static void batadv_iv_iface_activate(struct batadv_hard_iface *hard_iface)
1499     +static void batadv_iv_iface_enabled(struct batadv_hard_iface *hard_iface)
1500     {
1501     /* begin scheduling originator messages on that interface */
1502     batadv_iv_ogm_schedule(hard_iface);
1503     }
1504    
1505     +/**
1506     + * batadv_iv_init_sel_class - initialize GW selection class
1507     + * @bat_priv: the bat priv with all the soft interface information
1508     + */
1509     +static void batadv_iv_init_sel_class(struct batadv_priv *bat_priv)
1510     +{
1511     + /* set default TQ difference threshold to 20 */
1512     + atomic_set(&bat_priv->gw.sel_class, 20);
1513     +}
1514     +
1515     static struct batadv_gw_node *
1516     batadv_iv_gw_get_best_gw_node(struct batadv_priv *bat_priv)
1517     {
1518     @@ -2803,8 +2863,8 @@ unlock:
1519     static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
1520     .name = "BATMAN_IV",
1521     .iface = {
1522     - .activate = batadv_iv_iface_activate,
1523     .enable = batadv_iv_ogm_iface_enable,
1524     + .enabled = batadv_iv_iface_enabled,
1525     .disable = batadv_iv_ogm_iface_disable,
1526     .update_mac = batadv_iv_ogm_iface_update_mac,
1527     .primary_set = batadv_iv_ogm_primary_iface_set,
1528     @@ -2827,6 +2887,7 @@ static struct batadv_algo_ops batadv_batman_iv __read_mostly = {
1529     .del_if = batadv_iv_ogm_orig_del_if,
1530     },
1531     .gw = {
1532     + .init_sel_class = batadv_iv_init_sel_class,
1533     .get_best_gw_node = batadv_iv_gw_get_best_gw_node,
1534     .is_eligible = batadv_iv_gw_is_eligible,
1535     #ifdef CONFIG_BATMAN_ADV_DEBUGFS
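For reference, every bat_iv_ogm.c hunk above follows one locking shape: take the new ogm_buff_mutex, re-check the buffer pointer (batadv_iv_ogm_iface_disable() may have freed it concurrently), work on the buffer, drop the lock. A minimal sketch of that pattern, illustrative only and not part of the patch itself:

	mutex_lock(&hard_iface->bat_iv.ogm_buff_mutex);

	ogm_buff = hard_iface->bat_iv.ogm_buff;
	if (!ogm_buff)			/* interface already disabled */
		goto unlock;

	/* ... read or rewrite the OGM buffer ... */

unlock:
	mutex_unlock(&hard_iface->bat_iv.ogm_buff_mutex);

The scheduling path is split for the same reason: batadv_iv_ogm_schedule() takes the mutex, while the inner batadv_iv_ogm_schedule_buff() documents the requirement via lockdep_assert_held().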
1536     diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
1537     index 4348118e7eac..18fa602e5fc6 100644
1538     --- a/net/batman-adv/bat_v.c
1539     +++ b/net/batman-adv/bat_v.c
1540     @@ -19,7 +19,6 @@
1541     #include "main.h"
1542    
1543     #include <linux/atomic.h>
1544     -#include <linux/bug.h>
1545     #include <linux/cache.h>
1546     #include <linux/errno.h>
1547     #include <linux/if_ether.h>
1548     @@ -623,11 +622,11 @@ static int batadv_v_neigh_cmp(struct batadv_neigh_node *neigh1,
1549     int ret = 0;
1550    
1551     ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
1552     - if (WARN_ON(!ifinfo1))
1553     + if (!ifinfo1)
1554     goto err_ifinfo1;
1555    
1556     ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
1557     - if (WARN_ON(!ifinfo2))
1558     + if (!ifinfo2)
1559     goto err_ifinfo2;
1560    
1561     ret = ifinfo1->bat_v.throughput - ifinfo2->bat_v.throughput;
1562     @@ -649,11 +648,11 @@ static bool batadv_v_neigh_is_sob(struct batadv_neigh_node *neigh1,
1563     bool ret = false;
1564    
1565     ifinfo1 = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
1566     - if (WARN_ON(!ifinfo1))
1567     + if (!ifinfo1)
1568     goto err_ifinfo1;
1569    
1570     ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
1571     - if (WARN_ON(!ifinfo2))
1572     + if (!ifinfo2)
1573     goto err_ifinfo2;
1574    
1575     threshold = ifinfo1->bat_v.throughput / 4;
1576     @@ -668,6 +667,16 @@ err_ifinfo1:
1577     return ret;
1578     }
1579    
1580     +/**
1581     + * batadv_v_init_sel_class - initialize GW selection class
1582     + * @bat_priv: the bat priv with all the soft interface information
1583     + */
1584     +static void batadv_v_init_sel_class(struct batadv_priv *bat_priv)
1585     +{
1586     + /* set default throughput difference threshold to 5Mbps */
1587     + atomic_set(&bat_priv->gw.sel_class, 50);
1588     +}
1589     +
1590     static ssize_t batadv_v_store_sel_class(struct batadv_priv *bat_priv,
1591     char *buff, size_t count)
1592     {
1593     @@ -805,7 +814,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv,
1594     }
1595    
1596     orig_gw = batadv_gw_node_get(bat_priv, orig_node);
1597     - if (!orig_node)
1598     + if (!orig_gw)
1599     goto out;
1600    
1601     if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0)
1602     @@ -1054,6 +1063,7 @@ static struct batadv_algo_ops batadv_batman_v __read_mostly = {
1603     .dump = batadv_v_orig_dump,
1604     },
1605     .gw = {
1606     + .init_sel_class = batadv_v_init_sel_class,
1607     .store_sel_class = batadv_v_store_sel_class,
1608     .show_sel_class = batadv_v_show_sel_class,
1609     .get_best_gw_node = batadv_v_gw_get_best_gw_node,
1610     @@ -1094,9 +1104,6 @@ int batadv_v_mesh_init(struct batadv_priv *bat_priv)
1611     if (ret < 0)
1612     return ret;
1613    
1614     - /* set default throughput difference threshold to 5Mbps */
1615     - atomic_set(&bat_priv->gw.sel_class, 50);
1616     -
1617     return 0;
1618     }
1619    
1620     diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
1621     index 5d79004de25c..62df763b2aae 100644
1622     --- a/net/batman-adv/bat_v_elp.c
1623     +++ b/net/batman-adv/bat_v_elp.c
1624     @@ -19,6 +19,7 @@
1625     #include "main.h"
1626    
1627     #include <linux/atomic.h>
1628     +#include <linux/bitops.h>
1629     #include <linux/byteorder/generic.h>
1630     #include <linux/errno.h>
1631     #include <linux/etherdevice.h>
1632     @@ -29,6 +30,7 @@
1633     #include <linux/kernel.h>
1634     #include <linux/kref.h>
1635     #include <linux/netdevice.h>
1636     +#include <linux/nl80211.h>
1637     #include <linux/random.h>
1638     #include <linux/rculist.h>
1639     #include <linux/rcupdate.h>
1640     @@ -100,8 +102,12 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
1641     */
1642     return 0;
1643     }
1644     - if (!ret)
1645     - return sinfo.expected_throughput / 100;
1646     + if (ret)
1647     + goto default_throughput;
1648     + if (!(sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)))
1649     + goto default_throughput;
1650     +
1651     + return sinfo.expected_throughput / 100;
1652     }
1653    
1654     /* unsupported WiFi driver version */
1655     @@ -185,6 +191,7 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
1656     struct sk_buff *skb;
1657     int probe_len, i;
1658     int elp_skb_len;
1659     + void *tmp;
1660    
1661     /* this probing routine is for Wifi neighbours only */
1662     if (!batadv_is_wifi_netdev(hard_iface->net_dev))
1663     @@ -216,7 +223,8 @@ batadv_v_elp_wifi_neigh_probe(struct batadv_hardif_neigh_node *neigh)
1664     * the packet to be exactly of that size to make the link
1665     * throughput estimation effective.
1666     */
1667     - skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
1668     + tmp = skb_put(skb, probe_len - hard_iface->bat_v.elp_skb->len);
1669     + memset(tmp, 0, probe_len - hard_iface->bat_v.elp_skb->len);
1670    
1671     batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1672     "Sending unicast (probe) ELP packet on interface %s to %pM\n",
1673     @@ -327,21 +335,23 @@ out:
1674     */
1675     int batadv_v_elp_iface_enable(struct batadv_hard_iface *hard_iface)
1676     {
1677     + static const size_t tvlv_padding = sizeof(__be32);
1678     struct batadv_elp_packet *elp_packet;
1679     unsigned char *elp_buff;
1680     u32 random_seqno;
1681     size_t size;
1682     int res = -ENOMEM;
1683    
1684     - size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN;
1685     + size = ETH_HLEN + NET_IP_ALIGN + BATADV_ELP_HLEN + tvlv_padding;
1686     hard_iface->bat_v.elp_skb = dev_alloc_skb(size);
1687     if (!hard_iface->bat_v.elp_skb)
1688     goto out;
1689    
1690     skb_reserve(hard_iface->bat_v.elp_skb, ETH_HLEN + NET_IP_ALIGN);
1691     - elp_buff = skb_put(hard_iface->bat_v.elp_skb, BATADV_ELP_HLEN);
1692     + elp_buff = skb_put(hard_iface->bat_v.elp_skb,
1693     + BATADV_ELP_HLEN + tvlv_padding);
1694     elp_packet = (struct batadv_elp_packet *)elp_buff;
1695     - memset(elp_packet, 0, BATADV_ELP_HLEN);
1696     + memset(elp_packet, 0, BATADV_ELP_HLEN + tvlv_padding);
1697    
1698     elp_packet->packet_type = BATADV_ELP;
1699     elp_packet->version = BATADV_COMPAT_VERSION;
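The bat_v_elp.c hunks above stop trusting sinfo.expected_throughput unless cfg80211 actually reported it. A condensed sketch of the guarded read, assuming the cfg80211_get_station() query just above this hunk is what fills ret and sinfo:

	if (ret)
		goto default_throughput;	/* station lookup failed */
	if (!(sinfo.filled & BIT(NL80211_STA_INFO_EXPECTED_THROUGHPUT)))
		goto default_throughput;	/* driver did not fill the field */

	return sinfo.expected_throughput / 100;	/* kbit/s -> 100 kbit/s units */

The remaining two hunks are buffer hygiene: the probe bytes appended with skb_put() are now zeroed with memset(), and the ELP skb is sized (and cleared) with an extra sizeof(__be32) of TVLV padding.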
1700     diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
1701     index f435435b447e..b0cae59bd327 100644
1702     --- a/net/batman-adv/bat_v_ogm.c
1703     +++ b/net/batman-adv/bat_v_ogm.c
1704     @@ -28,6 +28,8 @@
1705     #include <linux/kernel.h>
1706     #include <linux/kref.h>
1707     #include <linux/list.h>
1708     +#include <linux/lockdep.h>
1709     +#include <linux/mutex.h>
1710     #include <linux/netdevice.h>
1711     #include <linux/random.h>
1712     #include <linux/rculist.h>
1713     @@ -127,22 +129,19 @@ static void batadv_v_ogm_send_to_if(struct sk_buff *skb,
1714     }
1715    
1716     /**
1717     - * batadv_v_ogm_send - periodic worker broadcasting the own OGM
1718     - * @work: work queue item
1719     + * batadv_v_ogm_send_softif() - periodic worker broadcasting the own OGM
1720     + * @bat_priv: the bat priv with all the soft interface information
1721     */
1722     -static void batadv_v_ogm_send(struct work_struct *work)
1723     +static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
1724     {
1725     struct batadv_hard_iface *hard_iface;
1726     - struct batadv_priv_bat_v *bat_v;
1727     - struct batadv_priv *bat_priv;
1728     struct batadv_ogm2_packet *ogm_packet;
1729     struct sk_buff *skb, *skb_tmp;
1730     unsigned char *ogm_buff, *pkt_buff;
1731     int ogm_buff_len;
1732     u16 tvlv_len = 0;
1733    
1734     - bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
1735     - bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
1736     + lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
1737    
1738     if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
1739     goto out;
1740     @@ -209,6 +208,23 @@ out:
1741     return;
1742     }
1743    
1744     +/**
1745     + * batadv_v_ogm_send() - periodic worker broadcasting the own OGM
1746     + * @work: work queue item
1747     + */
1748     +static void batadv_v_ogm_send(struct work_struct *work)
1749     +{
1750     + struct batadv_priv_bat_v *bat_v;
1751     + struct batadv_priv *bat_priv;
1752     +
1753     + bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
1754     + bat_priv = container_of(bat_v, struct batadv_priv, bat_v);
1755     +
1756     + mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
1757     + batadv_v_ogm_send_softif(bat_priv);
1758     + mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
1759     +}
1760     +
1761     /**
1762     * batadv_v_ogm_iface_enable - prepare an interface for B.A.T.M.A.N. V
1763     * @hard_iface: the interface to prepare
1764     @@ -235,11 +251,15 @@ void batadv_v_ogm_primary_iface_set(struct batadv_hard_iface *primary_iface)
1765     struct batadv_priv *bat_priv = netdev_priv(primary_iface->soft_iface);
1766     struct batadv_ogm2_packet *ogm_packet;
1767    
1768     + mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
1769     if (!bat_priv->bat_v.ogm_buff)
1770     - return;
1771     + goto unlock;
1772    
1773     ogm_packet = (struct batadv_ogm2_packet *)bat_priv->bat_v.ogm_buff;
1774     ether_addr_copy(ogm_packet->orig, primary_iface->net_dev->dev_addr);
1775     +
1776     +unlock:
1777     + mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
1778     }
1779    
1780     /**
1781     @@ -827,6 +847,8 @@ int batadv_v_ogm_init(struct batadv_priv *bat_priv)
1782     atomic_set(&bat_priv->bat_v.ogm_seqno, random_seqno);
1783     INIT_DELAYED_WORK(&bat_priv->bat_v.ogm_wq, batadv_v_ogm_send);
1784    
1785     + mutex_init(&bat_priv->bat_v.ogm_buff_mutex);
1786     +
1787     return 0;
1788     }
1789    
1790     @@ -838,7 +860,11 @@ void batadv_v_ogm_free(struct batadv_priv *bat_priv)
1791     {
1792     cancel_delayed_work_sync(&bat_priv->bat_v.ogm_wq);
1793    
1794     + mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
1795     +
1796     kfree(bat_priv->bat_v.ogm_buff);
1797     bat_priv->bat_v.ogm_buff = NULL;
1798     bat_priv->bat_v.ogm_buff_len = 0;
1799     +
1800     + mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
1801     }
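bat_v_ogm.c gets the same treatment at the soft-interface level: the delayed-work handler only resolves bat_priv and takes the mutex, while the actual OGM aggregation moves into a helper that asserts the lock. The shape, as a sketch mirroring the hunks above:

	static void batadv_v_ogm_send_softif(struct batadv_priv *bat_priv)
	{
		lockdep_assert_held(&bat_priv->bat_v.ogm_buff_mutex);
		/* ... build and broadcast the OGM from bat_v.ogm_buff ... */
	}

	static void batadv_v_ogm_send(struct work_struct *work)
	{
		struct batadv_priv_bat_v *bat_v;
		struct batadv_priv *bat_priv;

		bat_v = container_of(work, struct batadv_priv_bat_v, ogm_wq.work);
		bat_priv = container_of(bat_v, struct batadv_priv, bat_v);

		mutex_lock(&bat_priv->bat_v.ogm_buff_mutex);
		batadv_v_ogm_send_softif(bat_priv);
		mutex_unlock(&bat_priv->bat_v.ogm_buff_mutex);
	}

batadv_v_ogm_init() initializes the mutex, and batadv_v_ogm_free() plus batadv_v_ogm_primary_iface_set() take it around every ogm_buff access.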
1802     diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
1803     index b4ffba7dd583..e0ab277db503 100644
1804     --- a/net/batman-adv/debugfs.c
1805     +++ b/net/batman-adv/debugfs.c
1806     @@ -18,6 +18,7 @@
1807     #include "debugfs.h"
1808     #include "main.h"
1809    
1810     +#include <linux/dcache.h>
1811     #include <linux/debugfs.h>
1812     #include <linux/device.h>
1813     #include <linux/errno.h>
1814     @@ -339,6 +340,25 @@ out:
1815     return -ENOMEM;
1816     }
1817    
1818     +/**
1819     + * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
1820     + * @hard_iface: hard interface which was renamed
1821     + */
1822     +void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
1823     +{
1824     + const char *name = hard_iface->net_dev->name;
1825     + struct dentry *dir;
1826     + struct dentry *d;
1827     +
1828     + dir = hard_iface->debug_dir;
1829     + if (!dir)
1830     + return;
1831     +
1832     + d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
1833     + if (!d)
1834     + pr_err("Can't rename debugfs dir to %s\n", name);
1835     +}
1836     +
1837     /**
1838     * batadv_debugfs_del_hardif - delete the base directory for a hard interface
1839     * in debugfs.
1840     @@ -403,6 +423,26 @@ out:
1841     return -ENOMEM;
1842     }
1843    
1844     +/**
1845     + * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
1846     + * @dev: net_device which was renamed
1847     + */
1848     +void batadv_debugfs_rename_meshif(struct net_device *dev)
1849     +{
1850     + struct batadv_priv *bat_priv = netdev_priv(dev);
1851     + const char *name = dev->name;
1852     + struct dentry *dir;
1853     + struct dentry *d;
1854     +
1855     + dir = bat_priv->debug_dir;
1856     + if (!dir)
1857     + return;
1858     +
1859     + d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
1860     + if (!d)
1861     + pr_err("Can't rename debugfs dir to %s\n", name);
1862     +}
1863     +
1864     void batadv_debugfs_del_meshif(struct net_device *dev)
1865     {
1866     struct batadv_priv *bat_priv = netdev_priv(dev);
1867     diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
1868     index e49121ee55f6..59a0d6d70ecd 100644
1869     --- a/net/batman-adv/debugfs.h
1870     +++ b/net/batman-adv/debugfs.h
1871     @@ -29,8 +29,10 @@ struct net_device;
1872     void batadv_debugfs_init(void);
1873     void batadv_debugfs_destroy(void);
1874     int batadv_debugfs_add_meshif(struct net_device *dev);
1875     +void batadv_debugfs_rename_meshif(struct net_device *dev);
1876     void batadv_debugfs_del_meshif(struct net_device *dev);
1877     int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
1878     +void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
1879     void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
1880    
1881     #else
1882     @@ -48,6 +50,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
1883     return 0;
1884     }
1885    
1886     +static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
1887     +{
1888     +}
1889     +
1890     static inline void batadv_debugfs_del_meshif(struct net_device *dev)
1891     {
1892     }
1893     @@ -58,6 +64,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
1894     return 0;
1895     }
1896    
1897     +static inline
1898     +void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
1899     +{
1900     +}
1901     +
1902     static inline
1903     void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
1904     {
1905     diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
1906     index 3b440b8d7c05..83c7009b0da1 100644
1907     --- a/net/batman-adv/distributed-arp-table.c
1908     +++ b/net/batman-adv/distributed-arp-table.c
1909     @@ -1025,8 +1025,9 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
1910     skb_reset_mac_header(skb_new);
1911     skb_new->protocol = eth_type_trans(skb_new,
1912     bat_priv->soft_iface);
1913     - bat_priv->stats.rx_packets++;
1914     - bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
1915     + batadv_inc_counter(bat_priv, BATADV_CNT_RX);
1916     + batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
1917     + skb->len + ETH_HLEN + hdr_size);
1918     bat_priv->soft_iface->last_rx = jiffies;
1919    
1920     netif_rx(skb_new);
1921     diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
1922     index a06b6041f3e0..fef21f75892e 100644
1923     --- a/net/batman-adv/fragmentation.c
1924     +++ b/net/batman-adv/fragmentation.c
1925     @@ -232,8 +232,10 @@ err_unlock:
1926     spin_unlock_bh(&chain->lock);
1927    
1928     err:
1929     - if (!ret)
1930     + if (!ret) {
1931     kfree(frag_entry_new);
1932     + kfree_skb(skb);
1933     + }
1934    
1935     return ret;
1936     }
1937     @@ -305,7 +307,7 @@ free:
1938     *
1939     * There are three possible outcomes: 1) Packet is merged: Return true and
1940     * set *skb to merged packet; 2) Packet is buffered: Return true and set *skb
1941     - * to NULL; 3) Error: Return false and leave skb as is.
1942     + * to NULL; 3) Error: Return false and free skb.
1943     *
1944     * Return: true when packet is merged or buffered, false when skb is not not
1945     * used.
1946     @@ -330,9 +332,9 @@ bool batadv_frag_skb_buffer(struct sk_buff **skb,
1947     goto out_err;
1948    
1949     out:
1950     - *skb = skb_out;
1951     ret = true;
1952     out_err:
1953     + *skb = skb_out;
1954     return ret;
1955     }
1956    
1957     @@ -482,12 +484,20 @@ int batadv_frag_send_packet(struct sk_buff *skb,
1958     */
1959     if (skb->priority >= 256 && skb->priority <= 263)
1960     frag_header.priority = skb->priority - 256;
1961     + else
1962     + frag_header.priority = 0;
1963    
1964     ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr);
1965     ether_addr_copy(frag_header.dest, orig_node->orig);
1966    
1967     /* Eat and send fragments from the tail of skb */
1968     while (skb->len > max_fragment_size) {
1969     + /* The initial check in this function should cover this case */
1970     + if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
1971     + ret = -1;
1972     + goto out;
1973     + }
1974     +
1975     skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
1976     if (!skb_fragment)
1977     goto out;
1978     @@ -505,12 +515,6 @@ int batadv_frag_send_packet(struct sk_buff *skb,
1979     }
1980    
1981     frag_header.no++;
1982     -
1983     - /* The initial check in this function should cover this case */
1984     - if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
1985     - ret = -1;
1986     - goto out;
1987     - }
1988     }
1989    
1990     /* Make room for the fragment header. */
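Taken together, the fragmentation.c hunks change the ownership contract of batadv_frag_skb_buffer(): on failure the skb is now freed inside the helper (kfree_skb() in the insertion error path) and *skb is updated on every exit path (NULL when buffered or on error, the merged packet on a successful merge), so the caller must treat the buffer as consumed regardless of the return value. The matching caller-side pattern, as enforced in routing.c later in this same patch:

	/* batadv_frag_skb_buffer() always consumes the skb; never touch or
	 * free it after this call, whatever the return value is.
	 */
	ret = NET_RX_SUCCESS;
	if (!batadv_frag_skb_buffer(&skb, orig_node_src))
		goto out;	/* skb already freed by the helper */

The sending path also gains two small fixes: frag_header.priority is explicitly zeroed when skb->priority is outside the 256..263 range, and the BATADV_FRAG_MAX_FRAGMENTS limit is checked before creating the next fragment rather than after.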
1991     diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
1992     index ed9aaf30fbcf..3bd7ed6b6b3e 100644
1993     --- a/net/batman-adv/gateway_client.c
1994     +++ b/net/batman-adv/gateway_client.c
1995     @@ -31,6 +31,7 @@
1996     #include <linux/kernel.h>
1997     #include <linux/kref.h>
1998     #include <linux/list.h>
1999     +#include <linux/lockdep.h>
2000     #include <linux/netdevice.h>
2001     #include <linux/netlink.h>
2002     #include <linux/rculist.h>
2003     @@ -325,6 +326,9 @@ out:
2004     * @bat_priv: the bat priv with all the soft interface information
2005     * @orig_node: originator announcing gateway capabilities
2006     * @gateway: announced bandwidth information
2007     + *
2008     + * Has to be called with the appropriate locks being acquired
2009     + * (gw.list_lock).
2010     */
2011     static void batadv_gw_node_add(struct batadv_priv *bat_priv,
2012     struct batadv_orig_node *orig_node,
2013     @@ -332,6 +336,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
2014     {
2015     struct batadv_gw_node *gw_node;
2016    
2017     + lockdep_assert_held(&bat_priv->gw.list_lock);
2018     +
2019     if (gateway->bandwidth_down == 0)
2020     return;
2021    
2022     @@ -346,10 +352,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
2023     gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
2024     gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
2025    
2026     - spin_lock_bh(&bat_priv->gw.list_lock);
2027     kref_get(&gw_node->refcount);
2028     hlist_add_head_rcu(&gw_node->list, &bat_priv->gw.list);
2029     - spin_unlock_bh(&bat_priv->gw.list_lock);
2030    
2031     batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
2032     "Found new gateway %pM -> gw bandwidth: %u.%u/%u.%u MBit\n",
2033     @@ -404,11 +408,14 @@ void batadv_gw_node_update(struct batadv_priv *bat_priv,
2034     {
2035     struct batadv_gw_node *gw_node, *curr_gw = NULL;
2036    
2037     + spin_lock_bh(&bat_priv->gw.list_lock);
2038     gw_node = batadv_gw_node_get(bat_priv, orig_node);
2039     if (!gw_node) {
2040     batadv_gw_node_add(bat_priv, orig_node, gateway);
2041     + spin_unlock_bh(&bat_priv->gw.list_lock);
2042     goto out;
2043     }
2044     + spin_unlock_bh(&bat_priv->gw.list_lock);
2045    
2046     if ((gw_node->bandwidth_down == ntohl(gateway->bandwidth_down)) &&
2047     (gw_node->bandwidth_up == ntohl(gateway->bandwidth_up)))
2048     diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
2049     index 21184810d89f..3e3f91ab694f 100644
2050     --- a/net/batman-adv/gateway_common.c
2051     +++ b/net/batman-adv/gateway_common.c
2052     @@ -253,6 +253,11 @@ static void batadv_gw_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
2053     */
2054     void batadv_gw_init(struct batadv_priv *bat_priv)
2055     {
2056     + if (bat_priv->algo_ops->gw.init_sel_class)
2057     + bat_priv->algo_ops->gw.init_sel_class(bat_priv);
2058     + else
2059     + atomic_set(&bat_priv->gw.sel_class, 1);
2060     +
2061     batadv_tvlv_handler_register(bat_priv, batadv_gw_tvlv_ogm_handler_v1,
2062     NULL, BATADV_TVLV_GW, 1,
2063     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
2064     diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
2065     index 8f7883b7d717..f528761674df 100644
2066     --- a/net/batman-adv/hard-interface.c
2067     +++ b/net/batman-adv/hard-interface.c
2068     @@ -28,6 +28,7 @@
2069     #include <linux/kernel.h>
2070     #include <linux/kref.h>
2071     #include <linux/list.h>
2072     +#include <linux/mutex.h>
2073     #include <linux/netdevice.h>
2074     #include <linux/printk.h>
2075     #include <linux/rculist.h>
2076     @@ -539,6 +540,11 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
2077     hard_iface->soft_iface = soft_iface;
2078     bat_priv = netdev_priv(hard_iface->soft_iface);
2079    
2080     + if (bat_priv->num_ifaces >= UINT_MAX) {
2081     + ret = -ENOSPC;
2082     + goto err_dev;
2083     + }
2084     +
2085     ret = netdev_master_upper_dev_link(hard_iface->net_dev,
2086     soft_iface, NULL, NULL);
2087     if (ret)
2088     @@ -591,6 +597,9 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
2089    
2090     batadv_hardif_recalc_extra_skbroom(soft_iface);
2091    
2092     + if (bat_priv->algo_ops->iface.enabled)
2093     + bat_priv->algo_ops->iface.enabled(hard_iface);
2094     +
2095     out:
2096     return 0;
2097    
2098     @@ -646,7 +655,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface,
2099     batadv_hardif_recalc_extra_skbroom(hard_iface->soft_iface);
2100    
2101     /* nobody uses this interface anymore */
2102     - if (!bat_priv->num_ifaces) {
2103     + if (bat_priv->num_ifaces == 0) {
2104     batadv_gw_check_client_stop(bat_priv);
2105    
2106     if (autodel == BATADV_IF_CLEANUP_AUTO)
2107     @@ -682,7 +691,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
2108     if (ret)
2109     goto free_if;
2110    
2111     - hard_iface->if_num = -1;
2112     + hard_iface->if_num = 0;
2113     hard_iface->net_dev = net_dev;
2114     hard_iface->soft_iface = NULL;
2115     hard_iface->if_status = BATADV_IF_NOT_IN_USE;
2116     @@ -694,6 +703,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
2117     INIT_LIST_HEAD(&hard_iface->list);
2118     INIT_HLIST_HEAD(&hard_iface->neigh_list);
2119    
2120     + mutex_init(&hard_iface->bat_iv.ogm_buff_mutex);
2121     spin_lock_init(&hard_iface->neigh_list_lock);
2122     kref_init(&hard_iface->refcount);
2123    
2124     @@ -750,6 +760,32 @@ void batadv_hardif_remove_interfaces(void)
2125     rtnl_unlock();
2126     }
2127    
2128     +/**
2129     + * batadv_hard_if_event_softif() - Handle events for soft interfaces
2130     + * @event: NETDEV_* event to handle
2131     + * @net_dev: net_device which generated an event
2132     + *
2133     + * Return: NOTIFY_* result
2134     + */
2135     +static int batadv_hard_if_event_softif(unsigned long event,
2136     + struct net_device *net_dev)
2137     +{
2138     + struct batadv_priv *bat_priv;
2139     +
2140     + switch (event) {
2141     + case NETDEV_REGISTER:
2142     + batadv_sysfs_add_meshif(net_dev);
2143     + bat_priv = netdev_priv(net_dev);
2144     + batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
2145     + break;
2146     + case NETDEV_CHANGENAME:
2147     + batadv_debugfs_rename_meshif(net_dev);
2148     + break;
2149     + }
2150     +
2151     + return NOTIFY_DONE;
2152     +}
2153     +
2154     static int batadv_hard_if_event(struct notifier_block *this,
2155     unsigned long event, void *ptr)
2156     {
2157     @@ -758,12 +794,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
2158     struct batadv_hard_iface *primary_if = NULL;
2159     struct batadv_priv *bat_priv;
2160    
2161     - if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
2162     - batadv_sysfs_add_meshif(net_dev);
2163     - bat_priv = netdev_priv(net_dev);
2164     - batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
2165     - return NOTIFY_DONE;
2166     - }
2167     + if (batadv_softif_is_valid(net_dev))
2168     + return batadv_hard_if_event_softif(event, net_dev);
2169    
2170     hard_iface = batadv_hardif_get_by_netdev(net_dev);
2171     if (!hard_iface && (event == NETDEV_REGISTER ||
2172     @@ -807,6 +839,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
2173     if (hard_iface == primary_if)
2174     batadv_primary_if_update_addr(bat_priv, NULL);
2175     break;
2176     + case NETDEV_CHANGENAME:
2177     + batadv_debugfs_rename_hardif(hard_iface);
2178     + break;
2179     default:
2180     break;
2181     }
2182     diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
2183     index 7c8d16086f0f..8466f83fc32f 100644
2184     --- a/net/batman-adv/originator.c
2185     +++ b/net/batman-adv/originator.c
2186     @@ -1495,7 +1495,7 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
2187     }
2188    
2189     int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
2190     - int max_if_num)
2191     + unsigned int max_if_num)
2192     {
2193     struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
2194     struct batadv_algo_ops *bao = bat_priv->algo_ops;
2195     @@ -1530,7 +1530,7 @@ err:
2196     }
2197    
2198     int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
2199     - int max_if_num)
2200     + unsigned int max_if_num)
2201     {
2202     struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
2203     struct batadv_hashtable *hash = bat_priv->orig_hash;
2204     diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
2205     index ebc56183f358..fab0b2cc141d 100644
2206     --- a/net/batman-adv/originator.h
2207     +++ b/net/batman-adv/originator.h
2208     @@ -78,9 +78,9 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset);
2209     int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb);
2210     int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset);
2211     int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
2212     - int max_if_num);
2213     + unsigned int max_if_num);
2214     int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
2215     - int max_if_num);
2216     + unsigned int max_if_num);
2217     struct batadv_orig_node_vlan *
2218     batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
2219     unsigned short vid);
2220     diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
2221     index 8b98609ebc1e..19059ae26e51 100644
2222     --- a/net/batman-adv/routing.c
2223     +++ b/net/batman-adv/routing.c
2224     @@ -930,7 +930,6 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
2225     bool is4addr;
2226    
2227     unicast_packet = (struct batadv_unicast_packet *)skb->data;
2228     - unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
2229    
2230     is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
2231     /* the caller function should have already pulled 2 bytes */
2232     @@ -951,9 +950,13 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
2233     if (!batadv_check_unicast_ttvn(bat_priv, skb, hdr_size))
2234     return NET_RX_DROP;
2235    
2236     + unicast_packet = (struct batadv_unicast_packet *)skb->data;
2237     +
2238     /* packet for me */
2239     if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
2240     if (is4addr) {
2241     + unicast_4addr_packet =
2242     + (struct batadv_unicast_4addr_packet *)skb->data;
2243     subtype = unicast_4addr_packet->subtype;
2244     batadv_dat_inc_counter(bat_priv, subtype);
2245    
2246     @@ -1080,6 +1083,12 @@ int batadv_recv_frag_packet(struct sk_buff *skb,
2247     batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_RX);
2248     batadv_add_counter(bat_priv, BATADV_CNT_FRAG_RX_BYTES, skb->len);
2249    
2250     + /* batadv_frag_skb_buffer will always consume the skb and
2251     + * the caller should therefore never try to free the
2252     + * skb after this point
2253     + */
2254     + ret = NET_RX_SUCCESS;
2255     +
2256     /* Add fragment to buffer and merge if possible. */
2257     if (!batadv_frag_skb_buffer(&skb, orig_node_src))
2258     goto out;
2259     diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
2260     index a92512a46e91..99d2c453c872 100644
2261     --- a/net/batman-adv/soft-interface.c
2262     +++ b/net/batman-adv/soft-interface.c
2263     @@ -808,7 +808,6 @@ static int batadv_softif_init_late(struct net_device *dev)
2264     atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
2265     #endif
2266     atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
2267     - atomic_set(&bat_priv->gw.sel_class, 20);
2268     atomic_set(&bat_priv->gw.bandwidth_down, 100);
2269     atomic_set(&bat_priv->gw.bandwidth_up, 20);
2270     atomic_set(&bat_priv->orig_interval, 1000);
2271     diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
2272     index 1fab9bcf535d..d40d83949b00 100644
2273     --- a/net/batman-adv/translation-table.c
2274     +++ b/net/batman-adv/translation-table.c
2275     @@ -867,7 +867,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
2276     struct batadv_orig_node_vlan *vlan;
2277     u8 *tt_change_ptr;
2278    
2279     - rcu_read_lock();
2280     + spin_lock_bh(&orig_node->vlan_list_lock);
2281     hlist_for_each_entry_rcu(vlan, &orig_node->vlan_list, list) {
2282     num_vlan++;
2283     num_entries += atomic_read(&vlan->tt.num_entries);
2284     @@ -905,7 +905,7 @@ batadv_tt_prepare_tvlv_global_data(struct batadv_orig_node *orig_node,
2285     *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
2286    
2287     out:
2288     - rcu_read_unlock();
2289     + spin_unlock_bh(&orig_node->vlan_list_lock);
2290     return tvlv_len;
2291     }
2292    
2293     @@ -936,15 +936,20 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
2294     struct batadv_tvlv_tt_vlan_data *tt_vlan;
2295     struct batadv_softif_vlan *vlan;
2296     u16 num_vlan = 0;
2297     - u16 num_entries = 0;
2298     + u16 vlan_entries = 0;
2299     + u16 total_entries = 0;
2300     u16 tvlv_len;
2301     u8 *tt_change_ptr;
2302     int change_offset;
2303    
2304     - rcu_read_lock();
2305     + spin_lock_bh(&bat_priv->softif_vlan_list_lock);
2306     hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
2307     + vlan_entries = atomic_read(&vlan->tt.num_entries);
2308     + if (vlan_entries < 1)
2309     + continue;
2310     +
2311     num_vlan++;
2312     - num_entries += atomic_read(&vlan->tt.num_entries);
2313     + total_entries += vlan_entries;
2314     }
2315    
2316     change_offset = sizeof(**tt_data);
2317     @@ -952,7 +957,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
2318    
2319     /* if tt_len is negative, allocate the space needed by the full table */
2320     if (*tt_len < 0)
2321     - *tt_len = batadv_tt_len(num_entries);
2322     + *tt_len = batadv_tt_len(total_entries);
2323    
2324     tvlv_len = *tt_len;
2325     tvlv_len += change_offset;
2326     @@ -969,6 +974,10 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
2327    
2328     tt_vlan = (struct batadv_tvlv_tt_vlan_data *)(*tt_data + 1);
2329     hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
2330     + vlan_entries = atomic_read(&vlan->tt.num_entries);
2331     + if (vlan_entries < 1)
2332     + continue;
2333     +
2334     tt_vlan->vid = htons(vlan->vid);
2335     tt_vlan->crc = htonl(vlan->tt.crc);
2336    
2337     @@ -979,7 +988,7 @@ batadv_tt_prepare_tvlv_local_data(struct batadv_priv *bat_priv,
2338     *tt_change = (struct batadv_tvlv_tt_change *)tt_change_ptr;
2339    
2340     out:
2341     - rcu_read_unlock();
2342     + spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
2343     return tvlv_len;
2344     }
2345    
2346     @@ -1539,6 +1548,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
2347     * by a given originator
2348     * @entry: the TT global entry to check
2349     * @orig_node: the originator to search in the list
2350     + * @flags: a pointer to store TT flags for the given @entry received
2351     + * from @orig_node
2352     *
2353     * find out if an orig_node is already in the list of a tt_global_entry.
2354     *
2355     @@ -1546,7 +1557,8 @@ batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
2356     */
2357     static bool
2358     batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
2359     - const struct batadv_orig_node *orig_node)
2360     + const struct batadv_orig_node *orig_node,
2361     + u8 *flags)
2362     {
2363     struct batadv_tt_orig_list_entry *orig_entry;
2364     bool found = false;
2365     @@ -1554,15 +1566,51 @@ batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
2366     orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
2367     if (orig_entry) {
2368     found = true;
2369     +
2370     + if (flags)
2371     + *flags = orig_entry->flags;
2372     +
2373     batadv_tt_orig_list_entry_put(orig_entry);
2374     }
2375    
2376     return found;
2377     }
2378    
2379     +/**
2380     + * batadv_tt_global_sync_flags - update TT sync flags
2381     + * @tt_global: the TT global entry to update sync flags in
2382     + *
2383     + * Updates the sync flag bits in the tt_global flag attribute with a logical
2384     + * OR of all sync flags from any of its TT orig entries.
2385     + */
2386     +static void
2387     +batadv_tt_global_sync_flags(struct batadv_tt_global_entry *tt_global)
2388     +{
2389     + struct batadv_tt_orig_list_entry *orig_entry;
2390     + const struct hlist_head *head;
2391     + u16 flags = BATADV_NO_FLAGS;
2392     +
2393     + rcu_read_lock();
2394     + head = &tt_global->orig_list;
2395     + hlist_for_each_entry_rcu(orig_entry, head, list)
2396     + flags |= orig_entry->flags;
2397     + rcu_read_unlock();
2398     +
2399     + flags |= tt_global->common.flags & (~BATADV_TT_SYNC_MASK);
2400     + tt_global->common.flags = flags;
2401     +}
2402     +
2403     +/**
2404     + * batadv_tt_global_orig_entry_add - add or update a TT orig entry
2405     + * @tt_global: the TT global entry to add an orig entry in
2406     + * @orig_node: the originator to add an orig entry for
2407     + * @ttvn: translation table version number of this changeset
2408     + * @flags: TT sync flags
2409     + */
2410     static void
2411     batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
2412     - struct batadv_orig_node *orig_node, int ttvn)
2413     + struct batadv_orig_node *orig_node, int ttvn,
2414     + u8 flags)
2415     {
2416     struct batadv_tt_orig_list_entry *orig_entry;
2417    
2418     @@ -1574,7 +1622,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
2419     * was added during a "temporary client detection"
2420     */
2421     orig_entry->ttvn = ttvn;
2422     - goto out;
2423     + orig_entry->flags = flags;
2424     + goto sync_flags;
2425     }
2426    
2427     orig_entry = kmem_cache_zalloc(batadv_tt_orig_cache, GFP_ATOMIC);
2428     @@ -1586,6 +1635,7 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
2429     batadv_tt_global_size_inc(orig_node, tt_global->common.vid);
2430     orig_entry->orig_node = orig_node;
2431     orig_entry->ttvn = ttvn;
2432     + orig_entry->flags = flags;
2433     kref_init(&orig_entry->refcount);
2434    
2435     kref_get(&orig_entry->refcount);
2436     @@ -1593,6 +1643,8 @@ batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
2437     &tt_global->orig_list);
2438     atomic_inc(&tt_global->orig_list_count);
2439    
2440     +sync_flags:
2441     + batadv_tt_global_sync_flags(tt_global);
2442     out:
2443     if (orig_entry)
2444     batadv_tt_orig_list_entry_put(orig_entry);
2445     @@ -1656,7 +1708,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
2446     ether_addr_copy(common->addr, tt_addr);
2447     common->vid = vid;
2448    
2449     - common->flags = flags;
2450     + if (!is_multicast_ether_addr(common->addr))
2451     + common->flags = flags & (~BATADV_TT_SYNC_MASK);
2452     +
2453     tt_global_entry->roam_at = 0;
2454     /* node must store current time in case of roaming. This is
2455     * needed to purge this entry out on timeout (if nobody claims
2456     @@ -1698,7 +1752,7 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
2457     if (!(common->flags & BATADV_TT_CLIENT_TEMP))
2458     goto out;
2459     if (batadv_tt_global_entry_has_orig(tt_global_entry,
2460     - orig_node))
2461     + orig_node, NULL))
2462     goto out_remove;
2463     batadv_tt_global_del_orig_list(tt_global_entry);
2464     goto add_orig_entry;
2465     @@ -1716,10 +1770,11 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
2466     }
2467    
2468     /* the change can carry possible "attribute" flags like the
2469     - * TT_CLIENT_WIFI, therefore they have to be copied in the
2470     + * TT_CLIENT_TEMP, therefore they have to be copied in the
2471     * client entry
2472     */
2473     - common->flags |= flags;
2474     + if (!is_multicast_ether_addr(common->addr))
2475     + common->flags |= flags & (~BATADV_TT_SYNC_MASK);
2476    
2477     /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
2478     * one originator left in the list and we previously received a
2479     @@ -1736,7 +1791,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
2480     }
2481     add_orig_entry:
2482     /* add the new orig_entry (if needed) or update it */
2483     - batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
2484     + batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
2485     + flags & BATADV_TT_SYNC_MASK);
2486    
2487     batadv_dbg(BATADV_DBG_TT, bat_priv,
2488     "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
2489     @@ -1959,6 +2015,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
2490     struct batadv_tt_orig_list_entry *orig,
2491     bool best)
2492     {
2493     + u16 flags = (common->flags & (~BATADV_TT_SYNC_MASK)) | orig->flags;
2494     void *hdr;
2495     struct batadv_orig_node_vlan *vlan;
2496     u8 last_ttvn;
2497     @@ -1988,7 +2045,7 @@ batadv_tt_global_dump_subentry(struct sk_buff *msg, u32 portid, u32 seq,
2498     nla_put_u8(msg, BATADV_ATTR_TT_LAST_TTVN, last_ttvn) ||
2499     nla_put_u32(msg, BATADV_ATTR_TT_CRC32, crc) ||
2500     nla_put_u16(msg, BATADV_ATTR_TT_VID, common->vid) ||
2501     - nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, common->flags))
2502     + nla_put_u32(msg, BATADV_ATTR_TT_FLAGS, flags))
2503     goto nla_put_failure;
2504    
2505     if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
2506     @@ -2602,6 +2659,7 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
2507     unsigned short vid)
2508     {
2509     struct batadv_hashtable *hash = bat_priv->tt.global_hash;
2510     + struct batadv_tt_orig_list_entry *tt_orig;
2511     struct batadv_tt_common_entry *tt_common;
2512     struct batadv_tt_global_entry *tt_global;
2513     struct hlist_head *head;
2514     @@ -2640,8 +2698,9 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
2515     /* find out if this global entry is announced by this
2516     * originator
2517     */
2518     - if (!batadv_tt_global_entry_has_orig(tt_global,
2519     - orig_node))
2520     + tt_orig = batadv_tt_global_orig_entry_find(tt_global,
2521     + orig_node);
2522     + if (!tt_orig)
2523     continue;
2524    
2525     /* use network order to read the VID: this ensures that
2526     @@ -2653,10 +2712,12 @@ static u32 batadv_tt_global_crc(struct batadv_priv *bat_priv,
2527     /* compute the CRC on flags that have to be kept in sync
2528     * among nodes
2529     */
2530     - flags = tt_common->flags & BATADV_TT_SYNC_MASK;
2531     + flags = tt_orig->flags;
2532     crc_tmp = crc32c(crc_tmp, &flags, sizeof(flags));
2533    
2534     crc ^= crc32c(crc_tmp, tt_common->addr, ETH_ALEN);
2535     +
2536     + batadv_tt_orig_list_entry_put(tt_orig);
2537     }
2538     rcu_read_unlock();
2539     }
2540     @@ -2834,23 +2895,46 @@ unlock:
2541     }
2542    
2543     /**
2544     - * batadv_tt_local_valid - verify that given tt entry is a valid one
2545     + * batadv_tt_local_valid() - verify local tt entry and get flags
2546     * @entry_ptr: to be checked local tt entry
2547     * @data_ptr: not used but definition required to satisfy the callback prototype
2548     + * @flags: a pointer to store TT flags for this client to
2549     + *
2550     + * Checks the validity of the given local TT entry. If it is, then the provided
2551     + * flags pointer is updated.
2552     *
2553     * Return: true if the entry is a valid, false otherwise.
2554     */
2555     -static bool batadv_tt_local_valid(const void *entry_ptr, const void *data_ptr)
2556     +static bool batadv_tt_local_valid(const void *entry_ptr,
2557     + const void *data_ptr,
2558     + u8 *flags)
2559     {
2560     const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
2561    
2562     if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
2563     return false;
2564     +
2565     + if (flags)
2566     + *flags = tt_common_entry->flags;
2567     +
2568     return true;
2569     }
2570    
2571     +/**
2572     + * batadv_tt_global_valid() - verify global tt entry and get flags
2573     + * @entry_ptr: to be checked global tt entry
2574     + * @data_ptr: an orig_node object (may be NULL)
2575     + * @flags: a pointer to store TT flags for this client to
2576     + *
2577     + * Checks the validity of the given global TT entry. If it is, then the provided
2578     + * flags pointer is updated either with the common (summed) TT flags if data_ptr
2579     + * is NULL or the specific, per originator TT flags otherwise.
2580     + *
2581     + * Return: true if the entry is a valid, false otherwise.
2582     + */
2583     static bool batadv_tt_global_valid(const void *entry_ptr,
2584     - const void *data_ptr)
2585     + const void *data_ptr,
2586     + u8 *flags)
2587     {
2588     const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
2589     const struct batadv_tt_global_entry *tt_global_entry;
2590     @@ -2864,7 +2948,8 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
2591     struct batadv_tt_global_entry,
2592     common);
2593    
2594     - return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
2595     + return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node,
2596     + flags);
2597     }
2598    
2599     /**
2600     @@ -2874,25 +2959,34 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
2601     * @hash: hash table containing the tt entries
2602     * @tt_len: expected tvlv tt data buffer length in number of bytes
2603     * @tvlv_buff: pointer to the buffer to fill with the TT data
2604     - * @valid_cb: function to filter tt change entries
2605     + * @valid_cb: function to filter tt change entries and to return TT flags
2606     * @cb_data: data passed to the filter function as argument
2607     + *
2608     + * Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
2609     + * is not provided then this becomes a no-op.
2610     */
2611     static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2612     struct batadv_hashtable *hash,
2613     void *tvlv_buff, u16 tt_len,
2614     bool (*valid_cb)(const void *,
2615     - const void *),
2616     + const void *,
2617     + u8 *flags),
2618     void *cb_data)
2619     {
2620     struct batadv_tt_common_entry *tt_common_entry;
2621     struct batadv_tvlv_tt_change *tt_change;
2622     struct hlist_head *head;
2623     u16 tt_tot, tt_num_entries = 0;
2624     + u8 flags;
2625     + bool ret;
2626     u32 i;
2627    
2628     tt_tot = batadv_tt_entries(tt_len);
2629     tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
2630    
2631     + if (!valid_cb)
2632     + return;
2633     +
2634     rcu_read_lock();
2635     for (i = 0; i < hash->size; i++) {
2636     head = &hash->table[i];
2637     @@ -2902,11 +2996,12 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
2638     if (tt_tot == tt_num_entries)
2639     break;
2640    
2641     - if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
2642     + ret = valid_cb(tt_common_entry, cb_data, &flags);
2643     + if (!ret)
2644     continue;
2645    
2646     ether_addr_copy(tt_change->addr, tt_common_entry->addr);
2647     - tt_change->flags = tt_common_entry->flags;
2648     + tt_change->flags = flags;
2649     tt_change->vid = htons(tt_common_entry->vid);
2650     memset(tt_change->reserved, 0,
2651     sizeof(tt_change->reserved));
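The translation-table hunks keep the TT sync flags (the bits covered by BATADV_TT_SYNC_MASK) per originator entry instead of only in the summed common.flags, and rebuild the summary with the new batadv_tt_global_sync_flags(). Conceptually, as a sketch of that recomputation (the real code walks orig_list under rcu_read_lock()):

	u16 flags = BATADV_NO_FLAGS;

	/* OR together the sync flags announced by each originator ... */
	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list)
		flags |= orig_entry->flags;

	/* ... and keep the non-sync bits already stored in common.flags */
	flags |= tt_global->common.flags & ~BATADV_TT_SYNC_MASK;
	tt_global->common.flags = flags;

CRC calculation, TVLV generation and the netlink dump are then fed from the per-orig flags (via batadv_tt_global_orig_entry_find() and the new flags out-parameter of the valid_cb callbacks), so each neighbour is checked against exactly the sync flags it announced instead of a mixture from all originators.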
2652     diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
2653     index b3dd1a381aad..c17b74e51fe9 100644
2654     --- a/net/batman-adv/types.h
2655     +++ b/net/batman-adv/types.h
2656     @@ -27,6 +27,7 @@
2657     #include <linux/compiler.h>
2658     #include <linux/if_ether.h>
2659     #include <linux/kref.h>
2660     +#include <linux/mutex.h>
2661     #include <linux/netdevice.h>
2662     #include <linux/netlink.h>
2663     #include <linux/sched.h> /* for linux/wait.h */
2664     @@ -81,11 +82,13 @@ enum batadv_dhcp_recipient {
2665     * @ogm_buff: buffer holding the OGM packet
2666     * @ogm_buff_len: length of the OGM packet buffer
2667     * @ogm_seqno: OGM sequence number - used to identify each OGM
2668     + * @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len
2669     */
2670     struct batadv_hard_iface_bat_iv {
2671     unsigned char *ogm_buff;
2672     int ogm_buff_len;
2673     atomic_t ogm_seqno;
2674     + struct mutex ogm_buff_mutex;
2675     };
2676    
2677     /**
2678     @@ -139,7 +142,7 @@ struct batadv_hard_iface_bat_v {
2679     */
2680     struct batadv_hard_iface {
2681     struct list_head list;
2682     - s16 if_num;
2683     + unsigned int if_num;
2684     char if_status;
2685     struct net_device *net_dev;
2686     u8 num_bcasts;
2687     @@ -966,12 +969,14 @@ struct batadv_softif_vlan {
2688     * @ogm_buff: buffer holding the OGM packet
2689     * @ogm_buff_len: length of the OGM packet buffer
2690     * @ogm_seqno: OGM sequence number - used to identify each OGM
2691     + * @ogm_buff_mutex: lock protecting ogm_buff and ogm_buff_len
2692     * @ogm_wq: workqueue used to schedule OGM transmissions
2693     */
2694     struct batadv_priv_bat_v {
2695     unsigned char *ogm_buff;
2696     int ogm_buff_len;
2697     atomic_t ogm_seqno;
2698     + struct mutex ogm_buff_mutex;
2699     struct delayed_work ogm_wq;
2700     };
2701    
2702     @@ -1060,7 +1065,7 @@ struct batadv_priv {
2703     atomic_t bcast_seqno;
2704     atomic_t bcast_queue_left;
2705     atomic_t batman_queue_left;
2706     - char num_ifaces;
2707     + unsigned int num_ifaces;
2708     struct kobject *mesh_obj;
2709     struct dentry *debug_dir;
2710     struct hlist_head forw_bat_list;
2711     @@ -1241,6 +1246,7 @@ struct batadv_tt_global_entry {
2712     * struct batadv_tt_orig_list_entry - orig node announcing a non-mesh client
2713     * @orig_node: pointer to orig node announcing this non-mesh client
2714     * @ttvn: translation table version number which added the non-mesh client
2715     + * @flags: per orig entry TT sync flags
2716     * @list: list node for batadv_tt_global_entry::orig_list
2717     * @refcount: number of contexts the object is used
2718     * @rcu: struct used for freeing in an RCU-safe manner
2719     @@ -1248,6 +1254,7 @@ struct batadv_tt_global_entry {
2720     struct batadv_tt_orig_list_entry {
2721     struct batadv_orig_node *orig_node;
2722     u8 ttvn;
2723     + u8 flags;
2724     struct hlist_node list;
2725     struct kref refcount;
2726     struct rcu_head rcu;
2727     @@ -1397,6 +1404,7 @@ struct batadv_forw_packet {
2728     * @activate: start routing mechanisms when hard-interface is brought up
2729     * (optional)
2730     * @enable: init routing info when hard-interface is enabled
2731     + * @enabled: notification when hard-interface was enabled (optional)
2732     * @disable: de-init routing info when hard-interface is disabled
2733     * @update_mac: (re-)init mac addresses of the protocol information
2734     * belonging to this hard-interface
2735     @@ -1405,6 +1413,7 @@ struct batadv_forw_packet {
2736     struct batadv_algo_iface_ops {
2737     void (*activate)(struct batadv_hard_iface *hard_iface);
2738     int (*enable)(struct batadv_hard_iface *hard_iface);
2739     + void (*enabled)(struct batadv_hard_iface *hard_iface);
2740     void (*disable)(struct batadv_hard_iface *hard_iface);
2741     void (*update_mac)(struct batadv_hard_iface *hard_iface);
2742     void (*primary_set)(struct batadv_hard_iface *hard_iface);
2743     @@ -1452,9 +1461,10 @@ struct batadv_algo_neigh_ops {
2744     */
2745     struct batadv_algo_orig_ops {
2746     void (*free)(struct batadv_orig_node *orig_node);
2747     - int (*add_if)(struct batadv_orig_node *orig_node, int max_if_num);
2748     - int (*del_if)(struct batadv_orig_node *orig_node, int max_if_num,
2749     - int del_if_num);
2750     + int (*add_if)(struct batadv_orig_node *orig_node,
2751     + unsigned int max_if_num);
2752     + int (*del_if)(struct batadv_orig_node *orig_node,
2753     + unsigned int max_if_num, unsigned int del_if_num);
2754     #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2755     void (*print)(struct batadv_priv *priv, struct seq_file *seq,
2756     struct batadv_hard_iface *hard_iface);
2757     @@ -1466,6 +1476,7 @@ struct batadv_algo_orig_ops {
2758    
2759     /**
2760     * struct batadv_algo_gw_ops - mesh algorithm callbacks (GW specific)
2761     + * @init_sel_class: initialize GW selection class (optional)
2762     * @store_sel_class: parse and stores a new GW selection class (optional)
2763     * @show_sel_class: prints the current GW selection class (optional)
2764     * @get_best_gw_node: select the best GW from the list of available nodes
2765     @@ -1476,6 +1487,7 @@ struct batadv_algo_orig_ops {
2766     * @dump: dump gateways to a netlink socket (optional)
2767     */
2768     struct batadv_algo_gw_ops {
2769     + void (*init_sel_class)(struct batadv_priv *bat_priv);
2770     ssize_t (*store_sel_class)(struct batadv_priv *bat_priv, char *buff,
2771     size_t count);
2772     ssize_t (*show_sel_class)(struct batadv_priv *bat_priv, char *buff);
2773     diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
2774     index 2e4eef71471d..db65b0cdfc4c 100644
2775     --- a/net/core/netclassid_cgroup.c
2776     +++ b/net/core/netclassid_cgroup.c
2777     @@ -55,30 +55,60 @@ static void cgrp_css_free(struct cgroup_subsys_state *css)
2778     kfree(css_cls_state(css));
2779     }
2780    
2781     +/*
2782     + * To avoid freezing socket creation for tasks with a large number of threads
2783     + * and open sockets, release file_lock every 1000 iterated descriptors.
2784     + * New sockets will already have been created with the new classid.
2785     + */
2786     +
2787     +struct update_classid_context {
2788     + u32 classid;
2789     + unsigned int batch;
2790     +};
2791     +
2792     +#define UPDATE_CLASSID_BATCH 1000
2793     +
2794     static int update_classid_sock(const void *v, struct file *file, unsigned n)
2795     {
2796     int err;
2797     + struct update_classid_context *ctx = (void *)v;
2798     struct socket *sock = sock_from_file(file, &err);
2799    
2800     if (sock) {
2801     spin_lock(&cgroup_sk_update_lock);
2802     - sock_cgroup_set_classid(&sock->sk->sk_cgrp_data,
2803     - (unsigned long)v);
2804     + sock_cgroup_set_classid(&sock->sk->sk_cgrp_data, ctx->classid);
2805     spin_unlock(&cgroup_sk_update_lock);
2806     }
2807     + if (--ctx->batch == 0) {
2808     + ctx->batch = UPDATE_CLASSID_BATCH;
2809     + return n + 1;
2810     + }
2811     return 0;
2812     }
2813    
2814     +static void update_classid_task(struct task_struct *p, u32 classid)
2815     +{
2816     + struct update_classid_context ctx = {
2817     + .classid = classid,
2818     + .batch = UPDATE_CLASSID_BATCH
2819     + };
2820     + unsigned int fd = 0;
2821     +
2822     + do {
2823     + task_lock(p);
2824     + fd = iterate_fd(p->files, fd, update_classid_sock, &ctx);
2825     + task_unlock(p);
2826     + cond_resched();
2827     + } while (fd);
2828     +}
2829     +
2830     static void cgrp_attach(struct cgroup_taskset *tset)
2831     {
2832     struct cgroup_subsys_state *css;
2833     struct task_struct *p;
2834    
2835     cgroup_taskset_for_each(p, css, tset) {
2836     - task_lock(p);
2837     - iterate_fd(p->files, 0, update_classid_sock,
2838     - (void *)(unsigned long)css_cls_state(css)->classid);
2839     - task_unlock(p);
2840     + update_classid_task(p, css_cls_state(css)->classid);
2841     }
2842     }
2843    
2844     @@ -100,10 +130,7 @@ static int write_classid(struct cgroup_subsys_state *css, struct cftype *cft,
2845    
2846     css_task_iter_start(css, &it);
2847     while ((p = css_task_iter_next(&it))) {
2848     - task_lock(p);
2849     - iterate_fd(p->files, 0, update_classid_sock,
2850     - (void *)(unsigned long)cs->classid);
2851     - task_unlock(p);
2852     + update_classid_task(p, cs->classid);
2853     cond_resched();
2854     }
2855     css_task_iter_end(&it);
2856     diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
2857     index 35c432668454..040983fc15da 100644
2858     --- a/net/ieee802154/nl_policy.c
2859     +++ b/net/ieee802154/nl_policy.c
2860     @@ -30,7 +30,13 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
2861     [IEEE802154_ATTR_HW_ADDR] = { .type = NLA_HW_ADDR, },
2862     [IEEE802154_ATTR_PAN_ID] = { .type = NLA_U16, },
2863     [IEEE802154_ATTR_CHANNEL] = { .type = NLA_U8, },
2864     + [IEEE802154_ATTR_BCN_ORD] = { .type = NLA_U8, },
2865     + [IEEE802154_ATTR_SF_ORD] = { .type = NLA_U8, },
2866     + [IEEE802154_ATTR_PAN_COORD] = { .type = NLA_U8, },
2867     + [IEEE802154_ATTR_BAT_EXT] = { .type = NLA_U8, },
2868     + [IEEE802154_ATTR_COORD_REALIGN] = { .type = NLA_U8, },
2869     [IEEE802154_ATTR_PAGE] = { .type = NLA_U8, },
2870     + [IEEE802154_ATTR_DEV_TYPE] = { .type = NLA_U8, },
2871     [IEEE802154_ATTR_COORD_SHORT_ADDR] = { .type = NLA_U16, },
2872     [IEEE802154_ATTR_COORD_HW_ADDR] = { .type = NLA_HW_ADDR, },
2873     [IEEE802154_ATTR_COORD_PAN_ID] = { .type = NLA_U16, },
2874     diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
2875     index 71bcab94c5c7..0a6f72763beb 100644
2876     --- a/net/ipv4/cipso_ipv4.c
2877     +++ b/net/ipv4/cipso_ipv4.c
2878     @@ -1738,6 +1738,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
2879     {
2880     unsigned char optbuf[sizeof(struct ip_options) + 40];
2881     struct ip_options *opt = (struct ip_options *)optbuf;
2882     + int res;
2883    
2884     if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
2885     return;
2886     @@ -1749,7 +1750,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
2887    
2888     memset(opt, 0, sizeof(struct ip_options));
2889     opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
2890     - if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
2891     + rcu_read_lock();
2892     + res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
2893     + rcu_read_unlock();
2894     +
2895     + if (res)
2896     return;
2897    
2898     if (gateway)
2899     diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
2900     index 7efe740c06eb..4a5e55e94a9e 100644
2901     --- a/net/ipv4/gre_demux.c
2902     +++ b/net/ipv4/gre_demux.c
2903     @@ -60,7 +60,9 @@ int gre_del_protocol(const struct gre_protocol *proto, u8 version)
2904     }
2905     EXPORT_SYMBOL_GPL(gre_del_protocol);
2906    
2907     -/* Fills in tpi and returns header length to be pulled. */
2908     +/* Fills in tpi and returns header length to be pulled.
2909     + * Note that caller must use pskb_may_pull() before pulling GRE header.
2910     + */
2911     int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2912     bool *csum_err, __be16 proto, int nhs)
2913     {
2914     @@ -114,8 +116,14 @@ int gre_parse_header(struct sk_buff *skb, struct tnl_ptk_info *tpi,
2915     * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
2916     */
2917     if (greh->flags == 0 && tpi->proto == htons(ETH_P_WCCP)) {
2918     + u8 _val, *val;
2919     +
2920     + val = skb_header_pointer(skb, nhs + hdr_len,
2921     + sizeof(_val), &_val);
2922     + if (!val)
2923     + return -EINVAL;
2924     tpi->proto = proto;
2925     - if ((*(u8 *)options & 0xF0) != 0x40)
2926     + if ((*val & 0xF0) != 0x40)
2927     hdr_len += 4;
2928     }
2929     tpi->hdr_len = hdr_len;
2930     diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2931     index 6b1310d5e808..a4c00242a90b 100644
2932     --- a/net/ipv6/addrconf.c
2933     +++ b/net/ipv6/addrconf.c
2934     @@ -3189,6 +3189,10 @@ static void addrconf_dev_config(struct net_device *dev)
2935     (dev->type != ARPHRD_6LOWPAN) &&
2936     (dev->type != ARPHRD_NONE)) {
2937     /* Alas, we support only Ethernet autoconfiguration. */
2938     + idev = __in6_dev_get(dev);
2939     + if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
2940     + dev->flags & IFF_MULTICAST)
2941     + ipv6_mc_up(idev);
2942     return;
2943     }
2944    
2945     diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
2946     index 81fd35ed8732..1080770b5eaf 100644
2947     --- a/net/ipv6/ipv6_sockglue.c
2948     +++ b/net/ipv6/ipv6_sockglue.c
2949     @@ -184,9 +184,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
2950     retv = -EBUSY;
2951     break;
2952     }
2953     - } else if (sk->sk_protocol != IPPROTO_TCP)
2954     + } else if (sk->sk_protocol == IPPROTO_TCP) {
2955     + if (sk->sk_prot != &tcpv6_prot) {
2956     + retv = -EBUSY;
2957     + break;
2958     + }
2959     break;
2960     -
2961     + } else {
2962     + break;
2963     + }
2964     if (sk->sk_state != TCP_ESTABLISHED) {
2965     retv = -ENOTCONN;
2966     break;
2967     diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2968     index 74652eb2f90f..a6f265262f15 100644
2969     --- a/net/mac80211/rx.c
2970     +++ b/net/mac80211/rx.c
2971     @@ -3841,7 +3841,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
2972    
2973     lockdep_assert_held(&local->sta_mtx);
2974    
2975     - list_for_each_entry_rcu(sta, &local->sta_list, list) {
2976     + list_for_each_entry(sta, &local->sta_list, list) {
2977     if (sdata != sta->sdata &&
2978     (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
2979     continue;
2980     diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
2981     index 3f499126727c..8396dc8ee247 100644
2982     --- a/net/netfilter/nfnetlink_cthelper.c
2983     +++ b/net/netfilter/nfnetlink_cthelper.c
2984     @@ -711,6 +711,8 @@ static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
2985     [NFCTH_NAME] = { .type = NLA_NUL_STRING,
2986     .len = NF_CT_HELPER_NAME_LEN-1 },
2987     [NFCTH_QUEUE_NUM] = { .type = NLA_U32, },
2988     + [NFCTH_PRIV_DATA_LEN] = { .type = NLA_U32, },
2989     + [NFCTH_STATUS] = { .type = NLA_U32, },
2990     };
2991    
2992     static const struct nfnl_callback nfnl_cthelper_cb[NFNL_MSG_CTHELPER_MAX] = {
2993     diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
2994     index 5a58f9f38095..291f24fef19a 100644
2995     --- a/net/nfc/hci/core.c
2996     +++ b/net/nfc/hci/core.c
2997     @@ -193,13 +193,20 @@ exit:
2998     void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
2999     struct sk_buff *skb)
3000     {
3001     - u8 gate = hdev->pipes[pipe].gate;
3002     u8 status = NFC_HCI_ANY_OK;
3003     struct hci_create_pipe_resp *create_info;
3004     struct hci_delete_pipe_noti *delete_info;
3005     struct hci_all_pipe_cleared_noti *cleared_info;
3006     + u8 gate;
3007    
3008     - pr_debug("from gate %x pipe %x cmd %x\n", gate, pipe, cmd);
3009     + pr_debug("from pipe %x cmd %x\n", pipe, cmd);
3010     +
3011     + if (pipe >= NFC_HCI_MAX_PIPES) {
3012     + status = NFC_HCI_ANY_E_NOK;
3013     + goto exit;
3014     + }
3015     +
3016     + gate = hdev->pipes[pipe].gate;
3017    
3018     switch (cmd) {
3019     case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
3020     @@ -387,8 +394,14 @@ void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
3021     struct sk_buff *skb)
3022     {
3023     int r = 0;
3024     - u8 gate = hdev->pipes[pipe].gate;
3025     + u8 gate;
3026     +
3027     + if (pipe >= NFC_HCI_MAX_PIPES) {
3028     + pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
3029     + goto exit;
3030     + }
3031    
3032     + gate = hdev->pipes[pipe].gate;
3033     if (gate == NFC_HCI_INVALID_GATE) {
3034     pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
3035     goto exit;
3036     diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
3037     index d3c8dd5dc817..e79a49fe61e8 100644
3038     --- a/net/nfc/netlink.c
3039     +++ b/net/nfc/netlink.c
3040     @@ -62,7 +62,10 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
3041     [NFC_ATTR_LLC_SDP] = { .type = NLA_NESTED },
3042     [NFC_ATTR_FIRMWARE_NAME] = { .type = NLA_STRING,
3043     .len = NFC_FIRMWARE_NAME_MAXSIZE },
3044     + [NFC_ATTR_SE_INDEX] = { .type = NLA_U32 },
3045     [NFC_ATTR_SE_APDU] = { .type = NLA_BINARY },
3046     + [NFC_ATTR_VENDOR_ID] = { .type = NLA_U32 },
3047     + [NFC_ATTR_VENDOR_SUBCMD] = { .type = NLA_U32 },
3048     [NFC_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
3049    
3050     };
3051     diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
3052     index 7e7eba33bbdb..9f53d4ec0e37 100644
3053     --- a/net/sched/sch_fq.c
3054     +++ b/net/sched/sch_fq.c
3055     @@ -697,6 +697,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
3056     [TCA_FQ_FLOW_MAX_RATE] = { .type = NLA_U32 },
3057     [TCA_FQ_BUCKETS_LOG] = { .type = NLA_U32 },
3058     [TCA_FQ_FLOW_REFILL_DELAY] = { .type = NLA_U32 },
3059     + [TCA_FQ_ORPHAN_MASK] = { .type = NLA_U32 },
3060     [TCA_FQ_LOW_RATE_THRESHOLD] = { .type = NLA_U32 },
3061     };
3062    
3063     diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3064     index 9823bef65e5e..0048f90944dd 100644
3065     --- a/net/wireless/nl80211.c
3066     +++ b/net/wireless/nl80211.c
3067     @@ -359,6 +359,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3068     [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED },
3069     [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED },
3070     [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 },
3071     + [NL80211_ATTR_MEASUREMENT_DURATION] = { .type = NLA_U16 },
3072     + [NL80211_ATTR_MEASUREMENT_DURATION_MANDATORY] = { .type = NLA_FLAG },
3073     [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 },
3074     [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED },
3075     [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED },
3076     @@ -407,6 +409,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3077     [NL80211_ATTR_MDID] = { .type = NLA_U16 },
3078     [NL80211_ATTR_IE_RIC] = { .type = NLA_BINARY,
3079     .len = IEEE80211_MAX_DATA_LEN },
3080     + [NL80211_ATTR_CRIT_PROT_ID] = { .type = NLA_U16 },
3081     + [NL80211_ATTR_MAX_CRIT_PROT_DURATION] = { .type = NLA_U16 },
3082     [NL80211_ATTR_PEER_AID] = { .type = NLA_U16 },
3083     [NL80211_ATTR_CH_SWITCH_COUNT] = { .type = NLA_U32 },
3084     [NL80211_ATTR_CH_SWITCH_BLOCK_TX] = { .type = NLA_FLAG },
3085     @@ -432,6 +436,7 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
3086     [NL80211_ATTR_USER_PRIO] = { .type = NLA_U8 },
3087     [NL80211_ATTR_ADMITTED_TIME] = { .type = NLA_U16 },
3088     [NL80211_ATTR_SMPS_MODE] = { .type = NLA_U8 },
3089     + [NL80211_ATTR_OPER_CLASS] = { .type = NLA_U8 },
3090     [NL80211_ATTR_MAC_MASK] = { .len = ETH_ALEN },
3091     [NL80211_ATTR_WIPHY_SELF_MANAGED_REG] = { .type = NLA_FLAG },
3092     [NL80211_ATTR_NETNS_FD] = { .type = NLA_U32 },
3093     diff --git a/net/wireless/reg.c b/net/wireless/reg.c
3094     index 0e66768427ba..6d5f3f737207 100644
3095     --- a/net/wireless/reg.c
3096     +++ b/net/wireless/reg.c
3097     @@ -1730,7 +1730,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
3098     break;
3099     }
3100    
3101     - if (IS_ERR(reg_rule)) {
3102     + if (IS_ERR_OR_NULL(reg_rule)) {
3103     pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
3104     chan->center_freq);
3105     if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {