Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0373-4.9.274-all-fixes.patch

Parent Directory | Revision Log


Revision 3675 - (show annotations) (download)
Mon Oct 24 14:07:52 2022 UTC (18 months, 3 weeks ago) by niro
File size: 66553 byte(s)
-linux-4.9.274
1 diff --git a/Makefile b/Makefile
2 index e43823c3337f3..3002dfee32314 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 273
9 +SUBLEVEL = 274
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 @@ -718,12 +718,11 @@ KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
14 # See modpost pattern 2
15 KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
16 KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
17 -else
18 +endif
19
20 # These warnings generated too much noise in a regular build.
21 # Use make W=1 to enable them (see scripts/Makefile.extrawarn)
22 KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
23 -endif
24
25 KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
26 ifdef CONFIG_FRAME_POINTER
27 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
28 index 4764742db7b05..627889ea89eff 100644
29 --- a/arch/arm/kernel/setup.c
30 +++ b/arch/arm/kernel/setup.c
31 @@ -547,9 +547,11 @@ void notrace cpu_init(void)
32 * In Thumb-2, msr with an immediate value is not allowed.
33 */
34 #ifdef CONFIG_THUMB2_KERNEL
35 -#define PLC "r"
36 +#define PLC_l "l"
37 +#define PLC_r "r"
38 #else
39 -#define PLC "I"
40 +#define PLC_l "I"
41 +#define PLC_r "I"
42 #endif
43
44 /*
45 @@ -571,15 +573,15 @@ void notrace cpu_init(void)
46 "msr cpsr_c, %9"
47 :
48 : "r" (stk),
49 - PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
50 + PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
51 "I" (offsetof(struct stack, irq[0])),
52 - PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
53 + PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
54 "I" (offsetof(struct stack, abt[0])),
55 - PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
56 + PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
57 "I" (offsetof(struct stack, und[0])),
58 - PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
59 + PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
60 "I" (offsetof(struct stack, fiq[0])),
61 - PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
62 + PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
63 : "r14");
64 #endif
65 }
66 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
67 index 6b6fda65fb3b5..5eeecf83c9e6b 100644
68 --- a/arch/arm/mach-omap2/board-n8x0.c
69 +++ b/arch/arm/mach-omap2/board-n8x0.c
70 @@ -327,6 +327,7 @@ static int n8x0_mmc_get_cover_state(struct device *dev, int slot)
71
72 static void n8x0_mmc_callback(void *data, u8 card_mask)
73 {
74 +#ifdef CONFIG_MMC_OMAP
75 int bit, *openp, index;
76
77 if (board_is_n800()) {
78 @@ -344,7 +345,6 @@ static void n8x0_mmc_callback(void *data, u8 card_mask)
79 else
80 *openp = 0;
81
82 -#ifdef CONFIG_MMC_OMAP
83 omap_mmc_notify_cover_event(mmc_device, index, *openp);
84 #else
85 pr_warn("MMC: notify cover event not available\n");
86 diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
87 index 0770d6d1c37ff..7f95d6ac20110 100644
88 --- a/arch/arm64/kernel/perf_event.c
89 +++ b/arch/arm64/kernel/perf_event.c
90 @@ -748,6 +748,28 @@ static void armv8pmu_disable_event(struct perf_event *event)
91 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
92 }
93
94 +static void armv8pmu_start(struct arm_pmu *cpu_pmu)
95 +{
96 + unsigned long flags;
97 + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
98 +
99 + raw_spin_lock_irqsave(&events->pmu_lock, flags);
100 + /* Enable all counters */
101 + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
102 + raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
103 +}
104 +
105 +static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
106 +{
107 + unsigned long flags;
108 + struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
109 +
110 + raw_spin_lock_irqsave(&events->pmu_lock, flags);
111 + /* Disable all counters */
112 + armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
113 + raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
114 +}
115 +
116 static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
117 {
118 u32 pmovsr;
119 @@ -773,6 +795,11 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
120 */
121 regs = get_irq_regs();
122
123 + /*
124 + * Stop the PMU while processing the counter overflows
125 + * to prevent skews in group events.
126 + */
127 + armv8pmu_stop(cpu_pmu);
128 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
129 struct perf_event *event = cpuc->events[idx];
130 struct hw_perf_event *hwc;
131 @@ -797,6 +824,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
132 if (perf_event_overflow(event, &data, regs))
133 cpu_pmu->disable(event);
134 }
135 + armv8pmu_start(cpu_pmu);
136
137 /*
138 * Handle the pending perf events.
139 @@ -810,28 +838,6 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
140 return IRQ_HANDLED;
141 }
142
143 -static void armv8pmu_start(struct arm_pmu *cpu_pmu)
144 -{
145 - unsigned long flags;
146 - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
147 -
148 - raw_spin_lock_irqsave(&events->pmu_lock, flags);
149 - /* Enable all counters */
150 - armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
151 - raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
152 -}
153 -
154 -static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
155 -{
156 - unsigned long flags;
157 - struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
158 -
159 - raw_spin_lock_irqsave(&events->pmu_lock, flags);
160 - /* Disable all counters */
161 - armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
162 - raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
163 -}
164 -
165 static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
166 struct perf_event *event)
167 {
168 diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
169 index 769831d9fd114..07b0ebd495769 100644
170 --- a/arch/x86/kernel/fpu/signal.c
171 +++ b/arch/x86/kernel/fpu/signal.c
172 @@ -276,15 +276,23 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
173 return 0;
174 }
175
176 - if (!access_ok(VERIFY_READ, buf, size))
177 + if (!access_ok(VERIFY_READ, buf, size)) {
178 + fpu__clear(fpu);
179 return -EACCES;
180 + }
181
182 fpu__activate_curr(fpu);
183
184 - if (!static_cpu_has(X86_FEATURE_FPU))
185 - return fpregs_soft_set(current, NULL,
186 - 0, sizeof(struct user_i387_ia32_struct),
187 - NULL, buf) != 0;
188 + if (!static_cpu_has(X86_FEATURE_FPU)) {
189 + int ret = fpregs_soft_set(current, NULL, 0,
190 + sizeof(struct user_i387_ia32_struct),
191 + NULL, buf);
192 +
193 + if (ret)
194 + fpu__clear(fpu);
195 +
196 + return ret != 0;
197 + }
198
199 if (use_xsave()) {
200 struct _fpx_sw_bytes fx_sw_user;
201 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
202 index f5a9bb1231882..d8997dafb876c 100644
203 --- a/drivers/dma/pl330.c
204 +++ b/drivers/dma/pl330.c
205 @@ -2579,13 +2579,15 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
206 for (i = 0; i < len / period_len; i++) {
207 desc = pl330_get_desc(pch);
208 if (!desc) {
209 + unsigned long iflags;
210 +
211 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
212 __func__, __LINE__);
213
214 if (!first)
215 return NULL;
216
217 - spin_lock_irqsave(&pl330->pool_lock, flags);
218 + spin_lock_irqsave(&pl330->pool_lock, iflags);
219
220 while (!list_empty(&first->node)) {
221 desc = list_entry(first->node.next,
222 @@ -2595,7 +2597,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
223
224 list_move_tail(&first->node, &pl330->desc_pool);
225
226 - spin_unlock_irqrestore(&pl330->pool_lock, flags);
227 + spin_unlock_irqrestore(&pl330->pool_lock, iflags);
228
229 return NULL;
230 }
231 diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
232 index a7761c4025f41..a97c7123d913c 100644
233 --- a/drivers/dma/qcom/Kconfig
234 +++ b/drivers/dma/qcom/Kconfig
235 @@ -9,6 +9,7 @@ config QCOM_BAM_DMA
236
237 config QCOM_HIDMA_MGMT
238 tristate "Qualcomm Technologies HIDMA Management support"
239 + depends on HAS_IOMEM
240 select DMA_ENGINE
241 help
242 Enable support for the Qualcomm Technologies HIDMA Management.
243 diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
244 index 68b41daab3a8f..bf7105814ee72 100644
245 --- a/drivers/dma/ste_dma40.c
246 +++ b/drivers/dma/ste_dma40.c
247 @@ -3674,6 +3674,9 @@ static int __init d40_probe(struct platform_device *pdev)
248
249 kfree(base->lcla_pool.base_unaligned);
250
251 + if (base->lcpa_base)
252 + iounmap(base->lcpa_base);
253 +
254 if (base->phy_lcpa)
255 release_mem_region(base->phy_lcpa,
256 base->lcpa_size);
257 diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
258 index 16239b07ce45d..2610919eb709d 100644
259 --- a/drivers/gpu/drm/radeon/radeon_uvd.c
260 +++ b/drivers/gpu/drm/radeon/radeon_uvd.c
261 @@ -286,7 +286,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
262 if (rdev->uvd.vcpu_bo == NULL)
263 return -EINVAL;
264
265 - memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
266 + memcpy_toio((void __iomem *)rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
267
268 size = radeon_bo_size(rdev->uvd.vcpu_bo);
269 size -= rdev->uvd_fw->size;
270 @@ -294,7 +294,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
271 ptr = rdev->uvd.cpu_addr;
272 ptr += rdev->uvd_fw->size;
273
274 - memset(ptr, 0, size);
275 + memset_io((void __iomem *)ptr, 0, size);
276
277 return 0;
278 }
279 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
280 index 40b36e59a8676..a056850328ef4 100644
281 --- a/drivers/hid/hid-core.c
282 +++ b/drivers/hid/hid-core.c
283 @@ -1804,6 +1804,9 @@ int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
284 case BUS_I2C:
285 bus = "I2C";
286 break;
287 + case BUS_VIRTUAL:
288 + bus = "VIRTUAL";
289 + break;
290 default:
291 bus = "<UNKNOWN>";
292 }
293 diff --git a/drivers/hid/hid-gt683r.c b/drivers/hid/hid-gt683r.c
294 index a298fbd8db6b9..8ca4c1baeda89 100644
295 --- a/drivers/hid/hid-gt683r.c
296 +++ b/drivers/hid/hid-gt683r.c
297 @@ -64,6 +64,7 @@ static const struct hid_device_id gt683r_led_id[] = {
298 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
299 { }
300 };
301 +MODULE_DEVICE_TABLE(hid, gt683r_led_id);
302
303 static void gt683r_brightness_set(struct led_classdev *led_cdev,
304 enum led_brightness brightness)
305 diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
306 index 7001f07ca3996..4ea18f07c65b8 100644
307 --- a/drivers/hid/hid-sensor-hub.c
308 +++ b/drivers/hid/hid-sensor-hub.c
309 @@ -223,16 +223,21 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
310 buffer_size = buffer_size / sizeof(__s32);
311 if (buffer_size) {
312 for (i = 0; i < buffer_size; ++i) {
313 - hid_set_field(report->field[field_index], i,
314 - (__force __s32)cpu_to_le32(*buf32));
315 + ret = hid_set_field(report->field[field_index], i,
316 + (__force __s32)cpu_to_le32(*buf32));
317 + if (ret)
318 + goto done_proc;
319 +
320 ++buf32;
321 }
322 }
323 if (remaining_bytes) {
324 value = 0;
325 memcpy(&value, (u8 *)buf32, remaining_bytes);
326 - hid_set_field(report->field[field_index], i,
327 - (__force __s32)cpu_to_le32(value));
328 + ret = hid_set_field(report->field[field_index], i,
329 + (__force __s32)cpu_to_le32(value));
330 + if (ret)
331 + goto done_proc;
332 }
333 hid_hw_request(hsdev->hdev, report, HID_REQ_SET_REPORT);
334 hid_hw_wait(hsdev->hdev);
335 diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
336 index 7838343eb37c5..b6600329a272d 100644
337 --- a/drivers/hid/usbhid/hid-core.c
338 +++ b/drivers/hid/usbhid/hid-core.c
339 @@ -372,7 +372,7 @@ static int hid_submit_ctrl(struct hid_device *hid)
340 raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
341 dir = usbhid->ctrl[usbhid->ctrltail].dir;
342
343 - len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
344 + len = hid_report_len(report);
345 if (dir == USB_DIR_OUT) {
346 usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
347 usbhid->urbctrl->transfer_buffer_length = len;
348 diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
349 index 89d8b41b66680..032e8535e8604 100644
350 --- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
351 +++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
352 @@ -89,7 +89,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
353 }
354 }
355
356 - ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
357 + ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
358 if (ret) {
359 dev_err(&adapter->dev, "failure sending STOP\n");
360 return -EREMOTEIO;
361 @@ -159,7 +159,7 @@ static int osif_probe(struct usb_interface *interface,
362 * Set bus frequency. The frequency is:
363 * 120,000,000 / ( 16 + 2 * div * 4^prescale).
364 * Using dev = 52, prescale = 0 give 100KHz */
365 - ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
366 + ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
367 NULL, 0);
368 if (ret) {
369 dev_err(&interface->dev, "failure sending bit rate");
370 diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
371 index 32834dad0b836..1243c2e5a86a2 100644
372 --- a/drivers/net/caif/caif_serial.c
373 +++ b/drivers/net/caif/caif_serial.c
374 @@ -362,6 +362,7 @@ static int ldisc_open(struct tty_struct *tty)
375 rtnl_lock();
376 result = register_netdevice(dev);
377 if (result) {
378 + tty_kref_put(tty);
379 rtnl_unlock();
380 free_netdev(dev);
381 return -ENODEV;
382 diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
383 index 9de0f9f5b11ca..59af298f99e06 100644
384 --- a/drivers/net/ethernet/atheros/alx/main.c
385 +++ b/drivers/net/ethernet/atheros/alx/main.c
386 @@ -1653,6 +1653,7 @@ out_free_netdev:
387 free_netdev(netdev);
388 out_pci_release:
389 pci_release_mem_regions(pdev);
390 + pci_disable_pcie_error_reporting(pdev);
391 out_pci_disable:
392 pci_disable_device(pdev);
393 return err;
394 diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
395 index f7b42483921c5..0ade0c6d81ee3 100644
396 --- a/drivers/net/ethernet/ec_bhf.c
397 +++ b/drivers/net/ethernet/ec_bhf.c
398 @@ -589,10 +589,12 @@ static void ec_bhf_remove(struct pci_dev *dev)
399 struct ec_bhf_priv *priv = netdev_priv(net_dev);
400
401 unregister_netdev(net_dev);
402 - free_netdev(net_dev);
403
404 pci_iounmap(dev, priv->dma_io);
405 pci_iounmap(dev, priv->io);
406 +
407 + free_netdev(net_dev);
408 +
409 pci_release_regions(dev);
410 pci_clear_master(dev);
411 pci_disable_device(dev);
412 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
413 index 289560b0f6433..b0b9f77c37406 100644
414 --- a/drivers/net/ethernet/emulex/benet/be_main.c
415 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
416 @@ -5998,6 +5998,7 @@ drv_cleanup:
417 unmap_bars:
418 be_unmap_pci_bars(adapter);
419 free_netdev:
420 + pci_disable_pcie_error_reporting(pdev);
421 free_netdev(netdev);
422 rel_reg:
423 pci_release_regions(pdev);
424 diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
425 index 1231816125955..031d4b3a544c0 100644
426 --- a/drivers/net/ethernet/freescale/fec_ptp.c
427 +++ b/drivers/net/ethernet/freescale/fec_ptp.c
428 @@ -586,6 +586,10 @@ void fec_ptp_init(struct platform_device *pdev)
429 fep->ptp_caps.enable = fec_ptp_enable;
430
431 fep->cycle_speed = clk_get_rate(fep->clk_ptp);
432 + if (!fep->cycle_speed) {
433 + fep->cycle_speed = NSEC_PER_SEC;
434 + dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
435 + }
436 fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
437
438 spin_lock_init(&fep->tmreg_lock);
439 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
440 index 40644657b1b74..0b1ee353f4150 100644
441 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
442 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
443 @@ -9059,10 +9059,6 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
444 0, 0, nlflags, filter_mask, NULL);
445 }
446
447 -/* Hardware supports L4 tunnel length of 128B (=2^7) which includes
448 - * inner mac plus all inner ethertypes.
449 - */
450 -#define I40E_MAX_TUNNEL_HDR_LEN 128
451 /**
452 * i40e_features_check - Validate encapsulated packet conforms to limits
453 * @skb: skb buff
454 @@ -9073,12 +9069,52 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb,
455 struct net_device *dev,
456 netdev_features_t features)
457 {
458 - if (skb->encapsulation &&
459 - ((skb_inner_network_header(skb) - skb_transport_header(skb)) >
460 - I40E_MAX_TUNNEL_HDR_LEN))
461 - return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
462 + size_t len;
463 +
464 + /* No point in doing any of this if neither checksum nor GSO are
465 + * being requested for this frame. We can rule out both by just
466 + * checking for CHECKSUM_PARTIAL
467 + */
468 + if (skb->ip_summed != CHECKSUM_PARTIAL)
469 + return features;
470 +
471 + /* We cannot support GSO if the MSS is going to be less than
472 + * 64 bytes. If it is then we need to drop support for GSO.
473 + */
474 + if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
475 + features &= ~NETIF_F_GSO_MASK;
476 +
477 + /* MACLEN can support at most 63 words */
478 + len = skb_network_header(skb) - skb->data;
479 + if (len & ~(63 * 2))
480 + goto out_err;
481 +
482 + /* IPLEN and EIPLEN can support at most 127 dwords */
483 + len = skb_transport_header(skb) - skb_network_header(skb);
484 + if (len & ~(127 * 4))
485 + goto out_err;
486 +
487 + if (skb->encapsulation) {
488 + /* L4TUNLEN can support 127 words */
489 + len = skb_inner_network_header(skb) - skb_transport_header(skb);
490 + if (len & ~(127 * 2))
491 + goto out_err;
492 +
493 + /* IPLEN can support at most 127 dwords */
494 + len = skb_inner_transport_header(skb) -
495 + skb_inner_network_header(skb);
496 + if (len & ~(127 * 4))
497 + goto out_err;
498 + }
499 +
500 + /* No need to validate L4LEN as TCP is the only protocol with a
501 + * a flexible value and we support all possible values supported
502 + * by TCP, which is at most 15 dwords
503 + */
504
505 return features;
506 +out_err:
507 + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
508 }
509
510 static const struct net_device_ops i40e_netdev_ops = {
511 diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
512 index 02ec326cb1293..5eeba263b5f8a 100644
513 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
514 +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
515 @@ -4050,6 +4050,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
516 dev_err(&pdev->dev,
517 "invalid sram_size %dB or board span %ldB\n",
518 mgp->sram_size, mgp->board_span);
519 + status = -EINVAL;
520 goto abort_with_ioremap;
521 }
522 memcpy_fromio(mgp->eeprom_strings,
523 diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
524 index a5ee3d328f3d6..75e25a3fe4a72 100644
525 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
526 +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
527 @@ -1617,6 +1617,8 @@ err_out_free_netdev:
528 free_netdev(netdev);
529
530 err_out_free_res:
531 + if (NX_IS_REVISION_P3(pdev->revision))
532 + pci_disable_pcie_error_reporting(pdev);
533 pci_release_regions(pdev);
534
535 err_out_disable_pdev:
536 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
537 index 7b6824e560d2c..59e59878a3a71 100644
538 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
539 +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
540 @@ -1205,9 +1205,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
541 p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
542
543 p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
544 + BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
545 + sizeof(p_hwfn->p_dcbx_info->set.config.params));
546 memcpy(&p_hwfn->p_dcbx_info->set.config.params,
547 &dcbx_info->operational.params,
548 - sizeof(struct qed_dcbx_admin_params));
549 + sizeof(p_hwfn->p_dcbx_info->set.config.params));
550 p_hwfn->p_dcbx_info->set.config.valid = true;
551
552 memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
553 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
554 index 0928da21efd04..19dca845042e0 100644
555 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
556 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
557 @@ -2707,6 +2707,7 @@ err_out_free_hw_res:
558 kfree(ahw);
559
560 err_out_free_res:
561 + pci_disable_pcie_error_reporting(pdev);
562 pci_release_regions(pdev);
563
564 err_out_disable_pdev:
565 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
566 index 3521e3a77556d..f321b115719a5 100644
567 --- a/drivers/net/ethernet/realtek/r8169.c
568 +++ b/drivers/net/ethernet/realtek/r8169.c
569 @@ -2338,7 +2338,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
570 {
571 switch(stringset) {
572 case ETH_SS_STATS:
573 - memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
574 + memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
575 break;
576 }
577 }
578 diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
579 index 0e5b1935af50e..468f02beccee4 100644
580 --- a/drivers/net/ethernet/renesas/sh_eth.c
581 +++ b/drivers/net/ethernet/renesas/sh_eth.c
582 @@ -2117,7 +2117,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
583 {
584 switch (stringset) {
585 case ETH_SS_STATS:
586 - memcpy(data, *sh_eth_gstrings_stats,
587 + memcpy(data, sh_eth_gstrings_stats,
588 sizeof(sh_eth_gstrings_stats));
589 break;
590 }
591 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
592 index ff3e5ab39bd0e..24fb7a2bba625 100644
593 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
594 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
595 @@ -91,10 +91,10 @@ enum power_event {
596 #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
597
598 /* GMAC HW ADDR regs */
599 -#define GMAC_ADDR_HIGH(reg) (((reg > 15) ? 0x00000800 : 0x00000040) + \
600 - (reg * 8))
601 -#define GMAC_ADDR_LOW(reg) (((reg > 15) ? 0x00000804 : 0x00000044) + \
602 - (reg * 8))
603 +#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
604 + 0x00000040 + (reg * 8))
605 +#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
606 + 0x00000044 + (reg * 8))
607 #define GMAC_MAX_PERFECT_ADDRESSES 1
608
609 #define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
610 diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
611 index 545f60877bb7d..9ba36c930ce3b 100644
612 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c
613 +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
614 @@ -735,6 +735,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
615 /* Kick off the transfer */
616 lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
617
618 + if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
619 + netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
620 + netif_stop_queue(ndev);
621 + }
622 +
623 return NETDEV_TX_OK;
624 }
625
626 diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
627 index 088fe5d34f500..76340bc3cf445 100644
628 --- a/drivers/net/hamradio/mkiss.c
629 +++ b/drivers/net/hamradio/mkiss.c
630 @@ -810,6 +810,7 @@ static void mkiss_close(struct tty_struct *tty)
631 ax->tty = NULL;
632
633 unregister_netdev(ax->dev);
634 + free_netdev(ax->dev);
635 }
636
637 /* Perform I/O control on an active ax25 channel. */
638 diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
639 index f7180f8db39e1..9c15e1a1261be 100644
640 --- a/drivers/net/usb/cdc_eem.c
641 +++ b/drivers/net/usb/cdc_eem.c
642 @@ -138,10 +138,10 @@ static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
643 }
644
645 skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
646 + dev_kfree_skb_any(skb);
647 if (!skb2)
648 return NULL;
649
650 - dev_kfree_skb_any(skb);
651 skb = skb2;
652
653 done:
654 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
655 index 297d3f599efda..5a5db2f09f788 100644
656 --- a/drivers/net/usb/cdc_ncm.c
657 +++ b/drivers/net/usb/cdc_ncm.c
658 @@ -1639,7 +1639,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
659 static const struct driver_info cdc_ncm_info = {
660 .description = "CDC NCM",
661 .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET
662 - | FLAG_LINK_INTR,
663 + | FLAG_LINK_INTR | FLAG_ETHER,
664 .bind = cdc_ncm_bind,
665 .unbind = cdc_ncm_unbind,
666 .manage_power = usbnet_manage_power,
667 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
668 index 6e74965d26a0a..64fdea3328861 100644
669 --- a/drivers/net/usb/r8152.c
670 +++ b/drivers/net/usb/r8152.c
671 @@ -3938,7 +3938,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
672 {
673 switch (stringset) {
674 case ETH_SS_STATS:
675 - memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
676 + memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
677 break;
678 }
679 }
680 diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
681 index 3a391ae5c4e0d..841d974915929 100644
682 --- a/drivers/net/usb/smsc75xx.c
683 +++ b/drivers/net/usb/smsc75xx.c
684 @@ -1497,7 +1497,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
685 ret = smsc75xx_wait_ready(dev, 0);
686 if (ret < 0) {
687 netdev_warn(dev->net, "device not ready in smsc75xx_bind\n");
688 - goto err;
689 + goto free_pdata;
690 }
691
692 smsc75xx_init_mac_address(dev);
693 @@ -1506,7 +1506,7 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
694 ret = smsc75xx_reset(dev);
695 if (ret < 0) {
696 netdev_warn(dev->net, "smsc75xx_reset error %d\n", ret);
697 - goto err;
698 + goto cancel_work;
699 }
700
701 dev->net->netdev_ops = &smsc75xx_netdev_ops;
702 @@ -1516,8 +1516,11 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf)
703 dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
704 return 0;
705
706 -err:
707 +cancel_work:
708 + cancel_work_sync(&pdata->set_multicast);
709 +free_pdata:
710 kfree(pdata);
711 + dev->data[0] = 0;
712 return ret;
713 }
714
715 @@ -1528,7 +1531,6 @@ static void smsc75xx_unbind(struct usbnet *dev, struct usb_interface *intf)
716 cancel_work_sync(&pdata->set_multicast);
717 netif_dbg(dev, ifdown, dev->net, "free pdata\n");
718 kfree(pdata);
719 - pdata = NULL;
720 dev->data[0] = 0;
721 }
722 }
723 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
724 index acd89fa9820c4..e09653c73ab4b 100644
725 --- a/drivers/pci/pci.c
726 +++ b/drivers/pci/pci.c
727 @@ -1378,11 +1378,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
728 int err;
729 int i, bars = 0;
730
731 - if (atomic_inc_return(&dev->enable_cnt) > 1) {
732 - pci_update_current_state(dev, dev->current_state);
733 - return 0; /* already enabled */
734 + /*
735 + * Power state could be unknown at this point, either due to a fresh
736 + * boot or a device removal call. So get the current power state
737 + * so that things like MSI message writing will behave as expected
738 + * (e.g. if the device really is in D0 at enable time).
739 + */
740 + if (dev->pm_cap) {
741 + u16 pmcsr;
742 + pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
743 + dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
744 }
745
746 + if (atomic_inc_return(&dev->enable_cnt) > 1)
747 + return 0; /* already enabled */
748 +
749 bridge = pci_upstream_bridge(dev);
750 if (bridge)
751 pci_enable_bridge(bridge);
752 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
753 index 0ebf7500e171e..096ba11ac1058 100644
754 --- a/drivers/pci/quirks.c
755 +++ b/drivers/pci/quirks.c
756 @@ -3345,6 +3345,18 @@ static void quirk_no_bus_reset(struct pci_dev *dev)
757 dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
758 }
759
760 +/*
761 + * Some NVIDIA GPU devices do not work with bus reset, SBR needs to be
762 + * prevented for those affected devices.
763 + */
764 +static void quirk_nvidia_no_bus_reset(struct pci_dev *dev)
765 +{
766 + if ((dev->device & 0xffc0) == 0x2340)
767 + quirk_no_bus_reset(dev);
768 +}
769 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID,
770 + quirk_nvidia_no_bus_reset);
771 +
772 /*
773 * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset.
774 * The device will throw a Link Down error on AER-capable systems and
775 @@ -3358,6 +3370,16 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset);
776 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0033, quirk_no_bus_reset);
777 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0034, quirk_no_bus_reset);
778
779 +/*
780 + * Some TI KeyStone C667X devices do not support bus/hot reset. The PCIESS
781 + * automatically disables LTSSM when Secondary Bus Reset is received and
782 + * the device stops working. Prevent bus reset for these devices. With
783 + * this change, the device can be assigned to VMs with VFIO, but it will
784 + * leak state between VMs. Reference
785 + * https://e2e.ti.com/support/processors/f/791/t/954382
786 + */
787 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TI, 0xb005, quirk_no_bus_reset);
788 +
789 static void quirk_no_pm_reset(struct pci_dev *dev)
790 {
791 /*
792 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
793 index dc09f10d5d4b8..604cf3385aae2 100644
794 --- a/drivers/scsi/hosts.c
795 +++ b/drivers/scsi/hosts.c
796 @@ -265,12 +265,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
797
798 device_enable_async_suspend(&shost->shost_dev);
799
800 + get_device(&shost->shost_gendev);
801 error = device_add(&shost->shost_dev);
802 if (error)
803 goto out_del_gendev;
804
805 - get_device(&shost->shost_gendev);
806 -
807 if (shost->transportt->host_size) {
808 shost->shost_data = kzalloc(shost->transportt->host_size,
809 GFP_KERNEL);
810 @@ -307,6 +306,11 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
811 out_del_dev:
812 device_del(&shost->shost_dev);
813 out_del_gendev:
814 + /*
815 + * Host state is SHOST_RUNNING so we have to explicitly release
816 + * ->shost_dev.
817 + */
818 + put_device(&shost->shost_dev);
819 device_del(&shost->shost_gendev);
820 out_destroy_freelist:
821 device_disable_async_suspend(&shost->shost_gendev);
822 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
823 index ecd707f74ddcb..6afb65387be6c 100644
824 --- a/drivers/target/target_core_transport.c
825 +++ b/drivers/target/target_core_transport.c
826 @@ -2779,9 +2779,7 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
827 __releases(&cmd->t_state_lock)
828 __acquires(&cmd->t_state_lock)
829 {
830 -
831 - assert_spin_locked(&cmd->t_state_lock);
832 - WARN_ON_ONCE(!irqs_disabled());
833 + lockdep_assert_held(&cmd->t_state_lock);
834
835 if (fabric_stop)
836 cmd->transport_state |= CMD_T_FABRIC_STOP;
837 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
838 index 66254500e7a94..b6d6fe4565fdf 100644
839 --- a/drivers/usb/dwc3/core.c
840 +++ b/drivers/usb/dwc3/core.c
841 @@ -1199,8 +1199,8 @@ static int dwc3_remove(struct platform_device *pdev)
842 */
843 res->start -= DWC3_GLOBALS_REGS_START;
844
845 - dwc3_debugfs_exit(dwc);
846 dwc3_core_exit_mode(dwc);
847 + dwc3_debugfs_exit(dwc);
848
849 dwc3_core_exit(dwc);
850 dwc3_ulpi_exit(dwc);
851 diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
852 index f19e49a5d032b..3d4d350834384 100644
853 --- a/fs/gfs2/glock.c
854 +++ b/fs/gfs2/glock.c
855 @@ -1350,6 +1350,7 @@ __acquires(&lru_lock)
856 while(!list_empty(list)) {
857 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
858 list_del_init(&gl->gl_lru);
859 + clear_bit(GLF_LRU, &gl->gl_flags);
860 if (!spin_trylock(&gl->gl_lockref.lock)) {
861 add_back_to_lru:
862 list_add(&gl->gl_lru, &lru_list);
863 @@ -1396,7 +1397,6 @@ static long gfs2_scan_glock_lru(int nr)
864 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
865 list_move(&gl->gl_lru, &dispose);
866 atomic_dec(&lru_count);
867 - clear_bit(GLF_LRU, &gl->gl_flags);
868 freed++;
869 continue;
870 }
871 diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
872 index 490303e3d5179..e9903bceb2bf1 100644
873 --- a/fs/nilfs2/sysfs.c
874 +++ b/fs/nilfs2/sysfs.c
875 @@ -1064,6 +1064,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
876 nilfs_sysfs_delete_superblock_group(nilfs);
877 nilfs_sysfs_delete_segctor_group(nilfs);
878 kobject_del(&nilfs->ns_dev_kobj);
879 + kobject_put(&nilfs->ns_dev_kobj);
880 kfree(nilfs->ns_dev_subgroups);
881 }
882
883 diff --git a/include/linux/hid.h b/include/linux/hid.h
884 index 41c372573a289..2ed6850356ead 100644
885 --- a/include/linux/hid.h
886 +++ b/include/linux/hid.h
887 @@ -1127,8 +1127,7 @@ static inline void hid_hw_wait(struct hid_device *hdev)
888 */
889 static inline u32 hid_report_len(struct hid_report *report)
890 {
891 - /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
892 - return ((report->size - 1) >> 3) + 1 + (report->id > 0);
893 + return DIV_ROUND_UP(report->size, 8) + (report->id > 0);
894 }
895
896 int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
897 diff --git a/include/linux/swapops.h b/include/linux/swapops.h
898 index 5c3a5f3e7eec6..c5ff7b217ee6e 100644
899 --- a/include/linux/swapops.h
900 +++ b/include/linux/swapops.h
901 @@ -196,15 +196,6 @@ static inline void num_poisoned_pages_dec(void)
902 atomic_long_dec(&num_poisoned_pages);
903 }
904
905 -static inline void num_poisoned_pages_add(long num)
906 -{
907 - atomic_long_add(num, &num_poisoned_pages);
908 -}
909 -
910 -static inline void num_poisoned_pages_sub(long num)
911 -{
912 - atomic_long_sub(num, &num_poisoned_pages);
913 -}
914 #else
915
916 static inline swp_entry_t make_hwpoison_entry(struct page *page)
917 diff --git a/include/net/sock.h b/include/net/sock.h
918 index d0e18917d8be8..cf27f3688c39c 100644
919 --- a/include/net/sock.h
920 +++ b/include/net/sock.h
921 @@ -1681,7 +1681,8 @@ static inline u32 net_tx_rndhash(void)
922
923 static inline void sk_set_txhash(struct sock *sk)
924 {
925 - sk->sk_txhash = net_tx_rndhash();
926 + /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
927 + WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
928 }
929
930 static inline void sk_rethink_txhash(struct sock *sk)
931 @@ -1936,9 +1937,12 @@ static inline void sock_poll_wait(struct file *filp,
932
933 static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
934 {
935 - if (sk->sk_txhash) {
936 + /* This pairs with WRITE_ONCE() in sk_set_txhash() */
937 + u32 txhash = READ_ONCE(sk->sk_txhash);
938 +
939 + if (txhash) {
940 skb->l4_hash = 1;
941 - skb->hash = sk->sk_txhash;
942 + skb->hash = txhash;
943 }
944 }
945
946 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
947 index cdf614943aa3d..e8bd8de856de9 100644
948 --- a/kernel/trace/trace.c
949 +++ b/kernel/trace/trace.c
950 @@ -1616,9 +1616,6 @@ struct saved_cmdlines_buffer {
951 };
952 static struct saved_cmdlines_buffer *savedcmd;
953
954 -/* temporary disable recording */
955 -static atomic_t trace_record_cmdline_disabled __read_mostly;
956 -
957 static inline char *get_saved_cmdlines(int idx)
958 {
959 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
960 @@ -1882,9 +1879,6 @@ void trace_find_cmdline(int pid, char comm[])
961
962 void tracing_record_cmdline(struct task_struct *tsk)
963 {
964 - if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
965 - return;
966 -
967 if (!__this_cpu_read(trace_cmdline_save))
968 return;
969
970 @@ -2828,9 +2822,6 @@ static void *s_start(struct seq_file *m, loff_t *pos)
971 return ERR_PTR(-EBUSY);
972 #endif
973
974 - if (!iter->snapshot)
975 - atomic_inc(&trace_record_cmdline_disabled);
976 -
977 if (*pos != iter->pos) {
978 iter->ent = NULL;
979 iter->cpu = 0;
980 @@ -2873,9 +2864,6 @@ static void s_stop(struct seq_file *m, void *p)
981 return;
982 #endif
983
984 - if (!iter->snapshot)
985 - atomic_dec(&trace_record_cmdline_disabled);
986 -
987 trace_access_unlock(iter->cpu_file);
988 trace_event_read_unlock();
989 }
990 diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
991 index b67ea5eed2a89..b70233a9563f8 100644
992 --- a/kernel/trace/trace_clock.c
993 +++ b/kernel/trace/trace_clock.c
994 @@ -113,9 +113,9 @@ u64 notrace trace_clock_global(void)
995 prev_time = READ_ONCE(trace_clock_struct.prev_time);
996 now = sched_clock_cpu(this_cpu);
997
998 - /* Make sure that now is always greater than prev_time */
999 + /* Make sure that now is always greater than or equal to prev_time */
1000 if ((s64)(now - prev_time) < 0)
1001 - now = prev_time + 1;
1002 + now = prev_time;
1003
1004 /*
1005 * If in an NMI context then dont risk lockups and simply return
1006 @@ -129,7 +129,7 @@ u64 notrace trace_clock_global(void)
1007 /* Reread prev_time in case it was already updated */
1008 prev_time = READ_ONCE(trace_clock_struct.prev_time);
1009 if ((s64)(now - prev_time) < 0)
1010 - now = prev_time + 1;
1011 + now = prev_time;
1012
1013 trace_clock_struct.prev_time = now;
1014
1015 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
1016 index d6524dce43b26..ad156b42d2adf 100644
1017 --- a/mm/memory-failure.c
1018 +++ b/mm/memory-failure.c
1019 @@ -1010,22 +1010,6 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
1020 return ret;
1021 }
1022
1023 -static void set_page_hwpoison_huge_page(struct page *hpage)
1024 -{
1025 - int i;
1026 - int nr_pages = 1 << compound_order(hpage);
1027 - for (i = 0; i < nr_pages; i++)
1028 - SetPageHWPoison(hpage + i);
1029 -}
1030 -
1031 -static void clear_page_hwpoison_huge_page(struct page *hpage)
1032 -{
1033 - int i;
1034 - int nr_pages = 1 << compound_order(hpage);
1035 - for (i = 0; i < nr_pages; i++)
1036 - ClearPageHWPoison(hpage + i);
1037 -}
1038 -
1039 /**
1040 * memory_failure - Handle memory failure of a page.
1041 * @pfn: Page Number of the corrupted page
1042 @@ -1051,7 +1035,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1043 struct page *hpage;
1044 struct page *orig_head;
1045 int res;
1046 - unsigned int nr_pages;
1047 unsigned long page_flags;
1048
1049 if (!sysctl_memory_failure_recovery)
1050 @@ -1065,24 +1048,23 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1051
1052 p = pfn_to_page(pfn);
1053 orig_head = hpage = compound_head(p);
1054 +
1055 +	/* temporary check code, to be updated in later patches */
1056 + if (PageHuge(p)) {
1057 + if (TestSetPageHWPoison(hpage)) {
1058 + pr_err("Memory failure: %#lx: already hardware poisoned\n", pfn);
1059 + return 0;
1060 + }
1061 + goto tmp;
1062 + }
1063 if (TestSetPageHWPoison(p)) {
1064 pr_err("Memory failure: %#lx: already hardware poisoned\n",
1065 pfn);
1066 return 0;
1067 }
1068
1069 - /*
1070 - * Currently errors on hugetlbfs pages are measured in hugepage units,
1071 - * so nr_pages should be 1 << compound_order. OTOH when errors are on
1072 - * transparent hugepages, they are supposed to be split and error
1073 - * measurement is done in normal page units. So nr_pages should be one
1074 - * in this case.
1075 - */
1076 - if (PageHuge(p))
1077 - nr_pages = 1 << compound_order(hpage);
1078 - else /* normal page or thp */
1079 - nr_pages = 1;
1080 - num_poisoned_pages_add(nr_pages);
1081 +tmp:
1082 + num_poisoned_pages_inc();
1083
1084 /*
1085 * We need/can do nothing about count=0 pages.
1086 @@ -1110,12 +1092,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1087 if (PageHWPoison(hpage)) {
1088 if ((hwpoison_filter(p) && TestClearPageHWPoison(p))
1089 || (p != hpage && TestSetPageHWPoison(hpage))) {
1090 - num_poisoned_pages_sub(nr_pages);
1091 + num_poisoned_pages_dec();
1092 unlock_page(hpage);
1093 return 0;
1094 }
1095 }
1096 - set_page_hwpoison_huge_page(hpage);
1097 res = dequeue_hwpoisoned_huge_page(hpage);
1098 action_result(pfn, MF_MSG_FREE_HUGE,
1099 res ? MF_IGNORED : MF_DELAYED);
1100 @@ -1138,7 +1119,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1101 pr_err("Memory failure: %#lx: thp split failed\n",
1102 pfn);
1103 if (TestClearPageHWPoison(p))
1104 - num_poisoned_pages_sub(nr_pages);
1105 + num_poisoned_pages_dec();
1106 put_hwpoison_page(p);
1107 return -EBUSY;
1108 }
1109 @@ -1202,14 +1183,14 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1110 */
1111 if (!PageHWPoison(p)) {
1112 pr_err("Memory failure: %#lx: just unpoisoned\n", pfn);
1113 - num_poisoned_pages_sub(nr_pages);
1114 + num_poisoned_pages_dec();
1115 unlock_page(hpage);
1116 put_hwpoison_page(hpage);
1117 return 0;
1118 }
1119 if (hwpoison_filter(p)) {
1120 if (TestClearPageHWPoison(p))
1121 - num_poisoned_pages_sub(nr_pages);
1122 + num_poisoned_pages_dec();
1123 unlock_page(hpage);
1124 put_hwpoison_page(hpage);
1125 return 0;
1126 @@ -1228,14 +1209,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1127 put_hwpoison_page(hpage);
1128 return 0;
1129 }
1130 - /*
1131 - * Set PG_hwpoison on all pages in an error hugepage,
1132 - * because containment is done in hugepage unit for now.
1133 - * Since we have done TestSetPageHWPoison() for the head page with
1134 - * page lock held, we can safely set PG_hwpoison bits on tail pages.
1135 - */
1136 - if (PageHuge(p))
1137 - set_page_hwpoison_huge_page(hpage);
1138
1139 /*
1140 * It's very difficult to mess with pages currently under IO
1141 @@ -1407,7 +1380,6 @@ int unpoison_memory(unsigned long pfn)
1142 struct page *page;
1143 struct page *p;
1144 int freeit = 0;
1145 - unsigned int nr_pages;
1146 static DEFINE_RATELIMIT_STATE(unpoison_rs, DEFAULT_RATELIMIT_INTERVAL,
1147 DEFAULT_RATELIMIT_BURST);
1148
1149 @@ -1452,8 +1424,6 @@ int unpoison_memory(unsigned long pfn)
1150 return 0;
1151 }
1152
1153 - nr_pages = 1 << compound_order(page);
1154 -
1155 if (!get_hwpoison_page(p)) {
1156 /*
1157 * Since HWPoisoned hugepage should have non-zero refcount,
1158 @@ -1483,10 +1453,8 @@ int unpoison_memory(unsigned long pfn)
1159 if (TestClearPageHWPoison(page)) {
1160 unpoison_pr_info("Unpoison: Software-unpoisoned page %#lx\n",
1161 pfn, &unpoison_rs);
1162 - num_poisoned_pages_sub(nr_pages);
1163 + num_poisoned_pages_dec();
1164 freeit = 1;
1165 - if (PageHuge(page))
1166 - clear_page_hwpoison_huge_page(page);
1167 }
1168 unlock_page(page);
1169
1170 @@ -1612,14 +1580,10 @@ static int soft_offline_huge_page(struct page *page, int flags)
1171 ret = -EIO;
1172 } else {
1173 /* overcommit hugetlb page will be freed to buddy */
1174 - if (PageHuge(page)) {
1175 - set_page_hwpoison_huge_page(hpage);
1176 + SetPageHWPoison(page);
1177 + if (PageHuge(page))
1178 dequeue_hwpoisoned_huge_page(hpage);
1179 - num_poisoned_pages_add(1 << compound_order(hpage));
1180 - } else {
1181 - SetPageHWPoison(page);
1182 - num_poisoned_pages_inc();
1183 - }
1184 + num_poisoned_pages_inc();
1185 }
1186 return ret;
1187 }
1188 @@ -1728,15 +1692,12 @@ static int soft_offline_in_use_page(struct page *page, int flags)
1189
1190 static void soft_offline_free_page(struct page *page)
1191 {
1192 - if (PageHuge(page)) {
1193 - struct page *hpage = compound_head(page);
1194 + struct page *head = compound_head(page);
1195
1196 - set_page_hwpoison_huge_page(hpage);
1197 - if (!dequeue_hwpoisoned_huge_page(hpage))
1198 - num_poisoned_pages_add(1 << compound_order(hpage));
1199 - } else {
1200 - if (!TestSetPageHWPoison(page))
1201 - num_poisoned_pages_inc();
1202 + if (!TestSetPageHWPoison(head)) {
1203 + num_poisoned_pages_inc();
1204 + if (PageHuge(head))
1205 + dequeue_hwpoisoned_huge_page(head);
1206 }
1207 }
1208
1209 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
1210 index 2b663622bdb45..f85e6a9ee5eac 100644
1211 --- a/net/batman-adv/bat_iv_ogm.c
1212 +++ b/net/batman-adv/bat_iv_ogm.c
1213 @@ -585,8 +585,10 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
1214 if (WARN_ON(!forw_packet->if_outgoing))
1215 return;
1216
1217 - if (WARN_ON(forw_packet->if_outgoing->soft_iface != soft_iface))
1218 + if (forw_packet->if_outgoing->soft_iface != soft_iface) {
1219 + pr_warn("%s: soft interface switch for queued OGM\n", __func__);
1220 return;
1221 + }
1222
1223 if (forw_packet->if_incoming->if_status != BATADV_IF_ACTIVE)
1224 return;
1225 diff --git a/net/can/bcm.c b/net/can/bcm.c
1226 index c99e7c75eeee1..65fa0ac2fb47d 100644
1227 --- a/net/can/bcm.c
1228 +++ b/net/can/bcm.c
1229 @@ -127,7 +127,7 @@ struct bcm_sock {
1230 struct sock sk;
1231 int bound;
1232 int ifindex;
1233 - struct notifier_block notifier;
1234 + struct list_head notifier;
1235 struct list_head rx_ops;
1236 struct list_head tx_ops;
1237 unsigned long dropped_usr_msgs;
1238 @@ -135,6 +135,10 @@ struct bcm_sock {
1239 char procname [32]; /* inode number in decimal with \0 */
1240 };
1241
1242 +static LIST_HEAD(bcm_notifier_list);
1243 +static DEFINE_SPINLOCK(bcm_notifier_lock);
1244 +static struct bcm_sock *bcm_busy_notifier;
1245 +
1246 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
1247 {
1248 return (struct bcm_sock *)sk;
1249 @@ -405,6 +409,7 @@ static void bcm_tx_timeout_tsklet(unsigned long data)
1250 if (!op->count && (op->flags & TX_COUNTEVT)) {
1251
1252 /* create notification to user */
1253 + memset(&msg_head, 0, sizeof(msg_head));
1254 msg_head.opcode = TX_EXPIRED;
1255 msg_head.flags = op->flags;
1256 msg_head.count = op->count;
1257 @@ -452,6 +457,7 @@ static void bcm_rx_changed(struct bcm_op *op, struct canfd_frame *data)
1258 /* this element is not throttled anymore */
1259 data->flags &= (BCM_CAN_FLAGS_MASK|RX_RECV);
1260
1261 + memset(&head, 0, sizeof(head));
1262 head.opcode = RX_CHANGED;
1263 head.flags = op->flags;
1264 head.count = op->count;
1265 @@ -566,6 +572,7 @@ static void bcm_rx_timeout_tsklet(unsigned long data)
1266 struct bcm_msg_head msg_head;
1267
1268 /* create notification to user */
1269 + memset(&msg_head, 0, sizeof(msg_head));
1270 msg_head.opcode = RX_TIMEOUT;
1271 msg_head.flags = op->flags;
1272 msg_head.count = op->count;
1273 @@ -1436,20 +1443,15 @@ static int bcm_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
1274 /*
1275 * notification handler for netdevice status changes
1276 */
1277 -static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1278 - void *ptr)
1279 +static void bcm_notify(struct bcm_sock *bo, unsigned long msg,
1280 + struct net_device *dev)
1281 {
1282 - struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1283 - struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1284 struct sock *sk = &bo->sk;
1285 struct bcm_op *op;
1286 int notify_enodev = 0;
1287
1288 if (!net_eq(dev_net(dev), &init_net))
1289 - return NOTIFY_DONE;
1290 -
1291 - if (dev->type != ARPHRD_CAN)
1292 - return NOTIFY_DONE;
1293 + return;
1294
1295 switch (msg) {
1296
1297 @@ -1484,7 +1486,28 @@ static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1298 sk->sk_error_report(sk);
1299 }
1300 }
1301 +}
1302
1303 +static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1304 + void *ptr)
1305 +{
1306 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1307 +
1308 + if (dev->type != ARPHRD_CAN)
1309 + return NOTIFY_DONE;
1310 + if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1311 + return NOTIFY_DONE;
1312 + if (unlikely(bcm_busy_notifier)) /* Check for reentrant bug. */
1313 + return NOTIFY_DONE;
1314 +
1315 + spin_lock(&bcm_notifier_lock);
1316 + list_for_each_entry(bcm_busy_notifier, &bcm_notifier_list, notifier) {
1317 + spin_unlock(&bcm_notifier_lock);
1318 + bcm_notify(bcm_busy_notifier, msg, dev);
1319 + spin_lock(&bcm_notifier_lock);
1320 + }
1321 + bcm_busy_notifier = NULL;
1322 + spin_unlock(&bcm_notifier_lock);
1323 return NOTIFY_DONE;
1324 }
1325
1326 @@ -1504,9 +1527,9 @@ static int bcm_init(struct sock *sk)
1327 INIT_LIST_HEAD(&bo->rx_ops);
1328
1329 /* set notifier */
1330 - bo->notifier.notifier_call = bcm_notifier;
1331 -
1332 - register_netdevice_notifier(&bo->notifier);
1333 + spin_lock(&bcm_notifier_lock);
1334 + list_add_tail(&bo->notifier, &bcm_notifier_list);
1335 + spin_unlock(&bcm_notifier_lock);
1336
1337 return 0;
1338 }
1339 @@ -1527,7 +1550,14 @@ static int bcm_release(struct socket *sock)
1340
1341 /* remove bcm_ops, timer, rx_unregister(), etc. */
1342
1343 - unregister_netdevice_notifier(&bo->notifier);
1344 + spin_lock(&bcm_notifier_lock);
1345 + while (bcm_busy_notifier == bo) {
1346 + spin_unlock(&bcm_notifier_lock);
1347 + schedule_timeout_uninterruptible(1);
1348 + spin_lock(&bcm_notifier_lock);
1349 + }
1350 + list_del(&bo->notifier);
1351 + spin_unlock(&bcm_notifier_lock);
1352
1353 lock_sock(sk);
1354
1355 @@ -1713,6 +1743,10 @@ static const struct can_proto bcm_can_proto = {
1356 .prot = &bcm_proto,
1357 };
1358
1359 +static struct notifier_block canbcm_notifier = {
1360 + .notifier_call = bcm_notifier
1361 +};
1362 +
1363 static int __init bcm_module_init(void)
1364 {
1365 int err;
1366 @@ -1727,6 +1761,8 @@ static int __init bcm_module_init(void)
1367
1368 /* create /proc/net/can-bcm directory */
1369 proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1370 + register_netdevice_notifier(&canbcm_notifier);
1371 +
1372 return 0;
1373 }
1374
1375 @@ -1736,6 +1772,8 @@ static void __exit bcm_module_exit(void)
1376
1377 if (proc_dir)
1378 remove_proc_entry("can-bcm", init_net.proc_net);
1379 +
1380 + unregister_netdevice_notifier(&canbcm_notifier);
1381 }
1382
1383 module_init(bcm_module_init);
1384 diff --git a/net/can/raw.c b/net/can/raw.c
1385 index 6dc546a06673f..2bb50b1535c2f 100644
1386 --- a/net/can/raw.c
1387 +++ b/net/can/raw.c
1388 @@ -84,7 +84,7 @@ struct raw_sock {
1389 struct sock sk;
1390 int bound;
1391 int ifindex;
1392 - struct notifier_block notifier;
1393 + struct list_head notifier;
1394 int loopback;
1395 int recv_own_msgs;
1396 int fd_frames;
1397 @@ -96,6 +96,10 @@ struct raw_sock {
1398 struct uniqframe __percpu *uniq;
1399 };
1400
1401 +static LIST_HEAD(raw_notifier_list);
1402 +static DEFINE_SPINLOCK(raw_notifier_lock);
1403 +static struct raw_sock *raw_busy_notifier;
1404 +
1405 /*
1406 * Return pointer to store the extra msg flags for raw_recvmsg().
1407 * We use the space of one unsigned int beyond the 'struct sockaddr_can'
1408 @@ -260,21 +264,16 @@ static int raw_enable_allfilters(struct net_device *dev, struct sock *sk)
1409 return err;
1410 }
1411
1412 -static int raw_notifier(struct notifier_block *nb,
1413 - unsigned long msg, void *ptr)
1414 +static void raw_notify(struct raw_sock *ro, unsigned long msg,
1415 + struct net_device *dev)
1416 {
1417 - struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1418 - struct raw_sock *ro = container_of(nb, struct raw_sock, notifier);
1419 struct sock *sk = &ro->sk;
1420
1421 if (!net_eq(dev_net(dev), &init_net))
1422 - return NOTIFY_DONE;
1423 -
1424 - if (dev->type != ARPHRD_CAN)
1425 - return NOTIFY_DONE;
1426 + return;
1427
1428 if (ro->ifindex != dev->ifindex)
1429 - return NOTIFY_DONE;
1430 + return;
1431
1432 switch (msg) {
1433
1434 @@ -303,7 +302,28 @@ static int raw_notifier(struct notifier_block *nb,
1435 sk->sk_error_report(sk);
1436 break;
1437 }
1438 +}
1439 +
1440 +static int raw_notifier(struct notifier_block *nb, unsigned long msg,
1441 + void *ptr)
1442 +{
1443 + struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1444 +
1445 + if (dev->type != ARPHRD_CAN)
1446 + return NOTIFY_DONE;
1447 + if (msg != NETDEV_UNREGISTER && msg != NETDEV_DOWN)
1448 + return NOTIFY_DONE;
1449 + if (unlikely(raw_busy_notifier)) /* Check for reentrant bug. */
1450 + return NOTIFY_DONE;
1451
1452 + spin_lock(&raw_notifier_lock);
1453 + list_for_each_entry(raw_busy_notifier, &raw_notifier_list, notifier) {
1454 + spin_unlock(&raw_notifier_lock);
1455 + raw_notify(raw_busy_notifier, msg, dev);
1456 + spin_lock(&raw_notifier_lock);
1457 + }
1458 + raw_busy_notifier = NULL;
1459 + spin_unlock(&raw_notifier_lock);
1460 return NOTIFY_DONE;
1461 }
1462
1463 @@ -332,9 +352,9 @@ static int raw_init(struct sock *sk)
1464 return -ENOMEM;
1465
1466 /* set notifier */
1467 - ro->notifier.notifier_call = raw_notifier;
1468 -
1469 - register_netdevice_notifier(&ro->notifier);
1470 + spin_lock(&raw_notifier_lock);
1471 + list_add_tail(&ro->notifier, &raw_notifier_list);
1472 + spin_unlock(&raw_notifier_lock);
1473
1474 return 0;
1475 }
1476 @@ -349,7 +369,14 @@ static int raw_release(struct socket *sock)
1477
1478 ro = raw_sk(sk);
1479
1480 - unregister_netdevice_notifier(&ro->notifier);
1481 + spin_lock(&raw_notifier_lock);
1482 + while (raw_busy_notifier == ro) {
1483 + spin_unlock(&raw_notifier_lock);
1484 + schedule_timeout_uninterruptible(1);
1485 + spin_lock(&raw_notifier_lock);
1486 + }
1487 + list_del(&ro->notifier);
1488 + spin_unlock(&raw_notifier_lock);
1489
1490 lock_sock(sk);
1491
1492 @@ -857,6 +884,10 @@ static const struct can_proto raw_can_proto = {
1493 .prot = &raw_proto,
1494 };
1495
1496 +static struct notifier_block canraw_notifier = {
1497 + .notifier_call = raw_notifier
1498 +};
1499 +
1500 static __init int raw_module_init(void)
1501 {
1502 int err;
1503 @@ -866,6 +897,8 @@ static __init int raw_module_init(void)
1504 err = can_proto_register(&raw_can_proto);
1505 if (err < 0)
1506 printk(KERN_ERR "can: registration of raw protocol failed\n");
1507 + else
1508 + register_netdevice_notifier(&canraw_notifier);
1509
1510 return err;
1511 }
1512 @@ -873,6 +906,7 @@ static __init int raw_module_init(void)
1513 static __exit void raw_module_exit(void)
1514 {
1515 can_proto_unregister(&raw_can_proto);
1516 + unregister_netdevice_notifier(&canraw_notifier);
1517 }
1518
1519 module_init(raw_module_init);
1520 diff --git a/net/compat.c b/net/compat.c
1521 index ce851cf4d0f9d..1f08f0e49e071 100644
1522 --- a/net/compat.c
1523 +++ b/net/compat.c
1524 @@ -159,7 +159,7 @@ int cmsghdr_from_user_compat_to_kern(struct msghdr *kmsg, struct sock *sk,
1525 if (kcmlen > stackbuf_size)
1526 kcmsg_base = kcmsg = sock_kmalloc(sk, kcmlen, GFP_KERNEL);
1527 if (kcmsg == NULL)
1528 - return -ENOBUFS;
1529 + return -ENOMEM;
1530
1531 /* Now copy them over neatly. */
1532 memset(kcmsg, 0, kcmlen);
1533 diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
1534 index 9f172906cc889..cc6e7ca0aff5a 100644
1535 --- a/net/core/fib_rules.c
1536 +++ b/net/core/fib_rules.c
1537 @@ -767,7 +767,7 @@ static void notify_rule_change(int event, struct fib_rule *rule,
1538 {
1539 struct net *net;
1540 struct sk_buff *skb;
1541 - int err = -ENOBUFS;
1542 + int err = -ENOMEM;
1543
1544 net = ops->fro_net;
1545 skb = nlmsg_new(fib_rule_nlmsg_size(ops, rule), GFP_KERNEL);
1546 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1547 index e652e376fb30f..911752e8a3e64 100644
1548 --- a/net/core/rtnetlink.c
1549 +++ b/net/core/rtnetlink.c
1550 @@ -3530,6 +3530,10 @@ static int rtnl_bridge_notify(struct net_device *dev)
1551 if (err < 0)
1552 goto errout;
1553
1554 + /* Notification info is only filled for bridge ports, not the bridge
1555 + * device itself. Therefore, a zero notification length is valid and
1556 + * should not result in an error.
1557 + */
1558 if (!skb->len)
1559 goto errout;
1560
1561 diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c
1562 index cfc01314958f7..936371340dc37 100644
1563 --- a/net/ieee802154/nl802154.c
1564 +++ b/net/ieee802154/nl802154.c
1565 @@ -1330,19 +1330,20 @@ ieee802154_llsec_parse_dev_addr(struct nlattr *nla,
1566 nl802154_dev_addr_policy))
1567 return -EINVAL;
1568
1569 - if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] ||
1570 - !attrs[NL802154_DEV_ADDR_ATTR_MODE] ||
1571 - !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] ||
1572 - attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]))
1573 + if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] || !attrs[NL802154_DEV_ADDR_ATTR_MODE])
1574 return -EINVAL;
1575
1576 addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]);
1577 addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]);
1578 switch (addr->mode) {
1579 case NL802154_DEV_ADDR_SHORT:
1580 + if (!attrs[NL802154_DEV_ADDR_ATTR_SHORT])
1581 + return -EINVAL;
1582 addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]);
1583 break;
1584 case NL802154_DEV_ADDR_EXTENDED:
1585 + if (!attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])
1586 + return -EINVAL;
1587 addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]);
1588 break;
1589 default:
1590 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
1591 index 71409928763b0..553cda6f887ad 100644
1592 --- a/net/ipv4/cipso_ipv4.c
1593 +++ b/net/ipv4/cipso_ipv4.c
1594 @@ -486,6 +486,7 @@ void cipso_v4_doi_free(struct cipso_v4_doi *doi_def)
1595 kfree(doi_def->map.std->lvl.local);
1596 kfree(doi_def->map.std->cat.cipso);
1597 kfree(doi_def->map.std->cat.local);
1598 + kfree(doi_def->map.std);
1599 break;
1600 }
1601 kfree(doi_def);
1602 diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1603 index 02c1736c0b897..f4a827964b685 100644
1604 --- a/net/ipv4/igmp.c
1605 +++ b/net/ipv4/igmp.c
1606 @@ -1782,6 +1782,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
1607 while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
1608 in_dev->mc_list = i->next_rcu;
1609 in_dev->mc_count--;
1610 + ip_mc_clear_src(i);
1611 ip_ma_put(i);
1612 }
1613 }
1614 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
1615 index d278b06459ac9..79d8ea98a5b1f 100644
1616 --- a/net/ipv4/ipconfig.c
1617 +++ b/net/ipv4/ipconfig.c
1618 @@ -880,7 +880,7 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
1619
1620
1621 /*
1622 - * Copy BOOTP-supplied string if not already set.
1623 + * Copy BOOTP-supplied string
1624 */
1625 static int __init ic_bootp_string(char *dest, char *src, int len, int max)
1626 {
1627 @@ -929,12 +929,15 @@ static void __init ic_do_bootp_ext(u8 *ext)
1628 }
1629 break;
1630 case 12: /* Host name */
1631 - ic_bootp_string(utsname()->nodename, ext+1, *ext,
1632 - __NEW_UTS_LEN);
1633 - ic_host_name_set = 1;
1634 + if (!ic_host_name_set) {
1635 + ic_bootp_string(utsname()->nodename, ext+1, *ext,
1636 + __NEW_UTS_LEN);
1637 + ic_host_name_set = 1;
1638 + }
1639 break;
1640 case 15: /* Domain name (DNS) */
1641 - ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
1642 + if (!ic_domain[0])
1643 + ic_bootp_string(ic_domain, ext+1, *ext, sizeof(ic_domain));
1644 break;
1645 case 17: /* Root path */
1646 if (!root_server_path[0])
1647 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
1648 index 4fda9abf38ee1..dd4e46af1e531 100644
1649 --- a/net/ipv4/ping.c
1650 +++ b/net/ipv4/ping.c
1651 @@ -976,6 +976,7 @@ bool ping_rcv(struct sk_buff *skb)
1652 struct sock *sk;
1653 struct net *net = dev_net(skb->dev);
1654 struct icmphdr *icmph = icmp_hdr(skb);
1655 + bool rc = false;
1656
1657 /* We assume the packet has already been checked by icmp_rcv */
1658
1659 @@ -990,14 +991,15 @@ bool ping_rcv(struct sk_buff *skb)
1660 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
1661
1662 pr_debug("rcv on socket %p\n", sk);
1663 - if (skb2)
1664 - ping_queue_rcv_skb(sk, skb2);
1665 + if (skb2 && !ping_queue_rcv_skb(sk, skb2))
1666 + rc = true;
1667 sock_put(sk);
1668 - return true;
1669 }
1670 - pr_debug("no socket, dropping\n");
1671
1672 - return false;
1673 + if (!rc)
1674 + pr_debug("no socket, dropping\n");
1675 +
1676 + return rc;
1677 }
1678 EXPORT_SYMBOL_GPL(ping_rcv);
1679
1680 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1681 index e9aae4686536a..5350e1b61c06b 100644
1682 --- a/net/ipv4/route.c
1683 +++ b/net/ipv4/route.c
1684 @@ -70,6 +70,7 @@
1685 #include <linux/types.h>
1686 #include <linux/kernel.h>
1687 #include <linux/mm.h>
1688 +#include <linux/bootmem.h>
1689 #include <linux/string.h>
1690 #include <linux/socket.h>
1691 #include <linux/sockios.h>
1692 @@ -463,8 +464,10 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
1693 return neigh_create(&arp_tbl, pkey, dev);
1694 }
1695
1696 -#define IP_IDENTS_SZ 2048u
1697 -
1698 +/* Hash tables of size 2048..262144 depending on RAM size.
1699 + * Each bucket uses 8 bytes.
1700 + */
1701 +static u32 ip_idents_mask __read_mostly;
1702 static atomic_t *ip_idents __read_mostly;
1703 static u32 *ip_tstamps __read_mostly;
1704
1705 @@ -474,12 +477,16 @@ static u32 *ip_tstamps __read_mostly;
1706 */
1707 u32 ip_idents_reserve(u32 hash, int segs)
1708 {
1709 - u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
1710 - atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
1711 - u32 old = ACCESS_ONCE(*p_tstamp);
1712 - u32 now = (u32)jiffies;
1713 + u32 bucket, old, now = (u32)jiffies;
1714 + atomic_t *p_id;
1715 + u32 *p_tstamp;
1716 u32 delta = 0;
1717
1718 + bucket = hash & ip_idents_mask;
1719 + p_tstamp = ip_tstamps + bucket;
1720 + p_id = ip_idents + bucket;
1721 + old = ACCESS_ONCE(*p_tstamp);
1722 +
1723 if (old != now && cmpxchg(p_tstamp, old, now) == old)
1724 delta = prandom_u32_max(now - old);
1725
1726 @@ -2936,18 +2943,27 @@ struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
1727
1728 int __init ip_rt_init(void)
1729 {
1730 + void *idents_hash;
1731 int rc = 0;
1732 int cpu;
1733
1734 - ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
1735 - if (!ip_idents)
1736 - panic("IP: failed to allocate ip_idents\n");
1737 + /* For modern hosts, this will use 2 MB of memory */
1738 + idents_hash = alloc_large_system_hash("IP idents",
1739 + sizeof(*ip_idents) + sizeof(*ip_tstamps),
1740 + 0,
1741 + 16, /* one bucket per 64 KB */
1742 + 0,
1743 + NULL,
1744 + &ip_idents_mask,
1745 + 2048,
1746 + 256*1024);
1747 +
1748 + ip_idents = idents_hash;
1749
1750 - prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
1751 + prandom_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
1752
1753 - ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
1754 - if (!ip_tstamps)
1755 - panic("IP: failed to allocate ip_tstamps\n");
1756 + ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
1757 + memset(ip_tstamps, 0, (ip_idents_mask + 1) * sizeof(*ip_tstamps));
1758
1759 for_each_possible_cpu(cpu) {
1760 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1761 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1762 index 18a1a4890c5f5..79249a44e4a3b 100644
1763 --- a/net/ipv4/udp.c
1764 +++ b/net/ipv4/udp.c
1765 @@ -1998,6 +1998,9 @@ void udp_destroy_sock(struct sock *sk)
1766 {
1767 struct udp_sock *up = udp_sk(sk);
1768 bool slow = lock_sock_fast(sk);
1769 +
1770 + /* protects from races with udp_abort() */
1771 + sock_set_flag(sk, SOCK_DEAD);
1772 udp_flush_pending_frames(sk);
1773 unlock_sock_fast(sk, slow);
1774 if (static_key_false(&udp_encap_needed) && up->encap_type) {
1775 @@ -2228,10 +2231,17 @@ int udp_abort(struct sock *sk, int err)
1776 {
1777 lock_sock(sk);
1778
1779 + /* udp{v6}_destroy_sock() sets it under the sk lock, avoid racing
1780 + * with close()
1781 + */
1782 + if (sock_flag(sk, SOCK_DEAD))
1783 + goto out;
1784 +
1785 sk->sk_err = err;
1786 sk->sk_error_report(sk);
1787 __udp_disconnect(sk, 0);
1788
1789 +out:
1790 release_sock(sk);
1791
1792 return 0;
1793 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1794 index 1ad84e18c03b7..3a876a2fdd82d 100644
1795 --- a/net/ipv6/udp.c
1796 +++ b/net/ipv6/udp.c
1797 @@ -1325,6 +1325,9 @@ void udpv6_destroy_sock(struct sock *sk)
1798 {
1799 struct udp_sock *up = udp_sk(sk);
1800 lock_sock(sk);
1801 +
1802 + /* protects from races with udp_abort() */
1803 + sock_set_flag(sk, SOCK_DEAD);
1804 udp_v6_flush_pending_frames(sk);
1805 release_sock(sk);
1806
1807 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
1808 index 21b35255ecc24..f5532a3ce72e1 100644
1809 --- a/net/mac80211/ieee80211_i.h
1810 +++ b/net/mac80211/ieee80211_i.h
1811 @@ -1391,7 +1391,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
1812 rcu_read_lock();
1813 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
1814
1815 - if (WARN_ON_ONCE(!chanctx_conf)) {
1816 + if (!chanctx_conf) {
1817 rcu_read_unlock();
1818 return NULL;
1819 }
1820 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
1821 index 721caa5a5430f..3a069cb188b72 100644
1822 --- a/net/mac80211/rx.c
1823 +++ b/net/mac80211/rx.c
1824 @@ -1988,17 +1988,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1825 sc = le16_to_cpu(hdr->seq_ctrl);
1826 frag = sc & IEEE80211_SCTL_FRAG;
1827
1828 - if (is_multicast_ether_addr(hdr->addr1)) {
1829 - I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
1830 - goto out_no_led;
1831 - }
1832 -
1833 if (rx->sta)
1834 cache = &rx->sta->frags;
1835
1836 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
1837 goto out;
1838
1839 + if (is_multicast_ether_addr(hdr->addr1))
1840 + return RX_DROP_MONITOR;
1841 +
1842 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1843
1844 if (skb_linearize(rx->skb))
1845 @@ -2127,7 +2125,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1846
1847 out:
1848 ieee80211_led_rx(rx->local);
1849 - out_no_led:
1850 if (rx->sta)
1851 rx->sta->rx_stats.packets++;
1852 return RX_CONTINUE;
1853 diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
1854 index c8a4a48bced98..8be604eb69616 100644
1855 --- a/net/netfilter/nf_synproxy_core.c
1856 +++ b/net/netfilter/nf_synproxy_core.c
1857 @@ -34,6 +34,9 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
1858 int length = (th->doff * 4) - sizeof(*th);
1859 u8 buf[40], *ptr;
1860
1861 + if (unlikely(length < 0))
1862 + return false;
1863 +
1864 ptr = skb_header_pointer(skb, doff + sizeof(*th), length, buf);
1865 if (ptr == NULL)
1866 return false;
1867 @@ -50,6 +53,8 @@ synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
1868 length--;
1869 continue;
1870 default:
1871 + if (length < 2)
1872 + return true;
1873 opsize = *ptr++;
1874 if (opsize < 2)
1875 return true;
1876 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1877 index b5b79f5015415..370d0a4af1f97 100644
1878 --- a/net/packet/af_packet.c
1879 +++ b/net/packet/af_packet.c
1880 @@ -2674,7 +2674,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
1881 }
1882 if (likely(saddr == NULL)) {
1883 dev = packet_cached_dev_get(po);
1884 - proto = po->num;
1885 + proto = READ_ONCE(po->num);
1886 } else {
1887 err = -EINVAL;
1888 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1889 @@ -2886,7 +2886,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1890
1891 if (likely(saddr == NULL)) {
1892 dev = packet_cached_dev_get(po);
1893 - proto = po->num;
1894 + proto = READ_ONCE(po->num);
1895 } else {
1896 err = -EINVAL;
1897 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
1898 @@ -3157,7 +3157,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
1899 /* prevents packet_notifier() from calling
1900 * register_prot_hook()
1901 */
1902 - po->num = 0;
1903 + WRITE_ONCE(po->num, 0);
1904 __unregister_prot_hook(sk, true);
1905 rcu_read_lock();
1906 dev_curr = po->prot_hook.dev;
1907 @@ -3167,17 +3167,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
1908 }
1909
1910 BUG_ON(po->running);
1911 - po->num = proto;
1912 + WRITE_ONCE(po->num, proto);
1913 po->prot_hook.type = proto;
1914
1915 if (unlikely(unlisted)) {
1916 dev_put(dev);
1917 po->prot_hook.dev = NULL;
1918 - po->ifindex = -1;
1919 + WRITE_ONCE(po->ifindex, -1);
1920 packet_cached_dev_reset(po);
1921 } else {
1922 po->prot_hook.dev = dev;
1923 - po->ifindex = dev ? dev->ifindex : 0;
1924 + WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
1925 packet_cached_dev_assign(po, dev);
1926 }
1927 }
1928 @@ -3492,7 +3492,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
1929 uaddr->sa_family = AF_PACKET;
1930 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
1931 rcu_read_lock();
1932 - dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
1933 + dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
1934 if (dev)
1935 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
1936 rcu_read_unlock();
1937 @@ -3508,16 +3508,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
1938 struct sock *sk = sock->sk;
1939 struct packet_sock *po = pkt_sk(sk);
1940 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
1941 + int ifindex;
1942
1943 if (peer)
1944 return -EOPNOTSUPP;
1945
1946 + ifindex = READ_ONCE(po->ifindex);
1947 sll->sll_family = AF_PACKET;
1948 - sll->sll_ifindex = po->ifindex;
1949 - sll->sll_protocol = po->num;
1950 + sll->sll_ifindex = ifindex;
1951 + sll->sll_protocol = READ_ONCE(po->num);
1952 sll->sll_pkttype = 0;
1953 rcu_read_lock();
1954 - dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
1955 + dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
1956 if (dev) {
1957 sll->sll_hatype = dev->type;
1958 sll->sll_halen = dev->addr_len;
1959 @@ -4097,7 +4099,7 @@ static int packet_notifier(struct notifier_block *this,
1960 }
1961 if (msg == NETDEV_UNREGISTER) {
1962 packet_cached_dev_reset(po);
1963 - po->ifindex = -1;
1964 + WRITE_ONCE(po->ifindex, -1);
1965 if (po->prot_hook.dev)
1966 dev_put(po->prot_hook.dev);
1967 po->prot_hook.dev = NULL;
1968 @@ -4400,7 +4402,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
1969 was_running = po->running;
1970 num = po->num;
1971 if (was_running) {
1972 - po->num = 0;
1973 + WRITE_ONCE(po->num, 0);
1974 __unregister_prot_hook(sk, false);
1975 }
1976 spin_unlock(&po->bind_lock);
1977 @@ -4433,7 +4435,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
1978
1979 spin_lock(&po->bind_lock);
1980 if (was_running) {
1981 - po->num = num;
1982 + WRITE_ONCE(po->num, num);
1983 register_prot_hook(sk);
1984 }
1985 spin_unlock(&po->bind_lock);
1986 @@ -4602,8 +4604,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
1987 s,
1988 atomic_read(&s->sk_refcnt),
1989 s->sk_type,
1990 - ntohs(po->num),
1991 - po->ifindex,
1992 + ntohs(READ_ONCE(po->num)),
1993 + READ_ONCE(po->ifindex),
1994 po->running,
1995 atomic_read(&s->sk_rmem_alloc),
1996 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
1997 diff --git a/net/rds/recv.c b/net/rds/recv.c
1998 index 488a198be3e1f..4bd307e31b404 100644
1999 --- a/net/rds/recv.c
2000 +++ b/net/rds/recv.c
2001 @@ -596,7 +596,7 @@ int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
2002
2003 if (rds_cmsg_recv(inc, msg, rs)) {
2004 ret = -EFAULT;
2005 - goto out;
2006 + break;
2007 }
2008
2009 rds_stats_inc(s_recv_delivered);
2010 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
2011 index bcd6ed6e7e25c..8bbaa35937dd9 100644
2012 --- a/net/unix/af_unix.c
2013 +++ b/net/unix/af_unix.c
2014 @@ -534,12 +534,14 @@ static void unix_release_sock(struct sock *sk, int embrion)
2015 u->path.mnt = NULL;
2016 state = sk->sk_state;
2017 sk->sk_state = TCP_CLOSE;
2018 +
2019 + skpair = unix_peer(sk);
2020 + unix_peer(sk) = NULL;
2021 +
2022 unix_state_unlock(sk);
2023
2024 wake_up_interruptible_all(&u->peer_wait);
2025
2026 - skpair = unix_peer(sk);
2027 -
2028 if (skpair != NULL) {
2029 if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
2030 unix_state_lock(skpair);
2031 @@ -554,7 +556,6 @@ static void unix_release_sock(struct sock *sk, int embrion)
2032
2033 unix_dgram_peer_wake_disconnect(sk, skpair);
2034 sock_put(skpair); /* It may now die */
2035 - unix_peer(sk) = NULL;
2036 }
2037
2038 /* Try to flush out this socket. Throw out buffers at least */
2039 diff --git a/net/wireless/util.c b/net/wireless/util.c
2040 index 939320571d71f..a16e805c4857f 100644
2041 --- a/net/wireless/util.c
2042 +++ b/net/wireless/util.c
2043 @@ -1050,6 +1050,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
2044 case NL80211_IFTYPE_MESH_POINT:
2045 /* mesh should be handled? */
2046 break;
2047 + case NL80211_IFTYPE_OCB:
2048 + cfg80211_leave_ocb(rdev, dev);
2049 + break;
2050 default:
2051 break;
2052 }
2053 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
2054 index 9c3fbf4553cc7..c23c04d38a82e 100644
2055 --- a/net/x25/af_x25.c
2056 +++ b/net/x25/af_x25.c
2057 @@ -550,7 +550,7 @@ static int x25_create(struct net *net, struct socket *sock, int protocol,
2058 if (protocol)
2059 goto out;
2060
2061 - rc = -ENOBUFS;
2062 + rc = -ENOMEM;
2063 if ((sk = x25_alloc_socket(net, kern)) == NULL)
2064 goto out;
2065