Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.16/0108-4.16.9-all-fixes.patch

Revision 3112
Wed May 16 14:24:46 2018 UTC by niro
File size: 82642 bytes
-linux-4.16.9
1 diff --git a/Makefile b/Makefile
2 index 5da6ffd69209..ea3cb221d7c5 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 16
9 -SUBLEVEL = 8
10 +SUBLEVEL = 9
11 EXTRAVERSION =
12 NAME = Fearless Coyote
13
14 diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
15 index e08c0c193767..f8939e82249b 100644
16 --- a/arch/arm/boot/dts/imx35.dtsi
17 +++ b/arch/arm/boot/dts/imx35.dtsi
18 @@ -303,7 +303,7 @@
19 };
20
21 can1: can@53fe4000 {
22 - compatible = "fsl,imx35-flexcan";
23 + compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
24 reg = <0x53fe4000 0x1000>;
25 clocks = <&clks 33>, <&clks 33>;
26 clock-names = "ipg", "per";
27 @@ -312,7 +312,7 @@
28 };
29
30 can2: can@53fe8000 {
31 - compatible = "fsl,imx35-flexcan";
32 + compatible = "fsl,imx35-flexcan", "fsl,imx25-flexcan";
33 reg = <0x53fe8000 0x1000>;
34 clocks = <&clks 34>, <&clks 34>;
35 clock-names = "ipg", "per";
36 diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
37 index 1040251f2951..f333c1e40d6c 100644
38 --- a/arch/arm/boot/dts/imx53.dtsi
39 +++ b/arch/arm/boot/dts/imx53.dtsi
40 @@ -551,7 +551,7 @@
41 };
42
43 can1: can@53fc8000 {
44 - compatible = "fsl,imx53-flexcan";
45 + compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
46 reg = <0x53fc8000 0x4000>;
47 interrupts = <82>;
48 clocks = <&clks IMX5_CLK_CAN1_IPG_GATE>,
49 @@ -561,7 +561,7 @@
50 };
51
52 can2: can@53fcc000 {
53 - compatible = "fsl,imx53-flexcan";
54 + compatible = "fsl,imx53-flexcan", "fsl,imx25-flexcan";
55 reg = <0x53fcc000 0x4000>;
56 interrupts = <83>;
57 clocks = <&clks IMX5_CLK_CAN2_IPG_GATE>,
58 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
59 index 88797c80b3e0..06086439b7bd 100644
60 --- a/arch/x86/events/core.c
61 +++ b/arch/x86/events/core.c
62 @@ -27,6 +27,7 @@
63 #include <linux/cpu.h>
64 #include <linux/bitops.h>
65 #include <linux/device.h>
66 +#include <linux/nospec.h>
67
68 #include <asm/apic.h>
69 #include <asm/stacktrace.h>
70 @@ -304,17 +305,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
71
72 config = attr->config;
73
74 - cache_type = (config >> 0) & 0xff;
75 + cache_type = (config >> 0) & 0xff;
76 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
77 return -EINVAL;
78 + cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
79
80 cache_op = (config >> 8) & 0xff;
81 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
82 return -EINVAL;
83 + cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
84
85 cache_result = (config >> 16) & 0xff;
86 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
87 return -EINVAL;
88 + cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
89
90 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
91
92 @@ -421,6 +425,8 @@ int x86_setup_perfctr(struct perf_event *event)
93 if (attr->config >= x86_pmu.max_events)
94 return -EINVAL;
95
96 + attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
97 +
98 /*
99 * The generic map:
100 */
101 diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
102 index 72db0664a53d..357e82dc0e2a 100644
103 --- a/arch/x86/events/intel/cstate.c
104 +++ b/arch/x86/events/intel/cstate.c
105 @@ -91,6 +91,7 @@
106 #include <linux/module.h>
107 #include <linux/slab.h>
108 #include <linux/perf_event.h>
109 +#include <linux/nospec.h>
110 #include <asm/cpu_device_id.h>
111 #include <asm/intel-family.h>
112 #include "../perf_event.h"
113 @@ -301,6 +302,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
114 } else if (event->pmu == &cstate_pkg_pmu) {
115 if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
116 return -EINVAL;
117 + cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
118 if (!pkg_msr[cfg].attr)
119 return -EINVAL;
120 event->hw.event_base = pkg_msr[cfg].msr;
121 diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
122 index 18e2628e2d8f..a8aae89dee7f 100644
123 --- a/arch/x86/events/msr.c
124 +++ b/arch/x86/events/msr.c
125 @@ -1,5 +1,6 @@
126 // SPDX-License-Identifier: GPL-2.0
127 #include <linux/perf_event.h>
128 +#include <linux/nospec.h>
129 #include <asm/intel-family.h>
130
131 enum perf_msr_id {
132 @@ -158,9 +159,6 @@ static int msr_event_init(struct perf_event *event)
133 if (event->attr.type != event->pmu->type)
134 return -ENOENT;
135
136 - if (cfg >= PERF_MSR_EVENT_MAX)
137 - return -EINVAL;
138 -
139 /* unsupported modes and filters */
140 if (event->attr.exclude_user ||
141 event->attr.exclude_kernel ||
142 @@ -171,6 +169,11 @@ static int msr_event_init(struct perf_event *event)
143 event->attr.sample_period) /* no sampling */
144 return -EINVAL;
145
146 + if (cfg >= PERF_MSR_EVENT_MAX)
147 + return -EINVAL;
148 +
149 + cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
150 +
151 if (!msr[cfg].attr)
152 return -EINVAL;
153
154 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
155 index c49766b03165..7846c0c20cfe 100644
156 --- a/crypto/af_alg.c
157 +++ b/crypto/af_alg.c
158 @@ -158,16 +158,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
159 void *private;
160 int err;
161
162 - /* If caller uses non-allowed flag, return error. */
163 - if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
164 - return -EINVAL;
165 -
166 if (sock->state == SS_CONNECTED)
167 return -EINVAL;
168
169 if (addr_len < sizeof(*sa))
170 return -EINVAL;
171
172 + /* If caller uses non-allowed flag, return error. */
173 + if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
174 + return -EINVAL;
175 +
176 sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
177 sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
178
179 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
180 index 7431ccd03316..0df21f046fc6 100644
181 --- a/drivers/ata/libata-core.c
182 +++ b/drivers/ata/libata-core.c
183 @@ -4549,6 +4549,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
184 ATA_HORKAGE_ZERO_AFTER_TRIM |
185 ATA_HORKAGE_NOLPM, },
186
187 + /* Sandisk devices which are known to not handle LPM well */
188 + { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
189 +
190 /* devices that don't properly handle queued TRIM commands */
191 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
192 ATA_HORKAGE_ZERO_AFTER_TRIM, },
193 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
194 index 1ef67db03c8e..9c9a22958717 100644
195 --- a/drivers/atm/zatm.c
196 +++ b/drivers/atm/zatm.c
197 @@ -28,6 +28,7 @@
198 #include <asm/io.h>
199 #include <linux/atomic.h>
200 #include <linux/uaccess.h>
201 +#include <linux/nospec.h>
202
203 #include "uPD98401.h"
204 #include "uPD98402.h"
205 @@ -1458,6 +1459,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
206 return -EFAULT;
207 if (pool < 0 || pool > ZATM_LAST_POOL)
208 return -EINVAL;
209 + pool = array_index_nospec(pool,
210 + ZATM_LAST_POOL + 1);
211 spin_lock_irqsave(&zatm_dev->lock, flags);
212 info = zatm_dev->pool_info[pool];
213 if (cmd == ZATM_GETPOOLZ) {
214 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
215 index 366a49c7c08f..dcb982e3a41f 100644
216 --- a/drivers/bluetooth/btusb.c
217 +++ b/drivers/bluetooth/btusb.c
218 @@ -231,6 +231,7 @@ static const struct usb_device_id blacklist_table[] = {
219 { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
220 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
221 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
222 + { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
223 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
224 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
225 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
226 @@ -263,7 +264,6 @@ static const struct usb_device_id blacklist_table[] = {
227 { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
228
229 /* QCA ROME chipset */
230 - { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
231 { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
232 { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME },
233 { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME },
234 @@ -392,6 +392,13 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
235 DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"),
236 },
237 },
238 + {
239 + /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */
240 + .matches = {
241 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
242 + DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"),
243 + },
244 + },
245 {}
246 };
247
248 @@ -2839,6 +2846,12 @@ static int btusb_config_oob_wake(struct hci_dev *hdev)
249 }
250 #endif
251
252 +static void btusb_check_needs_reset_resume(struct usb_interface *intf)
253 +{
254 + if (dmi_check_system(btusb_needs_reset_resume_table))
255 + interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
256 +}
257 +
258 static int btusb_probe(struct usb_interface *intf,
259 const struct usb_device_id *id)
260 {
261 @@ -2961,9 +2974,6 @@ static int btusb_probe(struct usb_interface *intf,
262 hdev->send = btusb_send_frame;
263 hdev->notify = btusb_notify;
264
265 - if (dmi_check_system(btusb_needs_reset_resume_table))
266 - interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
267 -
268 #ifdef CONFIG_PM
269 err = btusb_config_oob_wake(hdev);
270 if (err)
271 @@ -3050,6 +3060,7 @@ static int btusb_probe(struct usb_interface *intf,
272 if (id->driver_info & BTUSB_QCA_ROME) {
273 data->setup_on_usb = btusb_setup_qca;
274 hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
275 + btusb_check_needs_reset_resume(intf);
276 }
277
278 #ifdef CONFIG_BT_HCIBTUSB_RTL
279 diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
280 index d9b43bfc2532..b799a21463d9 100644
281 --- a/drivers/clk/ti/clock.h
282 +++ b/drivers/clk/ti/clock.h
283 @@ -74,6 +74,11 @@ enum {
284 #define CLKF_CORE (1 << 9)
285 #define CLKF_J_TYPE (1 << 10)
286
287 +/* CLKCTRL flags */
288 +#define CLKF_SW_SUP BIT(5)
289 +#define CLKF_HW_SUP BIT(6)
290 +#define CLKF_NO_IDLEST BIT(7)
291 +
292 #define CLK(dev, con, ck) \
293 { \
294 .lk = { \
295 @@ -183,10 +188,6 @@ extern const struct omap_clkctrl_data am438x_clkctrl_data[];
296 extern const struct omap_clkctrl_data dm814_clkctrl_data[];
297 extern const struct omap_clkctrl_data dm816_clkctrl_data[];
298
299 -#define CLKF_SW_SUP BIT(0)
300 -#define CLKF_HW_SUP BIT(1)
301 -#define CLKF_NO_IDLEST BIT(2)
302 -
303 typedef void (*ti_of_clk_init_cb_t)(void *, struct device_node *);
304
305 struct clk *ti_clk_register(struct device *dev, struct clk_hw *hw,
306 diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c
307 index 77e485557498..6f693b7d5220 100644
308 --- a/drivers/gpio/gpio-aspeed.c
309 +++ b/drivers/gpio/gpio-aspeed.c
310 @@ -384,7 +384,7 @@ static void aspeed_gpio_irq_set_mask(struct irq_data *d, bool set)
311 if (set)
312 reg |= bit;
313 else
314 - reg &= bit;
315 + reg &= ~bit;
316 iowrite32(reg, addr);
317
318 spin_unlock_irqrestore(&gpio->lock, flags);
319 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
320 index d66de67ef307..2d18b598c011 100644
321 --- a/drivers/gpio/gpiolib.c
322 +++ b/drivers/gpio/gpiolib.c
323 @@ -446,7 +446,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
324 struct gpiohandle_request handlereq;
325 struct linehandle_state *lh;
326 struct file *file;
327 - int fd, i, ret;
328 + int fd, i, count = 0, ret;
329 u32 lflags;
330
331 if (copy_from_user(&handlereq, ip, sizeof(handlereq)))
332 @@ -507,6 +507,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
333 if (ret)
334 goto out_free_descs;
335 lh->descs[i] = desc;
336 + count = i;
337
338 if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
339 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
340 @@ -577,7 +578,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
341 out_put_unused_fd:
342 put_unused_fd(fd);
343 out_free_descs:
344 - for (; i >= 0; i--)
345 + for (i = 0; i < count; i++)
346 gpiod_free(lh->descs[i]);
347 kfree(lh->label);
348 out_free_lh:
349 @@ -851,7 +852,7 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
350 desc = &gdev->descs[offset];
351 ret = gpiod_request(desc, le->label);
352 if (ret)
353 - goto out_free_desc;
354 + goto out_free_label;
355 le->desc = desc;
356 le->eflags = eflags;
357
358 diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
359 index b76d49218cf1..40549f6824ff 100644
360 --- a/drivers/gpu/drm/drm_atomic.c
361 +++ b/drivers/gpu/drm/drm_atomic.c
362 @@ -155,6 +155,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
363 state->connectors[i].state);
364 state->connectors[i].ptr = NULL;
365 state->connectors[i].state = NULL;
366 + state->connectors[i].old_state = NULL;
367 + state->connectors[i].new_state = NULL;
368 drm_connector_put(connector);
369 }
370
371 @@ -169,6 +171,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
372
373 state->crtcs[i].ptr = NULL;
374 state->crtcs[i].state = NULL;
375 + state->crtcs[i].old_state = NULL;
376 + state->crtcs[i].new_state = NULL;
377 }
378
379 for (i = 0; i < config->num_total_plane; i++) {
380 @@ -181,6 +185,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
381 state->planes[i].state);
382 state->planes[i].ptr = NULL;
383 state->planes[i].state = NULL;
384 + state->planes[i].old_state = NULL;
385 + state->planes[i].new_state = NULL;
386 }
387
388 for (i = 0; i < state->num_private_objs; i++) {
389 @@ -190,6 +196,8 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
390 state->private_objs[i].state);
391 state->private_objs[i].ptr = NULL;
392 state->private_objs[i].state = NULL;
393 + state->private_objs[i].old_state = NULL;
394 + state->private_objs[i].new_state = NULL;
395 }
396 state->num_private_objs = 0;
397
398 diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
399 index fd58647fbff3..6c76c7534c49 100644
400 --- a/drivers/gpu/drm/i915/intel_cdclk.c
401 +++ b/drivers/gpu/drm/i915/intel_cdclk.c
402 @@ -2108,9 +2108,44 @@ static int bdw_modeset_calc_cdclk(struct drm_atomic_state *state)
403 return 0;
404 }
405
406 +static int skl_dpll0_vco(struct intel_atomic_state *intel_state)
407 +{
408 + struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
409 + struct intel_crtc *crtc;
410 + struct intel_crtc_state *crtc_state;
411 + int vco, i;
412 +
413 + vco = intel_state->cdclk.logical.vco;
414 + if (!vco)
415 + vco = dev_priv->skl_preferred_vco_freq;
416 +
417 + for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
418 + if (!crtc_state->base.enable)
419 + continue;
420 +
421 + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
422 + continue;
423 +
424 + /*
425 + * DPLL0 VCO may need to be adjusted to get the correct
426 + * clock for eDP. This will affect cdclk as well.
427 + */
428 + switch (crtc_state->port_clock / 2) {
429 + case 108000:
430 + case 216000:
431 + vco = 8640000;
432 + break;
433 + default:
434 + vco = 8100000;
435 + break;
436 + }
437 + }
438 +
439 + return vco;
440 +}
441 +
442 static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
443 {
444 - struct drm_i915_private *dev_priv = to_i915(state->dev);
445 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
446 int min_cdclk, cdclk, vco;
447
448 @@ -2118,9 +2153,7 @@ static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
449 if (min_cdclk < 0)
450 return min_cdclk;
451
452 - vco = intel_state->cdclk.logical.vco;
453 - if (!vco)
454 - vco = dev_priv->skl_preferred_vco_freq;
455 + vco = skl_dpll0_vco(intel_state);
456
457 /*
458 * FIXME should also account for plane ratio
459 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
460 index a29868cd30c7..79521da5d11d 100644
461 --- a/drivers/gpu/drm/i915/intel_dp.c
462 +++ b/drivers/gpu/drm/i915/intel_dp.c
463 @@ -1794,26 +1794,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
464 reduce_m_n);
465 }
466
467 - /*
468 - * DPLL0 VCO may need to be adjusted to get the correct
469 - * clock for eDP. This will affect cdclk as well.
470 - */
471 - if (intel_dp_is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
472 - int vco;
473 -
474 - switch (pipe_config->port_clock / 2) {
475 - case 108000:
476 - case 216000:
477 - vco = 8640000;
478 - break;
479 - default:
480 - vco = 8100000;
481 - break;
482 - }
483 -
484 - to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
485 - }
486 -
487 if (!HAS_DDI(dev_priv))
488 intel_dp_set_clock(encoder, pipe_config);
489
490 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
491 index ef80499113ee..7ed6f7b69556 100644
492 --- a/drivers/gpu/drm/i915/intel_lvds.c
493 +++ b/drivers/gpu/drm/i915/intel_lvds.c
494 @@ -319,7 +319,8 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
495
496 I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
497 POSTING_READ(lvds_encoder->reg);
498 - if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 1000))
499 +
500 + if (intel_wait_for_register(dev_priv, PP_STATUS(0), PP_ON, PP_ON, 5000))
501 DRM_ERROR("timed out waiting for panel to power on\n");
502
503 intel_panel_enable_backlight(pipe_config, conn_state);
504 diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
505 index 80fa68d54bd3..2e8c95ce1a5a 100644
506 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c
507 +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
508 @@ -214,7 +214,6 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
509 INIT_LIST_HEAD(&nvbo->entry);
510 INIT_LIST_HEAD(&nvbo->vma_list);
511 nvbo->bo.bdev = &drm->ttm.bdev;
512 - nvbo->cli = cli;
513
514 /* This is confusing, and doesn't actually mean we want an uncached
515 * mapping, but is what NOUVEAU_GEM_DOMAIN_COHERENT gets translated
516 diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
517 index be8e00b49cde..73c48440d4d7 100644
518 --- a/drivers/gpu/drm/nouveau/nouveau_bo.h
519 +++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
520 @@ -26,8 +26,6 @@ struct nouveau_bo {
521
522 struct list_head vma_list;
523
524 - struct nouveau_cli *cli;
525 -
526 unsigned contig:1;
527 unsigned page:5;
528 unsigned kind:8;
529 diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
530 index dff51a0ee028..8c093ca4222e 100644
531 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
532 +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
533 @@ -63,7 +63,7 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
534 struct ttm_mem_reg *reg)
535 {
536 struct nouveau_bo *nvbo = nouveau_bo(bo);
537 - struct nouveau_drm *drm = nvbo->cli->drm;
538 + struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
539 struct nouveau_mem *mem;
540 int ret;
541
542 @@ -103,7 +103,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
543 struct ttm_mem_reg *reg)
544 {
545 struct nouveau_bo *nvbo = nouveau_bo(bo);
546 - struct nouveau_drm *drm = nvbo->cli->drm;
547 + struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
548 struct nouveau_mem *mem;
549 int ret;
550
551 @@ -131,7 +131,7 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
552 struct ttm_mem_reg *reg)
553 {
554 struct nouveau_bo *nvbo = nouveau_bo(bo);
555 - struct nouveau_drm *drm = nvbo->cli->drm;
556 + struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
557 struct nouveau_mem *mem;
558 int ret;
559
560 diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
561 index caddce88d2d8..0451d80672a5 100644
562 --- a/drivers/gpu/drm/nouveau/nv50_display.c
563 +++ b/drivers/gpu/drm/nouveau/nv50_display.c
564 @@ -3272,10 +3272,11 @@ nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
565
566 drm_connector_unregister(&mstc->connector);
567
568 - drm_modeset_lock_all(drm->dev);
569 drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
570 +
571 + drm_modeset_lock(&drm->dev->mode_config.connection_mutex, NULL);
572 mstc->port = NULL;
573 - drm_modeset_unlock_all(drm->dev);
574 + drm_modeset_unlock(&drm->dev->mode_config.connection_mutex);
575
576 drm_connector_unreference(&mstc->connector);
577 }
578 @@ -3285,9 +3286,7 @@ nv50_mstm_register_connector(struct drm_connector *connector)
579 {
580 struct nouveau_drm *drm = nouveau_drm(connector->dev);
581
582 - drm_modeset_lock_all(drm->dev);
583 drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
584 - drm_modeset_unlock_all(drm->dev);
585
586 drm_connector_register(connector);
587 }
588 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
589 index 2b12c55a3bff..28311caf1e47 100644
590 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
591 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
592 @@ -904,7 +904,8 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
593 while (npages >= HPAGE_PMD_NR) {
594 gfp_t huge_flags = gfp_flags;
595
596 - huge_flags |= GFP_TRANSHUGE;
597 + huge_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
598 + __GFP_KSWAPD_RECLAIM;
599 huge_flags &= ~__GFP_MOVABLE;
600 huge_flags &= ~__GFP_COMP;
601 p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
602 @@ -1021,11 +1022,15 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
603 GFP_USER | GFP_DMA32, "uc dma", 0);
604
605 ttm_page_pool_init_locked(&_manager->wc_pool_huge,
606 - GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
607 + (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
608 + __GFP_KSWAPD_RECLAIM) &
609 + ~(__GFP_MOVABLE | __GFP_COMP),
610 "wc huge", order);
611
612 ttm_page_pool_init_locked(&_manager->uc_pool_huge,
613 - GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
614 + (GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
615 + __GFP_KSWAPD_RECLAIM) &
616 + ~(__GFP_MOVABLE | __GFP_COMP)
617 , "uc huge", order);
618
619 _manager->options.max_size = max_pages;
620 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
621 index a88051552ace..323cadaeb7d1 100644
622 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
623 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
624 @@ -915,7 +915,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
625 gfp_flags |= __GFP_ZERO;
626
627 if (huge) {
628 - gfp_flags |= GFP_TRANSHUGE;
629 + gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
630 + __GFP_KSWAPD_RECLAIM;
631 gfp_flags &= ~__GFP_MOVABLE;
632 gfp_flags &= ~__GFP_COMP;
633 }
634 diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
635 index 515f97997624..92bd12d3fe2b 100644
636 --- a/drivers/gpu/drm/vc4/vc4_plane.c
637 +++ b/drivers/gpu/drm/vc4/vc4_plane.c
638 @@ -557,7 +557,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane,
639 * the scl fields here.
640 */
641 if (num_planes == 1) {
642 - scl0 = vc4_get_scl_field(state, 1);
643 + scl0 = vc4_get_scl_field(state, 0);
644 scl1 = scl0;
645 } else {
646 scl0 = vc4_get_scl_field(state, 1);
647 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
648 index 036a03f0d0a6..1667b6e7674f 100644
649 --- a/drivers/i2c/i2c-dev.c
650 +++ b/drivers/i2c/i2c-dev.c
651 @@ -280,7 +280,7 @@ static noinline int i2cdev_ioctl_rdwr(struct i2c_client *client,
652 */
653 if (msgs[i].flags & I2C_M_RECV_LEN) {
654 if (!(msgs[i].flags & I2C_M_RD) ||
655 - msgs[i].buf[0] < 1 ||
656 + msgs[i].len < 1 || msgs[i].buf[0] < 1 ||
657 msgs[i].len < msgs[i].buf[0] +
658 I2C_SMBUS_BLOCK_MAX) {
659 res = -EINVAL;
660 diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
661 index 46d7c8749222..945f9501b642 100644
662 --- a/drivers/md/dm-integrity.c
663 +++ b/drivers/md/dm-integrity.c
664 @@ -2440,7 +2440,7 @@ static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, str
665 unsigned i;
666 for (i = 0; i < ic->journal_sections; i++)
667 kvfree(sl[i]);
668 - kfree(sl);
669 + kvfree(sl);
670 }
671
672 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
673 diff --git a/drivers/mtd/nand/marvell_nand.c b/drivers/mtd/nand/marvell_nand.c
674 index 795f868fe1f7..f978c06fbd7d 100644
675 --- a/drivers/mtd/nand/marvell_nand.c
676 +++ b/drivers/mtd/nand/marvell_nand.c
677 @@ -1070,7 +1070,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip,
678 return ret;
679
680 ret = marvell_nfc_wait_op(chip,
681 - chip->data_interface.timings.sdr.tPROG_max);
682 + PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
683 return ret;
684 }
685
686 @@ -1404,6 +1404,7 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
687 struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip);
688 struct marvell_nfc *nfc = to_marvell_nfc(chip->controller);
689 const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout;
690 + u32 xtype;
691 int ret;
692 struct marvell_nfc_op nfc_op = {
693 .ndcb[0] = NDCB0_CMD_TYPE(TYPE_WRITE) | NDCB0_LEN_OVRD,
694 @@ -1419,7 +1420,12 @@ marvell_nfc_hw_ecc_bch_write_chunk(struct nand_chip *chip, int chunk,
695 * last naked write.
696 */
697 if (chunk == 0) {
698 - nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(XTYPE_WRITE_DISPATCH) |
699 + if (lt->nchunks == 1)
700 + xtype = XTYPE_MONOLITHIC_RW;
701 + else
702 + xtype = XTYPE_WRITE_DISPATCH;
703 +
704 + nfc_op.ndcb[0] |= NDCB0_CMD_XTYPE(xtype) |
705 NDCB0_ADDR_CYC(marvell_nand->addr_cyc) |
706 NDCB0_CMD1(NAND_CMD_SEQIN);
707 nfc_op.ndcb[1] |= NDCB1_ADDRS_PAGE(page);
708 @@ -1490,7 +1496,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct mtd_info *mtd,
709 }
710
711 ret = marvell_nfc_wait_op(chip,
712 - chip->data_interface.timings.sdr.tPROG_max);
713 + PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max));
714
715 marvell_nfc_disable_hw_ecc(chip);
716
717 diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
718 index 634c51e6b8ae..d53a45bf2a72 100644
719 --- a/drivers/net/can/flexcan.c
720 +++ b/drivers/net/can/flexcan.c
721 @@ -200,6 +200,7 @@
722 #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */
723 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */
724 #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */
725 +#define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN BIT(7) /* default to BE register access */
726
727 /* Structure of the message buffer */
728 struct flexcan_mb {
729 @@ -287,6 +288,12 @@ struct flexcan_priv {
730 };
731
732 static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
733 + .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
734 + FLEXCAN_QUIRK_BROKEN_PERR_STATE |
735 + FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN,
736 +};
737 +
738 +static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
739 .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
740 FLEXCAN_QUIRK_BROKEN_PERR_STATE,
741 };
742 @@ -1251,9 +1258,9 @@ static void unregister_flexcandev(struct net_device *dev)
743 static const struct of_device_id flexcan_of_match[] = {
744 { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
745 { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
746 - { .compatible = "fsl,imx53-flexcan", .data = &fsl_p1010_devtype_data, },
747 - { .compatible = "fsl,imx35-flexcan", .data = &fsl_p1010_devtype_data, },
748 - { .compatible = "fsl,imx25-flexcan", .data = &fsl_p1010_devtype_data, },
749 + { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, },
750 + { .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, },
751 + { .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, },
752 { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
753 { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
754 { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, },
755 @@ -1337,18 +1344,13 @@ static int flexcan_probe(struct platform_device *pdev)
756
757 priv = netdev_priv(dev);
758
759 - if (of_property_read_bool(pdev->dev.of_node, "big-endian")) {
760 + if (of_property_read_bool(pdev->dev.of_node, "big-endian") ||
761 + devtype_data->quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) {
762 priv->read = flexcan_read_be;
763 priv->write = flexcan_write_be;
764 } else {
765 - if (of_device_is_compatible(pdev->dev.of_node,
766 - "fsl,p1010-flexcan")) {
767 - priv->read = flexcan_read_be;
768 - priv->write = flexcan_write_be;
769 - } else {
770 - priv->read = flexcan_read_le;
771 - priv->write = flexcan_write_le;
772 - }
773 + priv->read = flexcan_read_le;
774 + priv->write = flexcan_write_le;
775 }
776
777 priv->can.clock.freq = clock_freq;
778 diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c
779 index 5590c559a8ca..53e320c92a8b 100644
780 --- a/drivers/net/can/spi/hi311x.c
781 +++ b/drivers/net/can/spi/hi311x.c
782 @@ -91,6 +91,7 @@
783 #define HI3110_STAT_BUSOFF BIT(2)
784 #define HI3110_STAT_ERRP BIT(3)
785 #define HI3110_STAT_ERRW BIT(4)
786 +#define HI3110_STAT_TXMTY BIT(7)
787
788 #define HI3110_BTR0_SJW_SHIFT 6
789 #define HI3110_BTR0_BRP_SHIFT 0
790 @@ -427,8 +428,10 @@ static int hi3110_get_berr_counter(const struct net_device *net,
791 struct hi3110_priv *priv = netdev_priv(net);
792 struct spi_device *spi = priv->spi;
793
794 + mutex_lock(&priv->hi3110_lock);
795 bec->txerr = hi3110_read(spi, HI3110_READ_TEC);
796 bec->rxerr = hi3110_read(spi, HI3110_READ_REC);
797 + mutex_unlock(&priv->hi3110_lock);
798
799 return 0;
800 }
801 @@ -735,10 +738,7 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
802 }
803 }
804
805 - if (intf == 0)
806 - break;
807 -
808 - if (intf & HI3110_INT_TXCPLT) {
809 + if (priv->tx_len && statf & HI3110_STAT_TXMTY) {
810 net->stats.tx_packets++;
811 net->stats.tx_bytes += priv->tx_len - 1;
812 can_led_event(net, CAN_LED_EVENT_TX);
813 @@ -748,6 +748,9 @@ static irqreturn_t hi3110_can_ist(int irq, void *dev_id)
814 }
815 netif_wake_queue(net);
816 }
817 +
818 + if (intf == 0)
819 + break;
820 }
821 mutex_unlock(&priv->hi3110_lock);
822 return IRQ_HANDLED;
823 diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
824 index 63587b8e6825..daed57d3d209 100644
825 --- a/drivers/net/can/usb/kvaser_usb.c
826 +++ b/drivers/net/can/usb/kvaser_usb.c
827 @@ -1179,7 +1179,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
828
829 skb = alloc_can_skb(priv->netdev, &cf);
830 if (!skb) {
831 - stats->tx_dropped++;
832 + stats->rx_dropped++;
833 return;
834 }
835
836 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
837 index 0b9e60861e53..f81773570dfd 100644
838 --- a/drivers/nvme/host/core.c
839 +++ b/drivers/nvme/host/core.c
840 @@ -122,7 +122,8 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
841 ret = nvme_reset_ctrl(ctrl);
842 if (!ret) {
843 flush_work(&ctrl->reset_work);
844 - if (ctrl->state != NVME_CTRL_LIVE)
845 + if (ctrl->state != NVME_CTRL_LIVE &&
846 + ctrl->state != NVME_CTRL_ADMIN_ONLY)
847 ret = -ENETRESET;
848 }
849
850 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
851 index d733b14ede9d..013380641ddf 100644
852 --- a/drivers/nvme/host/nvme.h
853 +++ b/drivers/nvme/host/nvme.h
854 @@ -83,6 +83,11 @@ enum nvme_quirks {
855 * Supports the LighNVM command set if indicated in vs[1].
856 */
857 NVME_QUIRK_LIGHTNVM = (1 << 6),
858 +
859 + /*
860 + * Set MEDIUM priority on SQ creation
861 + */
862 + NVME_QUIRK_MEDIUM_PRIO_SQ = (1 << 7),
863 };
864
865 /*
866 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
867 index b6f43b738f03..f6648610d153 100644
868 --- a/drivers/nvme/host/pci.c
869 +++ b/drivers/nvme/host/pci.c
870 @@ -1091,9 +1091,18 @@ static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
871 static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
872 struct nvme_queue *nvmeq)
873 {
874 + struct nvme_ctrl *ctrl = &dev->ctrl;
875 struct nvme_command c;
876 int flags = NVME_QUEUE_PHYS_CONTIG;
877
878 + /*
879 + * Some drives have a bug that auto-enables WRRU if MEDIUM isn't
880 + * set. Since URGENT priority is zeroes, it makes all queues
881 + * URGENT.
882 + */
883 + if (ctrl->quirks & NVME_QUIRK_MEDIUM_PRIO_SQ)
884 + flags |= NVME_SQ_PRIO_MEDIUM;
885 +
886 /*
887 * Note: we (ab)use the fact that the prp fields survive if no data
888 * is attached to the request.
889 @@ -2684,7 +2693,8 @@ static const struct pci_device_id nvme_id_table[] = {
890 .driver_data = NVME_QUIRK_STRIPE_SIZE |
891 NVME_QUIRK_DEALLOCATE_ZEROES, },
892 { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */
893 - .driver_data = NVME_QUIRK_NO_DEEPEST_PS },
894 + .driver_data = NVME_QUIRK_NO_DEEPEST_PS |
895 + NVME_QUIRK_MEDIUM_PRIO_SQ },
896 { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */
897 .driver_data = NVME_QUIRK_IDENTIFY_CNS, },
898 { PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
899 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
900 index f6a4dd10d9b0..4f98b26301cb 100644
901 --- a/drivers/pci/pci.c
902 +++ b/drivers/pci/pci.c
903 @@ -1897,7 +1897,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
904 EXPORT_SYMBOL(pci_pme_active);
905
906 /**
907 - * pci_enable_wake - enable PCI device as wakeup event source
908 + * __pci_enable_wake - enable PCI device as wakeup event source
909 * @dev: PCI device affected
910 * @state: PCI state from which device will issue wakeup events
911 * @enable: True to enable event generation; false to disable
912 @@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(pci_pme_active);
913 * Error code depending on the platform is returned if both the platform and
914 * the native mechanism fail to enable the generation of wake-up events
915 */
916 -int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
917 +static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
918 {
919 int ret = 0;
920
921 @@ -1956,6 +1956,23 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
922
923 return ret;
924 }
925 +
926 +/**
927 + * pci_enable_wake - change wakeup settings for a PCI device
928 + * @pci_dev: Target device
929 + * @state: PCI state from which device will issue wakeup events
930 + * @enable: Whether or not to enable event generation
931 + *
932 + * If @enable is set, check device_may_wakeup() for the device before calling
933 + * __pci_enable_wake() for it.
934 + */
935 +int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
936 +{
937 + if (enable && !device_may_wakeup(&pci_dev->dev))
938 + return -EINVAL;
939 +
940 + return __pci_enable_wake(pci_dev, state, enable);
941 +}
942 EXPORT_SYMBOL(pci_enable_wake);
943
944 /**
945 @@ -1968,9 +1985,9 @@ EXPORT_SYMBOL(pci_enable_wake);
946 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
947 * ordering constraints.
948 *
949 - * This function only returns error code if the device is not capable of
950 - * generating PME# from both D3_hot and D3_cold, and the platform is unable to
951 - * enable wake-up power for it.
952 + * This function only returns error code if the device is not allowed to wake
953 + * up the system from sleep or it is not capable of generating PME# from both
954 + * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
955 */
956 int pci_wake_from_d3(struct pci_dev *dev, bool enable)
957 {
958 @@ -2101,7 +2118,7 @@ int pci_finish_runtime_suspend(struct pci_dev *dev)
959
960 dev->runtime_d3cold = target_state == PCI_D3cold;
961
962 - pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
963 + __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
964
965 error = pci_set_power_state(dev, target_state);
966
967 @@ -2125,16 +2142,16 @@ bool pci_dev_run_wake(struct pci_dev *dev)
968 {
969 struct pci_bus *bus = dev->bus;
970
971 - if (device_can_wakeup(&dev->dev))
972 - return true;
973 -
974 if (!dev->pme_support)
975 return false;
976
977 /* PME-capable in principle, but not from the target power state */
978 - if (!pci_pme_capable(dev, pci_target_state(dev, false)))
979 + if (!pci_pme_capable(dev, pci_target_state(dev, true)))
980 return false;
981
982 + if (device_can_wakeup(&dev->dev))
983 + return true;
984 +
985 while (bus->parent) {
986 struct pci_dev *bridge = bus->self;
987
988 diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
989 index ed805c7c5ace..ac83f721db24 100644
990 --- a/drivers/thermal/samsung/exynos_tmu.c
991 +++ b/drivers/thermal/samsung/exynos_tmu.c
992 @@ -185,6 +185,7 @@
993 * @regulator: pointer to the TMU regulator structure.
994 * @reg_conf: pointer to structure to register with core thermal.
995 * @ntrip: number of supported trip points.
996 + * @enabled: current status of TMU device
997 * @tmu_initialize: SoC specific TMU initialization method
998 * @tmu_control: SoC specific TMU control method
999 * @tmu_read: SoC specific TMU temperature read method
1000 @@ -205,6 +206,7 @@ struct exynos_tmu_data {
1001 struct regulator *regulator;
1002 struct thermal_zone_device *tzd;
1003 unsigned int ntrip;
1004 + bool enabled;
1005
1006 int (*tmu_initialize)(struct platform_device *pdev);
1007 void (*tmu_control)(struct platform_device *pdev, bool on);
1008 @@ -398,6 +400,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
1009 mutex_lock(&data->lock);
1010 clk_enable(data->clk);
1011 data->tmu_control(pdev, on);
1012 + data->enabled = on;
1013 clk_disable(data->clk);
1014 mutex_unlock(&data->lock);
1015 }
1016 @@ -889,19 +892,24 @@ static void exynos7_tmu_control(struct platform_device *pdev, bool on)
1017 static int exynos_get_temp(void *p, int *temp)
1018 {
1019 struct exynos_tmu_data *data = p;
1020 + int value, ret = 0;
1021
1022 - if (!data || !data->tmu_read)
1023 + if (!data || !data->tmu_read || !data->enabled)
1024 return -EINVAL;
1025
1026 mutex_lock(&data->lock);
1027 clk_enable(data->clk);
1028
1029 - *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
1030 + value = data->tmu_read(data);
1031 + if (value < 0)
1032 + ret = value;
1033 + else
1034 + *temp = code_to_temp(data, value) * MCELSIUS;
1035
1036 clk_disable(data->clk);
1037 mutex_unlock(&data->lock);
1038
1039 - return 0;
1040 + return ret;
1041 }
1042
1043 #ifdef CONFIG_THERMAL_EMULATION
1044 diff --git a/fs/ceph/file.c b/fs/ceph/file.c
1045 index b67eec3532a1..4ce8de724c62 100644
1046 --- a/fs/ceph/file.c
1047 +++ b/fs/ceph/file.c
1048 @@ -878,6 +878,11 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1049 size_t start = 0;
1050 ssize_t len;
1051
1052 + if (write)
1053 + size = min_t(u64, size, fsc->mount_options->wsize);
1054 + else
1055 + size = min_t(u64, size, fsc->mount_options->rsize);
1056 +
1057 vino = ceph_vino(inode);
1058 req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1059 vino, pos, &size, 0,
1060 @@ -893,11 +898,6 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1061 break;
1062 }
1063
1064 - if (write)
1065 - size = min_t(u64, size, fsc->mount_options->wsize);
1066 - else
1067 - size = min_t(u64, size, fsc->mount_options->rsize);
1068 -
1069 len = size;
1070 pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
1071 if (IS_ERR(pages)) {
1072 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
1073 index f715609b13f3..5a5a0158cc8f 100644
1074 --- a/fs/cifs/cifsfs.c
1075 +++ b/fs/cifs/cifsfs.c
1076 @@ -1047,6 +1047,18 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
1077 return rc;
1078 }
1079
1080 +/*
1081 + * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1082 + * is a dummy operation.
1083 + */
1084 +static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1085 +{
1086 + cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1087 + file, datasync);
1088 +
1089 + return 0;
1090 +}
1091 +
1092 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1093 struct file *dst_file, loff_t destoff,
1094 size_t len, unsigned int flags)
1095 @@ -1181,6 +1193,7 @@ const struct file_operations cifs_dir_ops = {
1096 .copy_file_range = cifs_copy_file_range,
1097 .clone_file_range = cifs_clone_file_range,
1098 .llseek = generic_file_llseek,
1099 + .fsync = cifs_dir_fsync,
1100 };
1101
1102 static void
1103 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
1104 index 40c34a0ef58a..3abf4b6f3a3f 100644
1105 --- a/fs/fs-writeback.c
1106 +++ b/fs/fs-writeback.c
1107 @@ -1961,7 +1961,7 @@ void wb_workfn(struct work_struct *work)
1108 }
1109
1110 if (!list_empty(&wb->work_list))
1111 - mod_delayed_work(bdi_wq, &wb->dwork, 0);
1112 + wb_wakeup(wb);
1113 else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
1114 wb_wakeup_delayed(wb);
1115
1116 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
1117 index 66df387106de..a9e4f6c6339e 100644
1118 --- a/include/linux/bpf.h
1119 +++ b/include/linux/bpf.h
1120 @@ -335,8 +335,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1121 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
1122 struct bpf_prog *old_prog);
1123 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1124 - __u32 __user *prog_ids, u32 request_cnt,
1125 - __u32 __user *prog_cnt);
1126 + u32 *prog_ids, u32 request_cnt,
1127 + u32 *prog_cnt);
1128 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1129 struct bpf_prog *exclude_prog,
1130 struct bpf_prog *include_prog,
1131 diff --git a/include/linux/oom.h b/include/linux/oom.h
1132 index 5bad038ac012..6adac113e96d 100644
1133 --- a/include/linux/oom.h
1134 +++ b/include/linux/oom.h
1135 @@ -95,6 +95,8 @@ static inline int check_stable_address_space(struct mm_struct *mm)
1136 return 0;
1137 }
1138
1139 +void __oom_reap_task_mm(struct mm_struct *mm);
1140 +
1141 extern unsigned long oom_badness(struct task_struct *p,
1142 struct mem_cgroup *memcg, const nodemask_t *nodemask,
1143 unsigned long totalpages);
1144 diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
1145 index 61b39eaf7cad..612b82ca68b5 100644
1146 --- a/include/linux/wait_bit.h
1147 +++ b/include/linux/wait_bit.h
1148 @@ -262,4 +262,21 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode
1149 return out_of_line_wait_on_atomic_t(val, action, mode);
1150 }
1151
1152 +/**
1153 + * clear_and_wake_up_bit - clear a bit and wake up anyone waiting on that bit
1154 + *
1155 + * @bit: the bit of the word being waited on
1156 + * @word: the word being waited on, a kernel virtual address
1157 + *
1158 + * You can use this helper if bitflags are manipulated atomically rather than
1159 + * non-atomically under a lock.
1160 + */
1161 +static inline void clear_and_wake_up_bit(int bit, void *word)
1162 +{
1163 + clear_bit_unlock(bit, word);
1164 + /* See wake_up_bit() for which memory barrier you need to use. */
1165 + smp_mb__after_atomic();
1166 + wake_up_bit(word, bit);
1167 +}
1168 +
1169 #endif /* _LINUX_WAIT_BIT_H */
1170 diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
1171 index 899495589a7e..c7be1ca8e562 100644
1172 --- a/include/net/inet_timewait_sock.h
1173 +++ b/include/net/inet_timewait_sock.h
1174 @@ -43,6 +43,7 @@ struct inet_timewait_sock {
1175 #define tw_family __tw_common.skc_family
1176 #define tw_state __tw_common.skc_state
1177 #define tw_reuse __tw_common.skc_reuse
1178 +#define tw_reuseport __tw_common.skc_reuseport
1179 #define tw_ipv6only __tw_common.skc_ipv6only
1180 #define tw_bound_dev_if __tw_common.skc_bound_dev_if
1181 #define tw_node __tw_common.skc_nulls_node
1182 diff --git a/include/net/nexthop.h b/include/net/nexthop.h
1183 index 36bb794f5cd6..902ff382a6dc 100644
1184 --- a/include/net/nexthop.h
1185 +++ b/include/net/nexthop.h
1186 @@ -7,7 +7,7 @@
1187
1188 static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
1189 {
1190 - return remaining >= sizeof(*rtnh) &&
1191 + return remaining >= (int)sizeof(*rtnh) &&
1192 rtnh->rtnh_len >= sizeof(*rtnh) &&
1193 rtnh->rtnh_len <= remaining;
1194 }
1195 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
1196 index d315b393abdd..ba03ec39efb3 100644
1197 --- a/kernel/bpf/core.c
1198 +++ b/kernel/bpf/core.c
1199 @@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs)
1200 return cnt;
1201 }
1202
1203 +static bool bpf_prog_array_copy_core(struct bpf_prog **prog,
1204 + u32 *prog_ids,
1205 + u32 request_cnt)
1206 +{
1207 + int i = 0;
1208 +
1209 + for (; *prog; prog++) {
1210 + if (*prog == &dummy_bpf_prog.prog)
1211 + continue;
1212 + prog_ids[i] = (*prog)->aux->id;
1213 + if (++i == request_cnt) {
1214 + prog++;
1215 + break;
1216 + }
1217 + }
1218 +
1219 + return !!(*prog);
1220 +}
1221 +
1222 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1223 __u32 __user *prog_ids, u32 cnt)
1224 {
1225 struct bpf_prog **prog;
1226 unsigned long err = 0;
1227 - u32 i = 0, *ids;
1228 bool nospc;
1229 + u32 *ids;
1230
1231 /* users of this function are doing:
1232 * cnt = bpf_prog_array_length();
1233 @@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
1234 return -ENOMEM;
1235 rcu_read_lock();
1236 prog = rcu_dereference(progs)->progs;
1237 - for (; *prog; prog++) {
1238 - if (*prog == &dummy_bpf_prog.prog)
1239 - continue;
1240 - ids[i] = (*prog)->aux->id;
1241 - if (++i == cnt) {
1242 - prog++;
1243 - break;
1244 - }
1245 - }
1246 - nospc = !!(*prog);
1247 + nospc = bpf_prog_array_copy_core(prog, ids, cnt);
1248 rcu_read_unlock();
1249 err = copy_to_user(prog_ids, ids, cnt * sizeof(u32));
1250 kfree(ids);
1251 @@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
1252 }
1253
1254 int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
1255 - __u32 __user *prog_ids, u32 request_cnt,
1256 - __u32 __user *prog_cnt)
1257 + u32 *prog_ids, u32 request_cnt,
1258 + u32 *prog_cnt)
1259 {
1260 + struct bpf_prog **prog;
1261 u32 cnt = 0;
1262
1263 if (array)
1264 cnt = bpf_prog_array_length(array);
1265
1266 - if (copy_to_user(prog_cnt, &cnt, sizeof(cnt)))
1267 - return -EFAULT;
1268 + *prog_cnt = cnt;
1269
1270 /* return early if user requested only program count or nothing to copy */
1271 if (!request_cnt || !cnt)
1272 return 0;
1273
1274 - return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt);
1275 + /* this function is called under trace/bpf_trace.c: bpf_event_mutex */
1276 + prog = rcu_dereference_check(array, 1)->progs;
1277 + return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? -ENOSPC
1278 + : 0;
1279 }
1280
1281 static void bpf_prog_free_deferred(struct work_struct *work)
1282 diff --git a/kernel/compat.c b/kernel/compat.c
1283 index 3f5fa8902e7d..b3a9ea4aa8fd 100644
1284 --- a/kernel/compat.c
1285 +++ b/kernel/compat.c
1286 @@ -34,6 +34,7 @@ int compat_get_timex(struct timex *txc, const struct compat_timex __user *utp)
1287 {
1288 struct compat_timex tx32;
1289
1290 + memset(txc, 0, sizeof(struct timex));
1291 if (copy_from_user(&tx32, utp, sizeof(struct compat_timex)))
1292 return -EFAULT;
1293
1294 diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
1295 index 73cc26e321de..c187aa3df3c8 100644
1296 --- a/kernel/events/callchain.c
1297 +++ b/kernel/events/callchain.c
1298 @@ -131,14 +131,8 @@ int get_callchain_buffers(int event_max_stack)
1299 goto exit;
1300 }
1301
1302 - if (count > 1) {
1303 - /* If the allocation failed, give up */
1304 - if (!callchain_cpus_entries)
1305 - err = -ENOMEM;
1306 - goto exit;
1307 - }
1308 -
1309 - err = alloc_callchain_buffers();
1310 + if (count == 1)
1311 + err = alloc_callchain_buffers();
1312 exit:
1313 if (err)
1314 atomic_dec(&nr_callchain_events);
1315 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
1316 index 6c6b3c48db71..1d8ca9ea9979 100644
1317 --- a/kernel/events/ring_buffer.c
1318 +++ b/kernel/events/ring_buffer.c
1319 @@ -14,6 +14,7 @@
1320 #include <linux/slab.h>
1321 #include <linux/circ_buf.h>
1322 #include <linux/poll.h>
1323 +#include <linux/nospec.h>
1324
1325 #include "internal.h"
1326
1327 @@ -867,8 +868,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
1328 return NULL;
1329
1330 /* AUX space */
1331 - if (pgoff >= rb->aux_pgoff)
1332 - return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
1333 + if (pgoff >= rb->aux_pgoff) {
1334 + int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
1335 + return virt_to_page(rb->aux_pages[aux_pgoff]);
1336 + }
1337 }
1338
1339 return __perf_mmap_to_page(rb, pgoff);
1340 diff --git a/kernel/sched/autogroup.c b/kernel/sched/autogroup.c
1341 index bb4b9fe026a1..e3d1ba7e3a94 100644
1342 --- a/kernel/sched/autogroup.c
1343 +++ b/kernel/sched/autogroup.c
1344 @@ -4,6 +4,7 @@
1345 #include <linux/utsname.h>
1346 #include <linux/security.h>
1347 #include <linux/export.h>
1348 +#include <linux/nospec.h>
1349
1350 #include "sched.h"
1351
1352 @@ -212,7 +213,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
1353 static unsigned long next = INITIAL_JIFFIES;
1354 struct autogroup *ag;
1355 unsigned long shares;
1356 - int err;
1357 + int err, idx;
1358
1359 if (nice < MIN_NICE || nice > MAX_NICE)
1360 return -EINVAL;
1361 @@ -230,7 +231,9 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
1362
1363 next = HZ / 10 + jiffies;
1364 ag = autogroup_task_get(p);
1365 - shares = scale_load(sched_prio_to_weight[nice + 20]);
1366 +
1367 + idx = array_index_nospec(nice + 20, 40);
1368 + shares = scale_load(sched_prio_to_weight[idx]);
1369
1370 down_write(&ag->lock);
1371 err = sched_group_set_shares(ag->tg, shares);
1372 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
1373 index c94895bc5a2c..5f37ef9f6cd5 100644
1374 --- a/kernel/sched/core.c
1375 +++ b/kernel/sched/core.c
1376 @@ -23,6 +23,7 @@
1377 #include <linux/mmu_context.h>
1378 #include <linux/module.h>
1379 #include <linux/nmi.h>
1380 +#include <linux/nospec.h>
1381 #include <linux/prefetch.h>
1382 #include <linux/profile.h>
1383 #include <linux/security.h>
1384 @@ -6873,11 +6874,15 @@ static int cpu_weight_nice_write_s64(struct cgroup_subsys_state *css,
1385 struct cftype *cft, s64 nice)
1386 {
1387 unsigned long weight;
1388 + int idx;
1389
1390 if (nice < MIN_NICE || nice > MAX_NICE)
1391 return -ERANGE;
1392
1393 - weight = sched_prio_to_weight[NICE_TO_PRIO(nice) - MAX_RT_PRIO];
1394 + idx = NICE_TO_PRIO(nice) - MAX_RT_PRIO;
1395 + idx = array_index_nospec(idx, 40);
1396 + weight = sched_prio_to_weight[idx];
1397 +
1398 return sched_group_set_shares(css_tg(css), scale_load(weight));
1399 }
1400 #endif
1401 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
1402 index 7936f548e071..6a64d45a4c80 100644
1403 --- a/kernel/sched/cpufreq_schedutil.c
1404 +++ b/kernel/sched/cpufreq_schedutil.c
1405 @@ -290,7 +290,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
1406 * Do not reduce the frequency if the CPU has not been idle
1407 * recently, as the reduction is likely to be premature then.
1408 */
1409 - if (busy && next_f < sg_policy->next_freq) {
1410 + if (busy && next_f < sg_policy->next_freq &&
1411 + sg_policy->next_freq != UINT_MAX) {
1412 next_f = sg_policy->next_freq;
1413
1414 /* Reset cached freq as next_freq has changed */
1415 diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
1416 index 01e6b3a38871..142b6c73bba8 100644
1417 --- a/kernel/trace/bpf_trace.c
1418 +++ b/kernel/trace/bpf_trace.c
1419 @@ -876,6 +876,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1420 {
1421 struct perf_event_query_bpf __user *uquery = info;
1422 struct perf_event_query_bpf query = {};
1423 + u32 *ids, prog_cnt, ids_len;
1424 int ret;
1425
1426 if (!capable(CAP_SYS_ADMIN))
1427 @@ -884,15 +885,31 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
1428 return -EINVAL;
1429 if (copy_from_user(&query, uquery, sizeof(query)))
1430 return -EFAULT;
1431 - if (query.ids_len > BPF_TRACE_MAX_PROGS)
1432 +
1433 + ids_len = query.ids_len;
1434 + if (ids_len > BPF_TRACE_MAX_PROGS)
1435 return -E2BIG;
1436 + ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN);
1437 + if (!ids)
1438 + return -ENOMEM;
1439 + /*
1440 + * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which
1441 + * is required when user only wants to check for uquery->prog_cnt.
1442 + * There is no need to check for it since the case is handled
1443 + * gracefully in bpf_prog_array_copy_info.
1444 + */
1445
1446 mutex_lock(&bpf_event_mutex);
1447 ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
1448 - uquery->ids,
1449 - query.ids_len,
1450 - &uquery->prog_cnt);
1451 + ids,
1452 + ids_len,
1453 + &prog_cnt);
1454 mutex_unlock(&bpf_event_mutex);
1455
1456 + if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) ||
1457 + copy_to_user(uquery->ids, ids, ids_len * sizeof(u32)))
1458 + ret = -EFAULT;
1459 +
1460 + kfree(ids);
1461 return ret;
1462 }
1463 diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
1464 index a764aec3c9a1..55008fa93097 100644
1465 --- a/kernel/trace/trace_events_filter.c
1466 +++ b/kernel/trace/trace_events_filter.c
1467 @@ -338,6 +338,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
1468
1469 static int regex_match_front(char *str, struct regex *r, int len)
1470 {
1471 + if (len < r->len)
1472 + return 0;
1473 +
1474 if (strncmp(str, r->pattern, r->len) == 0)
1475 return 1;
1476 return 0;
1477 diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
1478 index fff97dc0b70f..67a52bbbe48d 100644
1479 --- a/kernel/trace/trace_uprobe.c
1480 +++ b/kernel/trace/trace_uprobe.c
1481 @@ -152,6 +152,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
1482 return;
1483
1484 ret = strncpy_from_user(dst, src, maxlen);
1485 + if (ret == maxlen)
1486 + dst[--ret] = '\0';
1487
1488 if (ret < 0) { /* Failed to fetch string */
1489 ((u8 *)get_rloc_data(dest))[0] = '\0';
1490 diff --git a/lib/swiotlb.c b/lib/swiotlb.c
1491 index 0331de0e9144..dc81f16b9095 100644
1492 --- a/lib/swiotlb.c
1493 +++ b/lib/swiotlb.c
1494 @@ -727,7 +727,7 @@ swiotlb_alloc_buffer(struct device *dev, size_t size, dma_addr_t *dma_handle,
1495
1496 phys_addr = swiotlb_tbl_map_single(dev,
1497 swiotlb_phys_to_dma(dev, io_tlb_start),
1498 - 0, size, DMA_FROM_DEVICE, 0);
1499 + 0, size, DMA_FROM_DEVICE, attrs);
1500 if (phys_addr == SWIOTLB_MAP_ERROR)
1501 goto out_warn;
1502
1503 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
1504 index b5f940ce0143..be585f545337 100644
1505 --- a/mm/backing-dev.c
1506 +++ b/mm/backing-dev.c
1507 @@ -126,6 +126,7 @@ static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
1508 bdi, &bdi_debug_stats_fops);
1509 if (!bdi->debug_stats) {
1510 debugfs_remove(bdi->debug_dir);
1511 + bdi->debug_dir = NULL;
1512 return -ENOMEM;
1513 }
1514
1515 @@ -394,7 +395,7 @@ static void wb_shutdown(struct bdi_writeback *wb)
1516 * the barrier provided by test_and_clear_bit() above.
1517 */
1518 smp_wmb();
1519 - clear_bit(WB_shutting_down, &wb->state);
1520 + clear_and_wake_up_bit(WB_shutting_down, &wb->state);
1521 }
1522
1523 static void wb_exit(struct bdi_writeback *wb)
1524 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1525 index 9ec024b862ac..88719f53ae3b 100644
1526 --- a/mm/memcontrol.c
1527 +++ b/mm/memcontrol.c
1528 @@ -4108,6 +4108,9 @@ static void free_mem_cgroup_per_node_info(struct mem_cgroup *memcg, int node)
1529 {
1530 struct mem_cgroup_per_node *pn = memcg->nodeinfo[node];
1531
1532 + if (!pn)
1533 + return;
1534 +
1535 free_percpu(pn->lruvec_stat_cpu);
1536 kfree(pn);
1537 }
1538 diff --git a/mm/mmap.c b/mm/mmap.c
1539 index 9efdc021ad22..03ca089cce0f 100644
1540 --- a/mm/mmap.c
1541 +++ b/mm/mmap.c
1542 @@ -2997,6 +2997,32 @@ void exit_mmap(struct mm_struct *mm)
1543 /* mm's last user has gone, and its about to be pulled down */
1544 mmu_notifier_release(mm);
1545
1546 + if (unlikely(mm_is_oom_victim(mm))) {
1547 + /*
1548 + * Manually reap the mm to free as much memory as possible.
1549 + * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
1550 + * this mm from further consideration. Taking mm->mmap_sem for
1551 + * write after setting MMF_OOM_SKIP will guarantee that the oom
1552 + * reaper will not run on this mm again after mmap_sem is
1553 + * dropped.
1554 + *
1555 + * Nothing can be holding mm->mmap_sem here and the above call
1556 + * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
1557 + * __oom_reap_task_mm() will not block.
1558 + *
1559 + * This needs to be done before calling munlock_vma_pages_all(),
1560 + * which clears VM_LOCKED, otherwise the oom reaper cannot
1561 + * reliably test it.
1562 + */
1563 + mutex_lock(&oom_lock);
1564 + __oom_reap_task_mm(mm);
1565 + mutex_unlock(&oom_lock);
1566 +
1567 + set_bit(MMF_OOM_SKIP, &mm->flags);
1568 + down_write(&mm->mmap_sem);
1569 + up_write(&mm->mmap_sem);
1570 + }
1571 +
1572 if (mm->locked_vm) {
1573 vma = mm->mmap;
1574 while (vma) {
1575 @@ -3018,24 +3044,6 @@ void exit_mmap(struct mm_struct *mm)
1576 /* update_hiwater_rss(mm) here? but nobody should be looking */
1577 /* Use -1 here to ensure all VMAs in the mm are unmapped */
1578 unmap_vmas(&tlb, vma, 0, -1);
1579 -
1580 - if (unlikely(mm_is_oom_victim(mm))) {
1581 - /*
1582 - * Wait for oom_reap_task() to stop working on this
1583 - * mm. Because MMF_OOM_SKIP is already set before
1584 - * calling down_read(), oom_reap_task() will not run
1585 - * on this "mm" post up_write().
1586 - *
1587 - * mm_is_oom_victim() cannot be set from under us
1588 - * either because victim->mm is already set to NULL
1589 - * under task_lock before calling mmput and oom_mm is
1590 - * set not NULL by the OOM killer only if victim->mm
1591 - * is found not NULL while holding the task_lock.
1592 - */
1593 - set_bit(MMF_OOM_SKIP, &mm->flags);
1594 - down_write(&mm->mmap_sem);
1595 - up_write(&mm->mmap_sem);
1596 - }
1597 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
1598 tlb_finish_mmu(&tlb, 0, -1);
1599
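
The exit_mmap() comment above describes a "set the flag, then cycle the lock" handshake: after MMF_OOM_SKIP is set and mmap_sem is taken and dropped for write, the oom reaper either finished under its read lock or will observe the flag. A userspace sketch of the same handshake with pthreads (names are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int skip;

static void *reaper(void *arg)
{
	pthread_rwlock_rdlock(&lock);
	if (!atomic_load(&skip))
		puts("reaper: working on mm");
	else
		puts("reaper: mm already torn down, skipping");
	pthread_rwlock_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	pthread_create(&t, NULL, reaper, NULL);

	atomic_store(&skip, 1);		/* like set_bit(MMF_OOM_SKIP) */
	pthread_rwlock_wrlock(&lock);	/* like down_write(&mm->mmap_sem) */
	pthread_rwlock_unlock(&lock);	/* like up_write(): the reaper has
					 * finished or will see skip != 0 */
	puts("exit path: safe to free page tables");
	pthread_join(t, NULL);
	return 0;
}
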
1600 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
1601 index f2e7dfb81eee..c594679ce201 100644
1602 --- a/mm/oom_kill.c
1603 +++ b/mm/oom_kill.c
1604 @@ -474,7 +474,6 @@ bool process_shares_mm(struct task_struct *p, struct mm_struct *mm)
1605 return false;
1606 }
1607
1608 -
1609 #ifdef CONFIG_MMU
1610 /*
1611 * OOM Reaper kernel thread which tries to reap the memory used by the OOM
1612 @@ -485,16 +484,54 @@ static DECLARE_WAIT_QUEUE_HEAD(oom_reaper_wait);
1613 static struct task_struct *oom_reaper_list;
1614 static DEFINE_SPINLOCK(oom_reaper_lock);
1615
1616 -static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
1617 +void __oom_reap_task_mm(struct mm_struct *mm)
1618 {
1619 - struct mmu_gather tlb;
1620 struct vm_area_struct *vma;
1621 +
1622 + /*
1623 + * Tell all users of get_user/copy_from_user etc... that the content
1624 + * is no longer stable. No barriers really needed because unmapping
1625 + * should imply barriers already and the reader would hit a page fault
1626 + * if it stumbled over reaped memory.
1627 + */
1628 + set_bit(MMF_UNSTABLE, &mm->flags);
1629 +
1630 + for (vma = mm->mmap ; vma; vma = vma->vm_next) {
1631 + if (!can_madv_dontneed_vma(vma))
1632 + continue;
1633 +
1634 + /*
1635 + * Only anonymous pages have a good chance to be dropped
1636 + * without additional steps which we cannot afford as we
1637 + * are OOM already.
1638 + *
1639 + * We do not even care about fs backed pages because all
1640 + * which are reclaimable have already been reclaimed and
1641 + * we do not want to block exit_mmap by keeping mm ref
1642 + * count elevated without a good reason.
1643 + */
1644 + if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
1645 + const unsigned long start = vma->vm_start;
1646 + const unsigned long end = vma->vm_end;
1647 + struct mmu_gather tlb;
1648 +
1649 + tlb_gather_mmu(&tlb, mm, start, end);
1650 + mmu_notifier_invalidate_range_start(mm, start, end);
1651 + unmap_page_range(&tlb, vma, start, end, NULL);
1652 + mmu_notifier_invalidate_range_end(mm, start, end);
1653 + tlb_finish_mmu(&tlb, start, end);
1654 + }
1655 + }
1656 +}
1657 +
1658 +static bool oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
1659 +{
1660 bool ret = true;
1661
1662 /*
1663 * We have to make sure to not race with the victim exit path
1664 * and cause premature new oom victim selection:
1665 - * __oom_reap_task_mm exit_mm
1666 + * oom_reap_task_mm exit_mm
1667 * mmget_not_zero
1668 * mmput
1669 * atomic_dec_and_test
1670 @@ -539,39 +576,8 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
1671
1672 trace_start_task_reaping(tsk->pid);
1673
1674 - /*
1675 - * Tell all users of get_user/copy_from_user etc... that the content
1676 - * is no longer stable. No barriers really needed because unmapping
1677 - * should imply barriers already and the reader would hit a page fault
1678 - * if it stumbled over a reaped memory.
1679 - */
1680 - set_bit(MMF_UNSTABLE, &mm->flags);
1681 -
1682 - for (vma = mm->mmap ; vma; vma = vma->vm_next) {
1683 - if (!can_madv_dontneed_vma(vma))
1684 - continue;
1685 + __oom_reap_task_mm(mm);
1686
1687 - /*
1688 - * Only anonymous pages have a good chance to be dropped
1689 - * without additional steps which we cannot afford as we
1690 - * are OOM already.
1691 - *
1692 - * We do not even care about fs backed pages because all
1693 - * which are reclaimable have already been reclaimed and
1694 - * we do not want to block exit_mmap by keeping mm ref
1695 - * count elevated without a good reason.
1696 - */
1697 - if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
1698 - const unsigned long start = vma->vm_start;
1699 - const unsigned long end = vma->vm_end;
1700 -
1701 - tlb_gather_mmu(&tlb, mm, start, end);
1702 - mmu_notifier_invalidate_range_start(mm, start, end);
1703 - unmap_page_range(&tlb, vma, start, end, NULL);
1704 - mmu_notifier_invalidate_range_end(mm, start, end);
1705 - tlb_finish_mmu(&tlb, start, end);
1706 - }
1707 - }
1708 pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
1709 task_pid_nr(tsk), tsk->comm,
1710 K(get_mm_counter(mm, MM_ANONPAGES)),
1711 @@ -592,13 +598,12 @@ static void oom_reap_task(struct task_struct *tsk)
1712 struct mm_struct *mm = tsk->signal->oom_mm;
1713
1714 /* Retry the down_read_trylock(mmap_sem) a few times */
1715 - while (attempts++ < MAX_OOM_REAP_RETRIES && !__oom_reap_task_mm(tsk, mm))
1716 + while (attempts++ < MAX_OOM_REAP_RETRIES && !oom_reap_task_mm(tsk, mm))
1717 schedule_timeout_idle(HZ/10);
1718
1719 if (attempts <= MAX_OOM_REAP_RETRIES)
1720 goto done;
1721
1722 -
1723 pr_info("oom_reaper: unable to reap pid:%d (%s)\n",
1724 task_pid_nr(tsk), tsk->comm);
1725 debug_show_all_locks();
1726 diff --git a/mm/sparse.c b/mm/sparse.c
1727 index 7af5e7a92528..6336444fe589 100644
1728 --- a/mm/sparse.c
1729 +++ b/mm/sparse.c
1730 @@ -666,7 +666,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
1731 unsigned long pfn;
1732
1733 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
1734 - unsigned long section_nr = pfn_to_section_nr(start_pfn);
1735 + unsigned long section_nr = pfn_to_section_nr(pfn);
1736 struct mem_section *ms;
1737
1738 /*
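
The one-line mm/sparse.c fix is a classic loop bug: the per-iteration value was derived from the loop start instead of the loop variable, so every pass operated on the first section. A minimal userspace demo (constants are made up for illustration; the real pfn_to_section_nr() is a shift):

#include <stdio.h>

#define PAGES_PER_SECTION 128UL	/* illustrative stand-in value */

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn / PAGES_PER_SECTION;
}

int main(void)
{
	unsigned long start_pfn = 0, end_pfn = 512, pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION)
		printf("buggy: section %lu   fixed: section %lu\n",
		       pfn_to_section_nr(start_pfn),	/* always 0 */
		       pfn_to_section_nr(pfn));		/* 0, 1, 2, 3 */
	return 0;
}
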
1739 diff --git a/mm/z3fold.c b/mm/z3fold.c
1740 index d589d318727f..36d31d3593e1 100644
1741 --- a/mm/z3fold.c
1742 +++ b/mm/z3fold.c
1743 @@ -144,7 +144,8 @@ enum z3fold_page_flags {
1744 PAGE_HEADLESS = 0,
1745 MIDDLE_CHUNK_MAPPED,
1746 NEEDS_COMPACTING,
1747 - PAGE_STALE
1748 + PAGE_STALE,
1749 + UNDER_RECLAIM
1750 };
1751
1752 /*****************
1753 @@ -173,6 +174,7 @@ static struct z3fold_header *init_z3fold_page(struct page *page,
1754 clear_bit(MIDDLE_CHUNK_MAPPED, &page->private);
1755 clear_bit(NEEDS_COMPACTING, &page->private);
1756 clear_bit(PAGE_STALE, &page->private);
1757 + clear_bit(UNDER_RECLAIM, &page->private);
1758
1759 spin_lock_init(&zhdr->page_lock);
1760 kref_init(&zhdr->refcount);
1761 @@ -748,6 +750,10 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle)
1762 atomic64_dec(&pool->pages_nr);
1763 return;
1764 }
1765 + if (test_bit(UNDER_RECLAIM, &page->private)) {
1766 + z3fold_page_unlock(zhdr);
1767 + return;
1768 + }
1769 if (test_and_set_bit(NEEDS_COMPACTING, &page->private)) {
1770 z3fold_page_unlock(zhdr);
1771 return;
1772 @@ -832,6 +838,8 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1773 kref_get(&zhdr->refcount);
1774 list_del_init(&zhdr->buddy);
1775 zhdr->cpu = -1;
1776 + set_bit(UNDER_RECLAIM, &page->private);
1777 + break;
1778 }
1779
1780 list_del_init(&page->lru);
1781 @@ -879,25 +887,35 @@ static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries)
1782 goto next;
1783 }
1784 next:
1785 - spin_lock(&pool->lock);
1786 if (test_bit(PAGE_HEADLESS, &page->private)) {
1787 if (ret == 0) {
1788 - spin_unlock(&pool->lock);
1789 free_z3fold_page(page);
1790 return 0;
1791 }
1792 - } else if (kref_put(&zhdr->refcount, release_z3fold_page)) {
1793 - atomic64_dec(&pool->pages_nr);
1794 + spin_lock(&pool->lock);
1795 + list_add(&page->lru, &pool->lru);
1796 + spin_unlock(&pool->lock);
1797 + } else {
1798 + z3fold_page_lock(zhdr);
1799 + clear_bit(UNDER_RECLAIM, &page->private);
1800 + if (kref_put(&zhdr->refcount,
1801 + release_z3fold_page_locked)) {
1802 + atomic64_dec(&pool->pages_nr);
1803 + return 0;
1804 + }
1805 + /*
1806 + * If we are here, the page is still not completely
1807 + * free. Take the global pool lock to be able to
1808 + * add it back to the lru list
1809 + */
1810 + spin_lock(&pool->lock);
1811 + list_add(&page->lru, &pool->lru);
1812 spin_unlock(&pool->lock);
1813 - return 0;
1814 + z3fold_page_unlock(zhdr);
1815 }
1816
1817 - /*
1818 - * Add to the beginning of LRU.
1819 - * Pool lock has to be kept here to ensure the page has
1820 - * not already been released
1821 - */
1822 - list_add(&page->lru, &pool->lru);
1823 + /* We started off locked so we need to lock the pool back */
1824 + spin_lock(&pool->lock);
1825 }
1826 spin_unlock(&pool->lock);
1827 return -EAGAIN;
1828 diff --git a/net/atm/lec.c b/net/atm/lec.c
1829 index 09a1f056712a..7579e85af531 100644
1830 --- a/net/atm/lec.c
1831 +++ b/net/atm/lec.c
1832 @@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
1833 #include <linux/module.h>
1834 #include <linux/init.h>
1835
1836 +/* Hardening for Spectre-v1 */
1837 +#include <linux/nospec.h>
1838 +
1839 #include "lec.h"
1840 #include "lec_arpc.h"
1841 #include "resources.h"
1842 @@ -687,8 +690,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
1843 bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
1844 if (bytes_left != 0)
1845 pr_info("copy from user failed for %d bytes\n", bytes_left);
1846 - if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
1847 - !dev_lec[ioc_data.dev_num])
1848 + if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
1849 + return -EINVAL;
1850 + ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
1851 + if (!dev_lec[ioc_data.dev_num])
1852 return -EINVAL;
1853 vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
1854 if (!vpriv)
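
The lec.c hunk applies the standard Spectre-v1 pattern: reject the untrusted index architecturally, then clamp it so a mispredicted bounds check cannot speculatively index out of range. Kernel-style sketch (not a standalone program) of the same two steps in isolation:

#include <linux/nospec.h>

static struct net_device *dev_lec[MAX_LEC_ITF];

static struct net_device *lec_dev_get(int dev_num)
{
	if (dev_num < 0 || dev_num >= MAX_LEC_ITF)
		return NULL;			/* architectural bound check */
	dev_num = array_index_nospec(dev_num, MAX_LEC_ITF);
	return dev_lec[dev_num];		/* safe even under speculation */
}
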
1855 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
1856 index 752112539753..a685cb02438d 100644
1857 --- a/net/bridge/netfilter/ebtables.c
1858 +++ b/net/bridge/netfilter/ebtables.c
1859 @@ -1821,13 +1821,14 @@ static int compat_table_info(const struct ebt_table_info *info,
1860 {
1861 unsigned int size = info->entries_size;
1862 const void *entries = info->entries;
1863 - int ret;
1864
1865 newinfo->entries_size = size;
1866 -
1867 - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1868 - if (ret)
1869 - return ret;
1870 + if (info->nentries) {
1871 + int ret = xt_compat_init_offsets(NFPROTO_BRIDGE,
1872 + info->nentries);
1873 + if (ret)
1874 + return ret;
1875 + }
1876
1877 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1878 entries, newinfo);
1879 diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
1880 index c0548d268e1a..e3e6a3e2ca22 100644
1881 --- a/net/core/dev_addr_lists.c
1882 +++ b/net/core/dev_addr_lists.c
1883 @@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
1884 return -EINVAL;
1885
1886 list_for_each_entry(ha, &list->list, list) {
1887 - if (!memcmp(ha->addr, addr, addr_len) &&
1888 - ha->type == addr_type) {
1889 + if (ha->type == addr_type &&
1890 + !memcmp(ha->addr, addr, addr_len)) {
1891 if (global) {
1892 /* check if addr is already used as global */
1893 if (ha->global_use)
1894 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1895 index 857e4e6f751a..789f8edd37ae 100644
1896 --- a/net/core/skbuff.c
1897 +++ b/net/core/skbuff.c
1898 @@ -857,6 +857,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
1899 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
1900 n->cloned = 1;
1901 n->nohdr = 0;
1902 + n->peeked = 0;
1903 n->destructor = NULL;
1904 C(tail);
1905 C(end);
1906 diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
1907 index e65fcb45c3f6..b08feb219b44 100644
1908 --- a/net/dccp/ipv4.c
1909 +++ b/net/dccp/ipv4.c
1910 @@ -614,6 +614,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1911 ireq = inet_rsk(req);
1912 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1913 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1914 + ireq->ir_mark = inet_request_mark(sk, skb);
1915 ireq->ireq_family = AF_INET;
1916 ireq->ir_iif = sk->sk_bound_dev_if;
1917
1918 diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
1919 index 5df7857fc0f3..6344f1b18a6a 100644
1920 --- a/net/dccp/ipv6.c
1921 +++ b/net/dccp/ipv6.c
1922 @@ -351,6 +351,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1923 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
1924 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
1925 ireq->ireq_family = AF_INET6;
1926 + ireq->ir_mark = inet_request_mark(sk, skb);
1927
1928 if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
1929 np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
1930 diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
1931 index c3ea4906d237..88c5069b5d20 100644
1932 --- a/net/ipv4/inet_timewait_sock.c
1933 +++ b/net/ipv4/inet_timewait_sock.c
1934 @@ -178,6 +178,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
1935 tw->tw_dport = inet->inet_dport;
1936 tw->tw_family = sk->sk_family;
1937 tw->tw_reuse = sk->sk_reuse;
1938 + tw->tw_reuseport = sk->sk_reuseport;
1939 tw->tw_hash = sk->sk_hash;
1940 tw->tw_ipv6only = 0;
1941 tw->tw_transparent = inet->transparent;
1942 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
1943 index 914d56928578..1ef8f86072bd 100644
1944 --- a/net/ipv4/inetpeer.c
1945 +++ b/net/ipv4/inetpeer.c
1946 @@ -210,6 +210,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
1947 p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
1948 if (p) {
1949 p->daddr = *daddr;
1950 + p->dtime = (__u32)jiffies;
1951 refcount_set(&p->refcnt, 2);
1952 atomic_set(&p->rid, 0);
1953 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
1954 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1955 index 299e247b2032..9d9b8358a898 100644
1956 --- a/net/ipv4/route.c
1957 +++ b/net/ipv4/route.c
1958 @@ -2306,13 +2306,14 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
1959 const struct sk_buff *skb)
1960 {
1961 __u8 tos = RT_FL_TOS(fl4);
1962 - struct fib_result res;
1963 + struct fib_result res = {
1964 + .type = RTN_UNSPEC,
1965 + .fi = NULL,
1966 + .table = NULL,
1967 + .tclassid = 0,
1968 + };
1969 struct rtable *rth;
1970
1971 - res.tclassid = 0;
1972 - res.fi = NULL;
1973 - res.table = NULL;
1974 -
1975 fl4->flowi4_iif = LOOPBACK_IFINDEX;
1976 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
1977 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
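
The route.c change is more than style: in C, members not named in a designated initializer are zero-initialized, so fields that were previously left uninitialized (such as res.type before this fix) now start from a well-defined value. A small compilable demo with a stand-in struct:

#include <stdio.h>

struct fib_result_demo {
	int type;
	void *fi;
	void *table;
	unsigned int tclassid;
	int prefixlen;		/* never assigned below, still zeroed */
};

int main(void)
{
	struct fib_result_demo res = {
		.type = 0,	/* stands in for RTN_UNSPEC */
		.fi = NULL,
	};
	printf("prefixlen = %d (zeroed by the initializer)\n",
	       res.prefixlen);
	return 0;
}
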
1978 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1979 index 1ab8733dac5f..c92fd253fc46 100644
1980 --- a/net/ipv4/tcp.c
1981 +++ b/net/ipv4/tcp.c
1982 @@ -2690,7 +2690,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
1983 case TCP_REPAIR_QUEUE:
1984 if (!tp->repair)
1985 err = -EPERM;
1986 - else if (val < TCP_QUEUES_NR)
1987 + else if ((unsigned int)val < TCP_QUEUES_NR)
1988 tp->repair_queue = val;
1989 else
1990 err = -EINVAL;
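
The tcp.c cast fixes a signed-comparison hole: a negative val passes `val < TCP_QUEUES_NR` when both sides are signed, while the cast turns it into a huge unsigned value that fails the bound check. In miniature (the limit is an illustrative stand-in for the enum value):

#include <stdio.h>

#define TCP_QUEUES_NR 3	/* illustrative stand-in */

int main(void)
{
	int val = -1;

	printf("signed check admits -1:    %d\n", val < TCP_QUEUES_NR);
	printf("unsigned check rejects it: %d\n",
	       (unsigned int)val < TCP_QUEUES_NR);
	return 0;
}
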
1991 diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
1992 index 34355fd19f27..dc76bc346829 100644
1993 --- a/net/kcm/kcmsock.c
1994 +++ b/net/kcm/kcmsock.c
1995 @@ -1425,6 +1425,7 @@ static int kcm_attach(struct socket *sock, struct socket *csock,
1996 */
1997 if (csk->sk_user_data) {
1998 write_unlock_bh(&csk->sk_callback_lock);
1999 + strp_stop(&psock->strp);
2000 strp_done(&psock->strp);
2001 kmem_cache_free(kcm_psockp, psock);
2002 err = -EALREADY;
2003 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
2004 index 5ebde4b15810..f36098887ad0 100644
2005 --- a/net/netfilter/ipvs/ip_vs_ctl.c
2006 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
2007 @@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2008 strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
2009 sizeof(cfg.mcast_ifn));
2010 cfg.syncid = dm->syncid;
2011 - rtnl_lock();
2012 - mutex_lock(&ipvs->sync_mutex);
2013 ret = start_sync_thread(ipvs, &cfg, dm->state);
2014 - mutex_unlock(&ipvs->sync_mutex);
2015 - rtnl_unlock();
2016 } else {
2017 mutex_lock(&ipvs->sync_mutex);
2018 ret = stop_sync_thread(ipvs, dm->state);
2019 @@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
2020 if (ipvs->mixed_address_family_dests > 0)
2021 return -EINVAL;
2022
2023 - rtnl_lock();
2024 - mutex_lock(&ipvs->sync_mutex);
2025 ret = start_sync_thread(ipvs, &c,
2026 nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
2027 - mutex_unlock(&ipvs->sync_mutex);
2028 - rtnl_unlock();
2029 return ret;
2030 }
2031
2032 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
2033 index fbaf3bd05b2e..001501e25625 100644
2034 --- a/net/netfilter/ipvs/ip_vs_sync.c
2035 +++ b/net/netfilter/ipvs/ip_vs_sync.c
2036 @@ -49,6 +49,7 @@
2037 #include <linux/kthread.h>
2038 #include <linux/wait.h>
2039 #include <linux/kernel.h>
2040 +#include <linux/sched/signal.h>
2041
2042 #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
2043
2044 @@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
2045 /*
2046 * Specify default interface for outgoing multicasts
2047 */
2048 -static int set_mcast_if(struct sock *sk, char *ifname)
2049 +static int set_mcast_if(struct sock *sk, struct net_device *dev)
2050 {
2051 - struct net_device *dev;
2052 struct inet_sock *inet = inet_sk(sk);
2053 - struct net *net = sock_net(sk);
2054 -
2055 - dev = __dev_get_by_name(net, ifname);
2056 - if (!dev)
2057 - return -ENODEV;
2058
2059 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
2060 return -EINVAL;
2061 @@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
2062 * in the in_addr structure passed in as a parameter.
2063 */
2064 static int
2065 -join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
2066 +join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
2067 {
2068 - struct net *net = sock_net(sk);
2069 struct ip_mreqn mreq;
2070 - struct net_device *dev;
2071 int ret;
2072
2073 memset(&mreq, 0, sizeof(mreq));
2074 memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
2075
2076 - dev = __dev_get_by_name(net, ifname);
2077 - if (!dev)
2078 - return -ENODEV;
2079 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
2080 return -EINVAL;
2081
2082 @@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
2083
2084 #ifdef CONFIG_IP_VS_IPV6
2085 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
2086 - char *ifname)
2087 + struct net_device *dev)
2088 {
2089 - struct net *net = sock_net(sk);
2090 - struct net_device *dev;
2091 int ret;
2092
2093 - dev = __dev_get_by_name(net, ifname);
2094 - if (!dev)
2095 - return -ENODEV;
2096 if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
2097 return -EINVAL;
2098
2099 @@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
2100 }
2101 #endif
2102
2103 -static int bind_mcastif_addr(struct socket *sock, char *ifname)
2104 +static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
2105 {
2106 - struct net *net = sock_net(sock->sk);
2107 - struct net_device *dev;
2108 __be32 addr;
2109 struct sockaddr_in sin;
2110
2111 - dev = __dev_get_by_name(net, ifname);
2112 - if (!dev)
2113 - return -ENODEV;
2114 -
2115 addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
2116 if (!addr)
2117 pr_err("You probably need to specify IP address on "
2118 "multicast interface.\n");
2119
2120 IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
2121 - ifname, &addr);
2122 + dev->name, &addr);
2123
2124 /* Now bind the socket with the address of multicast interface */
2125 sin.sin_family = AF_INET;
2126 @@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
2127 /*
2128 * Set up sending multicast socket over UDP
2129 */
2130 -static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
2131 +static int make_send_sock(struct netns_ipvs *ipvs, int id,
2132 + struct net_device *dev, struct socket **sock_ret)
2133 {
2134 /* multicast addr */
2135 union ipvs_sockaddr mcast_addr;
2136 @@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
2137 IPPROTO_UDP, &sock);
2138 if (result < 0) {
2139 pr_err("Error during creation of socket; terminating\n");
2140 - return ERR_PTR(result);
2141 + goto error;
2142 }
2143 - result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
2144 + *sock_ret = sock;
2145 + result = set_mcast_if(sock->sk, dev);
2146 if (result < 0) {
2147 pr_err("Error setting outbound mcast interface\n");
2148 goto error;
2149 @@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
2150 set_sock_size(sock->sk, 1, result);
2151
2152 if (AF_INET == ipvs->mcfg.mcast_af)
2153 - result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
2154 + result = bind_mcastif_addr(sock, dev);
2155 else
2156 result = 0;
2157 if (result < 0) {
2158 @@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
2159 goto error;
2160 }
2161
2162 - return sock;
2163 + return 0;
2164
2165 error:
2166 - sock_release(sock);
2167 - return ERR_PTR(result);
2168 + return result;
2169 }
2170
2171
2172 /*
2173 * Set up receiving multicast socket over UDP
2174 */
2175 -static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
2176 - int ifindex)
2177 +static int make_receive_sock(struct netns_ipvs *ipvs, int id,
2178 + struct net_device *dev, struct socket **sock_ret)
2179 {
2180 /* multicast addr */
2181 union ipvs_sockaddr mcast_addr;
2182 @@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
2183 IPPROTO_UDP, &sock);
2184 if (result < 0) {
2185 pr_err("Error during creation of socket; terminating\n");
2186 - return ERR_PTR(result);
2187 + goto error;
2188 }
2189 + *sock_ret = sock;
2190 /* it is equivalent to the REUSEADDR option in user-space */
2191 sock->sk->sk_reuse = SK_CAN_REUSE;
2192 result = sysctl_sync_sock_size(ipvs);
2193 @@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
2194 set_sock_size(sock->sk, 0, result);
2195
2196 get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
2197 - sock->sk->sk_bound_dev_if = ifindex;
2198 + sock->sk->sk_bound_dev_if = dev->ifindex;
2199 result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
2200 if (result < 0) {
2201 pr_err("Error binding to the multicast addr\n");
2202 @@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
2203 #ifdef CONFIG_IP_VS_IPV6
2204 if (ipvs->bcfg.mcast_af == AF_INET6)
2205 result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
2206 - ipvs->bcfg.mcast_ifn);
2207 + dev);
2208 else
2209 #endif
2210 result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
2211 - ipvs->bcfg.mcast_ifn);
2212 + dev);
2213 if (result < 0) {
2214 pr_err("Error joining to the multicast group\n");
2215 goto error;
2216 }
2217
2218 - return sock;
2219 + return 0;
2220
2221 error:
2222 - sock_release(sock);
2223 - return ERR_PTR(result);
2224 + return result;
2225 }
2226
2227
2228 @@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data)
2229 int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
2230 int state)
2231 {
2232 - struct ip_vs_sync_thread_data *tinfo;
2233 + struct ip_vs_sync_thread_data *tinfo = NULL;
2234 struct task_struct **array = NULL, *task;
2235 - struct socket *sock;
2236 struct net_device *dev;
2237 char *name;
2238 int (*threadfn)(void *data);
2239 - int id, count, hlen;
2240 + int id = 0, count, hlen;
2241 int result = -ENOMEM;
2242 u16 mtu, min_mtu;
2243
2244 @@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
2245 IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n",
2246 sizeof(struct ip_vs_sync_conn_v0));
2247
2248 + /* Do not hold one mutex and then block on another */
2249 + for (;;) {
2250 + rtnl_lock();
2251 + if (mutex_trylock(&ipvs->sync_mutex))
2252 + break;
2253 + rtnl_unlock();
2254 + mutex_lock(&ipvs->sync_mutex);
2255 + if (rtnl_trylock())
2256 + break;
2257 + mutex_unlock(&ipvs->sync_mutex);
2258 + }
2259 +
2260 if (!ipvs->sync_state) {
2261 count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
2262 ipvs->threads_mask = count - 1;
2263 @@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
2264 dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
2265 if (!dev) {
2266 pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
2267 - return -ENODEV;
2268 + result = -ENODEV;
2269 + goto out_early;
2270 }
2271 hlen = (AF_INET6 == c->mcast_af) ?
2272 sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
2273 @@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
2274 c->sync_maxlen = mtu - hlen;
2275
2276 if (state == IP_VS_STATE_MASTER) {
2277 + result = -EEXIST;
2278 if (ipvs->ms)
2279 - return -EEXIST;
2280 + goto out_early;
2281
2282 ipvs->mcfg = *c;
2283 name = "ipvs-m:%d:%d";
2284 threadfn = sync_thread_master;
2285 } else if (state == IP_VS_STATE_BACKUP) {
2286 + result = -EEXIST;
2287 if (ipvs->backup_threads)
2288 - return -EEXIST;
2289 + goto out_early;
2290
2291 ipvs->bcfg = *c;
2292 name = "ipvs-b:%d:%d";
2293 threadfn = sync_thread_backup;
2294 } else {
2295 - return -EINVAL;
2296 + result = -EINVAL;
2297 + goto out_early;
2298 }
2299
2300 if (state == IP_VS_STATE_MASTER) {
2301 struct ipvs_master_sync_state *ms;
2302
2303 + result = -ENOMEM;
2304 ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL);
2305 if (!ipvs->ms)
2306 goto out;
2307 @@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
2308 } else {
2309 array = kcalloc(count, sizeof(struct task_struct *),
2310 GFP_KERNEL);
2311 + result = -ENOMEM;
2312 if (!array)
2313 goto out;
2314 }
2315
2316 - tinfo = NULL;
2317 for (id = 0; id < count; id++) {
2318 - if (state == IP_VS_STATE_MASTER)
2319 - sock = make_send_sock(ipvs, id);
2320 - else
2321 - sock = make_receive_sock(ipvs, id, dev->ifindex);
2322 - if (IS_ERR(sock)) {
2323 - result = PTR_ERR(sock);
2324 - goto outtinfo;
2325 - }
2326 + result = -ENOMEM;
2327 tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
2328 if (!tinfo)
2329 - goto outsocket;
2330 + goto out;
2331 tinfo->ipvs = ipvs;
2332 - tinfo->sock = sock;
2333 + tinfo->sock = NULL;
2334 if (state == IP_VS_STATE_BACKUP) {
2335 tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
2336 GFP_KERNEL);
2337 if (!tinfo->buf)
2338 - goto outtinfo;
2339 + goto out;
2340 } else {
2341 tinfo->buf = NULL;
2342 }
2343 tinfo->id = id;
2344 + if (state == IP_VS_STATE_MASTER)
2345 + result = make_send_sock(ipvs, id, dev, &tinfo->sock);
2346 + else
2347 + result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
2348 + if (result < 0)
2349 + goto out;
2350
2351 task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
2352 if (IS_ERR(task)) {
2353 result = PTR_ERR(task);
2354 - goto outtinfo;
2355 + goto out;
2356 }
2357 tinfo = NULL;
2358 if (state == IP_VS_STATE_MASTER)
2359 @@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
2360 ipvs->sync_state |= state;
2361 spin_unlock_bh(&ipvs->sync_buff_lock);
2362
2363 + mutex_unlock(&ipvs->sync_mutex);
2364 + rtnl_unlock();
2365 +
2366 /* increase the module use count */
2367 ip_vs_use_count_inc();
2368
2369 return 0;
2370
2371 -outsocket:
2372 - sock_release(sock);
2373 -
2374 -outtinfo:
2375 - if (tinfo) {
2376 - sock_release(tinfo->sock);
2377 - kfree(tinfo->buf);
2378 - kfree(tinfo);
2379 - }
2380 +out:
2381 + /* We do not need the RTNL lock anymore, release it here so that
2382 + * sock_release below and in the kthreads can use rtnl_lock
2383 + * to leave the mcast group.
2384 + */
2385 + rtnl_unlock();
2386 count = id;
2387 while (count-- > 0) {
2388 if (state == IP_VS_STATE_MASTER)
2389 @@ -1932,13 +1927,23 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
2390 else
2391 kthread_stop(array[count]);
2392 }
2393 - kfree(array);
2394 -
2395 -out:
2396 if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
2397 kfree(ipvs->ms);
2398 ipvs->ms = NULL;
2399 }
2400 + mutex_unlock(&ipvs->sync_mutex);
2401 + if (tinfo) {
2402 + if (tinfo->sock)
2403 + sock_release(tinfo->sock);
2404 + kfree(tinfo->buf);
2405 + kfree(tinfo);
2406 + }
2407 + kfree(array);
2408 + return result;
2409 +
2410 +out_early:
2411 + mutex_unlock(&ipvs->sync_mutex);
2412 + rtnl_unlock();
2413 return result;
2414 }
2415
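
The loop added to start_sync_thread() takes rtnl_lock and sync_mutex without ever blocking on one while holding the other, so the pair can be acquired in either order without an ABBA deadlock against paths that lock them the other way around. A userspace sketch of the same acquisition loop with pthreads (names mirror the kernel locks but are plain mutexes here):

#include <pthread.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sync_mutex = PTHREAD_MUTEX_INITIALIZER;

static void lock_both(void)
{
	for (;;) {
		pthread_mutex_lock(&rtnl);
		if (pthread_mutex_trylock(&sync_mutex) == 0)
			return;			/* got both */
		pthread_mutex_unlock(&rtnl);	/* back off completely */

		pthread_mutex_lock(&sync_mutex);
		if (pthread_mutex_trylock(&rtnl) == 0)
			return;			/* got both */
		pthread_mutex_unlock(&sync_mutex);
	}
}

int main(void)
{
	lock_both();
	pthread_mutex_unlock(&sync_mutex);
	pthread_mutex_unlock(&rtnl);
	return 0;
}
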
2416 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
2417 index 70c455341243..02506752051d 100644
2418 --- a/net/netlink/af_netlink.c
2419 +++ b/net/netlink/af_netlink.c
2420 @@ -1845,6 +1845,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2421
2422 if (msg->msg_namelen) {
2423 err = -EINVAL;
2424 + if (msg->msg_namelen < sizeof(struct sockaddr_nl))
2425 + goto out;
2426 if (addr->nl_family != AF_NETLINK)
2427 goto out;
2428 dst_portid = addr->nl_pid;
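
The netlink_sendmsg() hunk validates the caller-supplied address length before any field of the sockaddr is read. The shape of that check in plain C, with a stand-in address type:

#include <stddef.h>
#include <string.h>

struct sockaddr_demo {
	unsigned short family;
	unsigned int pid;
	unsigned int groups;
};

/* Returns 0 on success, -1 (EINVAL-style) on a short or foreign addr. */
static int parse_addr(const void *name, size_t namelen,
		      unsigned short want_family, unsigned int *pid)
{
	struct sockaddr_demo addr;

	if (namelen < sizeof(addr))
		return -1;	/* too short: fields would be garbage */
	memcpy(&addr, name, sizeof(addr));
	if (addr.family != want_family)
		return -1;
	*pid = addr.pid;
	return 0;
}

int main(void)
{
	unsigned short half = 1;	/* deliberately too small */
	unsigned int pid;

	return parse_addr(&half, sizeof(half), 1, &pid) == -1 ? 0 : 1;
}
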
2429 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
2430 index 44c4652721af..ae18892a7010 100644
2431 --- a/net/rds/tcp.c
2432 +++ b/net/rds/tcp.c
2433 @@ -275,13 +275,14 @@ static int rds_tcp_laddr_check(struct net *net, __be32 addr)
2434 static void rds_tcp_conn_free(void *arg)
2435 {
2436 struct rds_tcp_connection *tc = arg;
2437 + unsigned long flags;
2438
2439 rdsdebug("freeing tc %p\n", tc);
2440
2441 - spin_lock_bh(&rds_tcp_conn_lock);
2442 + spin_lock_irqsave(&rds_tcp_conn_lock, flags);
2443 if (!tc->t_tcp_node_detached)
2444 list_del(&tc->t_tcp_node);
2445 - spin_unlock_bh(&rds_tcp_conn_lock);
2446 + spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
2447
2448 kmem_cache_free(rds_tcp_conn_slab, tc);
2449 }
2450 @@ -311,13 +312,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
2451 rdsdebug("rds_conn_path [%d] tc %p\n", i,
2452 conn->c_path[i].cp_transport_data);
2453 }
2454 - spin_lock_bh(&rds_tcp_conn_lock);
2455 + spin_lock_irq(&rds_tcp_conn_lock);
2456 for (i = 0; i < RDS_MPATH_WORKERS; i++) {
2457 tc = conn->c_path[i].cp_transport_data;
2458 tc->t_tcp_node_detached = false;
2459 list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
2460 }
2461 - spin_unlock_bh(&rds_tcp_conn_lock);
2462 + spin_unlock_irq(&rds_tcp_conn_lock);
2463 fail:
2464 if (ret) {
2465 for (j = 0; j < i; j++)
2466 @@ -529,7 +530,7 @@ static void rds_tcp_kill_sock(struct net *net)
2467
2468 rtn->rds_tcp_listen_sock = NULL;
2469 rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
2470 - spin_lock_bh(&rds_tcp_conn_lock);
2471 + spin_lock_irq(&rds_tcp_conn_lock);
2472 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
2473 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
2474
2475 @@ -542,7 +543,7 @@ static void rds_tcp_kill_sock(struct net *net)
2476 tc->t_tcp_node_detached = true;
2477 }
2478 }
2479 - spin_unlock_bh(&rds_tcp_conn_lock);
2480 + spin_unlock_irq(&rds_tcp_conn_lock);
2481 list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
2482 rds_conn_destroy(tc->t_cpath->cp_conn);
2483 }
2484 @@ -590,7 +591,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
2485 {
2486 struct rds_tcp_connection *tc, *_tc;
2487
2488 - spin_lock_bh(&rds_tcp_conn_lock);
2489 + spin_lock_irq(&rds_tcp_conn_lock);
2490 list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
2491 struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
2492
2493 @@ -600,7 +601,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
2494 /* reconnect with new parameters */
2495 rds_conn_path_drop(tc->t_cpath, false);
2496 }
2497 - spin_unlock_bh(&rds_tcp_conn_lock);
2498 + spin_unlock_irq(&rds_tcp_conn_lock);
2499 }
2500
2501 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
2502 diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
2503 index 41bd496531d4..00192a996be0 100644
2504 --- a/net/rfkill/rfkill-gpio.c
2505 +++ b/net/rfkill/rfkill-gpio.c
2506 @@ -137,13 +137,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
2507
2508 ret = rfkill_register(rfkill->rfkill_dev);
2509 if (ret < 0)
2510 - return ret;
2511 + goto err_destroy;
2512
2513 platform_set_drvdata(pdev, rfkill);
2514
2515 dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
2516
2517 return 0;
2518 +
2519 +err_destroy:
2520 + rfkill_destroy(rfkill->rfkill_dev);
2521 +
2522 + return ret;
2523 }
2524
2525 static int rfkill_gpio_remove(struct platform_device *pdev)
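
The rfkill-gpio hunk restores the kernel's goto-unwind idiom: every failure after a resource is set up jumps to a label that releases exactly what was acquired so far. A generic sketch of the idiom (not driver code; the helpers are hypothetical):

#include <stdlib.h>

struct thing { int registered; };

static int fake_register(struct thing *t)
{
	t->registered = 0;
	return -1;			/* simulate the failure path */
}

static int probe(void)
{
	struct thing *t = malloc(sizeof(*t));
	int ret;

	if (!t)
		return -1;
	ret = fake_register(t);
	if (ret < 0)
		goto err_destroy;	/* undo the allocation on failure */
	return 0;

err_destroy:
	free(t);
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;		/* exits 1: the error path ran */
}
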