Contents of /trunk/kernel-magellan/patches-5.3/0100-5.3.1-all-fixes.patch
Parent Directory | Revision Log
Revision 3447 -
(show annotations)
(download)
Fri Oct 4 07:57:04 2019 UTC (4 years, 11 months ago) by niro
File size: 34870 byte(s)
-linux-5.3.1
1 | diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt |
2 | index 1da2f1668f08..845d689e0fd7 100644 |
3 | --- a/Documentation/filesystems/overlayfs.txt |
4 | +++ b/Documentation/filesystems/overlayfs.txt |
5 | @@ -302,7 +302,7 @@ beneath or above the path of another overlay lower layer path. |
6 | |
7 | Using an upper layer path and/or a workdir path that are already used by |
8 | another overlay mount is not allowed and may fail with EBUSY. Using |
9 | -partially overlapping paths is not allowed but will not fail with EBUSY. |
10 | +partially overlapping paths is not allowed and may fail with EBUSY. |
11 | If files are accessed from two overlayfs mounts which share or overlap the |
12 | upper layer and/or workdir path the behavior of the overlay is undefined, |
13 | though it will not result in a crash or deadlock. |
14 | diff --git a/Documentation/sphinx/automarkup.py b/Documentation/sphinx/automarkup.py |
15 | index 77e89c1956d7..a8798369e8f7 100644 |
16 | --- a/Documentation/sphinx/automarkup.py |
17 | +++ b/Documentation/sphinx/automarkup.py |
18 | @@ -25,7 +25,7 @@ RE_function = re.compile(r'([\w_][\w\d_]+\(\))') |
19 | # to the creation of incorrect and confusing cross references. So |
20 | # just don't even try with these names. |
21 | # |
22 | -Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap' |
23 | +Skipfuncs = [ 'open', 'close', 'read', 'write', 'fcntl', 'mmap', |
24 | 'select', 'poll', 'fork', 'execve', 'clone', 'ioctl'] |
25 | |
26 | # |
27 | diff --git a/Makefile b/Makefile |
28 | index 6886f22902c9..f32e8d2e09c3 100644 |
29 | --- a/Makefile |
30 | +++ b/Makefile |
31 | @@ -1,7 +1,7 @@ |
32 | # SPDX-License-Identifier: GPL-2.0 |
33 | VERSION = 5 |
34 | PATCHLEVEL = 3 |
35 | -SUBLEVEL = 0 |
36 | +SUBLEVEL = 1 |
37 | EXTRAVERSION = |
38 | NAME = Bobtail Squid |
39 | |
40 | diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h |
41 | index e09760ece844..8eb5c0fbdee6 100644 |
42 | --- a/arch/arm64/include/asm/pgtable.h |
43 | +++ b/arch/arm64/include/asm/pgtable.h |
44 | @@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) |
45 | * Only if the new pte is valid and kernel, otherwise TLB maintenance |
46 | * or update_mmu_cache() have the necessary barriers. |
47 | */ |
48 | - if (pte_valid_not_user(pte)) |
49 | + if (pte_valid_not_user(pte)) { |
50 | dsb(ishst); |
51 | + isb(); |
52 | + } |
53 | } |
54 | |
55 | extern void __sync_icache_dcache(pte_t pteval); |
56 | @@ -484,8 +486,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
57 | |
58 | WRITE_ONCE(*pmdp, pmd); |
59 | |
60 | - if (pmd_valid(pmd)) |
61 | + if (pmd_valid(pmd)) { |
62 | dsb(ishst); |
63 | + isb(); |
64 | + } |
65 | } |
66 | |
67 | static inline void pmd_clear(pmd_t *pmdp) |
68 | @@ -543,8 +547,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud) |
69 | |
70 | WRITE_ONCE(*pudp, pud); |
71 | |
72 | - if (pud_valid(pud)) |
73 | + if (pud_valid(pud)) { |
74 | dsb(ishst); |
75 | + isb(); |
76 | + } |
77 | } |
78 | |
79 | static inline void pud_clear(pud_t *pudp) |
80 | diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c |
81 | index 0469aceaa230..485865fd0412 100644 |
82 | --- a/drivers/block/floppy.c |
83 | +++ b/drivers/block/floppy.c |
84 | @@ -3780,7 +3780,7 @@ static int compat_getdrvprm(int drive, |
85 | v.native_format = UDP->native_format; |
86 | mutex_unlock(&floppy_mutex); |
87 | |
88 | - if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_params))) |
89 | + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_params))) |
90 | return -EFAULT; |
91 | return 0; |
92 | } |
93 | @@ -3816,7 +3816,7 @@ static int compat_getdrvstat(int drive, bool poll, |
94 | v.bufblocks = UDRS->bufblocks; |
95 | mutex_unlock(&floppy_mutex); |
96 | |
97 | - if (copy_from_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) |
98 | + if (copy_to_user(arg, &v, sizeof(struct compat_floppy_drive_struct))) |
99 | return -EFAULT; |
100 | return 0; |
101 | Eintr: |
102 | diff --git a/drivers/firmware/google/vpd.c b/drivers/firmware/google/vpd.c |
103 | index 0739f3b70347..db0812263d46 100644 |
104 | --- a/drivers/firmware/google/vpd.c |
105 | +++ b/drivers/firmware/google/vpd.c |
106 | @@ -92,8 +92,8 @@ static int vpd_section_check_key_name(const u8 *key, s32 key_len) |
107 | return VPD_OK; |
108 | } |
109 | |
110 | -static int vpd_section_attrib_add(const u8 *key, s32 key_len, |
111 | - const u8 *value, s32 value_len, |
112 | +static int vpd_section_attrib_add(const u8 *key, u32 key_len, |
113 | + const u8 *value, u32 value_len, |
114 | void *arg) |
115 | { |
116 | int ret; |
117 | diff --git a/drivers/firmware/google/vpd_decode.c b/drivers/firmware/google/vpd_decode.c |
118 | index 92e3258552fc..dda525c0f968 100644 |
119 | --- a/drivers/firmware/google/vpd_decode.c |
120 | +++ b/drivers/firmware/google/vpd_decode.c |
121 | @@ -9,8 +9,8 @@ |
122 | |
123 | #include "vpd_decode.h" |
124 | |
125 | -static int vpd_decode_len(const s32 max_len, const u8 *in, |
126 | - s32 *length, s32 *decoded_len) |
127 | +static int vpd_decode_len(const u32 max_len, const u8 *in, |
128 | + u32 *length, u32 *decoded_len) |
129 | { |
130 | u8 more; |
131 | int i = 0; |
132 | @@ -30,18 +30,39 @@ static int vpd_decode_len(const s32 max_len, const u8 *in, |
133 | } while (more); |
134 | |
135 | *decoded_len = i; |
136 | + return VPD_OK; |
137 | +} |
138 | + |
139 | +static int vpd_decode_entry(const u32 max_len, const u8 *input_buf, |
140 | + u32 *_consumed, const u8 **entry, u32 *entry_len) |
141 | +{ |
142 | + u32 decoded_len; |
143 | + u32 consumed = *_consumed; |
144 | + |
145 | + if (vpd_decode_len(max_len - consumed, &input_buf[consumed], |
146 | + entry_len, &decoded_len) != VPD_OK) |
147 | + return VPD_FAIL; |
148 | + if (max_len - consumed < decoded_len) |
149 | + return VPD_FAIL; |
150 | + |
151 | + consumed += decoded_len; |
152 | + *entry = input_buf + consumed; |
153 | + |
154 | + /* entry_len is untrusted data and must be checked again. */ |
155 | + if (max_len - consumed < *entry_len) |
156 | + return VPD_FAIL; |
157 | |
158 | + consumed += decoded_len; |
159 | + *_consumed = consumed; |
160 | return VPD_OK; |
161 | } |
162 | |
163 | -int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, |
164 | +int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed, |
165 | vpd_decode_callback callback, void *callback_arg) |
166 | { |
167 | int type; |
168 | - int res; |
169 | - s32 key_len; |
170 | - s32 value_len; |
171 | - s32 decoded_len; |
172 | + u32 key_len; |
173 | + u32 value_len; |
174 | const u8 *key; |
175 | const u8 *value; |
176 | |
177 | @@ -56,26 +77,14 @@ int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, |
178 | case VPD_TYPE_STRING: |
179 | (*consumed)++; |
180 | |
181 | - /* key */ |
182 | - res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed], |
183 | - &key_len, &decoded_len); |
184 | - if (res != VPD_OK || *consumed + decoded_len >= max_len) |
185 | + if (vpd_decode_entry(max_len, input_buf, consumed, &key, |
186 | + &key_len) != VPD_OK) |
187 | return VPD_FAIL; |
188 | |
189 | - *consumed += decoded_len; |
190 | - key = &input_buf[*consumed]; |
191 | - *consumed += key_len; |
192 | - |
193 | - /* value */ |
194 | - res = vpd_decode_len(max_len - *consumed, &input_buf[*consumed], |
195 | - &value_len, &decoded_len); |
196 | - if (res != VPD_OK || *consumed + decoded_len > max_len) |
197 | + if (vpd_decode_entry(max_len, input_buf, consumed, &value, |
198 | + &value_len) != VPD_OK) |
199 | return VPD_FAIL; |
200 | |
201 | - *consumed += decoded_len; |
202 | - value = &input_buf[*consumed]; |
203 | - *consumed += value_len; |
204 | - |
205 | if (type == VPD_TYPE_STRING) |
206 | return callback(key, key_len, value, value_len, |
207 | callback_arg); |
208 | diff --git a/drivers/firmware/google/vpd_decode.h b/drivers/firmware/google/vpd_decode.h |
209 | index cf8c2ace155a..8dbe41cac599 100644 |
210 | --- a/drivers/firmware/google/vpd_decode.h |
211 | +++ b/drivers/firmware/google/vpd_decode.h |
212 | @@ -25,8 +25,8 @@ enum { |
213 | }; |
214 | |
215 | /* Callback for vpd_decode_string to invoke. */ |
216 | -typedef int vpd_decode_callback(const u8 *key, s32 key_len, |
217 | - const u8 *value, s32 value_len, |
218 | +typedef int vpd_decode_callback(const u8 *key, u32 key_len, |
219 | + const u8 *value, u32 value_len, |
220 | void *arg); |
221 | |
222 | /* |
223 | @@ -44,7 +44,7 @@ typedef int vpd_decode_callback(const u8 *key, s32 key_len, |
224 | * If one entry is successfully decoded, sends it to callback and returns the |
225 | * result. |
226 | */ |
227 | -int vpd_decode_string(const s32 max_len, const u8 *input_buf, s32 *consumed, |
228 | +int vpd_decode_string(const u32 max_len, const u8 *input_buf, u32 *consumed, |
229 | vpd_decode_callback callback, void *callback_arg); |
230 | |
231 | #endif /* __VPD_DECODE_H */ |
232 | diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c |
233 | index c659e18b358b..676d233d46d5 100644 |
234 | --- a/drivers/media/usb/dvb-usb/technisat-usb2.c |
235 | +++ b/drivers/media/usb/dvb-usb/technisat-usb2.c |
236 | @@ -608,10 +608,9 @@ static int technisat_usb2_frontend_attach(struct dvb_usb_adapter *a) |
237 | static int technisat_usb2_get_ir(struct dvb_usb_device *d) |
238 | { |
239 | struct technisat_usb2_state *state = d->priv; |
240 | - u8 *buf = state->buf; |
241 | - u8 *b; |
242 | - int ret; |
243 | struct ir_raw_event ev; |
244 | + u8 *buf = state->buf; |
245 | + int i, ret; |
246 | |
247 | buf[0] = GET_IR_DATA_VENDOR_REQUEST; |
248 | buf[1] = 0x08; |
249 | @@ -647,26 +646,25 @@ unlock: |
250 | return 0; /* no key pressed */ |
251 | |
252 | /* decoding */ |
253 | - b = buf+1; |
254 | |
255 | #if 0 |
256 | deb_rc("RC: %d ", ret); |
257 | - debug_dump(b, ret, deb_rc); |
258 | + debug_dump(buf + 1, ret, deb_rc); |
259 | #endif |
260 | |
261 | ev.pulse = 0; |
262 | - while (1) { |
263 | - ev.pulse = !ev.pulse; |
264 | - ev.duration = (*b * FIRMWARE_CLOCK_DIVISOR * FIRMWARE_CLOCK_TICK) / 1000; |
265 | - ir_raw_event_store(d->rc_dev, &ev); |
266 | - |
267 | - b++; |
268 | - if (*b == 0xff) { |
269 | + for (i = 1; i < ARRAY_SIZE(state->buf); i++) { |
270 | + if (buf[i] == 0xff) { |
271 | ev.pulse = 0; |
272 | ev.duration = 888888*2; |
273 | ir_raw_event_store(d->rc_dev, &ev); |
274 | break; |
275 | } |
276 | + |
277 | + ev.pulse = !ev.pulse; |
278 | + ev.duration = (buf[i] * FIRMWARE_CLOCK_DIVISOR * |
279 | + FIRMWARE_CLOCK_TICK) / 1000; |
280 | + ir_raw_event_store(d->rc_dev, &ev); |
281 | } |
282 | |
283 | ir_raw_event_handle(d->rc_dev); |
284 | diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c |
285 | index e4d2dcd5cc0f..19c90fa9e443 100644 |
286 | --- a/drivers/media/usb/tm6000/tm6000-dvb.c |
287 | +++ b/drivers/media/usb/tm6000/tm6000-dvb.c |
288 | @@ -97,6 +97,7 @@ static void tm6000_urb_received(struct urb *urb) |
289 | printk(KERN_ERR "tm6000: error %s\n", __func__); |
290 | kfree(urb->transfer_buffer); |
291 | usb_free_urb(urb); |
292 | + dev->dvb->bulk_urb = NULL; |
293 | } |
294 | } |
295 | } |
296 | @@ -127,6 +128,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) |
297 | dvb->bulk_urb->transfer_buffer = kzalloc(size, GFP_KERNEL); |
298 | if (!dvb->bulk_urb->transfer_buffer) { |
299 | usb_free_urb(dvb->bulk_urb); |
300 | + dvb->bulk_urb = NULL; |
301 | return -ENOMEM; |
302 | } |
303 | |
304 | @@ -153,6 +155,7 @@ static int tm6000_start_stream(struct tm6000_core *dev) |
305 | |
306 | kfree(dvb->bulk_urb->transfer_buffer); |
307 | usb_free_urb(dvb->bulk_urb); |
308 | + dvb->bulk_urb = NULL; |
309 | return ret; |
310 | } |
311 | |
312 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
313 | index fd54c7c87485..b19ab09cb18f 100644 |
314 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
315 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
316 | @@ -4451,10 +4451,12 @@ int stmmac_suspend(struct device *dev) |
317 | if (!ndev || !netif_running(ndev)) |
318 | return 0; |
319 | |
320 | - phylink_stop(priv->phylink); |
321 | - |
322 | mutex_lock(&priv->lock); |
323 | |
324 | + rtnl_lock(); |
325 | + phylink_stop(priv->phylink); |
326 | + rtnl_unlock(); |
327 | + |
328 | netif_device_detach(ndev); |
329 | stmmac_stop_all_queues(priv); |
330 | |
331 | @@ -4558,9 +4560,11 @@ int stmmac_resume(struct device *dev) |
332 | |
333 | stmmac_start_all_queues(priv); |
334 | |
335 | - mutex_unlock(&priv->lock); |
336 | - |
337 | + rtnl_lock(); |
338 | phylink_start(priv->phylink); |
339 | + rtnl_unlock(); |
340 | + |
341 | + mutex_unlock(&priv->lock); |
342 | |
343 | return 0; |
344 | } |
345 | diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c |
346 | index 8d33970a2950..5f5722bf6762 100644 |
347 | --- a/drivers/net/xen-netfront.c |
348 | +++ b/drivers/net/xen-netfront.c |
349 | @@ -906,7 +906,7 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue, |
350 | __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); |
351 | } |
352 | if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) { |
353 | - queue->rx.rsp_cons = ++cons; |
354 | + queue->rx.rsp_cons = ++cons + skb_queue_len(list); |
355 | kfree_skb(nskb); |
356 | return ~0U; |
357 | } |
358 | diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c |
359 | index 34ff6434da8f..6bb49cc25c63 100644 |
360 | --- a/drivers/phy/qualcomm/phy-qcom-qmp.c |
361 | +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c |
362 | @@ -35,7 +35,7 @@ |
363 | #define PLL_READY_GATE_EN BIT(3) |
364 | /* QPHY_PCS_STATUS bit */ |
365 | #define PHYSTATUS BIT(6) |
366 | -/* QPHY_COM_PCS_READY_STATUS bit */ |
367 | +/* QPHY_PCS_READY_STATUS & QPHY_COM_PCS_READY_STATUS bit */ |
368 | #define PCS_READY BIT(0) |
369 | |
370 | /* QPHY_V3_DP_COM_RESET_OVRD_CTRL register bits */ |
371 | @@ -115,6 +115,7 @@ enum qphy_reg_layout { |
372 | QPHY_SW_RESET, |
373 | QPHY_START_CTRL, |
374 | QPHY_PCS_READY_STATUS, |
375 | + QPHY_PCS_STATUS, |
376 | QPHY_PCS_AUTONOMOUS_MODE_CTRL, |
377 | QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR, |
378 | QPHY_PCS_LFPS_RXTERM_IRQ_STATUS, |
379 | @@ -133,7 +134,7 @@ static const unsigned int pciephy_regs_layout[] = { |
380 | [QPHY_FLL_MAN_CODE] = 0xd4, |
381 | [QPHY_SW_RESET] = 0x00, |
382 | [QPHY_START_CTRL] = 0x08, |
383 | - [QPHY_PCS_READY_STATUS] = 0x174, |
384 | + [QPHY_PCS_STATUS] = 0x174, |
385 | }; |
386 | |
387 | static const unsigned int usb3phy_regs_layout[] = { |
388 | @@ -144,7 +145,7 @@ static const unsigned int usb3phy_regs_layout[] = { |
389 | [QPHY_FLL_MAN_CODE] = 0xd0, |
390 | [QPHY_SW_RESET] = 0x00, |
391 | [QPHY_START_CTRL] = 0x08, |
392 | - [QPHY_PCS_READY_STATUS] = 0x17c, |
393 | + [QPHY_PCS_STATUS] = 0x17c, |
394 | [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d4, |
395 | [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0d8, |
396 | [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x178, |
397 | @@ -153,7 +154,7 @@ static const unsigned int usb3phy_regs_layout[] = { |
398 | static const unsigned int qmp_v3_usb3phy_regs_layout[] = { |
399 | [QPHY_SW_RESET] = 0x00, |
400 | [QPHY_START_CTRL] = 0x08, |
401 | - [QPHY_PCS_READY_STATUS] = 0x174, |
402 | + [QPHY_PCS_STATUS] = 0x174, |
403 | [QPHY_PCS_AUTONOMOUS_MODE_CTRL] = 0x0d8, |
404 | [QPHY_PCS_LFPS_RXTERM_IRQ_CLEAR] = 0x0dc, |
405 | [QPHY_PCS_LFPS_RXTERM_IRQ_STATUS] = 0x170, |
406 | @@ -911,7 +912,6 @@ struct qmp_phy_cfg { |
407 | |
408 | unsigned int start_ctrl; |
409 | unsigned int pwrdn_ctrl; |
410 | - unsigned int mask_pcs_ready; |
411 | unsigned int mask_com_pcs_ready; |
412 | |
413 | /* true, if PHY has a separate PHY_COM control block */ |
414 | @@ -1074,7 +1074,6 @@ static const struct qmp_phy_cfg msm8996_pciephy_cfg = { |
415 | |
416 | .start_ctrl = PCS_START | PLL_READY_GATE_EN, |
417 | .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, |
418 | - .mask_pcs_ready = PHYSTATUS, |
419 | .mask_com_pcs_ready = PCS_READY, |
420 | |
421 | .has_phy_com_ctrl = true, |
422 | @@ -1106,7 +1105,6 @@ static const struct qmp_phy_cfg msm8996_usb3phy_cfg = { |
423 | |
424 | .start_ctrl = SERDES_START | PCS_START, |
425 | .pwrdn_ctrl = SW_PWRDN, |
426 | - .mask_pcs_ready = PHYSTATUS, |
427 | }; |
428 | |
429 | /* list of resets */ |
430 | @@ -1136,7 +1134,6 @@ static const struct qmp_phy_cfg ipq8074_pciephy_cfg = { |
431 | |
432 | .start_ctrl = SERDES_START | PCS_START, |
433 | .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, |
434 | - .mask_pcs_ready = PHYSTATUS, |
435 | |
436 | .has_phy_com_ctrl = false, |
437 | .has_lane_rst = false, |
438 | @@ -1167,7 +1164,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3phy_cfg = { |
439 | |
440 | .start_ctrl = SERDES_START | PCS_START, |
441 | .pwrdn_ctrl = SW_PWRDN, |
442 | - .mask_pcs_ready = PHYSTATUS, |
443 | |
444 | .has_pwrdn_delay = true, |
445 | .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, |
446 | @@ -1199,7 +1195,6 @@ static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = { |
447 | |
448 | .start_ctrl = SERDES_START | PCS_START, |
449 | .pwrdn_ctrl = SW_PWRDN, |
450 | - .mask_pcs_ready = PHYSTATUS, |
451 | |
452 | .has_pwrdn_delay = true, |
453 | .pwrdn_delay_min = POWER_DOWN_DELAY_US_MIN, |
454 | @@ -1226,7 +1221,6 @@ static const struct qmp_phy_cfg sdm845_ufsphy_cfg = { |
455 | |
456 | .start_ctrl = SERDES_START, |
457 | .pwrdn_ctrl = SW_PWRDN, |
458 | - .mask_pcs_ready = PCS_READY, |
459 | |
460 | .is_dual_lane_phy = true, |
461 | .no_pcs_sw_reset = true, |
462 | @@ -1254,7 +1248,6 @@ static const struct qmp_phy_cfg msm8998_pciephy_cfg = { |
463 | |
464 | .start_ctrl = SERDES_START | PCS_START, |
465 | .pwrdn_ctrl = SW_PWRDN | REFCLK_DRV_DSBL, |
466 | - .mask_pcs_ready = PHYSTATUS, |
467 | }; |
468 | |
469 | static const struct qmp_phy_cfg msm8998_usb3phy_cfg = { |
470 | @@ -1279,7 +1272,6 @@ static const struct qmp_phy_cfg msm8998_usb3phy_cfg = { |
471 | |
472 | .start_ctrl = SERDES_START | PCS_START, |
473 | .pwrdn_ctrl = SW_PWRDN, |
474 | - .mask_pcs_ready = PHYSTATUS, |
475 | |
476 | .is_dual_lane_phy = true, |
477 | }; |
478 | @@ -1457,7 +1449,7 @@ static int qcom_qmp_phy_enable(struct phy *phy) |
479 | void __iomem *pcs = qphy->pcs; |
480 | void __iomem *dp_com = qmp->dp_com; |
481 | void __iomem *status; |
482 | - unsigned int mask, val; |
483 | + unsigned int mask, val, ready; |
484 | int ret; |
485 | |
486 | dev_vdbg(qmp->dev, "Initializing QMP phy\n"); |
487 | @@ -1545,10 +1537,17 @@ static int qcom_qmp_phy_enable(struct phy *phy) |
488 | /* start SerDes and Phy-Coding-Sublayer */ |
489 | qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); |
490 | |
491 | - status = pcs + cfg->regs[QPHY_PCS_READY_STATUS]; |
492 | - mask = cfg->mask_pcs_ready; |
493 | + if (cfg->type == PHY_TYPE_UFS) { |
494 | + status = pcs + cfg->regs[QPHY_PCS_READY_STATUS]; |
495 | + mask = PCS_READY; |
496 | + ready = PCS_READY; |
497 | + } else { |
498 | + status = pcs + cfg->regs[QPHY_PCS_STATUS]; |
499 | + mask = PHYSTATUS; |
500 | + ready = 0; |
501 | + } |
502 | |
503 | - ret = readl_poll_timeout(status, val, val & mask, 10, |
504 | + ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, |
505 | PHY_INIT_COMPLETE_TIMEOUT); |
506 | if (ret) { |
507 | dev_err(qmp->dev, "phy initialization timed-out\n"); |
508 | diff --git a/drivers/phy/renesas/phy-rcar-gen3-usb2.c b/drivers/phy/renesas/phy-rcar-gen3-usb2.c |
509 | index 8ffba67568ec..b7f6b1324395 100644 |
510 | --- a/drivers/phy/renesas/phy-rcar-gen3-usb2.c |
511 | +++ b/drivers/phy/renesas/phy-rcar-gen3-usb2.c |
512 | @@ -61,6 +61,7 @@ |
513 | USB2_OBINT_IDDIGCHG) |
514 | |
515 | /* VBCTRL */ |
516 | +#define USB2_VBCTRL_OCCLREN BIT(16) |
517 | #define USB2_VBCTRL_DRVVBUSSEL BIT(8) |
518 | |
519 | /* LINECTRL1 */ |
520 | @@ -374,6 +375,7 @@ static void rcar_gen3_init_otg(struct rcar_gen3_chan *ch) |
521 | writel(val, usb2_base + USB2_LINECTRL1); |
522 | |
523 | val = readl(usb2_base + USB2_VBCTRL); |
524 | + val &= ~USB2_VBCTRL_OCCLREN; |
525 | writel(val | USB2_VBCTRL_DRVVBUSSEL, usb2_base + USB2_VBCTRL); |
526 | val = readl(usb2_base + USB2_ADPCTRL); |
527 | writel(val | USB2_ADPCTRL_IDPULLUP, usb2_base + USB2_ADPCTRL); |
528 | diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c |
529 | index 0b4f36905321..8e667967928a 100644 |
530 | --- a/drivers/tty/serial/atmel_serial.c |
531 | +++ b/drivers/tty/serial/atmel_serial.c |
532 | @@ -1400,7 +1400,6 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending) |
533 | |
534 | atmel_port->hd_start_rx = false; |
535 | atmel_start_rx(port); |
536 | - return; |
537 | } |
538 | |
539 | atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx); |
540 | diff --git a/drivers/tty/serial/sprd_serial.c b/drivers/tty/serial/sprd_serial.c |
541 | index 73d71a4e6c0c..f49b7d6fbc88 100644 |
542 | --- a/drivers/tty/serial/sprd_serial.c |
543 | +++ b/drivers/tty/serial/sprd_serial.c |
544 | @@ -609,7 +609,7 @@ static inline void sprd_rx(struct uart_port *port) |
545 | |
546 | if (lsr & (SPRD_LSR_BI | SPRD_LSR_PE | |
547 | SPRD_LSR_FE | SPRD_LSR_OE)) |
548 | - if (handle_lsr_errors(port, &lsr, &flag)) |
549 | + if (handle_lsr_errors(port, &flag, &lsr)) |
550 | continue; |
551 | if (uart_handle_sysrq_char(port, ch)) |
552 | continue; |
553 | diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c |
554 | index 9d6cb709ca7b..151a74a54386 100644 |
555 | --- a/drivers/usb/core/config.c |
556 | +++ b/drivers/usb/core/config.c |
557 | @@ -921,7 +921,7 @@ int usb_get_bos_descriptor(struct usb_device *dev) |
558 | struct usb_bos_descriptor *bos; |
559 | struct usb_dev_cap_header *cap; |
560 | struct usb_ssp_cap_descriptor *ssp_cap; |
561 | - unsigned char *buffer; |
562 | + unsigned char *buffer, *buffer0; |
563 | int length, total_len, num, i, ssac; |
564 | __u8 cap_type; |
565 | int ret; |
566 | @@ -966,10 +966,12 @@ int usb_get_bos_descriptor(struct usb_device *dev) |
567 | ret = -ENOMSG; |
568 | goto err; |
569 | } |
570 | + |
571 | + buffer0 = buffer; |
572 | total_len -= length; |
573 | + buffer += length; |
574 | |
575 | for (i = 0; i < num; i++) { |
576 | - buffer += length; |
577 | cap = (struct usb_dev_cap_header *)buffer; |
578 | |
579 | if (total_len < sizeof(*cap) || total_len < cap->bLength) { |
580 | @@ -983,8 +985,6 @@ int usb_get_bos_descriptor(struct usb_device *dev) |
581 | break; |
582 | } |
583 | |
584 | - total_len -= length; |
585 | - |
586 | if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { |
587 | dev_warn(ddev, "descriptor type invalid, skip\n"); |
588 | continue; |
589 | @@ -1019,7 +1019,11 @@ int usb_get_bos_descriptor(struct usb_device *dev) |
590 | default: |
591 | break; |
592 | } |
593 | + |
594 | + total_len -= length; |
595 | + buffer += length; |
596 | } |
597 | + dev->bos->desc->wTotalLength = cpu_to_le16(buffer - buffer0); |
598 | |
599 | return 0; |
600 | |
601 | diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h |
602 | index 28a2d12a1029..a8279280e88d 100644 |
603 | --- a/fs/overlayfs/ovl_entry.h |
604 | +++ b/fs/overlayfs/ovl_entry.h |
605 | @@ -66,6 +66,7 @@ struct ovl_fs { |
606 | bool workdir_locked; |
607 | /* Traps in ovl inode cache */ |
608 | struct inode *upperdir_trap; |
609 | + struct inode *workbasedir_trap; |
610 | struct inode *workdir_trap; |
611 | struct inode *indexdir_trap; |
612 | /* Inode numbers in all layers do not use the high xino_bits */ |
613 | diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c |
614 | index b368e2e102fa..afbcb116a7f1 100644 |
615 | --- a/fs/overlayfs/super.c |
616 | +++ b/fs/overlayfs/super.c |
617 | @@ -212,6 +212,7 @@ static void ovl_free_fs(struct ovl_fs *ofs) |
618 | { |
619 | unsigned i; |
620 | |
621 | + iput(ofs->workbasedir_trap); |
622 | iput(ofs->indexdir_trap); |
623 | iput(ofs->workdir_trap); |
624 | iput(ofs->upperdir_trap); |
625 | @@ -1003,6 +1004,25 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir, |
626 | return 0; |
627 | } |
628 | |
629 | +/* |
630 | + * Determine how we treat concurrent use of upperdir/workdir based on the |
631 | + * index feature. This is papering over mount leaks of container runtimes, |
632 | + * for example, an old overlay mount is leaked and now its upperdir is |
633 | + * attempted to be used as a lower layer in a new overlay mount. |
634 | + */ |
635 | +static int ovl_report_in_use(struct ovl_fs *ofs, const char *name) |
636 | +{ |
637 | + if (ofs->config.index) { |
638 | + pr_err("overlayfs: %s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n", |
639 | + name); |
640 | + return -EBUSY; |
641 | + } else { |
642 | + pr_warn("overlayfs: %s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n", |
643 | + name); |
644 | + return 0; |
645 | + } |
646 | +} |
647 | + |
648 | static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, |
649 | struct path *upperpath) |
650 | { |
651 | @@ -1040,14 +1060,12 @@ static int ovl_get_upper(struct super_block *sb, struct ovl_fs *ofs, |
652 | upper_mnt->mnt_flags &= ~(MNT_NOATIME | MNT_NODIRATIME | MNT_RELATIME); |
653 | ofs->upper_mnt = upper_mnt; |
654 | |
655 | - err = -EBUSY; |
656 | if (ovl_inuse_trylock(ofs->upper_mnt->mnt_root)) { |
657 | ofs->upperdir_locked = true; |
658 | - } else if (ofs->config.index) { |
659 | - pr_err("overlayfs: upperdir is in-use by another mount, mount with '-o index=off' to override exclusive upperdir protection.\n"); |
660 | - goto out; |
661 | } else { |
662 | - pr_warn("overlayfs: upperdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); |
663 | + err = ovl_report_in_use(ofs, "upperdir"); |
664 | + if (err) |
665 | + goto out; |
666 | } |
667 | |
668 | err = 0; |
669 | @@ -1157,16 +1175,19 @@ static int ovl_get_workdir(struct super_block *sb, struct ovl_fs *ofs, |
670 | |
671 | ofs->workbasedir = dget(workpath.dentry); |
672 | |
673 | - err = -EBUSY; |
674 | if (ovl_inuse_trylock(ofs->workbasedir)) { |
675 | ofs->workdir_locked = true; |
676 | - } else if (ofs->config.index) { |
677 | - pr_err("overlayfs: workdir is in-use by another mount, mount with '-o index=off' to override exclusive workdir protection.\n"); |
678 | - goto out; |
679 | } else { |
680 | - pr_warn("overlayfs: workdir is in-use by another mount, accessing files from both mounts will result in undefined behavior.\n"); |
681 | + err = ovl_report_in_use(ofs, "workdir"); |
682 | + if (err) |
683 | + goto out; |
684 | } |
685 | |
686 | + err = ovl_setup_trap(sb, ofs->workbasedir, &ofs->workbasedir_trap, |
687 | + "workdir"); |
688 | + if (err) |
689 | + goto out; |
690 | + |
691 | err = ovl_make_workdir(sb, ofs, &workpath); |
692 | |
693 | out: |
694 | @@ -1313,16 +1334,16 @@ static int ovl_get_lower_layers(struct super_block *sb, struct ovl_fs *ofs, |
695 | if (err < 0) |
696 | goto out; |
697 | |
698 | - err = -EBUSY; |
699 | - if (ovl_is_inuse(stack[i].dentry)) { |
700 | - pr_err("overlayfs: lowerdir is in-use as upperdir/workdir\n"); |
701 | - goto out; |
702 | - } |
703 | - |
704 | err = ovl_setup_trap(sb, stack[i].dentry, &trap, "lowerdir"); |
705 | if (err) |
706 | goto out; |
707 | |
708 | + if (ovl_is_inuse(stack[i].dentry)) { |
709 | + err = ovl_report_in_use(ofs, "lowerdir"); |
710 | + if (err) |
711 | + goto out; |
712 | + } |
713 | + |
714 | mnt = clone_private_mount(&stack[i]); |
715 | err = PTR_ERR(mnt); |
716 | if (IS_ERR(mnt)) { |
717 | @@ -1469,8 +1490,8 @@ out_err: |
718 | * - another layer of this overlayfs instance |
719 | * - upper/work dir of any overlayfs instance |
720 | */ |
721 | -static int ovl_check_layer(struct super_block *sb, struct dentry *dentry, |
722 | - const char *name) |
723 | +static int ovl_check_layer(struct super_block *sb, struct ovl_fs *ofs, |
724 | + struct dentry *dentry, const char *name) |
725 | { |
726 | struct dentry *next = dentry, *parent; |
727 | int err = 0; |
728 | @@ -1482,13 +1503,11 @@ static int ovl_check_layer(struct super_block *sb, struct dentry *dentry, |
729 | |
730 | /* Walk back ancestors to root (inclusive) looking for traps */ |
731 | while (!err && parent != next) { |
732 | - if (ovl_is_inuse(parent)) { |
733 | - err = -EBUSY; |
734 | - pr_err("overlayfs: %s path overlapping in-use upperdir/workdir\n", |
735 | - name); |
736 | - } else if (ovl_lookup_trap_inode(sb, parent)) { |
737 | + if (ovl_lookup_trap_inode(sb, parent)) { |
738 | err = -ELOOP; |
739 | pr_err("overlayfs: overlapping %s path\n", name); |
740 | + } else if (ovl_is_inuse(parent)) { |
741 | + err = ovl_report_in_use(ofs, name); |
742 | } |
743 | next = parent; |
744 | parent = dget_parent(next); |
745 | @@ -1509,7 +1528,8 @@ static int ovl_check_overlapping_layers(struct super_block *sb, |
746 | int i, err; |
747 | |
748 | if (ofs->upper_mnt) { |
749 | - err = ovl_check_layer(sb, ofs->upper_mnt->mnt_root, "upperdir"); |
750 | + err = ovl_check_layer(sb, ofs, ofs->upper_mnt->mnt_root, |
751 | + "upperdir"); |
752 | if (err) |
753 | return err; |
754 | |
755 | @@ -1520,13 +1540,14 @@ static int ovl_check_overlapping_layers(struct super_block *sb, |
756 | * workbasedir. In that case, we already have their traps in |
757 | * inode cache and we will catch that case on lookup. |
758 | */ |
759 | - err = ovl_check_layer(sb, ofs->workbasedir, "workdir"); |
760 | + err = ovl_check_layer(sb, ofs, ofs->workbasedir, "workdir"); |
761 | if (err) |
762 | return err; |
763 | } |
764 | |
765 | for (i = 0; i < ofs->numlower; i++) { |
766 | - err = ovl_check_layer(sb, ofs->lower_layers[i].mnt->mnt_root, |
767 | + err = ovl_check_layer(sb, ofs, |
768 | + ofs->lower_layers[i].mnt->mnt_root, |
769 | "lowerdir"); |
770 | if (err) |
771 | return err; |
772 | diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h |
773 | index a16fbe9a2a67..aa99c73c3fbd 100644 |
774 | --- a/include/net/pkt_sched.h |
775 | +++ b/include/net/pkt_sched.h |
776 | @@ -118,7 +118,12 @@ void __qdisc_run(struct Qdisc *q); |
777 | static inline void qdisc_run(struct Qdisc *q) |
778 | { |
779 | if (qdisc_run_begin(q)) { |
780 | - __qdisc_run(q); |
781 | + /* NOLOCK qdisc must check 'state' under the qdisc seqlock |
782 | + * to avoid racing with dev_qdisc_reset() |
783 | + */ |
784 | + if (!(q->flags & TCQ_F_NOLOCK) || |
785 | + likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) |
786 | + __qdisc_run(q); |
787 | qdisc_run_end(q); |
788 | } |
789 | } |
790 | diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h |
791 | index d9112de85261..43f4a818d88f 100644 |
792 | --- a/include/net/sock_reuseport.h |
793 | +++ b/include/net/sock_reuseport.h |
794 | @@ -21,7 +21,8 @@ struct sock_reuseport { |
795 | unsigned int synq_overflow_ts; |
796 | /* ID stays the same even after the size of socks[] grows. */ |
797 | unsigned int reuseport_id; |
798 | - bool bind_inany; |
799 | + unsigned int bind_inany:1; |
800 | + unsigned int has_conns:1; |
801 | struct bpf_prog __rcu *prog; /* optional BPF sock selector */ |
802 | struct sock *socks[0]; /* array of sock pointers */ |
803 | }; |
804 | @@ -37,6 +38,23 @@ extern struct sock *reuseport_select_sock(struct sock *sk, |
805 | extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog); |
806 | extern int reuseport_detach_prog(struct sock *sk); |
807 | |
808 | +static inline bool reuseport_has_conns(struct sock *sk, bool set) |
809 | +{ |
810 | + struct sock_reuseport *reuse; |
811 | + bool ret = false; |
812 | + |
813 | + rcu_read_lock(); |
814 | + reuse = rcu_dereference(sk->sk_reuseport_cb); |
815 | + if (reuse) { |
816 | + if (set) |
817 | + reuse->has_conns = 1; |
818 | + ret = reuse->has_conns; |
819 | + } |
820 | + rcu_read_unlock(); |
821 | + |
822 | + return ret; |
823 | +} |
824 | + |
825 | int reuseport_get_id(struct sock_reuseport *reuse); |
826 | |
827 | #endif /* _SOCK_REUSEPORT_H */ |
828 | diff --git a/net/core/dev.c b/net/core/dev.c |
829 | index 5156c0edebe8..4ed9df74eb8a 100644 |
830 | --- a/net/core/dev.c |
831 | +++ b/net/core/dev.c |
832 | @@ -3467,18 +3467,22 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, |
833 | qdisc_calculate_pkt_len(skb, q); |
834 | |
835 | if (q->flags & TCQ_F_NOLOCK) { |
836 | - if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) { |
837 | - __qdisc_drop(skb, &to_free); |
838 | - rc = NET_XMIT_DROP; |
839 | - } else if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && |
840 | - qdisc_run_begin(q)) { |
841 | + if ((q->flags & TCQ_F_CAN_BYPASS) && q->empty && |
842 | + qdisc_run_begin(q)) { |
843 | + if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, |
844 | + &q->state))) { |
845 | + __qdisc_drop(skb, &to_free); |
846 | + rc = NET_XMIT_DROP; |
847 | + goto end_run; |
848 | + } |
849 | qdisc_bstats_cpu_update(q, skb); |
850 | |
851 | + rc = NET_XMIT_SUCCESS; |
852 | if (sch_direct_xmit(skb, q, dev, txq, NULL, true)) |
853 | __qdisc_run(q); |
854 | |
855 | +end_run: |
856 | qdisc_run_end(q); |
857 | - rc = NET_XMIT_SUCCESS; |
858 | } else { |
859 | rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK; |
860 | qdisc_run(q); |
861 | diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c |
862 | index 9408f9264d05..f3ceec93f392 100644 |
863 | --- a/net/core/sock_reuseport.c |
864 | +++ b/net/core/sock_reuseport.c |
865 | @@ -295,8 +295,19 @@ struct sock *reuseport_select_sock(struct sock *sk, |
866 | |
867 | select_by_hash: |
868 | /* no bpf or invalid bpf result: fall back to hash usage */ |
869 | - if (!sk2) |
870 | - sk2 = reuse->socks[reciprocal_scale(hash, socks)]; |
871 | + if (!sk2) { |
872 | + int i, j; |
873 | + |
874 | + i = j = reciprocal_scale(hash, socks); |
875 | + while (reuse->socks[i]->sk_state == TCP_ESTABLISHED) { |
876 | + i++; |
877 | + if (i >= reuse->num_socks) |
878 | + i = 0; |
879 | + if (i == j) |
880 | + goto out; |
881 | + } |
882 | + sk2 = reuse->socks[i]; |
883 | + } |
884 | } |
885 | |
886 | out: |
887 | diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c |
888 | index 3abd173ebacb..96f787cf9b6e 100644 |
889 | --- a/net/dsa/dsa2.c |
890 | +++ b/net/dsa/dsa2.c |
891 | @@ -623,6 +623,8 @@ static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master) |
892 | tag_protocol = ds->ops->get_tag_protocol(ds, dp->index); |
893 | tag_ops = dsa_tag_driver_get(tag_protocol); |
894 | if (IS_ERR(tag_ops)) { |
895 | + if (PTR_ERR(tag_ops) == -ENOPROTOOPT) |
896 | + return -EPROBE_DEFER; |
897 | dev_warn(ds->dev, "No tagger for this switch\n"); |
898 | return PTR_ERR(tag_ops); |
899 | } |
900 | diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c |
901 | index 7bd29e694603..9a0fe0c2fa02 100644 |
902 | --- a/net/ipv4/datagram.c |
903 | +++ b/net/ipv4/datagram.c |
904 | @@ -15,6 +15,7 @@ |
905 | #include <net/sock.h> |
906 | #include <net/route.h> |
907 | #include <net/tcp_states.h> |
908 | +#include <net/sock_reuseport.h> |
909 | |
910 | int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
911 | { |
912 | @@ -69,6 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len |
913 | } |
914 | inet->inet_daddr = fl4->daddr; |
915 | inet->inet_dport = usin->sin_port; |
916 | + reuseport_has_conns(sk, true); |
917 | sk->sk_state = TCP_ESTABLISHED; |
918 | sk_set_txhash(sk); |
919 | inet->inet_id = jiffies; |
920 | diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c |
921 | index d88821c794fb..16486c8b708b 100644 |
922 | --- a/net/ipv4/udp.c |
923 | +++ b/net/ipv4/udp.c |
924 | @@ -423,12 +423,13 @@ static struct sock *udp4_lib_lookup2(struct net *net, |
925 | score = compute_score(sk, net, saddr, sport, |
926 | daddr, hnum, dif, sdif); |
927 | if (score > badness) { |
928 | - if (sk->sk_reuseport) { |
929 | + if (sk->sk_reuseport && |
930 | + sk->sk_state != TCP_ESTABLISHED) { |
931 | hash = udp_ehashfn(net, daddr, hnum, |
932 | saddr, sport); |
933 | result = reuseport_select_sock(sk, hash, skb, |
934 | sizeof(struct udphdr)); |
935 | - if (result) |
936 | + if (result && !reuseport_has_conns(sk, false)) |
937 | return result; |
938 | } |
939 | badness = score; |
940 | diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c |
941 | index 9ab897ded4df..96f939248d2f 100644 |
942 | --- a/net/ipv6/datagram.c |
943 | +++ b/net/ipv6/datagram.c |
944 | @@ -27,6 +27,7 @@ |
945 | #include <net/ip6_route.h> |
946 | #include <net/tcp_states.h> |
947 | #include <net/dsfield.h> |
948 | +#include <net/sock_reuseport.h> |
949 | |
950 | #include <linux/errqueue.h> |
951 | #include <linux/uaccess.h> |
952 | @@ -254,6 +255,7 @@ ipv4_connected: |
953 | goto out; |
954 | } |
955 | |
956 | + reuseport_has_conns(sk, true); |
957 | sk->sk_state = TCP_ESTABLISHED; |
958 | sk_set_txhash(sk); |
959 | out: |
960 | diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c |
961 | index dd2d0b963260..d5779d6a6065 100644 |
962 | --- a/net/ipv6/ip6_gre.c |
963 | +++ b/net/ipv6/ip6_gre.c |
964 | @@ -968,7 +968,7 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb, |
965 | if (unlikely(!tun_info || |
966 | !(tun_info->mode & IP_TUNNEL_INFO_TX) || |
967 | ip_tunnel_info_af(tun_info) != AF_INET6)) |
968 | - return -EINVAL; |
969 | + goto tx_err; |
970 | |
971 | key = &tun_info->key; |
972 | memset(&fl6, 0, sizeof(fl6)); |
973 | diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c |
974 | index 827fe7385078..5995fdc99d3f 100644 |
975 | --- a/net/ipv6/udp.c |
976 | +++ b/net/ipv6/udp.c |
977 | @@ -158,13 +158,14 @@ static struct sock *udp6_lib_lookup2(struct net *net, |
978 | score = compute_score(sk, net, saddr, sport, |
979 | daddr, hnum, dif, sdif); |
980 | if (score > badness) { |
981 | - if (sk->sk_reuseport) { |
982 | + if (sk->sk_reuseport && |
983 | + sk->sk_state != TCP_ESTABLISHED) { |
984 | hash = udp6_ehashfn(net, daddr, hnum, |
985 | saddr, sport); |
986 | |
987 | result = reuseport_select_sock(sk, hash, skb, |
988 | sizeof(struct udphdr)); |
989 | - if (result) |
990 | + if (result && !reuseport_has_conns(sk, false)) |
991 | return result; |
992 | } |
993 | result = sk; |
994 | diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c |
995 | index ac28f6a5d70e..17bd8f539bc7 100644 |
996 | --- a/net/sched/sch_generic.c |
997 | +++ b/net/sched/sch_generic.c |
998 | @@ -985,6 +985,9 @@ static void qdisc_destroy(struct Qdisc *qdisc) |
999 | |
1000 | void qdisc_put(struct Qdisc *qdisc) |
1001 | { |
1002 | + if (!qdisc) |
1003 | + return; |
1004 | + |
1005 | if (qdisc->flags & TCQ_F_BUILTIN || |
1006 | !refcount_dec_and_test(&qdisc->refcnt)) |
1007 | return; |
1008 | diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c |
1009 | index fd05ae1437a9..c9cfc796eccf 100644 |
1010 | --- a/net/wireless/nl80211.c |
1011 | +++ b/net/wireless/nl80211.c |
1012 | @@ -10659,9 +10659,11 @@ static int cfg80211_cqm_rssi_update(struct cfg80211_registered_device *rdev, |
1013 | hyst = wdev->cqm_config->rssi_hyst; |
1014 | n = wdev->cqm_config->n_rssi_thresholds; |
1015 | |
1016 | - for (i = 0; i < n; i++) |
1017 | + for (i = 0; i < n; i++) { |
1018 | + i = array_index_nospec(i, n); |
1019 | if (last < wdev->cqm_config->rssi_thresholds[i]) |
1020 | break; |
1021 | + } |
1022 | |
1023 | low_index = i - 1; |
1024 | if (low_index >= 0) { |
1025 | diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c |
1026 | index 5294abb3f178..8ffd07e2a160 100644 |
1027 | --- a/virt/kvm/coalesced_mmio.c |
1028 | +++ b/virt/kvm/coalesced_mmio.c |
1029 | @@ -40,7 +40,7 @@ static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev, |
1030 | return 1; |
1031 | } |
1032 | |
1033 | -static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) |
1034 | +static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last) |
1035 | { |
1036 | struct kvm_coalesced_mmio_ring *ring; |
1037 | unsigned avail; |
1038 | @@ -52,7 +52,7 @@ static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev) |
1039 | * there is always one unused entry in the buffer |
1040 | */ |
1041 | ring = dev->kvm->coalesced_mmio_ring; |
1042 | - avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX; |
1043 | + avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX; |
1044 | if (avail == 0) { |
1045 | /* full */ |
1046 | return 0; |
1047 | @@ -67,25 +67,28 @@ static int coalesced_mmio_write(struct kvm_vcpu *vcpu, |
1048 | { |
1049 | struct kvm_coalesced_mmio_dev *dev = to_mmio(this); |
1050 | struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring; |
1051 | + __u32 insert; |
1052 | |
1053 | if (!coalesced_mmio_in_range(dev, addr, len)) |
1054 | return -EOPNOTSUPP; |
1055 | |
1056 | spin_lock(&dev->kvm->ring_lock); |
1057 | |
1058 | - if (!coalesced_mmio_has_room(dev)) { |
1059 | + insert = READ_ONCE(ring->last); |
1060 | + if (!coalesced_mmio_has_room(dev, insert) || |
1061 | + insert >= KVM_COALESCED_MMIO_MAX) { |
1062 | spin_unlock(&dev->kvm->ring_lock); |
1063 | return -EOPNOTSUPP; |
1064 | } |
1065 | |
1066 | /* copy data in first free entry of the ring */ |
1067 | |
1068 | - ring->coalesced_mmio[ring->last].phys_addr = addr; |
1069 | - ring->coalesced_mmio[ring->last].len = len; |
1070 | - memcpy(ring->coalesced_mmio[ring->last].data, val, len); |
1071 | - ring->coalesced_mmio[ring->last].pio = dev->zone.pio; |
1072 | + ring->coalesced_mmio[insert].phys_addr = addr; |
1073 | + ring->coalesced_mmio[insert].len = len; |
1074 | + memcpy(ring->coalesced_mmio[insert].data, val, len); |
1075 | + ring->coalesced_mmio[insert].pio = dev->zone.pio; |
1076 | smp_wmb(); |
1077 | - ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX; |
1078 | + ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX; |
1079 | spin_unlock(&dev->kvm->ring_lock); |
1080 | return 0; |
1081 | } |