Contents of /trunk/kernel-alx/patches-5.4/0253-5.4.154-all-fixes.patch
Parent Directory | Revision Log
Revision 3637 -
(show annotations)
(download)
Mon Oct 24 12:40:44 2022 UTC (23 months ago) by niro
File size: 28735 byte(s)
-add missing
1 | diff --git a/Makefile b/Makefile |
2 | index df9b1d07ca097..3358f56a37f06 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 153 |
10 | +SUBLEVEL = 154 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c |
15 | index 05610e6924c16..f7121b775e5f0 100644 |
16 | --- a/arch/m68k/kernel/signal.c |
17 | +++ b/arch/m68k/kernel/signal.c |
18 | @@ -448,7 +448,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs) |
19 | |
20 | if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) { |
21 | fpu_version = sc->sc_fpstate[0]; |
22 | - if (CPU_IS_020_OR_030 && |
23 | + if (CPU_IS_020_OR_030 && !regs->stkadj && |
24 | regs->vector >= (VEC_FPBRUC * 4) && |
25 | regs->vector <= (VEC_FPNAN * 4)) { |
26 | /* Clear pending exception in 68882 idle frame */ |
27 | @@ -511,7 +511,7 @@ static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs * |
28 | if (!(CPU_IS_060 || CPU_IS_COLDFIRE)) |
29 | context_size = fpstate[1]; |
30 | fpu_version = fpstate[0]; |
31 | - if (CPU_IS_020_OR_030 && |
32 | + if (CPU_IS_020_OR_030 && !regs->stkadj && |
33 | regs->vector >= (VEC_FPBRUC * 4) && |
34 | regs->vector <= (VEC_FPNAN * 4)) { |
35 | /* Clear pending exception in 68882 idle frame */ |
36 | @@ -829,18 +829,24 @@ badframe: |
37 | return 0; |
38 | } |
39 | |
40 | +static inline struct pt_regs *rte_regs(struct pt_regs *regs) |
41 | +{ |
42 | + return (void *)regs + regs->stkadj; |
43 | +} |
44 | + |
45 | static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, |
46 | unsigned long mask) |
47 | { |
48 | + struct pt_regs *tregs = rte_regs(regs); |
49 | sc->sc_mask = mask; |
50 | sc->sc_usp = rdusp(); |
51 | sc->sc_d0 = regs->d0; |
52 | sc->sc_d1 = regs->d1; |
53 | sc->sc_a0 = regs->a0; |
54 | sc->sc_a1 = regs->a1; |
55 | - sc->sc_sr = regs->sr; |
56 | - sc->sc_pc = regs->pc; |
57 | - sc->sc_formatvec = regs->format << 12 | regs->vector; |
58 | + sc->sc_sr = tregs->sr; |
59 | + sc->sc_pc = tregs->pc; |
60 | + sc->sc_formatvec = tregs->format << 12 | tregs->vector; |
61 | save_a5_state(sc, regs); |
62 | save_fpu_state(sc, regs); |
63 | } |
64 | @@ -848,6 +854,7 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs, |
65 | static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs) |
66 | { |
67 | struct switch_stack *sw = (struct switch_stack *)regs - 1; |
68 | + struct pt_regs *tregs = rte_regs(regs); |
69 | greg_t __user *gregs = uc->uc_mcontext.gregs; |
70 | int err = 0; |
71 | |
72 | @@ -868,9 +875,9 @@ static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs * |
73 | err |= __put_user(sw->a5, &gregs[13]); |
74 | err |= __put_user(sw->a6, &gregs[14]); |
75 | err |= __put_user(rdusp(), &gregs[15]); |
76 | - err |= __put_user(regs->pc, &gregs[16]); |
77 | - err |= __put_user(regs->sr, &gregs[17]); |
78 | - err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec); |
79 | + err |= __put_user(tregs->pc, &gregs[16]); |
80 | + err |= __put_user(tregs->sr, &gregs[17]); |
81 | + err |= __put_user((tregs->format << 12) | tregs->vector, &uc->uc_formatvec); |
82 | err |= rt_save_fpu_state(uc, regs); |
83 | return err; |
84 | } |
85 | @@ -887,13 +894,14 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, |
86 | struct pt_regs *regs) |
87 | { |
88 | struct sigframe __user *frame; |
89 | - int fsize = frame_extra_sizes(regs->format); |
90 | + struct pt_regs *tregs = rte_regs(regs); |
91 | + int fsize = frame_extra_sizes(tregs->format); |
92 | struct sigcontext context; |
93 | int err = 0, sig = ksig->sig; |
94 | |
95 | if (fsize < 0) { |
96 | pr_debug("setup_frame: Unknown frame format %#x\n", |
97 | - regs->format); |
98 | + tregs->format); |
99 | return -EFAULT; |
100 | } |
101 | |
102 | @@ -904,7 +912,7 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, |
103 | |
104 | err |= __put_user(sig, &frame->sig); |
105 | |
106 | - err |= __put_user(regs->vector, &frame->code); |
107 | + err |= __put_user(tregs->vector, &frame->code); |
108 | err |= __put_user(&frame->sc, &frame->psc); |
109 | |
110 | if (_NSIG_WORDS > 1) |
111 | @@ -929,34 +937,28 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, |
112 | |
113 | push_cache ((unsigned long) &frame->retcode); |
114 | |
115 | - /* |
116 | - * Set up registers for signal handler. All the state we are about |
117 | - * to destroy is successfully copied to sigframe. |
118 | - */ |
119 | - wrusp ((unsigned long) frame); |
120 | - regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
121 | - adjustformat(regs); |
122 | - |
123 | /* |
124 | * This is subtle; if we build more than one sigframe, all but the |
125 | * first one will see frame format 0 and have fsize == 0, so we won't |
126 | * screw stkadj. |
127 | */ |
128 | - if (fsize) |
129 | + if (fsize) { |
130 | regs->stkadj = fsize; |
131 | - |
132 | - /* Prepare to skip over the extra stuff in the exception frame. */ |
133 | - if (regs->stkadj) { |
134 | - struct pt_regs *tregs = |
135 | - (struct pt_regs *)((ulong)regs + regs->stkadj); |
136 | + tregs = rte_regs(regs); |
137 | pr_debug("Performing stackadjust=%04lx\n", regs->stkadj); |
138 | - /* This must be copied with decreasing addresses to |
139 | - handle overlaps. */ |
140 | tregs->vector = 0; |
141 | tregs->format = 0; |
142 | - tregs->pc = regs->pc; |
143 | tregs->sr = regs->sr; |
144 | } |
145 | + |
146 | + /* |
147 | + * Set up registers for signal handler. All the state we are about |
148 | + * to destroy is successfully copied to sigframe. |
149 | + */ |
150 | + wrusp ((unsigned long) frame); |
151 | + tregs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
152 | + adjustformat(regs); |
153 | + |
154 | return 0; |
155 | } |
156 | |
157 | @@ -964,7 +966,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
158 | struct pt_regs *regs) |
159 | { |
160 | struct rt_sigframe __user *frame; |
161 | - int fsize = frame_extra_sizes(regs->format); |
162 | + struct pt_regs *tregs = rte_regs(regs); |
163 | + int fsize = frame_extra_sizes(tregs->format); |
164 | int err = 0, sig = ksig->sig; |
165 | |
166 | if (fsize < 0) { |
167 | @@ -1013,34 +1016,27 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, |
168 | |
169 | push_cache ((unsigned long) &frame->retcode); |
170 | |
171 | - /* |
172 | - * Set up registers for signal handler. All the state we are about |
173 | - * to destroy is successfully copied to sigframe. |
174 | - */ |
175 | - wrusp ((unsigned long) frame); |
176 | - regs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
177 | - adjustformat(regs); |
178 | - |
179 | /* |
180 | * This is subtle; if we build more than one sigframe, all but the |
181 | * first one will see frame format 0 and have fsize == 0, so we won't |
182 | * screw stkadj. |
183 | */ |
184 | - if (fsize) |
185 | + if (fsize) { |
186 | regs->stkadj = fsize; |
187 | - |
188 | - /* Prepare to skip over the extra stuff in the exception frame. */ |
189 | - if (regs->stkadj) { |
190 | - struct pt_regs *tregs = |
191 | - (struct pt_regs *)((ulong)regs + regs->stkadj); |
192 | + tregs = rte_regs(regs); |
193 | pr_debug("Performing stackadjust=%04lx\n", regs->stkadj); |
194 | - /* This must be copied with decreasing addresses to |
195 | - handle overlaps. */ |
196 | tregs->vector = 0; |
197 | tregs->format = 0; |
198 | - tregs->pc = regs->pc; |
199 | tregs->sr = regs->sr; |
200 | } |
201 | + |
202 | + /* |
203 | + * Set up registers for signal handler. All the state we are about |
204 | + * to destroy is successfully copied to sigframe. |
205 | + */ |
206 | + wrusp ((unsigned long) frame); |
207 | + tregs->pc = (unsigned long) ksig->ka.sa.sa_handler; |
208 | + adjustformat(regs); |
209 | return 0; |
210 | } |
211 | |
212 | diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c |
213 | index f642e066e67a2..85ee0e849647e 100644 |
214 | --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c |
215 | +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c |
216 | @@ -903,6 +903,8 @@ static int gmc_v10_0_hw_fini(void *handle) |
217 | { |
218 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
219 | |
220 | + gmc_v10_0_gart_disable(adev); |
221 | + |
222 | if (amdgpu_sriov_vf(adev)) { |
223 | /* full access mode, so don't touch any GMC register */ |
224 | DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); |
225 | @@ -910,7 +912,6 @@ static int gmc_v10_0_hw_fini(void *handle) |
226 | } |
227 | |
228 | amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); |
229 | - gmc_v10_0_gart_disable(adev); |
230 | |
231 | return 0; |
232 | } |
233 | diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c |
234 | index 688111ef814de..63205de4a5656 100644 |
235 | --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c |
236 | +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c |
237 | @@ -1526,6 +1526,8 @@ static int gmc_v9_0_hw_fini(void *handle) |
238 | { |
239 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
240 | |
241 | + gmc_v9_0_gart_disable(adev); |
242 | + |
243 | if (amdgpu_sriov_vf(adev)) { |
244 | /* full access mode, so don't touch any GMC register */ |
245 | DRM_DEBUG("For SRIOV client, shouldn't do anything.\n"); |
246 | @@ -1534,7 +1536,6 @@ static int gmc_v9_0_hw_fini(void *handle) |
247 | |
248 | amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0); |
249 | amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); |
250 | - gmc_v9_0_gart_disable(adev); |
251 | |
252 | return 0; |
253 | } |
254 | diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c |
255 | index 6909c045fece1..07df64daf7dae 100644 |
256 | --- a/drivers/hid/hid-apple.c |
257 | +++ b/drivers/hid/hid-apple.c |
258 | @@ -301,12 +301,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field, |
259 | |
260 | /* |
261 | * MacBook JIS keyboard has wrong logical maximum |
262 | + * Magic Keyboard JIS has wrong logical maximum |
263 | */ |
264 | static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
265 | unsigned int *rsize) |
266 | { |
267 | struct apple_sc *asc = hid_get_drvdata(hdev); |
268 | |
269 | + if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) { |
270 | + hid_info(hdev, |
271 | + "fixing up Magic Keyboard JIS report descriptor\n"); |
272 | + rdesc[64] = rdesc[70] = 0xe7; |
273 | + } |
274 | + |
275 | if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 && |
276 | rdesc[53] == 0x65 && rdesc[59] == 0x65) { |
277 | hid_info(hdev, |
278 | diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c |
279 | index d5425bc1ad61a..f6be2e70a4967 100644 |
280 | --- a/drivers/hid/wacom_wac.c |
281 | +++ b/drivers/hid/wacom_wac.c |
282 | @@ -4715,6 +4715,12 @@ static const struct wacom_features wacom_features_0x393 = |
283 | { "Wacom Intuos Pro S", 31920, 19950, 8191, 63, |
284 | INTUOSP2S_BT, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 7, |
285 | .touch_max = 10 }; |
286 | +static const struct wacom_features wacom_features_0x3c6 = |
287 | + { "Wacom Intuos BT S", 15200, 9500, 4095, 63, |
288 | + INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; |
289 | +static const struct wacom_features wacom_features_0x3c8 = |
290 | + { "Wacom Intuos BT M", 21600, 13500, 4095, 63, |
291 | + INTUOSHT3_BT, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 }; |
292 | |
293 | static const struct wacom_features wacom_features_HID_ANY_ID = |
294 | { "Wacom HID", .type = HID_GENERIC, .oVid = HID_ANY_ID, .oPid = HID_ANY_ID }; |
295 | @@ -4888,6 +4894,8 @@ const struct hid_device_id wacom_ids[] = { |
296 | { USB_DEVICE_WACOM(0x37A) }, |
297 | { USB_DEVICE_WACOM(0x37B) }, |
298 | { BT_DEVICE_WACOM(0x393) }, |
299 | + { BT_DEVICE_WACOM(0x3c6) }, |
300 | + { BT_DEVICE_WACOM(0x3c8) }, |
301 | { USB_DEVICE_WACOM(0x4001) }, |
302 | { USB_DEVICE_WACOM(0x4004) }, |
303 | { USB_DEVICE_WACOM(0x5000) }, |
304 | diff --git a/drivers/net/ethernet/sun/Kconfig b/drivers/net/ethernet/sun/Kconfig |
305 | index 7b982e02ea3a4..1080a2a3e13a2 100644 |
306 | --- a/drivers/net/ethernet/sun/Kconfig |
307 | +++ b/drivers/net/ethernet/sun/Kconfig |
308 | @@ -73,6 +73,7 @@ config CASSINI |
309 | config SUNVNET_COMMON |
310 | tristate "Common routines to support Sun Virtual Networking" |
311 | depends on SUN_LDOMS |
312 | + depends on INET |
313 | default m |
314 | |
315 | config SUNVNET |
316 | diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c |
317 | index af8eabe7a6d44..d372626c603d4 100644 |
318 | --- a/drivers/net/phy/bcm7xxx.c |
319 | +++ b/drivers/net/phy/bcm7xxx.c |
320 | @@ -26,7 +26,12 @@ |
321 | #define MII_BCM7XXX_SHD_2_ADDR_CTRL 0xe |
322 | #define MII_BCM7XXX_SHD_2_CTRL_STAT 0xf |
323 | #define MII_BCM7XXX_SHD_2_BIAS_TRIM 0x1a |
324 | +#define MII_BCM7XXX_SHD_3_PCS_CTRL 0x0 |
325 | +#define MII_BCM7XXX_SHD_3_PCS_STATUS 0x1 |
326 | +#define MII_BCM7XXX_SHD_3_EEE_CAP 0x2 |
327 | #define MII_BCM7XXX_SHD_3_AN_EEE_ADV 0x3 |
328 | +#define MII_BCM7XXX_SHD_3_EEE_LP 0x4 |
329 | +#define MII_BCM7XXX_SHD_3_EEE_WK_ERR 0x5 |
330 | #define MII_BCM7XXX_SHD_3_PCS_CTRL_2 0x6 |
331 | #define MII_BCM7XXX_PCS_CTRL_2_DEF 0x4400 |
332 | #define MII_BCM7XXX_SHD_3_AN_STAT 0xb |
333 | @@ -210,25 +215,37 @@ static int bcm7xxx_28nm_resume(struct phy_device *phydev) |
334 | return genphy_config_aneg(phydev); |
335 | } |
336 | |
337 | -static int phy_set_clr_bits(struct phy_device *dev, int location, |
338 | - int set_mask, int clr_mask) |
339 | +static int __phy_set_clr_bits(struct phy_device *dev, int location, |
340 | + int set_mask, int clr_mask) |
341 | { |
342 | int v, ret; |
343 | |
344 | - v = phy_read(dev, location); |
345 | + v = __phy_read(dev, location); |
346 | if (v < 0) |
347 | return v; |
348 | |
349 | v &= ~clr_mask; |
350 | v |= set_mask; |
351 | |
352 | - ret = phy_write(dev, location, v); |
353 | + ret = __phy_write(dev, location, v); |
354 | if (ret < 0) |
355 | return ret; |
356 | |
357 | return v; |
358 | } |
359 | |
360 | +static int phy_set_clr_bits(struct phy_device *dev, int location, |
361 | + int set_mask, int clr_mask) |
362 | +{ |
363 | + int ret; |
364 | + |
365 | + mutex_lock(&dev->mdio.bus->mdio_lock); |
366 | + ret = __phy_set_clr_bits(dev, location, set_mask, clr_mask); |
367 | + mutex_unlock(&dev->mdio.bus->mdio_lock); |
368 | + |
369 | + return ret; |
370 | +} |
371 | + |
372 | static int bcm7xxx_28nm_ephy_01_afe_config_init(struct phy_device *phydev) |
373 | { |
374 | int ret; |
375 | @@ -392,6 +409,93 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev) |
376 | return bcm7xxx_28nm_ephy_apd_enable(phydev); |
377 | } |
378 | |
379 | +#define MII_BCM7XXX_REG_INVALID 0xff |
380 | + |
381 | +static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum) |
382 | +{ |
383 | + switch (regnum) { |
384 | + case MDIO_CTRL1: |
385 | + return MII_BCM7XXX_SHD_3_PCS_CTRL; |
386 | + case MDIO_STAT1: |
387 | + return MII_BCM7XXX_SHD_3_PCS_STATUS; |
388 | + case MDIO_PCS_EEE_ABLE: |
389 | + return MII_BCM7XXX_SHD_3_EEE_CAP; |
390 | + case MDIO_AN_EEE_ADV: |
391 | + return MII_BCM7XXX_SHD_3_AN_EEE_ADV; |
392 | + case MDIO_AN_EEE_LPABLE: |
393 | + return MII_BCM7XXX_SHD_3_EEE_LP; |
394 | + case MDIO_PCS_EEE_WK_ERR: |
395 | + return MII_BCM7XXX_SHD_3_EEE_WK_ERR; |
396 | + default: |
397 | + return MII_BCM7XXX_REG_INVALID; |
398 | + } |
399 | +} |
400 | + |
401 | +static bool bcm7xxx_28nm_ephy_dev_valid(int devnum) |
402 | +{ |
403 | + return devnum == MDIO_MMD_AN || devnum == MDIO_MMD_PCS; |
404 | +} |
405 | + |
406 | +static int bcm7xxx_28nm_ephy_read_mmd(struct phy_device *phydev, |
407 | + int devnum, u16 regnum) |
408 | +{ |
409 | + u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum); |
410 | + int ret; |
411 | + |
412 | + if (!bcm7xxx_28nm_ephy_dev_valid(devnum) || |
413 | + shd == MII_BCM7XXX_REG_INVALID) |
414 | + return -EOPNOTSUPP; |
415 | + |
416 | + /* set shadow mode 2 */ |
417 | + ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, |
418 | + MII_BCM7XXX_SHD_MODE_2, 0); |
419 | + if (ret < 0) |
420 | + return ret; |
421 | + |
422 | + /* Access the desired shadow register address */ |
423 | + ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd); |
424 | + if (ret < 0) |
425 | + goto reset_shadow_mode; |
426 | + |
427 | + ret = __phy_read(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT); |
428 | + |
429 | +reset_shadow_mode: |
430 | + /* reset shadow mode 2 */ |
431 | + __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, |
432 | + MII_BCM7XXX_SHD_MODE_2); |
433 | + return ret; |
434 | +} |
435 | + |
436 | +static int bcm7xxx_28nm_ephy_write_mmd(struct phy_device *phydev, |
437 | + int devnum, u16 regnum, u16 val) |
438 | +{ |
439 | + u8 shd = bcm7xxx_28nm_ephy_regnum_to_shd(regnum); |
440 | + int ret; |
441 | + |
442 | + if (!bcm7xxx_28nm_ephy_dev_valid(devnum) || |
443 | + shd == MII_BCM7XXX_REG_INVALID) |
444 | + return -EOPNOTSUPP; |
445 | + |
446 | + /* set shadow mode 2 */ |
447 | + ret = __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, |
448 | + MII_BCM7XXX_SHD_MODE_2, 0); |
449 | + if (ret < 0) |
450 | + return ret; |
451 | + |
452 | + /* Access the desired shadow register address */ |
453 | + ret = __phy_write(phydev, MII_BCM7XXX_SHD_2_ADDR_CTRL, shd); |
454 | + if (ret < 0) |
455 | + goto reset_shadow_mode; |
456 | + |
457 | + /* Write the desired value in the shadow register */ |
458 | + __phy_write(phydev, MII_BCM7XXX_SHD_2_CTRL_STAT, val); |
459 | + |
460 | +reset_shadow_mode: |
461 | + /* reset shadow mode 2 */ |
462 | + return __phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, |
463 | + MII_BCM7XXX_SHD_MODE_2); |
464 | +} |
465 | + |
466 | static int bcm7xxx_28nm_ephy_resume(struct phy_device *phydev) |
467 | { |
468 | int ret; |
469 | @@ -563,6 +667,8 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) |
470 | .get_strings = bcm_phy_get_strings, \ |
471 | .get_stats = bcm7xxx_28nm_get_phy_stats, \ |
472 | .probe = bcm7xxx_28nm_probe, \ |
473 | + .read_mmd = bcm7xxx_28nm_ephy_read_mmd, \ |
474 | + .write_mmd = bcm7xxx_28nm_ephy_write_mmd, \ |
475 | } |
476 | |
477 | #define BCM7XXX_40NM_EPHY(_oui, _name) \ |
478 | diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c |
479 | index 43e682297fd5f..0a1734f34587d 100644 |
480 | --- a/drivers/scsi/ses.c |
481 | +++ b/drivers/scsi/ses.c |
482 | @@ -118,7 +118,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, |
483 | static int ses_send_diag(struct scsi_device *sdev, int page_code, |
484 | void *buf, int bufflen) |
485 | { |
486 | - u32 result; |
487 | + int result; |
488 | |
489 | unsigned char cmd[] = { |
490 | SEND_DIAGNOSTIC, |
491 | diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c |
492 | index bfec84aacd90b..cb833c5fb9ce2 100644 |
493 | --- a/drivers/scsi/virtio_scsi.c |
494 | +++ b/drivers/scsi/virtio_scsi.c |
495 | @@ -297,7 +297,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, |
496 | } |
497 | break; |
498 | default: |
499 | - pr_info("Unsupport virtio scsi event reason %x\n", event->reason); |
500 | + pr_info("Unsupported virtio scsi event reason %x\n", event->reason); |
501 | } |
502 | } |
503 | |
504 | @@ -381,7 +381,7 @@ static void virtscsi_handle_event(struct work_struct *work) |
505 | virtscsi_handle_param_change(vscsi, event); |
506 | break; |
507 | default: |
508 | - pr_err("Unsupport virtio scsi event %x\n", event->event); |
509 | + pr_err("Unsupported virtio scsi event %x\n", event->event); |
510 | } |
511 | virtscsi_kick_event(vscsi, event_node); |
512 | } |
513 | diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c |
514 | index 46151bda62368..cdb10e9fded65 100644 |
515 | --- a/fs/ext4/inline.c |
516 | +++ b/fs/ext4/inline.c |
517 | @@ -733,18 +733,13 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, |
518 | void *kaddr; |
519 | struct ext4_iloc iloc; |
520 | |
521 | - if (unlikely(copied < len)) { |
522 | - if (!PageUptodate(page)) { |
523 | - copied = 0; |
524 | - goto out; |
525 | - } |
526 | - } |
527 | + if (unlikely(copied < len) && !PageUptodate(page)) |
528 | + return 0; |
529 | |
530 | ret = ext4_get_inode_loc(inode, &iloc); |
531 | if (ret) { |
532 | ext4_std_error(inode->i_sb, ret); |
533 | - copied = 0; |
534 | - goto out; |
535 | + return ret; |
536 | } |
537 | |
538 | ext4_write_lock_xattr(inode, &no_expand); |
539 | @@ -757,7 +752,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, |
540 | (void) ext4_find_inline_data_nolock(inode); |
541 | |
542 | kaddr = kmap_atomic(page); |
543 | - ext4_write_inline_data(inode, &iloc, kaddr, pos, len); |
544 | + ext4_write_inline_data(inode, &iloc, kaddr, pos, copied); |
545 | kunmap_atomic(kaddr); |
546 | SetPageUptodate(page); |
547 | /* clear page dirty so that writepages wouldn't work for us. */ |
548 | @@ -766,7 +761,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len, |
549 | ext4_write_unlock_xattr(inode, &no_expand); |
550 | brelse(iloc.bh); |
551 | mark_inode_dirty(inode); |
552 | -out: |
553 | + |
554 | return copied; |
555 | } |
556 | |
557 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
558 | index 48b467353f6f1..dcbd8ac8d4711 100644 |
559 | --- a/fs/ext4/inode.c |
560 | +++ b/fs/ext4/inode.c |
561 | @@ -1439,6 +1439,7 @@ static int ext4_write_end(struct file *file, |
562 | goto errout; |
563 | } |
564 | copied = ret; |
565 | + ret = 0; |
566 | } else |
567 | copied = block_write_end(file, mapping, pos, |
568 | len, copied, page, fsdata); |
569 | @@ -1465,13 +1466,14 @@ static int ext4_write_end(struct file *file, |
570 | if (i_size_changed || inline_data) |
571 | ext4_mark_inode_dirty(handle, inode); |
572 | |
573 | +errout: |
574 | if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode)) |
575 | /* if we have allocated more blocks and copied |
576 | * less. We will have blocks allocated outside |
577 | * inode->i_size. So truncate them |
578 | */ |
579 | ext4_orphan_add(handle, inode); |
580 | -errout: |
581 | + |
582 | ret2 = ext4_journal_stop(handle); |
583 | if (!ret) |
584 | ret = ret2; |
585 | @@ -1554,6 +1556,7 @@ static int ext4_journalled_write_end(struct file *file, |
586 | goto errout; |
587 | } |
588 | copied = ret; |
589 | + ret = 0; |
590 | } else if (unlikely(copied < len) && !PageUptodate(page)) { |
591 | copied = 0; |
592 | ext4_journalled_zero_new_buffers(handle, page, from, to); |
593 | @@ -1583,6 +1586,7 @@ static int ext4_journalled_write_end(struct file *file, |
594 | ret = ret2; |
595 | } |
596 | |
597 | +errout: |
598 | if (pos + len > inode->i_size && !verity && ext4_can_truncate(inode)) |
599 | /* if we have allocated more blocks and copied |
600 | * less. We will have blocks allocated outside |
601 | @@ -1590,7 +1594,6 @@ static int ext4_journalled_write_end(struct file *file, |
602 | */ |
603 | ext4_orphan_add(handle, inode); |
604 | |
605 | -errout: |
606 | ret2 = ext4_journal_stop(handle); |
607 | if (!ret) |
608 | ret = ret2; |
609 | diff --git a/include/linux/sched.h b/include/linux/sched.h |
610 | index 5710b80f8050a..afee5d5eb9458 100644 |
611 | --- a/include/linux/sched.h |
612 | +++ b/include/linux/sched.h |
613 | @@ -1500,7 +1500,7 @@ extern struct pid *cad_pid; |
614 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
615 | #define used_math() tsk_used_math(current) |
616 | |
617 | -static inline bool is_percpu_thread(void) |
618 | +static __always_inline bool is_percpu_thread(void) |
619 | { |
620 | #ifdef CONFIG_SMP |
621 | return (current->flags & PF_NO_SETAFFINITY) && |
622 | diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h |
623 | index b16f9236de147..d1585b54fb0bd 100644 |
624 | --- a/include/net/pkt_sched.h |
625 | +++ b/include/net/pkt_sched.h |
626 | @@ -11,6 +11,7 @@ |
627 | #include <uapi/linux/pkt_sched.h> |
628 | |
629 | #define DEFAULT_TX_QUEUE_LEN 1000 |
630 | +#define STAB_SIZE_LOG_MAX 30 |
631 | |
632 | struct qdisc_walker { |
633 | int stop; |
634 | diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c |
635 | index 8bb543b0e775e..41268612bdd4e 100644 |
636 | --- a/net/ipv6/netfilter/ip6_tables.c |
637 | +++ b/net/ipv6/netfilter/ip6_tables.c |
638 | @@ -273,6 +273,7 @@ ip6t_do_table(struct sk_buff *skb, |
639 | * things we don't know, ie. tcp syn flag or ports). If the |
640 | * rule is also a fragment-specific rule, non-fragments won't |
641 | * match it. */ |
642 | + acpar.fragoff = 0; |
643 | acpar.hotdrop = false; |
644 | acpar.state = state; |
645 | |
646 | diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c |
647 | index 1708b64d41094..d7ae7415d54d0 100644 |
648 | --- a/net/mac80211/mesh_pathtbl.c |
649 | +++ b/net/mac80211/mesh_pathtbl.c |
650 | @@ -60,7 +60,10 @@ static struct mesh_table *mesh_table_alloc(void) |
651 | atomic_set(&newtbl->entries, 0); |
652 | spin_lock_init(&newtbl->gates_lock); |
653 | spin_lock_init(&newtbl->walk_lock); |
654 | - rhashtable_init(&newtbl->rhead, &mesh_rht_params); |
655 | + if (rhashtable_init(&newtbl->rhead, &mesh_rht_params)) { |
656 | + kfree(newtbl); |
657 | + return NULL; |
658 | + } |
659 | |
660 | return newtbl; |
661 | } |
662 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c |
663 | index 670d84e54db73..c7e6bf7c22c78 100644 |
664 | --- a/net/mac80211/rx.c |
665 | +++ b/net/mac80211/rx.c |
666 | @@ -3952,7 +3952,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) |
667 | if (!bssid) |
668 | return false; |
669 | if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || |
670 | - ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) |
671 | + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || |
672 | + !is_valid_ether_addr(hdr->addr2)) |
673 | return false; |
674 | if (ieee80211_is_beacon(hdr->frame_control)) |
675 | return true; |
676 | diff --git a/net/netfilter/nf_nat_masquerade.c b/net/netfilter/nf_nat_masquerade.c |
677 | index 8e8a65d46345b..acd73f717a088 100644 |
678 | --- a/net/netfilter/nf_nat_masquerade.c |
679 | +++ b/net/netfilter/nf_nat_masquerade.c |
680 | @@ -9,8 +9,19 @@ |
681 | |
682 | #include <net/netfilter/nf_nat_masquerade.h> |
683 | |
684 | +struct masq_dev_work { |
685 | + struct work_struct work; |
686 | + struct net *net; |
687 | + union nf_inet_addr addr; |
688 | + int ifindex; |
689 | + int (*iter)(struct nf_conn *i, void *data); |
690 | +}; |
691 | + |
692 | +#define MAX_MASQ_WORKER_COUNT 16 |
693 | + |
694 | static DEFINE_MUTEX(masq_mutex); |
695 | static unsigned int masq_refcnt __read_mostly; |
696 | +static atomic_t masq_worker_count __read_mostly; |
697 | |
698 | unsigned int |
699 | nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, |
700 | @@ -63,13 +74,71 @@ nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum, |
701 | } |
702 | EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv4); |
703 | |
704 | -static int device_cmp(struct nf_conn *i, void *ifindex) |
705 | +static void iterate_cleanup_work(struct work_struct *work) |
706 | +{ |
707 | + struct masq_dev_work *w; |
708 | + |
709 | + w = container_of(work, struct masq_dev_work, work); |
710 | + |
711 | + nf_ct_iterate_cleanup_net(w->net, w->iter, (void *)w, 0, 0); |
712 | + |
713 | + put_net(w->net); |
714 | + kfree(w); |
715 | + atomic_dec(&masq_worker_count); |
716 | + module_put(THIS_MODULE); |
717 | +} |
718 | + |
719 | +/* Iterate conntrack table in the background and remove conntrack entries |
720 | + * that use the device/address being removed. |
721 | + * |
722 | + * In case too many work items have been queued already or memory allocation |
723 | + * fails iteration is skipped, conntrack entries will time out eventually. |
724 | + */ |
725 | +static void nf_nat_masq_schedule(struct net *net, union nf_inet_addr *addr, |
726 | + int ifindex, |
727 | + int (*iter)(struct nf_conn *i, void *data), |
728 | + gfp_t gfp_flags) |
729 | +{ |
730 | + struct masq_dev_work *w; |
731 | + |
732 | + if (atomic_read(&masq_worker_count) > MAX_MASQ_WORKER_COUNT) |
733 | + return; |
734 | + |
735 | + net = maybe_get_net(net); |
736 | + if (!net) |
737 | + return; |
738 | + |
739 | + if (!try_module_get(THIS_MODULE)) |
740 | + goto err_module; |
741 | + |
742 | + w = kzalloc(sizeof(*w), gfp_flags); |
743 | + if (w) { |
744 | + /* We can overshoot MAX_MASQ_WORKER_COUNT, no big deal */ |
745 | + atomic_inc(&masq_worker_count); |
746 | + |
747 | + INIT_WORK(&w->work, iterate_cleanup_work); |
748 | + w->ifindex = ifindex; |
749 | + w->net = net; |
750 | + w->iter = iter; |
751 | + if (addr) |
752 | + w->addr = *addr; |
753 | + schedule_work(&w->work); |
754 | + return; |
755 | + } |
756 | + |
757 | + module_put(THIS_MODULE); |
758 | + err_module: |
759 | + put_net(net); |
760 | +} |
761 | + |
762 | +static int device_cmp(struct nf_conn *i, void *arg) |
763 | { |
764 | const struct nf_conn_nat *nat = nfct_nat(i); |
765 | + const struct masq_dev_work *w = arg; |
766 | |
767 | if (!nat) |
768 | return 0; |
769 | - return nat->masq_index == (int)(long)ifindex; |
770 | + return nat->masq_index == w->ifindex; |
771 | } |
772 | |
773 | static int masq_device_event(struct notifier_block *this, |
774 | @@ -85,8 +154,8 @@ static int masq_device_event(struct notifier_block *this, |
775 | * and forget them. |
776 | */ |
777 | |
778 | - nf_ct_iterate_cleanup_net(net, device_cmp, |
779 | - (void *)(long)dev->ifindex, 0, 0); |
780 | + nf_nat_masq_schedule(net, NULL, dev->ifindex, |
781 | + device_cmp, GFP_KERNEL); |
782 | } |
783 | |
784 | return NOTIFY_DONE; |
785 | @@ -94,35 +163,45 @@ static int masq_device_event(struct notifier_block *this, |
786 | |
787 | static int inet_cmp(struct nf_conn *ct, void *ptr) |
788 | { |
789 | - struct in_ifaddr *ifa = (struct in_ifaddr *)ptr; |
790 | - struct net_device *dev = ifa->ifa_dev->dev; |
791 | struct nf_conntrack_tuple *tuple; |
792 | + struct masq_dev_work *w = ptr; |
793 | |
794 | - if (!device_cmp(ct, (void *)(long)dev->ifindex)) |
795 | + if (!device_cmp(ct, ptr)) |
796 | return 0; |
797 | |
798 | tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
799 | |
800 | - return ifa->ifa_address == tuple->dst.u3.ip; |
801 | + return nf_inet_addr_cmp(&w->addr, &tuple->dst.u3); |
802 | } |
803 | |
804 | static int masq_inet_event(struct notifier_block *this, |
805 | unsigned long event, |
806 | void *ptr) |
807 | { |
808 | - struct in_device *idev = ((struct in_ifaddr *)ptr)->ifa_dev; |
809 | - struct net *net = dev_net(idev->dev); |
810 | + const struct in_ifaddr *ifa = ptr; |
811 | + const struct in_device *idev; |
812 | + const struct net_device *dev; |
813 | + union nf_inet_addr addr; |
814 | + |
815 | + if (event != NETDEV_DOWN) |
816 | + return NOTIFY_DONE; |
817 | |
818 | /* The masq_dev_notifier will catch the case of the device going |
819 | * down. So if the inetdev is dead and being destroyed we have |
820 | * no work to do. Otherwise this is an individual address removal |
821 | * and we have to perform the flush. |
822 | */ |
823 | + idev = ifa->ifa_dev; |
824 | if (idev->dead) |
825 | return NOTIFY_DONE; |
826 | |
827 | - if (event == NETDEV_DOWN) |
828 | - nf_ct_iterate_cleanup_net(net, inet_cmp, ptr, 0, 0); |
829 | + memset(&addr, 0, sizeof(addr)); |
830 | + |
831 | + addr.ip = ifa->ifa_address; |
832 | + |
833 | + dev = idev->dev; |
834 | + nf_nat_masq_schedule(dev_net(idev->dev), &addr, dev->ifindex, |
835 | + inet_cmp, GFP_KERNEL); |
836 | |
837 | return NOTIFY_DONE; |
838 | } |
839 | @@ -136,8 +215,6 @@ static struct notifier_block masq_inet_notifier = { |
840 | }; |
841 | |
842 | #if IS_ENABLED(CONFIG_IPV6) |
843 | -static atomic_t v6_worker_count __read_mostly; |
844 | - |
845 | static int |
846 | nat_ipv6_dev_get_saddr(struct net *net, const struct net_device *dev, |
847 | const struct in6_addr *daddr, unsigned int srcprefs, |
848 | @@ -187,40 +264,6 @@ nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range2 *range, |
849 | } |
850 | EXPORT_SYMBOL_GPL(nf_nat_masquerade_ipv6); |
851 | |
852 | -struct masq_dev_work { |
853 | - struct work_struct work; |
854 | - struct net *net; |
855 | - struct in6_addr addr; |
856 | - int ifindex; |
857 | -}; |
858 | - |
859 | -static int inet6_cmp(struct nf_conn *ct, void *work) |
860 | -{ |
861 | - struct masq_dev_work *w = (struct masq_dev_work *)work; |
862 | - struct nf_conntrack_tuple *tuple; |
863 | - |
864 | - if (!device_cmp(ct, (void *)(long)w->ifindex)) |
865 | - return 0; |
866 | - |
867 | - tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; |
868 | - |
869 | - return ipv6_addr_equal(&w->addr, &tuple->dst.u3.in6); |
870 | -} |
871 | - |
872 | -static void iterate_cleanup_work(struct work_struct *work) |
873 | -{ |
874 | - struct masq_dev_work *w; |
875 | - |
876 | - w = container_of(work, struct masq_dev_work, work); |
877 | - |
878 | - nf_ct_iterate_cleanup_net(w->net, inet6_cmp, (void *)w, 0, 0); |
879 | - |
880 | - put_net(w->net); |
881 | - kfree(w); |
882 | - atomic_dec(&v6_worker_count); |
883 | - module_put(THIS_MODULE); |
884 | -} |
885 | - |
886 | /* atomic notifier; can't call nf_ct_iterate_cleanup_net (it can sleep). |
887 | * |
888 | * Defer it to the system workqueue. |
889 | @@ -233,36 +276,19 @@ static int masq_inet6_event(struct notifier_block *this, |
890 | { |
891 | struct inet6_ifaddr *ifa = ptr; |
892 | const struct net_device *dev; |
893 | - struct masq_dev_work *w; |
894 | - struct net *net; |
895 | + union nf_inet_addr addr; |
896 | |
897 | - if (event != NETDEV_DOWN || atomic_read(&v6_worker_count) >= 16) |
898 | + if (event != NETDEV_DOWN) |
899 | return NOTIFY_DONE; |
900 | |
901 | dev = ifa->idev->dev; |
902 | - net = maybe_get_net(dev_net(dev)); |
903 | - if (!net) |
904 | - return NOTIFY_DONE; |
905 | |
906 | - if (!try_module_get(THIS_MODULE)) |
907 | - goto err_module; |
908 | + memset(&addr, 0, sizeof(addr)); |
909 | |
910 | - w = kmalloc(sizeof(*w), GFP_ATOMIC); |
911 | - if (w) { |
912 | - atomic_inc(&v6_worker_count); |
913 | - |
914 | - INIT_WORK(&w->work, iterate_cleanup_work); |
915 | - w->ifindex = dev->ifindex; |
916 | - w->net = net; |
917 | - w->addr = ifa->addr; |
918 | - schedule_work(&w->work); |
919 | + addr.in6 = ifa->addr; |
920 | |
921 | - return NOTIFY_DONE; |
922 | - } |
923 | - |
924 | - module_put(THIS_MODULE); |
925 | - err_module: |
926 | - put_net(net); |
927 | + nf_nat_masq_schedule(dev_net(dev), &addr, dev->ifindex, inet_cmp, |
928 | + GFP_ATOMIC); |
929 | return NOTIFY_DONE; |
930 | } |
931 | |
932 | diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c |
933 | index 3b1b5ee521379..e70f990334083 100644 |
934 | --- a/net/sched/sch_api.c |
935 | +++ b/net/sched/sch_api.c |
936 | @@ -510,6 +510,12 @@ static struct qdisc_size_table *qdisc_get_stab(struct nlattr *opt, |
937 | return stab; |
938 | } |
939 | |
940 | + if (s->size_log > STAB_SIZE_LOG_MAX || |
941 | + s->cell_log > STAB_SIZE_LOG_MAX) { |
942 | + NL_SET_ERR_MSG(extack, "Invalid logarithmic size of size table"); |
943 | + return ERR_PTR(-EINVAL); |
944 | + } |
945 | + |
946 | stab = kmalloc(sizeof(*stab) + tsize * sizeof(u16), GFP_KERNEL); |
947 | if (!stab) |
948 | return ERR_PTR(-ENOMEM); |