Annotation of /trunk/kernel-alx-legacy/patches-4.9/0386-4.9.287-all-fixes.patch
Parent Directory | Revision Log
Revision 3688 -
(hide annotations)
(download)
Mon Oct 24 14:08:03 2022 UTC (18 months, 2 weeks ago) by niro
File size: 19039 byte(s)
-linux-4.9.287
1 | niro | 3688 | diff --git a/Makefile b/Makefile |
2 | index 68f2c6f3869e2..76eff0f592346 100644 | ||
3 | --- a/Makefile | ||
4 | +++ b/Makefile | ||
5 | @@ -1,6 +1,6 @@ | ||
6 | VERSION = 4 | ||
7 | PATCHLEVEL = 9 | ||
8 | -SUBLEVEL = 286 | ||
9 | +SUBLEVEL = 287 | ||
10 | EXTRAVERSION = | ||
11 | NAME = Roaring Lionus | ||
12 | |||
13 | diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts | ||
14 | index abd6921143beb..e0ba97bd41b79 100644 | ||
15 | --- a/arch/arm/boot/dts/omap3430-sdp.dts | ||
16 | +++ b/arch/arm/boot/dts/omap3430-sdp.dts | ||
17 | @@ -104,7 +104,7 @@ | ||
18 | |||
19 | nand@1,0 { | ||
20 | compatible = "ti,omap2-nand"; | ||
21 | - reg = <0 0 4>; /* CS0, offset 0, IO size 4 */ | ||
22 | + reg = <1 0 4>; /* CS1, offset 0, IO size 4 */ | ||
23 | interrupt-parent = <&gpmc>; | ||
24 | interrupts = <0 IRQ_TYPE_NONE>, /* fifoevent */ | ||
25 | <1 IRQ_TYPE_NONE>; /* termcount */ | ||
26 | diff --git a/arch/arm/mach-imx/pm-imx6.c b/arch/arm/mach-imx/pm-imx6.c | ||
27 | index 6da26692f2fde..950c9f2ffe005 100644 | ||
28 | --- a/arch/arm/mach-imx/pm-imx6.c | ||
29 | +++ b/arch/arm/mach-imx/pm-imx6.c | ||
30 | @@ -15,6 +15,7 @@ | ||
31 | #include <linux/io.h> | ||
32 | #include <linux/irq.h> | ||
33 | #include <linux/genalloc.h> | ||
34 | +#include <linux/irqchip/arm-gic.h> | ||
35 | #include <linux/mfd/syscon.h> | ||
36 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | ||
37 | #include <linux/of.h> | ||
38 | @@ -606,6 +607,7 @@ static void __init imx6_pm_common_init(const struct imx6_pm_socdata | ||
39 | |||
40 | static void imx6_pm_stby_poweroff(void) | ||
41 | { | ||
42 | + gic_cpu_if_down(0); | ||
43 | imx6_set_lpm(STOP_POWER_OFF); | ||
44 | imx6q_suspend_finish(0); | ||
45 | |||
46 | diff --git a/arch/powerpc/boot/dts/fsl/t1023rdb.dts b/arch/powerpc/boot/dts/fsl/t1023rdb.dts | ||
47 | index 29757623e5baf..f5f8f969dd586 100644 | ||
48 | --- a/arch/powerpc/boot/dts/fsl/t1023rdb.dts | ||
49 | +++ b/arch/powerpc/boot/dts/fsl/t1023rdb.dts | ||
50 | @@ -125,7 +125,7 @@ | ||
51 | |||
52 | fm1mac3: ethernet@e4000 { | ||
53 | phy-handle = <&sgmii_aqr_phy3>; | ||
54 | - phy-connection-type = "sgmii-2500"; | ||
55 | + phy-connection-type = "2500base-x"; | ||
56 | sleep = <&rcpm 0x20000000>; | ||
57 | }; | ||
58 | |||
59 | diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c | ||
60 | index c26cca506f646..c20df6a3540c2 100644 | ||
61 | --- a/arch/x86/events/core.c | ||
62 | +++ b/arch/x86/events/core.c | ||
63 | @@ -2075,6 +2075,7 @@ static int x86_pmu_event_init(struct perf_event *event) | ||
64 | if (err) { | ||
65 | if (event->destroy) | ||
66 | event->destroy(event); | ||
67 | + event->destroy = NULL; | ||
68 | } | ||
69 | |||
70 | if (ACCESS_ONCE(x86_pmu.attr_rdpmc)) | ||
71 | diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c | ||
72 | index 441694464b1e4..fbbc24b914e30 100644 | ||
73 | --- a/arch/xtensa/kernel/irq.c | ||
74 | +++ b/arch/xtensa/kernel/irq.c | ||
75 | @@ -144,7 +144,7 @@ unsigned xtensa_get_ext_irq_no(unsigned irq) | ||
76 | |||
77 | void __init init_IRQ(void) | ||
78 | { | ||
79 | -#ifdef CONFIG_OF | ||
80 | +#ifdef CONFIG_USE_OF | ||
81 | irqchip_init(); | ||
82 | #else | ||
83 | #ifdef CONFIG_HAVE_SMP | ||
84 | diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c | ||
85 | index 411c12cdb2499..bb516eb124213 100644 | ||
86 | --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c | ||
87 | +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c | ||
88 | @@ -178,6 +178,7 @@ static const struct file_operations nouveau_pstate_fops = { | ||
89 | .open = nouveau_debugfs_pstate_open, | ||
90 | .read = seq_read, | ||
91 | .write = nouveau_debugfs_pstate_set, | ||
92 | + .release = single_release, | ||
93 | }; | ||
94 | |||
95 | static struct drm_info_list nouveau_debugfs_list[] = { | ||
96 | diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c | ||
97 | index 959a9e38b4f54..149902619cbc8 100644 | ||
98 | --- a/drivers/hid/hid-apple.c | ||
99 | +++ b/drivers/hid/hid-apple.c | ||
100 | @@ -302,12 +302,19 @@ static int apple_event(struct hid_device *hdev, struct hid_field *field, | ||
101 | |||
102 | /* | ||
103 | * MacBook JIS keyboard has wrong logical maximum | ||
104 | + * Magic Keyboard JIS has wrong logical maximum | ||
105 | */ | ||
106 | static __u8 *apple_report_fixup(struct hid_device *hdev, __u8 *rdesc, | ||
107 | unsigned int *rsize) | ||
108 | { | ||
109 | struct apple_sc *asc = hid_get_drvdata(hdev); | ||
110 | |||
111 | + if(*rsize >=71 && rdesc[70] == 0x65 && rdesc[64] == 0x65) { | ||
112 | + hid_info(hdev, | ||
113 | + "fixing up Magic Keyboard JIS report descriptor\n"); | ||
114 | + rdesc[64] = rdesc[70] = 0xe7; | ||
115 | + } | ||
116 | + | ||
117 | if ((asc->quirks & APPLE_RDESC_JIS) && *rsize >= 60 && | ||
118 | rdesc[53] == 0x65 && rdesc[59] == 0x65) { | ||
119 | hid_info(hdev, | ||
120 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c | ||
121 | index 832fffed4a1fa..e7585f6c4665b 100644 | ||
122 | --- a/drivers/net/ethernet/intel/i40e/i40e_main.c | ||
123 | +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | ||
124 | @@ -6646,7 +6646,7 @@ static int i40e_get_capabilities(struct i40e_pf *pf) | ||
125 | if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { | ||
126 | /* retry with a larger buffer */ | ||
127 | buf_len = data_size; | ||
128 | - } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { | ||
129 | + } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK || err) { | ||
130 | dev_info(&pf->pdev->dev, | ||
131 | "capability discovery failed, err %s aq_err %s\n", | ||
132 | i40e_stat_str(&pf->hw, err), | ||
133 | diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c | ||
134 | index 8cc7563ab103b..92fb664b56fbb 100644 | ||
135 | --- a/drivers/net/phy/mdio_bus.c | ||
136 | +++ b/drivers/net/phy/mdio_bus.c | ||
137 | @@ -316,6 +316,13 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) | ||
138 | bus->dev.groups = NULL; | ||
139 | dev_set_name(&bus->dev, "%s", bus->id); | ||
140 | |||
141 | + /* We need to set state to MDIOBUS_UNREGISTERED to correctly release | ||
142 | + * the device in mdiobus_free() | ||
143 | + * | ||
144 | + * State will be updated later in this function in case of success | ||
145 | + */ | ||
146 | + bus->state = MDIOBUS_UNREGISTERED; | ||
147 | + | ||
148 | err = device_register(&bus->dev); | ||
149 | if (err) { | ||
150 | pr_err("mii_bus %s failed to register\n", bus->id); | ||
151 | diff --git a/drivers/ptp/ptp_pch.c b/drivers/ptp/ptp_pch.c | ||
152 | index 3aa22ae4d94c0..a911325fc0b4f 100644 | ||
153 | --- a/drivers/ptp/ptp_pch.c | ||
154 | +++ b/drivers/ptp/ptp_pch.c | ||
155 | @@ -698,6 +698,7 @@ static const struct pci_device_id pch_ieee1588_pcidev_id[] = { | ||
156 | }, | ||
157 | {0} | ||
158 | }; | ||
159 | +MODULE_DEVICE_TABLE(pci, pch_ieee1588_pcidev_id); | ||
160 | |||
161 | static struct pci_driver pch_driver = { | ||
162 | .name = KBUILD_MODNAME, | ||
163 | diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c | ||
164 | index 69046d342bc5d..39396548f9b55 100644 | ||
165 | --- a/drivers/scsi/ses.c | ||
166 | +++ b/drivers/scsi/ses.c | ||
167 | @@ -120,7 +120,7 @@ static int ses_recv_diag(struct scsi_device *sdev, int page_code, | ||
168 | static int ses_send_diag(struct scsi_device *sdev, int page_code, | ||
169 | void *buf, int bufflen) | ||
170 | { | ||
171 | - u32 result; | ||
172 | + int result; | ||
173 | |||
174 | unsigned char cmd[] = { | ||
175 | SEND_DIAGNOSTIC, | ||
176 | diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c | ||
177 | index 7ba0031d3a738..d5575869a25c7 100644 | ||
178 | --- a/drivers/scsi/virtio_scsi.c | ||
179 | +++ b/drivers/scsi/virtio_scsi.c | ||
180 | @@ -343,7 +343,7 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi, | ||
181 | } | ||
182 | break; | ||
183 | default: | ||
184 | - pr_info("Unsupport virtio scsi event reason %x\n", event->reason); | ||
185 | + pr_info("Unsupported virtio scsi event reason %x\n", event->reason); | ||
186 | } | ||
187 | } | ||
188 | |||
189 | @@ -396,7 +396,7 @@ static void virtscsi_handle_event(struct work_struct *work) | ||
190 | virtscsi_handle_param_change(vscsi, event); | ||
191 | break; | ||
192 | default: | ||
193 | - pr_err("Unsupport virtio scsi event %x\n", event->event); | ||
194 | + pr_err("Unsupported virtio scsi event %x\n", event->event); | ||
195 | } | ||
196 | virtscsi_kick_event(vscsi, event_node); | ||
197 | } | ||
198 | diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig | ||
199 | index 0103f777b97ad..bff5a15e59c00 100644 | ||
200 | --- a/drivers/usb/Kconfig | ||
201 | +++ b/drivers/usb/Kconfig | ||
202 | @@ -160,8 +160,7 @@ source "drivers/usb/gadget/Kconfig" | ||
203 | |||
204 | config USB_LED_TRIG | ||
205 | bool "USB LED Triggers" | ||
206 | - depends on LEDS_CLASS && LEDS_TRIGGERS | ||
207 | - select USB_COMMON | ||
208 | + depends on LEDS_CLASS && USB_COMMON && LEDS_TRIGGERS | ||
209 | help | ||
210 | This option adds LED triggers for USB host and/or gadget activity. | ||
211 | |||
212 | diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c | ||
213 | index 23df1549eb0d8..b7b83ca83ba0d 100644 | ||
214 | --- a/drivers/usb/class/cdc-acm.c | ||
215 | +++ b/drivers/usb/class/cdc-acm.c | ||
216 | @@ -349,6 +349,9 @@ static void acm_ctrl_irq(struct urb *urb) | ||
217 | acm->iocount.overrun++; | ||
218 | spin_unlock(&acm->read_lock); | ||
219 | |||
220 | + if (newctrl & ACM_CTRL_BRK) | ||
221 | + tty_flip_buffer_push(&acm->port); | ||
222 | + | ||
223 | if (difference) | ||
224 | wake_up_all(&acm->wioctl); | ||
225 | |||
226 | @@ -408,11 +411,16 @@ static int acm_submit_read_urbs(struct acm *acm, gfp_t mem_flags) | ||
227 | |||
228 | static void acm_process_read_urb(struct acm *acm, struct urb *urb) | ||
229 | { | ||
230 | + unsigned long flags; | ||
231 | + | ||
232 | if (!urb->actual_length) | ||
233 | return; | ||
234 | |||
235 | + spin_lock_irqsave(&acm->read_lock, flags); | ||
236 | tty_insert_flip_string(&acm->port, urb->transfer_buffer, | ||
237 | urb->actual_length); | ||
238 | + spin_unlock_irqrestore(&acm->read_lock, flags); | ||
239 | + | ||
240 | tty_flip_buffer_push(&acm->port); | ||
241 | } | ||
242 | |||
243 | diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c | ||
244 | index b16a6c0363527..dc51011d670df 100644 | ||
245 | --- a/fs/nfsd/nfs4xdr.c | ||
246 | +++ b/fs/nfsd/nfs4xdr.c | ||
247 | @@ -3028,15 +3028,18 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, | ||
248 | goto fail; | ||
249 | cd->rd_maxcount -= entry_bytes; | ||
250 | /* | ||
251 | - * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so | ||
252 | - * let's always let through the first entry, at least: | ||
253 | + * RFC 3530 14.2.24 describes rd_dircount as only a "hint", and | ||
254 | + * notes that it could be zero. If it is zero, then the server | ||
255 | + * should enforce only the rd_maxcount value. | ||
256 | */ | ||
257 | - if (!cd->rd_dircount) | ||
258 | - goto fail; | ||
259 | - name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8; | ||
260 | - if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) | ||
261 | - goto fail; | ||
262 | - cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); | ||
263 | + if (cd->rd_dircount) { | ||
264 | + name_and_cookie = 4 + 4 * XDR_QUADLEN(namlen) + 8; | ||
265 | + if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) | ||
266 | + goto fail; | ||
267 | + cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); | ||
268 | + if (!cd->rd_dircount) | ||
269 | + cd->rd_maxcount = 0; | ||
270 | + } | ||
271 | |||
272 | cd->cookie_offset = cookie_offset; | ||
273 | skip_entry: | ||
274 | diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c | ||
275 | index 8546384a5fdfd..edd5979aca4f6 100644 | ||
276 | --- a/fs/overlayfs/dir.c | ||
277 | +++ b/fs/overlayfs/dir.c | ||
278 | @@ -926,9 +926,13 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, | ||
279 | goto out_dput; | ||
280 | } | ||
281 | } else { | ||
282 | - if (!d_is_negative(newdentry) && | ||
283 | - (!new_opaque || !ovl_is_whiteout(newdentry))) | ||
284 | - goto out_dput; | ||
285 | + if (!d_is_negative(newdentry)) { | ||
286 | + if (!new_opaque || !ovl_is_whiteout(newdentry)) | ||
287 | + goto out_dput; | ||
288 | + } else { | ||
289 | + if (flags & RENAME_EXCHANGE) | ||
290 | + goto out_dput; | ||
291 | + } | ||
292 | } | ||
293 | |||
294 | if (olddentry == trap) | ||
295 | diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c | ||
296 | index 2fdf6f96f9762..6f09728cd1dd3 100644 | ||
297 | --- a/kernel/bpf/stackmap.c | ||
298 | +++ b/kernel/bpf/stackmap.c | ||
299 | @@ -28,7 +28,8 @@ struct bpf_stack_map { | ||
300 | |||
301 | static int prealloc_elems_and_freelist(struct bpf_stack_map *smap) | ||
302 | { | ||
303 | - u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size; | ||
304 | + u64 elem_size = sizeof(struct stack_map_bucket) + | ||
305 | + (u64)smap->map.value_size; | ||
306 | int err; | ||
307 | |||
308 | smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries); | ||
309 | diff --git a/mm/gup.c b/mm/gup.c | ||
310 | index 6bb7a8eb7f820..301dd96ef176c 100644 | ||
311 | --- a/mm/gup.c | ||
312 | +++ b/mm/gup.c | ||
313 | @@ -61,13 +61,22 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, | ||
314 | } | ||
315 | |||
316 | /* | ||
317 | - * FOLL_FORCE can write to even unwritable pte's, but only | ||
318 | - * after we've gone through a COW cycle and they are dirty. | ||
319 | + * FOLL_FORCE or a forced COW break can write even to unwritable pte's, | ||
320 | + * but only after we've gone through a COW cycle and they are dirty. | ||
321 | */ | ||
322 | static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) | ||
323 | { | ||
324 | - return pte_write(pte) || | ||
325 | - ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); | ||
326 | + return pte_write(pte) || ((flags & FOLL_COW) && pte_dirty(pte)); | ||
327 | +} | ||
328 | + | ||
329 | +/* | ||
330 | + * A (separate) COW fault might break the page the other way and | ||
331 | + * get_user_pages() would return the page from what is now the wrong | ||
332 | + * VM. So we need to force a COW break at GUP time even for reads. | ||
333 | + */ | ||
334 | +static inline bool should_force_cow_break(struct vm_area_struct *vma, unsigned int flags) | ||
335 | +{ | ||
336 | + return is_cow_mapping(vma->vm_flags) && (flags & FOLL_GET); | ||
337 | } | ||
338 | |||
339 | static struct page *follow_page_pte(struct vm_area_struct *vma, | ||
340 | @@ -577,12 +586,18 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | ||
341 | if (!vma || check_vma_flags(vma, gup_flags)) | ||
342 | return i ? : -EFAULT; | ||
343 | if (is_vm_hugetlb_page(vma)) { | ||
344 | + if (should_force_cow_break(vma, foll_flags)) | ||
345 | + foll_flags |= FOLL_WRITE; | ||
346 | i = follow_hugetlb_page(mm, vma, pages, vmas, | ||
347 | &start, &nr_pages, i, | ||
348 | - gup_flags); | ||
349 | + foll_flags); | ||
350 | continue; | ||
351 | } | ||
352 | } | ||
353 | + | ||
354 | + if (should_force_cow_break(vma, foll_flags)) | ||
355 | + foll_flags |= FOLL_WRITE; | ||
356 | + | ||
357 | retry: | ||
358 | /* | ||
359 | * If we have a pending SIGKILL, don't keep faulting pages and | ||
360 | @@ -1503,6 +1518,10 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end, | ||
361 | /* | ||
362 | * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to | ||
363 | * the regular GUP. It will only return non-negative values. | ||
364 | + * | ||
365 | + * Careful, careful! COW breaking can go either way, so a non-write | ||
366 | + * access can get ambiguous page results. If you call this function without | ||
367 | + * 'write' set, you'd better be sure that you're ok with that ambiguity. | ||
368 | */ | ||
369 | int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | ||
370 | struct page **pages) | ||
371 | @@ -1532,6 +1551,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | ||
372 | * | ||
373 | * We do not adopt an rcu_read_lock(.) here as we also want to | ||
374 | * block IPIs that come from THPs splitting. | ||
375 | + * | ||
376 | + * NOTE! We allow read-only gup_fast() here, but you'd better be | ||
377 | + * careful about possible COW pages. You'll get _a_ COW page, but | ||
378 | + * not necessarily the one you intended to get depending on what | ||
379 | + * COW event happens after this. COW may break the page copy in a | ||
380 | + * random direction. | ||
381 | */ | ||
382 | |||
383 | local_irq_save(flags); | ||
384 | @@ -1542,15 +1567,22 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write, | ||
385 | next = pgd_addr_end(addr, end); | ||
386 | if (pgd_none(pgd)) | ||
387 | break; | ||
388 | + /* | ||
389 | + * The FAST_GUP case requires FOLL_WRITE even for pure reads, | ||
390 | + * because get_user_pages() may need to cause an early COW in | ||
391 | + * order to avoid confusing the normal COW routines. So only | ||
392 | + * targets that are already writable are safe to do by just | ||
393 | + * looking at the page tables. | ||
394 | + */ | ||
395 | if (unlikely(pgd_huge(pgd))) { | ||
396 | - if (!gup_huge_pgd(pgd, pgdp, addr, next, write, | ||
397 | + if (!gup_huge_pgd(pgd, pgdp, addr, next, 1, | ||
398 | pages, &nr)) | ||
399 | break; | ||
400 | } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) { | ||
401 | if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, | ||
402 | - PGDIR_SHIFT, next, write, pages, &nr)) | ||
403 | + PGDIR_SHIFT, next, 1, pages, &nr)) | ||
404 | break; | ||
405 | - } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr)) | ||
406 | + } else if (!gup_pud_range(pgd, addr, next, 1, pages, &nr)) | ||
407 | break; | ||
408 | } while (pgdp++, addr = next, addr != end); | ||
409 | local_irq_restore(flags); | ||
410 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c | ||
411 | index 91f33bb43f178..3f3a86cc62b68 100644 | ||
412 | --- a/mm/huge_memory.c | ||
413 | +++ b/mm/huge_memory.c | ||
414 | @@ -1135,13 +1135,12 @@ out_unlock: | ||
415 | } | ||
416 | |||
417 | /* | ||
418 | - * FOLL_FORCE can write to even unwritable pmd's, but only | ||
419 | - * after we've gone through a COW cycle and they are dirty. | ||
420 | + * FOLL_FORCE or a forced COW break can write even to unwritable pmd's, | ||
421 | + * but only after we've gone through a COW cycle and they are dirty. | ||
422 | */ | ||
423 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) | ||
424 | { | ||
425 | - return pmd_write(pmd) || | ||
426 | - ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); | ||
427 | + return pmd_write(pmd) || ((flags & FOLL_COW) && pmd_dirty(pmd)); | ||
428 | } | ||
429 | |||
430 | struct page *follow_trans_huge_pmd(struct vm_area_struct *vma, | ||
431 | diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c | ||
432 | index 4f831225d34f4..ca8757090ae35 100644 | ||
433 | --- a/net/bridge/br_netlink.c | ||
434 | +++ b/net/bridge/br_netlink.c | ||
435 | @@ -1298,7 +1298,7 @@ static size_t br_get_linkxstats_size(const struct net_device *dev, int attr) | ||
436 | } | ||
437 | |||
438 | return numvls * nla_total_size(sizeof(struct bridge_vlan_xstats)) + | ||
439 | - nla_total_size(sizeof(struct br_mcast_stats)) + | ||
440 | + nla_total_size_64bit(sizeof(struct br_mcast_stats)) + | ||
441 | nla_total_size(0); | ||
442 | } | ||
443 | |||
444 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c | ||
445 | index 911752e8a3e64..012143f313a87 100644 | ||
446 | --- a/net/core/rtnetlink.c | ||
447 | +++ b/net/core/rtnetlink.c | ||
448 | @@ -3900,7 +3900,7 @@ nla_put_failure: | ||
449 | static size_t if_nlmsg_stats_size(const struct net_device *dev, | ||
450 | u32 filter_mask) | ||
451 | { | ||
452 | - size_t size = 0; | ||
453 | + size_t size = NLMSG_ALIGN(sizeof(struct if_stats_msg)); | ||
454 | |||
455 | if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_64, 0)) | ||
456 | size += nla_total_size_64bit(sizeof(struct rtnl_link_stats64)); | ||
457 | diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c | ||
458 | index 579fda1bc45df..ce54e66b47a03 100644 | ||
459 | --- a/net/ipv6/netfilter/ip6_tables.c | ||
460 | +++ b/net/ipv6/netfilter/ip6_tables.c | ||
461 | @@ -290,6 +290,7 @@ ip6t_do_table(struct sk_buff *skb, | ||
462 | * things we don't know, ie. tcp syn flag or ports). If the | ||
463 | * rule is also a fragment-specific rule, non-fragments won't | ||
464 | * match it. */ | ||
465 | + acpar.fragoff = 0; | ||
466 | acpar.hotdrop = false; | ||
467 | acpar.net = state->net; | ||
468 | acpar.in = state->in; | ||
469 | diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c | ||
470 | index b40e71a5d7957..3dc370ad23bf6 100644 | ||
471 | --- a/net/mac80211/rx.c | ||
472 | +++ b/net/mac80211/rx.c | ||
473 | @@ -3692,7 +3692,8 @@ static bool ieee80211_accept_frame(struct ieee80211_rx_data *rx) | ||
474 | if (!bssid) | ||
475 | return false; | ||
476 | if (ether_addr_equal(sdata->vif.addr, hdr->addr2) || | ||
477 | - ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2)) | ||
478 | + ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2) || | ||
479 | + !is_valid_ether_addr(hdr->addr2)) | ||
480 | return false; | ||
481 | if (ieee80211_is_beacon(hdr->frame_control)) | ||
482 | return true; | ||
483 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c | ||
484 | index 453b0efdc0d71..1b70de5898c42 100644 | ||
485 | --- a/net/netlink/af_netlink.c | ||
486 | +++ b/net/netlink/af_netlink.c | ||
487 | @@ -574,7 +574,10 @@ static int netlink_insert(struct sock *sk, u32 portid) | ||
488 | |||
489 | /* We need to ensure that the socket is hashed and visible. */ | ||
490 | smp_wmb(); | ||
491 | - nlk_sk(sk)->bound = portid; | ||
492 | + /* Paired with lockless reads from netlink_bind(), | ||
493 | + * netlink_connect() and netlink_sendmsg(). | ||
494 | + */ | ||
495 | + WRITE_ONCE(nlk_sk(sk)->bound, portid); | ||
496 | |||
497 | err: | ||
498 | release_sock(sk); | ||
499 | @@ -993,7 +996,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, | ||
500 | else if (nlk->ngroups < 8*sizeof(groups)) | ||
501 | groups &= (1UL << nlk->ngroups) - 1; | ||
502 | |||
503 | - bound = nlk->bound; | ||
504 | + /* Paired with WRITE_ONCE() in netlink_insert() */ | ||
505 | + bound = READ_ONCE(nlk->bound); | ||
506 | if (bound) { | ||
507 | /* Ensure nlk->portid is up-to-date. */ | ||
508 | smp_rmb(); | ||
509 | @@ -1073,8 +1077,9 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, | ||
510 | |||
511 | /* No need for barriers here as we return to user-space without | ||
512 | * using any of the bound attributes. | ||
513 | + * Paired with WRITE_ONCE() in netlink_insert(). | ||
514 | */ | ||
515 | - if (!nlk->bound) | ||
516 | + if (!READ_ONCE(nlk->bound)) | ||
517 | err = netlink_autobind(sock); | ||
518 | |||
519 | if (err == 0) { | ||
520 | @@ -1821,7 +1826,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) | ||
521 | dst_group = nlk->dst_group; | ||
522 | } | ||
523 | |||
524 | - if (!nlk->bound) { | ||
525 | + /* Paired with WRITE_ONCE() in netlink_insert() */ | ||
526 | + if (!READ_ONCE(nlk->bound)) { | ||
527 | err = netlink_autobind(sock); | ||
528 | if (err) | ||
529 | goto out; | ||
530 | diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c | ||
531 | index 1e37247656f80..8b7110cbcce4c 100644 | ||
532 | --- a/net/sched/sch_fifo.c | ||
533 | +++ b/net/sched/sch_fifo.c | ||
534 | @@ -151,6 +151,9 @@ int fifo_set_limit(struct Qdisc *q, unsigned int limit) | ||
535 | if (strncmp(q->ops->id + 1, "fifo", 4) != 0) | ||
536 | return 0; | ||
537 | |||
538 | + if (!q->ops->change) | ||
539 | + return 0; | ||
540 | + | ||
541 | nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL); | ||
542 | if (nla) { | ||
543 | nla->nla_type = RTM_NEWQDISC; |