Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0377-4.9.278-all-fixes.patch



Revision 3679
Mon Oct 24 14:07:56 2022 UTC by niro
File size: 50412 bytes
-linux-4.9.278

diff --git a/Makefile b/Makefile
index 560a7e2b5efc2..82fc1c7475926 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 277
+SUBLEVEL = 278
 EXTRAVERSION =
 NAME = Roaring Lionus

diff --git a/arch/arm/boot/dts/versatile-ab.dts b/arch/arm/boot/dts/versatile-ab.dts
index 00d7d28e86f0b..4633b79bf5ea6 100644
--- a/arch/arm/boot/dts/versatile-ab.dts
+++ b/arch/arm/boot/dts/versatile-ab.dts
@@ -154,16 +154,15 @@
 #size-cells = <1>;
 ranges;

- vic: intc@10140000 {
+ vic: interrupt-controller@10140000 {
 compatible = "arm,versatile-vic";
 interrupt-controller;
 #interrupt-cells = <1>;
 reg = <0x10140000 0x1000>;
- clear-mask = <0xffffffff>;
 valid-mask = <0xffffffff>;
 };

- sic: intc@10003000 {
+ sic: interrupt-controller@10003000 {
 compatible = "arm,versatile-sic";
 interrupt-controller;
 #interrupt-cells = <1>;
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index 33a8eb28374ea..3a23164c2c2d4 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -6,7 +6,7 @@

 amba {
 /* The Versatile PB is using more SIC IRQ lines than the AB */
- sic: intc@10003000 {
+ sic: interrupt-controller@10003000 {
 clear-mask = <0xffffffff>;
 /*
 * Valid interrupt lines mask according to
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 0a066f03b5ec9..180c1782ad63d 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -625,18 +625,20 @@ struct page *get_signal_page(void)

 addr = page_address(page);

+ /* Poison the entire page */
+ memset32(addr, __opcode_to_mem_arm(0xe7fddef1),
+ PAGE_SIZE / sizeof(u32));
+
 /* Give the signal return code some randomness */
 offset = 0x200 + (get_random_int() & 0x7fc);
 signal_return_offset = offset;

- /*
- * Copy signal return handlers into the vector page, and
- * set sigreturn to be a pointer to these.
- */
+ /* Copy signal return handlers into the page */
 memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

- ptr = (unsigned long)addr + offset;
- flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
+ /* Flush out all instructions in this page */
+ ptr = (unsigned long)addr;
+ flush_icache_range(ptr, ptr + PAGE_SIZE);

 return page;
 }
diff --git a/arch/x86/include/asm/proto.h b/arch/x86/include/asm/proto.h
index 9b9b30b194418..36a7a3f11839a 100644
--- a/arch/x86/include/asm/proto.h
+++ b/arch/x86/include/asm/proto.h
@@ -3,6 +3,8 @@

 #include <asm/ldt.h>

+struct task_struct;
+
 /* misc architecture specific prototypes */

 void syscall_init(void);
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index aa34b16e62c24..a069d0dd3ded4 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -96,7 +96,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
 static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
 {
 ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID + 1);
 }

 static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 1cc6e54436dba..2f3df43489f22 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -42,13 +42,13 @@ struct kvm_vcpu;

 struct dest_map {
 /* vcpu bitmap where IRQ has been sent */
- DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
+ DECLARE_BITMAP(map, KVM_MAX_VCPU_ID + 1);

 /*
 * Vector sent to a given vcpu, only valid when
 * the vcpu's bit in map is set
 */
- u8 vectors[KVM_MAX_VCPU_ID];
+ u8 vectors[KVM_MAX_VCPU_ID + 1];
 };


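The two KVM hunks above fix a classic inclusive-bound off-by-one: vcpu IDs run from 0 to KVM_MAX_VCPU_ID inclusive, so a bitmap or array indexed by vcpu ID needs KVM_MAX_VCPU_ID + 1 slots. A minimal stand-alone illustration of the rule (MAX_ID and the program are hypothetical, not kernel code):

	/* Hypothetical illustration of the inclusive-bound sizing fixed above. */
	#include <stdio.h>

	#define MAX_ID 7			/* IDs run 0..7 inclusive */

	int main(void)
	{
		unsigned char vectors[MAX_ID + 1];	/* 8 slots, one per valid ID */
		int id;

		for (id = 0; id <= MAX_ID; id++)	/* vectors[MAX_ID] would overflow */
			vectors[id] = 0;		/* an array sized only MAX_ID */

		printf("%zu slots for IDs 0..%d\n", sizeof(vectors), MAX_ID);
		return 0;
	}
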
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 8377bd388d673..14e9b06829d54 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1333,7 +1333,7 @@ static void increase_address_space(struct protection_domain *domain,

 pte = (void *)get_zeroed_page(gfp);
 if (!pte)
- goto out;
+ return;

 spin_lock_irqsave(&domain->lock, flags);

diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 022a9b3c7d4e8..d62d61d734ea1 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -267,6 +267,8 @@ struct ems_usb {
 unsigned int free_slots; /* remember number of available slots */

 struct ems_cpc_msg active_params; /* active controller parameters */
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };

 static void ems_usb_read_interrupt_callback(struct urb *urb)
@@ -598,6 +600,7 @@ static int ems_usb_start(struct ems_usb *dev)
 for (i = 0; i < MAX_RX_URBS; i++) {
 struct urb *urb = NULL;
 u8 *buf = NULL;
+ dma_addr_t buf_dma;

 /* create a URB, and a buffer for it */
 urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -607,7 +610,7 @@ static int ems_usb_start(struct ems_usb *dev)
 }

 buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
 if (!buf) {
 netdev_err(netdev, "No memory left for USB buffer\n");
 usb_free_urb(urb);
@@ -615,6 +618,8 @@ static int ems_usb_start(struct ems_usb *dev)
 break;
 }

+ urb->transfer_dma = buf_dma;
+
 usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2),
 buf, RX_BUFFER_SIZE,
 ems_usb_read_bulk_callback, dev);
@@ -630,6 +635,9 @@ static int ems_usb_start(struct ems_usb *dev)
 break;
 }

+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
 /* Drop reference, USB core will take care of freeing it */
 usb_free_urb(urb);
 }
@@ -695,6 +703,10 @@ static void unlink_all_urbs(struct ems_usb *dev)

 usb_kill_anchored_urbs(&dev->rx_submitted);

+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
 usb_kill_anchored_urbs(&dev->tx_submitted);
 atomic_set(&dev->active_tx_urbs, 0);

diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index c6dcf93675c00..592c6e7f3dca4 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -207,6 +207,8 @@ struct esd_usb2 {
 int net_count;
 u32 version;
 int rxinitdone;
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };

 struct esd_usb2_net_priv {
@@ -556,6 +558,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 for (i = 0; i < MAX_RX_URBS; i++) {
 struct urb *urb = NULL;
 u8 *buf = NULL;
+ dma_addr_t buf_dma;

 /* create a URB, and a buffer for it */
 urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -565,7 +568,7 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 }

 buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
 if (!buf) {
 dev_warn(dev->udev->dev.parent,
 "No memory left for USB buffer\n");
@@ -573,6 +576,8 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 goto freeurb;
 }

+ urb->transfer_dma = buf_dma;
+
 usb_fill_bulk_urb(urb, dev->udev,
 usb_rcvbulkpipe(dev->udev, 1),
 buf, RX_BUFFER_SIZE,
@@ -585,8 +590,12 @@ static int esd_usb2_setup_rx_urbs(struct esd_usb2 *dev)
 usb_unanchor_urb(urb);
 usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
 urb->transfer_dma);
+ goto freeurb;
 }

+ dev->rxbuf[i] = buf;
+ dev->rxbuf_dma[i] = buf_dma;
+
 freeurb:
 /* Drop reference, USB core will take care of freeing it */
 usb_free_urb(urb);
@@ -674,6 +683,11 @@ static void unlink_all_urbs(struct esd_usb2 *dev)
 int i, j;

 usb_kill_anchored_urbs(&dev->rx_submitted);
+
+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
+ dev->rxbuf[i], dev->rxbuf_dma[i]);
+
 for (i = 0; i < dev->net_count; i++) {
 priv = dev->nets[i];
 if (priv) {
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 3e44164736079..df99354ec12aa 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -148,7 +148,8 @@ struct usb_8dev_priv {
 u8 *cmd_msg_buffer;

 struct mutex usb_8dev_cmd_lock;
-
+ void *rxbuf[MAX_RX_URBS];
+ dma_addr_t rxbuf_dma[MAX_RX_URBS];
 };

 /* tx frame */
@@ -744,6 +745,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 for (i = 0; i < MAX_RX_URBS; i++) {
 struct urb *urb = NULL;
 u8 *buf;
+ dma_addr_t buf_dma;

 /* create a URB, and a buffer for it */
 urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -753,7 +755,7 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 }

 buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL,
- &urb->transfer_dma);
+ &buf_dma);
 if (!buf) {
 netdev_err(netdev, "No memory left for USB buffer\n");
 usb_free_urb(urb);
@@ -761,6 +763,8 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 break;
 }

+ urb->transfer_dma = buf_dma;
+
 usb_fill_bulk_urb(urb, priv->udev,
 usb_rcvbulkpipe(priv->udev,
 USB_8DEV_ENDP_DATA_RX),
@@ -778,6 +782,9 @@ static int usb_8dev_start(struct usb_8dev_priv *priv)
 break;
 }

+ priv->rxbuf[i] = buf;
+ priv->rxbuf_dma[i] = buf_dma;
+
 /* Drop reference, USB core will take care of freeing it */
 usb_free_urb(urb);
 }
@@ -847,6 +854,10 @@ static void unlink_all_urbs(struct usb_8dev_priv *priv)

 usb_kill_anchored_urbs(&priv->rx_submitted);

+ for (i = 0; i < MAX_RX_URBS; ++i)
+ usb_free_coherent(priv->udev, RX_BUFFER_SIZE,
+ priv->rxbuf[i], priv->rxbuf_dma[i]);
+
 usb_kill_anchored_urbs(&priv->tx_submitted);
 atomic_set(&priv->active_tx_urbs, 0);

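The three CAN USB hunks above apply one pattern: the coherent RX buffers used to leak because their only DMA handle lived in urb->transfer_dma, which disappears when the URB is freed, so the fix records each buffer/handle pair in the device structure and frees them in unlink_all_urbs(). A hedged sketch of the pattern (my_dev, my_alloc_rx and my_stop are hypothetical names; the usb_* calls and their signatures are the real API):

	/* Hedged sketch of the coherent-buffer bookkeeping used above. */
	#define MAX_RX_URBS	10	/* illustrative values */
	#define RX_BUFFER_SIZE	64

	struct my_dev {
		struct usb_device *udev;
		void *rxbuf[MAX_RX_URBS];	  /* kept so we can free later */
		dma_addr_t rxbuf_dma[MAX_RX_URBS];
	};

	static int my_alloc_rx(struct my_dev *dev, struct urb *urb, int i)
	{
		dma_addr_t buf_dma;
		u8 *buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE,
					     GFP_KERNEL, &buf_dma);
		if (!buf)
			return -ENOMEM;
		urb->transfer_dma = buf_dma;
		/* remember the pair; urb->transfer_dma alone dies with the urb */
		dev->rxbuf[i] = buf;
		dev->rxbuf_dma[i] = buf_dma;
		return 0;
	}

	static void my_stop(struct my_dev *dev)
	{
		int i;

		for (i = 0; i < MAX_RX_URBS; i++)
			usb_free_coherent(dev->udev, RX_BUFFER_SIZE,
					  dev->rxbuf[i], dev->rxbuf_dma[i]);
	}
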
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 1f62b94238510..31dfb695ededa 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -368,7 +368,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
 int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
 void __iomem *ioaddr;

- i = pci_enable_device(pdev);
+ i = pcim_enable_device(pdev);
 if (i) return i;

 pci_set_master(pdev);
@@ -390,7 +390,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)

 ioaddr = pci_iomap(pdev, TULIP_BAR, netdev_res_size);
 if (!ioaddr)
- goto err_out_free_res;
+ goto err_out_netdev;

 for (i = 0; i < 3; i++)
 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
@@ -469,8 +469,6 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)

 err_out_cleardev:
 pci_iounmap(pdev, ioaddr);
-err_out_free_res:
- pci_release_regions(pdev);
 err_out_netdev:
 free_netdev (dev);
 return -ENODEV;
@@ -1537,7 +1535,6 @@ static void w840_remove1(struct pci_dev *pdev)
 if (dev) {
 struct netdev_private *np = netdev_priv(dev);
 unregister_netdev(dev);
- pci_release_regions(pdev);
 pci_iounmap(pdev, np->base_addr);
 free_netdev(dev);
 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 9b6a96074df80..78b04f2713440 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3445,6 +3445,7 @@ slave_start:

 if (!SRIOV_VALID_STATE(dev->flags)) {
 mlx4_err(dev, "Invalid SRIOV state\n");
+ err = -EINVAL;
 goto err_close;
 }
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 13dfc197bdd8d..6c092dc41c82e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -701,17 +701,19 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
 static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
 struct fs_prio *prio)
 {
- struct mlx5_flow_table *next_ft;
+ struct mlx5_flow_table *next_ft, *first_ft;
 int err = 0;

 /* Connect_prev_fts and update_root_ft_create are mutually exclusive */

- if (list_empty(&prio->node.children)) {
+ first_ft = list_first_entry_or_null(&prio->node.children,
+ struct mlx5_flow_table, node.list);
+ if (!first_ft || first_ft->level > ft->level) {
 err = connect_prev_fts(dev, ft, prio);
 if (err)
 return err;

- next_ft = find_next_chained_ft(prio);
+ next_ft = first_ft ? first_ft : find_next_chained_ft(prio);
 err = connect_fwd_rules(dev, ft, next_ft);
 if (err)
 return err;
@@ -1357,7 +1359,7 @@ static int disconnect_flow_table(struct mlx5_flow_table *ft)
 node.list) == ft))
 return 0;

- next_ft = find_next_chained_ft(prio);
+ next_ft = find_next_ft(ft);
 err = connect_fwd_rules(dev, next_ft, ft);
 if (err)
 return err;
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index ae9b983e8e5c9..4a0e69a2d4f58 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -442,7 +442,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 #endif

 /* setup various bits in PCI command register */
- ret = pci_enable_device(pci_dev);
+ ret = pcim_enable_device(pci_dev);
 if(ret) return ret;

 i = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
@@ -468,7 +468,7 @@ static int sis900_probe(struct pci_dev *pci_dev,
 ioaddr = pci_iomap(pci_dev, 0, 0);
 if (!ioaddr) {
 ret = -ENOMEM;
- goto err_out_cleardev;
+ goto err_out;
 }

 sis_priv = netdev_priv(net_dev);
@@ -576,8 +576,6 @@ err_unmap_tx:
 sis_priv->tx_ring_dma);
 err_out_unmap:
 pci_iounmap(pci_dev, ioaddr);
-err_out_cleardev:
- pci_release_regions(pci_dev);
 err_out:
 free_netdev(net_dev);
 return ret;
@@ -2425,7 +2423,6 @@ static void sis900_remove(struct pci_dev *pci_dev)
 sis_priv->tx_ring_dma);
 pci_iounmap(pci_dev, sis_priv->ioaddr);
 free_netdev(net_dev);
- pci_release_regions(pci_dev);
 }

 #ifdef CONFIG_PM
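The winbond-840 and sis900 hunks switch pci_enable_device() to the managed pcim_enable_device(). Once a PCI device is managed, resources claimed afterwards (such as regions taken via pci_request_regions()) are devres-backed and released automatically on driver detach, which is why the explicit pci_release_regions() calls can be dropped from the error and remove paths. A hedged probe skeleton showing the idea (my_probe and the region name are hypothetical; the pcim_*/pci_* calls are the real API):

	/* Hedged sketch of a devres-managed PCI probe. */
	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		int err;

		err = pcim_enable_device(pdev);	/* marks the device as managed */
		if (err)
			return err;

		pci_set_master(pdev);

		/* now devres-backed: released automatically on detach, so
		 * no pci_release_regions() in error paths or remove() */
		err = pci_request_regions(pdev, "my_drv");
		if (err)
			return err;

		return 0;
	}
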
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 306d5d08141ee..f3aed26656a39 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -8213,8 +8213,9 @@ static int niu_pci_vpd_fetch(struct niu *np, u32 start)
 err = niu_pci_vpd_scan_props(np, here, end);
 if (err < 0)
 return err;
+ /* ret == 1 is not an error */
 if (err == 1)
- return -EINVAL;
+ return 0;
 }
 return 0;
 }
diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c
index a466e79784668..11aa6a04ff466 100644
--- a/drivers/nfc/nfcsim.c
+++ b/drivers/nfc/nfcsim.c
@@ -201,8 +201,7 @@ static void nfcsim_recv_wq(struct work_struct *work)

 if (!IS_ERR(skb))
 dev_kfree_skb(skb);
-
- skb = ERR_PTR(-ENODEV);
+ return;
 }

 dev->cb(dev->nfc_digital_dev, dev->arg, skb);
diff --git a/fs/hfs/bfind.c b/fs/hfs/bfind.c
index de69d8a24f6d7..7f2ef95dcd055 100644
--- a/fs/hfs/bfind.c
+++ b/fs/hfs/bfind.c
@@ -24,7 +24,19 @@ int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd)
 fd->key = ptr + tree->max_key_len + 2;
 hfs_dbg(BNODE_REFS, "find_init: %d (%p)\n",
 tree->cnid, __builtin_return_address(0));
- mutex_lock(&tree->tree_lock);
+ switch (tree->cnid) {
+ case HFS_CAT_CNID:
+ mutex_lock_nested(&tree->tree_lock, CATALOG_BTREE_MUTEX);
+ break;
+ case HFS_EXT_CNID:
+ mutex_lock_nested(&tree->tree_lock, EXTENTS_BTREE_MUTEX);
+ break;
+ case HFS_ATTR_CNID:
+ mutex_lock_nested(&tree->tree_lock, ATTR_BTREE_MUTEX);
+ break;
+ default:
+ return -EINVAL;
+ }
 return 0;
 }

diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index d77d844b668b1..1ff979c9d0a36 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -14,16 +14,31 @@

 #include "btree.h"

-void hfs_bnode_read(struct hfs_bnode *node, void *buf,
- int off, int len)
+void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
 {
 struct page *page;
+ int pagenum;
+ int bytes_read;
+ int bytes_to_read;
+ void *vaddr;

 off += node->page_offset;
- page = node->page[0];
+ pagenum = off >> PAGE_SHIFT;
+ off &= ~PAGE_MASK; /* compute page offset for the first page */

- memcpy(buf, kmap(page) + off, len);
- kunmap(page);
+ for (bytes_read = 0; bytes_read < len; bytes_read += bytes_to_read) {
+ if (pagenum >= node->tree->pages_per_bnode)
+ break;
+ page = node->page[pagenum];
+ bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
+
+ vaddr = kmap_atomic(page);
+ memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
+ kunmap_atomic(vaddr);
+
+ pagenum++;
+ off = 0; /* page offset only applies to the first page */
+ }
 }

 u16 hfs_bnode_read_u16(struct hfs_bnode *node, int off)
diff --git a/fs/hfs/btree.h b/fs/hfs/btree.h
index 2715f416b5a80..308b5f1af65ba 100644
--- a/fs/hfs/btree.h
+++ b/fs/hfs/btree.h
@@ -12,6 +12,13 @@ typedef int (*btree_keycmp)(const btree_key *, const btree_key *);

 #define NODE_HASH_SIZE 256

+/* B-tree mutex nested subclasses */
+enum hfs_btree_mutex_classes {
+ CATALOG_BTREE_MUTEX,
+ EXTENTS_BTREE_MUTEX,
+ ATTR_BTREE_MUTEX,
+};
+
 /* A HFS BTree held in memory */
 struct hfs_btree {
 struct super_block *sb;
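The bfind.c and btree.h hunks give each HFS B-tree (catalog, extents, attributes) its own lockdep subclass, so that taking the tree_lock of two different trees no longer looks like recursive locking of one class. A hedged stand-alone illustration of the annotation (two_trees, struct tree and the subclass names are hypothetical; mutex_lock_nested() is the real lockdep API):

	/* Hedged illustration of lockdep subclass annotation. */
	#include <linux/mutex.h>

	enum { TREE_A, TREE_B };	/* one lockdep subclass per lock role */

	struct tree {
		struct mutex lock;
	};

	static void two_trees(struct tree *a, struct tree *b)
	{
		/* Same lock class, distinct subclasses: lockdep accepts this
		 * nesting instead of reporting a false self-deadlock. */
		mutex_lock_nested(&a->lock, TREE_A);
		mutex_lock_nested(&b->lock, TREE_B);

		mutex_unlock(&b->lock);
		mutex_unlock(&a->lock);
	}
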
diff --git a/fs/hfs/super.c b/fs/hfs/super.c
index bf6304a350a6b..c2a5a0ca39486 100644
--- a/fs/hfs/super.c
+++ b/fs/hfs/super.c
@@ -427,14 +427,12 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 if (!res) {
 if (fd.entrylength > sizeof(rec) || fd.entrylength < 0) {
 res = -EIO;
- goto bail;
+ goto bail_hfs_find;
 }
 hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, fd.entrylength);
 }
- if (res) {
- hfs_find_exit(&fd);
- goto bail_no_root;
- }
+ if (res)
+ goto bail_hfs_find;
 res = -EINVAL;
 root_inode = hfs_iget(sb, &fd.search_key->cat, &rec);
 hfs_find_exit(&fd);
@@ -450,6 +448,8 @@ static int hfs_fill_super(struct super_block *sb, void *data, int silent)
 /* everything's okay */
 return 0;

+bail_hfs_find:
+ hfs_find_exit(&fd);
 bail_no_root:
 pr_err("get root inode failed\n");
 bail:
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 3aa4441f5ab50..d526e86cf5bbe 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -1511,6 +1511,45 @@ static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
 }
 }

+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ * is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+ u64 start, u64 len)
+{
+ int ret;
+ u64 start_block, end_block, nr_blocks;
+ u64 p_block, offset;
+ u32 cluster, p_cluster, nr_clusters;
+ struct super_block *sb = inode->i_sb;
+ u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+ if (start + len < end)
+ end = start + len;
+
+ start_block = ocfs2_blocks_for_bytes(sb, start);
+ end_block = ocfs2_blocks_for_bytes(sb, end);
+ nr_blocks = end_block - start_block;
+ if (!nr_blocks)
+ return 0;
+
+ cluster = ocfs2_bytes_to_clusters(sb, start);
+ ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+ &nr_clusters, NULL);
+ if (ret)
+ return ret;
+ if (!p_cluster)
+ return 0;
+
+ offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+ p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+ return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 static int ocfs2_zero_partial_clusters(struct inode *inode,
 u64 start, u64 len)
 {
@@ -1520,6 +1559,7 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 unsigned int csize = osb->s_clustersize;
 handle_t *handle;
+ loff_t isize = i_size_read(inode);

 /*
 * The "start" and "end" values are NOT necessarily part of
@@ -1540,6 +1580,26 @@ static int ocfs2_zero_partial_clusters(struct inode *inode,
 if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
 goto out;

+ /* No page cache for EOF blocks, issue zero out to disk. */
+ if (end > isize) {
+ /*
+ * zeroout eof blocks in last cluster starting from
+ * "isize" even "start" > "isize" because it is
+ * complicated to zeroout just at "start" as "start"
+ * may be not aligned with block size, buffer write
+ * would be required to do that, but out of eof buffer
+ * write is not supported.
+ */
+ ret = ocfs2_zeroout_partial_cluster(inode, isize,
+ end - isize);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ if (start >= isize)
+ goto out;
+ end = isize;
+ }
 handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
 if (IS_ERR(handle)) {
 ret = PTR_ERR(handle);
@@ -1838,45 +1898,6 @@ out:
 return ret;
 }

-/*
- * zero out partial blocks of one cluster.
- *
- * start: file offset where zero starts, will be made upper block aligned.
- * len: it will be trimmed to the end of current cluster if "start + len"
- * is bigger than it.
- */
-static int ocfs2_zeroout_partial_cluster(struct inode *inode,
- u64 start, u64 len)
-{
- int ret;
- u64 start_block, end_block, nr_blocks;
- u64 p_block, offset;
- u32 cluster, p_cluster, nr_clusters;
- struct super_block *sb = inode->i_sb;
- u64 end = ocfs2_align_bytes_to_clusters(sb, start);
-
- if (start + len < end)
- end = start + len;
-
- start_block = ocfs2_blocks_for_bytes(sb, start);
- end_block = ocfs2_blocks_for_bytes(sb, end);
- nr_blocks = end_block - start_block;
- if (!nr_blocks)
- return 0;
-
- cluster = ocfs2_bytes_to_clusters(sb, start);
- ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
- &nr_clusters, NULL);
- if (ret)
- return ret;
- if (!p_cluster)
- return 0;
-
- offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
- p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
- return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
-}
-
 /*
 * Parts of this function taken from xfs_change_file_space()
 */
@@ -1918,7 +1939,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 goto out_inode_unlock;
 }

- orig_isize = i_size_read(inode);
 switch (sr->l_whence) {
 case 0: /*SEEK_SET*/
 break;
@@ -1926,7 +1946,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 sr->l_start += f_pos;
 break;
 case 2: /*SEEK_END*/
- sr->l_start += orig_isize;
+ sr->l_start += i_size_read(inode);
 break;
 default:
 ret = -EINVAL;
@@ -1981,6 +2001,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
 ret = -EINVAL;
 }

+ orig_isize = i_size_read(inode);
 /* zeroout eof blocks in the cluster. */
 if (!ret && change_size && orig_isize < size) {
 ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
diff --git a/include/linux/string.h b/include/linux/string.h
index 66a91f5a34499..dd810d4739ee9 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -103,6 +103,36 @@ extern __kernel_size_t strcspn(const char *,const char *);
 #ifndef __HAVE_ARCH_MEMSET
 extern void * memset(void *,int,__kernel_size_t);
 #endif
+
+#ifndef __HAVE_ARCH_MEMSET16
+extern void *memset16(uint16_t *, uint16_t, __kernel_size_t);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET32
+extern void *memset32(uint32_t *, uint32_t, __kernel_size_t);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET64
+extern void *memset64(uint64_t *, uint64_t, __kernel_size_t);
+#endif
+
+static inline void *memset_l(unsigned long *p, unsigned long v,
+ __kernel_size_t n)
+{
+ if (BITS_PER_LONG == 32)
+ return memset32((uint32_t *)p, v, n);
+ else
+ return memset64((uint64_t *)p, v, n);
+}
+
+static inline void *memset_p(void **p, void *v, __kernel_size_t n)
+{
+ if (BITS_PER_LONG == 32)
+ return memset32((uint32_t *)p, (uintptr_t)v, n);
+ else
+ return memset64((uint64_t *)p, (uintptr_t)v, n);
+}
+
 #ifndef __HAVE_ARCH_MEMCPY
 extern void * memcpy(void *,const void *,__kernel_size_t);
 #endif
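memset16/32/64 take the element count, not a byte count; the signal.c hunk earlier in this patch is the backported user, filling a whole page with an undefined-instruction pattern via memset32(). A hedged stand-alone model of the semantics (plain userspace C, not the kernel build, with the loop body taken from the lib/string.c hunk below):

	/* Hedged model of memset32(): count is in elements, not bytes. */
	#include <stdint.h>
	#include <stdio.h>

	static void *memset32(uint32_t *s, uint32_t v, size_t count)
	{
		uint32_t *xs = s;

		while (count--)
			*xs++ = v;
		return s;
	}

	int main(void)
	{
		uint32_t page[4096 / sizeof(uint32_t)];

		/* 4096 bytes => 1024 uint32_t elements, as in the signal.c hunk */
		memset32(page, 0xe7fddef1, sizeof(page) / sizeof(uint32_t));
		printf("0x%08x\n", page[1023]);
		return 0;
	}
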
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index fd60eccb59a67..79f2e1ccfcfb8 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -8,6 +8,7 @@

 void unix_inflight(struct user_struct *user, struct file *fp);
 void unix_notinflight(struct user_struct *user, struct file *fp);
+void unix_destruct_scm(struct sk_buff *skb);
 void unix_gc(void);
 void wait_for_unix_gc(void);
 struct sock *unix_get_socket(struct file *filp);
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index c0f0a13ed8183..49aa79c7b278a 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -15,9 +15,11 @@
 #include <linux/if_ether.h>

 /* Lengths of frame formats */
-#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
-#define LLC_PDU_LEN_S 4
-#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+#define LLC_PDU_LEN_I 4 /* header and 2 control bytes */
+#define LLC_PDU_LEN_S 4
+#define LLC_PDU_LEN_U 3 /* header and 1 control byte */
+/* header and 1 control byte and XID info */
+#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))
 /* Known SAP addresses */
 #define LLC_GLOBAL_SAP 0xFF
 #define LLC_NULL_SAP 0x00 /* not network-layer visible */
@@ -50,9 +52,10 @@
 #define LLC_PDU_TYPE_U_MASK 0x03 /* 8-bit control field */
 #define LLC_PDU_TYPE_MASK 0x03

-#define LLC_PDU_TYPE_I 0 /* first bit */
-#define LLC_PDU_TYPE_S 1 /* first two bits */
-#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_I 0 /* first bit */
+#define LLC_PDU_TYPE_S 1 /* first two bits */
+#define LLC_PDU_TYPE_U 3 /* first two bits */
+#define LLC_PDU_TYPE_U_XID 4 /* private type for detecting XID commands */

 #define LLC_PDU_TYPE_IS_I(pdu) \
 ((!(pdu->ctrl_1 & LLC_PDU_TYPE_I_MASK)) ? 1 : 0)
@@ -230,9 +233,18 @@ static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb)
 static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
 u8 ssap, u8 dsap, u8 cr)
 {
- const int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
+ int hlen = 4; /* default value for I and S types */
 struct llc_pdu_un *pdu;

+ switch (type) {
+ case LLC_PDU_TYPE_U:
+ hlen = 3;
+ break;
+ case LLC_PDU_TYPE_U_XID:
+ hlen = 6;
+ break;
+ }
+
 skb_push(skb, hlen);
 skb_reset_network_header(skb);
 pdu = llc_pdu_un_hdr(skb);
@@ -374,7 +386,10 @@ static inline void llc_pdu_init_as_xid_cmd(struct sk_buff *skb,
 xid_info->fmt_id = LLC_XID_FMT_ID; /* 0x81 */
 xid_info->type = svcs_supported;
 xid_info->rw = rx_window << 1; /* size of receive window */
+
+ /* no need to push/put since llc_pdu_header_init() has already
+ * pushed 3 + 3 bytes
+ */
 }

 /**
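The arithmetic behind the llc_pdu.h hunk: struct llc_xid_info is three one-byte fields, so LLC_PDU_LEN_U_XID = 3 + 3 = 6, and reserving those 6 bytes up front in llc_pdu_header_init() keeps llc_pdu_init_as_xid_cmd() from overwriting user data that follows the header. A hedged size check (the struct layout mirrors the net/llc definition; the program itself is just an illustration):

	/* Hedged check of the XID header math used above. */
	#include <stdio.h>

	struct llc_xid_info {
		unsigned char fmt_id;	/* always 0x81 */
		unsigned char type;	/* services supported */
		unsigned char rw;	/* receive window << 1 */
	};

	#define LLC_PDU_LEN_U     3
	#define LLC_PDU_LEN_U_XID (LLC_PDU_LEN_U + sizeof(struct llc_xid_info))

	int main(void)
	{
		/* 3 header bytes + 3 XID bytes = 6, the hlen picked for
		 * LLC_PDU_TYPE_U_XID in llc_pdu_header_init() */
		printf("%zu\n", LLC_PDU_LEN_U_XID);
		return 0;
	}
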
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 8890fd66021dd..9799c300603a9 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -344,8 +344,7 @@ typedef enum {
 } sctp_scope_policy_t;

 /* Based on IPv4 scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>,
- * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 198.18.0.0/24,
- * 192.88.99.0/24.
+ * SCTP IPv4 unusable addresses: 0.0.0.0/8, 224.0.0.0/4, 192.88.99.0/24.
 * Also, RFC 8.4, non-unicast addresses are not considered valid SCTP
 * addresses.
 */
@@ -353,7 +352,6 @@ typedef enum {
 ((htonl(INADDR_BROADCAST) == a) || \
 ipv4_is_multicast(a) || \
 ipv4_is_zeronet(a) || \
- ipv4_is_test_198(a) || \
 ipv4_is_anycast_6to4(a))

 /* Flags used for the bind address copy functions. */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a410d5827a73f..b3476a21a7b31 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3397,15 +3397,21 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 unbound_release_work);
 struct workqueue_struct *wq = pwq->wq;
 struct worker_pool *pool = pwq->pool;
- bool is_last;
+ bool is_last = false;

- if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
- return;
+ /*
+ * when @pwq is not linked, it doesn't hold any reference to the
+ * @wq, and @wq is invalid to access.
+ */
+ if (!list_empty(&pwq->pwqs_node)) {
+ if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
+ return;

- mutex_lock(&wq->mutex);
- list_del_rcu(&pwq->pwqs_node);
- is_last = list_empty(&wq->pwqs);
- mutex_unlock(&wq->mutex);
+ mutex_lock(&wq->mutex);
+ list_del_rcu(&pwq->pwqs_node);
+ is_last = list_empty(&wq->pwqs);
+ mutex_unlock(&wq->mutex);
+ }

 mutex_lock(&wq_pool_mutex);
 put_unbound_pool(pool);
diff --git a/lib/string.c b/lib/string.c
index 8fe13371aed7a..ec1ba61b358f2 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -754,6 +754,72 @@ void memzero_explicit(void *s, size_t count)
 }
 EXPORT_SYMBOL(memzero_explicit);

+#ifndef __HAVE_ARCH_MEMSET16
+/**
+ * memset16() - Fill a memory area with a uint16_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint16_t instead
+ * of a byte. Remember that @count is the number of uint16_ts to
+ * store, not the number of bytes.
+ */
+void *memset16(uint16_t *s, uint16_t v, size_t count)
+{
+ uint16_t *xs = s;
+
+ while (count--)
+ *xs++ = v;
+ return s;
+}
+EXPORT_SYMBOL(memset16);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET32
+/**
+ * memset32() - Fill a memory area with a uint32_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint32_t instead
+ * of a byte. Remember that @count is the number of uint32_ts to
+ * store, not the number of bytes.
+ */
+void *memset32(uint32_t *s, uint32_t v, size_t count)
+{
+ uint32_t *xs = s;
+
+ while (count--)
+ *xs++ = v;
+ return s;
+}
+EXPORT_SYMBOL(memset32);
+#endif
+
+#ifndef __HAVE_ARCH_MEMSET64
+/**
+ * memset64() - Fill a memory area with a uint64_t
+ * @s: Pointer to the start of the area.
+ * @v: The value to fill the area with
+ * @count: The number of values to store
+ *
+ * Differs from memset() in that it fills with a uint64_t instead
+ * of a byte. Remember that @count is the number of uint64_ts to
+ * store, not the number of bytes.
+ */
+void *memset64(uint64_t *s, uint64_t v, size_t count)
+{
+ uint64_t *xs = s;
+
+ while (count--)
+ *xs++ = v;
+ return s;
+}
+EXPORT_SYMBOL(memset64);
+#endif
+
 #ifndef __HAVE_ARCH_MEMCPY
 /**
 * memcpy - Copy one area of memory to another
diff --git a/net/802/garp.c b/net/802/garp.c
index b38ee6dcba45f..5239b8f244e75 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -206,6 +206,19 @@ static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr
 kfree(attr);
 }

+static void garp_attr_destroy_all(struct garp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct garp_attr *attr;
+
+ for (node = rb_first(&app->gid);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct garp_attr, node);
+ garp_attr_destroy(app, attr);
+ }
+}
+
 static int garp_pdu_init(struct garp_applicant *app)
 {
 struct sk_buff *skb;
@@ -612,6 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl

 spin_lock_bh(&app->lock);
 garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
+ garp_attr_destroy_all(app);
 garp_pdu_queue(app);
 spin_unlock_bh(&app->lock);

diff --git a/net/802/mrp.c b/net/802/mrp.c
index 72db2785ef2c0..4ee3af3d400b1 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -295,6 +295,19 @@ static void mrp_attr_destroy(struct mrp_applicant *app, struct mrp_attr *attr)
 kfree(attr);
 }

+static void mrp_attr_destroy_all(struct mrp_applicant *app)
+{
+ struct rb_node *node, *next;
+ struct mrp_attr *attr;
+
+ for (node = rb_first(&app->mad);
+ next = node ? rb_next(node) : NULL, node != NULL;
+ node = next) {
+ attr = rb_entry(node, struct mrp_attr, node);
+ mrp_attr_destroy(app, attr);
+ }
+}
+
 static int mrp_pdu_init(struct mrp_applicant *app)
 {
 struct sk_buff *skb;
@@ -900,6 +913,7 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)

 spin_lock_bh(&app->lock);
 mrp_mad_event(app, MRP_EVENT_TX);
+ mrp_attr_destroy_all(app);
 mrp_pdu_queue(app);
 spin_unlock_bh(&app->lock);

diff --git a/net/Makefile b/net/Makefile
index 4cafaa2b4667e..1955d89de75bb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_NET) += ethernet/ 802/ sched/ netlink/
 obj-$(CONFIG_NETFILTER) += netfilter/
 obj-$(CONFIG_INET) += ipv4/
 obj-$(CONFIG_XFRM) += xfrm/
-obj-$(CONFIG_UNIX) += unix/
+obj-$(CONFIG_UNIX_SCM) += unix/
 obj-$(CONFIG_NET) += ipv6/
 obj-$(CONFIG_PACKET) += packet/
 obj-$(CONFIG_NET_KEY) += key/
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 1a77d0687d743..a8866455e8b2a 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -96,8 +96,16 @@ static inline u8 llc_ui_header_len(struct sock *sk, struct sockaddr_llc *addr)
 {
 u8 rc = LLC_PDU_LEN_U;

- if (addr->sllc_test || addr->sllc_xid)
+ if (addr->sllc_test)
 rc = LLC_PDU_LEN_U;
+ else if (addr->sllc_xid)
+ /* We need to expand header to sizeof(struct llc_xid_info)
+ * since llc_pdu_init_as_xid_cmd() sets 4,5,6 bytes of LLC header
+ * as XID PDU. In llc_ui_sendmsg() we reserved header size and then
+ * filled all other space with user data. If we won't reserve this
+ * bytes, llc_pdu_init_as_xid_cmd() will overwrite user data
+ */
+ rc = LLC_PDU_LEN_U_XID;
 else if (sk->sk_type == SOCK_STREAM)
 rc = LLC_PDU_LEN_I;
 return rc;
diff --git a/net/llc/llc_s_ac.c b/net/llc/llc_s_ac.c
index 7ae4cc684d3ab..9fa3342c7a829 100644
--- a/net/llc/llc_s_ac.c
+++ b/net/llc/llc_s_ac.c
@@ -79,7 +79,7 @@ int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
 struct llc_sap_state_ev *ev = llc_sap_ev(skb);
 int rc;

- llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
+ llc_pdu_header_init(skb, LLC_PDU_TYPE_U_XID, ev->saddr.lsap,
 ev->daddr.lsap, LLC_PDU_CMD);
 llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
 rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index ddd90a3820d39..d03f067178445 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -491,8 +491,13 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 return false;

 tstamp = nf_conn_tstamp_find(ct);
- if (tstamp && tstamp->stop == 0)
+ if (tstamp) {
+ s32 timeout = ct->timeout - nfct_time_stamp;
+
 tstamp->stop = ktime_get_real_ns();
+ if (timeout < 0)
+ tstamp->stop -= jiffies_to_nsecs(-timeout);
+ }

 if (nf_conntrack_event_report(IPCT_DESTROY, ct,
 portid, report) < 0) {
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index d2510e432c183..d338d69a0e0bb 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -157,7 +157,9 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
 alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6);
 break;
 default:
- return -EAFNOSUPPORT;
+ if (tb[NFTA_NAT_REG_ADDR_MIN])
+ return -EAFNOSUPPORT;
+ break;
 }
 priv->family = family;

diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b2c242facf1b9..b1932fd125dad 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -413,7 +413,8 @@ static sctp_scope_t sctp_v4_scope(union sctp_addr *addr)
 retval = SCTP_SCOPE_LINK;
 } else if (ipv4_is_private_10(addr->v4.sin_addr.s_addr) ||
 ipv4_is_private_172(addr->v4.sin_addr.s_addr) ||
- ipv4_is_private_192(addr->v4.sin_addr.s_addr)) {
+ ipv4_is_private_192(addr->v4.sin_addr.s_addr) ||
+ ipv4_is_test_198(addr->v4.sin_addr.s_addr)) {
 retval = SCTP_SCOPE_PRIVATE;
 } else {
 retval = SCTP_SCOPE_GLOBAL;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 06327f78f2032..6fc2fa75503d2 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -893,6 +893,7 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 if (pkt_cnt <= 0)
 return 0;

+ hdr = buf_msg(skb_peek(list));
 imp = msg_importance(hdr);
 /* Match msg importance against this and all higher backlog limits: */
 if (!skb_queue_empty(backlogq)) {
@@ -902,7 +903,6 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 }
 }

- hdr = buf_msg(skb_peek(list));
 if (unlikely(msg_size(hdr) > mtu)) {
 skb_queue_purge(list);
 return -EMSGSIZE;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index c1b9074f3325e..6077850774454 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1985,7 +1985,7 @@ static int tipc_listen(struct socket *sock, int len)
 static int tipc_wait_for_accept(struct socket *sock, long timeo)
 {
 struct sock *sk = sock->sk;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
 int err;

 /* True wake-one mechanism for incoming connections: only
@@ -1994,12 +1994,12 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 * anymore, the common case will execute the loop only once.
 */
 for (;;) {
- prepare_to_wait_exclusive(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
+ add_wait_queue(sk_sleep(sk), &wait);
 release_sock(sk);
- timeo = schedule_timeout(timeo);
+ timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
 lock_sock(sk);
+ remove_wait_queue(sk_sleep(sk), &wait);
 }
 err = 0;
 if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -2014,7 +2014,6 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
 if (signal_pending(current))
 break;
 }
- finish_wait(sk_sleep(sk), &wait);
 return err;
 }

diff --git a/net/unix/Kconfig b/net/unix/Kconfig
index 8b31ab85d050f..3b9e450656a4d 100644
--- a/net/unix/Kconfig
+++ b/net/unix/Kconfig
@@ -19,6 +19,11 @@ config UNIX

 Say Y unless you know what you are doing.

+config UNIX_SCM
+ bool
+ depends on UNIX
+ default y
+
 config UNIX_DIAG
 tristate "UNIX: socket monitoring interface"
 depends on UNIX
diff --git a/net/unix/Makefile b/net/unix/Makefile
index b663c607b1c61..dc686c6757fb5 100644
--- a/net/unix/Makefile
+++ b/net/unix/Makefile
@@ -9,3 +9,5 @@ unix-$(CONFIG_SYSCTL) += sysctl_net_unix.o

 obj-$(CONFIG_UNIX_DIAG) += unix_diag.o
 unix_diag-y := diag.o
+
+obj-$(CONFIG_UNIX_SCM) += scm.o
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 8bbaa35937dd9..bfdfb958a37d6 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -118,6 +118,8 @@
 #include <linux/security.h>
 #include <linux/freezer.h>

+#include "scm.h"
+
 struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
 EXPORT_SYMBOL_GPL(unix_socket_table);
 DEFINE_SPINLOCK(unix_table_lock);
@@ -1505,78 +1507,51 @@ out:
 return err;
 }

-static void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
-{
- int i;
-
- scm->fp = UNIXCB(skb).fp;
- UNIXCB(skb).fp = NULL;
-
- for (i = scm->fp->count-1; i >= 0; i--)
- unix_notinflight(scm->fp->user, scm->fp->fp[i]);
-}
-
-static void unix_destruct_scm(struct sk_buff *skb)
-{
- struct scm_cookie scm;
- memset(&scm, 0, sizeof(scm));
- scm.pid = UNIXCB(skb).pid;
- if (UNIXCB(skb).fp)
- unix_detach_fds(&scm, skb);
-
- /* Alas, it calls VFS */
- /* So fscking what? fput() had been SMP-safe since the last Summer */
- scm_destroy(&scm);
- sock_wfree(skb);
-}
-
-/*
- * The "user->unix_inflight" variable is protected by the garbage
- * collection lock, and we just read it locklessly here. If you go
- * over the limit, there might be a tiny race in actually noticing
- * it across threads. Tough.
- */
-static inline bool too_many_unix_fds(struct task_struct *p)
-{
- struct user_struct *user = current_user();
-
- if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
- return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
- return false;
-}
-
-#define MAX_RECURSION_LEVEL 4
-
-static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+static void unix_peek_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
- int i;
- unsigned char max_level = 0;
-
- if (too_many_unix_fds(current))
- return -ETOOMANYREFS;
-
- for (i = scm->fp->count - 1; i >= 0; i--) {
- struct sock *sk = unix_get_socket(scm->fp->fp[i]);
-
- if (sk)
- max_level = max(max_level,
- unix_sk(sk)->recursion_level);
- }
- if (unlikely(max_level > MAX_RECURSION_LEVEL))
- return -ETOOMANYREFS;
+ scm->fp = scm_fp_dup(UNIXCB(skb).fp);

 /*
- * Need to duplicate file references for the sake of garbage
- * collection. Otherwise a socket in the fps might become a
- * candidate for GC while the skb is not yet queued.
+ * Garbage collection of unix sockets starts by selecting a set of
+ * candidate sockets which have reference only from being in flight
+ * (total_refs == inflight_refs). This condition is checked once during
+ * the candidate collection phase, and candidates are marked as such, so
+ * that non-candidates can later be ignored. While inflight_refs is
+ * protected by unix_gc_lock, total_refs (file count) is not, hence this
+ * is an instantaneous decision.
+ *
+ * Once a candidate, however, the socket must not be reinstalled into a
+ * file descriptor while the garbage collection is in progress.
+ *
+ * If the above conditions are met, then the directed graph of
+ * candidates (*) does not change while unix_gc_lock is held.
+ *
+ * Any operations that changes the file count through file descriptors
+ * (dup, close, sendmsg) does not change the graph since candidates are
+ * not installed in fds.
+ *
+ * Dequeing a candidate via recvmsg would install it into an fd, but
+ * that takes unix_gc_lock to decrement the inflight count, so it's
+ * serialized with garbage collection.
+ *
+ * MSG_PEEK is special in that it does not change the inflight count,
+ * yet does install the socket into an fd. The following lock/unlock
+ * pair is to ensure serialization with garbage collection. It must be
+ * done between incrementing the file count and installing the file into
+ * an fd.
+ *
+ * If garbage collection starts after the barrier provided by the
+ * lock/unlock, then it will see the elevated refcount and not mark this
+ * as a candidate. If a garbage collection is already in progress
+ * before the file count was incremented, then the lock/unlock pair will
+ * ensure that garbage collection is finished before progressing to
+ * installing the fd.
+ *
+ * (*) A -> B where B is on the queue of A or B is on the queue of C
+ * which is on the queue of listening socket A.
 */
- UNIXCB(skb).fp = scm_fp_dup(scm->fp);
- if (!UNIXCB(skb).fp)
- return -ENOMEM;
-
- for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->user, scm->fp->fp[i]);
- return max_level;
+ spin_lock(&unix_gc_lock);
+ spin_unlock(&unix_gc_lock);
 }

 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -2212,7 +2187,7 @@ static int unix_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
 sk_peek_offset_fwd(sk, size);

 if (UNIXCB(skb).fp)
- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+ unix_peek_fds(&scm, skb);
 }
 err = (flags & MSG_TRUNC) ? skb->len - skip : size;

@@ -2457,7 +2432,7 @@ unlock:
 /* It is questionable, see note in unix_dgram_recvmsg.
 */
 if (UNIXCB(skb).fp)
- scm.fp = scm_fp_dup(UNIXCB(skb).fp);
+ unix_peek_fds(&scm, skb);

 sk_peek_offset_fwd(sk, chunk);

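The unix_peek_fds() comment above describes an empty lock/unlock pair used purely as a barrier: taking and immediately dropping unix_gc_lock waits out any in-flight garbage-collection cycle, and a cycle that starts afterwards will already see the elevated file count. A hedged minimal model of that idiom (do_gc and peek_side are hypothetical stand-ins; the spinlock API is real):

	/* Hedged model of the lock/unlock barrier used by unix_peek_fds(). */
	static DEFINE_SPINLOCK(gc_lock);

	static void do_gc(void)
	{
		spin_lock(&gc_lock);
		/* scan and collect candidates; the lock is held for the
		 * entire cycle */
		spin_unlock(&gc_lock);
	}

	static void peek_side(void)
	{
		/* the reference count was already elevated here ... */
		spin_lock(&gc_lock);	/* waits out any in-flight GC cycle */
		spin_unlock(&gc_lock);	/* a later GC sees the elevated count */
		/* ... now safe to install the file into an fd */
	}
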
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index c36757e728442..8bbe1b8e4ff7f 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -86,77 +86,13 @@
 #include <net/scm.h>
 #include <net/tcp_states.h>

+#include "scm.h"
+
 /* Internal data structures and random procedures: */

-static LIST_HEAD(gc_inflight_list);
 static LIST_HEAD(gc_candidates);
-static DEFINE_SPINLOCK(unix_gc_lock);
 static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

-unsigned int unix_tot_inflight;
-
-struct sock *unix_get_socket(struct file *filp)
-{
- struct sock *u_sock = NULL;
- struct inode *inode = file_inode(filp);
-
- /* Socket ? */
- if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
- struct socket *sock = SOCKET_I(inode);
- struct sock *s = sock->sk;
-
- /* PF_UNIX ? */
- if (s && sock->ops && sock->ops->family == PF_UNIX)
- u_sock = s;
- }
- return u_sock;
-}
-
-/* Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
- */
-
-void unix_inflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- if (atomic_long_inc_return(&u->inflight) == 1) {
- BUG_ON(!list_empty(&u->link));
- list_add_tail(&u->link, &gc_inflight_list);
- } else {
- BUG_ON(list_empty(&u->link));
- }
- unix_tot_inflight++;
- }
- user->unix_inflight++;
- spin_unlock(&unix_gc_lock);
-}
-
-void unix_notinflight(struct user_struct *user, struct file *fp)
-{
- struct sock *s = unix_get_socket(fp);
-
- spin_lock(&unix_gc_lock);
-
- if (s) {
- struct unix_sock *u = unix_sk(s);
-
- BUG_ON(!atomic_long_read(&u->inflight));
- BUG_ON(list_empty(&u->link));
-
- if (atomic_long_dec_and_test(&u->inflight))
- list_del_init(&u->link);
- unix_tot_inflight--;
- }
- user->unix_inflight--;
- spin_unlock(&unix_gc_lock);
-}
-
 static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
 struct sk_buff_head *hitlist)
 {
diff --git a/net/unix/scm.c b/net/unix/scm.c
new file mode 100644
index 0000000000000..df8f636ab1d8c
--- /dev/null
+++ b/net/unix/scm.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/fs.h>
+#include <net/af_unix.h>
+#include <net/scm.h>
+#include <linux/init.h>
+
+#include "scm.h"
+
+unsigned int unix_tot_inflight;
+EXPORT_SYMBOL(unix_tot_inflight);
+
+LIST_HEAD(gc_inflight_list);
+EXPORT_SYMBOL(gc_inflight_list);
+
+DEFINE_SPINLOCK(unix_gc_lock);
+EXPORT_SYMBOL(unix_gc_lock);
+
+struct sock *unix_get_socket(struct file *filp)
+{
+ struct sock *u_sock = NULL;
+ struct inode *inode = file_inode(filp);
+
+ /* Socket ? */
+ if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
+ struct socket *sock = SOCKET_I(inode);
+ struct sock *s = sock->sk;
+
+ /* PF_UNIX ? */
+ if (s && sock->ops && sock->ops->family == PF_UNIX)
+ u_sock = s;
+ }
+ return u_sock;
+}
+EXPORT_SYMBOL(unix_get_socket);
+
+/* Keep the number of times in flight count for the file
+ * descriptor if it is for an AF_UNIX socket.
+ */
+void unix_inflight(struct user_struct *user, struct file *fp)
+{
+ struct sock *s = unix_get_socket(fp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+ if (atomic_long_inc_return(&u->inflight) == 1) {
+ BUG_ON(!list_empty(&u->link));
+ list_add_tail(&u->link, &gc_inflight_list);
+ } else {
+ BUG_ON(list_empty(&u->link));
+ }
+ unix_tot_inflight++;
+ }
+ user->unix_inflight++;
+ spin_unlock(&unix_gc_lock);
+}
+
+void unix_notinflight(struct user_struct *user, struct file *fp)
+{
+ struct sock *s = unix_get_socket(fp);
+
+ spin_lock(&unix_gc_lock);
+
+ if (s) {
+ struct unix_sock *u = unix_sk(s);
+
+ BUG_ON(!atomic_long_read(&u->inflight));
+ BUG_ON(list_empty(&u->link));
+
+ if (atomic_long_dec_and_test(&u->inflight))
+ list_del_init(&u->link);
+ unix_tot_inflight--;
+ }
+ user->unix_inflight--;
+ spin_unlock(&unix_gc_lock);
+}
+
+/*
+ * The "user->unix_inflight" variable is protected by the garbage
+ * collection lock, and we just read it locklessly here. If you go
+ * over the limit, there might be a tiny race in actually noticing
+ * it across threads. Tough.
+ */
+static inline bool too_many_unix_fds(struct task_struct *p)
+{
+ struct user_struct *user = current_user();
+
+ if (unlikely(user->unix_inflight > task_rlimit(p, RLIMIT_NOFILE)))
+ return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+ return false;
+}
+
+#define MAX_RECURSION_LEVEL 4
+
+int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+ unsigned char max_level = 0;
+
+ if (too_many_unix_fds(current))
+ return -ETOOMANYREFS;
+
+ for (i = scm->fp->count - 1; i >= 0; i--) {
+ struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+
+ if (sk)
+ max_level = max(max_level,
+ unix_sk(sk)->recursion_level);
+ }
+ if (unlikely(max_level > MAX_RECURSION_LEVEL))
+ return -ETOOMANYREFS;
+
+ /*
+ * Need to duplicate file references for the sake of garbage
+ * collection. Otherwise a socket in the fps might become a
+ * candidate for GC while the skb is not yet queued.
+ */
+ UNIXCB(skb).fp = scm_fp_dup(scm->fp);
+ if (!UNIXCB(skb).fp)
+ return -ENOMEM;
+
+ for (i = scm->fp->count - 1; i >= 0; i--)
+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
+ return max_level;
+}
+EXPORT_SYMBOL(unix_attach_fds);
+
+void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb)
+{
+ int i;
+
+ scm->fp = UNIXCB(skb).fp;
+ UNIXCB(skb).fp = NULL;
+
+ for (i = scm->fp->count-1; i >= 0; i--)
+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
+}
+EXPORT_SYMBOL(unix_detach_fds);
+
+void unix_destruct_scm(struct sk_buff *skb)
+{
+ struct scm_cookie scm;
+
+ memset(&scm, 0, sizeof(scm));
+ scm.pid = UNIXCB(skb).pid;
+ if (UNIXCB(skb).fp)
+ unix_detach_fds(&scm, skb);
+
+ /* Alas, it calls VFS */
+ /* So fscking what? fput() had been SMP-safe since the last Summer */
+ scm_destroy(&scm);
+ sock_wfree(skb);
+}
+EXPORT_SYMBOL(unix_destruct_scm);
diff --git a/net/unix/scm.h b/net/unix/scm.h
new file mode 100644
index 0000000000000..5a255a477f160
--- /dev/null
+++ b/net/unix/scm.h
@@ -0,0 +1,10 @@
+#ifndef NET_UNIX_SCM_H
+#define NET_UNIX_SCM_H
+
+extern struct list_head gc_inflight_list;
+extern spinlock_t unix_gc_lock;
+
+int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb);
+void unix_detach_fds(struct scm_cookie *scm, struct sk_buff *skb);
+
+#endif
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 71a8e6980e2fc..901a8742a28dc 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -949,16 +949,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev,
 * be grouped with this beacon for updates ...
 */
 if (!cfg80211_combine_bsses(rdev, new)) {
- kfree(new);
+ bss_ref_put(rdev, new);
 goto drop;
 }
 }

 if (rdev->bss_entries >= bss_entries_limit &&
 !cfg80211_bss_expire_oldest(rdev)) {
- if (!list_empty(&new->hidden_list))
- list_del(&new->hidden_list);
- kfree(new);
+ bss_ref_put(rdev, new);
 goto drop;
 }
