Magellan Linux

Contents of /trunk/kernel-alx-legacy/patches-4.9/0107-4.9.8-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 64550 bytes
-added kernel-alx-legacy pkg
1 diff --git a/Makefile b/Makefile
2 index da704d903321..1130803ab93c 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 7
9 +SUBLEVEL = 8
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
14 index 25d1eb4933d0..be7ec5a76a54 100644
15 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
16 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
17 @@ -710,11 +710,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
18 unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
19 unsigned int pkts_compl = 0, bytes_compl = 0;
20 struct bcm_sysport_cb *cb;
21 - struct netdev_queue *txq;
22 u32 hw_ind;
23
24 - txq = netdev_get_tx_queue(ndev, ring->index);
25 -
26 /* Compute how many descriptors have been processed since last call */
27 hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
28 c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
29 @@ -745,9 +742,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
30
31 ring->c_index = c_index;
32
33 - if (netif_tx_queue_stopped(txq) && pkts_compl)
34 - netif_tx_wake_queue(txq);
35 -
36 netif_dbg(priv, tx_done, ndev,
37 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
38 ring->index, ring->c_index, pkts_compl, bytes_compl);
39 @@ -759,16 +753,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
40 static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
41 struct bcm_sysport_tx_ring *ring)
42 {
43 + struct netdev_queue *txq;
44 unsigned int released;
45 unsigned long flags;
46
47 + txq = netdev_get_tx_queue(priv->netdev, ring->index);
48 +
49 spin_lock_irqsave(&ring->lock, flags);
50 released = __bcm_sysport_tx_reclaim(priv, ring);
51 + if (released)
52 + netif_tx_wake_queue(txq);
53 +
54 spin_unlock_irqrestore(&ring->lock, flags);
55
56 return released;
57 }
58
59 +/* Locked version of the per-ring TX reclaim, but does not wake the queue */
60 +static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
61 + struct bcm_sysport_tx_ring *ring)
62 +{
63 + unsigned long flags;
64 +
65 + spin_lock_irqsave(&ring->lock, flags);
66 + __bcm_sysport_tx_reclaim(priv, ring);
67 + spin_unlock_irqrestore(&ring->lock, flags);
68 +}
69 +
70 static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
71 {
72 struct bcm_sysport_tx_ring *ring =
73 @@ -1253,7 +1264,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
74 napi_disable(&ring->napi);
75 netif_napi_del(&ring->napi);
76
77 - bcm_sysport_tx_reclaim(priv, ring);
78 + bcm_sysport_tx_clean(priv, ring);
79
80 kfree(ring->cbs);
81 ring->cbs = NULL;
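The hunks above split TX reclaim into two locked variants: the runtime one wakes
the netdev TX queue when descriptors were freed, while the teardown one must not
touch the queue at all (it can already be invalid at that point). A minimal
sketch of the resulting pattern, reusing the driver's identifiers from the hunks
(not the complete functions):

    /* Runtime path: reclaim and, if anything was freed, wake the TX
     * queue, all under the ring lock so the wake cannot race with a
     * concurrent queue stop.
     */
    static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                                               struct bcm_sysport_tx_ring *ring)
    {
            struct netdev_queue *txq;
            unsigned int released;
            unsigned long flags;

            txq = netdev_get_tx_queue(priv->netdev, ring->index);

            spin_lock_irqsave(&ring->lock, flags);
            released = __bcm_sysport_tx_reclaim(priv, ring);
            if (released)
                    netif_tx_wake_queue(txq);
            spin_unlock_irqrestore(&ring->lock, flags);

            return released;
    }

    /* Teardown path: free descriptors only, never wake the queue. */
    static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
                                     struct bcm_sysport_tx_ring *ring)
    {
            unsigned long flags;

            spin_lock_irqsave(&ring->lock, flags);
            __bcm_sysport_tx_reclaim(priv, ring);
            spin_unlock_irqrestore(&ring->lock, flags);
    }
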
82 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
83 index fb8bb027b69c..d223e7cb68ba 100644
84 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
85 +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
86 @@ -1740,8 +1740,11 @@ int mlx4_en_start_port(struct net_device *dev)
87 /* Process all completions if exist to prevent
88 * the queues freezing if they are full
89 */
90 - for (i = 0; i < priv->rx_ring_num; i++)
91 + for (i = 0; i < priv->rx_ring_num; i++) {
92 + local_bh_disable();
93 napi_schedule(&priv->rx_cq[i]->napi);
94 + local_bh_enable();
95 + }
96
97 netif_tx_start_all_queues(dev);
98 netif_device_attach(dev);
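napi_schedule() expects to run with softirqs disabled; when it is driven from
process context, as in mlx4_en_start_port() above, it has to be bracketed with
local_bh_disable()/local_bh_enable() so the pending NET_RX softirq actually runs
once BHs are re-enabled. The shape of the fix, as a stand-alone sketch:

    /* Kick every RX NAPI context from process context; the BH bracket
     * lets the deferred net_rx_action execute on local_bh_enable().
     */
    for (i = 0; i < priv->rx_ring_num; i++) {
            local_bh_disable();
            napi_schedule(&priv->rx_cq[i]->napi);
            local_bh_enable();
    }
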
99 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
100 index 33495d88aeb2..e7b2158bb48a 100644
101 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
102 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
103 @@ -193,6 +193,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
104 return false;
105 }
106
107 + if (unlikely(page_is_pfmemalloc(dma_info->page)))
108 + return false;
109 +
110 cache->page_cache[cache->tail] = *dma_info;
111 cache->tail = tail_next;
112 return true;
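pfmemalloc pages come from the emergency memory reserve; skbs built on them are
in general dropped by sockets that are not allowed to dip into that reserve, so
recycling such a page through the driver's RX page cache would keep producing
doomed packets. Hence the new early bail-out (a sketch using the patch's names):

    /* Do not recycle reserve pages into the RX cache; release them and
     * take a fresh page from the allocator instead.
     */
    if (unlikely(page_is_pfmemalloc(dma_info->page)))
            return false;
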
113 diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.h b/drivers/net/ethernet/mellanox/mlxsw/pci.h
114 index d942a3e6fa41..846fd4df7dab 100644
115 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.h
116 +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.h
117 @@ -211,21 +211,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
118 /* pci_eqe_cmd_token
119 * Command completion event - token
120 */
121 -MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
122 +MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
123
124 /* pci_eqe_cmd_status
125 * Command completion event - status
126 */
127 -MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
128 +MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
129
130 /* pci_eqe_cmd_out_param_h
131 * Command completion event - output parameter - higher part
132 */
133 -MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
134 +MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
135
136 /* pci_eqe_cmd_out_param_l
137 * Command completion event - output parameter - lower part
138 */
139 -MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
140 +MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
141
142 #endif
143 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
144 index dda5761e91bc..f902c4d3de99 100644
145 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
146 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
147 @@ -684,6 +684,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
148 dev_kfree_skb_any(skb_orig);
149 return NETDEV_TX_OK;
150 }
151 + dev_consume_skb_any(skb_orig);
152 }
153
154 if (eth_skb_pad(skb)) {
155 diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
156 index 92bda8703f87..d548f0a55174 100644
157 --- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
158 +++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
159 @@ -314,6 +314,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
160 dev_kfree_skb_any(skb_orig);
161 return NETDEV_TX_OK;
162 }
163 + dev_consume_skb_any(skb_orig);
164 }
165 mlxsw_sx_txhdr_construct(skb, &tx_info);
166 /* TX header is consumed by HW on the way so we shouldn't count its
167 diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
168 index d6a217874a8b..862f18ed6022 100644
169 --- a/drivers/net/ethernet/renesas/ravb_main.c
170 +++ b/drivers/net/ethernet/renesas/ravb_main.c
171 @@ -1508,6 +1508,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
172 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
173 entry / NUM_TX_DESC * DPTR_ALIGN;
174 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
175 + /* Zero length DMA descriptors are problematic as they seem to
176 + * terminate DMA transfers. Avoid them by simply using a length of
177 + * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
178 + *
179 + * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
180 + * data by the call to skb_put_padto() above, this is safe with
181 + * respect to both the length of the first DMA descriptor (len)
182 + * overflowing the available data and the length of the second DMA
183 + * descriptor (skb->len - len) being negative.
184 + */
185 + if (len == 0)
186 + len = DPTR_ALIGN;
187 +
188 memcpy(buffer, skb->data, len);
189 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
190 if (dma_mapping_error(ndev->dev.parent, dma_addr))
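Condensed, the constraint the comment describes: the TX path copies an alignment
head into a bounce buffer and must never emit a zero-length first descriptor.
The resulting logic, with the patch's own identifiers:

    len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
    if (len == 0)
            len = DPTR_ALIGN;       /* skb holds >= ETH_ZLEN bytes, so safe */
    /* First descriptor carries the aligned head, the second the rest;
     * skb->len - len stays positive because len <= DPTR_ALIGN < ETH_ZLEN.
     */
    memcpy(buffer, skb->data, len);
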
191 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
192 index c9140c3aeb67..ff038e507fd6 100644
193 --- a/drivers/net/hyperv/netvsc_drv.c
194 +++ b/drivers/net/hyperv/netvsc_drv.c
195 @@ -659,6 +659,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
196 * policy filters on the host). Deliver these via the VF
197 * interface in the guest.
198 */
199 + rcu_read_lock();
200 vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
201 if (vf_netdev && (vf_netdev->flags & IFF_UP))
202 net = vf_netdev;
203 @@ -667,6 +668,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
204 skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
205 if (unlikely(!skb)) {
206 ++net->stats.rx_dropped;
207 + rcu_read_unlock();
208 return NVSP_STAT_FAIL;
209 }
210
211 @@ -696,6 +698,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
212 * TODO - use NAPI?
213 */
214 netif_rx(skb);
215 + rcu_read_unlock();
216
217 return 0;
218 }
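Every rcu_dereference() must sit inside an RCU read-side critical section, and
each early return inside that section has to drop it. The fix gives
netvsc_recv_callback() exactly that shape; reduced to its skeleton:

    rcu_read_lock();
    vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
    if (vf_netdev && (vf_netdev->flags & IFF_UP))
            net = vf_netdev;

    skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
    if (unlikely(!skb)) {
            ++net->stats.rx_dropped;
            rcu_read_unlock();      /* the error path must unlock too */
            return NVSP_STAT_FAIL;
    }

    netif_rx(skb);
    rcu_read_unlock();
    return 0;
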
219 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
220 index 7869b0651576..6f38daf2d978 100644
221 --- a/drivers/net/macvtap.c
222 +++ b/drivers/net/macvtap.c
223 @@ -827,7 +827,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
224 return -EINVAL;
225
226 ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
227 - macvtap_is_little_endian(q));
228 + macvtap_is_little_endian(q), true);
229 if (ret)
230 BUG();
231
232 diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
233 index e741bf614c4e..b0492ef2cdaa 100644
234 --- a/drivers/net/phy/bcm63xx.c
235 +++ b/drivers/net/phy/bcm63xx.c
236 @@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
237 MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
238 MODULE_LICENSE("GPL");
239
240 +static int bcm63xx_config_intr(struct phy_device *phydev)
241 +{
242 + int reg, err;
243 +
244 + reg = phy_read(phydev, MII_BCM63XX_IR);
245 + if (reg < 0)
246 + return reg;
247 +
248 + if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
249 + reg &= ~MII_BCM63XX_IR_GMASK;
250 + else
251 + reg |= MII_BCM63XX_IR_GMASK;
252 +
253 + err = phy_write(phydev, MII_BCM63XX_IR, reg);
254 + return err;
255 +}
256 +
257 static int bcm63xx_config_init(struct phy_device *phydev)
258 {
259 int reg, err;
260 @@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
261 .config_aneg = genphy_config_aneg,
262 .read_status = genphy_read_status,
263 .ack_interrupt = bcm_phy_ack_intr,
264 - .config_intr = bcm_phy_config_intr,
265 + .config_intr = bcm63xx_config_intr,
266 }, {
267 /* same phy as above, with just a different OUI */
268 .phy_id = 0x002bdc00,
269 @@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
270 .config_aneg = genphy_config_aneg,
271 .read_status = genphy_read_status,
272 .ack_interrupt = bcm_phy_ack_intr,
273 - .config_intr = bcm_phy_config_intr,
274 + .config_intr = bcm63xx_config_intr,
275 } };
276
277 module_phy_driver(bcm63xx_driver);
278 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
279 index db6acecabeaa..18402d79539e 100644
280 --- a/drivers/net/tun.c
281 +++ b/drivers/net/tun.c
282 @@ -1374,7 +1374,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
283 return -EINVAL;
284
285 ret = virtio_net_hdr_from_skb(skb, &gso,
286 - tun_is_little_endian(tun));
287 + tun_is_little_endian(tun), true);
288 if (ret) {
289 struct skb_shared_info *sinfo = skb_shinfo(skb);
290 pr_err("unexpected GSO type: "
291 diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
292 index dd623f674487..b82be816256c 100644
293 --- a/drivers/net/usb/cdc_ether.c
294 +++ b/drivers/net/usb/cdc_ether.c
295 @@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
296 #define SAMSUNG_VENDOR_ID 0x04e8
297 #define LENOVO_VENDOR_ID 0x17ef
298 #define NVIDIA_VENDOR_ID 0x0955
299 +#define HP_VENDOR_ID 0x03f0
300
301 static const struct usb_device_id products[] = {
302 /* BLACKLIST !!
303 @@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
304 .driver_info = 0,
305 },
306
307 +/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
308 +{
309 + USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
310 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
311 + .driver_info = 0,
312 +},
313 +
314 /* AnyDATA ADU960S - handled by qmi_wwan */
315 {
316 USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
317 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
318 index 6fe1cdb0174f..24d5272cdce5 100644
319 --- a/drivers/net/usb/qmi_wwan.c
320 +++ b/drivers/net/usb/qmi_wwan.c
321 @@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
322 USB_CDC_PROTO_NONE),
323 .driver_info = (unsigned long)&qmi_wwan_info,
324 },
325 + { /* HP lt2523 (Novatel E371) */
326 + USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
327 + USB_CLASS_COMM,
328 + USB_CDC_SUBCLASS_ETHERNET,
329 + USB_CDC_PROTO_NONE),
330 + .driver_info = (unsigned long)&qmi_wwan_info,
331 + },
332 { /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
333 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
334 .driver_info = (unsigned long)&qmi_wwan_info,
335 diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
336 index 4b5cb162442b..90b426c5ffce 100644
337 --- a/drivers/net/usb/r8152.c
338 +++ b/drivers/net/usb/r8152.c
339 @@ -32,7 +32,7 @@
340 #define NETNEXT_VERSION "08"
341
342 /* Information for net */
343 -#define NET_VERSION "6"
344 +#define NET_VERSION "7"
345
346 #define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
347 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
348 @@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
349 u8 checksum = CHECKSUM_NONE;
350 u32 opts2, opts3;
351
352 - if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
353 + if (!(tp->netdev->features & NETIF_F_RXCSUM))
354 goto return_result;
355
356 opts2 = le32_to_cpu(rx_desc->opts2);
357 @@ -3572,6 +3572,8 @@ static bool delay_autosuspend(struct r8152 *tp)
358 */
359 if (!sw_linking && tp->rtl_ops.in_nway(tp))
360 return true;
361 + else if (!skb_queue_empty(&tp->tx_queue))
362 + return true;
363 else
364 return false;
365 }
366 @@ -4358,6 +4360,11 @@ static int rtl8152_probe(struct usb_interface *intf,
367 NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
368 NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
369
370 + if (tp->version == RTL_VER_01) {
371 + netdev->features &= ~NETIF_F_RXCSUM;
372 + netdev->hw_features &= ~NETIF_F_RXCSUM;
373 + }
374 +
375 netdev->ethtool_ops = &ops;
376 netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
377
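RTL_VER_01 hardware cannot validate receive checksums, so probe now strips
NETIF_F_RXCSUM from both features and hw_features (keeping ethtool from turning
it back on), and r8152_rx_csum() keys off the feature bit instead of a
hard-coded version list. A sketch of the two halves from the hunks above:

    /* Probe: advertise RX checksumming only where the chip supports it. */
    if (tp->version == RTL_VER_01) {
            netdev->features &= ~NETIF_F_RXCSUM;
            netdev->hw_features &= ~NETIF_F_RXCSUM;
    }

    /* RX path: honour the (possibly user-toggled) feature flag. */
    if (!(tp->netdev->features & NETIF_F_RXCSUM))
            goto return_result;
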
378 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
379 index cbf1c613c67a..51fc0c33a62f 100644
380 --- a/drivers/net/virtio_net.c
381 +++ b/drivers/net/virtio_net.c
382 @@ -840,7 +840,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
383 hdr = skb_vnet_hdr(skb);
384
385 if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
386 - virtio_is_little_endian(vi->vdev)))
387 + virtio_is_little_endian(vi->vdev), false))
388 BUG();
389
390 if (vi->mergeable_rx_bufs)
391 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
392 index 2ba01ca02c9c..0fafaa9d903b 100644
393 --- a/drivers/net/vxlan.c
394 +++ b/drivers/net/vxlan.c
395 @@ -2887,7 +2887,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
396 memcpy(&vxlan->cfg, conf, sizeof(*conf));
397 if (!vxlan->cfg.dst_port) {
398 if (conf->flags & VXLAN_F_GPE)
399 - vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
400 + vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
401 else
402 vxlan->cfg.dst_port = default_port;
403 }
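vxlan->cfg.dst_port is stored in network byte order, so assigning the raw
host-order constant would select the wrong port on little-endian machines. Any
literal written to such a field needs an explicit conversion:

    /* cfg.dst_port is big-endian on the wire. On a little-endian host
     * the raw constant 4790 (0x12b6) would come out as port 46610
     * (0xb612), so the literal must pass through htons().
     */
    vxlan->cfg.dst_port = htons(4790);  /* IANA-assigned VXLAN-GPE port */
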
404 diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
405 index 5050056a0b06..9f06a211e157 100644
406 --- a/fs/xfs/libxfs/xfs_alloc.c
407 +++ b/fs/xfs/libxfs/xfs_alloc.c
408 @@ -95,10 +95,7 @@ unsigned int
409 xfs_alloc_set_aside(
410 struct xfs_mount *mp)
411 {
412 - unsigned int blocks;
413 -
414 - blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
415 - return blocks;
416 + return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
417 }
418
419 /*
420 @@ -365,36 +362,12 @@ xfs_alloc_fix_len(
421 return;
422 ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
423 ASSERT(rlen % args->prod == args->mod);
424 + ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
425 + rlen + args->minleft);
426 args->len = rlen;
427 }
428
429 /*
430 - * Fix up length if there is too little space left in the a.g.
431 - * Return 1 if ok, 0 if too little, should give up.
432 - */
433 -STATIC int
434 -xfs_alloc_fix_minleft(
435 - xfs_alloc_arg_t *args) /* allocation argument structure */
436 -{
437 - xfs_agf_t *agf; /* a.g. freelist header */
438 - int diff; /* free space difference */
439 -
440 - if (args->minleft == 0)
441 - return 1;
442 - agf = XFS_BUF_TO_AGF(args->agbp);
443 - diff = be32_to_cpu(agf->agf_freeblks)
444 - - args->len - args->minleft;
445 - if (diff >= 0)
446 - return 1;
447 - args->len += diff; /* shrink the allocated space */
448 - /* casts to (int) catch length underflows */
449 - if ((int)args->len >= (int)args->minlen)
450 - return 1;
451 - args->agbno = NULLAGBLOCK;
452 - return 0;
453 -}
454 -
455 -/*
456 * Update the two btrees, logically removing from freespace the extent
457 * starting at rbno, rlen blocks. The extent is contained within the
458 * actual (current) free extent fbno for flen blocks.
459 @@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
460 xfs_alloc_arg_t *args) /* argument structure for allocation */
461 {
462 int error=0;
463 - xfs_extlen_t reservation;
464 - xfs_extlen_t oldmax;
465
466 ASSERT(args->minlen > 0);
467 ASSERT(args->maxlen > 0);
468 @@ -699,20 +670,6 @@ xfs_alloc_ag_vextent(
469 ASSERT(args->alignment > 0);
470
471 /*
472 - * Clamp maxlen to the amount of free space minus any reservations
473 - * that have been made.
474 - */
475 - oldmax = args->maxlen;
476 - reservation = xfs_ag_resv_needed(args->pag, args->resv);
477 - if (args->maxlen > args->pag->pagf_freeblks - reservation)
478 - args->maxlen = args->pag->pagf_freeblks - reservation;
479 - if (args->maxlen == 0) {
480 - args->agbno = NULLAGBLOCK;
481 - args->maxlen = oldmax;
482 - return 0;
483 - }
484 -
485 - /*
486 * Branch to correct routine based on the type.
487 */
488 args->wasfromfl = 0;
489 @@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
490 /* NOTREACHED */
491 }
492
493 - args->maxlen = oldmax;
494 -
495 if (error || args->agbno == NULLAGBLOCK)
496 return error;
497
498 @@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
499 args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
500 - args->agbno;
501 xfs_alloc_fix_len(args);
502 - if (!xfs_alloc_fix_minleft(args))
503 - goto not_found;
504 -
505 ASSERT(args->agbno + args->len <= tend);
506
507 /*
508 @@ -1149,12 +1101,7 @@ xfs_alloc_ag_vextent_near(
509 XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
510 ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
511 args->len = blen;
512 - if (!xfs_alloc_fix_minleft(args)) {
513 - xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
514 - trace_xfs_alloc_near_nominleft(args);
515 - return 0;
516 - }
517 - blen = args->len;
518 +
519 /*
520 * We are allocating starting at bnew for blen blocks.
521 */
522 @@ -1346,12 +1293,6 @@ xfs_alloc_ag_vextent_near(
523 */
524 args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
525 xfs_alloc_fix_len(args);
526 - if (!xfs_alloc_fix_minleft(args)) {
527 - trace_xfs_alloc_near_nominleft(args);
528 - xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
529 - xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
530 - return 0;
531 - }
532 rlen = args->len;
533 (void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
534 args->datatype, ltbnoa, ltlena, &ltnew);
535 @@ -1553,8 +1494,6 @@ xfs_alloc_ag_vextent_size(
536 }
537 xfs_alloc_fix_len(args);
538
539 - if (!xfs_alloc_fix_minleft(args))
540 - goto out_nominleft;
541 rlen = args->len;
542 XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
543 /*
544 @@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
545 int flags)
546 {
547 struct xfs_perag *pag = args->pag;
548 - xfs_extlen_t longest;
549 + xfs_extlen_t alloc_len, longest;
550 xfs_extlen_t reservation; /* blocks that are still reserved */
551 int available;
552
553 @@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
554 reservation = xfs_ag_resv_needed(pag, args->resv);
555
556 /* do we have enough contiguous free space for the allocation? */
557 + alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
558 longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
559 reservation);
560 - if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
561 + if (longest < alloc_len)
562 return false;
563
564 /* do we have enough free space remaining for the allocation? */
565 available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
566 - reservation - min_free - args->total);
567 - if (available < (int)args->minleft || available <= 0)
568 + reservation - min_free - args->minleft);
569 + if (available < (int)max(args->total, alloc_len))
570 return false;
571
572 + /*
573 + * Clamp maxlen to the amount of free space available for the actual
574 + * extent allocation.
575 + */
576 + if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
577 + args->maxlen = available;
578 + ASSERT(args->maxlen > 0);
579 + ASSERT(args->maxlen >= args->minlen);
580 + }
581 +
582 return true;
583 }
584
585 @@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
586 }
587
588 need = xfs_alloc_min_freelist(mp, pag);
589 - if (!xfs_alloc_space_available(args, need, flags))
590 + if (!xfs_alloc_space_available(args, need, flags |
591 + XFS_ALLOC_FLAG_CHECK))
592 goto out_agbp_relse;
593
594 /*
595 @@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
596 xfs_agblock_t agsize; /* allocation group size */
597 int error;
598 int flags; /* XFS_ALLOC_FLAG_... locking flags */
599 - xfs_extlen_t minleft;/* minimum left value, temp copy */
600 xfs_mount_t *mp; /* mount structure pointer */
601 xfs_agnumber_t sagno; /* starting allocation group number */
602 xfs_alloctype_t type; /* input allocation type */
603 int bump_rotor = 0;
604 - int no_min = 0;
605 xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
606
607 mp = args->mp;
608 @@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
609 trace_xfs_alloc_vextent_badargs(args);
610 return 0;
611 }
612 - minleft = args->minleft;
613
614 switch (type) {
615 case XFS_ALLOCTYPE_THIS_AG:
616 @@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
617 */
618 args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
619 args->pag = xfs_perag_get(mp, args->agno);
620 - args->minleft = 0;
621 error = xfs_alloc_fix_freelist(args, 0);
622 - args->minleft = minleft;
623 if (error) {
624 trace_xfs_alloc_vextent_nofix(args);
625 goto error0;
626 @@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
627 */
628 for (;;) {
629 args->pag = xfs_perag_get(mp, args->agno);
630 - if (no_min) args->minleft = 0;
631 error = xfs_alloc_fix_freelist(args, flags);
632 - args->minleft = minleft;
633 if (error) {
634 trace_xfs_alloc_vextent_nofix(args);
635 goto error0;
636 @@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
637 * or switch to non-trylock mode.
638 */
639 if (args->agno == sagno) {
640 - if (no_min == 1) {
641 + if (flags == 0) {
642 args->agbno = NULLAGBLOCK;
643 trace_xfs_alloc_vextent_allfailed(args);
644 break;
645 }
646 - if (flags == 0) {
647 - no_min = 1;
648 - } else {
649 - flags = 0;
650 - if (type == XFS_ALLOCTYPE_START_BNO) {
651 - args->agbno = XFS_FSB_TO_AGBNO(mp,
652 - args->fsbno);
653 - args->type = XFS_ALLOCTYPE_NEAR_BNO;
654 - }
655 +
656 + flags = 0;
657 + if (type == XFS_ALLOCTYPE_START_BNO) {
658 + args->agbno = XFS_FSB_TO_AGBNO(mp,
659 + args->fsbno);
660 + args->type = XFS_ALLOCTYPE_NEAR_BNO;
661 }
662 }
663 xfs_perag_put(args->pag);
664 diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
665 index 7c404a6b0ae3..1d0f48a501a3 100644
666 --- a/fs/xfs/libxfs/xfs_alloc.h
667 +++ b/fs/xfs/libxfs/xfs_alloc.h
668 @@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
669 #define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
670 #define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
671 #define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
672 -
673 +#define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */
674
675 /*
676 * Argument structure for xfs_alloc routines.
677 diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
678 index af1ecb19121e..6622d46ddec3 100644
679 --- a/fs/xfs/libxfs/xfs_attr.c
680 +++ b/fs/xfs/libxfs/xfs_attr.c
681 @@ -131,9 +131,6 @@ xfs_attr_get(
682 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
683 return -EIO;
684
685 - if (!xfs_inode_hasattr(ip))
686 - return -ENOATTR;
687 -
688 error = xfs_attr_args_init(&args, ip, name, flags);
689 if (error)
690 return error;
691 @@ -392,9 +389,6 @@ xfs_attr_remove(
692 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
693 return -EIO;
694
695 - if (!xfs_inode_hasattr(dp))
696 - return -ENOATTR;
697 -
698 error = xfs_attr_args_init(&args, dp, name, flags);
699 if (error)
700 return error;
701 diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
702 index 89d727b659fc..f52fd63fce19 100644
703 --- a/fs/xfs/libxfs/xfs_bmap.c
704 +++ b/fs/xfs/libxfs/xfs_bmap.c
705 @@ -3720,7 +3720,7 @@ xfs_bmap_btalloc(
706 align = xfs_get_cowextsz_hint(ap->ip);
707 else if (xfs_alloc_is_userdata(ap->datatype))
708 align = xfs_get_extsz_hint(ap->ip);
709 - if (unlikely(align)) {
710 + if (align) {
711 error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
712 align, 0, ap->eof, 0, ap->conv,
713 &ap->offset, &ap->length);
714 @@ -3792,7 +3792,7 @@ xfs_bmap_btalloc(
715 args.minlen = ap->minlen;
716 }
717 /* apply extent size hints if obtained earlier */
718 - if (unlikely(align)) {
719 + if (align) {
720 args.prod = align;
721 if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
722 args.mod = (xfs_extlen_t)(args.prod - args.mod);
723 @@ -3903,7 +3903,6 @@ xfs_bmap_btalloc(
724 args.fsbno = 0;
725 args.type = XFS_ALLOCTYPE_FIRST_AG;
726 args.total = ap->minlen;
727 - args.minleft = 0;
728 if ((error = xfs_alloc_vextent(&args)))
729 return error;
730 ap->dfops->dop_low = true;
731 @@ -4437,8 +4436,6 @@ xfs_bmapi_allocate(
732 if (error)
733 return error;
734
735 - if (bma->dfops->dop_low)
736 - bma->minleft = 0;
737 if (bma->cur)
738 bma->cur->bc_private.b.firstblock = *bma->firstblock;
739 if (bma->blkno == NULLFSBLOCK)
740 @@ -4610,8 +4607,6 @@ xfs_bmapi_write(
741 int n; /* current extent index */
742 xfs_fileoff_t obno; /* old block number (offset) */
743 int whichfork; /* data or attr fork */
744 - char inhole; /* current location is hole in file */
745 - char wasdelay; /* old extent was delayed */
746
747 #ifdef DEBUG
748 xfs_fileoff_t orig_bno; /* original block number value */
749 @@ -4697,22 +4692,44 @@ xfs_bmapi_write(
750 bma.firstblock = firstblock;
751
752 while (bno < end && n < *nmap) {
753 - inhole = eof || bma.got.br_startoff > bno;
754 - wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
755 + bool need_alloc = false, wasdelay = false;
756
757 - /*
758 - * Make sure we only reflink into a hole.
759 - */
760 - if (flags & XFS_BMAPI_REMAP)
761 - ASSERT(inhole);
762 - if (flags & XFS_BMAPI_COWFORK)
763 - ASSERT(!inhole);
764 + /* in hole or beyond EOF? */
765 + if (eof || bma.got.br_startoff > bno) {
766 + if (flags & XFS_BMAPI_DELALLOC) {
767 + /*
768 + * For the COW fork we can reasonably get a
769 + * request for converting an extent that races
770 + * with other threads already having converted
771 + * part of it, as there converting COW to
772 + * regular blocks is not protected using the
773 + * IOLOCK.
774 + */
775 + ASSERT(flags & XFS_BMAPI_COWFORK);
776 + if (!(flags & XFS_BMAPI_COWFORK)) {
777 + error = -EIO;
778 + goto error0;
779 + }
780 +
781 + if (eof || bno >= end)
782 + break;
783 + } else {
784 + need_alloc = true;
785 + }
786 + } else {
787 + /*
788 + * Make sure we only reflink into a hole.
789 + */
790 + ASSERT(!(flags & XFS_BMAPI_REMAP));
791 + if (isnullstartblock(bma.got.br_startblock))
792 + wasdelay = true;
793 + }
794
795 /*
796 * First, deal with the hole before the allocated space
797 * that we found, if any.
798 */
799 - if (inhole || wasdelay) {
800 + if (need_alloc || wasdelay) {
801 bma.eof = eof;
802 bma.conv = !!(flags & XFS_BMAPI_CONVERT);
803 bma.wasdel = wasdelay;
804 diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
805 index d6d175a4fdec..e7d40b39f18f 100644
806 --- a/fs/xfs/libxfs/xfs_bmap.h
807 +++ b/fs/xfs/libxfs/xfs_bmap.h
808 @@ -110,6 +110,9 @@ struct xfs_extent_free_item
809 /* Map something in the CoW fork. */
810 #define XFS_BMAPI_COWFORK 0x200
811
812 +/* Only convert delalloc space, don't allocate entirely new extents */
813 +#define XFS_BMAPI_DELALLOC 0x400
814 +
815 #define XFS_BMAPI_FLAGS \
816 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
817 { XFS_BMAPI_METADATA, "METADATA" }, \
818 @@ -120,7 +123,8 @@ struct xfs_extent_free_item
819 { XFS_BMAPI_CONVERT, "CONVERT" }, \
820 { XFS_BMAPI_ZERO, "ZERO" }, \
821 { XFS_BMAPI_REMAP, "REMAP" }, \
822 - { XFS_BMAPI_COWFORK, "COWFORK" }
823 + { XFS_BMAPI_COWFORK, "COWFORK" }, \
824 + { XFS_BMAPI_DELALLOC, "DELALLOC" }
825
826
827 static inline int xfs_bmapi_aflag(int w)
828 diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
829 index 049fa597ae91..f76c1693ff01 100644
830 --- a/fs/xfs/libxfs/xfs_bmap_btree.c
831 +++ b/fs/xfs/libxfs/xfs_bmap_btree.c
832 @@ -502,12 +502,11 @@ xfs_bmbt_alloc_block(
833 if (args.fsbno == NULLFSBLOCK && args.minleft) {
834 /*
835 * Could not find an AG with enough free space to satisfy
836 - * a full btree split. Try again without minleft and if
837 + * a full btree split. Try again and if
838 * successful activate the lowspace algorithm.
839 */
840 args.fsbno = 0;
841 args.type = XFS_ALLOCTYPE_FIRST_AG;
842 - args.minleft = 0;
843 error = xfs_alloc_vextent(&args);
844 if (error)
845 goto error0;
846 diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
847 index 20a96dd5af7e..7825d78d4587 100644
848 --- a/fs/xfs/libxfs/xfs_dir2.c
849 +++ b/fs/xfs/libxfs/xfs_dir2.c
850 @@ -36,21 +36,29 @@
851 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
852
853 /*
854 - * @mode, if set, indicates that the type field needs to be set up.
855 - * This uses the transformation from file mode to DT_* as defined in linux/fs.h
856 - * for file type specification. This will be propagated into the directory
857 - * structure if appropriate for the given operation and filesystem config.
858 + * Convert inode mode to directory entry filetype
859 */
860 -const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
861 - [0] = XFS_DIR3_FT_UNKNOWN,
862 - [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
863 - [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
864 - [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
865 - [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
866 - [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
867 - [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
868 - [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
869 -};
870 +unsigned char xfs_mode_to_ftype(int mode)
871 +{
872 + switch (mode & S_IFMT) {
873 + case S_IFREG:
874 + return XFS_DIR3_FT_REG_FILE;
875 + case S_IFDIR:
876 + return XFS_DIR3_FT_DIR;
877 + case S_IFCHR:
878 + return XFS_DIR3_FT_CHRDEV;
879 + case S_IFBLK:
880 + return XFS_DIR3_FT_BLKDEV;
881 + case S_IFIFO:
882 + return XFS_DIR3_FT_FIFO;
883 + case S_IFSOCK:
884 + return XFS_DIR3_FT_SOCK;
885 + case S_IFLNK:
886 + return XFS_DIR3_FT_SYMLINK;
887 + default:
888 + return XFS_DIR3_FT_UNKNOWN;
889 + }
890 +}
891
892 /*
893 * ASCII case-insensitive (ie. A-Z) support for directories that was
894 @@ -631,7 +639,8 @@ xfs_dir2_isblock(
895 if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
896 return rval;
897 rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
898 - ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
899 + if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
900 + return -EFSCORRUPTED;
901 *vp = rval;
902 return 0;
903 }
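The old lookup table was indexed with (mode & S_IFMT) >> S_SHIFT, so a corrupted
on-disk mode could read past the end of the array; the switch statement instead
maps every unexpected value to XFS_DIR3_FT_UNKNOWN, which callers can treat as
corruption. A typical caller shape (mirroring the xfs_iops.c hunk later in this
patch):

    namep->type = xfs_mode_to_ftype(mode);
    if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
            return -EFSCORRUPTED;   /* bogus mode: report corruption */
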
904 diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
905 index becc926c3e3d..ae0d55bf6500 100644
906 --- a/fs/xfs/libxfs/xfs_dir2.h
907 +++ b/fs/xfs/libxfs/xfs_dir2.h
908 @@ -18,6 +18,9 @@
909 #ifndef __XFS_DIR2_H__
910 #define __XFS_DIR2_H__
911
912 +#include "xfs_da_format.h"
913 +#include "xfs_da_btree.h"
914 +
915 struct xfs_defer_ops;
916 struct xfs_da_args;
917 struct xfs_inode;
918 @@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
919 extern struct xfs_name xfs_name_dotdot;
920
921 /*
922 - * directory filetype conversion tables.
923 + * Convert inode mode to directory entry filetype
924 */
925 -#define S_SHIFT 12
926 -extern const unsigned char xfs_mode_to_ftype[];
927 +extern unsigned char xfs_mode_to_ftype(int mode);
928
929 /*
930 * directory operations vector for encode/decode routines
931 diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
932 index c906e50515f0..37ee7f01a35d 100644
933 --- a/fs/xfs/libxfs/xfs_inode_buf.c
934 +++ b/fs/xfs/libxfs/xfs_inode_buf.c
935 @@ -29,6 +29,7 @@
936 #include "xfs_icache.h"
937 #include "xfs_trans.h"
938 #include "xfs_ialloc.h"
939 +#include "xfs_dir2.h"
940
941 /*
942 * Check that none of the inode's in the buffer have a next
943 @@ -386,6 +387,7 @@ xfs_dinode_verify(
944 struct xfs_inode *ip,
945 struct xfs_dinode *dip)
946 {
947 + uint16_t mode;
948 uint16_t flags;
949 uint64_t flags2;
950
951 @@ -396,8 +398,12 @@ xfs_dinode_verify(
952 if (be64_to_cpu(dip->di_size) & (1ULL << 63))
953 return false;
954
955 - /* No zero-length symlinks. */
956 - if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
957 + mode = be16_to_cpu(dip->di_mode);
958 + if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
959 + return false;
960 +
961 + /* No zero-length symlinks/dirs. */
962 + if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
963 return false;
964
965 /* only version 3 or greater inodes are extensively verified here */
966 diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
967 index 2580262e4ea0..584ec896a533 100644
968 --- a/fs/xfs/libxfs/xfs_sb.c
969 +++ b/fs/xfs/libxfs/xfs_sb.c
970 @@ -242,7 +242,7 @@ xfs_mount_validate_sb(
971 sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
972 sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
973 sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
974 - sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG ||
975 + sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
976 sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
977 sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
978 sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
979 diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
980 index 0670a8bd5818..efb8ccd6bbf2 100644
981 --- a/fs/xfs/xfs_bmap_util.c
982 +++ b/fs/xfs/xfs_bmap_util.c
983 @@ -528,7 +528,6 @@ xfs_getbmap(
984 xfs_bmbt_irec_t *map; /* buffer for user's data */
985 xfs_mount_t *mp; /* file system mount point */
986 int nex; /* # of user extents can do */
987 - int nexleft; /* # of user extents left */
988 int subnex; /* # of bmapi's can do */
989 int nmap; /* number of map entries */
990 struct getbmapx *out; /* output structure */
991 @@ -686,10 +685,8 @@ xfs_getbmap(
992 goto out_free_map;
993 }
994
995 - nexleft = nex;
996 -
997 do {
998 - nmap = (nexleft > subnex) ? subnex : nexleft;
999 + nmap = (nex > subnex) ? subnex : nex;
1000 error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
1001 XFS_BB_TO_FSB(mp, bmv->bmv_length),
1002 map, &nmap, bmapi_flags);
1003 @@ -697,8 +694,8 @@ xfs_getbmap(
1004 goto out_free_map;
1005 ASSERT(nmap <= subnex);
1006
1007 - for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
1008 - cur_ext < bmv->bmv_count; i++) {
1009 + for (i = 0; i < nmap && bmv->bmv_length &&
1010 + cur_ext < bmv->bmv_count - 1; i++) {
1011 out[cur_ext].bmv_oflags = 0;
1012 if (map[i].br_state == XFS_EXT_UNWRITTEN)
1013 out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
1014 @@ -760,16 +757,27 @@ xfs_getbmap(
1015 continue;
1016 }
1017
1018 + /*
1019 + * In order to report shared extents accurately,
1020 + * we report each distinct shared/unshared part
1021 + * of a single bmbt record using multiple bmap
1022 + * extents. To make that happen, we iterate the
1023 + * same map array item multiple times, each
1024 + * time trimming out the subextent that we just
1025 + * reported.
1026 + *
1027 + * Because of this, we must check the out array
1028 + * index (cur_ext) directly against bmv_count-1
1029 + * to avoid overflows.
1030 + */
1031 if (inject_map.br_startblock != NULLFSBLOCK) {
1032 map[i] = inject_map;
1033 i--;
1034 - } else
1035 - nexleft--;
1036 + }
1037 bmv->bmv_entries++;
1038 cur_ext++;
1039 }
1040 - } while (nmap && nexleft && bmv->bmv_length &&
1041 - cur_ext < bmv->bmv_count);
1042 + } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
1043
1044 out_free_map:
1045 kmem_free(map);
1046 diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
1047 index b5b9bffe3520..d7a67d7fbc7f 100644
1048 --- a/fs/xfs/xfs_buf.c
1049 +++ b/fs/xfs/xfs_buf.c
1050 @@ -423,6 +423,7 @@ xfs_buf_allocate_memory(
1051 out_free_pages:
1052 for (i = 0; i < bp->b_page_count; i++)
1053 __free_page(bp->b_pages[i]);
1054 + bp->b_flags &= ~_XBF_PAGES;
1055 return error;
1056 }
1057
1058 diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
1059 index 7a30b8f11db7..9d06cc30e875 100644
1060 --- a/fs/xfs/xfs_dquot.c
1061 +++ b/fs/xfs/xfs_dquot.c
1062 @@ -710,6 +710,10 @@ xfs_dq_get_next_id(
1063 /* Simple advance */
1064 next_id = *id + 1;
1065
1066 + /* If we'd wrap past the max ID, stop */
1067 + if (next_id < *id)
1068 + return -ENOENT;
1069 +
1070 /* If new ID is within the current chunk, advancing it sufficed */
1071 if (next_id % mp->m_quotainfo->qi_dqperchunk) {
1072 *id = next_id;
1073 diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
1074 index 15a83813b708..cdc6bdd495be 100644
1075 --- a/fs/xfs/xfs_iomap.c
1076 +++ b/fs/xfs/xfs_iomap.c
1077 @@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
1078 xfs_trans_t *tp;
1079 int nimaps;
1080 int error = 0;
1081 - int flags = 0;
1082 + int flags = XFS_BMAPI_DELALLOC;
1083 int nres;
1084
1085 if (whichfork == XFS_COW_FORK)
1086 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
1087 index 405a65cd9d6b..f5e0f608e245 100644
1088 --- a/fs/xfs/xfs_iops.c
1089 +++ b/fs/xfs/xfs_iops.c
1090 @@ -98,12 +98,27 @@ xfs_init_security(
1091 static void
1092 xfs_dentry_to_name(
1093 struct xfs_name *namep,
1094 + struct dentry *dentry)
1095 +{
1096 + namep->name = dentry->d_name.name;
1097 + namep->len = dentry->d_name.len;
1098 + namep->type = XFS_DIR3_FT_UNKNOWN;
1099 +}
1100 +
1101 +static int
1102 +xfs_dentry_mode_to_name(
1103 + struct xfs_name *namep,
1104 struct dentry *dentry,
1105 int mode)
1106 {
1107 namep->name = dentry->d_name.name;
1108 namep->len = dentry->d_name.len;
1109 - namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
1110 + namep->type = xfs_mode_to_ftype(mode);
1111 +
1112 + if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
1113 + return -EFSCORRUPTED;
1114 +
1115 + return 0;
1116 }
1117
1118 STATIC void
1119 @@ -119,7 +134,7 @@ xfs_cleanup_inode(
1120 * xfs_init_security we must back out.
1121 * ENOSPC can hit here, among other things.
1122 */
1123 - xfs_dentry_to_name(&teardown, dentry, 0);
1124 + xfs_dentry_to_name(&teardown, dentry);
1125
1126 xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
1127 }
1128 @@ -154,8 +169,12 @@ xfs_generic_create(
1129 if (error)
1130 return error;
1131
1132 + /* Verify mode is valid also for tmpfile case */
1133 + error = xfs_dentry_mode_to_name(&name, dentry, mode);
1134 + if (unlikely(error))
1135 + goto out_free_acl;
1136 +
1137 if (!tmpfile) {
1138 - xfs_dentry_to_name(&name, dentry, mode);
1139 error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
1140 } else {
1141 error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
1142 @@ -248,7 +267,7 @@ xfs_vn_lookup(
1143 if (dentry->d_name.len >= MAXNAMELEN)
1144 return ERR_PTR(-ENAMETOOLONG);
1145
1146 - xfs_dentry_to_name(&name, dentry, 0);
1147 + xfs_dentry_to_name(&name, dentry);
1148 error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
1149 if (unlikely(error)) {
1150 if (unlikely(error != -ENOENT))
1151 @@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
1152 if (dentry->d_name.len >= MAXNAMELEN)
1153 return ERR_PTR(-ENAMETOOLONG);
1154
1155 - xfs_dentry_to_name(&xname, dentry, 0);
1156 + xfs_dentry_to_name(&xname, dentry);
1157 error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
1158 if (unlikely(error)) {
1159 if (unlikely(error != -ENOENT))
1160 @@ -310,7 +329,9 @@ xfs_vn_link(
1161 struct xfs_name name;
1162 int error;
1163
1164 - xfs_dentry_to_name(&name, dentry, inode->i_mode);
1165 + error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
1166 + if (unlikely(error))
1167 + return error;
1168
1169 error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
1170 if (unlikely(error))
1171 @@ -329,7 +350,7 @@ xfs_vn_unlink(
1172 struct xfs_name name;
1173 int error;
1174
1175 - xfs_dentry_to_name(&name, dentry, 0);
1176 + xfs_dentry_to_name(&name, dentry);
1177
1178 error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
1179 if (error)
1180 @@ -359,7 +380,9 @@ xfs_vn_symlink(
1181
1182 mode = S_IFLNK |
1183 (irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
1184 - xfs_dentry_to_name(&name, dentry, mode);
1185 + error = xfs_dentry_mode_to_name(&name, dentry, mode);
1186 + if (unlikely(error))
1187 + goto out;
1188
1189 error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
1190 if (unlikely(error))
1191 @@ -395,6 +418,7 @@ xfs_vn_rename(
1192 {
1193 struct inode *new_inode = d_inode(ndentry);
1194 int omode = 0;
1195 + int error;
1196 struct xfs_name oname;
1197 struct xfs_name nname;
1198
1199 @@ -405,8 +429,14 @@ xfs_vn_rename(
1200 if (flags & RENAME_EXCHANGE)
1201 omode = d_inode(ndentry)->i_mode;
1202
1203 - xfs_dentry_to_name(&oname, odentry, omode);
1204 - xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
1205 + error = xfs_dentry_mode_to_name(&oname, odentry, omode);
1206 + if (omode && unlikely(error))
1207 + return error;
1208 +
1209 + error = xfs_dentry_mode_to_name(&nname, ndentry,
1210 + d_inode(odentry)->i_mode);
1211 + if (unlikely(error))
1212 + return error;
1213
1214 return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
1215 XFS_I(ndir), &nname,
1216 diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
1217 index 68640fb63a54..1455b25205a8 100644
1218 --- a/fs/xfs/xfs_linux.h
1219 +++ b/fs/xfs/xfs_linux.h
1220 @@ -330,11 +330,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
1221 }
1222
1223 #define ASSERT_ALWAYS(expr) \
1224 - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
1225 + (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
1226
1227 #ifdef DEBUG
1228 #define ASSERT(expr) \
1229 - (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
1230 + (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
1231
1232 #ifndef STATIC
1233 # define STATIC noinline
1234 @@ -345,7 +345,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
1235 #ifdef XFS_WARN
1236
1237 #define ASSERT(expr) \
1238 - (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
1239 + (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
1240
1241 #ifndef STATIC
1242 # define STATIC static noinline
1243 diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
1244 index 3b74fa011bb1..4017aa967331 100644
1245 --- a/fs/xfs/xfs_log.c
1246 +++ b/fs/xfs/xfs_log.c
1247 @@ -3324,12 +3324,8 @@ xfs_log_force(
1248 xfs_mount_t *mp,
1249 uint flags)
1250 {
1251 - int error;
1252 -
1253 trace_xfs_log_force(mp, 0, _RET_IP_);
1254 - error = _xfs_log_force(mp, flags, NULL);
1255 - if (error)
1256 - xfs_warn(mp, "%s: error %d returned.", __func__, error);
1257 + _xfs_log_force(mp, flags, NULL);
1258 }
1259
1260 /*
1261 @@ -3473,12 +3469,8 @@ xfs_log_force_lsn(
1262 xfs_lsn_t lsn,
1263 uint flags)
1264 {
1265 - int error;
1266 -
1267 trace_xfs_log_force(mp, lsn, _RET_IP_);
1268 - error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
1269 - if (error)
1270 - xfs_warn(mp, "%s: error %d returned.", __func__, error);
1271 + _xfs_log_force_lsn(mp, lsn, flags, NULL);
1272 }
1273
1274 /*
1275 diff --git a/include/linux/tcp.h b/include/linux/tcp.h
1276 index a17ae7b85218..647532b0eb03 100644
1277 --- a/include/linux/tcp.h
1278 +++ b/include/linux/tcp.h
1279 @@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
1280
1281 /* TCP Fast Open Cookie as stored in memory */
1282 struct tcp_fastopen_cookie {
1283 + union {
1284 + u8 val[TCP_FASTOPEN_COOKIE_MAX];
1285 +#if IS_ENABLED(CONFIG_IPV6)
1286 + struct in6_addr addr;
1287 +#endif
1288 + };
1289 s8 len;
1290 - u8 val[TCP_FASTOPEN_COOKIE_MAX];
1291 bool exp; /* In RFC6994 experimental option format */
1292 };
1293
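Placing val and an in6_addr in a union gives the cookie buffer the alignment an
in6_addr needs, avoiding the unaligned accesses the old pointer cast could
produce on strict-alignment architectures. The consumer side (see the
tcp_fastopen.c hunk later in this patch) reduces to:

    struct in6_addr *buf = &tmp.addr;   /* was: (struct in6_addr *)tmp.val */
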
1294 diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
1295 index 1c912f85e041..f211c348e592 100644
1296 --- a/include/linux/virtio_net.h
1297 +++ b/include/linux/virtio_net.h
1298 @@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
1299
1300 static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
1301 struct virtio_net_hdr *hdr,
1302 - bool little_endian)
1303 + bool little_endian,
1304 + bool has_data_valid)
1305 {
1306 memset(hdr, 0, sizeof(*hdr));
1307
1308 @@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
1309 skb_checksum_start_offset(skb));
1310 hdr->csum_offset = __cpu_to_virtio16(little_endian,
1311 skb->csum_offset);
1312 - } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1313 + } else if (has_data_valid &&
1314 + skb->ip_summed == CHECKSUM_UNNECESSARY) {
1315 hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
1316 } /* else everything is zero */
1317
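VIRTIO_NET_HDR_F_DATA_VALID means "the other side already verified this
checksum"; the virtio spec reserves it for the device side, so it must never be
set on a guest's transmit path. The helper therefore makes each caller state its
intent, as in the call sites patched earlier in this file:

    /* Device-to-guest receive paths may advertise validated checksums: */
    virtio_net_hdr_from_skb(skb, &vnet_hdr, macvtap_is_little_endian(q), true);

    /* Guest transmit path must not: */
    virtio_net_hdr_from_skb(skb, &hdr->hdr,
                            virtio_is_little_endian(vi->vdev), false);
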
1318 diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
1319 index ea3f80f58fd6..fc7c0dbdd1ff 100644
1320 --- a/include/net/lwtunnel.h
1321 +++ b/include/net/lwtunnel.h
1322 @@ -43,13 +43,12 @@ struct lwtunnel_encap_ops {
1323 int (*get_encap_size)(struct lwtunnel_state *lwtstate);
1324 int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
1325 int (*xmit)(struct sk_buff *skb);
1326 +
1327 + struct module *owner;
1328 };
1329
1330 #ifdef CONFIG_LWTUNNEL
1331 -static inline void lwtstate_free(struct lwtunnel_state *lws)
1332 -{
1333 - kfree(lws);
1334 -}
1335 +void lwtstate_free(struct lwtunnel_state *lws);
1336
1337 static inline struct lwtunnel_state *
1338 lwtstate_get(struct lwtunnel_state *lws)
1339 @@ -106,6 +105,8 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
1340 unsigned int num);
1341 int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
1342 unsigned int num);
1343 +int lwtunnel_valid_encap_type(u16 encap_type);
1344 +int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
1345 int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
1346 struct nlattr *encap,
1347 unsigned int family, const void *cfg,
1348 @@ -169,6 +170,15 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
1349 return -EOPNOTSUPP;
1350 }
1351
1352 +static inline int lwtunnel_valid_encap_type(u16 encap_type)
1353 +{
1354 + return -EOPNOTSUPP;
1355 +}
1356 +static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
1357 +{
1358 + return -EOPNOTSUPP;
1359 +}
1360 +
1361 static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
1362 struct nlattr *encap,
1363 unsigned int family, const void *cfg,
1364 diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
1365 index 655a7d4c96e1..983f0b5e14f1 100644
1366 --- a/net/ax25/ax25_subr.c
1367 +++ b/net/ax25/ax25_subr.c
1368 @@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
1369 {
1370 ax25_clear_queues(ax25);
1371
1372 - if (!sock_flag(ax25->sk, SOCK_DESTROY))
1373 + if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
1374 ax25_stop_heartbeat(ax25);
1375 ax25_stop_t1timer(ax25);
1376 ax25_stop_t2timer(ax25);
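ax25_disconnect() can be reached for control blocks that never had a socket
attached, so ax25->sk must be tested before sock_flag() dereferences it:

    if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
            ax25_stop_heartbeat(ax25);
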
1377 diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
1378 index e99037c6f7b7..04741064a173 100644
1379 --- a/net/bridge/br_netlink.c
1380 +++ b/net/bridge/br_netlink.c
1381 @@ -781,20 +781,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
1382 return 0;
1383 }
1384
1385 -static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1386 - struct nlattr *tb[], struct nlattr *data[])
1387 -{
1388 - struct net_bridge *br = netdev_priv(dev);
1389 -
1390 - if (tb[IFLA_ADDRESS]) {
1391 - spin_lock_bh(&br->lock);
1392 - br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1393 - spin_unlock_bh(&br->lock);
1394 - }
1395 -
1396 - return register_netdevice(dev);
1397 -}
1398 -
1399 static int br_port_slave_changelink(struct net_device *brdev,
1400 struct net_device *dev,
1401 struct nlattr *tb[],
1402 @@ -1093,6 +1079,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
1403 return 0;
1404 }
1405
1406 +static int br_dev_newlink(struct net *src_net, struct net_device *dev,
1407 + struct nlattr *tb[], struct nlattr *data[])
1408 +{
1409 + struct net_bridge *br = netdev_priv(dev);
1410 + int err;
1411 +
1412 + if (tb[IFLA_ADDRESS]) {
1413 + spin_lock_bh(&br->lock);
1414 + br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
1415 + spin_unlock_bh(&br->lock);
1416 + }
1417 +
1418 + err = br_changelink(dev, tb, data);
1419 + if (err)
1420 + return err;
1421 +
1422 + return register_netdevice(dev);
1423 +}
1424 +
1425 static size_t br_get_size(const struct net_device *brdev)
1426 {
1427 return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
1428 diff --git a/net/core/dev.c b/net/core/dev.c
1429 index e1d731fdc72c..df51c50927ab 100644
1430 --- a/net/core/dev.c
1431 +++ b/net/core/dev.c
1432 @@ -2815,9 +2815,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
1433 if (skb->ip_summed != CHECKSUM_NONE &&
1434 !can_checksum_protocol(features, type)) {
1435 features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1436 - } else if (illegal_highdma(skb->dev, skb)) {
1437 - features &= ~NETIF_F_SG;
1438 }
1439 + if (illegal_highdma(skb->dev, skb))
1440 + features &= ~NETIF_F_SG;
1441
1442 return features;
1443 }
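The two feature checks are independent: a device that cannot DMA from high
memory must lose NETIF_F_SG even when the checksum branch also fired, so the
else-if chaining had to go. The resulting logic:

    if (skb->ip_summed != CHECKSUM_NONE &&
        !can_checksum_protocol(features, type))
            features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
    if (illegal_highdma(skb->dev, skb))
            features &= ~NETIF_F_SG;    /* now applied unconditionally */
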
1444 diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
1445 index e5f84c26ba1a..afa64f086d87 100644
1446 --- a/net/core/lwtunnel.c
1447 +++ b/net/core/lwtunnel.c
1448 @@ -26,6 +26,7 @@
1449 #include <net/lwtunnel.h>
1450 #include <net/rtnetlink.h>
1451 #include <net/ip6_fib.h>
1452 +#include <net/nexthop.h>
1453
1454 #ifdef CONFIG_MODULES
1455
1456 @@ -65,6 +66,15 @@ EXPORT_SYMBOL(lwtunnel_state_alloc);
1457 static const struct lwtunnel_encap_ops __rcu *
1458 lwtun_encaps[LWTUNNEL_ENCAP_MAX + 1] __read_mostly;
1459
1460 +void lwtstate_free(struct lwtunnel_state *lws)
1461 +{
1462 + const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];
1463 +
1464 + kfree(lws);
1465 + module_put(ops->owner);
1466 +}
1467 +EXPORT_SYMBOL(lwtstate_free);
1468 +
1469 int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *ops,
1470 unsigned int num)
1471 {
1472 @@ -110,25 +120,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
1473 ret = -EOPNOTSUPP;
1474 rcu_read_lock();
1475 ops = rcu_dereference(lwtun_encaps[encap_type]);
1476 + if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
1477 + ret = ops->build_state(dev, encap, family, cfg, lws);
1478 + if (ret)
1479 + module_put(ops->owner);
1480 + }
1481 + rcu_read_unlock();
1482 +
1483 + return ret;
1484 +}
1485 +EXPORT_SYMBOL(lwtunnel_build_state);
1486 +
1487 +int lwtunnel_valid_encap_type(u16 encap_type)
1488 +{
1489 + const struct lwtunnel_encap_ops *ops;
1490 + int ret = -EINVAL;
1491 +
1492 + if (encap_type == LWTUNNEL_ENCAP_NONE ||
1493 + encap_type > LWTUNNEL_ENCAP_MAX)
1494 + return ret;
1495 +
1496 + rcu_read_lock();
1497 + ops = rcu_dereference(lwtun_encaps[encap_type]);
1498 + rcu_read_unlock();
1499 #ifdef CONFIG_MODULES
1500 if (!ops) {
1501 const char *encap_type_str = lwtunnel_encap_str(encap_type);
1502
1503 if (encap_type_str) {
1504 - rcu_read_unlock();
1505 + __rtnl_unlock();
1506 request_module("rtnl-lwt-%s", encap_type_str);
1507 + rtnl_lock();
1508 +
1509 rcu_read_lock();
1510 ops = rcu_dereference(lwtun_encaps[encap_type]);
1511 + rcu_read_unlock();
1512 }
1513 }
1514 #endif
1515 - if (likely(ops && ops->build_state))
1516 - ret = ops->build_state(dev, encap, family, cfg, lws);
1517 - rcu_read_unlock();
1518 + return ops ? 0 : -EOPNOTSUPP;
1519 +}
1520 +EXPORT_SYMBOL(lwtunnel_valid_encap_type);
1521
1522 - return ret;
1523 +int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
1524 +{
1525 + struct rtnexthop *rtnh = (struct rtnexthop *)attr;
1526 + struct nlattr *nla_entype;
1527 + struct nlattr *attrs;
1528 + struct nlattr *nla;
1529 + u16 encap_type;
1530 + int attrlen;
1531 +
1532 + while (rtnh_ok(rtnh, remaining)) {
1533 + attrlen = rtnh_attrlen(rtnh);
1534 + if (attrlen > 0) {
1535 + attrs = rtnh_attrs(rtnh);
1536 + nla = nla_find(attrs, attrlen, RTA_ENCAP);
1537 + nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
1538 +
1539 + if (nla_entype) {
1540 + encap_type = nla_get_u16(nla_entype);
1541 +
1542 + if (lwtunnel_valid_encap_type(encap_type) != 0)
1543 + return -EOPNOTSUPP;
1544 + }
1545 + }
1546 + rtnh = rtnh_next(rtnh, &remaining);
1547 + }
1548 +
1549 + return 0;
1550 }
1551 -EXPORT_SYMBOL(lwtunnel_build_state);
1552 +EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
1553
1554 int lwtunnel_fill_encap(struct sk_buff *skb, struct lwtunnel_state *lwtstate)
1555 {
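Each lwtunnel state now pins the module that built it: try_module_get() before
build_state(), module_put() on failure, and a matching put when the state is
freed, so an encap module cannot be unloaded under a live route. That is also
why lwtstate_free() moved out of line; reduced to its core:

    void lwtstate_free(struct lwtunnel_state *lws)
    {
            const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];

            kfree(lws);
            module_put(ops->owner); /* state gone; module may unload again */
    }
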
1556 diff --git a/net/dsa/slave.c b/net/dsa/slave.c
1557 index 30e2e21d7619..3ff9d97cf56b 100644
1558 --- a/net/dsa/slave.c
1559 +++ b/net/dsa/slave.c
1560 @@ -1201,6 +1201,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
1561 {
1562 struct dsa_slave_priv *p = netdev_priv(slave_dev);
1563
1564 + netif_device_detach(slave_dev);
1565 +
1566 if (p->phy) {
1567 phy_stop(p->phy);
1568 p->old_pause = -1;
1569 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
1570 index 3e4f183fc241..5b03d7f3b255 100644
1571 --- a/net/ipv4/fib_frontend.c
1572 +++ b/net/ipv4/fib_frontend.c
1573 @@ -46,6 +46,7 @@
1574 #include <net/rtnetlink.h>
1575 #include <net/xfrm.h>
1576 #include <net/l3mdev.h>
1577 +#include <net/lwtunnel.h>
1578 #include <trace/events/fib.h>
1579
1580 #ifndef CONFIG_IP_MULTIPLE_TABLES
1581 @@ -676,6 +677,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
1582 cfg->fc_mx_len = nla_len(attr);
1583 break;
1584 case RTA_MULTIPATH:
1585 + err = lwtunnel_valid_encap_type_attr(nla_data(attr),
1586 + nla_len(attr));
1587 + if (err < 0)
1588 + goto errout;
1589 cfg->fc_mp = nla_data(attr);
1590 cfg->fc_mp_len = nla_len(attr);
1591 break;
1592 @@ -690,6 +695,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
1593 break;
1594 case RTA_ENCAP_TYPE:
1595 cfg->fc_encap_type = nla_get_u16(attr);
1596 + err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
1597 + if (err < 0)
1598 + goto errout;
1599 break;
1600 }
1601 }
1602 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
1603 index a8508b79b406..6a4068031aaa 100644
1604 --- a/net/ipv4/fib_semantics.c
1605 +++ b/net/ipv4/fib_semantics.c
1606 @@ -1278,8 +1278,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1607 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
1608 goto nla_put_failure;
1609 #endif
1610 - if (fi->fib_nh->nh_lwtstate)
1611 - lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
1612 + if (fi->fib_nh->nh_lwtstate &&
1613 + lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
1614 + goto nla_put_failure;
1615 }
1616 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1617 if (fi->fib_nhs > 1) {
1618 @@ -1315,8 +1316,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1619 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
1620 goto nla_put_failure;
1621 #endif
1622 - if (nh->nh_lwtstate)
1623 - lwtunnel_fill_encap(skb, nh->nh_lwtstate);
1624 + if (nh->nh_lwtstate &&
1625 + lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
1626 + goto nla_put_failure;
1627 +
1628 /* length of rtnetlink header + attributes */
1629 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
1630 } endfor_nexthops(fi);
1631 diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
1632 index fed3d29f9eb3..0fd1976ab63b 100644
1633 --- a/net/ipv4/ip_tunnel_core.c
1634 +++ b/net/ipv4/ip_tunnel_core.c
1635 @@ -313,6 +313,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
1636 .fill_encap = ip_tun_fill_encap_info,
1637 .get_encap_size = ip_tun_encap_nlsize,
1638 .cmp_encap = ip_tun_cmp_encap,
1639 + .owner = THIS_MODULE,
1640 };
1641
1642 static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
1643 @@ -403,6 +404,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
1644 .fill_encap = ip6_tun_fill_encap_info,
1645 .get_encap_size = ip6_tun_encap_nlsize,
1646 .cmp_encap = ip_tun_cmp_encap,
1647 + .owner = THIS_MODULE,
1648 };
1649
1650 void __init ip_tunnel_core_init(void)
1651 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
1652 index 8197b06d9aaa..d851cae27dac 100644
1653 --- a/net/ipv4/route.c
1654 +++ b/net/ipv4/route.c
1655 @@ -2440,7 +2440,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
1656 r->rtm_dst_len = 32;
1657 r->rtm_src_len = 0;
1658 r->rtm_tos = fl4->flowi4_tos;
1659 - r->rtm_table = table_id;
1660 + r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
1661 if (nla_put_u32(skb, RTA_TABLE, table_id))
1662 goto nla_put_failure;
1663 r->rtm_type = rt->rt_type;
1664 diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
1665 index 4e777a3243f9..dd2560c83a85 100644
1666 --- a/net/ipv4/tcp_fastopen.c
1667 +++ b/net/ipv4/tcp_fastopen.c
1668 @@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
1669 struct tcp_fastopen_cookie tmp;
1670
1671 if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
1672 - struct in6_addr *buf = (struct in6_addr *) tmp.val;
1673 + struct in6_addr *buf = &tmp.addr;
1674 int i;
1675
1676 for (i = 0; i < 4; i++)
1677 @@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
1678 * scaled. So correct it appropriately.
1679 */
1680 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1681 + tp->max_window = tp->snd_wnd;
1682
1683 /* Activate the retrans timer so that SYNACK can be retransmitted.
1684 * The request socket is not added to the ehash
1685 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
1686 index 4bc5ba3ae452..95dfcba38ff6 100644
1687 --- a/net/ipv6/addrconf.c
1688 +++ b/net/ipv6/addrconf.c
1689 @@ -5515,8 +5515,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
1690 struct net_device *dev;
1691 struct inet6_dev *idev;
1692
1693 - rcu_read_lock();
1694 - for_each_netdev_rcu(net, dev) {
1695 + for_each_netdev(net, dev) {
1696 idev = __in6_dev_get(dev);
1697 if (idev) {
1698 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
1699 @@ -5525,7 +5524,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
1700 dev_disable_change(idev);
1701 }
1702 }
1703 - rcu_read_unlock();
1704 }
1705
1706 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
1707 diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
1708 index e50c27a93e17..f3db364fc853 100644
1709 --- a/net/ipv6/ila/ila_lwt.c
1710 +++ b/net/ipv6/ila/ila_lwt.c
1711 @@ -164,6 +164,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
1712 .fill_encap = ila_fill_encap_info,
1713 .get_encap_size = ila_encap_nlsize,
1714 .cmp_encap = ila_encap_cmp,
1715 + .owner = THIS_MODULE,
1716 };
1717
1718 int ila_lwt_init(void)
1719 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1720 index d76674efe523..f95437f1087c 100644
1721 --- a/net/ipv6/ip6_tunnel.c
1722 +++ b/net/ipv6/ip6_tunnel.c
1723 @@ -1108,7 +1108,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1724 t->parms.name);
1725 goto tx_err_dst_release;
1726 }
1727 - mtu = dst_mtu(dst) - psh_hlen;
1728 + mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
1729 if (encap_limit >= 0) {
1730 max_headroom += 8;
1731 mtu -= 8;
1732 @@ -1117,7 +1117,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1733 mtu = IPV6_MIN_MTU;
1734 if (skb_dst(skb) && !t->parms.collect_md)
1735 skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
1736 - if (skb->len > mtu && !skb_is_gso(skb)) {
1737 + if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
1738 *pmtu = mtu;
1739 err = -EMSGSIZE;
1740 goto tx_err_dst_release;
1741 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1742 index 1b57e11e6e0d..bff4460f17be 100644
1743 --- a/net/ipv6/route.c
1744 +++ b/net/ipv6/route.c
1745 @@ -2885,6 +2885,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1746 if (tb[RTA_MULTIPATH]) {
1747 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
1748 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
1749 +
1750 + err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
1751 + cfg->fc_mp_len);
1752 + if (err < 0)
1753 + goto errout;
1754 }
1755
1756 if (tb[RTA_PREF]) {
1757 @@ -2898,9 +2903,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
1758 if (tb[RTA_ENCAP])
1759 cfg->fc_encap = tb[RTA_ENCAP];
1760
1761 - if (tb[RTA_ENCAP_TYPE])
1762 + if (tb[RTA_ENCAP_TYPE]) {
1763 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
1764
1765 + err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
1766 + if (err < 0)
1767 + goto errout;
1768 + }
1769 +
1770 if (tb[RTA_EXPIRES]) {
1771 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
1772
1773 @@ -3306,7 +3316,8 @@ static int rt6_fill_node(struct net *net,
1774 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
1775 goto nla_put_failure;
1776
1777 - lwtunnel_fill_encap(skb, rt->dst.lwtstate);
1778 + if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
1779 + goto nla_put_failure;
1780
1781 nlmsg_end(skb, nlh);
1782 return 0;
1783 diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
1784 index 15fe97644ffe..5b77377e5a15 100644
1785 --- a/net/mpls/af_mpls.c
1786 +++ b/net/mpls/af_mpls.c
1787 @@ -98,18 +98,19 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
1788 }
1789 EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
1790
1791 -static u32 mpls_multipath_hash(struct mpls_route *rt,
1792 - struct sk_buff *skb, bool bos)
1793 +static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
1794 {
1795 struct mpls_entry_decoded dec;
1796 + unsigned int mpls_hdr_len = 0;
1797 struct mpls_shim_hdr *hdr;
1798 bool eli_seen = false;
1799 int label_index;
1800 u32 hash = 0;
1801
1802 - for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
1803 + for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
1804 label_index++) {
1805 - if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
1806 + mpls_hdr_len += sizeof(*hdr);
1807 + if (!pskb_may_pull(skb, mpls_hdr_len))
1808 break;
1809
1810 /* Read and decode the current label */
1811 @@ -134,37 +135,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
1812 eli_seen = true;
1813 }
1814
1815 - bos = dec.bos;
1816 - if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
1817 - sizeof(struct iphdr))) {
1818 + if (!dec.bos)
1819 + continue;
1820 +
1821 + /* found bottom label; does skb have room for a header? */
1822 + if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
1823 const struct iphdr *v4hdr;
1824
1825 - v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
1826 - label_index);
1827 + v4hdr = (const struct iphdr *)(hdr + 1);
1828 if (v4hdr->version == 4) {
1829 hash = jhash_3words(ntohl(v4hdr->saddr),
1830 ntohl(v4hdr->daddr),
1831 v4hdr->protocol, hash);
1832 } else if (v4hdr->version == 6 &&
1833 - pskb_may_pull(skb, sizeof(*hdr) * label_index +
1834 - sizeof(struct ipv6hdr))) {
1835 + pskb_may_pull(skb, mpls_hdr_len +
1836 + sizeof(struct ipv6hdr))) {
1837 const struct ipv6hdr *v6hdr;
1838
1839 - v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
1840 - label_index);
1841 -
1842 + v6hdr = (const struct ipv6hdr *)(hdr + 1);
1843 hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
1844 hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
1845 hash = jhash_1word(v6hdr->nexthdr, hash);
1846 }
1847 }
1848 +
1849 + break;
1850 }
1851
1852 return hash;
1853 }
1854
1855 static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
1856 - struct sk_buff *skb, bool bos)
1857 + struct sk_buff *skb)
1858 {
1859 int alive = ACCESS_ONCE(rt->rt_nhn_alive);
1860 u32 hash = 0;
1861 @@ -180,7 +182,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
1862 if (alive <= 0)
1863 return NULL;
1864
1865 - hash = mpls_multipath_hash(rt, skb, bos);
1866 + hash = mpls_multipath_hash(rt, skb);
1867 nh_index = hash % alive;
1868 if (alive == rt->rt_nhn)
1869 goto out;
1870 @@ -278,17 +280,11 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
1871 hdr = mpls_hdr(skb);
1872 dec = mpls_entry_decode(hdr);
1873
1874 - /* Pop the label */
1875 - skb_pull(skb, sizeof(*hdr));
1876 - skb_reset_network_header(skb);
1877 -
1878 - skb_orphan(skb);
1879 -
1880 rt = mpls_route_input_rcu(net, dec.label);
1881 if (!rt)
1882 goto drop;
1883
1884 - nh = mpls_select_multipath(rt, skb, dec.bos);
1885 + nh = mpls_select_multipath(rt, skb);
1886 if (!nh)
1887 goto drop;
1888
1889 @@ -297,6 +293,12 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
1890 if (!mpls_output_possible(out_dev))
1891 goto drop;
1892
1893 + /* Pop the label */
1894 + skb_pull(skb, sizeof(*hdr));
1895 + skb_reset_network_header(skb);
1896 +
1897 + skb_orphan(skb);
1898 +
1899 if (skb_warn_if_lro(skb))
1900 goto drop;
1901
1902 diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
1903 index cf52cf30ac4b..bc9aaf58c7cc 100644
1904 --- a/net/mpls/mpls_iptunnel.c
1905 +++ b/net/mpls/mpls_iptunnel.c
1906 @@ -218,6 +218,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
1907 .fill_encap = mpls_fill_encap_info,
1908 .get_encap_size = mpls_encap_nlsize,
1909 .cmp_encap = mpls_encap_cmp,
1910 + .owner = THIS_MODULE,
1911 };
1912
1913 static int __init mpls_iptunnel_init(void)
1914 diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
1915 index fecefa2dc94e..eab210bb1ef0 100644
1916 --- a/net/openvswitch/conntrack.c
1917 +++ b/net/openvswitch/conntrack.c
1918 @@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
1919 int hooknum, nh_off, err = NF_ACCEPT;
1920
1921 nh_off = skb_network_offset(skb);
1922 - skb_pull(skb, nh_off);
1923 + skb_pull_rcsum(skb, nh_off);
1924
1925 /* See HOOK2MANIP(). */
1926 if (maniptype == NF_NAT_MANIP_SRC)
1927 @@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
1928 err = nf_nat_packet(ct, ctinfo, hooknum, skb);
1929 push:
1930 skb_push(skb, nh_off);
1931 + skb_postpush_rcsum(skb, skb->data, nh_off);
1932
1933 return err;
1934 }
1935 @@ -890,7 +891,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
1936
1937 /* The conntrack module expects to be working at L3. */
1938 nh_ofs = skb_network_offset(skb);
1939 - skb_pull(skb, nh_ofs);
1940 + skb_pull_rcsum(skb, nh_ofs);
1941
1942 if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
1943 err = handle_fragments(net, key, info->zone.id, skb);
1944 @@ -904,6 +905,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
1945 err = ovs_ct_lookup(net, key, info, skb);
1946
1947 skb_push(skb, nh_ofs);
1948 + skb_postpush_rcsum(skb, skb->data, nh_ofs);
1949 if (err)
1950 kfree_skb(skb);
1951 return err;
1952 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1953 index dd2332390c45..94e4a5941d89 100644
1954 --- a/net/packet/af_packet.c
1955 +++ b/net/packet/af_packet.c
1956 @@ -1972,7 +1972,7 @@ static int __packet_rcv_vnet(const struct sk_buff *skb,
1957 {
1958 *vnet_hdr = (const struct virtio_net_hdr) { 0 };
1959
1960 - if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le()))
1961 + if (virtio_net_hdr_from_skb(skb, vnet_hdr, vio_le(), true))
1962 BUG();
1963
1964 return 0;
1965 diff --git a/net/sched/act_api.c b/net/sched/act_api.c
1966 index f893d180da1c..c6c2a93cc2a2 100644
1967 --- a/net/sched/act_api.c
1968 +++ b/net/sched/act_api.c
1969 @@ -903,8 +903,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1970 goto err;
1971 }
1972 act->order = i;
1973 - if (event == RTM_GETACTION)
1974 - act->tcfa_refcnt++;
1975 list_add_tail(&act->list, &actions);
1976 }
1977
1978 @@ -917,7 +915,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
1979 return ret;
1980 }
1981 err:
1982 - tcf_action_destroy(&actions, 0);
1983 + if (event != RTM_GETACTION)
1984 + tcf_action_destroy(&actions, 0);
1985 return ret;
1986 }
1987
1988 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1989 index 2358f2690ec5..2d03d5bcb5b9 100644
1990 --- a/net/unix/af_unix.c
1991 +++ b/net/unix/af_unix.c
1992 @@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
1993 unsigned int hash;
1994 struct unix_address *addr;
1995 struct hlist_head *list;
1996 + struct path path = { NULL, NULL };
1997
1998 err = -EINVAL;
1999 if (sunaddr->sun_family != AF_UNIX)
2000 @@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2001 goto out;
2002 addr_len = err;
2003
2004 + if (sun_path[0]) {
2005 + umode_t mode = S_IFSOCK |
2006 + (SOCK_INODE(sock)->i_mode & ~current_umask());
2007 + err = unix_mknod(sun_path, mode, &path);
2008 + if (err) {
2009 + if (err == -EEXIST)
2010 + err = -EADDRINUSE;
2011 + goto out;
2012 + }
2013 + }
2014 +
2015 err = mutex_lock_interruptible(&u->bindlock);
2016 if (err)
2017 - goto out;
2018 + goto out_put;
2019
2020 err = -EINVAL;
2021 if (u->addr)
2022 @@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2023 atomic_set(&addr->refcnt, 1);
2024
2025 if (sun_path[0]) {
2026 - struct path path;
2027 - umode_t mode = S_IFSOCK |
2028 - (SOCK_INODE(sock)->i_mode & ~current_umask());
2029 - err = unix_mknod(sun_path, mode, &path);
2030 - if (err) {
2031 - if (err == -EEXIST)
2032 - err = -EADDRINUSE;
2033 - unix_release_addr(addr);
2034 - goto out_up;
2035 - }
2036 addr->hash = UNIX_HASH_SIZE;
2037 hash = d_real_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
2038 spin_lock(&unix_table_lock);
2039 @@ -1065,6 +1067,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
2040 spin_unlock(&unix_table_lock);
2041 out_up:
2042 mutex_unlock(&u->bindlock);
2043 +out_put:
2044 + if (err)
2045 + path_put(&path);
2046 out:
2047 return err;
2048 }