Contents of /trunk/kernel-magellan/patches-4.2/0102-4.2.3-all-fixes.patch
Parent Directory | Revision Log
Revision 2708 -
(show annotations)
(download)
Mon Nov 2 12:49:20 2015 UTC (8 years, 10 months ago) by niro
File size: 47385 byte(s)
-linux-4.2.3
1 | diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt |
2 | index 41b3f3f864e8..5d88f37480b6 100644 |
3 | --- a/Documentation/devicetree/bindings/net/ethernet.txt |
4 | +++ b/Documentation/devicetree/bindings/net/ethernet.txt |
5 | @@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers: |
6 | flow control thresholds. |
7 | - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This |
8 | is used for components that can have configurable fifo sizes. |
9 | +- managed: string, specifies the PHY management type. Supported values are: |
10 | + "auto", "in-band-status". "auto" is the default, it usess MDIO for |
11 | + management if fixed-link is not specified. |
12 | |
13 | Child nodes of the Ethernet controller are typically the individual PHY devices |
14 | connected via the MDIO bus (sometimes the MDIO bus controller is separate). |
15 | They are described in the phy.txt file in this same directory. |
16 | +For non-MDIO PHY management see fixed-link.txt. |
17 | diff --git a/Makefile b/Makefile |
18 | index 3578b4426ecf..a6edbb11a69a 100644 |
19 | --- a/Makefile |
20 | +++ b/Makefile |
21 | @@ -1,6 +1,6 @@ |
22 | VERSION = 4 |
23 | PATCHLEVEL = 2 |
24 | -SUBLEVEL = 2 |
25 | +SUBLEVEL = 3 |
26 | EXTRAVERSION = |
27 | NAME = Hurr durr I'ma sheep |
28 | |
29 | diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c |
30 | index 965d1afb0eaa..5cb13ca3a3ac 100644 |
31 | --- a/drivers/block/zram/zcomp.c |
32 | +++ b/drivers/block/zram/zcomp.c |
33 | @@ -330,12 +330,14 @@ void zcomp_destroy(struct zcomp *comp) |
34 | * allocate new zcomp and initialize it. return compressing |
35 | * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL) |
36 | * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in |
37 | - * case of allocation error. |
38 | + * case of allocation error, or any other error potentially |
39 | + * returned by functions zcomp_strm_{multi,single}_create. |
40 | */ |
41 | struct zcomp *zcomp_create(const char *compress, int max_strm) |
42 | { |
43 | struct zcomp *comp; |
44 | struct zcomp_backend *backend; |
45 | + int error; |
46 | |
47 | backend = find_backend(compress); |
48 | if (!backend) |
49 | @@ -347,12 +349,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm) |
50 | |
51 | comp->backend = backend; |
52 | if (max_strm > 1) |
53 | - zcomp_strm_multi_create(comp, max_strm); |
54 | + error = zcomp_strm_multi_create(comp, max_strm); |
55 | else |
56 | - zcomp_strm_single_create(comp); |
57 | - if (!comp->stream) { |
58 | + error = zcomp_strm_single_create(comp); |
59 | + if (error) { |
60 | kfree(comp); |
61 | - return ERR_PTR(-ENOMEM); |
62 | + return ERR_PTR(error); |
63 | } |
64 | return comp; |
65 | } |
66 | diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c |
67 | index 079897b3a955..9d56515f4c4d 100644 |
68 | --- a/drivers/net/dsa/bcm_sf2.c |
69 | +++ b/drivers/net/dsa/bcm_sf2.c |
70 | @@ -418,7 +418,7 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) |
71 | core_writel(priv, port, CORE_FAST_AGE_PORT); |
72 | |
73 | reg = core_readl(priv, CORE_FAST_AGE_CTRL); |
74 | - reg |= EN_AGE_PORT | FAST_AGE_STR_DONE; |
75 | + reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE; |
76 | core_writel(priv, reg, CORE_FAST_AGE_CTRL); |
77 | |
78 | do { |
79 | @@ -432,6 +432,8 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port) |
80 | if (!timeout) |
81 | return -ETIMEDOUT; |
82 | |
83 | + core_writel(priv, 0, CORE_FAST_AGE_CTRL); |
84 | + |
85 | return 0; |
86 | } |
87 | |
88 | @@ -507,7 +509,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, |
89 | u32 reg; |
90 | |
91 | reg = core_readl(priv, CORE_G_PCTL_PORT(port)); |
92 | - cur_hw_state = reg >> G_MISTP_STATE_SHIFT; |
93 | + cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT); |
94 | |
95 | switch (state) { |
96 | case BR_STATE_DISABLED: |
97 | @@ -531,10 +533,12 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port, |
98 | } |
99 | |
100 | /* Fast-age ARL entries if we are moving a port from Learning or |
101 | - * Forwarding state to Disabled, Blocking or Listening state |
102 | + * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening |
103 | + * state (hw_state) |
104 | */ |
105 | if (cur_hw_state != hw_state) { |
106 | - if (cur_hw_state & 4 && !(hw_state & 4)) { |
107 | + if (cur_hw_state >= G_MISTP_LEARN_STATE && |
108 | + hw_state <= G_MISTP_LISTEN_STATE) { |
109 | ret = bcm_sf2_sw_fast_age_port(ds, port); |
110 | if (ret) { |
111 | pr_err("%s: fast-ageing failed\n", __func__); |
112 | @@ -901,15 +905,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, |
113 | struct fixed_phy_status *status) |
114 | { |
115 | struct bcm_sf2_priv *priv = ds_to_priv(ds); |
116 | - u32 duplex, pause, speed; |
117 | + u32 duplex, pause; |
118 | u32 reg; |
119 | |
120 | duplex = core_readl(priv, CORE_DUPSTS); |
121 | pause = core_readl(priv, CORE_PAUSESTS); |
122 | - speed = core_readl(priv, CORE_SPDSTS); |
123 | - |
124 | - speed >>= (port * SPDSTS_SHIFT); |
125 | - speed &= SPDSTS_MASK; |
126 | |
127 | status->link = 0; |
128 | |
129 | @@ -944,18 +944,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, |
130 | reg &= ~LINK_STS; |
131 | core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); |
132 | |
133 | - switch (speed) { |
134 | - case SPDSTS_10: |
135 | - status->speed = SPEED_10; |
136 | - break; |
137 | - case SPDSTS_100: |
138 | - status->speed = SPEED_100; |
139 | - break; |
140 | - case SPDSTS_1000: |
141 | - status->speed = SPEED_1000; |
142 | - break; |
143 | - } |
144 | - |
145 | if ((pause & (1 << port)) && |
146 | (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) { |
147 | status->asym_pause = 1; |
148 | diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h |
149 | index 22e2ebf31333..789d7b7737da 100644 |
150 | --- a/drivers/net/dsa/bcm_sf2.h |
151 | +++ b/drivers/net/dsa/bcm_sf2.h |
152 | @@ -112,8 +112,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ |
153 | spin_unlock(&priv->indir_lock); \ |
154 | return (u64)indir << 32 | dir; \ |
155 | } \ |
156 | -static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \ |
157 | - u64 val) \ |
158 | +static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \ |
159 | + u32 off) \ |
160 | { \ |
161 | spin_lock(&priv->indir_lock); \ |
162 | reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \ |
163 | diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c |
164 | index 561342466076..26ec2fbfaa89 100644 |
165 | --- a/drivers/net/dsa/mv88e6xxx.c |
166 | +++ b/drivers/net/dsa/mv88e6xxx.c |
167 | @@ -1387,6 +1387,7 @@ static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port) |
168 | reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL); |
169 | if (dsa_is_cpu_port(ds, port) || |
170 | ds->dsa_port_mask & (1 << port)) { |
171 | + reg &= ~PORT_PCS_CTRL_UNFORCED; |
172 | reg |= PORT_PCS_CTRL_FORCE_LINK | |
173 | PORT_PCS_CTRL_LINK_UP | |
174 | PORT_PCS_CTRL_DUPLEX_FULL | |
175 | diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c |
176 | index da48e66377b5..8207877d6237 100644 |
177 | --- a/drivers/net/ethernet/altera/altera_tse_main.c |
178 | +++ b/drivers/net/ethernet/altera/altera_tse_main.c |
179 | @@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget) |
180 | |
181 | if (rxcomplete < budget) { |
182 | |
183 | - napi_gro_flush(napi, false); |
184 | - __napi_complete(napi); |
185 | + napi_complete(napi); |
186 | |
187 | netdev_dbg(priv->dev, |
188 | "NAPI Complete, did %d packets with budget %d\n", |
189 | diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c |
190 | index b349e6f36ea7..de63266de16b 100644 |
191 | --- a/drivers/net/ethernet/freescale/fec_main.c |
192 | +++ b/drivers/net/ethernet/freescale/fec_main.c |
193 | @@ -1402,6 +1402,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id) |
194 | if ((status & BD_ENET_RX_LAST) == 0) |
195 | netdev_err(ndev, "rcv is not +last\n"); |
196 | |
197 | + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); |
198 | |
199 | /* Check for errors. */ |
200 | if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | |
201 | diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c |
202 | index 62e48bc0cb23..09ec32e33076 100644 |
203 | --- a/drivers/net/ethernet/marvell/mvneta.c |
204 | +++ b/drivers/net/ethernet/marvell/mvneta.c |
205 | @@ -1479,6 +1479,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, |
206 | struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); |
207 | struct sk_buff *skb; |
208 | unsigned char *data; |
209 | + dma_addr_t phys_addr; |
210 | u32 rx_status; |
211 | int rx_bytes, err; |
212 | |
213 | @@ -1486,6 +1487,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, |
214 | rx_status = rx_desc->status; |
215 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); |
216 | data = (unsigned char *)rx_desc->buf_cookie; |
217 | + phys_addr = rx_desc->buf_phys_addr; |
218 | |
219 | if (!mvneta_rxq_desc_is_first_last(rx_status) || |
220 | (rx_status & MVNETA_RXD_ERR_SUMMARY)) { |
221 | @@ -1534,7 +1536,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, |
222 | if (!skb) |
223 | goto err_drop_frame; |
224 | |
225 | - dma_unmap_single(dev->dev.parent, rx_desc->buf_phys_addr, |
226 | + dma_unmap_single(dev->dev.parent, phys_addr, |
227 | MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); |
228 | |
229 | rcvd_pkts++; |
230 | @@ -3027,8 +3029,8 @@ static int mvneta_probe(struct platform_device *pdev) |
231 | const char *dt_mac_addr; |
232 | char hw_mac_addr[ETH_ALEN]; |
233 | const char *mac_from; |
234 | + const char *managed; |
235 | int phy_mode; |
236 | - int fixed_phy = 0; |
237 | int err; |
238 | |
239 | /* Our multiqueue support is not complete, so for now, only |
240 | @@ -3062,7 +3064,6 @@ static int mvneta_probe(struct platform_device *pdev) |
241 | dev_err(&pdev->dev, "cannot register fixed PHY\n"); |
242 | goto err_free_irq; |
243 | } |
244 | - fixed_phy = 1; |
245 | |
246 | /* In the case of a fixed PHY, the DT node associated |
247 | * to the PHY is the Ethernet MAC DT node. |
248 | @@ -3086,8 +3087,10 @@ static int mvneta_probe(struct platform_device *pdev) |
249 | pp = netdev_priv(dev); |
250 | pp->phy_node = phy_node; |
251 | pp->phy_interface = phy_mode; |
252 | - pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) && |
253 | - fixed_phy; |
254 | + |
255 | + err = of_property_read_string(dn, "managed", &managed); |
256 | + pp->use_inband_status = (err == 0 && |
257 | + strcmp(managed, "in-band-status") == 0); |
258 | |
259 | pp->clk = devm_clk_get(&pdev->dev, NULL); |
260 | if (IS_ERR(pp->clk)) { |
261 | diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c |
262 | index 9c145dddd717..4f95fa7b594d 100644 |
263 | --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c |
264 | +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c |
265 | @@ -1250,8 +1250,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) |
266 | rss_context->hash_fn = MLX4_RSS_HASH_TOP; |
267 | memcpy(rss_context->rss_key, priv->rss_key, |
268 | MLX4_EN_RSS_KEY_SIZE); |
269 | - netdev_rss_key_fill(rss_context->rss_key, |
270 | - MLX4_EN_RSS_KEY_SIZE); |
271 | } else { |
272 | en_err(priv, "Unknown RSS hash function requested\n"); |
273 | err = -EINVAL; |
274 | diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c |
275 | index 29c2a017a450..a408977a531a 100644 |
276 | --- a/drivers/net/ethernet/mellanox/mlx4/main.c |
277 | +++ b/drivers/net/ethernet/mellanox/mlx4/main.c |
278 | @@ -2654,9 +2654,14 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) |
279 | |
280 | if (msi_x) { |
281 | int nreq = dev->caps.num_ports * num_online_cpus() + 1; |
282 | + bool shared_ports = false; |
283 | |
284 | nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs, |
285 | nreq); |
286 | + if (nreq > MAX_MSIX) { |
287 | + nreq = MAX_MSIX; |
288 | + shared_ports = true; |
289 | + } |
290 | |
291 | entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL); |
292 | if (!entries) |
293 | @@ -2679,6 +2684,9 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) |
294 | bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports, |
295 | dev->caps.num_ports); |
296 | |
297 | + if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) |
298 | + shared_ports = true; |
299 | + |
300 | for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) { |
301 | if (i == MLX4_EQ_ASYNC) |
302 | continue; |
303 | @@ -2686,7 +2694,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) |
304 | priv->eq_table.eq[i].irq = |
305 | entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector; |
306 | |
307 | - if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) { |
308 | + if (shared_ports) { |
309 | bitmap_fill(priv->eq_table.eq[i].actv_ports.ports, |
310 | dev->caps.num_ports); |
311 | /* We don't set affinity hint when there |
312 | diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c |
313 | index edd77342773a..248478c6f6e4 100644 |
314 | --- a/drivers/net/macvtap.c |
315 | +++ b/drivers/net/macvtap.c |
316 | @@ -1111,10 +1111,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd, |
317 | return 0; |
318 | |
319 | case TUNSETSNDBUF: |
320 | - if (get_user(u, up)) |
321 | + if (get_user(s, sp)) |
322 | return -EFAULT; |
323 | |
324 | - q->sk.sk_sndbuf = u; |
325 | + q->sk.sk_sndbuf = s; |
326 | return 0; |
327 | |
328 | case TUNGETVNETHDRSZ: |
329 | diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c |
330 | index d7a65247f952..99d9bc19c94a 100644 |
331 | --- a/drivers/net/phy/fixed_phy.c |
332 | +++ b/drivers/net/phy/fixed_phy.c |
333 | @@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) |
334 | u16 lpagb = 0; |
335 | u16 lpa = 0; |
336 | |
337 | + if (!fp->status.link) |
338 | + goto done; |
339 | + bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; |
340 | + |
341 | if (fp->status.duplex) { |
342 | bmcr |= BMCR_FULLDPLX; |
343 | |
344 | @@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp) |
345 | } |
346 | } |
347 | |
348 | - if (fp->status.link) |
349 | - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; |
350 | - |
351 | if (fp->status.pause) |
352 | lpa |= LPA_PAUSE_CAP; |
353 | |
354 | if (fp->status.asym_pause) |
355 | lpa |= LPA_PAUSE_ASYM; |
356 | |
357 | +done: |
358 | fp->regs[MII_PHYSID1] = 0; |
359 | fp->regs[MII_PHYSID2] = 0; |
360 | |
361 | diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c |
362 | index 46a14cbb0215..02a4615b65f8 100644 |
363 | --- a/drivers/net/phy/mdio_bus.c |
364 | +++ b/drivers/net/phy/mdio_bus.c |
365 | @@ -303,12 +303,12 @@ void mdiobus_unregister(struct mii_bus *bus) |
366 | BUG_ON(bus->state != MDIOBUS_REGISTERED); |
367 | bus->state = MDIOBUS_UNREGISTERED; |
368 | |
369 | - device_del(&bus->dev); |
370 | for (i = 0; i < PHY_MAX_ADDR; i++) { |
371 | if (bus->phy_map[i]) |
372 | device_unregister(&bus->phy_map[i]->dev); |
373 | bus->phy_map[i] = NULL; |
374 | } |
375 | + device_del(&bus->dev); |
376 | } |
377 | EXPORT_SYMBOL(mdiobus_unregister); |
378 | |
379 | diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c |
380 | index fa8f5046afe9..487be20b6b12 100644 |
381 | --- a/drivers/net/ppp/ppp_generic.c |
382 | +++ b/drivers/net/ppp/ppp_generic.c |
383 | @@ -2742,6 +2742,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, |
384 | */ |
385 | dev_net_set(dev, net); |
386 | |
387 | + rtnl_lock(); |
388 | mutex_lock(&pn->all_ppp_mutex); |
389 | |
390 | if (unit < 0) { |
391 | @@ -2772,7 +2773,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, |
392 | ppp->file.index = unit; |
393 | sprintf(dev->name, "ppp%d", unit); |
394 | |
395 | - ret = register_netdev(dev); |
396 | + ret = register_netdevice(dev); |
397 | if (ret != 0) { |
398 | unit_put(&pn->units_idr, unit); |
399 | netdev_err(ppp->dev, "PPP: couldn't register device %s (%d)\n", |
400 | @@ -2784,6 +2785,7 @@ static struct ppp *ppp_create_interface(struct net *net, int unit, |
401 | |
402 | atomic_inc(&ppp_unit_count); |
403 | mutex_unlock(&pn->all_ppp_mutex); |
404 | + rtnl_unlock(); |
405 | |
406 | *retp = 0; |
407 | return ppp; |
408 | diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c |
409 | index fdc60db60829..7c8c23cc6896 100644 |
410 | --- a/drivers/of/of_mdio.c |
411 | +++ b/drivers/of/of_mdio.c |
412 | @@ -266,7 +266,8 @@ EXPORT_SYMBOL(of_phy_attach); |
413 | bool of_phy_is_fixed_link(struct device_node *np) |
414 | { |
415 | struct device_node *dn; |
416 | - int len; |
417 | + int len, err; |
418 | + const char *managed; |
419 | |
420 | /* New binding */ |
421 | dn = of_get_child_by_name(np, "fixed-link"); |
422 | @@ -275,6 +276,10 @@ bool of_phy_is_fixed_link(struct device_node *np) |
423 | return true; |
424 | } |
425 | |
426 | + err = of_property_read_string(np, "managed", &managed); |
427 | + if (err == 0 && strcmp(managed, "auto") != 0) |
428 | + return true; |
429 | + |
430 | /* Old binding */ |
431 | if (of_get_property(np, "fixed-link", &len) && |
432 | len == (5 * sizeof(__be32))) |
433 | @@ -289,8 +294,18 @@ int of_phy_register_fixed_link(struct device_node *np) |
434 | struct fixed_phy_status status = {}; |
435 | struct device_node *fixed_link_node; |
436 | const __be32 *fixed_link_prop; |
437 | - int len; |
438 | + int len, err; |
439 | struct phy_device *phy; |
440 | + const char *managed; |
441 | + |
442 | + err = of_property_read_string(np, "managed", &managed); |
443 | + if (err == 0) { |
444 | + if (strcmp(managed, "in-band-status") == 0) { |
445 | + /* status is zeroed, namely its .link member */ |
446 | + phy = fixed_phy_register(PHY_POLL, &status, np); |
447 | + return IS_ERR(phy) ? PTR_ERR(phy) : 0; |
448 | + } |
449 | + } |
450 | |
451 | /* New binding */ |
452 | fixed_link_node = of_get_child_by_name(np, "fixed-link"); |
453 | diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c |
454 | index 06697315a088..fb4dd7b3ee71 100644 |
455 | --- a/drivers/platform/x86/hp-wmi.c |
456 | +++ b/drivers/platform/x86/hp-wmi.c |
457 | @@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); |
458 | #define HPWMI_HARDWARE_QUERY 0x4 |
459 | #define HPWMI_WIRELESS_QUERY 0x5 |
460 | #define HPWMI_BIOS_QUERY 0x9 |
461 | +#define HPWMI_FEATURE_QUERY 0xb |
462 | #define HPWMI_HOTKEY_QUERY 0xc |
463 | -#define HPWMI_FEATURE_QUERY 0xd |
464 | +#define HPWMI_FEATURE2_QUERY 0xd |
465 | #define HPWMI_WIRELESS2_QUERY 0x1b |
466 | #define HPWMI_POSTCODEERROR_QUERY 0x2a |
467 | |
468 | @@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void) |
469 | return (state & 0x4) ? 1 : 0; |
470 | } |
471 | |
472 | -static int __init hp_wmi_bios_2009_later(void) |
473 | +static int __init hp_wmi_bios_2008_later(void) |
474 | { |
475 | int state = 0; |
476 | int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state, |
477 | sizeof(state), sizeof(state)); |
478 | - if (ret) |
479 | - return ret; |
480 | + if (!ret) |
481 | + return 1; |
482 | |
483 | - return (state & 0x10) ? 1 : 0; |
484 | + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; |
485 | } |
486 | |
487 | -static int hp_wmi_enable_hotkeys(void) |
488 | +static int __init hp_wmi_bios_2009_later(void) |
489 | { |
490 | - int ret; |
491 | - int query = 0x6e; |
492 | + int state = 0; |
493 | + int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state, |
494 | + sizeof(state), sizeof(state)); |
495 | + if (!ret) |
496 | + return 1; |
497 | |
498 | - ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query), |
499 | - 0); |
500 | + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; |
501 | +} |
502 | |
503 | +static int __init hp_wmi_enable_hotkeys(void) |
504 | +{ |
505 | + int value = 0x6e; |
506 | + int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value, |
507 | + sizeof(value), 0); |
508 | if (ret) |
509 | return -EINVAL; |
510 | return 0; |
511 | @@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void) |
512 | hp_wmi_tablet_state()); |
513 | input_sync(hp_wmi_input_dev); |
514 | |
515 | - if (hp_wmi_bios_2009_later() == 4) |
516 | + if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later()) |
517 | hp_wmi_enable_hotkeys(); |
518 | |
519 | status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL); |
520 | diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c |
521 | index 1285eaf5dc22..03cdb9e18d57 100644 |
522 | --- a/net/bridge/br_multicast.c |
523 | +++ b/net/bridge/br_multicast.c |
524 | @@ -991,7 +991,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, |
525 | |
526 | ih = igmpv3_report_hdr(skb); |
527 | num = ntohs(ih->ngrec); |
528 | - len = sizeof(*ih); |
529 | + len = skb_transport_offset(skb) + sizeof(*ih); |
530 | |
531 | for (i = 0; i < num; i++) { |
532 | len += sizeof(*grec); |
533 | @@ -1052,7 +1052,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, |
534 | |
535 | icmp6h = icmp6_hdr(skb); |
536 | num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); |
537 | - len = sizeof(*icmp6h); |
538 | + len = skb_transport_offset(skb) + sizeof(*icmp6h); |
539 | |
540 | for (i = 0; i < num; i++) { |
541 | __be16 *nsrcs, _nsrcs; |
542 | diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c |
543 | index 9a12668f7d62..0ad144fb0c79 100644 |
544 | --- a/net/core/fib_rules.c |
545 | +++ b/net/core/fib_rules.c |
546 | @@ -615,15 +615,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb, |
547 | { |
548 | int idx = 0; |
549 | struct fib_rule *rule; |
550 | + int err = 0; |
551 | |
552 | rcu_read_lock(); |
553 | list_for_each_entry_rcu(rule, &ops->rules_list, list) { |
554 | if (idx < cb->args[1]) |
555 | goto skip; |
556 | |
557 | - if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, |
558 | - cb->nlh->nlmsg_seq, RTM_NEWRULE, |
559 | - NLM_F_MULTI, ops) < 0) |
560 | + err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid, |
561 | + cb->nlh->nlmsg_seq, RTM_NEWRULE, |
562 | + NLM_F_MULTI, ops); |
563 | + if (err) |
564 | break; |
565 | skip: |
566 | idx++; |
567 | @@ -632,7 +634,7 @@ skip: |
568 | cb->args[1] = idx; |
569 | rules_ops_put(ops); |
570 | |
571 | - return skb->len; |
572 | + return err; |
573 | } |
574 | |
575 | static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) |
576 | @@ -648,7 +650,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb) |
577 | if (ops == NULL) |
578 | return -EAFNOSUPPORT; |
579 | |
580 | - return dump_rules(skb, cb, ops); |
581 | + dump_rules(skb, cb, ops); |
582 | + |
583 | + return skb->len; |
584 | } |
585 | |
586 | rcu_read_lock(); |
587 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
588 | index dc004b1e1f85..0861018be708 100644 |
589 | --- a/net/core/rtnetlink.c |
590 | +++ b/net/core/rtnetlink.c |
591 | @@ -3021,6 +3021,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) |
592 | u32 portid = NETLINK_CB(cb->skb).portid; |
593 | u32 seq = cb->nlh->nlmsg_seq; |
594 | u32 filter_mask = 0; |
595 | + int err; |
596 | |
597 | if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) { |
598 | struct nlattr *extfilt; |
599 | @@ -3041,20 +3042,25 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) |
600 | struct net_device *br_dev = netdev_master_upper_dev_get(dev); |
601 | |
602 | if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) { |
603 | - if (idx >= cb->args[0] && |
604 | - br_dev->netdev_ops->ndo_bridge_getlink( |
605 | - skb, portid, seq, dev, filter_mask, |
606 | - NLM_F_MULTI) < 0) |
607 | - break; |
608 | + if (idx >= cb->args[0]) { |
609 | + err = br_dev->netdev_ops->ndo_bridge_getlink( |
610 | + skb, portid, seq, dev, |
611 | + filter_mask, NLM_F_MULTI); |
612 | + if (err < 0 && err != -EOPNOTSUPP) |
613 | + break; |
614 | + } |
615 | idx++; |
616 | } |
617 | |
618 | if (ops->ndo_bridge_getlink) { |
619 | - if (idx >= cb->args[0] && |
620 | - ops->ndo_bridge_getlink(skb, portid, seq, dev, |
621 | - filter_mask, |
622 | - NLM_F_MULTI) < 0) |
623 | - break; |
624 | + if (idx >= cb->args[0]) { |
625 | + err = ops->ndo_bridge_getlink(skb, portid, |
626 | + seq, dev, |
627 | + filter_mask, |
628 | + NLM_F_MULTI); |
629 | + if (err < 0 && err != -EOPNOTSUPP) |
630 | + break; |
631 | + } |
632 | idx++; |
633 | } |
634 | } |
635 | diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c |
636 | index d79866c5f8bc..817622f3dbb7 100644 |
637 | --- a/net/core/sock_diag.c |
638 | +++ b/net/core/sock_diag.c |
639 | @@ -90,6 +90,9 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk, |
640 | goto out; |
641 | |
642 | fprog = filter->prog->orig_prog; |
643 | + if (!fprog) |
644 | + goto out; |
645 | + |
646 | flen = bpf_classic_proglen(fprog); |
647 | |
648 | attr = nla_reserve(skb, attrtype, flen); |
649 | diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c |
650 | index b1c218df2c85..b7dedd9d36d8 100644 |
651 | --- a/net/ipv4/tcp_output.c |
652 | +++ b/net/ipv4/tcp_output.c |
653 | @@ -2898,6 +2898,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) |
654 | skb_reserve(skb, MAX_TCP_HEADER); |
655 | tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), |
656 | TCPHDR_ACK | TCPHDR_RST); |
657 | + skb_mstamp_get(&skb->skb_mstamp); |
658 | /* Send it off. */ |
659 | if (tcp_transmit_skb(sk, skb, 0, priority)) |
660 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED); |
661 | diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c |
662 | index 447a7fbd1bb6..f5e2ba1c18bf 100644 |
663 | --- a/net/ipv6/exthdrs_offload.c |
664 | +++ b/net/ipv6/exthdrs_offload.c |
665 | @@ -36,6 +36,6 @@ out: |
666 | return ret; |
667 | |
668 | out_rt: |
669 | - inet_del_offload(&rthdr_offload, IPPROTO_ROUTING); |
670 | + inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING); |
671 | goto out; |
672 | } |
673 | diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c |
674 | index 74ceb73c1c9a..5f36266b1f5e 100644 |
675 | --- a/net/ipv6/ip6mr.c |
676 | +++ b/net/ipv6/ip6mr.c |
677 | @@ -550,7 +550,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v) |
678 | |
679 | if (it->cache == &mrt->mfc6_unres_queue) |
680 | spin_unlock_bh(&mfc_unres_lock); |
681 | - else if (it->cache == mrt->mfc6_cache_array) |
682 | + else if (it->cache == &mrt->mfc6_cache_array[it->ct]) |
683 | read_unlock(&mrt_lock); |
684 | } |
685 | |
686 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
687 | index d15586490cec..00b64d402a57 100644 |
688 | --- a/net/ipv6/route.c |
689 | +++ b/net/ipv6/route.c |
690 | @@ -1727,7 +1727,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc, |
691 | return -EINVAL; |
692 | } |
693 | |
694 | -int ip6_route_add(struct fib6_config *cfg) |
695 | +int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret) |
696 | { |
697 | int err; |
698 | struct net *net = cfg->fc_nlinfo.nl_net; |
699 | @@ -1735,7 +1735,6 @@ int ip6_route_add(struct fib6_config *cfg) |
700 | struct net_device *dev = NULL; |
701 | struct inet6_dev *idev = NULL; |
702 | struct fib6_table *table; |
703 | - struct mx6_config mxc = { .mx = NULL, }; |
704 | int addr_type; |
705 | |
706 | if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128) |
707 | @@ -1941,6 +1940,32 @@ install_route: |
708 | |
709 | cfg->fc_nlinfo.nl_net = dev_net(dev); |
710 | |
711 | + *rt_ret = rt; |
712 | + |
713 | + return 0; |
714 | +out: |
715 | + if (dev) |
716 | + dev_put(dev); |
717 | + if (idev) |
718 | + in6_dev_put(idev); |
719 | + if (rt) |
720 | + dst_free(&rt->dst); |
721 | + |
722 | + *rt_ret = NULL; |
723 | + |
724 | + return err; |
725 | +} |
726 | + |
727 | +int ip6_route_add(struct fib6_config *cfg) |
728 | +{ |
729 | + struct mx6_config mxc = { .mx = NULL, }; |
730 | + struct rt6_info *rt = NULL; |
731 | + int err; |
732 | + |
733 | + err = ip6_route_info_create(cfg, &rt); |
734 | + if (err) |
735 | + goto out; |
736 | + |
737 | err = ip6_convert_metrics(&mxc, cfg); |
738 | if (err) |
739 | goto out; |
740 | @@ -1948,14 +1973,12 @@ install_route: |
741 | err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc); |
742 | |
743 | kfree(mxc.mx); |
744 | + |
745 | return err; |
746 | out: |
747 | - if (dev) |
748 | - dev_put(dev); |
749 | - if (idev) |
750 | - in6_dev_put(idev); |
751 | if (rt) |
752 | dst_free(&rt->dst); |
753 | + |
754 | return err; |
755 | } |
756 | |
757 | @@ -2727,19 +2750,78 @@ errout: |
758 | return err; |
759 | } |
760 | |
761 | -static int ip6_route_multipath(struct fib6_config *cfg, int add) |
762 | +struct rt6_nh { |
763 | + struct rt6_info *rt6_info; |
764 | + struct fib6_config r_cfg; |
765 | + struct mx6_config mxc; |
766 | + struct list_head next; |
767 | +}; |
768 | + |
769 | +static void ip6_print_replace_route_err(struct list_head *rt6_nh_list) |
770 | +{ |
771 | + struct rt6_nh *nh; |
772 | + |
773 | + list_for_each_entry(nh, rt6_nh_list, next) { |
774 | + pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n", |
775 | + &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway, |
776 | + nh->r_cfg.fc_ifindex); |
777 | + } |
778 | +} |
779 | + |
780 | +static int ip6_route_info_append(struct list_head *rt6_nh_list, |
781 | + struct rt6_info *rt, struct fib6_config *r_cfg) |
782 | +{ |
783 | + struct rt6_nh *nh; |
784 | + struct rt6_info *rtnh; |
785 | + int err = -EEXIST; |
786 | + |
787 | + list_for_each_entry(nh, rt6_nh_list, next) { |
788 | + /* check if rt6_info already exists */ |
789 | + rtnh = nh->rt6_info; |
790 | + |
791 | + if (rtnh->dst.dev == rt->dst.dev && |
792 | + rtnh->rt6i_idev == rt->rt6i_idev && |
793 | + ipv6_addr_equal(&rtnh->rt6i_gateway, |
794 | + &rt->rt6i_gateway)) |
795 | + return err; |
796 | + } |
797 | + |
798 | + nh = kzalloc(sizeof(*nh), GFP_KERNEL); |
799 | + if (!nh) |
800 | + return -ENOMEM; |
801 | + nh->rt6_info = rt; |
802 | + err = ip6_convert_metrics(&nh->mxc, r_cfg); |
803 | + if (err) { |
804 | + kfree(nh); |
805 | + return err; |
806 | + } |
807 | + memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); |
808 | + list_add_tail(&nh->next, rt6_nh_list); |
809 | + |
810 | + return 0; |
811 | +} |
812 | + |
813 | +static int ip6_route_multipath_add(struct fib6_config *cfg) |
814 | { |
815 | struct fib6_config r_cfg; |
816 | struct rtnexthop *rtnh; |
817 | + struct rt6_info *rt; |
818 | + struct rt6_nh *err_nh; |
819 | + struct rt6_nh *nh, *nh_safe; |
820 | int remaining; |
821 | int attrlen; |
822 | - int err = 0, last_err = 0; |
823 | + int err = 1; |
824 | + int nhn = 0; |
825 | + int replace = (cfg->fc_nlinfo.nlh && |
826 | + (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); |
827 | + LIST_HEAD(rt6_nh_list); |
828 | |
829 | remaining = cfg->fc_mp_len; |
830 | -beginning: |
831 | rtnh = (struct rtnexthop *)cfg->fc_mp; |
832 | |
833 | - /* Parse a Multipath Entry */ |
834 | + /* Parse a Multipath Entry and build a list (rt6_nh_list) of |
835 | + * rt6_info structs per nexthop |
836 | + */ |
837 | while (rtnh_ok(rtnh, remaining)) { |
838 | memcpy(&r_cfg, cfg, sizeof(*cfg)); |
839 | if (rtnh->rtnh_ifindex) |
840 | @@ -2755,22 +2837,32 @@ beginning: |
841 | r_cfg.fc_flags |= RTF_GATEWAY; |
842 | } |
843 | } |
844 | - err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg); |
845 | + |
846 | + err = ip6_route_info_create(&r_cfg, &rt); |
847 | + if (err) |
848 | + goto cleanup; |
849 | + |
850 | + err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); |
851 | if (err) { |
852 | - last_err = err; |
853 | - /* If we are trying to remove a route, do not stop the |
854 | - * loop when ip6_route_del() fails (because next hop is |
855 | - * already gone), we should try to remove all next hops. |
856 | - */ |
857 | - if (add) { |
858 | - /* If add fails, we should try to delete all |
859 | - * next hops that have been already added. |
860 | - */ |
861 | - add = 0; |
862 | - remaining = cfg->fc_mp_len - remaining; |
863 | - goto beginning; |
864 | - } |
865 | + dst_free(&rt->dst); |
866 | + goto cleanup; |
867 | + } |
868 | + |
869 | + rtnh = rtnh_next(rtnh, &remaining); |
870 | + } |
871 | + |
872 | + err_nh = NULL; |
873 | + list_for_each_entry(nh, &rt6_nh_list, next) { |
874 | + err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc); |
875 | + /* nh->rt6_info is used or freed at this point, reset to NULL*/ |
876 | + nh->rt6_info = NULL; |
877 | + if (err) { |
878 | + if (replace && nhn) |
879 | + ip6_print_replace_route_err(&rt6_nh_list); |
880 | + err_nh = nh; |
881 | + goto add_errout; |
882 | } |
883 | + |
884 | /* Because each route is added like a single route we remove |
885 | * these flags after the first nexthop: if there is a collision, |
886 | * we have already failed to add the first nexthop: |
887 | @@ -2780,6 +2872,63 @@ beginning: |
888 | */ |
889 | cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | |
890 | NLM_F_REPLACE); |
891 | + nhn++; |
892 | + } |
893 | + |
894 | + goto cleanup; |
895 | + |
896 | +add_errout: |
897 | + /* Delete routes that were already added */ |
898 | + list_for_each_entry(nh, &rt6_nh_list, next) { |
899 | + if (err_nh == nh) |
900 | + break; |
901 | + ip6_route_del(&nh->r_cfg); |
902 | + } |
903 | + |
904 | +cleanup: |
905 | + list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { |
906 | + if (nh->rt6_info) |
907 | + dst_free(&nh->rt6_info->dst); |
908 | + if (nh->mxc.mx) |
909 | + kfree(nh->mxc.mx); |
910 | + list_del(&nh->next); |
911 | + kfree(nh); |
912 | + } |
913 | + |
914 | + return err; |
915 | +} |
916 | + |
917 | +static int ip6_route_multipath_del(struct fib6_config *cfg) |
918 | +{ |
919 | + struct fib6_config r_cfg; |
920 | + struct rtnexthop *rtnh; |
921 | + int remaining; |
922 | + int attrlen; |
923 | + int err = 1, last_err = 0; |
924 | + |
925 | + remaining = cfg->fc_mp_len; |
926 | + rtnh = (struct rtnexthop *)cfg->fc_mp; |
927 | + |
928 | + /* Parse a Multipath Entry */ |
929 | + while (rtnh_ok(rtnh, remaining)) { |
930 | + memcpy(&r_cfg, cfg, sizeof(*cfg)); |
931 | + if (rtnh->rtnh_ifindex) |
932 | + r_cfg.fc_ifindex = rtnh->rtnh_ifindex; |
933 | + |
934 | + attrlen = rtnh_attrlen(rtnh); |
935 | + if (attrlen > 0) { |
936 | + struct nlattr *nla, *attrs = rtnh_attrs(rtnh); |
937 | + |
938 | + nla = nla_find(attrs, attrlen, RTA_GATEWAY); |
939 | + if (nla) { |
940 | + nla_memcpy(&r_cfg.fc_gateway, nla, 16); |
941 | + r_cfg.fc_flags |= RTF_GATEWAY; |
942 | + } |
943 | + } |
944 | + err = ip6_route_del(&r_cfg); |
945 | + if (err) |
946 | + last_err = err; |
947 | + |
948 | rtnh = rtnh_next(rtnh, &remaining); |
949 | } |
950 | |
951 | @@ -2796,7 +2945,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh) |
952 | return err; |
953 | |
954 | if (cfg.fc_mp) |
955 | - return ip6_route_multipath(&cfg, 0); |
956 | + return ip6_route_multipath_del(&cfg); |
957 | else |
958 | return ip6_route_del(&cfg); |
959 | } |
960 | @@ -2811,7 +2960,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh) |
961 | return err; |
962 | |
963 | if (cfg.fc_mp) |
964 | - return ip6_route_multipath(&cfg, 1); |
965 | + return ip6_route_multipath_add(&cfg); |
966 | else |
967 | return ip6_route_add(&cfg); |
968 | } |
969 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
970 | index a774985489e2..0857f7243797 100644 |
971 | --- a/net/netlink/af_netlink.c |
972 | +++ b/net/netlink/af_netlink.c |
973 | @@ -124,6 +124,24 @@ static inline u32 netlink_group_mask(u32 group) |
974 | return group ? 1 << (group - 1) : 0; |
975 | } |
976 | |
977 | +static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb, |
978 | + gfp_t gfp_mask) |
979 | +{ |
980 | + unsigned int len = skb_end_offset(skb); |
981 | + struct sk_buff *new; |
982 | + |
983 | + new = alloc_skb(len, gfp_mask); |
984 | + if (new == NULL) |
985 | + return NULL; |
986 | + |
987 | + NETLINK_CB(new).portid = NETLINK_CB(skb).portid; |
988 | + NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group; |
989 | + NETLINK_CB(new).creds = NETLINK_CB(skb).creds; |
990 | + |
991 | + memcpy(skb_put(new, len), skb->data, len); |
992 | + return new; |
993 | +} |
994 | + |
995 | int netlink_add_tap(struct netlink_tap *nt) |
996 | { |
997 | if (unlikely(nt->dev->type != ARPHRD_NETLINK)) |
998 | @@ -205,7 +223,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, |
999 | int ret = -ENOMEM; |
1000 | |
1001 | dev_hold(dev); |
1002 | - nskb = skb_clone(skb, GFP_ATOMIC); |
1003 | + |
1004 | + if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head)) |
1005 | + nskb = netlink_to_full_skb(skb, GFP_ATOMIC); |
1006 | + else |
1007 | + nskb = skb_clone(skb, GFP_ATOMIC); |
1008 | if (nskb) { |
1009 | nskb->dev = dev; |
1010 | nskb->protocol = htons((u16) sk->sk_protocol); |
1011 | @@ -278,11 +300,6 @@ static void netlink_rcv_wake(struct sock *sk) |
1012 | } |
1013 | |
1014 | #ifdef CONFIG_NETLINK_MMAP |
1015 | -static bool netlink_skb_is_mmaped(const struct sk_buff *skb) |
1016 | -{ |
1017 | - return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; |
1018 | -} |
1019 | - |
1020 | static bool netlink_rx_is_mmaped(struct sock *sk) |
1021 | { |
1022 | return nlk_sk(sk)->rx_ring.pg_vec != NULL; |
1023 | @@ -834,7 +851,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb) |
1024 | } |
1025 | |
1026 | #else /* CONFIG_NETLINK_MMAP */ |
1027 | -#define netlink_skb_is_mmaped(skb) false |
1028 | #define netlink_rx_is_mmaped(sk) false |
1029 | #define netlink_tx_is_mmaped(sk) false |
1030 | #define netlink_mmap sock_no_mmap |
1031 | @@ -1082,8 +1098,8 @@ static int netlink_insert(struct sock *sk, u32 portid) |
1032 | |
1033 | lock_sock(sk); |
1034 | |
1035 | - err = -EBUSY; |
1036 | - if (nlk_sk(sk)->portid) |
1037 | + err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY; |
1038 | + if (nlk_sk(sk)->bound) |
1039 | goto err; |
1040 | |
1041 | err = -ENOMEM; |
1042 | @@ -1103,10 +1119,14 @@ static int netlink_insert(struct sock *sk, u32 portid) |
1043 | err = -EOVERFLOW; |
1044 | if (err == -EEXIST) |
1045 | err = -EADDRINUSE; |
1046 | - nlk_sk(sk)->portid = 0; |
1047 | sock_put(sk); |
1048 | + goto err; |
1049 | } |
1050 | |
1051 | + /* We need to ensure that the socket is hashed and visible. */ |
1052 | + smp_wmb(); |
1053 | + nlk_sk(sk)->bound = portid; |
1054 | + |
1055 | err: |
1056 | release_sock(sk); |
1057 | return err; |
1058 | @@ -1491,6 +1511,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, |
1059 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; |
1060 | int err; |
1061 | long unsigned int groups = nladdr->nl_groups; |
1062 | + bool bound; |
1063 | |
1064 | if (addr_len < sizeof(struct sockaddr_nl)) |
1065 | return -EINVAL; |
1066 | @@ -1507,9 +1528,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, |
1067 | return err; |
1068 | } |
1069 | |
1070 | - if (nlk->portid) |
1071 | + bound = nlk->bound; |
1072 | + if (bound) { |
1073 | + /* Ensure nlk->portid is up-to-date. */ |
1074 | + smp_rmb(); |
1075 | + |
1076 | if (nladdr->nl_pid != nlk->portid) |
1077 | return -EINVAL; |
1078 | + } |
1079 | |
1080 | if (nlk->netlink_bind && groups) { |
1081 | int group; |
1082 | @@ -1525,7 +1551,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr, |
1083 | } |
1084 | } |
1085 | |
1086 | - if (!nlk->portid) { |
1087 | + /* No need for barriers here as we return to user-space without |
1088 | + * using any of the bound attributes. |
1089 | + */ |
1090 | + if (!bound) { |
1091 | err = nladdr->nl_pid ? |
1092 | netlink_insert(sk, nladdr->nl_pid) : |
1093 | netlink_autobind(sock); |
1094 | @@ -1573,7 +1602,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr, |
1095 | !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND)) |
1096 | return -EPERM; |
1097 | |
1098 | - if (!nlk->portid) |
1099 | + /* No need for barriers here as we return to user-space without |
1100 | + * using any of the bound attributes. |
1101 | + */ |
1102 | + if (!nlk->bound) |
1103 | err = netlink_autobind(sock); |
1104 | |
1105 | if (err == 0) { |
1106 | @@ -2391,10 +2423,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) |
1107 | dst_group = nlk->dst_group; |
1108 | } |
1109 | |
1110 | - if (!nlk->portid) { |
1111 | + if (!nlk->bound) { |
1112 | err = netlink_autobind(sock); |
1113 | if (err) |
1114 | goto out; |
1115 | + } else { |
1116 | + /* Ensure nlk is hashed and visible. */ |
1117 | + smp_rmb(); |
1118 | } |
1119 | |
1120 | /* It's a really convoluted way for userland to ask for mmaped |
1121 | diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h |
1122 | index 89008405d6b4..14437d9b1965 100644 |
1123 | --- a/net/netlink/af_netlink.h |
1124 | +++ b/net/netlink/af_netlink.h |
1125 | @@ -35,6 +35,7 @@ struct netlink_sock { |
1126 | unsigned long state; |
1127 | size_t max_recvmsg_len; |
1128 | wait_queue_head_t wait; |
1129 | + bool bound; |
1130 | bool cb_running; |
1131 | struct netlink_callback cb; |
1132 | struct mutex *cb_mutex; |
1133 | @@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk) |
1134 | return container_of(sk, struct netlink_sock, sk); |
1135 | } |
1136 | |
1137 | +static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb) |
1138 | +{ |
1139 | +#ifdef CONFIG_NETLINK_MMAP |
1140 | + return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED; |
1141 | +#else |
1142 | + return false; |
1143 | +#endif /* CONFIG_NETLINK_MMAP */ |
1144 | +} |
1145 | + |
1146 | struct netlink_table { |
1147 | struct rhashtable hash; |
1148 | struct hlist_head mc_list; |
1149 | diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c |
1150 | index ff8c4a4c1609..ff332d1b94bc 100644 |
1151 | --- a/net/openvswitch/datapath.c |
1152 | +++ b/net/openvswitch/datapath.c |
1153 | @@ -920,7 +920,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info) |
1154 | if (error) |
1155 | goto err_kfree_flow; |
1156 | |
1157 | - ovs_flow_mask_key(&new_flow->key, &key, &mask); |
1158 | + ovs_flow_mask_key(&new_flow->key, &key, true, &mask); |
1159 | |
1160 | /* Extract flow identifier. */ |
1161 | error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID], |
1162 | @@ -1047,7 +1047,7 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a, |
1163 | struct sw_flow_key masked_key; |
1164 | int error; |
1165 | |
1166 | - ovs_flow_mask_key(&masked_key, key, mask); |
1167 | + ovs_flow_mask_key(&masked_key, key, true, mask); |
1168 | error = ovs_nla_copy_actions(a, &masked_key, &acts, log); |
1169 | if (error) { |
1170 | OVS_NLERR(log, |
1171 | diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c |
1172 | index 65523948fb95..b5c3bba87fc8 100644 |
1173 | --- a/net/openvswitch/flow_table.c |
1174 | +++ b/net/openvswitch/flow_table.c |
1175 | @@ -56,20 +56,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range) |
1176 | } |
1177 | |
1178 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, |
1179 | - const struct sw_flow_mask *mask) |
1180 | + bool full, const struct sw_flow_mask *mask) |
1181 | { |
1182 | - const long *m = (const long *)((const u8 *)&mask->key + |
1183 | - mask->range.start); |
1184 | - const long *s = (const long *)((const u8 *)src + |
1185 | - mask->range.start); |
1186 | - long *d = (long *)((u8 *)dst + mask->range.start); |
1187 | + int start = full ? 0 : mask->range.start; |
1188 | + int len = full ? sizeof *dst : range_n_bytes(&mask->range); |
1189 | + const long *m = (const long *)((const u8 *)&mask->key + start); |
1190 | + const long *s = (const long *)((const u8 *)src + start); |
1191 | + long *d = (long *)((u8 *)dst + start); |
1192 | int i; |
1193 | |
1194 | - /* The memory outside of the 'mask->range' are not set since |
1195 | - * further operations on 'dst' only uses contents within |
1196 | - * 'mask->range'. |
1197 | + /* If 'full' is true then all of 'dst' is fully initialized. Otherwise, |
1198 | + * if 'full' is false the memory outside of the 'mask->range' is left |
1199 | + * uninitialized. This can be used as an optimization when further |
1200 | + * operations on 'dst' only use contents within 'mask->range'. |
1201 | */ |
1202 | - for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long)) |
1203 | + for (i = 0; i < len; i += sizeof(long)) |
1204 | *d++ = *s++ & *m++; |
1205 | } |
1206 | |
1207 | @@ -473,7 +474,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti, |
1208 | u32 hash; |
1209 | struct sw_flow_key masked_key; |
1210 | |
1211 | - ovs_flow_mask_key(&masked_key, unmasked, mask); |
1212 | + ovs_flow_mask_key(&masked_key, unmasked, false, mask); |
1213 | hash = flow_hash(&masked_key, &mask->range); |
1214 | head = find_bucket(ti, hash); |
1215 | hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) { |
1216 | diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h |
1217 | index 616eda10d955..2dd9900f533d 100644 |
1218 | --- a/net/openvswitch/flow_table.h |
1219 | +++ b/net/openvswitch/flow_table.h |
1220 | @@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *, |
1221 | bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *); |
1222 | |
1223 | void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src, |
1224 | - const struct sw_flow_mask *mask); |
1225 | + bool full, const struct sw_flow_mask *mask); |
1226 | #endif /* flow_table.h */ |
1227 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
1228 | index ed458b315ef4..7851b1222a36 100644 |
1229 | --- a/net/packet/af_packet.c |
1230 | +++ b/net/packet/af_packet.c |
1231 | @@ -229,6 +229,8 @@ struct packet_skb_cb { |
1232 | } sa; |
1233 | }; |
1234 | |
1235 | +#define vio_le() virtio_legacy_is_little_endian() |
1236 | + |
1237 | #define PACKET_SKB_CB(__skb) ((struct packet_skb_cb *)((__skb)->cb)) |
1238 | |
1239 | #define GET_PBDQC_FROM_RB(x) ((struct tpacket_kbdq_core *)(&(x)->prb_bdqc)) |
1240 | @@ -2561,15 +2563,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
1241 | goto out_unlock; |
1242 | |
1243 | if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && |
1244 | - (__virtio16_to_cpu(false, vnet_hdr.csum_start) + |
1245 | - __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2 > |
1246 | - __virtio16_to_cpu(false, vnet_hdr.hdr_len))) |
1247 | - vnet_hdr.hdr_len = __cpu_to_virtio16(false, |
1248 | - __virtio16_to_cpu(false, vnet_hdr.csum_start) + |
1249 | - __virtio16_to_cpu(false, vnet_hdr.csum_offset) + 2); |
1250 | + (__virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + |
1251 | + __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2 > |
1252 | + __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len))) |
1253 | + vnet_hdr.hdr_len = __cpu_to_virtio16(vio_le(), |
1254 | + __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start) + |
1255 | + __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset) + 2); |
1256 | |
1257 | err = -EINVAL; |
1258 | - if (__virtio16_to_cpu(false, vnet_hdr.hdr_len) > len) |
1259 | + if (__virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len) > len) |
1260 | goto out_unlock; |
1261 | |
1262 | if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
1263 | @@ -2612,7 +2614,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
1264 | hlen = LL_RESERVED_SPACE(dev); |
1265 | tlen = dev->needed_tailroom; |
1266 | skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, |
1267 | - __virtio16_to_cpu(false, vnet_hdr.hdr_len), |
1268 | + __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len), |
1269 | msg->msg_flags & MSG_DONTWAIT, &err); |
1270 | if (skb == NULL) |
1271 | goto out_unlock; |
1272 | @@ -2659,8 +2661,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
1273 | |
1274 | if (po->has_vnet_hdr) { |
1275 | if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
1276 | - u16 s = __virtio16_to_cpu(false, vnet_hdr.csum_start); |
1277 | - u16 o = __virtio16_to_cpu(false, vnet_hdr.csum_offset); |
1278 | + u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_start); |
1279 | + u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr.csum_offset); |
1280 | if (!skb_partial_csum_set(skb, s, o)) { |
1281 | err = -EINVAL; |
1282 | goto out_free; |
1283 | @@ -2668,7 +2670,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) |
1284 | } |
1285 | |
1286 | skb_shinfo(skb)->gso_size = |
1287 | - __virtio16_to_cpu(false, vnet_hdr.gso_size); |
1288 | + __virtio16_to_cpu(vio_le(), vnet_hdr.gso_size); |
1289 | skb_shinfo(skb)->gso_type = gso_type; |
1290 | |
1291 | /* Header must be checked, and gso_segs computed. */ |
1292 | @@ -3042,9 +3044,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, |
1293 | |
1294 | /* This is a hint as to how much should be linear. */ |
1295 | vnet_hdr.hdr_len = |
1296 | - __cpu_to_virtio16(false, skb_headlen(skb)); |
1297 | + __cpu_to_virtio16(vio_le(), skb_headlen(skb)); |
1298 | vnet_hdr.gso_size = |
1299 | - __cpu_to_virtio16(false, sinfo->gso_size); |
1300 | + __cpu_to_virtio16(vio_le(), sinfo->gso_size); |
1301 | if (sinfo->gso_type & SKB_GSO_TCPV4) |
1302 | vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
1303 | else if (sinfo->gso_type & SKB_GSO_TCPV6) |
1304 | @@ -3062,9 +3064,9 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, |
1305 | |
1306 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1307 | vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
1308 | - vnet_hdr.csum_start = __cpu_to_virtio16(false, |
1309 | + vnet_hdr.csum_start = __cpu_to_virtio16(vio_le(), |
1310 | skb_checksum_start_offset(skb)); |
1311 | - vnet_hdr.csum_offset = __cpu_to_virtio16(false, |
1312 | + vnet_hdr.csum_offset = __cpu_to_virtio16(vio_le(), |
1313 | skb->csum_offset); |
1314 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
1315 | vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID; |
1316 | diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c |
1317 | index 715e01e5910a..f23a3b68bba6 100644 |
1318 | --- a/net/sched/cls_fw.c |
1319 | +++ b/net/sched/cls_fw.c |
1320 | @@ -33,7 +33,6 @@ |
1321 | |
1322 | struct fw_head { |
1323 | u32 mask; |
1324 | - bool mask_set; |
1325 | struct fw_filter __rcu *ht[HTSIZE]; |
1326 | struct rcu_head rcu; |
1327 | }; |
1328 | @@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
1329 | } |
1330 | } |
1331 | } else { |
1332 | - /* old method */ |
1333 | + /* Old method: classify the packet using its skb mark. */ |
1334 | if (id && (TC_H_MAJ(id) == 0 || |
1335 | !(TC_H_MAJ(id ^ tp->q->handle)))) { |
1336 | res->classid = id; |
1337 | @@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle) |
1338 | |
1339 | static int fw_init(struct tcf_proto *tp) |
1340 | { |
1341 | - struct fw_head *head; |
1342 | - |
1343 | - head = kzalloc(sizeof(struct fw_head), GFP_KERNEL); |
1344 | - if (head == NULL) |
1345 | - return -ENOBUFS; |
1346 | - |
1347 | - head->mask_set = false; |
1348 | - rcu_assign_pointer(tp->root, head); |
1349 | + /* We don't allocate fw_head here, because in the old method |
1350 | + * we don't need it at all. |
1351 | + */ |
1352 | return 0; |
1353 | } |
1354 | |
1355 | @@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, |
1356 | int err; |
1357 | |
1358 | if (!opt) |
1359 | - return handle ? -EINVAL : 0; |
1360 | + return handle ? -EINVAL : 0; /* Succeed if it is old method. */ |
1361 | |
1362 | err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy); |
1363 | if (err < 0) |
1364 | @@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, |
1365 | if (!handle) |
1366 | return -EINVAL; |
1367 | |
1368 | - if (!head->mask_set) { |
1369 | - head->mask = 0xFFFFFFFF; |
1370 | + if (!head) { |
1371 | + u32 mask = 0xFFFFFFFF; |
1372 | if (tb[TCA_FW_MASK]) |
1373 | - head->mask = nla_get_u32(tb[TCA_FW_MASK]); |
1374 | - head->mask_set = true; |
1375 | + mask = nla_get_u32(tb[TCA_FW_MASK]); |
1376 | + |
1377 | + head = kzalloc(sizeof(*head), GFP_KERNEL); |
1378 | + if (!head) |
1379 | + return -ENOBUFS; |
1380 | + head->mask = mask; |
1381 | + |
1382 | + rcu_assign_pointer(tp->root, head); |
1383 | } |
1384 | |
1385 | f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL); |
1386 | diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c |
1387 | index 59e80356672b..3ac604f96da0 100644 |
1388 | --- a/net/sctp/protocol.c |
1389 | +++ b/net/sctp/protocol.c |
1390 | @@ -1166,7 +1166,7 @@ static void sctp_v4_del_protocol(void) |
1391 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); |
1392 | } |
1393 | |
1394 | -static int __net_init sctp_net_init(struct net *net) |
1395 | +static int __net_init sctp_defaults_init(struct net *net) |
1396 | { |
1397 | int status; |
1398 | |
1399 | @@ -1259,12 +1259,6 @@ static int __net_init sctp_net_init(struct net *net) |
1400 | |
1401 | sctp_dbg_objcnt_init(net); |
1402 | |
1403 | - /* Initialize the control inode/socket for handling OOTB packets. */ |
1404 | - if ((status = sctp_ctl_sock_init(net))) { |
1405 | - pr_err("Failed to initialize the SCTP control sock\n"); |
1406 | - goto err_ctl_sock_init; |
1407 | - } |
1408 | - |
1409 | /* Initialize the local address list. */ |
1410 | INIT_LIST_HEAD(&net->sctp.local_addr_list); |
1411 | spin_lock_init(&net->sctp.local_addr_lock); |
1412 | @@ -1280,9 +1274,6 @@ static int __net_init sctp_net_init(struct net *net) |
1413 | |
1414 | return 0; |
1415 | |
1416 | -err_ctl_sock_init: |
1417 | - sctp_dbg_objcnt_exit(net); |
1418 | - sctp_proc_exit(net); |
1419 | err_init_proc: |
1420 | cleanup_sctp_mibs(net); |
1421 | err_init_mibs: |
1422 | @@ -1291,15 +1282,12 @@ err_sysctl_register: |
1423 | return status; |
1424 | } |
1425 | |
1426 | -static void __net_exit sctp_net_exit(struct net *net) |
1427 | +static void __net_exit sctp_defaults_exit(struct net *net) |
1428 | { |
1429 | /* Free the local address list */ |
1430 | sctp_free_addr_wq(net); |
1431 | sctp_free_local_addr_list(net); |
1432 | |
1433 | - /* Free the control endpoint. */ |
1434 | - inet_ctl_sock_destroy(net->sctp.ctl_sock); |
1435 | - |
1436 | sctp_dbg_objcnt_exit(net); |
1437 | |
1438 | sctp_proc_exit(net); |
1439 | @@ -1307,9 +1295,32 @@ static void __net_exit sctp_net_exit(struct net *net) |
1440 | sctp_sysctl_net_unregister(net); |
1441 | } |
1442 | |
1443 | -static struct pernet_operations sctp_net_ops = { |
1444 | - .init = sctp_net_init, |
1445 | - .exit = sctp_net_exit, |
1446 | +static struct pernet_operations sctp_defaults_ops = { |
1447 | + .init = sctp_defaults_init, |
1448 | + .exit = sctp_defaults_exit, |
1449 | +}; |
1450 | + |
1451 | +static int __net_init sctp_ctrlsock_init(struct net *net) |
1452 | +{ |
1453 | + int status; |
1454 | + |
1455 | + /* Initialize the control inode/socket for handling OOTB packets. */ |
1456 | + status = sctp_ctl_sock_init(net); |
1457 | + if (status) |
1458 | + pr_err("Failed to initialize the SCTP control sock\n"); |
1459 | + |
1460 | + return status; |
1461 | +} |
1462 | + |
1463 | +static void __net_init sctp_ctrlsock_exit(struct net *net) |
1464 | +{ |
1465 | + /* Free the control endpoint. */ |
1466 | + inet_ctl_sock_destroy(net->sctp.ctl_sock); |
1467 | +} |
1468 | + |
1469 | +static struct pernet_operations sctp_ctrlsock_ops = { |
1470 | + .init = sctp_ctrlsock_init, |
1471 | + .exit = sctp_ctrlsock_exit, |
1472 | }; |
1473 | |
1474 | /* Initialize the universe into something sensible. */ |
1475 | @@ -1442,8 +1453,11 @@ static __init int sctp_init(void) |
1476 | sctp_v4_pf_init(); |
1477 | sctp_v6_pf_init(); |
1478 | |
1479 | - status = sctp_v4_protosw_init(); |
1480 | + status = register_pernet_subsys(&sctp_defaults_ops); |
1481 | + if (status) |
1482 | + goto err_register_defaults; |
1483 | |
1484 | + status = sctp_v4_protosw_init(); |
1485 | if (status) |
1486 | goto err_protosw_init; |
1487 | |
1488 | @@ -1451,9 +1465,9 @@ static __init int sctp_init(void) |
1489 | if (status) |
1490 | goto err_v6_protosw_init; |
1491 | |
1492 | - status = register_pernet_subsys(&sctp_net_ops); |
1493 | + status = register_pernet_subsys(&sctp_ctrlsock_ops); |
1494 | if (status) |
1495 | - goto err_register_pernet_subsys; |
1496 | + goto err_register_ctrlsock; |
1497 | |
1498 | status = sctp_v4_add_protocol(); |
1499 | if (status) |
1500 | @@ -1469,12 +1483,14 @@ out: |
1501 | err_v6_add_protocol: |
1502 | sctp_v4_del_protocol(); |
1503 | err_add_protocol: |
1504 | - unregister_pernet_subsys(&sctp_net_ops); |
1505 | -err_register_pernet_subsys: |
1506 | + unregister_pernet_subsys(&sctp_ctrlsock_ops); |
1507 | +err_register_ctrlsock: |
1508 | sctp_v6_protosw_exit(); |
1509 | err_v6_protosw_init: |
1510 | sctp_v4_protosw_exit(); |
1511 | err_protosw_init: |
1512 | + unregister_pernet_subsys(&sctp_defaults_ops); |
1513 | +err_register_defaults: |
1514 | sctp_v4_pf_exit(); |
1515 | sctp_v6_pf_exit(); |
1516 | sctp_sysctl_unregister(); |
1517 | @@ -1507,12 +1523,14 @@ static __exit void sctp_exit(void) |
1518 | sctp_v6_del_protocol(); |
1519 | sctp_v4_del_protocol(); |
1520 | |
1521 | - unregister_pernet_subsys(&sctp_net_ops); |
1522 | + unregister_pernet_subsys(&sctp_ctrlsock_ops); |
1523 | |
1524 | /* Free protosw registrations */ |
1525 | sctp_v6_protosw_exit(); |
1526 | sctp_v4_protosw_exit(); |
1527 | |
1528 | + unregister_pernet_subsys(&sctp_defaults_ops); |
1529 | + |
1530 | /* Unregister with socket layer. */ |
1531 | sctp_v6_pf_exit(); |
1532 | sctp_v4_pf_exit(); |