Magellan Linux

Contents of /trunk/kernel-alx/patches-4.1/0109-4.1.10-all-fixes.patch

Parent Directory | Revision Log


Revision 2748 - (show annotations) (download)
Mon Jan 11 12:00:45 2016 UTC (8 years, 3 months ago) by niro
File size: 40107 byte(s)
-linux-4.1 patches up to 4.1.15
1 diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
2 index 41b3f3f864e8..5d88f37480b6 100644
3 --- a/Documentation/devicetree/bindings/net/ethernet.txt
4 +++ b/Documentation/devicetree/bindings/net/ethernet.txt
5 @@ -25,7 +25,11 @@ The following properties are common to the Ethernet controllers:
6 flow control thresholds.
7 - tx-fifo-depth: the size of the controller's transmit fifo in bytes. This
8 is used for components that can have configurable fifo sizes.
9 +- managed: string, specifies the PHY management type. Supported values are:
10 + "auto", "in-band-status". "auto" is the default, it usess MDIO for
11 + management if fixed-link is not specified.
12
13 Child nodes of the Ethernet controller are typically the individual PHY devices
14 connected via the MDIO bus (sometimes the MDIO bus controller is separate).
15 They are described in the phy.txt file in this same directory.
16 +For non-MDIO PHY management see fixed-link.txt.
17 diff --git a/Makefile b/Makefile
18 index e071176b2ce6..d02f16b510dc 100644
19 --- a/Makefile
20 +++ b/Makefile
21 @@ -1,6 +1,6 @@
22 VERSION = 4
23 PATCHLEVEL = 1
24 -SUBLEVEL = 9
25 +SUBLEVEL = 10
26 EXTRAVERSION =
27 NAME = Series 4800
28
29 diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
30 index f1ff39a3d1c1..54d946a9eee6 100644
31 --- a/drivers/block/zram/zcomp.c
32 +++ b/drivers/block/zram/zcomp.c
33 @@ -325,12 +325,14 @@ void zcomp_destroy(struct zcomp *comp)
34 * allocate new zcomp and initialize it. return compressing
35 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
36 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
37 - * case of allocation error.
38 + * case of allocation error, or any other error potentially
39 + * returned by functions zcomp_strm_{multi,single}_create.
40 */
41 struct zcomp *zcomp_create(const char *compress, int max_strm)
42 {
43 struct zcomp *comp;
44 struct zcomp_backend *backend;
45 + int error;
46
47 backend = find_backend(compress);
48 if (!backend)
49 @@ -342,12 +344,12 @@ struct zcomp *zcomp_create(const char *compress, int max_strm)
50
51 comp->backend = backend;
52 if (max_strm > 1)
53 - zcomp_strm_multi_create(comp, max_strm);
54 + error = zcomp_strm_multi_create(comp, max_strm);
55 else
56 - zcomp_strm_single_create(comp);
57 - if (!comp->stream) {
58 + error = zcomp_strm_single_create(comp);
59 + if (error) {
60 kfree(comp);
61 - return ERR_PTR(-ENOMEM);
62 + return ERR_PTR(error);
63 }
64 return comp;
65 }
66 diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
67 index cedb572bf25a..db9ebbc1a732 100644
68 --- a/drivers/net/dsa/bcm_sf2.c
69 +++ b/drivers/net/dsa/bcm_sf2.c
70 @@ -417,7 +417,7 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
71 core_writel(priv, port, CORE_FAST_AGE_PORT);
72
73 reg = core_readl(priv, CORE_FAST_AGE_CTRL);
74 - reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
75 + reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
76 core_writel(priv, reg, CORE_FAST_AGE_CTRL);
77
78 do {
79 @@ -431,6 +431,8 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch *ds, int port)
80 if (!timeout)
81 return -ETIMEDOUT;
82
83 + core_writel(priv, 0, CORE_FAST_AGE_CTRL);
84 +
85 return 0;
86 }
87
88 @@ -506,7 +508,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
89 u32 reg;
90
91 reg = core_readl(priv, CORE_G_PCTL_PORT(port));
92 - cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
93 + cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
94
95 switch (state) {
96 case BR_STATE_DISABLED:
97 @@ -530,10 +532,12 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
98 }
99
100 /* Fast-age ARL entries if we are moving a port from Learning or
101 - * Forwarding state to Disabled, Blocking or Listening state
102 + * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
103 + * state (hw_state)
104 */
105 if (cur_hw_state != hw_state) {
106 - if (cur_hw_state & 4 && !(hw_state & 4)) {
107 + if (cur_hw_state >= G_MISTP_LEARN_STATE &&
108 + hw_state <= G_MISTP_LISTEN_STATE) {
109 ret = bcm_sf2_sw_fast_age_port(ds, port);
110 if (ret) {
111 pr_err("%s: fast-ageing failed\n", __func__);
112 @@ -889,15 +893,11 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
113 struct fixed_phy_status *status)
114 {
115 struct bcm_sf2_priv *priv = ds_to_priv(ds);
116 - u32 duplex, pause, speed;
117 + u32 duplex, pause;
118 u32 reg;
119
120 duplex = core_readl(priv, CORE_DUPSTS);
121 pause = core_readl(priv, CORE_PAUSESTS);
122 - speed = core_readl(priv, CORE_SPDSTS);
123 -
124 - speed >>= (port * SPDSTS_SHIFT);
125 - speed &= SPDSTS_MASK;
126
127 status->link = 0;
128
129 @@ -925,18 +925,6 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
130 reg &= ~LINK_STS;
131 core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
132
133 - switch (speed) {
134 - case SPDSTS_10:
135 - status->speed = SPEED_10;
136 - break;
137 - case SPDSTS_100:
138 - status->speed = SPEED_100;
139 - break;
140 - case SPDSTS_1000:
141 - status->speed = SPEED_1000;
142 - break;
143 - }
144 -
145 if ((pause & (1 << port)) &&
146 (pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
147 status->asym_pause = 1;
148 diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
149 index 22e2ebf31333..789d7b7737da 100644
150 --- a/drivers/net/dsa/bcm_sf2.h
151 +++ b/drivers/net/dsa/bcm_sf2.h
152 @@ -112,8 +112,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
153 spin_unlock(&priv->indir_lock); \
154 return (u64)indir << 32 | dir; \
155 } \
156 -static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off, \
157 - u64 val) \
158 +static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
159 + u32 off) \
160 { \
161 spin_lock(&priv->indir_lock); \
162 reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
163 diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
164 index da48e66377b5..8207877d6237 100644
165 --- a/drivers/net/ethernet/altera/altera_tse_main.c
166 +++ b/drivers/net/ethernet/altera/altera_tse_main.c
167 @@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
168
169 if (rxcomplete < budget) {
170
171 - napi_gro_flush(napi, false);
172 - __napi_complete(napi);
173 + napi_complete(napi);
174
175 netdev_dbg(priv->dev,
176 "NAPI Complete, did %d packets with budget %d\n",
177 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
178 index 66d47e448e4d..570390b5cd42 100644
179 --- a/drivers/net/ethernet/freescale/fec_main.c
180 +++ b/drivers/net/ethernet/freescale/fec_main.c
181 @@ -1396,6 +1396,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
182 if ((status & BD_ENET_RX_LAST) == 0)
183 netdev_err(ndev, "rcv is not +last\n");
184
185 + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
186
187 /* Check for errors. */
188 if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
189 diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
190 index 74d0389bf233..4d608f0117cd 100644
191 --- a/drivers/net/ethernet/marvell/mvneta.c
192 +++ b/drivers/net/ethernet/marvell/mvneta.c
193 @@ -3029,8 +3029,8 @@ static int mvneta_probe(struct platform_device *pdev)
194 const char *dt_mac_addr;
195 char hw_mac_addr[ETH_ALEN];
196 const char *mac_from;
197 + const char *managed;
198 int phy_mode;
199 - int fixed_phy = 0;
200 int err;
201
202 /* Our multiqueue support is not complete, so for now, only
203 @@ -3064,7 +3064,6 @@ static int mvneta_probe(struct platform_device *pdev)
204 dev_err(&pdev->dev, "cannot register fixed PHY\n");
205 goto err_free_irq;
206 }
207 - fixed_phy = 1;
208
209 /* In the case of a fixed PHY, the DT node associated
210 * to the PHY is the Ethernet MAC DT node.
211 @@ -3088,8 +3087,10 @@ static int mvneta_probe(struct platform_device *pdev)
212 pp = netdev_priv(dev);
213 pp->phy_node = phy_node;
214 pp->phy_interface = phy_mode;
215 - pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) &&
216 - fixed_phy;
217 +
218 + err = of_property_read_string(dn, "managed", &managed);
219 + pp->use_inband_status = (err == 0 &&
220 + strcmp(managed, "in-band-status") == 0);
221
222 pp->clk = devm_clk_get(&pdev->dev, NULL);
223 if (IS_ERR(pp->clk)) {
224 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
225 index eab4e080ebd2..80aac20104de 100644
226 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
227 +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
228 @@ -1256,8 +1256,6 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
229 rss_context->hash_fn = MLX4_RSS_HASH_TOP;
230 memcpy(rss_context->rss_key, priv->rss_key,
231 MLX4_EN_RSS_KEY_SIZE);
232 - netdev_rss_key_fill(rss_context->rss_key,
233 - MLX4_EN_RSS_KEY_SIZE);
234 } else {
235 en_err(priv, "Unknown RSS hash function requested\n");
236 err = -EINVAL;
237 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
238 index 8c350c5d54ad..58858c5589db 100644
239 --- a/drivers/net/macvtap.c
240 +++ b/drivers/net/macvtap.c
241 @@ -1054,10 +1054,10 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
242 return 0;
243
244 case TUNSETSNDBUF:
245 - if (get_user(u, up))
246 + if (get_user(s, sp))
247 return -EFAULT;
248
249 - q->sk.sk_sndbuf = u;
250 + q->sk.sk_sndbuf = s;
251 return 0;
252
253 case TUNGETVNETHDRSZ:
254 diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c
255 index 1960b46add65..479b93f9581c 100644
256 --- a/drivers/net/phy/fixed_phy.c
257 +++ b/drivers/net/phy/fixed_phy.c
258 @@ -52,6 +52,10 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
259 u16 lpagb = 0;
260 u16 lpa = 0;
261
262 + if (!fp->status.link)
263 + goto done;
264 + bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
265 +
266 if (fp->status.duplex) {
267 bmcr |= BMCR_FULLDPLX;
268
269 @@ -96,15 +100,13 @@ static int fixed_phy_update_regs(struct fixed_phy *fp)
270 }
271 }
272
273 - if (fp->status.link)
274 - bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE;
275 -
276 if (fp->status.pause)
277 lpa |= LPA_PAUSE_CAP;
278
279 if (fp->status.asym_pause)
280 lpa |= LPA_PAUSE_ASYM;
281
282 +done:
283 fp->regs[MII_PHYSID1] = 0;
284 fp->regs[MII_PHYSID2] = 0;
285
286 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
287 index 3c86b107275a..e0498571ae26 100644
288 --- a/drivers/net/usb/usbnet.c
289 +++ b/drivers/net/usb/usbnet.c
290 @@ -778,7 +778,7 @@ int usbnet_stop (struct net_device *net)
291 {
292 struct usbnet *dev = netdev_priv(net);
293 struct driver_info *info = dev->driver_info;
294 - int retval, pm;
295 + int retval, pm, mpn;
296
297 clear_bit(EVENT_DEV_OPEN, &dev->flags);
298 netif_stop_queue (net);
299 @@ -809,6 +809,8 @@ int usbnet_stop (struct net_device *net)
300
301 usbnet_purge_paused_rxq(dev);
302
303 + mpn = !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags);
304 +
305 /* deferred work (task, timer, softirq) must also stop.
306 * can't flush_scheduled_work() until we drop rtnl (later),
307 * else workers could deadlock; so make workers a NOP.
308 @@ -819,8 +821,7 @@ int usbnet_stop (struct net_device *net)
309 if (!pm)
310 usb_autopm_put_interface(dev->intf);
311
312 - if (info->manage_power &&
313 - !test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
314 + if (info->manage_power && mpn)
315 info->manage_power(dev, 0);
316 else
317 usb_autopm_put_interface(dev->intf);
318 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
319 index 21a0fbf1ed94..0085b8df83e2 100644
320 --- a/drivers/net/vxlan.c
321 +++ b/drivers/net/vxlan.c
322 @@ -2212,6 +2212,8 @@ static int vxlan_open(struct net_device *dev)
323
324 if (vxlan_addr_multicast(&vxlan->default_dst.remote_ip)) {
325 ret = vxlan_igmp_join(vxlan);
326 + if (ret == -EADDRINUSE)
327 + ret = 0;
328 if (ret) {
329 vxlan_sock_release(vs);
330 return ret;
331 diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
332 index 0c064485d1c2..bec8ec2b31f6 100644
333 --- a/drivers/of/of_mdio.c
334 +++ b/drivers/of/of_mdio.c
335 @@ -263,7 +263,8 @@ EXPORT_SYMBOL(of_phy_attach);
336 bool of_phy_is_fixed_link(struct device_node *np)
337 {
338 struct device_node *dn;
339 - int len;
340 + int len, err;
341 + const char *managed;
342
343 /* New binding */
344 dn = of_get_child_by_name(np, "fixed-link");
345 @@ -272,6 +273,10 @@ bool of_phy_is_fixed_link(struct device_node *np)
346 return true;
347 }
348
349 + err = of_property_read_string(np, "managed", &managed);
350 + if (err == 0 && strcmp(managed, "auto") != 0)
351 + return true;
352 +
353 /* Old binding */
354 if (of_get_property(np, "fixed-link", &len) &&
355 len == (5 * sizeof(__be32)))
356 @@ -286,8 +291,18 @@ int of_phy_register_fixed_link(struct device_node *np)
357 struct fixed_phy_status status = {};
358 struct device_node *fixed_link_node;
359 const __be32 *fixed_link_prop;
360 - int len;
361 + int len, err;
362 struct phy_device *phy;
363 + const char *managed;
364 +
365 + err = of_property_read_string(np, "managed", &managed);
366 + if (err == 0) {
367 + if (strcmp(managed, "in-band-status") == 0) {
368 + /* status is zeroed, namely its .link member */
369 + phy = fixed_phy_register(PHY_POLL, &status, np);
370 + return IS_ERR(phy) ? PTR_ERR(phy) : 0;
371 + }
372 + }
373
374 /* New binding */
375 fixed_link_node = of_get_child_by_name(np, "fixed-link");
376 diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
377 index 06697315a088..fb4dd7b3ee71 100644
378 --- a/drivers/platform/x86/hp-wmi.c
379 +++ b/drivers/platform/x86/hp-wmi.c
380 @@ -54,8 +54,9 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4");
381 #define HPWMI_HARDWARE_QUERY 0x4
382 #define HPWMI_WIRELESS_QUERY 0x5
383 #define HPWMI_BIOS_QUERY 0x9
384 +#define HPWMI_FEATURE_QUERY 0xb
385 #define HPWMI_HOTKEY_QUERY 0xc
386 -#define HPWMI_FEATURE_QUERY 0xd
387 +#define HPWMI_FEATURE2_QUERY 0xd
388 #define HPWMI_WIRELESS2_QUERY 0x1b
389 #define HPWMI_POSTCODEERROR_QUERY 0x2a
390
391 @@ -295,25 +296,33 @@ static int hp_wmi_tablet_state(void)
392 return (state & 0x4) ? 1 : 0;
393 }
394
395 -static int __init hp_wmi_bios_2009_later(void)
396 +static int __init hp_wmi_bios_2008_later(void)
397 {
398 int state = 0;
399 int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, 0, &state,
400 sizeof(state), sizeof(state));
401 - if (ret)
402 - return ret;
403 + if (!ret)
404 + return 1;
405
406 - return (state & 0x10) ? 1 : 0;
407 + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
408 }
409
410 -static int hp_wmi_enable_hotkeys(void)
411 +static int __init hp_wmi_bios_2009_later(void)
412 {
413 - int ret;
414 - int query = 0x6e;
415 + int state = 0;
416 + int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, 0, &state,
417 + sizeof(state), sizeof(state));
418 + if (!ret)
419 + return 1;
420
421 - ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &query, sizeof(query),
422 - 0);
423 + return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO;
424 +}
425
426 +static int __init hp_wmi_enable_hotkeys(void)
427 +{
428 + int value = 0x6e;
429 + int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, 1, &value,
430 + sizeof(value), 0);
431 if (ret)
432 return -EINVAL;
433 return 0;
434 @@ -663,7 +672,7 @@ static int __init hp_wmi_input_setup(void)
435 hp_wmi_tablet_state());
436 input_sync(hp_wmi_input_dev);
437
438 - if (hp_wmi_bios_2009_later() == 4)
439 + if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later())
440 hp_wmi_enable_hotkeys();
441
442 status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL);
443 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
444 index ff667e18b2d6..9ba383f5b0c4 100644
445 --- a/net/bridge/br_multicast.c
446 +++ b/net/bridge/br_multicast.c
447 @@ -980,7 +980,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
448
449 ih = igmpv3_report_hdr(skb);
450 num = ntohs(ih->ngrec);
451 - len = sizeof(*ih);
452 + len = skb_transport_offset(skb) + sizeof(*ih);
453
454 for (i = 0; i < num; i++) {
455 len += sizeof(*grec);
456 @@ -1035,7 +1035,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
457
458 icmp6h = icmp6_hdr(skb);
459 num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
460 - len = sizeof(*icmp6h);
461 + len = skb_transport_offset(skb) + sizeof(*icmp6h);
462
463 for (i = 0; i < num; i++) {
464 __be16 *nsrcs, _nsrcs;
465 diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
466 index 9a12668f7d62..0ad144fb0c79 100644
467 --- a/net/core/fib_rules.c
468 +++ b/net/core/fib_rules.c
469 @@ -615,15 +615,17 @@ static int dump_rules(struct sk_buff *skb, struct netlink_callback *cb,
470 {
471 int idx = 0;
472 struct fib_rule *rule;
473 + int err = 0;
474
475 rcu_read_lock();
476 list_for_each_entry_rcu(rule, &ops->rules_list, list) {
477 if (idx < cb->args[1])
478 goto skip;
479
480 - if (fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
481 - cb->nlh->nlmsg_seq, RTM_NEWRULE,
482 - NLM_F_MULTI, ops) < 0)
483 + err = fib_nl_fill_rule(skb, rule, NETLINK_CB(cb->skb).portid,
484 + cb->nlh->nlmsg_seq, RTM_NEWRULE,
485 + NLM_F_MULTI, ops);
486 + if (err)
487 break;
488 skip:
489 idx++;
490 @@ -632,7 +634,7 @@ skip:
491 cb->args[1] = idx;
492 rules_ops_put(ops);
493
494 - return skb->len;
495 + return err;
496 }
497
498 static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
499 @@ -648,7 +650,9 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
500 if (ops == NULL)
501 return -EAFNOSUPPORT;
502
503 - return dump_rules(skb, cb, ops);
504 + dump_rules(skb, cb, ops);
505 +
506 + return skb->len;
507 }
508
509 rcu_read_lock();
510 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
511 index 74dddf84adcd..556ecf96a385 100644
512 --- a/net/core/sock_diag.c
513 +++ b/net/core/sock_diag.c
514 @@ -86,6 +86,9 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
515 goto out;
516
517 fprog = filter->prog->orig_prog;
518 + if (!fprog)
519 + goto out;
520 +
521 flen = bpf_classic_proglen(fprog);
522
523 attr = nla_reserve(skb, attrtype, flen);
524 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
525 index a369e8a70b2c..986440b24978 100644
526 --- a/net/ipv4/tcp_output.c
527 +++ b/net/ipv4/tcp_output.c
528 @@ -2893,6 +2893,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
529 skb_reserve(skb, MAX_TCP_HEADER);
530 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
531 TCPHDR_ACK | TCPHDR_RST);
532 + skb_mstamp_get(&skb->skb_mstamp);
533 /* Send it off. */
534 if (tcp_transmit_skb(sk, skb, 0, priority))
535 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
536 diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
537 index 447a7fbd1bb6..f5e2ba1c18bf 100644
538 --- a/net/ipv6/exthdrs_offload.c
539 +++ b/net/ipv6/exthdrs_offload.c
540 @@ -36,6 +36,6 @@ out:
541 return ret;
542
543 out_rt:
544 - inet_del_offload(&rthdr_offload, IPPROTO_ROUTING);
545 + inet6_del_offload(&rthdr_offload, IPPROTO_ROUTING);
546 goto out;
547 }
548 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
549 index a38d3ac0f18f..69f4f689f06a 100644
550 --- a/net/ipv6/ip6_gre.c
551 +++ b/net/ipv6/ip6_gre.c
552 @@ -361,6 +361,7 @@ static void ip6gre_tunnel_uninit(struct net_device *dev)
553 struct ip6gre_net *ign = net_generic(t->net, ip6gre_net_id);
554
555 ip6gre_tunnel_unlink(ign, t);
556 + ip6_tnl_dst_reset(t);
557 dev_put(dev);
558 }
559
560 diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
561 index 74ceb73c1c9a..5f36266b1f5e 100644
562 --- a/net/ipv6/ip6mr.c
563 +++ b/net/ipv6/ip6mr.c
564 @@ -550,7 +550,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
565
566 if (it->cache == &mrt->mfc6_unres_queue)
567 spin_unlock_bh(&mfc_unres_lock);
568 - else if (it->cache == mrt->mfc6_cache_array)
569 + else if (it->cache == &mrt->mfc6_cache_array[it->ct])
570 read_unlock(&mrt_lock);
571 }
572
573 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
574 index c73ae5039e46..f371fefa7fdc 100644
575 --- a/net/ipv6/route.c
576 +++ b/net/ipv6/route.c
577 @@ -1515,7 +1515,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
578 return -EINVAL;
579 }
580
581 -int ip6_route_add(struct fib6_config *cfg)
582 +int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
583 {
584 int err;
585 struct net *net = cfg->fc_nlinfo.nl_net;
586 @@ -1523,7 +1523,6 @@ int ip6_route_add(struct fib6_config *cfg)
587 struct net_device *dev = NULL;
588 struct inet6_dev *idev = NULL;
589 struct fib6_table *table;
590 - struct mx6_config mxc = { .mx = NULL, };
591 int addr_type;
592
593 if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
594 @@ -1719,6 +1718,32 @@ install_route:
595
596 cfg->fc_nlinfo.nl_net = dev_net(dev);
597
598 + *rt_ret = rt;
599 +
600 + return 0;
601 +out:
602 + if (dev)
603 + dev_put(dev);
604 + if (idev)
605 + in6_dev_put(idev);
606 + if (rt)
607 + dst_free(&rt->dst);
608 +
609 + *rt_ret = NULL;
610 +
611 + return err;
612 +}
613 +
614 +int ip6_route_add(struct fib6_config *cfg)
615 +{
616 + struct mx6_config mxc = { .mx = NULL, };
617 + struct rt6_info *rt = NULL;
618 + int err;
619 +
620 + err = ip6_route_info_create(cfg, &rt);
621 + if (err)
622 + goto out;
623 +
624 err = ip6_convert_metrics(&mxc, cfg);
625 if (err)
626 goto out;
627 @@ -1726,14 +1751,12 @@ install_route:
628 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
629
630 kfree(mxc.mx);
631 +
632 return err;
633 out:
634 - if (dev)
635 - dev_put(dev);
636 - if (idev)
637 - in6_dev_put(idev);
638 if (rt)
639 dst_free(&rt->dst);
640 +
641 return err;
642 }
643
644 @@ -2496,19 +2519,78 @@ errout:
645 return err;
646 }
647
648 -static int ip6_route_multipath(struct fib6_config *cfg, int add)
649 +struct rt6_nh {
650 + struct rt6_info *rt6_info;
651 + struct fib6_config r_cfg;
652 + struct mx6_config mxc;
653 + struct list_head next;
654 +};
655 +
656 +static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
657 +{
658 + struct rt6_nh *nh;
659 +
660 + list_for_each_entry(nh, rt6_nh_list, next) {
661 + pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
662 + &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
663 + nh->r_cfg.fc_ifindex);
664 + }
665 +}
666 +
667 +static int ip6_route_info_append(struct list_head *rt6_nh_list,
668 + struct rt6_info *rt, struct fib6_config *r_cfg)
669 +{
670 + struct rt6_nh *nh;
671 + struct rt6_info *rtnh;
672 + int err = -EEXIST;
673 +
674 + list_for_each_entry(nh, rt6_nh_list, next) {
675 + /* check if rt6_info already exists */
676 + rtnh = nh->rt6_info;
677 +
678 + if (rtnh->dst.dev == rt->dst.dev &&
679 + rtnh->rt6i_idev == rt->rt6i_idev &&
680 + ipv6_addr_equal(&rtnh->rt6i_gateway,
681 + &rt->rt6i_gateway))
682 + return err;
683 + }
684 +
685 + nh = kzalloc(sizeof(*nh), GFP_KERNEL);
686 + if (!nh)
687 + return -ENOMEM;
688 + nh->rt6_info = rt;
689 + err = ip6_convert_metrics(&nh->mxc, r_cfg);
690 + if (err) {
691 + kfree(nh);
692 + return err;
693 + }
694 + memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
695 + list_add_tail(&nh->next, rt6_nh_list);
696 +
697 + return 0;
698 +}
699 +
700 +static int ip6_route_multipath_add(struct fib6_config *cfg)
701 {
702 struct fib6_config r_cfg;
703 struct rtnexthop *rtnh;
704 + struct rt6_info *rt;
705 + struct rt6_nh *err_nh;
706 + struct rt6_nh *nh, *nh_safe;
707 int remaining;
708 int attrlen;
709 - int err = 0, last_err = 0;
710 + int err = 1;
711 + int nhn = 0;
712 + int replace = (cfg->fc_nlinfo.nlh &&
713 + (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
714 + LIST_HEAD(rt6_nh_list);
715
716 remaining = cfg->fc_mp_len;
717 -beginning:
718 rtnh = (struct rtnexthop *)cfg->fc_mp;
719
720 - /* Parse a Multipath Entry */
721 + /* Parse a Multipath Entry and build a list (rt6_nh_list) of
722 + * rt6_info structs per nexthop
723 + */
724 while (rtnh_ok(rtnh, remaining)) {
725 memcpy(&r_cfg, cfg, sizeof(*cfg));
726 if (rtnh->rtnh_ifindex)
727 @@ -2524,22 +2606,32 @@ beginning:
728 r_cfg.fc_flags |= RTF_GATEWAY;
729 }
730 }
731 - err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
732 +
733 + err = ip6_route_info_create(&r_cfg, &rt);
734 + if (err)
735 + goto cleanup;
736 +
737 + err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
738 if (err) {
739 - last_err = err;
740 - /* If we are trying to remove a route, do not stop the
741 - * loop when ip6_route_del() fails (because next hop is
742 - * already gone), we should try to remove all next hops.
743 - */
744 - if (add) {
745 - /* If add fails, we should try to delete all
746 - * next hops that have been already added.
747 - */
748 - add = 0;
749 - remaining = cfg->fc_mp_len - remaining;
750 - goto beginning;
751 - }
752 + dst_free(&rt->dst);
753 + goto cleanup;
754 + }
755 +
756 + rtnh = rtnh_next(rtnh, &remaining);
757 + }
758 +
759 + err_nh = NULL;
760 + list_for_each_entry(nh, &rt6_nh_list, next) {
761 + err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
762 + /* nh->rt6_info is used or freed at this point, reset to NULL*/
763 + nh->rt6_info = NULL;
764 + if (err) {
765 + if (replace && nhn)
766 + ip6_print_replace_route_err(&rt6_nh_list);
767 + err_nh = nh;
768 + goto add_errout;
769 }
770 +
771 /* Because each route is added like a single route we remove
772 * these flags after the first nexthop: if there is a collision,
773 * we have already failed to add the first nexthop:
774 @@ -2549,6 +2641,63 @@ beginning:
775 */
776 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
777 NLM_F_REPLACE);
778 + nhn++;
779 + }
780 +
781 + goto cleanup;
782 +
783 +add_errout:
784 + /* Delete routes that were already added */
785 + list_for_each_entry(nh, &rt6_nh_list, next) {
786 + if (err_nh == nh)
787 + break;
788 + ip6_route_del(&nh->r_cfg);
789 + }
790 +
791 +cleanup:
792 + list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
793 + if (nh->rt6_info)
794 + dst_free(&nh->rt6_info->dst);
795 + if (nh->mxc.mx)
796 + kfree(nh->mxc.mx);
797 + list_del(&nh->next);
798 + kfree(nh);
799 + }
800 +
801 + return err;
802 +}
803 +
804 +static int ip6_route_multipath_del(struct fib6_config *cfg)
805 +{
806 + struct fib6_config r_cfg;
807 + struct rtnexthop *rtnh;
808 + int remaining;
809 + int attrlen;
810 + int err = 1, last_err = 0;
811 +
812 + remaining = cfg->fc_mp_len;
813 + rtnh = (struct rtnexthop *)cfg->fc_mp;
814 +
815 + /* Parse a Multipath Entry */
816 + while (rtnh_ok(rtnh, remaining)) {
817 + memcpy(&r_cfg, cfg, sizeof(*cfg));
818 + if (rtnh->rtnh_ifindex)
819 + r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
820 +
821 + attrlen = rtnh_attrlen(rtnh);
822 + if (attrlen > 0) {
823 + struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
824 +
825 + nla = nla_find(attrs, attrlen, RTA_GATEWAY);
826 + if (nla) {
827 + nla_memcpy(&r_cfg.fc_gateway, nla, 16);
828 + r_cfg.fc_flags |= RTF_GATEWAY;
829 + }
830 + }
831 + err = ip6_route_del(&r_cfg);
832 + if (err)
833 + last_err = err;
834 +
835 rtnh = rtnh_next(rtnh, &remaining);
836 }
837
838 @@ -2565,7 +2714,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
839 return err;
840
841 if (cfg.fc_mp)
842 - return ip6_route_multipath(&cfg, 0);
843 + return ip6_route_multipath_del(&cfg);
844 else
845 return ip6_route_del(&cfg);
846 }
847 @@ -2580,7 +2729,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
848 return err;
849
850 if (cfg.fc_mp)
851 - return ip6_route_multipath(&cfg, 1);
852 + return ip6_route_multipath_add(&cfg);
853 else
854 return ip6_route_add(&cfg);
855 }
856 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
857 index 4856d975492d..980121e75d2e 100644
858 --- a/net/netlink/af_netlink.c
859 +++ b/net/netlink/af_netlink.c
860 @@ -123,6 +123,24 @@ static inline u32 netlink_group_mask(u32 group)
861 return group ? 1 << (group - 1) : 0;
862 }
863
864 +static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
865 + gfp_t gfp_mask)
866 +{
867 + unsigned int len = skb_end_offset(skb);
868 + struct sk_buff *new;
869 +
870 + new = alloc_skb(len, gfp_mask);
871 + if (new == NULL)
872 + return NULL;
873 +
874 + NETLINK_CB(new).portid = NETLINK_CB(skb).portid;
875 + NETLINK_CB(new).dst_group = NETLINK_CB(skb).dst_group;
876 + NETLINK_CB(new).creds = NETLINK_CB(skb).creds;
877 +
878 + memcpy(skb_put(new, len), skb->data, len);
879 + return new;
880 +}
881 +
882 int netlink_add_tap(struct netlink_tap *nt)
883 {
884 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
885 @@ -204,7 +222,11 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb,
886 int ret = -ENOMEM;
887
888 dev_hold(dev);
889 - nskb = skb_clone(skb, GFP_ATOMIC);
890 +
891 + if (netlink_skb_is_mmaped(skb) || is_vmalloc_addr(skb->head))
892 + nskb = netlink_to_full_skb(skb, GFP_ATOMIC);
893 + else
894 + nskb = skb_clone(skb, GFP_ATOMIC);
895 if (nskb) {
896 nskb->dev = dev;
897 nskb->protocol = htons((u16) sk->sk_protocol);
898 @@ -276,11 +298,6 @@ static void netlink_rcv_wake(struct sock *sk)
899 }
900
901 #ifdef CONFIG_NETLINK_MMAP
902 -static bool netlink_skb_is_mmaped(const struct sk_buff *skb)
903 -{
904 - return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
905 -}
906 -
907 static bool netlink_rx_is_mmaped(struct sock *sk)
908 {
909 return nlk_sk(sk)->rx_ring.pg_vec != NULL;
910 @@ -832,7 +849,6 @@ static void netlink_ring_set_copied(struct sock *sk, struct sk_buff *skb)
911 }
912
913 #else /* CONFIG_NETLINK_MMAP */
914 -#define netlink_skb_is_mmaped(skb) false
915 #define netlink_rx_is_mmaped(sk) false
916 #define netlink_tx_is_mmaped(sk) false
917 #define netlink_mmap sock_no_mmap
918 @@ -1080,8 +1096,8 @@ static int netlink_insert(struct sock *sk, u32 portid)
919
920 lock_sock(sk);
921
922 - err = -EBUSY;
923 - if (nlk_sk(sk)->portid)
924 + err = nlk_sk(sk)->portid == portid ? 0 : -EBUSY;
925 + if (nlk_sk(sk)->bound)
926 goto err;
927
928 err = -ENOMEM;
929 @@ -1101,10 +1117,13 @@ static int netlink_insert(struct sock *sk, u32 portid)
930 err = -EOVERFLOW;
931 if (err == -EEXIST)
932 err = -EADDRINUSE;
933 - nlk_sk(sk)->portid = 0;
934 sock_put(sk);
935 }
936
937 + /* We need to ensure that the socket is hashed and visible. */
938 + smp_wmb();
939 + nlk_sk(sk)->bound = portid;
940 +
941 err:
942 release_sock(sk);
943 return err;
944 @@ -1484,6 +1503,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
945 struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
946 int err;
947 long unsigned int groups = nladdr->nl_groups;
948 + bool bound;
949
950 if (addr_len < sizeof(struct sockaddr_nl))
951 return -EINVAL;
952 @@ -1500,9 +1520,14 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
953 return err;
954 }
955
956 - if (nlk->portid)
957 + bound = nlk->bound;
958 + if (bound) {
959 + /* Ensure nlk->portid is up-to-date. */
960 + smp_rmb();
961 +
962 if (nladdr->nl_pid != nlk->portid)
963 return -EINVAL;
964 + }
965
966 if (nlk->netlink_bind && groups) {
967 int group;
968 @@ -1518,7 +1543,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
969 }
970 }
971
972 - if (!nlk->portid) {
973 + /* No need for barriers here as we return to user-space without
974 + * using any of the bound attributes.
975 + */
976 + if (!bound) {
977 err = nladdr->nl_pid ?
978 netlink_insert(sk, nladdr->nl_pid) :
979 netlink_autobind(sock);
980 @@ -1566,7 +1594,10 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
981 !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
982 return -EPERM;
983
984 - if (!nlk->portid)
985 + /* No need for barriers here as we return to user-space without
986 + * using any of the bound attributes.
987 + */
988 + if (!nlk->bound)
989 err = netlink_autobind(sock);
990
991 if (err == 0) {
992 @@ -2323,10 +2354,13 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
993 dst_group = nlk->dst_group;
994 }
995
996 - if (!nlk->portid) {
997 + if (!nlk->bound) {
998 err = netlink_autobind(sock);
999 if (err)
1000 goto out;
1001 + } else {
1002 + /* Ensure nlk is hashed and visible. */
1003 + smp_rmb();
1004 }
1005
1006 /* It's a really convoluted way for userland to ask for mmaped
1007 diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
1008 index 89008405d6b4..14437d9b1965 100644
1009 --- a/net/netlink/af_netlink.h
1010 +++ b/net/netlink/af_netlink.h
1011 @@ -35,6 +35,7 @@ struct netlink_sock {
1012 unsigned long state;
1013 size_t max_recvmsg_len;
1014 wait_queue_head_t wait;
1015 + bool bound;
1016 bool cb_running;
1017 struct netlink_callback cb;
1018 struct mutex *cb_mutex;
1019 @@ -59,6 +60,15 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
1020 return container_of(sk, struct netlink_sock, sk);
1021 }
1022
1023 +static inline bool netlink_skb_is_mmaped(const struct sk_buff *skb)
1024 +{
1025 +#ifdef CONFIG_NETLINK_MMAP
1026 + return NETLINK_CB(skb).flags & NETLINK_SKB_MMAPED;
1027 +#else
1028 + return false;
1029 +#endif /* CONFIG_NETLINK_MMAP */
1030 +}
1031 +
1032 struct netlink_table {
1033 struct rhashtable hash;
1034 struct hlist_head mc_list;
1035 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
1036 index 096c6276e6b9..27e14962b504 100644
1037 --- a/net/openvswitch/datapath.c
1038 +++ b/net/openvswitch/datapath.c
1039 @@ -906,7 +906,7 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
1040 if (error)
1041 goto err_kfree_flow;
1042
1043 - ovs_flow_mask_key(&new_flow->key, &key, &mask);
1044 + ovs_flow_mask_key(&new_flow->key, &key, true, &mask);
1045
1046 /* Extract flow identifier. */
1047 error = ovs_nla_get_identifier(&new_flow->id, a[OVS_FLOW_ATTR_UFID],
1048 @@ -1033,7 +1033,7 @@ static struct sw_flow_actions *get_flow_actions(const struct nlattr *a,
1049 struct sw_flow_key masked_key;
1050 int error;
1051
1052 - ovs_flow_mask_key(&masked_key, key, mask);
1053 + ovs_flow_mask_key(&masked_key, key, true, mask);
1054 error = ovs_nla_copy_actions(a, &masked_key, &acts, log);
1055 if (error) {
1056 OVS_NLERR(log,
1057 diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
1058 index 4613df8c8290..aa349514e4cb 100644
1059 --- a/net/openvswitch/flow_table.c
1060 +++ b/net/openvswitch/flow_table.c
1061 @@ -56,20 +56,21 @@ static u16 range_n_bytes(const struct sw_flow_key_range *range)
1062 }
1063
1064 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
1065 - const struct sw_flow_mask *mask)
1066 + bool full, const struct sw_flow_mask *mask)
1067 {
1068 - const long *m = (const long *)((const u8 *)&mask->key +
1069 - mask->range.start);
1070 - const long *s = (const long *)((const u8 *)src +
1071 - mask->range.start);
1072 - long *d = (long *)((u8 *)dst + mask->range.start);
1073 + int start = full ? 0 : mask->range.start;
1074 + int len = full ? sizeof *dst : range_n_bytes(&mask->range);
1075 + const long *m = (const long *)((const u8 *)&mask->key + start);
1076 + const long *s = (const long *)((const u8 *)src + start);
1077 + long *d = (long *)((u8 *)dst + start);
1078 int i;
1079
1080 - /* The memory outside of the 'mask->range' are not set since
1081 - * further operations on 'dst' only uses contents within
1082 - * 'mask->range'.
1083 + /* If 'full' is true then all of 'dst' is fully initialized. Otherwise,
1084 + * if 'full' is false the memory outside of the 'mask->range' is left
1085 + * uninitialized. This can be used as an optimization when further
1086 + * operations on 'dst' only use contents within 'mask->range'.
1087 */
1088 - for (i = 0; i < range_n_bytes(&mask->range); i += sizeof(long))
1089 + for (i = 0; i < len; i += sizeof(long))
1090 *d++ = *s++ & *m++;
1091 }
1092
1093 @@ -473,7 +474,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
1094 u32 hash;
1095 struct sw_flow_key masked_key;
1096
1097 - ovs_flow_mask_key(&masked_key, unmasked, mask);
1098 + ovs_flow_mask_key(&masked_key, unmasked, false, mask);
1099 hash = flow_hash(&masked_key, &mask->range);
1100 head = find_bucket(ti, hash);
1101 hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
1102 diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
1103 index 616eda10d955..2dd9900f533d 100644
1104 --- a/net/openvswitch/flow_table.h
1105 +++ b/net/openvswitch/flow_table.h
1106 @@ -86,5 +86,5 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *,
1107 bool ovs_flow_cmp(const struct sw_flow *, const struct sw_flow_match *);
1108
1109 void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
1110 - const struct sw_flow_mask *mask);
1111 + bool full, const struct sw_flow_mask *mask);
1112 #endif /* flow_table.h */
1113 diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
1114 index 715e01e5910a..f23a3b68bba6 100644
1115 --- a/net/sched/cls_fw.c
1116 +++ b/net/sched/cls_fw.c
1117 @@ -33,7 +33,6 @@
1118
1119 struct fw_head {
1120 u32 mask;
1121 - bool mask_set;
1122 struct fw_filter __rcu *ht[HTSIZE];
1123 struct rcu_head rcu;
1124 };
1125 @@ -84,7 +83,7 @@ static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1126 }
1127 }
1128 } else {
1129 - /* old method */
1130 + /* Old method: classify the packet using its skb mark. */
1131 if (id && (TC_H_MAJ(id) == 0 ||
1132 !(TC_H_MAJ(id ^ tp->q->handle)))) {
1133 res->classid = id;
1134 @@ -114,14 +113,9 @@ static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
1135
1136 static int fw_init(struct tcf_proto *tp)
1137 {
1138 - struct fw_head *head;
1139 -
1140 - head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
1141 - if (head == NULL)
1142 - return -ENOBUFS;
1143 -
1144 - head->mask_set = false;
1145 - rcu_assign_pointer(tp->root, head);
1146 + /* We don't allocate fw_head here, because in the old method
1147 + * we don't need it at all.
1148 + */
1149 return 0;
1150 }
1151
1152 @@ -252,7 +246,7 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
1153 int err;
1154
1155 if (!opt)
1156 - return handle ? -EINVAL : 0;
1157 + return handle ? -EINVAL : 0; /* Succeed if it is old method. */
1158
1159 err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy);
1160 if (err < 0)
1161 @@ -302,11 +296,17 @@ static int fw_change(struct net *net, struct sk_buff *in_skb,
1162 if (!handle)
1163 return -EINVAL;
1164
1165 - if (!head->mask_set) {
1166 - head->mask = 0xFFFFFFFF;
1167 + if (!head) {
1168 + u32 mask = 0xFFFFFFFF;
1169 if (tb[TCA_FW_MASK])
1170 - head->mask = nla_get_u32(tb[TCA_FW_MASK]);
1171 - head->mask_set = true;
1172 + mask = nla_get_u32(tb[TCA_FW_MASK]);
1173 +
1174 + head = kzalloc(sizeof(*head), GFP_KERNEL);
1175 + if (!head)
1176 + return -ENOBUFS;
1177 + head->mask = mask;
1178 +
1179 + rcu_assign_pointer(tp->root, head);
1180 }
1181
1182 f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
1183 diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
1184 index cab9e9b43967..4fbb67430ce4 100644
1185 --- a/net/sched/cls_u32.c
1186 +++ b/net/sched/cls_u32.c
1187 @@ -490,6 +490,19 @@ static bool u32_destroy(struct tcf_proto *tp, bool force)
1188 return false;
1189 }
1190 }
1191 +
1192 + if (tp_c->refcnt > 1)
1193 + return false;
1194 +
1195 + if (tp_c->refcnt == 1) {
1196 + struct tc_u_hnode *ht;
1197 +
1198 + for (ht = rtnl_dereference(tp_c->hlist);
1199 + ht;
1200 + ht = rtnl_dereference(ht->next))
1201 + if (!ht_empty(ht))
1202 + return false;
1203 + }
1204 }
1205
1206 if (root_ht && --root_ht->refcnt == 0)
1207 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
1208 index 53b7acde9aa3..e13c3c3ea4ac 100644
1209 --- a/net/sctp/protocol.c
1210 +++ b/net/sctp/protocol.c
1211 @@ -1166,7 +1166,7 @@ static void sctp_v4_del_protocol(void)
1212 unregister_inetaddr_notifier(&sctp_inetaddr_notifier);
1213 }
1214
1215 -static int __net_init sctp_net_init(struct net *net)
1216 +static int __net_init sctp_defaults_init(struct net *net)
1217 {
1218 int status;
1219
1220 @@ -1259,12 +1259,6 @@ static int __net_init sctp_net_init(struct net *net)
1221
1222 sctp_dbg_objcnt_init(net);
1223
1224 - /* Initialize the control inode/socket for handling OOTB packets. */
1225 - if ((status = sctp_ctl_sock_init(net))) {
1226 - pr_err("Failed to initialize the SCTP control sock\n");
1227 - goto err_ctl_sock_init;
1228 - }
1229 -
1230 /* Initialize the local address list. */
1231 INIT_LIST_HEAD(&net->sctp.local_addr_list);
1232 spin_lock_init(&net->sctp.local_addr_lock);
1233 @@ -1280,9 +1274,6 @@ static int __net_init sctp_net_init(struct net *net)
1234
1235 return 0;
1236
1237 -err_ctl_sock_init:
1238 - sctp_dbg_objcnt_exit(net);
1239 - sctp_proc_exit(net);
1240 err_init_proc:
1241 cleanup_sctp_mibs(net);
1242 err_init_mibs:
1243 @@ -1291,15 +1282,12 @@ err_sysctl_register:
1244 return status;
1245 }
1246
1247 -static void __net_exit sctp_net_exit(struct net *net)
1248 +static void __net_exit sctp_defaults_exit(struct net *net)
1249 {
1250 /* Free the local address list */
1251 sctp_free_addr_wq(net);
1252 sctp_free_local_addr_list(net);
1253
1254 - /* Free the control endpoint. */
1255 - inet_ctl_sock_destroy(net->sctp.ctl_sock);
1256 -
1257 sctp_dbg_objcnt_exit(net);
1258
1259 sctp_proc_exit(net);
1260 @@ -1307,9 +1295,32 @@ static void __net_exit sctp_net_exit(struct net *net)
1261 sctp_sysctl_net_unregister(net);
1262 }
1263
1264 -static struct pernet_operations sctp_net_ops = {
1265 - .init = sctp_net_init,
1266 - .exit = sctp_net_exit,
1267 +static struct pernet_operations sctp_defaults_ops = {
1268 + .init = sctp_defaults_init,
1269 + .exit = sctp_defaults_exit,
1270 +};
1271 +
1272 +static int __net_init sctp_ctrlsock_init(struct net *net)
1273 +{
1274 + int status;
1275 +
1276 + /* Initialize the control inode/socket for handling OOTB packets. */
1277 + status = sctp_ctl_sock_init(net);
1278 + if (status)
1279 + pr_err("Failed to initialize the SCTP control sock\n");
1280 +
1281 + return status;
1282 +}
1283 +
1284 +static void __net_init sctp_ctrlsock_exit(struct net *net)
1285 +{
1286 + /* Free the control endpoint. */
1287 + inet_ctl_sock_destroy(net->sctp.ctl_sock);
1288 +}
1289 +
1290 +static struct pernet_operations sctp_ctrlsock_ops = {
1291 + .init = sctp_ctrlsock_init,
1292 + .exit = sctp_ctrlsock_exit,
1293 };
1294
1295 /* Initialize the universe into something sensible. */
1296 @@ -1442,8 +1453,11 @@ static __init int sctp_init(void)
1297 sctp_v4_pf_init();
1298 sctp_v6_pf_init();
1299
1300 - status = sctp_v4_protosw_init();
1301 + status = register_pernet_subsys(&sctp_defaults_ops);
1302 + if (status)
1303 + goto err_register_defaults;
1304
1305 + status = sctp_v4_protosw_init();
1306 if (status)
1307 goto err_protosw_init;
1308
1309 @@ -1451,9 +1465,9 @@ static __init int sctp_init(void)
1310 if (status)
1311 goto err_v6_protosw_init;
1312
1313 - status = register_pernet_subsys(&sctp_net_ops);
1314 + status = register_pernet_subsys(&sctp_ctrlsock_ops);
1315 if (status)
1316 - goto err_register_pernet_subsys;
1317 + goto err_register_ctrlsock;
1318
1319 status = sctp_v4_add_protocol();
1320 if (status)
1321 @@ -1469,12 +1483,14 @@ out:
1322 err_v6_add_protocol:
1323 sctp_v4_del_protocol();
1324 err_add_protocol:
1325 - unregister_pernet_subsys(&sctp_net_ops);
1326 -err_register_pernet_subsys:
1327 + unregister_pernet_subsys(&sctp_ctrlsock_ops);
1328 +err_register_ctrlsock:
1329 sctp_v6_protosw_exit();
1330 err_v6_protosw_init:
1331 sctp_v4_protosw_exit();
1332 err_protosw_init:
1333 + unregister_pernet_subsys(&sctp_defaults_ops);
1334 +err_register_defaults:
1335 sctp_v4_pf_exit();
1336 sctp_v6_pf_exit();
1337 sctp_sysctl_unregister();
1338 @@ -1507,12 +1523,14 @@ static __exit void sctp_exit(void)
1339 sctp_v6_del_protocol();
1340 sctp_v4_del_protocol();
1341
1342 - unregister_pernet_subsys(&sctp_net_ops);
1343 + unregister_pernet_subsys(&sctp_ctrlsock_ops);
1344
1345 /* Free protosw registrations */
1346 sctp_v6_protosw_exit();
1347 sctp_v4_protosw_exit();
1348
1349 + unregister_pernet_subsys(&sctp_defaults_ops);
1350 +
1351 /* Unregister with socket layer. */
1352 sctp_v6_pf_exit();
1353 sctp_v4_pf_exit();