Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.20/0111-4.20.12-all-fixes.patch



Revision 3288
Mon Mar 4 10:38:37 2019 UTC by niro
File size: 30950 byte(s)
linux-4.20.12
1 diff --git a/Makefile b/Makefile
2 index 193cfe3a3d70..0a92b4e11621 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 20
9 -SUBLEVEL = 11
10 +SUBLEVEL = 12
11 EXTRAVERSION =
12 NAME = Shy Crocodile
13
14 diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
15 index 778af0b7f7fd..c67081301035 100644
16 --- a/arch/arm64/include/asm/memory.h
17 +++ b/arch/arm64/include/asm/memory.h
18 @@ -303,6 +303,17 @@ static inline void *phys_to_virt(phys_addr_t x)
19 #define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
20 _virt_addr_valid(kaddr))
21
22 +/*
23 + * Given that the GIC architecture permits ITS implementations that can only be
24 + * configured with a LPI table address once, GICv3 systems with many CPUs may
25 + * end up reserving a lot of different regions after a kexec for their LPI
26 + * tables (one per CPU), as we are forced to reuse the same memory after kexec
27 + * (and thus reserve it persistently with EFI beforehand)
28 + */
29 +#if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS)
30 +# define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + 2*(NR_CPUS + 1))
31 +#endif
32 +
33 #include <asm-generic/memory_model.h>
34
35 #endif
36 diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
37 index f4fc1e0544b7..953e316521fc 100644
38 --- a/arch/arm64/kernel/setup.c
39 +++ b/arch/arm64/kernel/setup.c
40 @@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p)
41 arm64_memblock_init();
42
43 paging_init();
44 - efi_apply_persistent_mem_reservations();
45
46 acpi_table_upgrade();
47
48 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
49 index 17eb09d222ff..ec78a04eb136 100644
50 --- a/crypto/af_alg.c
51 +++ b/crypto/af_alg.c
52 @@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private)
53
54 int af_alg_release(struct socket *sock)
55 {
56 - if (sock->sk)
57 + if (sock->sk) {
58 sock_put(sock->sk);
59 + sock->sk = NULL;
60 + }
61 return 0;
62 }
63 EXPORT_SYMBOL_GPL(af_alg_release);
64 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
65 index 415849bab233..bde3822cf539 100644
66 --- a/drivers/firmware/efi/efi.c
67 +++ b/drivers/firmware/efi/efi.c
68 @@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
69
70 early_memunmap(tbl, sizeof(*tbl));
71 }
72 - return 0;
73 -}
74
75 -int __init efi_apply_persistent_mem_reservations(void)
76 -{
77 if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
78 unsigned long prsv = efi.mem_reserve;
79
80 diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
81 index 3d36142cf812..30ac0c975f8a 100644
82 --- a/drivers/firmware/efi/libstub/arm-stub.c
83 +++ b/drivers/firmware/efi/libstub/arm-stub.c
84 @@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
85 efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
86 efi_status_t status;
87
88 - if (IS_ENABLED(CONFIG_ARM))
89 - return;
90 -
91 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
92 (void **)&rsv);
93 if (status != EFI_SUCCESS) {
94 diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
95 index 0e30fa00204c..f9b8e3e23a8e 100644
96 --- a/drivers/hwmon/lm80.c
97 +++ b/drivers/hwmon/lm80.c
98 @@ -393,8 +393,10 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
99 }
100
101 rv = lm80_read_value(client, LM80_REG_FANDIV);
102 - if (rv < 0)
103 + if (rv < 0) {
104 + mutex_unlock(&data->update_lock);
105 return rv;
106 + }
107 reg = (rv & ~(3 << (2 * (nr + 1))))
108 | (data->fan_div[nr] << (2 * (nr + 1)));
109 lm80_write_value(client, LM80_REG_FANDIV, reg);
110 diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c
111 index 211ed6cffd10..578978711887 100644
112 --- a/drivers/isdn/mISDN/timerdev.c
113 +++ b/drivers/isdn/mISDN/timerdev.c
114 @@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t)
115 spin_lock_irqsave(&timer->dev->lock, flags);
116 if (timer->id >= 0)
117 list_move_tail(&timer->list, &timer->dev->expired);
118 - spin_unlock_irqrestore(&timer->dev->lock, flags);
119 wake_up_interruptible(&timer->dev->wait);
120 + spin_unlock_irqrestore(&timer->dev->lock, flags);
121 }
122
123 static int
124 diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
125 index ef9deaa361c7..ddd98cdd33bc 100644
126 --- a/drivers/mmc/host/meson-gx-mmc.c
127 +++ b/drivers/mmc/host/meson-gx-mmc.c
128 @@ -1286,7 +1286,8 @@ static int meson_mmc_probe(struct platform_device *pdev)
129 host->regs + SD_EMMC_IRQ_EN);
130
131 ret = request_threaded_irq(host->irq, meson_mmc_irq,
132 - meson_mmc_irq_thread, IRQF_SHARED, NULL, host);
133 + meson_mmc_irq_thread, IRQF_SHARED,
134 + dev_name(&pdev->dev), host);
135 if (ret)
136 goto err_init_clk;
137
138 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
139 index b2a0e59b6252..b0113f6fdbb4 100644
140 --- a/drivers/net/dsa/mv88e6xxx/chip.c
141 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
142 @@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
143 unsigned int sub_irq;
144 unsigned int n;
145 u16 reg;
146 + u16 ctl1;
147 int err;
148
149 mutex_lock(&chip->reg_lock);
150 @@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
151 if (err)
152 goto out;
153
154 - for (n = 0; n < chip->g1_irq.nirqs; ++n) {
155 - if (reg & (1 << n)) {
156 - sub_irq = irq_find_mapping(chip->g1_irq.domain, n);
157 - handle_nested_irq(sub_irq);
158 - ++nhandled;
159 + do {
160 + for (n = 0; n < chip->g1_irq.nirqs; ++n) {
161 + if (reg & (1 << n)) {
162 + sub_irq = irq_find_mapping(chip->g1_irq.domain,
163 + n);
164 + handle_nested_irq(sub_irq);
165 + ++nhandled;
166 + }
167 }
168 - }
169 +
170 + mutex_lock(&chip->reg_lock);
171 + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1);
172 + if (err)
173 + goto unlock;
174 + err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &reg);
175 +unlock:
176 + mutex_unlock(&chip->reg_lock);
177 + if (err)
178 + goto out;
179 + ctl1 &= GENMASK(chip->g1_irq.nirqs, 0);
180 + } while (reg & ctl1);
181 +
182 out:
183 return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
184 }
185 diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
186 index 697d9b374f5e..ae2f35039343 100644
187 --- a/drivers/net/ethernet/marvell/sky2.c
188 +++ b/drivers/net/ethernet/marvell/sky2.c
189 @@ -5087,7 +5087,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
190 INIT_WORK(&hw->restart_work, sky2_restart);
191
192 pci_set_drvdata(pdev, hw);
193 - pdev->d3_delay = 200;
194 + pdev->d3_delay = 300;
195
196 return 0;
197
198 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
199 index 8f1180fff955..280173b48962 100644
200 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
201 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
202 @@ -845,8 +845,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
203 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
204 bool configure = false;
205 bool pfc = false;
206 + u16 thres_cells;
207 + u16 delay_cells;
208 bool lossy;
209 - u16 thres;
210
211 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
212 if (prio_tc[j] == i) {
213 @@ -860,10 +861,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
214 continue;
215
216 lossy = !(pfc || pause_en);
217 - thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
218 - delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc,
219 - pause_en);
220 - mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy);
221 + thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
222 + delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
223 + pfc, pause_en);
224 + mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
225 + thres_cells, lossy);
226 }
227
228 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
229 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
230 index 20299f6f65fc..736e29635b77 100644
231 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
232 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
233 @@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
234 static int dwmac4_rx_check_timestamp(void *desc)
235 {
236 struct dma_desc *p = (struct dma_desc *)desc;
237 + unsigned int rdes0 = le32_to_cpu(p->des0);
238 + unsigned int rdes1 = le32_to_cpu(p->des1);
239 + unsigned int rdes3 = le32_to_cpu(p->des3);
240 u32 own, ctxt;
241 int ret = 1;
242
243 - own = p->des3 & RDES3_OWN;
244 - ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
245 + own = rdes3 & RDES3_OWN;
246 + ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
247 >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
248
249 if (likely(!own && ctxt)) {
250 - if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
251 + if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
252 /* Corrupted value */
253 ret = -EINVAL;
254 else
255 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
256 index 5710864fa809..9caf79ba5ef1 100644
257 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
258 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
259 @@ -692,25 +692,27 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev,
260 struct ethtool_eee *edata)
261 {
262 struct stmmac_priv *priv = netdev_priv(dev);
263 + int ret;
264
265 - priv->eee_enabled = edata->eee_enabled;
266 -
267 - if (!priv->eee_enabled)
268 + if (!edata->eee_enabled) {
269 stmmac_disable_eee_mode(priv);
270 - else {
271 + } else {
272 /* We are asking for enabling the EEE but it is safe
273 * to verify all by invoking the eee_init function.
274 * In case of failure it will return an error.
275 */
276 - priv->eee_enabled = stmmac_eee_init(priv);
277 - if (!priv->eee_enabled)
278 + edata->eee_enabled = stmmac_eee_init(priv);
279 + if (!edata->eee_enabled)
280 return -EOPNOTSUPP;
281 -
282 - /* Do not change tx_lpi_timer in case of failure */
283 - priv->tx_lpi_timer = edata->tx_lpi_timer;
284 }
285
286 - return phy_ethtool_set_eee(dev->phydev, edata);
287 + ret = phy_ethtool_set_eee(dev->phydev, edata);
288 + if (ret)
289 + return ret;
290 +
291 + priv->eee_enabled = edata->eee_enabled;
292 + priv->tx_lpi_timer = edata->tx_lpi_timer;
293 + return 0;
294 }
295
296 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
297 diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
298 index 1f612268c998..d847f672a705 100644
299 --- a/drivers/net/ethernet/ti/netcp_core.c
300 +++ b/drivers/net/ethernet/ti/netcp_core.c
301 @@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device,
302 const char *name;
303 char node_name[32];
304
305 - if (of_property_read_string(node, "label", &name) < 0) {
306 + if (of_property_read_string(child, "label", &name) < 0) {
307 snprintf(node_name, sizeof(node_name), "%pOFn", child);
308 name = node_name;
309 }
310 diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
311 index 74a8782313cf..bd6084e315de 100644
312 --- a/drivers/net/phy/xilinx_gmii2rgmii.c
313 +++ b/drivers/net/phy/xilinx_gmii2rgmii.c
314 @@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev)
315 u16 val = 0;
316 int err;
317
318 - err = priv->phy_drv->read_status(phydev);
319 + if (priv->phy_drv->read_status)
320 + err = priv->phy_drv->read_status(phydev);
321 + else
322 + err = genphy_read_status(phydev);
323 if (err < 0)
324 return err;
325
326 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
327 index 0565f8880199..8f022964b2d1 100644
328 --- a/drivers/net/vxlan.c
329 +++ b/drivers/net/vxlan.c
330 @@ -2072,7 +2072,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
331 struct pcpu_sw_netstats *tx_stats, *rx_stats;
332 union vxlan_addr loopback;
333 union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip;
334 - struct net_device *dev = skb->dev;
335 + struct net_device *dev;
336 int len = skb->len;
337
338 tx_stats = this_cpu_ptr(src_vxlan->dev->tstats);
339 @@ -2092,9 +2092,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
340 #endif
341 }
342
343 + rcu_read_lock();
344 + dev = skb->dev;
345 + if (unlikely(!(dev->flags & IFF_UP))) {
346 + kfree_skb(skb);
347 + goto drop;
348 + }
349 +
350 if (dst_vxlan->cfg.flags & VXLAN_F_LEARN)
351 - vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0,
352 - vni);
353 + vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni);
354
355 u64_stats_update_begin(&tx_stats->syncp);
356 tx_stats->tx_packets++;
357 @@ -2107,8 +2113,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
358 rx_stats->rx_bytes += len;
359 u64_stats_update_end(&rx_stats->syncp);
360 } else {
361 +drop:
362 dev->stats.rx_dropped++;
363 }
364 + rcu_read_unlock();
365 }
366
367 static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
368 diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
369 index c9d8e3c837de..c25acace7d91 100644
370 --- a/drivers/pci/pci.c
371 +++ b/drivers/pci/pci.c
372 @@ -6195,7 +6195,8 @@ static int __init pci_setup(char *str)
373 } else if (!strncmp(str, "pcie_scan_all", 13)) {
374 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
375 } else if (!strncmp(str, "disable_acs_redir=", 18)) {
376 - disable_acs_redir_param = str + 18;
377 + disable_acs_redir_param =
378 + kstrdup(str + 18, GFP_KERNEL);
379 } else {
380 printk(KERN_ERR "PCI: Unknown option `%s'\n",
381 str);
382 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
383 index ffa5b9f771b5..900442605c72 100644
384 --- a/drivers/target/target_core_transport.c
385 +++ b/drivers/target/target_core_transport.c
386 @@ -266,7 +266,7 @@ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
387 }
388 ret = transport_init_session(se_sess);
389 if (ret < 0) {
390 - kfree(se_sess);
391 + kmem_cache_free(se_sess_cache, se_sess);
392 return ERR_PTR(ret);
393 }
394 se_sess->sup_prot_ops = sup_prot_ops;
395 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
396 index cf82e7266397..5eaeca805c95 100644
397 --- a/drivers/vhost/vhost.c
398 +++ b/drivers/vhost/vhost.c
399 @@ -1784,7 +1784,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
400
401 ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
402 len, iov, 64, VHOST_ACCESS_WO);
403 - if (ret)
404 + if (ret < 0)
405 return ret;
406
407 for (i = 0; i < ret; i++) {
408 diff --git a/include/linux/efi.h b/include/linux/efi.h
409 index 100ce4a4aff6..845174e113ce 100644
410 --- a/include/linux/efi.h
411 +++ b/include/linux/efi.h
412 @@ -1167,8 +1167,6 @@ static inline bool efi_enabled(int feature)
413 extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
414
415 extern bool efi_is_table_address(unsigned long phys_addr);
416 -
417 -extern int efi_apply_persistent_mem_reservations(void);
418 #else
419 static inline bool efi_enabled(int feature)
420 {
421 @@ -1187,11 +1185,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr)
422 {
423 return false;
424 }
425 -
426 -static inline int efi_apply_persistent_mem_reservations(void)
427 -{
428 - return 0;
429 -}
430 #endif
431
432 extern int efi_status_to_err(efi_status_t status);
433 diff --git a/include/linux/memblock.h b/include/linux/memblock.h
434 index 3ef3086ed52f..ecff64ff365d 100644
435 --- a/include/linux/memblock.h
436 +++ b/include/linux/memblock.h
437 @@ -29,9 +29,6 @@ extern unsigned long max_pfn;
438 */
439 extern unsigned long long max_possible_pfn;
440
441 -#define INIT_MEMBLOCK_REGIONS 128
442 -#define INIT_PHYSMEM_REGIONS 4
443 -
444 /**
445 * enum memblock_flags - definition of memory region attributes
446 * @MEMBLOCK_NONE: no special request
447 diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
448 index 2b2a6dce1630..4c76fe2c8488 100644
449 --- a/include/linux/netdev_features.h
450 +++ b/include/linux/netdev_features.h
451 @@ -11,6 +11,8 @@
452 #define _LINUX_NETDEV_FEATURES_H
453
454 #include <linux/types.h>
455 +#include <linux/bitops.h>
456 +#include <asm/byteorder.h>
457
458 typedef u64 netdev_features_t;
459
460 @@ -154,8 +156,26 @@ enum {
461 #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
462 #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
463
464 -#define for_each_netdev_feature(mask_addr, bit) \
465 - for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
466 +/* Finds the next feature with the highest number of the range of start till 0.
467 + */
468 +static inline int find_next_netdev_feature(u64 feature, unsigned long start)
469 +{
470 + /* like BITMAP_LAST_WORD_MASK() for u64
471 + * this sets the most significant 64 - start to 0.
472 + */
473 + feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1));
474 +
475 + return fls64(feature) - 1;
476 +}
477 +
478 +/* This goes for the MSB to the LSB through the set feature bits,
479 + * mask_addr should be a u64 and bit an int
480 + */
481 +#define for_each_netdev_feature(mask_addr, bit) \
482 + for ((bit) = find_next_netdev_feature((mask_addr), \
483 + NETDEV_FEATURE_COUNT); \
484 + (bit) >= 0; \
485 + (bit) = find_next_netdev_feature((mask_addr), (bit) - 1))
486
487 /* Features valid for ethtool to change */
488 /* = all defined minus driver/device-class-related */
489 diff --git a/include/net/ax25.h b/include/net/ax25.h
490 index 3f9aea8087e3..8b7eb46ad72d 100644
491 --- a/include/net/ax25.h
492 +++ b/include/net/ax25.h
493 @@ -201,6 +201,18 @@ static inline void ax25_hold_route(ax25_route *ax25_rt)
494
495 void __ax25_put_route(ax25_route *ax25_rt);
496
497 +extern rwlock_t ax25_route_lock;
498 +
499 +static inline void ax25_route_lock_use(void)
500 +{
501 + read_lock(&ax25_route_lock);
502 +}
503 +
504 +static inline void ax25_route_lock_unuse(void)
505 +{
506 + read_unlock(&ax25_route_lock);
507 +}
508 +
509 static inline void ax25_put_route(ax25_route *ax25_rt)
510 {
511 if (refcount_dec_and_test(&ax25_rt->refcount))
512 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
513 index 00b5e7825508..74ff688568a0 100644
514 --- a/include/net/inetpeer.h
515 +++ b/include/net/inetpeer.h
516 @@ -39,6 +39,7 @@ struct inet_peer {
517
518 u32 metrics[RTAX_MAX];
519 u32 rate_tokens; /* rate limiting for ICMP */
520 + u32 n_redirects;
521 unsigned long rate_last;
522 /*
523 * Once inet_peer is queued for deletion (refcnt == 0), following field
524 diff --git a/mm/memblock.c b/mm/memblock.c
525 index f45a049532fe..74ac4f89018a 100644
526 --- a/mm/memblock.c
527 +++ b/mm/memblock.c
528 @@ -26,6 +26,13 @@
529
530 #include "internal.h"
531
532 +#define INIT_MEMBLOCK_REGIONS 128
533 +#define INIT_PHYSMEM_REGIONS 4
534 +
535 +#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
536 +# define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS
537 +#endif
538 +
539 /**
540 * DOC: memblock overview
541 *
542 @@ -92,7 +99,7 @@ unsigned long max_pfn;
543 unsigned long long max_possible_pfn;
544
545 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
546 -static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
547 +static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
548 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
549 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
550 #endif
551 @@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = {
552
553 .reserved.regions = memblock_reserved_init_regions,
554 .reserved.cnt = 1, /* empty dummy entry */
555 - .reserved.max = INIT_MEMBLOCK_REGIONS,
556 + .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS,
557 .reserved.name = "reserved",
558
559 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
560 diff --git a/net/ax25/ax25_ip.c b/net/ax25/ax25_ip.c
561 index 70417e9b932d..314bbc8010fb 100644
562 --- a/net/ax25/ax25_ip.c
563 +++ b/net/ax25/ax25_ip.c
564 @@ -114,6 +114,7 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
565 dst = (ax25_address *)(bp + 1);
566 src = (ax25_address *)(bp + 8);
567
568 + ax25_route_lock_use();
569 route = ax25_get_route(dst, NULL);
570 if (route) {
571 digipeat = route->digipeat;
572 @@ -206,9 +207,8 @@ netdev_tx_t ax25_ip_xmit(struct sk_buff *skb)
573 ax25_queue_xmit(skb, dev);
574
575 put:
576 - if (route)
577 - ax25_put_route(route);
578
579 + ax25_route_lock_unuse();
580 return NETDEV_TX_OK;
581 }
582
583 diff --git a/net/ax25/ax25_route.c b/net/ax25/ax25_route.c
584 index a0eff323af12..66f74c85cf6b 100644
585 --- a/net/ax25/ax25_route.c
586 +++ b/net/ax25/ax25_route.c
587 @@ -40,7 +40,7 @@
588 #include <linux/export.h>
589
590 static ax25_route *ax25_route_list;
591 -static DEFINE_RWLOCK(ax25_route_lock);
592 +DEFINE_RWLOCK(ax25_route_lock);
593
594 void ax25_rt_device_down(struct net_device *dev)
595 {
596 @@ -335,6 +335,7 @@ const struct seq_operations ax25_rt_seqops = {
597 * Find AX.25 route
598 *
599 * Only routes with a reference count of zero can be destroyed.
600 + * Must be called with ax25_route_lock read locked.
601 */
602 ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
603 {
604 @@ -342,7 +343,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
605 ax25_route *ax25_def_rt = NULL;
606 ax25_route *ax25_rt;
607
608 - read_lock(&ax25_route_lock);
609 /*
610 * Bind to the physical interface we heard them on, or the default
611 * route if none is found;
612 @@ -365,11 +365,6 @@ ax25_route *ax25_get_route(ax25_address *addr, struct net_device *dev)
613 if (ax25_spe_rt != NULL)
614 ax25_rt = ax25_spe_rt;
615
616 - if (ax25_rt != NULL)
617 - ax25_hold_route(ax25_rt);
618 -
619 - read_unlock(&ax25_route_lock);
620 -
621 return ax25_rt;
622 }
623
624 @@ -400,9 +395,12 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
625 ax25_route *ax25_rt;
626 int err = 0;
627
628 - if ((ax25_rt = ax25_get_route(addr, NULL)) == NULL)
629 + ax25_route_lock_use();
630 + ax25_rt = ax25_get_route(addr, NULL);
631 + if (!ax25_rt) {
632 + ax25_route_lock_unuse();
633 return -EHOSTUNREACH;
634 -
635 + }
636 if ((ax25->ax25_dev = ax25_dev_ax25dev(ax25_rt->dev)) == NULL) {
637 err = -EHOSTUNREACH;
638 goto put;
639 @@ -437,8 +435,7 @@ int ax25_rt_autobind(ax25_cb *ax25, ax25_address *addr)
640 }
641
642 put:
643 - ax25_put_route(ax25_rt);
644 -
645 + ax25_route_lock_unuse();
646 return err;
647 }
648
649 diff --git a/net/core/dev.c b/net/core/dev.c
650 index 43f8a4fd4968..de0690e5b3df 100644
651 --- a/net/core/dev.c
652 +++ b/net/core/dev.c
653 @@ -8064,7 +8064,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower,
654 netdev_features_t feature;
655 int feature_bit;
656
657 - for_each_netdev_feature(&upper_disables, feature_bit) {
658 + for_each_netdev_feature(upper_disables, feature_bit) {
659 feature = __NETIF_F_BIT(feature_bit);
660 if (!(upper->wanted_features & feature)
661 && (features & feature)) {
662 @@ -8084,7 +8084,7 @@ static void netdev_sync_lower_features(struct net_device *upper,
663 netdev_features_t feature;
664 int feature_bit;
665
666 - for_each_netdev_feature(&upper_disables, feature_bit) {
667 + for_each_netdev_feature(upper_disables, feature_bit) {
668 feature = __NETIF_F_BIT(feature_bit);
669 if (!(features & feature) && (lower->features & feature)) {
670 netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n",
671 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
672 index eebc3106d30e..fc3d652a2de0 100644
673 --- a/net/core/skbuff.c
674 +++ b/net/core/skbuff.c
675 @@ -353,6 +353,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
676 */
677 void *netdev_alloc_frag(unsigned int fragsz)
678 {
679 + fragsz = SKB_DATA_ALIGN(fragsz);
680 +
681 return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
682 }
683 EXPORT_SYMBOL(netdev_alloc_frag);
684 @@ -366,6 +368,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
685
686 void *napi_alloc_frag(unsigned int fragsz)
687 {
688 + fragsz = SKB_DATA_ALIGN(fragsz);
689 +
690 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
691 }
692 EXPORT_SYMBOL(napi_alloc_frag);
693 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
694 index d757b9642d0d..be778599bfed 100644
695 --- a/net/ipv4/inetpeer.c
696 +++ b/net/ipv4/inetpeer.c
697 @@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
698 atomic_set(&p->rid, 0);
699 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
700 p->rate_tokens = 0;
701 + p->n_redirects = 0;
702 /* 60*HZ is arbitrary, but chosen enough high so that the first
703 * calculation of tokens is at its maximum.
704 */
705 diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
706 index a0aa13bcabda..0a8a60c1bf9a 100644
707 --- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
708 +++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c
709 @@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset)
710 int snmp_version(void *context, size_t hdrlen, unsigned char tag,
711 const void *data, size_t datalen)
712 {
713 + if (datalen != 1)
714 + return -EINVAL;
715 if (*(unsigned char *)data > 1)
716 return -ENOTSUPP;
717 return 1;
718 @@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag,
719 const void *data, size_t datalen)
720 {
721 struct snmp_ctx *ctx = (struct snmp_ctx *)context;
722 - __be32 *pdata = (__be32 *)data;
723 + __be32 *pdata;
724
725 + if (datalen != 4)
726 + return -EINVAL;
727 + pdata = (__be32 *)data;
728 if (*pdata == ctx->from) {
729 pr_debug("%s: %pI4 to %pI4\n", __func__,
730 (void *)&ctx->from, (void *)&ctx->to);
731 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
732 index c0a9d26c06ce..d1ddf1d03721 100644
733 --- a/net/ipv4/route.c
734 +++ b/net/ipv4/route.c
735 @@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb)
736 /* No redirected packets during ip_rt_redirect_silence;
737 * reset the algorithm.
738 */
739 - if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
740 + if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
741 peer->rate_tokens = 0;
742 + peer->n_redirects = 0;
743 + }
744
745 /* Too many ignored redirects; do not send anything
746 * set dst.rate_last to the last seen redirected packet.
747 */
748 - if (peer->rate_tokens >= ip_rt_redirect_number) {
749 + if (peer->n_redirects >= ip_rt_redirect_number) {
750 peer->rate_last = jiffies;
751 goto out_put_peer;
752 }
753 @@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
754 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
755 peer->rate_last = jiffies;
756 ++peer->rate_tokens;
757 + ++peer->n_redirects;
758 #ifdef CONFIG_IP_ROUTE_VERBOSE
759 if (log_martians &&
760 peer->rate_tokens == ip_rt_redirect_number)
761 diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
762 index 40cbe5609663..b102973102b9 100644
763 --- a/net/ipv4/tcp.c
764 +++ b/net/ipv4/tcp.c
765 @@ -2532,6 +2532,7 @@ void tcp_write_queue_purge(struct sock *sk)
766 sk_mem_reclaim(sk);
767 tcp_clear_all_retrans_hints(tcp_sk(sk));
768 tcp_sk(sk)->packets_out = 0;
769 + inet_csk(sk)->icsk_backoff = 0;
770 }
771
772 int tcp_disconnect(struct sock *sk, int flags)
773 @@ -2580,7 +2581,6 @@ int tcp_disconnect(struct sock *sk, int flags)
774 tp->write_seq += tp->max_window + 2;
775 if (tp->write_seq == 0)
776 tp->write_seq = 1;
777 - icsk->icsk_backoff = 0;
778 tp->snd_cwnd = 2;
779 icsk->icsk_probes_out = 0;
780 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
781 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
782 index de47038afdf0..b654f21064bb 100644
783 --- a/net/ipv4/tcp_ipv4.c
784 +++ b/net/ipv4/tcp_ipv4.c
785 @@ -535,14 +535,15 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
786 if (sock_owned_by_user(sk))
787 break;
788
789 + skb = tcp_rtx_queue_head(sk);
790 + if (WARN_ON_ONCE(!skb))
791 + break;
792 +
793 icsk->icsk_backoff--;
794 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
795 TCP_TIMEOUT_INIT;
796 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
797
798 - skb = tcp_rtx_queue_head(sk);
799 - BUG_ON(!skb);
800 -
801 tcp_mstamp_refresh(tp);
802 delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
803 remaining = icsk->icsk_rto -
804 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
805 index d2b597674d60..9fa51ab01ac4 100644
806 --- a/net/ipv6/addrconf.c
807 +++ b/net/ipv6/addrconf.c
808 @@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
809 list_for_each_entry(ifa, &idev->addr_list, if_list) {
810 if (ifa == ifp)
811 continue;
812 - if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
813 + if (ifa->prefix_len != ifp->prefix_len ||
814 + !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
815 ifp->prefix_len))
816 continue;
817 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
818 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
819 index 94903061f324..e83c41c53f4a 100644
820 --- a/net/ipv6/ip6_gre.c
821 +++ b/net/ipv6/ip6_gre.c
822 @@ -1717,6 +1717,24 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[],
823 return 0;
824 }
825
826 +static void ip6erspan_set_version(struct nlattr *data[],
827 + struct __ip6_tnl_parm *parms)
828 +{
829 + parms->erspan_ver = 1;
830 + if (data[IFLA_GRE_ERSPAN_VER])
831 + parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
832 +
833 + if (parms->erspan_ver == 1) {
834 + if (data[IFLA_GRE_ERSPAN_INDEX])
835 + parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
836 + } else if (parms->erspan_ver == 2) {
837 + if (data[IFLA_GRE_ERSPAN_DIR])
838 + parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
839 + if (data[IFLA_GRE_ERSPAN_HWID])
840 + parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
841 + }
842 +}
843 +
844 static void ip6gre_netlink_parms(struct nlattr *data[],
845 struct __ip6_tnl_parm *parms)
846 {
847 @@ -1765,20 +1783,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
848
849 if (data[IFLA_GRE_COLLECT_METADATA])
850 parms->collect_md = true;
851 -
852 - parms->erspan_ver = 1;
853 - if (data[IFLA_GRE_ERSPAN_VER])
854 - parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);
855 -
856 - if (parms->erspan_ver == 1) {
857 - if (data[IFLA_GRE_ERSPAN_INDEX])
858 - parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
859 - } else if (parms->erspan_ver == 2) {
860 - if (data[IFLA_GRE_ERSPAN_DIR])
861 - parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
862 - if (data[IFLA_GRE_ERSPAN_HWID])
863 - parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
864 - }
865 }
866
867 static int ip6gre_tap_init(struct net_device *dev)
868 @@ -2207,6 +2211,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev,
869 int err;
870
871 ip6gre_netlink_parms(data, &nt->parms);
872 + ip6erspan_set_version(data, &nt->parms);
873 ign = net_generic(net, ip6gre_net_id);
874
875 if (nt->parms.collect_md) {
876 @@ -2252,6 +2257,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[],
877 if (IS_ERR(t))
878 return PTR_ERR(t);
879
880 + ip6erspan_set_version(data, &p);
881 ip6gre_tunnel_unlink_md(ign, t);
882 ip6gre_tunnel_unlink(ign, t);
883 ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]);
884 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
885 index d0945253f43b..3b1a78906bc0 100644
886 --- a/net/packet/af_packet.c
887 +++ b/net/packet/af_packet.c
888 @@ -2887,7 +2887,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
889 goto out_free;
890 } else if (reserve) {
891 skb_reserve(skb, -reserve);
892 - if (len < reserve)
893 + if (len < reserve + sizeof(struct ipv6hdr) &&
894 + dev->min_header_len != dev->hard_header_len)
895 skb_reset_network_header(skb);
896 }
897
898 diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
899 index c361ce782412..c3d5ab01fba7 100644
900 --- a/net/vmw_vsock/vmci_transport.c
901 +++ b/net/vmw_vsock/vmci_transport.c
902 @@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work)
903
904 static void vmci_transport_destruct(struct vsock_sock *vsk)
905 {
906 + /* transport can be NULL if we hit a failure at init() time */
907 + if (!vmci_trans(vsk))
908 + return;
909 +
910 /* Ensure that the detach callback doesn't use the sk/vsk
911 * we are about to destruct.
912 */
913 diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
914 index 5121729b8b63..ec3a828672ef 100644
915 --- a/net/x25/af_x25.c
916 +++ b/net/x25/af_x25.c
917 @@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb)
918 unsigned int lci = 1;
919 struct sock *sk;
920
921 - read_lock_bh(&x25_list_lock);
922 -
923 - while ((sk = __x25_find_socket(lci, nb)) != NULL) {
924 + while ((sk = x25_find_socket(lci, nb)) != NULL) {
925 sock_put(sk);
926 if (++lci == 4096) {
927 lci = 0;
928 break;
929 }
930 + cond_resched();
931 }
932
933 - read_unlock_bh(&x25_list_lock);
934 return lci;
935 }
936