Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0217-4.9.118-all-fixes.patch

Parent Directory | Revision Log


Revision 3194 - (show annotations) (download)
Wed Aug 8 14:17:43 2018 UTC (5 years, 8 months ago) by niro
File size: 27736 byte(s)
-linux-4.9.118
1 diff --git a/Makefile b/Makefile
2 index 773c26c95d98..0940f11fa071 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 117
9 +SUBLEVEL = 118
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
14 index 4e0292e0aafb..30b74b491909 100644
15 --- a/arch/x86/kvm/vmx.c
16 +++ b/arch/x86/kvm/vmx.c
17 @@ -7085,6 +7085,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
18 HRTIMER_MODE_REL_PINNED);
19 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
20
21 + vmx->nested.vpid02 = allocate_vpid();
22 +
23 vmx->nested.vmxon = true;
24
25 skip_emulated_instruction(vcpu);
26 @@ -9264,10 +9266,8 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
27 goto free_vmcs;
28 }
29
30 - if (nested) {
31 + if (nested)
32 nested_vmx_setup_ctls_msrs(vmx);
33 - vmx->nested.vpid02 = allocate_vpid();
34 - }
35
36 vmx->nested.posted_intr_nv = -1;
37 vmx->nested.current_vmptr = -1ull;
38 @@ -9285,7 +9285,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
39 return &vmx->vcpu;
40
41 free_vmcs:
42 - free_vpid(vmx->nested.vpid02);
43 free_loaded_vmcs(vmx->loaded_vmcs);
44 free_msrs:
45 kfree(vmx->guest_msrs);
46 diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
47 index 9126627cbf4d..75f2bef79718 100644
48 --- a/drivers/crypto/padlock-aes.c
49 +++ b/drivers/crypto/padlock-aes.c
50 @@ -266,6 +266,8 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
51 return;
52 }
53
54 + count -= initial;
55 +
56 if (initial)
57 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
58 : "+S"(input), "+D"(output)
59 @@ -273,7 +275,7 @@ static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key,
60
61 asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */
62 : "+S"(input), "+D"(output)
63 - : "d"(control_word), "b"(key), "c"(count - initial));
64 + : "d"(control_word), "b"(key), "c"(count));
65 }
66
67 static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
68 @@ -284,6 +286,8 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
69 if (count < cbc_fetch_blocks)
70 return cbc_crypt(input, output, key, iv, control_word, count);
71
72 + count -= initial;
73 +
74 if (initial)
75 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
76 : "+S" (input), "+D" (output), "+a" (iv)
77 @@ -291,7 +295,7 @@ static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key,
78
79 asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */
80 : "+S" (input), "+D" (output), "+a" (iv)
81 - : "d" (control_word), "b" (key), "c" (count-initial));
82 + : "d" (control_word), "b" (key), "c" (count));
83 return iv;
84 }
85
86 diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
87 index 75056553b06c..f8c9f6f4f822 100644
88 --- a/drivers/gpu/drm/vc4/vc4_plane.c
89 +++ b/drivers/gpu/drm/vc4/vc4_plane.c
90 @@ -350,6 +350,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
91 vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
92 if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
93 vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
94 + } else {
95 + vc4_state->x_scaling[1] = VC4_SCALING_NONE;
96 + vc4_state->y_scaling[1] = VC4_SCALING_NONE;
97 }
98
99 vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
100 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
101 index f5fcc0850dac..8a5e0ae4e4c0 100644
102 --- a/drivers/net/bonding/bond_main.c
103 +++ b/drivers/net/bonding/bond_main.c
104 @@ -1682,6 +1682,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
105 goto err_upper_unlink;
106 }
107
108 + bond->nest_level = dev_get_nest_level(bond_dev) + 1;
109 +
110 /* If the mode uses primary, then the following is handled by
111 * bond_change_active_slave().
112 */
113 @@ -1729,7 +1731,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
114 if (bond_mode_uses_xmit_hash(bond))
115 bond_update_slave_arr(bond, NULL);
116
117 - bond->nest_level = dev_get_nest_level(bond_dev);
118
119 netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
120 slave_dev->name,
121 @@ -3359,6 +3360,13 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
122 }
123 }
124
125 +static int bond_get_nest_level(struct net_device *bond_dev)
126 +{
127 + struct bonding *bond = netdev_priv(bond_dev);
128 +
129 + return bond->nest_level;
130 +}
131 +
132 static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
133 struct rtnl_link_stats64 *stats)
134 {
135 @@ -3367,7 +3375,7 @@ static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
136 struct list_head *iter;
137 struct slave *slave;
138
139 - spin_lock(&bond->stats_lock);
140 + spin_lock_nested(&bond->stats_lock, bond_get_nest_level(bond_dev));
141 memcpy(stats, &bond->bond_stats, sizeof(*stats));
142
143 rcu_read_lock();
144 @@ -4163,6 +4171,7 @@ static const struct net_device_ops bond_netdev_ops = {
145 .ndo_neigh_setup = bond_neigh_setup,
146 .ndo_vlan_rx_add_vid = bond_vlan_rx_add_vid,
147 .ndo_vlan_rx_kill_vid = bond_vlan_rx_kill_vid,
148 + .ndo_get_lock_subclass = bond_get_nest_level,
149 #ifdef CONFIG_NET_POLL_CONTROLLER
150 .ndo_netpoll_setup = bond_netpoll_setup,
151 .ndo_netpoll_cleanup = bond_netpoll_cleanup,
152 @@ -4655,6 +4664,7 @@ static int bond_init(struct net_device *bond_dev)
153 if (!bond->wq)
154 return -ENOMEM;
155
156 + bond->nest_level = SINGLE_DEPTH_NESTING;
157 netdev_lockdep_set_classes(bond_dev);
158
159 list_add_tail(&bond->bond_list, &bn->dev_list);
160 diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
161 index b00358297424..d0846ae9e0e4 100644
162 --- a/drivers/net/can/usb/ems_usb.c
163 +++ b/drivers/net/can/usb/ems_usb.c
164 @@ -1071,6 +1071,7 @@ static void ems_usb_disconnect(struct usb_interface *intf)
165 usb_free_urb(dev->intr_urb);
166
167 kfree(dev->intr_in_buffer);
168 + kfree(dev->tx_msg_buffer);
169 }
170 }
171
172 diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
173 index e13c9cd45dc0..bcd993140f84 100644
174 --- a/drivers/net/ethernet/amazon/ena/ena_com.c
175 +++ b/drivers/net/ethernet/amazon/ena/ena_com.c
176 @@ -331,6 +331,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
177
178 memset(&io_sq->desc_addr, 0x0, sizeof(struct ena_com_io_desc_addr));
179
180 + io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
181 io_sq->desc_entry_size =
182 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
183 sizeof(struct ena_eth_io_tx_desc) :
184 diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
185 index 84c5d296d13e..684835833fe3 100644
186 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
187 +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
188 @@ -877,14 +877,14 @@ static void xgbe_phy_adjust_link(struct xgbe_prv_data *pdata)
189
190 if (pdata->tx_pause != pdata->phy.tx_pause) {
191 new_state = 1;
192 - pdata->hw_if.config_tx_flow_control(pdata);
193 pdata->tx_pause = pdata->phy.tx_pause;
194 + pdata->hw_if.config_tx_flow_control(pdata);
195 }
196
197 if (pdata->rx_pause != pdata->phy.rx_pause) {
198 new_state = 1;
199 - pdata->hw_if.config_rx_flow_control(pdata);
200 pdata->rx_pause = pdata->phy.rx_pause;
201 + pdata->hw_if.config_rx_flow_control(pdata);
202 }
203
204 /* Speed support */
205 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
206 index b3bc1287b2a7..0df71865fab1 100644
207 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
208 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
209 @@ -55,7 +55,7 @@
210 #include <linux/of_mdio.h>
211 #include "dwmac1000.h"
212
213 -#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
214 +#define STMMAC_ALIGN(x) __ALIGN_KERNEL(x, SMP_CACHE_BYTES)
215 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
216
217 /* Module parameters */
218 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
219 index 56c8a2342c14..eafc28142cd2 100644
220 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
221 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
222 @@ -183,7 +183,7 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
223 return -ENOMEM;
224
225 /* Enable pci device */
226 - ret = pcim_enable_device(pdev);
227 + ret = pci_enable_device(pdev);
228 if (ret) {
229 dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
230 __func__);
231 @@ -232,9 +232,45 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
232 static void stmmac_pci_remove(struct pci_dev *pdev)
233 {
234 stmmac_dvr_remove(&pdev->dev);
235 + pci_disable_device(pdev);
236 }
237
238 -static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
239 +static int stmmac_pci_suspend(struct device *dev)
240 +{
241 + struct pci_dev *pdev = to_pci_dev(dev);
242 + int ret;
243 +
244 + ret = stmmac_suspend(dev);
245 + if (ret)
246 + return ret;
247 +
248 + ret = pci_save_state(pdev);
249 + if (ret)
250 + return ret;
251 +
252 + pci_disable_device(pdev);
253 + pci_wake_from_d3(pdev, true);
254 + return 0;
255 +}
256 +
257 +static int stmmac_pci_resume(struct device *dev)
258 +{
259 + struct pci_dev *pdev = to_pci_dev(dev);
260 + int ret;
261 +
262 + pci_restore_state(pdev);
263 + pci_set_power_state(pdev, PCI_D0);
264 +
265 + ret = pci_enable_device(pdev);
266 + if (ret)
267 + return ret;
268 +
269 + pci_set_master(pdev);
270 +
271 + return stmmac_resume(dev);
272 +}
273 +
274 +static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
275
276 #define STMMAC_VENDOR_ID 0x700
277 #define STMMAC_QUARK_ID 0x0937
278 diff --git a/drivers/net/phy/mdio-mux-bcm-iproc.c b/drivers/net/phy/mdio-mux-bcm-iproc.c
279 index 0a5f62e0efcc..487bf5b8f545 100644
280 --- a/drivers/net/phy/mdio-mux-bcm-iproc.c
281 +++ b/drivers/net/phy/mdio-mux-bcm-iproc.c
282 @@ -218,7 +218,7 @@ out:
283
284 static int mdio_mux_iproc_remove(struct platform_device *pdev)
285 {
286 - struct iproc_mdiomux_desc *md = dev_get_platdata(&pdev->dev);
287 + struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);
288
289 mdio_mux_uninit(md->mux_handle);
290 mdiobus_unregister(md->mii_bus);
291 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
292 index 5e0626c80b81..c5e04d1ad73a 100644
293 --- a/drivers/net/usb/lan78xx.c
294 +++ b/drivers/net/usb/lan78xx.c
295 @@ -1170,6 +1170,8 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
296 mod_timer(&dev->stat_monitor,
297 jiffies + STAT_UPDATE_TIMER);
298 }
299 +
300 + tasklet_schedule(&dev->bh);
301 }
302
303 return ret;
304 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
305 index a5908e4c06cb..681256f97cb3 100644
306 --- a/drivers/net/xen-netfront.c
307 +++ b/drivers/net/xen-netfront.c
308 @@ -86,6 +86,7 @@ struct netfront_cb {
309 /* IRQ name is queue name with "-tx" or "-rx" appended */
310 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
311
312 +static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
313 static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
314
315 struct netfront_stats {
316 @@ -1349,6 +1350,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
317 netif_carrier_off(netdev);
318
319 xenbus_switch_state(dev, XenbusStateInitialising);
320 + wait_event(module_load_q,
321 + xenbus_read_driver_state(dev->otherend) !=
322 + XenbusStateClosed &&
323 + xenbus_read_driver_state(dev->otherend) !=
324 + XenbusStateUnknown);
325 return netdev;
326
327 exit:
328 diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
329 index b40a074822cf..15aeeb2159cc 100644
330 --- a/drivers/pinctrl/intel/pinctrl-intel.c
331 +++ b/drivers/pinctrl/intel/pinctrl-intel.c
332 @@ -604,12 +604,17 @@ static int intel_gpio_get(struct gpio_chip *chip, unsigned offset)
333 {
334 struct intel_pinctrl *pctrl = gpiochip_get_data(chip);
335 void __iomem *reg;
336 + u32 padcfg0;
337
338 reg = intel_get_padcfg(pctrl, offset, PADCFG0);
339 if (!reg)
340 return -EINVAL;
341
342 - return !!(readl(reg) & PADCFG0_GPIORXSTATE);
343 + padcfg0 = readl(reg);
344 + if (!(padcfg0 & PADCFG0_GPIOTXDIS))
345 + return !!(padcfg0 & PADCFG0_GPIOTXSTATE);
346 +
347 + return !!(padcfg0 & PADCFG0_GPIORXSTATE);
348 }
349
350 static void intel_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
351 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
352 index 2065a0f9dca6..8d9b416399f9 100644
353 --- a/drivers/scsi/sg.c
354 +++ b/drivers/scsi/sg.c
355 @@ -2185,6 +2185,7 @@ sg_add_sfp(Sg_device * sdp)
356 write_lock_irqsave(&sdp->sfd_lock, iflags);
357 if (atomic_read(&sdp->detaching)) {
358 write_unlock_irqrestore(&sdp->sfd_lock, iflags);
359 + kfree(sfp);
360 return ERR_PTR(-ENODEV);
361 }
362 list_add_tail(&sfp->sfd_siblings, &sdp->sfds);
363 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
364 index a7c08cc4c1b7..30076956a096 100644
365 --- a/drivers/virtio/virtio_balloon.c
366 +++ b/drivers/virtio/virtio_balloon.c
367 @@ -493,7 +493,9 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
368 tell_host(vb, vb->inflate_vq);
369
370 /* balloon's page migration 2nd step -- deflate "page" */
371 + spin_lock_irqsave(&vb_dev_info->pages_lock, flags);
372 balloon_page_delete(page);
373 + spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
374 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
375 set_page_pfns(vb, vb->pfns, page);
376 tell_host(vb, vb->deflate_vq);
377 diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c
378 index ce62a380314f..cec0fa208078 100644
379 --- a/fs/squashfs/block.c
380 +++ b/fs/squashfs/block.c
381 @@ -166,6 +166,8 @@ int squashfs_read_data(struct super_block *sb, u64 index, int length,
382 }
383
384 if (compressed) {
385 + if (!msblk->stream)
386 + goto read_failure;
387 length = squashfs_decompress(msblk, bh, b, offset, length,
388 output);
389 if (length < 0)
390 diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
391 index 86ad9a4b8c36..0681feab4a84 100644
392 --- a/fs/squashfs/fragment.c
393 +++ b/fs/squashfs/fragment.c
394 @@ -49,11 +49,16 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
395 u64 *fragment_block)
396 {
397 struct squashfs_sb_info *msblk = sb->s_fs_info;
398 - int block = SQUASHFS_FRAGMENT_INDEX(fragment);
399 - int offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
400 - u64 start_block = le64_to_cpu(msblk->fragment_index[block]);
401 + int block, offset, size;
402 struct squashfs_fragment_entry fragment_entry;
403 - int size;
404 + u64 start_block;
405 +
406 + if (fragment >= msblk->fragments)
407 + return -EIO;
408 + block = SQUASHFS_FRAGMENT_INDEX(fragment);
409 + offset = SQUASHFS_FRAGMENT_INDEX_OFFSET(fragment);
410 +
411 + start_block = le64_to_cpu(msblk->fragment_index[block]);
412
413 size = squashfs_read_metadata(sb, &fragment_entry, &start_block,
414 &offset, sizeof(fragment_entry));
415 diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h
416 index 1da565cb50c3..ef69c31947bf 100644
417 --- a/fs/squashfs/squashfs_fs_sb.h
418 +++ b/fs/squashfs/squashfs_fs_sb.h
419 @@ -75,6 +75,7 @@ struct squashfs_sb_info {
420 unsigned short block_log;
421 long long bytes_used;
422 unsigned int inodes;
423 + unsigned int fragments;
424 int xattr_ids;
425 };
426 #endif
427 diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c
428 index cf01e15a7b16..1516bb779b8d 100644
429 --- a/fs/squashfs/super.c
430 +++ b/fs/squashfs/super.c
431 @@ -175,6 +175,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
432 msblk->inode_table = le64_to_cpu(sblk->inode_table_start);
433 msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
434 msblk->inodes = le32_to_cpu(sblk->inodes);
435 + msblk->fragments = le32_to_cpu(sblk->fragments);
436 flags = le16_to_cpu(sblk->flags);
437
438 TRACE("Found valid superblock on %pg\n", sb->s_bdev);
439 @@ -185,7 +186,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent)
440 TRACE("Filesystem size %lld bytes\n", msblk->bytes_used);
441 TRACE("Block size %d\n", msblk->block_size);
442 TRACE("Number of inodes %d\n", msblk->inodes);
443 - TRACE("Number of fragments %d\n", le32_to_cpu(sblk->fragments));
444 + TRACE("Number of fragments %d\n", msblk->fragments);
445 TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
446 TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
447 TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
448 @@ -272,7 +273,7 @@ allocate_id_index_table:
449 sb->s_export_op = &squashfs_export_ops;
450
451 handle_fragments:
452 - fragments = le32_to_cpu(sblk->fragments);
453 + fragments = msblk->fragments;
454 if (fragments == 0)
455 goto check_directory_table;
456
457 diff --git a/include/net/tcp.h b/include/net/tcp.h
458 index 5d440bb0e409..97d210535cdd 100644
459 --- a/include/net/tcp.h
460 +++ b/include/net/tcp.h
461 @@ -363,7 +363,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
462 struct pipe_inode_info *pipe, size_t len,
463 unsigned int flags);
464
465 -void tcp_enter_quickack_mode(struct sock *sk);
466 +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
467 static inline void tcp_dec_quickack_mode(struct sock *sk,
468 const unsigned int pkts)
469 {
470 diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c
471 index 82f0dff90030..9c2da06a8869 100644
472 --- a/kernel/sched/swait.c
473 +++ b/kernel/sched/swait.c
474 @@ -33,9 +33,6 @@ void swake_up(struct swait_queue_head *q)
475 {
476 unsigned long flags;
477
478 - if (!swait_active(q))
479 - return;
480 -
481 raw_spin_lock_irqsave(&q->lock, flags);
482 swake_up_locked(q);
483 raw_spin_unlock_irqrestore(&q->lock, flags);
484 @@ -51,9 +48,6 @@ void swake_up_all(struct swait_queue_head *q)
485 struct swait_queue *curr;
486 LIST_HEAD(tmp);
487
488 - if (!swait_active(q))
489 - return;
490 -
491 raw_spin_lock_irq(&q->lock);
492 list_splice_init(&q->task_list, &tmp);
493 while (!list_empty(&tmp)) {
494 diff --git a/net/dsa/slave.c b/net/dsa/slave.c
495 index 5000e6f20f4a..339d9c678d3e 100644
496 --- a/net/dsa/slave.c
497 +++ b/net/dsa/slave.c
498 @@ -1199,6 +1199,9 @@ int dsa_slave_suspend(struct net_device *slave_dev)
499 {
500 struct dsa_slave_priv *p = netdev_priv(slave_dev);
501
502 + if (!netif_running(slave_dev))
503 + return 0;
504 +
505 netif_device_detach(slave_dev);
506
507 if (p->phy) {
508 @@ -1216,6 +1219,9 @@ int dsa_slave_resume(struct net_device *slave_dev)
509 {
510 struct dsa_slave_priv *p = netdev_priv(slave_dev);
511
512 + if (!netif_running(slave_dev))
513 + return 0;
514 +
515 netif_device_attach(slave_dev);
516
517 if (p->phy) {
518 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
519 index 7bdd89354db5..6a2ef162088d 100644
520 --- a/net/ipv4/fib_frontend.c
521 +++ b/net/ipv4/fib_frontend.c
522 @@ -282,19 +282,19 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
523 return ip_hdr(skb)->daddr;
524
525 in_dev = __in_dev_get_rcu(dev);
526 - BUG_ON(!in_dev);
527
528 net = dev_net(dev);
529
530 scope = RT_SCOPE_UNIVERSE;
531 if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
532 + bool vmark = in_dev && IN_DEV_SRC_VMARK(in_dev);
533 struct flowi4 fl4 = {
534 .flowi4_iif = LOOPBACK_IFINDEX,
535 .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
536 .daddr = ip_hdr(skb)->saddr,
537 .flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
538 .flowi4_scope = scope,
539 - .flowi4_mark = IN_DEV_SRC_VMARK(in_dev) ? skb->mark : 0,
540 + .flowi4_mark = vmark ? skb->mark : 0,
541 };
542 if (!fib_lookup(net, &fl4, &res, 0))
543 return FIB_RES_PREFSRC(net, res);
544 diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
545 index 8effac0f2219..f8b41aaac76f 100644
546 --- a/net/ipv4/inet_fragment.c
547 +++ b/net/ipv4/inet_fragment.c
548 @@ -356,11 +356,6 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
549 {
550 struct inet_frag_queue *q;
551
552 - if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
553 - inet_frag_schedule_worker(f);
554 - return NULL;
555 - }
556 -
557 q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
558 if (!q)
559 return NULL;
560 @@ -397,6 +392,11 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
561 struct inet_frag_queue *q;
562 int depth = 0;
563
564 + if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
565 + inet_frag_schedule_worker(f);
566 + return NULL;
567 + }
568 +
569 if (frag_mem_limit(nf) > nf->low_thresh)
570 inet_frag_schedule_worker(f);
571
572 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
573 index 4bf3b8af0257..752711cd4834 100644
574 --- a/net/ipv4/ip_fragment.c
575 +++ b/net/ipv4/ip_fragment.c
576 @@ -446,11 +446,16 @@ found:
577 int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
578
579 if (i < next->len) {
580 + int delta = -next->truesize;
581 +
582 /* Eat head of the next overlapped fragment
583 * and leave the loop. The next ones cannot overlap.
584 */
585 if (!pskb_pull(next, i))
586 goto err;
587 + delta += next->truesize;
588 + if (delta)
589 + add_frag_mem_limit(qp->q.net, delta);
590 FRAG_CB(next)->offset += i;
591 qp->q.meat -= i;
592 if (next->ip_summed != CHECKSUM_UNNECESSARY)
593 diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
594 index 9169859506b7..7e44d23b0328 100644
595 --- a/net/ipv4/tcp_bbr.c
596 +++ b/net/ipv4/tcp_bbr.c
597 @@ -324,6 +324,10 @@ static u32 bbr_target_cwnd(struct sock *sk, u32 bw, int gain)
598 /* Reduce delayed ACKs by rounding up cwnd to the next even number. */
599 cwnd = (cwnd + 1) & ~1U;
600
601 + /* Ensure gain cycling gets inflight above BDP even for small BDPs. */
602 + if (bbr->mode == BBR_PROBE_BW && gain > BBR_UNIT)
603 + cwnd += 2;
604 +
605 return cwnd;
606 }
607
608 diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
609 index dd52ccb812ea..8905a0aec8ee 100644
610 --- a/net/ipv4/tcp_dctcp.c
611 +++ b/net/ipv4/tcp_dctcp.c
612 @@ -138,7 +138,7 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
613 */
614 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
615 __tcp_send_ack(sk, ca->prior_rcv_nxt);
616 - tcp_enter_quickack_mode(sk);
617 + tcp_enter_quickack_mode(sk, 1);
618 }
619
620 ca->prior_rcv_nxt = tp->rcv_nxt;
621 @@ -159,7 +159,7 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
622 */
623 if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
624 __tcp_send_ack(sk, ca->prior_rcv_nxt);
625 - tcp_enter_quickack_mode(sk);
626 + tcp_enter_quickack_mode(sk, 1);
627 }
628
629 ca->prior_rcv_nxt = tp->rcv_nxt;
630 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
631 index 44d136fd2af5..a9be8df108b4 100644
632 --- a/net/ipv4/tcp_input.c
633 +++ b/net/ipv4/tcp_input.c
634 @@ -198,21 +198,23 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
635 }
636 }
637
638 -static void tcp_incr_quickack(struct sock *sk)
639 +static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
640 {
641 struct inet_connection_sock *icsk = inet_csk(sk);
642 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss);
643
644 if (quickacks == 0)
645 quickacks = 2;
646 + quickacks = min(quickacks, max_quickacks);
647 if (quickacks > icsk->icsk_ack.quick)
648 - icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS);
649 + icsk->icsk_ack.quick = quickacks;
650 }
651
652 -void tcp_enter_quickack_mode(struct sock *sk)
653 +void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
654 {
655 struct inet_connection_sock *icsk = inet_csk(sk);
656 - tcp_incr_quickack(sk);
657 +
658 + tcp_incr_quickack(sk, max_quickacks);
659 icsk->icsk_ack.pingpong = 0;
660 icsk->icsk_ack.ato = TCP_ATO_MIN;
661 }
662 @@ -248,8 +250,10 @@ static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
663 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
664 }
665
666 -static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
667 +static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
668 {
669 + struct tcp_sock *tp = tcp_sk(sk);
670 +
671 switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) {
672 case INET_ECN_NOT_ECT:
673 /* Funny extension: if ECT is not set on a segment,
674 @@ -257,31 +261,31 @@ static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
675 * it is probably a retransmit.
676 */
677 if (tp->ecn_flags & TCP_ECN_SEEN)
678 - tcp_enter_quickack_mode((struct sock *)tp);
679 + tcp_enter_quickack_mode(sk, 2);
680 break;
681 case INET_ECN_CE:
682 - if (tcp_ca_needs_ecn((struct sock *)tp))
683 - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE);
684 + if (tcp_ca_needs_ecn(sk))
685 + tcp_ca_event(sk, CA_EVENT_ECN_IS_CE);
686
687 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
688 /* Better not delay acks, sender can have a very low cwnd */
689 - tcp_enter_quickack_mode((struct sock *)tp);
690 + tcp_enter_quickack_mode(sk, 2);
691 tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
692 }
693 tp->ecn_flags |= TCP_ECN_SEEN;
694 break;
695 default:
696 - if (tcp_ca_needs_ecn((struct sock *)tp))
697 - tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE);
698 + if (tcp_ca_needs_ecn(sk))
699 + tcp_ca_event(sk, CA_EVENT_ECN_NO_CE);
700 tp->ecn_flags |= TCP_ECN_SEEN;
701 break;
702 }
703 }
704
705 -static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb)
706 +static void tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
707 {
708 - if (tp->ecn_flags & TCP_ECN_OK)
709 - __tcp_ecn_check_ce(tp, skb);
710 + if (tcp_sk(sk)->ecn_flags & TCP_ECN_OK)
711 + __tcp_ecn_check_ce(sk, skb);
712 }
713
714 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th)
715 @@ -675,7 +679,7 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
716 /* The _first_ data packet received, initialize
717 * delayed ACK engine.
718 */
719 - tcp_incr_quickack(sk);
720 + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
721 icsk->icsk_ack.ato = TCP_ATO_MIN;
722 } else {
723 int m = now - icsk->icsk_ack.lrcvtime;
724 @@ -691,13 +695,13 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
725 /* Too long gap. Apparently sender failed to
726 * restart window, so that we send ACKs quickly.
727 */
728 - tcp_incr_quickack(sk);
729 + tcp_incr_quickack(sk, TCP_MAX_QUICKACKS);
730 sk_mem_reclaim(sk);
731 }
732 }
733 icsk->icsk_ack.lrcvtime = now;
734
735 - tcp_ecn_check_ce(tp, skb);
736 + tcp_ecn_check_ce(sk, skb);
737
738 if (skb->len >= 128)
739 tcp_grow_window(sk, skb);
740 @@ -4210,7 +4214,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
741 if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
742 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
743 NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
744 - tcp_enter_quickack_mode(sk);
745 + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
746
747 if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
748 u32 end_seq = TCP_SKB_CB(skb)->end_seq;
749 @@ -4454,7 +4458,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
750 u32 seq, end_seq;
751 bool fragstolen;
752
753 - tcp_ecn_check_ce(tp, skb);
754 + tcp_ecn_check_ce(sk, skb);
755
756 if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
757 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
758 @@ -4734,7 +4738,7 @@ queue_and_out:
759 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
760
761 out_of_window:
762 - tcp_enter_quickack_mode(sk);
763 + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
764 inet_csk_schedule_ack(sk);
765 drop:
766 tcp_drop(sk, skb);
767 @@ -4745,8 +4749,6 @@ drop:
768 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt + tcp_receive_window(tp)))
769 goto out_of_window;
770
771 - tcp_enter_quickack_mode(sk);
772 -
773 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
774 /* Partial packet, seq < rcv_next < end_seq */
775 SOCK_DEBUG(sk, "partial packet: rcv_next %X seq %X - %X\n",
776 @@ -5830,7 +5832,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
777 * to stand against the temptation 8) --ANK
778 */
779 inet_csk_schedule_ack(sk);
780 - tcp_enter_quickack_mode(sk);
781 + tcp_enter_quickack_mode(sk, TCP_MAX_QUICKACKS);
782 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
783 TCP_DELACK_MAX, TCP_RTO_MAX);
784
785 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
786 index 15e6e7b9fd2b..8d0aafbdbbc3 100644
787 --- a/net/netlink/af_netlink.c
788 +++ b/net/netlink/af_netlink.c
789 @@ -62,6 +62,7 @@
790 #include <asm/cacheflush.h>
791 #include <linux/hash.h>
792 #include <linux/genetlink.h>
793 +#include <linux/nospec.h>
794
795 #include <net/net_namespace.h>
796 #include <net/sock.h>
797 @@ -654,6 +655,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
798
799 if (protocol < 0 || protocol >= MAX_LINKS)
800 return -EPROTONOSUPPORT;
801 + protocol = array_index_nospec(protocol, MAX_LINKS);
802
803 netlink_lock_table();
804 #ifdef CONFIG_MODULES
805 diff --git a/net/socket.c b/net/socket.c
806 index bd3b33988ee0..35fa349ba274 100644
807 --- a/net/socket.c
808 +++ b/net/socket.c
809 @@ -89,6 +89,7 @@
810 #include <linux/magic.h>
811 #include <linux/slab.h>
812 #include <linux/xattr.h>
813 +#include <linux/nospec.h>
814
815 #include <asm/uaccess.h>
816 #include <asm/unistd.h>
817 @@ -2338,6 +2339,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
818
819 if (call < 1 || call > SYS_SENDMMSG)
820 return -EINVAL;
821 + call = array_index_nospec(call, SYS_SENDMMSG + 1);
822
823 len = nargs[call];
824 if (len > sizeof(a))