Contents of /trunk/kernel-alx/patches-5.4/0167-5.4.68-all-fixes.patch
Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 54925 byte(s)
-add missing
1 | diff --git a/Makefile b/Makefile |
2 | index d2e46ca4c955b..acb2499d9b053 100644 |
3 | --- a/Makefile |
4 | +++ b/Makefile |
5 | @@ -1,7 +1,7 @@ |
6 | # SPDX-License-Identifier: GPL-2.0 |
7 | VERSION = 5 |
8 | PATCHLEVEL = 4 |
9 | -SUBLEVEL = 67 |
10 | +SUBLEVEL = 68 |
11 | EXTRAVERSION = |
12 | NAME = Kleptomaniac Octopus |
13 | |
14 | diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig |
15 | index 390568afee9fd..fc0160e8ed334 100644 |
16 | --- a/drivers/iommu/Kconfig |
17 | +++ b/drivers/iommu/Kconfig |
18 | @@ -138,7 +138,7 @@ config AMD_IOMMU |
19 | select PCI_PASID |
20 | select IOMMU_API |
21 | select IOMMU_IOVA |
22 | - depends on X86_64 && PCI && ACPI |
23 | + depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE |
24 | ---help--- |
25 | With this option you can enable support for AMD IOMMU hardware in |
26 | your system. An IOMMU is a hardware component which provides |
27 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
28 | index fa91d856a43ee..7b724f7b27a99 100644 |
29 | --- a/drivers/iommu/amd_iommu.c |
30 | +++ b/drivers/iommu/amd_iommu.c |
31 | @@ -3873,6 +3873,7 @@ out: |
32 | static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, |
33 | struct amd_ir_data *data) |
34 | { |
35 | + bool ret; |
36 | struct irq_remap_table *table; |
37 | struct amd_iommu *iommu; |
38 | unsigned long flags; |
39 | @@ -3890,10 +3891,18 @@ static int modify_irte_ga(u16 devid, int index, struct irte_ga *irte, |
40 | |
41 | entry = (struct irte_ga *)table->table; |
42 | entry = &entry[index]; |
43 | - entry->lo.fields_remap.valid = 0; |
44 | - entry->hi.val = irte->hi.val; |
45 | - entry->lo.val = irte->lo.val; |
46 | - entry->lo.fields_remap.valid = 1; |
47 | + |
48 | + ret = cmpxchg_double(&entry->lo.val, &entry->hi.val, |
49 | + entry->lo.val, entry->hi.val, |
50 | + irte->lo.val, irte->hi.val); |
51 | + /* |
52 | + * We use cmpxchg16 to atomically update the 128-bit IRTE, |
53 | + * and it cannot be updated by the hardware or other processors |
54 | + * behind us, so the return value of cmpxchg16 should be the |
55 | + * same as the old value. |
56 | + */ |
57 | + WARN_ON(!ret); |
58 | + |
59 | if (data) |
60 | data->ref = entry; |
61 | |
62 | diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c |
63 | index 135ae5222cf35..31d7e2d4f3040 100644 |
64 | --- a/drivers/iommu/amd_iommu_init.c |
65 | +++ b/drivers/iommu/amd_iommu_init.c |
66 | @@ -1522,7 +1522,14 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) |
67 | iommu->mmio_phys_end = MMIO_REG_END_OFFSET; |
68 | else |
69 | iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; |
70 | - if (((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) |
71 | + |
72 | + /* |
73 | + * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. |
74 | + * GAM also requires GA mode. Therefore, we need to |
75 | + * check cmpxchg16b support before enabling it. |
76 | + */ |
77 | + if (!boot_cpu_has(X86_FEATURE_CX16) || |
78 | + ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0)) |
79 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; |
80 | break; |
81 | case 0x11: |
82 | @@ -1531,8 +1538,18 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) |
83 | iommu->mmio_phys_end = MMIO_REG_END_OFFSET; |
84 | else |
85 | iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET; |
86 | - if (((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) |
87 | + |
88 | + /* |
89 | + * Note: GA (128-bit IRTE) mode requires cmpxchg16b supports. |
90 | + * XT, GAM also requires GA mode. Therefore, we need to |
91 | + * check cmpxchg16b support before enabling them. |
92 | + */ |
93 | + if (!boot_cpu_has(X86_FEATURE_CX16) || |
94 | + ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) { |
95 | amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY; |
96 | + break; |
97 | + } |
98 | + |
99 | /* |
100 | * Note: Since iommu_update_intcapxt() leverages |
101 | * the IOMMU MMIO access to MSI capability block registers |
102 | diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c |
103 | index 1368816abaed1..99cdb2f18fa2f 100644 |
104 | --- a/drivers/net/dsa/rtl8366.c |
105 | +++ b/drivers/net/dsa/rtl8366.c |
106 | @@ -452,13 +452,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, |
107 | return ret; |
108 | |
109 | if (vid == vlanmc.vid) { |
110 | - /* clear VLAN member configurations */ |
111 | - vlanmc.vid = 0; |
112 | - vlanmc.priority = 0; |
113 | - vlanmc.member = 0; |
114 | - vlanmc.untag = 0; |
115 | - vlanmc.fid = 0; |
116 | - |
117 | + /* Remove this port from the VLAN */ |
118 | + vlanmc.member &= ~BIT(port); |
119 | + vlanmc.untag &= ~BIT(port); |
120 | + /* |
121 | + * If no ports are members of this VLAN |
122 | + * anymore then clear the whole member |
123 | + * config so it can be reused. |
124 | + */ |
125 | + if (!vlanmc.member && vlanmc.untag) { |
126 | + vlanmc.vid = 0; |
127 | + vlanmc.priority = 0; |
128 | + vlanmc.fid = 0; |
129 | + } |
130 | ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); |
131 | if (ret) { |
132 | dev_err(smi->dev, |
133 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
134 | index 4030020f92be5..4f4fd80762610 100644 |
135 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
136 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c |
137 | @@ -4204,7 +4204,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, |
138 | u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; |
139 | u16 dst = BNXT_HWRM_CHNL_CHIMP; |
140 | |
141 | - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
142 | + if (BNXT_NO_FW_ACCESS(bp)) |
143 | return -EBUSY; |
144 | |
145 | if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { |
146 | @@ -5539,7 +5539,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, |
147 | struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; |
148 | u16 error_code; |
149 | |
150 | - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
151 | + if (BNXT_NO_FW_ACCESS(bp)) |
152 | return 0; |
153 | |
154 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); |
155 | @@ -7454,7 +7454,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) |
156 | |
157 | if (set_tpa) |
158 | tpa_flags = bp->flags & BNXT_FLAG_TPA; |
159 | - else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) |
160 | + else if (BNXT_NO_FW_ACCESS(bp)) |
161 | return 0; |
162 | for (i = 0; i < bp->nr_vnics; i++) { |
163 | rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); |
164 | @@ -8939,18 +8939,16 @@ static ssize_t bnxt_show_temp(struct device *dev, |
165 | struct hwrm_temp_monitor_query_output *resp; |
166 | struct bnxt *bp = dev_get_drvdata(dev); |
167 | u32 len = 0; |
168 | + int rc; |
169 | |
170 | resp = bp->hwrm_cmd_resp_addr; |
171 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); |
172 | mutex_lock(&bp->hwrm_cmd_lock); |
173 | - if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) |
174 | + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
175 | + if (!rc) |
176 | len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ |
177 | mutex_unlock(&bp->hwrm_cmd_lock); |
178 | - |
179 | - if (len) |
180 | - return len; |
181 | - |
182 | - return sprintf(buf, "unknown\n"); |
183 | + return rc ?: len; |
184 | } |
185 | static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); |
186 | |
187 | @@ -8970,7 +8968,16 @@ static void bnxt_hwmon_close(struct bnxt *bp) |
188 | |
189 | static void bnxt_hwmon_open(struct bnxt *bp) |
190 | { |
191 | + struct hwrm_temp_monitor_query_input req = {0}; |
192 | struct pci_dev *pdev = bp->pdev; |
193 | + int rc; |
194 | + |
195 | + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); |
196 | + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); |
197 | + if (rc == -EACCES || rc == -EOPNOTSUPP) { |
198 | + bnxt_hwmon_close(bp); |
199 | + return; |
200 | + } |
201 | |
202 | if (bp->hwmon_dev) |
203 | return; |
204 | @@ -11385,14 +11392,15 @@ static void bnxt_remove_one(struct pci_dev *pdev) |
205 | if (BNXT_PF(bp)) |
206 | bnxt_sriov_disable(bp); |
207 | |
208 | + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
209 | + bnxt_cancel_sp_work(bp); |
210 | + bp->sp_event = 0; |
211 | + |
212 | bnxt_dl_fw_reporters_destroy(bp, true); |
213 | pci_disable_pcie_error_reporting(pdev); |
214 | unregister_netdev(dev); |
215 | bnxt_dl_unregister(bp); |
216 | bnxt_shutdown_tc(bp); |
217 | - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); |
218 | - bnxt_cancel_sp_work(bp); |
219 | - bp->sp_event = 0; |
220 | |
221 | bnxt_clear_int_mode(bp); |
222 | bnxt_hwrm_func_drv_unrgtr(bp); |
223 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h |
224 | index a61a5873ab0a7..d2dd852d27da9 100644 |
225 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h |
226 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h |
227 | @@ -1628,6 +1628,10 @@ struct bnxt { |
228 | #define BNXT_STATE_ABORT_ERR 5 |
229 | #define BNXT_STATE_FW_FATAL_COND 6 |
230 | |
231 | +#define BNXT_NO_FW_ACCESS(bp) \ |
232 | + (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ |
233 | + pci_channel_offline((bp)->pdev)) |
234 | + |
235 | struct bnxt_irq *irq_tbl; |
236 | int total_irqs; |
237 | u8 mac_addr[ETH_ALEN]; |
238 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
239 | index fd01bcc8e28d4..1d15ff08f176b 100644 |
240 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
241 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
242 | @@ -1665,9 +1665,12 @@ static int bnxt_set_pauseparam(struct net_device *dev, |
243 | if (!BNXT_SINGLE_PF(bp)) |
244 | return -EOPNOTSUPP; |
245 | |
246 | + mutex_lock(&bp->link_lock); |
247 | if (epause->autoneg) { |
248 | - if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) |
249 | - return -EINVAL; |
250 | + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
251 | + rc = -EINVAL; |
252 | + goto pause_exit; |
253 | + } |
254 | |
255 | link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; |
256 | if (bp->hwrm_spec_code >= 0x10201) |
257 | @@ -1688,11 +1691,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, |
258 | if (epause->tx_pause) |
259 | link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; |
260 | |
261 | - if (netif_running(dev)) { |
262 | - mutex_lock(&bp->link_lock); |
263 | + if (netif_running(dev)) |
264 | rc = bnxt_hwrm_set_pause(bp); |
265 | - mutex_unlock(&bp->link_lock); |
266 | - } |
267 | + |
268 | +pause_exit: |
269 | + mutex_unlock(&bp->link_lock); |
270 | return rc; |
271 | } |
272 | |
273 | @@ -2397,8 +2400,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) |
274 | struct bnxt *bp = netdev_priv(dev); |
275 | struct ethtool_eee *eee = &bp->eee; |
276 | struct bnxt_link_info *link_info = &bp->link_info; |
277 | - u32 advertising = |
278 | - _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); |
279 | + u32 advertising; |
280 | int rc = 0; |
281 | |
282 | if (!BNXT_SINGLE_PF(bp)) |
283 | @@ -2407,19 +2409,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) |
284 | if (!(bp->flags & BNXT_FLAG_EEE_CAP)) |
285 | return -EOPNOTSUPP; |
286 | |
287 | + mutex_lock(&bp->link_lock); |
288 | + advertising = _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); |
289 | if (!edata->eee_enabled) |
290 | goto eee_ok; |
291 | |
292 | if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { |
293 | netdev_warn(dev, "EEE requires autoneg\n"); |
294 | - return -EINVAL; |
295 | + rc = -EINVAL; |
296 | + goto eee_exit; |
297 | } |
298 | if (edata->tx_lpi_enabled) { |
299 | if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || |
300 | edata->tx_lpi_timer < bp->lpi_tmr_lo)) { |
301 | netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", |
302 | bp->lpi_tmr_lo, bp->lpi_tmr_hi); |
303 | - return -EINVAL; |
304 | + rc = -EINVAL; |
305 | + goto eee_exit; |
306 | } else if (!bp->lpi_tmr_hi) { |
307 | edata->tx_lpi_timer = eee->tx_lpi_timer; |
308 | } |
309 | @@ -2429,7 +2435,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) |
310 | } else if (edata->advertised & ~advertising) { |
311 | netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", |
312 | edata->advertised, advertising); |
313 | - return -EINVAL; |
314 | + rc = -EINVAL; |
315 | + goto eee_exit; |
316 | } |
317 | |
318 | eee->advertised = edata->advertised; |
319 | @@ -2441,6 +2448,8 @@ eee_ok: |
320 | if (netif_running(dev)) |
321 | rc = bnxt_hwrm_set_link_setting(bp, false, true); |
322 | |
323 | +eee_exit: |
324 | + mutex_unlock(&bp->link_lock); |
325 | return rc; |
326 | } |
327 | |
328 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c |
329 | index f459313357c78..137ff00605d94 100644 |
330 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c |
331 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c |
332 | @@ -1617,13 +1617,16 @@ out: |
333 | static int configure_filter_tcb(struct adapter *adap, unsigned int tid, |
334 | struct filter_entry *f) |
335 | { |
336 | - if (f->fs.hitcnts) |
337 | + if (f->fs.hitcnts) { |
338 | set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W, |
339 | - TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) | |
340 | + TCB_TIMESTAMP_V(TCB_TIMESTAMP_M), |
341 | + TCB_TIMESTAMP_V(0ULL), |
342 | + 1); |
343 | + set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W, |
344 | TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M), |
345 | - TCB_TIMESTAMP_V(0ULL) | |
346 | TCB_RTT_TS_RECENT_AGE_V(0ULL), |
347 | 1); |
348 | + } |
349 | |
350 | if (f->fs.newdmac) |
351 | set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1, |
352 | diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c |
353 | index b1a073eea60b2..a020e84906813 100644 |
354 | --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c |
355 | +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c |
356 | @@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap) |
357 | { |
358 | struct mps_entries_ref *mps_entry, *tmp; |
359 | |
360 | - if (!list_empty(&adap->mps_ref)) |
361 | + if (list_empty(&adap->mps_ref)) |
362 | return; |
363 | |
364 | spin_lock(&adap->mps_ref_lock); |
365 | diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c |
366 | index 2d20a48f0ba0a..5329af2337a91 100644 |
367 | --- a/drivers/net/ethernet/ibm/ibmvnic.c |
368 | +++ b/drivers/net/ethernet/ibm/ibmvnic.c |
369 | @@ -416,6 +416,9 @@ static int reset_rx_pools(struct ibmvnic_adapter *adapter) |
370 | int i, j, rc; |
371 | u64 *size_array; |
372 | |
373 | + if (!adapter->rx_pool) |
374 | + return -1; |
375 | + |
376 | size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + |
377 | be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); |
378 | |
379 | @@ -586,6 +589,9 @@ static int reset_tx_pools(struct ibmvnic_adapter *adapter) |
380 | int tx_scrqs; |
381 | int i, rc; |
382 | |
383 | + if (!adapter->tx_pool) |
384 | + return -1; |
385 | + |
386 | tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); |
387 | for (i = 0; i < tx_scrqs; i++) { |
388 | rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]); |
389 | @@ -1918,7 +1924,10 @@ static int do_reset(struct ibmvnic_adapter *adapter, |
390 | adapter->req_rx_add_entries_per_subcrq != |
391 | old_num_rx_slots || |
392 | adapter->req_tx_entries_per_subcrq != |
393 | - old_num_tx_slots) { |
394 | + old_num_tx_slots || |
395 | + !adapter->rx_pool || |
396 | + !adapter->tso_pool || |
397 | + !adapter->tx_pool) { |
398 | release_rx_pools(adapter); |
399 | release_tx_pools(adapter); |
400 | release_napi(adapter); |
401 | @@ -1930,12 +1939,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, |
402 | |
403 | } else { |
404 | rc = reset_tx_pools(adapter); |
405 | - if (rc) |
406 | + if (rc) { |
407 | + netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", |
408 | + rc); |
409 | goto out; |
410 | + } |
411 | |
412 | rc = reset_rx_pools(adapter); |
413 | - if (rc) |
414 | + if (rc) { |
415 | + netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", |
416 | + rc); |
417 | goto out; |
418 | + } |
419 | } |
420 | ibmvnic_disable_irqs(adapter); |
421 | } |
422 | diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c |
423 | index 900affbdcc0e4..96948276b2bc3 100644 |
424 | --- a/drivers/net/ethernet/lantiq_xrx200.c |
425 | +++ b/drivers/net/ethernet/lantiq_xrx200.c |
426 | @@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget) |
427 | } |
428 | |
429 | if (rx < budget) { |
430 | - napi_complete(&ch->napi); |
431 | - ltq_dma_enable_irq(&ch->dma); |
432 | + if (napi_complete_done(&ch->napi, rx)) |
433 | + ltq_dma_enable_irq(&ch->dma); |
434 | } |
435 | |
436 | return rx; |
437 | @@ -268,9 +268,12 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget) |
438 | net_dev->stats.tx_bytes += bytes; |
439 | netdev_completed_queue(ch->priv->net_dev, pkts, bytes); |
440 | |
441 | + if (netif_queue_stopped(net_dev)) |
442 | + netif_wake_queue(net_dev); |
443 | + |
444 | if (pkts < budget) { |
445 | - napi_complete(&ch->napi); |
446 | - ltq_dma_enable_irq(&ch->dma); |
447 | + if (napi_complete_done(&ch->napi, pkts)) |
448 | + ltq_dma_enable_irq(&ch->dma); |
449 | } |
450 | |
451 | return pkts; |
452 | @@ -341,10 +344,12 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr) |
453 | { |
454 | struct xrx200_chan *ch = ptr; |
455 | |
456 | - ltq_dma_disable_irq(&ch->dma); |
457 | - ltq_dma_ack_irq(&ch->dma); |
458 | + if (napi_schedule_prep(&ch->napi)) { |
459 | + __napi_schedule(&ch->napi); |
460 | + ltq_dma_disable_irq(&ch->dma); |
461 | + } |
462 | |
463 | - napi_schedule(&ch->napi); |
464 | + ltq_dma_ack_irq(&ch->dma); |
465 | |
466 | return IRQ_HANDLED; |
467 | } |
468 | @@ -498,7 +503,7 @@ static int xrx200_probe(struct platform_device *pdev) |
469 | |
470 | /* setup NAPI */ |
471 | netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32); |
472 | - netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32); |
473 | + netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32); |
474 | |
475 | platform_set_drvdata(pdev, priv); |
476 | |
477 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c |
478 | index 01468ec274466..b949b9a7538b0 100644 |
479 | --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c |
480 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c |
481 | @@ -35,7 +35,6 @@ |
482 | #include <net/sock.h> |
483 | |
484 | #include "en.h" |
485 | -#include "accel/tls.h" |
486 | #include "fpga/sdk.h" |
487 | #include "en_accel/tls.h" |
488 | |
489 | @@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { |
490 | |
491 | #define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc) |
492 | |
493 | +static bool is_tls_atomic_stats(struct mlx5e_priv *priv) |
494 | +{ |
495 | + return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev); |
496 | +} |
497 | + |
498 | int mlx5e_tls_get_count(struct mlx5e_priv *priv) |
499 | { |
500 | - if (!priv->tls) |
501 | + if (!is_tls_atomic_stats(priv)) |
502 | return 0; |
503 | |
504 | return NUM_TLS_SW_COUNTERS; |
505 | @@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) |
506 | { |
507 | unsigned int i, idx = 0; |
508 | |
509 | - if (!priv->tls) |
510 | + if (!is_tls_atomic_stats(priv)) |
511 | return 0; |
512 | |
513 | for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) |
514 | @@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) |
515 | { |
516 | int i, idx = 0; |
517 | |
518 | - if (!priv->tls) |
519 | + if (!is_tls_atomic_stats(priv)) |
520 | return 0; |
521 | |
522 | for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) |
523 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |
524 | index 5acfdea3a75a8..7cc80dc4e6d89 100644 |
525 | --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |
526 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c |
527 | @@ -1143,35 +1143,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) |
528 | } |
529 | esw->fdb_table.offloads.send_to_vport_grp = g; |
530 | |
531 | - /* create peer esw miss group */ |
532 | - memset(flow_group_in, 0, inlen); |
533 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { |
534 | + /* create peer esw miss group */ |
535 | + memset(flow_group_in, 0, inlen); |
536 | |
537 | - esw_set_flow_group_source_port(esw, flow_group_in); |
538 | + esw_set_flow_group_source_port(esw, flow_group_in); |
539 | |
540 | - if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { |
541 | - match_criteria = MLX5_ADDR_OF(create_flow_group_in, |
542 | - flow_group_in, |
543 | - match_criteria); |
544 | + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { |
545 | + match_criteria = MLX5_ADDR_OF(create_flow_group_in, |
546 | + flow_group_in, |
547 | + match_criteria); |
548 | |
549 | - MLX5_SET_TO_ONES(fte_match_param, match_criteria, |
550 | - misc_parameters.source_eswitch_owner_vhca_id); |
551 | + MLX5_SET_TO_ONES(fte_match_param, match_criteria, |
552 | + misc_parameters.source_eswitch_owner_vhca_id); |
553 | |
554 | - MLX5_SET(create_flow_group_in, flow_group_in, |
555 | - source_eswitch_owner_vhca_id_valid, 1); |
556 | - } |
557 | + MLX5_SET(create_flow_group_in, flow_group_in, |
558 | + source_eswitch_owner_vhca_id_valid, 1); |
559 | + } |
560 | |
561 | - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); |
562 | - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, |
563 | - ix + esw->total_vports - 1); |
564 | - ix += esw->total_vports; |
565 | + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); |
566 | + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, |
567 | + ix + esw->total_vports - 1); |
568 | + ix += esw->total_vports; |
569 | |
570 | - g = mlx5_create_flow_group(fdb, flow_group_in); |
571 | - if (IS_ERR(g)) { |
572 | - err = PTR_ERR(g); |
573 | - esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); |
574 | - goto peer_miss_err; |
575 | + g = mlx5_create_flow_group(fdb, flow_group_in); |
576 | + if (IS_ERR(g)) { |
577 | + err = PTR_ERR(g); |
578 | + esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); |
579 | + goto peer_miss_err; |
580 | + } |
581 | + esw->fdb_table.offloads.peer_miss_grp = g; |
582 | } |
583 | - esw->fdb_table.offloads.peer_miss_grp = g; |
584 | |
585 | /* create miss group */ |
586 | memset(flow_group_in, 0, inlen); |
587 | @@ -1206,7 +1208,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) |
588 | miss_rule_err: |
589 | mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); |
590 | miss_err: |
591 | - mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); |
592 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
593 | + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); |
594 | peer_miss_err: |
595 | mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); |
596 | send_vport_err: |
597 | @@ -1229,7 +1232,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) |
598 | mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); |
599 | mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); |
600 | mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); |
601 | - mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); |
602 | + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) |
603 | + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); |
604 | mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); |
605 | |
606 | mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); |
607 | diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
608 | index b66e5b6eecd99..9ac2f52187ea4 100644 |
609 | --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
610 | +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c |
611 | @@ -629,7 +629,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, |
612 | fte->action = *flow_act; |
613 | fte->flow_context = spec->flow_context; |
614 | |
615 | - tree_init_node(&fte->node, NULL, del_sw_fte); |
616 | + tree_init_node(&fte->node, del_hw_fte, del_sw_fte); |
617 | |
618 | return fte; |
619 | } |
620 | @@ -1737,7 +1737,6 @@ skip_search: |
621 | up_write_ref_node(&g->node, false); |
622 | rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); |
623 | up_write_ref_node(&fte->node, false); |
624 | - tree_put_node(&fte->node, false); |
625 | return rule; |
626 | } |
627 | rule = ERR_PTR(-ENOENT); |
628 | @@ -1837,7 +1836,6 @@ search_again_locked: |
629 | up_write_ref_node(&g->node, false); |
630 | rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); |
631 | up_write_ref_node(&fte->node, false); |
632 | - tree_put_node(&fte->node, false); |
633 | tree_put_node(&g->node, false); |
634 | return rule; |
635 | |
636 | @@ -1930,7 +1928,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) |
637 | up_write_ref_node(&fte->node, false); |
638 | } else { |
639 | del_hw_fte(&fte->node); |
640 | - up_write(&fte->node.lock); |
641 | + /* Avoid double call to del_hw_fte */ |
642 | + fte->node.del_hw_func = NULL; |
643 | + up_write_ref_node(&fte->node, false); |
644 | tree_put_node(&fte->node, false); |
645 | } |
646 | kfree(handle); |
647 | diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c |
648 | index 1b840ee473396..17b91ed39369c 100644 |
649 | --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c |
650 | +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c |
651 | @@ -731,8 +731,8 @@ nfp_port_get_fecparam(struct net_device *netdev, |
652 | struct nfp_eth_table_port *eth_port; |
653 | struct nfp_port *port; |
654 | |
655 | - param->active_fec = ETHTOOL_FEC_NONE_BIT; |
656 | - param->fec = ETHTOOL_FEC_NONE_BIT; |
657 | + param->active_fec = ETHTOOL_FEC_NONE; |
658 | + param->fec = ETHTOOL_FEC_NONE; |
659 | |
660 | port = nfp_port_from_netdev(netdev); |
661 | eth_port = nfp_port_get_eth_port(port); |
662 | diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c |
663 | index adfdf6260b269..fcb7a6b4cc02a 100644 |
664 | --- a/drivers/net/geneve.c |
665 | +++ b/drivers/net/geneve.c |
666 | @@ -773,7 +773,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, |
667 | struct net_device *dev, |
668 | struct geneve_sock *gs4, |
669 | struct flowi4 *fl4, |
670 | - const struct ip_tunnel_info *info) |
671 | + const struct ip_tunnel_info *info, |
672 | + __be16 dport, __be16 sport) |
673 | { |
674 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); |
675 | struct geneve_dev *geneve = netdev_priv(dev); |
676 | @@ -789,6 +790,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, |
677 | fl4->flowi4_proto = IPPROTO_UDP; |
678 | fl4->daddr = info->key.u.ipv4.dst; |
679 | fl4->saddr = info->key.u.ipv4.src; |
680 | + fl4->fl4_dport = dport; |
681 | + fl4->fl4_sport = sport; |
682 | |
683 | tos = info->key.tos; |
684 | if ((tos == 1) && !geneve->collect_md) { |
685 | @@ -823,7 +826,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, |
686 | struct net_device *dev, |
687 | struct geneve_sock *gs6, |
688 | struct flowi6 *fl6, |
689 | - const struct ip_tunnel_info *info) |
690 | + const struct ip_tunnel_info *info, |
691 | + __be16 dport, __be16 sport) |
692 | { |
693 | bool use_cache = ip_tunnel_dst_cache_usable(skb, info); |
694 | struct geneve_dev *geneve = netdev_priv(dev); |
695 | @@ -839,6 +843,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, |
696 | fl6->flowi6_proto = IPPROTO_UDP; |
697 | fl6->daddr = info->key.u.ipv6.dst; |
698 | fl6->saddr = info->key.u.ipv6.src; |
699 | + fl6->fl6_dport = dport; |
700 | + fl6->fl6_sport = sport; |
701 | + |
702 | prio = info->key.tos; |
703 | if ((prio == 1) && !geneve->collect_md) { |
704 | prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); |
705 | @@ -885,14 +892,15 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
706 | __be16 sport; |
707 | int err; |
708 | |
709 | - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); |
710 | + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
711 | + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, |
712 | + geneve->info.key.tp_dst, sport); |
713 | if (IS_ERR(rt)) |
714 | return PTR_ERR(rt); |
715 | |
716 | skb_tunnel_check_pmtu(skb, &rt->dst, |
717 | GENEVE_IPV4_HLEN + info->options_len); |
718 | |
719 | - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
720 | if (geneve->collect_md) { |
721 | tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); |
722 | ttl = key->ttl; |
723 | @@ -947,13 +955,14 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, |
724 | __be16 sport; |
725 | int err; |
726 | |
727 | - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); |
728 | + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
729 | + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, |
730 | + geneve->info.key.tp_dst, sport); |
731 | if (IS_ERR(dst)) |
732 | return PTR_ERR(dst); |
733 | |
734 | skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); |
735 | |
736 | - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
737 | if (geneve->collect_md) { |
738 | prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); |
739 | ttl = key->ttl; |
740 | @@ -1034,13 +1043,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) |
741 | { |
742 | struct ip_tunnel_info *info = skb_tunnel_info(skb); |
743 | struct geneve_dev *geneve = netdev_priv(dev); |
744 | + __be16 sport; |
745 | |
746 | if (ip_tunnel_info_af(info) == AF_INET) { |
747 | struct rtable *rt; |
748 | struct flowi4 fl4; |
749 | + |
750 | struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); |
751 | + sport = udp_flow_src_port(geneve->net, skb, |
752 | + 1, USHRT_MAX, true); |
753 | |
754 | - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); |
755 | + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, |
756 | + geneve->info.key.tp_dst, sport); |
757 | if (IS_ERR(rt)) |
758 | return PTR_ERR(rt); |
759 | |
760 | @@ -1050,9 +1064,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) |
761 | } else if (ip_tunnel_info_af(info) == AF_INET6) { |
762 | struct dst_entry *dst; |
763 | struct flowi6 fl6; |
764 | + |
765 | struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); |
766 | + sport = udp_flow_src_port(geneve->net, skb, |
767 | + 1, USHRT_MAX, true); |
768 | |
769 | - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); |
770 | + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, |
771 | + geneve->info.key.tp_dst, sport); |
772 | if (IS_ERR(dst)) |
773 | return PTR_ERR(dst); |
774 | |
775 | @@ -1063,8 +1081,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) |
776 | return -EINVAL; |
777 | } |
778 | |
779 | - info->key.tp_src = udp_flow_src_port(geneve->net, skb, |
780 | - 1, USHRT_MAX, true); |
781 | + info->key.tp_src = sport; |
782 | info->key.tp_dst = geneve->info.key.tp_dst; |
783 | return 0; |
784 | } |
785 | diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c |
786 | index 54e5d4f9622cd..b718b11607fcd 100644 |
787 | --- a/drivers/net/phy/phy.c |
788 | +++ b/drivers/net/phy/phy.c |
789 | @@ -834,7 +834,7 @@ EXPORT_SYMBOL(phy_free_interrupt); |
790 | */ |
791 | void phy_stop(struct phy_device *phydev) |
792 | { |
793 | - if (!phy_is_started(phydev)) { |
794 | + if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) { |
795 | WARN(1, "called from state %s\n", |
796 | phy_state_to_str(phydev->state)); |
797 | return; |
798 | diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c |
799 | index 110924d627449..9d0a306f05623 100644 |
800 | --- a/drivers/net/phy/phy_device.c |
801 | +++ b/drivers/net/phy/phy_device.c |
802 | @@ -1421,7 +1421,8 @@ void phy_detach(struct phy_device *phydev) |
803 | |
804 | phy_led_triggers_unregister(phydev); |
805 | |
806 | - module_put(phydev->mdio.dev.driver->owner); |
807 | + if (phydev->mdio.dev.driver) |
808 | + module_put(phydev->mdio.dev.driver->owner); |
809 | |
810 | /* If the device had no specific driver before (i.e. - it |
811 | * was using the generic driver), we unbind the device |
812 | diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c |
813 | index 48ced3912576c..16f33d1ffbfb9 100644 |
814 | --- a/drivers/net/wan/hdlc_ppp.c |
815 | +++ b/drivers/net/wan/hdlc_ppp.c |
816 | @@ -383,11 +383,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
817 | } |
818 | |
819 | for (opt = data; len; len -= opt[1], opt += opt[1]) { |
820 | - if (len < 2 || len < opt[1]) { |
821 | - dev->stats.rx_errors++; |
822 | - kfree(out); |
823 | - return; /* bad packet, drop silently */ |
824 | - } |
825 | + if (len < 2 || opt[1] < 2 || len < opt[1]) |
826 | + goto err_out; |
827 | |
828 | if (pid == PID_LCP) |
829 | switch (opt[0]) { |
830 | @@ -395,6 +392,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
831 | continue; /* MRU always OK and > 1500 bytes? */ |
832 | |
833 | case LCP_OPTION_ACCM: /* async control character map */ |
834 | + if (opt[1] < sizeof(valid_accm)) |
835 | + goto err_out; |
836 | if (!memcmp(opt, valid_accm, |
837 | sizeof(valid_accm))) |
838 | continue; |
839 | @@ -406,6 +405,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
840 | } |
841 | break; |
842 | case LCP_OPTION_MAGIC: |
843 | + if (len < 6) |
844 | + goto err_out; |
845 | if (opt[1] != 6 || (!opt[2] && !opt[3] && |
846 | !opt[4] && !opt[5])) |
847 | break; /* reject invalid magic number */ |
848 | @@ -424,6 +425,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, |
849 | ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); |
850 | |
851 | kfree(out); |
852 | + return; |
853 | + |
854 | +err_out: |
855 | + dev->stats.rx_errors++; |
856 | + kfree(out); |
857 | } |
858 | |
859 | static int ppp_rx(struct sk_buff *skb) |
860 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
861 | index 955e1370f033d..a62889c8bed7a 100644 |
862 | --- a/include/linux/skbuff.h |
863 | +++ b/include/linux/skbuff.h |
864 | @@ -3185,8 +3185,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) |
865 | * is untouched. Otherwise it is extended. Returns zero on |
866 | * success. The skb is freed on error if @free_on_error is true. |
867 | */ |
868 | -static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, |
869 | - bool free_on_error) |
870 | +static inline int __must_check __skb_put_padto(struct sk_buff *skb, |
871 | + unsigned int len, |
872 | + bool free_on_error) |
873 | { |
874 | unsigned int size = skb->len; |
875 | |
876 | @@ -3209,7 +3210,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, |
877 | * is untouched. Otherwise it is extended. Returns zero on |
878 | * success. The skb is freed on error. |
879 | */ |
880 | -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) |
881 | +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) |
882 | { |
883 | return __skb_put_padto(skb, len, true); |
884 | } |
885 | diff --git a/include/net/flow.h b/include/net/flow.h |
886 | index a50fb77a0b279..d058e63fb59a3 100644 |
887 | --- a/include/net/flow.h |
888 | +++ b/include/net/flow.h |
889 | @@ -116,6 +116,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, |
890 | fl4->saddr = saddr; |
891 | fl4->fl4_dport = dport; |
892 | fl4->fl4_sport = sport; |
893 | + fl4->flowi4_multipath_hash = 0; |
894 | } |
895 | |
896 | /* Reset some input parameters after previous lookup */ |
897 | diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h |
898 | index 2b6f3f13d5bcb..3e8f87a3c52fa 100644 |
899 | --- a/include/net/sctp/structs.h |
900 | +++ b/include/net/sctp/structs.h |
901 | @@ -224,12 +224,14 @@ struct sctp_sock { |
902 | data_ready_signalled:1; |
903 | |
904 | atomic_t pd_mode; |
905 | + |
906 | + /* Fields after this point will be skipped on copies, like on accept |
907 | + * and peeloff operations |
908 | + */ |
909 | + |
910 | /* Receive to here while partial delivery is in effect. */ |
911 | struct sk_buff_head pd_lobby; |
912 | |
913 | - /* These must be the last fields, as they will skipped on copies, |
914 | - * like on accept and peeloff operations |
915 | - */ |
916 | struct list_head auto_asconf_list; |
917 | int do_auto_asconf; |
918 | }; |
919 | diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
920 | index bbff4bccb885d..5646f291eb705 100644 |
921 | --- a/kernel/kprobes.c |
922 | +++ b/kernel/kprobes.c |
923 | @@ -2088,6 +2088,9 @@ static void kill_kprobe(struct kprobe *p) |
924 | { |
925 | struct kprobe *kp; |
926 | |
927 | + if (WARN_ON_ONCE(kprobe_gone(p))) |
928 | + return; |
929 | + |
930 | p->flags |= KPROBE_FLAG_GONE; |
931 | if (kprobe_aggrprobe(p)) { |
932 | /* |
933 | @@ -2270,7 +2273,10 @@ static int kprobes_module_callback(struct notifier_block *nb, |
934 | mutex_lock(&kprobe_mutex); |
935 | for (i = 0; i < KPROBE_TABLE_SIZE; i++) { |
936 | head = &kprobe_table[i]; |
937 | - hlist_for_each_entry_rcu(p, head, hlist) |
938 | + hlist_for_each_entry_rcu(p, head, hlist) { |
939 | + if (kprobe_gone(p)) |
940 | + continue; |
941 | + |
942 | if (within_module_init((unsigned long)p->addr, mod) || |
943 | (checkcore && |
944 | within_module_core((unsigned long)p->addr, mod))) { |
945 | @@ -2287,6 +2293,7 @@ static int kprobes_module_callback(struct notifier_block *nb, |
946 | */ |
947 | kill_kprobe(p); |
948 | } |
949 | + } |
950 | } |
951 | mutex_unlock(&kprobe_mutex); |
952 | return NOTIFY_DONE; |
953 | diff --git a/mm/huge_memory.c b/mm/huge_memory.c |
954 | index da9040a6838f8..873de55d93fb2 100644 |
955 | --- a/mm/huge_memory.c |
956 | +++ b/mm/huge_memory.c |
957 | @@ -2174,7 +2174,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, |
958 | put_page(page); |
959 | add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR); |
960 | return; |
961 | - } else if (is_huge_zero_pmd(*pmd)) { |
962 | + } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) { |
963 | /* |
964 | * FIXME: Do we want to invalidate secondary mmu by calling |
965 | * mmu_notifier_invalidate_range() see comments below inside |
966 | @@ -2262,27 +2262,33 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, |
967 | pte = pte_offset_map(&_pmd, addr); |
968 | BUG_ON(!pte_none(*pte)); |
969 | set_pte_at(mm, addr, pte, entry); |
970 | - atomic_inc(&page[i]._mapcount); |
971 | - pte_unmap(pte); |
972 | - } |
973 | - |
974 | - /* |
975 | - * Set PG_double_map before dropping compound_mapcount to avoid |
976 | - * false-negative page_mapped(). |
977 | - */ |
978 | - if (compound_mapcount(page) > 1 && !TestSetPageDoubleMap(page)) { |
979 | - for (i = 0; i < HPAGE_PMD_NR; i++) |
980 | + if (!pmd_migration) |
981 | atomic_inc(&page[i]._mapcount); |
982 | + pte_unmap(pte); |
983 | } |
984 | |
985 | - if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { |
986 | - /* Last compound_mapcount is gone. */ |
987 | - __dec_node_page_state(page, NR_ANON_THPS); |
988 | - if (TestClearPageDoubleMap(page)) { |
989 | - /* No need in mapcount reference anymore */ |
990 | + if (!pmd_migration) { |
991 | + /* |
992 | + * Set PG_double_map before dropping compound_mapcount to avoid |
993 | + * false-negative page_mapped(). |
994 | + */ |
995 | + if (compound_mapcount(page) > 1 && |
996 | + !TestSetPageDoubleMap(page)) { |
997 | for (i = 0; i < HPAGE_PMD_NR; i++) |
998 | - atomic_dec(&page[i]._mapcount); |
999 | + atomic_inc(&page[i]._mapcount); |
1000 | + } |
1001 | + |
1002 | + lock_page_memcg(page); |
1003 | + if (atomic_add_negative(-1, compound_mapcount_ptr(page))) { |
1004 | + /* Last compound_mapcount is gone. */ |
1005 | + __dec_lruvec_page_state(page, NR_ANON_THPS); |
1006 | + if (TestClearPageDoubleMap(page)) { |
1007 | + /* No need in mapcount reference anymore */ |
1008 | + for (i = 0; i < HPAGE_PMD_NR; i++) |
1009 | + atomic_dec(&page[i]._mapcount); |
1010 | + } |
1011 | } |
1012 | + unlock_page_memcg(page); |
1013 | } |
1014 | |
1015 | smp_wmb(); /* make pte visible before pmd */ |
1016 | diff --git a/mm/vmscan.c b/mm/vmscan.c |
1017 | index 7fde5f904c8d3..6db9176d8c63e 100644 |
1018 | --- a/mm/vmscan.c |
1019 | +++ b/mm/vmscan.c |
1020 | @@ -2775,6 +2775,14 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) |
1021 | unsigned long reclaimed; |
1022 | unsigned long scanned; |
1023 | |
1024 | + /* |
1025 | + * This loop can become CPU-bound when target memcgs |
1026 | + * aren't eligible for reclaim - either because they |
1027 | + * don't have any reclaimable pages, or because their |
1028 | + * memory is explicitly protected. Avoid soft lockups. |
1029 | + */ |
1030 | + cond_resched(); |
1031 | + |
1032 | switch (mem_cgroup_protected(root, memcg)) { |
1033 | case MEMCG_PROT_MIN: |
1034 | /* |
1035 | diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c |
1036 | index bb98984cd27d0..48413b5eb61fc 100644 |
1037 | --- a/net/bridge/br_vlan.c |
1038 | +++ b/net/bridge/br_vlan.c |
1039 | @@ -1229,11 +1229,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v, |
1040 | } |
1041 | } |
1042 | |
1043 | -static int __br_vlan_get_pvid(const struct net_device *dev, |
1044 | - struct net_bridge_port *p, u16 *p_pvid) |
1045 | +int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) |
1046 | { |
1047 | struct net_bridge_vlan_group *vg; |
1048 | + struct net_bridge_port *p; |
1049 | |
1050 | + ASSERT_RTNL(); |
1051 | + p = br_port_get_check_rtnl(dev); |
1052 | if (p) |
1053 | vg = nbp_vlan_group(p); |
1054 | else if (netif_is_bridge_master(dev)) |
1055 | @@ -1244,18 +1246,23 @@ static int __br_vlan_get_pvid(const struct net_device *dev, |
1056 | *p_pvid = br_get_pvid(vg); |
1057 | return 0; |
1058 | } |
1059 | - |
1060 | -int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) |
1061 | -{ |
1062 | - ASSERT_RTNL(); |
1063 | - |
1064 | - return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid); |
1065 | -} |
1066 | EXPORT_SYMBOL_GPL(br_vlan_get_pvid); |
1067 | |
1068 | int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid) |
1069 | { |
1070 | - return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid); |
1071 | + struct net_bridge_vlan_group *vg; |
1072 | + struct net_bridge_port *p; |
1073 | + |
1074 | + p = br_port_get_check_rcu(dev); |
1075 | + if (p) |
1076 | + vg = nbp_vlan_group_rcu(p); |
1077 | + else if (netif_is_bridge_master(dev)) |
1078 | + vg = br_vlan_group_rcu(netdev_priv(dev)); |
1079 | + else |
1080 | + return -EINVAL; |
1081 | + |
1082 | + *p_pvid = br_get_pvid(vg); |
1083 | + return 0; |
1084 | } |
1085 | EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu); |
1086 | |
1087 | diff --git a/net/core/dev.c b/net/core/dev.c |
1088 | index cdc1c3a144e1f..20c7fd7b8b4bc 100644 |
1089 | --- a/net/core/dev.c |
1090 | +++ b/net/core/dev.c |
1091 | @@ -8241,7 +8241,7 @@ int dev_get_port_parent_id(struct net_device *dev, |
1092 | if (!first.id_len) |
1093 | first = *ppid; |
1094 | else if (memcmp(&first, ppid, sizeof(*ppid))) |
1095 | - return -ENODATA; |
1096 | + return -EOPNOTSUPP; |
1097 | } |
1098 | |
1099 | return err; |
1100 | diff --git a/net/core/filter.c b/net/core/filter.c |
1101 | index 5c490d473df1d..cf2a68513bfd5 100644 |
1102 | --- a/net/core/filter.c |
1103 | +++ b/net/core/filter.c |
1104 | @@ -4650,6 +4650,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, |
1105 | fl4.saddr = params->ipv4_src; |
1106 | fl4.fl4_sport = params->sport; |
1107 | fl4.fl4_dport = params->dport; |
1108 | + fl4.flowi4_multipath_hash = 0; |
1109 | |
1110 | if (flags & BPF_FIB_LOOKUP_DIRECT) { |
1111 | u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; |
1112 | diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c |
1113 | index d2a4553bcf39d..0fd1c2aa13615 100644 |
1114 | --- a/net/dcb/dcbnl.c |
1115 | +++ b/net/dcb/dcbnl.c |
1116 | @@ -1426,6 +1426,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, |
1117 | { |
1118 | const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; |
1119 | struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; |
1120 | + int prio; |
1121 | int err; |
1122 | |
1123 | if (!ops) |
1124 | @@ -1475,6 +1476,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, |
1125 | struct dcbnl_buffer *buffer = |
1126 | nla_data(ieee[DCB_ATTR_DCB_BUFFER]); |
1127 | |
1128 | + for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) { |
1129 | + if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) { |
1130 | + err = -EINVAL; |
1131 | + goto err; |
1132 | + } |
1133 | + } |
1134 | + |
1135 | err = ops->dcbnl_setbuffer(netdev, buffer); |
1136 | if (err) |
1137 | goto err; |
1138 | diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c |
1139 | index 2b0521feadaa7..0a8220d30c992 100644 |
1140 | --- a/net/ipv4/fib_frontend.c |
1141 | +++ b/net/ipv4/fib_frontend.c |
1142 | @@ -372,6 +372,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, |
1143 | fl4.flowi4_tun_key.tun_id = 0; |
1144 | fl4.flowi4_flags = 0; |
1145 | fl4.flowi4_uid = sock_net_uid(net, NULL); |
1146 | + fl4.flowi4_multipath_hash = 0; |
1147 | |
1148 | no_addr = idev->ifa_list == NULL; |
1149 | |
1150 | diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c |
1151 | index b36c4a3159e52..079dcf9f0c56d 100644 |
1152 | --- a/net/ipv4/ip_output.c |
1153 | +++ b/net/ipv4/ip_output.c |
1154 | @@ -74,6 +74,7 @@ |
1155 | #include <net/icmp.h> |
1156 | #include <net/checksum.h> |
1157 | #include <net/inetpeer.h> |
1158 | +#include <net/inet_ecn.h> |
1159 | #include <net/lwtunnel.h> |
1160 | #include <linux/bpf-cgroup.h> |
1161 | #include <linux/igmp.h> |
1162 | @@ -1699,7 +1700,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, |
1163 | if (IS_ERR(rt)) |
1164 | return; |
1165 | |
1166 | - inet_sk(sk)->tos = arg->tos; |
1167 | + inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; |
1168 | |
1169 | sk->sk_protocol = ip_hdr(skb)->protocol; |
1170 | sk->sk_bound_dev_if = arg->bound_dev_if; |
1171 | diff --git a/net/ipv4/route.c b/net/ipv4/route.c |
1172 | index b3a8d32f7d8df..aa77f989ba817 100644 |
1173 | --- a/net/ipv4/route.c |
1174 | +++ b/net/ipv4/route.c |
1175 | @@ -785,8 +785,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow |
1176 | neigh_event_send(n, NULL); |
1177 | } else { |
1178 | if (fib_lookup(net, fl4, &res, 0) == 0) { |
1179 | - struct fib_nh_common *nhc = FIB_RES_NHC(res); |
1180 | + struct fib_nh_common *nhc; |
1181 | |
1182 | + fib_select_path(net, &res, fl4, skb); |
1183 | + nhc = FIB_RES_NHC(res); |
1184 | update_or_create_fnhe(nhc, fl4->daddr, new_gw, |
1185 | 0, false, |
1186 | jiffies + ip_rt_gc_timeout); |
1187 | @@ -1012,6 +1014,7 @@ out: kfree_skb(skb); |
1188 | static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) |
1189 | { |
1190 | struct dst_entry *dst = &rt->dst; |
1191 | + struct net *net = dev_net(dst->dev); |
1192 | u32 old_mtu = ipv4_mtu(dst); |
1193 | struct fib_result res; |
1194 | bool lock = false; |
1195 | @@ -1032,9 +1035,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) |
1196 | return; |
1197 | |
1198 | rcu_read_lock(); |
1199 | - if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { |
1200 | - struct fib_nh_common *nhc = FIB_RES_NHC(res); |
1201 | + if (fib_lookup(net, fl4, &res, 0) == 0) { |
1202 | + struct fib_nh_common *nhc; |
1203 | |
1204 | + fib_select_path(net, &res, fl4, NULL); |
1205 | + nhc = FIB_RES_NHC(res); |
1206 | update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock, |
1207 | jiffies + ip_rt_mtu_expires); |
1208 | } |
1209 | @@ -2104,6 +2109,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, |
1210 | fl4.daddr = daddr; |
1211 | fl4.saddr = saddr; |
1212 | fl4.flowi4_uid = sock_net_uid(net, NULL); |
1213 | + fl4.flowi4_multipath_hash = 0; |
1214 | |
1215 | if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) { |
1216 | flkeys = &_flkeys; |
1217 | @@ -2625,8 +2631,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, |
1218 | fib_select_path(net, res, fl4, skb); |
1219 | |
1220 | dev_out = FIB_RES_DEV(*res); |
1221 | - fl4->flowi4_oif = dev_out->ifindex; |
1222 | - |
1223 | |
1224 | make_route: |
1225 | rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); |
1226 | diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig |
1227 | index ae1344e4cec54..dce14470b15ac 100644 |
1228 | --- a/net/ipv6/Kconfig |
1229 | +++ b/net/ipv6/Kconfig |
1230 | @@ -289,6 +289,7 @@ config IPV6_SEG6_LWTUNNEL |
1231 | config IPV6_SEG6_HMAC |
1232 | bool "IPv6: Segment Routing HMAC support" |
1233 | depends on IPV6 |
1234 | + select CRYPTO |
1235 | select CRYPTO_HMAC |
1236 | select CRYPTO_SHA1 |
1237 | select CRYPTO_SHA256 |
1238 | diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c |
1239 | index 7a0c877ca306c..96d80e50bf35b 100644 |
1240 | --- a/net/ipv6/ip6_fib.c |
1241 | +++ b/net/ipv6/ip6_fib.c |
1242 | @@ -1896,14 +1896,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, |
1243 | /* Need to own table->tb6_lock */ |
1244 | int fib6_del(struct fib6_info *rt, struct nl_info *info) |
1245 | { |
1246 | - struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, |
1247 | - lockdep_is_held(&rt->fib6_table->tb6_lock)); |
1248 | - struct fib6_table *table = rt->fib6_table; |
1249 | struct net *net = info->nl_net; |
1250 | struct fib6_info __rcu **rtp; |
1251 | struct fib6_info __rcu **rtp_next; |
1252 | + struct fib6_table *table; |
1253 | + struct fib6_node *fn; |
1254 | + |
1255 | + if (rt == net->ipv6.fib6_null_entry) |
1256 | + return -ENOENT; |
1257 | |
1258 | - if (!fn || rt == net->ipv6.fib6_null_entry) |
1259 | + table = rt->fib6_table; |
1260 | + fn = rcu_dereference_protected(rt->fib6_node, |
1261 | + lockdep_is_held(&table->tb6_lock)); |
1262 | + if (!fn) |
1263 | return -ENOENT; |
1264 | |
1265 | WARN_ON(!(fn->fn_flags & RTN_RTINFO)); |
1266 | diff --git a/net/key/af_key.c b/net/key/af_key.c |
1267 | index 979c579afc63b..a915bc86620af 100644 |
1268 | --- a/net/key/af_key.c |
1269 | +++ b/net/key/af_key.c |
1270 | @@ -1849,6 +1849,13 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms |
1271 | if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { |
1272 | struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; |
1273 | |
1274 | + if ((xfilter->sadb_x_filter_splen >= |
1275 | + (sizeof(xfrm_address_t) << 3)) || |
1276 | + (xfilter->sadb_x_filter_dplen >= |
1277 | + (sizeof(xfrm_address_t) << 3))) { |
1278 | + mutex_unlock(&pfk->dump_lock); |
1279 | + return -EINVAL; |
1280 | + } |
1281 | filter = kmalloc(sizeof(*filter), GFP_KERNEL); |
1282 | if (filter == NULL) { |
1283 | mutex_unlock(&pfk->dump_lock); |
1284 | diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c |
1285 | index a699e318b9a01..d6d2736ec9273 100644 |
1286 | --- a/net/qrtr/qrtr.c |
1287 | +++ b/net/qrtr/qrtr.c |
1288 | @@ -178,7 +178,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, |
1289 | { |
1290 | struct qrtr_hdr_v1 *hdr; |
1291 | size_t len = skb->len; |
1292 | - int rc = -ENODEV; |
1293 | + int rc; |
1294 | |
1295 | hdr = skb_push(skb, sizeof(*hdr)); |
1296 | hdr->version = cpu_to_le32(QRTR_PROTO_VER_1); |
1297 | @@ -196,15 +196,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, |
1298 | hdr->size = cpu_to_le32(len); |
1299 | hdr->confirm_rx = 0; |
1300 | |
1301 | - skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); |
1302 | - |
1303 | - mutex_lock(&node->ep_lock); |
1304 | - if (node->ep) |
1305 | - rc = node->ep->xmit(node->ep, skb); |
1306 | - else |
1307 | - kfree_skb(skb); |
1308 | - mutex_unlock(&node->ep_lock); |
1309 | + rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); |
1310 | |
1311 | + if (!rc) { |
1312 | + mutex_lock(&node->ep_lock); |
1313 | + rc = -ENODEV; |
1314 | + if (node->ep) |
1315 | + rc = node->ep->xmit(node->ep, skb); |
1316 | + else |
1317 | + kfree_skb(skb); |
1318 | + mutex_unlock(&node->ep_lock); |
1319 | + } |
1320 | return rc; |
1321 | } |
1322 | |
1323 | diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c |
1324 | index a0cfb4793c93f..778371bac93e2 100644 |
1325 | --- a/net/sched/act_ife.c |
1326 | +++ b/net/sched/act_ife.c |
1327 | @@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a) |
1328 | kfree_rcu(p, rcu); |
1329 | } |
1330 | |
1331 | +static int load_metalist(struct nlattr **tb, bool rtnl_held) |
1332 | +{ |
1333 | + int i; |
1334 | + |
1335 | + for (i = 1; i < max_metacnt; i++) { |
1336 | + if (tb[i]) { |
1337 | + void *val = nla_data(tb[i]); |
1338 | + int len = nla_len(tb[i]); |
1339 | + int rc; |
1340 | + |
1341 | + rc = load_metaops_and_vet(i, val, len, rtnl_held); |
1342 | + if (rc != 0) |
1343 | + return rc; |
1344 | + } |
1345 | + } |
1346 | + |
1347 | + return 0; |
1348 | +} |
1349 | + |
1350 | static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, |
1351 | bool exists, bool rtnl_held) |
1352 | { |
1353 | @@ -449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, |
1354 | val = nla_data(tb[i]); |
1355 | len = nla_len(tb[i]); |
1356 | |
1357 | - rc = load_metaops_and_vet(i, val, len, rtnl_held); |
1358 | - if (rc != 0) |
1359 | - return rc; |
1360 | - |
1361 | rc = add_metainfo(ife, i, val, len, exists); |
1362 | if (rc) |
1363 | return rc; |
1364 | @@ -508,6 +523,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, |
1365 | if (!p) |
1366 | return -ENOMEM; |
1367 | |
1368 | + if (tb[TCA_IFE_METALST]) { |
1369 | + err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, |
1370 | + tb[TCA_IFE_METALST], NULL, |
1371 | + NULL); |
1372 | + if (err) { |
1373 | + kfree(p); |
1374 | + return err; |
1375 | + } |
1376 | + err = load_metalist(tb2, rtnl_held); |
1377 | + if (err) { |
1378 | + kfree(p); |
1379 | + return err; |
1380 | + } |
1381 | + } |
1382 | + |
1383 | index = parm->index; |
1384 | err = tcf_idr_check_alloc(tn, &index, a, bind); |
1385 | if (err < 0) { |
1386 | @@ -569,15 +599,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, |
1387 | } |
1388 | |
1389 | if (tb[TCA_IFE_METALST]) { |
1390 | - err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, |
1391 | - tb[TCA_IFE_METALST], NULL, |
1392 | - NULL); |
1393 | - if (err) |
1394 | - goto metadata_parse_err; |
1395 | err = populate_metalist(ife, tb2, exists, rtnl_held); |
1396 | if (err) |
1397 | goto metadata_parse_err; |
1398 | - |
1399 | } else { |
1400 | /* if no passed metadata allow list or passed allow-all |
1401 | * then here we process by adding as many supported metadatum |
1402 | diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c |
1403 | index 896c9037155a5..0e275e11f5115 100644 |
1404 | --- a/net/sched/sch_generic.c |
1405 | +++ b/net/sched/sch_generic.c |
1406 | @@ -1126,27 +1126,36 @@ static void dev_deactivate_queue(struct net_device *dev, |
1407 | struct netdev_queue *dev_queue, |
1408 | void *_qdisc_default) |
1409 | { |
1410 | - struct Qdisc *qdisc_default = _qdisc_default; |
1411 | - struct Qdisc *qdisc; |
1412 | + struct Qdisc *qdisc = rtnl_dereference(dev_queue->qdisc); |
1413 | |
1414 | - qdisc = rtnl_dereference(dev_queue->qdisc); |
1415 | if (qdisc) { |
1416 | - bool nolock = qdisc->flags & TCQ_F_NOLOCK; |
1417 | - |
1418 | - if (nolock) |
1419 | - spin_lock_bh(&qdisc->seqlock); |
1420 | - spin_lock_bh(qdisc_lock(qdisc)); |
1421 | - |
1422 | if (!(qdisc->flags & TCQ_F_BUILTIN)) |
1423 | set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); |
1424 | + } |
1425 | +} |
1426 | |
1427 | - rcu_assign_pointer(dev_queue->qdisc, qdisc_default); |
1428 | - qdisc_reset(qdisc); |
1429 | +static void dev_reset_queue(struct net_device *dev, |
1430 | + struct netdev_queue *dev_queue, |
1431 | + void *_unused) |
1432 | +{ |
1433 | + struct Qdisc *qdisc; |
1434 | + bool nolock; |
1435 | |
1436 | - spin_unlock_bh(qdisc_lock(qdisc)); |
1437 | - if (nolock) |
1438 | - spin_unlock_bh(&qdisc->seqlock); |
1439 | - } |
1440 | + qdisc = dev_queue->qdisc_sleeping; |
1441 | + if (!qdisc) |
1442 | + return; |
1443 | + |
1444 | + nolock = qdisc->flags & TCQ_F_NOLOCK; |
1445 | + |
1446 | + if (nolock) |
1447 | + spin_lock_bh(&qdisc->seqlock); |
1448 | + spin_lock_bh(qdisc_lock(qdisc)); |
1449 | + |
1450 | + qdisc_reset(qdisc); |
1451 | + |
1452 | + spin_unlock_bh(qdisc_lock(qdisc)); |
1453 | + if (nolock) |
1454 | + spin_unlock_bh(&qdisc->seqlock); |
1455 | } |
1456 | |
1457 | static bool some_qdisc_is_busy(struct net_device *dev) |
1458 | @@ -1207,12 +1216,20 @@ void dev_deactivate_many(struct list_head *head) |
1459 | dev_watchdog_down(dev); |
1460 | } |
1461 | |
1462 | - /* Wait for outstanding qdisc-less dev_queue_xmit calls. |
1463 | + /* Wait for outstanding qdisc-less dev_queue_xmit calls or |
1464 | + * outstanding qdisc enqueuing calls. |
1465 | * This is avoided if all devices are in dismantle phase : |
1466 | * Caller will call synchronize_net() for us |
1467 | */ |
1468 | synchronize_net(); |
1469 | |
1470 | + list_for_each_entry(dev, head, close_list) { |
1471 | + netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); |
1472 | + |
1473 | + if (dev_ingress_queue(dev)) |
1474 | + dev_reset_queue(dev, dev_ingress_queue(dev), NULL); |
1475 | + } |
1476 | + |
1477 | /* Wait for outstanding qdisc_run calls. */ |
1478 | list_for_each_entry(dev, head, close_list) { |
1479 | while (some_qdisc_is_busy(dev)) |
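
Annotation: the sch_generic change splits deactivation into two passes. dev_deactivate_queue() now only sets __QDISC_STATE_DEACTIVATED, and the actual qdisc_reset() moves into the new dev_reset_queue(), which runs after synchronize_net(); by then no lockless (TCQ_F_NOLOCK) enqueuer can still be inside its RCU read-side section, so reset no longer races with a concurrent enqueue. A rough userspace sketch of the mark, wait, then reset ordering follows, with C11 atomics and a busy-wait standing in for RCU and synchronize_net(); all names are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct queue {
        atomic_bool deactivated;        /* __QDISC_STATE_DEACTIVATED   */
        atomic_int in_flight;           /* enqueuers inside the window */
        pthread_mutex_t lock;           /* stands in for qdisc_lock()  */
        int backlog;
};

/* Producer side: bail out once the queue is marked dead. */
static bool enqueue(struct queue *q)
{
        bool ok = false;

        atomic_fetch_add(&q->in_flight, 1);
        if (!atomic_load(&q->deactivated)) {
                pthread_mutex_lock(&q->lock);
                q->backlog++;
                pthread_mutex_unlock(&q->lock);
                ok = true;
        }
        atomic_fetch_sub(&q->in_flight, 1);
        return ok;
}

static void deactivate_and_reset(struct queue *q)
{
        atomic_store(&q->deactivated, true);    /* pass 1: mark         */
        while (atomic_load(&q->in_flight) > 0)  /* grace-period         */
                ;                               /* substitute           */
        pthread_mutex_lock(&q->lock);           /* pass 2: reset under  */
        q->backlog = 0;                         /* the lock             */
        pthread_mutex_unlock(&q->lock);
}

int main(void)
{
        struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

        enqueue(&q);
        deactivate_and_reset(&q);
        return enqueue(&q) ? 1 : 0;     /* enqueue after mark must fail */
}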
1480 | diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c |
1481 | index 6a5086e586efb..2b797a71e9bda 100644 |
1482 | --- a/net/sched/sch_taprio.c |
1483 | +++ b/net/sched/sch_taprio.c |
1484 | @@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { |
1485 | [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, |
1486 | }; |
1487 | |
1488 | -static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, |
1489 | +static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb, |
1490 | + struct sched_entry *entry, |
1491 | struct netlink_ext_ack *extack) |
1492 | { |
1493 | + int min_duration = length_to_duration(q, ETH_ZLEN); |
1494 | u32 interval = 0; |
1495 | |
1496 | if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) |
1497 | @@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, |
1498 | interval = nla_get_u32( |
1499 | tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); |
1500 | |
1501 | - if (interval == 0) { |
1502 | + /* The interval should allow at least the minimum ethernet |
1503 | + * frame to go out. |
1504 | + */ |
1505 | + if (interval < min_duration) { |
1506 | NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); |
1507 | return -EINVAL; |
1508 | } |
1509 | @@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, |
1510 | return 0; |
1511 | } |
1512 | |
1513 | -static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, |
1514 | - int index, struct netlink_ext_ack *extack) |
1515 | +static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n, |
1516 | + struct sched_entry *entry, int index, |
1517 | + struct netlink_ext_ack *extack) |
1518 | { |
1519 | struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; |
1520 | int err; |
1521 | @@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, |
1522 | |
1523 | entry->index = index; |
1524 | |
1525 | - return fill_sched_entry(tb, entry, extack); |
1526 | + return fill_sched_entry(q, tb, entry, extack); |
1527 | } |
1528 | |
1529 | -static int parse_sched_list(struct nlattr *list, |
1530 | +static int parse_sched_list(struct taprio_sched *q, struct nlattr *list, |
1531 | struct sched_gate_list *sched, |
1532 | struct netlink_ext_ack *extack) |
1533 | { |
1534 | @@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list, |
1535 | return -ENOMEM; |
1536 | } |
1537 | |
1538 | - err = parse_sched_entry(n, entry, i, extack); |
1539 | + err = parse_sched_entry(q, n, entry, i, extack); |
1540 | if (err < 0) { |
1541 | kfree(entry); |
1542 | return err; |
1543 | @@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list, |
1544 | return i; |
1545 | } |
1546 | |
1547 | -static int parse_taprio_schedule(struct nlattr **tb, |
1548 | +static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, |
1549 | struct sched_gate_list *new, |
1550 | struct netlink_ext_ack *extack) |
1551 | { |
1552 | @@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb, |
1553 | new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]); |
1554 | |
1555 | if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]) |
1556 | - err = parse_sched_list( |
1557 | - tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack); |
1558 | + err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], |
1559 | + new, extack); |
1560 | if (err < 0) |
1561 | return err; |
1562 | |
1563 | @@ -1474,7 +1480,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, |
1564 | goto free_sched; |
1565 | } |
1566 | |
1567 | - err = parse_taprio_schedule(tb, new_admin, extack); |
1568 | + err = parse_taprio_schedule(q, tb, new_admin, extack); |
1569 | if (err < 0) |
1570 | goto free_sched; |
1571 | |
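
Annotation: the taprio hunks thread the qdisc pointer down to fill_sched_entry() so it can compute min_duration = length_to_duration(q, ETH_ZLEN) and reject any gate interval shorter than the wire time of a minimum-size (60-byte) Ethernet frame, rather than only rejecting zero. A small sketch of that arithmetic, assuming plain integer math and a 1 Gb/s link, where the kernel's length_to_duration() really scales by a cached picoseconds-per-byte value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ZLEN 60     /* minimum Ethernet frame length, bytes */

/* Nanoseconds needed to put len bytes on the wire at speed_mbps. */
static uint64_t length_to_duration(uint64_t len, uint64_t speed_mbps)
{
        return len * 8 * 1000 / speed_mbps;     /* bits / (Mb/s) -> ns */
}

int main(void)
{
        uint64_t min_ns = length_to_duration(ETH_ZLEN, 1000);   /* 1 Gb/s */
        uint32_t interval = 400;        /* ns, as if from netlink */

        if (interval < min_ns)
                printf("interval %u ns rejected, minimum is %" PRIu64 " ns\n",
                       interval, min_ns);
        return 0;
}

At 1 Gb/s the sketch computes 480 ns for a 60-byte frame, so a 400 ns interval is rejected even though the old interval == 0 check would have accepted it.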
1572 | diff --git a/net/sctp/socket.c b/net/sctp/socket.c |
1573 | index 3a11212bb4c0e..1fcc13f6073ef 100644 |
1574 | --- a/net/sctp/socket.c |
1575 | +++ b/net/sctp/socket.c |
1576 | @@ -9337,13 +9337,10 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, |
1577 | static inline void sctp_copy_descendant(struct sock *sk_to, |
1578 | const struct sock *sk_from) |
1579 | { |
1580 | - int ancestor_size = sizeof(struct inet_sock) + |
1581 | - sizeof(struct sctp_sock) - |
1582 | - offsetof(struct sctp_sock, pd_lobby); |
1583 | - |
1584 | - if (sk_from->sk_family == PF_INET6) |
1585 | - ancestor_size += sizeof(struct ipv6_pinfo); |
1586 | + size_t ancestor_size = sizeof(struct inet_sock); |
1587 | |
1588 | + ancestor_size += sk_from->sk_prot->obj_size; |
1589 | + ancestor_size -= offsetof(struct sctp_sock, pd_lobby); |
1590 | __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); |
1591 | } |
1592 | |
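
Annotation: the sctp fix replaces a hand-summed size (sizeof(struct sctp_sock) plus, for IPv6, sizeof(struct ipv6_pinfo)) with sk_prot->obj_size, the size of the socket object as actually allocated. Summing member sizes can undercount because the compiler may insert alignment padding between members; the toy structures below, made up purely for illustration, show the effect:

#include <stdio.h>

struct part_a { char c; };      /* sizeof == 1 on LP64            */
struct part_b { long l; };      /* sizeof == 8, 8-byte alignment  */
struct whole  { struct part_a a; struct part_b b; };

int main(void)
{
        /* On LP64 this prints 9 vs 16: seven bytes of padding sit
         * between the members, visible only to sizeof(struct whole),
         * just as obj_size sees the real sctp6_sock layout. */
        printf("sum of parts: %zu\n",
               sizeof(struct part_a) + sizeof(struct part_b));
        printf("real object:  %zu\n", sizeof(struct whole));
        return 0;
}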
1593 | diff --git a/net/tipc/group.c b/net/tipc/group.c |
1594 | index 89257e2a980de..f53871baa42eb 100644 |
1595 | --- a/net/tipc/group.c |
1596 | +++ b/net/tipc/group.c |
1597 | @@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp, |
1598 | return NULL; |
1599 | } |
1600 | |
1601 | -static void tipc_group_add_to_tree(struct tipc_group *grp, |
1602 | - struct tipc_member *m) |
1603 | +static int tipc_group_add_to_tree(struct tipc_group *grp, |
1604 | + struct tipc_member *m) |
1605 | { |
1606 | u64 nkey, key = (u64)m->node << 32 | m->port; |
1607 | struct rb_node **n, *parent = NULL; |
1608 | @@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp, |
1609 | else if (key > nkey) |
1610 | n = &(*n)->rb_right; |
1611 | else |
1612 | - return; |
1613 | + return -EEXIST; |
1614 | } |
1615 | rb_link_node(&m->tree_node, parent, n); |
1616 | rb_insert_color(&m->tree_node, &grp->members); |
1617 | + return 0; |
1618 | } |
1619 | |
1620 | static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, |
1621 | @@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, |
1622 | u32 instance, int state) |
1623 | { |
1624 | struct tipc_member *m; |
1625 | + int ret; |
1626 | |
1627 | m = kzalloc(sizeof(*m), GFP_ATOMIC); |
1628 | if (!m) |
1629 | @@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, |
1630 | m->port = port; |
1631 | m->instance = instance; |
1632 | m->bc_acked = grp->bc_snd_nxt - 1; |
1633 | + ret = tipc_group_add_to_tree(grp, m); |
1634 | + if (ret < 0) { |
1635 | + kfree(m); |
1636 | + return NULL; |
1637 | + } |
1638 | grp->member_cnt++; |
1639 | - tipc_group_add_to_tree(grp, m); |
1640 | tipc_nlist_add(&grp->dests, m->node); |
1641 | m->state = state; |
1642 | return m; |
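
Annotation: in the tipc group hunks, tipc_group_add_to_tree() used to return silently when the key was already present in the rbtree, leaking the member that tipc_group_create_member() had just allocated; it now returns -EEXIST so the caller can kfree() the node and fail cleanly. The same insert-or-free pattern in a standalone sketch, with a plain binary search tree standing in for the kernel rbtree and all names invented:

#include <errno.h>
#include <stdlib.h>

struct member {
        unsigned long long key;
        struct member *left, *right;
};

static int tree_insert(struct member **root, struct member *m)
{
        struct member **n = root;

        while (*n) {
                if (m->key < (*n)->key)
                        n = &(*n)->left;
                else if (m->key > (*n)->key)
                        n = &(*n)->right;
                else
                        return -EEXIST; /* duplicate: caller owns m */
        }
        *n = m;
        return 0;
}

static struct member *create_member(struct member **root,
                                    unsigned long long key)
{
        struct member *m = calloc(1, sizeof(*m));

        if (!m)
                return NULL;
        m->key = key;
        if (tree_insert(root, m) < 0) {
                free(m);                /* the fix: no leak on duplicates */
                return NULL;
        }
        return m;
}

int main(void)
{
        struct member *root = NULL;

        create_member(&root, 42);
        /* A second insert with the same key frees the node and
         * returns NULL instead of leaking it. */
        return create_member(&root, 42) == NULL ? 0 : 1;
}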
1643 | diff --git a/net/tipc/msg.c b/net/tipc/msg.c |
1644 | index 922d262e153ff..ee4b2261e7957 100644 |
1645 | --- a/net/tipc/msg.c |
1646 | +++ b/net/tipc/msg.c |
1647 | @@ -140,7 +140,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) |
1648 | if (fragid == FIRST_FRAGMENT) { |
1649 | if (unlikely(head)) |
1650 | goto err; |
1651 | - if (unlikely(skb_unclone(frag, GFP_ATOMIC))) |
1652 | + frag = skb_unshare(frag, GFP_ATOMIC); |
1653 | + if (unlikely(!frag)) |
1654 | goto err; |
1655 | head = *headbuf = frag; |
1656 | *buf = NULL; |
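
Annotation: for tipc_buf_append(), skb_unclone() only gives the caller a private copy of the skb header while the data may stay shared, whereas skb_unshare() also copies the data whenever another user holds a reference, and returns NULL when that copy fails, hence the added NULL check. Since the first fragment becomes the reassembly head that later fragments are appended to, its data must be private. A refcounted-buffer sketch of the unshare idea, with all names invented:

#include <stdlib.h>
#include <string.h>

struct buf {
        int refs;
        size_t len;
        unsigned char *data;
};

/* Return a buffer whose data no one else can see, copying if shared. */
static struct buf *buf_unshare(struct buf *b)
{
        struct buf *copy;

        if (b->refs == 1)
                return b;               /* already private */

        copy = malloc(sizeof(*copy));
        if (!copy)
                return NULL;            /* caller must treat NULL as error */
        copy->data = malloc(b->len);
        if (!copy->data) {
                free(copy);
                return NULL;
        }
        memcpy(copy->data, b->data, b->len);
        copy->len = b->len;
        copy->refs = 1;
        b->refs--;                      /* drop our ref on the shared one */
        return copy;
}

int main(void)
{
        unsigned char payload[4] = "abc";
        struct buf shared = { .refs = 2, .len = sizeof(payload),
                              .data = payload };
        struct buf *priv = buf_unshare(&shared);

        if (!priv)
                return 1;               /* mirrors the new !frag check */
        priv->data[0] = 'x';            /* safe: no other holder sees it */
        if (priv != &shared) {
                free(priv->data);
                free(priv);
        }
        return 0;
}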
1657 | diff --git a/net/tipc/socket.c b/net/tipc/socket.c |
1658 | index 5318bb6611abc..959155c3a1608 100644 |
1659 | --- a/net/tipc/socket.c |
1660 | +++ b/net/tipc/socket.c |
1661 | @@ -2616,10 +2616,7 @@ static int tipc_shutdown(struct socket *sock, int how) |
1662 | |
1663 | trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); |
1664 | __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); |
1665 | - if (tipc_sk_type_connectionless(sk)) |
1666 | - sk->sk_shutdown = SHUTDOWN_MASK; |
1667 | - else |
1668 | - sk->sk_shutdown = SEND_SHUTDOWN; |
1669 | + sk->sk_shutdown = SHUTDOWN_MASK; |
1670 | |
1671 | if (sk->sk_state == TIPC_DISCONNECTING) { |
1672 | /* Discard any unreceived messages */ |
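
Annotation: the tipc_shutdown() change drops the connectionless/connection-oriented distinction and always sets SHUTDOWN_MASK, i.e. both RCV_SHUTDOWN and SEND_SHUTDOWN, so the receive direction is marked shut down as well and a thread blocked in receive is not left sleeping after shutdown(). The bit bookkeeping in a standalone sketch, with values mirroring the kernel's definitions and the socket type invented:

#include <stdio.h>

#define RCV_SHUTDOWN    1
#define SEND_SHUTDOWN   2
#define SHUTDOWN_MASK   (RCV_SHUTDOWN | SEND_SHUTDOWN)

struct toy_sock { int sk_shutdown; };

int main(void)
{
        struct toy_sock sk = { 0 };

        sk.sk_shutdown = SHUTDOWN_MASK; /* the fixed assignment */
        printf("recv shut down: %s\n",
               (sk.sk_shutdown & RCV_SHUTDOWN) ? "yes" : "no");
        printf("send shut down: %s\n",
               (sk.sk_shutdown & SEND_SHUTDOWN) ? "yes" : "no");
        return 0;
}

With only SEND_SHUTDOWN set, as the old else branch did, the first check above would still report "no", which is why a blocked reader could keep waiting.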