Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0275-5.4.176-all-fixes.patch

Parent Directory | Revision Log


Revision 3637 - (show annotations) (download)
Mon Oct 24 12:40:44 2022 UTC (18 months, 1 week ago) by niro
File size: 72810 byte(s)
-add missing
1 diff --git a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
2 index 9cb3560756d00..53c26ffd020a3 100644
3 --- a/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
4 +++ b/Documentation/devicetree/bindings/net/can/tcan4x5x.txt
5 @@ -31,7 +31,7 @@ tcan4x5x: tcan4x5x@0 {
6 #address-cells = <1>;
7 #size-cells = <1>;
8 spi-max-frequency = <10000000>;
9 - bosch,mram-cfg = <0x0 0 0 32 0 0 1 1>;
10 + bosch,mram-cfg = <0x0 0 0 16 0 0 1 1>;
11 interrupt-parent = <&gpio1>;
12 interrupts = <14 IRQ_TYPE_LEVEL_LOW>;
13 device-state-gpios = <&gpio3 21 GPIO_ACTIVE_HIGH>;
14 diff --git a/Makefile b/Makefile
15 index 2f6c51097d003..b23aa51ada93e 100644
16 --- a/Makefile
17 +++ b/Makefile
18 @@ -1,7 +1,7 @@
19 # SPDX-License-Identifier: GPL-2.0
20 VERSION = 5
21 PATCHLEVEL = 4
22 -SUBLEVEL = 175
23 +SUBLEVEL = 176
24 EXTRAVERSION =
25 NAME = Kleptomaniac Octopus
26
27 diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
28 index f61ef46ebff74..d07fbc21f14ce 100644
29 --- a/arch/arm64/kernel/process.c
30 +++ b/arch/arm64/kernel/process.c
31 @@ -500,34 +500,26 @@ static void entry_task_switch(struct task_struct *next)
32
33 /*
34 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
35 - * Assuming the virtual counter is enabled at the beginning of times:
36 - *
37 - * - disable access when switching from a 64bit task to a 32bit task
38 - * - enable access when switching from a 32bit task to a 64bit task
39 + * Ensure access is disabled when switching to a 32bit task, ensure
40 + * access is enabled when switching to a 64bit task.
41 */
42 -static void erratum_1418040_thread_switch(struct task_struct *prev,
43 - struct task_struct *next)
44 +static void erratum_1418040_thread_switch(struct task_struct *next)
45 {
46 - bool prev32, next32;
47 - u64 val;
48 -
49 - if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040))
50 - return;
51 -
52 - prev32 = is_compat_thread(task_thread_info(prev));
53 - next32 = is_compat_thread(task_thread_info(next));
54 -
55 - if (prev32 == next32 || !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
56 + if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) ||
57 + !this_cpu_has_cap(ARM64_WORKAROUND_1418040))
58 return;
59
60 - val = read_sysreg(cntkctl_el1);
61 -
62 - if (!next32)
63 - val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
64 + if (is_compat_thread(task_thread_info(next)))
65 + sysreg_clear_set(cntkctl_el1, ARCH_TIMER_USR_VCT_ACCESS_EN, 0);
66 else
67 - val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
68 + sysreg_clear_set(cntkctl_el1, 0, ARCH_TIMER_USR_VCT_ACCESS_EN);
69 +}
70
71 - write_sysreg(val, cntkctl_el1);
72 +static void erratum_1418040_new_exec(void)
73 +{
74 + preempt_disable();
75 + erratum_1418040_thread_switch(current);
76 + preempt_enable();
77 }
78
79 /*
80 @@ -546,7 +538,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
81 uao_thread_switch(next);
82 ptrauth_thread_switch(next);
83 ssbs_thread_switch(next);
84 - erratum_1418040_thread_switch(prev, next);
85 + erratum_1418040_thread_switch(next);
86
87 /*
88 * Complete any pending TLB or cache maintenance on this CPU in case
89 @@ -605,6 +597,7 @@ void arch_setup_new_exec(void)
90 current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
91
92 ptrauth_thread_init_user(current);
93 + erratum_1418040_new_exec();
94 }
95
96 #ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
97 diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
98 index afbd47b0a75cc..5819a577d267a 100644
99 --- a/arch/powerpc/kernel/Makefile
100 +++ b/arch/powerpc/kernel/Makefile
101 @@ -13,6 +13,7 @@ CFLAGS_prom_init.o += -fPIC
102 CFLAGS_btext.o += -fPIC
103 endif
104
105 +CFLAGS_early_32.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
106 CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
107 CFLAGS_prom_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
108 CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
109 diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
110 index b8de3be10eb47..8656b8d2ce555 100644
111 --- a/arch/powerpc/lib/Makefile
112 +++ b/arch/powerpc/lib/Makefile
113 @@ -16,6 +16,9 @@ CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
114 CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
115 endif
116
117 +CFLAGS_code-patching.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
118 +CFLAGS_feature-fixups.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
119 +
120 obj-y += alloc.o code-patching.o feature-fixups.o pmem.o
121
122 ifndef CONFIG_KASAN
123 diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
124 index e1fcc03159ef2..a927adccb4ba7 100644
125 --- a/arch/s390/hypfs/hypfs_vm.c
126 +++ b/arch/s390/hypfs/hypfs_vm.c
127 @@ -20,6 +20,7 @@
128
129 static char local_guest[] = " ";
130 static char all_guests[] = "* ";
131 +static char *all_groups = all_guests;
132 static char *guest_query;
133
134 struct diag2fc_data {
135 @@ -62,10 +63,11 @@ static int diag2fc(int size, char* query, void *addr)
136
137 memcpy(parm_list.userid, query, NAME_LEN);
138 ASCEBC(parm_list.userid, NAME_LEN);
139 - parm_list.addr = (unsigned long) addr ;
140 + memcpy(parm_list.aci_grp, all_groups, NAME_LEN);
141 + ASCEBC(parm_list.aci_grp, NAME_LEN);
142 + parm_list.addr = (unsigned long)addr;
143 parm_list.size = size;
144 parm_list.fmt = 0x02;
145 - memset(parm_list.aci_grp, 0x40, NAME_LEN);
146 rc = -1;
147
148 diag_stat_inc(DIAG_STAT_X2FC);
149 diff --git a/block/bio.c b/block/bio.c
150 index cb38d6f3acceb..1c52d0196e15c 100644
151 --- a/block/bio.c
152 +++ b/block/bio.c
153 @@ -569,7 +569,8 @@ void bio_truncate(struct bio *bio, unsigned new_size)
154 offset = new_size - done;
155 else
156 offset = 0;
157 - zero_user(bv.bv_page, offset, bv.bv_len - offset);
158 + zero_user(bv.bv_page, bv.bv_offset + offset,
159 + bv.bv_len - offset);
160 truncated = true;
161 }
162 done += bv.bv_len;
163 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
164 index 7d155938e2916..9baf5af919e1e 100644
165 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
166 +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
167 @@ -471,8 +471,8 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
168 return -EINVAL;
169 }
170
171 - if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
172 - args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
173 + if (args->stream_size > SZ_128K || args->nr_relocs > SZ_128K ||
174 + args->nr_bos > SZ_128K || args->nr_pmrs > 128) {
175 DRM_ERROR("submit arguments out of size limits\n");
176 return -EINVAL;
177 }
178 diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
179 index a11b98e990019..16194971a99f9 100644
180 --- a/drivers/gpu/drm/msm/dsi/dsi.c
181 +++ b/drivers/gpu/drm/msm/dsi/dsi.c
182 @@ -33,7 +33,12 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
183
184 of_node_put(phy_node);
185
186 - if (!phy_pdev || !msm_dsi->phy) {
187 + if (!phy_pdev) {
188 + DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
189 + return -EPROBE_DEFER;
190 + }
191 + if (!msm_dsi->phy) {
192 + put_device(&phy_pdev->dev);
193 DRM_DEV_ERROR(&pdev->dev, "%s: phy driver is not ready\n", __func__);
194 return -EPROBE_DEFER;
195 }
196 diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
197 index 21519229fe73a..60d50643d0b5c 100644
198 --- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
199 +++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
200 @@ -665,12 +665,14 @@ void __exit msm_dsi_phy_driver_unregister(void)
201 int msm_dsi_phy_enable(struct msm_dsi_phy *phy, int src_pll_id,
202 struct msm_dsi_phy_clk_request *clk_req)
203 {
204 - struct device *dev = &phy->pdev->dev;
205 + struct device *dev;
206 int ret;
207
208 if (!phy || !phy->cfg->ops.enable)
209 return -EINVAL;
210
211 + dev = &phy->pdev->dev;
212 +
213 ret = dsi_phy_enable_resource(phy);
214 if (ret) {
215 DRM_DEV_ERROR(dev, "%s: resource enable failed, %d\n",
216 diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
217 index 355afb936401a..1a7e77373407f 100644
218 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c
219 +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
220 @@ -97,10 +97,15 @@ static int msm_hdmi_get_phy(struct hdmi *hdmi)
221
222 of_node_put(phy_node);
223
224 - if (!phy_pdev || !hdmi->phy) {
225 + if (!phy_pdev) {
226 DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
227 return -EPROBE_DEFER;
228 }
229 + if (!hdmi->phy) {
230 + DRM_DEV_ERROR(&pdev->dev, "phy driver is not ready\n");
231 + put_device(&phy_pdev->dev);
232 + return -EPROBE_DEFER;
233 + }
234
235 hdmi->phy_dev = get_device(&phy_pdev->dev);
236
237 diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
238 index 7443df77cadb5..407b51cf67909 100644
239 --- a/drivers/gpu/drm/msm/msm_drv.c
240 +++ b/drivers/gpu/drm/msm/msm_drv.c
241 @@ -337,7 +337,7 @@ static int msm_init_vram(struct drm_device *dev)
242 of_node_put(node);
243 if (ret)
244 return ret;
245 - size = r.end - r.start;
246 + size = r.end - r.start + 1;
247 DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
248
249 /* if we have no IOMMU, then we need to use carveout allocator.
250 diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
251 index 9abb4507f572b..b62763a85d6e4 100644
252 --- a/drivers/hwmon/lm90.c
253 +++ b/drivers/hwmon/lm90.c
254 @@ -373,7 +373,7 @@ static const struct lm90_params lm90_params[] = {
255 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
256 | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_CRIT,
257 .alert_alarms = 0x7c,
258 - .max_convrate = 8,
259 + .max_convrate = 7,
260 },
261 [lm86] = {
262 .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
263 @@ -394,12 +394,13 @@ static const struct lm90_params lm90_params[] = {
264 .max_convrate = 9,
265 },
266 [max6646] = {
267 - .flags = LM90_HAVE_CRIT,
268 + .flags = LM90_HAVE_CRIT | LM90_HAVE_BROKEN_ALERT,
269 .alert_alarms = 0x7c,
270 .max_convrate = 6,
271 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
272 },
273 [max6654] = {
274 + .flags = LM90_HAVE_BROKEN_ALERT,
275 .alert_alarms = 0x7c,
276 .max_convrate = 7,
277 .reg_local_ext = MAX6657_REG_R_LOCAL_TEMPL,
278 @@ -418,7 +419,7 @@ static const struct lm90_params lm90_params[] = {
279 },
280 [max6680] = {
281 .flags = LM90_HAVE_OFFSET | LM90_HAVE_CRIT
282 - | LM90_HAVE_CRIT_ALRM_SWP,
283 + | LM90_HAVE_CRIT_ALRM_SWP | LM90_HAVE_BROKEN_ALERT,
284 .alert_alarms = 0x7c,
285 .max_convrate = 7,
286 },
287 diff --git a/drivers/mtd/nand/raw/mpc5121_nfc.c b/drivers/mtd/nand/raw/mpc5121_nfc.c
288 index 8b90def6686fb..a5eb0a1f559c7 100644
289 --- a/drivers/mtd/nand/raw/mpc5121_nfc.c
290 +++ b/drivers/mtd/nand/raw/mpc5121_nfc.c
291 @@ -290,7 +290,6 @@ static int ads5121_chipselect_init(struct mtd_info *mtd)
292 /* Control chips select signal on ADS5121 board */
293 static void ads5121_select_chip(struct nand_chip *nand, int chip)
294 {
295 - struct mtd_info *mtd = nand_to_mtd(nand);
296 struct mpc5121_nfc_prv *prv = nand_get_controller_data(nand);
297 u8 v;
298
299 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
300 index ce6a4e1965e1d..403c1b9cf6ab8 100644
301 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
302 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
303 @@ -1970,8 +1970,7 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
304 break;
305 }
306
307 - if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
308 - hclgevf_enable_vector(&hdev->misc_vector, true);
309 + hclgevf_enable_vector(&hdev->misc_vector, true);
310
311 return IRQ_HANDLED;
312 }
313 diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
314 index 9adfc0a7ab823..26d49dcdbeb3e 100644
315 --- a/drivers/net/ethernet/ibm/ibmvnic.c
316 +++ b/drivers/net/ethernet/ibm/ibmvnic.c
317 @@ -3258,11 +3258,25 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
318 struct device *dev = &adapter->vdev->dev;
319 union ibmvnic_crq crq;
320 int max_entries;
321 + int cap_reqs;
322 +
323 + /* We send out 6 or 7 REQUEST_CAPABILITY CRQs below (depending on
324 + * the PROMISC flag). Initialize this count upfront. When the tasklet
325 + * receives a response to all of these, it will send the next protocol
326 + * message (QUERY_IP_OFFLOAD).
327 + */
328 + if (!(adapter->netdev->flags & IFF_PROMISC) ||
329 + adapter->promisc_supported)
330 + cap_reqs = 7;
331 + else
332 + cap_reqs = 6;
333
334 if (!retry) {
335 /* Sub-CRQ entries are 32 byte long */
336 int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);
337
338 + atomic_set(&adapter->running_cap_crqs, cap_reqs);
339 +
340 if (adapter->min_tx_entries_per_subcrq > entries_page ||
341 adapter->min_rx_add_entries_per_subcrq > entries_page) {
342 dev_err(dev, "Fatal, invalid entries per sub-crq\n");
343 @@ -3323,44 +3337,45 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
344 adapter->opt_rx_comp_queues;
345
346 adapter->req_rx_add_queues = adapter->max_rx_add_queues;
347 + } else {
348 + atomic_add(cap_reqs, &adapter->running_cap_crqs);
349 }
350 -
351 memset(&crq, 0, sizeof(crq));
352 crq.request_capability.first = IBMVNIC_CRQ_CMD;
353 crq.request_capability.cmd = REQUEST_CAPABILITY;
354
355 crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
356 crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
357 - atomic_inc(&adapter->running_cap_crqs);
358 + cap_reqs--;
359 ibmvnic_send_crq(adapter, &crq);
360
361 crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
362 crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
363 - atomic_inc(&adapter->running_cap_crqs);
364 + cap_reqs--;
365 ibmvnic_send_crq(adapter, &crq);
366
367 crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
368 crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
369 - atomic_inc(&adapter->running_cap_crqs);
370 + cap_reqs--;
371 ibmvnic_send_crq(adapter, &crq);
372
373 crq.request_capability.capability =
374 cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
375 crq.request_capability.number =
376 cpu_to_be64(adapter->req_tx_entries_per_subcrq);
377 - atomic_inc(&adapter->running_cap_crqs);
378 + cap_reqs--;
379 ibmvnic_send_crq(adapter, &crq);
380
381 crq.request_capability.capability =
382 cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
383 crq.request_capability.number =
384 cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
385 - atomic_inc(&adapter->running_cap_crqs);
386 + cap_reqs--;
387 ibmvnic_send_crq(adapter, &crq);
388
389 crq.request_capability.capability = cpu_to_be16(REQ_MTU);
390 crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
391 - atomic_inc(&adapter->running_cap_crqs);
392 + cap_reqs--;
393 ibmvnic_send_crq(adapter, &crq);
394
395 if (adapter->netdev->flags & IFF_PROMISC) {
396 @@ -3368,16 +3383,21 @@ static void ibmvnic_send_req_caps(struct ibmvnic_adapter *adapter, int retry)
397 crq.request_capability.capability =
398 cpu_to_be16(PROMISC_REQUESTED);
399 crq.request_capability.number = cpu_to_be64(1);
400 - atomic_inc(&adapter->running_cap_crqs);
401 + cap_reqs--;
402 ibmvnic_send_crq(adapter, &crq);
403 }
404 } else {
405 crq.request_capability.capability =
406 cpu_to_be16(PROMISC_REQUESTED);
407 crq.request_capability.number = cpu_to_be64(0);
408 - atomic_inc(&adapter->running_cap_crqs);
409 + cap_reqs--;
410 ibmvnic_send_crq(adapter, &crq);
411 }
412 +
413 + /* Keep at end to catch any discrepancy between expected and actual
414 + * CRQs sent.
415 + */
416 + WARN_ON(cap_reqs != 0);
417 }
418
419 static int pending_scrq(struct ibmvnic_adapter *adapter,
420 @@ -3782,118 +3802,132 @@ static void send_map_query(struct ibmvnic_adapter *adapter)
421 static void send_cap_queries(struct ibmvnic_adapter *adapter)
422 {
423 union ibmvnic_crq crq;
424 + int cap_reqs;
425 +
426 + /* We send out 25 QUERY_CAPABILITY CRQs below. Initialize this count
427 + * upfront. When the tasklet receives a response to all of these, it
428 + * can send out the next protocol messaage (REQUEST_CAPABILITY).
429 + */
430 + cap_reqs = 25;
431 +
432 + atomic_set(&adapter->running_cap_crqs, cap_reqs);
433
434 - atomic_set(&adapter->running_cap_crqs, 0);
435 memset(&crq, 0, sizeof(crq));
436 crq.query_capability.first = IBMVNIC_CRQ_CMD;
437 crq.query_capability.cmd = QUERY_CAPABILITY;
438
439 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
440 - atomic_inc(&adapter->running_cap_crqs);
441 ibmvnic_send_crq(adapter, &crq);
442 + cap_reqs--;
443
444 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
445 - atomic_inc(&adapter->running_cap_crqs);
446 ibmvnic_send_crq(adapter, &crq);
447 + cap_reqs--;
448
449 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
450 - atomic_inc(&adapter->running_cap_crqs);
451 ibmvnic_send_crq(adapter, &crq);
452 + cap_reqs--;
453
454 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
455 - atomic_inc(&adapter->running_cap_crqs);
456 ibmvnic_send_crq(adapter, &crq);
457 + cap_reqs--;
458
459 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
460 - atomic_inc(&adapter->running_cap_crqs);
461 ibmvnic_send_crq(adapter, &crq);
462 + cap_reqs--;
463
464 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
465 - atomic_inc(&adapter->running_cap_crqs);
466 ibmvnic_send_crq(adapter, &crq);
467 + cap_reqs--;
468
469 crq.query_capability.capability =
470 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
471 - atomic_inc(&adapter->running_cap_crqs);
472 ibmvnic_send_crq(adapter, &crq);
473 + cap_reqs--;
474
475 crq.query_capability.capability =
476 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
477 - atomic_inc(&adapter->running_cap_crqs);
478 ibmvnic_send_crq(adapter, &crq);
479 + cap_reqs--;
480
481 crq.query_capability.capability =
482 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
483 - atomic_inc(&adapter->running_cap_crqs);
484 ibmvnic_send_crq(adapter, &crq);
485 + cap_reqs--;
486
487 crq.query_capability.capability =
488 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
489 - atomic_inc(&adapter->running_cap_crqs);
490 ibmvnic_send_crq(adapter, &crq);
491 + cap_reqs--;
492
493 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
494 - atomic_inc(&adapter->running_cap_crqs);
495 ibmvnic_send_crq(adapter, &crq);
496 + cap_reqs--;
497
498 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
499 - atomic_inc(&adapter->running_cap_crqs);
500 ibmvnic_send_crq(adapter, &crq);
501 + cap_reqs--;
502
503 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
504 - atomic_inc(&adapter->running_cap_crqs);
505 ibmvnic_send_crq(adapter, &crq);
506 + cap_reqs--;
507
508 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
509 - atomic_inc(&adapter->running_cap_crqs);
510 ibmvnic_send_crq(adapter, &crq);
511 + cap_reqs--;
512
513 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
514 - atomic_inc(&adapter->running_cap_crqs);
515 ibmvnic_send_crq(adapter, &crq);
516 + cap_reqs--;
517
518 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
519 - atomic_inc(&adapter->running_cap_crqs);
520 ibmvnic_send_crq(adapter, &crq);
521 + cap_reqs--;
522
523 crq.query_capability.capability = cpu_to_be16(RX_VLAN_HEADER_INSERTION);
524 - atomic_inc(&adapter->running_cap_crqs);
525 ibmvnic_send_crq(adapter, &crq);
526 + cap_reqs--;
527
528 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
529 - atomic_inc(&adapter->running_cap_crqs);
530 ibmvnic_send_crq(adapter, &crq);
531 + cap_reqs--;
532
533 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
534 - atomic_inc(&adapter->running_cap_crqs);
535 ibmvnic_send_crq(adapter, &crq);
536 + cap_reqs--;
537
538 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
539 - atomic_inc(&adapter->running_cap_crqs);
540 ibmvnic_send_crq(adapter, &crq);
541 + cap_reqs--;
542
543 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
544 - atomic_inc(&adapter->running_cap_crqs);
545 ibmvnic_send_crq(adapter, &crq);
546 + cap_reqs--;
547
548 crq.query_capability.capability =
549 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
550 - atomic_inc(&adapter->running_cap_crqs);
551 ibmvnic_send_crq(adapter, &crq);
552 + cap_reqs--;
553
554 crq.query_capability.capability =
555 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
556 - atomic_inc(&adapter->running_cap_crqs);
557 ibmvnic_send_crq(adapter, &crq);
558 + cap_reqs--;
559
560 crq.query_capability.capability =
561 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
562 - atomic_inc(&adapter->running_cap_crqs);
563 ibmvnic_send_crq(adapter, &crq);
564 + cap_reqs--;
565
566 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
567 - atomic_inc(&adapter->running_cap_crqs);
568 +
569 ibmvnic_send_crq(adapter, &crq);
570 + cap_reqs--;
571 +
572 + /* Keep at end to catch any discrepancy between expected and actual
573 + * CRQs sent.
574 + */
575 + WARN_ON(cap_reqs != 0);
576 }
577
578 static void handle_vpd_size_rsp(union ibmvnic_crq *crq,
579 @@ -4160,6 +4194,8 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
580 char *name;
581
582 atomic_dec(&adapter->running_cap_crqs);
583 + netdev_dbg(adapter->netdev, "Outstanding request-caps: %d\n",
584 + atomic_read(&adapter->running_cap_crqs));
585 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
586 case REQ_TX_QUEUES:
587 req_value = &adapter->req_tx_queues;
588 @@ -4787,12 +4823,6 @@ static void ibmvnic_tasklet(void *data)
589 ibmvnic_handle_crq(crq, adapter);
590 crq->generic.first = 0;
591 }
592 -
593 - /* remain in tasklet until all
594 - * capabilities responses are received
595 - */
596 - if (!adapter->wait_capability)
597 - done = true;
598 }
599 /* if capabilities CRQ's were sent in this tasklet, the following
600 * tasklet must wait until all responses are received
601 diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
602 index f8422dbfd54e6..4c8c31692e9e0 100644
603 --- a/drivers/net/ethernet/intel/i40e/i40e.h
604 +++ b/drivers/net/ethernet/intel/i40e/i40e.h
605 @@ -182,7 +182,6 @@ enum i40e_interrupt_policy {
606
607 struct i40e_lump_tracking {
608 u16 num_entries;
609 - u16 search_hint;
610 u16 list[0];
611 #define I40E_PILE_VALID_BIT 0x8000
612 #define I40E_IWARP_IRQ_PILE_ID (I40E_PILE_VALID_BIT - 2)
613 @@ -757,12 +756,12 @@ struct i40e_vsi {
614 struct rtnl_link_stats64 net_stats_offsets;
615 struct i40e_eth_stats eth_stats;
616 struct i40e_eth_stats eth_stats_offsets;
617 - u32 tx_restart;
618 - u32 tx_busy;
619 + u64 tx_restart;
620 + u64 tx_busy;
621 u64 tx_linearize;
622 u64 tx_force_wb;
623 - u32 rx_buf_failed;
624 - u32 rx_page_failed;
625 + u64 rx_buf_failed;
626 + u64 rx_page_failed;
627
628 /* These are containers of ring pointers, allocated at run-time */
629 struct i40e_ring **rx_rings;
630 diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
631 index 99ea543dd2453..276f04c0e51d6 100644
632 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
633 +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
634 @@ -234,7 +234,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
635 (unsigned long int)vsi->net_stats_offsets.rx_compressed,
636 (unsigned long int)vsi->net_stats_offsets.tx_compressed);
637 dev_info(&pf->pdev->dev,
638 - " tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
639 + " tx_restart = %llu, tx_busy = %llu, rx_buf_failed = %llu, rx_page_failed = %llu\n",
640 vsi->tx_restart, vsi->tx_busy,
641 vsi->rx_buf_failed, vsi->rx_page_failed);
642 rcu_read_lock();
643 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
644 index a2326683be170..a6ae4b7b11afd 100644
645 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
646 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
647 @@ -204,10 +204,6 @@ int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
648 * @id: an owner id to stick on the items assigned
649 *
650 * Returns the base item index of the lump, or negative for error
651 - *
652 - * The search_hint trick and lack of advanced fit-finding only work
653 - * because we're highly likely to have all the same size lump requests.
654 - * Linear search time and any fragmentation should be minimal.
655 **/
656 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
657 u16 needed, u16 id)
658 @@ -222,8 +218,21 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
659 return -EINVAL;
660 }
661
662 - /* start the linear search with an imperfect hint */
663 - i = pile->search_hint;
664 + /* Allocate last queue in the pile for FDIR VSI queue
665 + * so it doesn't fragment the qp_pile
666 + */
667 + if (pile == pf->qp_pile && pf->vsi[id]->type == I40E_VSI_FDIR) {
668 + if (pile->list[pile->num_entries - 1] & I40E_PILE_VALID_BIT) {
669 + dev_err(&pf->pdev->dev,
670 + "Cannot allocate queue %d for I40E_VSI_FDIR\n",
671 + pile->num_entries - 1);
672 + return -ENOMEM;
673 + }
674 + pile->list[pile->num_entries - 1] = id | I40E_PILE_VALID_BIT;
675 + return pile->num_entries - 1;
676 + }
677 +
678 + i = 0;
679 while (i < pile->num_entries) {
680 /* skip already allocated entries */
681 if (pile->list[i] & I40E_PILE_VALID_BIT) {
682 @@ -242,7 +251,6 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
683 for (j = 0; j < needed; j++)
684 pile->list[i+j] = id | I40E_PILE_VALID_BIT;
685 ret = i;
686 - pile->search_hint = i + j;
687 break;
688 }
689
690 @@ -265,7 +273,7 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
691 {
692 int valid_id = (id | I40E_PILE_VALID_BIT);
693 int count = 0;
694 - int i;
695 + u16 i;
696
697 if (!pile || index >= pile->num_entries)
698 return -EINVAL;
699 @@ -277,8 +285,6 @@ static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
700 count++;
701 }
702
703 - if (count && index < pile->search_hint)
704 - pile->search_hint = index;
705
706 return count;
707 }
708 @@ -798,9 +804,9 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
709 struct rtnl_link_stats64 *ns; /* netdev stats */
710 struct i40e_eth_stats *oes;
711 struct i40e_eth_stats *es; /* device's eth stats */
712 - u32 tx_restart, tx_busy;
713 + u64 tx_restart, tx_busy;
714 struct i40e_ring *p;
715 - u32 rx_page, rx_buf;
716 + u64 rx_page, rx_buf;
717 u64 bytes, packets;
718 unsigned int start;
719 u64 tx_linearize;
720 @@ -10084,15 +10090,9 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
721 }
722 i40e_get_oem_version(&pf->hw);
723
724 - if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
725 - ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
726 - hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
727 - /* The following delay is necessary for 4.33 firmware and older
728 - * to recover after EMP reset. 200 ms should suffice but we
729 - * put here 300 ms to be sure that FW is ready to operate
730 - * after reset.
731 - */
732 - mdelay(300);
733 + if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state)) {
734 + /* The following delay is necessary for firmware update. */
735 + mdelay(1000);
736 }
737
738 /* re-verify the eeprom if we just had an EMP reset */
739 @@ -11388,7 +11388,6 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
740 return -ENOMEM;
741
742 pf->irq_pile->num_entries = vectors;
743 - pf->irq_pile->search_hint = 0;
744
745 /* track first vector for misc interrupts, ignore return */
746 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);
747 @@ -12139,7 +12138,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
748 goto sw_init_done;
749 }
750 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
751 - pf->qp_pile->search_hint = 0;
752
753 pf->tx_timeout_recovery_level = 1;
754
755 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
756 index cd6f5bd982559..4962e6193eeca 100644
757 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
758 +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
759 @@ -2485,6 +2485,59 @@ error_param:
760 aq_ret);
761 }
762
763 +/**
764 + * i40e_check_enough_queue - find big enough queue number
765 + * @vf: pointer to the VF info
766 + * @needed: the number of items needed
767 + *
768 + * Returns the base item index of the queue, or negative for error
769 + **/
770 +static int i40e_check_enough_queue(struct i40e_vf *vf, u16 needed)
771 +{
772 + unsigned int i, cur_queues, more, pool_size;
773 + struct i40e_lump_tracking *pile;
774 + struct i40e_pf *pf = vf->pf;
775 + struct i40e_vsi *vsi;
776 +
777 + vsi = pf->vsi[vf->lan_vsi_idx];
778 + cur_queues = vsi->alloc_queue_pairs;
779 +
780 + /* if current allocated queues are enough for need */
781 + if (cur_queues >= needed)
782 + return vsi->base_queue;
783 +
784 + pile = pf->qp_pile;
785 + if (cur_queues > 0) {
786 + /* if the allocated queues are not zero
787 + * just check if there are enough queues for more
788 + * behind the allocated queues.
789 + */
790 + more = needed - cur_queues;
791 + for (i = vsi->base_queue + cur_queues;
792 + i < pile->num_entries; i++) {
793 + if (pile->list[i] & I40E_PILE_VALID_BIT)
794 + break;
795 +
796 + if (more-- == 1)
797 + /* there is enough */
798 + return vsi->base_queue;
799 + }
800 + }
801 +
802 + pool_size = 0;
803 + for (i = 0; i < pile->num_entries; i++) {
804 + if (pile->list[i] & I40E_PILE_VALID_BIT) {
805 + pool_size = 0;
806 + continue;
807 + }
808 + if (needed <= ++pool_size)
809 + /* there is enough */
810 + return i;
811 + }
812 +
813 + return -ENOMEM;
814 +}
815 +
816 /**
817 * i40e_vc_request_queues_msg
818 * @vf: pointer to the VF info
819 @@ -2519,6 +2572,12 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg)
820 req_pairs - cur_pairs,
821 pf->queues_left);
822 vfres->num_queue_pairs = pf->queues_left + cur_pairs;
823 + } else if (i40e_check_enough_queue(vf, req_pairs) < 0) {
824 + dev_warn(&pf->pdev->dev,
825 + "VF %d requested %d more queues, but there is not enough for it.\n",
826 + vf->vf_id,
827 + req_pairs - cur_pairs);
828 + vfres->num_queue_pairs = cur_pairs;
829 } else {
830 /* successful request */
831 vf->num_req_queues = req_pairs;
832 diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
833 index 5ab53e9942f30..5d30b3e1806ab 100644
834 --- a/drivers/net/hamradio/yam.c
835 +++ b/drivers/net/hamradio/yam.c
836 @@ -951,9 +951,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
837 sizeof(struct yamdrv_ioctl_mcs));
838 if (IS_ERR(ym))
839 return PTR_ERR(ym);
840 - if (ym->cmd != SIOCYAMSMCS)
841 - return -EINVAL;
842 - if (ym->bitrate > YAM_MAXBITRATE) {
843 + if (ym->cmd != SIOCYAMSMCS || ym->bitrate > YAM_MAXBITRATE) {
844 kfree(ym);
845 return -EINVAL;
846 }
847 diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
848 index 5e956089bf525..c23fec34b50e9 100644
849 --- a/drivers/net/phy/broadcom.c
850 +++ b/drivers/net/phy/broadcom.c
851 @@ -646,6 +646,7 @@ static struct phy_driver broadcom_drivers[] = {
852 .phy_id_mask = 0xfffffff0,
853 .name = "Broadcom BCM54616S",
854 /* PHY_GBIT_FEATURES */
855 + .soft_reset = genphy_soft_reset,
856 .config_init = bcm54xx_config_init,
857 .config_aneg = bcm54616s_config_aneg,
858 .ack_interrupt = bcm_phy_ack_intr,
859 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
860 index 35ade5d21de51..78b918dcd5472 100644
861 --- a/drivers/net/phy/phy_device.c
862 +++ b/drivers/net/phy/phy_device.c
863 @@ -1433,6 +1433,9 @@ void phy_detach(struct phy_device *phydev)
864 phy_driver_is_genphy_10g(phydev))
865 device_release_driver(&phydev->mdio.dev);
866
867 + /* Assert the reset signal */
868 + phy_device_reset(phydev, 1);
869 +
870 /*
871 * The phydev might go away on the put_device() below, so avoid
872 * a use-after-free bug by reading the underlying bus first.
873 @@ -1444,9 +1447,6 @@ void phy_detach(struct phy_device *phydev)
874 ndev_owner = dev->dev.parent->driver->owner;
875 if (ndev_owner != bus->owner)
876 module_put(bus->owner);
877 -
878 - /* Assert the reset signal */
879 - phy_device_reset(phydev, 1);
880 }
881 EXPORT_SYMBOL(phy_detach);
882
883 diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
884 index 7be43a1eaefda..5b2bf75269033 100644
885 --- a/drivers/net/phy/phylink.c
886 +++ b/drivers/net/phy/phylink.c
887 @@ -582,6 +582,11 @@ static int phylink_register_sfp(struct phylink *pl,
888 return ret;
889 }
890
891 + if (!fwnode_device_is_available(ref.fwnode)) {
892 + fwnode_handle_put(ref.fwnode);
893 + return 0;
894 + }
895 +
896 pl->sfp_bus = sfp_register_upstream(ref.fwnode, pl, &sfp_phylink_ops);
897 if (!pl->sfp_bus)
898 return -ENOMEM;
899 diff --git a/drivers/rpmsg/rpmsg_char.c b/drivers/rpmsg/rpmsg_char.c
900 index c655074c07c2e..ac50ed7577651 100644
901 --- a/drivers/rpmsg/rpmsg_char.c
902 +++ b/drivers/rpmsg/rpmsg_char.c
903 @@ -92,7 +92,7 @@ static int rpmsg_eptdev_destroy(struct device *dev, void *data)
904 /* wake up any blocked readers */
905 wake_up_interruptible(&eptdev->readq);
906
907 - device_del(&eptdev->dev);
908 + cdev_device_del(&eptdev->cdev, &eptdev->dev);
909 put_device(&eptdev->dev);
910
911 return 0;
912 @@ -336,7 +336,6 @@ static void rpmsg_eptdev_release_device(struct device *dev)
913
914 ida_simple_remove(&rpmsg_ept_ida, dev->id);
915 ida_simple_remove(&rpmsg_minor_ida, MINOR(eptdev->dev.devt));
916 - cdev_del(&eptdev->cdev);
917 kfree(eptdev);
918 }
919
920 @@ -381,19 +380,13 @@ static int rpmsg_eptdev_create(struct rpmsg_ctrldev *ctrldev,
921 dev->id = ret;
922 dev_set_name(dev, "rpmsg%d", ret);
923
924 - ret = cdev_add(&eptdev->cdev, dev->devt, 1);
925 + ret = cdev_device_add(&eptdev->cdev, &eptdev->dev);
926 if (ret)
927 goto free_ept_ida;
928
929 /* We can now rely on the release function for cleanup */
930 dev->release = rpmsg_eptdev_release_device;
931
932 - ret = device_add(dev);
933 - if (ret) {
934 - dev_err(dev, "device_add failed: %d\n", ret);
935 - put_device(dev);
936 - }
937 -
938 return ret;
939
940 free_ept_ida:
941 @@ -462,7 +455,6 @@ static void rpmsg_ctrldev_release_device(struct device *dev)
942
943 ida_simple_remove(&rpmsg_ctrl_ida, dev->id);
944 ida_simple_remove(&rpmsg_minor_ida, MINOR(dev->devt));
945 - cdev_del(&ctrldev->cdev);
946 kfree(ctrldev);
947 }
948
949 @@ -497,19 +489,13 @@ static int rpmsg_chrdev_probe(struct rpmsg_device *rpdev)
950 dev->id = ret;
951 dev_set_name(&ctrldev->dev, "rpmsg_ctrl%d", ret);
952
953 - ret = cdev_add(&ctrldev->cdev, dev->devt, 1);
954 + ret = cdev_device_add(&ctrldev->cdev, &ctrldev->dev);
955 if (ret)
956 goto free_ctrl_ida;
957
958 /* We can now rely on the release function for cleanup */
959 dev->release = rpmsg_ctrldev_release_device;
960
961 - ret = device_add(dev);
962 - if (ret) {
963 - dev_err(&rpdev->dev, "device_add failed: %d\n", ret);
964 - put_device(dev);
965 - }
966 -
967 dev_set_drvdata(&rpdev->dev, ctrldev);
968
969 return ret;
970 @@ -535,7 +521,7 @@ static void rpmsg_chrdev_remove(struct rpmsg_device *rpdev)
971 if (ret)
972 dev_warn(&rpdev->dev, "failed to nuke endpoints: %d\n", ret);
973
974 - device_del(&ctrldev->dev);
975 + cdev_device_del(&ctrldev->cdev, &ctrldev->dev);
976 put_device(&ctrldev->dev);
977 }
978
979 diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
980 index b018b61bd168e..d4c2c44b863dd 100644
981 --- a/drivers/s390/scsi/zfcp_fc.c
982 +++ b/drivers/s390/scsi/zfcp_fc.c
983 @@ -521,6 +521,8 @@ static void zfcp_fc_adisc_handler(void *data)
984 goto out;
985 }
986
987 + /* re-init to undo drop from zfcp_fc_adisc() */
988 + port->d_id = ntoh24(adisc_resp->adisc_port_id);
989 /* port is good, unblock rport without going through erp */
990 zfcp_scsi_schedule_rport_register(port);
991 out:
992 @@ -534,6 +536,7 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
993 struct zfcp_fc_req *fc_req;
994 struct zfcp_adapter *adapter = port->adapter;
995 struct Scsi_Host *shost = adapter->scsi_host;
996 + u32 d_id;
997 int ret;
998
999 fc_req = kmem_cache_zalloc(zfcp_fc_req_cache, GFP_ATOMIC);
1000 @@ -558,7 +561,15 @@ static int zfcp_fc_adisc(struct zfcp_port *port)
1001 fc_req->u.adisc.req.adisc_cmd = ELS_ADISC;
1002 hton24(fc_req->u.adisc.req.adisc_port_id, fc_host_port_id(shost));
1003
1004 - ret = zfcp_fsf_send_els(adapter, port->d_id, &fc_req->ct_els,
1005 + d_id = port->d_id; /* remember as destination for send els below */
1006 + /*
1007 + * Force fresh GID_PN lookup on next port recovery.
1008 + * Must happen after request setup and before sending request,
1009 + * to prevent race with port->d_id re-init in zfcp_fc_adisc_handler().
1010 + */
1011 + port->d_id = 0;
1012 +
1013 + ret = zfcp_fsf_send_els(adapter, d_id, &fc_req->ct_els,
1014 ZFCP_FC_CTELS_TMO);
1015 if (ret)
1016 kmem_cache_free(zfcp_fc_req_cache, fc_req);
1017 diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
1018 index b4bfab5edf8ff..e6c3e7c070aaf 100644
1019 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
1020 +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
1021 @@ -80,7 +80,7 @@ static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba);
1022 static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba);
1023 static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1024 struct device *parent, int npiv);
1025 -static void bnx2fc_destroy_work(struct work_struct *work);
1026 +static void bnx2fc_port_destroy(struct fcoe_port *port);
1027
1028 static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev);
1029 static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device
1030 @@ -902,9 +902,6 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
1031 __bnx2fc_destroy(interface);
1032 }
1033 mutex_unlock(&bnx2fc_dev_lock);
1034 -
1035 - /* Ensure ALL destroy work has been completed before return */
1036 - flush_workqueue(bnx2fc_wq);
1037 return;
1038
1039 default:
1040 @@ -1211,8 +1208,8 @@ static int bnx2fc_vport_destroy(struct fc_vport *vport)
1041 mutex_unlock(&n_port->lp_mutex);
1042 bnx2fc_free_vport(interface->hba, port->lport);
1043 bnx2fc_port_shutdown(port->lport);
1044 + bnx2fc_port_destroy(port);
1045 bnx2fc_interface_put(interface);
1046 - queue_work(bnx2fc_wq, &port->destroy_work);
1047 return 0;
1048 }
1049
1050 @@ -1521,7 +1518,6 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1051 port->lport = lport;
1052 port->priv = interface;
1053 port->get_netdev = bnx2fc_netdev;
1054 - INIT_WORK(&port->destroy_work, bnx2fc_destroy_work);
1055
1056 /* Configure fcoe_port */
1057 rc = bnx2fc_lport_config(lport);
1058 @@ -1649,8 +1645,8 @@ static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
1059 bnx2fc_interface_cleanup(interface);
1060 bnx2fc_stop(interface);
1061 list_del(&interface->list);
1062 + bnx2fc_port_destroy(port);
1063 bnx2fc_interface_put(interface);
1064 - queue_work(bnx2fc_wq, &port->destroy_work);
1065 }
1066
1067 /**
1068 @@ -1691,15 +1687,12 @@ netdev_err:
1069 return rc;
1070 }
1071
1072 -static void bnx2fc_destroy_work(struct work_struct *work)
1073 +static void bnx2fc_port_destroy(struct fcoe_port *port)
1074 {
1075 - struct fcoe_port *port;
1076 struct fc_lport *lport;
1077
1078 - port = container_of(work, struct fcoe_port, destroy_work);
1079 lport = port->lport;
1080 -
1081 - BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n");
1082 + BNX2FC_HBA_DBG(lport, "Entered %s, destroying lport %p\n", __func__, lport);
1083
1084 bnx2fc_if_destroy(lport);
1085 }
1086 @@ -2553,9 +2546,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev)
1087 __bnx2fc_destroy(interface);
1088 mutex_unlock(&bnx2fc_dev_lock);
1089
1090 - /* Ensure ALL destroy work has been completed before return */
1091 - flush_workqueue(bnx2fc_wq);
1092 -
1093 bnx2fc_ulp_stop(hba);
1094 /* unregister cnic device */
1095 if (test_and_clear_bit(BNX2FC_CNIC_REGISTERED, &hba->reg_with_cnic))
1096 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
1097 index 38eb49ba361f0..3d3d616e58989 100644
1098 --- a/drivers/tty/n_gsm.c
1099 +++ b/drivers/tty/n_gsm.c
1100 @@ -313,6 +313,7 @@ static struct tty_driver *gsm_tty_driver;
1101 #define GSM1_ESCAPE_BITS 0x20
1102 #define XON 0x11
1103 #define XOFF 0x13
1104 +#define ISO_IEC_646_MASK 0x7F
1105
1106 static const struct tty_port_operations gsm_port_ops;
1107
1108 @@ -531,7 +532,8 @@ static int gsm_stuff_frame(const u8 *input, u8 *output, int len)
1109 int olen = 0;
1110 while (len--) {
1111 if (*input == GSM1_SOF || *input == GSM1_ESCAPE
1112 - || *input == XON || *input == XOFF) {
1113 + || (*input & ISO_IEC_646_MASK) == XON
1114 + || (*input & ISO_IEC_646_MASK) == XOFF) {
1115 *output++ = GSM1_ESCAPE;
1116 *output++ = *input++ ^ GSM1_ESCAPE_BITS;
1117 olen++;
1118 diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
1119 index 9ba31701a372e..a9b0a84b1e433 100644
1120 --- a/drivers/tty/serial/8250/8250_of.c
1121 +++ b/drivers/tty/serial/8250/8250_of.c
1122 @@ -105,8 +105,17 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
1123 port->mapsize = resource_size(&resource);
1124
1125 /* Check for shifted address mapping */
1126 - if (of_property_read_u32(np, "reg-offset", &prop) == 0)
1127 + if (of_property_read_u32(np, "reg-offset", &prop) == 0) {
1128 + if (prop >= port->mapsize) {
1129 + dev_warn(&ofdev->dev, "reg-offset %u exceeds region size %pa\n",
1130 + prop, &port->mapsize);
1131 + ret = -EINVAL;
1132 + goto err_unprepare;
1133 + }
1134 +
1135 port->mapbase += prop;
1136 + port->mapsize -= prop;
1137 + }
1138
1139 port->iotype = UPIO_MEM;
1140 if (of_property_read_u32(np, "reg-io-width", &prop) == 0) {
1141 diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
1142 index c82c7181348de..fd443bc4c2983 100644
1143 --- a/drivers/tty/serial/8250/8250_pci.c
1144 +++ b/drivers/tty/serial/8250/8250_pci.c
1145 @@ -5130,8 +5130,30 @@ static const struct pci_device_id serial_pci_tbl[] = {
1146 { PCI_VENDOR_ID_INTASHIELD, PCI_DEVICE_ID_INTASHIELD_IS400,
1147 PCI_ANY_ID, PCI_ANY_ID, 0, 0, /* 135a.0dc0 */
1148 pbn_b2_4_115200 },
1149 + /* Brainboxes Devices */
1150 /*
1151 - * BrainBoxes UC-260
1152 + * Brainboxes UC-101
1153 + */
1154 + { PCI_VENDOR_ID_INTASHIELD, 0x0BA1,
1155 + PCI_ANY_ID, PCI_ANY_ID,
1156 + 0, 0,
1157 + pbn_b2_2_115200 },
1158 + /*
1159 + * Brainboxes UC-235/246
1160 + */
1161 + { PCI_VENDOR_ID_INTASHIELD, 0x0AA1,
1162 + PCI_ANY_ID, PCI_ANY_ID,
1163 + 0, 0,
1164 + pbn_b2_1_115200 },
1165 + /*
1166 + * Brainboxes UC-257
1167 + */
1168 + { PCI_VENDOR_ID_INTASHIELD, 0x0861,
1169 + PCI_ANY_ID, PCI_ANY_ID,
1170 + 0, 0,
1171 + pbn_b2_2_115200 },
1172 + /*
1173 + * Brainboxes UC-260/271/701/756
1174 */
1175 { PCI_VENDOR_ID_INTASHIELD, 0x0D21,
1176 PCI_ANY_ID, PCI_ANY_ID,
1177 @@ -5139,7 +5161,81 @@ static const struct pci_device_id serial_pci_tbl[] = {
1178 pbn_b2_4_115200 },
1179 { PCI_VENDOR_ID_INTASHIELD, 0x0E34,
1180 PCI_ANY_ID, PCI_ANY_ID,
1181 - PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
1182 + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, 0xffff00,
1183 + pbn_b2_4_115200 },
1184 + /*
1185 + * Brainboxes UC-268
1186 + */
1187 + { PCI_VENDOR_ID_INTASHIELD, 0x0841,
1188 + PCI_ANY_ID, PCI_ANY_ID,
1189 + 0, 0,
1190 + pbn_b2_4_115200 },
1191 + /*
1192 + * Brainboxes UC-275/279
1193 + */
1194 + { PCI_VENDOR_ID_INTASHIELD, 0x0881,
1195 + PCI_ANY_ID, PCI_ANY_ID,
1196 + 0, 0,
1197 + pbn_b2_8_115200 },
1198 + /*
1199 + * Brainboxes UC-302
1200 + */
1201 + { PCI_VENDOR_ID_INTASHIELD, 0x08E1,
1202 + PCI_ANY_ID, PCI_ANY_ID,
1203 + 0, 0,
1204 + pbn_b2_2_115200 },
1205 + /*
1206 + * Brainboxes UC-310
1207 + */
1208 + { PCI_VENDOR_ID_INTASHIELD, 0x08C1,
1209 + PCI_ANY_ID, PCI_ANY_ID,
1210 + 0, 0,
1211 + pbn_b2_2_115200 },
1212 + /*
1213 + * Brainboxes UC-313
1214 + */
1215 + { PCI_VENDOR_ID_INTASHIELD, 0x08A3,
1216 + PCI_ANY_ID, PCI_ANY_ID,
1217 + 0, 0,
1218 + pbn_b2_2_115200 },
1219 + /*
1220 + * Brainboxes UC-320/324
1221 + */
1222 + { PCI_VENDOR_ID_INTASHIELD, 0x0A61,
1223 + PCI_ANY_ID, PCI_ANY_ID,
1224 + 0, 0,
1225 + pbn_b2_1_115200 },
1226 + /*
1227 + * Brainboxes UC-346
1228 + */
1229 + { PCI_VENDOR_ID_INTASHIELD, 0x0B02,
1230 + PCI_ANY_ID, PCI_ANY_ID,
1231 + 0, 0,
1232 + pbn_b2_4_115200 },
1233 + /*
1234 + * Brainboxes UC-357
1235 + */
1236 + { PCI_VENDOR_ID_INTASHIELD, 0x0A81,
1237 + PCI_ANY_ID, PCI_ANY_ID,
1238 + 0, 0,
1239 + pbn_b2_2_115200 },
1240 + { PCI_VENDOR_ID_INTASHIELD, 0x0A83,
1241 + PCI_ANY_ID, PCI_ANY_ID,
1242 + 0, 0,
1243 + pbn_b2_2_115200 },
1244 + /*
1245 + * Brainboxes UC-368
1246 + */
1247 + { PCI_VENDOR_ID_INTASHIELD, 0x0C41,
1248 + PCI_ANY_ID, PCI_ANY_ID,
1249 + 0, 0,
1250 + pbn_b2_4_115200 },
1251 + /*
1252 + * Brainboxes UC-420/431
1253 + */
1254 + { PCI_VENDOR_ID_INTASHIELD, 0x0921,
1255 + PCI_ANY_ID, PCI_ANY_ID,
1256 + 0, 0,
1257 pbn_b2_4_115200 },
1258 /*
1259 * Perle PCI-RAS cards
1260 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
1261 index 23b7bdae173c8..d517b911cd042 100644
1262 --- a/drivers/tty/serial/stm32-usart.c
1263 +++ b/drivers/tty/serial/stm32-usart.c
1264 @@ -536,7 +536,7 @@ static void stm32_start_tx(struct uart_port *port)
1265 {
1266 struct circ_buf *xmit = &port->state->xmit;
1267
1268 - if (uart_circ_empty(xmit))
1269 + if (uart_circ_empty(xmit) && !port->x_char)
1270 return;
1271
1272 stm32_transmit_chars(port);
1273 diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
1274 index 9a2ab6751a23c..5a4d08de546fe 100644
1275 --- a/drivers/usb/common/ulpi.c
1276 +++ b/drivers/usb/common/ulpi.c
1277 @@ -39,8 +39,11 @@ static int ulpi_match(struct device *dev, struct device_driver *driver)
1278 struct ulpi *ulpi = to_ulpi_dev(dev);
1279 const struct ulpi_device_id *id;
1280
1281 - /* Some ULPI devices don't have a vendor id so rely on OF match */
1282 - if (ulpi->id.vendor == 0)
1283 + /*
1284 + * Some ULPI devices don't have a vendor id
1285 + * or provide an id_table so rely on OF match.
1286 + */
1287 + if (ulpi->id.vendor == 0 || !drv->id_table)
1288 return of_driver_match_device(dev, driver);
1289
1290 for (id = drv->id_table; id->vendor; id++)
1291 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
1292 index fe9b392bffee3..39203f2ce6a19 100644
1293 --- a/drivers/usb/core/hcd.c
1294 +++ b/drivers/usb/core/hcd.c
1295 @@ -1567,6 +1567,13 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
1296 urb->hcpriv = NULL;
1297 INIT_LIST_HEAD(&urb->urb_list);
1298 atomic_dec(&urb->use_count);
1299 + /*
1300 + * Order the write of urb->use_count above before the read
1301 + * of urb->reject below. Pairs with the memory barriers in
1302 + * usb_kill_urb() and usb_poison_urb().
1303 + */
1304 + smp_mb__after_atomic();
1305 +
1306 atomic_dec(&urb->dev->urbnum);
1307 if (atomic_read(&urb->reject))
1308 wake_up(&usb_kill_urb_queue);
1309 @@ -1662,6 +1669,13 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
1310
1311 usb_anchor_resume_wakeups(anchor);
1312 atomic_dec(&urb->use_count);
1313 + /*
1314 + * Order the write of urb->use_count above before the read
1315 + * of urb->reject below. Pairs with the memory barriers in
1316 + * usb_kill_urb() and usb_poison_urb().
1317 + */
1318 + smp_mb__after_atomic();
1319 +
1320 if (unlikely(atomic_read(&urb->reject)))
1321 wake_up(&usb_kill_urb_queue);
1322 usb_put_urb(urb);
1323 diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
1324 index 31ca5abb4c12a..0045bbc3627dd 100644
1325 --- a/drivers/usb/core/urb.c
1326 +++ b/drivers/usb/core/urb.c
1327 @@ -691,6 +691,12 @@ void usb_kill_urb(struct urb *urb)
1328 if (!(urb && urb->dev && urb->ep))
1329 return;
1330 atomic_inc(&urb->reject);
1331 + /*
1332 + * Order the write of urb->reject above before the read
1333 + * of urb->use_count below. Pairs with the barriers in
1334 + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
1335 + */
1336 + smp_mb__after_atomic();
1337
1338 usb_hcd_unlink_urb(urb, -ENOENT);
1339 wait_event(usb_kill_urb_queue, atomic_read(&urb->use_count) == 0);
1340 @@ -732,6 +738,12 @@ void usb_poison_urb(struct urb *urb)
1341 if (!urb)
1342 return;
1343 atomic_inc(&urb->reject);
1344 + /*
1345 + * Order the write of urb->reject above before the read
1346 + * of urb->use_count below. Pairs with the barriers in
1347 + * __usb_hcd_giveback_urb() and usb_hcd_submit_urb().
1348 + */
1349 + smp_mb__after_atomic();
1350
1351 if (!urb->dev || !urb->ep)
1352 return;
1353 diff --git a/drivers/usb/gadget/function/f_sourcesink.c b/drivers/usb/gadget/function/f_sourcesink.c
1354 index 282737e4609ce..2c65a9bb3c81b 100644
1355 --- a/drivers/usb/gadget/function/f_sourcesink.c
1356 +++ b/drivers/usb/gadget/function/f_sourcesink.c
1357 @@ -583,6 +583,7 @@ static int source_sink_start_ep(struct f_sourcesink *ss, bool is_in,
1358
1359 if (is_iso) {
1360 switch (speed) {
1361 + case USB_SPEED_SUPER_PLUS:
1362 case USB_SPEED_SUPER:
1363 size = ss->isoc_maxpacket *
1364 (ss->isoc_mult + 1) *
1365 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
1366 index 3ba4e060fd051..66e7f5d123c46 100644
1367 --- a/drivers/usb/storage/unusual_devs.h
1368 +++ b/drivers/usb/storage/unusual_devs.h
1369 @@ -2301,6 +2301,16 @@ UNUSUAL_DEV( 0x2027, 0xa001, 0x0000, 0x9999,
1370 USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
1371 US_FL_SCM_MULT_TARG ),
1372
1373 +/*
1374 + * Reported by DocMAX <mail@vacharakis.de>
1375 + * and Thomas Weißschuh <linux@weissschuh.net>
1376 + */
1377 +UNUSUAL_DEV( 0x2109, 0x0715, 0x9999, 0x9999,
1378 + "VIA Labs, Inc.",
1379 + "VL817 SATA Bridge",
1380 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1381 + US_FL_IGNORE_UAS),
1382 +
1383 UNUSUAL_DEV( 0x2116, 0x0320, 0x0001, 0x0001,
1384 "ST",
1385 "2A",
1386 diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
1387 index 1316464cf2933..fb18264b702e6 100644
1388 --- a/drivers/usb/typec/tcpm/tcpm.c
1389 +++ b/drivers/usb/typec/tcpm/tcpm.c
1390 @@ -3903,7 +3903,8 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
1391 case SNK_TRYWAIT_DEBOUNCE:
1392 break;
1393 case SNK_ATTACH_WAIT:
1394 - tcpm_set_state(port, SNK_UNATTACHED, 0);
1395 + case SNK_DEBOUNCED:
1396 + /* Do nothing, as TCPM is still waiting for vbus to reaach VSAFE5V to connect */
1397 break;
1398
1399 case SNK_NEGOTIATE_CAPABILITIES:
1400 diff --git a/drivers/usb/typec/ucsi/ucsi_ccg.c b/drivers/usb/typec/ucsi/ucsi_ccg.c
1401 index d772fce519057..0a38f98f78650 100644
1402 --- a/drivers/usb/typec/ucsi/ucsi_ccg.c
1403 +++ b/drivers/usb/typec/ucsi/ucsi_ccg.c
1404 @@ -304,7 +304,7 @@ static int ucsi_ccg_init(struct ucsi_ccg *uc)
1405 if (status < 0)
1406 return status;
1407
1408 - if (!data)
1409 + if (!(data & DEV_INT))
1410 return 0;
1411
1412 status = ccg_write(uc, CCGX_RAB_INTR_REG, &data, sizeof(data));
1413 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1414 index e9d3eb7f0e2b8..675112aa998f2 100644
1415 --- a/fs/btrfs/ioctl.c
1416 +++ b/fs/btrfs/ioctl.c
1417 @@ -3027,10 +3027,8 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
1418 inode_lock(inode);
1419 err = btrfs_delete_subvolume(dir, dentry);
1420 inode_unlock(inode);
1421 - if (!err) {
1422 - fsnotify_rmdir(dir, dentry);
1423 - d_delete(dentry);
1424 - }
1425 + if (!err)
1426 + d_delete_notify(dir, dentry);
1427
1428 out_dput:
1429 dput(dentry);
1430 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
1431 index cb733652ecca6..2992cebb78661 100644
1432 --- a/fs/configfs/dir.c
1433 +++ b/fs/configfs/dir.c
1434 @@ -1805,8 +1805,8 @@ void configfs_unregister_group(struct config_group *group)
1435 configfs_detach_group(&group->cg_item);
1436 d_inode(dentry)->i_flags |= S_DEAD;
1437 dont_mount(dentry);
1438 + d_drop(dentry);
1439 fsnotify_rmdir(d_inode(parent), dentry);
1440 - d_delete(dentry);
1441 inode_unlock(d_inode(parent));
1442
1443 dput(dentry);
1444 @@ -1947,10 +1947,10 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
1445 configfs_detach_group(&group->cg_item);
1446 d_inode(dentry)->i_flags |= S_DEAD;
1447 dont_mount(dentry);
1448 - fsnotify_rmdir(d_inode(root), dentry);
1449 inode_unlock(d_inode(dentry));
1450
1451 - d_delete(dentry);
1452 + d_drop(dentry);
1453 + fsnotify_rmdir(d_inode(root), dentry);
1454
1455 inode_unlock(d_inode(root));
1456
1457 diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
1458 index 42e5a766d33c7..4f25015aa5342 100644
1459 --- a/fs/devpts/inode.c
1460 +++ b/fs/devpts/inode.c
1461 @@ -621,8 +621,8 @@ void devpts_pty_kill(struct dentry *dentry)
1462
1463 dentry->d_fsdata = NULL;
1464 drop_nlink(dentry->d_inode);
1465 - fsnotify_unlink(d_inode(dentry->d_parent), dentry);
1466 d_drop(dentry);
1467 + fsnotify_unlink(d_inode(dentry->d_parent), dentry);
1468 dput(dentry); /* d_alloc_name() in devpts_pty_new() */
1469 }
1470
1471 diff --git a/fs/namei.c b/fs/namei.c
1472 index 5b5759d708220..b952ecbd49c29 100644
1473 --- a/fs/namei.c
1474 +++ b/fs/namei.c
1475 @@ -3878,13 +3878,12 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry)
1476 dentry->d_inode->i_flags |= S_DEAD;
1477 dont_mount(dentry);
1478 detach_mounts(dentry);
1479 - fsnotify_rmdir(dir, dentry);
1480
1481 out:
1482 inode_unlock(dentry->d_inode);
1483 dput(dentry);
1484 if (!error)
1485 - d_delete(dentry);
1486 + d_delete_notify(dir, dentry);
1487 return error;
1488 }
1489 EXPORT_SYMBOL(vfs_rmdir);
1490 @@ -3995,7 +3994,6 @@ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegate
1491 if (!error) {
1492 dont_mount(dentry);
1493 detach_mounts(dentry);
1494 - fsnotify_unlink(dir, dentry);
1495 }
1496 }
1497 }
1498 @@ -4003,9 +4001,11 @@ out:
1499 inode_unlock(target);
1500
1501 /* We don't d_delete() NFS sillyrenamed files--they still exist. */
1502 - if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
1503 + if (!error && dentry->d_flags & DCACHE_NFSFS_RENAMED) {
1504 + fsnotify_unlink(dir, dentry);
1505 + } else if (!error) {
1506 fsnotify_link_count(target);
1507 - d_delete(dentry);
1508 + d_delete_notify(dir, dentry);
1509 }
1510
1511 return error;
1512 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1513 index e7c0790308fe0..f1c99fe486c4d 100644
1514 --- a/fs/nfs/dir.c
1515 +++ b/fs/nfs/dir.c
1516 @@ -1638,6 +1638,24 @@ out:
1517
1518 no_open:
1519 res = nfs_lookup(dir, dentry, lookup_flags);
1520 + if (!res) {
1521 + inode = d_inode(dentry);
1522 + if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
1523 + !S_ISDIR(inode->i_mode))
1524 + res = ERR_PTR(-ENOTDIR);
1525 + else if (inode && S_ISREG(inode->i_mode))
1526 + res = ERR_PTR(-EOPENSTALE);
1527 + } else if (!IS_ERR(res)) {
1528 + inode = d_inode(res);
1529 + if ((lookup_flags & LOOKUP_DIRECTORY) && inode &&
1530 + !S_ISDIR(inode->i_mode)) {
1531 + dput(res);
1532 + res = ERR_PTR(-ENOTDIR);
1533 + } else if (inode && S_ISREG(inode->i_mode)) {
1534 + dput(res);
1535 + res = ERR_PTR(-EOPENSTALE);
1536 + }
1537 + }
1538 if (switched) {
1539 d_lookup_done(dentry);
1540 if (!res)
1541 @@ -2035,6 +2053,8 @@ nfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1542
1543 trace_nfs_link_enter(inode, dir, dentry);
1544 d_drop(dentry);
1545 + if (S_ISREG(inode->i_mode))
1546 + nfs_sync_inode(inode);
1547 error = NFS_PROTO(dir)->link(inode, dir, &dentry->d_name);
1548 if (error == 0) {
1549 ihold(inode);
1550 @@ -2123,6 +2143,8 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
1551 }
1552 }
1553
1554 + if (S_ISREG(old_inode->i_mode))
1555 + nfs_sync_inode(old_inode);
1556 task = nfs_async_rename(old_dir, new_dir, old_dentry, new_dentry, NULL);
1557 if (IS_ERR(task)) {
1558 error = PTR_ERR(task);
1559 diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
1560 index f6328ae9b2da4..055cc0458f270 100644
1561 --- a/fs/nfsd/nfsctl.c
1562 +++ b/fs/nfsd/nfsctl.c
1563 @@ -1247,7 +1247,8 @@ static void nfsdfs_remove_file(struct inode *dir, struct dentry *dentry)
1564 clear_ncl(d_inode(dentry));
1565 dget(dentry);
1566 ret = simple_unlink(dir, dentry);
1567 - d_delete(dentry);
1568 + d_drop(dentry);
1569 + fsnotify_unlink(dir, dentry);
1570 dput(dentry);
1571 WARN_ON_ONCE(ret);
1572 }
1573 @@ -1336,8 +1337,8 @@ void nfsd_client_rmdir(struct dentry *dentry)
1574 dget(dentry);
1575 ret = simple_rmdir(dir, dentry);
1576 WARN_ON_ONCE(ret);
1577 + d_drop(dentry);
1578 fsnotify_rmdir(dir, dentry);
1579 - d_delete(dentry);
1580 dput(dentry);
1581 inode_unlock(dir);
1582 }
1583 diff --git a/fs/udf/inode.c b/fs/udf/inode.c
1584 index 507f8f9103270..639aabf30eaf0 100644
1585 --- a/fs/udf/inode.c
1586 +++ b/fs/udf/inode.c
1587 @@ -258,10 +258,6 @@ int udf_expand_file_adinicb(struct inode *inode)
1588 char *kaddr;
1589 struct udf_inode_info *iinfo = UDF_I(inode);
1590 int err;
1591 - struct writeback_control udf_wbc = {
1592 - .sync_mode = WB_SYNC_NONE,
1593 - .nr_to_write = 1,
1594 - };
1595
1596 WARN_ON_ONCE(!inode_is_locked(inode));
1597 if (!iinfo->i_lenAlloc) {
1598 @@ -305,8 +301,10 @@ int udf_expand_file_adinicb(struct inode *inode)
1599 iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
1600 /* from now on we have normal address_space methods */
1601 inode->i_data.a_ops = &udf_aops;
1602 + set_page_dirty(page);
1603 + unlock_page(page);
1604 up_write(&iinfo->i_data_sem);
1605 - err = inode->i_data.a_ops->writepage(page, &udf_wbc);
1606 + err = filemap_fdatawrite(inode->i_mapping);
1607 if (err) {
1608 /* Restore everything back so that we don't lose data... */
1609 lock_page(page);
1610 @@ -318,6 +316,7 @@ int udf_expand_file_adinicb(struct inode *inode)
1611 unlock_page(page);
1612 iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
1613 inode->i_data.a_ops = &udf_adinicb_aops;
1614 + iinfo->i_lenAlloc = inode->i_size;
1615 up_write(&iinfo->i_data_sem);
1616 }
1617 put_page(page);
1618 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
1619 index a2d5d175d3c15..e9d2024473b0f 100644
1620 --- a/include/linux/fsnotify.h
1621 +++ b/include/linux/fsnotify.h
1622 @@ -188,6 +188,42 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
1623 fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, &new_dentry->d_name, 0);
1624 }
1625
1626 +/*
1627 + * fsnotify_delete - @dentry was unlinked and unhashed
1628 + *
1629 + * Caller must make sure that dentry->d_name is stable.
1630 + *
1631 + * Note: unlike fsnotify_unlink(), we have to pass also the unlinked inode
1632 + * as this may be called after d_delete() and old_dentry may be negative.
1633 + */
1634 +static inline void fsnotify_delete(struct inode *dir, struct inode *inode,
1635 + struct dentry *dentry)
1636 +{
1637 + __u32 mask = FS_DELETE;
1638 +
1639 + if (S_ISDIR(inode->i_mode))
1640 + mask |= FS_ISDIR;
1641 +
1642 + fsnotify(dir, mask, inode, FSNOTIFY_EVENT_INODE, &dentry->d_name, 0);
1643 +}
1644 +
1645 +/**
1646 + * d_delete_notify - delete a dentry and call fsnotify_delete()
1647 + * @dentry: The dentry to delete
1648 + *
1649 + * This helper is used to guaranty that the unlinked inode cannot be found
1650 + * by lookup of this name after fsnotify_delete() event has been delivered.
1651 + */
1652 +static inline void d_delete_notify(struct inode *dir, struct dentry *dentry)
1653 +{
1654 + struct inode *inode = d_inode(dentry);
1655 +
1656 + ihold(inode);
1657 + d_delete(dentry);
1658 + fsnotify_delete(dir, inode, dentry);
1659 + iput(inode);
1660 +}
1661 +
1662 /*
1663 * fsnotify_unlink - 'name' was unlinked
1664 *
1665 @@ -195,10 +231,10 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
1666 */
1667 static inline void fsnotify_unlink(struct inode *dir, struct dentry *dentry)
1668 {
1669 - /* Expected to be called before d_delete() */
1670 - WARN_ON_ONCE(d_is_negative(dentry));
1671 + if (WARN_ON_ONCE(d_is_negative(dentry)))
1672 + return;
1673
1674 - fsnotify_dirent(dir, dentry, FS_DELETE);
1675 + fsnotify_delete(dir, d_inode(dentry), dentry);
1676 }
1677
1678 /*
1679 @@ -218,10 +254,10 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
1680 */
1681 static inline void fsnotify_rmdir(struct inode *dir, struct dentry *dentry)
1682 {
1683 - /* Expected to be called before d_delete() */
1684 - WARN_ON_ONCE(d_is_negative(dentry));
1685 + if (WARN_ON_ONCE(d_is_negative(dentry)))
1686 + return;
1687
1688 - fsnotify_dirent(dir, dentry, FS_DELETE | FS_ISDIR);
1689 + fsnotify_delete(dir, d_inode(dentry), dentry);
1690 }
1691
1692 /*
1693 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1694 index 4860944e936db..288a586782563 100644
1695 --- a/include/linux/netdevice.h
1696 +++ b/include/linux/netdevice.h
1697 @@ -2397,6 +2397,7 @@ struct packet_type {
1698 struct net_device *);
1699 bool (*id_match)(struct packet_type *ptype,
1700 struct sock *sk);
1701 + struct net *af_packet_net;
1702 void *af_packet_priv;
1703 struct list_head list;
1704 };
1705 diff --git a/include/net/ip.h b/include/net/ip.h
1706 index 52abfc00b5e3d..3f3ea86b2173c 100644
1707 --- a/include/net/ip.h
1708 +++ b/include/net/ip.h
1709 @@ -509,19 +509,18 @@ static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
1710 {
1711 struct iphdr *iph = ip_hdr(skb);
1712
1713 + /* We had many attacks based on IPID, use the private
1714 + * generator as much as we can.
1715 + */
1716 + if (sk && inet_sk(sk)->inet_daddr) {
1717 + iph->id = htons(inet_sk(sk)->inet_id);
1718 + inet_sk(sk)->inet_id += segs;
1719 + return;
1720 + }
1721 if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
1722 - /* This is only to work around buggy Windows95/2000
1723 - * VJ compression implementations. If the ID field
1724 - * does not change, they drop every other packet in
1725 - * a TCP stream using header compression.
1726 - */
1727 - if (sk && inet_sk(sk)->inet_daddr) {
1728 - iph->id = htons(inet_sk(sk)->inet_id);
1729 - inet_sk(sk)->inet_id += segs;
1730 - } else {
1731 - iph->id = 0;
1732 - }
1733 + iph->id = 0;
1734 } else {
1735 + /* Unfortunately we need the big hammer to get a suitable IPID */
1736 __ip_select_ident(net, iph, segs);
1737 }
1738 }
1739 diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
1740 index 05ecaefeb6322..780754b9cbcd4 100644
1741 --- a/include/net/ip6_fib.h
1742 +++ b/include/net/ip6_fib.h
1743 @@ -247,7 +247,7 @@ static inline bool fib6_get_cookie_safe(const struct fib6_info *f6i,
1744 fn = rcu_dereference(f6i->fib6_node);
1745
1746 if (fn) {
1747 - *cookie = fn->fn_sernum;
1748 + *cookie = READ_ONCE(fn->fn_sernum);
1749 /* pairs with smp_wmb() in fib6_update_sernum_upto_root() */
1750 smp_rmb();
1751 status = true;
1752 diff --git a/include/net/route.h b/include/net/route.h
1753 index 6c516840380db..b85d1912d84fd 100644
1754 --- a/include/net/route.h
1755 +++ b/include/net/route.h
1756 @@ -359,7 +359,7 @@ static inline struct neighbour *ip_neigh_gw4(struct net_device *dev,
1757 {
1758 struct neighbour *neigh;
1759
1760 - neigh = __ipv4_neigh_lookup_noref(dev, daddr);
1761 + neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)daddr);
1762 if (unlikely(!neigh))
1763 neigh = __neigh_create(&arp_tbl, &daddr, dev, false);
1764
1765 diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
1766 index 105df4dfc7839..52571dcad768b 100644
1767 --- a/kernel/power/wakelock.c
1768 +++ b/kernel/power/wakelock.c
1769 @@ -39,23 +39,20 @@ ssize_t pm_show_wakelocks(char *buf, bool show_active)
1770 {
1771 struct rb_node *node;
1772 struct wakelock *wl;
1773 - char *str = buf;
1774 - char *end = buf + PAGE_SIZE;
1775 + int len = 0;
1776
1777 mutex_lock(&wakelocks_lock);
1778
1779 for (node = rb_first(&wakelocks_tree); node; node = rb_next(node)) {
1780 wl = rb_entry(node, struct wakelock, node);
1781 if (wl->ws->active == show_active)
1782 - str += scnprintf(str, end - str, "%s ", wl->name);
1783 + len += sysfs_emit_at(buf, len, "%s ", wl->name);
1784 }
1785 - if (str > buf)
1786 - str--;
1787
1788 - str += scnprintf(str, end - str, "\n");
1789 + len += sysfs_emit_at(buf, len, "\n");
1790
1791 mutex_unlock(&wakelocks_lock);
1792 - return (str - buf);
1793 + return len;
1794 }
1795
1796 #if CONFIG_PM_WAKELOCKS_LIMIT > 0
1797 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1798 index 54f5b2f080f53..5a4dfb55ba16b 100644
1799 --- a/kernel/trace/trace.c
1800 +++ b/kernel/trace/trace.c
1801 @@ -6994,7 +6994,8 @@ static struct tracing_log_err *get_tracing_log_err(struct trace_array *tr)
1802 err = kzalloc(sizeof(*err), GFP_KERNEL);
1803 if (!err)
1804 err = ERR_PTR(-ENOMEM);
1805 - tr->n_err_log_entries++;
1806 + else
1807 + tr->n_err_log_entries++;
1808
1809 return err;
1810 }
1811 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
1812 index 8b33a3c872750..3cef24c6391a5 100644
1813 --- a/kernel/trace/trace_events_hist.c
1814 +++ b/kernel/trace/trace_events_hist.c
1815 @@ -4398,6 +4398,7 @@ static int trace_action_create(struct hist_trigger_data *hist_data,
1816
1817 var_ref_idx = find_var_ref_idx(hist_data, var_ref);
1818 if (WARN_ON(var_ref_idx < 0)) {
1819 + kfree(p);
1820 ret = var_ref_idx;
1821 goto err;
1822 }
1823 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
1824 index 40f1593651e84..082a262ab49c3 100644
1825 --- a/net/bluetooth/hci_event.c
1826 +++ b/net/bluetooth/hci_event.c
1827 @@ -5506,6 +5506,11 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
1828 struct hci_ev_le_advertising_info *ev = ptr;
1829 s8 rssi;
1830
1831 + if (ptr > (void *)skb_tail_pointer(skb) - sizeof(*ev)) {
1832 + bt_dev_err(hdev, "Malicious advertising data.");
1833 + break;
1834 + }
1835 +
1836 if (ev->length <= HCI_MAX_AD_LENGTH &&
1837 ev->data + ev->length <= skb_tail_pointer(skb)) {
1838 rssi = ev->data[ev->length];
1839 @@ -5517,11 +5522,6 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
1840 }
1841
1842 ptr += sizeof(*ev) + ev->length + 1;
1843 -
1844 - if (ptr > (void *) skb_tail_pointer(skb) - sizeof(*ev)) {
1845 - bt_dev_err(hdev, "Malicious advertising data. Stopping processing");
1846 - break;
1847 - }
1848 }
1849
1850 hci_dev_unlock(hdev);
1851 diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
1852 index 36347933ec3af..61f5570645e38 100644
1853 --- a/net/core/net-procfs.c
1854 +++ b/net/core/net-procfs.c
1855 @@ -182,12 +182,23 @@ static const struct seq_operations softnet_seq_ops = {
1856 .show = softnet_seq_show,
1857 };
1858
1859 -static void *ptype_get_idx(loff_t pos)
1860 +static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
1861 {
1862 + struct list_head *ptype_list = NULL;
1863 struct packet_type *pt = NULL;
1864 + struct net_device *dev;
1865 loff_t i = 0;
1866 int t;
1867
1868 + for_each_netdev_rcu(seq_file_net(seq), dev) {
1869 + ptype_list = &dev->ptype_all;
1870 + list_for_each_entry_rcu(pt, ptype_list, list) {
1871 + if (i == pos)
1872 + return pt;
1873 + ++i;
1874 + }
1875 + }
1876 +
1877 list_for_each_entry_rcu(pt, &ptype_all, list) {
1878 if (i == pos)
1879 return pt;
1880 @@ -208,22 +219,40 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
1881 __acquires(RCU)
1882 {
1883 rcu_read_lock();
1884 - return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
1885 + return *pos ? ptype_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
1886 }
1887
1888 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1889 {
1890 + struct net_device *dev;
1891 struct packet_type *pt;
1892 struct list_head *nxt;
1893 int hash;
1894
1895 ++*pos;
1896 if (v == SEQ_START_TOKEN)
1897 - return ptype_get_idx(0);
1898 + return ptype_get_idx(seq, 0);
1899
1900 pt = v;
1901 nxt = pt->list.next;
1902 + if (pt->dev) {
1903 + if (nxt != &pt->dev->ptype_all)
1904 + goto found;
1905 +
1906 + dev = pt->dev;
1907 + for_each_netdev_continue_rcu(seq_file_net(seq), dev) {
1908 + if (!list_empty(&dev->ptype_all)) {
1909 + nxt = dev->ptype_all.next;
1910 + goto found;
1911 + }
1912 + }
1913 +
1914 + nxt = ptype_all.next;
1915 + goto ptype_all;
1916 + }
1917 +
1918 if (pt->type == htons(ETH_P_ALL)) {
1919 +ptype_all:
1920 if (nxt != &ptype_all)
1921 goto found;
1922 hash = 0;
1923 @@ -252,7 +281,8 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
1924
1925 if (v == SEQ_START_TOKEN)
1926 seq_puts(seq, "Type Device Function\n");
1927 - else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
1928 + else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) &&
1929 + (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) {
1930 if (pt->type == htons(ETH_P_ALL))
1931 seq_puts(seq, "ALL ");
1932 else
1933 diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
1934 index 0ec529d77a56e..418e939878004 100644
1935 --- a/net/ipv4/ip_output.c
1936 +++ b/net/ipv4/ip_output.c
1937 @@ -161,12 +161,19 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
1938 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
1939 iph->saddr = saddr;
1940 iph->protocol = sk->sk_protocol;
1941 - if (ip_dont_fragment(sk, &rt->dst)) {
1942 + /* Do not bother generating IPID for small packets (eg SYNACK) */
1943 + if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
1944 iph->frag_off = htons(IP_DF);
1945 iph->id = 0;
1946 } else {
1947 iph->frag_off = 0;
1948 - __ip_select_ident(net, iph, 1);
1949 + /* TCP packets here are SYNACK with fat IPv4/TCP options.
1950 + * Avoid using the hashed IP ident generator.
1951 + */
1952 + if (sk->sk_protocol == IPPROTO_TCP)
1953 + iph->id = (__force __be16)prandom_u32();
1954 + else
1955 + __ip_select_ident(net, iph, 1);
1956 }
1957
1958 if (opt && opt->opt.optlen) {
1959 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
1960 index 1c3d5d3702a10..b0f51c7cc0d90 100644
1961 --- a/net/ipv4/ping.c
1962 +++ b/net/ipv4/ping.c
1963 @@ -220,7 +220,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
1964 continue;
1965 }
1966
1967 - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
1968 + if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif &&
1969 + sk->sk_bound_dev_if != inet_sdif(skb))
1970 continue;
1971
1972 sock_hold(sk);
1973 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
1974 index 3183413ebc6c2..ddc24e57dc555 100644
1975 --- a/net/ipv4/raw.c
1976 +++ b/net/ipv4/raw.c
1977 @@ -720,6 +720,7 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1978 int ret = -EINVAL;
1979 int chk_addr_ret;
1980
1981 + lock_sock(sk);
1982 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
1983 goto out;
1984
1985 @@ -739,7 +740,9 @@ static int raw_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
1986 inet->inet_saddr = 0; /* Use device */
1987 sk_dst_reset(sk);
1988 ret = 0;
1989 -out: return ret;
1990 +out:
1991 + release_sock(sk);
1992 + return ret;
1993 }
1994
1995 /*
1996 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
1997 index 9a6f66e0e9a27..ef55489651f87 100644
1998 --- a/net/ipv6/ip6_fib.c
1999 +++ b/net/ipv6/ip6_fib.c
2000 @@ -110,7 +110,7 @@ void fib6_update_sernum(struct net *net, struct fib6_info *f6i)
2001 fn = rcu_dereference_protected(f6i->fib6_node,
2002 lockdep_is_held(&f6i->fib6_table->tb6_lock));
2003 if (fn)
2004 - fn->fn_sernum = fib6_new_sernum(net);
2005 + WRITE_ONCE(fn->fn_sernum, fib6_new_sernum(net));
2006 }
2007
2008 /*
2009 @@ -535,12 +535,13 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
2010 spin_unlock_bh(&table->tb6_lock);
2011 if (res > 0) {
2012 cb->args[4] = 1;
2013 - cb->args[5] = w->root->fn_sernum;
2014 + cb->args[5] = READ_ONCE(w->root->fn_sernum);
2015 }
2016 } else {
2017 - if (cb->args[5] != w->root->fn_sernum) {
2018 + int sernum = READ_ONCE(w->root->fn_sernum);
2019 + if (cb->args[5] != sernum) {
2020 /* Begin at the root if the tree changed */
2021 - cb->args[5] = w->root->fn_sernum;
2022 + cb->args[5] = sernum;
2023 w->state = FWS_INIT;
2024 w->node = w->root;
2025 w->skip = w->count;
2026 @@ -1276,7 +1277,7 @@ static void __fib6_update_sernum_upto_root(struct fib6_info *rt,
2027 /* paired with smp_rmb() in rt6_get_cookie_safe() */
2028 smp_wmb();
2029 while (fn) {
2030 - fn->fn_sernum = sernum;
2031 + WRITE_ONCE(fn->fn_sernum, sernum);
2032 fn = rcu_dereference_protected(fn->parent,
2033 lockdep_is_held(&rt->fib6_table->tb6_lock));
2034 }
2035 @@ -2068,8 +2069,8 @@ static int fib6_clean_node(struct fib6_walker *w)
2036 };
2037
2038 if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
2039 - w->node->fn_sernum != c->sernum)
2040 - w->node->fn_sernum = c->sernum;
2041 + READ_ONCE(w->node->fn_sernum) != c->sernum)
2042 + WRITE_ONCE(w->node->fn_sernum, c->sernum);
2043
2044 if (!c->func) {
2045 WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
2046 @@ -2433,7 +2434,7 @@ static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter,
2047 iter->w.state = FWS_INIT;
2048 iter->w.node = iter->w.root;
2049 iter->w.args = iter;
2050 - iter->sernum = iter->w.root->fn_sernum;
2051 + iter->sernum = READ_ONCE(iter->w.root->fn_sernum);
2052 INIT_LIST_HEAD(&iter->w.lh);
2053 fib6_walker_link(net, &iter->w);
2054 }
2055 @@ -2461,8 +2462,10 @@ static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
2056
2057 static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
2058 {
2059 - if (iter->sernum != iter->w.root->fn_sernum) {
2060 - iter->sernum = iter->w.root->fn_sernum;
2061 + int sernum = READ_ONCE(iter->w.root->fn_sernum);
2062 +
2063 + if (iter->sernum != sernum) {
2064 + iter->sernum = sernum;
2065 iter->w.state = FWS_INIT;
2066 iter->w.node = iter->w.root;
2067 WARN_ON(iter->w.skip);
2068 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
2069 index fd0d1cee2d3f5..878a08c40fffd 100644
2070 --- a/net/ipv6/ip6_tunnel.c
2071 +++ b/net/ipv6/ip6_tunnel.c
2072 @@ -1000,14 +1000,14 @@ int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
2073
2074 if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
2075 0, IFA_F_TENTATIVE)))
2076 - pr_warn("%s xmit: Local address not yet configured!\n",
2077 - p->name);
2078 + pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
2079 + p->name);
2080 else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
2081 !ipv6_addr_is_multicast(raddr) &&
2082 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
2083 true, 0, IFA_F_TENTATIVE)))
2084 - pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
2085 - p->name);
2086 + pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
2087 + p->name);
2088 else
2089 ret = 1;
2090 rcu_read_unlock();
2091 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
2092 index 5ef6e27e026e9..2a13394ab8541 100644
2093 --- a/net/ipv6/route.c
2094 +++ b/net/ipv6/route.c
2095 @@ -2697,7 +2697,7 @@ static void ip6_link_failure(struct sk_buff *skb)
2096 if (from) {
2097 fn = rcu_dereference(from->fib6_node);
2098 if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2099 - fn->fn_sernum = -1;
2100 + WRITE_ONCE(fn->fn_sernum, -1);
2101 }
2102 }
2103 rcu_read_unlock();
2104 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
2105 index 4bcc36e4b2ef0..d9b6f2001d006 100644
2106 --- a/net/netfilter/nf_conntrack_core.c
2107 +++ b/net/netfilter/nf_conntrack_core.c
2108 @@ -1709,15 +1709,17 @@ repeat:
2109 pr_debug("nf_conntrack_in: Can't track with proto module\n");
2110 nf_conntrack_put(&ct->ct_general);
2111 skb->_nfct = 0;
2112 - NF_CT_STAT_INC_ATOMIC(state->net, invalid);
2113 - if (ret == -NF_DROP)
2114 - NF_CT_STAT_INC_ATOMIC(state->net, drop);
2115 /* Special case: TCP tracker reports an attempt to reopen a
2116 * closed/aborted connection. We have to go back and create a
2117 * fresh conntrack.
2118 */
2119 if (ret == -NF_REPEAT)
2120 goto repeat;
2121 +
2122 + NF_CT_STAT_INC_ATOMIC(state->net, invalid);
2123 + if (ret == -NF_DROP)
2124 + NF_CT_STAT_INC_ATOMIC(state->net, drop);
2125 +
2126 ret = -ret;
2127 goto out;
2128 }
2129 diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
2130 index 921f8f45b17f4..cf0512fc648e7 100644
2131 --- a/net/netfilter/nft_payload.c
2132 +++ b/net/netfilter/nft_payload.c
2133 @@ -420,6 +420,9 @@ static int nft_payload_l4csum_offset(const struct nft_pktinfo *pkt,
2134 struct sk_buff *skb,
2135 unsigned int *l4csum_offset)
2136 {
2137 + if (pkt->xt.fragoff)
2138 + return -1;
2139 +
2140 switch (pkt->tprot) {
2141 case IPPROTO_TCP:
2142 *l4csum_offset = offsetof(struct tcphdr, check);
2143 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2144 index 6062bd5bf132b..839e1caa57a59 100644
2145 --- a/net/packet/af_packet.c
2146 +++ b/net/packet/af_packet.c
2147 @@ -1715,6 +1715,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
2148 match->prot_hook.dev = po->prot_hook.dev;
2149 match->prot_hook.func = packet_rcv_fanout;
2150 match->prot_hook.af_packet_priv = match;
2151 + match->prot_hook.af_packet_net = read_pnet(&match->net);
2152 match->prot_hook.id_match = match_fanout_group;
2153 list_add(&match->list, &fanout_list);
2154 }
2155 @@ -3294,6 +3295,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
2156 po->prot_hook.func = packet_rcv_spkt;
2157
2158 po->prot_hook.af_packet_priv = sk;
2159 + po->prot_hook.af_packet_net = sock_net(sk);
2160
2161 if (proto) {
2162 po->prot_hook.type = proto;
2163 diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
2164 index 9ff85ee8337cd..80e15310f1b29 100644
2165 --- a/net/rxrpc/call_event.c
2166 +++ b/net/rxrpc/call_event.c
2167 @@ -157,7 +157,7 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
2168 static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
2169 {
2170 struct sk_buff *skb;
2171 - unsigned long resend_at, rto_j;
2172 + unsigned long resend_at;
2173 rxrpc_seq_t cursor, seq, top;
2174 ktime_t now, max_age, oldest, ack_ts;
2175 int ix;
2176 @@ -165,10 +165,8 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
2177
2178 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
2179
2180 - rto_j = call->peer->rto_j;
2181 -
2182 now = ktime_get_real();
2183 - max_age = ktime_sub(now, jiffies_to_usecs(rto_j));
2184 + max_age = ktime_sub(now, jiffies_to_usecs(call->peer->rto_j));
2185
2186 spin_lock_bh(&call->lock);
2187
2188 @@ -213,7 +211,7 @@ static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
2189 }
2190
2191 resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
2192 - resend_at += jiffies + rto_j;
2193 + resend_at += jiffies + rxrpc_get_rto_backoff(call->peer, retrans);
2194 WRITE_ONCE(call->resend_at, resend_at);
2195
2196 if (unacked)
2197 diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
2198 index f8b632a5c6197..a4a6f8ee07201 100644
2199 --- a/net/rxrpc/output.c
2200 +++ b/net/rxrpc/output.c
2201 @@ -426,7 +426,7 @@ done:
2202 if (call->peer->rtt_count > 1) {
2203 unsigned long nowj = jiffies, ack_lost_at;
2204
2205 - ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
2206 + ack_lost_at = rxrpc_get_rto_backoff(call->peer, false);
2207 ack_lost_at += nowj;
2208 WRITE_ONCE(call->ack_lost_at, ack_lost_at);
2209 rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
2210 diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
2211 index 37792675ed571..3b825942e2f67 100644
2212 --- a/net/sunrpc/rpc_pipe.c
2213 +++ b/net/sunrpc/rpc_pipe.c
2214 @@ -599,9 +599,9 @@ static int __rpc_rmdir(struct inode *dir, struct dentry *dentry)
2215
2216 dget(dentry);
2217 ret = simple_rmdir(dir, dentry);
2218 + d_drop(dentry);
2219 if (!ret)
2220 fsnotify_rmdir(dir, dentry);
2221 - d_delete(dentry);
2222 dput(dentry);
2223 return ret;
2224 }
2225 @@ -612,9 +612,9 @@ static int __rpc_unlink(struct inode *dir, struct dentry *dentry)
2226
2227 dget(dentry);
2228 ret = simple_unlink(dir, dentry);
2229 + d_drop(dentry);
2230 if (!ret)
2231 fsnotify_unlink(dir, dentry);
2232 - d_delete(dentry);
2233 dput(dentry);
2234 return ret;
2235 }