Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0195-5.4.96-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 42073 byte(s)
-add missing
diff --git a/Makefile b/Makefile
index aa3c2e834442e..7a47a2594f957 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 95
+SUBLEVEL = 96
EXTRAVERSION =
NAME = Kleptomaniac Octopus

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 51d867cf146c1..6c295a231882a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -247,11 +247,11 @@ static inline const void *__tag_set(const void *addr, u8 tag)


/*
- * The linear kernel range starts at the bottom of the virtual address
- * space. Testing the top bit for the start of the region is a
- * sufficient check and avoids having to worry about the tag.
+ * Check whether an arbitrary address is within the linear map, which
+ * lives in the [PAGE_OFFSET, PAGE_END) interval at the bottom of the
+ * kernel's TTBR1 address range.
 */
-#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1)))
+#define __is_lm_address(addr) (((u64)(addr) ^ PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr) ((addr) - kimage_voffset)
@@ -332,7 +332,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */

#define virt_addr_valid(addr) ({ \
- __typeof__(addr) __addr = addr; \
+ __typeof__(addr) __addr = __tag_reset(addr); \
__is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \
})

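The new __is_lm_address() is a single-comparison range check: because the linear map's size (PAGE_END - PAGE_OFFSET) is a power of two and PAGE_OFFSET is aligned to it, XOR-ing an address with PAGE_OFFSET yields the offset into the map for in-range addresses, and a value >= the map size for anything outside it. A minimal user-space sketch with stand-in constants (not the kernel's actual configuration, which depends on the runtime VA size):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* Stand-ins for PAGE_OFFSET and the map size, chosen so that
	 * base is aligned to the power-of-two size. */
	const uint64_t base = 0xffff800000000000ULL;
	const uint64_t size = 1ULL << 46;

	assert(((base ^ base) < size));              /* first byte: inside */
	assert((((base + size - 1) ^ base) < size)); /* last byte: inside */
	assert(!(((base - 1) ^ base) < size));       /* just below: outside */
	assert(!(((base + size) ^ base) < size));    /* just above: outside */
	return 0;
}
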
diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c
index 67a9ba9eaa96b..cde44c13dda1b 100644
--- a/arch/arm64/mm/physaddr.c
+++ b/arch/arm64/mm/physaddr.c
@@ -9,7 +9,7 @@

phys_addr_t __virt_to_phys(unsigned long x)
{
- WARN(!__is_lm_address(x),
+ WARN(!__is_lm_address(__tag_reset(x)),
"virt_to_phys used for non-linear address: %pK (%pS)\n",
(void *)x,
(void *)x);
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index 86f20d520a079..b40d0295d8129 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -88,7 +88,7 @@ static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
* think of extending them - you will be slapped with a stinking trout or a frozen
* shark will reach you, wherever you are! You've been warned.
*/
-static inline unsigned long long notrace __rdmsr(unsigned int msr)
+static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
DECLARE_ARGS(val, low, high);

@@ -100,7 +100,7 @@ static inline unsigned long long notrace __rdmsr(unsigned int msr)
return EAX_EDX_VAL(val, low, high);
}

-static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high)
+static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
asm volatile("1: wrmsr\n"
"2:\n"
diff --git a/block/blk-core.c b/block/blk-core.c
index d2213220099d3..5808baa950c35 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -886,11 +886,14 @@ generic_make_request_checks(struct bio *bio)
}

/*
- * For a REQ_NOWAIT based request, return -EOPNOTSUPP
- * if queue is not a request based queue.
+ * Non-mq queues do not honor REQ_NOWAIT, so complete a bio
+ * with BLK_STS_AGAIN status in order to catch -EAGAIN and
+ * to give a chance to the caller to repeat request gracefully.
*/
- if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q))
- goto not_supported;
+ if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_mq(q)) {
+ status = BLK_STS_AGAIN;
+ goto end_io;
+ }

if (should_fail_bio(bio))
goto end_io;
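
With this change, a REQ_NOWAIT bio on a queue that cannot honor it completes with BLK_STS_AGAIN, which user space observes as EAGAIN (a retryable condition) rather than EOPNOTSUPP. A sketch of how that surfaces through preadv2() with RWF_NOWAIT (the device path is hypothetical):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("/dev/some-bio-device", O_RDONLY); /* hypothetical path */

	if (fd < 0)
		return 1;
	/* EAGAIN now means "could not be done without waiting", so the
	 * caller may simply retry, possibly without RWF_NOWAIT. */
	if (preadv2(fd, &iov, 1, 0, RWF_NOWAIT) < 0 && errno == EAGAIN)
		fprintf(stderr, "nowait not satisfiable here: safe to retry\n");
	close(fd);
	return 0;
}
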
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index d831a61e0010e..383c7029d3cee 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -174,6 +174,8 @@ struct acpi_thermal {
int tz_enabled;
int kelvin_offset;
struct work_struct thermal_check_work;
+ struct mutex thermal_check_lock;
+ refcount_t thermal_check_count;
};

/* --------------------------------------------------------------------------
@@ -494,17 +496,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz)
return 0;
}

-static void acpi_thermal_check(void *data)
-{
- struct acpi_thermal *tz = data;
-
- if (!tz->tz_enabled)
- return;
-
- thermal_zone_device_update(tz->thermal_zone,
- THERMAL_EVENT_UNSPECIFIED);
-}
-
/* sys I/F for generic thermal sysfs support */

static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp)
@@ -538,6 +529,8 @@ static int thermal_get_mode(struct thermal_zone_device *thermal,
return 0;
}

+static void acpi_thermal_check_fn(struct work_struct *work);
+
static int thermal_set_mode(struct thermal_zone_device *thermal,
enum thermal_device_mode mode)
{
@@ -563,7 +556,7 @@ static int thermal_set_mode(struct thermal_zone_device *thermal,
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
"%s kernel ACPI thermal control\n",
tz->tz_enabled ? "Enable" : "Disable"));
- acpi_thermal_check(tz);
+ acpi_thermal_check_fn(&tz->thermal_check_work);
}
return 0;
}
@@ -932,6 +925,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz)
Driver Interface
-------------------------------------------------------------------------- */

+static void acpi_queue_thermal_check(struct acpi_thermal *tz)
+{
+ if (!work_pending(&tz->thermal_check_work))
+ queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+}
+
static void acpi_thermal_notify(struct acpi_device *device, u32 event)
{
struct acpi_thermal *tz = acpi_driver_data(device);
@@ -942,17 +941,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event)

switch (event) {
case ACPI_THERMAL_NOTIFY_TEMPERATURE:
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
break;
case ACPI_THERMAL_NOTIFY_THRESHOLDS:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
case ACPI_THERMAL_NOTIFY_DEVICES:
acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES);
- acpi_thermal_check(tz);
+ acpi_queue_thermal_check(tz);
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event, 0);
break;
@@ -1052,7 +1051,27 @@ static void acpi_thermal_check_fn(struct work_struct *work)
{
struct acpi_thermal *tz = container_of(work, struct acpi_thermal,
thermal_check_work);
- acpi_thermal_check(tz);
+
+ if (!tz->tz_enabled)
+ return;
+ /*
+ * In general, it is not sufficient to check the pending bit, because
+ * subsequent instances of this function may be queued after one of them
+ * has started running (e.g. if _TMP sleeps). Avoid bailing out if just
+ * one of them is running, though, because it may have done the actual
+ * check some time ago, so allow at least one of them to block on the
+ * mutex while another one is running the update.
+ */
+ if (!refcount_dec_not_one(&tz->thermal_check_count))
+ return;
+
+ mutex_lock(&tz->thermal_check_lock);
+
+ thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED);
+
+ refcount_inc(&tz->thermal_check_count);
+
+ mutex_unlock(&tz->thermal_check_lock);
}

static int acpi_thermal_add(struct acpi_device *device)
@@ -1084,6 +1103,8 @@ static int acpi_thermal_add(struct acpi_device *device)
if (result)
goto free_memory;

+ refcount_set(&tz->thermal_check_count, 3);
+ mutex_init(&tz->thermal_check_lock);
INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn);

pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device),
@@ -1149,7 +1170,7 @@ static int acpi_thermal_resume(struct device *dev)
tz->state.active |= tz->trips.active[i].flags.enabled;
}

- queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work);
+ acpi_queue_thermal_check(tz);

return AE_OK;
}
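
The refcount_t/mutex pair added above is a throttle rather than a reference count: refcount_dec_not_one() only succeeds while the counter is above 1, so with the initial value of 3 at most one instance runs the update while at most one more waits on the mutex; any further queued instances bail out immediately. A user-space sketch of that gate, using C11 atomics as a stand-in for the kernel's refcount_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for refcount_dec_not_one(): atomically decrement unless the
 * counter is 1, returning false in that case. */
static bool dec_not_one(atomic_uint *v)
{
	unsigned int old = atomic_load(v);

	while (old != 1) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_uint count = 3; /* same initial value the patch uses */

	/* Three back-to-back "instances": two get through (3->2->1) and
	 * the third sees 1 and bails, so at most one update runs while
	 * one more blocks on the mutex. Each finisher increments back. */
	for (int i = 0; i < 3; i++)
		printf("instance %d: %s\n", i,
		       dec_not_one(&count) ? "proceeds" : "skipped");
	return 0;
}
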
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 959eb075d11ed..c18f39271b034 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1914,6 +1914,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting
initial_link_setting;
uint32_t link_bw;

+ if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap))
+ return false;
+
/* search for the minimum link setting that:
* 1. is supported according to the link training result
* 2. could support the b/w requested by the timing
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index bb7add5ea2273..a6d5beada6634 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -257,7 +257,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.num_banks = 8,
.num_chans = 4,
.vmm_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
+ .dram_clock_change_latency_us = 11.72,
.return_bus_width_bytes = 64,
.dispclk_dppclk_vco_speed_mhz = 3600,
.xfc_bus_transport_time_us = 4,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b16aea0e39992..6dd29bad1609f 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -421,15 +421,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds)
/* Find our integrated MDIO bus node */
dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio");
priv->master_mii_bus = of_mdio_find_bus(dn);
- if (!priv->master_mii_bus)
+ if (!priv->master_mii_bus) {
+ of_node_put(dn);
return -EPROBE_DEFER;
+ }

get_device(&priv->master_mii_bus->dev);
priv->master_mii_dn = dn;

priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
- if (!priv->slave_mii_bus)
+ if (!priv->slave_mii_bus) {
+ of_node_put(dn);
return -ENOMEM;
+ }

priv->slave_mii_bus->priv = priv;
priv->slave_mii_bus->name = "sf2 slave mii";
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 9040340fad198..c3079f436f6d7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -4752,6 +4752,12 @@ static void ibmvnic_tasklet(void *data)
while (!done) {
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
+ /* This barrier makes sure ibmvnic_next_crq()'s
+ * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded
+ * before ibmvnic_handle_crq()'s
+ * switch(gen_crq->first) and switch(gen_crq->cmd).
+ */
+ dma_rmb();
ibmvnic_handle_crq(crq, adapter);
crq->generic.first = 0;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7a964271959d8..c2cabd77884bf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1295,8 +1295,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
}

length = (io.nblocks + 1) << ns->lba_shift;
- meta_len = (io.nblocks + 1) * ns->ms;
- metadata = nvme_to_user_ptr(io.metadata);
+
+ if ((io.control & NVME_RW_PRINFO_PRACT) &&
+ ns->ms == sizeof(struct t10_pi_tuple)) {
+ /*
+ * Protection information is stripped/inserted by the
+ * controller.
+ */
+ if (nvme_to_user_ptr(io.metadata))
+ return -EINVAL;
+ meta_len = 0;
+ metadata = NULL;
+ } else {
+ meta_len = (io.nblocks + 1) * ns->ms;
+ metadata = nvme_to_user_ptr(io.metadata);
+ }

if (ns->ext) {
length += meta_len;
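
The new branch is keyed to the namespace's metadata size being exactly sizeof(struct t10_pi_tuple): when PRACT is set and the per-block metadata is just the 8-byte protection tuple, the controller inserts and strips it itself, so the host must not supply a metadata buffer. For reference, the tuple's layout (field names as in the kernel's include/linux/t10-pi.h, shown here with fixed-width user-space types):

#include <stdint.h>

/* T10 Protection Information tuple: an 8-byte per-block trailer of
 * guard, application tag and reference tag; all fields big-endian. */
struct t10_pi_tuple {
	uint16_t guard_tag; /* CRC16 of the data block */
	uint16_t app_tag;   /* application-defined */
	uint32_t ref_tag;   /* typically the low 32 bits of the LBA */
};

_Static_assert(sizeof(struct t10_pi_tuple) == 8, "DIF tuple is 8 bytes");
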
diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c
index 5baf64dfb24de..1bebad36bf2e5 100644
--- a/drivers/phy/motorola/phy-cpcap-usb.c
+++ b/drivers/phy/motorola/phy-cpcap-usb.c
@@ -625,35 +625,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev)
generic_phy = devm_phy_create(ddata->dev, NULL, &ops);
if (IS_ERR(generic_phy)) {
error = PTR_ERR(generic_phy);
- return PTR_ERR(generic_phy);
+ goto out_reg_disable;
}

phy_set_drvdata(generic_phy, ddata);

phy_provider = devm_of_phy_provider_register(ddata->dev,
of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
+ if (IS_ERR(phy_provider)) {
+ error = PTR_ERR(phy_provider);
+ goto out_reg_disable;
+ }

error = cpcap_usb_init_optional_pins(ddata);
if (error)
- return error;
+ goto out_reg_disable;

cpcap_usb_init_optional_gpios(ddata);

error = cpcap_usb_init_iio(ddata);
if (error)
- return error;
+ goto out_reg_disable;

error = cpcap_usb_init_interrupts(pdev, ddata);
if (error)
- return error;
+ goto out_reg_disable;

usb_add_phy_dev(&ddata->phy);
atomic_set(&ddata->active, 1);
schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1));

return 0;
+
+out_reg_disable:
+ regulator_disable(ddata->vusb);
+
+ return error;
}

static int cpcap_usb_phy_remove(struct platform_device *pdev)
diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c
index 37035dca469cf..d4fc2cbf78703 100644
--- a/drivers/platform/x86/intel-vbtn.c
+++ b/drivers/platform/x86/intel-vbtn.c
@@ -203,6 +203,12 @@ static const struct dmi_system_id dmi_switches_allow_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"),
+ },
+ },
{} /* Array terminator */
};

diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 1e072dbba30d6..7ed1189a7200c 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -231,6 +231,16 @@ static const struct ts_dmi_data digma_citi_e200_data = {
.properties = digma_citi_e200_props,
};

+static const struct property_entry estar_beauty_hd_props[] = {
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ { }
+};
+
+static const struct ts_dmi_data estar_beauty_hd_data = {
+ .acpi_name = "GDIX1001:00",
+ .properties = estar_beauty_hd_props,
+};
+
static const struct property_entry gp_electronic_t701_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 960),
PROPERTY_ENTRY_U32("touchscreen-size-y", 640),
@@ -747,6 +757,14 @@ static const struct dmi_system_id touchscreen_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
},
},
+ {
+ /* Estar Beauty HD (MID 7316R) */
+ .driver_data = (void *)&estar_beauty_hd_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Estar"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"),
+ },
+ },
{
/* GP-electronic T701 */
.driver_data = (void *)&gp_electronic_t701_data,
diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c
index 522636e946282..c8bf8c7ada6a7 100644
--- a/drivers/scsi/fnic/vnic_dev.c
+++ b/drivers/scsi/fnic/vnic_dev.c
@@ -444,7 +444,8 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index);
if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
pr_err("error in devcmd2 init");
- return -ENODEV;
+ err = -ENODEV;
+ goto err_free_wq;
}

/*
@@ -460,7 +461,7 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)
err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring,
DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE);
if (err)
- goto err_free_wq;
+ goto err_disable_wq;

vdev->devcmd2->result =
(struct devcmd2_result *) vdev->devcmd2->results_ring.descs;
@@ -481,8 +482,9 @@ int vnic_dev_init_devcmd2(struct vnic_dev *vdev)

err_free_desc_ring:
vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring);
-err_free_wq:
+err_disable_wq:
vnic_wq_disable(&vdev->devcmd2->wq);
+err_free_wq:
vnic_wq_free(&vdev->devcmd2->wq);
err_free_devcmd2:
kfree(vdev->devcmd2);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 8a76284b59b08..523809a8a2323 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -2881,8 +2881,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev)
unsigned long flags = 0;

spin_lock_irqsave(shost->host_lock, flags);
- if (sdev->type == TYPE_DISK)
+ if (sdev->type == TYPE_DISK) {
sdev->allow_restart = 1;
+ blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
+ }
spin_unlock_irqrestore(shost->host_lock, flags);
return 0;
}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 52e8666598531..e5b18e5d46dac 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -1619,8 +1619,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
rc = fc_exch_done_locked(ep);
WARN_ON(fc_seq_exch(sp) != ep);
spin_unlock_bh(&ep->ex_lock);
- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already,"
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }
}

/*
@@ -1639,6 +1644,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
if (!fc_invoke_resp(ep, sp, fp))
fc_frame_free(fp);

+skip_resp:
fc_exch_release(ep);
return;
rel:
@@ -1895,10 +1901,16 @@ static void fc_exch_reset(struct fc_exch *ep)

fc_exch_hold(ep);

- if (!rc)
+ if (!rc) {
fc_exch_delete(ep);
+ } else {
+ FC_EXCH_DBG(ep, "ep is completed already,"
+ "hence skip calling the resp\n");
+ goto skip_resp;
+ }

fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+skip_resp:
fc_seq_set_resp(sp, NULL, ep->arg);
fc_exch_release(ep);
}
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index d4d1104fac991..8cd0a87764dfd 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport)
res = mutex_lock_interruptible(&rport->mutex);
if (res)
goto out;
- scsi_target_block(&shost->shost_gendev);
+ if (rport->state != SRP_RPORT_FAIL_FAST)
+ /*
+ * sdev state must be SDEV_TRANSPORT_OFFLINE, transition
+ * to SDEV_BLOCK is illegal. Calling scsi_target_unblock()
+ * later is ok though, scsi_internal_device_unblock_nowait()
+ * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK.
+ */
+ scsi_target_block(&shost->shost_gendev);
res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV;
pr_debug("%s (state %d): transport.reconnect() returned %d\n",
dev_name(&shost->shost_gendev), rport->state, res);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 86e280edf8040..7f644a58db511 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -347,33 +347,10 @@ static int add_prelim_ref(const struct btrfs_fs_info *fs_info,
return -ENOMEM;

ref->root_id = root_id;
- if (key) {
+ if (key)
ref->key_for_search = *key;
- /*
- * We can often find data backrefs with an offset that is too
- * large (>= LLONG_MAX, maximum allowed file offset) due to
- * underflows when subtracting a file's offset with the data
- * offset of its corresponding extent data item. This can
- * happen for example in the clone ioctl.
- * So if we detect such case we set the search key's offset to
- * zero to make sure we will find the matching file extent item
- * at add_all_parents(), otherwise we will miss it because the
- * offset taken form the backref is much larger then the offset
- * of the file extent item. This can make us scan a very large
- * number of file extent items, but at least it will not make
- * us miss any.
- * This is an ugly workaround for a behaviour that should have
- * never existed, but it does and a fix for the clone ioctl
- * would touch a lot of places, cause backwards incompatibility
- * and would not fix the problem for extents cloned with older
- * kernels.
- */
- if (ref->key_for_search.type == BTRFS_EXTENT_DATA_KEY &&
- ref->key_for_search.offset >= LLONG_MAX)
- ref->key_for_search.offset = 0;
- } else {
+ else
memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
- }

ref->inode_list = NULL;
ref->level = level;
@@ -409,10 +386,36 @@ static int add_indirect_ref(const struct btrfs_fs_info *fs_info,
wanted_disk_byte, count, sc, gfp_mask);
}

+static int is_shared_data_backref(struct preftrees *preftrees, u64 bytenr)
+{
+ struct rb_node **p = &preftrees->direct.root.rb_root.rb_node;
+ struct rb_node *parent = NULL;
+ struct prelim_ref *ref = NULL;
+ struct prelim_ref target = {0};
+ int result;
+
+ target.parent = bytenr;
+
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct prelim_ref, rbnode);
+ result = prelim_ref_compare(ref, &target);
+
+ if (result < 0)
+ p = &(*p)->rb_left;
+ else if (result > 0)
+ p = &(*p)->rb_right;
+ else
+ return 1;
+ }
+ return 0;
+}
+
static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
- struct ulist *parents, struct prelim_ref *ref,
+ struct ulist *parents,
+ struct preftrees *preftrees, struct prelim_ref *ref,
int level, u64 time_seq, const u64 *extent_item_pos,
- u64 total_refs, bool ignore_offset)
+ bool ignore_offset)
{
int ret = 0;
int slot;
@@ -424,6 +427,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
u64 disk_byte;
u64 wanted_disk_byte = ref->wanted_disk_byte;
u64 count = 0;
+ u64 data_offset;

if (level != 0) {
eb = path->nodes[level];
@@ -434,18 +438,26 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
}

/*
- * We normally enter this function with the path already pointing to
- * the first item to check. But sometimes, we may enter it with
- * slot==nritems. In that case, go to the next leaf before we continue.
+ * 1. We normally enter this function with the path already pointing to
+ * the first item to check. But sometimes, we may enter it with
+ * slot == nritems.
+ * 2. We are searching for normal backref but bytenr of this leaf
+ * matches shared data backref
+ * 3. The leaf owner is not equal to the root we are searching
+ *
+ * For these cases, go to the next leaf before we continue.
*/
- if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+ eb = path->nodes[0];
+ if (path->slots[0] >= btrfs_header_nritems(eb) ||
+ is_shared_data_backref(preftrees, eb->start) ||
+ ref->root_id != btrfs_header_owner(eb)) {
if (time_seq == SEQ_LAST)
ret = btrfs_next_leaf(root, path);
else
ret = btrfs_next_old_leaf(root, path, time_seq);
}

- while (!ret && count < total_refs) {
+ while (!ret && count < ref->count) {
eb = path->nodes[0];
slot = path->slots[0];

@@ -455,13 +467,31 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
key.type != BTRFS_EXTENT_DATA_KEY)
break;

+ /*
+ * We are searching for normal backref but bytenr of this leaf
+ * matches shared data backref, OR
+ * the leaf owner is not equal to the root we are searching for
+ */
+ if (slot == 0 &&
+ (is_shared_data_backref(preftrees, eb->start) ||
+ ref->root_id != btrfs_header_owner(eb))) {
+ if (time_seq == SEQ_LAST)
+ ret = btrfs_next_leaf(root, path);
+ else
+ ret = btrfs_next_old_leaf(root, path, time_seq);
+ continue;
+ }
fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+ data_offset = btrfs_file_extent_offset(eb, fi);

if (disk_byte == wanted_disk_byte) {
eie = NULL;
old = NULL;
- count++;
+ if (ref->key_for_search.offset == key.offset - data_offset)
+ count++;
+ else
+ goto next;
if (extent_item_pos) {
ret = check_extent_in_eb(&key, eb, fi,
*extent_item_pos,
@@ -502,9 +532,9 @@ next:
*/
static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 time_seq,
+ struct preftrees *preftrees,
struct prelim_ref *ref, struct ulist *parents,
- const u64 *extent_item_pos, u64 total_refs,
- bool ignore_offset)
+ const u64 *extent_item_pos, bool ignore_offset)
{
struct btrfs_root *root;
struct btrfs_key root_key;
@@ -513,6 +543,7 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
int root_level;
int level = ref->level;
int index;
+ struct btrfs_key search_key = ref->key_for_search;

root_key.objectid = ref->root_id;
root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -545,13 +576,33 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
goto out;
}

+ /*
+ * We can often find data backrefs with an offset that is too large
+ * (>= LLONG_MAX, maximum allowed file offset) due to underflows when
+ * subtracting a file's offset with the data offset of its
+ * corresponding extent data item. This can happen for example in the
+ * clone ioctl.
+ *
+ * So if we detect such case we set the search key's offset to zero to
+ * make sure we will find the matching file extent item at
+ * add_all_parents(), otherwise we will miss it because the offset
+ * taken form the backref is much larger then the offset of the file
+ * extent item. This can make us scan a very large number of file
+ * extent items, but at least it will not make us miss any.
+ *
+ * This is an ugly workaround for a behaviour that should have never
+ * existed, but it does and a fix for the clone ioctl would touch a lot
+ * of places, cause backwards incompatibility and would not fix the
+ * problem for extents cloned with older kernels.
+ */
+ if (search_key.type == BTRFS_EXTENT_DATA_KEY &&
+ search_key.offset >= LLONG_MAX)
+ search_key.offset = 0;
path->lowest_level = level;
if (time_seq == SEQ_LAST)
- ret = btrfs_search_slot(NULL, root, &ref->key_for_search, path,
- 0, 0);
+ ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
else
- ret = btrfs_search_old_slot(root, &ref->key_for_search, path,
- time_seq);
+ ret = btrfs_search_old_slot(root, &search_key, path, time_seq);

/* root node has been locked, we can release @subvol_srcu safely here */
srcu_read_unlock(&fs_info->subvol_srcu, index);
@@ -574,8 +625,8 @@ static int resolve_indirect_ref(struct btrfs_fs_info *fs_info,
eb = path->nodes[level];
}

- ret = add_all_parents(root, path, parents, ref, level, time_seq,
- extent_item_pos, total_refs, ignore_offset);
+ ret = add_all_parents(root, path, parents, preftrees, ref, level,
+ time_seq, extent_item_pos, ignore_offset);
out:
path->lowest_level = 0;
btrfs_release_path(path);
@@ -609,7 +660,7 @@ unode_aux_to_inode_list(struct ulist_node *node)
static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 time_seq,
struct preftrees *preftrees,
- const u64 *extent_item_pos, u64 total_refs,
+ const u64 *extent_item_pos,
struct share_check *sc, bool ignore_offset)
{
int err;
@@ -653,9 +704,9 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
ret = BACKREF_FOUND_SHARED;
goto out;
}
- err = resolve_indirect_ref(fs_info, path, time_seq, ref,
- parents, extent_item_pos,
- total_refs, ignore_offset);
+ err = resolve_indirect_ref(fs_info, path, time_seq, preftrees,
+ ref, parents, extent_item_pos,
+ ignore_offset);
/*
* we can only tolerate ENOENT,otherwise,we should catch error
* and return directly.
@@ -758,8 +809,7 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
*/
static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
struct btrfs_delayed_ref_head *head, u64 seq,
- struct preftrees *preftrees, u64 *total_refs,
- struct share_check *sc)
+ struct preftrees *preftrees, struct share_check *sc)
{
struct btrfs_delayed_ref_node *node;
struct btrfs_delayed_extent_op *extent_op = head->extent_op;
@@ -793,7 +843,6 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
default:
BUG();
}
- *total_refs += count;
switch (node->type) {
case BTRFS_TREE_BLOCK_REF_KEY: {
/* NORMAL INDIRECT METADATA backref */
@@ -876,7 +925,7 @@ out:
static int add_inline_refs(const struct btrfs_fs_info *fs_info,
struct btrfs_path *path, u64 bytenr,
int *info_level, struct preftrees *preftrees,
- u64 *total_refs, struct share_check *sc)
+ struct share_check *sc)
{
int ret = 0;
int slot;
@@ -900,7 +949,6 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,

ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
flags = btrfs_extent_flags(leaf, ei);
- *total_refs += btrfs_extent_refs(leaf, ei);
btrfs_item_key_to_cpu(leaf, &found_key, slot);

ptr = (unsigned long)(ei + 1);
@@ -1125,8 +1173,6 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
struct prelim_ref *ref;
struct rb_node *node;
struct extent_inode_elem *eie = NULL;
- /* total of both direct AND indirect refs! */
- u64 total_refs = 0;
struct preftrees preftrees = {
.direct = PREFTREE_INIT,
.indirect = PREFTREE_INIT,
@@ -1195,7 +1241,7 @@ again:
}
spin_unlock(&delayed_refs->lock);
ret = add_delayed_refs(fs_info, head, time_seq,
- &preftrees, &total_refs, sc);
+ &preftrees, sc);
mutex_unlock(&head->mutex);
if (ret)
goto out;
@@ -1216,8 +1262,7 @@ again:
(key.type == BTRFS_EXTENT_ITEM_KEY ||
key.type == BTRFS_METADATA_ITEM_KEY)) {
ret = add_inline_refs(fs_info, path, bytenr,
- &info_level, &preftrees,
- &total_refs, sc);
+ &info_level, &preftrees, sc);
if (ret)
goto out;
ret = add_keyed_refs(fs_info, path, bytenr, info_level,
@@ -1236,7 +1281,7 @@ again:
WARN_ON(!RB_EMPTY_ROOT(&preftrees.indirect_missing_keys.root.rb_root));

ret = resolve_indirect_refs(fs_info, path, time_seq, &preftrees,
- extent_item_pos, total_refs, sc, ignore_offset);
+ extent_item_pos, sc, ignore_offset);
if (ret)
goto out;

diff --git a/fs/udf/super.c b/fs/udf/super.c
index 4aba4878ed967..8bb001c7927f0 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -705,6 +705,7 @@ static int udf_check_vsd(struct super_block *sb)
struct buffer_head *bh = NULL;
int nsr = 0;
struct udf_sb_info *sbi;
+ loff_t session_offset;

sbi = UDF_SB(sb);
if (sb->s_blocksize < sizeof(struct volStructDesc))
@@ -712,7 +713,8 @@ static int udf_check_vsd(struct super_block *sb)
else
sectorsize = sb->s_blocksize;

- sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
+ session_offset = (loff_t)sbi->s_session << sb->s_blocksize_bits;
+ sector += session_offset;

udf_debug("Starting at sector %u (%lu byte sectors)\n",
(unsigned int)(sector >> sb->s_blocksize_bits),
@@ -757,8 +759,7 @@ static int udf_check_vsd(struct super_block *sb)

if (nsr > 0)
return 1;
- else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
- VSD_FIRST_SECTOR_OFFSET)
+ else if (!bh && sector - session_offset == VSD_FIRST_SECTOR_OFFSET)
return -1;
else
return 0;
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 0f9da966934e2..c7108ce5a051c 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -31,6 +31,9 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
unsigned int cpu,
const char *namefmt);

+void kthread_set_per_cpu(struct task_struct *k, int cpu);
+bool kthread_is_per_cpu(struct task_struct *k);
+
/**
* kthread_run - create and wake a thread.
* @threadfn: the function to run until signal_pending(current).
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4b38ba101b9b7..37b51456784f8 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -619,6 +619,7 @@ static inline void tcp_clear_xmit_timers(struct sock *sk)

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);
+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index e51f0006057df..1d4c98a19043f 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -469,11 +469,36 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
return p;
kthread_bind(p, cpu);
/* CPU hotplug need to bind once again when unparking the thread. */
- set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
to_kthread(p)->cpu = cpu;
return p;
}

+void kthread_set_per_cpu(struct task_struct *k, int cpu)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return;
+
+ WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
+
+ if (cpu < 0) {
+ clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+ return;
+ }
+
+ kthread->cpu = cpu;
+ set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
+bool kthread_is_per_cpu(struct task_struct *k)
+{
+ struct kthread *kthread = to_kthread(k);
+ if (!kthread)
+ return false;
+
+ return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
+}
+
/**
* kthread_unpark - unpark a thread created by kthread_create().
* @k: thread created by kthread_create().
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 2efe1e206167c..f25208e8df836 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -188,6 +188,7 @@ __smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu)
kfree(td);
return PTR_ERR(tsk);
}
+ kthread_set_per_cpu(tsk, cpu);
/*
* Park the thread so that it could start right on the CPU
* when it is available.
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 28e52657e0930..29c36c0290623 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1847,12 +1847,6 @@ static void worker_attach_to_pool(struct worker *worker,
{
mutex_lock(&wq_pool_attach_mutex);

- /*
- * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
- * online CPUs. It'll be re-applied when any of the CPUs come up.
- */
- set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
-
/*
* The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
* stable across this function. See the comments above the flag
@@ -1861,6 +1855,9 @@ static void worker_attach_to_pool(struct worker *worker,
if (pool->flags & POOL_DISASSOCIATED)
worker->flags |= WORKER_UNBOUND;

+ if (worker->rescue_wq)
+ set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
+
list_add_tail(&worker->node, &pool->workers);
worker->pool = pool;

diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index bfe7bdd4c3406..98c396769be94 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -80,11 +80,11 @@ static void est_timer(struct timer_list *t)
u64 rate, brate;

est_fetch_counters(est, &b);
- brate = (b.bytes - est->last_bytes) << (10 - est->ewma_log - est->intvl_log);
- brate -= (est->avbps >> est->ewma_log);
+ brate = (b.bytes - est->last_bytes) << (10 - est->intvl_log);
+ brate = (brate >> est->ewma_log) - (est->avbps >> est->ewma_log);

- rate = (u64)(b.packets - est->last_packets) << (10 - est->ewma_log - est->intvl_log);
- rate -= (est->avpps >> est->ewma_log);
+ rate = (u64)(b.packets - est->last_packets) << (10 - est->intvl_log);
+ rate = (rate >> est->ewma_log) - (est->avpps >> est->ewma_log);

write_seqcount_begin(&est->seq);
est->avbps += brate;
@@ -143,6 +143,9 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
if (parm->interval < -2 || parm->interval > 3)
return -EINVAL;

+ if (parm->ewma_log == 0 || parm->ewma_log >= 31)
+ return -EINVAL;
+
est = kzalloc(sizeof(*est), GFP_KERNEL);
if (!est)
return -ENOBUFS;
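
Two things happen in this hunk: gen_new_estimator() now rejects ewma_log values of 0 or >= 31, and est_timer() no longer folds ewma_log into the left shift, where 10 - ewma_log - intvl_log could go negative (an undefined shift count) for large ewma_log. The update itself is the usual exponentially weighted moving average, avg += (rate - avg) >> ewma_log, with the two terms shifted separately. A standalone sketch of that step (the sample numbers are made up):

#include <stdint.h>
#include <stdio.h>

static uint64_t ewma_step(uint64_t avg, uint64_t rate, unsigned int ewma_log)
{
	/* avg += (rate - avg) >> ewma_log, split into two shifts as in the
	 * patched est_timer(); unsigned wraparound keeps the subtraction
	 * correct even while avg > rate. */
	return avg + ((rate >> ewma_log) - (avg >> ewma_log));
}

int main(void)
{
	uint64_t avg = 0;
	const uint64_t rates[] = { 1000, 1000, 4000, 4000, 4000, 4000 };

	/* The average chases the measured rate with gain 1/2^ewma_log. */
	for (unsigned int i = 0; i < 6; i++) {
		avg = ewma_step(avg, rates[i], 2);
		printf("avg after sample %u: %llu\n", i,
		       (unsigned long long)avg);
	}
	return 0;
}
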
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 26305aa88651f..a1768ded2d545 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3295,6 +3295,7 @@ static void tcp_ack_probe(struct sock *sk)
} else {
unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);

+ when = tcp_clamp_probe0_to_user_timeout(sk, when);
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
when, TCP_RTO_MAX, NULL);
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5da6ffce390c2..d0774b4e934d6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -3850,6 +3850,8 @@ void tcp_send_probe0(struct sock *sk)
*/
timeout = TCP_RESOURCE_PROBE_INTERVAL;
}
+
+ timeout = tcp_clamp_probe0_to_user_timeout(sk, timeout);
tcp_reset_xmit_timer(sk, ICSK_TIME_PROBE0, timeout, TCP_RTO_MAX, NULL);
}

diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 7fcd116fbd378..fa2ae96ecdc40 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -40,6 +40,24 @@ static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}

+u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ u32 remaining;
+ s32 elapsed;
+
+ if (!icsk->icsk_user_timeout || !icsk->icsk_probes_tstamp)
+ return when;
+
+ elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
+ if (unlikely(elapsed < 0))
+ elapsed = 0;
+ remaining = msecs_to_jiffies(icsk->icsk_user_timeout) - elapsed;
+ remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);
+
+ return min_t(u32, remaining, when);
+}
+
/**
* tcp_write_err() - close socket and save error info
* @sk: The socket the error has appeared on.
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3ab85e1e38d82..1a15e7bae106a 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4080,6 +4080,8 @@ void ieee80211_check_fast_rx(struct sta_info *sta)

rcu_read_lock();
key = rcu_dereference(sta->ptk[sta->ptk_idx]);
+ if (!key)
+ key = rcu_dereference(sdata->default_unicast_key);
if (key) {
switch (key->conf.cipher) {
case WLAN_CIPHER_SUITE_TKIP:
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 3a1d428c13369..ea9ddea35a886 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -461,10 +461,11 @@ static int __switchdev_handle_port_obj_add(struct net_device *dev,
extack = switchdev_notifier_info_to_extack(&port_obj_info->info);

if (check_cb(dev)) {
- /* This flag is only checked if the return value is success. */
- port_obj_info->handled = true;
- return add_cb(dev, port_obj_info->obj, port_obj_info->trans,
- extack);
+ err = add_cb(dev, port_obj_info->obj, port_obj_info->trans,
+ extack);
+ if (err != -EOPNOTSUPP)
+ port_obj_info->handled = true;
+ return err;
}

/* Switch ports might be stacked under e.g. a LAG. Ignore the
@@ -513,9 +514,10 @@ static int __switchdev_handle_port_obj_del(struct net_device *dev,
int err = -EOPNOTSUPP;

if (check_cb(dev)) {
- /* This flag is only checked if the return value is success. */
- port_obj_info->handled = true;
- return del_cb(dev, port_obj_info->obj);
+ err = del_cb(dev, port_obj_info->obj);
+ if (err != -EOPNOTSUPP)
+ port_obj_info->handled = true;
+ return err;
}

/* Switch ports might be stacked under e.g. a LAG. Ignore the
@@ -563,9 +565,10 @@ static int __switchdev_handle_port_attr_set(struct net_device *dev,
int err = -EOPNOTSUPP;

if (check_cb(dev)) {
- port_attr_info->handled = true;
- return set_cb(dev, port_attr_info->attr,
- port_attr_info->trans);
+ err = set_cb(dev, port_attr_info->attr, port_attr_info->trans);
+ if (err != -EOPNOTSUPP)
+ port_attr_info->handled = true;
+ return err;
}

/* Switch ports might be stacked under e.g. a LAG. Ignore the
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 5f515a29668c8..b3667a5efdc1f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2450,6 +2450,9 @@ static const struct pci_device_id azx_ids[] = {
/* CometLake-S */
{ PCI_DEVICE(0x8086, 0xa3f0),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
+ /* CometLake-R */
+ { PCI_DEVICE(0x8086, 0xf0c8),
+ .driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
/* Icelake */
{ PCI_DEVICE(0x8086, 0x34c8),
.driver_data = AZX_DRIVER_SKL | AZX_DCAPS_INTEL_SKYLAKE},
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 9e8233c10d860..df38616c431a6 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -68,8 +68,7 @@ void hda_codec_jack_check(struct snd_sof_dev *sdev)
* has been recorded in STATESTS
*/
if (codec->jacktbl.used)
- schedule_delayed_work(&codec->jackpoll_work,
- codec->jackpoll_interval);
+ pm_request_resume(&codec->core.dev);
}
#else
void hda_codec_jack_wake_enable(struct snd_sof_dev *sdev) {}
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index edba4745f25a9..693d740107a8b 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -214,8 +214,11 @@ static int read_symbols(struct elf *elf)

symtab = find_section_by_name(elf, ".symtab");
if (!symtab) {
- WARN("missing symbol table");
- return -1;
+ /*
+ * A missing symbol table is actually possible if it's an empty
+ * .o file. This can happen for thunk_64.o.
+ */
+ return 0;
}

symbols_nr = symtab->sh.sh_size / symtab->sh.sh_entsize;
diff --git a/tools/testing/selftests/powerpc/alignment/alignment_handler.c b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
index 0453c50c949cb..0725239bbd85c 100644
--- a/tools/testing/selftests/powerpc/alignment/alignment_handler.c
+++ b/tools/testing/selftests/powerpc/alignment/alignment_handler.c
@@ -380,7 +380,6 @@ int test_alignment_handler_integer(void)
LOAD_DFORM_TEST(ldu);
LOAD_XFORM_TEST(ldx);
LOAD_XFORM_TEST(ldux);
- LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stb);
STORE_XFORM_TEST(stbx);
STORE_DFORM_TEST(stbu);
@@ -399,7 +398,11 @@ int test_alignment_handler_integer(void)
STORE_XFORM_TEST(stdx);
STORE_DFORM_TEST(stdu);
STORE_XFORM_TEST(stdux);
+
+#ifdef __BIG_ENDIAN__
+ LOAD_DFORM_TEST(lmw);
STORE_DFORM_TEST(stmw);
+#endif

return rc;
}