Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0126-5.4.27-all-fixes.patch

Revision 3507
Mon May 11 14:36:30 2020 UTC by niro
File size: 68423 bytes
-linux-5.4.27
1 diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
2 index 5594c8bf1dcd..b5c933fa971f 100644
3 --- a/Documentation/admin-guide/kernel-parameters.txt
4 +++ b/Documentation/admin-guide/kernel-parameters.txt
5 @@ -136,6 +136,10 @@
6 dynamic table installation which will install SSDT
7 tables to /sys/firmware/acpi/tables/dynamic.
8
9 + acpi_no_watchdog [HW,ACPI,WDT]
10 + Ignore the ACPI-based watchdog interface (WDAT) and let
11 + a native driver control the watchdog device instead.
12 +
13 acpi_rsdp= [ACPI,EFI,KEXEC]
14 Pass the RSDP address to the kernel, mostly used
15 on machines running EFI runtime service to boot the
16 diff --git a/Makefile b/Makefile
17 index 2250b1bb8aa9..36a0847534dd 100644
18 --- a/Makefile
19 +++ b/Makefile
20 @@ -1,7 +1,7 @@
21 # SPDX-License-Identifier: GPL-2.0
22 VERSION = 5
23 PATCHLEVEL = 4
24 -SUBLEVEL = 26
25 +SUBLEVEL = 27
26 EXTRAVERSION =
27 NAME = Kleptomaniac Octopus
28
29 @@ -1237,7 +1237,7 @@ ifneq ($(dtstree),)
30 %.dtb: include/config/kernel.release scripts_dtc
31 $(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
32
33 -PHONY += dtbs dtbs_install dt_binding_check
34 +PHONY += dtbs dtbs_install dtbs_check
35 dtbs dtbs_check: include/config/kernel.release scripts_dtc
36 $(Q)$(MAKE) $(build)=$(dtstree)
37
38 @@ -1257,6 +1257,7 @@ PHONY += scripts_dtc
39 scripts_dtc: scripts_basic
40 $(Q)$(MAKE) $(build)=scripts/dtc
41
42 +PHONY += dt_binding_check
43 dt_binding_check: scripts_dtc
44 $(Q)$(MAKE) $(build)=Documentation/devicetree/bindings
45
46 diff --git a/arch/arm/Makefile b/arch/arm/Makefile
47 index db857d07114f..1fc32b611f8a 100644
48 --- a/arch/arm/Makefile
49 +++ b/arch/arm/Makefile
50 @@ -307,13 +307,15 @@ endif
51 ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
52 prepare: stack_protector_prepare
53 stack_protector_prepare: prepare0
54 - $(eval KBUILD_CFLAGS += \
55 + $(eval SSP_PLUGIN_CFLAGS := \
56 -fplugin-arg-arm_ssp_per_task_plugin-tso=$(shell \
57 awk '{if ($$2 == "THREAD_SZ_ORDER") print $$3;}'\
58 include/generated/asm-offsets.h) \
59 -fplugin-arg-arm_ssp_per_task_plugin-offset=$(shell \
60 awk '{if ($$2 == "TI_STACK_CANARY") print $$3;}'\
61 include/generated/asm-offsets.h))
62 + $(eval KBUILD_CFLAGS += $(SSP_PLUGIN_CFLAGS))
63 + $(eval GCC_PLUGINS_CFLAGS += $(SSP_PLUGIN_CFLAGS))
64 endif
65
66 all: $(notdir $(KBUILD_IMAGE))
67 diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
68 index 9219389bbe61..1483966dcf23 100644
69 --- a/arch/arm/boot/compressed/Makefile
70 +++ b/arch/arm/boot/compressed/Makefile
71 @@ -101,7 +101,6 @@ clean-files += piggy_data lib1funcs.S ashldi3.S bswapsdi2.S \
72 $(libfdt) $(libfdt_hdrs) hyp-stub.S
73
74 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
75 -KBUILD_CFLAGS += $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
76
77 ifeq ($(CONFIG_FUNCTION_TRACER),y)
78 ORIG_CFLAGS := $(KBUILD_CFLAGS)
79 @@ -117,7 +116,8 @@ CFLAGS_fdt_ro.o := $(nossp_flags)
80 CFLAGS_fdt_rw.o := $(nossp_flags)
81 CFLAGS_fdt_wip.o := $(nossp_flags)
82
83 -ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin -I$(obj)
84 +ccflags-y := -fpic $(call cc-option,-mno-single-pic-base,) -fno-builtin \
85 + -I$(obj) $(DISABLE_ARM_SSP_PER_TASK_PLUGIN)
86 asflags-y := -DZIMAGE
87
88 # Supply kernel BSS size to the decompressor via a linker symbol.
89 diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
90 index 9bf16c93ee6a..f00e45fa62c4 100644
91 --- a/arch/arm/kernel/vdso.c
92 +++ b/arch/arm/kernel/vdso.c
93 @@ -92,6 +92,8 @@ static bool __init cntvct_functional(void)
94 * this.
95 */
96 np = of_find_compatible_node(NULL, NULL, "arm,armv7-timer");
97 + if (!np)
98 + np = of_find_compatible_node(NULL, NULL, "arm,armv8-timer");
99 if (!np)
100 goto out_put;
101
102 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
103 index 95b2e1ce559c..f8016e3db65d 100644
104 --- a/arch/arm/lib/copy_from_user.S
105 +++ b/arch/arm/lib/copy_from_user.S
106 @@ -118,7 +118,7 @@ ENTRY(arm_copy_from_user)
107
108 ENDPROC(arm_copy_from_user)
109
110 - .pushsection .fixup,"ax"
111 + .pushsection .text.fixup,"ax"
112 .align 0
113 copy_abort_preamble
114 ldmfd sp!, {r1, r2, r3}
115 diff --git a/block/blk-flush.c b/block/blk-flush.c
116 index b1f0a1ac505c..5aa6fada2259 100644
117 --- a/block/blk-flush.c
118 +++ b/block/blk-flush.c
119 @@ -399,7 +399,7 @@ void blk_insert_flush(struct request *rq)
120 */
121 if ((policy & REQ_FSEQ_DATA) &&
122 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
123 - blk_mq_request_bypass_insert(rq, false);
124 + blk_mq_request_bypass_insert(rq, false, false);
125 return;
126 }
127
128 diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
129 index ca22afd47b3d..74cedea56034 100644
130 --- a/block/blk-mq-sched.c
131 +++ b/block/blk-mq-sched.c
132 @@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
133 bool has_sched,
134 struct request *rq)
135 {
136 - /* dispatch flush rq directly */
137 - if (rq->rq_flags & RQF_FLUSH_SEQ) {
138 - spin_lock(&hctx->lock);
139 - list_add(&rq->queuelist, &hctx->dispatch);
140 - spin_unlock(&hctx->lock);
141 + /*
142 + * dispatch flush and passthrough rq directly
143 + *
144 + * passthrough request has to be added to hctx->dispatch directly.
145 + * For some reason, device may be in one situation which can't
146 + * handle FS request, so STS_RESOURCE is always returned and the
147 + * FS request will be added to hctx->dispatch. However passthrough
148 + * request may be required at that time for fixing the problem. If
149 + * passthrough request is added to scheduler queue, there isn't any
150 + * chance to dispatch it given we prioritize requests in hctx->dispatch.
151 + */
152 + if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
153 return true;
154 - }
155
156 if (has_sched)
157 rq->rq_flags |= RQF_SORTED;
158 @@ -391,8 +397,32 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
159
160 WARN_ON(e && (rq->tag != -1));
161
162 - if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
163 + if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
164 + /*
165 + * Firstly normal IO request is inserted to scheduler queue or
166 + * sw queue, meantime we add flush request to dispatch queue(
167 + * hctx->dispatch) directly and there is at most one in-flight
168 + * flush request for each hw queue, so it doesn't matter to add
169 + * flush request to tail or front of the dispatch queue.
170 + *
171 + * Secondly in case of NCQ, flush request belongs to non-NCQ
172 + * command, and queueing it will fail when there is any
173 + * in-flight normal IO request(NCQ command). When adding flush
174 + * rq to the front of hctx->dispatch, it is easier to introduce
175 + * extra time to flush rq's latency because of S_SCHED_RESTART
176 + * compared with adding to the tail of dispatch queue, then
177 + * chance of flush merge is increased, and less flush requests
178 + * will be issued to controller. It is observed that ~10% time
179 + * is saved in blktests block/004 on disk attached to AHCI/NCQ
180 + * drive when adding flush rq to the front of hctx->dispatch.
181 + *
182 + * Simply queue flush rq to the front of hctx->dispatch so that
183 + * intensive flush workloads can benefit in case of NCQ HW.
184 + */
185 + at_head = (rq->rq_flags & RQF_FLUSH_SEQ) ? true : at_head;
186 + blk_mq_request_bypass_insert(rq, at_head, false);
187 goto run;
188 + }
189
190 if (e && e->type->ops.insert_requests) {
191 LIST_HEAD(list);
192 diff --git a/block/blk-mq.c b/block/blk-mq.c
193 index ec791156e9cc..3c1abab1fdf5 100644
194 --- a/block/blk-mq.c
195 +++ b/block/blk-mq.c
196 @@ -761,7 +761,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
197 * merge.
198 */
199 if (rq->rq_flags & RQF_DONTPREP)
200 - blk_mq_request_bypass_insert(rq, false);
201 + blk_mq_request_bypass_insert(rq, false, false);
202 else
203 blk_mq_sched_insert_request(rq, true, false, false);
204 }
205 @@ -1313,7 +1313,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
206 q->mq_ops->commit_rqs(hctx);
207
208 spin_lock(&hctx->lock);
209 - list_splice_init(list, &hctx->dispatch);
210 + list_splice_tail_init(list, &hctx->dispatch);
211 spin_unlock(&hctx->lock);
212
213 /*
214 @@ -1668,12 +1668,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
215 * Should only be used carefully, when the caller knows we want to
216 * bypass a potential IO scheduler on the target device.
217 */
218 -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
219 +void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
220 + bool run_queue)
221 {
222 struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
223
224 spin_lock(&hctx->lock);
225 - list_add_tail(&rq->queuelist, &hctx->dispatch);
226 + if (at_head)
227 + list_add(&rq->queuelist, &hctx->dispatch);
228 + else
229 + list_add_tail(&rq->queuelist, &hctx->dispatch);
230 spin_unlock(&hctx->lock);
231
232 if (run_queue)
233 @@ -1863,7 +1867,7 @@ insert:
234 if (bypass_insert)
235 return BLK_STS_RESOURCE;
236
237 - blk_mq_request_bypass_insert(rq, run_queue);
238 + blk_mq_request_bypass_insert(rq, false, run_queue);
239 return BLK_STS_OK;
240 }
241
242 @@ -1879,7 +1883,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
243
244 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
245 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
246 - blk_mq_request_bypass_insert(rq, true);
247 + blk_mq_request_bypass_insert(rq, false, true);
248 else if (ret != BLK_STS_OK)
249 blk_mq_end_request(rq, ret);
250
251 @@ -1913,7 +1917,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
252 if (ret != BLK_STS_OK) {
253 if (ret == BLK_STS_RESOURCE ||
254 ret == BLK_STS_DEV_RESOURCE) {
255 - blk_mq_request_bypass_insert(rq,
256 + blk_mq_request_bypass_insert(rq, false,
257 list_empty(list));
258 break;
259 }
260 diff --git a/block/blk-mq.h b/block/blk-mq.h
261 index 32c62c64e6c2..f2075978db50 100644
262 --- a/block/blk-mq.h
263 +++ b/block/blk-mq.h
264 @@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
265 */
266 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
267 bool at_head);
268 -void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
269 +void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
270 + bool run_queue);
271 void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
272 struct list_head *list);
273
274 diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
275 index d827a4a3e946..6e9ec6e3fe47 100644
276 --- a/drivers/acpi/acpi_watchdog.c
277 +++ b/drivers/acpi/acpi_watchdog.c
278 @@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
279 }
280 #endif
281
282 +static bool acpi_no_watchdog;
283 +
284 static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
285 {
286 const struct acpi_table_wdat *wdat = NULL;
287 acpi_status status;
288
289 - if (acpi_disabled)
290 + if (acpi_disabled || acpi_no_watchdog)
291 return NULL;
292
293 status = acpi_get_table(ACPI_SIG_WDAT, 0,
294 @@ -88,6 +90,14 @@ bool acpi_has_watchdog(void)
295 }
296 EXPORT_SYMBOL_GPL(acpi_has_watchdog);
297
298 +/* ACPI watchdog can be disabled on boot command line */
299 +static int __init disable_acpi_watchdog(char *str)
300 +{
301 + acpi_no_watchdog = true;
302 + return 1;
303 +}
304 +__setup("acpi_no_watchdog", disable_acpi_watchdog);
305 +
306 void __init acpi_watchdog_init(void)
307 {
308 const struct acpi_wdat_entry *entries;
309 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
310 index a7ba4c6cf7a1..f642e066e67a 100644
311 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
312 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
313 @@ -230,7 +230,8 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
314 unsigned int vmhub, uint32_t flush_type)
315 {
316 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub];
317 - u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type);
318 + u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type);
319 + u32 tmp;
320 /* Use register 17 for GART */
321 const unsigned eng = 17;
322 unsigned int i;
323 @@ -258,7 +259,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
324 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
325 }
326
327 - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
328 + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
329
330 /*
331 * Issue a dummy read to wait for the ACK register to be cleared
332 diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
333 index da53a55bf955..688111ef814d 100644
334 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
335 +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
336 @@ -487,13 +487,13 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
337 {
338 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
339 const unsigned eng = 17;
340 - u32 j, tmp;
341 + u32 j, inv_req, tmp;
342 struct amdgpu_vmhub *hub;
343
344 BUG_ON(vmhub >= adev->num_vmhubs);
345
346 hub = &adev->vmhub[vmhub];
347 - tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type);
348 + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
349
350 /* This is necessary for a HW workaround under SRIOV as well
351 * as GFXOFF under bare metal
352 @@ -504,7 +504,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
353 uint32_t req = hub->vm_inv_eng0_req + eng;
354 uint32_t ack = hub->vm_inv_eng0_ack + eng;
355
356 - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
357 + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req,
358 1 << vmid);
359 return;
360 }
361 @@ -532,7 +532,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
362 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
363 }
364
365 - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
366 + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
367
368 /*
369 * Issue a dummy read to wait for the ACK register to be cleared
370 diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
371 index c5257ae3188a..0922d9cd858a 100644
372 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
373 +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
374 @@ -988,8 +988,12 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
375 struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
376 int ret = 0;
377
378 - max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
379 + if (!smu->smu_table.max_sustainable_clocks)
380 + max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
381 GFP_KERNEL);
382 + else
383 + max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
384 +
385 smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
386
387 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
388 diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
389 index 6ac8becc2372..d732d1d10caf 100644
390 --- a/drivers/hid/hid-apple.c
391 +++ b/drivers/hid/hid-apple.c
392 @@ -340,7 +340,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
393 unsigned long **bit, int *max)
394 {
395 if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
396 - usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
397 + usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
398 + usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
399 /* The fn key on Apple USB keyboards */
400 set_bit(EV_REP, hi->input->evbit);
401 hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
402 diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
403 index 3f6abd190df4..db6da21ade06 100644
404 --- a/drivers/hid/hid-bigbenff.c
405 +++ b/drivers/hid/hid-bigbenff.c
406 @@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = {
407 struct bigben_device {
408 struct hid_device *hid;
409 struct hid_report *report;
410 + bool removed;
411 u8 led_state; /* LED1 = 1 .. LED4 = 8 */
412 u8 right_motor_on; /* right motor off/on 0/1 */
413 u8 left_motor_force; /* left motor force 0-255 */
414 @@ -190,6 +191,9 @@ static void bigben_worker(struct work_struct *work)
415 struct bigben_device, worker);
416 struct hid_field *report_field = bigben->report->field[0];
417
418 + if (bigben->removed)
419 + return;
420 +
421 if (bigben->work_led) {
422 bigben->work_led = false;
423 report_field->value[0] = 0x01; /* 1 = led message */
424 @@ -220,10 +224,16 @@ static void bigben_worker(struct work_struct *work)
425 static int hid_bigben_play_effect(struct input_dev *dev, void *data,
426 struct ff_effect *effect)
427 {
428 - struct bigben_device *bigben = data;
429 + struct hid_device *hid = input_get_drvdata(dev);
430 + struct bigben_device *bigben = hid_get_drvdata(hid);
431 u8 right_motor_on;
432 u8 left_motor_force;
433
434 + if (!bigben) {
435 + hid_err(hid, "no device data\n");
436 + return 0;
437 + }
438 +
439 if (effect->type != FF_RUMBLE)
440 return 0;
441
442 @@ -298,8 +308,8 @@ static void bigben_remove(struct hid_device *hid)
443 {
444 struct bigben_device *bigben = hid_get_drvdata(hid);
445
446 + bigben->removed = true;
447 cancel_work_sync(&bigben->worker);
448 - hid_hw_close(hid);
449 hid_hw_stop(hid);
450 }
451
452 @@ -319,6 +329,7 @@ static int bigben_probe(struct hid_device *hid,
453 return -ENOMEM;
454 hid_set_drvdata(hid, bigben);
455 bigben->hid = hid;
456 + bigben->removed = false;
457
458 error = hid_parse(hid);
459 if (error) {
460 @@ -341,10 +352,10 @@ static int bigben_probe(struct hid_device *hid,
461
462 INIT_WORK(&bigben->worker, bigben_worker);
463
464 - error = input_ff_create_memless(hidinput->input, bigben,
465 + error = input_ff_create_memless(hidinput->input, NULL,
466 hid_bigben_play_effect);
467 if (error)
468 - return error;
469 + goto error_hw_stop;
470
471 name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1;
472
473 @@ -354,8 +365,10 @@ static int bigben_probe(struct hid_device *hid,
474 sizeof(struct led_classdev) + name_sz,
475 GFP_KERNEL
476 );
477 - if (!led)
478 - return -ENOMEM;
479 + if (!led) {
480 + error = -ENOMEM;
481 + goto error_hw_stop;
482 + }
483 name = (void *)(&led[1]);
484 snprintf(name, name_sz,
485 "%s:red:bigben%d",
486 @@ -369,7 +382,7 @@ static int bigben_probe(struct hid_device *hid,
487 bigben->leds[n] = led;
488 error = devm_led_classdev_register(&hid->dev, led);
489 if (error)
490 - return error;
491 + goto error_hw_stop;
492 }
493
494 /* initial state: LED1 is on, no rumble effect */
495 @@ -383,6 +396,10 @@ static int bigben_probe(struct hid_device *hid,
496 hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
497
498 return 0;
499 +
500 +error_hw_stop:
501 + hid_hw_stop(hid);
502 + return error;
503 }
504
505 static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
506 diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
507 index d86a9189e88f..aeb351658ad3 100644
508 --- a/drivers/hid/hid-google-hammer.c
509 +++ b/drivers/hid/hid-google-hammer.c
510 @@ -473,6 +473,8 @@ static const struct hid_device_id hammer_devices[] = {
511 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MAGNEMITE) },
512 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
513 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MASTERBALL) },
514 + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
515 + USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_MOONBALL) },
516 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
517 USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
518 { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
519 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
520 index 5fc82029a03b..646b98809ed3 100644
521 --- a/drivers/hid/hid-ids.h
522 +++ b/drivers/hid/hid-ids.h
523 @@ -478,6 +478,7 @@
524 #define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
525 #define USB_DEVICE_ID_GOOGLE_MASTERBALL 0x503c
526 #define USB_DEVICE_ID_GOOGLE_MAGNEMITE 0x503d
527 +#define USB_DEVICE_ID_GOOGLE_MOONBALL 0x5044
528
529 #define USB_VENDOR_ID_GOTOP 0x08f2
530 #define USB_DEVICE_ID_SUPER_Q2 0x007f
531 @@ -726,6 +727,7 @@
532 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085
533 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3
534 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
535 +#define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
536
537 #define USB_VENDOR_ID_LG 0x1fd2
538 #define USB_DEVICE_ID_LG_MULTITOUCH 0x0064
539 diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
540 index fa58a7cbb3ff..ae64a286a68f 100644
541 --- a/drivers/hid/hid-quirks.c
542 +++ b/drivers/hid/hid-quirks.c
543 @@ -103,6 +103,7 @@ static const struct hid_device_id hid_quirks[] = {
544 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912), HID_QUIRK_MULTI_INPUT },
545 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M406XE), HID_QUIRK_MULTI_INPUT },
546 { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_PIXART_USB_OPTICAL_MOUSE_ID2), HID_QUIRK_ALWAYS_POLL },
547 + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D), HID_QUIRK_ALWAYS_POLL },
548 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C007), HID_QUIRK_ALWAYS_POLL },
549 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_C077), HID_QUIRK_ALWAYS_POLL },
550 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KEYBOARD_G710_PLUS), HID_QUIRK_NOGET },
551 diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
552 index d31ea82b84c1..a66f08041a1a 100644
553 --- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
554 +++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
555 @@ -341,6 +341,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
556 },
557 .driver_data = (void *)&sipodev_desc
558 },
559 + {
560 + .ident = "Trekstor SURFBOOK E11B",
561 + .matches = {
562 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
563 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"),
564 + },
565 + .driver_data = (void *)&sipodev_desc
566 + },
567 {
568 .ident = "Direkt-Tek DTLAPY116-2",
569 .matches = {
570 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
571 index 6f2cf569a283..79b3d53f2fbf 100644
572 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
573 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
574 @@ -297,6 +297,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
575 }
576
577 hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
578 + hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
579
580 hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
581 hw_ioctxt.cmdq_depth = 0;
582 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
583 index b069045de416..66fd2340d447 100644
584 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
585 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
586 @@ -151,8 +151,8 @@ struct hinic_cmd_hw_ioctxt {
587
588 u8 lro_en;
589 u8 rsvd3;
590 + u8 ppf_idx;
591 u8 rsvd4;
592 - u8 rsvd5;
593
594 u16 rq_depth;
595 u16 rx_buf_sz_idx;
596 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
597 index 517794509eb2..c7bb9ceca72c 100644
598 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
599 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
600 @@ -137,6 +137,7 @@
601 #define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
602 #define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
603 #define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
604 +#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
605
606 #define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
607 #define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
608 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
609 index f4a339b10b10..79091e131418 100644
610 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
611 +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
612 @@ -94,6 +94,7 @@ struct hinic_rq {
613
614 struct hinic_wq *wq;
615
616 + struct cpumask affinity_mask;
617 u32 irq;
618 u16 msix_entry;
619
620 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
621 index 2411ad270c98..42d00b049c6e 100644
622 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
623 +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
624 @@ -356,7 +356,8 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
625 if (!num_cpus)
626 num_cpus = num_online_cpus();
627
628 - nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus);
629 + nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
630 + nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
631
632 nic_dev->rss_limit = nic_dev->num_qps;
633 nic_dev->num_rss = nic_dev->num_qps;
634 diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
635 index 56ea6d692f1c..2695ad69fca6 100644
636 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
637 +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
638 @@ -475,7 +475,6 @@ static int rx_request_irq(struct hinic_rxq *rxq)
639 struct hinic_hwdev *hwdev = nic_dev->hwdev;
640 struct hinic_rq *rq = rxq->rq;
641 struct hinic_qp *qp;
642 - struct cpumask mask;
643 int err;
644
645 rx_add_napi(rxq);
646 @@ -492,8 +491,8 @@ static int rx_request_irq(struct hinic_rxq *rxq)
647 }
648
649 qp = container_of(rq, struct hinic_qp, rq);
650 - cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
651 - return irq_set_affinity_hint(rq->irq, &mask);
652 + cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
653 + return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
654 }
655
656 static void rx_free_irq(struct hinic_rxq *rxq)
657 diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
658 index 1c9e70c8cc30..58579baf3f7a 100644
659 --- a/drivers/net/ethernet/micrel/ks8851_mll.c
660 +++ b/drivers/net/ethernet/micrel/ks8851_mll.c
661 @@ -513,14 +513,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
662 {
663 struct net_device *netdev = pw;
664 struct ks_net *ks = netdev_priv(netdev);
665 + unsigned long flags;
666 u16 status;
667
668 + spin_lock_irqsave(&ks->statelock, flags);
669 /*this should be the first in IRQ handler */
670 ks_save_cmd_reg(ks);
671
672 status = ks_rdreg16(ks, KS_ISR);
673 if (unlikely(!status)) {
674 ks_restore_cmd_reg(ks);
675 + spin_unlock_irqrestore(&ks->statelock, flags);
676 return IRQ_NONE;
677 }
678
679 @@ -546,6 +549,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
680 ks->netdev->stats.rx_over_errors++;
681 /* this should be the last in IRQ handler*/
682 ks_restore_cmd_reg(ks);
683 + spin_unlock_irqrestore(&ks->statelock, flags);
684 return IRQ_HANDLED;
685 }
686
687 @@ -615,6 +619,7 @@ static int ks_net_stop(struct net_device *netdev)
688
689 /* shutdown RX/TX QMU */
690 ks_disable_qmu(ks);
691 + ks_disable_int(ks);
692
693 /* set powermode to soft power down to save power */
694 ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
695 @@ -671,10 +676,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
696 {
697 netdev_tx_t retv = NETDEV_TX_OK;
698 struct ks_net *ks = netdev_priv(netdev);
699 + unsigned long flags;
700
701 - disable_irq(netdev->irq);
702 - ks_disable_int(ks);
703 - spin_lock(&ks->statelock);
704 + spin_lock_irqsave(&ks->statelock, flags);
705
706 /* Extra space are required:
707 * 4 byte for alignment, 4 for status/length, 4 for CRC
708 @@ -688,9 +692,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
709 dev_kfree_skb(skb);
710 } else
711 retv = NETDEV_TX_BUSY;
712 - spin_unlock(&ks->statelock);
713 - ks_enable_int(ks);
714 - enable_irq(netdev->irq);
715 + spin_unlock_irqrestore(&ks->statelock, flags);
716 return retv;
717 }
718
719 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
720 index 06de59521fc4..fbf4cbcf1a65 100644
721 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
722 +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
723 @@ -13,25 +13,6 @@
724 #include "rmnet_vnd.h"
725 #include "rmnet_private.h"
726
727 -/* Locking scheme -
728 - * The shared resource which needs to be protected is realdev->rx_handler_data.
729 - * For the writer path, this is using rtnl_lock(). The writer paths are
730 - * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
731 - * paths are already called with rtnl_lock() acquired in. There is also an
732 - * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
733 - * dereference here, we will need to use rtnl_dereference(). Dev list writing
734 - * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
735 - * For the reader path, the real_dev->rx_handler_data is called in the TX / RX
736 - * path. We only need rcu_read_lock() for these scenarios. In these cases,
737 - * the rcu_read_lock() is held in __dev_queue_xmit() and
738 - * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
739 - * to get the relevant information. For dev list reading, we again acquire
740 - * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
741 - * We also use unregister_netdevice_many() to free all rmnet devices in
742 - * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in
743 - * same context.
744 - */
745 -
746 /* Local Definitions and Declarations */
747
748 static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
749 @@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
750 return rtnl_dereference(real_dev->rx_handler_data);
751 }
752
753 -static int rmnet_unregister_real_device(struct net_device *real_dev,
754 - struct rmnet_port *port)
755 +static int rmnet_unregister_real_device(struct net_device *real_dev)
756 {
757 + struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
758 +
759 if (port->nr_rmnet_devs)
760 return -EINVAL;
761
762 @@ -61,9 +43,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
763
764 kfree(port);
765
766 - /* release reference on real_dev */
767 - dev_put(real_dev);
768 -
769 netdev_dbg(real_dev, "Removed from rmnet\n");
770 return 0;
771 }
772 @@ -89,9 +68,6 @@ static int rmnet_register_real_device(struct net_device *real_dev)
773 return -EBUSY;
774 }
775
776 - /* hold on to real dev for MAP data */
777 - dev_hold(real_dev);
778 -
779 for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
780 INIT_HLIST_HEAD(&port->muxed_ep[entry]);
781
782 @@ -99,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev)
783 return 0;
784 }
785
786 -static void rmnet_unregister_bridge(struct net_device *dev,
787 - struct rmnet_port *port)
788 +static void rmnet_unregister_bridge(struct rmnet_port *port)
789 {
790 - struct rmnet_port *bridge_port;
791 - struct net_device *bridge_dev;
792 + struct net_device *bridge_dev, *real_dev, *rmnet_dev;
793 + struct rmnet_port *real_port;
794
795 if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
796 return;
797
798 - /* bridge slave handling */
799 + rmnet_dev = port->rmnet_dev;
800 if (!port->nr_rmnet_devs) {
801 - bridge_dev = port->bridge_ep;
802 + /* bridge device */
803 + real_dev = port->bridge_ep;
804 + bridge_dev = port->dev;
805
806 - bridge_port = rmnet_get_port_rtnl(bridge_dev);
807 - bridge_port->bridge_ep = NULL;
808 - bridge_port->rmnet_mode = RMNET_EPMODE_VND;
809 + real_port = rmnet_get_port_rtnl(real_dev);
810 + real_port->bridge_ep = NULL;
811 + real_port->rmnet_mode = RMNET_EPMODE_VND;
812 } else {
813 + /* real device */
814 bridge_dev = port->bridge_ep;
815
816 - bridge_port = rmnet_get_port_rtnl(bridge_dev);
817 - rmnet_unregister_real_device(bridge_dev, bridge_port);
818 + port->bridge_ep = NULL;
819 + port->rmnet_mode = RMNET_EPMODE_VND;
820 }
821 +
822 + netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
823 + rmnet_unregister_real_device(bridge_dev);
824 }
825
826 static int rmnet_newlink(struct net *src_net, struct net_device *dev,
827 @@ -135,6 +116,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
828 int err = 0;
829 u16 mux_id;
830
831 + if (!tb[IFLA_LINK]) {
832 + NL_SET_ERR_MSG_MOD(extack, "link not specified");
833 + return -EINVAL;
834 + }
835 +
836 real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
837 if (!real_dev || !dev)
838 return -ENODEV;
839 @@ -157,7 +143,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
840 if (err)
841 goto err1;
842
843 + err = netdev_upper_dev_link(real_dev, dev, extack);
844 + if (err < 0)
845 + goto err2;
846 +
847 port->rmnet_mode = mode;
848 + port->rmnet_dev = dev;
849
850 hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
851
852 @@ -173,8 +164,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
853
854 return 0;
855
856 +err2:
857 + unregister_netdevice(dev);
858 + rmnet_vnd_dellink(mux_id, port, ep);
859 err1:
860 - rmnet_unregister_real_device(real_dev, port);
861 + rmnet_unregister_real_device(real_dev);
862 err0:
863 kfree(ep);
864 return err;
865 @@ -183,77 +177,74 @@ err0:
866 static void rmnet_dellink(struct net_device *dev, struct list_head *head)
867 {
868 struct rmnet_priv *priv = netdev_priv(dev);
869 - struct net_device *real_dev;
870 + struct net_device *real_dev, *bridge_dev;
871 + struct rmnet_port *real_port, *bridge_port;
872 struct rmnet_endpoint *ep;
873 - struct rmnet_port *port;
874 - u8 mux_id;
875 + u8 mux_id = priv->mux_id;
876
877 real_dev = priv->real_dev;
878
879 - if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
880 + if (!rmnet_is_real_dev_registered(real_dev))
881 return;
882
883 - port = rmnet_get_port_rtnl(real_dev);
884 -
885 - mux_id = rmnet_vnd_get_mux(dev);
886 + real_port = rmnet_get_port_rtnl(real_dev);
887 + bridge_dev = real_port->bridge_ep;
888 + if (bridge_dev) {
889 + bridge_port = rmnet_get_port_rtnl(bridge_dev);
890 + rmnet_unregister_bridge(bridge_port);
891 + }
892
893 - ep = rmnet_get_endpoint(port, mux_id);
894 + ep = rmnet_get_endpoint(real_port, mux_id);
895 if (ep) {
896 hlist_del_init_rcu(&ep->hlnode);
897 - rmnet_unregister_bridge(dev, port);
898 - rmnet_vnd_dellink(mux_id, port, ep);
899 + rmnet_vnd_dellink(mux_id, real_port, ep);
900 kfree(ep);
901 }
902 - rmnet_unregister_real_device(real_dev, port);
903
904 + netdev_upper_dev_unlink(real_dev, dev);
905 + rmnet_unregister_real_device(real_dev);
906 unregister_netdevice_queue(dev, head);
907 }
908
909 -static void rmnet_force_unassociate_device(struct net_device *dev)
910 +static void rmnet_force_unassociate_device(struct net_device *real_dev)
911 {
912 - struct net_device *real_dev = dev;
913 struct hlist_node *tmp_ep;
914 struct rmnet_endpoint *ep;
915 struct rmnet_port *port;
916 unsigned long bkt_ep;
917 LIST_HEAD(list);
918
919 - if (!rmnet_is_real_dev_registered(real_dev))
920 - return;
921 -
922 - ASSERT_RTNL();
923 -
924 - port = rmnet_get_port_rtnl(dev);
925 -
926 - rcu_read_lock();
927 - rmnet_unregister_bridge(dev, port);
928 -
929 - hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
930 - unregister_netdevice_queue(ep->egress_dev, &list);
931 - rmnet_vnd_dellink(ep->mux_id, port, ep);
932 + port = rmnet_get_port_rtnl(real_dev);
933
934 - hlist_del_init_rcu(&ep->hlnode);
935 - kfree(ep);
936 + if (port->nr_rmnet_devs) {
937 + /* real device */
938 + rmnet_unregister_bridge(port);
939 + hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
940 + unregister_netdevice_queue(ep->egress_dev, &list);
941 + netdev_upper_dev_unlink(real_dev, ep->egress_dev);
942 + rmnet_vnd_dellink(ep->mux_id, port, ep);
943 + hlist_del_init_rcu(&ep->hlnode);
944 + kfree(ep);
945 + }
946 + rmnet_unregister_real_device(real_dev);
947 + unregister_netdevice_many(&list);
948 + } else {
949 + rmnet_unregister_bridge(port);
950 }
951 -
952 - rcu_read_unlock();
953 - unregister_netdevice_many(&list);
954 -
955 - rmnet_unregister_real_device(real_dev, port);
956 }
957
958 static int rmnet_config_notify_cb(struct notifier_block *nb,
959 unsigned long event, void *data)
960 {
961 - struct net_device *dev = netdev_notifier_info_to_dev(data);
962 + struct net_device *real_dev = netdev_notifier_info_to_dev(data);
963
964 - if (!dev)
965 + if (!rmnet_is_real_dev_registered(real_dev))
966 return NOTIFY_DONE;
967
968 switch (event) {
969 case NETDEV_UNREGISTER:
970 - netdev_dbg(dev, "Kernel unregister\n");
971 - rmnet_force_unassociate_device(dev);
972 + netdev_dbg(real_dev, "Kernel unregister\n");
973 + rmnet_force_unassociate_device(real_dev);
974 break;
975
976 default:
977 @@ -295,16 +286,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
978 if (!dev)
979 return -ENODEV;
980
981 - real_dev = __dev_get_by_index(dev_net(dev),
982 - nla_get_u32(tb[IFLA_LINK]));
983 -
984 - if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
985 + real_dev = priv->real_dev;
986 + if (!rmnet_is_real_dev_registered(real_dev))
987 return -ENODEV;
988
989 port = rmnet_get_port_rtnl(real_dev);
990
991 if (data[IFLA_RMNET_MUX_ID]) {
992 mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
993 + if (rmnet_get_endpoint(port, mux_id)) {
994 + NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
995 + return -EINVAL;
996 + }
997 ep = rmnet_get_endpoint(port, priv->mux_id);
998 if (!ep)
999 return -ENODEV;
1000 @@ -379,11 +372,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
1001 .fill_info = rmnet_fill_info,
1002 };
1003
1004 -/* Needs either rcu_read_lock() or rtnl lock */
1005 -struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
1006 +struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
1007 {
1008 if (rmnet_is_real_dev_registered(real_dev))
1009 - return rcu_dereference_rtnl(real_dev->rx_handler_data);
1010 + return rcu_dereference_bh(real_dev->rx_handler_data);
1011 else
1012 return NULL;
1013 }
1014 @@ -409,7 +401,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
1015 struct rmnet_port *port, *slave_port;
1016 int err;
1017
1018 - port = rmnet_get_port(real_dev);
1019 + port = rmnet_get_port_rtnl(real_dev);
1020
1021 /* If there is more than one rmnet dev attached, its probably being
1022 * used for muxing. Skip the briding in that case
1023 @@ -417,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
1024 if (port->nr_rmnet_devs > 1)
1025 return -EINVAL;
1026
1027 + if (port->rmnet_mode != RMNET_EPMODE_VND)
1028 + return -EINVAL;
1029 +
1030 if (rmnet_is_real_dev_registered(slave_dev))
1031 return -EBUSY;
1032
1033 @@ -424,9 +419,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
1034 if (err)
1035 return -EBUSY;
1036
1037 - slave_port = rmnet_get_port(slave_dev);
1038 + err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
1039 + extack);
1040 + if (err) {
1041 + rmnet_unregister_real_device(slave_dev);
1042 + return err;
1043 + }
1044 +
1045 + slave_port = rmnet_get_port_rtnl(slave_dev);
1046 slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
1047 slave_port->bridge_ep = real_dev;
1048 + slave_port->rmnet_dev = rmnet_dev;
1049
1050 port->rmnet_mode = RMNET_EPMODE_BRIDGE;
1051 port->bridge_ep = slave_dev;
1052 @@ -438,16 +441,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
1053 int rmnet_del_bridge(struct net_device *rmnet_dev,
1054 struct net_device *slave_dev)
1055 {
1056 - struct rmnet_priv *priv = netdev_priv(rmnet_dev);
1057 - struct net_device *real_dev = priv->real_dev;
1058 - struct rmnet_port *port, *slave_port;
1059 + struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
1060
1061 - port = rmnet_get_port(real_dev);
1062 - port->rmnet_mode = RMNET_EPMODE_VND;
1063 - port->bridge_ep = NULL;
1064 -
1065 - slave_port = rmnet_get_port(slave_dev);
1066 - rmnet_unregister_real_device(slave_dev, slave_port);
1067 + rmnet_unregister_bridge(port);
1068
1069 netdev_dbg(slave_dev, "removed from rmnet as slave\n");
1070 return 0;
1071 @@ -473,8 +469,8 @@ static int __init rmnet_init(void)
1072
1073 static void __exit rmnet_exit(void)
1074 {
1075 - unregister_netdevice_notifier(&rmnet_dev_notifier);
1076 rtnl_link_unregister(&rmnet_link_ops);
1077 + unregister_netdevice_notifier(&rmnet_dev_notifier);
1078 }
1079
1080 module_init(rmnet_init)
1081 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
1082 index cd0a6bcbe74a..be515982d628 100644
1083 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
1084 +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
1085 @@ -28,6 +28,7 @@ struct rmnet_port {
1086 u8 rmnet_mode;
1087 struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
1088 struct net_device *bridge_ep;
1089 + struct net_device *rmnet_dev;
1090 };
1091
1092 extern struct rtnl_link_ops rmnet_link_ops;
1093 @@ -65,7 +66,7 @@ struct rmnet_priv {
1094 struct rmnet_priv_stats stats;
1095 };
1096
1097 -struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
1098 +struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
1099 struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
1100 int rmnet_add_bridge(struct net_device *rmnet_dev,
1101 struct net_device *slave_dev,
1102 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
1103 index 1b74bc160402..29a7bfa2584d 100644
1104 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
1105 +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
1106 @@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
1107 static void
1108 rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
1109 {
1110 + if (skb_mac_header_was_set(skb))
1111 + skb_push(skb, skb->mac_len);
1112 +
1113 if (bridge_dev) {
1114 skb->dev = bridge_dev;
1115 dev_queue_xmit(skb);
1116 @@ -184,7 +187,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
1117 return RX_HANDLER_PASS;
1118
1119 dev = skb->dev;
1120 - port = rmnet_get_port(dev);
1121 + port = rmnet_get_port_rcu(dev);
1122
1123 switch (port->rmnet_mode) {
1124 case RMNET_EPMODE_VND:
1125 @@ -217,7 +220,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
1126 skb->dev = priv->real_dev;
1127 mux_id = priv->mux_id;
1128
1129 - port = rmnet_get_port(skb->dev);
1130 + port = rmnet_get_port_rcu(skb->dev);
1131 if (!port)
1132 goto drop;
1133
1134 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
1135 index 509dfc895a33..26ad40f19c64 100644
1136 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
1137 +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
1138 @@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
1139 return 0;
1140 }
1141
1142 -u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
1143 -{
1144 - struct rmnet_priv *priv;
1145 -
1146 - priv = netdev_priv(rmnet_dev);
1147 - return priv->mux_id;
1148 -}
1149 -
1150 int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
1151 {
1152 netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
1153 diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
1154 index 54cbaf3c3bc4..14d77c709d4a 100644
1155 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
1156 +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
1157 @@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
1158 struct rmnet_endpoint *ep);
1159 void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
1160 void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
1161 -u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
1162 void rmnet_vnd_setup(struct net_device *dev);
1163 #endif /* _RMNET_VND_H_ */
1164 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
1165 index af15a737c675..59b4f16896a8 100644
1166 --- a/drivers/net/ethernet/sfc/ptp.c
1167 +++ b/drivers/net/ethernet/sfc/ptp.c
1168 @@ -560,13 +560,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
1169 u32 nic_major, u32 nic_minor,
1170 s32 correction)
1171 {
1172 + u32 sync_timestamp;
1173 ktime_t kt = { 0 };
1174 + s16 delta;
1175
1176 if (!(nic_major & 0x80000000)) {
1177 WARN_ON_ONCE(nic_major >> 16);
1178 - /* Use the top bits from the latest sync event. */
1179 - nic_major &= 0xffff;
1180 - nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
1181 +
1182 + /* Medford provides 48 bits of timestamp, so we must get the top
1183 + * 16 bits from the timesync event state.
1184 + *
1185 + * We only have the lower 16 bits of the time now, but we do
1186 + * have a full resolution timestamp at some point in past. As
1187 + * long as the difference between the (real) now and the sync
1188 + * is less than 2^15, then we can reconstruct the difference
1189 + * between those two numbers using only the lower 16 bits of
1190 + * each.
1191 + *
1192 + * Put another way
1193 + *
1194 + * a - b = ((a mod k) - b) mod k
1195 + *
1196 + * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
1197 + * (a mod k) and b, so can calculate the delta, a - b.
1198 + *
1199 + */
1200 + sync_timestamp = last_sync_timestamp_major(efx);
1201 +
1202 + /* Because delta is s16 this does an implicit mask down to
1203 + * 16 bits which is what we need, assuming
1204 + * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
1205 + * we can deal with the (unlikely) case of sync timestamps
1206 + * arriving from the future.
1207 + */
1208 + delta = nic_major - sync_timestamp;
1209 +
1210 + /* Recover the fully specified time now, by applying the offset
1211 + * to the (fully specified) sync time.
1212 + */
1213 + nic_major = sync_timestamp + delta;
1214
1215 kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
1216 correction);
1217 diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
1218 index 276292bca334..53fb8141f1a6 100644
1219 --- a/drivers/net/ethernet/xilinx/ll_temac.h
1220 +++ b/drivers/net/ethernet/xilinx/ll_temac.h
1221 @@ -375,10 +375,14 @@ struct temac_local {
1222 int tx_bd_next;
1223 int tx_bd_tail;
1224 int rx_bd_ci;
1225 + int rx_bd_tail;
1226
1227 /* DMA channel control setup */
1228 u32 tx_chnl_ctrl;
1229 u32 rx_chnl_ctrl;
1230 + u8 coalesce_count_rx;
1231 +
1232 + struct delayed_work restart_work;
1233 };
1234
1235 /* Wrappers for temac_ior()/temac_iow() function pointers above */
1236 diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
1237 index 21c1b4322ea7..eb480204cdbe 100644
1238 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c
1239 +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
1240 @@ -51,6 +51,7 @@
1241 #include <linux/ip.h>
1242 #include <linux/slab.h>
1243 #include <linux/interrupt.h>
1244 +#include <linux/workqueue.h>
1245 #include <linux/dma-mapping.h>
1246 #include <linux/processor.h>
1247 #include <linux/platform_data/xilinx-ll-temac.h>
1248 @@ -367,6 +368,8 @@ static int temac_dma_bd_init(struct net_device *ndev)
1249 skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
1250 XTE_MAX_JUMBO_FRAME_SIZE,
1251 DMA_FROM_DEVICE);
1252 + if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
1253 + goto out;
1254 lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
1255 lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
1256 lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
1257 @@ -387,12 +390,13 @@ static int temac_dma_bd_init(struct net_device *ndev)
1258 lp->tx_bd_next = 0;
1259 lp->tx_bd_tail = 0;
1260 lp->rx_bd_ci = 0;
1261 + lp->rx_bd_tail = RX_BD_NUM - 1;
1262
1263 /* Enable RX DMA transfers */
1264 wmb();
1265 lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
1266 lp->dma_out(lp, RX_TAILDESC_PTR,
1267 - lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
1268 + lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
1269
1270 /* Prepare for TX DMA transfer */
1271 lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
1272 @@ -788,6 +792,9 @@ static void temac_start_xmit_done(struct net_device *ndev)
1273 stat = be32_to_cpu(cur_p->app0);
1274 }
1275
1276 + /* Matches barrier in temac_start_xmit */
1277 + smp_mb();
1278 +
1279 netif_wake_queue(ndev);
1280 }
1281
1282 @@ -830,9 +837,19 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1283 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
1284
1285 if (temac_check_tx_bd_space(lp, num_frag + 1)) {
1286 - if (!netif_queue_stopped(ndev))
1287 - netif_stop_queue(ndev);
1288 - return NETDEV_TX_BUSY;
1289 + if (netif_queue_stopped(ndev))
1290 + return NETDEV_TX_BUSY;
1291 +
1292 + netif_stop_queue(ndev);
1293 +
1294 + /* Matches barrier in temac_start_xmit_done */
1295 + smp_mb();
1296 +
1297 + /* Space might have just been freed - check again */
1298 + if (temac_check_tx_bd_space(lp, num_frag))
1299 + return NETDEV_TX_BUSY;
1300 +
1301 + netif_wake_queue(ndev);
1302 }
1303
1304 cur_p->app0 = 0;
1305 @@ -850,12 +867,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1306 skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
1307 skb_headlen(skb), DMA_TO_DEVICE);
1308 cur_p->len = cpu_to_be32(skb_headlen(skb));
1309 + if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
1310 + dev_kfree_skb_any(skb);
1311 + ndev->stats.tx_dropped++;
1312 + return NETDEV_TX_OK;
1313 + }
1314 cur_p->phys = cpu_to_be32(skb_dma_addr);
1315 ptr_to_txbd((void *)skb, cur_p);
1316
1317 for (ii = 0; ii < num_frag; ii++) {
1318 - lp->tx_bd_tail++;
1319 - if (lp->tx_bd_tail >= TX_BD_NUM)
1320 + if (++lp->tx_bd_tail >= TX_BD_NUM)
1321 lp->tx_bd_tail = 0;
1322
1323 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
1324 @@ -863,6 +884,27 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1325 skb_frag_address(frag),
1326 skb_frag_size(frag),
1327 DMA_TO_DEVICE);
1328 + if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
1329 + if (--lp->tx_bd_tail < 0)
1330 + lp->tx_bd_tail = TX_BD_NUM - 1;
1331 + cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
1332 + while (--ii >= 0) {
1333 + --frag;
1334 + dma_unmap_single(ndev->dev.parent,
1335 + be32_to_cpu(cur_p->phys),
1336 + skb_frag_size(frag),
1337 + DMA_TO_DEVICE);
1338 + if (--lp->tx_bd_tail < 0)
1339 + lp->tx_bd_tail = TX_BD_NUM - 1;
1340 + cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
1341 + }
1342 + dma_unmap_single(ndev->dev.parent,
1343 + be32_to_cpu(cur_p->phys),
1344 + skb_headlen(skb), DMA_TO_DEVICE);
1345 + dev_kfree_skb_any(skb);
1346 + ndev->stats.tx_dropped++;
1347 + return NETDEV_TX_OK;
1348 + }
1349 cur_p->phys = cpu_to_be32(skb_dma_addr);
1350 cur_p->len = cpu_to_be32(skb_frag_size(frag));
1351 cur_p->app0 = 0;
1352 @@ -884,31 +926,56 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1353 return NETDEV_TX_OK;
1354 }
1355
1356 +static int ll_temac_recv_buffers_available(struct temac_local *lp)
1357 +{
1358 + int available;
1359 +
1360 + if (!lp->rx_skb[lp->rx_bd_ci])
1361 + return 0;
1362 + available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
1363 + if (available <= 0)
1364 + available += RX_BD_NUM;
1365 + return available;
1366 +}
1367
1368 static void ll_temac_recv(struct net_device *ndev)
1369 {
1370 struct temac_local *lp = netdev_priv(ndev);
1371 - struct sk_buff *skb, *new_skb;
1372 - unsigned int bdstat;
1373 - struct cdmac_bd *cur_p;
1374 - dma_addr_t tail_p, skb_dma_addr;
1375 - int length;
1376 unsigned long flags;
1377 + int rx_bd;
1378 + bool update_tail = false;
1379
1380 spin_lock_irqsave(&lp->rx_lock, flags);
1381
1382 - tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1383 - cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1384 -
1385 - bdstat = be32_to_cpu(cur_p->app0);
1386 - while ((bdstat & STS_CTRL_APP0_CMPLT)) {
1387 + /* Process all received buffers, passing them on network
1388 + * stack. After this, the buffer descriptors will be in an
1389 + * un-allocated stage, where no skb is allocated for it, and
1390 + * they are therefore not available for TEMAC/DMA.
1391 + */
1392 + do {
1393 + struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
1394 + struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
1395 + unsigned int bdstat = be32_to_cpu(bd->app0);
1396 + int length;
1397 +
1398 + /* While this should not normally happen, we can end
1399 + * here when GFP_ATOMIC allocations fail, and we
1400 + * therefore have un-allocated buffers.
1401 + */
1402 + if (!skb)
1403 + break;
1404
1405 - skb = lp->rx_skb[lp->rx_bd_ci];
1406 - length = be32_to_cpu(cur_p->app4) & 0x3FFF;
1407 + /* Loop over all completed buffer descriptors */
1408 + if (!(bdstat & STS_CTRL_APP0_CMPLT))
1409 + break;
1410
1411 - dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
1412 + dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
1413 XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
1414 + /* The buffer is not valid for DMA anymore */
1415 + bd->phys = 0;
1416 + bd->len = 0;
1417
1418 + length = be32_to_cpu(bd->app4) & 0x3FFF;
1419 skb_put(skb, length);
1420 skb->protocol = eth_type_trans(skb, ndev);
1421 skb_checksum_none_assert(skb);
1422 @@ -923,43 +990,102 @@ static void ll_temac_recv(struct net_device *ndev)
1423 * (back) for proper IP checksum byte order
1424 * (be16).
1425 */
1426 - skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
1427 + skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
1428 skb->ip_summed = CHECKSUM_COMPLETE;
1429 }
1430
1431 if (!skb_defer_rx_timestamp(skb))
1432 netif_rx(skb);
1433 + /* The skb buffer is now owned by network stack above */
1434 + lp->rx_skb[lp->rx_bd_ci] = NULL;
1435
1436 ndev->stats.rx_packets++;
1437 ndev->stats.rx_bytes += length;
1438
1439 - new_skb = netdev_alloc_skb_ip_align(ndev,
1440 - XTE_MAX_JUMBO_FRAME_SIZE);
1441 - if (!new_skb) {
1442 - spin_unlock_irqrestore(&lp->rx_lock, flags);
1443 - return;
1444 + rx_bd = lp->rx_bd_ci;
1445 + if (++lp->rx_bd_ci >= RX_BD_NUM)
1446 + lp->rx_bd_ci = 0;
1447 + } while (rx_bd != lp->rx_bd_tail);
1448 +
1449 + /* DMA operations will halt when the last buffer descriptor is
1450 + * processed (ie. the one pointed to by RX_TAILDESC_PTR).
1451 + * When that happens, no more interrupt events will be
1452 + * generated. No IRQ_COAL or IRQ_DLY, and not even an
1453 + * IRQ_ERR. To avoid stalling, we schedule a delayed work
1454 + * when there is a potential risk of that happening. The work
1455 + * will call this function, and thus re-schedule itself until
1456 + * enough buffers are available again.
1457 + */
1458 + if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
1459 + schedule_delayed_work(&lp->restart_work, HZ / 1000);
1460 +
1461 + /* Allocate new buffers for those buffer descriptors that were
1462 + * passed to network stack. Note that GFP_ATOMIC allocations
1463 + * can fail (e.g. when a larger burst of GFP_ATOMIC
1464 + * allocations occurs), so while we try to allocate all
1465 + * buffers in the same interrupt where they were processed, we
1466 + * continue with what we could get in case of allocation
1467 + * failure. Allocation of remaining buffers will be retried
1468 + * in following calls.
1469 + */
1470 + while (1) {
1471 + struct sk_buff *skb;
1472 + struct cdmac_bd *bd;
1473 + dma_addr_t skb_dma_addr;
1474 +
1475 + rx_bd = lp->rx_bd_tail + 1;
1476 + if (rx_bd >= RX_BD_NUM)
1477 + rx_bd = 0;
1478 + bd = &lp->rx_bd_v[rx_bd];
1479 +
1480 + if (bd->phys)
1481 + break; /* All skb's allocated */
1482 +
1483 + skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
1484 + if (!skb) {
1485 + dev_warn(&ndev->dev, "skb alloc failed\n");
1486 + break;
1487 }
1488
1489 - cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
1490 - skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
1491 + skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
1492 XTE_MAX_JUMBO_FRAME_SIZE,
1493 DMA_FROM_DEVICE);
1494 - cur_p->phys = cpu_to_be32(skb_dma_addr);
1495 - cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
1496 - lp->rx_skb[lp->rx_bd_ci] = new_skb;
1497 + if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
1498 + skb_dma_addr))) {
1499 + dev_kfree_skb_any(skb);
1500 + break;
1501 + }
1502
1503 - lp->rx_bd_ci++;
1504 - if (lp->rx_bd_ci >= RX_BD_NUM)
1505 - lp->rx_bd_ci = 0;
1506 + bd->phys = cpu_to_be32(skb_dma_addr);
1507 + bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
1508 + bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
1509 + lp->rx_skb[rx_bd] = skb;
1510
1511 - cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1512 - bdstat = be32_to_cpu(cur_p->app0);
1513 + lp->rx_bd_tail = rx_bd;
1514 + update_tail = true;
1515 + }
1516 +
1517 + /* Move tail pointer when buffers have been allocated */
1518 + if (update_tail) {
1519 + lp->dma_out(lp, RX_TAILDESC_PTR,
1520 + lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
1521 }
1522 - lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
1523
1524 spin_unlock_irqrestore(&lp->rx_lock, flags);
1525 }
1526
1527 +/* Function scheduled to ensure a restart in case of DMA halt
1528 + * condition caused by running out of buffer descriptors.
1529 + */
1530 +static void ll_temac_restart_work_func(struct work_struct *work)
1531 +{
1532 + struct temac_local *lp = container_of(work, struct temac_local,
1533 + restart_work.work);
1534 + struct net_device *ndev = lp->ndev;
1535 +
1536 + ll_temac_recv(ndev);
1537 +}
1538 +
1539 static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
1540 {
1541 struct net_device *ndev = _ndev;
1542 @@ -1052,6 +1178,8 @@ static int temac_stop(struct net_device *ndev)
1543
1544 dev_dbg(&ndev->dev, "temac_close()\n");
1545
1546 + cancel_delayed_work_sync(&lp->restart_work);
1547 +
1548 free_irq(lp->tx_irq, ndev);
1549 free_irq(lp->rx_irq, ndev);
1550
1551 @@ -1184,6 +1312,7 @@ static int temac_probe(struct platform_device *pdev)
1552 lp->dev = &pdev->dev;
1553 lp->options = XTE_OPTION_DEFAULTS;
1554 spin_lock_init(&lp->rx_lock);
1555 + INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
1556
1557 /* Setup mutex for synchronization of indirect register access */
1558 if (pdata) {
1559 @@ -1290,6 +1419,7 @@ static int temac_probe(struct platform_device *pdev)
1560 */
1561 lp->tx_chnl_ctrl = 0x10220000;
1562 lp->rx_chnl_ctrl = 0xff070000;
1563 + lp->coalesce_count_rx = 0x07;
1564
1565 /* Finished with the DMA node; drop the reference */
1566 of_node_put(dma_np);
1567 @@ -1321,11 +1451,14 @@ static int temac_probe(struct platform_device *pdev)
1568 (pdata->tx_irq_count << 16);
1569 else
1570 lp->tx_chnl_ctrl = 0x10220000;
1571 - if (pdata->rx_irq_timeout || pdata->rx_irq_count)
1572 + if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
1573 lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
1574 (pdata->rx_irq_count << 16);
1575 - else
1576 + lp->coalesce_count_rx = pdata->rx_irq_count;
1577 + } else {
1578 lp->rx_chnl_ctrl = 0xff070000;
1579 + lp->coalesce_count_rx = 0x07;
1580 + }
1581 }
1582
1583 /* Error handle returned DMA RX and TX interrupts */
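[Editorial note] The refill logic added to ll_temac_recv() above walks the ring from the current tail, stops either at the first descriptor that is still populated or when a GFP_ATOMIC allocation fails, and only then publishes the new tail with a single RX_TAILDESC_PTR write. As a rough illustration of that pattern only (plain userspace C, not driver code; every name below is made up):

/* Minimal userspace sketch of the refill strategy used above: walk the
 * ring from tail+1, stop at the first slot that is still in use or when
 * allocation fails, and publish the new tail once at the end. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

struct slot {
	void *buf;	/* NULL means "free, needs a fresh buffer" */
};

static struct slot ring[RING_SIZE];
static unsigned int tail;	/* last slot handed to the "hardware" */

static bool refill_ring(void)
{
	bool moved = false;

	for (;;) {
		unsigned int next = (tail + 1) % RING_SIZE;
		struct slot *s = &ring[next];

		if (s->buf)		/* ring is fully populated again */
			break;

		s->buf = malloc(64);	/* stand-in for netdev_alloc_skb_ip_align() */
		if (!s->buf)		/* allocation failed: retry on a later pass */
			break;

		tail = next;		/* remember how far we got ... */
		moved = true;
	}

	/* ... and publish the new tail once, like the single
	 * RX_TAILDESC_PTR write after the allocation loop above. */
	if (moved)
		printf("tail descriptor now at %u\n", tail);

	return moved;
}

int main(void)
{
	refill_ring();	/* buffers intentionally stay "owned by the ring" */
	return 0;
}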
1584 diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
1585 index 7ada1fd9ca71..2339b9381d21 100644
1586 --- a/drivers/net/phy/mscc.c
1587 +++ b/drivers/net/phy/mscc.c
1588 @@ -302,11 +302,11 @@ enum rgmii_rx_clock_delay {
1589 BIT(VSC8531_FORCE_LED_OFF) | \
1590 BIT(VSC8531_FORCE_LED_ON))
1591
1592 -#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin"
1593 +#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin"
1594 #define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
1595 #define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
1596
1597 -#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin"
1598 +#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin"
1599 #define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
1600 #define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
1601
1602 diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
1603 index 61d7e0d1d77d..8e56a41dd758 100644
1604 --- a/drivers/net/slip/slip.c
1605 +++ b/drivers/net/slip/slip.c
1606 @@ -863,7 +863,10 @@ err_free_chan:
1607 tty->disc_data = NULL;
1608 clear_bit(SLF_INUSE, &sl->flags);
1609 sl_free_netdev(sl->dev);
1610 + /* do not call free_netdev before rtnl_unlock */
1611 + rtnl_unlock();
1612 free_netdev(sl->dev);
1613 + return err;
1614
1615 err_exit:
1616 rtnl_unlock();
1617 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
1618 index 3b7a3b8a5e06..5754bb6ca0ee 100644
1619 --- a/drivers/net/usb/qmi_wwan.c
1620 +++ b/drivers/net/usb/qmi_wwan.c
1621 @@ -337,6 +337,9 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
1622 netdev_dbg(net, "mode: raw IP\n");
1623 } else if (!net->header_ops) { /* don't bother if already set */
1624 ether_setup(net);
1625 + /* Restoring min/max mtu values set originally by usbnet */
1626 + net->min_mtu = 0;
1627 + net->max_mtu = ETH_MAX_MTU;
1628 clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
1629 netdev_dbg(net, "mode: Ethernet\n");
1630 }
1631 diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
1632 index 9c5f7c9178c6..2b865c6423e2 100644
1633 --- a/drivers/scsi/libfc/fc_disc.c
1634 +++ b/drivers/scsi/libfc/fc_disc.c
1635 @@ -628,6 +628,8 @@ redisc:
1636 }
1637 out:
1638 kref_put(&rdata->kref, fc_rport_destroy);
1639 + if (!IS_ERR(fp))
1640 + fc_frame_free(fp);
1641 }
1642
1643 /**
1644 diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
1645 index 1ce39de917f0..88c5e6361aa0 100644
1646 --- a/drivers/watchdog/wdat_wdt.c
1647 +++ b/drivers/watchdog/wdat_wdt.c
1648 @@ -54,6 +54,13 @@ module_param(nowayout, bool, 0);
1649 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
1650 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
1651
1652 +#define WDAT_DEFAULT_TIMEOUT 30
1653 +
1654 +static int timeout = WDAT_DEFAULT_TIMEOUT;
1655 +module_param(timeout, int, 0);
1656 +MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
1657 + __MODULE_STRING(WDAT_DEFAULT_TIMEOUT) ")");
1658 +
1659 static int wdat_wdt_read(struct wdat_wdt *wdat,
1660 const struct wdat_instruction *instr, u32 *value)
1661 {
1662 @@ -438,6 +445,22 @@ static int wdat_wdt_probe(struct platform_device *pdev)
1663
1664 platform_set_drvdata(pdev, wdat);
1665
1666 + /*
1667 + * Set initial timeout so that userspace has time to configure the
1668 + * watchdog properly after it has opened the device. In some cases
1669 + * the BIOS default is too short and causes immediate reboot.
1670 + */
1671 + if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms ||
1672 + timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) {
1673 + dev_warn(dev, "Invalid timeout %d given, using %d\n",
1674 + timeout, WDAT_DEFAULT_TIMEOUT);
1675 + timeout = WDAT_DEFAULT_TIMEOUT;
1676 + }
1677 +
1678 + ret = wdat_wdt_set_timeout(&wdat->wdd, timeout);
1679 + if (ret)
1680 + return ret;
1681 +
1682 watchdog_set_nowayout(&wdat->wdd, nowayout);
1683 return devm_watchdog_register_device(dev, &wdat->wdd);
1684 }
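[Editorial note] The wdat_wdt probe change above rejects a module-parameter timeout that falls outside the hardware's min/max heartbeat window and falls back to the 30 second default before programming the device. A minimal sketch of that range check, with placeholder names and limits (not the driver's structures):

#include <stdio.h>

#define DEFAULT_TIMEOUT 30	/* seconds, mirrors WDAT_DEFAULT_TIMEOUT */

static int validate_timeout(int timeout, unsigned int min_ms, unsigned int max_ms)
{
	/* Out-of-range values are replaced by the default, as in the probe. */
	if ((unsigned int)timeout * 1000 < min_ms ||
	    (unsigned int)timeout * 1000 > max_ms) {
		fprintf(stderr, "Invalid timeout %d given, using %d\n",
			timeout, DEFAULT_TIMEOUT);
		return DEFAULT_TIMEOUT;
	}
	return timeout;
}

int main(void)
{
	/* e.g. hardware that accepts 1..60 seconds */
	printf("%d\n", validate_timeout(120, 1000, 60000));	/* -> 30 */
	printf("%d\n", validate_timeout(45, 1000, 60000));	/* -> 45 */
	return 0;
}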
1685 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
1686 index b17f05ae6011..de992a70ddfe 100644
1687 --- a/fs/jbd2/transaction.c
1688 +++ b/fs/jbd2/transaction.c
1689 @@ -1079,8 +1079,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
1690 /* For undo access buffer must have data copied */
1691 if (undo && !jh->b_committed_data)
1692 goto out;
1693 - if (jh->b_transaction != handle->h_transaction &&
1694 - jh->b_next_transaction != handle->h_transaction)
1695 + if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
1696 + READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
1697 goto out;
1698 /*
1699 * There are two reasons for the barrier here:
1700 @@ -2535,8 +2535,8 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
1701 * our jh reference and thus __jbd2_journal_file_buffer() must not
1702 * take a new one.
1703 */
1704 - jh->b_transaction = jh->b_next_transaction;
1705 - jh->b_next_transaction = NULL;
1706 + WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
1707 + WRITE_ONCE(jh->b_next_transaction, NULL);
1708 if (buffer_freed(bh))
1709 jlist = BJ_Forget;
1710 else if (jh->b_modified)
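[Editorial note] The jbd2 hunks above pair lockless readers with WRITE_ONCE() on the writer side, so the compiler cannot tear or re-fetch jh->b_transaction / jh->b_next_transaction while jbd2_write_access_granted() inspects them without the journal locks. A loose userspace analogue of that pairing, with C11 relaxed atomics standing in for READ_ONCE/WRITE_ONCE (none of the names below are jbd2's):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct handle { int id; };

struct journal_head_like {
	_Atomic(struct handle *) transaction;
	_Atomic(struct handle *) next_transaction;
};

static int belongs_to(struct journal_head_like *jh, struct handle *h)
{
	/* Lockless reader: one untorn load per pointer, like READ_ONCE(). */
	struct handle *t  = atomic_load_explicit(&jh->transaction,
						 memory_order_relaxed);
	struct handle *nt = atomic_load_explicit(&jh->next_transaction,
						 memory_order_relaxed);

	return t == h || nt == h;
}

static void refile(struct journal_head_like *jh)
{
	/* Writer (holds the real locks in jbd2): single untorn stores,
	 * like WRITE_ONCE(), so the reader never sees a torn pointer. */
	struct handle *nt = atomic_load_explicit(&jh->next_transaction,
						 memory_order_relaxed);

	atomic_store_explicit(&jh->transaction, nt, memory_order_relaxed);
	atomic_store_explicit(&jh->next_transaction, NULL, memory_order_relaxed);
}

int main(void)
{
	struct handle h = { 1 };
	struct journal_head_like jh;

	atomic_init(&jh.transaction, NULL);
	atomic_init(&jh.next_transaction, &h);

	refile(&jh);
	printf("%d\n", belongs_to(&jh, &h));	/* prints 1 */
	return 0;
}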
1711 diff --git a/kernel/signal.c b/kernel/signal.c
1712 index bcd46f547db3..eea748174ade 100644
1713 --- a/kernel/signal.c
1714 +++ b/kernel/signal.c
1715 @@ -413,27 +413,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
1716 {
1717 struct sigqueue *q = NULL;
1718 struct user_struct *user;
1719 + int sigpending;
1720
1721 /*
1722 * Protect access to @t credentials. This can go away when all
1723 * callers hold rcu read lock.
1724 + *
1725 + * NOTE! A pending signal will hold on to the user refcount,
1726 + * and we get/put the refcount only when the sigpending count
1727 + * changes from/to zero.
1728 */
1729 rcu_read_lock();
1730 - user = get_uid(__task_cred(t)->user);
1731 - atomic_inc(&user->sigpending);
1732 + user = __task_cred(t)->user;
1733 + sigpending = atomic_inc_return(&user->sigpending);
1734 + if (sigpending == 1)
1735 + get_uid(user);
1736 rcu_read_unlock();
1737
1738 - if (override_rlimit ||
1739 - atomic_read(&user->sigpending) <=
1740 - task_rlimit(t, RLIMIT_SIGPENDING)) {
1741 + if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
1742 q = kmem_cache_alloc(sigqueue_cachep, flags);
1743 } else {
1744 print_dropped_signal(sig);
1745 }
1746
1747 if (unlikely(q == NULL)) {
1748 - atomic_dec(&user->sigpending);
1749 - free_uid(user);
1750 + if (atomic_dec_and_test(&user->sigpending))
1751 + free_uid(user);
1752 } else {
1753 INIT_LIST_HEAD(&q->list);
1754 q->flags = 0;
1755 @@ -447,8 +452,8 @@ static void __sigqueue_free(struct sigqueue *q)
1756 {
1757 if (q->flags & SIGQUEUE_PREALLOC)
1758 return;
1759 - atomic_dec(&q->user->sigpending);
1760 - free_uid(q->user);
1761 + if (atomic_dec_and_test(&q->user->sigpending))
1762 + free_uid(q->user);
1763 kmem_cache_free(sigqueue_cachep, q);
1764 }
1765
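[Editorial note] The signal.c change above stops taking a uid reference per queued signal: get_uid() now runs only when user->sigpending goes 0 -> 1, and free_uid() only when it drops back to 0, so pending signals pin the user struct through the counter instead. A userspace sketch of that "reference held by a non-zero counter" pattern (illustrative names only; the kernel additionally relies on RCU around the cred access):

#include <stdatomic.h>
#include <stdio.h>

struct user_like {
	atomic_int refcount;	/* stands in for the uid refcount */
	atomic_int sigpending;	/* number of queued signals */
};

static void queue_signal(struct user_like *u)
{
	/* First pending signal pins the user structure (get_uid() analogue). */
	if (atomic_fetch_add(&u->sigpending, 1) + 1 == 1)
		atomic_fetch_add(&u->refcount, 1);
}

static void free_signal(struct user_like *u)
{
	/* Last pending signal releases the pin again (free_uid() analogue). */
	if (atomic_fetch_sub(&u->sigpending, 1) - 1 == 0)
		atomic_fetch_sub(&u->refcount, 1);
}

int main(void)
{
	struct user_like u;

	atomic_init(&u.refcount, 1);	/* one long-lived reference */
	atomic_init(&u.sigpending, 0);

	queue_signal(&u);
	queue_signal(&u);
	free_signal(&u);
	free_signal(&u);

	printf("refcount back to %d, pending %d\n",
	       atomic_load(&u.refcount), atomic_load(&u.sigpending));
	return 0;
}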
1766 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
1767 index a31be3fce3e8..6495800fb92a 100644
1768 --- a/kernel/trace/trace_events_hist.c
1769 +++ b/kernel/trace/trace_events_hist.c
1770 @@ -811,6 +811,29 @@ static const char *synth_field_fmt(char *type)
1771 return fmt;
1772 }
1773
1774 +static void print_synth_event_num_val(struct trace_seq *s,
1775 + char *print_fmt, char *name,
1776 + int size, u64 val, char *space)
1777 +{
1778 + switch (size) {
1779 + case 1:
1780 + trace_seq_printf(s, print_fmt, name, (u8)val, space);
1781 + break;
1782 +
1783 + case 2:
1784 + trace_seq_printf(s, print_fmt, name, (u16)val, space);
1785 + break;
1786 +
1787 + case 4:
1788 + trace_seq_printf(s, print_fmt, name, (u32)val, space);
1789 + break;
1790 +
1791 + default:
1792 + trace_seq_printf(s, print_fmt, name, val, space);
1793 + break;
1794 + }
1795 +}
1796 +
1797 static enum print_line_t print_synth_event(struct trace_iterator *iter,
1798 int flags,
1799 struct trace_event *event)
1800 @@ -849,10 +872,13 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
1801 } else {
1802 struct trace_print_flags __flags[] = {
1803 __def_gfpflag_names, {-1, NULL} };
1804 + char *space = (i == se->n_fields - 1 ? "" : " ");
1805
1806 - trace_seq_printf(s, print_fmt, se->fields[i]->name,
1807 - entry->fields[n_u64],
1808 - i == se->n_fields - 1 ? "" : " ");
1809 + print_synth_event_num_val(s, print_fmt,
1810 + se->fields[i]->name,
1811 + se->fields[i]->size,
1812 + entry->fields[n_u64],
1813 + space);
1814
1815 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
1816 trace_seq_puts(s, " (");
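[Editorial note] print_synth_event_num_val() above narrows the 64-bit storage slot back to the field's declared size before formatting, so a 1/2/4-byte synthetic event field no longer prints stale high bytes from its u64 slot. A small illustrative userspace equivalent (names and formats are placeholders, not the tracing code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void print_num_val(const char *name, int size, uint64_t val)
{
	/* Cast the 64-bit slot down to the field's declared width. */
	switch (size) {
	case 1:
		printf("%s=%" PRIu8 "\n", name, (uint8_t)val);
		break;
	case 2:
		printf("%s=%" PRIu16 "\n", name, (uint16_t)val);
		break;
	case 4:
		printf("%s=%" PRIu32 "\n", name, (uint32_t)val);
		break;
	default:
		printf("%s=%" PRIu64 "\n", name, val);
		break;
	}
}

int main(void)
{
	/* A 1-byte field whose 64-bit slot still holds stale high bytes. */
	print_num_val("flag", 1, 0xdeadbeef01ULL);	/* prints flag=1 */
	return 0;
}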
1817 diff --git a/mm/slub.c b/mm/slub.c
1818 index 20d72cb20515..3ca4a223f44c 100644
1819 --- a/mm/slub.c
1820 +++ b/mm/slub.c
1821 @@ -3154,6 +3154,15 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
1822 void *object = c->freelist;
1823
1824 if (unlikely(!object)) {
1825 + /*
1826 + * We may have removed an object from c->freelist using
1827 + * the fastpath in the previous iteration; in that case,
1828 + * c->tid has not been bumped yet.
1829 + * Since ___slab_alloc() may reenable interrupts while
1830 + * allocating memory, we should bump c->tid now.
1831 + */
1832 + c->tid = next_tid(c->tid);
1833 +
1834 /*
1835 * Invoking slow path likely have side-effect
1836 * of re-populating per CPU c->freelist
1837 diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
1838 index 376882215919..0bd10a1f477f 100644
1839 --- a/net/ipv4/cipso_ipv4.c
1840 +++ b/net/ipv4/cipso_ipv4.c
1841 @@ -1724,6 +1724,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
1842 {
1843 unsigned char optbuf[sizeof(struct ip_options) + 40];
1844 struct ip_options *opt = (struct ip_options *)optbuf;
1845 + int res;
1846
1847 if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
1848 return;
1849 @@ -1735,7 +1736,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
1850
1851 memset(opt, 0, sizeof(struct ip_options));
1852 opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
1853 - if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
1854 + rcu_read_lock();
1855 + res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
1856 + rcu_read_unlock();
1857 +
1858 + if (res)
1859 return;
1860
1861 if (gateway)
1862 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
1863 index 0e05ff037672..0ba98ad9bc85 100644
1864 --- a/net/mac80211/rx.c
1865 +++ b/net/mac80211/rx.c
1866 @@ -4114,7 +4114,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
1867
1868 lockdep_assert_held(&local->sta_mtx);
1869
1870 - list_for_each_entry_rcu(sta, &local->sta_list, list) {
1871 + list_for_each_entry(sta, &local->sta_list, list) {
1872 if (sdata != sta->sdata &&
1873 (!sta->sdata->bss || sta->sdata->bss != sdata->bss))
1874 continue;
1875 diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
1876 index 1b68a131083c..8c835ad63729 100644
1877 --- a/net/netfilter/xt_hashlimit.c
1878 +++ b/net/netfilter/xt_hashlimit.c
1879 @@ -358,21 +358,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
1880 return 0;
1881 }
1882
1883 -static bool select_all(const struct xt_hashlimit_htable *ht,
1884 - const struct dsthash_ent *he)
1885 -{
1886 - return true;
1887 -}
1888 -
1889 -static bool select_gc(const struct xt_hashlimit_htable *ht,
1890 - const struct dsthash_ent *he)
1891 -{
1892 - return time_after_eq(jiffies, he->expires);
1893 -}
1894 -
1895 -static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
1896 - bool (*select)(const struct xt_hashlimit_htable *ht,
1897 - const struct dsthash_ent *he))
1898 +static void htable_selective_cleanup(struct xt_hashlimit_htable *ht, bool select_all)
1899 {
1900 unsigned int i;
1901
1902 @@ -382,7 +368,7 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
1903
1904 spin_lock_bh(&ht->lock);
1905 hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
1906 - if ((*select)(ht, dh))
1907 + if (time_after_eq(jiffies, dh->expires) || select_all)
1908 dsthash_free(ht, dh);
1909 }
1910 spin_unlock_bh(&ht->lock);
1911 @@ -396,7 +382,7 @@ static void htable_gc(struct work_struct *work)
1912
1913 ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
1914
1915 - htable_selective_cleanup(ht, select_gc);
1916 + htable_selective_cleanup(ht, false);
1917
1918 queue_delayed_work(system_power_efficient_wq,
1919 &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
1920 @@ -416,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
1921 remove_proc_entry(hinfo->name, parent);
1922 }
1923
1924 -static void htable_destroy(struct xt_hashlimit_htable *hinfo)
1925 -{
1926 - cancel_delayed_work_sync(&hinfo->gc_work);
1927 - htable_remove_proc_entry(hinfo);
1928 - htable_selective_cleanup(hinfo, select_all);
1929 - kfree(hinfo->name);
1930 - vfree(hinfo);
1931 -}
1932 -
1933 static struct xt_hashlimit_htable *htable_find_get(struct net *net,
1934 const char *name,
1935 u_int8_t family)
1936 @@ -446,8 +423,13 @@ static void htable_put(struct xt_hashlimit_htable *hinfo)
1937 {
1938 if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
1939 hlist_del(&hinfo->node);
1940 + htable_remove_proc_entry(hinfo);
1941 mutex_unlock(&hashlimit_mutex);
1942 - htable_destroy(hinfo);
1943 +
1944 + cancel_delayed_work_sync(&hinfo->gc_work);
1945 + htable_selective_cleanup(hinfo, true);
1946 + kfree(hinfo->name);
1947 + vfree(hinfo);
1948 }
1949 }
1950
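[Editorial note] The xt_hashlimit cleanup above drops the two single-use selector callbacks (select_all/select_gc), passes a plain boolean instead, and folds the expiry test into htable_selective_cleanup() itself. A toy userspace sketch of the same simplification (placeholder names, not netfilter code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct entry {
	time_t expires;
	bool freed;
};

static void selective_cleanup(struct entry *tab, int n, bool select_all)
{
	time_t now = time(NULL);

	for (int i = 0; i < n; i++) {
		/* Either free everything (teardown) or only expired entries (gc). */
		if (select_all || now >= tab[i].expires)
			tab[i].freed = true;	/* dsthash_free() analogue */
	}
}

int main(void)
{
	struct entry tab[2] = {
		{ .expires = 0 },		/* already expired */
		{ .expires = time(NULL) + 60 },	/* still valid */
	};

	selective_cleanup(tab, 2, false);	/* gc pass: frees tab[0] only */
	printf("%d %d\n", tab[0].freed, tab[1].freed);

	selective_cleanup(tab, 2, true);	/* teardown: frees everything */
	printf("%d %d\n", tab[0].freed, tab[1].freed);
	return 0;
}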
1951 diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
1952 index 88f98f27ad88..3d24d45be5f4 100644
1953 --- a/net/qrtr/qrtr.c
1954 +++ b/net/qrtr/qrtr.c
1955 @@ -196,7 +196,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
1956 hdr->size = cpu_to_le32(len);
1957 hdr->confirm_rx = 0;
1958
1959 - skb_put_padto(skb, ALIGN(len, 4));
1960 + skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr));
1961
1962 mutex_lock(&node->ep_lock);
1963 if (node->ep)
1964 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
1965 index fff9a74891fc..1a8218f1bbe0 100644
1966 --- a/net/wireless/reg.c
1967 +++ b/net/wireless/reg.c
1968 @@ -2276,7 +2276,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
1969 break;
1970 }
1971
1972 - if (IS_ERR(reg_rule)) {
1973 + if (IS_ERR_OR_NULL(reg_rule)) {
1974 pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
1975 chan->center_freq);
1976 if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
1977 diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
1978 index f1053630bb6f..2af9d39a9716 100644
1979 --- a/tools/testing/selftests/rseq/Makefile
1980 +++ b/tools/testing/selftests/rseq/Makefile
1981 @@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
1982 CLANG_FLAGS += -no-integrated-as
1983 endif
1984
1985 -CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ \
1986 +CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \
1987 $(CLANG_FLAGS)
1988 LDLIBS += -lpthread
1989