Magellan Linux

Annotation of /trunk/kernel-alx/patches-5.4/0209-5.4.110-all-fixes.patch


Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 178834 byte(s)
-add missing
1 niro 3637 diff --git a/Makefile b/Makefile
2     index e037662c369ba..b028b5ead5f7e 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 5
8     PATCHLEVEL = 4
9     -SUBLEVEL = 109
10     +SUBLEVEL = 110
11     EXTRAVERSION =
12     NAME = Kleptomaniac Octopus
13    
14     diff --git a/arch/powerpc/include/asm/cpu_has_feature.h b/arch/powerpc/include/asm/cpu_has_feature.h
15     index 7897d16e09904..727d4b3219379 100644
16     --- a/arch/powerpc/include/asm/cpu_has_feature.h
17     +++ b/arch/powerpc/include/asm/cpu_has_feature.h
18     @@ -7,7 +7,7 @@
19     #include <linux/bug.h>
20     #include <asm/cputable.h>
21    
22     -static inline bool early_cpu_has_feature(unsigned long feature)
23     +static __always_inline bool early_cpu_has_feature(unsigned long feature)
24     {
25     return !!((CPU_FTRS_ALWAYS & feature) ||
26     (CPU_FTRS_POSSIBLE & cur_cpu_spec->cpu_features & feature));
27     @@ -46,7 +46,7 @@ static __always_inline bool cpu_has_feature(unsigned long feature)
28     return static_branch_likely(&cpu_feature_keys[i]);
29     }
30     #else
31     -static inline bool cpu_has_feature(unsigned long feature)
32     +static __always_inline bool cpu_has_feature(unsigned long feature)
33     {
34     return early_cpu_has_feature(feature);
35     }
36     diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
37     index 80828b95a51f0..d956f87fcb095 100644
38     --- a/arch/xtensa/kernel/coprocessor.S
39     +++ b/arch/xtensa/kernel/coprocessor.S
40     @@ -108,37 +108,6 @@
41    
42     .previous
43    
44     -/*
45     - * coprocessor_flush(struct thread_info*, index)
46     - * a2 a3
47     - *
48     - * Save coprocessor registers for coprocessor 'index'.
49     - * The register values are saved to or loaded from the coprocessor area
50     - * inside the task_info structure.
51     - *
52     - * Note that this function doesn't update the coprocessor_owner information!
53     - *
54     - */
55     -
56     -ENTRY(coprocessor_flush)
57     -
58     - /* reserve 4 bytes on stack to save a0 */
59     - abi_entry(4)
60     -
61     - s32i a0, a1, 0
62     - movi a0, .Lsave_cp_regs_jump_table
63     - addx8 a3, a3, a0
64     - l32i a4, a3, 4
65     - l32i a3, a3, 0
66     - add a2, a2, a4
67     - beqz a3, 1f
68     - callx0 a3
69     -1: l32i a0, a1, 0
70     -
71     - abi_ret(4)
72     -
73     -ENDPROC(coprocessor_flush)
74     -
75     /*
76     * Entry condition:
77     *
78     @@ -261,6 +230,39 @@ ENTRY(fast_coprocessor)
79    
80     ENDPROC(fast_coprocessor)
81    
82     + .text
83     +
84     +/*
85     + * coprocessor_flush(struct thread_info*, index)
86     + * a2 a3
87     + *
88     + * Save coprocessor registers for coprocessor 'index'.
89     + * The register values are saved to or loaded from the coprocessor area
90     + * inside the task_info structure.
91     + *
92     + * Note that this function doesn't update the coprocessor_owner information!
93     + *
94     + */
95     +
96     +ENTRY(coprocessor_flush)
97     +
98     + /* reserve 4 bytes on stack to save a0 */
99     + abi_entry(4)
100     +
101     + s32i a0, a1, 0
102     + movi a0, .Lsave_cp_regs_jump_table
103     + addx8 a3, a3, a0
104     + l32i a4, a3, 4
105     + l32i a3, a3, 0
106     + add a2, a2, a4
107     + beqz a3, 1f
108     + callx0 a3
109     +1: l32i a0, a1, 0
110     +
111     + abi_ret(4)
112     +
113     +ENDPROC(coprocessor_flush)
114     +
115     .data
116    
117     ENTRY(coprocessor_owner)
118     diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
119     index e0c4ef06ca917..94785083c018a 100644
120     --- a/drivers/base/power/runtime.c
121     +++ b/drivers/base/power/runtime.c
122     @@ -1663,8 +1663,8 @@ void pm_runtime_get_suppliers(struct device *dev)
123     device_links_read_lock_held())
124     if (link->flags & DL_FLAG_PM_RUNTIME) {
125     link->supplier_preactivated = true;
126     - refcount_inc(&link->rpm_active);
127     pm_runtime_get_sync(link->supplier);
128     + refcount_inc(&link->rpm_active);
129     }
130    
131     device_links_read_unlock(idx);
132     @@ -1677,6 +1677,8 @@ void pm_runtime_get_suppliers(struct device *dev)
133     void pm_runtime_put_suppliers(struct device *dev)
134     {
135     struct device_link *link;
136     + unsigned long flags;
137     + bool put;
138     int idx;
139    
140     idx = device_links_read_lock();
141     @@ -1685,7 +1687,11 @@ void pm_runtime_put_suppliers(struct device *dev)
142     device_links_read_lock_held())
143     if (link->supplier_preactivated) {
144     link->supplier_preactivated = false;
145     - if (refcount_dec_not_one(&link->rpm_active))
146     + spin_lock_irqsave(&dev->power.lock, flags);
147     + put = pm_runtime_status_suspended(dev) &&
148     + refcount_dec_not_one(&link->rpm_active);
149     + spin_unlock_irqrestore(&dev->power.lock, flags);
150     + if (put)
151     pm_runtime_put(link->supplier);
152     }
153    
154     diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
155     index e055893fd5c39..5c9e156cd0862 100644
156     --- a/drivers/extcon/extcon.c
157     +++ b/drivers/extcon/extcon.c
158     @@ -1241,6 +1241,7 @@ int extcon_dev_register(struct extcon_dev *edev)
159     sizeof(*edev->nh), GFP_KERNEL);
160     if (!edev->nh) {
161     ret = -ENOMEM;
162     + device_unregister(&edev->dev);
163     goto err_dev;
164     }
165    
166     diff --git a/drivers/firewire/nosy.c b/drivers/firewire/nosy.c
167     index 0cc7466736773..9ee747a85ee49 100644
168     --- a/drivers/firewire/nosy.c
169     +++ b/drivers/firewire/nosy.c
170     @@ -346,6 +346,7 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
171     struct client *client = file->private_data;
172     spinlock_t *client_list_lock = &client->lynx->client_list_lock;
173     struct nosy_stats stats;
174     + int ret;
175    
176     switch (cmd) {
177     case NOSY_IOC_GET_STATS:
178     @@ -360,11 +361,15 @@ nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
179     return 0;
180    
181     case NOSY_IOC_START:
182     + ret = -EBUSY;
183     spin_lock_irq(client_list_lock);
184     - list_add_tail(&client->link, &client->lynx->client_list);
185     + if (list_empty(&client->link)) {
186     + list_add_tail(&client->link, &client->lynx->client_list);
187     + ret = 0;
188     + }
189     spin_unlock_irq(client_list_lock);
190    
191     - return 0;
192     + return ret;
193    
194     case NOSY_IOC_STOP:
195     spin_lock_irq(client_list_lock);
196     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
197     index 6335bd4ae374a..fb47ddc6f7f4e 100644
198     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
199     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
200     @@ -2123,8 +2123,8 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
201     uint64_t eaddr;
202    
203     /* validate the parameters */
204     - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
205     - size == 0 || size & AMDGPU_GPU_PAGE_MASK)
206     + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
207     + size == 0 || size & ~PAGE_MASK)
208     return -EINVAL;
209    
210     /* make sure object fit at this offset */
211     @@ -2188,8 +2188,8 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
212     int r;
213    
214     /* validate the parameters */
215     - if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
216     - size == 0 || size & AMDGPU_GPU_PAGE_MASK)
217     + if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
218     + size == 0 || size & ~PAGE_MASK)
219     return -EINVAL;
220    
221     /* make sure object fit at this offset */
222     @@ -2333,7 +2333,7 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
223     after->start = eaddr + 1;
224     after->last = tmp->last;
225     after->offset = tmp->offset;
226     - after->offset += after->start - tmp->start;
227     + after->offset += (after->start - tmp->start) << PAGE_SHIFT;
228     after->flags = tmp->flags;
229     after->bo_va = tmp->bo_va;
230     list_add(&after->list, &tmp->bo_va->invalids);
231     diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
232     index 22164300122d5..a2b4463d84802 100644
233     --- a/drivers/net/can/Makefile
234     +++ b/drivers/net/can/Makefile
235     @@ -7,12 +7,7 @@ obj-$(CONFIG_CAN_VCAN) += vcan.o
236     obj-$(CONFIG_CAN_VXCAN) += vxcan.o
237     obj-$(CONFIG_CAN_SLCAN) += slcan.o
238    
239     -obj-$(CONFIG_CAN_DEV) += can-dev.o
240     -can-dev-y += dev.o
241     -can-dev-y += rx-offload.o
242     -
243     -can-dev-$(CONFIG_CAN_LEDS) += led.o
244     -
245     +obj-y += dev/
246     obj-y += rcar/
247     obj-y += spi/
248     obj-y += usb/
249     diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
250     deleted file mode 100644
251     index 1e0c1a05df82d..0000000000000
252     --- a/drivers/net/can/dev.c
253     +++ /dev/null
254     @@ -1,1310 +0,0 @@
255     -// SPDX-License-Identifier: GPL-2.0-only
256     -/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
257     - * Copyright (C) 2006 Andrey Volkov, Varma Electronics
258     - * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
259     - */
260     -
261     -#include <linux/module.h>
262     -#include <linux/kernel.h>
263     -#include <linux/slab.h>
264     -#include <linux/netdevice.h>
265     -#include <linux/if_arp.h>
266     -#include <linux/workqueue.h>
267     -#include <linux/can.h>
268     -#include <linux/can/can-ml.h>
269     -#include <linux/can/dev.h>
270     -#include <linux/can/skb.h>
271     -#include <linux/can/netlink.h>
272     -#include <linux/can/led.h>
273     -#include <linux/of.h>
274     -#include <net/rtnetlink.h>
275     -
276     -#define MOD_DESC "CAN device driver interface"
277     -
278     -MODULE_DESCRIPTION(MOD_DESC);
279     -MODULE_LICENSE("GPL v2");
280     -MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
281     -
282     -/* CAN DLC to real data length conversion helpers */
283     -
284     -static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
285     - 8, 12, 16, 20, 24, 32, 48, 64};
286     -
287     -/* get data length from can_dlc with sanitized can_dlc */
288     -u8 can_dlc2len(u8 can_dlc)
289     -{
290     - return dlc2len[can_dlc & 0x0F];
291     -}
292     -EXPORT_SYMBOL_GPL(can_dlc2len);
293     -
294     -static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
295     - 9, 9, 9, 9, /* 9 - 12 */
296     - 10, 10, 10, 10, /* 13 - 16 */
297     - 11, 11, 11, 11, /* 17 - 20 */
298     - 12, 12, 12, 12, /* 21 - 24 */
299     - 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
300     - 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
301     - 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
302     - 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
303     - 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
304     -
305     -/* map the sanitized data length to an appropriate data length code */
306     -u8 can_len2dlc(u8 len)
307     -{
308     - if (unlikely(len > 64))
309     - return 0xF;
310     -
311     - return len2dlc[len];
312     -}
313     -EXPORT_SYMBOL_GPL(can_len2dlc);
314     -
315     -#ifdef CONFIG_CAN_CALC_BITTIMING
316     -#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
317     -#define CAN_CALC_SYNC_SEG 1
318     -
319     -/* Bit-timing calculation derived from:
320     - *
321     - * Code based on LinCAN sources and H8S2638 project
322     - * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
323     - * Copyright 2005 Stanislav Marek
324     - * email: pisa@cmp.felk.cvut.cz
325     - *
326     - * Calculates proper bit-timing parameters for a specified bit-rate
327     - * and sample-point, which can then be used to set the bit-timing
328     - * registers of the CAN controller. You can find more information
329     - * in the header file linux/can/netlink.h.
330     - */
331     -static int
332     -can_update_sample_point(const struct can_bittiming_const *btc,
333     - unsigned int sample_point_nominal, unsigned int tseg,
334     - unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
335     - unsigned int *sample_point_error_ptr)
336     -{
337     - unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
338     - unsigned int sample_point, best_sample_point = 0;
339     - unsigned int tseg1, tseg2;
340     - int i;
341     -
342     - for (i = 0; i <= 1; i++) {
343     - tseg2 = tseg + CAN_CALC_SYNC_SEG -
344     - (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) /
345     - 1000 - i;
346     - tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
347     - tseg1 = tseg - tseg2;
348     - if (tseg1 > btc->tseg1_max) {
349     - tseg1 = btc->tseg1_max;
350     - tseg2 = tseg - tseg1;
351     - }
352     -
353     - sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) /
354     - (tseg + CAN_CALC_SYNC_SEG);
355     - sample_point_error = abs(sample_point_nominal - sample_point);
356     -
357     - if (sample_point <= sample_point_nominal &&
358     - sample_point_error < best_sample_point_error) {
359     - best_sample_point = sample_point;
360     - best_sample_point_error = sample_point_error;
361     - *tseg1_ptr = tseg1;
362     - *tseg2_ptr = tseg2;
363     - }
364     - }
365     -
366     - if (sample_point_error_ptr)
367     - *sample_point_error_ptr = best_sample_point_error;
368     -
369     - return best_sample_point;
370     -}
371     -
372     -static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
373     - const struct can_bittiming_const *btc)
374     -{
375     - struct can_priv *priv = netdev_priv(dev);
376     - unsigned int bitrate; /* current bitrate */
377     - unsigned int bitrate_error; /* difference between current and nominal value */
378     - unsigned int best_bitrate_error = UINT_MAX;
379     - unsigned int sample_point_error; /* difference between current and nominal value */
380     - unsigned int best_sample_point_error = UINT_MAX;
381     - unsigned int sample_point_nominal; /* nominal sample point */
382     - unsigned int best_tseg = 0; /* current best value for tseg */
383     - unsigned int best_brp = 0; /* current best value for brp */
384     - unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
385     - u64 v64;
386     -
387     - /* Use CiA recommended sample points */
388     - if (bt->sample_point) {
389     - sample_point_nominal = bt->sample_point;
390     - } else {
391     - if (bt->bitrate > 800000)
392     - sample_point_nominal = 750;
393     - else if (bt->bitrate > 500000)
394     - sample_point_nominal = 800;
395     - else
396     - sample_point_nominal = 875;
397     - }
398     -
399     - /* tseg even = round down, odd = round up */
400     - for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
401     - tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
402     - tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
403     -
404     - /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
405     - brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
406     -
407     - /* choose brp step which is possible in system */
408     - brp = (brp / btc->brp_inc) * btc->brp_inc;
409     - if (brp < btc->brp_min || brp > btc->brp_max)
410     - continue;
411     -
412     - bitrate = priv->clock.freq / (brp * tsegall);
413     - bitrate_error = abs(bt->bitrate - bitrate);
414     -
415     - /* tseg brp biterror */
416     - if (bitrate_error > best_bitrate_error)
417     - continue;
418     -
419     - /* reset sample point error if we have a better bitrate */
420     - if (bitrate_error < best_bitrate_error)
421     - best_sample_point_error = UINT_MAX;
422     -
423     - can_update_sample_point(btc, sample_point_nominal, tseg / 2,
424     - &tseg1, &tseg2, &sample_point_error);
425     - if (sample_point_error > best_sample_point_error)
426     - continue;
427     -
428     - best_sample_point_error = sample_point_error;
429     - best_bitrate_error = bitrate_error;
430     - best_tseg = tseg / 2;
431     - best_brp = brp;
432     -
433     - if (bitrate_error == 0 && sample_point_error == 0)
434     - break;
435     - }
436     -
437     - if (best_bitrate_error) {
438     - /* Error in one-tenth of a percent */
439     - v64 = (u64)best_bitrate_error * 1000;
440     - do_div(v64, bt->bitrate);
441     - bitrate_error = (u32)v64;
442     - if (bitrate_error > CAN_CALC_MAX_ERROR) {
443     - netdev_err(dev,
444     - "bitrate error %d.%d%% too high\n",
445     - bitrate_error / 10, bitrate_error % 10);
446     - return -EDOM;
447     - }
448     - netdev_warn(dev, "bitrate error %d.%d%%\n",
449     - bitrate_error / 10, bitrate_error % 10);
450     - }
451     -
452     - /* real sample point */
453     - bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
454     - best_tseg, &tseg1, &tseg2,
455     - NULL);
456     -
457     - v64 = (u64)best_brp * 1000 * 1000 * 1000;
458     - do_div(v64, priv->clock.freq);
459     - bt->tq = (u32)v64;
460     - bt->prop_seg = tseg1 / 2;
461     - bt->phase_seg1 = tseg1 - bt->prop_seg;
462     - bt->phase_seg2 = tseg2;
463     -
464     - /* check for sjw user settings */
465     - if (!bt->sjw || !btc->sjw_max) {
466     - bt->sjw = 1;
467     - } else {
468     - /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
469     - if (bt->sjw > btc->sjw_max)
470     - bt->sjw = btc->sjw_max;
471     - /* bt->sjw must not be higher than tseg2 */
472     - if (tseg2 < bt->sjw)
473     - bt->sjw = tseg2;
474     - }
475     -
476     - bt->brp = best_brp;
477     -
478     - /* real bitrate */
479     - bt->bitrate = priv->clock.freq /
480     - (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
481     -
482     - return 0;
483     -}
484     -#else /* !CONFIG_CAN_CALC_BITTIMING */
485     -static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
486     - const struct can_bittiming_const *btc)
487     -{
488     - netdev_err(dev, "bit-timing calculation not available\n");
489     - return -EINVAL;
490     -}
491     -#endif /* CONFIG_CAN_CALC_BITTIMING */
492     -
493     -/* Checks the validity of the specified bit-timing parameters prop_seg,
494     - * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
495     - * prescaler value brp. You can find more information in the header
496     - * file linux/can/netlink.h.
497     - */
498     -static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
499     - const struct can_bittiming_const *btc)
500     -{
501     - struct can_priv *priv = netdev_priv(dev);
502     - int tseg1, alltseg;
503     - u64 brp64;
504     -
505     - tseg1 = bt->prop_seg + bt->phase_seg1;
506     - if (!bt->sjw)
507     - bt->sjw = 1;
508     - if (bt->sjw > btc->sjw_max ||
509     - tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
510     - bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
511     - return -ERANGE;
512     -
513     - brp64 = (u64)priv->clock.freq * (u64)bt->tq;
514     - if (btc->brp_inc > 1)
515     - do_div(brp64, btc->brp_inc);
516     - brp64 += 500000000UL - 1;
517     - do_div(brp64, 1000000000UL); /* the practicable BRP */
518     - if (btc->brp_inc > 1)
519     - brp64 *= btc->brp_inc;
520     - bt->brp = (u32)brp64;
521     -
522     - if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
523     - return -EINVAL;
524     -
525     - alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
526     - bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
527     - bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
528     -
529     - return 0;
530     -}
531     -
532     -/* Checks the validity of predefined bitrate settings */
533     -static int
534     -can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
535     - const u32 *bitrate_const,
536     - const unsigned int bitrate_const_cnt)
537     -{
538     - struct can_priv *priv = netdev_priv(dev);
539     - unsigned int i;
540     -
541     - for (i = 0; i < bitrate_const_cnt; i++) {
542     - if (bt->bitrate == bitrate_const[i])
543     - break;
544     - }
545     -
546     - if (i >= priv->bitrate_const_cnt)
547     - return -EINVAL;
548     -
549     - return 0;
550     -}
551     -
552     -static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
553     - const struct can_bittiming_const *btc,
554     - const u32 *bitrate_const,
555     - const unsigned int bitrate_const_cnt)
556     -{
557     - int err;
558     -
559     - /* Depending on the given can_bittiming parameter structure the CAN
560     - * timing parameters are calculated based on the provided bitrate OR
561     - * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
562     - * provided directly which are then checked and fixed up.
563     - */
564     - if (!bt->tq && bt->bitrate && btc)
565     - err = can_calc_bittiming(dev, bt, btc);
566     - else if (bt->tq && !bt->bitrate && btc)
567     - err = can_fixup_bittiming(dev, bt, btc);
568     - else if (!bt->tq && bt->bitrate && bitrate_const)
569     - err = can_validate_bitrate(dev, bt, bitrate_const,
570     - bitrate_const_cnt);
571     - else
572     - err = -EINVAL;
573     -
574     - return err;
575     -}
576     -
577     -static void can_update_state_error_stats(struct net_device *dev,
578     - enum can_state new_state)
579     -{
580     - struct can_priv *priv = netdev_priv(dev);
581     -
582     - if (new_state <= priv->state)
583     - return;
584     -
585     - switch (new_state) {
586     - case CAN_STATE_ERROR_WARNING:
587     - priv->can_stats.error_warning++;
588     - break;
589     - case CAN_STATE_ERROR_PASSIVE:
590     - priv->can_stats.error_passive++;
591     - break;
592     - case CAN_STATE_BUS_OFF:
593     - priv->can_stats.bus_off++;
594     - break;
595     - default:
596     - break;
597     - }
598     -}
599     -
600     -static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
601     -{
602     - switch (state) {
603     - case CAN_STATE_ERROR_ACTIVE:
604     - return CAN_ERR_CRTL_ACTIVE;
605     - case CAN_STATE_ERROR_WARNING:
606     - return CAN_ERR_CRTL_TX_WARNING;
607     - case CAN_STATE_ERROR_PASSIVE:
608     - return CAN_ERR_CRTL_TX_PASSIVE;
609     - default:
610     - return 0;
611     - }
612     -}
613     -
614     -static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
615     -{
616     - switch (state) {
617     - case CAN_STATE_ERROR_ACTIVE:
618     - return CAN_ERR_CRTL_ACTIVE;
619     - case CAN_STATE_ERROR_WARNING:
620     - return CAN_ERR_CRTL_RX_WARNING;
621     - case CAN_STATE_ERROR_PASSIVE:
622     - return CAN_ERR_CRTL_RX_PASSIVE;
623     - default:
624     - return 0;
625     - }
626     -}
627     -
628     -void can_change_state(struct net_device *dev, struct can_frame *cf,
629     - enum can_state tx_state, enum can_state rx_state)
630     -{
631     - struct can_priv *priv = netdev_priv(dev);
632     - enum can_state new_state = max(tx_state, rx_state);
633     -
634     - if (unlikely(new_state == priv->state)) {
635     - netdev_warn(dev, "%s: oops, state did not change", __func__);
636     - return;
637     - }
638     -
639     - netdev_dbg(dev, "New error state: %d\n", new_state);
640     -
641     - can_update_state_error_stats(dev, new_state);
642     - priv->state = new_state;
643     -
644     - if (!cf)
645     - return;
646     -
647     - if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
648     - cf->can_id |= CAN_ERR_BUSOFF;
649     - return;
650     - }
651     -
652     - cf->can_id |= CAN_ERR_CRTL;
653     - cf->data[1] |= tx_state >= rx_state ?
654     - can_tx_state_to_frame(dev, tx_state) : 0;
655     - cf->data[1] |= tx_state <= rx_state ?
656     - can_rx_state_to_frame(dev, rx_state) : 0;
657     -}
658     -EXPORT_SYMBOL_GPL(can_change_state);
659     -
660     -/* Local echo of CAN messages
661     - *
662     - * CAN network devices *should* support a local echo functionality
663     - * (see Documentation/networking/can.rst). To test the handling of CAN
664     - * interfaces that do not support the local echo both driver types are
665     - * implemented. In the case that the driver does not support the echo
666     - * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
667     - * to perform the echo as a fallback solution.
668     - */
669     -static void can_flush_echo_skb(struct net_device *dev)
670     -{
671     - struct can_priv *priv = netdev_priv(dev);
672     - struct net_device_stats *stats = &dev->stats;
673     - int i;
674     -
675     - for (i = 0; i < priv->echo_skb_max; i++) {
676     - if (priv->echo_skb[i]) {
677     - kfree_skb(priv->echo_skb[i]);
678     - priv->echo_skb[i] = NULL;
679     - stats->tx_dropped++;
680     - stats->tx_aborted_errors++;
681     - }
682     - }
683     -}
684     -
685     -/* Put the skb on the stack to be looped backed locally lateron
686     - *
687     - * The function is typically called in the start_xmit function
688     - * of the device driver. The driver must protect access to
689     - * priv->echo_skb, if necessary.
690     - */
691     -void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
692     - unsigned int idx)
693     -{
694     - struct can_priv *priv = netdev_priv(dev);
695     -
696     - BUG_ON(idx >= priv->echo_skb_max);
697     -
698     - /* check flag whether this packet has to be looped back */
699     - if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
700     - (skb->protocol != htons(ETH_P_CAN) &&
701     - skb->protocol != htons(ETH_P_CANFD))) {
702     - kfree_skb(skb);
703     - return;
704     - }
705     -
706     - if (!priv->echo_skb[idx]) {
707     - skb = can_create_echo_skb(skb);
708     - if (!skb)
709     - return;
710     -
711     - /* make settings for echo to reduce code in irq context */
712     - skb->pkt_type = PACKET_BROADCAST;
713     - skb->ip_summed = CHECKSUM_UNNECESSARY;
714     - skb->dev = dev;
715     -
716     - /* save this skb for tx interrupt echo handling */
717     - priv->echo_skb[idx] = skb;
718     - } else {
719     - /* locking problem with netif_stop_queue() ?? */
720     - netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
721     - kfree_skb(skb);
722     - }
723     -}
724     -EXPORT_SYMBOL_GPL(can_put_echo_skb);
725     -
726     -struct sk_buff *
727     -__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
728     -{
729     - struct can_priv *priv = netdev_priv(dev);
730     -
731     - if (idx >= priv->echo_skb_max) {
732     - netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
733     - __func__, idx, priv->echo_skb_max);
734     - return NULL;
735     - }
736     -
737     - if (priv->echo_skb[idx]) {
738     - /* Using "struct canfd_frame::len" for the frame
739     - * length is supported on both CAN and CANFD frames.
740     - */
741     - struct sk_buff *skb = priv->echo_skb[idx];
742     - struct canfd_frame *cf = (struct canfd_frame *)skb->data;
743     -
744     - /* get the real payload length for netdev statistics */
745     - if (cf->can_id & CAN_RTR_FLAG)
746     - *len_ptr = 0;
747     - else
748     - *len_ptr = cf->len;
749     -
750     - priv->echo_skb[idx] = NULL;
751     -
752     - return skb;
753     - }
754     -
755     - return NULL;
756     -}
757     -
758     -/* Get the skb from the stack and loop it back locally
759     - *
760     - * The function is typically called when the TX done interrupt
761     - * is handled in the device driver. The driver must protect
762     - * access to priv->echo_skb, if necessary.
763     - */
764     -unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
765     -{
766     - struct sk_buff *skb;
767     - u8 len;
768     -
769     - skb = __can_get_echo_skb(dev, idx, &len);
770     - if (!skb)
771     - return 0;
772     -
773     - skb_get(skb);
774     - if (netif_rx(skb) == NET_RX_SUCCESS)
775     - dev_consume_skb_any(skb);
776     - else
777     - dev_kfree_skb_any(skb);
778     -
779     - return len;
780     -}
781     -EXPORT_SYMBOL_GPL(can_get_echo_skb);
782     -
783     -/* Remove the skb from the stack and free it.
784     - *
785     - * The function is typically called when TX failed.
786     - */
787     -void can_free_echo_skb(struct net_device *dev, unsigned int idx)
788     -{
789     - struct can_priv *priv = netdev_priv(dev);
790     -
791     - BUG_ON(idx >= priv->echo_skb_max);
792     -
793     - if (priv->echo_skb[idx]) {
794     - dev_kfree_skb_any(priv->echo_skb[idx]);
795     - priv->echo_skb[idx] = NULL;
796     - }
797     -}
798     -EXPORT_SYMBOL_GPL(can_free_echo_skb);
799     -
800     -/* CAN device restart for bus-off recovery */
801     -static void can_restart(struct net_device *dev)
802     -{
803     - struct can_priv *priv = netdev_priv(dev);
804     - struct net_device_stats *stats = &dev->stats;
805     - struct sk_buff *skb;
806     - struct can_frame *cf;
807     - int err;
808     -
809     - BUG_ON(netif_carrier_ok(dev));
810     -
811     - /* No synchronization needed because the device is bus-off and
812     - * no messages can come in or go out.
813     - */
814     - can_flush_echo_skb(dev);
815     -
816     - /* send restart message upstream */
817     - skb = alloc_can_err_skb(dev, &cf);
818     - if (!skb) {
819     - err = -ENOMEM;
820     - goto restart;
821     - }
822     - cf->can_id |= CAN_ERR_RESTARTED;
823     -
824     - stats->rx_packets++;
825     - stats->rx_bytes += cf->can_dlc;
826     -
827     - netif_rx_ni(skb);
828     -
829     -restart:
830     - netdev_dbg(dev, "restarted\n");
831     - priv->can_stats.restarts++;
832     -
833     - /* Now restart the device */
834     - err = priv->do_set_mode(dev, CAN_MODE_START);
835     -
836     - netif_carrier_on(dev);
837     - if (err)
838     - netdev_err(dev, "Error %d during restart", err);
839     -}
840     -
841     -static void can_restart_work(struct work_struct *work)
842     -{
843     - struct delayed_work *dwork = to_delayed_work(work);
844     - struct can_priv *priv = container_of(dwork, struct can_priv,
845     - restart_work);
846     -
847     - can_restart(priv->dev);
848     -}
849     -
850     -int can_restart_now(struct net_device *dev)
851     -{
852     - struct can_priv *priv = netdev_priv(dev);
853     -
854     - /* A manual restart is only permitted if automatic restart is
855     - * disabled and the device is in the bus-off state
856     - */
857     - if (priv->restart_ms)
858     - return -EINVAL;
859     - if (priv->state != CAN_STATE_BUS_OFF)
860     - return -EBUSY;
861     -
862     - cancel_delayed_work_sync(&priv->restart_work);
863     - can_restart(dev);
864     -
865     - return 0;
866     -}
867     -
868     -/* CAN bus-off
869     - *
870     - * This functions should be called when the device goes bus-off to
871     - * tell the netif layer that no more packets can be sent or received.
872     - * If enabled, a timer is started to trigger bus-off recovery.
873     - */
874     -void can_bus_off(struct net_device *dev)
875     -{
876     - struct can_priv *priv = netdev_priv(dev);
877     -
878     - netdev_info(dev, "bus-off\n");
879     -
880     - netif_carrier_off(dev);
881     -
882     - if (priv->restart_ms)
883     - schedule_delayed_work(&priv->restart_work,
884     - msecs_to_jiffies(priv->restart_ms));
885     -}
886     -EXPORT_SYMBOL_GPL(can_bus_off);
887     -
888     -static void can_setup(struct net_device *dev)
889     -{
890     - dev->type = ARPHRD_CAN;
891     - dev->mtu = CAN_MTU;
892     - dev->hard_header_len = 0;
893     - dev->addr_len = 0;
894     - dev->tx_queue_len = 10;
895     -
896     - /* New-style flags. */
897     - dev->flags = IFF_NOARP;
898     - dev->features = NETIF_F_HW_CSUM;
899     -}
900     -
901     -struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
902     -{
903     - struct sk_buff *skb;
904     -
905     - skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
906     - sizeof(struct can_frame));
907     - if (unlikely(!skb))
908     - return NULL;
909     -
910     - skb->protocol = htons(ETH_P_CAN);
911     - skb->pkt_type = PACKET_BROADCAST;
912     - skb->ip_summed = CHECKSUM_UNNECESSARY;
913     -
914     - skb_reset_mac_header(skb);
915     - skb_reset_network_header(skb);
916     - skb_reset_transport_header(skb);
917     -
918     - can_skb_reserve(skb);
919     - can_skb_prv(skb)->ifindex = dev->ifindex;
920     - can_skb_prv(skb)->skbcnt = 0;
921     -
922     - *cf = skb_put_zero(skb, sizeof(struct can_frame));
923     -
924     - return skb;
925     -}
926     -EXPORT_SYMBOL_GPL(alloc_can_skb);
927     -
928     -struct sk_buff *alloc_canfd_skb(struct net_device *dev,
929     - struct canfd_frame **cfd)
930     -{
931     - struct sk_buff *skb;
932     -
933     - skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
934     - sizeof(struct canfd_frame));
935     - if (unlikely(!skb))
936     - return NULL;
937     -
938     - skb->protocol = htons(ETH_P_CANFD);
939     - skb->pkt_type = PACKET_BROADCAST;
940     - skb->ip_summed = CHECKSUM_UNNECESSARY;
941     -
942     - skb_reset_mac_header(skb);
943     - skb_reset_network_header(skb);
944     - skb_reset_transport_header(skb);
945     -
946     - can_skb_reserve(skb);
947     - can_skb_prv(skb)->ifindex = dev->ifindex;
948     - can_skb_prv(skb)->skbcnt = 0;
949     -
950     - *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
951     -
952     - return skb;
953     -}
954     -EXPORT_SYMBOL_GPL(alloc_canfd_skb);
955     -
956     -struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
957     -{
958     - struct sk_buff *skb;
959     -
960     - skb = alloc_can_skb(dev, cf);
961     - if (unlikely(!skb))
962     - return NULL;
963     -
964     - (*cf)->can_id = CAN_ERR_FLAG;
965     - (*cf)->can_dlc = CAN_ERR_DLC;
966     -
967     - return skb;
968     -}
969     -EXPORT_SYMBOL_GPL(alloc_can_err_skb);
970     -
971     -/* Allocate and setup space for the CAN network device */
972     -struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
973     - unsigned int txqs, unsigned int rxqs)
974     -{
975     - struct net_device *dev;
976     - struct can_priv *priv;
977     - int size;
978     -
979     - /* We put the driver's priv, the CAN mid layer priv and the
980     - * echo skb into the netdevice's priv. The memory layout for
981     - * the netdev_priv is like this:
982     - *
983     - * +-------------------------+
984     - * | driver's priv |
985     - * +-------------------------+
986     - * | struct can_ml_priv |
987     - * +-------------------------+
988     - * | array of struct sk_buff |
989     - * +-------------------------+
990     - */
991     -
992     - size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
993     -
994     - if (echo_skb_max)
995     - size = ALIGN(size, sizeof(struct sk_buff *)) +
996     - echo_skb_max * sizeof(struct sk_buff *);
997     -
998     - dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
999     - txqs, rxqs);
1000     - if (!dev)
1001     - return NULL;
1002     -
1003     - priv = netdev_priv(dev);
1004     - priv->dev = dev;
1005     -
1006     - dev->ml_priv = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
1007     -
1008     - if (echo_skb_max) {
1009     - priv->echo_skb_max = echo_skb_max;
1010     - priv->echo_skb = (void *)priv +
1011     - (size - echo_skb_max * sizeof(struct sk_buff *));
1012     - }
1013     -
1014     - priv->state = CAN_STATE_STOPPED;
1015     -
1016     - INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
1017     -
1018     - return dev;
1019     -}
1020     -EXPORT_SYMBOL_GPL(alloc_candev_mqs);
1021     -
1022     -/* Free space of the CAN network device */
1023     -void free_candev(struct net_device *dev)
1024     -{
1025     - free_netdev(dev);
1026     -}
1027     -EXPORT_SYMBOL_GPL(free_candev);
1028     -
1029     -/* changing MTU and control mode for CAN/CANFD devices */
1030     -int can_change_mtu(struct net_device *dev, int new_mtu)
1031     -{
1032     - struct can_priv *priv = netdev_priv(dev);
1033     -
1034     - /* Do not allow changing the MTU while running */
1035     - if (dev->flags & IFF_UP)
1036     - return -EBUSY;
1037     -
1038     - /* allow change of MTU according to the CANFD ability of the device */
1039     - switch (new_mtu) {
1040     - case CAN_MTU:
1041     - /* 'CANFD-only' controllers can not switch to CAN_MTU */
1042     - if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
1043     - return -EINVAL;
1044     -
1045     - priv->ctrlmode &= ~CAN_CTRLMODE_FD;
1046     - break;
1047     -
1048     - case CANFD_MTU:
1049     - /* check for potential CANFD ability */
1050     - if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
1051     - !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
1052     - return -EINVAL;
1053     -
1054     - priv->ctrlmode |= CAN_CTRLMODE_FD;
1055     - break;
1056     -
1057     - default:
1058     - return -EINVAL;
1059     - }
1060     -
1061     - dev->mtu = new_mtu;
1062     - return 0;
1063     -}
1064     -EXPORT_SYMBOL_GPL(can_change_mtu);
1065     -
1066     -/* Common open function when the device gets opened.
1067     - *
1068     - * This function should be called in the open function of the device
1069     - * driver.
1070     - */
1071     -int open_candev(struct net_device *dev)
1072     -{
1073     - struct can_priv *priv = netdev_priv(dev);
1074     -
1075     - if (!priv->bittiming.bitrate) {
1076     - netdev_err(dev, "bit-timing not yet defined\n");
1077     - return -EINVAL;
1078     - }
1079     -
1080     - /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
1081     - if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
1082     - (!priv->data_bittiming.bitrate ||
1083     - priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
1084     - netdev_err(dev, "incorrect/missing data bit-timing\n");
1085     - return -EINVAL;
1086     - }
1087     -
1088     - /* Switch carrier on if device was stopped while in bus-off state */
1089     - if (!netif_carrier_ok(dev))
1090     - netif_carrier_on(dev);
1091     -
1092     - return 0;
1093     -}
1094     -EXPORT_SYMBOL_GPL(open_candev);
1095     -
1096     -#ifdef CONFIG_OF
1097     -/* Common function that can be used to understand the limitation of
1098     - * a transceiver when it provides no means to determine these limitations
1099     - * at runtime.
1100     - */
1101     -void of_can_transceiver(struct net_device *dev)
1102     -{
1103     - struct device_node *dn;
1104     - struct can_priv *priv = netdev_priv(dev);
1105     - struct device_node *np = dev->dev.parent->of_node;
1106     - int ret;
1107     -
1108     - dn = of_get_child_by_name(np, "can-transceiver");
1109     - if (!dn)
1110     - return;
1111     -
1112     - ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
1113     - of_node_put(dn);
1114     - if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
1115     - netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
1116     -}
1117     -EXPORT_SYMBOL_GPL(of_can_transceiver);
1118     -#endif
1119     -
1120     -/* Common close function for cleanup before the device gets closed.
1121     - *
1122     - * This function should be called in the close function of the device
1123     - * driver.
1124     - */
1125     -void close_candev(struct net_device *dev)
1126     -{
1127     - struct can_priv *priv = netdev_priv(dev);
1128     -
1129     - cancel_delayed_work_sync(&priv->restart_work);
1130     - can_flush_echo_skb(dev);
1131     -}
1132     -EXPORT_SYMBOL_GPL(close_candev);
1133     -
1134     -/* CAN netlink interface */
1135     -static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
1136     - [IFLA_CAN_STATE] = { .type = NLA_U32 },
1137     - [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
1138     - [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
1139     - [IFLA_CAN_RESTART] = { .type = NLA_U32 },
1140     - [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
1141     - [IFLA_CAN_BITTIMING_CONST]
1142     - = { .len = sizeof(struct can_bittiming_const) },
1143     - [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
1144     - [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
1145     - [IFLA_CAN_DATA_BITTIMING]
1146     - = { .len = sizeof(struct can_bittiming) },
1147     - [IFLA_CAN_DATA_BITTIMING_CONST]
1148     - = { .len = sizeof(struct can_bittiming_const) },
1149     - [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
1150     -};
1151     -
1152     -static int can_validate(struct nlattr *tb[], struct nlattr *data[],
1153     - struct netlink_ext_ack *extack)
1154     -{
1155     - bool is_can_fd = false;
1156     -
1157     - /* Make sure that valid CAN FD configurations always consist of
1158     - * - nominal/arbitration bittiming
1159     - * - data bittiming
1160     - * - control mode with CAN_CTRLMODE_FD set
1161     - */
1162     -
1163     - if (!data)
1164     - return 0;
1165     -
1166     - if (data[IFLA_CAN_CTRLMODE]) {
1167     - struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
1168     -
1169     - is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
1170     - }
1171     -
1172     - if (is_can_fd) {
1173     - if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
1174     - return -EOPNOTSUPP;
1175     - }
1176     -
1177     - if (data[IFLA_CAN_DATA_BITTIMING]) {
1178     - if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
1179     - return -EOPNOTSUPP;
1180     - }
1181     -
1182     - return 0;
1183     -}
1184     -
1185     -static int can_changelink(struct net_device *dev, struct nlattr *tb[],
1186     - struct nlattr *data[],
1187     - struct netlink_ext_ack *extack)
1188     -{
1189     - struct can_priv *priv = netdev_priv(dev);
1190     - int err;
1191     -
1192     - /* We need synchronization with dev->stop() */
1193     - ASSERT_RTNL();
1194     -
1195     - if (data[IFLA_CAN_BITTIMING]) {
1196     - struct can_bittiming bt;
1197     -
1198     - /* Do not allow changing bittiming while running */
1199     - if (dev->flags & IFF_UP)
1200     - return -EBUSY;
1201     -
1202     - /* Calculate bittiming parameters based on
1203     - * bittiming_const if set, otherwise pass bitrate
1204     - * directly via do_set_bitrate(). Bail out if neither
1205     - * is given.
1206     - */
1207     - if (!priv->bittiming_const && !priv->do_set_bittiming)
1208     - return -EOPNOTSUPP;
1209     -
1210     - memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
1211     - err = can_get_bittiming(dev, &bt,
1212     - priv->bittiming_const,
1213     - priv->bitrate_const,
1214     - priv->bitrate_const_cnt);
1215     - if (err)
1216     - return err;
1217     -
1218     - if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
1219     - netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
1220     - priv->bitrate_max);
1221     - return -EINVAL;
1222     - }
1223     -
1224     - memcpy(&priv->bittiming, &bt, sizeof(bt));
1225     -
1226     - if (priv->do_set_bittiming) {
1227     - /* Finally, set the bit-timing registers */
1228     - err = priv->do_set_bittiming(dev);
1229     - if (err)
1230     - return err;
1231     - }
1232     - }
1233     -
1234     - if (data[IFLA_CAN_CTRLMODE]) {
1235     - struct can_ctrlmode *cm;
1236     - u32 ctrlstatic;
1237     - u32 maskedflags;
1238     -
1239     - /* Do not allow changing controller mode while running */
1240     - if (dev->flags & IFF_UP)
1241     - return -EBUSY;
1242     - cm = nla_data(data[IFLA_CAN_CTRLMODE]);
1243     - ctrlstatic = priv->ctrlmode_static;
1244     - maskedflags = cm->flags & cm->mask;
1245     -
1246     - /* check whether provided bits are allowed to be passed */
1247     - if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
1248     - return -EOPNOTSUPP;
1249     -
1250     - /* do not check for static fd-non-iso if 'fd' is disabled */
1251     - if (!(maskedflags & CAN_CTRLMODE_FD))
1252     - ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
1253     -
1254     - /* make sure static options are provided by configuration */
1255     - if ((maskedflags & ctrlstatic) != ctrlstatic)
1256     - return -EOPNOTSUPP;
1257     -
1258     - /* clear bits to be modified and copy the flag values */
1259     - priv->ctrlmode &= ~cm->mask;
1260     - priv->ctrlmode |= maskedflags;
1261     -
1262     - /* CAN_CTRLMODE_FD can only be set when driver supports FD */
1263     - if (priv->ctrlmode & CAN_CTRLMODE_FD)
1264     - dev->mtu = CANFD_MTU;
1265     - else
1266     - dev->mtu = CAN_MTU;
1267     - }
1268     -
1269     - if (data[IFLA_CAN_RESTART_MS]) {
1270     - /* Do not allow changing restart delay while running */
1271     - if (dev->flags & IFF_UP)
1272     - return -EBUSY;
1273     - priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
1274     - }
1275     -
1276     - if (data[IFLA_CAN_RESTART]) {
1277     - /* Do not allow a restart while not running */
1278     - if (!(dev->flags & IFF_UP))
1279     - return -EINVAL;
1280     - err = can_restart_now(dev);
1281     - if (err)
1282     - return err;
1283     - }
1284     -
1285     - if (data[IFLA_CAN_DATA_BITTIMING]) {
1286     - struct can_bittiming dbt;
1287     -
1288     - /* Do not allow changing bittiming while running */
1289     - if (dev->flags & IFF_UP)
1290     - return -EBUSY;
1291     -
1292     - /* Calculate bittiming parameters based on
1293     - * data_bittiming_const if set, otherwise pass bitrate
1294     - * directly via do_set_bitrate(). Bail out if neither
1295     - * is given.
1296     - */
1297     - if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
1298     - return -EOPNOTSUPP;
1299     -
1300     - memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
1301     - sizeof(dbt));
1302     - err = can_get_bittiming(dev, &dbt,
1303     - priv->data_bittiming_const,
1304     - priv->data_bitrate_const,
1305     - priv->data_bitrate_const_cnt);
1306     - if (err)
1307     - return err;
1308     -
1309     - if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
1310     - netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
1311     - priv->bitrate_max);
1312     - return -EINVAL;
1313     - }
1314     -
1315     - memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
1316     -
1317     - if (priv->do_set_data_bittiming) {
1318     - /* Finally, set the bit-timing registers */
1319     - err = priv->do_set_data_bittiming(dev);
1320     - if (err)
1321     - return err;
1322     - }
1323     - }
1324     -
1325     - if (data[IFLA_CAN_TERMINATION]) {
1326     - const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
1327     - const unsigned int num_term = priv->termination_const_cnt;
1328     - unsigned int i;
1329     -
1330     - if (!priv->do_set_termination)
1331     - return -EOPNOTSUPP;
1332     -
1333     - /* check whether given value is supported by the interface */
1334     - for (i = 0; i < num_term; i++) {
1335     - if (termval == priv->termination_const[i])
1336     - break;
1337     - }
1338     - if (i >= num_term)
1339     - return -EINVAL;
1340     -
1341     - /* Finally, set the termination value */
1342     - err = priv->do_set_termination(dev, termval);
1343     - if (err)
1344     - return err;
1345     -
1346     - priv->termination = termval;
1347     - }
1348     -
1349     - return 0;
1350     -}
1351     -
1352     -static size_t can_get_size(const struct net_device *dev)
1353     -{
1354     - struct can_priv *priv = netdev_priv(dev);
1355     - size_t size = 0;
1356     -
1357     - if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
1358     - size += nla_total_size(sizeof(struct can_bittiming));
1359     - if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
1360     - size += nla_total_size(sizeof(struct can_bittiming_const));
1361     - size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
1362     - size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
1363     - size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
1364     - size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
1365     - if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
1366     - size += nla_total_size(sizeof(struct can_berr_counter));
1367     - if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
1368     - size += nla_total_size(sizeof(struct can_bittiming));
1369     - if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
1370     - size += nla_total_size(sizeof(struct can_bittiming_const));
1371     - if (priv->termination_const) {
1372     - size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
1373     - size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
1374     - priv->termination_const_cnt);
1375     - }
1376     - if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
1377     - size += nla_total_size(sizeof(*priv->bitrate_const) *
1378     - priv->bitrate_const_cnt);
1379     - if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
1380     - size += nla_total_size(sizeof(*priv->data_bitrate_const) *
1381     - priv->data_bitrate_const_cnt);
1382     - size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
1383     -
1384     - return size;
1385     -}
1386     -
1387     -static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
1388     -{
1389     - struct can_priv *priv = netdev_priv(dev);
1390     - struct can_ctrlmode cm = {.flags = priv->ctrlmode};
1391     - struct can_berr_counter bec = { };
1392     - enum can_state state = priv->state;
1393     -
1394     - if (priv->do_get_state)
1395     - priv->do_get_state(dev, &state);
1396     -
1397     - if ((priv->bittiming.bitrate &&
1398     - nla_put(skb, IFLA_CAN_BITTIMING,
1399     - sizeof(priv->bittiming), &priv->bittiming)) ||
1400     -
1401     - (priv->bittiming_const &&
1402     - nla_put(skb, IFLA_CAN_BITTIMING_CONST,
1403     - sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
1404     -
1405     - nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
1406     - nla_put_u32(skb, IFLA_CAN_STATE, state) ||
1407     - nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
1408     - nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
1409     -
1410     - (priv->do_get_berr_counter &&
1411     - !priv->do_get_berr_counter(dev, &bec) &&
1412     - nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
1413     -
1414     - (priv->data_bittiming.bitrate &&
1415     - nla_put(skb, IFLA_CAN_DATA_BITTIMING,
1416     - sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
1417     -
1418     - (priv->data_bittiming_const &&
1419     - nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
1420     - sizeof(*priv->data_bittiming_const),
1421     - priv->data_bittiming_const)) ||
1422     -
1423     - (priv->termination_const &&
1424     - (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
1425     - nla_put(skb, IFLA_CAN_TERMINATION_CONST,
1426     - sizeof(*priv->termination_const) *
1427     - priv->termination_const_cnt,
1428     - priv->termination_const))) ||
1429     -
1430     - (priv->bitrate_const &&
1431     - nla_put(skb, IFLA_CAN_BITRATE_CONST,
1432     - sizeof(*priv->bitrate_const) *
1433     - priv->bitrate_const_cnt,
1434     - priv->bitrate_const)) ||
1435     -
1436     - (priv->data_bitrate_const &&
1437     - nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
1438     - sizeof(*priv->data_bitrate_const) *
1439     - priv->data_bitrate_const_cnt,
1440     - priv->data_bitrate_const)) ||
1441     -
1442     - (nla_put(skb, IFLA_CAN_BITRATE_MAX,
1443     - sizeof(priv->bitrate_max),
1444     - &priv->bitrate_max))
1445     - )
1446     -
1447     - return -EMSGSIZE;
1448     -
1449     - return 0;
1450     -}
1451     -
1452     -static size_t can_get_xstats_size(const struct net_device *dev)
1453     -{
1454     - return sizeof(struct can_device_stats);
1455     -}
1456     -
1457     -static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
1458     -{
1459     - struct can_priv *priv = netdev_priv(dev);
1460     -
1461     - if (nla_put(skb, IFLA_INFO_XSTATS,
1462     - sizeof(priv->can_stats), &priv->can_stats))
1463     - goto nla_put_failure;
1464     - return 0;
1465     -
1466     -nla_put_failure:
1467     - return -EMSGSIZE;
1468     -}
1469     -
1470     -static int can_newlink(struct net *src_net, struct net_device *dev,
1471     - struct nlattr *tb[], struct nlattr *data[],
1472     - struct netlink_ext_ack *extack)
1473     -{
1474     - return -EOPNOTSUPP;
1475     -}
1476     -
1477     -static void can_dellink(struct net_device *dev, struct list_head *head)
1478     -{
1479     -}
1480     -
1481     -static struct rtnl_link_ops can_link_ops __read_mostly = {
1482     - .kind = "can",
1483     - .netns_refund = true,
1484     - .maxtype = IFLA_CAN_MAX,
1485     - .policy = can_policy,
1486     - .setup = can_setup,
1487     - .validate = can_validate,
1488     - .newlink = can_newlink,
1489     - .changelink = can_changelink,
1490     - .dellink = can_dellink,
1491     - .get_size = can_get_size,
1492     - .fill_info = can_fill_info,
1493     - .get_xstats_size = can_get_xstats_size,
1494     - .fill_xstats = can_fill_xstats,
1495     -};
1496     -
1497     -/* Register the CAN network device */
1498     -int register_candev(struct net_device *dev)
1499     -{
1500     - struct can_priv *priv = netdev_priv(dev);
1501     -
1502     - /* Ensure termination_const, termination_const_cnt and
1503     - * do_set_termination consistency. All must be either set or
1504     - * unset.
1505     - */
1506     - if ((!priv->termination_const != !priv->termination_const_cnt) ||
1507     - (!priv->termination_const != !priv->do_set_termination))
1508     - return -EINVAL;
1509     -
1510     - if (!priv->bitrate_const != !priv->bitrate_const_cnt)
1511     - return -EINVAL;
1512     -
1513     - if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
1514     - return -EINVAL;
1515     -
1516     - dev->rtnl_link_ops = &can_link_ops;
1517     - netif_carrier_off(dev);
1518     -
1519     - return register_netdev(dev);
1520     -}
1521     -EXPORT_SYMBOL_GPL(register_candev);
1522     -
1523     -/* Unregister the CAN network device */
1524     -void unregister_candev(struct net_device *dev)
1525     -{
1526     - unregister_netdev(dev);
1527     -}
1528     -EXPORT_SYMBOL_GPL(unregister_candev);
1529     -
1530     -/* Test if a network device is a candev based device
1531     - * and return the can_priv* if so.
1532     - */
1533     -struct can_priv *safe_candev_priv(struct net_device *dev)
1534     -{
1535     - if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
1536     - return NULL;
1537     -
1538     - return netdev_priv(dev);
1539     -}
1540     -EXPORT_SYMBOL_GPL(safe_candev_priv);
1541     -
1542     -static __init int can_dev_init(void)
1543     -{
1544     - int err;
1545     -
1546     - can_led_notifier_init();
1547     -
1548     - err = rtnl_link_register(&can_link_ops);
1549     - if (!err)
1550     - pr_info(MOD_DESC "\n");
1551     -
1552     - return err;
1553     -}
1554     -module_init(can_dev_init);
1555     -
1556     -static __exit void can_dev_exit(void)
1557     -{
1558     - rtnl_link_unregister(&can_link_ops);
1559     -
1560     - can_led_notifier_exit();
1561     -}
1562     -module_exit(can_dev_exit);
1563     -
1564     -MODULE_ALIAS_RTNL_LINK("can");
1565     diff --git a/drivers/net/can/dev/Makefile b/drivers/net/can/dev/Makefile
1566     new file mode 100644
1567     index 0000000000000..cba92e6bcf6f5
1568     --- /dev/null
1569     +++ b/drivers/net/can/dev/Makefile
1570     @@ -0,0 +1,7 @@
1571     +# SPDX-License-Identifier: GPL-2.0
1572     +
1573     +obj-$(CONFIG_CAN_DEV) += can-dev.o
1574     +can-dev-y += dev.o
1575     +can-dev-y += rx-offload.o
1576     +
1577     +can-dev-$(CONFIG_CAN_LEDS) += led.o
1578     diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c
1579     new file mode 100644
1580     index 0000000000000..322da89cb9c60
1581     --- /dev/null
1582     +++ b/drivers/net/can/dev/dev.c
1583     @@ -0,0 +1,1312 @@
1584     +// SPDX-License-Identifier: GPL-2.0-only
1585     +/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
1586     + * Copyright (C) 2006 Andrey Volkov, Varma Electronics
1587     + * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
1588     + */
1589     +
1590     +#include <linux/module.h>
1591     +#include <linux/kernel.h>
1592     +#include <linux/slab.h>
1593     +#include <linux/netdevice.h>
1594     +#include <linux/if_arp.h>
1595     +#include <linux/workqueue.h>
1596     +#include <linux/can.h>
1597     +#include <linux/can/can-ml.h>
1598     +#include <linux/can/dev.h>
1599     +#include <linux/can/skb.h>
1600     +#include <linux/can/netlink.h>
1601     +#include <linux/can/led.h>
1602     +#include <linux/of.h>
1603     +#include <net/rtnetlink.h>
1604     +
1605     +#define MOD_DESC "CAN device driver interface"
1606     +
1607     +MODULE_DESCRIPTION(MOD_DESC);
1608     +MODULE_LICENSE("GPL v2");
1609     +MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
1610     +
1611     +/* CAN DLC to real data length conversion helpers */
1612     +
1613     +static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7,
1614     + 8, 12, 16, 20, 24, 32, 48, 64};
1615     +
1616     +/* get data length from can_dlc with sanitized can_dlc */
1617     +u8 can_dlc2len(u8 can_dlc)
1618     +{
1619     + return dlc2len[can_dlc & 0x0F];
1620     +}
1621     +EXPORT_SYMBOL_GPL(can_dlc2len);
1622     +
1623     +static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */
1624     + 9, 9, 9, 9, /* 9 - 12 */
1625     + 10, 10, 10, 10, /* 13 - 16 */
1626     + 11, 11, 11, 11, /* 17 - 20 */
1627     + 12, 12, 12, 12, /* 21 - 24 */
1628     + 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */
1629     + 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */
1630     + 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */
1631     + 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */
1632     + 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */
1633     +
1634     +/* map the sanitized data length to an appropriate data length code */
1635     +u8 can_len2dlc(u8 len)
1636     +{
1637     + if (unlikely(len > 64))
1638     + return 0xF;
1639     +
1640     + return len2dlc[len];
1641     +}
1642     +EXPORT_SYMBOL_GPL(can_len2dlc);
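The two tables above define the CAN FD mapping between the 4-bit data length code (DLC) and the actual payload length. A minimal user-space sketch of the same round trip follows; the table values are copied from the hunk above, while main() and the padding example are illustrative assumptions, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Stand-alone copies of the lookup tables from the hunk above. */
static const uint8_t dlc2len[16] = {0, 1, 2, 3, 4, 5, 6, 7,
                                    8, 12, 16, 20, 24, 32, 48, 64};
static const uint8_t len2dlc[65] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8,          /* 0 - 8 */
    9, 9, 9, 9,                          /* 9 - 12 */
    10, 10, 10, 10,                      /* 13 - 16 */
    11, 11, 11, 11,                      /* 17 - 20 */
    12, 12, 12, 12,                      /* 21 - 24 */
    13, 13, 13, 13, 13, 13, 13, 13,      /* 25 - 32 */
    14, 14, 14, 14, 14, 14, 14, 14,      /* 33 - 40 */
    14, 14, 14, 14, 14, 14, 14, 14,      /* 41 - 48 */
    15, 15, 15, 15, 15, 15, 15, 15,      /* 49 - 56 */
    15, 15, 15, 15, 15, 15, 15, 15};     /* 57 - 64 */

int main(void)
{
    /* A 13-byte payload does not fit a CAN FD frame exactly, so it is
     * rounded up to the next valid length (16) via DLC 10. */
    uint8_t len = 13;
    uint8_t dlc = len > 64 ? 0xF : len2dlc[len];

    printf("len %u -> dlc %u -> padded len %u\n", len, dlc, dlc2len[dlc & 0x0F]);
    return 0;
}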
1643     +
1644     +#ifdef CONFIG_CAN_CALC_BITTIMING
1645     +#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
1646     +#define CAN_CALC_SYNC_SEG 1
1647     +
1648     +/* Bit-timing calculation derived from:
1649     + *
1650     + * Code based on LinCAN sources and H8S2638 project
1651     + * Copyright 2004-2006 Pavel Pisa - DCE FELK CVUT cz
1652     + * Copyright 2005 Stanislav Marek
1653     + * email: pisa@cmp.felk.cvut.cz
1654     + *
1655     + * Calculates proper bit-timing parameters for a specified bit-rate
1656     + * and sample-point, which can then be used to set the bit-timing
1657     + * registers of the CAN controller. You can find more information
1658     + * in the header file linux/can/netlink.h.
1659     + */
1660     +static int
1661     +can_update_sample_point(const struct can_bittiming_const *btc,
1662     + unsigned int sample_point_nominal, unsigned int tseg,
1663     + unsigned int *tseg1_ptr, unsigned int *tseg2_ptr,
1664     + unsigned int *sample_point_error_ptr)
1665     +{
1666     + unsigned int sample_point_error, best_sample_point_error = UINT_MAX;
1667     + unsigned int sample_point, best_sample_point = 0;
1668     + unsigned int tseg1, tseg2;
1669     + int i;
1670     +
1671     + for (i = 0; i <= 1; i++) {
1672     + tseg2 = tseg + CAN_CALC_SYNC_SEG -
1673     + (sample_point_nominal * (tseg + CAN_CALC_SYNC_SEG)) /
1674     + 1000 - i;
1675     + tseg2 = clamp(tseg2, btc->tseg2_min, btc->tseg2_max);
1676     + tseg1 = tseg - tseg2;
1677     + if (tseg1 > btc->tseg1_max) {
1678     + tseg1 = btc->tseg1_max;
1679     + tseg2 = tseg - tseg1;
1680     + }
1681     +
1682     + sample_point = 1000 * (tseg + CAN_CALC_SYNC_SEG - tseg2) /
1683     + (tseg + CAN_CALC_SYNC_SEG);
1684     + sample_point_error = abs(sample_point_nominal - sample_point);
1685     +
1686     + if (sample_point <= sample_point_nominal &&
1687     + sample_point_error < best_sample_point_error) {
1688     + best_sample_point = sample_point;
1689     + best_sample_point_error = sample_point_error;
1690     + *tseg1_ptr = tseg1;
1691     + *tseg2_ptr = tseg2;
1692     + }
1693     + }
1694     +
1695     + if (sample_point_error_ptr)
1696     + *sample_point_error_ptr = best_sample_point_error;
1697     +
1698     + return best_sample_point;
1699     +}
1700     +
1701     +static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
1702     + const struct can_bittiming_const *btc)
1703     +{
1704     + struct can_priv *priv = netdev_priv(dev);
1705     + unsigned int bitrate; /* current bitrate */
1706     + unsigned int bitrate_error; /* difference between current and nominal value */
1707     + unsigned int best_bitrate_error = UINT_MAX;
1708     + unsigned int sample_point_error; /* difference between current and nominal value */
1709     + unsigned int best_sample_point_error = UINT_MAX;
1710     + unsigned int sample_point_nominal; /* nominal sample point */
1711     + unsigned int best_tseg = 0; /* current best value for tseg */
1712     + unsigned int best_brp = 0; /* current best value for brp */
1713     + unsigned int brp, tsegall, tseg, tseg1 = 0, tseg2 = 0;
1714     + u64 v64;
1715     +
1716     + /* Use CiA recommended sample points */
1717     + if (bt->sample_point) {
1718     + sample_point_nominal = bt->sample_point;
1719     + } else {
1720     + if (bt->bitrate > 800000)
1721     + sample_point_nominal = 750;
1722     + else if (bt->bitrate > 500000)
1723     + sample_point_nominal = 800;
1724     + else
1725     + sample_point_nominal = 875;
1726     + }
1727     +
1728     + /* tseg even = round down, odd = round up */
1729     + for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
1730     + tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
1731     + tsegall = CAN_CALC_SYNC_SEG + tseg / 2;
1732     +
1733     + /* Compute all possible tseg choices (tseg=tseg1+tseg2) */
1734     + brp = priv->clock.freq / (tsegall * bt->bitrate) + tseg % 2;
1735     +
1736     + /* choose brp step which is possible in system */
1737     + brp = (brp / btc->brp_inc) * btc->brp_inc;
1738     + if (brp < btc->brp_min || brp > btc->brp_max)
1739     + continue;
1740     +
1741     + bitrate = priv->clock.freq / (brp * tsegall);
1742     + bitrate_error = abs(bt->bitrate - bitrate);
1743     +
1744     + /* tseg brp biterror */
1745     + if (bitrate_error > best_bitrate_error)
1746     + continue;
1747     +
1748     + /* reset sample point error if we have a better bitrate */
1749     + if (bitrate_error < best_bitrate_error)
1750     + best_sample_point_error = UINT_MAX;
1751     +
1752     + can_update_sample_point(btc, sample_point_nominal, tseg / 2,
1753     + &tseg1, &tseg2, &sample_point_error);
1754     + if (sample_point_error > best_sample_point_error)
1755     + continue;
1756     +
1757     + best_sample_point_error = sample_point_error;
1758     + best_bitrate_error = bitrate_error;
1759     + best_tseg = tseg / 2;
1760     + best_brp = brp;
1761     +
1762     + if (bitrate_error == 0 && sample_point_error == 0)
1763     + break;
1764     + }
1765     +
1766     + if (best_bitrate_error) {
1767     + /* Error in one-tenth of a percent */
1768     + v64 = (u64)best_bitrate_error * 1000;
1769     + do_div(v64, bt->bitrate);
1770     + bitrate_error = (u32)v64;
1771     + if (bitrate_error > CAN_CALC_MAX_ERROR) {
1772     + netdev_err(dev,
1773     + "bitrate error %d.%d%% too high\n",
1774     + bitrate_error / 10, bitrate_error % 10);
1775     + return -EDOM;
1776     + }
1777     + netdev_warn(dev, "bitrate error %d.%d%%\n",
1778     + bitrate_error / 10, bitrate_error % 10);
1779     + }
1780     +
1781     + /* real sample point */
1782     + bt->sample_point = can_update_sample_point(btc, sample_point_nominal,
1783     + best_tseg, &tseg1, &tseg2,
1784     + NULL);
1785     +
1786     + v64 = (u64)best_brp * 1000 * 1000 * 1000;
1787     + do_div(v64, priv->clock.freq);
1788     + bt->tq = (u32)v64;
1789     + bt->prop_seg = tseg1 / 2;
1790     + bt->phase_seg1 = tseg1 - bt->prop_seg;
1791     + bt->phase_seg2 = tseg2;
1792     +
1793     + /* check for sjw user settings */
1794     + if (!bt->sjw || !btc->sjw_max) {
1795     + bt->sjw = 1;
1796     + } else {
1797     + /* bt->sjw is at least 1 -> sanitize upper bound to sjw_max */
1798     + if (bt->sjw > btc->sjw_max)
1799     + bt->sjw = btc->sjw_max;
1800     + /* bt->sjw must not be higher than tseg2 */
1801     + if (tseg2 < bt->sjw)
1802     + bt->sjw = tseg2;
1803     + }
1804     +
1805     + bt->brp = best_brp;
1806     +
1807     + /* real bitrate */
1808     + bt->bitrate = priv->clock.freq /
1809     + (bt->brp * (CAN_CALC_SYNC_SEG + tseg1 + tseg2));
1810     +
1811     + return 0;
1812     +}
1813     +#else /* !CONFIG_CAN_CALC_BITTIMING */
1814     +static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
1815     + const struct can_bittiming_const *btc)
1816     +{
1817     + netdev_err(dev, "bit-timing calculation not available\n");
1818     + return -EINVAL;
1819     +}
1820     +#endif /* CONFIG_CAN_CALC_BITTIMING */
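The calculation above reduces to bitrate = clock / (brp * (sync_seg + tseg1 + tseg2)) and sample_point = 1000 * (sync_seg + tseg1) / (sync_seg + tseg1 + tseg2), expressed in tenths of a percent. A stand-alone sketch of that arithmetic; the 80 MHz clock and segment values are assumed example figures, not taken from the patch:

#include <stdio.h>

int main(void)
{
    /* Hypothetical controller settings, for illustration only. */
    const unsigned int clock_hz = 80000000; /* assumed 80 MHz CAN clock */
    const unsigned int brp = 10;            /* bitrate prescaler */
    const unsigned int tseg1 = 13;          /* prop_seg + phase_seg1 */
    const unsigned int tseg2 = 2;           /* phase_seg2 */
    const unsigned int sync_seg = 1;        /* always one time quantum */

    unsigned int nbt = sync_seg + tseg1 + tseg2;             /* bit time in tq */
    unsigned int bitrate = clock_hz / (brp * nbt);           /* 500000 bit/s */
    unsigned int sample_point = 1000 * (sync_seg + tseg1) / nbt; /* 875 = 87.5% */

    printf("bitrate=%u bit/s, sample point=%u.%u%%\n",
           bitrate, sample_point / 10, sample_point % 10);
    return 0;
}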
1821     +
1822     +/* Checks the validity of the specified bit-timing parameters prop_seg,
1823     + * phase_seg1, phase_seg2 and sjw and tries to determine the bitrate
1824     + * prescaler value brp. You can find more information in the header
1825     + * file linux/can/netlink.h.
1826     + */
1827     +static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
1828     + const struct can_bittiming_const *btc)
1829     +{
1830     + struct can_priv *priv = netdev_priv(dev);
1831     + int tseg1, alltseg;
1832     + u64 brp64;
1833     +
1834     + tseg1 = bt->prop_seg + bt->phase_seg1;
1835     + if (!bt->sjw)
1836     + bt->sjw = 1;
1837     + if (bt->sjw > btc->sjw_max ||
1838     + tseg1 < btc->tseg1_min || tseg1 > btc->tseg1_max ||
1839     + bt->phase_seg2 < btc->tseg2_min || bt->phase_seg2 > btc->tseg2_max)
1840     + return -ERANGE;
1841     +
1842     + brp64 = (u64)priv->clock.freq * (u64)bt->tq;
1843     + if (btc->brp_inc > 1)
1844     + do_div(brp64, btc->brp_inc);
1845     + brp64 += 500000000UL - 1;
1846     + do_div(brp64, 1000000000UL); /* the practicable BRP */
1847     + if (btc->brp_inc > 1)
1848     + brp64 *= btc->brp_inc;
1849     + bt->brp = (u32)brp64;
1850     +
1851     + if (bt->brp < btc->brp_min || bt->brp > btc->brp_max)
1852     + return -EINVAL;
1853     +
1854     + alltseg = bt->prop_seg + bt->phase_seg1 + bt->phase_seg2 + 1;
1855     + bt->bitrate = priv->clock.freq / (bt->brp * alltseg);
1856     + bt->sample_point = ((tseg1 + 1) * 1000) / alltseg;
1857     +
1858     + return 0;
1859     +}
1860     +
1861     +/* Checks the validity of predefined bitrate settings */
1862     +static int
1863     +can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
1864     + const u32 *bitrate_const,
1865     + const unsigned int bitrate_const_cnt)
1866     +{
1867     + struct can_priv *priv = netdev_priv(dev);
1868     + unsigned int i;
1869     +
1870     + for (i = 0; i < bitrate_const_cnt; i++) {
1871     + if (bt->bitrate == bitrate_const[i])
1872     + break;
1873     + }
1874     +
1875     + if (i >= priv->bitrate_const_cnt)
1876     + return -EINVAL;
1877     +
1878     + return 0;
1879     +}
1880     +
1881     +static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
1882     + const struct can_bittiming_const *btc,
1883     + const u32 *bitrate_const,
1884     + const unsigned int bitrate_const_cnt)
1885     +{
1886     + int err;
1887     +
1888     + /* Depending on the given can_bittiming parameter structure the CAN
1889     + * timing parameters are calculated based on the provided bitrate OR
1890     + * alternatively the CAN timing parameters (tq, prop_seg, etc.) are
1891     + * provided directly which are then checked and fixed up.
1892     + */
1893     + if (!bt->tq && bt->bitrate && btc)
1894     + err = can_calc_bittiming(dev, bt, btc);
1895     + else if (bt->tq && !bt->bitrate && btc)
1896     + err = can_fixup_bittiming(dev, bt, btc);
1897     + else if (!bt->tq && bt->bitrate && bitrate_const)
1898     + err = can_validate_bitrate(dev, bt, bitrate_const,
1899     + bitrate_const_cnt);
1900     + else
1901     + err = -EINVAL;
1902     +
1903     + return err;
1904     +}
1905     +
1906     +static void can_update_state_error_stats(struct net_device *dev,
1907     + enum can_state new_state)
1908     +{
1909     + struct can_priv *priv = netdev_priv(dev);
1910     +
1911     + if (new_state <= priv->state)
1912     + return;
1913     +
1914     + switch (new_state) {
1915     + case CAN_STATE_ERROR_WARNING:
1916     + priv->can_stats.error_warning++;
1917     + break;
1918     + case CAN_STATE_ERROR_PASSIVE:
1919     + priv->can_stats.error_passive++;
1920     + break;
1921     + case CAN_STATE_BUS_OFF:
1922     + priv->can_stats.bus_off++;
1923     + break;
1924     + default:
1925     + break;
1926     + }
1927     +}
1928     +
1929     +static int can_tx_state_to_frame(struct net_device *dev, enum can_state state)
1930     +{
1931     + switch (state) {
1932     + case CAN_STATE_ERROR_ACTIVE:
1933     + return CAN_ERR_CRTL_ACTIVE;
1934     + case CAN_STATE_ERROR_WARNING:
1935     + return CAN_ERR_CRTL_TX_WARNING;
1936     + case CAN_STATE_ERROR_PASSIVE:
1937     + return CAN_ERR_CRTL_TX_PASSIVE;
1938     + default:
1939     + return 0;
1940     + }
1941     +}
1942     +
1943     +static int can_rx_state_to_frame(struct net_device *dev, enum can_state state)
1944     +{
1945     + switch (state) {
1946     + case CAN_STATE_ERROR_ACTIVE:
1947     + return CAN_ERR_CRTL_ACTIVE;
1948     + case CAN_STATE_ERROR_WARNING:
1949     + return CAN_ERR_CRTL_RX_WARNING;
1950     + case CAN_STATE_ERROR_PASSIVE:
1951     + return CAN_ERR_CRTL_RX_PASSIVE;
1952     + default:
1953     + return 0;
1954     + }
1955     +}
1956     +
1957     +void can_change_state(struct net_device *dev, struct can_frame *cf,
1958     + enum can_state tx_state, enum can_state rx_state)
1959     +{
1960     + struct can_priv *priv = netdev_priv(dev);
1961     + enum can_state new_state = max(tx_state, rx_state);
1962     +
1963     + if (unlikely(new_state == priv->state)) {
1964     + netdev_warn(dev, "%s: oops, state did not change", __func__);
1965     + return;
1966     + }
1967     +
1968     + netdev_dbg(dev, "New error state: %d\n", new_state);
1969     +
1970     + can_update_state_error_stats(dev, new_state);
1971     + priv->state = new_state;
1972     +
1973     + if (!cf)
1974     + return;
1975     +
1976     + if (unlikely(new_state == CAN_STATE_BUS_OFF)) {
1977     + cf->can_id |= CAN_ERR_BUSOFF;
1978     + return;
1979     + }
1980     +
1981     + cf->can_id |= CAN_ERR_CRTL;
1982     + cf->data[1] |= tx_state >= rx_state ?
1983     + can_tx_state_to_frame(dev, tx_state) : 0;
1984     + cf->data[1] |= tx_state <= rx_state ?
1985     + can_rx_state_to_frame(dev, rx_state) : 0;
1986     +}
1987     +EXPORT_SYMBOL_GPL(can_change_state);
1988     +
1989     +/* Local echo of CAN messages
1990     + *
1991     + * CAN network devices *should* support a local echo functionality
1992     + * (see Documentation/networking/can.rst). To test the handling of CAN
1993     + * interfaces that do not support the local echo both driver types are
1994     + * implemented. In the case that the driver does not support the echo
1995     + * the IFF_ECHO remains clear in dev->flags. This causes the PF_CAN core
1996     + * to perform the echo as a fallback solution.
1997     + */
1998     +static void can_flush_echo_skb(struct net_device *dev)
1999     +{
2000     + struct can_priv *priv = netdev_priv(dev);
2001     + struct net_device_stats *stats = &dev->stats;
2002     + int i;
2003     +
2004     + for (i = 0; i < priv->echo_skb_max; i++) {
2005     + if (priv->echo_skb[i]) {
2006     + kfree_skb(priv->echo_skb[i]);
2007     + priv->echo_skb[i] = NULL;
2008     + stats->tx_dropped++;
2009     + stats->tx_aborted_errors++;
2010     + }
2011     + }
2012     +}
2013     +
2014     + * Put the skb on the stack to be looped back locally later on
2015     + *
2016     + * The function is typically called in the start_xmit function
2017     + * of the device driver. The driver must protect access to
2018     + * priv->echo_skb, if necessary.
2019     + */
2020     +void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
2021     + unsigned int idx)
2022     +{
2023     + struct can_priv *priv = netdev_priv(dev);
2024     +
2025     + BUG_ON(idx >= priv->echo_skb_max);
2026     +
2027     + /* check flag whether this packet has to be looped back */
2028     + if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK ||
2029     + (skb->protocol != htons(ETH_P_CAN) &&
2030     + skb->protocol != htons(ETH_P_CANFD))) {
2031     + kfree_skb(skb);
2032     + return;
2033     + }
2034     +
2035     + if (!priv->echo_skb[idx]) {
2036     + skb = can_create_echo_skb(skb);
2037     + if (!skb)
2038     + return;
2039     +
2040     + /* make settings for echo to reduce code in irq context */
2041     + skb->pkt_type = PACKET_BROADCAST;
2042     + skb->ip_summed = CHECKSUM_UNNECESSARY;
2043     + skb->dev = dev;
2044     +
2045     + /* save this skb for tx interrupt echo handling */
2046     + priv->echo_skb[idx] = skb;
2047     + } else {
2048     + /* locking problem with netif_stop_queue() ?? */
2049     + netdev_err(dev, "%s: BUG! echo_skb is occupied!\n", __func__);
2050     + kfree_skb(skb);
2051     + }
2052     +}
2053     +EXPORT_SYMBOL_GPL(can_put_echo_skb);
2054     +
2055     +struct sk_buff *
2056     +__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
2057     +{
2058     + struct can_priv *priv = netdev_priv(dev);
2059     +
2060     + if (idx >= priv->echo_skb_max) {
2061     + netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
2062     + __func__, idx, priv->echo_skb_max);
2063     + return NULL;
2064     + }
2065     +
2066     + if (priv->echo_skb[idx]) {
2067     + /* Using "struct canfd_frame::len" for the frame
2068     + * length is supported on both CAN and CANFD frames.
2069     + */
2070     + struct sk_buff *skb = priv->echo_skb[idx];
2071     + struct canfd_frame *cf = (struct canfd_frame *)skb->data;
2072     +
2073     + /* get the real payload length for netdev statistics */
2074     + if (cf->can_id & CAN_RTR_FLAG)
2075     + *len_ptr = 0;
2076     + else
2077     + *len_ptr = cf->len;
2078     +
2079     + priv->echo_skb[idx] = NULL;
2080     +
2081     + return skb;
2082     + }
2083     +
2084     + return NULL;
2085     +}
2086     +
2087     +/* Get the skb from the stack and loop it back locally
2088     + *
2089     + * The function is typically called when the TX done interrupt
2090     + * is handled in the device driver. The driver must protect
2091     + * access to priv->echo_skb, if necessary.
2092     + */
2093     +unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx)
2094     +{
2095     + struct sk_buff *skb;
2096     + u8 len;
2097     +
2098     + skb = __can_get_echo_skb(dev, idx, &len);
2099     + if (!skb)
2100     + return 0;
2101     +
2102     + skb_get(skb);
2103     + if (netif_rx(skb) == NET_RX_SUCCESS)
2104     + dev_consume_skb_any(skb);
2105     + else
2106     + dev_kfree_skb_any(skb);
2107     +
2108     + return len;
2109     +}
2110     +EXPORT_SYMBOL_GPL(can_get_echo_skb);
2111     +
2112     +/* Remove the skb from the stack and free it.
2113     + *
2114     + * The function is typically called when TX failed.
2115     + */
2116     +void can_free_echo_skb(struct net_device *dev, unsigned int idx)
2117     +{
2118     + struct can_priv *priv = netdev_priv(dev);
2119     +
2120     + BUG_ON(idx >= priv->echo_skb_max);
2121     +
2122     + if (priv->echo_skb[idx]) {
2123     + dev_kfree_skb_any(priv->echo_skb[idx]);
2124     + priv->echo_skb[idx] = NULL;
2125     + }
2126     +}
2127     +EXPORT_SYMBOL_GPL(can_free_echo_skb);
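The echo helpers above boil down to per-index slot bookkeeping: the TX path parks a copy of the frame in a free slot, and the TX-done path takes it back out (or frees it on error). A simplified user-space model of that bookkeeping, using plain strings instead of sk_buffs; the names put_echo/get_echo are illustrative, not the driver API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ECHO_SKB_MAX 4

static char *echo_slot[ECHO_SKB_MAX];

/* Park a copy of the frame in slot idx (mirrors can_put_echo_skb). */
static int put_echo(unsigned int idx, const char *frame)
{
    if (idx >= ECHO_SKB_MAX || echo_slot[idx])
        return -1;              /* out of range or slot already occupied */
    echo_slot[idx] = strdup(frame);
    return echo_slot[idx] ? 0 : -1;
}

/* Take the frame back out of slot idx (mirrors can_get_echo_skb). */
static char *get_echo(unsigned int idx)
{
    char *frame;

    if (idx >= ECHO_SKB_MAX)
        return NULL;
    frame = echo_slot[idx];
    echo_slot[idx] = NULL;      /* slot is free again */
    return frame;
}

int main(void)
{
    put_echo(0, "frame#0");
    char *f = get_echo(0);

    printf("echoed: %s\n", f ? f : "(none)");
    free(f);
    return 0;
}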
2128     +
2129     +/* CAN device restart for bus-off recovery */
2130     +static void can_restart(struct net_device *dev)
2131     +{
2132     + struct can_priv *priv = netdev_priv(dev);
2133     + struct net_device_stats *stats = &dev->stats;
2134     + struct sk_buff *skb;
2135     + struct can_frame *cf;
2136     + int err;
2137     +
2138     + BUG_ON(netif_carrier_ok(dev));
2139     +
2140     + /* No synchronization needed because the device is bus-off and
2141     + * no messages can come in or go out.
2142     + */
2143     + can_flush_echo_skb(dev);
2144     +
2145     + /* send restart message upstream */
2146     + skb = alloc_can_err_skb(dev, &cf);
2147     + if (!skb) {
2148     + err = -ENOMEM;
2149     + goto restart;
2150     + }
2151     + cf->can_id |= CAN_ERR_RESTARTED;
2152     +
2153     + stats->rx_packets++;
2154     + stats->rx_bytes += cf->can_dlc;
2155     +
2156     + netif_rx_ni(skb);
2157     +
2158     +restart:
2159     + netdev_dbg(dev, "restarted\n");
2160     + priv->can_stats.restarts++;
2161     +
2162     + /* Now restart the device */
2163     + err = priv->do_set_mode(dev, CAN_MODE_START);
2164     +
2165     + netif_carrier_on(dev);
2166     + if (err)
2167     + netdev_err(dev, "Error %d during restart", err);
2168     +}
2169     +
2170     +static void can_restart_work(struct work_struct *work)
2171     +{
2172     + struct delayed_work *dwork = to_delayed_work(work);
2173     + struct can_priv *priv = container_of(dwork, struct can_priv,
2174     + restart_work);
2175     +
2176     + can_restart(priv->dev);
2177     +}
2178     +
2179     +int can_restart_now(struct net_device *dev)
2180     +{
2181     + struct can_priv *priv = netdev_priv(dev);
2182     +
2183     + /* A manual restart is only permitted if automatic restart is
2184     + * disabled and the device is in the bus-off state
2185     + */
2186     + if (priv->restart_ms)
2187     + return -EINVAL;
2188     + if (priv->state != CAN_STATE_BUS_OFF)
2189     + return -EBUSY;
2190     +
2191     + cancel_delayed_work_sync(&priv->restart_work);
2192     + can_restart(dev);
2193     +
2194     + return 0;
2195     +}
2196     +
2197     +/* CAN bus-off
2198     + *
2199     + * This function should be called when the device goes bus-off to
2200     + * tell the netif layer that no more packets can be sent or received.
2201     + * If enabled, a timer is started to trigger bus-off recovery.
2202     + */
2203     +void can_bus_off(struct net_device *dev)
2204     +{
2205     + struct can_priv *priv = netdev_priv(dev);
2206     +
2207     + netdev_info(dev, "bus-off\n");
2208     +
2209     + netif_carrier_off(dev);
2210     +
2211     + if (priv->restart_ms)
2212     + schedule_delayed_work(&priv->restart_work,
2213     + msecs_to_jiffies(priv->restart_ms));
2214     +}
2215     +EXPORT_SYMBOL_GPL(can_bus_off);
2216     +
2217     +static void can_setup(struct net_device *dev)
2218     +{
2219     + dev->type = ARPHRD_CAN;
2220     + dev->mtu = CAN_MTU;
2221     + dev->hard_header_len = 0;
2222     + dev->addr_len = 0;
2223     + dev->tx_queue_len = 10;
2224     +
2225     + /* New-style flags. */
2226     + dev->flags = IFF_NOARP;
2227     + dev->features = NETIF_F_HW_CSUM;
2228     +}
2229     +
2230     +struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
2231     +{
2232     + struct sk_buff *skb;
2233     +
2234     + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
2235     + sizeof(struct can_frame));
2236     + if (unlikely(!skb))
2237     + return NULL;
2238     +
2239     + skb->protocol = htons(ETH_P_CAN);
2240     + skb->pkt_type = PACKET_BROADCAST;
2241     + skb->ip_summed = CHECKSUM_UNNECESSARY;
2242     +
2243     + skb_reset_mac_header(skb);
2244     + skb_reset_network_header(skb);
2245     + skb_reset_transport_header(skb);
2246     +
2247     + can_skb_reserve(skb);
2248     + can_skb_prv(skb)->ifindex = dev->ifindex;
2249     + can_skb_prv(skb)->skbcnt = 0;
2250     +
2251     + *cf = skb_put_zero(skb, sizeof(struct can_frame));
2252     +
2253     + return skb;
2254     +}
2255     +EXPORT_SYMBOL_GPL(alloc_can_skb);
2256     +
2257     +struct sk_buff *alloc_canfd_skb(struct net_device *dev,
2258     + struct canfd_frame **cfd)
2259     +{
2260     + struct sk_buff *skb;
2261     +
2262     + skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
2263     + sizeof(struct canfd_frame));
2264     + if (unlikely(!skb))
2265     + return NULL;
2266     +
2267     + skb->protocol = htons(ETH_P_CANFD);
2268     + skb->pkt_type = PACKET_BROADCAST;
2269     + skb->ip_summed = CHECKSUM_UNNECESSARY;
2270     +
2271     + skb_reset_mac_header(skb);
2272     + skb_reset_network_header(skb);
2273     + skb_reset_transport_header(skb);
2274     +
2275     + can_skb_reserve(skb);
2276     + can_skb_prv(skb)->ifindex = dev->ifindex;
2277     + can_skb_prv(skb)->skbcnt = 0;
2278     +
2279     + *cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
2280     +
2281     + return skb;
2282     +}
2283     +EXPORT_SYMBOL_GPL(alloc_canfd_skb);
2284     +
2285     +struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
2286     +{
2287     + struct sk_buff *skb;
2288     +
2289     + skb = alloc_can_skb(dev, cf);
2290     + if (unlikely(!skb))
2291     + return NULL;
2292     +
2293     + (*cf)->can_id = CAN_ERR_FLAG;
2294     + (*cf)->can_dlc = CAN_ERR_DLC;
2295     +
2296     + return skb;
2297     +}
2298     +EXPORT_SYMBOL_GPL(alloc_can_err_skb);
2299     +
2300     +/* Allocate and setup space for the CAN network device */
2301     +struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
2302     + unsigned int txqs, unsigned int rxqs)
2303     +{
2304     + struct can_ml_priv *can_ml;
2305     + struct net_device *dev;
2306     + struct can_priv *priv;
2307     + int size;
2308     +
2309     + /* We put the driver's priv, the CAN mid layer priv and the
2310     + * echo skb into the netdevice's priv. The memory layout for
2311     + * the netdev_priv is like this:
2312     + *
2313     + * +-------------------------+
2314     + * | driver's priv |
2315     + * +-------------------------+
2316     + * | struct can_ml_priv |
2317     + * +-------------------------+
2318     + * | array of struct sk_buff |
2319     + * +-------------------------+
2320     + */
2321     +
2322     + size = ALIGN(sizeof_priv, NETDEV_ALIGN) + sizeof(struct can_ml_priv);
2323     +
2324     + if (echo_skb_max)
2325     + size = ALIGN(size, sizeof(struct sk_buff *)) +
2326     + echo_skb_max * sizeof(struct sk_buff *);
2327     +
2328     + dev = alloc_netdev_mqs(size, "can%d", NET_NAME_UNKNOWN, can_setup,
2329     + txqs, rxqs);
2330     + if (!dev)
2331     + return NULL;
2332     +
2333     + priv = netdev_priv(dev);
2334     + priv->dev = dev;
2335     +
2336     + can_ml = (void *)priv + ALIGN(sizeof_priv, NETDEV_ALIGN);
2337     + can_set_ml_priv(dev, can_ml);
2338     +
2339     + if (echo_skb_max) {
2340     + priv->echo_skb_max = echo_skb_max;
2341     + priv->echo_skb = (void *)priv +
2342     + (size - echo_skb_max * sizeof(struct sk_buff *));
2343     + }
2344     +
2345     + priv->state = CAN_STATE_STOPPED;
2346     +
2347     + INIT_DELAYED_WORK(&priv->restart_work, can_restart_work);
2348     +
2349     + return dev;
2350     +}
2351     +EXPORT_SYMBOL_GPL(alloc_candev_mqs);
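The size computation above packs three regions into a single netdev_priv allocation. A small stand-alone sketch of the same offset arithmetic; the concrete sizes (96-byte driver priv, 40-byte can_ml_priv, 32-byte alignment standing in for NETDEV_ALIGN) are assumptions chosen only to make the numbers visible:

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
    /* All sizes below are illustrative assumptions, not taken from the patch. */
    size_t sizeof_priv = 96;            /* hypothetical driver priv */
    size_t netdev_align = 32;           /* stands in for NETDEV_ALIGN */
    size_t sizeof_can_ml_priv = 40;     /* hypothetical can_ml_priv */
    size_t echo_skb_max = 8;
    size_t ptr = sizeof(void *);

    size_t ml_off = ALIGN_UP(sizeof_priv, netdev_align);
    size_t size = ml_off + sizeof_can_ml_priv;

    /* Same shape as alloc_candev_mqs(): align, then append the echo array. */
    size = ALIGN_UP(size, ptr) + echo_skb_max * ptr;
    size_t echo_off = size - echo_skb_max * ptr;

    printf("can_ml_priv at offset %zu, echo skb array at %zu, total %zu bytes\n",
           ml_off, echo_off, size);
    return 0;
}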
2352     +
2353     +/* Free space of the CAN network device */
2354     +void free_candev(struct net_device *dev)
2355     +{
2356     + free_netdev(dev);
2357     +}
2358     +EXPORT_SYMBOL_GPL(free_candev);
2359     +
2360     +/* changing MTU and control mode for CAN/CANFD devices */
2361     +int can_change_mtu(struct net_device *dev, int new_mtu)
2362     +{
2363     + struct can_priv *priv = netdev_priv(dev);
2364     +
2365     + /* Do not allow changing the MTU while running */
2366     + if (dev->flags & IFF_UP)
2367     + return -EBUSY;
2368     +
2369     + /* allow change of MTU according to the CANFD ability of the device */
2370     + switch (new_mtu) {
2371     + case CAN_MTU:
2372     + /* 'CANFD-only' controllers can not switch to CAN_MTU */
2373     + if (priv->ctrlmode_static & CAN_CTRLMODE_FD)
2374     + return -EINVAL;
2375     +
2376     + priv->ctrlmode &= ~CAN_CTRLMODE_FD;
2377     + break;
2378     +
2379     + case CANFD_MTU:
2380     + /* check for potential CANFD ability */
2381     + if (!(priv->ctrlmode_supported & CAN_CTRLMODE_FD) &&
2382     + !(priv->ctrlmode_static & CAN_CTRLMODE_FD))
2383     + return -EINVAL;
2384     +
2385     + priv->ctrlmode |= CAN_CTRLMODE_FD;
2386     + break;
2387     +
2388     + default:
2389     + return -EINVAL;
2390     + }
2391     +
2392     + dev->mtu = new_mtu;
2393     + return 0;
2394     +}
2395     +EXPORT_SYMBOL_GPL(can_change_mtu);
2396     +
2397     +/* Common open function when the device gets opened.
2398     + *
2399     + * This function should be called in the open function of the device
2400     + * driver.
2401     + */
2402     +int open_candev(struct net_device *dev)
2403     +{
2404     + struct can_priv *priv = netdev_priv(dev);
2405     +
2406     + if (!priv->bittiming.bitrate) {
2407     + netdev_err(dev, "bit-timing not yet defined\n");
2408     + return -EINVAL;
2409     + }
2410     +
2411     + /* For CAN FD the data bitrate has to be >= the arbitration bitrate */
2412     + if ((priv->ctrlmode & CAN_CTRLMODE_FD) &&
2413     + (!priv->data_bittiming.bitrate ||
2414     + priv->data_bittiming.bitrate < priv->bittiming.bitrate)) {
2415     + netdev_err(dev, "incorrect/missing data bit-timing\n");
2416     + return -EINVAL;
2417     + }
2418     +
2419     + /* Switch carrier on if device was stopped while in bus-off state */
2420     + if (!netif_carrier_ok(dev))
2421     + netif_carrier_on(dev);
2422     +
2423     + return 0;
2424     +}
2425     +EXPORT_SYMBOL_GPL(open_candev);
2426     +
2427     +#ifdef CONFIG_OF
2428     +/* Common function that can be used to understand the limitations of
2429     + * a transceiver when it provides no means to determine these limitations
2430     + * at runtime.
2431     + */
2432     +void of_can_transceiver(struct net_device *dev)
2433     +{
2434     + struct device_node *dn;
2435     + struct can_priv *priv = netdev_priv(dev);
2436     + struct device_node *np = dev->dev.parent->of_node;
2437     + int ret;
2438     +
2439     + dn = of_get_child_by_name(np, "can-transceiver");
2440     + if (!dn)
2441     + return;
2442     +
2443     + ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
2444     + of_node_put(dn);
2445     + if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
2446     + netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
2447     +}
2448     +EXPORT_SYMBOL_GPL(of_can_transceiver);
2449     +#endif
2450     +
2451     +/* Common close function for cleanup before the device gets closed.
2452     + *
2453     + * This function should be called in the close function of the device
2454     + * driver.
2455     + */
2456     +void close_candev(struct net_device *dev)
2457     +{
2458     + struct can_priv *priv = netdev_priv(dev);
2459     +
2460     + cancel_delayed_work_sync(&priv->restart_work);
2461     + can_flush_echo_skb(dev);
2462     +}
2463     +EXPORT_SYMBOL_GPL(close_candev);
2464     +
2465     +/* CAN netlink interface */
2466     +static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
2467     + [IFLA_CAN_STATE] = { .type = NLA_U32 },
2468     + [IFLA_CAN_CTRLMODE] = { .len = sizeof(struct can_ctrlmode) },
2469     + [IFLA_CAN_RESTART_MS] = { .type = NLA_U32 },
2470     + [IFLA_CAN_RESTART] = { .type = NLA_U32 },
2471     + [IFLA_CAN_BITTIMING] = { .len = sizeof(struct can_bittiming) },
2472     + [IFLA_CAN_BITTIMING_CONST]
2473     + = { .len = sizeof(struct can_bittiming_const) },
2474     + [IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
2475     + [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
2476     + [IFLA_CAN_DATA_BITTIMING]
2477     + = { .len = sizeof(struct can_bittiming) },
2478     + [IFLA_CAN_DATA_BITTIMING_CONST]
2479     + = { .len = sizeof(struct can_bittiming_const) },
2480     + [IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
2481     +};
2482     +
2483     +static int can_validate(struct nlattr *tb[], struct nlattr *data[],
2484     + struct netlink_ext_ack *extack)
2485     +{
2486     + bool is_can_fd = false;
2487     +
2488     + /* Make sure that valid CAN FD configurations always consist of
2489     + * - nominal/arbitration bittiming
2490     + * - data bittiming
2491     + * - control mode with CAN_CTRLMODE_FD set
2492     + */
2493     +
2494     + if (!data)
2495     + return 0;
2496     +
2497     + if (data[IFLA_CAN_CTRLMODE]) {
2498     + struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
2499     +
2500     + is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
2501     + }
2502     +
2503     + if (is_can_fd) {
2504     + if (!data[IFLA_CAN_BITTIMING] || !data[IFLA_CAN_DATA_BITTIMING])
2505     + return -EOPNOTSUPP;
2506     + }
2507     +
2508     + if (data[IFLA_CAN_DATA_BITTIMING]) {
2509     + if (!is_can_fd || !data[IFLA_CAN_BITTIMING])
2510     + return -EOPNOTSUPP;
2511     + }
2512     +
2513     + return 0;
2514     +}
2515     +
2516     +static int can_changelink(struct net_device *dev, struct nlattr *tb[],
2517     + struct nlattr *data[],
2518     + struct netlink_ext_ack *extack)
2519     +{
2520     + struct can_priv *priv = netdev_priv(dev);
2521     + int err;
2522     +
2523     + /* We need synchronization with dev->stop() */
2524     + ASSERT_RTNL();
2525     +
2526     + if (data[IFLA_CAN_BITTIMING]) {
2527     + struct can_bittiming bt;
2528     +
2529     + /* Do not allow changing bittiming while running */
2530     + if (dev->flags & IFF_UP)
2531     + return -EBUSY;
2532     +
2533     + /* Calculate bittiming parameters based on
2534     + * bittiming_const if set, otherwise pass bitrate
2535     + * directly via do_set_bitrate(). Bail out if neither
2536     + * is given.
2537     + */
2538     + if (!priv->bittiming_const && !priv->do_set_bittiming)
2539     + return -EOPNOTSUPP;
2540     +
2541     + memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
2542     + err = can_get_bittiming(dev, &bt,
2543     + priv->bittiming_const,
2544     + priv->bitrate_const,
2545     + priv->bitrate_const_cnt);
2546     + if (err)
2547     + return err;
2548     +
2549     + if (priv->bitrate_max && bt.bitrate > priv->bitrate_max) {
2550     + netdev_err(dev, "arbitration bitrate surpasses transceiver capabilities of %d bps\n",
2551     + priv->bitrate_max);
2552     + return -EINVAL;
2553     + }
2554     +
2555     + memcpy(&priv->bittiming, &bt, sizeof(bt));
2556     +
2557     + if (priv->do_set_bittiming) {
2558     + /* Finally, set the bit-timing registers */
2559     + err = priv->do_set_bittiming(dev);
2560     + if (err)
2561     + return err;
2562     + }
2563     + }
2564     +
2565     + if (data[IFLA_CAN_CTRLMODE]) {
2566     + struct can_ctrlmode *cm;
2567     + u32 ctrlstatic;
2568     + u32 maskedflags;
2569     +
2570     + /* Do not allow changing controller mode while running */
2571     + if (dev->flags & IFF_UP)
2572     + return -EBUSY;
2573     + cm = nla_data(data[IFLA_CAN_CTRLMODE]);
2574     + ctrlstatic = priv->ctrlmode_static;
2575     + maskedflags = cm->flags & cm->mask;
2576     +
2577     + /* check whether provided bits are allowed to be passed */
2578     + if (cm->mask & ~(priv->ctrlmode_supported | ctrlstatic))
2579     + return -EOPNOTSUPP;
2580     +
2581     + /* do not check for static fd-non-iso if 'fd' is disabled */
2582     + if (!(maskedflags & CAN_CTRLMODE_FD))
2583     + ctrlstatic &= ~CAN_CTRLMODE_FD_NON_ISO;
2584     +
2585     + /* make sure static options are provided by configuration */
2586     + if ((maskedflags & ctrlstatic) != ctrlstatic)
2587     + return -EOPNOTSUPP;
2588     +
2589     + /* clear bits to be modified and copy the flag values */
2590     + priv->ctrlmode &= ~cm->mask;
2591     + priv->ctrlmode |= maskedflags;
2592     +
2593     + /* CAN_CTRLMODE_FD can only be set when driver supports FD */
2594     + if (priv->ctrlmode & CAN_CTRLMODE_FD)
2595     + dev->mtu = CANFD_MTU;
2596     + else
2597     + dev->mtu = CAN_MTU;
2598     + }
2599     +
2600     + if (data[IFLA_CAN_RESTART_MS]) {
2601     + /* Do not allow changing restart delay while running */
2602     + if (dev->flags & IFF_UP)
2603     + return -EBUSY;
2604     + priv->restart_ms = nla_get_u32(data[IFLA_CAN_RESTART_MS]);
2605     + }
2606     +
2607     + if (data[IFLA_CAN_RESTART]) {
2608     + /* Do not allow a restart while not running */
2609     + if (!(dev->flags & IFF_UP))
2610     + return -EINVAL;
2611     + err = can_restart_now(dev);
2612     + if (err)
2613     + return err;
2614     + }
2615     +
2616     + if (data[IFLA_CAN_DATA_BITTIMING]) {
2617     + struct can_bittiming dbt;
2618     +
2619     + /* Do not allow changing bittiming while running */
2620     + if (dev->flags & IFF_UP)
2621     + return -EBUSY;
2622     +
2623     + /* Calculate bittiming parameters based on
2624     + * data_bittiming_const if set, otherwise pass bitrate
2625     + * directly via do_set_bitrate(). Bail out if neither
2626     + * is given.
2627     + */
2628     + if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
2629     + return -EOPNOTSUPP;
2630     +
2631     + memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
2632     + sizeof(dbt));
2633     + err = can_get_bittiming(dev, &dbt,
2634     + priv->data_bittiming_const,
2635     + priv->data_bitrate_const,
2636     + priv->data_bitrate_const_cnt);
2637     + if (err)
2638     + return err;
2639     +
2640     + if (priv->bitrate_max && dbt.bitrate > priv->bitrate_max) {
2641     + netdev_err(dev, "canfd data bitrate surpasses transceiver capabilities of %d bps\n",
2642     + priv->bitrate_max);
2643     + return -EINVAL;
2644     + }
2645     +
2646     + memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
2647     +
2648     + if (priv->do_set_data_bittiming) {
2649     + /* Finally, set the bit-timing registers */
2650     + err = priv->do_set_data_bittiming(dev);
2651     + if (err)
2652     + return err;
2653     + }
2654     + }
2655     +
2656     + if (data[IFLA_CAN_TERMINATION]) {
2657     + const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
2658     + const unsigned int num_term = priv->termination_const_cnt;
2659     + unsigned int i;
2660     +
2661     + if (!priv->do_set_termination)
2662     + return -EOPNOTSUPP;
2663     +
2664     + /* check whether given value is supported by the interface */
2665     + for (i = 0; i < num_term; i++) {
2666     + if (termval == priv->termination_const[i])
2667     + break;
2668     + }
2669     + if (i >= num_term)
2670     + return -EINVAL;
2671     +
2672     + /* Finally, set the termination value */
2673     + err = priv->do_set_termination(dev, termval);
2674     + if (err)
2675     + return err;
2676     +
2677     + priv->termination = termval;
2678     + }
2679     +
2680     + return 0;
2681     +}
2682     +
2683     +static size_t can_get_size(const struct net_device *dev)
2684     +{
2685     + struct can_priv *priv = netdev_priv(dev);
2686     + size_t size = 0;
2687     +
2688     + if (priv->bittiming.bitrate) /* IFLA_CAN_BITTIMING */
2689     + size += nla_total_size(sizeof(struct can_bittiming));
2690     + if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
2691     + size += nla_total_size(sizeof(struct can_bittiming_const));
2692     + size += nla_total_size(sizeof(struct can_clock)); /* IFLA_CAN_CLOCK */
2693     + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_STATE */
2694     + size += nla_total_size(sizeof(struct can_ctrlmode)); /* IFLA_CAN_CTRLMODE */
2695     + size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
2696     + if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
2697     + size += nla_total_size(sizeof(struct can_berr_counter));
2698     + if (priv->data_bittiming.bitrate) /* IFLA_CAN_DATA_BITTIMING */
2699     + size += nla_total_size(sizeof(struct can_bittiming));
2700     + if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
2701     + size += nla_total_size(sizeof(struct can_bittiming_const));
2702     + if (priv->termination_const) {
2703     + size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
2704     + size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
2705     + priv->termination_const_cnt);
2706     + }
2707     + if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
2708     + size += nla_total_size(sizeof(*priv->bitrate_const) *
2709     + priv->bitrate_const_cnt);
2710     + if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
2711     + size += nla_total_size(sizeof(*priv->data_bitrate_const) *
2712     + priv->data_bitrate_const_cnt);
2713     + size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
2714     +
2715     + return size;
2716     +}
2717     +
2718     +static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
2719     +{
2720     + struct can_priv *priv = netdev_priv(dev);
2721     + struct can_ctrlmode cm = {.flags = priv->ctrlmode};
2722     + struct can_berr_counter bec = { };
2723     + enum can_state state = priv->state;
2724     +
2725     + if (priv->do_get_state)
2726     + priv->do_get_state(dev, &state);
2727     +
2728     + if ((priv->bittiming.bitrate &&
2729     + nla_put(skb, IFLA_CAN_BITTIMING,
2730     + sizeof(priv->bittiming), &priv->bittiming)) ||
2731     +
2732     + (priv->bittiming_const &&
2733     + nla_put(skb, IFLA_CAN_BITTIMING_CONST,
2734     + sizeof(*priv->bittiming_const), priv->bittiming_const)) ||
2735     +
2736     + nla_put(skb, IFLA_CAN_CLOCK, sizeof(priv->clock), &priv->clock) ||
2737     + nla_put_u32(skb, IFLA_CAN_STATE, state) ||
2738     + nla_put(skb, IFLA_CAN_CTRLMODE, sizeof(cm), &cm) ||
2739     + nla_put_u32(skb, IFLA_CAN_RESTART_MS, priv->restart_ms) ||
2740     +
2741     + (priv->do_get_berr_counter &&
2742     + !priv->do_get_berr_counter(dev, &bec) &&
2743     + nla_put(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec)) ||
2744     +
2745     + (priv->data_bittiming.bitrate &&
2746     + nla_put(skb, IFLA_CAN_DATA_BITTIMING,
2747     + sizeof(priv->data_bittiming), &priv->data_bittiming)) ||
2748     +
2749     + (priv->data_bittiming_const &&
2750     + nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
2751     + sizeof(*priv->data_bittiming_const),
2752     + priv->data_bittiming_const)) ||
2753     +
2754     + (priv->termination_const &&
2755     + (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
2756     + nla_put(skb, IFLA_CAN_TERMINATION_CONST,
2757     + sizeof(*priv->termination_const) *
2758     + priv->termination_const_cnt,
2759     + priv->termination_const))) ||
2760     +
2761     + (priv->bitrate_const &&
2762     + nla_put(skb, IFLA_CAN_BITRATE_CONST,
2763     + sizeof(*priv->bitrate_const) *
2764     + priv->bitrate_const_cnt,
2765     + priv->bitrate_const)) ||
2766     +
2767     + (priv->data_bitrate_const &&
2768     + nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
2769     + sizeof(*priv->data_bitrate_const) *
2770     + priv->data_bitrate_const_cnt,
2771     + priv->data_bitrate_const)) ||
2772     +
2773     + (nla_put(skb, IFLA_CAN_BITRATE_MAX,
2774     + sizeof(priv->bitrate_max),
2775     + &priv->bitrate_max))
2776     + )
2777     +
2778     + return -EMSGSIZE;
2779     +
2780     + return 0;
2781     +}
2782     +
2783     +static size_t can_get_xstats_size(const struct net_device *dev)
2784     +{
2785     + return sizeof(struct can_device_stats);
2786     +}
2787     +
2788     +static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
2789     +{
2790     + struct can_priv *priv = netdev_priv(dev);
2791     +
2792     + if (nla_put(skb, IFLA_INFO_XSTATS,
2793     + sizeof(priv->can_stats), &priv->can_stats))
2794     + goto nla_put_failure;
2795     + return 0;
2796     +
2797     +nla_put_failure:
2798     + return -EMSGSIZE;
2799     +}
2800     +
2801     +static int can_newlink(struct net *src_net, struct net_device *dev,
2802     + struct nlattr *tb[], struct nlattr *data[],
2803     + struct netlink_ext_ack *extack)
2804     +{
2805     + return -EOPNOTSUPP;
2806     +}
2807     +
2808     +static void can_dellink(struct net_device *dev, struct list_head *head)
2809     +{
2810     +}
2811     +
2812     +static struct rtnl_link_ops can_link_ops __read_mostly = {
2813     + .kind = "can",
2814     + .netns_refund = true,
2815     + .maxtype = IFLA_CAN_MAX,
2816     + .policy = can_policy,
2817     + .setup = can_setup,
2818     + .validate = can_validate,
2819     + .newlink = can_newlink,
2820     + .changelink = can_changelink,
2821     + .dellink = can_dellink,
2822     + .get_size = can_get_size,
2823     + .fill_info = can_fill_info,
2824     + .get_xstats_size = can_get_xstats_size,
2825     + .fill_xstats = can_fill_xstats,
2826     +};
2827     +
2828     +/* Register the CAN network device */
2829     +int register_candev(struct net_device *dev)
2830     +{
2831     + struct can_priv *priv = netdev_priv(dev);
2832     +
2833     + /* Ensure termination_const, termination_const_cnt and
2834     + * do_set_termination consistency. All must be either set or
2835     + * unset.
2836     + */
2837     + if ((!priv->termination_const != !priv->termination_const_cnt) ||
2838     + (!priv->termination_const != !priv->do_set_termination))
2839     + return -EINVAL;
2840     +
2841     + if (!priv->bitrate_const != !priv->bitrate_const_cnt)
2842     + return -EINVAL;
2843     +
2844     + if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
2845     + return -EINVAL;
2846     +
2847     + dev->rtnl_link_ops = &can_link_ops;
2848     + netif_carrier_off(dev);
2849     +
2850     + return register_netdev(dev);
2851     +}
2852     +EXPORT_SYMBOL_GPL(register_candev);
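The consistency checks above use the `!x != !y` pattern, which is true exactly when one of the two fields is set and the other is not. A tiny stand-alone illustration with hypothetical values:

#include <stdio.h>

int main(void)
{
    const unsigned int consts[] = { 120 };      /* hypothetical termination table */
    const unsigned int *termination_const = consts;
    unsigned int termination_const_cnt = 0;     /* inconsistent: table set, count unset */

    /* Same check shape as register_candev(): reject "one set, one unset". */
    if (!termination_const != !termination_const_cnt)
        printf("inconsistent configuration, would return -EINVAL\n");
    return 0;
}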
2853     +
2854     +/* Unregister the CAN network device */
2855     +void unregister_candev(struct net_device *dev)
2856     +{
2857     + unregister_netdev(dev);
2858     +}
2859     +EXPORT_SYMBOL_GPL(unregister_candev);
2860     +
2861     +/* Test if a network device is a candev based device
2862     + * and return the can_priv* if so.
2863     + */
2864     +struct can_priv *safe_candev_priv(struct net_device *dev)
2865     +{
2866     + if (dev->type != ARPHRD_CAN || dev->rtnl_link_ops != &can_link_ops)
2867     + return NULL;
2868     +
2869     + return netdev_priv(dev);
2870     +}
2871     +EXPORT_SYMBOL_GPL(safe_candev_priv);
2872     +
2873     +static __init int can_dev_init(void)
2874     +{
2875     + int err;
2876     +
2877     + can_led_notifier_init();
2878     +
2879     + err = rtnl_link_register(&can_link_ops);
2880     + if (!err)
2881     + pr_info(MOD_DESC "\n");
2882     +
2883     + return err;
2884     +}
2885     +module_init(can_dev_init);
2886     +
2887     +static __exit void can_dev_exit(void)
2888     +{
2889     + rtnl_link_unregister(&can_link_ops);
2890     +
2891     + can_led_notifier_exit();
2892     +}
2893     +module_exit(can_dev_exit);
2894     +
2895     +MODULE_ALIAS_RTNL_LINK("can");
2896     diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
2897     new file mode 100644
2898     index 0000000000000..7e75a87a8a6a9
2899     --- /dev/null
2900     +++ b/drivers/net/can/dev/rx-offload.c
2901     @@ -0,0 +1,395 @@
2902     +// SPDX-License-Identifier: GPL-2.0-only
2903     +/*
2904     + * Copyright (c) 2014 David Jander, Protonic Holland
2905     + * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
2906     + */
2907     +
2908     +#include <linux/can/dev.h>
2909     +#include <linux/can/rx-offload.h>
2910     +
2911     +struct can_rx_offload_cb {
2912     + u32 timestamp;
2913     +};
2914     +
2915     +static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
2916     +{
2917     + BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
2918     +
2919     + return (struct can_rx_offload_cb *)skb->cb;
2920     +}
2921     +
2922     +static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
2923     +{
2924     + if (offload->inc)
2925     + return a <= b;
2926     + else
2927     + return a >= b;
2928     +}
2929     +
2930     +static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
2931     +{
2932     + if (offload->inc)
2933     + return (*val)++;
2934     + else
2935     + return (*val)--;
2936     +}
2937     +
2938     +static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
2939     +{
2940     + struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
2941     + struct net_device *dev = offload->dev;
2942     + struct net_device_stats *stats = &dev->stats;
2943     + struct sk_buff *skb;
2944     + int work_done = 0;
2945     +
2946     + while ((work_done < quota) &&
2947     + (skb = skb_dequeue(&offload->skb_queue))) {
2948     + struct can_frame *cf = (struct can_frame *)skb->data;
2949     +
2950     + work_done++;
2951     + stats->rx_packets++;
2952     + stats->rx_bytes += cf->can_dlc;
2953     + netif_receive_skb(skb);
2954     + }
2955     +
2956     + if (work_done < quota) {
2957     + napi_complete_done(napi, work_done);
2958     +
2959     + /* Check if there was another interrupt */
2960     + if (!skb_queue_empty(&offload->skb_queue))
2961     + napi_reschedule(&offload->napi);
2962     + }
2963     +
2964     + can_led_event(offload->dev, CAN_LED_EVENT_RX);
2965     +
2966     + return work_done;
2967     +}
2968     +
2969     +static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
2970     + int (*compare)(struct sk_buff *a, struct sk_buff *b))
2971     +{
2972     + struct sk_buff *pos, *insert = NULL;
2973     +
2974     + skb_queue_reverse_walk(head, pos) {
2975     + const struct can_rx_offload_cb *cb_pos, *cb_new;
2976     +
2977     + cb_pos = can_rx_offload_get_cb(pos);
2978     + cb_new = can_rx_offload_get_cb(new);
2979     +
2980     + netdev_dbg(new->dev,
2981     + "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
2982     + __func__,
2983     + cb_pos->timestamp, cb_new->timestamp,
2984     + cb_new->timestamp - cb_pos->timestamp,
2985     + skb_queue_len(head));
2986     +
2987     + if (compare(pos, new) < 0)
2988     + continue;
2989     + insert = pos;
2990     + break;
2991     + }
2992     + if (!insert)
2993     + __skb_queue_head(head, new);
2994     + else
2995     + __skb_queue_after(head, insert, new);
2996     +}
2997     +
2998     +static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
2999     +{
3000     + const struct can_rx_offload_cb *cb_a, *cb_b;
3001     +
3002     + cb_a = can_rx_offload_get_cb(a);
3003     + cb_b = can_rx_offload_get_cb(b);
3004     +
3005     + /* Subtract two u32 and return result as int, to keep
3006     + * difference steady around the u32 overflow.
3007     + */
3008     + return cb_b->timestamp - cb_a->timestamp;
3009     +}
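The comparison above relies on unsigned wrap-around: subtracting two u32 timestamps and reading the result as a signed int preserves the ordering across the 2^32 overflow, as long as the real distance stays below 2^31. A quick stand-alone check with values picked to straddle the wrap:

#include <stdint.h>
#include <stdio.h>

/* Same shape as can_rx_offload_compare(): b - a interpreted as signed int. */
static int cmp_ts(uint32_t a, uint32_t b)
{
    return (int)(b - a);
}

int main(void)
{
    /* 'a' was captured just before the 32-bit counter wrapped,
     * 'b' just after; 'b' is still recognised as the newer one. */
    uint32_t a = 0xFFFFFFF0u;
    uint32_t b = 0x00000010u;

    printf("cmp_ts(a, b) = %d (positive => b is newer)\n", cmp_ts(a, b));
    return 0;
}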
3010     +
3011     +/**
3012     + * can_rx_offload_offload_one() - Read one CAN frame from HW
3013     + * @offload: pointer to rx_offload context
3014     + * @n: number of mailbox to read
3015     + *
3016     + * The task of this function is to read a CAN frame from mailbox @n
3017     + * from the device and return the mailbox's content as a struct
3018     + * sk_buff.
3019     + *
3020     + * If the struct can_rx_offload::skb_queue exceeds the maximal queue
3021     + * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
3022     + * allocated, the mailbox content is discarded by reading it into an
3023     + * overflow buffer. This way the mailbox is marked as free by the
3024     + * driver.
3025     + *
3026     + * Return: A pointer to skb containing the CAN frame on success.
3027     + *
3028     + * NULL if the mailbox @n is empty.
3029     + *
3030     + * ERR_PTR() in case of an error
3031     + */
3032     +static struct sk_buff *
3033     +can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
3034     +{
3035     + struct sk_buff *skb = NULL, *skb_error = NULL;
3036     + struct can_rx_offload_cb *cb;
3037     + struct can_frame *cf;
3038     + int ret;
3039     +
3040     + if (likely(skb_queue_len(&offload->skb_queue) <
3041     + offload->skb_queue_len_max)) {
3042     + skb = alloc_can_skb(offload->dev, &cf);
3043     + if (unlikely(!skb))
3044     + skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
3045     + } else {
3046     + skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
3047     + }
3048     +
3049     + /* If queue is full or skb not available, drop by reading into
3050     + * overflow buffer.
3051     + */
3052     + if (unlikely(skb_error)) {
3053     + struct can_frame cf_overflow;
3054     + u32 timestamp;
3055     +
3056     + ret = offload->mailbox_read(offload, &cf_overflow,
3057     + &timestamp, n);
3058     +
3059     + /* Mailbox was empty. */
3060     + if (unlikely(!ret))
3061     + return NULL;
3062     +
3063     + /* Mailbox has been read and we're dropping it or
3064     + * there was a problem reading the mailbox.
3065     + *
3066     + * Increment error counters in any case.
3067     + */
3068     + offload->dev->stats.rx_dropped++;
3069     + offload->dev->stats.rx_fifo_errors++;
3070     +
3071     + /* There was a problem reading the mailbox, propagate
3072     + * error value.
3073     + */
3074     + if (unlikely(ret < 0))
3075     + return ERR_PTR(ret);
3076     +
3077     + return skb_error;
3078     + }
3079     +
3080     + cb = can_rx_offload_get_cb(skb);
3081     + ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
3082     +
3083     + /* Mailbox was empty. */
3084     + if (unlikely(!ret)) {
3085     + kfree_skb(skb);
3086     + return NULL;
3087     + }
3088     +
3089     + /* There was a problem reading the mailbox, propagate error value. */
3090     + if (unlikely(ret < 0)) {
3091     + kfree_skb(skb);
3092     +
3093     + offload->dev->stats.rx_dropped++;
3094     + offload->dev->stats.rx_fifo_errors++;
3095     +
3096     + return ERR_PTR(ret);
3097     + }
3098     +
3099     + /* Mailbox was read. */
3100     + return skb;
3101     +}
3102     +
3103     +int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
3104     +{
3105     + struct sk_buff_head skb_queue;
3106     + unsigned int i;
3107     +
3108     + __skb_queue_head_init(&skb_queue);
3109     +
3110     + for (i = offload->mb_first;
3111     + can_rx_offload_le(offload, i, offload->mb_last);
3112     + can_rx_offload_inc(offload, &i)) {
3113     + struct sk_buff *skb;
3114     +
3115     + if (!(pending & BIT_ULL(i)))
3116     + continue;
3117     +
3118     + skb = can_rx_offload_offload_one(offload, i);
3119     + if (IS_ERR_OR_NULL(skb))
3120     + continue;
3121     +
3122     + __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
3123     + }
3124     +
3125     + if (!skb_queue_empty(&skb_queue)) {
3126     + unsigned long flags;
3127     + u32 queue_len;
3128     +
3129     + spin_lock_irqsave(&offload->skb_queue.lock, flags);
3130     + skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
3131     + spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
3132     +
3133     + if ((queue_len = skb_queue_len(&offload->skb_queue)) >
3134     + (offload->skb_queue_len_max / 8))
3135     + netdev_dbg(offload->dev, "%s: queue_len=%d\n",
3136     + __func__, queue_len);
3137     +
3138     + can_rx_offload_schedule(offload);
3139     + }
3140     +
3141     + return skb_queue_len(&skb_queue);
3142     +}
3143     +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
3144     +
3145     +int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
3146     +{
3147     + struct sk_buff *skb;
3148     + int received = 0;
3149     +
3150     + while (1) {
3151     + skb = can_rx_offload_offload_one(offload, 0);
3152     + if (IS_ERR(skb))
3153     + continue;
3154     + if (!skb)
3155     + break;
3156     +
3157     + skb_queue_tail(&offload->skb_queue, skb);
3158     + received++;
3159     + }
3160     +
3161     + if (received)
3162     + can_rx_offload_schedule(offload);
3163     +
3164     + return received;
3165     +}
3166     +EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
3167     +
3168     +int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
3169     + struct sk_buff *skb, u32 timestamp)
3170     +{
3171     + struct can_rx_offload_cb *cb;
3172     + unsigned long flags;
3173     +
3174     + if (skb_queue_len(&offload->skb_queue) >
3175     + offload->skb_queue_len_max) {
3176     + dev_kfree_skb_any(skb);
3177     + return -ENOBUFS;
3178     + }
3179     +
3180     + cb = can_rx_offload_get_cb(skb);
3181     + cb->timestamp = timestamp;
3182     +
3183     + spin_lock_irqsave(&offload->skb_queue.lock, flags);
3184     + __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
3185     + spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
3186     +
3187     + can_rx_offload_schedule(offload);
3188     +
3189     + return 0;
3190     +}
3191     +EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
3192     +
3193     +unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
3194     + unsigned int idx, u32 timestamp)
3195     +{
3196     + struct net_device *dev = offload->dev;
3197     + struct net_device_stats *stats = &dev->stats;
3198     + struct sk_buff *skb;
3199     + u8 len;
3200     + int err;
3201     +
3202     + skb = __can_get_echo_skb(dev, idx, &len);
3203     + if (!skb)
3204     + return 0;
3205     +
3206     + err = can_rx_offload_queue_sorted(offload, skb, timestamp);
3207     + if (err) {
3208     + stats->rx_errors++;
3209     + stats->tx_fifo_errors++;
3210     + }
3211     +
3212     + return len;
3213     +}
3214     +EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
3215     +
3216     +int can_rx_offload_queue_tail(struct can_rx_offload *offload,
3217     + struct sk_buff *skb)
3218     +{
3219     + if (skb_queue_len(&offload->skb_queue) >
3220     + offload->skb_queue_len_max) {
3221     + dev_kfree_skb_any(skb);
3222     + return -ENOBUFS;
3223     + }
3224     +
3225     + skb_queue_tail(&offload->skb_queue, skb);
3226     + can_rx_offload_schedule(offload);
3227     +
3228     + return 0;
3229     +}
3230     +EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
3231     +
3232     +static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
3233     +{
3234     + offload->dev = dev;
3235     +
3236     + /* Limit queue len to 4x the weight (rounted to next power of two) */
3237     + offload->skb_queue_len_max = 2 << fls(weight);
3238     + offload->skb_queue_len_max *= 4;
3239     + skb_queue_head_init(&offload->skb_queue);
3240     +
3241     + can_rx_offload_reset(offload);
3242     + netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
3243     +
3244     + dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
3245     + __func__, offload->skb_queue_len_max);
3246     +
3247     + return 0;
3248     +}
3249     +
3250     +int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
3251     +{
3252     + unsigned int weight;
3253     +
3254     + if (offload->mb_first > BITS_PER_LONG_LONG ||
3255     + offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
3256     + return -EINVAL;
3257     +
3258     + if (offload->mb_first < offload->mb_last) {
3259     + offload->inc = true;
3260     + weight = offload->mb_last - offload->mb_first;
3261     + } else {
3262     + offload->inc = false;
3263     + weight = offload->mb_first - offload->mb_last;
3264     + }
3265     +
3266     + return can_rx_offload_init_queue(dev, offload, weight);
3267     +}
3268     +EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
3269     +
3270     +int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
3271     +{
3272     + if (!offload->mailbox_read)
3273     + return -EINVAL;
3274     +
3275     + return can_rx_offload_init_queue(dev, offload, weight);
3276     +}
3277     +EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
3278     +
3279     +void can_rx_offload_enable(struct can_rx_offload *offload)
3280     +{
3281     + can_rx_offload_reset(offload);
3282     + napi_enable(&offload->napi);
3283     +}
3284     +EXPORT_SYMBOL_GPL(can_rx_offload_enable);
3285     +
3286     +void can_rx_offload_del(struct can_rx_offload *offload)
3287     +{
3288     + netif_napi_del(&offload->napi);
3289     + skb_queue_purge(&offload->skb_queue);
3290     +}
3291     +EXPORT_SYMBOL_GPL(can_rx_offload_del);
3292     +
3293     +void can_rx_offload_reset(struct can_rx_offload *offload)
3294     +{
3295     +}
3296     +EXPORT_SYMBOL_GPL(can_rx_offload_reset);
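
The can_rx_offload_compare() callback used by the sorted insert above subtracts two u32 hardware timestamps and returns the result as a signed int, so the ordering stays correct even when the 32-bit timestamp counter wraps between two frames. A minimal stand-alone C sketch (hypothetical values, not part of the patch) showing why the signed subtraction is wrap-safe while a plain `<` comparison is not:

/* Build: gcc -Wall -o ts_wrap ts_wrap.c && ./ts_wrap */
#include <stdint.h>
#include <stdio.h>

/* Same idea as can_rx_offload_compare(): subtract two u32 timestamps
 * and look at the sign of the result.  As long as the two timestamps
 * are less than 2^31 ticks apart, the sign says which one is newer,
 * even if the counter wrapped between them.
 */
static int ts_compare(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a);	/* > 0: b is newer, < 0: a is newer */
}

int main(void)
{
	uint32_t a = 0xfffffff0u;	/* taken just before the counter wrapped */
	uint32_t b = 0x00000010u;	/* taken just after the wrap, so b is newer */

	printf("naive  a < b      : %s\n", a < b ? "b newer" : "a newer (wrong)");
	printf("wrap-safe compare : %s\n",
	       ts_compare(a, b) > 0 ? "b newer" : "a newer (wrong)");
	return 0;
}
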
3297     diff --git a/drivers/net/can/m_can/tcan4x5x.c b/drivers/net/can/m_can/tcan4x5x.c
3298     index 32cb479fe6ac8..0d66582bd3560 100644
3299     --- a/drivers/net/can/m_can/tcan4x5x.c
3300     +++ b/drivers/net/can/m_can/tcan4x5x.c
3301     @@ -88,7 +88,7 @@
3302    
3303     #define TCAN4X5X_MRAM_START 0x8000
3304     #define TCAN4X5X_MCAN_OFFSET 0x1000
3305     -#define TCAN4X5X_MAX_REGISTER 0x8fff
3306     +#define TCAN4X5X_MAX_REGISTER 0x8ffc
3307    
3308     #define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
3309     #define TCAN4X5X_SET_ALL_INT 0xffffffff
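
The TCAN4X5X_MAX_REGISTER change reflects how regmap interprets max_register: it is the address of the last valid register, and with 32-bit registers the last 4-byte-aligned address below 0x9000 is 0x8ffc, not 0x8fff. An illustrative regmap_config fragment (assumed values, not the driver's actual configuration) showing that convention:

/* Sketch only: field values are assumptions for a device with a
 * 32-bit register file ending just below 0x9000.
 */
#include <linux/regmap.h>

static const struct regmap_config example_regmap_cfg = {
	.reg_bits	= 32,
	.val_bits	= 32,
	.reg_stride	= 4,		/* registers are 4 bytes apart */
	.max_register	= 0x8ffc,	/* address of the last register,
					 * not the last byte (0x8fff) */
};
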
3310     diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
3311     deleted file mode 100644
3312     index 7e75a87a8a6a9..0000000000000
3313     --- a/drivers/net/can/rx-offload.c
3314     +++ /dev/null
3315     @@ -1,395 +0,0 @@
3316     -// SPDX-License-Identifier: GPL-2.0-only
3317     -/*
3318     - * Copyright (c) 2014 David Jander, Protonic Holland
3319     - * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
3320     - */
3321     -
3322     -#include <linux/can/dev.h>
3323     -#include <linux/can/rx-offload.h>
3324     -
3325     -struct can_rx_offload_cb {
3326     - u32 timestamp;
3327     -};
3328     -
3329     -static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
3330     -{
3331     - BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));
3332     -
3333     - return (struct can_rx_offload_cb *)skb->cb;
3334     -}
3335     -
3336     -static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
3337     -{
3338     - if (offload->inc)
3339     - return a <= b;
3340     - else
3341     - return a >= b;
3342     -}
3343     -
3344     -static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
3345     -{
3346     - if (offload->inc)
3347     - return (*val)++;
3348     - else
3349     - return (*val)--;
3350     -}
3351     -
3352     -static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
3353     -{
3354     - struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
3355     - struct net_device *dev = offload->dev;
3356     - struct net_device_stats *stats = &dev->stats;
3357     - struct sk_buff *skb;
3358     - int work_done = 0;
3359     -
3360     - while ((work_done < quota) &&
3361     - (skb = skb_dequeue(&offload->skb_queue))) {
3362     - struct can_frame *cf = (struct can_frame *)skb->data;
3363     -
3364     - work_done++;
3365     - stats->rx_packets++;
3366     - stats->rx_bytes += cf->can_dlc;
3367     - netif_receive_skb(skb);
3368     - }
3369     -
3370     - if (work_done < quota) {
3371     - napi_complete_done(napi, work_done);
3372     -
3373     - /* Check if there was another interrupt */
3374     - if (!skb_queue_empty(&offload->skb_queue))
3375     - napi_reschedule(&offload->napi);
3376     - }
3377     -
3378     - can_led_event(offload->dev, CAN_LED_EVENT_RX);
3379     -
3380     - return work_done;
3381     -}
3382     -
3383     -static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
3384     - int (*compare)(struct sk_buff *a, struct sk_buff *b))
3385     -{
3386     - struct sk_buff *pos, *insert = NULL;
3387     -
3388     - skb_queue_reverse_walk(head, pos) {
3389     - const struct can_rx_offload_cb *cb_pos, *cb_new;
3390     -
3391     - cb_pos = can_rx_offload_get_cb(pos);
3392     - cb_new = can_rx_offload_get_cb(new);
3393     -
3394     - netdev_dbg(new->dev,
3395     - "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
3396     - __func__,
3397     - cb_pos->timestamp, cb_new->timestamp,
3398     - cb_new->timestamp - cb_pos->timestamp,
3399     - skb_queue_len(head));
3400     -
3401     - if (compare(pos, new) < 0)
3402     - continue;
3403     - insert = pos;
3404     - break;
3405     - }
3406     - if (!insert)
3407     - __skb_queue_head(head, new);
3408     - else
3409     - __skb_queue_after(head, insert, new);
3410     -}
3411     -
3412     -static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
3413     -{
3414     - const struct can_rx_offload_cb *cb_a, *cb_b;
3415     -
3416     - cb_a = can_rx_offload_get_cb(a);
3417     - cb_b = can_rx_offload_get_cb(b);
3418     -
3419     - /* Substract two u32 and return result as int, to keep
3420     - * difference steady around the u32 overflow.
3421     - */
3422     - return cb_b->timestamp - cb_a->timestamp;
3423     -}
3424     -
3425     -/**
3426     - * can_rx_offload_offload_one() - Read one CAN frame from HW
3427     - * @offload: pointer to rx_offload context
3428     - * @n: number of mailbox to read
3429     - *
3430     - * The task of this function is to read a CAN frame from mailbox @n
3431     - * from the device and return the mailbox's content as a struct
3432     - * sk_buff.
3433     - *
3434     - * If the struct can_rx_offload::skb_queue exceeds the maximal queue
3435     - * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
3436     - * allocated, the mailbox contents is discarded by reading it into an
3437     - * overflow buffer. This way the mailbox is marked as free by the
3438     - * driver.
3439     - *
3440     - * Return: A pointer to skb containing the CAN frame on success.
3441     - *
3442     - * NULL if the mailbox @n is empty.
3443     - *
3444     - * ERR_PTR() in case of an error
3445     - */
3446     -static struct sk_buff *
3447     -can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
3448     -{
3449     - struct sk_buff *skb = NULL, *skb_error = NULL;
3450     - struct can_rx_offload_cb *cb;
3451     - struct can_frame *cf;
3452     - int ret;
3453     -
3454     - if (likely(skb_queue_len(&offload->skb_queue) <
3455     - offload->skb_queue_len_max)) {
3456     - skb = alloc_can_skb(offload->dev, &cf);
3457     - if (unlikely(!skb))
3458     - skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
3459     - } else {
3460     - skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
3461     - }
3462     -
3463     - /* If queue is full or skb not available, drop by reading into
3464     - * overflow buffer.
3465     - */
3466     - if (unlikely(skb_error)) {
3467     - struct can_frame cf_overflow;
3468     - u32 timestamp;
3469     -
3470     - ret = offload->mailbox_read(offload, &cf_overflow,
3471     - &timestamp, n);
3472     -
3473     - /* Mailbox was empty. */
3474     - if (unlikely(!ret))
3475     - return NULL;
3476     -
3477     - /* Mailbox has been read and we're dropping it or
3478     - * there was a problem reading the mailbox.
3479     - *
3480     - * Increment error counters in any case.
3481     - */
3482     - offload->dev->stats.rx_dropped++;
3483     - offload->dev->stats.rx_fifo_errors++;
3484     -
3485     - /* There was a problem reading the mailbox, propagate
3486     - * error value.
3487     - */
3488     - if (unlikely(ret < 0))
3489     - return ERR_PTR(ret);
3490     -
3491     - return skb_error;
3492     - }
3493     -
3494     - cb = can_rx_offload_get_cb(skb);
3495     - ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
3496     -
3497     - /* Mailbox was empty. */
3498     - if (unlikely(!ret)) {
3499     - kfree_skb(skb);
3500     - return NULL;
3501     - }
3502     -
3503     - /* There was a problem reading the mailbox, propagate error value. */
3504     - if (unlikely(ret < 0)) {
3505     - kfree_skb(skb);
3506     -
3507     - offload->dev->stats.rx_dropped++;
3508     - offload->dev->stats.rx_fifo_errors++;
3509     -
3510     - return ERR_PTR(ret);
3511     - }
3512     -
3513     - /* Mailbox was read. */
3514     - return skb;
3515     -}
3516     -
3517     -int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
3518     -{
3519     - struct sk_buff_head skb_queue;
3520     - unsigned int i;
3521     -
3522     - __skb_queue_head_init(&skb_queue);
3523     -
3524     - for (i = offload->mb_first;
3525     - can_rx_offload_le(offload, i, offload->mb_last);
3526     - can_rx_offload_inc(offload, &i)) {
3527     - struct sk_buff *skb;
3528     -
3529     - if (!(pending & BIT_ULL(i)))
3530     - continue;
3531     -
3532     - skb = can_rx_offload_offload_one(offload, i);
3533     - if (IS_ERR_OR_NULL(skb))
3534     - continue;
3535     -
3536     - __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
3537     - }
3538     -
3539     - if (!skb_queue_empty(&skb_queue)) {
3540     - unsigned long flags;
3541     - u32 queue_len;
3542     -
3543     - spin_lock_irqsave(&offload->skb_queue.lock, flags);
3544     - skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
3545     - spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
3546     -
3547     - if ((queue_len = skb_queue_len(&offload->skb_queue)) >
3548     - (offload->skb_queue_len_max / 8))
3549     - netdev_dbg(offload->dev, "%s: queue_len=%d\n",
3550     - __func__, queue_len);
3551     -
3552     - can_rx_offload_schedule(offload);
3553     - }
3554     -
3555     - return skb_queue_len(&skb_queue);
3556     -}
3557     -EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);
3558     -
3559     -int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
3560     -{
3561     - struct sk_buff *skb;
3562     - int received = 0;
3563     -
3564     - while (1) {
3565     - skb = can_rx_offload_offload_one(offload, 0);
3566     - if (IS_ERR(skb))
3567     - continue;
3568     - if (!skb)
3569     - break;
3570     -
3571     - skb_queue_tail(&offload->skb_queue, skb);
3572     - received++;
3573     - }
3574     -
3575     - if (received)
3576     - can_rx_offload_schedule(offload);
3577     -
3578     - return received;
3579     -}
3580     -EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);
3581     -
3582     -int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
3583     - struct sk_buff *skb, u32 timestamp)
3584     -{
3585     - struct can_rx_offload_cb *cb;
3586     - unsigned long flags;
3587     -
3588     - if (skb_queue_len(&offload->skb_queue) >
3589     - offload->skb_queue_len_max) {
3590     - dev_kfree_skb_any(skb);
3591     - return -ENOBUFS;
3592     - }
3593     -
3594     - cb = can_rx_offload_get_cb(skb);
3595     - cb->timestamp = timestamp;
3596     -
3597     - spin_lock_irqsave(&offload->skb_queue.lock, flags);
3598     - __skb_queue_add_sort(&offload->skb_queue, skb, can_rx_offload_compare);
3599     - spin_unlock_irqrestore(&offload->skb_queue.lock, flags);
3600     -
3601     - can_rx_offload_schedule(offload);
3602     -
3603     - return 0;
3604     -}
3605     -EXPORT_SYMBOL_GPL(can_rx_offload_queue_sorted);
3606     -
3607     -unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
3608     - unsigned int idx, u32 timestamp)
3609     -{
3610     - struct net_device *dev = offload->dev;
3611     - struct net_device_stats *stats = &dev->stats;
3612     - struct sk_buff *skb;
3613     - u8 len;
3614     - int err;
3615     -
3616     - skb = __can_get_echo_skb(dev, idx, &len);
3617     - if (!skb)
3618     - return 0;
3619     -
3620     - err = can_rx_offload_queue_sorted(offload, skb, timestamp);
3621     - if (err) {
3622     - stats->rx_errors++;
3623     - stats->tx_fifo_errors++;
3624     - }
3625     -
3626     - return len;
3627     -}
3628     -EXPORT_SYMBOL_GPL(can_rx_offload_get_echo_skb);
3629     -
3630     -int can_rx_offload_queue_tail(struct can_rx_offload *offload,
3631     - struct sk_buff *skb)
3632     -{
3633     - if (skb_queue_len(&offload->skb_queue) >
3634     - offload->skb_queue_len_max) {
3635     - dev_kfree_skb_any(skb);
3636     - return -ENOBUFS;
3637     - }
3638     -
3639     - skb_queue_tail(&offload->skb_queue, skb);
3640     - can_rx_offload_schedule(offload);
3641     -
3642     - return 0;
3643     -}
3644     -EXPORT_SYMBOL_GPL(can_rx_offload_queue_tail);
3645     -
3646     -static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
3647     -{
3648     - offload->dev = dev;
3649     -
3650     - /* Limit queue len to 4x the weight (rounted to next power of two) */
3651     - offload->skb_queue_len_max = 2 << fls(weight);
3652     - offload->skb_queue_len_max *= 4;
3653     - skb_queue_head_init(&offload->skb_queue);
3654     -
3655     - can_rx_offload_reset(offload);
3656     - netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);
3657     -
3658     - dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
3659     - __func__, offload->skb_queue_len_max);
3660     -
3661     - return 0;
3662     -}
3663     -
3664     -int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
3665     -{
3666     - unsigned int weight;
3667     -
3668     - if (offload->mb_first > BITS_PER_LONG_LONG ||
3669     - offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
3670     - return -EINVAL;
3671     -
3672     - if (offload->mb_first < offload->mb_last) {
3673     - offload->inc = true;
3674     - weight = offload->mb_last - offload->mb_first;
3675     - } else {
3676     - offload->inc = false;
3677     - weight = offload->mb_first - offload->mb_last;
3678     - }
3679     -
3680     - return can_rx_offload_init_queue(dev, offload, weight);
3681     -}
3682     -EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);
3683     -
3684     -int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
3685     -{
3686     - if (!offload->mailbox_read)
3687     - return -EINVAL;
3688     -
3689     - return can_rx_offload_init_queue(dev, offload, weight);
3690     -}
3691     -EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);
3692     -
3693     -void can_rx_offload_enable(struct can_rx_offload *offload)
3694     -{
3695     - can_rx_offload_reset(offload);
3696     - napi_enable(&offload->napi);
3697     -}
3698     -EXPORT_SYMBOL_GPL(can_rx_offload_enable);
3699     -
3700     -void can_rx_offload_del(struct can_rx_offload *offload)
3701     -{
3702     - netif_napi_del(&offload->napi);
3703     - skb_queue_purge(&offload->skb_queue);
3704     -}
3705     -EXPORT_SYMBOL_GPL(can_rx_offload_del);
3706     -
3707     -void can_rx_offload_reset(struct can_rx_offload *offload)
3708     -{
3709     -}
3710     -EXPORT_SYMBOL_GPL(can_rx_offload_reset);
3711     diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
3712     index 4dfa459ef5c73..95fefb1eef361 100644
3713     --- a/drivers/net/can/slcan.c
3714     +++ b/drivers/net/can/slcan.c
3715     @@ -519,6 +519,7 @@ static struct slcan *slc_alloc(void)
3716     int i;
3717     char name[IFNAMSIZ];
3718     struct net_device *dev = NULL;
3719     + struct can_ml_priv *can_ml;
3720     struct slcan *sl;
3721     int size;
3722    
3723     @@ -541,7 +542,8 @@ static struct slcan *slc_alloc(void)
3724    
3725     dev->base_addr = i;
3726     sl = netdev_priv(dev);
3727     - dev->ml_priv = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
3728     + can_ml = (void *)sl + ALIGN(sizeof(*sl), NETDEV_ALIGN);
3729     + can_set_ml_priv(dev, can_ml);
3730    
3731     /* Initialize channel control data */
3732     sl->magic = SLCAN_MAGIC;
3733     diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
3734     index 39ca14b0585dc..067705e2850b3 100644
3735     --- a/drivers/net/can/vcan.c
3736     +++ b/drivers/net/can/vcan.c
3737     @@ -153,7 +153,7 @@ static void vcan_setup(struct net_device *dev)
3738     dev->addr_len = 0;
3739     dev->tx_queue_len = 0;
3740     dev->flags = IFF_NOARP;
3741     - dev->ml_priv = netdev_priv(dev);
3742     + can_set_ml_priv(dev, netdev_priv(dev));
3743    
3744     /* set flags according to driver capabilities */
3745     if (echo)
3746     diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
3747     index b1baa4ac1d537..7000c6cd1e48b 100644
3748     --- a/drivers/net/can/vxcan.c
3749     +++ b/drivers/net/can/vxcan.c
3750     @@ -141,6 +141,8 @@ static const struct net_device_ops vxcan_netdev_ops = {
3751    
3752     static void vxcan_setup(struct net_device *dev)
3753     {
3754     + struct can_ml_priv *can_ml;
3755     +
3756     dev->type = ARPHRD_CAN;
3757     dev->mtu = CANFD_MTU;
3758     dev->hard_header_len = 0;
3759     @@ -149,7 +151,9 @@ static void vxcan_setup(struct net_device *dev)
3760     dev->flags = (IFF_NOARP|IFF_ECHO);
3761     dev->netdev_ops = &vxcan_netdev_ops;
3762     dev->needs_free_netdev = true;
3763     - dev->ml_priv = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
3764     +
3765     + can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN);
3766     + can_set_ml_priv(dev, can_ml);
3767     }
3768    
3769     /* forward declaration for rtnl_create_link() */
3770     diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
3771     index bb65dd39f8474..72c7404ae6c5b 100644
3772     --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
3773     +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
3774     @@ -66,8 +66,10 @@ static int aq_ndev_open(struct net_device *ndev)
3775     goto err_exit;
3776    
3777     err = aq_nic_start(aq_nic);
3778     - if (err < 0)
3779     + if (err < 0) {
3780     + aq_nic_stop(aq_nic);
3781     goto err_exit;
3782     + }
3783    
3784     err_exit:
3785     if (err < 0)
3786     diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3787     index 9a77b70ad601b..491bcfd36ac25 100644
3788     --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3789     +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3790     @@ -1073,7 +1073,7 @@ static void mvpp2_interrupts_unmask(void *arg)
3791     u32 val;
3792    
3793     /* If the thread isn't used, don't do anything */
3794     - if (smp_processor_id() > port->priv->nthreads)
3795     + if (smp_processor_id() >= port->priv->nthreads)
3796     return;
3797    
3798     val = MVPP2_CAUSE_MISC_SUM_MASK |
3799     @@ -2078,7 +2078,7 @@ static void mvpp2_txq_sent_counter_clear(void *arg)
3800     int queue;
3801    
3802     /* If the thread isn't used, don't do anything */
3803     - if (smp_processor_id() > port->priv->nthreads)
3804     + if (smp_processor_id() >= port->priv->nthreads)
3805     return;
3806    
3807     for (queue = 0; queue < port->ntxqs; queue++) {
3808     diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
3809     index 0e6a51525d913..f3deb2a2fa47d 100644
3810     --- a/drivers/net/wan/lmc/lmc_main.c
3811     +++ b/drivers/net/wan/lmc/lmc_main.c
3812     @@ -912,6 +912,8 @@ static int lmc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3813     break;
3814     default:
3815     printk(KERN_WARNING "%s: LMC UNKNOWN CARD!\n", dev->name);
3816     + unregister_hdlc_device(dev);
3817     + return -EIO;
3818     break;
3819     }
3820    
3821     diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
3822     index 3ec71f52e8fe1..d38276ac375e9 100644
3823     --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
3824     +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
3825     @@ -445,13 +445,13 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
3826     case WMI_TDLS_TEARDOWN_REASON_TX:
3827     case WMI_TDLS_TEARDOWN_REASON_RSSI:
3828     case WMI_TDLS_TEARDOWN_REASON_PTR_TIMEOUT:
3829     + rcu_read_lock();
3830     station = ieee80211_find_sta_by_ifaddr(ar->hw,
3831     ev->peer_macaddr.addr,
3832     NULL);
3833     if (!station) {
3834     ath10k_warn(ar, "did not find station from tdls peer event");
3835     - kfree(tb);
3836     - return;
3837     + goto exit;
3838     }
3839     arvif = ath10k_get_arvif(ar, __le32_to_cpu(ev->vdev_id));
3840     ieee80211_tdls_oper_request(
3841     @@ -462,6 +462,9 @@ static void ath10k_wmi_event_tdls_peer(struct ath10k *ar, struct sk_buff *skb)
3842     );
3843     break;
3844     }
3845     +
3846     +exit:
3847     + rcu_read_unlock();
3848     kfree(tb);
3849     }
3850    
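
The ath10k change takes rcu_read_lock() before ieee80211_find_sta_by_ifaddr() and funnels every exit through a single label so the RCU read section is always closed and the tb buffer is always freed, instead of returning early with both still held. A small stand-alone C sketch (the lock and buffer here are toy stand-ins, not mac80211 objects) of that single-exit cleanup pattern:

/* Build: gcc -Wall -o one_exit one_exit.c && ./one_exit */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void fake_lock(void)   { puts("  lock taken"); }
static void fake_unlock(void) { puts("  lock released"); }

/* Mirrors the structure of the fixed handler: take a lock, allocate a
 * buffer, and make sure every branch releases both via one exit label
 * instead of returning early.
 */
static int handle_event(int station_missing)
{
	char *tb = strdup("parsed TLVs");	/* stands in for the kmalloc'd tb */
	int ret = 0;

	if (!tb)
		return -ENOMEM;

	fake_lock();				/* stands in for rcu_read_lock() */

	if (station_missing) {
		puts("  station not found, bailing out");
		ret = -ENOENT;
		goto exit;			/* not an early return */
	}

	puts("  station found, teardown requested");

exit:
	fake_unlock();				/* stands in for rcu_read_unlock() */
	free(tb);				/* stands in for kfree(tb) */
	return ret;
}

int main(void)
{
	puts("normal path:");
	handle_event(0);
	puts("error path:");
	handle_event(1);
	return 0;
}
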
3851     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3852     index 4ca50353538ef..cd813c69a1781 100644
3853     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3854     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
3855     @@ -5382,7 +5382,8 @@ static bool brcmf_is_linkup(struct brcmf_cfg80211_vif *vif,
3856     return false;
3857     }
3858    
3859     -static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
3860     +static bool brcmf_is_linkdown(struct brcmf_cfg80211_vif *vif,
3861     + const struct brcmf_event_msg *e)
3862     {
3863     u32 event = e->event_code;
3864     u16 flags = e->flags;
3865     @@ -5391,6 +5392,8 @@ static bool brcmf_is_linkdown(const struct brcmf_event_msg *e)
3866     (event == BRCMF_E_DISASSOC_IND) ||
3867     ((event == BRCMF_E_LINK) && (!(flags & BRCMF_EVENT_MSG_LINK)))) {
3868     brcmf_dbg(CONN, "Processing link down\n");
3869     + clear_bit(BRCMF_VIF_STATUS_EAP_SUCCESS, &vif->sme_state);
3870     + clear_bit(BRCMF_VIF_STATUS_ASSOC_SUCCESS, &vif->sme_state);
3871     return true;
3872     }
3873     return false;
3874     @@ -5683,7 +5686,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
3875     } else
3876     brcmf_bss_connect_done(cfg, ndev, e, true);
3877     brcmf_net_setcarrier(ifp, true);
3878     - } else if (brcmf_is_linkdown(e)) {
3879     + } else if (brcmf_is_linkdown(ifp->vif, e)) {
3880     brcmf_dbg(CONN, "Linkdown\n");
3881     if (!brcmf_is_ibssmode(ifp->vif)) {
3882     brcmf_bss_connect_done(cfg, ndev, e, false);
3883     diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
3884     index 930edfc32f597..59fe3204e965d 100644
3885     --- a/drivers/pinctrl/pinctrl-rockchip.c
3886     +++ b/drivers/pinctrl/pinctrl-rockchip.c
3887     @@ -3386,12 +3386,15 @@ static int __maybe_unused rockchip_pinctrl_suspend(struct device *dev)
3888     static int __maybe_unused rockchip_pinctrl_resume(struct device *dev)
3889     {
3890     struct rockchip_pinctrl *info = dev_get_drvdata(dev);
3891     - int ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
3892     - rk3288_grf_gpio6c_iomux |
3893     - GPIO6C6_SEL_WRITE_ENABLE);
3894     + int ret;
3895    
3896     - if (ret)
3897     - return ret;
3898     + if (info->ctrl->type == RK3288) {
3899     + ret = regmap_write(info->regmap_base, RK3288_GRF_GPIO6C_IOMUX,
3900     + rk3288_grf_gpio6c_iomux |
3901     + GPIO6C6_SEL_WRITE_ENABLE);
3902     + if (ret)
3903     + return ret;
3904     + }
3905    
3906     return pinctrl_force_default(info->pctl_dev);
3907     }
3908     diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
3909     index d006f0a97b8cf..2236751a3a568 100644
3910     --- a/drivers/scsi/qla2xxx/qla_target.h
3911     +++ b/drivers/scsi/qla2xxx/qla_target.h
3912     @@ -116,7 +116,6 @@
3913     (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
3914     QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
3915     #endif
3916     -#endif
3917    
3918     #define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
3919     ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
3920     @@ -244,6 +243,7 @@ struct ctio_to_2xxx {
3921     #ifndef CTIO_RET_TYPE
3922     #define CTIO_RET_TYPE 0x17 /* CTIO return entry */
3923     #define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
3924     +#endif
3925    
3926     struct fcp_hdr {
3927     uint8_t r_ctl;
3928     diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
3929     index e3266a64a4770..2121e44c342f8 100644
3930     --- a/drivers/scsi/st.c
3931     +++ b/drivers/scsi/st.c
3932     @@ -1267,8 +1267,8 @@ static int st_open(struct inode *inode, struct file *filp)
3933     spin_lock(&st_use_lock);
3934     if (STp->in_use) {
3935     spin_unlock(&st_use_lock);
3936     - scsi_tape_put(STp);
3937     DEBC_printk(STp, "Device already in use.\n");
3938     + scsi_tape_put(STp);
3939     return (-EBUSY);
3940     }
3941    
3942     diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
3943     index 1893c70de0b93..10f097a7d847d 100644
3944     --- a/drivers/staging/comedi/drivers/cb_pcidas.c
3945     +++ b/drivers/staging/comedi/drivers/cb_pcidas.c
3946     @@ -1281,7 +1281,7 @@ static int cb_pcidas_auto_attach(struct comedi_device *dev,
3947     devpriv->amcc + AMCC_OP_REG_INTCSR);
3948    
3949     ret = request_irq(pcidev->irq, cb_pcidas_interrupt, IRQF_SHARED,
3950     - dev->board_name, dev);
3951     + "cb_pcidas", dev);
3952     if (ret) {
3953     dev_dbg(dev->class_dev, "unable to allocate irq %d\n",
3954     pcidev->irq);
3955     diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
3956     index e1774e09a3203..9fe8b65cd9e3a 100644
3957     --- a/drivers/staging/comedi/drivers/cb_pcidas64.c
3958     +++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
3959     @@ -4035,7 +4035,7 @@ static int auto_attach(struct comedi_device *dev,
3960     init_stc_registers(dev);
3961    
3962     retval = request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
3963     - dev->board_name, dev);
3964     + "cb_pcidas64", dev);
3965     if (retval) {
3966     dev_dbg(dev->class_dev, "unable to allocate irq %u\n",
3967     pcidev->irq);
3968     diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
3969     index 328f410daa035..2eeb9a43734e3 100644
3970     --- a/drivers/staging/rtl8192e/rtllib.h
3971     +++ b/drivers/staging/rtl8192e/rtllib.h
3972     @@ -1105,7 +1105,7 @@ struct rtllib_network {
3973     bool bWithAironetIE;
3974     bool bCkipSupported;
3975     bool bCcxRmEnable;
3976     - u16 CcxRmState[2];
3977     + u8 CcxRmState[2];
3978     bool bMBssidValid;
3979     u8 MBssidMask;
3980     u8 MBssid[ETH_ALEN];
3981     diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
3982     index 0bae0a0a4cbe1..83c30e2d82f5f 100644
3983     --- a/drivers/staging/rtl8192e/rtllib_rx.c
3984     +++ b/drivers/staging/rtl8192e/rtllib_rx.c
3985     @@ -1968,7 +1968,7 @@ static void rtllib_parse_mife_generic(struct rtllib_device *ieee,
3986     info_element->data[2] == 0x96 &&
3987     info_element->data[3] == 0x01) {
3988     if (info_element->len == 6) {
3989     - memcpy(network->CcxRmState, &info_element[4], 2);
3990     + memcpy(network->CcxRmState, &info_element->data[4], 2);
3991     if (network->CcxRmState[0] != 0)
3992     network->bCcxRmEnable = true;
3993     else
3994     diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
3995     index aa99edb4dff7d..4dce4a8f71ed9 100644
3996     --- a/drivers/thermal/thermal_sysfs.c
3997     +++ b/drivers/thermal/thermal_sysfs.c
3998     @@ -770,6 +770,9 @@ void thermal_cooling_device_stats_update(struct thermal_cooling_device *cdev,
3999     {
4000     struct cooling_dev_stats *stats = cdev->stats;
4001    
4002     + if (!stats)
4003     + return;
4004     +
4005     spin_lock(&stats->lock);
4006    
4007     if (stats->state == new_state)
4008     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
4009     index 00bfc81f24702..2b672840dfd9a 100644
4010     --- a/drivers/usb/class/cdc-acm.c
4011     +++ b/drivers/usb/class/cdc-acm.c
4012     @@ -147,17 +147,29 @@ static inline int acm_set_control(struct acm *acm, int control)
4013     #define acm_send_break(acm, ms) \
4014     acm_ctrl_msg(acm, USB_CDC_REQ_SEND_BREAK, ms, NULL, 0)
4015    
4016     -static void acm_kill_urbs(struct acm *acm)
4017     +static void acm_poison_urbs(struct acm *acm)
4018     {
4019     int i;
4020    
4021     - usb_kill_urb(acm->ctrlurb);
4022     + usb_poison_urb(acm->ctrlurb);
4023     for (i = 0; i < ACM_NW; i++)
4024     - usb_kill_urb(acm->wb[i].urb);
4025     + usb_poison_urb(acm->wb[i].urb);
4026     for (i = 0; i < acm->rx_buflimit; i++)
4027     - usb_kill_urb(acm->read_urbs[i]);
4028     + usb_poison_urb(acm->read_urbs[i]);
4029     }
4030    
4031     +static void acm_unpoison_urbs(struct acm *acm)
4032     +{
4033     + int i;
4034     +
4035     + for (i = 0; i < acm->rx_buflimit; i++)
4036     + usb_unpoison_urb(acm->read_urbs[i]);
4037     + for (i = 0; i < ACM_NW; i++)
4038     + usb_unpoison_urb(acm->wb[i].urb);
4039     + usb_unpoison_urb(acm->ctrlurb);
4040     +}
4041     +
4042     +
4043     /*
4044     * Write buffer management.
4045     * All of these assume proper locks taken by the caller.
4046     @@ -225,9 +237,10 @@ static int acm_start_wb(struct acm *acm, struct acm_wb *wb)
4047    
4048     rc = usb_submit_urb(wb->urb, GFP_ATOMIC);
4049     if (rc < 0) {
4050     - dev_err(&acm->data->dev,
4051     - "%s - usb_submit_urb(write bulk) failed: %d\n",
4052     - __func__, rc);
4053     + if (rc != -EPERM)
4054     + dev_err(&acm->data->dev,
4055     + "%s - usb_submit_urb(write bulk) failed: %d\n",
4056     + __func__, rc);
4057     acm_write_done(acm, wb);
4058     }
4059     return rc;
4060     @@ -312,8 +325,10 @@ static void acm_process_notification(struct acm *acm, unsigned char *buf)
4061     acm->iocount.dsr++;
4062     if (difference & ACM_CTRL_DCD)
4063     acm->iocount.dcd++;
4064     - if (newctrl & ACM_CTRL_BRK)
4065     + if (newctrl & ACM_CTRL_BRK) {
4066     acm->iocount.brk++;
4067     + tty_insert_flip_char(&acm->port, 0, TTY_BREAK);
4068     + }
4069     if (newctrl & ACM_CTRL_RI)
4070     acm->iocount.rng++;
4071     if (newctrl & ACM_CTRL_FRAMING)
4072     @@ -479,11 +494,6 @@ static void acm_read_bulk_callback(struct urb *urb)
4073     dev_vdbg(&acm->data->dev, "got urb %d, len %d, status %d\n",
4074     rb->index, urb->actual_length, status);
4075    
4076     - if (!acm->dev) {
4077     - dev_dbg(&acm->data->dev, "%s - disconnected\n", __func__);
4078     - return;
4079     - }
4080     -
4081     switch (status) {
4082     case 0:
4083     usb_mark_last_busy(acm->dev);
4084     @@ -648,7 +658,8 @@ static void acm_port_dtr_rts(struct tty_port *port, int raise)
4085    
4086     res = acm_set_control(acm, val);
4087     if (res && (acm->ctrl_caps & USB_CDC_CAP_LINE))
4088     - dev_err(&acm->control->dev, "failed to set dtr/rts\n");
4089     + /* This is broken in too many devices to spam the logs */
4090     + dev_dbg(&acm->control->dev, "failed to set dtr/rts\n");
4091     }
4092    
4093     static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
4094     @@ -730,6 +741,7 @@ static void acm_port_shutdown(struct tty_port *port)
4095     * Need to grab write_lock to prevent race with resume, but no need to
4096     * hold it due to the tty-port initialised flag.
4097     */
4098     + acm_poison_urbs(acm);
4099     spin_lock_irq(&acm->write_lock);
4100     spin_unlock_irq(&acm->write_lock);
4101    
4102     @@ -746,7 +758,8 @@ static void acm_port_shutdown(struct tty_port *port)
4103     usb_autopm_put_interface_async(acm->control);
4104     }
4105    
4106     - acm_kill_urbs(acm);
4107     + acm_unpoison_urbs(acm);
4108     +
4109     }
4110    
4111     static void acm_tty_cleanup(struct tty_struct *tty)
4112     @@ -1516,12 +1529,16 @@ skip_countries:
4113    
4114     return 0;
4115     alloc_fail6:
4116     + if (!acm->combined_interfaces) {
4117     + /* Clear driver data so that disconnect() returns early. */
4118     + usb_set_intfdata(data_interface, NULL);
4119     + usb_driver_release_interface(&acm_driver, data_interface);
4120     + }
4121     if (acm->country_codes) {
4122     device_remove_file(&acm->control->dev,
4123     &dev_attr_wCountryCodes);
4124     device_remove_file(&acm->control->dev,
4125     &dev_attr_iCountryCodeRelDate);
4126     - kfree(acm->country_codes);
4127     }
4128     device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
4129     alloc_fail5:
4130     @@ -1553,8 +1570,14 @@ static void acm_disconnect(struct usb_interface *intf)
4131     if (!acm)
4132     return;
4133    
4134     - mutex_lock(&acm->mutex);
4135     acm->disconnected = true;
4136     + /*
4137     + * there is a circular dependency. acm_softint() can resubmit
4138     + * the URBs in error handling so we need to block any
4139     + * submission right away
4140     + */
4141     + acm_poison_urbs(acm);
4142     + mutex_lock(&acm->mutex);
4143     if (acm->country_codes) {
4144     device_remove_file(&acm->control->dev,
4145     &dev_attr_wCountryCodes);
4146     @@ -1573,7 +1596,6 @@ static void acm_disconnect(struct usb_interface *intf)
4147     tty_kref_put(tty);
4148     }
4149    
4150     - acm_kill_urbs(acm);
4151     cancel_delayed_work_sync(&acm->dwork);
4152    
4153     tty_unregister_device(acm_tty_driver, acm->minor);
4154     @@ -1615,7 +1637,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
4155     if (cnt)
4156     return 0;
4157    
4158     - acm_kill_urbs(acm);
4159     + acm_poison_urbs(acm);
4160     cancel_delayed_work_sync(&acm->dwork);
4161     acm->urbs_in_error_delay = 0;
4162    
4163     @@ -1628,6 +1650,7 @@ static int acm_resume(struct usb_interface *intf)
4164     struct urb *urb;
4165     int rv = 0;
4166    
4167     + acm_unpoison_urbs(acm);
4168     spin_lock_irq(&acm->write_lock);
4169    
4170     if (--acm->susp_count)
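
The cdc-acm rework switches from usb_kill_urb() to usb_poison_urb() in the shutdown, suspend and disconnect paths: a poisoned URB is cancelled and additionally rejects any later usb_submit_urb() with -EPERM until usb_unpoison_urb() is called, which closes the race with completion handlers that resubmit from interrupt context. A hedged kernel-style sketch of that pattern (struct and function names here are made up; only the usb_*_urb() calls are real API):

/* Illustrative only -- "example_dev" and its helpers are hypothetical. */
#include <linux/slab.h>
#include <linux/usb.h>

#define EXAMPLE_NUM_READ_URBS	4

struct example_dev {
	struct urb *ctrl_urb;
	struct urb *read_urbs[EXAMPLE_NUM_READ_URBS];
};

/* Cancel in-flight transfers and block any further resubmission. */
static void example_quiesce(struct example_dev *dev)
{
	int i;

	usb_poison_urb(dev->ctrl_urb);
	for (i = 0; i < EXAMPLE_NUM_READ_URBS; i++)
		usb_poison_urb(dev->read_urbs[i]);
}

/* Allow submissions again, e.g. on resume. */
static void example_unquiesce(struct example_dev *dev)
{
	int i;

	for (i = 0; i < EXAMPLE_NUM_READ_URBS; i++)
		usb_unpoison_urb(dev->read_urbs[i]);
	usb_unpoison_urb(dev->ctrl_urb);
}

/* A resubmitting completion handler must tolerate -EPERM, which is what
 * usb_submit_urb() returns for a poisoned URB.
 */
static void example_read_complete(struct urb *urb)
{
	int rc = usb_submit_urb(urb, GFP_ATOMIC);

	if (rc && rc != -EPERM)
		dev_err(&urb->dev->dev, "resubmit failed: %d\n", rc);
}
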
4171     diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
4172     index 239443ce52001..b8a4707dfafab 100644
4173     --- a/drivers/usb/core/quirks.c
4174     +++ b/drivers/usb/core/quirks.c
4175     @@ -498,6 +498,10 @@ static const struct usb_device_id usb_quirk_list[] = {
4176     /* DJI CineSSD */
4177     { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
4178    
4179     + /* Fibocom L850-GL LTE Modem */
4180     + { USB_DEVICE(0x2cb7, 0x0007), .driver_info =
4181     + USB_QUIRK_IGNORE_REMOTE_WAKEUP },
4182     +
4183     /* INTEL VALUE SSD */
4184     { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
4185    
4186     diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
4187     index a91f2aa24118a..258056c822069 100644
4188     --- a/drivers/usb/dwc2/hcd.c
4189     +++ b/drivers/usb/dwc2/hcd.c
4190     @@ -4322,7 +4322,8 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
4191     if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
4192     goto unlock;
4193    
4194     - if (hsotg->params.power_down > DWC2_POWER_DOWN_PARAM_PARTIAL)
4195     + if (hsotg->params.power_down != DWC2_POWER_DOWN_PARAM_PARTIAL ||
4196     + hsotg->flags.b.port_connect_status == 0)
4197     goto skip_power_saving;
4198    
4199     /*
4200     @@ -5398,7 +5399,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
4201     dwc2_writel(hsotg, hprt0, HPRT0);
4202    
4203     /* Wait for the HPRT0.PrtSusp register field to be set */
4204     - if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
4205     + if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 5000))
4206     dev_warn(hsotg->dev, "Suspend wasn't generated\n");
4207    
4208     /*
4209     diff --git a/drivers/usb/gadget/udc/amd5536udc_pci.c b/drivers/usb/gadget/udc/amd5536udc_pci.c
4210     index 57b6f66331cfa..362284057d307 100644
4211     --- a/drivers/usb/gadget/udc/amd5536udc_pci.c
4212     +++ b/drivers/usb/gadget/udc/amd5536udc_pci.c
4213     @@ -154,6 +154,11 @@ static int udc_pci_probe(
4214     pci_set_master(pdev);
4215     pci_try_set_mwi(pdev);
4216    
4217     + dev->phys_addr = resource;
4218     + dev->irq = pdev->irq;
4219     + dev->pdev = pdev;
4220     + dev->dev = &pdev->dev;
4221     +
4222     /* init dma pools */
4223     if (use_dma) {
4224     retval = init_dma_pools(dev);
4225     @@ -161,11 +166,6 @@ static int udc_pci_probe(
4226     goto err_dma;
4227     }
4228    
4229     - dev->phys_addr = resource;
4230     - dev->irq = pdev->irq;
4231     - dev->pdev = pdev;
4232     - dev->dev = &pdev->dev;
4233     -
4234     /* general probing */
4235     if (udc_probe(dev)) {
4236     retval = -ENODEV;
4237     diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
4238     index 09b67219fd146..5fc3ea6d46c56 100644
4239     --- a/drivers/usb/host/xhci-mtk.c
4240     +++ b/drivers/usb/host/xhci-mtk.c
4241     @@ -397,6 +397,13 @@ static void xhci_mtk_quirks(struct device *dev, struct xhci_hcd *xhci)
4242     xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
4243     if (mtk->lpm_support)
4244     xhci->quirks |= XHCI_LPM_SUPPORT;
4245     +
4246     + /*
4247     + * MTK xHCI 0.96: PSA is 1 by default even if doesn't support stream,
4248     + * and it's 3 when support it.
4249     + */
4250     + if (xhci->hci_version < 0x100 && HCC_MAX_PSA(xhci->hcc_params) == 4)
4251     + xhci->quirks |= XHCI_BROKEN_STREAMS;
4252     }
4253    
4254     /* called during probe() after chip reset completes */
4255     @@ -553,7 +560,8 @@ static int xhci_mtk_probe(struct platform_device *pdev)
4256     if (ret)
4257     goto put_usb3_hcd;
4258    
4259     - if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
4260     + if (HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
4261     + !(xhci->quirks & XHCI_BROKEN_STREAMS))
4262     xhci->shared_hcd->can_do_streams = 1;
4263    
4264     ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
4265     diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
4266     index 9fcff4e94484e..166f68f639c28 100644
4267     --- a/drivers/usb/musb/musb_core.c
4268     +++ b/drivers/usb/musb/musb_core.c
4269     @@ -1866,10 +1866,14 @@ static void musb_pm_runtime_check_session(struct musb *musb)
4270     MUSB_DEVCTL_HR;
4271     switch (devctl & ~s) {
4272     case MUSB_QUIRK_B_DISCONNECT_99:
4273     - musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
4274     - schedule_delayed_work(&musb->irq_work,
4275     - msecs_to_jiffies(1000));
4276     - break;
4277     + if (musb->quirk_retries && !musb->flush_irq_work) {
4278     + musb_dbg(musb, "Poll devctl in case of suspend after disconnect\n");
4279     + schedule_delayed_work(&musb->irq_work,
4280     + msecs_to_jiffies(1000));
4281     + musb->quirk_retries--;
4282     + break;
4283     + }
4284     + fallthrough;
4285     case MUSB_QUIRK_B_INVALID_VBUS_91:
4286     if (musb->quirk_retries && !musb->flush_irq_work) {
4287     musb_dbg(musb,
4288     diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
4289     index fee511437abe3..1e0b618e2e6ec 100644
4290     --- a/drivers/usb/usbip/vhci_hcd.c
4291     +++ b/drivers/usb/usbip/vhci_hcd.c
4292     @@ -595,6 +595,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
4293     pr_err("invalid port number %d\n", wIndex);
4294     goto error;
4295     }
4296     + if (wValue >= 32)
4297     + goto error;
4298     if (hcd->speed == HCD_USB3) {
4299     if ((vhci_hcd->port_status[rhport] &
4300     USB_SS_PORT_STAT_POWER) != 0) {
4301     diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
4302     index ac3c1dd3edeff..4abddbebd4b23 100644
4303     --- a/drivers/vfio/pci/Kconfig
4304     +++ b/drivers/vfio/pci/Kconfig
4305     @@ -42,6 +42,6 @@ config VFIO_PCI_IGD
4306    
4307     config VFIO_PCI_NVLINK2
4308     def_bool y
4309     - depends on VFIO_PCI && PPC_POWERNV
4310     + depends on VFIO_PCI && PPC_POWERNV && SPAPR_TCE_IOMMU
4311     help
4312     VFIO PCI support for P9 Witherspoon machine with NVIDIA V100 GPUs
4313     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
4314     index 57ab79fbcee95..a279ecacbf60a 100644
4315     --- a/drivers/vhost/vhost.c
4316     +++ b/drivers/vhost/vhost.c
4317     @@ -320,8 +320,8 @@ static void vhost_vq_reset(struct vhost_dev *dev,
4318     vq->kick = NULL;
4319     vq->call_ctx = NULL;
4320     vq->log_ctx = NULL;
4321     - vhost_reset_is_le(vq);
4322     vhost_disable_cross_endian(vq);
4323     + vhost_reset_is_le(vq);
4324     vq->busyloop_timeout = 0;
4325     vq->umem = NULL;
4326     vq->iotlb = NULL;
4327     diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
4328     index 4cf71ee0965a6..0a9202176ffc9 100644
4329     --- a/drivers/video/fbdev/core/fbcon.c
4330     +++ b/drivers/video/fbdev/core/fbcon.c
4331     @@ -1339,6 +1339,9 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
4332    
4333     ops->cursor_flash = (mode == CM_ERASE) ? 0 : 1;
4334    
4335     + if (!ops->cursor)
4336     + return;
4337     +
4338     ops->cursor(vc, info, mode, get_color(vc, info, c, 1),
4339     get_color(vc, info, c, 0));
4340     }
4341     diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
4342     index 5aba67a504cf8..031ff3f190184 100644
4343     --- a/fs/ext4/balloc.c
4344     +++ b/fs/ext4/balloc.c
4345     @@ -612,27 +612,41 @@ int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
4346    
4347     /**
4348     * ext4_should_retry_alloc() - check if a block allocation should be retried
4349     - * @sb: super block
4350     - * @retries: number of attemps has been made
4351     + * @sb: superblock
4352     + * @retries: number of retry attempts made so far
4353     *
4354     - * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
4355     - * it is profitable to retry the operation, this function will wait
4356     - * for the current or committing transaction to complete, and then
4357     - * return TRUE. We will only retry once.
4358     + * ext4_should_retry_alloc() is called when ENOSPC is returned while
4359     + * attempting to allocate blocks. If there's an indication that a pending
4360     + * journal transaction might free some space and allow another attempt to
4361     + * succeed, this function will wait for the current or committing transaction
4362     + * to complete and then return TRUE.
4363     */
4364     int ext4_should_retry_alloc(struct super_block *sb, int *retries)
4365     {
4366     - if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
4367     - (*retries)++ > 1 ||
4368     - !EXT4_SB(sb)->s_journal)
4369     + struct ext4_sb_info *sbi = EXT4_SB(sb);
4370     +
4371     + if (!sbi->s_journal)
4372     return 0;
4373    
4374     - smp_mb();
4375     - if (EXT4_SB(sb)->s_mb_free_pending == 0)
4376     + if (++(*retries) > 3) {
4377     + percpu_counter_inc(&sbi->s_sra_exceeded_retry_limit);
4378     return 0;
4379     + }
4380    
4381     + /*
4382     + * if there's no indication that blocks are about to be freed it's
4383     + * possible we just missed a transaction commit that did so
4384     + */
4385     + smp_mb();
4386     + if (sbi->s_mb_free_pending == 0)
4387     + return ext4_has_free_clusters(sbi, 1, 0);
4388     +
4389     + /*
4390     + * it's possible we've just missed a transaction commit here,
4391     + * so ignore the returned status
4392     + */
4393     jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
4394     - jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
4395     + (void) jbd2_journal_force_commit_nested(sbi->s_journal);
4396     return 1;
4397     }
4398    
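
With this rewrite, ext4_should_retry_alloc() only reports "retry" when the journal may actually free space, caps the attempts at three, and counts callers that hit the cap in the new s_sra_exceeded_retry_limit counter. Callers keep the usual retry-loop shape around their allocation; a hedged caller-side sketch (the allocating helper below is a placeholder, not ext4 code):

/* Caller-side pattern, as used by the ext4 write paths ("ext4.h" context
 * assumed; example_allocate() is a hypothetical stand-in for the real
 * block-allocating call).
 */
static int example_write_path(struct super_block *sb)
{
	int retries = 0;
	int err;

retry:
	err = example_allocate(sb);
	if (err == -ENOSPC && ext4_should_retry_alloc(sb, &retries))
		goto retry;	/* a journal commit may have freed space */

	return err;
}
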
4399     diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
4400     index 1c558b5547889..bf3eaa9030335 100644
4401     --- a/fs/ext4/ext4.h
4402     +++ b/fs/ext4/ext4.h
4403     @@ -1420,6 +1420,7 @@ struct ext4_sb_info {
4404     struct percpu_counter s_freeinodes_counter;
4405     struct percpu_counter s_dirs_counter;
4406     struct percpu_counter s_dirtyclusters_counter;
4407     + struct percpu_counter s_sra_exceeded_retry_limit;
4408     struct blockgroup_lock *s_blockgroup_lock;
4409     struct proc_dir_entry *s_proc;
4410     struct kobject s_kobj;
4411     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4412     index efce97b938b7a..1429d01d836bb 100644
4413     --- a/fs/ext4/inode.c
4414     +++ b/fs/ext4/inode.c
4415     @@ -2076,13 +2076,13 @@ static int __ext4_journalled_writepage(struct page *page,
4416     if (!ret)
4417     ret = err;
4418    
4419     - if (!ext4_has_inline_data(inode))
4420     - ext4_walk_page_buffers(NULL, page_bufs, 0, len,
4421     - NULL, bput_one);
4422     ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4423     out:
4424     unlock_page(page);
4425     out_no_pagelock:
4426     + if (!inline_data && page_bufs)
4427     + ext4_walk_page_buffers(NULL, page_bufs, 0, len,
4428     + NULL, bput_one);
4429     brelse(inode_bh);
4430     return ret;
4431     }
4432     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
4433     index e992a9f156714..4c37abe768512 100644
4434     --- a/fs/ext4/namei.c
4435     +++ b/fs/ext4/namei.c
4436     @@ -3731,14 +3731,14 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
4437     */
4438     retval = -ENOENT;
4439     if (!old.bh || le32_to_cpu(old.de->inode) != old.inode->i_ino)
4440     - goto end_rename;
4441     + goto release_bh;
4442    
4443     new.bh = ext4_find_entry(new.dir, &new.dentry->d_name,
4444     &new.de, &new.inlined);
4445     if (IS_ERR(new.bh)) {
4446     retval = PTR_ERR(new.bh);
4447     new.bh = NULL;
4448     - goto end_rename;
4449     + goto release_bh;
4450     }
4451     if (new.bh) {
4452     if (!new.inode) {
4453     @@ -3755,15 +3755,13 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
4454     handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
4455     if (IS_ERR(handle)) {
4456     retval = PTR_ERR(handle);
4457     - handle = NULL;
4458     - goto end_rename;
4459     + goto release_bh;
4460     }
4461     } else {
4462     whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
4463     if (IS_ERR(whiteout)) {
4464     retval = PTR_ERR(whiteout);
4465     - whiteout = NULL;
4466     - goto end_rename;
4467     + goto release_bh;
4468     }
4469     }
4470    
4471     @@ -3871,16 +3869,18 @@ end_rename:
4472     ext4_resetent(handle, &old,
4473     old.inode->i_ino, old_file_type);
4474     drop_nlink(whiteout);
4475     + ext4_orphan_add(handle, whiteout);
4476     }
4477     unlock_new_inode(whiteout);
4478     + ext4_journal_stop(handle);
4479     iput(whiteout);
4480     -
4481     + } else {
4482     + ext4_journal_stop(handle);
4483     }
4484     +release_bh:
4485     brelse(old.dir_bh);
4486     brelse(old.bh);
4487     brelse(new.bh);
4488     - if (handle)
4489     - ext4_journal_stop(handle);
4490     return retval;
4491     }
4492    
4493     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4494     index 06568467b0c27..2ecf4594a20dd 100644
4495     --- a/fs/ext4/super.c
4496     +++ b/fs/ext4/super.c
4497     @@ -1017,6 +1017,7 @@ static void ext4_put_super(struct super_block *sb)
4498     percpu_counter_destroy(&sbi->s_freeinodes_counter);
4499     percpu_counter_destroy(&sbi->s_dirs_counter);
4500     percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
4501     + percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
4502     percpu_free_rwsem(&sbi->s_writepages_rwsem);
4503     #ifdef CONFIG_QUOTA
4504     for (i = 0; i < EXT4_MAXQUOTAS; i++)
4505     @@ -4597,6 +4598,9 @@ no_journal:
4506     if (!err)
4507     err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
4508     GFP_KERNEL);
4509     + if (!err)
4510     + err = percpu_counter_init(&sbi->s_sra_exceeded_retry_limit, 0,
4511     + GFP_KERNEL);
4512     if (!err)
4513     err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
4514    
4515     @@ -4699,6 +4703,7 @@ failed_mount6:
4516     percpu_counter_destroy(&sbi->s_freeinodes_counter);
4517     percpu_counter_destroy(&sbi->s_dirs_counter);
4518     percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
4519     + percpu_counter_destroy(&sbi->s_sra_exceeded_retry_limit);
4520     percpu_free_rwsem(&sbi->s_writepages_rwsem);
4521     failed_mount5:
4522     ext4_ext_release(sb);
4523     diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
4524     index eb1efad0e20a3..9394360ff1373 100644
4525     --- a/fs/ext4/sysfs.c
4526     +++ b/fs/ext4/sysfs.c
4527     @@ -23,6 +23,7 @@ typedef enum {
4528     attr_session_write_kbytes,
4529     attr_lifetime_write_kbytes,
4530     attr_reserved_clusters,
4531     + attr_sra_exceeded_retry_limit,
4532     attr_inode_readahead,
4533     attr_trigger_test_error,
4534     attr_first_error_time,
4535     @@ -176,6 +177,7 @@ EXT4_ATTR_FUNC(delayed_allocation_blocks, 0444);
4536     EXT4_ATTR_FUNC(session_write_kbytes, 0444);
4537     EXT4_ATTR_FUNC(lifetime_write_kbytes, 0444);
4538     EXT4_ATTR_FUNC(reserved_clusters, 0644);
4539     +EXT4_ATTR_FUNC(sra_exceeded_retry_limit, 0444);
4540    
4541     EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, inode_readahead,
4542     ext4_sb_info, s_inode_readahead_blks);
4543     @@ -207,6 +209,7 @@ static struct attribute *ext4_attrs[] = {
4544     ATTR_LIST(session_write_kbytes),
4545     ATTR_LIST(lifetime_write_kbytes),
4546     ATTR_LIST(reserved_clusters),
4547     + ATTR_LIST(sra_exceeded_retry_limit),
4548     ATTR_LIST(inode_readahead_blks),
4549     ATTR_LIST(inode_goal),
4550     ATTR_LIST(mb_stats),
4551     @@ -308,6 +311,10 @@ static ssize_t ext4_attr_show(struct kobject *kobj,
4552     return snprintf(buf, PAGE_SIZE, "%llu\n",
4553     (unsigned long long)
4554     atomic64_read(&sbi->s_resv_clusters));
4555     + case attr_sra_exceeded_retry_limit:
4556     + return snprintf(buf, PAGE_SIZE, "%llu\n",
4557     + (unsigned long long)
4558     + percpu_counter_sum(&sbi->s_sra_exceeded_retry_limit));
4559     case attr_inode_readahead:
4560     case attr_pointer_ui:
4561     if (!ptr)
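
The three ext4 hunks above follow the usual per-cpu counter lifecycle: allocate the counter at mount alongside the existing ones, bump it cheaply on the hot path when the retry limit is exceeded, sum it across CPUs when sysfs reads it back, and destroy it in both the teardown and mount-failure paths. A generic sketch of that lifecycle (module-style, names are illustrative):

/* Sketch of the percpu_counter lifecycle used for
 * s_sra_exceeded_retry_limit; "example_*" names are illustrative.
 */
#include <linux/percpu_counter.h>

static struct percpu_counter example_counter;

static int example_setup(void)
{
	/* mirrors the percpu_counter_init() call added in super.c */
	return percpu_counter_init(&example_counter, 0, GFP_KERNEL);
}

static void example_hot_path(void)
{
	percpu_counter_inc(&example_counter);	/* cheap, touches only this CPU */
}

static s64 example_read_back(void)
{
	return percpu_counter_sum(&example_counter);	/* exact sum over all CPUs */
}

static void example_teardown(void)
{
	percpu_counter_destroy(&example_counter);
}
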
4562     diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c
4563     index 152a230f668d4..bd0cc3dcc9807 100644
4564     --- a/fs/iomap/swapfile.c
4565     +++ b/fs/iomap/swapfile.c
4566     @@ -169,6 +169,16 @@ int iomap_swapfile_activate(struct swap_info_struct *sis,
4567     return ret;
4568     }
4569    
4570     + /*
4571     + * If this swapfile doesn't contain even a single page-aligned
4572     + * contiguous range of blocks, reject this useless swapfile to
4573     + * prevent confusion later on.
4574     + */
4575     + if (isi.nr_pages == 0) {
4576     + pr_warn("swapon: Cannot find a single usable page in file.\n");
4577     + return -EINVAL;
4578     + }
4579     +
4580     *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage;
4581     sis->max = isi.nr_pages;
4582     sis->pages = isi.nr_pages - 1;
4583     diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
4584     index f2f81561ebb65..4d6e71335bce2 100644
4585     --- a/fs/nfsd/Kconfig
4586     +++ b/fs/nfsd/Kconfig
4587     @@ -73,6 +73,7 @@ config NFSD_V4
4588     select NFSD_V3
4589     select FS_POSIX_ACL
4590     select SUNRPC_GSS
4591     + select CRYPTO
4592     select CRYPTO_MD5
4593     select CRYPTO_SHA256
4594     select GRACE_PERIOD
4595     diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
4596     index efe55d101b0ed..3c50d18fe8a9b 100644
4597     --- a/fs/nfsd/nfs4callback.c
4598     +++ b/fs/nfsd/nfs4callback.c
4599     @@ -1121,6 +1121,7 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
4600     switch (task->tk_status) {
4601     case -EIO:
4602     case -ETIMEDOUT:
4603     + case -EACCES:
4604     nfsd4_mark_cb_down(clp, task->tk_status);
4605     }
4606     break;
4607     diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h
4608     index c764352447ba1..81bec2c80b25c 100644
4609     --- a/fs/reiserfs/xattr.h
4610     +++ b/fs/reiserfs/xattr.h
4611     @@ -43,7 +43,7 @@ void reiserfs_security_free(struct reiserfs_security_handle *sec);
4612    
4613     static inline int reiserfs_xattrs_initialized(struct super_block *sb)
4614     {
4615     - return REISERFS_SB(sb)->priv_root != NULL;
4616     + return REISERFS_SB(sb)->priv_root && REISERFS_SB(sb)->xattr_root;
4617     }
4618    
4619     #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
4620     diff --git a/include/linux/can/can-ml.h b/include/linux/can/can-ml.h
4621     index 2f5d731ae251d..8afa92d15a664 100644
4622     --- a/include/linux/can/can-ml.h
4623     +++ b/include/linux/can/can-ml.h
4624     @@ -44,6 +44,7 @@
4625    
4626     #include <linux/can.h>
4627     #include <linux/list.h>
4628     +#include <linux/netdevice.h>
4629    
4630     #define CAN_SFF_RCV_ARRAY_SZ (1 << CAN_SFF_ID_BITS)
4631     #define CAN_EFF_RCV_HASH_BITS 10
4632     @@ -65,4 +66,15 @@ struct can_ml_priv {
4633     #endif
4634     };
4635    
4636     +static inline struct can_ml_priv *can_get_ml_priv(struct net_device *dev)
4637     +{
4638     + return netdev_get_ml_priv(dev, ML_PRIV_CAN);
4639     +}
4640     +
4641     +static inline void can_set_ml_priv(struct net_device *dev,
4642     + struct can_ml_priv *ml_priv)
4643     +{
4644     + netdev_set_ml_priv(dev, ml_priv, ML_PRIV_CAN);
4645     +}
4646     +
4647     #endif /* CAN_ML_H */
4648     diff --git a/include/linux/extcon.h b/include/linux/extcon.h
4649     index 2bdf643d85937..47d3e19a49ae4 100644
4650     --- a/include/linux/extcon.h
4651     +++ b/include/linux/extcon.h
4652     @@ -271,6 +271,29 @@ static inline void devm_extcon_unregister_notifier(struct device *dev,
4653     struct extcon_dev *edev, unsigned int id,
4654     struct notifier_block *nb) { }
4655    
4656     +static inline int extcon_register_notifier_all(struct extcon_dev *edev,
4657     + struct notifier_block *nb)
4658     +{
4659     + return 0;
4660     +}
4661     +
4662     +static inline int extcon_unregister_notifier_all(struct extcon_dev *edev,
4663     + struct notifier_block *nb)
4664     +{
4665     + return 0;
4666     +}
4667     +
4668     +static inline int devm_extcon_register_notifier_all(struct device *dev,
4669     + struct extcon_dev *edev,
4670     + struct notifier_block *nb)
4671     +{
4672     + return 0;
4673     +}
4674     +
4675     +static inline void devm_extcon_unregister_notifier_all(struct device *dev,
4676     + struct extcon_dev *edev,
4677     + struct notifier_block *nb) { }
4678     +
4679     static inline struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name)
4680     {
4681     return ERR_PTR(-ENODEV);
4682     diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
4683     index db1b9623977c8..11a52f2fa35de 100644
4684     --- a/include/linux/netdevice.h
4685     +++ b/include/linux/netdevice.h
4686     @@ -1555,6 +1555,12 @@ enum netdev_priv_flags {
4687     #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER
4688     #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK
4689    
4690     +/* Specifies the type of the struct net_device::ml_priv pointer */
4691     +enum netdev_ml_priv_type {
4692     + ML_PRIV_NONE,
4693     + ML_PRIV_CAN,
4694     +};
4695     +
4696     /**
4697     * struct net_device - The DEVICE structure.
4698     *
4699     @@ -1732,6 +1738,7 @@ enum netdev_priv_flags {
4700     * @nd_net: Network namespace this network device is inside
4701     *
4702     * @ml_priv: Mid-layer private
4703     + * @ml_priv_type: Mid-layer private type
4704     * @lstats: Loopback statistics
4705     * @tstats: Tunnel statistics
4706     * @dstats: Dummy statistics
4707     @@ -2019,8 +2026,10 @@ struct net_device {
4708     possible_net_t nd_net;
4709    
4710     /* mid-layer private */
4711     + void *ml_priv;
4712     + enum netdev_ml_priv_type ml_priv_type;
4713     +
4714     union {
4715     - void *ml_priv;
4716     struct pcpu_lstats __percpu *lstats;
4717     struct pcpu_sw_netstats __percpu *tstats;
4718     struct pcpu_dstats __percpu *dstats;
4719     @@ -2167,6 +2176,29 @@ static inline void netdev_reset_rx_headroom(struct net_device *dev)
4720     netdev_set_rx_headroom(dev, -1);
4721     }
4722    
4723     +static inline void *netdev_get_ml_priv(struct net_device *dev,
4724     + enum netdev_ml_priv_type type)
4725     +{
4726     + if (dev->ml_priv_type != type)
4727     + return NULL;
4728     +
4729     + return dev->ml_priv;
4730     +}
4731     +
4732     +static inline void netdev_set_ml_priv(struct net_device *dev,
4733     + void *ml_priv,
4734     + enum netdev_ml_priv_type type)
4735     +{
4736     + WARN(dev->ml_priv_type && dev->ml_priv_type != type,
4737     + "Overwriting already set ml_priv_type (%u) with different ml_priv_type (%u)!\n",
4738     + dev->ml_priv_type, type);
4739     + WARN(!dev->ml_priv_type && dev->ml_priv,
4740     + "Overwriting already set ml_priv and ml_priv_type is ML_PRIV_NONE!\n");
4741     +
4742     + dev->ml_priv = ml_priv;
4743     + dev->ml_priv_type = type;
4744     +}
4745     +
4746     /*
4747     * Net namespace inlines
4748     */
4749     diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
4750     index 468a9b8422e39..07a9f9f46e035 100644
4751     --- a/kernel/locking/mutex.c
4752     +++ b/kernel/locking/mutex.c
4753     @@ -636,7 +636,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
4754     */
4755     static __always_inline bool
4756     mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
4757     - const bool use_ww_ctx, struct mutex_waiter *waiter)
4758     + struct mutex_waiter *waiter)
4759     {
4760     if (!waiter) {
4761     /*
4762     @@ -712,7 +712,7 @@ fail:
4763     #else
4764     static __always_inline bool
4765     mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
4766     - const bool use_ww_ctx, struct mutex_waiter *waiter)
4767     + struct mutex_waiter *waiter)
4768     {
4769     return false;
4770     }
4771     @@ -932,6 +932,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4772     struct ww_mutex *ww;
4773     int ret;
4774    
4775     + if (!use_ww_ctx)
4776     + ww_ctx = NULL;
4777     +
4778     might_sleep();
4779    
4780     #ifdef CONFIG_DEBUG_MUTEXES
4781     @@ -939,7 +942,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4782     #endif
4783    
4784     ww = container_of(lock, struct ww_mutex, base);
4785     - if (use_ww_ctx && ww_ctx) {
4786     + if (ww_ctx) {
4787     if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
4788     return -EALREADY;
4789    
4790     @@ -956,10 +959,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4791     mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
4792    
4793     if (__mutex_trylock(lock) ||
4794     - mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
4795     + mutex_optimistic_spin(lock, ww_ctx, NULL)) {
4796     /* got the lock, yay! */
4797     lock_acquired(&lock->dep_map, ip);
4798     - if (use_ww_ctx && ww_ctx)
4799     + if (ww_ctx)
4800     ww_mutex_set_context_fastpath(ww, ww_ctx);
4801     preempt_enable();
4802     return 0;
4803     @@ -970,7 +973,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4804     * After waiting to acquire the wait_lock, try again.
4805     */
4806     if (__mutex_trylock(lock)) {
4807     - if (use_ww_ctx && ww_ctx)
4808     + if (ww_ctx)
4809     __ww_mutex_check_waiters(lock, ww_ctx);
4810    
4811     goto skip_wait;
4812     @@ -1023,7 +1026,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4813     goto err;
4814     }
4815    
4816     - if (use_ww_ctx && ww_ctx) {
4817     + if (ww_ctx) {
4818     ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
4819     if (ret)
4820     goto err;
4821     @@ -1036,7 +1039,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4822     * ww_mutex needs to always recheck its position since its waiter
4823     * list is not FIFO ordered.
4824     */
4825     - if ((use_ww_ctx && ww_ctx) || !first) {
4826     + if (ww_ctx || !first) {
4827     first = __mutex_waiter_is_first(lock, &waiter);
4828     if (first)
4829     __mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
4830     @@ -1049,7 +1052,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4831     * or we must see its unlock and acquire.
4832     */
4833     if (__mutex_trylock(lock) ||
4834     - (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
4835     + (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
4836     break;
4837    
4838     spin_lock(&lock->wait_lock);
4839     @@ -1058,7 +1061,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4840     acquired:
4841     __set_current_state(TASK_RUNNING);
4842    
4843     - if (use_ww_ctx && ww_ctx) {
4844     + if (ww_ctx) {
4845     /*
4846     * Wound-Wait; we stole the lock (!first_waiter), check the
4847     * waiters as anyone might want to wound us.
4848     @@ -1078,7 +1081,7 @@ skip_wait:
4849     /* got the lock - cleanup and rejoice! */
4850     lock_acquired(&lock->dep_map, ip);
4851    
4852     - if (use_ww_ctx && ww_ctx)
4853     + if (ww_ctx)
4854     ww_mutex_lock_acquired(ww, ww_ctx);
4855    
4856     spin_unlock(&lock->wait_lock);
4857     diff --git a/kernel/module.c b/kernel/module.c
4858     index ab1f97cfe18dc..c60559b5bf101 100644
4859     --- a/kernel/module.c
4860     +++ b/kernel/module.c
4861     @@ -2908,20 +2908,14 @@ static int module_sig_check(struct load_info *info, int flags)
4862     * enforcing, certain errors are non-fatal.
4863     */
4864     case -ENODATA:
4865     - reason = "Loading of unsigned module";
4866     - goto decide;
4867     + reason = "unsigned module";
4868     + break;
4869     case -ENOPKG:
4870     - reason = "Loading of module with unsupported crypto";
4871     - goto decide;
4872     + reason = "module with unsupported crypto";
4873     + break;
4874     case -ENOKEY:
4875     - reason = "Loading of module with unavailable key";
4876     - decide:
4877     - if (is_module_sig_enforced()) {
4878     - pr_notice("%s is rejected\n", reason);
4879     - return -EKEYREJECTED;
4880     - }
4881     -
4882     - return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
4883     + reason = "module with unavailable key";
4884     + break;
4885    
4886     /* All other errors are fatal, including nomem, unparseable
4887     * signatures and signature check failures - even if signatures
4888     @@ -2930,6 +2924,13 @@ static int module_sig_check(struct load_info *info, int flags)
4889     default:
4890     return err;
4891     }
4892     +
4893     + if (is_module_sig_enforced()) {
4894     + pr_notice("Loading of %s is rejected\n", reason);
4895     + return -EKEYREJECTED;
4896     + }
4897     +
4898     + return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
4899     }
4900     #else /* !CONFIG_MODULE_SIG */
4901     static int module_sig_check(struct load_info *info, int flags)
4902     @@ -2938,9 +2939,33 @@ static int module_sig_check(struct load_info *info, int flags)
4903     }
4904     #endif /* !CONFIG_MODULE_SIG */
4905    
4906     -/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
4907     -static int elf_header_check(struct load_info *info)
4908     +static int validate_section_offset(struct load_info *info, Elf_Shdr *shdr)
4909     +{
4910     + unsigned long secend;
4911     +
4912     + /*
4913     + * Check for both overflow and offset/size being
4914     + * too large.
4915     + */
4916     + secend = shdr->sh_offset + shdr->sh_size;
4917     + if (secend < shdr->sh_offset || secend > info->len)
4918     + return -ENOEXEC;
4919     +
4920     + return 0;
4921     +}
4922     +
4923     +/*
4924     + * Sanity checks against invalid binaries, wrong arch, weird elf version.
4925     + *
4926     + * Also do basic validity checks against section offsets and sizes, the
4927     + * section name string table, and the indices used for it (sh_name).
4928     + */
4929     +static int elf_validity_check(struct load_info *info)
4930     {
4931     + unsigned int i;
4932     + Elf_Shdr *shdr, *strhdr;
4933     + int err;
4934     +
4935     if (info->len < sizeof(*(info->hdr)))
4936     return -ENOEXEC;
4937    
4938     @@ -2950,11 +2975,78 @@ static int elf_header_check(struct load_info *info)
4939     || info->hdr->e_shentsize != sizeof(Elf_Shdr))
4940     return -ENOEXEC;
4941    
4942     + /*
4943     + * e_shnum is 16 bits, and sizeof(Elf_Shdr) is
4944     + * known and small. So e_shnum * sizeof(Elf_Shdr)
4945     + * will not overflow unsigned long on any platform.
4946     + */
4947     if (info->hdr->e_shoff >= info->len
4948     || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
4949     info->len - info->hdr->e_shoff))
4950     return -ENOEXEC;
4951    
4952     + info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
4953     +
4954     + /*
4955     + * Verify if the section name table index is valid.
4956     + */
4957     + if (info->hdr->e_shstrndx == SHN_UNDEF
4958     + || info->hdr->e_shstrndx >= info->hdr->e_shnum)
4959     + return -ENOEXEC;
4960     +
4961     + strhdr = &info->sechdrs[info->hdr->e_shstrndx];
4962     + err = validate_section_offset(info, strhdr);
4963     + if (err < 0)
4964     + return err;
4965     +
4966     + /*
4967     + * The section name table must be NUL-terminated, as required
4968     + * by the spec. This makes strcmp and pr_* calls that access
4969     + * strings in the section safe.
4970     + */
4971     + info->secstrings = (void *)info->hdr + strhdr->sh_offset;
4972     + if (info->secstrings[strhdr->sh_size - 1] != '\0')
4973     + return -ENOEXEC;
4974     +
4975     + /*
4976     + * The code assumes that section 0 has a length of zero and
4977     + * an addr of zero, so check for it.
4978     + */
4979     + if (info->sechdrs[0].sh_type != SHT_NULL
4980     + || info->sechdrs[0].sh_size != 0
4981     + || info->sechdrs[0].sh_addr != 0)
4982     + return -ENOEXEC;
4983     +
4984     + for (i = 1; i < info->hdr->e_shnum; i++) {
4985     + shdr = &info->sechdrs[i];
4986     + switch (shdr->sh_type) {
4987     + case SHT_NULL:
4988     + case SHT_NOBITS:
4989     + continue;
4990     + case SHT_SYMTAB:
4991     + if (shdr->sh_link == SHN_UNDEF
4992     + || shdr->sh_link >= info->hdr->e_shnum)
4993     + return -ENOEXEC;
4994     + fallthrough;
4995     + default:
4996     + err = validate_section_offset(info, shdr);
4997     + if (err < 0) {
4998     + pr_err("Invalid ELF section in module (section %u type %u)\n",
4999     + i, shdr->sh_type);
5000     + return err;
5001     + }
5002     +
5003     + if (shdr->sh_flags & SHF_ALLOC) {
5004     + if (shdr->sh_name >= strhdr->sh_size) {
5005     + pr_err("Invalid ELF section name in module (section %u type %u)\n",
5006     + i, shdr->sh_type);
5007     + return -ENOEXEC;
5008     + }
5009     + }
5010     + break;
5011     + }
5012     + }
5013     +
5014     return 0;
5015     }
5016    
5017     @@ -3051,11 +3143,6 @@ static int rewrite_section_headers(struct load_info *info, int flags)
5018    
5019     for (i = 1; i < info->hdr->e_shnum; i++) {
5020     Elf_Shdr *shdr = &info->sechdrs[i];
5021     - if (shdr->sh_type != SHT_NOBITS
5022     - && info->len < shdr->sh_offset + shdr->sh_size) {
5023     - pr_err("Module len %lu truncated\n", info->len);
5024     - return -ENOEXEC;
5025     - }
5026    
5027     /* Mark all sections sh_addr with their address in the
5028     temporary image. */
5029     @@ -3087,11 +3174,6 @@ static int setup_load_info(struct load_info *info, int flags)
5030     {
5031     unsigned int i;
5032    
5033     - /* Set up the convenience variables */
5034     - info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
5035     - info->secstrings = (void *)info->hdr
5036     - + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
5037     -
5038     /* Try to find a name early so we can log errors with a module name */
5039     info->index.info = find_sec(info, ".modinfo");
5040     if (info->index.info)
5041     @@ -3819,23 +3901,49 @@ static int load_module(struct load_info *info, const char __user *uargs,
5042     long err = 0;
5043     char *after_dashes;
5044    
5045     - err = elf_header_check(info);
5046     + /*
5047     + * Do the signature check (if any) first. All that
5048     + * the signature check needs is info->len, it does
5049     + * not need any of the section info. That can be
5050     + * set up later. This will minimize the chances
5051     + * of a corrupt module causing problems before
5052     + * we even get to the signature check.
5053     + *
5054     + * The check will also adjust info->len by stripping
5055     + * off the sig length at the end of the module, making
5056     + * checks against info->len more correct.
5057     + */
5058     + err = module_sig_check(info, flags);
5059     if (err)
5060     goto free_copy;
5061    
5062     + /*
5063     + * Do basic sanity checks against the ELF header and
5064     + * sections.
5065     + */
5066     + err = elf_validity_check(info);
5067     + if (err) {
5068     + pr_err("Module has invalid ELF structures\n");
5069     + goto free_copy;
5070     + }
5071     +
5072     + /*
5073     + * Everything checks out, so set up the section info
5074     + * in the info structure.
5075     + */
5076     err = setup_load_info(info, flags);
5077     if (err)
5078     goto free_copy;
5079    
5080     + /*
5081     + * Now that we know we have the correct module name, check
5082     + * if it's blacklisted.
5083     + */
5084     if (blacklisted(info->name)) {
5085     err = -EPERM;
5086     goto free_copy;
5087     }
5088    
5089     - err = module_sig_check(info, flags);
5090     - if (err)
5091     - goto free_copy;
5092     -
5093     err = rewrite_section_headers(info, flags);
5094     if (err)
5095     goto free_copy;
5096     diff --git a/kernel/module_signature.c b/kernel/module_signature.c
5097     index 4224a1086b7d8..00132d12487cd 100644
5098     --- a/kernel/module_signature.c
5099     +++ b/kernel/module_signature.c
5100     @@ -25,7 +25,7 @@ int mod_check_sig(const struct module_signature *ms, size_t file_len,
5101     return -EBADMSG;
5102    
5103     if (ms->id_type != PKEY_ID_PKCS7) {
5104     - pr_err("%s: Module is not signed with expected PKCS#7 message\n",
5105     + pr_err("%s: not signed with expected PKCS#7 message\n",
5106     name);
5107     return -ENOPKG;
5108     }
5109     diff --git a/kernel/module_signing.c b/kernel/module_signing.c
5110     index 9d9fc678c91d6..8723ae70ea1fe 100644
5111     --- a/kernel/module_signing.c
5112     +++ b/kernel/module_signing.c
5113     @@ -30,7 +30,7 @@ int mod_verify_sig(const void *mod, struct load_info *info)
5114    
5115     memcpy(&ms, mod + (modlen - sizeof(ms)), sizeof(ms));
5116    
5117     - ret = mod_check_sig(&ms, modlen, info->name);
5118     + ret = mod_check_sig(&ms, modlen, "module");
5119     if (ret)
5120     return ret;
5121    
5122     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
5123     index 1a75610f5f57b..1b5f54b309be5 100644
5124     --- a/kernel/trace/trace.c
5125     +++ b/kernel/trace/trace.c
5126     @@ -2857,7 +2857,8 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
5127    
5128     size = nr_entries * sizeof(unsigned long);
5129     event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
5130     - sizeof(*entry) + size, flags, pc);
5131     + (sizeof(*entry) - sizeof(entry->caller)) + size,
5132     + flags, pc);
5133     if (!event)
5134     goto out;
5135     entry = ring_buffer_event_data(event);
5136     diff --git a/mm/memory.c b/mm/memory.c
5137     index c432e7c764451..13a575ce2ec8f 100644
5138     --- a/mm/memory.c
5139     +++ b/mm/memory.c
5140     @@ -150,7 +150,7 @@ static int __init init_zero_pfn(void)
5141     zero_pfn = page_to_pfn(ZERO_PAGE(0));
5142     return 0;
5143     }
5144     -core_initcall(init_zero_pfn);
5145     +early_initcall(init_zero_pfn);
5146    
5147    
5148     #if defined(SPLIT_RSS_COUNTING)
5149     diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
5150     index b41375d4d295d..4610c352849bc 100644
5151     --- a/net/appletalk/ddp.c
5152     +++ b/net/appletalk/ddp.c
5153     @@ -1568,8 +1568,8 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
5154     struct sk_buff *skb;
5155     struct net_device *dev;
5156     struct ddpehdr *ddp;
5157     - int size;
5158     - struct atalk_route *rt;
5159     + int size, hard_header_len;
5160     + struct atalk_route *rt, *rt_lo = NULL;
5161     int err;
5162    
5163     if (flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
5164     @@ -1632,7 +1632,22 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
5165     SOCK_DEBUG(sk, "SK %p: Size needed %d, device %s\n",
5166     sk, size, dev->name);
5167    
5168     - size += dev->hard_header_len;
5169     + hard_header_len = dev->hard_header_len;
5170     + /* Leave room for loopback hardware header if necessary */
5171     + if (usat->sat_addr.s_node == ATADDR_BCAST &&
5172     + (dev->flags & IFF_LOOPBACK || !(rt->flags & RTF_GATEWAY))) {
5173     + struct atalk_addr at_lo;
5174     +
5175     + at_lo.s_node = 0;
5176     + at_lo.s_net = 0;
5177     +
5178     + rt_lo = atrtr_find(&at_lo);
5179     +
5180     + if (rt_lo && rt_lo->dev->hard_header_len > hard_header_len)
5181     + hard_header_len = rt_lo->dev->hard_header_len;
5182     + }
5183     +
5184     + size += hard_header_len;
5185     release_sock(sk);
5186     skb = sock_alloc_send_skb(sk, size, (flags & MSG_DONTWAIT), &err);
5187     lock_sock(sk);
5188     @@ -1640,7 +1655,7 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
5189     goto out;
5190    
5191     skb_reserve(skb, ddp_dl->header_length);
5192     - skb_reserve(skb, dev->hard_header_len);
5193     + skb_reserve(skb, hard_header_len);
5194     skb->dev = dev;
5195    
5196     SOCK_DEBUG(sk, "SK %p: Begin build.\n", sk);
5197     @@ -1691,18 +1706,12 @@ static int atalk_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
5198     /* loop back */
5199     skb_orphan(skb);
5200     if (ddp->deh_dnode == ATADDR_BCAST) {
5201     - struct atalk_addr at_lo;
5202     -
5203     - at_lo.s_node = 0;
5204     - at_lo.s_net = 0;
5205     -
5206     - rt = atrtr_find(&at_lo);
5207     - if (!rt) {
5208     + if (!rt_lo) {
5209     kfree_skb(skb);
5210     err = -ENETUNREACH;
5211     goto out;
5212     }
5213     - dev = rt->dev;
5214     + dev = rt_lo->dev;
5215     skb->dev = dev;
5216     }
5217     ddp_dl->request(ddp_dl, skb, dev->dev_addr);
5218     diff --git a/net/can/af_can.c b/net/can/af_can.c
5219     index 306d3584a4417..c758a12ffe461 100644
5220     --- a/net/can/af_can.c
5221     +++ b/net/can/af_can.c
5222     @@ -304,8 +304,8 @@ static struct can_dev_rcv_lists *can_dev_rcv_lists_find(struct net *net,
5223     struct net_device *dev)
5224     {
5225     if (dev) {
5226     - struct can_ml_priv *ml_priv = dev->ml_priv;
5227     - return &ml_priv->dev_rcv_lists;
5228     + struct can_ml_priv *can_ml = can_get_ml_priv(dev);
5229     + return &can_ml->dev_rcv_lists;
5230     } else {
5231     return net->can.rx_alldev_list;
5232     }
5233     @@ -788,25 +788,6 @@ void can_proto_unregister(const struct can_proto *cp)
5234     }
5235     EXPORT_SYMBOL(can_proto_unregister);
5236    
5237     -/* af_can notifier to create/remove CAN netdevice specific structs */
5238     -static int can_notifier(struct notifier_block *nb, unsigned long msg,
5239     - void *ptr)
5240     -{
5241     - struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5242     -
5243     - if (dev->type != ARPHRD_CAN)
5244     - return NOTIFY_DONE;
5245     -
5246     - switch (msg) {
5247     - case NETDEV_REGISTER:
5248     - WARN(!dev->ml_priv,
5249     - "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
5250     - break;
5251     - }
5252     -
5253     - return NOTIFY_DONE;
5254     -}
5255     -
5256     static int can_pernet_init(struct net *net)
5257     {
5258     spin_lock_init(&net->can.rcvlists_lock);
5259     @@ -874,11 +855,6 @@ static const struct net_proto_family can_family_ops = {
5260     .owner = THIS_MODULE,
5261     };
5262    
5263     -/* notifier block for netdevice event */
5264     -static struct notifier_block can_netdev_notifier __read_mostly = {
5265     - .notifier_call = can_notifier,
5266     -};
5267     -
5268     static struct pernet_operations can_pernet_ops __read_mostly = {
5269     .init = can_pernet_init,
5270     .exit = can_pernet_exit,
5271     @@ -909,17 +885,12 @@ static __init int can_init(void)
5272     err = sock_register(&can_family_ops);
5273     if (err)
5274     goto out_sock;
5275     - err = register_netdevice_notifier(&can_netdev_notifier);
5276     - if (err)
5277     - goto out_notifier;
5278    
5279     dev_add_pack(&can_packet);
5280     dev_add_pack(&canfd_packet);
5281    
5282     return 0;
5283    
5284     -out_notifier:
5285     - sock_unregister(PF_CAN);
5286     out_sock:
5287     unregister_pernet_subsys(&can_pernet_ops);
5288     out_pernet:
5289     @@ -933,7 +904,6 @@ static __exit void can_exit(void)
5290     /* protocol unregister */
5291     dev_remove_pack(&canfd_packet);
5292     dev_remove_pack(&can_packet);
5293     - unregister_netdevice_notifier(&can_netdev_notifier);
5294     sock_unregister(PF_CAN);
5295    
5296     unregister_pernet_subsys(&can_pernet_ops);
5297     diff --git a/net/can/j1939/main.c b/net/can/j1939/main.c
5298     index 137054bff9ec7..e52330f628c9f 100644
5299     --- a/net/can/j1939/main.c
5300     +++ b/net/can/j1939/main.c
5301     @@ -140,9 +140,9 @@ static struct j1939_priv *j1939_priv_create(struct net_device *ndev)
5302     static inline void j1939_priv_set(struct net_device *ndev,
5303     struct j1939_priv *priv)
5304     {
5305     - struct can_ml_priv *can_ml_priv = ndev->ml_priv;
5306     + struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
5307    
5308     - can_ml_priv->j1939_priv = priv;
5309     + can_ml->j1939_priv = priv;
5310     }
5311    
5312     static void __j1939_priv_release(struct kref *kref)
5313     @@ -211,12 +211,9 @@ static void __j1939_rx_release(struct kref *kref)
5314     /* get pointer to priv without increasing ref counter */
5315     static inline struct j1939_priv *j1939_ndev_to_priv(struct net_device *ndev)
5316     {
5317     - struct can_ml_priv *can_ml_priv = ndev->ml_priv;
5318     + struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
5319    
5320     - if (!can_ml_priv)
5321     - return NULL;
5322     -
5323     - return can_ml_priv->j1939_priv;
5324     + return can_ml->j1939_priv;
5325     }
5326    
5327     static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
5328     @@ -225,9 +222,6 @@ static struct j1939_priv *j1939_priv_get_by_ndev_locked(struct net_device *ndev)
5329    
5330     lockdep_assert_held(&j1939_netdev_lock);
5331    
5332     - if (ndev->type != ARPHRD_CAN)
5333     - return NULL;
5334     -
5335     priv = j1939_ndev_to_priv(ndev);
5336     if (priv)
5337     j1939_priv_get(priv);
5338     @@ -348,15 +342,16 @@ static int j1939_netdev_notify(struct notifier_block *nb,
5339     unsigned long msg, void *data)
5340     {
5341     struct net_device *ndev = netdev_notifier_info_to_dev(data);
5342     + struct can_ml_priv *can_ml = can_get_ml_priv(ndev);
5343     struct j1939_priv *priv;
5344    
5345     + if (!can_ml)
5346     + goto notify_done;
5347     +
5348     priv = j1939_priv_get_by_ndev(ndev);
5349     if (!priv)
5350     goto notify_done;
5351    
5352     - if (ndev->type != ARPHRD_CAN)
5353     - goto notify_put;
5354     -
5355     switch (msg) {
5356     case NETDEV_DOWN:
5357     j1939_cancel_active_session(priv, NULL);
5358     @@ -365,7 +360,6 @@ static int j1939_netdev_notify(struct notifier_block *nb,
5359     break;
5360     }
5361    
5362     -notify_put:
5363     j1939_priv_put(priv);
5364    
5365     notify_done:
5366     diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
5367     index 0470909605392..d57475c8ba07f 100644
5368     --- a/net/can/j1939/socket.c
5369     +++ b/net/can/j1939/socket.c
5370     @@ -12,6 +12,7 @@
5371    
5372     #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5373    
5374     +#include <linux/can/can-ml.h>
5375     #include <linux/can/core.h>
5376     #include <linux/can/skb.h>
5377     #include <linux/errqueue.h>
5378     @@ -453,6 +454,7 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
5379     j1939_jsk_del(priv, jsk);
5380     j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
5381     } else {
5382     + struct can_ml_priv *can_ml;
5383     struct net_device *ndev;
5384    
5385     ndev = dev_get_by_index(net, addr->can_ifindex);
5386     @@ -461,15 +463,8 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
5387     goto out_release_sock;
5388     }
5389    
5390     - if (ndev->type != ARPHRD_CAN) {
5391     - dev_put(ndev);
5392     - ret = -ENODEV;
5393     - goto out_release_sock;
5394     - }
5395     -
5396     - if (!ndev->ml_priv) {
5397     - netdev_warn_once(ndev,
5398     - "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
5399     + can_ml = can_get_ml_priv(ndev);
5400     + if (!can_ml) {
5401     dev_put(ndev);
5402     ret = -ENODEV;
5403     goto out_release_sock;
5404     diff --git a/net/can/proc.c b/net/can/proc.c
5405     index 077af42c26ba5..a5fc63c78370e 100644
5406     --- a/net/can/proc.c
5407     +++ b/net/can/proc.c
5408     @@ -329,8 +329,11 @@ static int can_rcvlist_proc_show(struct seq_file *m, void *v)
5409    
5410     /* receive list for registered CAN devices */
5411     for_each_netdev_rcu(net, dev) {
5412     - if (dev->type == ARPHRD_CAN && dev->ml_priv)
5413     - can_rcvlist_proc_show_one(m, idx, dev, dev->ml_priv);
5414     + struct can_ml_priv *can_ml = can_get_ml_priv(dev);
5415     +
5416     + if (can_ml)
5417     + can_rcvlist_proc_show_one(m, idx, dev,
5418     + &can_ml->dev_rcv_lists);
5419     }
5420    
5421     rcu_read_unlock();
5422     @@ -382,8 +385,10 @@ static int can_rcvlist_sff_proc_show(struct seq_file *m, void *v)
5423    
5424     /* sff receive list for registered CAN devices */
5425     for_each_netdev_rcu(net, dev) {
5426     - if (dev->type == ARPHRD_CAN && dev->ml_priv) {
5427     - dev_rcv_lists = dev->ml_priv;
5428     + struct can_ml_priv *can_ml = can_get_ml_priv(dev);
5429     +
5430     + if (can_ml) {
5431     + dev_rcv_lists = &can_ml->dev_rcv_lists;
5432     can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_sff,
5433     ARRAY_SIZE(dev_rcv_lists->rx_sff));
5434     }
5435     @@ -413,8 +418,10 @@ static int can_rcvlist_eff_proc_show(struct seq_file *m, void *v)
5436    
5437     /* eff receive list for registered CAN devices */
5438     for_each_netdev_rcu(net, dev) {
5439     - if (dev->type == ARPHRD_CAN && dev->ml_priv) {
5440     - dev_rcv_lists = dev->ml_priv;
5441     + struct can_ml_priv *can_ml = can_get_ml_priv(dev);
5442     +
5443     + if (can_ml) {
5444     + dev_rcv_lists = &can_ml->dev_rcv_lists;
5445     can_rcvlist_proc_show_array(m, dev, dev_rcv_lists->rx_eff,
5446     ARRAY_SIZE(dev_rcv_lists->rx_eff));
5447     }
5448     diff --git a/net/core/filter.c b/net/core/filter.c
5449     index 524f3364f8a05..7fbb274b7fe32 100644
5450     --- a/net/core/filter.c
5451     +++ b/net/core/filter.c
5452     @@ -3146,18 +3146,14 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
5453     return 0;
5454     }
5455    
5456     -static u32 __bpf_skb_max_len(const struct sk_buff *skb)
5457     -{
5458     - return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
5459     - SKB_MAX_ALLOC;
5460     -}
5461     +#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
5462    
5463     BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
5464     u32, mode, u64, flags)
5465     {
5466     u32 len_cur, len_diff_abs = abs(len_diff);
5467     u32 len_min = bpf_skb_net_base_len(skb);
5468     - u32 len_max = __bpf_skb_max_len(skb);
5469     + u32 len_max = BPF_SKB_MAX_LEN;
5470     __be16 proto = skb->protocol;
5471     bool shrink = len_diff < 0;
5472     u32 off;
5473     @@ -3237,7 +3233,7 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
5474     static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
5475     u64 flags)
5476     {
5477     - u32 max_len = __bpf_skb_max_len(skb);
5478     + u32 max_len = BPF_SKB_MAX_LEN;
5479     u32 min_len = __bpf_skb_min_len(skb);
5480     int ret;
5481    
5482     @@ -3313,7 +3309,7 @@ static const struct bpf_func_proto sk_skb_change_tail_proto = {
5483     static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
5484     u64 flags)
5485     {
5486     - u32 max_len = __bpf_skb_max_len(skb);
5487     + u32 max_len = BPF_SKB_MAX_LEN;
5488     u32 new_len = skb->len + head_room;
5489     int ret;
5490    
5491     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
5492     index e3bdd859c895c..da86c0e1b677d 100644
5493     --- a/net/core/flow_dissector.c
5494     +++ b/net/core/flow_dissector.c
5495     @@ -1028,6 +1028,9 @@ proto_again:
5496     key_control->addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
5497     }
5498    
5499     + __skb_flow_dissect_ipv4(skb, flow_dissector,
5500     + target_container, data, iph);
5501     +
5502     if (ip_is_fragment(iph)) {
5503     key_control->flags |= FLOW_DIS_IS_FRAGMENT;
5504    
5505     @@ -1044,9 +1047,6 @@ proto_again:
5506     }
5507     }
5508    
5509     - __skb_flow_dissect_ipv4(skb, flow_dissector,
5510     - target_container, data, iph);
5511     -
5512     break;
5513     }
5514     case htons(ETH_P_IPV6): {
5515     diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
5516     index 1e5e08cc0bfc3..9f73ccf46c9b1 100644
5517     --- a/net/dccp/ipv6.c
5518     +++ b/net/dccp/ipv6.c
5519     @@ -319,6 +319,11 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
5520     if (!ipv6_unicast_destination(skb))
5521     return 0; /* discard, don't send a reset here */
5522    
5523     + if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
5524     + __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
5525     + return 0;
5526     + }
5527     +
5528     if (dccp_bad_service_code(sk, service)) {
5529     dcb->dccpd_reset_code = DCCP_RESET_CODE_BAD_SERVICE_CODE;
5530     goto drop;
5531     diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
5532     index 3d71c7d6102c4..7e5df23cbe7bf 100644
5533     --- a/net/ipv6/ip6_input.c
5534     +++ b/net/ipv6/ip6_input.c
5535     @@ -223,16 +223,6 @@ static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
5536     if (ipv6_addr_is_multicast(&hdr->saddr))
5537     goto err;
5538    
5539     - /* While RFC4291 is not explicit about v4mapped addresses
5540     - * in IPv6 headers, it seems clear linux dual-stack
5541     - * model can not deal properly with these.
5542     - * Security models could be fooled by ::ffff:127.0.0.1 for example.
5543     - *
5544     - * https://tools.ietf.org/html/draft-itojun-v6ops-v4mapped-harmful-02
5545     - */
5546     - if (ipv6_addr_v4mapped(&hdr->saddr))
5547     - goto err;
5548     -
5549     skb->transport_header = skb->network_header + sizeof(*hdr);
5550     IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
5551    
5552     diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
5553     index b42fa41cfceb5..2f061a911bc2c 100644
5554     --- a/net/ipv6/tcp_ipv6.c
5555     +++ b/net/ipv6/tcp_ipv6.c
5556     @@ -1093,6 +1093,11 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
5557     if (!ipv6_unicast_destination(skb))
5558     goto drop;
5559    
5560     + if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
5561     + __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
5562     + return 0;
5563     + }
5564     +
5565     return tcp_conn_request(&tcp6_request_sock_ops,
5566     &tcp_request_sock_ipv6_ops, sk, skb);
5567    
5568     diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
5569     index cf4d6d7e72822..d5470c7fe8792 100644
5570     --- a/net/sunrpc/auth_gss/svcauth_gss.c
5571     +++ b/net/sunrpc/auth_gss/svcauth_gss.c
5572     @@ -1782,11 +1782,14 @@ static int
5573     svcauth_gss_release(struct svc_rqst *rqstp)
5574     {
5575     struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
5576     - struct rpc_gss_wire_cred *gc = &gsd->clcred;
5577     + struct rpc_gss_wire_cred *gc;
5578     struct xdr_buf *resbuf = &rqstp->rq_res;
5579     int stat = -EINVAL;
5580     struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
5581    
5582     + if (!gsd)
5583     + goto out;
5584     + gc = &gsd->clcred;
5585     if (gc->gc_proc != RPC_GSS_PROC_DATA)
5586     goto out;
5587     /* Release can be called twice, but we only wrap once. */
5588     @@ -1827,10 +1830,10 @@ out_err:
5589     if (rqstp->rq_cred.cr_group_info)
5590     put_group_info(rqstp->rq_cred.cr_group_info);
5591     rqstp->rq_cred.cr_group_info = NULL;
5592     - if (gsd->rsci)
5593     + if (gsd && gsd->rsci) {
5594     cache_put(&gsd->rsci->h, sn->rsc_cache);
5595     - gsd->rsci = NULL;
5596     -
5597     + gsd->rsci = NULL;
5598     + }
5599     return stat;
5600     }
5601    
5602     diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
5603     index 5d323574d04fe..c82e7b52ab1f8 100644
5604     --- a/net/vmw_vsock/af_vsock.c
5605     +++ b/net/vmw_vsock/af_vsock.c
5606     @@ -620,6 +620,7 @@ struct sock *__vsock_create(struct net *net,
5607     vsk->trusted = psk->trusted;
5608     vsk->owner = get_cred(psk->owner);
5609     vsk->connect_timeout = psk->connect_timeout;
5610     + security_sk_clone(parent, sk);
5611     } else {
5612     vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
5613     vsk->owner = get_current_cred();
5614     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5615     index febd16c9efd7a..ebb1ee69dd0c3 100644
5616     --- a/sound/pci/hda/hda_intel.c
5617     +++ b/sound/pci/hda/hda_intel.c
5618     @@ -1022,8 +1022,12 @@ static int azx_prepare(struct device *dev)
5619     struct snd_card *card = dev_get_drvdata(dev);
5620     struct azx *chip;
5621    
5622     + if (!azx_is_pm_ready(card))
5623     + return 0;
5624     +
5625     chip = card->private_data;
5626     chip->pm_prepared = 1;
5627     + snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
5628    
5629     flush_work(&azx_bus(chip)->unsol_work);
5630    
5631     @@ -1038,7 +1042,11 @@ static void azx_complete(struct device *dev)
5632     struct snd_card *card = dev_get_drvdata(dev);
5633     struct azx *chip;
5634    
5635     + if (!azx_is_pm_ready(card))
5636     + return;
5637     +
5638     chip = card->private_data;
5639     + snd_power_change_state(card, SNDRV_CTL_POWER_D0);
5640     chip->pm_prepared = 0;
5641     }
5642    
5643     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5644     index 73580e8208ed1..3c9e072db3538 100644
5645     --- a/sound/pci/hda/patch_realtek.c
5646     +++ b/sound/pci/hda/patch_realtek.c
5647     @@ -5192,7 +5192,7 @@ static void alc_determine_headset_type(struct hda_codec *codec)
5648     case 0x10ec0274:
5649     case 0x10ec0294:
5650     alc_process_coef_fw(codec, coef0274);
5651     - msleep(80);
5652     + msleep(850);
5653     val = alc_read_coef_idx(codec, 0x46);
5654     is_ctia = (val & 0x00f0) == 0x00f0;
5655     break;
5656     @@ -5376,6 +5376,7 @@ static void alc_update_headset_jack_cb(struct hda_codec *codec,
5657     struct hda_jack_callback *jack)
5658     {
5659     snd_hda_gen_hp_automute(codec, jack);
5660     + alc_update_headset_mode(codec);
5661     }
5662    
5663     static void alc_probe_headset_mode(struct hda_codec *codec)
5664     diff --git a/sound/soc/codecs/cs42l42.c b/sound/soc/codecs/cs42l42.c
5665     index 5125bb9b37b55..dcd2acb2c3cef 100644
5666     --- a/sound/soc/codecs/cs42l42.c
5667     +++ b/sound/soc/codecs/cs42l42.c
5668     @@ -401,7 +401,7 @@ static const struct regmap_config cs42l42_regmap = {
5669     };
5670    
5671     static DECLARE_TLV_DB_SCALE(adc_tlv, -9600, 100, false);
5672     -static DECLARE_TLV_DB_SCALE(mixer_tlv, -6200, 100, false);
5673     +static DECLARE_TLV_DB_SCALE(mixer_tlv, -6300, 100, true);
5674    
5675     static const char * const cs42l42_hpf_freq_text[] = {
5676     "1.86Hz", "120Hz", "235Hz", "466Hz"
5677     @@ -458,7 +458,7 @@ static const struct snd_kcontrol_new cs42l42_snd_controls[] = {
5678     CS42L42_DAC_HPF_EN_SHIFT, true, false),
5679     SOC_DOUBLE_R_TLV("Mixer Volume", CS42L42_MIXER_CHA_VOL,
5680     CS42L42_MIXER_CHB_VOL, CS42L42_MIXER_CH_VOL_SHIFT,
5681     - 0x3e, 1, mixer_tlv)
5682     + 0x3f, 1, mixer_tlv)
5683     };
5684    
5685     static int cs42l42_hpdrv_evt(struct snd_soc_dapm_widget *w,
5686     @@ -691,24 +691,6 @@ static int cs42l42_pll_config(struct snd_soc_component *component)
5687     CS42L42_CLK_OASRC_SEL_MASK,
5688     CS42L42_CLK_OASRC_SEL_12 <<
5689     CS42L42_CLK_OASRC_SEL_SHIFT);
5690     - /* channel 1 on low LRCLK, 32 bit */
5691     - snd_soc_component_update_bits(component,
5692     - CS42L42_ASP_RX_DAI0_CH1_AP_RES,
5693     - CS42L42_ASP_RX_CH_AP_MASK |
5694     - CS42L42_ASP_RX_CH_RES_MASK,
5695     - (CS42L42_ASP_RX_CH_AP_LOW <<
5696     - CS42L42_ASP_RX_CH_AP_SHIFT) |
5697     - (CS42L42_ASP_RX_CH_RES_32 <<
5698     - CS42L42_ASP_RX_CH_RES_SHIFT));
5699     - /* Channel 2 on high LRCLK, 32 bit */
5700     - snd_soc_component_update_bits(component,
5701     - CS42L42_ASP_RX_DAI0_CH2_AP_RES,
5702     - CS42L42_ASP_RX_CH_AP_MASK |
5703     - CS42L42_ASP_RX_CH_RES_MASK,
5704     - (CS42L42_ASP_RX_CH_AP_HI <<
5705     - CS42L42_ASP_RX_CH_AP_SHIFT) |
5706     - (CS42L42_ASP_RX_CH_RES_32 <<
5707     - CS42L42_ASP_RX_CH_RES_SHIFT));
5708     if (pll_ratio_table[i].mclk_src_sel == 0) {
5709     /* Pass the clock straight through */
5710     snd_soc_component_update_bits(component,
5711     @@ -797,27 +779,23 @@ static int cs42l42_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
5712     /* Bitclock/frame inversion */
5713     switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
5714     case SND_SOC_DAIFMT_NB_NF:
5715     + asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
5716     break;
5717     case SND_SOC_DAIFMT_NB_IF:
5718     - asp_cfg_val |= CS42L42_ASP_POL_INV <<
5719     - CS42L42_ASP_LCPOL_IN_SHIFT;
5720     + asp_cfg_val |= CS42L42_ASP_SCPOL_NOR << CS42L42_ASP_SCPOL_SHIFT;
5721     + asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
5722     break;
5723     case SND_SOC_DAIFMT_IB_NF:
5724     - asp_cfg_val |= CS42L42_ASP_POL_INV <<
5725     - CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
5726     break;
5727     case SND_SOC_DAIFMT_IB_IF:
5728     - asp_cfg_val |= CS42L42_ASP_POL_INV <<
5729     - CS42L42_ASP_LCPOL_IN_SHIFT;
5730     - asp_cfg_val |= CS42L42_ASP_POL_INV <<
5731     - CS42L42_ASP_SCPOL_IN_DAC_SHIFT;
5732     + asp_cfg_val |= CS42L42_ASP_LCPOL_INV << CS42L42_ASP_LCPOL_SHIFT;
5733     break;
5734     }
5735    
5736     - snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG,
5737     - CS42L42_ASP_MODE_MASK |
5738     - CS42L42_ASP_SCPOL_IN_DAC_MASK |
5739     - CS42L42_ASP_LCPOL_IN_MASK, asp_cfg_val);
5740     + snd_soc_component_update_bits(component, CS42L42_ASP_CLK_CFG, CS42L42_ASP_MODE_MASK |
5741     + CS42L42_ASP_SCPOL_MASK |
5742     + CS42L42_ASP_LCPOL_MASK,
5743     + asp_cfg_val);
5744    
5745     return 0;
5746     }
5747     @@ -828,14 +806,29 @@ static int cs42l42_pcm_hw_params(struct snd_pcm_substream *substream,
5748     {
5749     struct snd_soc_component *component = dai->component;
5750     struct cs42l42_private *cs42l42 = snd_soc_component_get_drvdata(component);
5751     - int retval;
5752     + unsigned int width = (params_width(params) / 8) - 1;
5753     + unsigned int val = 0;
5754    
5755     cs42l42->srate = params_rate(params);
5756     - cs42l42->swidth = params_width(params);
5757    
5758     - retval = cs42l42_pll_config(component);
5759     + switch(substream->stream) {
5760     + case SNDRV_PCM_STREAM_PLAYBACK:
5761     + val |= width << CS42L42_ASP_RX_CH_RES_SHIFT;
5762     + /* channel 1 on low LRCLK */
5763     + snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH1_AP_RES,
5764     + CS42L42_ASP_RX_CH_AP_MASK |
5765     + CS42L42_ASP_RX_CH_RES_MASK, val);
5766     + /* Channel 2 on high LRCLK */
5767     + val |= CS42L42_ASP_RX_CH_AP_HI << CS42L42_ASP_RX_CH_AP_SHIFT;
5768     + snd_soc_component_update_bits(component, CS42L42_ASP_RX_DAI0_CH2_AP_RES,
5769     + CS42L42_ASP_RX_CH_AP_MASK |
5770     + CS42L42_ASP_RX_CH_RES_MASK, val);
5771     + break;
5772     + default:
5773     + break;
5774     + }
5775    
5776     - return retval;
5777     + return cs42l42_pll_config(component);
5778     }
5779    
5780     static int cs42l42_set_sysclk(struct snd_soc_dai *dai,
5781     @@ -900,9 +893,9 @@ static int cs42l42_digital_mute(struct snd_soc_dai *dai, int mute)
5782     return 0;
5783     }
5784    
5785     -#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S18_3LE | \
5786     - SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE | \
5787     - SNDRV_PCM_FMTBIT_S32_LE)
5788     +#define CS42L42_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\
5789     + SNDRV_PCM_FMTBIT_S24_LE |\
5790     + SNDRV_PCM_FMTBIT_S32_LE )
5791    
5792    
5793     static const struct snd_soc_dai_ops cs42l42_ops = {
5794     @@ -1803,7 +1796,7 @@ static int cs42l42_i2c_probe(struct i2c_client *i2c_client,
5795     dev_dbg(&i2c_client->dev, "Found reset GPIO\n");
5796     gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
5797     }
5798     - mdelay(3);
5799     + usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
5800    
5801     /* Request IRQ */
5802     ret = devm_request_threaded_irq(&i2c_client->dev,
5803     @@ -1928,6 +1921,7 @@ static int cs42l42_runtime_resume(struct device *dev)
5804     }
5805    
5806     gpiod_set_value_cansleep(cs42l42->reset_gpio, 1);
5807     + usleep_range(CS42L42_BOOT_TIME_US, CS42L42_BOOT_TIME_US * 2);
5808    
5809     regcache_cache_only(cs42l42->regmap, false);
5810     regcache_sync(cs42l42->regmap);
5811     diff --git a/sound/soc/codecs/cs42l42.h b/sound/soc/codecs/cs42l42.h
5812     index 9e3cc528dcff0..866d7c873e3c9 100644
5813     --- a/sound/soc/codecs/cs42l42.h
5814     +++ b/sound/soc/codecs/cs42l42.h
5815     @@ -258,11 +258,12 @@
5816     #define CS42L42_ASP_SLAVE_MODE 0x00
5817     #define CS42L42_ASP_MODE_SHIFT 4
5818     #define CS42L42_ASP_MODE_MASK (1 << CS42L42_ASP_MODE_SHIFT)
5819     -#define CS42L42_ASP_SCPOL_IN_DAC_SHIFT 2
5820     -#define CS42L42_ASP_SCPOL_IN_DAC_MASK (1 << CS42L42_ASP_SCPOL_IN_DAC_SHIFT)
5821     -#define CS42L42_ASP_LCPOL_IN_SHIFT 0
5822     -#define CS42L42_ASP_LCPOL_IN_MASK (1 << CS42L42_ASP_LCPOL_IN_SHIFT)
5823     -#define CS42L42_ASP_POL_INV 1
5824     +#define CS42L42_ASP_SCPOL_SHIFT 2
5825     +#define CS42L42_ASP_SCPOL_MASK (3 << CS42L42_ASP_SCPOL_SHIFT)
5826     +#define CS42L42_ASP_SCPOL_NOR 3
5827     +#define CS42L42_ASP_LCPOL_SHIFT 0
5828     +#define CS42L42_ASP_LCPOL_MASK (3 << CS42L42_ASP_LCPOL_SHIFT)
5829     +#define CS42L42_ASP_LCPOL_INV 3
5830    
5831     #define CS42L42_ASP_FRM_CFG (CS42L42_PAGE_12 + 0x08)
5832     #define CS42L42_ASP_STP_SHIFT 4
5833     @@ -739,6 +740,7 @@
5834     #define CS42L42_FRAC2_VAL(val) (((val) & 0xff0000) >> 16)
5835    
5836     #define CS42L42_NUM_SUPPLIES 5
5837     +#define CS42L42_BOOT_TIME_US 3000
5838    
5839     static const char *const cs42l42_supply_names[CS42L42_NUM_SUPPLIES] = {
5840     "VA",
5841     @@ -756,7 +758,6 @@ struct cs42l42_private {
5842     struct completion pdn_done;
5843     u32 sclk;
5844     u32 srate;
5845     - u32 swidth;
5846     u8 plug_state;
5847     u8 hs_type;
5848     u8 ts_inv;
5849     diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
5850     index 36eef1fb3d181..b781b28de0126 100644
5851     --- a/sound/soc/codecs/es8316.c
5852     +++ b/sound/soc/codecs/es8316.c
5853     @@ -63,13 +63,8 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
5854     1, 1, TLV_DB_SCALE_ITEM(0, 0, 0),
5855     2, 2, TLV_DB_SCALE_ITEM(250, 0, 0),
5856     3, 3, TLV_DB_SCALE_ITEM(450, 0, 0),
5857     - 4, 4, TLV_DB_SCALE_ITEM(700, 0, 0),
5858     - 5, 5, TLV_DB_SCALE_ITEM(1000, 0, 0),
5859     - 6, 6, TLV_DB_SCALE_ITEM(1300, 0, 0),
5860     - 7, 7, TLV_DB_SCALE_ITEM(1600, 0, 0),
5861     - 8, 8, TLV_DB_SCALE_ITEM(1800, 0, 0),
5862     - 9, 9, TLV_DB_SCALE_ITEM(2100, 0, 0),
5863     - 10, 10, TLV_DB_SCALE_ITEM(2400, 0, 0),
5864     + 4, 7, TLV_DB_SCALE_ITEM(700, 300, 0),
5865     + 8, 10, TLV_DB_SCALE_ITEM(1800, 300, 0),
5866     );
5867    
5868     static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpout_vol_tlv,
5869     diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
5870     index 747ca248bf10c..3bc63fbcb1889 100644
5871     --- a/sound/soc/codecs/rt5640.c
5872     +++ b/sound/soc/codecs/rt5640.c
5873     @@ -339,9 +339,9 @@ static bool rt5640_readable_register(struct device *dev, unsigned int reg)
5874     }
5875    
5876     static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
5877     -static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
5878     +static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
5879     static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
5880     -static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
5881     +static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
5882     static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
5883    
5884     /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
5885     diff --git a/sound/soc/codecs/rt5651.c b/sound/soc/codecs/rt5651.c
5886     index c506c9305043e..829cf552fe3e8 100644
5887     --- a/sound/soc/codecs/rt5651.c
5888     +++ b/sound/soc/codecs/rt5651.c
5889     @@ -285,9 +285,9 @@ static bool rt5651_readable_register(struct device *dev, unsigned int reg)
5890     }
5891    
5892     static const DECLARE_TLV_DB_SCALE(out_vol_tlv, -4650, 150, 0);
5893     -static const DECLARE_TLV_DB_SCALE(dac_vol_tlv, -65625, 375, 0);
5894     +static const DECLARE_TLV_DB_MINMAX(dac_vol_tlv, -6562, 0);
5895     static const DECLARE_TLV_DB_SCALE(in_vol_tlv, -3450, 150, 0);
5896     -static const DECLARE_TLV_DB_SCALE(adc_vol_tlv, -17625, 375, 0);
5897     +static const DECLARE_TLV_DB_MINMAX(adc_vol_tlv, -1762, 3000);
5898     static const DECLARE_TLV_DB_SCALE(adc_bst_tlv, 0, 1200, 0);
5899    
5900     /* {0, +20, +24, +30, +35, +40, +44, +50, +52} dB */
5901     diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
5902     index e66d08398f746..afd61599d94c9 100644
5903     --- a/sound/soc/codecs/rt5659.c
5904     +++ b/sound/soc/codecs/rt5659.c
5905     @@ -3463,12 +3463,17 @@ static int rt5659_set_component_sysclk(struct snd_soc_component *component, int
5906     {
5907     struct rt5659_priv *rt5659 = snd_soc_component_get_drvdata(component);
5908     unsigned int reg_val = 0;
5909     + int ret;
5910    
5911     if (freq == rt5659->sysclk && clk_id == rt5659->sysclk_src)
5912     return 0;
5913    
5914     switch (clk_id) {
5915     case RT5659_SCLK_S_MCLK:
5916     + ret = clk_set_rate(rt5659->mclk, freq);
5917     + if (ret)
5918     + return ret;
5919     +
5920     reg_val |= RT5659_SCLK_SRC_MCLK;
5921     break;
5922     case RT5659_SCLK_S_PLL1:
5923     diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
5924     index f5b59305c957a..8a1e485982d81 100644
5925     --- a/sound/soc/codecs/sgtl5000.c
5926     +++ b/sound/soc/codecs/sgtl5000.c
5927     @@ -71,7 +71,7 @@ static const struct reg_default sgtl5000_reg_defaults[] = {
5928     { SGTL5000_DAP_EQ_BASS_BAND4, 0x002f },
5929     { SGTL5000_DAP_MAIN_CHAN, 0x8000 },
5930     { SGTL5000_DAP_MIX_CHAN, 0x0000 },
5931     - { SGTL5000_DAP_AVC_CTRL, 0x0510 },
5932     + { SGTL5000_DAP_AVC_CTRL, 0x5100 },
5933     { SGTL5000_DAP_AVC_THRESHOLD, 0x1473 },
5934     { SGTL5000_DAP_AVC_ATTACK, 0x0028 },
5935     { SGTL5000_DAP_AVC_DECAY, 0x0050 },
5936     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
5937     index 9fb03c646a88f..3d1585c12b074 100644
5938     --- a/sound/usb/quirks.c
5939     +++ b/sound/usb/quirks.c
5940     @@ -1453,6 +1453,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
5941     case USB_ID(0x21B4, 0x0081): /* AudioQuest DragonFly */
5942     case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
5943     case USB_ID(0x413c, 0xa506): /* Dell AE515 sound bar */
5944     + case USB_ID(0x046d, 0x084c): /* Logitech ConferenceCam Connect */
5945     return true;
5946     }
5947    
5948     diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh
5949     index 058c746ee3006..b11d8e6b5bc14 100755
5950     --- a/tools/testing/selftests/net/forwarding/tc_flower.sh
5951     +++ b/tools/testing/selftests/net/forwarding/tc_flower.sh
5952     @@ -3,7 +3,7 @@
5953    
5954     ALL_TESTS="match_dst_mac_test match_src_mac_test match_dst_ip_test \
5955     match_src_ip_test match_ip_flags_test match_pcp_test match_vlan_test \
5956     - match_ip_tos_test match_indev_test"
5957     + match_ip_tos_test match_indev_test match_ip_ttl_test"
5958     NUM_NETIFS=2
5959     source tc_common.sh
5960     source lib.sh
5961     @@ -310,6 +310,42 @@ match_ip_tos_test()
5962     log_test "ip_tos match ($tcflags)"
5963     }
5964    
5965     +match_ip_ttl_test()
5966     +{
5967     + RET=0
5968     +
5969     + tc filter add dev $h2 ingress protocol ip pref 1 handle 101 flower \
5970     + $tcflags dst_ip 192.0.2.2 ip_ttl 63 action drop
5971     + tc filter add dev $h2 ingress protocol ip pref 2 handle 102 flower \
5972     + $tcflags dst_ip 192.0.2.2 action drop
5973     +
5974     + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
5975     + -t ip "ttl=63" -q
5976     +
5977     + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
5978     + -t ip "ttl=63,mf,frag=256" -q
5979     +
5980     + tc_check_packets "dev $h2 ingress" 102 1
5981     + check_fail $? "Matched on the wrong filter (no check on ttl)"
5982     +
5983     + tc_check_packets "dev $h2 ingress" 101 2
5984     + check_err $? "Did not match on correct filter (ttl=63)"
5985     +
5986     + $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \
5987     + -t ip "ttl=255" -q
5988     +
5989     + tc_check_packets "dev $h2 ingress" 101 3
5990     + check_fail $? "Matched on a wrong filter (ttl=63)"
5991     +
5992     + tc_check_packets "dev $h2 ingress" 102 1
5993     + check_err $? "Did not match on correct filter (no check on ttl)"
5994     +
5995     + tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower
5996     + tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower
5997     +
5998     + log_test "ip_ttl match ($tcflags)"
5999     +}
6000     +
6001     match_indev_test()
6002     {
6003     RET=0