Contents of /trunk/kernel-magellan/patches-4.16/0105-4.16.6-all-fixes.patch
Parent Directory | Revision Log
Revision 3109 -
(show annotations)
(download)
Wed May 16 14:24:32 2018 UTC (6 years, 4 months ago) by niro
File size: 121097 byte(s)
-linux-4.16.6
1 | diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt |
2 | index a553d4e4a0fb..f778901c4297 100644 |
3 | --- a/Documentation/networking/ip-sysctl.txt |
4 | +++ b/Documentation/networking/ip-sysctl.txt |
5 | @@ -1386,26 +1386,26 @@ mld_qrv - INTEGER |
6 | Default: 2 (as specified by RFC3810 9.1) |
7 | Minimum: 1 (as specified by RFC6636 4.5) |
8 | |
9 | -max_dst_opts_cnt - INTEGER |
10 | +max_dst_opts_number - INTEGER |
11 | Maximum number of non-padding TLVs allowed in a Destination |
12 | options extension header. If this value is less than zero |
13 | then unknown options are disallowed and the number of known |
14 | TLVs allowed is the absolute value of this number. |
15 | Default: 8 |
16 | |
17 | -max_hbh_opts_cnt - INTEGER |
18 | +max_hbh_opts_number - INTEGER |
19 | Maximum number of non-padding TLVs allowed in a Hop-by-Hop |
20 | options extension header. If this value is less than zero |
21 | then unknown options are disallowed and the number of known |
22 | TLVs allowed is the absolute value of this number. |
23 | Default: 8 |
24 | |
25 | -max dst_opts_len - INTEGER |
26 | +max_dst_opts_length - INTEGER |
27 | Maximum length allowed for a Destination options extension |
28 | header. |
29 | Default: INT_MAX (unlimited) |
30 | |
31 | -max hbh_opts_len - INTEGER |
32 | +max_hbh_length - INTEGER |
33 | Maximum length allowed for a Hop-by-Hop options extension |
34 | header. |
35 | Default: INT_MAX (unlimited) |
36 | diff --git a/Makefile b/Makefile |
37 | index 6678a90f355b..41f07b2b7905 100644 |
38 | --- a/Makefile |
39 | +++ b/Makefile |
40 | @@ -1,7 +1,7 @@ |
41 | # SPDX-License-Identifier: GPL-2.0 |
42 | VERSION = 4 |
43 | PATCHLEVEL = 16 |
44 | -SUBLEVEL = 5 |
45 | +SUBLEVEL = 6 |
46 | EXTRAVERSION = |
47 | NAME = Fearless Coyote |
48 | |
49 | diff --git a/arch/s390/kernel/perf_cpum_cf_events.c b/arch/s390/kernel/perf_cpum_cf_events.c |
50 | index 5ee27dc9a10c..feebb2944882 100644 |
51 | --- a/arch/s390/kernel/perf_cpum_cf_events.c |
52 | +++ b/arch/s390/kernel/perf_cpum_cf_events.c |
53 | @@ -123,7 +123,7 @@ CPUMF_EVENT_ATTR(cf_zec12, L1I_OFFBOOK_L3_SOURCED_WRITES_IV, 0x00a1); |
54 | CPUMF_EVENT_ATTR(cf_zec12, TX_NC_TABORT, 0x00b1); |
55 | CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_NO_SPECIAL, 0x00b2); |
56 | CPUMF_EVENT_ATTR(cf_zec12, TX_C_TABORT_SPECIAL, 0x00b3); |
57 | -CPUMF_EVENT_ATTR(cf_z13, L1D_WRITES_RO_EXCL, 0x0080); |
58 | +CPUMF_EVENT_ATTR(cf_z13, L1D_RO_EXCL_WRITES, 0x0080); |
59 | CPUMF_EVENT_ATTR(cf_z13, DTLB1_WRITES, 0x0081); |
60 | CPUMF_EVENT_ATTR(cf_z13, DTLB1_MISSES, 0x0082); |
61 | CPUMF_EVENT_ATTR(cf_z13, DTLB1_HPAGE_WRITES, 0x0083); |
62 | @@ -179,7 +179,7 @@ CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_NO_SPECIAL, 0x00db); |
63 | CPUMF_EVENT_ATTR(cf_z13, TX_C_TABORT_SPECIAL, 0x00dc); |
64 | CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0); |
65 | CPUMF_EVENT_ATTR(cf_z13, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1); |
66 | -CPUMF_EVENT_ATTR(cf_z14, L1D_WRITES_RO_EXCL, 0x0080); |
67 | +CPUMF_EVENT_ATTR(cf_z14, L1D_RO_EXCL_WRITES, 0x0080); |
68 | CPUMF_EVENT_ATTR(cf_z14, DTLB2_WRITES, 0x0081); |
69 | CPUMF_EVENT_ATTR(cf_z14, DTLB2_MISSES, 0x0082); |
70 | CPUMF_EVENT_ATTR(cf_z14, DTLB2_HPAGE_WRITES, 0x0083); |
71 | @@ -371,7 +371,7 @@ static struct attribute *cpumcf_zec12_pmu_event_attr[] __initdata = { |
72 | }; |
73 | |
74 | static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { |
75 | - CPUMF_EVENT_PTR(cf_z13, L1D_WRITES_RO_EXCL), |
76 | + CPUMF_EVENT_PTR(cf_z13, L1D_RO_EXCL_WRITES), |
77 | CPUMF_EVENT_PTR(cf_z13, DTLB1_WRITES), |
78 | CPUMF_EVENT_PTR(cf_z13, DTLB1_MISSES), |
79 | CPUMF_EVENT_PTR(cf_z13, DTLB1_HPAGE_WRITES), |
80 | @@ -431,7 +431,7 @@ static struct attribute *cpumcf_z13_pmu_event_attr[] __initdata = { |
81 | }; |
82 | |
83 | static struct attribute *cpumcf_z14_pmu_event_attr[] __initdata = { |
84 | - CPUMF_EVENT_PTR(cf_z14, L1D_WRITES_RO_EXCL), |
85 | + CPUMF_EVENT_PTR(cf_z14, L1D_RO_EXCL_WRITES), |
86 | CPUMF_EVENT_PTR(cf_z14, DTLB2_WRITES), |
87 | CPUMF_EVENT_PTR(cf_z14, DTLB2_MISSES), |
88 | CPUMF_EVENT_PTR(cf_z14, DTLB2_HPAGE_WRITES), |
89 | diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c |
90 | index d9d1f512f019..5007fac01bb5 100644 |
91 | --- a/arch/s390/kernel/uprobes.c |
92 | +++ b/arch/s390/kernel/uprobes.c |
93 | @@ -150,6 +150,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline, |
94 | return orig; |
95 | } |
96 | |
97 | +bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx, |
98 | + struct pt_regs *regs) |
99 | +{ |
100 | + if (ctx == RP_CHECK_CHAIN_CALL) |
101 | + return user_stack_pointer(regs) <= ret->stack; |
102 | + else |
103 | + return user_stack_pointer(regs) < ret->stack; |
104 | +} |
105 | + |
106 | /* Instruction Emulation */ |
107 | |
108 | static void adjust_psw_addr(psw_t *psw, unsigned long len) |
109 | diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c |
110 | index 76fb96966f7b..2f2e737be0f8 100644 |
111 | --- a/drivers/acpi/acpi_video.c |
112 | +++ b/drivers/acpi/acpi_video.c |
113 | @@ -2123,6 +2123,25 @@ static int __init intel_opregion_present(void) |
114 | return opregion; |
115 | } |
116 | |
117 | +static bool dmi_is_desktop(void) |
118 | +{ |
119 | + const char *chassis_type; |
120 | + |
121 | + chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE); |
122 | + if (!chassis_type) |
123 | + return false; |
124 | + |
125 | + if (!strcmp(chassis_type, "3") || /* 3: Desktop */ |
126 | + !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */ |
127 | + !strcmp(chassis_type, "5") || /* 5: Pizza Box */ |
128 | + !strcmp(chassis_type, "6") || /* 6: Mini Tower */ |
129 | + !strcmp(chassis_type, "7") || /* 7: Tower */ |
130 | + !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */ |
131 | + return true; |
132 | + |
133 | + return false; |
134 | +} |
135 | + |
136 | int acpi_video_register(void) |
137 | { |
138 | int ret = 0; |
139 | @@ -2143,8 +2162,12 @@ int acpi_video_register(void) |
140 | * win8 ready (where we also prefer the native backlight driver, so |
141 | * normally the acpi_video code should not register there anyways). |
142 | */ |
143 | - if (only_lcd == -1) |
144 | - only_lcd = acpi_osi_is_win8(); |
145 | + if (only_lcd == -1) { |
146 | + if (dmi_is_desktop() && acpi_osi_is_win8()) |
147 | + only_lcd = true; |
148 | + else |
149 | + only_lcd = false; |
150 | + } |
151 | |
152 | dmi_check_system(video_dmi_table); |
153 | |
154 | diff --git a/drivers/block/swim.c b/drivers/block/swim.c |
155 | index 64e066eba72e..0e31884a9519 100644 |
156 | --- a/drivers/block/swim.c |
157 | +++ b/drivers/block/swim.c |
158 | @@ -110,7 +110,7 @@ struct iwm { |
159 | /* Select values for swim_select and swim_readbit */ |
160 | |
161 | #define READ_DATA_0 0x074 |
162 | -#define TWOMEG_DRIVE 0x075 |
163 | +#define ONEMEG_DRIVE 0x075 |
164 | #define SINGLE_SIDED 0x076 |
165 | #define DRIVE_PRESENT 0x077 |
166 | #define DISK_IN 0x170 |
167 | @@ -118,9 +118,9 @@ struct iwm { |
168 | #define TRACK_ZERO 0x172 |
169 | #define TACHO 0x173 |
170 | #define READ_DATA_1 0x174 |
171 | -#define MFM_MODE 0x175 |
172 | +#define GCR_MODE 0x175 |
173 | #define SEEK_COMPLETE 0x176 |
174 | -#define ONEMEG_MEDIA 0x177 |
175 | +#define TWOMEG_MEDIA 0x177 |
176 | |
177 | /* Bits in handshake register */ |
178 | |
179 | @@ -612,7 +612,6 @@ static void setup_medium(struct floppy_state *fs) |
180 | struct floppy_struct *g; |
181 | fs->disk_in = 1; |
182 | fs->write_protected = swim_readbit(base, WRITE_PROT); |
183 | - fs->type = swim_readbit(base, ONEMEG_MEDIA); |
184 | |
185 | if (swim_track00(base)) |
186 | printk(KERN_ERR |
187 | @@ -620,6 +619,9 @@ static void setup_medium(struct floppy_state *fs) |
188 | |
189 | swim_track00(base); |
190 | |
191 | + fs->type = swim_readbit(base, TWOMEG_MEDIA) ? |
192 | + HD_MEDIA : DD_MEDIA; |
193 | + fs->head_number = swim_readbit(base, SINGLE_SIDED) ? 1 : 2; |
194 | get_floppy_geometry(fs, 0, &g); |
195 | fs->total_secs = g->size; |
196 | fs->secpercyl = g->head * g->sect; |
197 | @@ -646,7 +648,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) |
198 | |
199 | swim_write(base, setup, S_IBM_DRIVE | S_FCLK_DIV2); |
200 | udelay(10); |
201 | - swim_drive(base, INTERNAL_DRIVE); |
202 | + swim_drive(base, fs->location); |
203 | swim_motor(base, ON); |
204 | swim_action(base, SETMFM); |
205 | if (fs->ejected) |
206 | @@ -656,6 +658,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode) |
207 | goto out; |
208 | } |
209 | |
210 | + set_capacity(fs->disk, fs->total_secs); |
211 | + |
212 | if (mode & FMODE_NDELAY) |
213 | return 0; |
214 | |
215 | @@ -727,14 +731,9 @@ static int floppy_ioctl(struct block_device *bdev, fmode_t mode, |
216 | if (copy_to_user((void __user *) param, (void *) &floppy_type, |
217 | sizeof(struct floppy_struct))) |
218 | return -EFAULT; |
219 | - break; |
220 | - |
221 | - default: |
222 | - printk(KERN_DEBUG "SWIM floppy_ioctl: unknown cmd %d\n", |
223 | - cmd); |
224 | - return -ENOSYS; |
225 | + return 0; |
226 | } |
227 | - return 0; |
228 | + return -ENOTTY; |
229 | } |
230 | |
231 | static int floppy_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
232 | @@ -795,7 +794,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data) |
233 | struct swim_priv *swd = data; |
234 | int drive = (*part & 3); |
235 | |
236 | - if (drive > swd->floppy_count) |
237 | + if (drive >= swd->floppy_count) |
238 | return NULL; |
239 | |
240 | *part = 0; |
241 | @@ -813,10 +812,9 @@ static int swim_add_floppy(struct swim_priv *swd, enum drive_location location) |
242 | |
243 | swim_motor(base, OFF); |
244 | |
245 | - if (swim_readbit(base, SINGLE_SIDED)) |
246 | - fs->head_number = 1; |
247 | - else |
248 | - fs->head_number = 2; |
249 | + fs->type = HD_MEDIA; |
250 | + fs->head_number = 2; |
251 | + |
252 | fs->ref_count = 0; |
253 | fs->ejected = 1; |
254 | |
255 | @@ -834,10 +832,12 @@ static int swim_floppy_init(struct swim_priv *swd) |
256 | /* scan floppy drives */ |
257 | |
258 | swim_drive(base, INTERNAL_DRIVE); |
259 | - if (swim_readbit(base, DRIVE_PRESENT)) |
260 | + if (swim_readbit(base, DRIVE_PRESENT) && |
261 | + !swim_readbit(base, ONEMEG_DRIVE)) |
262 | swim_add_floppy(swd, INTERNAL_DRIVE); |
263 | swim_drive(base, EXTERNAL_DRIVE); |
264 | - if (swim_readbit(base, DRIVE_PRESENT)) |
265 | + if (swim_readbit(base, DRIVE_PRESENT) && |
266 | + !swim_readbit(base, ONEMEG_DRIVE)) |
267 | swim_add_floppy(swd, EXTERNAL_DRIVE); |
268 | |
269 | /* register floppy drives */ |
270 | @@ -861,7 +861,6 @@ static int swim_floppy_init(struct swim_priv *swd) |
271 | &swd->lock); |
272 | if (!swd->unit[drive].disk->queue) { |
273 | err = -ENOMEM; |
274 | - put_disk(swd->unit[drive].disk); |
275 | goto exit_put_disks; |
276 | } |
277 | blk_queue_bounce_limit(swd->unit[drive].disk->queue, |
278 | @@ -911,7 +910,7 @@ static int swim_probe(struct platform_device *dev) |
279 | goto out; |
280 | } |
281 | |
282 | - swim_base = ioremap(res->start, resource_size(res)); |
283 | + swim_base = (struct swim __iomem *)res->start; |
284 | if (!swim_base) { |
285 | ret = -ENOMEM; |
286 | goto out_release_io; |
287 | @@ -923,7 +922,7 @@ static int swim_probe(struct platform_device *dev) |
288 | if (!get_swim_mode(swim_base)) { |
289 | printk(KERN_INFO "SWIM device not found !\n"); |
290 | ret = -ENODEV; |
291 | - goto out_iounmap; |
292 | + goto out_release_io; |
293 | } |
294 | |
295 | /* set platform driver data */ |
296 | @@ -931,7 +930,7 @@ static int swim_probe(struct platform_device *dev) |
297 | swd = kzalloc(sizeof(struct swim_priv), GFP_KERNEL); |
298 | if (!swd) { |
299 | ret = -ENOMEM; |
300 | - goto out_iounmap; |
301 | + goto out_release_io; |
302 | } |
303 | platform_set_drvdata(dev, swd); |
304 | |
305 | @@ -945,8 +944,6 @@ static int swim_probe(struct platform_device *dev) |
306 | |
307 | out_kfree: |
308 | kfree(swd); |
309 | -out_iounmap: |
310 | - iounmap(swim_base); |
311 | out_release_io: |
312 | release_mem_region(res->start, resource_size(res)); |
313 | out: |
314 | @@ -974,8 +971,6 @@ static int swim_remove(struct platform_device *dev) |
315 | for (drive = 0; drive < swd->floppy_count; drive++) |
316 | floppy_eject(&swd->unit[drive]); |
317 | |
318 | - iounmap(swd->base); |
319 | - |
320 | res = platform_get_resource(dev, IORESOURCE_MEM, 0); |
321 | if (res) |
322 | release_mem_region(res->start, resource_size(res)); |
323 | diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c |
324 | index af51015d056e..469541c1e51e 100644 |
325 | --- a/drivers/block/swim3.c |
326 | +++ b/drivers/block/swim3.c |
327 | @@ -148,7 +148,7 @@ struct swim3 { |
328 | #define MOTOR_ON 2 |
329 | #define RELAX 3 /* also eject in progress */ |
330 | #define READ_DATA_0 4 |
331 | -#define TWOMEG_DRIVE 5 |
332 | +#define ONEMEG_DRIVE 5 |
333 | #define SINGLE_SIDED 6 /* drive or diskette is 4MB type? */ |
334 | #define DRIVE_PRESENT 7 |
335 | #define DISK_IN 8 |
336 | @@ -156,9 +156,9 @@ struct swim3 { |
337 | #define TRACK_ZERO 10 |
338 | #define TACHO 11 |
339 | #define READ_DATA_1 12 |
340 | -#define MFM_MODE 13 |
341 | +#define GCR_MODE 13 |
342 | #define SEEK_COMPLETE 14 |
343 | -#define ONEMEG_MEDIA 15 |
344 | +#define TWOMEG_MEDIA 15 |
345 | |
346 | /* Definitions of values used in writing and formatting */ |
347 | #define DATA_ESCAPE 0x99 |
348 | diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c |
349 | index e36d160c458f..5f7d86509f2f 100644 |
350 | --- a/drivers/cdrom/cdrom.c |
351 | +++ b/drivers/cdrom/cdrom.c |
352 | @@ -2374,7 +2374,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi, |
353 | if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT) |
354 | return media_changed(cdi, 1); |
355 | |
356 | - if ((unsigned int)arg >= cdi->capacity) |
357 | + if (arg >= cdi->capacity) |
358 | return -EINVAL; |
359 | |
360 | info = kmalloc(sizeof(*info), GFP_KERNEL); |
361 | diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c |
362 | index 248c04090dea..255db6fe15c8 100644 |
363 | --- a/drivers/char/tpm/tpm-interface.c |
364 | +++ b/drivers/char/tpm/tpm-interface.c |
365 | @@ -369,20 +369,40 @@ static int tpm_validate_command(struct tpm_chip *chip, |
366 | return -EINVAL; |
367 | } |
368 | |
369 | -/** |
370 | - * tmp_transmit - Internal kernel interface to transmit TPM commands. |
371 | - * |
372 | - * @chip: TPM chip to use |
373 | - * @buf: TPM command buffer |
374 | - * @bufsiz: length of the TPM command buffer |
375 | - * @flags: tpm transmit flags - bitmap |
376 | - * |
377 | - * Return: |
378 | - * 0 when the operation is successful. |
379 | - * A negative number for system errors (errno). |
380 | - */ |
381 | -ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, |
382 | - u8 *buf, size_t bufsiz, unsigned int flags) |
383 | +static int tpm_request_locality(struct tpm_chip *chip) |
384 | +{ |
385 | + int rc; |
386 | + |
387 | + if (!chip->ops->request_locality) |
388 | + return 0; |
389 | + |
390 | + rc = chip->ops->request_locality(chip, 0); |
391 | + if (rc < 0) |
392 | + return rc; |
393 | + |
394 | + chip->locality = rc; |
395 | + |
396 | + return 0; |
397 | +} |
398 | + |
399 | +static void tpm_relinquish_locality(struct tpm_chip *chip) |
400 | +{ |
401 | + int rc; |
402 | + |
403 | + if (!chip->ops->relinquish_locality) |
404 | + return; |
405 | + |
406 | + rc = chip->ops->relinquish_locality(chip, chip->locality); |
407 | + if (rc) |
408 | + dev_err(&chip->dev, "%s: : error %d\n", __func__, rc); |
409 | + |
410 | + chip->locality = -1; |
411 | +} |
412 | + |
413 | +static ssize_t tpm_try_transmit(struct tpm_chip *chip, |
414 | + struct tpm_space *space, |
415 | + u8 *buf, size_t bufsiz, |
416 | + unsigned int flags) |
417 | { |
418 | struct tpm_output_header *header = (void *)buf; |
419 | int rc; |
420 | @@ -422,8 +442,6 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, |
421 | if (!(flags & TPM_TRANSMIT_UNLOCKED)) |
422 | mutex_lock(&chip->tpm_mutex); |
423 | |
424 | - if (chip->dev.parent) |
425 | - pm_runtime_get_sync(chip->dev.parent); |
426 | |
427 | if (chip->ops->clk_enable != NULL) |
428 | chip->ops->clk_enable(chip, true); |
429 | @@ -431,14 +449,15 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, |
430 | /* Store the decision as chip->locality will be changed. */ |
431 | need_locality = chip->locality == -1; |
432 | |
433 | - if (!(flags & TPM_TRANSMIT_RAW) && |
434 | - need_locality && chip->ops->request_locality) { |
435 | - rc = chip->ops->request_locality(chip, 0); |
436 | + if (!(flags & TPM_TRANSMIT_RAW) && need_locality) { |
437 | + rc = tpm_request_locality(chip); |
438 | if (rc < 0) |
439 | goto out_no_locality; |
440 | - chip->locality = rc; |
441 | } |
442 | |
443 | + if (chip->dev.parent) |
444 | + pm_runtime_get_sync(chip->dev.parent); |
445 | + |
446 | rc = tpm2_prepare_space(chip, space, ordinal, buf); |
447 | if (rc) |
448 | goto out; |
449 | @@ -499,27 +518,83 @@ ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, |
450 | rc = tpm2_commit_space(chip, space, ordinal, buf, &len); |
451 | |
452 | out: |
453 | - if (need_locality && chip->ops->relinquish_locality) { |
454 | - chip->ops->relinquish_locality(chip, chip->locality); |
455 | - chip->locality = -1; |
456 | - } |
457 | + if (chip->dev.parent) |
458 | + pm_runtime_put_sync(chip->dev.parent); |
459 | + |
460 | + if (need_locality) |
461 | + tpm_relinquish_locality(chip); |
462 | + |
463 | out_no_locality: |
464 | if (chip->ops->clk_enable != NULL) |
465 | chip->ops->clk_enable(chip, false); |
466 | |
467 | - if (chip->dev.parent) |
468 | - pm_runtime_put_sync(chip->dev.parent); |
469 | - |
470 | if (!(flags & TPM_TRANSMIT_UNLOCKED)) |
471 | mutex_unlock(&chip->tpm_mutex); |
472 | return rc ? rc : len; |
473 | } |
474 | |
475 | /** |
476 | - * tmp_transmit_cmd - send a tpm command to the device |
477 | + * tpm_transmit - Internal kernel interface to transmit TPM commands. |
478 | + * |
479 | + * @chip: TPM chip to use |
480 | + * @space: tpm space |
481 | + * @buf: TPM command buffer |
482 | + * @bufsiz: length of the TPM command buffer |
483 | + * @flags: tpm transmit flags - bitmap |
484 | + * |
485 | + * A wrapper around tpm_try_transmit that handles TPM2_RC_RETRY |
486 | + * returns from the TPM and retransmits the command after a delay up |
487 | + * to a maximum wait of TPM2_DURATION_LONG. |
488 | + * |
489 | + * Note: TPM1 never returns TPM2_RC_RETRY so the retry logic is TPM2 |
490 | + * only |
491 | + * |
492 | + * Return: |
493 | + * the length of the return when the operation is successful. |
494 | + * A negative number for system errors (errno). |
495 | + */ |
496 | +ssize_t tpm_transmit(struct tpm_chip *chip, struct tpm_space *space, |
497 | + u8 *buf, size_t bufsiz, unsigned int flags) |
498 | +{ |
499 | + struct tpm_output_header *header = (struct tpm_output_header *)buf; |
500 | + /* space for header and handles */ |
501 | + u8 save[TPM_HEADER_SIZE + 3*sizeof(u32)]; |
502 | + unsigned int delay_msec = TPM2_DURATION_SHORT; |
503 | + u32 rc = 0; |
504 | + ssize_t ret; |
505 | + const size_t save_size = min(space ? sizeof(save) : TPM_HEADER_SIZE, |
506 | + bufsiz); |
507 | + |
508 | + /* |
509 | + * Subtlety here: if we have a space, the handles will be |
510 | + * transformed, so when we restore the header we also have to |
511 | + * restore the handles. |
512 | + */ |
513 | + memcpy(save, buf, save_size); |
514 | + |
515 | + for (;;) { |
516 | + ret = tpm_try_transmit(chip, space, buf, bufsiz, flags); |
517 | + if (ret < 0) |
518 | + break; |
519 | + rc = be32_to_cpu(header->return_code); |
520 | + if (rc != TPM2_RC_RETRY) |
521 | + break; |
522 | + delay_msec *= 2; |
523 | + if (delay_msec > TPM2_DURATION_LONG) { |
524 | + dev_err(&chip->dev, "TPM is in retry loop\n"); |
525 | + break; |
526 | + } |
527 | + tpm_msleep(delay_msec); |
528 | + memcpy(buf, save, save_size); |
529 | + } |
530 | + return ret; |
531 | +} |
532 | +/** |
533 | + * tpm_transmit_cmd - send a tpm command to the device |
534 | * The function extracts tpm out header return code |
535 | * |
536 | * @chip: TPM chip to use |
537 | + * @space: tpm space |
538 | * @buf: TPM command buffer |
539 | * @bufsiz: length of the buffer |
540 | * @min_rsp_body_length: minimum expected length of response body |
541 | diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h |
542 | index f895fba4e20d..d73f3fb81b42 100644 |
543 | --- a/drivers/char/tpm/tpm.h |
544 | +++ b/drivers/char/tpm/tpm.h |
545 | @@ -108,6 +108,7 @@ enum tpm2_return_codes { |
546 | TPM2_RC_COMMAND_CODE = 0x0143, |
547 | TPM2_RC_TESTING = 0x090A, /* RC_WARN */ |
548 | TPM2_RC_REFERENCE_H0 = 0x0910, |
549 | + TPM2_RC_RETRY = 0x0922, |
550 | }; |
551 | |
552 | enum tpm2_algorithms { |
553 | diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c |
554 | index 7b3c2a8aa9de..497edd9848cd 100644 |
555 | --- a/drivers/char/tpm/tpm_crb.c |
556 | +++ b/drivers/char/tpm/tpm_crb.c |
557 | @@ -112,6 +112,25 @@ struct tpm2_crb_smc { |
558 | u32 smc_func_id; |
559 | }; |
560 | |
561 | +static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, |
562 | + unsigned long timeout) |
563 | +{ |
564 | + ktime_t start; |
565 | + ktime_t stop; |
566 | + |
567 | + start = ktime_get(); |
568 | + stop = ktime_add(start, ms_to_ktime(timeout)); |
569 | + |
570 | + do { |
571 | + if ((ioread32(reg) & mask) == value) |
572 | + return true; |
573 | + |
574 | + usleep_range(50, 100); |
575 | + } while (ktime_before(ktime_get(), stop)); |
576 | + |
577 | + return ((ioread32(reg) & mask) == value); |
578 | +} |
579 | + |
580 | /** |
581 | * crb_go_idle - request tpm crb device to go the idle state |
582 | * |
583 | @@ -128,7 +147,7 @@ struct tpm2_crb_smc { |
584 | * |
585 | * Return: 0 always |
586 | */ |
587 | -static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv) |
588 | +static int crb_go_idle(struct device *dev, struct crb_priv *priv) |
589 | { |
590 | if ((priv->sm == ACPI_TPM2_START_METHOD) || |
591 | (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || |
592 | @@ -136,30 +155,17 @@ static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv) |
593 | return 0; |
594 | |
595 | iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); |
596 | - /* we don't really care when this settles */ |
597 | |
598 | + if (!crb_wait_for_reg_32(&priv->regs_t->ctrl_req, |
599 | + CRB_CTRL_REQ_GO_IDLE/* mask */, |
600 | + 0, /* value */ |
601 | + TPM2_TIMEOUT_C)) { |
602 | + dev_warn(dev, "goIdle timed out\n"); |
603 | + return -ETIME; |
604 | + } |
605 | return 0; |
606 | } |
607 | |
608 | -static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, |
609 | - unsigned long timeout) |
610 | -{ |
611 | - ktime_t start; |
612 | - ktime_t stop; |
613 | - |
614 | - start = ktime_get(); |
615 | - stop = ktime_add(start, ms_to_ktime(timeout)); |
616 | - |
617 | - do { |
618 | - if ((ioread32(reg) & mask) == value) |
619 | - return true; |
620 | - |
621 | - usleep_range(50, 100); |
622 | - } while (ktime_before(ktime_get(), stop)); |
623 | - |
624 | - return false; |
625 | -} |
626 | - |
627 | /** |
628 | * crb_cmd_ready - request tpm crb device to enter ready state |
629 | * |
630 | @@ -175,8 +181,7 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, |
631 | * |
632 | * Return: 0 on success -ETIME on timeout; |
633 | */ |
634 | -static int __maybe_unused crb_cmd_ready(struct device *dev, |
635 | - struct crb_priv *priv) |
636 | +static int crb_cmd_ready(struct device *dev, struct crb_priv *priv) |
637 | { |
638 | if ((priv->sm == ACPI_TPM2_START_METHOD) || |
639 | (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || |
640 | @@ -195,11 +200,11 @@ static int __maybe_unused crb_cmd_ready(struct device *dev, |
641 | return 0; |
642 | } |
643 | |
644 | -static int crb_request_locality(struct tpm_chip *chip, int loc) |
645 | +static int __crb_request_locality(struct device *dev, |
646 | + struct crb_priv *priv, int loc) |
647 | { |
648 | - struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
649 | u32 value = CRB_LOC_STATE_LOC_ASSIGNED | |
650 | - CRB_LOC_STATE_TPM_REG_VALID_STS; |
651 | + CRB_LOC_STATE_TPM_REG_VALID_STS; |
652 | |
653 | if (!priv->regs_h) |
654 | return 0; |
655 | @@ -207,21 +212,45 @@ static int crb_request_locality(struct tpm_chip *chip, int loc) |
656 | iowrite32(CRB_LOC_CTRL_REQUEST_ACCESS, &priv->regs_h->loc_ctrl); |
657 | if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, value, value, |
658 | TPM2_TIMEOUT_C)) { |
659 | - dev_warn(&chip->dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); |
660 | + dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); |
661 | return -ETIME; |
662 | } |
663 | |
664 | return 0; |
665 | } |
666 | |
667 | -static void crb_relinquish_locality(struct tpm_chip *chip, int loc) |
668 | +static int crb_request_locality(struct tpm_chip *chip, int loc) |
669 | { |
670 | struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
671 | |
672 | + return __crb_request_locality(&chip->dev, priv, loc); |
673 | +} |
674 | + |
675 | +static int __crb_relinquish_locality(struct device *dev, |
676 | + struct crb_priv *priv, int loc) |
677 | +{ |
678 | + u32 mask = CRB_LOC_STATE_LOC_ASSIGNED | |
679 | + CRB_LOC_STATE_TPM_REG_VALID_STS; |
680 | + u32 value = CRB_LOC_STATE_TPM_REG_VALID_STS; |
681 | + |
682 | if (!priv->regs_h) |
683 | - return; |
684 | + return 0; |
685 | |
686 | iowrite32(CRB_LOC_CTRL_RELINQUISH, &priv->regs_h->loc_ctrl); |
687 | + if (!crb_wait_for_reg_32(&priv->regs_h->loc_state, mask, value, |
688 | + TPM2_TIMEOUT_C)) { |
689 | + dev_warn(dev, "TPM_LOC_STATE_x.requestAccess timed out\n"); |
690 | + return -ETIME; |
691 | + } |
692 | + |
693 | + return 0; |
694 | +} |
695 | + |
696 | +static int crb_relinquish_locality(struct tpm_chip *chip, int loc) |
697 | +{ |
698 | + struct crb_priv *priv = dev_get_drvdata(&chip->dev); |
699 | + |
700 | + return __crb_relinquish_locality(&chip->dev, priv, loc); |
701 | } |
702 | |
703 | static u8 crb_status(struct tpm_chip *chip) |
704 | @@ -475,6 +504,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, |
705 | dev_warn(dev, FW_BUG "Bad ACPI memory layout"); |
706 | } |
707 | |
708 | + ret = __crb_request_locality(dev, priv, 0); |
709 | + if (ret) |
710 | + return ret; |
711 | + |
712 | priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address, |
713 | sizeof(struct crb_regs_tail)); |
714 | if (IS_ERR(priv->regs_t)) |
715 | @@ -531,6 +564,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, |
716 | |
717 | crb_go_idle(dev, priv); |
718 | |
719 | + __crb_relinquish_locality(dev, priv, 0); |
720 | + |
721 | return ret; |
722 | } |
723 | |
724 | @@ -588,10 +623,14 @@ static int crb_acpi_add(struct acpi_device *device) |
725 | chip->acpi_dev_handle = device->handle; |
726 | chip->flags = TPM_CHIP_FLAG_TPM2; |
727 | |
728 | - rc = crb_cmd_ready(dev, priv); |
729 | + rc = __crb_request_locality(dev, priv, 0); |
730 | if (rc) |
731 | return rc; |
732 | |
733 | + rc = crb_cmd_ready(dev, priv); |
734 | + if (rc) |
735 | + goto out; |
736 | + |
737 | pm_runtime_get_noresume(dev); |
738 | pm_runtime_set_active(dev); |
739 | pm_runtime_enable(dev); |
740 | @@ -601,12 +640,15 @@ static int crb_acpi_add(struct acpi_device *device) |
741 | crb_go_idle(dev, priv); |
742 | pm_runtime_put_noidle(dev); |
743 | pm_runtime_disable(dev); |
744 | - return rc; |
745 | + goto out; |
746 | } |
747 | |
748 | - pm_runtime_put(dev); |
749 | + pm_runtime_put_sync(dev); |
750 | |
751 | - return 0; |
752 | +out: |
753 | + __crb_relinquish_locality(dev, priv, 0); |
754 | + |
755 | + return rc; |
756 | } |
757 | |
758 | static int crb_acpi_remove(struct acpi_device *device) |
759 | diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c |
760 | index da074e3db19b..5a1f47b43947 100644 |
761 | --- a/drivers/char/tpm/tpm_tis_core.c |
762 | +++ b/drivers/char/tpm/tpm_tis_core.c |
763 | @@ -143,11 +143,13 @@ static bool check_locality(struct tpm_chip *chip, int l) |
764 | return false; |
765 | } |
766 | |
767 | -static void release_locality(struct tpm_chip *chip, int l) |
768 | +static int release_locality(struct tpm_chip *chip, int l) |
769 | { |
770 | struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); |
771 | |
772 | tpm_tis_write8(priv, TPM_ACCESS(l), TPM_ACCESS_ACTIVE_LOCALITY); |
773 | + |
774 | + return 0; |
775 | } |
776 | |
777 | static int request_locality(struct tpm_chip *chip, int l) |
778 | diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c |
779 | index a38db40ce990..b2447ee3b245 100644 |
780 | --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c |
781 | +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c |
782 | @@ -1637,6 +1637,8 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) |
783 | * (and possibly on the platform). So far only i.MX6Q (v1.30a) and |
784 | * i.MX6DL (v1.31a) have been identified as needing the workaround, with |
785 | * 4 and 1 iterations respectively. |
786 | + * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing |
787 | + * the workaround with a single iteration. |
788 | */ |
789 | |
790 | switch (hdmi->version) { |
791 | @@ -1644,6 +1646,7 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi) |
792 | count = 4; |
793 | break; |
794 | case 0x131a: |
795 | + case 0x201a: |
796 | count = 1; |
797 | break; |
798 | default: |
799 | diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c |
800 | index 051a72eecb24..d2cc55e21374 100644 |
801 | --- a/drivers/hwmon/k10temp.c |
802 | +++ b/drivers/hwmon/k10temp.c |
803 | @@ -40,6 +40,10 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); |
804 | #define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 |
805 | #endif |
806 | |
807 | +#ifndef PCI_DEVICE_ID_AMD_17H_RR_NB |
808 | +#define PCI_DEVICE_ID_AMD_17H_RR_NB 0x15d0 |
809 | +#endif |
810 | + |
811 | /* CPUID function 0x80000001, ebx */ |
812 | #define CPUID_PKGTYPE_MASK 0xf0000000 |
813 | #define CPUID_PKGTYPE_F 0x00000000 |
814 | @@ -72,6 +76,7 @@ struct k10temp_data { |
815 | struct pci_dev *pdev; |
816 | void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); |
817 | int temp_offset; |
818 | + u32 temp_adjust_mask; |
819 | }; |
820 | |
821 | struct tctl_offset { |
822 | @@ -84,6 +89,7 @@ static const struct tctl_offset tctl_offset_table[] = { |
823 | { 0x17, "AMD Ryzen 5 1600X", 20000 }, |
824 | { 0x17, "AMD Ryzen 7 1700X", 20000 }, |
825 | { 0x17, "AMD Ryzen 7 1800X", 20000 }, |
826 | + { 0x17, "AMD Ryzen 7 2700X", 10000 }, |
827 | { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, |
828 | { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, |
829 | { 0x17, "AMD Ryzen Threadripper 1900X", 27000 }, |
830 | @@ -129,6 +135,8 @@ static ssize_t temp1_input_show(struct device *dev, |
831 | |
832 | data->read_tempreg(data->pdev, ®val); |
833 | temp = (regval >> 21) * 125; |
834 | + if (regval & data->temp_adjust_mask) |
835 | + temp -= 49000; |
836 | if (temp > data->temp_offset) |
837 | temp -= data->temp_offset; |
838 | else |
839 | @@ -259,12 +267,14 @@ static int k10temp_probe(struct pci_dev *pdev, |
840 | data->pdev = pdev; |
841 | |
842 | if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || |
843 | - boot_cpu_data.x86_model == 0x70)) |
844 | + boot_cpu_data.x86_model == 0x70)) { |
845 | data->read_tempreg = read_tempreg_nb_f15; |
846 | - else if (boot_cpu_data.x86 == 0x17) |
847 | + } else if (boot_cpu_data.x86 == 0x17) { |
848 | + data->temp_adjust_mask = 0x80000; |
849 | data->read_tempreg = read_tempreg_nb_f17; |
850 | - else |
851 | + } else { |
852 | data->read_tempreg = read_tempreg_pci; |
853 | + } |
854 | |
855 | for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { |
856 | const struct tctl_offset *entry = &tctl_offset_table[i]; |
857 | @@ -292,6 +302,7 @@ static const struct pci_device_id k10temp_id_table[] = { |
858 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, |
859 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, |
860 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, |
861 | + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_RR_NB) }, |
862 | {} |
863 | }; |
864 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); |
865 | diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c |
866 | index 439ee9c5f535..c59b5da85321 100644 |
867 | --- a/drivers/message/fusion/mptsas.c |
868 | +++ b/drivers/message/fusion/mptsas.c |
869 | @@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = { |
870 | .cmd_per_lun = 7, |
871 | .use_clustering = ENABLE_CLUSTERING, |
872 | .shost_attrs = mptscsih_host_attrs, |
873 | + .no_write_same = 1, |
874 | }; |
875 | |
876 | static int mptsas_get_linkerrors(struct sas_phy *phy) |
877 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
878 | index b7b113018853..718e4914e3a0 100644 |
879 | --- a/drivers/net/bonding/bond_main.c |
880 | +++ b/drivers/net/bonding/bond_main.c |
881 | @@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, |
882 | } /* switch(bond_mode) */ |
883 | |
884 | #ifdef CONFIG_NET_POLL_CONTROLLER |
885 | - slave_dev->npinfo = bond->dev->npinfo; |
886 | - if (slave_dev->npinfo) { |
887 | + if (bond->dev->npinfo) { |
888 | if (slave_enable_netpoll(new_slave)) { |
889 | netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); |
890 | res = -EBUSY; |
891 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h |
892 | index 7ea72ef11a55..d272dc6984ac 100644 |
893 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h |
894 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h |
895 | @@ -1321,6 +1321,10 @@ |
896 | #define MDIO_VEND2_AN_STAT 0x8002 |
897 | #endif |
898 | |
899 | +#ifndef MDIO_VEND2_PMA_CDR_CONTROL |
900 | +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 |
901 | +#endif |
902 | + |
903 | #ifndef MDIO_CTRL1_SPEED1G |
904 | #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) |
905 | #endif |
906 | @@ -1369,6 +1373,10 @@ |
907 | #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 |
908 | #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 |
909 | |
910 | +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 |
911 | +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 |
912 | +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01 |
913 | + |
914 | /* Bit setting and getting macros |
915 | * The get macro will extract the current bit field value from within |
916 | * the variable |
917 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c |
918 | index 7d128be61310..b91143947ed2 100644 |
919 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c |
920 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c |
921 | @@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) |
922 | "debugfs_create_file failed\n"); |
923 | } |
924 | |
925 | + if (pdata->vdata->an_cdr_workaround) { |
926 | + pfile = debugfs_create_bool("an_cdr_workaround", 0600, |
927 | + pdata->xgbe_debugfs, |
928 | + &pdata->debugfs_an_cdr_workaround); |
929 | + if (!pfile) |
930 | + netdev_err(pdata->netdev, |
931 | + "debugfs_create_bool failed\n"); |
932 | + |
933 | + pfile = debugfs_create_bool("an_cdr_track_early", 0600, |
934 | + pdata->xgbe_debugfs, |
935 | + &pdata->debugfs_an_cdr_track_early); |
936 | + if (!pfile) |
937 | + netdev_err(pdata->netdev, |
938 | + "debugfs_create_bool failed\n"); |
939 | + } |
940 | + |
941 | kfree(buf); |
942 | } |
943 | |
944 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c |
945 | index d91fa595be98..e31d9d1fb6a6 100644 |
946 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c |
947 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c |
948 | @@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) |
949 | XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); |
950 | |
951 | /* Call MDIO/PHY initialization routine */ |
952 | + pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround; |
953 | ret = pdata->phy_if.phy_init(pdata); |
954 | if (ret) |
955 | return ret; |
956 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c |
957 | index 072b9f664597..1b45cd73a258 100644 |
958 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c |
959 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c |
960 | @@ -432,11 +432,16 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata) |
961 | xgbe_an73_set(pdata, false, false); |
962 | xgbe_an73_disable_interrupts(pdata); |
963 | |
964 | + pdata->an_start = 0; |
965 | + |
966 | netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); |
967 | } |
968 | |
969 | static void xgbe_an_restart(struct xgbe_prv_data *pdata) |
970 | { |
971 | + if (pdata->phy_if.phy_impl.an_pre) |
972 | + pdata->phy_if.phy_impl.an_pre(pdata); |
973 | + |
974 | switch (pdata->an_mode) { |
975 | case XGBE_AN_MODE_CL73: |
976 | case XGBE_AN_MODE_CL73_REDRV: |
977 | @@ -453,6 +458,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata) |
978 | |
979 | static void xgbe_an_disable(struct xgbe_prv_data *pdata) |
980 | { |
981 | + if (pdata->phy_if.phy_impl.an_post) |
982 | + pdata->phy_if.phy_impl.an_post(pdata); |
983 | + |
984 | switch (pdata->an_mode) { |
985 | case XGBE_AN_MODE_CL73: |
986 | case XGBE_AN_MODE_CL73_REDRV: |
987 | @@ -505,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, |
988 | XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, |
989 | reg); |
990 | |
991 | - if (pdata->phy_if.phy_impl.kr_training_post) |
992 | - pdata->phy_if.phy_impl.kr_training_post(pdata); |
993 | - |
994 | netif_dbg(pdata, link, pdata->netdev, |
995 | "KR training initiated\n"); |
996 | + |
997 | + if (pdata->phy_if.phy_impl.kr_training_post) |
998 | + pdata->phy_if.phy_impl.kr_training_post(pdata); |
999 | } |
1000 | |
1001 | return XGBE_AN_PAGE_RECEIVED; |
1002 | @@ -637,11 +645,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) |
1003 | return XGBE_AN_NO_LINK; |
1004 | } |
1005 | |
1006 | - xgbe_an73_disable(pdata); |
1007 | + xgbe_an_disable(pdata); |
1008 | |
1009 | xgbe_switch_mode(pdata); |
1010 | |
1011 | - xgbe_an73_restart(pdata); |
1012 | + xgbe_an_restart(pdata); |
1013 | |
1014 | return XGBE_AN_INCOMPAT_LINK; |
1015 | } |
1016 | @@ -820,6 +828,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) |
1017 | pdata->an_result = pdata->an_state; |
1018 | pdata->an_state = XGBE_AN_READY; |
1019 | |
1020 | + if (pdata->phy_if.phy_impl.an_post) |
1021 | + pdata->phy_if.phy_impl.an_post(pdata); |
1022 | + |
1023 | netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", |
1024 | xgbe_state_as_string(pdata->an_result)); |
1025 | } |
1026 | @@ -903,6 +914,9 @@ static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata) |
1027 | pdata->kx_state = XGBE_RX_BPA; |
1028 | pdata->an_start = 0; |
1029 | |
1030 | + if (pdata->phy_if.phy_impl.an_post) |
1031 | + pdata->phy_if.phy_impl.an_post(pdata); |
1032 | + |
1033 | netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", |
1034 | xgbe_state_as_string(pdata->an_result)); |
1035 | } |
1036 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c |
1037 | index eb23f9ba1a9a..82d1f416ee2a 100644 |
1038 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c |
1039 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c |
1040 | @@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = { |
1041 | .irq_reissue_support = 1, |
1042 | .tx_desc_prefetch = 5, |
1043 | .rx_desc_prefetch = 5, |
1044 | + .an_cdr_workaround = 1, |
1045 | }; |
1046 | |
1047 | static const struct xgbe_version_data xgbe_v2b = { |
1048 | @@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = { |
1049 | .irq_reissue_support = 1, |
1050 | .tx_desc_prefetch = 5, |
1051 | .rx_desc_prefetch = 5, |
1052 | + .an_cdr_workaround = 1, |
1053 | }; |
1054 | |
1055 | static const struct pci_device_id xgbe_pci_table[] = { |
1056 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c |
1057 | index 3304a291aa96..aac884314000 100644 |
1058 | --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c |
1059 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c |
1060 | @@ -147,6 +147,14 @@ |
1061 | /* Rate-change complete wait/retry count */ |
1062 | #define XGBE_RATECHANGE_COUNT 500 |
1063 | |
1064 | +/* CDR delay values for KR support (in usec) */ |
1065 | +#define XGBE_CDR_DELAY_INIT 10000 |
1066 | +#define XGBE_CDR_DELAY_INC 10000 |
1067 | +#define XGBE_CDR_DELAY_MAX 100000 |
1068 | + |
1069 | +/* RRC frequency during link status check */ |
1070 | +#define XGBE_RRC_FREQUENCY 10 |
1071 | + |
1072 | enum xgbe_port_mode { |
1073 | XGBE_PORT_MODE_RSVD = 0, |
1074 | XGBE_PORT_MODE_BACKPLANE, |
1075 | @@ -245,6 +253,10 @@ enum xgbe_sfp_speed { |
1076 | #define XGBE_SFP_BASE_VENDOR_SN 4 |
1077 | #define XGBE_SFP_BASE_VENDOR_SN_LEN 16 |
1078 | |
1079 | +#define XGBE_SFP_EXTD_OPT1 1 |
1080 | +#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1) |
1081 | +#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3) |
1082 | + |
1083 | #define XGBE_SFP_EXTD_DIAG 28 |
1084 | #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) |
1085 | |
1086 | @@ -324,6 +336,7 @@ struct xgbe_phy_data { |
1087 | |
1088 | unsigned int sfp_gpio_address; |
1089 | unsigned int sfp_gpio_mask; |
1090 | + unsigned int sfp_gpio_inputs; |
1091 | unsigned int sfp_gpio_rx_los; |
1092 | unsigned int sfp_gpio_tx_fault; |
1093 | unsigned int sfp_gpio_mod_absent; |
1094 | @@ -355,6 +368,10 @@ struct xgbe_phy_data { |
1095 | unsigned int redrv_addr; |
1096 | unsigned int redrv_lane; |
1097 | unsigned int redrv_model; |
1098 | + |
1099 | + /* KR AN support */ |
1100 | + unsigned int phy_cdr_notrack; |
1101 | + unsigned int phy_cdr_delay; |
1102 | }; |
1103 | |
1104 | /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ |
1105 | @@ -974,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) |
1106 | phy_data->sfp_phy_avail = 1; |
1107 | } |
1108 | |
1109 | +static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) |
1110 | +{ |
1111 | + u8 *sfp_extd = phy_data->sfp_eeprom.extd; |
1112 | + |
1113 | + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) |
1114 | + return false; |
1115 | + |
1116 | + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) |
1117 | + return false; |
1118 | + |
1119 | + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) |
1120 | + return true; |
1121 | + |
1122 | + return false; |
1123 | +} |
1124 | + |
1125 | +static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) |
1126 | +{ |
1127 | + u8 *sfp_extd = phy_data->sfp_eeprom.extd; |
1128 | + |
1129 | + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) |
1130 | + return false; |
1131 | + |
1132 | + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) |
1133 | + return false; |
1134 | + |
1135 | + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) |
1136 | + return true; |
1137 | + |
1138 | + return false; |
1139 | +} |
1140 | + |
1141 | +static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) |
1142 | +{ |
1143 | + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) |
1144 | + return false; |
1145 | + |
1146 | + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent)) |
1147 | + return true; |
1148 | + |
1149 | + return false; |
1150 | +} |
1151 | + |
1152 | static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) |
1153 | { |
1154 | struct xgbe_phy_data *phy_data = pdata->phy_data; |
1155 | @@ -1019,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) |
1156 | if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) |
1157 | return; |
1158 | |
1159 | + /* Update transceiver signals (eeprom extd/options) */ |
1160 | + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); |
1161 | + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); |
1162 | + |
1163 | if (xgbe_phy_sfp_parse_quirks(pdata)) |
1164 | return; |
1165 | |
1166 | @@ -1184,7 +1248,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata) |
1167 | static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) |
1168 | { |
1169 | struct xgbe_phy_data *phy_data = pdata->phy_data; |
1170 | - unsigned int gpio_input; |
1171 | u8 gpio_reg, gpio_ports[2]; |
1172 | int ret; |
1173 | |
1174 | @@ -1199,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) |
1175 | return; |
1176 | } |
1177 | |
1178 | - gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; |
1179 | - |
1180 | - if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) { |
1181 | - /* No GPIO, just assume the module is present for now */ |
1182 | - phy_data->sfp_mod_absent = 0; |
1183 | - } else { |
1184 | - if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) |
1185 | - phy_data->sfp_mod_absent = 0; |
1186 | - } |
1187 | - |
1188 | - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) && |
1189 | - (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) |
1190 | - phy_data->sfp_rx_los = 1; |
1191 | + phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0]; |
1192 | |
1193 | - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) && |
1194 | - (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) |
1195 | - phy_data->sfp_tx_fault = 1; |
1196 | + phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data); |
1197 | } |
1198 | |
1199 | static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) |
1200 | @@ -2361,7 +2410,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) |
1201 | return 1; |
1202 | |
1203 | /* No link, attempt a receiver reset cycle */ |
1204 | - if (phy_data->rrc_count++) { |
1205 | + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { |
1206 | phy_data->rrc_count = 0; |
1207 | xgbe_phy_rrc(pdata); |
1208 | } |
1209 | @@ -2669,6 +2718,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) |
1210 | return true; |
1211 | } |
1212 | |
1213 | +static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) |
1214 | +{ |
1215 | + struct xgbe_phy_data *phy_data = pdata->phy_data; |
1216 | + |
1217 | + if (!pdata->debugfs_an_cdr_workaround) |
1218 | + return; |
1219 | + |
1220 | + if (!phy_data->phy_cdr_notrack) |
1221 | + return; |
1222 | + |
1223 | + usleep_range(phy_data->phy_cdr_delay, |
1224 | + phy_data->phy_cdr_delay + 500); |
1225 | + |
1226 | + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, |
1227 | + XGBE_PMA_CDR_TRACK_EN_MASK, |
1228 | + XGBE_PMA_CDR_TRACK_EN_ON); |
1229 | + |
1230 | + phy_data->phy_cdr_notrack = 0; |
1231 | +} |
1232 | + |
1233 | +static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) |
1234 | +{ |
1235 | + struct xgbe_phy_data *phy_data = pdata->phy_data; |
1236 | + |
1237 | + if (!pdata->debugfs_an_cdr_workaround) |
1238 | + return; |
1239 | + |
1240 | + if (phy_data->phy_cdr_notrack) |
1241 | + return; |
1242 | + |
1243 | + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, |
1244 | + XGBE_PMA_CDR_TRACK_EN_MASK, |
1245 | + XGBE_PMA_CDR_TRACK_EN_OFF); |
1246 | + |
1247 | + xgbe_phy_rrc(pdata); |
1248 | + |
1249 | + phy_data->phy_cdr_notrack = 1; |
1250 | +} |
1251 | + |
1252 | +static void xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) |
1253 | +{ |
1254 | + if (!pdata->debugfs_an_cdr_track_early) |
1255 | + xgbe_phy_cdr_track(pdata); |
1256 | +} |
1257 | + |
1258 | +static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) |
1259 | +{ |
1260 | + if (pdata->debugfs_an_cdr_track_early) |
1261 | + xgbe_phy_cdr_track(pdata); |
1262 | +} |
1263 | + |
1264 | +static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) |
1265 | +{ |
1266 | + struct xgbe_phy_data *phy_data = pdata->phy_data; |
1267 | + |
1268 | + switch (pdata->an_mode) { |
1269 | + case XGBE_AN_MODE_CL73: |
1270 | + case XGBE_AN_MODE_CL73_REDRV: |
1271 | + if (phy_data->cur_mode != XGBE_MODE_KR) |
1272 | + break; |
1273 | + |
1274 | + xgbe_phy_cdr_track(pdata); |
1275 | + |
1276 | + switch (pdata->an_result) { |
1277 | + case XGBE_AN_READY: |
1278 | + case XGBE_AN_COMPLETE: |
1279 | + break; |
1280 | + default: |
1281 | + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) |
1282 | + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; |
1283 | + else |
1284 | + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; |
1285 | + break; |
1286 | + } |
1287 | + break; |
1288 | + default: |
1289 | + break; |
1290 | + } |
1291 | +} |
1292 | + |
1293 | +static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) |
1294 | +{ |
1295 | + struct xgbe_phy_data *phy_data = pdata->phy_data; |
1296 | + |
1297 | + switch (pdata->an_mode) { |
1298 | + case XGBE_AN_MODE_CL73: |
1299 | + case XGBE_AN_MODE_CL73_REDRV: |
1300 | + if (phy_data->cur_mode != XGBE_MODE_KR) |
1301 | + break; |
1302 | + |
1303 | + xgbe_phy_cdr_notrack(pdata); |
1304 | + break; |
1305 | + default: |
1306 | + break; |
1307 | + } |
1308 | +} |
1309 | + |
1310 | static void xgbe_phy_stop(struct xgbe_prv_data *pdata) |
1311 | { |
1312 | struct xgbe_phy_data *phy_data = pdata->phy_data; |
1313 | @@ -2680,6 +2826,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) |
1314 | xgbe_phy_sfp_reset(phy_data); |
1315 | xgbe_phy_sfp_mod_absent(pdata); |
1316 | |
1317 | + /* Reset CDR support */ |
1318 | + xgbe_phy_cdr_track(pdata); |
1319 | + |
1320 | /* Power off the PHY */ |
1321 | xgbe_phy_power_off(pdata); |
1322 | |
1323 | @@ -2712,6 +2861,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) |
1324 | /* Start in highest supported mode */ |
1325 | xgbe_phy_set_mode(pdata, phy_data->start_mode); |
1326 | |
1327 | + /* Reset CDR support */ |
1328 | + xgbe_phy_cdr_track(pdata); |
1329 | + |
1330 | /* After starting the I2C controller, we can check for an SFP */ |
1331 | switch (phy_data->port_mode) { |
1332 | case XGBE_PORT_MODE_SFP: |
1333 | @@ -3019,6 +3171,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) |
1334 | } |
1335 | } |
1336 | |
1337 | + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; |
1338 | + |
1339 | /* Register for driving external PHYs */ |
1340 | mii = devm_mdiobus_alloc(pdata->dev); |
1341 | if (!mii) { |
1342 | @@ -3071,4 +3225,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) |
1343 | phy_impl->an_advertising = xgbe_phy_an_advertising; |
1344 | |
1345 | phy_impl->an_outcome = xgbe_phy_an_outcome; |
1346 | + |
1347 | + phy_impl->an_pre = xgbe_phy_an_pre; |
1348 | + phy_impl->an_post = xgbe_phy_an_post; |
1349 | + |
1350 | + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; |
1351 | + phy_impl->kr_training_post = xgbe_phy_kr_training_post; |
1352 | } |
1353 | diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h |
1354 | index ad102c8bac7b..95d4b56448c6 100644 |
1355 | --- a/drivers/net/ethernet/amd/xgbe/xgbe.h |
1356 | +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h |
1357 | @@ -833,6 +833,7 @@ struct xgbe_hw_if { |
1358 | /* This structure represents implementation specific routines for an |
1359 | * implementation of a PHY. All routines are required unless noted below. |
1360 | * Optional routines: |
1361 | + * an_pre, an_post |
1362 | * kr_training_pre, kr_training_post |
1363 | */ |
1364 | struct xgbe_phy_impl_if { |
1365 | @@ -875,6 +876,10 @@ struct xgbe_phy_impl_if { |
1366 | /* Process results of auto-negotiation */ |
1367 | enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); |
1368 | |
1369 | + /* Pre/Post auto-negotiation support */ |
1370 | + void (*an_pre)(struct xgbe_prv_data *); |
1371 | + void (*an_post)(struct xgbe_prv_data *); |
1372 | + |
1373 | /* Pre/Post KR training enablement support */ |
1374 | void (*kr_training_pre)(struct xgbe_prv_data *); |
1375 | void (*kr_training_post)(struct xgbe_prv_data *); |
1376 | @@ -989,6 +994,7 @@ struct xgbe_version_data { |
1377 | unsigned int irq_reissue_support; |
1378 | unsigned int tx_desc_prefetch; |
1379 | unsigned int rx_desc_prefetch; |
1380 | + unsigned int an_cdr_workaround; |
1381 | }; |
1382 | |
1383 | struct xgbe_vxlan_data { |
1384 | @@ -1257,6 +1263,9 @@ struct xgbe_prv_data { |
1385 | unsigned int debugfs_xprop_reg; |
1386 | |
1387 | unsigned int debugfs_xi2c_reg; |
1388 | + |
1389 | + bool debugfs_an_cdr_workaround; |
1390 | + bool debugfs_an_cdr_track_early; |
1391 | }; |
1392 | |
1393 | /* Function prototypes*/ |
1394 | diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c |
1395 | index c96a92118b8b..32f6d2e24d66 100644 |
1396 | --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c |
1397 | +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c |
1398 | @@ -951,9 +951,11 @@ void aq_nic_shutdown(struct aq_nic_s *self) |
1399 | |
1400 | netif_device_detach(self->ndev); |
1401 | |
1402 | - err = aq_nic_stop(self); |
1403 | - if (err < 0) |
1404 | - goto err_exit; |
1405 | + if (netif_running(self->ndev)) { |
1406 | + err = aq_nic_stop(self); |
1407 | + if (err < 0) |
1408 | + goto err_exit; |
1409 | + } |
1410 | aq_nic_deinit(self); |
1411 | |
1412 | err_exit: |
1413 | diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c |
1414 | index d3b847ec7465..c58b2c227260 100644 |
1415 | --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c |
1416 | +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c |
1417 | @@ -48,6 +48,8 @@ |
1418 | #define FORCE_FLASHLESS 0 |
1419 | |
1420 | static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual); |
1421 | +static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self, |
1422 | + enum hal_atl_utils_fw_state_e state); |
1423 | |
1424 | int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops) |
1425 | { |
1426 | @@ -247,6 +249,20 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) |
1427 | |
1428 | self->rbl_enabled = (boot_exit_code != 0); |
1429 | |
1430 | + /* FW 1.x may bootup in an invalid POWER state (WOL feature). |
1431 | + * We should work around this by forcing its state back to DEINIT |
1432 | + */ |
1433 | + if (!hw_atl_utils_ver_match(HW_ATL_FW_VER_1X, |
1434 | + aq_hw_read_reg(self, |
1435 | + HW_ATL_MPI_FW_VERSION))) { |
1436 | + int err = 0; |
1437 | + |
1438 | + hw_atl_utils_mpi_set_state(self, MPI_DEINIT); |
1439 | + AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & |
1440 | + HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, |
1441 | + 10, 1000U); |
1442 | + } |
1443 | + |
1444 | if (self->rbl_enabled) |
1445 | return hw_atl_utils_soft_reset_rbl(self); |
1446 | else |
1447 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
1448 | index 1801582076be..9442605f4fd4 100644 |
1449 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
1450 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c |
1451 | @@ -1874,22 +1874,39 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen) |
1452 | return retval; |
1453 | } |
1454 | |
1455 | -static char *bnxt_get_pkgver(struct net_device *dev, char *buf, size_t buflen) |
1456 | +static void bnxt_get_pkgver(struct net_device *dev) |
1457 | { |
1458 | + struct bnxt *bp = netdev_priv(dev); |
1459 | u16 index = 0; |
1460 | - u32 datalen; |
1461 | + char *pkgver; |
1462 | + u32 pkglen; |
1463 | + u8 *pkgbuf; |
1464 | + int len; |
1465 | |
1466 | if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG, |
1467 | BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, |
1468 | - &index, NULL, &datalen) != 0) |
1469 | - return NULL; |
1470 | + &index, NULL, &pkglen) != 0) |
1471 | + return; |
1472 | |
1473 | - memset(buf, 0, buflen); |
1474 | - if (bnxt_get_nvram_item(dev, index, 0, datalen, buf) != 0) |
1475 | - return NULL; |
1476 | + pkgbuf = kzalloc(pkglen, GFP_KERNEL); |
1477 | + if (!pkgbuf) { |
1478 | + dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n", |
1479 | + pkglen); |
1480 | + return; |
1481 | + } |
1482 | + |
1483 | + if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf)) |
1484 | + goto err; |
1485 | |
1486 | - return bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, buf, |
1487 | - datalen); |
1488 | + pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf, |
1489 | + pkglen); |
1490 | + if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { |
1491 | + len = strlen(bp->fw_ver_str); |
1492 | + snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, |
1493 | + "/pkg %s", pkgver); |
1494 | + } |
1495 | +err: |
1496 | + kfree(pkgbuf); |
1497 | } |
1498 | |
1499 | static int bnxt_get_eeprom(struct net_device *dev, |
1500 | @@ -2558,22 +2575,10 @@ void bnxt_ethtool_init(struct bnxt *bp) |
1501 | struct hwrm_selftest_qlist_input req = {0}; |
1502 | struct bnxt_test_info *test_info; |
1503 | struct net_device *dev = bp->dev; |
1504 | - char *pkglog; |
1505 | int i, rc; |
1506 | |
1507 | - pkglog = kzalloc(BNX_PKG_LOG_MAX_LENGTH, GFP_KERNEL); |
1508 | - if (pkglog) { |
1509 | - char *pkgver; |
1510 | - int len; |
1511 | + bnxt_get_pkgver(dev); |
1512 | |
1513 | - pkgver = bnxt_get_pkgver(dev, pkglog, BNX_PKG_LOG_MAX_LENGTH); |
1514 | - if (pkgver && *pkgver != 0 && isdigit(*pkgver)) { |
1515 | - len = strlen(bp->fw_ver_str); |
1516 | - snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1, |
1517 | - "/pkg %s", pkgver); |
1518 | - } |
1519 | - kfree(pkglog); |
1520 | - } |
1521 | if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp)) |
1522 | return; |
1523 | |
1524 | diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h |
1525 | index 73f2249555b5..83444811d3c6 100644 |
1526 | --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h |
1527 | +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h |
1528 | @@ -59,8 +59,6 @@ enum bnxt_nvm_directory_type { |
1529 | #define BNX_DIR_ATTR_NO_CHKSUM (1 << 0) |
1530 | #define BNX_DIR_ATTR_PROP_STREAM (1 << 1) |
1531 | |
1532 | -#define BNX_PKG_LOG_MAX_LENGTH 4096 |
1533 | - |
1534 | enum bnxnvm_pkglog_field_index { |
1535 | BNX_PKG_LOG_FIELD_IDX_INSTALLED_TIMESTAMP = 0, |
1536 | BNX_PKG_LOG_FIELD_IDX_PKG_DESCRIPTION = 1, |
1537 | diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
1538 | index e9309fb9084b..21a21934e5bf 100644 |
1539 | --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
1540 | +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |
1541 | @@ -2889,6 +2889,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) |
1542 | int ret = 0; |
1543 | struct hlist_node *h; |
1544 | int bkt; |
1545 | + u8 i; |
1546 | |
1547 | /* validate the request */ |
1548 | if (vf_id >= pf->num_alloc_vfs) { |
1549 | @@ -2900,6 +2901,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) |
1550 | |
1551 | vf = &(pf->vf[vf_id]); |
1552 | vsi = pf->vsi[vf->lan_vsi_idx]; |
1553 | + |
1554 | + /* When the VF is resetting wait until it is done. |
1555 | + * It can take up to 200 milliseconds, |
1556 | + * but wait for up to 300 milliseconds to be safe. |
1557 | + */ |
1558 | + for (i = 0; i < 15; i++) { |
1559 | + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) |
1560 | + break; |
1561 | + msleep(20); |
1562 | + } |
1563 | if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { |
1564 | dev_err(&pf->pdev->dev, "VF %d still in reset. Try again.\n", |
1565 | vf_id); |
1566 | diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c |
1567 | index 5a1668cdb461..7f1083ce23da 100644 |
1568 | --- a/drivers/net/ethernet/marvell/mvpp2.c |
1569 | +++ b/drivers/net/ethernet/marvell/mvpp2.c |
1570 | @@ -838,6 +838,8 @@ enum mvpp2_bm_type { |
1571 | |
1572 | #define MVPP2_MIB_COUNTERS_STATS_DELAY (1 * HZ) |
1573 | |
1574 | +#define MVPP2_DESC_DMA_MASK DMA_BIT_MASK(40) |
1575 | + |
1576 | /* Definitions */ |
1577 | |
1578 | /* Shared Packet Processor resources */ |
1579 | @@ -1336,7 +1338,7 @@ static dma_addr_t mvpp2_txdesc_dma_addr_get(struct mvpp2_port *port, |
1580 | if (port->priv->hw_version == MVPP21) |
1581 | return tx_desc->pp21.buf_dma_addr; |
1582 | else |
1583 | - return tx_desc->pp22.buf_dma_addr_ptp & GENMASK_ULL(40, 0); |
1584 | + return tx_desc->pp22.buf_dma_addr_ptp & MVPP2_DESC_DMA_MASK; |
1585 | } |
1586 | |
1587 | static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, |
1588 | @@ -1354,7 +1356,7 @@ static void mvpp2_txdesc_dma_addr_set(struct mvpp2_port *port, |
1589 | } else { |
1590 | u64 val = (u64)addr; |
1591 | |
1592 | - tx_desc->pp22.buf_dma_addr_ptp &= ~GENMASK_ULL(40, 0); |
1593 | + tx_desc->pp22.buf_dma_addr_ptp &= ~MVPP2_DESC_DMA_MASK; |
1594 | tx_desc->pp22.buf_dma_addr_ptp |= val; |
1595 | tx_desc->pp22.packet_offset = offset; |
1596 | } |
1597 | @@ -1414,7 +1416,7 @@ static dma_addr_t mvpp2_rxdesc_dma_addr_get(struct mvpp2_port *port, |
1598 | if (port->priv->hw_version == MVPP21) |
1599 | return rx_desc->pp21.buf_dma_addr; |
1600 | else |
1601 | - return rx_desc->pp22.buf_dma_addr_key_hash & GENMASK_ULL(40, 0); |
1602 | + return rx_desc->pp22.buf_dma_addr_key_hash & MVPP2_DESC_DMA_MASK; |
1603 | } |
1604 | |
1605 | static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, |
1606 | @@ -1423,7 +1425,7 @@ static unsigned long mvpp2_rxdesc_cookie_get(struct mvpp2_port *port, |
1607 | if (port->priv->hw_version == MVPP21) |
1608 | return rx_desc->pp21.buf_cookie; |
1609 | else |
1610 | - return rx_desc->pp22.buf_cookie_misc & GENMASK_ULL(40, 0); |
1611 | + return rx_desc->pp22.buf_cookie_misc & MVPP2_DESC_DMA_MASK; |
1612 | } |
1613 | |
1614 | static size_t mvpp2_rxdesc_size_get(struct mvpp2_port *port, |
1615 | @@ -8347,7 +8349,7 @@ static int mvpp2_probe(struct platform_device *pdev) |
1616 | } |
1617 | |
1618 | if (priv->hw_version == MVPP22) { |
1619 | - err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); |
1620 | + err = dma_set_mask(&pdev->dev, MVPP2_DESC_DMA_MASK); |
1621 | if (err) |
1622 | goto err_mg_clk; |
1623 | /* Sadly, the BM pools all share the same register to |
1624 | diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h |
1625 | index 7761a26ec9c5..e7565416639b 100644 |
1626 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h |
1627 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h |
1628 | @@ -343,7 +343,7 @@ enum power_event { |
1629 | #define MTL_RX_OVERFLOW_INT BIT(16) |
1630 | |
1631 | /* Default operating mode of the MAC */ |
1632 | -#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \ |
1633 | +#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \ |
1634 | GMAC_CONFIG_BE | GMAC_CONFIG_DCRS) |
1635 | |
1636 | /* To dump the core regs excluding the Address Registers */ |
1637 | diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
1638 | index 63795ecafc8d..26dfb75e927a 100644 |
1639 | --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
1640 | +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c |
1641 | @@ -30,13 +30,6 @@ static void dwmac4_core_init(struct mac_device_info *hw, |
1642 | |
1643 | value |= GMAC_CORE_INIT; |
1644 | |
1645 | - /* Clear ACS bit because Ethernet switch tagging formats such as |
1646 | - * Broadcom tags can look like invalid LLC/SNAP packets and cause the |
1647 | - * hardware to truncate packets on reception. |
1648 | - */ |
1649 | - if (netdev_uses_dsa(dev)) |
1650 | - value &= ~GMAC_CONFIG_ACS; |
1651 | - |
1652 | if (mtu > 1500) |
1653 | value |= GMAC_CONFIG_2K; |
1654 | if (mtu > 2000) |
1655 | diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
1656 | index 7ad841434ec8..3ea343b45d93 100644 |
1657 | --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
1658 | +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |
1659 | @@ -3435,8 +3435,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) |
1660 | |
1661 | /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 |
1662 | * Type frames (LLC/LLC-SNAP) |
1663 | + * |
1664 | + * llc_snap is never checked in GMAC >= 4, so this ACS |
1665 | + * feature is always disabled and packets need to be |
1666 | + * stripped manually. |
1667 | */ |
1668 | - if (unlikely(status != llc_snap)) |
1669 | + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00) || |
1670 | + unlikely(status != llc_snap)) |
1671 | frame_len -= ETH_FCS_LEN; |
1672 | |
1673 | if (netif_msg_rx_status(priv)) { |
1674 | diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c |
1675 | index b2b30c9df037..33c35b2df7d5 100644 |
1676 | --- a/drivers/net/ethernet/ti/cpsw.c |
1677 | +++ b/drivers/net/ethernet/ti/cpsw.c |
1678 | @@ -125,7 +125,7 @@ do { \ |
1679 | |
1680 | #define RX_PRIORITY_MAPPING 0x76543210 |
1681 | #define TX_PRIORITY_MAPPING 0x33221100 |
1682 | -#define CPDMA_TX_PRIORITY_MAP 0x01234567 |
1683 | +#define CPDMA_TX_PRIORITY_MAP 0x76543210 |
1684 | |
1685 | #define CPSW_VLAN_AWARE BIT(1) |
1686 | #define CPSW_ALE_VLAN_AWARE 1 |
1687 | diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c |
1688 | index 9cbb0c8a896a..7de88b33d5b9 100644 |
1689 | --- a/drivers/net/macsec.c |
1690 | +++ b/drivers/net/macsec.c |
1691 | @@ -3277,7 +3277,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, |
1692 | |
1693 | err = netdev_upper_dev_link(real_dev, dev, extack); |
1694 | if (err < 0) |
1695 | - goto put_dev; |
1696 | + goto unregister; |
1697 | |
1698 | /* need to be already registered so that ->init has run and |
1699 | * the MAC addr is set |
1700 | @@ -3316,8 +3316,7 @@ static int macsec_newlink(struct net *net, struct net_device *dev, |
1701 | macsec_del_dev(macsec); |
1702 | unlink: |
1703 | netdev_upper_dev_unlink(real_dev, dev); |
1704 | -put_dev: |
1705 | - dev_put(real_dev); |
1706 | +unregister: |
1707 | unregister_netdevice(dev); |
1708 | return err; |
1709 | } |
1710 | diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c |
1711 | index 5aa59f41bf8c..71e2aef6b7a1 100644 |
1712 | --- a/drivers/net/ppp/pppoe.c |
1713 | +++ b/drivers/net/ppp/pppoe.c |
1714 | @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, |
1715 | lock_sock(sk); |
1716 | |
1717 | error = -EINVAL; |
1718 | + |
1719 | + if (sockaddr_len != sizeof(struct sockaddr_pppox)) |
1720 | + goto end; |
1721 | + |
1722 | if (sp->sa_protocol != PX_PROTO_OE) |
1723 | goto end; |
1724 | |
1725 | diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c |
1726 | index befed2d22bf4..3175f7410baf 100644 |
1727 | --- a/drivers/net/team/team.c |
1728 | +++ b/drivers/net/team/team.c |
1729 | @@ -261,6 +261,17 @@ static void __team_option_inst_mark_removed_port(struct team *team, |
1730 | } |
1731 | } |
1732 | |
1733 | +static bool __team_option_inst_tmp_find(const struct list_head *opts, |
1734 | + const struct team_option_inst *needle) |
1735 | +{ |
1736 | + struct team_option_inst *opt_inst; |
1737 | + |
1738 | + list_for_each_entry(opt_inst, opts, tmp_list) |
1739 | + if (opt_inst == needle) |
1740 | + return true; |
1741 | + return false; |
1742 | +} |
1743 | + |
1744 | static int __team_options_register(struct team *team, |
1745 | const struct team_option *option, |
1746 | size_t option_count) |
1747 | @@ -1061,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port) |
1748 | } |
1749 | |
1750 | #ifdef CONFIG_NET_POLL_CONTROLLER |
1751 | -static int team_port_enable_netpoll(struct team *team, struct team_port *port) |
1752 | +static int __team_port_enable_netpoll(struct team_port *port) |
1753 | { |
1754 | struct netpoll *np; |
1755 | int err; |
1756 | |
1757 | - if (!team->dev->npinfo) |
1758 | - return 0; |
1759 | - |
1760 | np = kzalloc(sizeof(*np), GFP_KERNEL); |
1761 | if (!np) |
1762 | return -ENOMEM; |
1763 | @@ -1082,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port) |
1764 | return err; |
1765 | } |
1766 | |
1767 | +static int team_port_enable_netpoll(struct team_port *port) |
1768 | +{ |
1769 | + if (!port->team->dev->npinfo) |
1770 | + return 0; |
1771 | + |
1772 | + return __team_port_enable_netpoll(port); |
1773 | +} |
1774 | + |
1775 | static void team_port_disable_netpoll(struct team_port *port) |
1776 | { |
1777 | struct netpoll *np = port->np; |
1778 | @@ -1096,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port) |
1779 | kfree(np); |
1780 | } |
1781 | #else |
1782 | -static int team_port_enable_netpoll(struct team *team, struct team_port *port) |
1783 | +static int team_port_enable_netpoll(struct team_port *port) |
1784 | { |
1785 | return 0; |
1786 | } |
1787 | @@ -1204,7 +1220,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) |
1788 | goto err_vids_add; |
1789 | } |
1790 | |
1791 | - err = team_port_enable_netpoll(team, port); |
1792 | + err = team_port_enable_netpoll(port); |
1793 | if (err) { |
1794 | netdev_err(dev, "Failed to enable netpoll on device %s\n", |
1795 | portname); |
1796 | @@ -1901,7 +1917,7 @@ static int team_netpoll_setup(struct net_device *dev, |
1797 | |
1798 | mutex_lock(&team->lock); |
1799 | list_for_each_entry(port, &team->port_list, list) { |
1800 | - err = team_port_enable_netpoll(team, port); |
1801 | + err = __team_port_enable_netpoll(port); |
1802 | if (err) { |
1803 | __team_netpoll_cleanup(team); |
1804 | break; |
1805 | @@ -2562,6 +2578,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) |
1806 | if (err) |
1807 | goto team_put; |
1808 | opt_inst->changed = true; |
1809 | + |
1810 | + /* dumb/evil user-space can send us duplicate opt, |
1811 | + * keep only the last one |
1812 | + */ |
1813 | + if (__team_option_inst_tmp_find(&opt_inst_list, |
1814 | + opt_inst)) |
1815 | + continue; |
1816 | + |
1817 | list_add(&opt_inst->tmp_list, &opt_inst_list); |
1818 | } |
1819 | if (!opt_found) { |
1820 | diff --git a/drivers/net/tun.c b/drivers/net/tun.c |
1821 | index 28cfa642e39a..6c7bdd0c361a 100644 |
1822 | --- a/drivers/net/tun.c |
1823 | +++ b/drivers/net/tun.c |
1824 | @@ -1094,12 +1094,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) |
1825 | goto drop; |
1826 | |
1827 | len = run_ebpf_filter(tun, skb, len); |
1828 | - |
1829 | - /* Trim extra bytes since we may insert vlan proto & TCI |
1830 | - * in tun_put_user(). |
1831 | - */ |
1832 | - len -= skb_vlan_tag_present(skb) ? sizeof(struct veth) : 0; |
1833 | - if (len <= 0 || pskb_trim(skb, len)) |
1834 | + if (len == 0 || pskb_trim(skb, len)) |
1835 | goto drop; |
1836 | |
1837 | if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) |
1838 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
1839 | index ca066b785e9f..c853e7410f5a 100644 |
1840 | --- a/drivers/net/usb/qmi_wwan.c |
1841 | +++ b/drivers/net/usb/qmi_wwan.c |
1842 | @@ -1107,6 +1107,7 @@ static const struct usb_device_id products[] = { |
1843 | {QMI_FIXED_INTF(0x1435, 0xd181, 3)}, /* Wistron NeWeb D18Q1 */ |
1844 | {QMI_FIXED_INTF(0x1435, 0xd181, 4)}, /* Wistron NeWeb D18Q1 */ |
1845 | {QMI_FIXED_INTF(0x1435, 0xd181, 5)}, /* Wistron NeWeb D18Q1 */ |
1846 | + {QMI_FIXED_INTF(0x1435, 0xd191, 4)}, /* Wistron NeWeb D19Q1 */ |
1847 | {QMI_FIXED_INTF(0x16d8, 0x6003, 0)}, /* CMOTech 6003 */ |
1848 | {QMI_FIXED_INTF(0x16d8, 0x6007, 0)}, /* CMOTech CHE-628S */ |
1849 | {QMI_FIXED_INTF(0x16d8, 0x6008, 0)}, /* CMOTech CMU-301 */ |
1850 | diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c |
1851 | index 23374603e4d9..aa21b2225679 100644 |
1852 | --- a/drivers/net/virtio_net.c |
1853 | +++ b/drivers/net/virtio_net.c |
1854 | @@ -147,6 +147,17 @@ struct receive_queue { |
1855 | struct xdp_rxq_info xdp_rxq; |
1856 | }; |
1857 | |
1858 | +/* Control VQ buffers: protected by the rtnl lock */ |
1859 | +struct control_buf { |
1860 | + struct virtio_net_ctrl_hdr hdr; |
1861 | + virtio_net_ctrl_ack status; |
1862 | + struct virtio_net_ctrl_mq mq; |
1863 | + u8 promisc; |
1864 | + u8 allmulti; |
1865 | + __virtio16 vid; |
1866 | + u64 offloads; |
1867 | +}; |
1868 | + |
1869 | struct virtnet_info { |
1870 | struct virtio_device *vdev; |
1871 | struct virtqueue *cvq; |
1872 | @@ -192,14 +203,7 @@ struct virtnet_info { |
1873 | struct hlist_node node; |
1874 | struct hlist_node node_dead; |
1875 | |
1876 | - /* Control VQ buffers: protected by the rtnl lock */ |
1877 | - struct virtio_net_ctrl_hdr ctrl_hdr; |
1878 | - virtio_net_ctrl_ack ctrl_status; |
1879 | - struct virtio_net_ctrl_mq ctrl_mq; |
1880 | - u8 ctrl_promisc; |
1881 | - u8 ctrl_allmulti; |
1882 | - u16 ctrl_vid; |
1883 | - u64 ctrl_offloads; |
1884 | + struct control_buf *ctrl; |
1885 | |
1886 | /* Ethtool settings */ |
1887 | u8 duplex; |
1888 | @@ -1269,7 +1273,9 @@ static int virtnet_poll(struct napi_struct *napi, int budget) |
1889 | { |
1890 | struct receive_queue *rq = |
1891 | container_of(napi, struct receive_queue, napi); |
1892 | - unsigned int received; |
1893 | + struct virtnet_info *vi = rq->vq->vdev->priv; |
1894 | + struct send_queue *sq; |
1895 | + unsigned int received, qp; |
1896 | bool xdp_xmit = false; |
1897 | |
1898 | virtnet_poll_cleantx(rq); |
1899 | @@ -1280,8 +1286,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget) |
1900 | if (received < budget) |
1901 | virtqueue_napi_complete(napi, rq->vq, received); |
1902 | |
1903 | - if (xdp_xmit) |
1904 | + if (xdp_xmit) { |
1905 | + qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + |
1906 | + smp_processor_id(); |
1907 | + sq = &vi->sq[qp]; |
1908 | + virtqueue_kick(sq->vq); |
1909 | xdp_do_flush_map(); |
1910 | + } |
1911 | |
1912 | return received; |
1913 | } |
1914 | @@ -1454,25 +1465,25 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
1915 | /* Caller should know better */ |
1916 | BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); |
1917 | |
1918 | - vi->ctrl_status = ~0; |
1919 | - vi->ctrl_hdr.class = class; |
1920 | - vi->ctrl_hdr.cmd = cmd; |
1921 | + vi->ctrl->status = ~0; |
1922 | + vi->ctrl->hdr.class = class; |
1923 | + vi->ctrl->hdr.cmd = cmd; |
1924 | /* Add header */ |
1925 | - sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr)); |
1926 | + sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr)); |
1927 | sgs[out_num++] = &hdr; |
1928 | |
1929 | if (out) |
1930 | sgs[out_num++] = out; |
1931 | |
1932 | /* Add return status. */ |
1933 | - sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status)); |
1934 | + sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status)); |
1935 | sgs[out_num] = &stat; |
1936 | |
1937 | BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); |
1938 | virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); |
1939 | |
1940 | if (unlikely(!virtqueue_kick(vi->cvq))) |
1941 | - return vi->ctrl_status == VIRTIO_NET_OK; |
1942 | + return vi->ctrl->status == VIRTIO_NET_OK; |
1943 | |
1944 | /* Spin for a response, the kick causes an ioport write, trapping |
1945 | * into the hypervisor, so the request should be handled immediately. |
1946 | @@ -1481,7 +1492,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, |
1947 | !virtqueue_is_broken(vi->cvq)) |
1948 | cpu_relax(); |
1949 | |
1950 | - return vi->ctrl_status == VIRTIO_NET_OK; |
1951 | + return vi->ctrl->status == VIRTIO_NET_OK; |
1952 | } |
1953 | |
1954 | static int virtnet_set_mac_address(struct net_device *dev, void *p) |
1955 | @@ -1593,8 +1604,8 @@ static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs) |
1956 | if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ)) |
1957 | return 0; |
1958 | |
1959 | - vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
1960 | - sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq)); |
1961 | + vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs); |
1962 | + sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq)); |
1963 | |
1964 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ, |
1965 | VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) { |
1966 | @@ -1653,22 +1664,22 @@ static void virtnet_set_rx_mode(struct net_device *dev) |
1967 | if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX)) |
1968 | return; |
1969 | |
1970 | - vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0); |
1971 | - vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
1972 | + vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0); |
1973 | + vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0); |
1974 | |
1975 | - sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc)); |
1976 | + sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc)); |
1977 | |
1978 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
1979 | VIRTIO_NET_CTRL_RX_PROMISC, sg)) |
1980 | dev_warn(&dev->dev, "Failed to %sable promisc mode.\n", |
1981 | - vi->ctrl_promisc ? "en" : "dis"); |
1982 | + vi->ctrl->promisc ? "en" : "dis"); |
1983 | |
1984 | - sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti)); |
1985 | + sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti)); |
1986 | |
1987 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX, |
1988 | VIRTIO_NET_CTRL_RX_ALLMULTI, sg)) |
1989 | dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n", |
1990 | - vi->ctrl_allmulti ? "en" : "dis"); |
1991 | + vi->ctrl->allmulti ? "en" : "dis"); |
1992 | |
1993 | uc_count = netdev_uc_count(dev); |
1994 | mc_count = netdev_mc_count(dev); |
1995 | @@ -1714,8 +1725,8 @@ static int virtnet_vlan_rx_add_vid(struct net_device *dev, |
1996 | struct virtnet_info *vi = netdev_priv(dev); |
1997 | struct scatterlist sg; |
1998 | |
1999 | - vi->ctrl_vid = vid; |
2000 | - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); |
2001 | + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
2002 | + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
2003 | |
2004 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
2005 | VIRTIO_NET_CTRL_VLAN_ADD, &sg)) |
2006 | @@ -1729,8 +1740,8 @@ static int virtnet_vlan_rx_kill_vid(struct net_device *dev, |
2007 | struct virtnet_info *vi = netdev_priv(dev); |
2008 | struct scatterlist sg; |
2009 | |
2010 | - vi->ctrl_vid = vid; |
2011 | - sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid)); |
2012 | + vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid); |
2013 | + sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid)); |
2014 | |
2015 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN, |
2016 | VIRTIO_NET_CTRL_VLAN_DEL, &sg)) |
2017 | @@ -2126,9 +2137,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) |
2018 | static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads) |
2019 | { |
2020 | struct scatterlist sg; |
2021 | - vi->ctrl_offloads = cpu_to_virtio64(vi->vdev, offloads); |
2022 | + vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads); |
2023 | |
2024 | - sg_init_one(&sg, &vi->ctrl_offloads, sizeof(vi->ctrl_offloads)); |
2025 | + sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads)); |
2026 | |
2027 | if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS, |
2028 | VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) { |
2029 | @@ -2351,6 +2362,7 @@ static void virtnet_free_queues(struct virtnet_info *vi) |
2030 | |
2031 | kfree(vi->rq); |
2032 | kfree(vi->sq); |
2033 | + kfree(vi->ctrl); |
2034 | } |
2035 | |
2036 | static void _free_receive_bufs(struct virtnet_info *vi) |
2037 | @@ -2543,6 +2555,9 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) |
2038 | { |
2039 | int i; |
2040 | |
2041 | + vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL); |
2042 | + if (!vi->ctrl) |
2043 | + goto err_ctrl; |
2044 | vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL); |
2045 | if (!vi->sq) |
2046 | goto err_sq; |
2047 | @@ -2571,6 +2586,8 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) |
2048 | err_rq: |
2049 | kfree(vi->sq); |
2050 | err_sq: |
2051 | + kfree(vi->ctrl); |
2052 | +err_ctrl: |
2053 | return -ENOMEM; |
2054 | } |
2055 | |
2056 | diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c |
2057 | index e04937f44f33..9ebe2a689966 100644 |
2058 | --- a/drivers/net/vmxnet3/vmxnet3_drv.c |
2059 | +++ b/drivers/net/vmxnet3/vmxnet3_drv.c |
2060 | @@ -1218,6 +1218,7 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, |
2061 | union { |
2062 | void *ptr; |
2063 | struct ethhdr *eth; |
2064 | + struct vlan_ethhdr *veth; |
2065 | struct iphdr *ipv4; |
2066 | struct ipv6hdr *ipv6; |
2067 | struct tcphdr *tcp; |
2068 | @@ -1228,16 +1229,24 @@ vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb, |
2069 | if (unlikely(sizeof(struct iphdr) + sizeof(struct tcphdr) > maplen)) |
2070 | return 0; |
2071 | |
2072 | + if (skb->protocol == cpu_to_be16(ETH_P_8021Q) || |
2073 | + skb->protocol == cpu_to_be16(ETH_P_8021AD)) |
2074 | + hlen = sizeof(struct vlan_ethhdr); |
2075 | + else |
2076 | + hlen = sizeof(struct ethhdr); |
2077 | + |
2078 | hdr.eth = eth_hdr(skb); |
2079 | if (gdesc->rcd.v4) { |
2080 | - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP)); |
2081 | - hdr.ptr += sizeof(struct ethhdr); |
2082 | + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IP) && |
2083 | + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IP)); |
2084 | + hdr.ptr += hlen; |
2085 | BUG_ON(hdr.ipv4->protocol != IPPROTO_TCP); |
2086 | hlen = hdr.ipv4->ihl << 2; |
2087 | hdr.ptr += hdr.ipv4->ihl << 2; |
2088 | } else if (gdesc->rcd.v6) { |
2089 | - BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6)); |
2090 | - hdr.ptr += sizeof(struct ethhdr); |
2091 | + BUG_ON(hdr.eth->h_proto != htons(ETH_P_IPV6) && |
2092 | + hdr.veth->h_vlan_encapsulated_proto != htons(ETH_P_IPV6)); |
2093 | + hdr.ptr += hlen; |
2094 | /* Use an estimated value, since we also need to handle |
2095 | * TSO case. |
2096 | */ |
2097 | diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h |
2098 | index 59ec34052a65..a3326463b71f 100644 |
2099 | --- a/drivers/net/vmxnet3/vmxnet3_int.h |
2100 | +++ b/drivers/net/vmxnet3/vmxnet3_int.h |
2101 | @@ -69,10 +69,10 @@ |
2102 | /* |
2103 | * Version numbers |
2104 | */ |
2105 | -#define VMXNET3_DRIVER_VERSION_STRING "1.4.13.0-k" |
2106 | +#define VMXNET3_DRIVER_VERSION_STRING "1.4.14.0-k" |
2107 | |
2108 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
2109 | -#define VMXNET3_DRIVER_VERSION_NUM 0x01040d00 |
2110 | +#define VMXNET3_DRIVER_VERSION_NUM 0x01040e00 |
2111 | |
2112 | #if defined(CONFIG_PCI_MSI) |
2113 | /* RSS only makes sense if MSI-X is supported. */ |
2114 | diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c |
2115 | index ebb3f1b046f3..800a86e2d671 100644 |
2116 | --- a/drivers/net/wireless/ath/ath10k/mac.c |
2117 | +++ b/drivers/net/wireless/ath/ath10k/mac.c |
2118 | @@ -6028,9 +6028,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk) |
2119 | sta->addr, smps, err); |
2120 | } |
2121 | |
2122 | - if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || |
2123 | - changed & IEEE80211_RC_NSS_CHANGED) { |
2124 | - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", |
2125 | + if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { |
2126 | + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", |
2127 | sta->addr); |
2128 | |
2129 | err = ath10k_station_assoc(ar, arvif->vif, sta, true); |
2130 | diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c |
2131 | index 96e73e30204e..5f111f0ee7ca 100644 |
2132 | --- a/drivers/pinctrl/intel/pinctrl-intel.c |
2133 | +++ b/drivers/pinctrl/intel/pinctrl-intel.c |
2134 | @@ -425,18 +425,6 @@ static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input) |
2135 | writel(value, padcfg0); |
2136 | } |
2137 | |
2138 | -static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) |
2139 | -{ |
2140 | - u32 value; |
2141 | - |
2142 | - /* Put the pad into GPIO mode */ |
2143 | - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; |
2144 | - /* Disable SCI/SMI/NMI generation */ |
2145 | - value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); |
2146 | - value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); |
2147 | - writel(value, padcfg0); |
2148 | -} |
2149 | - |
2150 | static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, |
2151 | struct pinctrl_gpio_range *range, |
2152 | unsigned pin) |
2153 | @@ -444,6 +432,7 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, |
2154 | struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); |
2155 | void __iomem *padcfg0; |
2156 | unsigned long flags; |
2157 | + u32 value; |
2158 | |
2159 | raw_spin_lock_irqsave(&pctrl->lock, flags); |
2160 | |
2161 | @@ -453,7 +442,13 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, |
2162 | } |
2163 | |
2164 | padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); |
2165 | - intel_gpio_set_gpio_mode(padcfg0); |
2166 | + /* Put the pad into GPIO mode */ |
2167 | + value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; |
2168 | + /* Disable SCI/SMI/NMI generation */ |
2169 | + value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); |
2170 | + value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); |
2171 | + writel(value, padcfg0); |
2172 | + |
2173 | /* Disable TX buffer and enable RX (this will be input) */ |
2174 | __intel_gpio_set_direction(padcfg0, true); |
2175 | |
2176 | @@ -973,8 +968,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned type) |
2177 | |
2178 | raw_spin_lock_irqsave(&pctrl->lock, flags); |
2179 | |
2180 | - intel_gpio_set_gpio_mode(reg); |
2181 | - |
2182 | value = readl(reg); |
2183 | |
2184 | value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); |
2185 | diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c |
2186 | index 62f5f04d8f61..5e963fe0e38d 100644 |
2187 | --- a/drivers/s390/block/dasd_alias.c |
2188 | +++ b/drivers/s390/block/dasd_alias.c |
2189 | @@ -592,13 +592,22 @@ static int _schedule_lcu_update(struct alias_lcu *lcu, |
2190 | int dasd_alias_add_device(struct dasd_device *device) |
2191 | { |
2192 | struct dasd_eckd_private *private = device->private; |
2193 | - struct alias_lcu *lcu; |
2194 | + __u8 uaddr = private->uid.real_unit_addr; |
2195 | + struct alias_lcu *lcu = private->lcu; |
2196 | unsigned long flags; |
2197 | int rc; |
2198 | |
2199 | - lcu = private->lcu; |
2200 | rc = 0; |
2201 | spin_lock_irqsave(&lcu->lock, flags); |
2202 | + /* |
2203 | + * Check if device and lcu type differ. If so, the uac data may be |
2204 | + * outdated and needs to be updated. |
2205 | + */ |
2206 | + if (private->uid.type != lcu->uac->unit[uaddr].ua_type) { |
2207 | + lcu->flags |= UPDATE_PENDING; |
2208 | + DBF_DEV_EVENT(DBF_WARNING, device, "%s", |
2209 | + "uid type mismatch - trigger rescan"); |
2210 | + } |
2211 | if (!(lcu->flags & UPDATE_PENDING)) { |
2212 | rc = _add_device_to_lcu(lcu, device, device); |
2213 | if (rc) |
2214 | diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c |
2215 | index c08fc5a8df0c..aea0b25eff29 100644 |
2216 | --- a/drivers/s390/cio/chsc.c |
2217 | +++ b/drivers/s390/cio/chsc.c |
2218 | @@ -452,6 +452,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area) |
2219 | |
2220 | static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) |
2221 | { |
2222 | + struct channel_path *chp; |
2223 | struct chp_link link; |
2224 | struct chp_id chpid; |
2225 | int status; |
2226 | @@ -464,10 +465,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area) |
2227 | chpid.id = sei_area->rsid; |
2228 | /* allocate a new channel path structure, if needed */ |
2229 | status = chp_get_status(chpid); |
2230 | - if (status < 0) |
2231 | - chp_new(chpid); |
2232 | - else if (!status) |
2233 | + if (!status) |
2234 | return; |
2235 | + |
2236 | + if (status < 0) { |
2237 | + chp_new(chpid); |
2238 | + } else { |
2239 | + chp = chpid_to_chp(chpid); |
2240 | + mutex_lock(&chp->lock); |
2241 | + chp_update_desc(chp); |
2242 | + mutex_unlock(&chp->lock); |
2243 | + } |
2244 | memset(&link, 0, sizeof(struct chp_link)); |
2245 | link.chpid = chpid; |
2246 | if ((sei_area->vf & 0xc0) != 0) { |
2247 | diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h |
2248 | index 959c65cf75d9..e338ce823c44 100644 |
2249 | --- a/drivers/s390/net/qeth_core.h |
2250 | +++ b/drivers/s390/net/qeth_core.h |
2251 | @@ -565,7 +565,6 @@ enum qeth_ip_types { |
2252 | enum qeth_cmd_buffer_state { |
2253 | BUF_STATE_FREE, |
2254 | BUF_STATE_LOCKED, |
2255 | - BUF_STATE_PROCESSED, |
2256 | }; |
2257 | |
2258 | enum qeth_cq { |
2259 | @@ -609,7 +608,6 @@ struct qeth_channel { |
2260 | struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; |
2261 | atomic_t irq_pending; |
2262 | int io_buf_no; |
2263 | - int buf_no; |
2264 | }; |
2265 | |
2266 | /** |
2267 | diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c |
2268 | index 3653bea38470..c11a083cd956 100644 |
2269 | --- a/drivers/s390/net/qeth_core_main.c |
2270 | +++ b/drivers/s390/net/qeth_core_main.c |
2271 | @@ -821,7 +821,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) |
2272 | |
2273 | for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) |
2274 | qeth_release_buffer(channel, &channel->iob[cnt]); |
2275 | - channel->buf_no = 0; |
2276 | channel->io_buf_no = 0; |
2277 | } |
2278 | EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); |
2279 | @@ -927,7 +926,6 @@ static int qeth_setup_channel(struct qeth_channel *channel) |
2280 | kfree(channel->iob[cnt].data); |
2281 | return -ENOMEM; |
2282 | } |
2283 | - channel->buf_no = 0; |
2284 | channel->io_buf_no = 0; |
2285 | atomic_set(&channel->irq_pending, 0); |
2286 | spin_lock_init(&channel->iob_lock); |
2287 | @@ -1103,11 +1101,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, |
2288 | { |
2289 | int rc; |
2290 | int cstat, dstat; |
2291 | - struct qeth_cmd_buffer *buffer; |
2292 | struct qeth_channel *channel; |
2293 | struct qeth_card *card; |
2294 | struct qeth_cmd_buffer *iob; |
2295 | - __u8 index; |
2296 | |
2297 | if (__qeth_check_irb_error(cdev, intparm, irb)) |
2298 | return; |
2299 | @@ -1185,25 +1181,18 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, |
2300 | channel->state = CH_STATE_RCD_DONE; |
2301 | goto out; |
2302 | } |
2303 | - if (intparm) { |
2304 | - buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); |
2305 | - buffer->state = BUF_STATE_PROCESSED; |
2306 | - } |
2307 | if (channel == &card->data) |
2308 | return; |
2309 | if (channel == &card->read && |
2310 | channel->state == CH_STATE_UP) |
2311 | __qeth_issue_next_read(card); |
2312 | |
2313 | - iob = channel->iob; |
2314 | - index = channel->buf_no; |
2315 | - while (iob[index].state == BUF_STATE_PROCESSED) { |
2316 | - if (iob[index].callback != NULL) |
2317 | - iob[index].callback(channel, iob + index); |
2318 | - |
2319 | - index = (index + 1) % QETH_CMD_BUFFER_NO; |
2320 | + if (intparm) { |
2321 | + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); |
2322 | + if (iob->callback) |
2323 | + iob->callback(iob->channel, iob); |
2324 | } |
2325 | - channel->buf_no = index; |
2326 | + |
2327 | out: |
2328 | wake_up(&card->wait_q); |
2329 | return; |
2330 | @@ -2217,7 +2206,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, |
2331 | error: |
2332 | atomic_set(&card->write.irq_pending, 0); |
2333 | qeth_release_buffer(iob->channel, iob); |
2334 | - card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; |
2335 | rc = reply->rc; |
2336 | qeth_put_reply(reply); |
2337 | return rc; |
2338 | @@ -3037,28 +3025,23 @@ static int qeth_send_startlan(struct qeth_card *card) |
2339 | return rc; |
2340 | } |
2341 | |
2342 | -static int qeth_default_setadapterparms_cb(struct qeth_card *card, |
2343 | - struct qeth_reply *reply, unsigned long data) |
2344 | +static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) |
2345 | { |
2346 | - struct qeth_ipa_cmd *cmd; |
2347 | - |
2348 | - QETH_CARD_TEXT(card, 4, "defadpcb"); |
2349 | - |
2350 | - cmd = (struct qeth_ipa_cmd *) data; |
2351 | - if (cmd->hdr.return_code == 0) |
2352 | + if (!cmd->hdr.return_code) |
2353 | cmd->hdr.return_code = |
2354 | cmd->data.setadapterparms.hdr.return_code; |
2355 | - return 0; |
2356 | + return cmd->hdr.return_code; |
2357 | } |
2358 | |
2359 | static int qeth_query_setadapterparms_cb(struct qeth_card *card, |
2360 | struct qeth_reply *reply, unsigned long data) |
2361 | { |
2362 | - struct qeth_ipa_cmd *cmd; |
2363 | + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; |
2364 | |
2365 | QETH_CARD_TEXT(card, 3, "quyadpcb"); |
2366 | + if (qeth_setadpparms_inspect_rc(cmd)) |
2367 | + return 0; |
2368 | |
2369 | - cmd = (struct qeth_ipa_cmd *) data; |
2370 | if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { |
2371 | card->info.link_type = |
2372 | cmd->data.setadapterparms.data.query_cmds_supp.lan_type; |
2373 | @@ -3066,7 +3049,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, |
2374 | } |
2375 | card->options.adp.supported_funcs = |
2376 | cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; |
2377 | - return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); |
2378 | + return 0; |
2379 | } |
2380 | |
2381 | static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, |
2382 | @@ -3158,22 +3141,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists); |
2383 | static int qeth_query_switch_attributes_cb(struct qeth_card *card, |
2384 | struct qeth_reply *reply, unsigned long data) |
2385 | { |
2386 | - struct qeth_ipa_cmd *cmd; |
2387 | - struct qeth_switch_info *sw_info; |
2388 | + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; |
2389 | struct qeth_query_switch_attributes *attrs; |
2390 | + struct qeth_switch_info *sw_info; |
2391 | |
2392 | QETH_CARD_TEXT(card, 2, "qswiatcb"); |
2393 | - cmd = (struct qeth_ipa_cmd *) data; |
2394 | - sw_info = (struct qeth_switch_info *)reply->param; |
2395 | - if (cmd->data.setadapterparms.hdr.return_code == 0) { |
2396 | - attrs = &cmd->data.setadapterparms.data.query_switch_attributes; |
2397 | - sw_info->capabilities = attrs->capabilities; |
2398 | - sw_info->settings = attrs->settings; |
2399 | - QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, |
2400 | - sw_info->settings); |
2401 | - } |
2402 | - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); |
2403 | + if (qeth_setadpparms_inspect_rc(cmd)) |
2404 | + return 0; |
2405 | |
2406 | + sw_info = (struct qeth_switch_info *)reply->param; |
2407 | + attrs = &cmd->data.setadapterparms.data.query_switch_attributes; |
2408 | + sw_info->capabilities = attrs->capabilities; |
2409 | + sw_info->settings = attrs->settings; |
2410 | + QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, |
2411 | + sw_info->settings); |
2412 | return 0; |
2413 | } |
2414 | |
2415 | @@ -4211,16 +4192,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet); |
2416 | static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, |
2417 | struct qeth_reply *reply, unsigned long data) |
2418 | { |
2419 | - struct qeth_ipa_cmd *cmd; |
2420 | + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; |
2421 | struct qeth_ipacmd_setadpparms *setparms; |
2422 | |
2423 | QETH_CARD_TEXT(card, 4, "prmadpcb"); |
2424 | |
2425 | - cmd = (struct qeth_ipa_cmd *) data; |
2426 | setparms = &(cmd->data.setadapterparms); |
2427 | - |
2428 | - qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); |
2429 | - if (cmd->hdr.return_code) { |
2430 | + if (qeth_setadpparms_inspect_rc(cmd)) { |
2431 | QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code); |
2432 | setparms->data.mode = SET_PROMISC_MODE_OFF; |
2433 | } |
2434 | @@ -4290,18 +4268,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats); |
2435 | static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, |
2436 | struct qeth_reply *reply, unsigned long data) |
2437 | { |
2438 | - struct qeth_ipa_cmd *cmd; |
2439 | + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; |
2440 | |
2441 | QETH_CARD_TEXT(card, 4, "chgmaccb"); |
2442 | + if (qeth_setadpparms_inspect_rc(cmd)) |
2443 | + return 0; |
2444 | |
2445 | - cmd = (struct qeth_ipa_cmd *) data; |
2446 | if (!card->options.layer2 || |
2447 | !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { |
2448 | ether_addr_copy(card->dev->dev_addr, |
2449 | cmd->data.setadapterparms.data.change_addr.addr); |
2450 | card->info.mac_bits |= QETH_LAYER2_MAC_READ; |
2451 | } |
2452 | - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); |
2453 | return 0; |
2454 | } |
2455 | |
2456 | @@ -4332,13 +4310,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); |
2457 | static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, |
2458 | struct qeth_reply *reply, unsigned long data) |
2459 | { |
2460 | - struct qeth_ipa_cmd *cmd; |
2461 | + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; |
2462 | struct qeth_set_access_ctrl *access_ctrl_req; |
2463 | int fallback = *(int *)reply->param; |
2464 | |
2465 | QETH_CARD_TEXT(card, 4, "setaccb"); |
2466 | + if (cmd->hdr.return_code) |
2467 | + return 0; |
2468 | + qeth_setadpparms_inspect_rc(cmd); |
2469 | |
2470 | - cmd = (struct qeth_ipa_cmd *) data; |
2471 | access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; |
2472 | QETH_DBF_TEXT_(SETUP, 2, "setaccb"); |
2473 | QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); |
2474 | @@ -4411,7 +4391,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, |
2475 | card->options.isolation = card->options.prev_isolation; |
2476 | break; |
2477 | } |
2478 | - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); |
2479 | return 0; |
2480 | } |
2481 | |
2482 | @@ -4699,14 +4678,15 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) |
2483 | static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, |
2484 | struct qeth_reply *reply, unsigned long data) |
2485 | { |
2486 | - struct qeth_ipa_cmd *cmd; |
2487 | + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; |
2488 | struct qeth_qoat_priv *priv; |
2489 | char *resdata; |
2490 | int resdatalen; |
2491 | |
2492 | QETH_CARD_TEXT(card, 3, "qoatcb"); |
2493 | + if (qeth_setadpparms_inspect_rc(cmd)) |
2494 | + return 0; |
2495 | |
2496 | - cmd = (struct qeth_ipa_cmd *)data; |
2497 | priv = (struct qeth_qoat_priv *)reply->param; |
2498 | resdatalen = cmd->data.setadapterparms.hdr.cmdlength; |
2499 | resdata = (char *)data + 28; |
2500 | @@ -4800,21 +4780,18 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) |
2501 | static int qeth_query_card_info_cb(struct qeth_card *card, |
2502 | struct qeth_reply *reply, unsigned long data) |
2503 | { |
2504 | - struct qeth_ipa_cmd *cmd; |
2505 | + struct carrier_info *carrier_info = (struct carrier_info *)reply->param; |
2506 | + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; |
2507 | struct qeth_query_card_info *card_info; |
2508 | - struct carrier_info *carrier_info; |
2509 | |
2510 | QETH_CARD_TEXT(card, 2, "qcrdincb"); |
2511 | - carrier_info = (struct carrier_info *)reply->param; |
2512 | - cmd = (struct qeth_ipa_cmd *)data; |
2513 | - card_info = &cmd->data.setadapterparms.data.card_info; |
2514 | - if (cmd->data.setadapterparms.hdr.return_code == 0) { |
2515 | - carrier_info->card_type = card_info->card_type; |
2516 | - carrier_info->port_mode = card_info->port_mode; |
2517 | - carrier_info->port_speed = card_info->port_speed; |
2518 | - } |
2519 | + if (qeth_setadpparms_inspect_rc(cmd)) |
2520 | + return 0; |
2521 | |
2522 | - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); |
2523 | + card_info = &cmd->data.setadapterparms.data.card_info; |
2524 | + carrier_info->card_type = card_info->card_type; |
2525 | + carrier_info->port_mode = card_info->port_mode; |
2526 | + carrier_info->port_speed = card_info->port_speed; |
2527 | return 0; |
2528 | } |
2529 | |
2530 | @@ -6567,10 +6544,14 @@ static int __init qeth_core_init(void) |
2531 | mutex_init(&qeth_mod_mutex); |
2532 | |
2533 | qeth_wq = create_singlethread_workqueue("qeth_wq"); |
2534 | + if (!qeth_wq) { |
2535 | + rc = -ENOMEM; |
2536 | + goto out_err; |
2537 | + } |
2538 | |
2539 | rc = qeth_register_dbf_views(); |
2540 | if (rc) |
2541 | - goto out_err; |
2542 | + goto dbf_err; |
2543 | qeth_core_root_dev = root_device_register("qeth"); |
2544 | rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); |
2545 | if (rc) |
2546 | @@ -6607,6 +6588,8 @@ static int __init qeth_core_init(void) |
2547 | root_device_unregister(qeth_core_root_dev); |
2548 | register_err: |
2549 | qeth_unregister_dbf_views(); |
2550 | +dbf_err: |
2551 | + destroy_workqueue(qeth_wq); |
2552 | out_err: |
2553 | pr_err("Initializing the qeth device driver failed\n"); |
2554 | return rc; |
2555 | diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h |
2556 | index 067d52e95f02..d7191943ecb8 100644 |
2557 | --- a/include/linux/fsnotify_backend.h |
2558 | +++ b/include/linux/fsnotify_backend.h |
2559 | @@ -217,12 +217,10 @@ struct fsnotify_mark_connector { |
2560 | union { /* Object pointer [lock] */ |
2561 | struct inode *inode; |
2562 | struct vfsmount *mnt; |
2563 | - }; |
2564 | - union { |
2565 | - struct hlist_head list; |
2566 | /* Used listing heads to free after srcu period expires */ |
2567 | struct fsnotify_mark_connector *destroy_next; |
2568 | }; |
2569 | + struct hlist_head list; |
2570 | }; |
2571 | |
2572 | /* |
2573 | diff --git a/include/linux/hmm.h b/include/linux/hmm.h |
2574 | index 36dd21fe5caf..325017ad9311 100644 |
2575 | --- a/include/linux/hmm.h |
2576 | +++ b/include/linux/hmm.h |
2577 | @@ -498,16 +498,23 @@ struct hmm_device { |
2578 | struct hmm_device *hmm_device_new(void *drvdata); |
2579 | void hmm_device_put(struct hmm_device *hmm_device); |
2580 | #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ |
2581 | +#endif /* IS_ENABLED(CONFIG_HMM) */ |
2582 | |
2583 | /* Below are for HMM internal use only! Not to be used by device driver! */ |
2584 | +#if IS_ENABLED(CONFIG_HMM_MIRROR) |
2585 | void hmm_mm_destroy(struct mm_struct *mm); |
2586 | |
2587 | static inline void hmm_mm_init(struct mm_struct *mm) |
2588 | { |
2589 | mm->hmm = NULL; |
2590 | } |
2591 | +#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */ |
2592 | +static inline void hmm_mm_destroy(struct mm_struct *mm) {} |
2593 | +static inline void hmm_mm_init(struct mm_struct *mm) {} |
2594 | +#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */ |
2595 | + |
2596 | + |
2597 | #else /* IS_ENABLED(CONFIG_HMM) */ |
2598 | static inline void hmm_mm_destroy(struct mm_struct *mm) {} |
2599 | static inline void hmm_mm_init(struct mm_struct *mm) {} |
2600 | -#endif /* IS_ENABLED(CONFIG_HMM) */ |
2601 | #endif /* LINUX_HMM_H */ |
2602 | diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h |
2603 | index 7d30892da064..87b8c20d5b27 100644 |
2604 | --- a/include/linux/if_vlan.h |
2605 | +++ b/include/linux/if_vlan.h |
2606 | @@ -639,7 +639,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb) |
2607 | * Returns true if the skb is tagged with multiple vlan headers, regardless |
2608 | * of whether it is hardware accelerated or not. |
2609 | */ |
2610 | -static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) |
2611 | +static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) |
2612 | { |
2613 | __be16 protocol = skb->protocol; |
2614 | |
2615 | @@ -649,6 +649,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) |
2616 | if (likely(!eth_type_vlan(protocol))) |
2617 | return false; |
2618 | |
2619 | + if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) |
2620 | + return false; |
2621 | + |
2622 | veh = (struct vlan_ethhdr *)skb->data; |
2623 | protocol = veh->h_vlan_encapsulated_proto; |
2624 | } |
2625 | @@ -666,7 +669,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb) |
2626 | * |
2627 | * Returns features without unsafe ones if the skb has multiple tags. |
2628 | */ |
2629 | -static inline netdev_features_t vlan_features_check(const struct sk_buff *skb, |
2630 | +static inline netdev_features_t vlan_features_check(struct sk_buff *skb, |
2631 | netdev_features_t features) |
2632 | { |
2633 | if (skb_vlan_tagged_multi(skb)) { |
2634 | diff --git a/include/linux/tpm.h b/include/linux/tpm.h |
2635 | index bcdd3790e94d..06639fb6ab85 100644 |
2636 | --- a/include/linux/tpm.h |
2637 | +++ b/include/linux/tpm.h |
2638 | @@ -44,7 +44,7 @@ struct tpm_class_ops { |
2639 | bool (*update_timeouts)(struct tpm_chip *chip, |
2640 | unsigned long *timeout_cap); |
2641 | int (*request_locality)(struct tpm_chip *chip, int loc); |
2642 | - void (*relinquish_locality)(struct tpm_chip *chip, int loc); |
2643 | + int (*relinquish_locality)(struct tpm_chip *chip, int loc); |
2644 | void (*clk_enable)(struct tpm_chip *chip, bool value); |
2645 | }; |
2646 | |
2647 | diff --git a/include/net/ife.h b/include/net/ife.h |
2648 | index 44b9c00f7223..e117617e3c34 100644 |
2649 | --- a/include/net/ife.h |
2650 | +++ b/include/net/ife.h |
2651 | @@ -12,7 +12,8 @@ |
2652 | void *ife_encode(struct sk_buff *skb, u16 metalen); |
2653 | void *ife_decode(struct sk_buff *skb, u16 *metalen); |
2654 | |
2655 | -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen); |
2656 | +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype, |
2657 | + u16 *dlen, u16 *totlen); |
2658 | int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, |
2659 | const void *dval); |
2660 | |
2661 | diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h |
2662 | index 5c40f118c0fa..df528a623548 100644 |
2663 | --- a/include/net/llc_conn.h |
2664 | +++ b/include/net/llc_conn.h |
2665 | @@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb) |
2666 | |
2667 | struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, |
2668 | struct proto *prot, int kern); |
2669 | +void llc_sk_stop_all_timers(struct sock *sk, bool sync); |
2670 | void llc_sk_free(struct sock *sk); |
2671 | |
2672 | void llc_sk_reset(struct sock *sk); |
2673 | diff --git a/kernel/kprobes.c b/kernel/kprobes.c |
2674 | index 102160ff5c66..ea619021d901 100644 |
2675 | --- a/kernel/kprobes.c |
2676 | +++ b/kernel/kprobes.c |
2677 | @@ -2428,7 +2428,7 @@ static int kprobe_blacklist_seq_show(struct seq_file *m, void *v) |
2678 | struct kprobe_blacklist_entry *ent = |
2679 | list_entry(v, struct kprobe_blacklist_entry, list); |
2680 | |
2681 | - seq_printf(m, "0x%p-0x%p\t%ps\n", (void *)ent->start_addr, |
2682 | + seq_printf(m, "0x%px-0x%px\t%ps\n", (void *)ent->start_addr, |
2683 | (void *)ent->end_addr, (void *)ent->start_addr); |
2684 | return 0; |
2685 | } |
2686 | diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h |
2687 | index e954ae3d82c0..e3a658bac10f 100644 |
2688 | --- a/kernel/trace/trace_entries.h |
2689 | +++ b/kernel/trace/trace_entries.h |
2690 | @@ -356,7 +356,7 @@ FTRACE_ENTRY(hwlat, hwlat_entry, |
2691 | __field( unsigned int, seqnum ) |
2692 | ), |
2693 | |
2694 | - F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llunmi-ts:%llu\tnmi-count:%u\n", |
2695 | + F_printk("cnt:%u\tts:%010llu.%010lu\tinner:%llu\touter:%llu\tnmi-ts:%llu\tnmi-count:%u\n", |
2696 | __entry->seqnum, |
2697 | __entry->tv_sec, |
2698 | __entry->tv_nsec, |
2699 | diff --git a/net/core/dev.c b/net/core/dev.c |
2700 | index c4aa2941dbfd..3e550507e9f0 100644 |
2701 | --- a/net/core/dev.c |
2702 | +++ b/net/core/dev.c |
2703 | @@ -2942,7 +2942,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb, |
2704 | } |
2705 | EXPORT_SYMBOL(passthru_features_check); |
2706 | |
2707 | -static netdev_features_t dflt_features_check(const struct sk_buff *skb, |
2708 | +static netdev_features_t dflt_features_check(struct sk_buff *skb, |
2709 | struct net_device *dev, |
2710 | netdev_features_t features) |
2711 | { |
2712 | diff --git a/net/core/neighbour.c b/net/core/neighbour.c |
2713 | index 7b7a14abba28..ce519861be59 100644 |
2714 | --- a/net/core/neighbour.c |
2715 | +++ b/net/core/neighbour.c |
2716 | @@ -55,7 +55,8 @@ static void neigh_timer_handler(struct timer_list *t); |
2717 | static void __neigh_notify(struct neighbour *n, int type, int flags, |
2718 | u32 pid); |
2719 | static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid); |
2720 | -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev); |
2721 | +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, |
2722 | + struct net_device *dev); |
2723 | |
2724 | #ifdef CONFIG_PROC_FS |
2725 | static const struct file_operations neigh_stat_seq_fops; |
2726 | @@ -291,8 +292,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev) |
2727 | { |
2728 | write_lock_bh(&tbl->lock); |
2729 | neigh_flush_dev(tbl, dev); |
2730 | - pneigh_ifdown(tbl, dev); |
2731 | - write_unlock_bh(&tbl->lock); |
2732 | + pneigh_ifdown_and_unlock(tbl, dev); |
2733 | |
2734 | del_timer_sync(&tbl->proxy_timer); |
2735 | pneigh_queue_purge(&tbl->proxy_queue); |
2736 | @@ -681,9 +681,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey, |
2737 | return -ENOENT; |
2738 | } |
2739 | |
2740 | -static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) |
2741 | +static int pneigh_ifdown_and_unlock(struct neigh_table *tbl, |
2742 | + struct net_device *dev) |
2743 | { |
2744 | - struct pneigh_entry *n, **np; |
2745 | + struct pneigh_entry *n, **np, *freelist = NULL; |
2746 | u32 h; |
2747 | |
2748 | for (h = 0; h <= PNEIGH_HASHMASK; h++) { |
2749 | @@ -691,16 +692,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev) |
2750 | while ((n = *np) != NULL) { |
2751 | if (!dev || n->dev == dev) { |
2752 | *np = n->next; |
2753 | - if (tbl->pdestructor) |
2754 | - tbl->pdestructor(n); |
2755 | - if (n->dev) |
2756 | - dev_put(n->dev); |
2757 | - kfree(n); |
2758 | + n->next = freelist; |
2759 | + freelist = n; |
2760 | continue; |
2761 | } |
2762 | np = &n->next; |
2763 | } |
2764 | } |
2765 | + write_unlock_bh(&tbl->lock); |
2766 | + while ((n = freelist)) { |
2767 | + freelist = n->next; |
2768 | + n->next = NULL; |
2769 | + if (tbl->pdestructor) |
2770 | + tbl->pdestructor(n); |
2771 | + if (n->dev) |
2772 | + dev_put(n->dev); |
2773 | + kfree(n); |
2774 | + } |
2775 | return -ENOENT; |
2776 | } |
2777 | |
2778 | @@ -2323,12 +2331,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb, |
2779 | |
2780 | err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL, NULL); |
2781 | if (!err) { |
2782 | - if (tb[NDA_IFINDEX]) |
2783 | + if (tb[NDA_IFINDEX]) { |
2784 | + if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32)) |
2785 | + return -EINVAL; |
2786 | filter_idx = nla_get_u32(tb[NDA_IFINDEX]); |
2787 | - |
2788 | - if (tb[NDA_MASTER]) |
2789 | + } |
2790 | + if (tb[NDA_MASTER]) { |
2791 | + if (nla_len(tb[NDA_MASTER]) != sizeof(u32)) |
2792 | + return -EINVAL; |
2793 | filter_master_idx = nla_get_u32(tb[NDA_MASTER]); |
2794 | - |
2795 | + } |
2796 | if (filter_idx || filter_master_idx) |
2797 | flags |= NLM_F_DUMP_FILTERED; |
2798 | } |
2799 | diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c |
2800 | index e1d4d898a007..ed372d550137 100644 |
2801 | --- a/net/dns_resolver/dns_key.c |
2802 | +++ b/net/dns_resolver/dns_key.c |
2803 | @@ -91,9 +91,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) |
2804 | |
2805 | next_opt = memchr(opt, '#', end - opt) ?: end; |
2806 | opt_len = next_opt - opt; |
2807 | - if (!opt_len) { |
2808 | - printk(KERN_WARNING |
2809 | - "Empty option to dns_resolver key\n"); |
2810 | + if (opt_len <= 0 || opt_len > 128) { |
2811 | + pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n", |
2812 | + opt_len); |
2813 | return -EINVAL; |
2814 | } |
2815 | |
2816 | @@ -127,10 +127,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep) |
2817 | } |
2818 | |
2819 | bad_option_value: |
2820 | - printk(KERN_WARNING |
2821 | - "Option '%*.*s' to dns_resolver key:" |
2822 | - " bad/missing value\n", |
2823 | - opt_nlen, opt_nlen, opt); |
2824 | + pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n", |
2825 | + opt_nlen, opt_nlen, opt); |
2826 | return -EINVAL; |
2827 | } while (opt = next_opt + 1, opt < end); |
2828 | } |
2829 | diff --git a/net/ife/ife.c b/net/ife/ife.c |
2830 | index 7d1ec76e7f43..13bbf8cb6a39 100644 |
2831 | --- a/net/ife/ife.c |
2832 | +++ b/net/ife/ife.c |
2833 | @@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen) |
2834 | int total_pull; |
2835 | u16 ifehdrln; |
2836 | |
2837 | + if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN)) |
2838 | + return NULL; |
2839 | + |
2840 | ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len); |
2841 | ifehdrln = ntohs(ifehdr->metalen); |
2842 | total_pull = skb->dev->hard_header_len + ifehdrln; |
2843 | @@ -92,12 +95,43 @@ struct meta_tlvhdr { |
2844 | __be16 len; |
2845 | }; |
2846 | |
2847 | +static bool __ife_tlv_meta_valid(const unsigned char *skbdata, |
2848 | + const unsigned char *ifehdr_end) |
2849 | +{ |
2850 | + const struct meta_tlvhdr *tlv; |
2851 | + u16 tlvlen; |
2852 | + |
2853 | + if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end)) |
2854 | + return false; |
2855 | + |
2856 | + tlv = (const struct meta_tlvhdr *)skbdata; |
2857 | + tlvlen = ntohs(tlv->len); |
2858 | + |
2859 | + /* tlv length field is inc header, check on minimum */ |
2860 | + if (tlvlen < NLA_HDRLEN) |
2861 | + return false; |
2862 | + |
2863 | + /* overflow by NLA_ALIGN check */ |
2864 | + if (NLA_ALIGN(tlvlen) < tlvlen) |
2865 | + return false; |
2866 | + |
2867 | + if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end)) |
2868 | + return false; |
2869 | + |
2870 | + return true; |
2871 | +} |
2872 | + |
2873 | /* Caller takes care of presenting data in network order |
2874 | */ |
2875 | -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen) |
2876 | +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype, |
2877 | + u16 *dlen, u16 *totlen) |
2878 | { |
2879 | - struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata; |
2880 | + struct meta_tlvhdr *tlv; |
2881 | + |
2882 | + if (!__ife_tlv_meta_valid(skbdata, ifehdr_end)) |
2883 | + return NULL; |
2884 | |
2885 | + tlv = (struct meta_tlvhdr *)skbdata; |
2886 | *dlen = ntohs(tlv->len) - NLA_HDRLEN; |
2887 | *attrtype = ntohs(tlv->type); |
2888 | |
2889 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
2890 | index 8b8059b7af4d..1ab8733dac5f 100644 |
2891 | --- a/net/ipv4/tcp.c |
2892 | +++ b/net/ipv4/tcp.c |
2893 | @@ -2385,6 +2385,7 @@ void tcp_write_queue_purge(struct sock *sk) |
2894 | INIT_LIST_HEAD(&tcp_sk(sk)->tsorted_sent_queue); |
2895 | sk_mem_reclaim(sk); |
2896 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
2897 | + tcp_sk(sk)->packets_out = 0; |
2898 | } |
2899 | |
2900 | int tcp_disconnect(struct sock *sk, int flags) |
2901 | @@ -2434,7 +2435,6 @@ int tcp_disconnect(struct sock *sk, int flags) |
2902 | icsk->icsk_backoff = 0; |
2903 | tp->snd_cwnd = 2; |
2904 | icsk->icsk_probes_out = 0; |
2905 | - tp->packets_out = 0; |
2906 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
2907 | tp->snd_cwnd_cnt = 0; |
2908 | tp->window_clamp = 0; |
2909 | @@ -2830,8 +2830,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level, |
2910 | #ifdef CONFIG_TCP_MD5SIG |
2911 | case TCP_MD5SIG: |
2912 | case TCP_MD5SIG_EXT: |
2913 | - /* Read the IP->Key mappings from userspace */ |
2914 | - err = tp->af_specific->md5_parse(sk, optname, optval, optlen); |
2915 | + if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) |
2916 | + err = tp->af_specific->md5_parse(sk, optname, optval, optlen); |
2917 | + else |
2918 | + err = -EINVAL; |
2919 | break; |
2920 | #endif |
2921 | case TCP_USER_TIMEOUT: |
2922 | diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c |
2923 | index ff6cd98ce8d5..31ca27fdde66 100644 |
2924 | --- a/net/ipv4/tcp_input.c |
2925 | +++ b/net/ipv4/tcp_input.c |
2926 | @@ -3871,11 +3871,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) |
2927 | int length = (th->doff << 2) - sizeof(*th); |
2928 | const u8 *ptr = (const u8 *)(th + 1); |
2929 | |
2930 | - /* If the TCP option is too short, we can short cut */ |
2931 | - if (length < TCPOLEN_MD5SIG) |
2932 | - return NULL; |
2933 | - |
2934 | - while (length > 0) { |
2935 | + /* If not enough data remaining, we can short cut */ |
2936 | + while (length >= TCPOLEN_MD5SIG) { |
2937 | int opcode = *ptr++; |
2938 | int opsize; |
2939 | |
2940 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
2941 | index fc74352fac12..74a2e37412b2 100644 |
2942 | --- a/net/ipv6/route.c |
2943 | +++ b/net/ipv6/route.c |
2944 | @@ -3862,6 +3862,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu) |
2945 | |
2946 | static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { |
2947 | [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, |
2948 | + [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) }, |
2949 | [RTA_OIF] = { .type = NLA_U32 }, |
2950 | [RTA_IIF] = { .type = NLA_U32 }, |
2951 | [RTA_PRIORITY] = { .type = NLA_U32 }, |
2952 | @@ -3873,6 +3874,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { |
2953 | [RTA_EXPIRES] = { .type = NLA_U32 }, |
2954 | [RTA_UID] = { .type = NLA_U32 }, |
2955 | [RTA_MARK] = { .type = NLA_U32 }, |
2956 | + [RTA_TABLE] = { .type = NLA_U32 }, |
2957 | }; |
2958 | |
2959 | static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, |
2960 | diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c |
2961 | index f343e6f0fc95..5fe139484919 100644 |
2962 | --- a/net/ipv6/seg6_iptunnel.c |
2963 | +++ b/net/ipv6/seg6_iptunnel.c |
2964 | @@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) |
2965 | isrh->nexthdr = proto; |
2966 | |
2967 | hdr->daddr = isrh->segments[isrh->first_segment]; |
2968 | - set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr); |
2969 | + set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr); |
2970 | |
2971 | #ifdef CONFIG_IPV6_SEG6_HMAC |
2972 | if (sr_has_hmac(isrh)) { |
2973 | diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c |
2974 | index 0fbd3ee26165..40261cb68e83 100644 |
2975 | --- a/net/l2tp/l2tp_core.c |
2976 | +++ b/net/l2tp/l2tp_core.c |
2977 | @@ -183,6 +183,26 @@ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id) |
2978 | } |
2979 | EXPORT_SYMBOL_GPL(l2tp_tunnel_get); |
2980 | |
2981 | +struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) |
2982 | +{ |
2983 | + const struct l2tp_net *pn = l2tp_pernet(net); |
2984 | + struct l2tp_tunnel *tunnel; |
2985 | + int count = 0; |
2986 | + |
2987 | + rcu_read_lock_bh(); |
2988 | + list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { |
2989 | + if (++count > nth) { |
2990 | + l2tp_tunnel_inc_refcount(tunnel); |
2991 | + rcu_read_unlock_bh(); |
2992 | + return tunnel; |
2993 | + } |
2994 | + } |
2995 | + rcu_read_unlock_bh(); |
2996 | + |
2997 | + return NULL; |
2998 | +} |
2999 | +EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth); |
3000 | + |
3001 | /* Lookup a session. A new reference is held on the returned session. */ |
3002 | struct l2tp_session *l2tp_session_get(const struct net *net, |
3003 | struct l2tp_tunnel *tunnel, |
3004 | @@ -335,26 +355,6 @@ int l2tp_session_register(struct l2tp_session *session, |
3005 | } |
3006 | EXPORT_SYMBOL_GPL(l2tp_session_register); |
3007 | |
3008 | -struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth) |
3009 | -{ |
3010 | - struct l2tp_net *pn = l2tp_pernet(net); |
3011 | - struct l2tp_tunnel *tunnel; |
3012 | - int count = 0; |
3013 | - |
3014 | - rcu_read_lock_bh(); |
3015 | - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) { |
3016 | - if (++count > nth) { |
3017 | - rcu_read_unlock_bh(); |
3018 | - return tunnel; |
3019 | - } |
3020 | - } |
3021 | - |
3022 | - rcu_read_unlock_bh(); |
3023 | - |
3024 | - return NULL; |
3025 | -} |
3026 | -EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth); |
3027 | - |
3028 | /***************************************************************************** |
3029 | * Receive data handling |
3030 | *****************************************************************************/ |
3031 | diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h |
3032 | index ba33cbec71eb..c199020f8a8a 100644 |
3033 | --- a/net/l2tp/l2tp_core.h |
3034 | +++ b/net/l2tp/l2tp_core.h |
3035 | @@ -212,6 +212,8 @@ static inline void *l2tp_session_priv(struct l2tp_session *session) |
3036 | } |
3037 | |
3038 | struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); |
3039 | +struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); |
3040 | + |
3041 | void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); |
3042 | |
3043 | struct l2tp_session *l2tp_session_get(const struct net *net, |
3044 | @@ -220,7 +222,6 @@ struct l2tp_session *l2tp_session_get(const struct net *net, |
3045 | struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); |
3046 | struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, |
3047 | const char *ifname); |
3048 | -struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth); |
3049 | |
3050 | int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, |
3051 | u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, |
3052 | diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c |
3053 | index 72e713da4733..7f1e842ef05a 100644 |
3054 | --- a/net/l2tp/l2tp_debugfs.c |
3055 | +++ b/net/l2tp/l2tp_debugfs.c |
3056 | @@ -47,7 +47,11 @@ struct l2tp_dfs_seq_data { |
3057 | |
3058 | static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd) |
3059 | { |
3060 | - pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx); |
3061 | + /* Drop reference taken during previous invocation */ |
3062 | + if (pd->tunnel) |
3063 | + l2tp_tunnel_dec_refcount(pd->tunnel); |
3064 | + |
3065 | + pd->tunnel = l2tp_tunnel_get_nth(pd->net, pd->tunnel_idx); |
3066 | pd->tunnel_idx++; |
3067 | } |
3068 | |
3069 | @@ -96,7 +100,17 @@ static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos) |
3070 | |
3071 | static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) |
3072 | { |
3073 | - /* nothing to do */ |
3074 | + struct l2tp_dfs_seq_data *pd = v; |
3075 | + |
3076 | + if (!pd || pd == SEQ_START_TOKEN) |
3077 | + return; |
3078 | + |
3079 | + /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ |
3080 | + if (pd->tunnel) { |
3081 | + l2tp_tunnel_dec_refcount(pd->tunnel); |
3082 | + pd->tunnel = NULL; |
3083 | + pd->session = NULL; |
3084 | + } |
3085 | } |
3086 | |
3087 | static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) |
3088 | diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c |
3089 | index b05dbd9ffcb2..6616c9fd292f 100644 |
3090 | --- a/net/l2tp/l2tp_netlink.c |
3091 | +++ b/net/l2tp/l2tp_netlink.c |
3092 | @@ -487,14 +487,17 @@ static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback |
3093 | struct net *net = sock_net(skb->sk); |
3094 | |
3095 | for (;;) { |
3096 | - tunnel = l2tp_tunnel_find_nth(net, ti); |
3097 | + tunnel = l2tp_tunnel_get_nth(net, ti); |
3098 | if (tunnel == NULL) |
3099 | goto out; |
3100 | |
3101 | if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).portid, |
3102 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
3103 | - tunnel, L2TP_CMD_TUNNEL_GET) < 0) |
3104 | + tunnel, L2TP_CMD_TUNNEL_GET) < 0) { |
3105 | + l2tp_tunnel_dec_refcount(tunnel); |
3106 | goto out; |
3107 | + } |
3108 | + l2tp_tunnel_dec_refcount(tunnel); |
3109 | |
3110 | ti++; |
3111 | } |
3112 | @@ -848,7 +851,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback |
3113 | |
3114 | for (;;) { |
3115 | if (tunnel == NULL) { |
3116 | - tunnel = l2tp_tunnel_find_nth(net, ti); |
3117 | + tunnel = l2tp_tunnel_get_nth(net, ti); |
3118 | if (tunnel == NULL) |
3119 | goto out; |
3120 | } |
3121 | @@ -856,6 +859,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback |
3122 | session = l2tp_session_get_nth(tunnel, si); |
3123 | if (session == NULL) { |
3124 | ti++; |
3125 | + l2tp_tunnel_dec_refcount(tunnel); |
3126 | tunnel = NULL; |
3127 | si = 0; |
3128 | continue; |
3129 | @@ -865,6 +869,7 @@ static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback |
3130 | cb->nlh->nlmsg_seq, NLM_F_MULTI, |
3131 | session, L2TP_CMD_SESSION_GET) < 0) { |
3132 | l2tp_session_dec_refcount(session); |
3133 | + l2tp_tunnel_dec_refcount(tunnel); |
3134 | break; |
3135 | } |
3136 | l2tp_session_dec_refcount(session); |
3137 | diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c |
3138 | index 3d7887cc599b..0c4530ad74be 100644 |
3139 | --- a/net/l2tp/l2tp_ppp.c |
3140 | +++ b/net/l2tp/l2tp_ppp.c |
3141 | @@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, |
3142 | lock_sock(sk); |
3143 | |
3144 | error = -EINVAL; |
3145 | + |
3146 | + if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) && |
3147 | + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) && |
3148 | + sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) && |
3149 | + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6)) |
3150 | + goto end; |
3151 | + |
3152 | if (sp->sa_protocol != PX_PROTO_OL2TP) |
3153 | goto end; |
3154 | |
3155 | @@ -1552,16 +1559,19 @@ struct pppol2tp_seq_data { |
3156 | |
3157 | static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd) |
3158 | { |
3159 | + /* Drop reference taken during previous invocation */ |
3160 | + if (pd->tunnel) |
3161 | + l2tp_tunnel_dec_refcount(pd->tunnel); |
3162 | + |
3163 | for (;;) { |
3164 | - pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx); |
3165 | + pd->tunnel = l2tp_tunnel_get_nth(net, pd->tunnel_idx); |
3166 | pd->tunnel_idx++; |
3167 | |
3168 | - if (pd->tunnel == NULL) |
3169 | - break; |
3170 | + /* Only accept L2TPv2 tunnels */ |
3171 | + if (!pd->tunnel || pd->tunnel->version == 2) |
3172 | + return; |
3173 | |
3174 | - /* Ignore L2TPv3 tunnels */ |
3175 | - if (pd->tunnel->version < 3) |
3176 | - break; |
3177 | + l2tp_tunnel_dec_refcount(pd->tunnel); |
3178 | } |
3179 | } |
3180 | |
3181 | @@ -1610,7 +1620,17 @@ static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos) |
3182 | |
3183 | static void pppol2tp_seq_stop(struct seq_file *p, void *v) |
3184 | { |
3185 | - /* nothing to do */ |
3186 | + struct pppol2tp_seq_data *pd = v; |
3187 | + |
3188 | + if (!pd || pd == SEQ_START_TOKEN) |
3189 | + return; |
3190 | + |
3191 | + /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ |
3192 | + if (pd->tunnel) { |
3193 | + l2tp_tunnel_dec_refcount(pd->tunnel); |
3194 | + pd->tunnel = NULL; |
3195 | + pd->session = NULL; |
3196 | + } |
3197 | } |
3198 | |
3199 | static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) |
3200 | diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c |
3201 | index c38d16f22d2a..cf41d9b4a0b8 100644 |
3202 | --- a/net/llc/af_llc.c |
3203 | +++ b/net/llc/af_llc.c |
3204 | @@ -199,9 +199,19 @@ static int llc_ui_release(struct socket *sock) |
3205 | llc->laddr.lsap, llc->daddr.lsap); |
3206 | if (!llc_send_disc(sk)) |
3207 | llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); |
3208 | - if (!sock_flag(sk, SOCK_ZAPPED)) |
3209 | + if (!sock_flag(sk, SOCK_ZAPPED)) { |
3210 | + struct llc_sap *sap = llc->sap; |
3211 | + |
3212 | + /* Hold this for release_sock(), so that llc_backlog_rcv() |
3213 | + * could still use it. |
3214 | + */ |
3215 | + llc_sap_hold(sap); |
3216 | llc_sap_remove_socket(llc->sap, sk); |
3217 | - release_sock(sk); |
3218 | + release_sock(sk); |
3219 | + llc_sap_put(sap); |
3220 | + } else { |
3221 | + release_sock(sk); |
3222 | + } |
3223 | if (llc->dev) |
3224 | dev_put(llc->dev); |
3225 | sock_put(sk); |
3226 | diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c |
3227 | index 163121192aca..4d78375f9872 100644 |
3228 | --- a/net/llc/llc_c_ac.c |
3229 | +++ b/net/llc/llc_c_ac.c |
3230 | @@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) |
3231 | |
3232 | int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) |
3233 | { |
3234 | - struct llc_sock *llc = llc_sk(sk); |
3235 | - |
3236 | - del_timer(&llc->pf_cycle_timer.timer); |
3237 | - del_timer(&llc->ack_timer.timer); |
3238 | - del_timer(&llc->rej_sent_timer.timer); |
3239 | - del_timer(&llc->busy_state_timer.timer); |
3240 | - llc->ack_must_be_send = 0; |
3241 | - llc->ack_pf = 0; |
3242 | + llc_sk_stop_all_timers(sk, false); |
3243 | return 0; |
3244 | } |
3245 | |
3246 | diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c |
3247 | index 110e32bcb399..c0ac522b48a1 100644 |
3248 | --- a/net/llc/llc_conn.c |
3249 | +++ b/net/llc/llc_conn.c |
3250 | @@ -961,6 +961,26 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr |
3251 | return sk; |
3252 | } |
3253 | |
3254 | +void llc_sk_stop_all_timers(struct sock *sk, bool sync) |
3255 | +{ |
3256 | + struct llc_sock *llc = llc_sk(sk); |
3257 | + |
3258 | + if (sync) { |
3259 | + del_timer_sync(&llc->pf_cycle_timer.timer); |
3260 | + del_timer_sync(&llc->ack_timer.timer); |
3261 | + del_timer_sync(&llc->rej_sent_timer.timer); |
3262 | + del_timer_sync(&llc->busy_state_timer.timer); |
3263 | + } else { |
3264 | + del_timer(&llc->pf_cycle_timer.timer); |
3265 | + del_timer(&llc->ack_timer.timer); |
3266 | + del_timer(&llc->rej_sent_timer.timer); |
3267 | + del_timer(&llc->busy_state_timer.timer); |
3268 | + } |
3269 | + |
3270 | + llc->ack_must_be_send = 0; |
3271 | + llc->ack_pf = 0; |
3272 | +} |
3273 | + |
3274 | /** |
3275 | * llc_sk_free - Frees a LLC socket |
3276 | * @sk - socket to free |
3277 | @@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk) |
3278 | |
3279 | llc->state = LLC_CONN_OUT_OF_SVC; |
3280 | /* Stop all (possibly) running timers */ |
3281 | - llc_conn_ac_stop_all_timers(sk, NULL); |
3282 | + llc_sk_stop_all_timers(sk, true); |
3283 | #ifdef DEBUG_LLC_CONN_ALLOC |
3284 | printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, |
3285 | skb_queue_len(&llc->pdu_unack_q), |
3286 | diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c |
3287 | index e0f3f4aeeb4f..3b43b1fcd618 100644 |
3288 | --- a/net/packet/af_packet.c |
3289 | +++ b/net/packet/af_packet.c |
3290 | @@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) |
3291 | skb_set_queue_mapping(skb, queue_index); |
3292 | } |
3293 | |
3294 | -/* register_prot_hook must be invoked with the po->bind_lock held, |
3295 | +/* __register_prot_hook must be invoked through register_prot_hook |
3296 | * or from a context in which asynchronous accesses to the packet |
3297 | * socket is not possible (packet_create()). |
3298 | */ |
3299 | -static void register_prot_hook(struct sock *sk) |
3300 | +static void __register_prot_hook(struct sock *sk) |
3301 | { |
3302 | struct packet_sock *po = pkt_sk(sk); |
3303 | |
3304 | @@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk) |
3305 | } |
3306 | } |
3307 | |
3308 | -/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock |
3309 | - * held. If the sync parameter is true, we will temporarily drop |
3310 | +static void register_prot_hook(struct sock *sk) |
3311 | +{ |
3312 | + lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); |
3313 | + __register_prot_hook(sk); |
3314 | +} |
3315 | + |
3316 | +/* If the sync parameter is true, we will temporarily drop |
3317 | * the po->bind_lock and do a synchronize_net to make sure no |
3318 | * asynchronous packet processing paths still refer to the elements |
3319 | * of po->prot_hook. If the sync parameter is false, it is the |
3320 | @@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync) |
3321 | { |
3322 | struct packet_sock *po = pkt_sk(sk); |
3323 | |
3324 | + lockdep_assert_held_once(&po->bind_lock); |
3325 | + |
3326 | po->running = 0; |
3327 | |
3328 | if (po->fanout) |
3329 | @@ -3008,6 +3015,7 @@ static int packet_release(struct socket *sock) |
3330 | |
3331 | packet_flush_mclist(sk); |
3332 | |
3333 | + lock_sock(sk); |
3334 | if (po->rx_ring.pg_vec) { |
3335 | memset(&req_u, 0, sizeof(req_u)); |
3336 | packet_set_ring(sk, &req_u, 1, 0); |
3337 | @@ -3017,6 +3025,7 @@ static int packet_release(struct socket *sock) |
3338 | memset(&req_u, 0, sizeof(req_u)); |
3339 | packet_set_ring(sk, &req_u, 1, 1); |
3340 | } |
3341 | + release_sock(sk); |
3342 | |
3343 | f = fanout_release(sk); |
3344 | |
3345 | @@ -3250,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, |
3346 | |
3347 | if (proto) { |
3348 | po->prot_hook.type = proto; |
3349 | - register_prot_hook(sk); |
3350 | + __register_prot_hook(sk); |
3351 | } |
3352 | |
3353 | mutex_lock(&net->packet.sklist_lock); |
3354 | @@ -3645,6 +3654,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
3355 | union tpacket_req_u req_u; |
3356 | int len; |
3357 | |
3358 | + lock_sock(sk); |
3359 | switch (po->tp_version) { |
3360 | case TPACKET_V1: |
3361 | case TPACKET_V2: |
3362 | @@ -3655,12 +3665,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
3363 | len = sizeof(req_u.req3); |
3364 | break; |
3365 | } |
3366 | - if (optlen < len) |
3367 | - return -EINVAL; |
3368 | - if (copy_from_user(&req_u.req, optval, len)) |
3369 | - return -EFAULT; |
3370 | - return packet_set_ring(sk, &req_u, 0, |
3371 | - optname == PACKET_TX_RING); |
3372 | + if (optlen < len) { |
3373 | + ret = -EINVAL; |
3374 | + } else { |
3375 | + if (copy_from_user(&req_u.req, optval, len)) |
3376 | + ret = -EFAULT; |
3377 | + else |
3378 | + ret = packet_set_ring(sk, &req_u, 0, |
3379 | + optname == PACKET_TX_RING); |
3380 | + } |
3381 | + release_sock(sk); |
3382 | + return ret; |
3383 | } |
3384 | case PACKET_COPY_THRESH: |
3385 | { |
3386 | @@ -3726,12 +3741,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
3387 | |
3388 | if (optlen != sizeof(val)) |
3389 | return -EINVAL; |
3390 | - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
3391 | - return -EBUSY; |
3392 | if (copy_from_user(&val, optval, sizeof(val))) |
3393 | return -EFAULT; |
3394 | - po->tp_loss = !!val; |
3395 | - return 0; |
3396 | + |
3397 | + lock_sock(sk); |
3398 | + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
3399 | + ret = -EBUSY; |
3400 | + } else { |
3401 | + po->tp_loss = !!val; |
3402 | + ret = 0; |
3403 | + } |
3404 | + release_sock(sk); |
3405 | + return ret; |
3406 | } |
3407 | case PACKET_AUXDATA: |
3408 | { |
3409 | @@ -3742,7 +3763,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
3410 | if (copy_from_user(&val, optval, sizeof(val))) |
3411 | return -EFAULT; |
3412 | |
3413 | + lock_sock(sk); |
3414 | po->auxdata = !!val; |
3415 | + release_sock(sk); |
3416 | return 0; |
3417 | } |
3418 | case PACKET_ORIGDEV: |
3419 | @@ -3754,7 +3777,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
3420 | if (copy_from_user(&val, optval, sizeof(val))) |
3421 | return -EFAULT; |
3422 | |
3423 | + lock_sock(sk); |
3424 | po->origdev = !!val; |
3425 | + release_sock(sk); |
3426 | return 0; |
3427 | } |
3428 | case PACKET_VNET_HDR: |
3429 | @@ -3763,15 +3788,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
3430 | |
3431 | if (sock->type != SOCK_RAW) |
3432 | return -EINVAL; |
3433 | - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
3434 | - return -EBUSY; |
3435 | if (optlen < sizeof(val)) |
3436 | return -EINVAL; |
3437 | if (copy_from_user(&val, optval, sizeof(val))) |
3438 | return -EFAULT; |
3439 | |
3440 | - po->has_vnet_hdr = !!val; |
3441 | - return 0; |
3442 | + lock_sock(sk); |
3443 | + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
3444 | + ret = -EBUSY; |
3445 | + } else { |
3446 | + po->has_vnet_hdr = !!val; |
3447 | + ret = 0; |
3448 | + } |
3449 | + release_sock(sk); |
3450 | + return ret; |
3451 | } |
3452 | case PACKET_TIMESTAMP: |
3453 | { |
3454 | @@ -3809,11 +3839,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv |
3455 | |
3456 | if (optlen != sizeof(val)) |
3457 | return -EINVAL; |
3458 | - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) |
3459 | - return -EBUSY; |
3460 | if (copy_from_user(&val, optval, sizeof(val))) |
3461 | return -EFAULT; |
3462 | - po->tp_tx_has_off = !!val; |
3463 | + |
3464 | + lock_sock(sk); |
3465 | + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { |
3466 | + ret = -EBUSY; |
3467 | + } else { |
3468 | + po->tp_tx_has_off = !!val; |
3469 | + ret = 0; |
3470 | + } |
3471 | + release_sock(sk); |
3472 | return 0; |
3473 | } |
3474 | case PACKET_QDISC_BYPASS: |
3475 | @@ -4210,8 +4246,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
3476 | /* Added to avoid minimal code churn */ |
3477 | struct tpacket_req *req = &req_u->req; |
3478 | |
3479 | - lock_sock(sk); |
3480 | - |
3481 | rb = tx_ring ? &po->tx_ring : &po->rx_ring; |
3482 | rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; |
3483 | |
3484 | @@ -4349,7 +4383,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, |
3485 | if (pg_vec) |
3486 | free_pg_vec(pg_vec, order, req->tp_block_nr); |
3487 | out: |
3488 | - release_sock(sk); |
3489 | return err; |
3490 | } |
3491 | |
3492 | diff --git a/net/packet/internal.h b/net/packet/internal.h |
3493 | index a1d2b2319ae9..3bb7c5fb3bff 100644 |
3494 | --- a/net/packet/internal.h |
3495 | +++ b/net/packet/internal.h |
3496 | @@ -112,10 +112,12 @@ struct packet_sock { |
3497 | int copy_thresh; |
3498 | spinlock_t bind_lock; |
3499 | struct mutex pg_vec_lock; |
3500 | - unsigned int running:1, /* prot_hook is attached*/ |
3501 | - auxdata:1, |
3502 | + unsigned int running; /* bind_lock must be held */ |
3503 | + unsigned int auxdata:1, /* writer must hold sock lock */ |
3504 | origdev:1, |
3505 | - has_vnet_hdr:1; |
3506 | + has_vnet_hdr:1, |
3507 | + tp_loss:1, |
3508 | + tp_tx_has_off:1; |
3509 | int pressure; |
3510 | int ifindex; /* bound device */ |
3511 | __be16 num; |
3512 | @@ -125,8 +127,6 @@ struct packet_sock { |
3513 | enum tpacket_versions tp_version; |
3514 | unsigned int tp_hdrlen; |
3515 | unsigned int tp_reserve; |
3516 | - unsigned int tp_loss:1; |
3517 | - unsigned int tp_tx_has_off:1; |
3518 | unsigned int tp_tstamp; |
3519 | struct net_device __rcu *cached_dev; |
3520 | int (*xmit)(struct sk_buff *skb); |
3521 | diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c |
3522 | index 5954e992685a..1d477b054f2e 100644 |
3523 | --- a/net/sched/act_ife.c |
3524 | +++ b/net/sched/act_ife.c |
3525 | @@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, |
3526 | } |
3527 | } |
3528 | |
3529 | - return 0; |
3530 | + return -ENOENT; |
3531 | } |
3532 | |
3533 | static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, |
3534 | @@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, |
3535 | u16 mtype; |
3536 | u16 dlen; |
3537 | |
3538 | - curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL); |
3539 | + curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype, |
3540 | + &dlen, NULL); |
3541 | + if (!curr_data) { |
3542 | + qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); |
3543 | + return TC_ACT_SHOT; |
3544 | + } |
3545 | |
3546 | if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) { |
3547 | /* abuse overlimits to count when we receive metadata |
3548 | diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c |
3549 | index f6d3d0c1e133..07b64719d1bc 100644 |
3550 | --- a/net/sctp/ipv6.c |
3551 | +++ b/net/sctp/ipv6.c |
3552 | @@ -521,46 +521,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr, |
3553 | addr->v6.sin6_scope_id = 0; |
3554 | } |
3555 | |
3556 | -/* Compare addresses exactly. |
3557 | - * v4-mapped-v6 is also in consideration. |
3558 | - */ |
3559 | -static int sctp_v6_cmp_addr(const union sctp_addr *addr1, |
3560 | - const union sctp_addr *addr2) |
3561 | +static int __sctp_v6_cmp_addr(const union sctp_addr *addr1, |
3562 | + const union sctp_addr *addr2) |
3563 | { |
3564 | if (addr1->sa.sa_family != addr2->sa.sa_family) { |
3565 | if (addr1->sa.sa_family == AF_INET && |
3566 | addr2->sa.sa_family == AF_INET6 && |
3567 | - ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) { |
3568 | - if (addr2->v6.sin6_port == addr1->v4.sin_port && |
3569 | - addr2->v6.sin6_addr.s6_addr32[3] == |
3570 | - addr1->v4.sin_addr.s_addr) |
3571 | - return 1; |
3572 | - } |
3573 | + ipv6_addr_v4mapped(&addr2->v6.sin6_addr) && |
3574 | + addr2->v6.sin6_addr.s6_addr32[3] == |
3575 | + addr1->v4.sin_addr.s_addr) |
3576 | + return 1; |
3577 | + |
3578 | if (addr2->sa.sa_family == AF_INET && |
3579 | addr1->sa.sa_family == AF_INET6 && |
3580 | - ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) { |
3581 | - if (addr1->v6.sin6_port == addr2->v4.sin_port && |
3582 | - addr1->v6.sin6_addr.s6_addr32[3] == |
3583 | - addr2->v4.sin_addr.s_addr) |
3584 | - return 1; |
3585 | - } |
3586 | + ipv6_addr_v4mapped(&addr1->v6.sin6_addr) && |
3587 | + addr1->v6.sin6_addr.s6_addr32[3] == |
3588 | + addr2->v4.sin_addr.s_addr) |
3589 | + return 1; |
3590 | + |
3591 | return 0; |
3592 | } |
3593 | - if (addr1->v6.sin6_port != addr2->v6.sin6_port) |
3594 | - return 0; |
3595 | + |
3596 | if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) |
3597 | return 0; |
3598 | + |
3599 | /* If this is a linklocal address, compare the scope_id. */ |
3600 | - if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { |
3601 | - if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && |
3602 | - (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { |
3603 | - return 0; |
3604 | - } |
3605 | - } |
3606 | + if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) && |
3607 | + addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && |
3608 | + addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id) |
3609 | + return 0; |
3610 | |
3611 | return 1; |
3612 | } |
3613 | |
3614 | +/* Compare addresses exactly. |
3615 | + * v4-mapped-v6 is also in consideration. |
3616 | + */ |
3617 | +static int sctp_v6_cmp_addr(const union sctp_addr *addr1, |
3618 | + const union sctp_addr *addr2) |
3619 | +{ |
3620 | + return __sctp_v6_cmp_addr(addr1, addr2) && |
3621 | + addr1->v6.sin6_port == addr2->v6.sin6_port; |
3622 | +} |
3623 | + |
3624 | /* Initialize addr struct to INADDR_ANY. */ |
3625 | static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port) |
3626 | { |
3627 | @@ -846,8 +849,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, |
3628 | const union sctp_addr *addr2, |
3629 | struct sctp_sock *opt) |
3630 | { |
3631 | - struct sctp_af *af1, *af2; |
3632 | struct sock *sk = sctp_opt2sk(opt); |
3633 | + struct sctp_af *af1, *af2; |
3634 | |
3635 | af1 = sctp_get_af_specific(addr1->sa.sa_family); |
3636 | af2 = sctp_get_af_specific(addr2->sa.sa_family); |
3637 | @@ -863,10 +866,7 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, |
3638 | if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2)) |
3639 | return 1; |
3640 | |
3641 | - if (addr1->sa.sa_family != addr2->sa.sa_family) |
3642 | - return 0; |
3643 | - |
3644 | - return af1->cmp_addr(addr1, addr2); |
3645 | + return __sctp_v6_cmp_addr(addr1, addr2); |
3646 | } |
3647 | |
3648 | /* Verify that the provided sockaddr looks bindable. Common verification, |
3649 | diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c |
3650 | index 1e0d780855c3..afd5a935bbcb 100644 |
3651 | --- a/net/smc/af_smc.c |
3652 | +++ b/net/smc/af_smc.c |
3653 | @@ -1254,14 +1254,12 @@ static int smc_shutdown(struct socket *sock, int how) |
3654 | rc = smc_close_shutdown_write(smc); |
3655 | break; |
3656 | case SHUT_RD: |
3657 | - if (sk->sk_state == SMC_LISTEN) |
3658 | - rc = smc_close_active(smc); |
3659 | - else |
3660 | - rc = 0; |
3661 | - /* nothing more to do because peer is not involved */ |
3662 | + rc = 0; |
3663 | + /* nothing more to do because peer is not involved */ |
3664 | break; |
3665 | } |
3666 | - rc1 = kernel_sock_shutdown(smc->clcsock, how); |
3667 | + if (smc->clcsock) |
3668 | + rc1 = kernel_sock_shutdown(smc->clcsock, how); |
3669 | /* map sock_shutdown_cmd constants to sk_shutdown value range */ |
3670 | sk->sk_shutdown |= how + 1; |
3671 | |
3672 | diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c |
3673 | index b9283ce5cd85..092bebc70048 100644 |
3674 | --- a/net/strparser/strparser.c |
3675 | +++ b/net/strparser/strparser.c |
3676 | @@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err) |
3677 | |
3678 | static void strp_start_timer(struct strparser *strp, long timeo) |
3679 | { |
3680 | - if (timeo) |
3681 | + if (timeo && timeo != LONG_MAX) |
3682 | mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo); |
3683 | } |
3684 | |
3685 | @@ -296,9 +296,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, |
3686 | strp_start_timer(strp, timeo); |
3687 | } |
3688 | |
3689 | + stm->accum_len += cand_len; |
3690 | strp->need_bytes = stm->strp.full_len - |
3691 | stm->accum_len; |
3692 | - stm->accum_len += cand_len; |
3693 | stm->early_eaten = cand_len; |
3694 | STRP_STATS_ADD(strp->stats.bytes, cand_len); |
3695 | desc->count = 0; /* Stop reading socket */ |
3696 | @@ -321,6 +321,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, |
3697 | /* Hurray, we have a new message! */ |
3698 | cancel_delayed_work(&strp->msg_timer_work); |
3699 | strp->skb_head = NULL; |
3700 | + strp->need_bytes = 0; |
3701 | STRP_STATS_INCR(strp->stats.msgs); |
3702 | |
3703 | /* Give skb to upper layer */ |
3704 | @@ -410,9 +411,7 @@ void strp_data_ready(struct strparser *strp) |
3705 | return; |
3706 | |
3707 | if (strp->need_bytes) { |
3708 | - if (strp_peek_len(strp) >= strp->need_bytes) |
3709 | - strp->need_bytes = 0; |
3710 | - else |
3711 | + if (strp_peek_len(strp) < strp->need_bytes) |
3712 | return; |
3713 | } |
3714 | |
3715 | diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c |
3716 | index b76f13f6fea1..d4e0bbeee727 100644 |
3717 | --- a/net/tipc/netlink.c |
3718 | +++ b/net/tipc/netlink.c |
3719 | @@ -79,7 +79,8 @@ const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = { |
3720 | |
3721 | const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { |
3722 | [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, |
3723 | - [TIPC_NLA_NET_ID] = { .type = NLA_U32 } |
3724 | + [TIPC_NLA_NET_ID] = { .type = NLA_U32 }, |
3725 | + [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 }, |
3726 | }; |
3727 | |
3728 | const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = { |
3729 | diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c |
3730 | index e0fc84daed94..ad17a985f74e 100644 |
3731 | --- a/net/vmw_vsock/af_vsock.c |
3732 | +++ b/net/vmw_vsock/af_vsock.c |
3733 | @@ -2018,7 +2018,13 @@ const struct vsock_transport *vsock_core_get_transport(void) |
3734 | } |
3735 | EXPORT_SYMBOL_GPL(vsock_core_get_transport); |
3736 | |
3737 | +static void __exit vsock_exit(void) |
3738 | +{ |
3739 | + /* Do nothing. This function makes this module removable. */ |
3740 | +} |
3741 | + |
3742 | module_init(vsock_init_tables); |
3743 | +module_exit(vsock_exit); |
3744 | |
3745 | MODULE_AUTHOR("VMware, Inc."); |
3746 | MODULE_DESCRIPTION("VMware Virtual Socket Family"); |
3747 | diff --git a/security/commoncap.c b/security/commoncap.c |
3748 | index 48620c93d697..1ce701fcb3f3 100644 |
3749 | --- a/security/commoncap.c |
3750 | +++ b/security/commoncap.c |
3751 | @@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, |
3752 | magic |= VFS_CAP_FLAGS_EFFECTIVE; |
3753 | memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32); |
3754 | cap->magic_etc = cpu_to_le32(magic); |
3755 | + } else { |
3756 | + size = -ENOMEM; |
3757 | } |
3758 | } |
3759 | kfree(tmpbuf); |