Magellan Linux

Contents of /trunk/kernel-alx/patches-4.9/0253-4.9.154-all-fixes.patch

Parent Directory Parent Directory | Revision Log Revision Log


Revision 3306 - (show annotations) (download)
Tue Mar 12 10:43:12 2019 UTC (5 years, 1 month ago) by niro
File size: 55593 byte(s)
-linux-4.9.154
1 diff --git a/Makefile b/Makefile
2 index 44a487ee24d2..9964792e200f 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 4
7 PATCHLEVEL = 9
8 -SUBLEVEL = 153
9 +SUBLEVEL = 154
10 EXTRAVERSION =
11 NAME = Roaring Lionus
12
13 diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
14 index 9185541035cc..6958545390f0 100644
15 --- a/arch/arc/include/asm/perf_event.h
16 +++ b/arch/arc/include/asm/perf_event.h
17 @@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {
18
19 /* counts condition */
20 [PERF_COUNT_HW_INSTRUCTIONS] = "iall",
21 - [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
22 + /* All jump instructions that are taken */
23 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
24 [PERF_COUNT_ARC_BPOK] = "bpok", /* NP-NT, PT-T, PNT-NT */
25 #ifdef CONFIG_ISA_ARCV2
26 [PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
27 diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
28 index 62ad4bcb841a..f230bb7092fd 100644
29 --- a/arch/arc/lib/memset-archs.S
30 +++ b/arch/arc/lib/memset-archs.S
31 @@ -7,11 +7,39 @@
32 */
33
34 #include <linux/linkage.h>
35 +#include <asm/cache.h>
36
37 -#undef PREALLOC_NOT_AVAIL
38 +/*
39 + * The memset implementation below is optimized to use prefetchw and prealloc
40 + * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
41 + * If you want to implement optimized memset for other possible L1 data cache
42 + * line lengths (32B and 128B) you should rewrite code carefully checking
43 + * we don't call any prefetchw/prealloc instruction for L1 cache lines which
44 + * don't belongs to memset area.
45 + */
46 +
47 +#if L1_CACHE_SHIFT == 6
48 +
49 +.macro PREALLOC_INSTR reg, off
50 + prealloc [\reg, \off]
51 +.endm
52 +
53 +.macro PREFETCHW_INSTR reg, off
54 + prefetchw [\reg, \off]
55 +.endm
56 +
57 +#else
58 +
59 +.macro PREALLOC_INSTR
60 +.endm
61 +
62 +.macro PREFETCHW_INSTR
63 +.endm
64 +
65 +#endif
66
67 ENTRY_CFI(memset)
68 - prefetchw [r0] ; Prefetch the write location
69 + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location
70 mov.f 0, r2
71 ;;; if size is zero
72 jz.d [blink]
73 @@ -48,11 +76,8 @@ ENTRY_CFI(memset)
74
75 lpnz @.Lset64bytes
76 ;; LOOP START
77 -#ifdef PREALLOC_NOT_AVAIL
78 - prefetchw [r3, 64] ;Prefetch the next write location
79 -#else
80 - prealloc [r3, 64]
81 -#endif
82 + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching
83 +
84 #ifdef CONFIG_ARC_HAS_LL64
85 std.ab r4, [r3, 8]
86 std.ab r4, [r3, 8]
87 @@ -85,7 +110,6 @@ ENTRY_CFI(memset)
88 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
89 lpnz .Lset32bytes
90 ;; LOOP START
91 - prefetchw [r3, 32] ;Prefetch the next write location
92 #ifdef CONFIG_ARC_HAS_LL64
93 std.ab r4, [r3, 8]
94 std.ab r4, [r3, 8]
95 diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
96 index 0c7a7d5d95f1..a651c2bc94ef 100644
97 --- a/arch/s390/kernel/early.c
98 +++ b/arch/s390/kernel/early.c
99 @@ -224,10 +224,10 @@ static noinline __init void detect_machine_type(void)
100 if (stsi(vmms, 3, 2, 2) || !vmms->count)
101 return;
102
103 - /* Running under KVM? If not we assume z/VM */
104 + /* Detect known hypervisors */
105 if (!memcmp(vmms->vm[0].cpi, "\xd2\xe5\xd4", 3))
106 S390_lowcore.machine_flags |= MACHINE_FLAG_KVM;
107 - else
108 + else if (!memcmp(vmms->vm[0].cpi, "\xa9\x61\xe5\xd4", 4))
109 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
110 }
111
112 diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
113 index feb9d97a9d14..a559908d180e 100644
114 --- a/arch/s390/kernel/setup.c
115 +++ b/arch/s390/kernel/setup.c
116 @@ -863,6 +863,8 @@ void __init setup_arch(char **cmdline_p)
117 pr_info("Linux is running under KVM in 64-bit mode\n");
118 else if (MACHINE_IS_LPAR)
119 pr_info("Linux is running natively in 64-bit mode\n");
120 + else
121 + pr_info("Linux is running as a guest in 64-bit mode\n");
122
123 /* Have one command line that is parsed and saved in /proc/cmdline */
124 /* boot_command_line has been already set up in early.c */
125 diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
126 index 0a31110f41f6..d52a94e9f57f 100644
127 --- a/arch/s390/kernel/smp.c
128 +++ b/arch/s390/kernel/smp.c
129 @@ -357,9 +357,13 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
130 */
131 void smp_call_ipl_cpu(void (*func)(void *), void *data)
132 {
133 + struct lowcore *lc = pcpu_devices->lowcore;
134 +
135 + if (pcpu_devices[0].address == stap())
136 + lc = &S390_lowcore;
137 +
138 pcpu_delegate(&pcpu_devices[0], func, data,
139 - pcpu_devices->lowcore->panic_stack -
140 - PANIC_FRAME_OFFSET + PAGE_SIZE);
141 + lc->panic_stack - PANIC_FRAME_OFFSET + PAGE_SIZE);
142 }
143
144 int smp_find_processor_id(u16 address)
145 @@ -1139,7 +1143,11 @@ static ssize_t __ref rescan_store(struct device *dev,
146 {
147 int rc;
148
149 + rc = lock_device_hotplug_sysfs();
150 + if (rc)
151 + return rc;
152 rc = smp_rescan_cpus();
153 + unlock_device_hotplug();
154 return rc ? rc : count;
155 }
156 static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
157 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
158 index 46e0ad71b4da..851e9d6c864f 100644
159 --- a/arch/x86/kvm/x86.c
160 +++ b/arch/x86/kvm/x86.c
161 @@ -5795,8 +5795,7 @@ restart:
162 toggle_interruptibility(vcpu, ctxt->interruptibility);
163 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
164 kvm_rip_write(vcpu, ctxt->eip);
165 - if (r == EMULATE_DONE &&
166 - (ctxt->tf || (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)))
167 + if (r == EMULATE_DONE && ctxt->tf)
168 kvm_vcpu_do_singlestep(vcpu, &r);
169 if (!ctxt->have_exception ||
170 exception_type(ctxt->exception.vector) == EXCPT_TRAP)
171 diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
172 index 0c7fe444dcdd..d8d868070e24 100644
173 --- a/arch/x86/lib/kaslr.c
174 +++ b/arch/x86/lib/kaslr.c
175 @@ -35,8 +35,8 @@ static inline u16 i8254(void)
176 u16 status, timer;
177
178 do {
179 - outb(I8254_PORT_CONTROL,
180 - I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
181 + outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
182 + I8254_PORT_CONTROL);
183 status = inb(I8254_PORT_COUNTER0);
184 timer = inb(I8254_PORT_COUNTER0);
185 timer |= inb(I8254_PORT_COUNTER0) << 8;
186 diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
187 index ef32e5766a01..06cf7427d0c4 100644
188 --- a/drivers/acpi/nfit/core.c
189 +++ b/drivers/acpi/nfit/core.c
190 @@ -185,6 +185,32 @@ static int xlat_status(struct nvdimm *nvdimm, void *buf, unsigned int cmd,
191 return 0;
192 }
193
194 +static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd,
195 + struct nd_cmd_pkg *call_pkg)
196 +{
197 + if (call_pkg) {
198 + int i;
199 +
200 + if (nfit_mem->family != call_pkg->nd_family)
201 + return -ENOTTY;
202 +
203 + for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++)
204 + if (call_pkg->nd_reserved2[i])
205 + return -EINVAL;
206 + return call_pkg->nd_command;
207 + }
208 +
209 + /* Linux ND commands == NVDIMM_FAMILY_INTEL function numbers */
210 + if (nfit_mem->family == NVDIMM_FAMILY_INTEL)
211 + return cmd;
212 +
213 + /*
214 + * Force function number validation to fail since 0 is never
215 + * published as a valid function in dsm_mask.
216 + */
217 + return 0;
218 +}
219 +
220 int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
221 unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc)
222 {
223 @@ -197,17 +223,11 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
224 unsigned long cmd_mask, dsm_mask;
225 u32 offset, fw_status = 0;
226 acpi_handle handle;
227 - unsigned int func;
228 const u8 *uuid;
229 - int rc, i;
230 + int func, rc, i;
231
232 if (cmd_rc)
233 *cmd_rc = -EINVAL;
234 - func = cmd;
235 - if (cmd == ND_CMD_CALL) {
236 - call_pkg = buf;
237 - func = call_pkg->nd_command;
238 - }
239
240 if (nvdimm) {
241 struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm);
242 @@ -215,9 +235,12 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
243
244 if (!adev)
245 return -ENOTTY;
246 - if (call_pkg && nfit_mem->family != call_pkg->nd_family)
247 - return -ENOTTY;
248
249 + if (cmd == ND_CMD_CALL)
250 + call_pkg = buf;
251 + func = cmd_to_func(nfit_mem, cmd, call_pkg);
252 + if (func < 0)
253 + return func;
254 dimm_name = nvdimm_name(nvdimm);
255 cmd_name = nvdimm_cmd_name(cmd);
256 cmd_mask = nvdimm_cmd_mask(nvdimm);
257 @@ -228,6 +251,7 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
258 } else {
259 struct acpi_device *adev = to_acpi_dev(acpi_desc);
260
261 + func = cmd;
262 cmd_name = nvdimm_bus_cmd_name(cmd);
263 cmd_mask = nd_desc->cmd_mask;
264 dsm_mask = cmd_mask;
265 @@ -240,7 +264,13 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
266 if (!desc || (cmd && (desc->out_num + desc->in_num == 0)))
267 return -ENOTTY;
268
269 - if (!test_bit(cmd, &cmd_mask) || !test_bit(func, &dsm_mask))
270 + /*
271 + * Check for a valid command. For ND_CMD_CALL, we also have to
272 + * make sure that the DSM function is supported.
273 + */
274 + if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask))
275 + return -ENOTTY;
276 + else if (!test_bit(cmd, &cmd_mask))
277 return -ENOTTY;
278
279 in_obj.type = ACPI_TYPE_PACKAGE;
280 @@ -1433,6 +1463,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc,
281 return 0;
282 }
283
284 + /*
285 + * Function 0 is the command interrogation function, don't
286 + * export it to potential userspace use, and enable it to be
287 + * used as an error value in acpi_nfit_ctl().
288 + */
289 + dsm_mask &= ~1UL;
290 +
291 uuid = to_nfit_uuid(nfit_mem->family);
292 for_each_set_bit(i, &dsm_mask, BITS_PER_LONG)
293 if (acpi_check_dsm(adev_dimm->handle, uuid, 1, 1ULL << i))
294 diff --git a/drivers/char/mwave/mwavedd.c b/drivers/char/mwave/mwavedd.c
295 index 3a3ff2eb6cba..7a4f9346cccf 100644
296 --- a/drivers/char/mwave/mwavedd.c
297 +++ b/drivers/char/mwave/mwavedd.c
298 @@ -59,6 +59,7 @@
299 #include <linux/mutex.h>
300 #include <linux/delay.h>
301 #include <linux/serial_8250.h>
302 +#include <linux/nospec.h>
303 #include "smapi.h"
304 #include "mwavedd.h"
305 #include "3780i.h"
306 @@ -289,6 +290,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
307 ipcnum);
308 return -EINVAL;
309 }
310 + ipcnum = array_index_nospec(ipcnum,
311 + ARRAY_SIZE(pDrvData->IPCs));
312 PRINTK_3(TRACE_MWAVE,
313 "mwavedd::mwave_ioctl IOCTL_MW_REGISTER_IPC"
314 " ipcnum %x entry usIntCount %x\n",
315 @@ -317,6 +320,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
316 " Invalid ipcnum %x\n", ipcnum);
317 return -EINVAL;
318 }
319 + ipcnum = array_index_nospec(ipcnum,
320 + ARRAY_SIZE(pDrvData->IPCs));
321 PRINTK_3(TRACE_MWAVE,
322 "mwavedd::mwave_ioctl IOCTL_MW_GET_IPC"
323 " ipcnum %x, usIntCount %x\n",
324 @@ -383,6 +388,8 @@ static long mwave_ioctl(struct file *file, unsigned int iocmd,
325 ipcnum);
326 return -EINVAL;
327 }
328 + ipcnum = array_index_nospec(ipcnum,
329 + ARRAY_SIZE(pDrvData->IPCs));
330 mutex_lock(&mwave_mutex);
331 if (pDrvData->IPCs[ipcnum].bIsEnabled == true) {
332 pDrvData->IPCs[ipcnum].bIsEnabled = false;
333 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
334 index f55dcdf99bc5..26476a64e663 100644
335 --- a/drivers/input/joystick/xpad.c
336 +++ b/drivers/input/joystick/xpad.c
337 @@ -255,6 +255,8 @@ static const struct xpad_device {
338 { 0x0f30, 0x0202, "Joytech Advanced Controller", 0, XTYPE_XBOX },
339 { 0x0f30, 0x8888, "BigBen XBMiniPad Controller", 0, XTYPE_XBOX },
340 { 0x102c, 0xff0c, "Joytech Wireless Advanced Controller", 0, XTYPE_XBOX },
341 + { 0x1038, 0x1430, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
342 + { 0x1038, 0x1431, "SteelSeries Stratus Duo", 0, XTYPE_XBOX360 },
343 { 0x11c9, 0x55f0, "Nacon GC-100XF", 0, XTYPE_XBOX360 },
344 { 0x12ab, 0x0004, "Honey Bee Xbox360 dancepad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX360 },
345 { 0x12ab, 0x0301, "PDP AFTERGLOW AX.1", 0, XTYPE_XBOX360 },
346 @@ -431,6 +433,7 @@ static const struct usb_device_id xpad_table[] = {
347 XPAD_XBOXONE_VENDOR(0x0e6f), /* 0x0e6f X-Box One controllers */
348 XPAD_XBOX360_VENDOR(0x0f0d), /* Hori Controllers */
349 XPAD_XBOXONE_VENDOR(0x0f0d), /* Hori Controllers */
350 + XPAD_XBOX360_VENDOR(0x1038), /* SteelSeries Controllers */
351 XPAD_XBOX360_VENDOR(0x11c9), /* Nacon GC100XF */
352 XPAD_XBOX360_VENDOR(0x12ab), /* X-Box 360 dance pads */
353 XPAD_XBOX360_VENDOR(0x1430), /* RedOctane X-Box 360 controllers */
354 diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
355 index 022be0e22eba..a306453d40d2 100644
356 --- a/drivers/input/misc/uinput.c
357 +++ b/drivers/input/misc/uinput.c
358 @@ -39,6 +39,7 @@
359 #include <linux/fs.h>
360 #include <linux/miscdevice.h>
361 #include <linux/uinput.h>
362 +#include <linux/overflow.h>
363 #include <linux/input/mt.h>
364 #include "../input-compat.h"
365
366 @@ -335,7 +336,7 @@ static int uinput_open(struct inode *inode, struct file *file)
367 static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
368 const struct input_absinfo *abs)
369 {
370 - int min, max;
371 + int min, max, range;
372
373 min = abs->minimum;
374 max = abs->maximum;
375 @@ -347,7 +348,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
376 return -EINVAL;
377 }
378
379 - if (abs->flat > max - min) {
380 + if (!check_sub_overflow(max, min, &range) && abs->flat > range) {
381 printk(KERN_DEBUG
382 "%s: abs_flat #%02x out of range: %d (min:%d/max:%d)\n",
383 UINPUT_NAME, code, abs->flat, min, max);
384 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
385 index 558c7589c329..83ca754250fb 100644
386 --- a/drivers/irqchip/irq-gic-v3-its.c
387 +++ b/drivers/irqchip/irq-gic-v3-its.c
388 @@ -1372,13 +1372,14 @@ static void its_free_device(struct its_device *its_dev)
389 kfree(its_dev);
390 }
391
392 -static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq)
393 +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
394 {
395 int idx;
396
397 - idx = find_first_zero_bit(dev->event_map.lpi_map,
398 - dev->event_map.nr_lpis);
399 - if (idx == dev->event_map.nr_lpis)
400 + idx = bitmap_find_free_region(dev->event_map.lpi_map,
401 + dev->event_map.nr_lpis,
402 + get_count_order(nvecs));
403 + if (idx < 0)
404 return -ENOSPC;
405
406 *hwirq = dev->event_map.lpi_base + idx;
407 @@ -1464,20 +1465,20 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
408 int err;
409 int i;
410
411 - for (i = 0; i < nr_irqs; i++) {
412 - err = its_alloc_device_irq(its_dev, &hwirq);
413 - if (err)
414 - return err;
415 + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
416 + if (err)
417 + return err;
418
419 - err = its_irq_gic_domain_alloc(domain, virq + i, hwirq);
420 + for (i = 0; i < nr_irqs; i++) {
421 + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
422 if (err)
423 return err;
424
425 irq_domain_set_hwirq_and_chip(domain, virq + i,
426 - hwirq, &its_irq_chip, its_dev);
427 + hwirq + i, &its_irq_chip, its_dev);
428 pr_debug("ID:%d pID:%d vID:%d\n",
429 - (int)(hwirq - its_dev->event_map.lpi_base),
430 - (int) hwirq, virq + i);
431 + (int)(hwirq + i - its_dev->event_map.lpi_base),
432 + (int)(hwirq + i), virq + i);
433 }
434
435 return 0;
436 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
437 index 149fbac97cb6..d20f4023f6c1 100644
438 --- a/drivers/md/dm-thin-metadata.c
439 +++ b/drivers/md/dm-thin-metadata.c
440 @@ -1689,7 +1689,7 @@ int dm_thin_remove_range(struct dm_thin_device *td,
441 return r;
442 }
443
444 -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
445 +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
446 {
447 int r;
448 uint32_t ref_count;
449 @@ -1697,7 +1697,7 @@ int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *resu
450 down_read(&pmd->root_lock);
451 r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
452 if (!r)
453 - *result = (ref_count != 0);
454 + *result = (ref_count > 1);
455 up_read(&pmd->root_lock);
456
457 return r;
458 diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
459 index 35e954ea20a9..f6be0d733c20 100644
460 --- a/drivers/md/dm-thin-metadata.h
461 +++ b/drivers/md/dm-thin-metadata.h
462 @@ -195,7 +195,7 @@ int dm_pool_get_metadata_dev_size(struct dm_pool_metadata *pmd,
463
464 int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result);
465
466 -int dm_pool_block_is_used(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
467 +int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result);
468
469 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
470 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e);
471 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
472 index 81309d7836c5..914c8a6bf93c 100644
473 --- a/drivers/md/dm-thin.c
474 +++ b/drivers/md/dm-thin.c
475 @@ -1017,7 +1017,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
476 * passdown we have to check that these blocks are now unused.
477 */
478 int r = 0;
479 - bool used = true;
480 + bool shared = true;
481 struct thin_c *tc = m->tc;
482 struct pool *pool = tc->pool;
483 dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;
484 @@ -1027,11 +1027,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
485 while (b != end) {
486 /* find start of unmapped run */
487 for (; b < end; b++) {
488 - r = dm_pool_block_is_used(pool->pmd, b, &used);
489 + r = dm_pool_block_is_shared(pool->pmd, b, &shared);
490 if (r)
491 goto out;
492
493 - if (!used)
494 + if (!shared)
495 break;
496 }
497
498 @@ -1040,11 +1040,11 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
499
500 /* find end of run */
501 for (e = b + 1; e != end; e++) {
502 - r = dm_pool_block_is_used(pool->pmd, e, &used);
503 + r = dm_pool_block_is_shared(pool->pmd, e, &shared);
504 if (r)
505 goto out;
506
507 - if (used)
508 + if (shared)
509 break;
510 }
511
512 diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
513 index ff3d9fc0f1b3..214a48703a4e 100644
514 --- a/drivers/net/can/dev.c
515 +++ b/drivers/net/can/dev.c
516 @@ -456,8 +456,6 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
517 struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr)
518 {
519 struct can_priv *priv = netdev_priv(dev);
520 - struct sk_buff *skb = priv->echo_skb[idx];
521 - struct canfd_frame *cf;
522
523 if (idx >= priv->echo_skb_max) {
524 netdev_err(dev, "%s: BUG! Trying to access can_priv::echo_skb out of bounds (%u/max %u)\n",
525 @@ -465,20 +463,21 @@ struct sk_buff *__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8
526 return NULL;
527 }
528
529 - if (!skb) {
530 - netdev_err(dev, "%s: BUG! Trying to echo non existing skb: can_priv::echo_skb[%u]\n",
531 - __func__, idx);
532 - return NULL;
533 - }
534 + if (priv->echo_skb[idx]) {
535 + /* Using "struct canfd_frame::len" for the frame
536 + * length is supported on both CAN and CANFD frames.
537 + */
538 + struct sk_buff *skb = priv->echo_skb[idx];
539 + struct canfd_frame *cf = (struct canfd_frame *)skb->data;
540 + u8 len = cf->len;
541 +
542 + *len_ptr = len;
543 + priv->echo_skb[idx] = NULL;
544
545 - /* Using "struct canfd_frame::len" for the frame
546 - * length is supported on both CAN and CANFD frames.
547 - */
548 - cf = (struct canfd_frame *)skb->data;
549 - *len_ptr = cf->len;
550 - priv->echo_skb[idx] = NULL;
551 + return skb;
552 + }
553
554 - return skb;
555 + return NULL;
556 }
557
558 /*
559 diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
560 index fa2c7bd638be..8c93ed5c9763 100644
561 --- a/drivers/net/ppp/pppoe.c
562 +++ b/drivers/net/ppp/pppoe.c
563 @@ -442,6 +442,7 @@ static int pppoe_rcv(struct sk_buff *skb, struct net_device *dev,
564 if (pskb_trim_rcsum(skb, len))
565 goto drop;
566
567 + ph = pppoe_hdr(skb);
568 pn = pppoe_pernet(dev_net(dev));
569
570 /* Note that get_item does a sock_hold(), so sk_pppox(po)
571 diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
572 index 486393fa4f3e..0f1201e6e957 100644
573 --- a/drivers/nvme/target/rdma.c
574 +++ b/drivers/nvme/target/rdma.c
575 @@ -137,6 +137,10 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
576 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
577 static void nvmet_rdma_qp_event(struct ib_event *event, void *priv);
578 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
579 +static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
580 + struct nvmet_rdma_rsp *r);
581 +static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
582 + struct nvmet_rdma_rsp *r);
583
584 static struct nvmet_fabrics_ops nvmet_rdma_ops;
585
586 @@ -175,9 +179,17 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
587 spin_unlock_irqrestore(&queue->rsps_lock, flags);
588
589 if (unlikely(!rsp)) {
590 - rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
591 + int ret;
592 +
593 + rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
594 if (unlikely(!rsp))
595 return NULL;
596 + ret = nvmet_rdma_alloc_rsp(queue->dev, rsp);
597 + if (unlikely(ret)) {
598 + kfree(rsp);
599 + return NULL;
600 + }
601 +
602 rsp->allocated = true;
603 }
604
605 @@ -189,7 +201,8 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
606 {
607 unsigned long flags;
608
609 - if (rsp->allocated) {
610 + if (unlikely(rsp->allocated)) {
611 + nvmet_rdma_free_rsp(rsp->queue->dev, rsp);
612 kfree(rsp);
613 return;
614 }
615 diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
616 index 1406fb688a26..73e2f89aded8 100644
617 --- a/drivers/s390/char/sclp_config.c
618 +++ b/drivers/s390/char/sclp_config.c
619 @@ -59,7 +59,9 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
620
621 static void __ref sclp_cpu_change_notify(struct work_struct *work)
622 {
623 + lock_device_hotplug();
624 smp_rescan_cpus();
625 + unlock_device_hotplug();
626 }
627
628 static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
629 diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
630 index 0f63a36a519e..d22360849b88 100644
631 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
632 +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
633 @@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
634 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
635 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
636 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
637 + {USB_DEVICE(0x2001, 0x331B)}, /* D-Link DWA-121 rev B1 */
638 {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
639 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
640 {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
641 diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
642 index 6d1e2f746ab4..8d6253903f24 100644
643 --- a/drivers/tty/n_hdlc.c
644 +++ b/drivers/tty/n_hdlc.c
645 @@ -598,6 +598,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
646 /* too large for caller's buffer */
647 ret = -EOVERFLOW;
648 } else {
649 + __set_current_state(TASK_RUNNING);
650 if (copy_to_user(buf, rbuf->buf, rbuf->count))
651 ret = -EFAULT;
652 else
653 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
654 index bcfdaf6ddbb2..95fc7e893fd2 100644
655 --- a/drivers/tty/serial/serial_core.c
656 +++ b/drivers/tty/serial/serial_core.c
657 @@ -540,10 +540,12 @@ static int uart_put_char(struct tty_struct *tty, unsigned char c)
658 int ret = 0;
659
660 circ = &state->xmit;
661 - if (!circ->buf)
662 + port = uart_port_lock(state, flags);
663 + if (!circ->buf) {
664 + uart_port_unlock(port, flags);
665 return 0;
666 + }
667
668 - port = uart_port_lock(state, flags);
669 if (port && uart_circ_chars_free(circ) != 0) {
670 circ->buf[circ->head] = c;
671 circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
672 @@ -576,11 +578,13 @@ static int uart_write(struct tty_struct *tty,
673 return -EL3HLT;
674 }
675
676 + port = uart_port_lock(state, flags);
677 circ = &state->xmit;
678 - if (!circ->buf)
679 + if (!circ->buf) {
680 + uart_port_unlock(port, flags);
681 return 0;
682 + }
683
684 - port = uart_port_lock(state, flags);
685 while (port) {
686 c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
687 if (count < c)
688 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
689 index f61f8650665f..19fe1e8fc124 100644
690 --- a/drivers/tty/tty_io.c
691 +++ b/drivers/tty/tty_io.c
692 @@ -2324,7 +2324,8 @@ static int tiocsti(struct tty_struct *tty, char __user *p)
693 ld = tty_ldisc_ref_wait(tty);
694 if (!ld)
695 return -EIO;
696 - ld->ops->receive_buf(tty, &ch, &mbz, 1);
697 + if (ld->ops->receive_buf)
698 + ld->ops->receive_buf(tty, &ch, &mbz, 1);
699 tty_ldisc_deref(ld);
700 return 0;
701 }
702 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
703 index 9d3e413f48c6..232cb0a760b9 100644
704 --- a/drivers/tty/vt/vt.c
705 +++ b/drivers/tty/vt/vt.c
706 @@ -956,6 +956,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
707 if (con_is_visible(vc))
708 update_screen(vc);
709 vt_event_post(VT_EVENT_RESIZE, vc->vc_num, vc->vc_num);
710 + notify_update(vc);
711 return err;
712 }
713
714 diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
715 index 4966768d3c98..9706d214c409 100644
716 --- a/drivers/usb/serial/pl2303.c
717 +++ b/drivers/usb/serial/pl2303.c
718 @@ -47,6 +47,7 @@ static const struct usb_device_id id_table[] = {
719 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_HCR331) },
720 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MOTOROLA) },
721 { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ZTEK) },
722 + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_TB) },
723 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
724 { USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
725 { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
726 diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
727 index a84f0959ab34..d84c3b3d477b 100644
728 --- a/drivers/usb/serial/pl2303.h
729 +++ b/drivers/usb/serial/pl2303.h
730 @@ -13,6 +13,7 @@
731
732 #define PL2303_VENDOR_ID 0x067b
733 #define PL2303_PRODUCT_ID 0x2303
734 +#define PL2303_PRODUCT_ID_TB 0x2304
735 #define PL2303_PRODUCT_ID_RSAQ2 0x04bb
736 #define PL2303_PRODUCT_ID_DCU11 0x1234
737 #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
738 @@ -25,6 +26,7 @@
739 #define PL2303_PRODUCT_ID_MOTOROLA 0x0307
740 #define PL2303_PRODUCT_ID_ZTEK 0xe1f1
741
742 +
743 #define ATEN_VENDOR_ID 0x0557
744 #define ATEN_VENDOR_ID2 0x0547
745 #define ATEN_PRODUCT_ID 0x2008
746 diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
747 index 6d6acf2c07c3..511242111403 100644
748 --- a/drivers/usb/serial/usb-serial-simple.c
749 +++ b/drivers/usb/serial/usb-serial-simple.c
750 @@ -88,7 +88,8 @@ DEVICE(moto_modem, MOTO_IDS);
751 /* Motorola Tetra driver */
752 #define MOTOROLA_TETRA_IDS() \
753 { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
754 - { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
755 + { USB_DEVICE(0x0cad, 0x9012) }, /* MTP6550 */ \
756 + { USB_DEVICE(0x0cad, 0x9016) } /* TPG2200 */
757 DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
758
759 /* Novatel Wireless GPS driver */
760 diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
761 index 353c93bc459b..681d0eade82f 100644
762 --- a/drivers/vhost/net.c
763 +++ b/drivers/vhost/net.c
764 @@ -751,7 +751,8 @@ static void handle_rx(struct vhost_net *net)
765 vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
766 headcount);
767 if (unlikely(vq_log))
768 - vhost_log_write(vq, vq_log, log, vhost_len);
769 + vhost_log_write(vq, vq_log, log, vhost_len,
770 + vq->iov, in);
771 total_len += vhost_len;
772 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
773 vhost_poll_queue(&vq->poll);
774 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
775 index 53b1b3cfce84..dc387a974325 100644
776 --- a/drivers/vhost/vhost.c
777 +++ b/drivers/vhost/vhost.c
778 @@ -1646,13 +1646,87 @@ static int log_write(void __user *log_base,
779 return r;
780 }
781
782 +static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len)
783 +{
784 + struct vhost_umem *umem = vq->umem;
785 + struct vhost_umem_node *u;
786 + u64 start, end, l, min;
787 + int r;
788 + bool hit = false;
789 +
790 + while (len) {
791 + min = len;
792 + /* More than one GPAs can be mapped into a single HVA. So
793 + * iterate all possible umems here to be safe.
794 + */
795 + list_for_each_entry(u, &umem->umem_list, link) {
796 + if (u->userspace_addr > hva - 1 + len ||
797 + u->userspace_addr - 1 + u->size < hva)
798 + continue;
799 + start = max(u->userspace_addr, hva);
800 + end = min(u->userspace_addr - 1 + u->size,
801 + hva - 1 + len);
802 + l = end - start + 1;
803 + r = log_write(vq->log_base,
804 + u->start + start - u->userspace_addr,
805 + l);
806 + if (r < 0)
807 + return r;
808 + hit = true;
809 + min = min(l, min);
810 + }
811 +
812 + if (!hit)
813 + return -EFAULT;
814 +
815 + len -= min;
816 + hva += min;
817 + }
818 +
819 + return 0;
820 +}
821 +
822 +static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len)
823 +{
824 + struct iovec iov[64];
825 + int i, ret;
826 +
827 + if (!vq->iotlb)
828 + return log_write(vq->log_base, vq->log_addr + used_offset, len);
829 +
830 + ret = translate_desc(vq, (uintptr_t)vq->used + used_offset,
831 + len, iov, 64, VHOST_ACCESS_WO);
832 + if (ret)
833 + return ret;
834 +
835 + for (i = 0; i < ret; i++) {
836 + ret = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
837 + iov[i].iov_len);
838 + if (ret)
839 + return ret;
840 + }
841 +
842 + return 0;
843 +}
844 +
845 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
846 - unsigned int log_num, u64 len)
847 + unsigned int log_num, u64 len, struct iovec *iov, int count)
848 {
849 int i, r;
850
851 /* Make sure data written is seen before log. */
852 smp_wmb();
853 +
854 + if (vq->iotlb) {
855 + for (i = 0; i < count; i++) {
856 + r = log_write_hva(vq, (uintptr_t)iov[i].iov_base,
857 + iov[i].iov_len);
858 + if (r < 0)
859 + return r;
860 + }
861 + return 0;
862 + }
863 +
864 for (i = 0; i < log_num; ++i) {
865 u64 l = min(log[i].len, len);
866 r = log_write(vq->log_base, log[i].addr, l);
867 @@ -1682,9 +1756,8 @@ static int vhost_update_used_flags(struct vhost_virtqueue *vq)
868 smp_wmb();
869 /* Log used flag write. */
870 used = &vq->used->flags;
871 - log_write(vq->log_base, vq->log_addr +
872 - (used - (void __user *)vq->used),
873 - sizeof vq->used->flags);
874 + log_used(vq, (used - (void __user *)vq->used),
875 + sizeof vq->used->flags);
876 if (vq->log_ctx)
877 eventfd_signal(vq->log_ctx, 1);
878 }
879 @@ -1702,9 +1775,8 @@ static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
880 smp_wmb();
881 /* Log avail event write */
882 used = vhost_avail_event(vq);
883 - log_write(vq->log_base, vq->log_addr +
884 - (used - (void __user *)vq->used),
885 - sizeof *vhost_avail_event(vq));
886 + log_used(vq, (used - (void __user *)vq->used),
887 + sizeof *vhost_avail_event(vq));
888 if (vq->log_ctx)
889 eventfd_signal(vq->log_ctx, 1);
890 }
891 @@ -2103,10 +2175,8 @@ static int __vhost_add_used_n(struct vhost_virtqueue *vq,
892 /* Make sure data is seen before log. */
893 smp_wmb();
894 /* Log used ring entry write. */
895 - log_write(vq->log_base,
896 - vq->log_addr +
897 - ((void __user *)used - (void __user *)vq->used),
898 - count * sizeof *used);
899 + log_used(vq, ((void __user *)used - (void __user *)vq->used),
900 + count * sizeof *used);
901 }
902 old = vq->last_used_idx;
903 new = (vq->last_used_idx += count);
904 @@ -2148,9 +2218,8 @@ int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
905 /* Make sure used idx is seen before log. */
906 smp_wmb();
907 /* Log used index update. */
908 - log_write(vq->log_base,
909 - vq->log_addr + offsetof(struct vring_used, idx),
910 - sizeof vq->used->idx);
911 + log_used(vq, offsetof(struct vring_used, idx),
912 + sizeof vq->used->idx);
913 if (vq->log_ctx)
914 eventfd_signal(vq->log_ctx, 1);
915 }
916 diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
917 index 78f3c5fc02e4..e8efe1af7487 100644
918 --- a/drivers/vhost/vhost.h
919 +++ b/drivers/vhost/vhost.h
920 @@ -199,7 +199,8 @@ bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
921 bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);
922
923 int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
924 - unsigned int log_num, u64 len);
925 + unsigned int log_num, u64 len,
926 + struct iovec *iov, int count);
927 int vq_iotlb_prefetch(struct vhost_virtqueue *vq);
928
929 struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
930 diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
931 index b450adf65236..fb973cc0af66 100644
932 --- a/fs/btrfs/dev-replace.c
933 +++ b/fs/btrfs/dev-replace.c
934 @@ -350,6 +350,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
935 break;
936 case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED:
937 case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED:
938 + ASSERT(0);
939 ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED;
940 goto leave;
941 }
942 @@ -394,6 +395,10 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
943 if (IS_ERR(trans)) {
944 ret = PTR_ERR(trans);
945 btrfs_dev_replace_lock(dev_replace, 1);
946 + dev_replace->replace_state =
947 + BTRFS_IOCTL_DEV_REPLACE_STATE_NEVER_STARTED;
948 + dev_replace->srcdev = NULL;
949 + dev_replace->tgtdev = NULL;
950 goto leave;
951 }
952
953 @@ -415,8 +420,6 @@ int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name,
954 return ret;
955
956 leave:
957 - dev_replace->srcdev = NULL;
958 - dev_replace->tgtdev = NULL;
959 btrfs_dev_replace_unlock(dev_replace, 1);
960 btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
961 return ret;
962 @@ -784,6 +787,8 @@ int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info)
963 "cannot continue dev_replace, tgtdev is missing");
964 btrfs_info(fs_info,
965 "you may cancel the operation after 'mount -o degraded'");
966 + dev_replace->replace_state =
967 + BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED;
968 btrfs_dev_replace_unlock(dev_replace, 1);
969 return 0;
970 }
971 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
972 index 08c1c86c2ad9..2db7968febfe 100644
973 --- a/fs/cifs/smb2ops.c
974 +++ b/fs/cifs/smb2ops.c
975 @@ -148,14 +148,14 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size,
976
977 scredits = server->credits;
978 /* can deadlock with reopen */
979 - if (scredits == 1) {
980 + if (scredits <= 8) {
981 *num = SMB2_MAX_BUFFER_SIZE;
982 *credits = 0;
983 break;
984 }
985
986 - /* leave one credit for a possible reopen */
987 - scredits--;
988 + /* leave some credits for reopen and other ops */
989 + scredits -= 8;
990 *num = min_t(unsigned int, size,
991 scredits * SMB2_MAX_BUFFER_SIZE);
992
993 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
994 index f4fe54047fb7..d87c48e4a9ba 100644
995 --- a/fs/f2fs/node.c
996 +++ b/fs/f2fs/node.c
997 @@ -656,6 +656,7 @@ static void truncate_node(struct dnode_of_data *dn)
998 {
999 struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
1000 struct node_info ni;
1001 + pgoff_t index;
1002
1003 get_node_info(sbi, dn->nid, &ni);
1004 if (dn->inode->i_blocks == 0) {
1005 @@ -678,10 +679,11 @@ invalidate:
1006 clear_node_page_dirty(dn->node_page);
1007 set_sbi_flag(sbi, SBI_IS_DIRTY);
1008
1009 + index = dn->node_page->index;
1010 f2fs_put_page(dn->node_page, 1);
1011
1012 invalidate_mapping_pages(NODE_MAPPING(sbi),
1013 - dn->node_page->index, dn->node_page->index);
1014 + index, index);
1015
1016 dn->node_page = NULL;
1017 trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
1018 diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
1019 index 21c88a7ac23b..697988be62df 100644
1020 --- a/include/linux/compiler-clang.h
1021 +++ b/include/linux/compiler-clang.h
1022 @@ -23,3 +23,17 @@
1023 #ifdef __noretpoline
1024 #undef __noretpoline
1025 #endif
1026 +
1027 +/*
1028 + * Not all versions of clang implement the type-generic versions
1029 + * of the builtin overflow checkers. Fortunately, clang implements
1030 + * __has_builtin allowing us to avoid awkward version
1031 + * checks. Unfortunately, we don't know which version of gcc clang
1032 + * pretends to be, so the macro may or may not be defined.
1033 + */
1034 +#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
1035 +#if __has_builtin(__builtin_mul_overflow) && \
1036 + __has_builtin(__builtin_add_overflow) && \
1037 + __has_builtin(__builtin_sub_overflow)
1038 +#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
1039 +#endif
1040 diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
1041 index 8e82e3373eaf..8e9b0cb8db41 100644
1042 --- a/include/linux/compiler-gcc.h
1043 +++ b/include/linux/compiler-gcc.h
1044 @@ -334,3 +334,7 @@
1045 * code
1046 */
1047 #define uninitialized_var(x) x = x
1048 +
1049 +#if GCC_VERSION >= 50100
1050 +#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
1051 +#endif
1052 diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
1053 index d4c71132d07f..8c9897b1b953 100644
1054 --- a/include/linux/compiler-intel.h
1055 +++ b/include/linux/compiler-intel.h
1056 @@ -43,3 +43,7 @@
1057 #define __builtin_bswap16 _bswap16
1058 #endif
1059
1060 +/*
1061 + * icc defines __GNUC__, but does not implement the builtin overflow checkers.
1062 + */
1063 +#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
1064 diff --git a/include/linux/overflow.h b/include/linux/overflow.h
1065 new file mode 100644
1066 index 000000000000..c8890ec358a7
1067 --- /dev/null
1068 +++ b/include/linux/overflow.h
1069 @@ -0,0 +1,205 @@
1070 +/* SPDX-License-Identifier: GPL-2.0 OR MIT */
1071 +#ifndef __LINUX_OVERFLOW_H
1072 +#define __LINUX_OVERFLOW_H
1073 +
1074 +#include <linux/compiler.h>
1075 +
1076 +/*
1077 + * In the fallback code below, we need to compute the minimum and
1078 + * maximum values representable in a given type. These macros may also
1079 + * be useful elsewhere, so we provide them outside the
1080 + * COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW block.
1081 + *
1082 + * It would seem more obvious to do something like
1083 + *
1084 + * #define type_min(T) (T)(is_signed_type(T) ? (T)1 << (8*sizeof(T)-1) : 0)
1085 + * #define type_max(T) (T)(is_signed_type(T) ? ((T)1 << (8*sizeof(T)-1)) - 1 : ~(T)0)
1086 + *
1087 + * Unfortunately, the middle expressions, strictly speaking, have
1088 + * undefined behaviour, and at least some versions of gcc warn about
1089 + * the type_max expression (but not if -fsanitize=undefined is in
1090 + * effect; in that case, the warning is deferred to runtime...).
1091 + *
1092 + * The slightly excessive casting in type_min is to make sure the
1093 + * macros also produce sensible values for the exotic type _Bool. [The
1094 + * overflow checkers only almost work for _Bool, but that's
1095 + * a-feature-not-a-bug, since people shouldn't be doing arithmetic on
1096 + * _Bools. Besides, the gcc builtins don't allow _Bool* as third
1097 + * argument.]
1098 + *
1099 + * Idea stolen from
1100 + * https://mail-index.netbsd.org/tech-misc/2007/02/05/0000.html -
1101 + * credit to Christian Biere.
1102 + */
1103 +#define is_signed_type(type) (((type)(-1)) < (type)1)
1104 +#define __type_half_max(type) ((type)1 << (8*sizeof(type) - 1 - is_signed_type(type)))
1105 +#define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
1106 +#define type_min(T) ((T)((T)-type_max(T)-(T)1))
1107 +
1108 +
1109 +#ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
1110 +/*
1111 + * For simplicity and code hygiene, the fallback code below insists on
1112 + * a, b and *d having the same type (similar to the min() and max()
1113 + * macros), whereas gcc's type-generic overflow checkers accept
1114 + * different types. Hence we don't just make check_add_overflow an
1115 + * alias for __builtin_add_overflow, but add type checks similar to
1116 + * below.
1117 + */
1118 +#define check_add_overflow(a, b, d) ({ \
1119 + typeof(a) __a = (a); \
1120 + typeof(b) __b = (b); \
1121 + typeof(d) __d = (d); \
1122 + (void) (&__a == &__b); \
1123 + (void) (&__a == __d); \
1124 + __builtin_add_overflow(__a, __b, __d); \
1125 +})
1126 +
1127 +#define check_sub_overflow(a, b, d) ({ \
1128 + typeof(a) __a = (a); \
1129 + typeof(b) __b = (b); \
1130 + typeof(d) __d = (d); \
1131 + (void) (&__a == &__b); \
1132 + (void) (&__a == __d); \
1133 + __builtin_sub_overflow(__a, __b, __d); \
1134 +})
1135 +
1136 +#define check_mul_overflow(a, b, d) ({ \
1137 + typeof(a) __a = (a); \
1138 + typeof(b) __b = (b); \
1139 + typeof(d) __d = (d); \
1140 + (void) (&__a == &__b); \
1141 + (void) (&__a == __d); \
1142 + __builtin_mul_overflow(__a, __b, __d); \
1143 +})
1144 +
1145 +#else
1146 +
1147 +
1148 +/* Checking for unsigned overflow is relatively easy without causing UB. */
1149 +#define __unsigned_add_overflow(a, b, d) ({ \
1150 + typeof(a) __a = (a); \
1151 + typeof(b) __b = (b); \
1152 + typeof(d) __d = (d); \
1153 + (void) (&__a == &__b); \
1154 + (void) (&__a == __d); \
1155 + *__d = __a + __b; \
1156 + *__d < __a; \
1157 +})
1158 +#define __unsigned_sub_overflow(a, b, d) ({ \
1159 + typeof(a) __a = (a); \
1160 + typeof(b) __b = (b); \
1161 + typeof(d) __d = (d); \
1162 + (void) (&__a == &__b); \
1163 + (void) (&__a == __d); \
1164 + *__d = __a - __b; \
1165 + __a < __b; \
1166 +})
1167 +/*
1168 + * If one of a or b is a compile-time constant, this avoids a division.
1169 + */
1170 +#define __unsigned_mul_overflow(a, b, d) ({ \
1171 + typeof(a) __a = (a); \
1172 + typeof(b) __b = (b); \
1173 + typeof(d) __d = (d); \
1174 + (void) (&__a == &__b); \
1175 + (void) (&__a == __d); \
1176 + *__d = __a * __b; \
1177 + __builtin_constant_p(__b) ? \
1178 + __b > 0 && __a > type_max(typeof(__a)) / __b : \
1179 + __a > 0 && __b > type_max(typeof(__b)) / __a; \
1180 +})
1181 +
1182 +/*
1183 + * For signed types, detecting overflow is much harder, especially if
1184 + * we want to avoid UB. But the interface of these macros is such that
1185 + * we must provide a result in *d, and in fact we must produce the
1186 + * result promised by gcc's builtins, which is simply the possibly
1187 + * wrapped-around value. Fortunately, we can just formally do the
1188 + * operations in the widest relevant unsigned type (u64) and then
1189 + * truncate the result - gcc is smart enough to generate the same code
1190 + * with and without the (u64) casts.
1191 + */
1192 +
1193 +/*
1194 + * Adding two signed integers can overflow only if they have the same
1195 + * sign, and overflow has happened iff the result has the opposite
1196 + * sign.
1197 + */
1198 +#define __signed_add_overflow(a, b, d) ({ \
1199 + typeof(a) __a = (a); \
1200 + typeof(b) __b = (b); \
1201 + typeof(d) __d = (d); \
1202 + (void) (&__a == &__b); \
1203 + (void) (&__a == __d); \
1204 + *__d = (u64)__a + (u64)__b; \
1205 + (((~(__a ^ __b)) & (*__d ^ __a)) \
1206 + & type_min(typeof(__a))) != 0; \
1207 +})
1208 +
1209 +/*
1210 + * Subtraction is similar, except that overflow can now happen only
1211 + * when the signs are opposite. In this case, overflow has happened if
1212 + * the result has the opposite sign of a.
1213 + */
1214 +#define __signed_sub_overflow(a, b, d) ({ \
1215 + typeof(a) __a = (a); \
1216 + typeof(b) __b = (b); \
1217 + typeof(d) __d = (d); \
1218 + (void) (&__a == &__b); \
1219 + (void) (&__a == __d); \
1220 + *__d = (u64)__a - (u64)__b; \
1221 + ((((__a ^ __b)) & (*__d ^ __a)) \
1222 + & type_min(typeof(__a))) != 0; \
1223 +})
1224 +
1225 +/*
1226 + * Signed multiplication is rather hard. gcc always follows C99, so
1227 + * division is truncated towards 0. This means that we can write the
1228 + * overflow check like this:
1229 + *
1230 + * (a > 0 && (b > MAX/a || b < MIN/a)) ||
1231 + * (a < -1 && (b > MIN/a || b < MAX/a)) ||
1232 + * (a == -1 && b == MIN)
1233 + *
1234 + * The redundant casts of -1 are to silence an annoying -Wtype-limits
1235 + * (included in -Wextra) warning: When the type is u8 or u16, the
1236 + * __b_c_e in check_mul_overflow obviously selects
1237 + * __unsigned_mul_overflow, but unfortunately gcc still parses this
1238 + * code and warns about the limited range of __b.
1239 + */
1240 +
1241 +#define __signed_mul_overflow(a, b, d) ({ \
1242 + typeof(a) __a = (a); \
1243 + typeof(b) __b = (b); \
1244 + typeof(d) __d = (d); \
1245 + typeof(a) __tmax = type_max(typeof(a)); \
1246 + typeof(a) __tmin = type_min(typeof(a)); \
1247 + (void) (&__a == &__b); \
1248 + (void) (&__a == __d); \
1249 + *__d = (u64)__a * (u64)__b; \
1250 + (__b > 0 && (__a > __tmax/__b || __a < __tmin/__b)) || \
1251 + (__b < (typeof(__b))-1 && (__a > __tmin/__b || __a < __tmax/__b)) || \
1252 + (__b == (typeof(__b))-1 && __a == __tmin); \
1253 +})
1254 +
1255 +
1256 +#define check_add_overflow(a, b, d) \
1257 + __builtin_choose_expr(is_signed_type(typeof(a)), \
1258 + __signed_add_overflow(a, b, d), \
1259 + __unsigned_add_overflow(a, b, d))
1260 +
1261 +#define check_sub_overflow(a, b, d) \
1262 + __builtin_choose_expr(is_signed_type(typeof(a)), \
1263 + __signed_sub_overflow(a, b, d), \
1264 + __unsigned_sub_overflow(a, b, d))
1265 +
1266 +#define check_mul_overflow(a, b, d) \
1267 + __builtin_choose_expr(is_signed_type(typeof(a)), \
1268 + __signed_mul_overflow(a, b, d), \
1269 + __unsigned_mul_overflow(a, b, d))
1270 +
1271 +
1272 +#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
1273 +
1274 +#endif /* __LINUX_OVERFLOW_H */
1275 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1276 index e90fe6b83e00..ed329a39d621 100644
1277 --- a/include/linux/skbuff.h
1278 +++ b/include/linux/skbuff.h
1279 @@ -2962,6 +2962,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
1280 *
1281 * This is exactly the same as pskb_trim except that it ensures the
1282 * checksum of received packets are still valid after the operation.
1283 + * It can change skb pointers.
1284 */
1285
1286 static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
1287 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
1288 index a6446d72c5d9..a85028e06b1e 100644
1289 --- a/include/net/ip_fib.h
1290 +++ b/include/net/ip_fib.h
1291 @@ -242,7 +242,7 @@ int fib_table_insert(struct net *, struct fib_table *, struct fib_config *);
1292 int fib_table_delete(struct net *, struct fib_table *, struct fib_config *);
1293 int fib_table_dump(struct fib_table *table, struct sk_buff *skb,
1294 struct netlink_callback *cb);
1295 -int fib_table_flush(struct net *net, struct fib_table *table);
1296 +int fib_table_flush(struct net *net, struct fib_table *table, bool flush_all);
1297 struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
1298 void fib_table_flush_external(struct fib_table *table);
1299 void fib_free_table(struct fib_table *tb);
1300 diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
1301 index 8498e3503605..5b675695c661 100644
1302 --- a/net/bridge/br_forward.c
1303 +++ b/net/bridge/br_forward.c
1304 @@ -35,10 +35,10 @@ static inline int should_deliver(const struct net_bridge_port *p,
1305
1306 int br_dev_queue_push_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
1307 {
1308 + skb_push(skb, ETH_HLEN);
1309 if (!is_skb_forwardable(skb->dev, skb))
1310 goto drop;
1311
1312 - skb_push(skb, ETH_HLEN);
1313 br_drop_fake_rtable(skb);
1314
1315 if (skb->ip_summed == CHECKSUM_PARTIAL &&
1316 @@ -96,12 +96,11 @@ static void __br_forward(const struct net_bridge_port *to,
1317 net = dev_net(indev);
1318 } else {
1319 if (unlikely(netpoll_tx_running(to->br->dev))) {
1320 - if (!is_skb_forwardable(skb->dev, skb)) {
1321 + skb_push(skb, ETH_HLEN);
1322 + if (!is_skb_forwardable(skb->dev, skb))
1323 kfree_skb(skb);
1324 - } else {
1325 - skb_push(skb, ETH_HLEN);
1326 + else
1327 br_netpoll_send_skb(to, skb);
1328 - }
1329 return;
1330 }
1331 br_hook = NF_BR_LOCAL_OUT;
1332 diff --git a/net/bridge/br_netfilter_ipv6.c b/net/bridge/br_netfilter_ipv6.c
1333 index 5989661c659f..a1b57cb07f1e 100644
1334 --- a/net/bridge/br_netfilter_ipv6.c
1335 +++ b/net/bridge/br_netfilter_ipv6.c
1336 @@ -131,6 +131,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
1337 IPSTATS_MIB_INDISCARDS);
1338 goto drop;
1339 }
1340 + hdr = ipv6_hdr(skb);
1341 }
1342 if (hdr->nexthdr == NEXTHDR_HOP && br_nf_check_hbh_len(skb))
1343 goto drop;
1344 diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
1345 index 4b3df6b0e3b9..d94aaf7c7685 100644
1346 --- a/net/bridge/netfilter/nft_reject_bridge.c
1347 +++ b/net/bridge/netfilter/nft_reject_bridge.c
1348 @@ -236,6 +236,7 @@ static bool reject6_br_csum_ok(struct sk_buff *skb, int hook)
1349 pskb_trim_rcsum(skb, ntohs(ip6h->payload_len) + sizeof(*ip6h)))
1350 return false;
1351
1352 + ip6h = ipv6_hdr(skb);
1353 thoff = ipv6_skip_exthdr(skb, ((u8*)(ip6h+1) - skb->data), &proto, &fo);
1354 if (thoff < 0 || thoff >= skb->len || (fo & htons(~0x7)) != 0)
1355 return false;
1356 diff --git a/net/can/bcm.c b/net/can/bcm.c
1357 index e4f694dfcf83..c99e7c75eeee 100644
1358 --- a/net/can/bcm.c
1359 +++ b/net/can/bcm.c
1360 @@ -67,6 +67,9 @@
1361 */
1362 #define MAX_NFRAMES 256
1363
1364 +/* limit timers to 400 days for sending/timeouts */
1365 +#define BCM_TIMER_SEC_MAX (400 * 24 * 60 * 60)
1366 +
1367 /* use of last_frames[index].flags */
1368 #define RX_RECV 0x40 /* received data for this element */
1369 #define RX_THR 0x80 /* element not been sent due to throttle feature */
1370 @@ -142,6 +145,22 @@ static inline ktime_t bcm_timeval_to_ktime(struct bcm_timeval tv)
1371 return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
1372 }
1373
1374 +/* check limitations for timeval provided by user */
1375 +static bool bcm_is_invalid_tv(struct bcm_msg_head *msg_head)
1376 +{
1377 + if ((msg_head->ival1.tv_sec < 0) ||
1378 + (msg_head->ival1.tv_sec > BCM_TIMER_SEC_MAX) ||
1379 + (msg_head->ival1.tv_usec < 0) ||
1380 + (msg_head->ival1.tv_usec >= USEC_PER_SEC) ||
1381 + (msg_head->ival2.tv_sec < 0) ||
1382 + (msg_head->ival2.tv_sec > BCM_TIMER_SEC_MAX) ||
1383 + (msg_head->ival2.tv_usec < 0) ||
1384 + (msg_head->ival2.tv_usec >= USEC_PER_SEC))
1385 + return true;
1386 +
1387 + return false;
1388 +}
1389 +
1390 #define CFSIZ(flags) ((flags & CAN_FD_FRAME) ? CANFD_MTU : CAN_MTU)
1391 #define OPSIZ sizeof(struct bcm_op)
1392 #define MHSIZ sizeof(struct bcm_msg_head)
1393 @@ -884,6 +903,10 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1394 if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES)
1395 return -EINVAL;
1396
1397 + /* check timeval limitations */
1398 + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1399 + return -EINVAL;
1400 +
1401 /* check the given can_id */
1402 op = bcm_find_op(&bo->tx_ops, msg_head, ifindex);
1403 if (op) {
1404 @@ -1063,6 +1086,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
1405 (!(msg_head->can_id & CAN_RTR_FLAG))))
1406 return -EINVAL;
1407
1408 + /* check timeval limitations */
1409 + if ((msg_head->flags & SETTIMER) && bcm_is_invalid_tv(msg_head))
1410 + return -EINVAL;
1411 +
1412 /* check the given can_id */
1413 op = bcm_find_op(&bo->rx_ops, msg_head, ifindex);
1414 if (op) {
1415 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
1416 index 9364c39d0555..cbe3fdba4a2c 100644
1417 --- a/net/ipv4/fib_frontend.c
1418 +++ b/net/ipv4/fib_frontend.c
1419 @@ -193,7 +193,7 @@ static void fib_flush(struct net *net)
1420 struct fib_table *tb;
1421
1422 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist)
1423 - flushed += fib_table_flush(net, tb);
1424 + flushed += fib_table_flush(net, tb, false);
1425 }
1426
1427 if (flushed)
1428 @@ -1277,7 +1277,7 @@ static void ip_fib_net_exit(struct net *net)
1429
1430 hlist_for_each_entry_safe(tb, tmp, head, tb_hlist) {
1431 hlist_del(&tb->tb_hlist);
1432 - fib_table_flush(net, tb);
1433 + fib_table_flush(net, tb, true);
1434 fib_free_table(tb);
1435 }
1436 }
1437 diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
1438 index ef40bb659a7a..36f0a8c581d0 100644
1439 --- a/net/ipv4/fib_trie.c
1440 +++ b/net/ipv4/fib_trie.c
1441 @@ -1826,7 +1826,7 @@ void fib_table_flush_external(struct fib_table *tb)
1442 }
1443
1444 /* Caller must hold RTNL. */
1445 -int fib_table_flush(struct net *net, struct fib_table *tb)
1446 +int fib_table_flush(struct net *net, struct fib_table *tb, bool flush_all)
1447 {
1448 struct trie *t = (struct trie *)tb->tb_data;
1449 struct key_vector *pn = t->kv;
1450 @@ -1874,7 +1874,17 @@ int fib_table_flush(struct net *net, struct fib_table *tb)
1451 hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
1452 struct fib_info *fi = fa->fa_info;
1453
1454 - if (!fi || !(fi->fib_flags & RTNH_F_DEAD)) {
1455 + if (!fi ||
1456 + (!(fi->fib_flags & RTNH_F_DEAD) &&
1457 + !fib_props[fa->fa_type].error)) {
1458 + slen = fa->fa_slen;
1459 + continue;
1460 + }
1461 +
1462 + /* Do not flush error routes if network namespace is
1463 + * not being dismantled
1464 + */
1465 + if (!flush_all && fib_props[fa->fa_type].error) {
1466 slen = fa->fa_slen;
1467 continue;
1468 }
1469 diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
1470 index 5a8c26c9872d..0fb49dedc9fb 100644
1471 --- a/net/ipv4/inet_fragment.c
1472 +++ b/net/ipv4/inet_fragment.c
1473 @@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
1474
1475 void inet_frags_exit_net(struct netns_frags *nf)
1476 {
1477 - nf->low_thresh = 0; /* prevent creation of new frags */
1478 + nf->high_thresh = 0; /* prevent creation of new frags */
1479
1480 rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
1481 }
1482 diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
1483 index d6feabb03516..bcadca26523b 100644
1484 --- a/net/ipv4/ip_input.c
1485 +++ b/net/ipv4/ip_input.c
1486 @@ -475,6 +475,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
1487 goto drop;
1488 }
1489
1490 + iph = ip_hdr(skb);
1491 skb->transport_header = skb->network_header + iph->ihl*4;
1492
1493 /* Remove any debris in the socket control block */
1494 diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
1495 index 326945d9be5f..3bd4d5d0c346 100644
1496 --- a/net/openvswitch/flow_netlink.c
1497 +++ b/net/openvswitch/flow_netlink.c
1498 @@ -409,7 +409,7 @@ static int __parse_flow_nlattrs(const struct nlattr *attr,
1499 return -EINVAL;
1500 }
1501
1502 - if (!nz || !is_all_zero(nla_data(nla), expected_len)) {
1503 + if (!nz || !is_all_zero(nla_data(nla), nla_len(nla))) {
1504 attrs |= 1 << type;
1505 a[type] = nla;
1506 }
1507 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
1508 index ea13df1be067..912ed9b901ac 100644
1509 --- a/net/sched/sch_api.c
1510 +++ b/net/sched/sch_api.c
1511 @@ -1850,7 +1850,6 @@ done:
1512 int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1513 struct tcf_result *res, bool compat_mode)
1514 {
1515 - __be16 protocol = tc_skb_protocol(skb);
1516 #ifdef CONFIG_NET_CLS_ACT
1517 const struct tcf_proto *old_tp = tp;
1518 int limit = 0;
1519 @@ -1858,6 +1857,7 @@ int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
1520 reclassify:
1521 #endif
1522 for (; tp; tp = rcu_dereference_bh(tp->next)) {
1523 + __be16 protocol = tc_skb_protocol(skb);
1524 int err;
1525
1526 if (tp->protocol != protocol &&
1527 @@ -1884,7 +1884,6 @@ reset:
1528 }
1529
1530 tp = old_tp;
1531 - protocol = tc_skb_protocol(skb);
1532 goto reclassify;
1533 #endif
1534 }
1535 diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c
1536 index 09103aab0cb2..7d410e39d1a0 100644
1537 --- a/sound/soc/codecs/rt5514-spi.c
1538 +++ b/sound/soc/codecs/rt5514-spi.c
1539 @@ -253,6 +253,8 @@ static int rt5514_spi_pcm_probe(struct snd_soc_platform *platform)
1540
1541 rt5514_dsp = devm_kzalloc(platform->dev, sizeof(*rt5514_dsp),
1542 GFP_KERNEL);
1543 + if (!rt5514_dsp)
1544 + return -ENOMEM;
1545
1546 rt5514_dsp->dev = &rt5514_spi->dev;
1547 mutex_init(&rt5514_dsp->dma_lock);
1548 diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
1549 index f5a8050351b5..e83e314a76a5 100644
1550 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
1551 +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
1552 @@ -399,7 +399,13 @@ static int sst_media_hw_params(struct snd_pcm_substream *substream,
1553 struct snd_pcm_hw_params *params,
1554 struct snd_soc_dai *dai)
1555 {
1556 - snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
1557 + int ret;
1558 +
1559 + ret =
1560 + snd_pcm_lib_malloc_pages(substream,
1561 + params_buffer_bytes(params));
1562 + if (ret)
1563 + return ret;
1564 memset(substream->runtime->dma_area, 0, params_buffer_bytes(params));
1565 return 0;
1566 }
1567 diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c
1568 index b46e1cf347e5..046a4850e3df 100644
1569 --- a/tools/perf/util/unwind-libdw.c
1570 +++ b/tools/perf/util/unwind-libdw.c
1571 @@ -42,13 +42,13 @@ static int __report_module(struct addr_location *al, u64 ip,
1572 Dwarf_Addr s;
1573
1574 dwfl_module_info(mod, NULL, &s, NULL, NULL, NULL, NULL, NULL);
1575 - if (s != al->map->start)
1576 + if (s != al->map->start - al->map->pgoff)
1577 mod = 0;
1578 }
1579
1580 if (!mod)
1581 mod = dwfl_report_elf(ui->dwfl, dso->short_name,
1582 - dso->long_name, -1, al->map->start,
1583 + (dso->symsrc_filename ? dso->symsrc_filename : dso->long_name), -1, al->map->start - al->map->pgoff,
1584 false);
1585
1586 return mod && dwfl_addrmodule(ui->dwfl, ip) == mod ? 0 : -1;
1587 diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
1588 index 85a78eba0a93..874972ccfc95 100644
1589 --- a/tools/testing/selftests/x86/protection_keys.c
1590 +++ b/tools/testing/selftests/x86/protection_keys.c
1591 @@ -1129,6 +1129,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
1592 pkey_assert(err);
1593 }
1594
1595 +void become_child(void)
1596 +{
1597 + pid_t forkret;
1598 +
1599 + forkret = fork();
1600 + pkey_assert(forkret >= 0);
1601 + dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
1602 +
1603 + if (!forkret) {
1604 + /* in the child */
1605 + return;
1606 + }
1607 + exit(0);
1608 +}
1609 +
1610 /* Assumes that all pkeys other than 'pkey' are unallocated */
1611 void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1612 {
1613 @@ -1139,7 +1154,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1614 int nr_allocated_pkeys = 0;
1615 int i;
1616
1617 - for (i = 0; i < NR_PKEYS*2; i++) {
1618 + for (i = 0; i < NR_PKEYS*3; i++) {
1619 int new_pkey;
1620 dprintf1("%s() alloc loop: %d\n", __func__, i);
1621 new_pkey = alloc_pkey();
1622 @@ -1150,20 +1165,26 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
1623 if ((new_pkey == -1) && (errno == ENOSPC)) {
1624 dprintf2("%s() failed to allocate pkey after %d tries\n",
1625 __func__, nr_allocated_pkeys);
1626 - break;
1627 + } else {
1628 + /*
1629 + * Ensure the number of successes never
1630 + * exceeds the number of keys supported
1631 + * in the hardware.
1632 + */
1633 + pkey_assert(nr_allocated_pkeys < NR_PKEYS);
1634 + allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
1635 }
1636 - pkey_assert(nr_allocated_pkeys < NR_PKEYS);
1637 - allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
1638 +
1639 + /*
1640 + * Make sure that allocation state is properly
1641 + * preserved across fork().
1642 + */
1643 + if (i == NR_PKEYS*2)
1644 + become_child();
1645 }
1646
1647 dprintf3("%s()::%d\n", __func__, __LINE__);
1648
1649 - /*
1650 - * ensure it did not reach the end of the loop without
1651 - * failure:
1652 - */
1653 - pkey_assert(i < NR_PKEYS*2);
1654 -
1655 /*
1656 * There are 16 pkeys supported in hardware. One is taken
1657 * up for the default (0) and another can be taken up by