Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.16/0102-4.16.3-all-fixes.patch



Revision 3102
Mon Apr 23 10:15:11 2018 UTC by niro
File size: 106603 bytes
-added patches up to linux-4.16.3
diff --git a/Makefile b/Makefile
index f0040b05df30..38df392e45e4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 16
-SUBLEVEL = 2
+SUBLEVEL = 3
 EXTRAVERSION =
 NAME = Fearless Coyote

diff --git a/arch/arm/boot/compressed/misc.c b/arch/arm/boot/compressed/misc.c
index 16a8a804e958..e8fe51f4e97a 100644
--- a/arch/arm/boot/compressed/misc.c
+++ b/arch/arm/boot/compressed/misc.c
@@ -128,12 +128,7 @@ asmlinkage void __div0(void)
 	error("Attempting division by 0!");
 }

-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;

 void __stack_chk_fail(void)
 {
@@ -150,8 +145,6 @@ decompress_kernel(unsigned long output_start, unsigned long free_mem_ptr_p,
 {
 	int ret;

-	__stack_chk_guard_setup();
-
 	output_data = (unsigned char *)output_start;
 	free_mem_ptr = free_mem_ptr_p;
 	free_mem_end_ptr = free_mem_ptr_end_p;
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index fdf99e9dd4c3..81df9047e110 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -76,12 +76,7 @@ void error(char *x)
 #include "../../../../lib/decompress_unxz.c"
 #endif

-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;

 void __stack_chk_fail(void)
 {
@@ -92,8 +87,6 @@ void decompress_kernel(unsigned long boot_heap_start)
 {
 	unsigned long zimage_start, zimage_size;

-	__stack_chk_guard_setup();
-
 	zimage_start = (unsigned long)(&__image_begin);
 	zimage_size = (unsigned long)(&__image_end) -
 		(unsigned long)(&__image_begin);
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index 29b99b8964aa..d4240aa7f8b1 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -651,6 +651,10 @@ static int match_pci_device(struct device *dev, int index,
 		       (modpath->mod == PCI_FUNC(devfn)));
 	}

+	/* index might be out of bounds for bc[] */
+	if (index >= 6)
+		return 0;
+
 	id = PCI_SLOT(pdev->devfn) | (PCI_FUNC(pdev->devfn) << 5);
 	return (modpath->bc[index] == id);
 }
diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S
index 8d072c44f300..781c3b9a3e46 100644
--- a/arch/parisc/kernel/hpmc.S
+++ b/arch/parisc/kernel/hpmc.S
@@ -84,6 +84,7 @@ END(hpmc_pim_data)
 	.text

 	.import intr_save, code
+	.align 16
 ENTRY_CFI(os_hpmc)
 .os_hpmc:

@@ -300,12 +301,15 @@ os_hpmc_6:

 	b .
 	nop
+	.align 16 /* make function length multiple of 16 bytes */
 ENDPROC_CFI(os_hpmc)
 .os_hpmc_end:


 __INITRODATA
+.globl os_hpmc_size
 	.align 4
-	.export os_hpmc_size
+	.type os_hpmc_size, @object
+	.size os_hpmc_size, 4
 os_hpmc_size:
 	.word .os_hpmc_end-.os_hpmc
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index e1c083fbe434..78e6a392330f 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -470,8 +470,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 		for (i = 0; i < npages; ++i) {
 			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
 				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
-			trace_tlbie(kvm->arch.lpid, 0, rbvalues[i],
-				kvm->arch.lpid, 0, 0, 0);
 		}

 		if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
@@ -492,8 +490,6 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
 		for (i = 0; i < npages; ++i) {
 			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
 				     "r" (rbvalues[i]), "r" (0));
-			trace_tlbie(kvm->arch.lpid, 1, rbvalues[i],
-				0, 0, 0, 0);
 		}
 		asm volatile("ptesync" : : : "memory");
 	}
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 18c1eeb847b2..6f2a193ccccc 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -279,7 +279,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 	if (put_compat_sigset((compat_sigset_t __user *)frame->sc.oldmask,
 			      set, sizeof(compat_sigset_t)))
 		return -EFAULT;
-	if (__put_user(ptr_to_compat(&frame->sc), &frame->sc.sregs))
+	if (__put_user(ptr_to_compat(&frame->sregs), &frame->sc.sregs))
 		return -EFAULT;

 	/* Store registers needed to create the signal frame */
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 34477c1aee6d..502c90525a0e 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -776,6 +776,7 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb,
 	/* copy and convert to ebcdic */
 	memcpy(ipb->hdr.loadparm, buf, lp_len);
 	ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN);
+	ipb->hdr.flags |= DIAG308_FLAGS_LP_VALID;
 	return len;
 }

diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index 627ce8e75e01..c15cac9251b9 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -104,12 +104,7 @@ static void error(char *x)
 	while(1); /* Halt */
 }

-unsigned long __stack_chk_guard;
-
-void __stack_chk_guard_setup(void)
-{
-	__stack_chk_guard = 0x000a0dff;
-}
+const unsigned long __stack_chk_guard = 0x000a0dff;

 void __stack_chk_fail(void)
 {
@@ -130,8 +125,6 @@ void decompress_kernel(void)
 {
 	unsigned long output_addr;

-	__stack_chk_guard_setup();
-
 #ifdef CONFIG_SUPERH64
 	output_addr = (CONFIG_MEMORY_START + 0x2000);
 #else
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 98722773391d..f01eef8b392e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -319,7 +319,7 @@ struct apic {
 	/* Probe, setup and smpboot functions */
 	int (*probe)(void);
 	int (*acpi_madt_oem_check)(char *oem_id, char *oem_table_id);
-	int (*apic_id_valid)(int apicid);
+	int (*apic_id_valid)(u32 apicid);
 	int (*apic_id_registered)(void);

 	bool (*check_apicid_used)(physid_mask_t *map, int apicid);
@@ -492,7 +492,7 @@ static inline unsigned int read_apic_id(void)
 	return apic->get_apic_id(reg);
 }

-extern int default_apic_id_valid(int apicid);
+extern int default_apic_id_valid(u32 apicid);
 extern int default_acpi_madt_oem_check(char *, char *);
 extern void default_setup_apic_routing(void);

diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h
index 96ea4b5ba658..340070415c2c 100644
--- a/arch/x86/include/asm/mce.h
+++ b/arch/x86/include/asm/mce.h
@@ -346,6 +346,7 @@ enum smca_bank_types {
 	SMCA_IF,	/* Instruction Fetch */
 	SMCA_L2_CACHE,	/* L2 Cache */
 	SMCA_DE,	/* Decoder Unit */
+	SMCA_RESERVED,	/* Reserved */
 	SMCA_EX,	/* Execution Unit */
 	SMCA_FP,	/* Floating Point */
 	SMCA_L3_CACHE,	/* L3 Cache */
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h
index aebf60357758..a06cbf019744 100644
--- a/arch/x86/include/uapi/asm/bootparam.h
+++ b/arch/x86/include/uapi/asm/bootparam.h
@@ -137,15 +137,15 @@ struct boot_e820_entry {
  * setup data structure.
  */
 struct jailhouse_setup_data {
-	u16	version;
-	u16	compatible_version;
-	u16	pm_timer_address;
-	u16	num_cpus;
-	u64	pci_mmconfig_base;
-	u32	tsc_khz;
-	u32	apic_khz;
-	u8	standard_ioapic;
-	u8	cpu_ids[255];
+	__u16	version;
+	__u16	compatible_version;
+	__u16	pm_timer_address;
+	__u16	num_cpus;
+	__u64	pci_mmconfig_base;
+	__u32	tsc_khz;
+	__u32	apic_khz;
+	__u8	standard_ioapic;
+	__u8	cpu_ids[255];
 } __attribute__((packed));

 /* The so-called "zeropage" */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 2aa92094b59d..5ee33a6e33bb 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -200,7 +200,7 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
 {
 	struct acpi_madt_local_x2apic *processor = NULL;
 #ifdef CONFIG_X86_X2APIC
-	int apic_id;
+	u32 apic_id;
 	u8 enabled;
 #endif

@@ -222,10 +222,13 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
 	 * to not preallocating memory for all NR_CPUS
 	 * when we use CPU hotplug.
 	 */
-	if (!apic->apic_id_valid(apic_id) && enabled)
-		printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
-	else
-		acpi_register_lapic(apic_id, processor->uid, enabled);
+	if (!apic->apic_id_valid(apic_id)) {
+		if (enabled)
+			pr_warn(PREFIX "x2apic entry ignored\n");
+		return 0;
+	}
+
+	acpi_register_lapic(apic_id, processor->uid, enabled);
 #else
 	printk(KERN_WARNING PREFIX "x2apic entry ignored\n");
 #endif
diff --git a/arch/x86/kernel/apic/apic_common.c b/arch/x86/kernel/apic/apic_common.c
index a360801779ae..02b4839478b1 100644
--- a/arch/x86/kernel/apic/apic_common.c
+++ b/arch/x86/kernel/apic/apic_common.c
@@ -40,7 +40,7 @@ int default_check_phys_apicid_present(int phys_apicid)
 	return physid_isset(phys_apicid, phys_cpu_present_map);
 }

-int default_apic_id_valid(int apicid)
+int default_apic_id_valid(u32 apicid)
 {
 	return (apicid < 255);
 }
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 134e04506ab4..78778b54f904 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -56,7 +56,7 @@ static u32 numachip2_set_apic_id(unsigned int id)
 	return id << 24;
 }

-static int numachip_apic_id_valid(int apicid)
+static int numachip_apic_id_valid(u32 apicid)
 {
 	/* Trust what bootloader passes in MADT */
 	return 1;
diff --git a/arch/x86/kernel/apic/x2apic.h b/arch/x86/kernel/apic/x2apic.h
index b107de381cb5..a49b3604027f 100644
--- a/arch/x86/kernel/apic/x2apic.h
+++ b/arch/x86/kernel/apic/x2apic.h
@@ -1,6 +1,6 @@
 /* Common bits for X2APIC cluster/physical modes. */

-int x2apic_apic_id_valid(int apicid);
+int x2apic_apic_id_valid(u32 apicid);
 int x2apic_apic_id_registered(void);
 void __x2apic_send_IPI_dest(unsigned int apicid, int vector, unsigned int dest);
 unsigned int x2apic_get_apic_id(unsigned long id);
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index f8d9d69994e6..e972405eb2b5 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -101,7 +101,7 @@ static int x2apic_phys_probe(void)
 }

 /* Common x2apic functions, also used by x2apic_cluster */
-int x2apic_apic_id_valid(int apicid)
+int x2apic_apic_id_valid(u32 apicid)
 {
 	return 1;
 }
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index f11910b44638..efaf2d4f9c3c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -557,7 +557,7 @@ static void uv_send_IPI_all(int vector)
 	uv_send_IPI_mask(cpu_online_mask, vector);
 }

-static int uv_apic_id_valid(int apicid)
+static int uv_apic_id_valid(u32 apicid)
 {
 	return 1;
 }
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 0f32ad242324..12bc2863a4d6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -82,6 +82,7 @@ static struct smca_bank_name smca_names[] = {
 	[SMCA_IF]	= { "insn_fetch",	"Instruction Fetch Unit" },
 	[SMCA_L2_CACHE]	= { "l2_cache",		"L2 Cache" },
 	[SMCA_DE]	= { "decode_unit",	"Decode Unit" },
+	[SMCA_RESERVED]	= { "reserved",		"Reserved" },
 	[SMCA_EX]	= { "execution_unit",	"Execution Unit" },
 	[SMCA_FP]	= { "floating_point",	"Floating Point Unit" },
 	[SMCA_L3_CACHE]	= { "l3_cache",		"L3 Cache" },
@@ -110,14 +111,14 @@ const char *smca_get_long_name(enum smca_bank_types t)
 }
 EXPORT_SYMBOL_GPL(smca_get_long_name);

-static enum smca_bank_types smca_get_bank_type(struct mce *m)
+static enum smca_bank_types smca_get_bank_type(unsigned int bank)
 {
 	struct smca_bank *b;

-	if (m->bank >= N_SMCA_BANK_TYPES)
+	if (bank >= MAX_NR_BANKS)
 		return N_SMCA_BANK_TYPES;

-	b = &smca_banks[m->bank];
+	b = &smca_banks[bank];
 	if (!b->hwid)
 		return N_SMCA_BANK_TYPES;

@@ -127,6 +128,9 @@ static enum smca_bank_types smca_get_bank_type(struct mce *m)
 static struct smca_hwid smca_hwid_mcatypes[] = {
 	/* { bank_type, hwid_mcatype, xec_bitmap } */

+	/* Reserved type */
+	{ SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 },
+
 	/* ZN Core (HWID=0xB0) MCA types */
 	{ SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF },
 	{ SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF },
@@ -432,7 +436,25 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi
 {
 	u32 addr = 0, offset = 0;

+	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
+		return addr;
+
+	/* Get address from already initialized block. */
+	if (per_cpu(threshold_banks, cpu)) {
+		struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank];
+
+		if (bankp && bankp->blocks) {
+			struct threshold_block *blockp = &bankp->blocks[block];
+
+			if (blockp)
+				return blockp->address;
+		}
+	}
+
 	if (mce_flags.smca) {
+		if (smca_get_bank_type(bank) == SMCA_RESERVED)
+			return addr;
+
 		if (!block) {
 			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
 		} else {
@@ -760,7 +782,7 @@ bool amd_mce_is_memory_error(struct mce *m)
 	u8 xec = (m->status >> 16) & 0x1f;

 	if (mce_flags.smca)
-		return smca_get_bank_type(m) == SMCA_UMC && xec == 0x0;
+		return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0;

 	return m->bank == 4 && xec == 0x8;
 }
@@ -1063,7 +1085,7 @@ static struct kobj_type threshold_ktype = {

 static const char *get_name(unsigned int bank, struct threshold_block *b)
 {
-	unsigned int bank_type;
+	enum smca_bank_types bank_type;

 	if (!mce_flags.smca) {
 		if (b && bank == 4)
@@ -1072,11 +1094,10 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
 		return th_names[bank];
 	}

-	if (!smca_banks[bank].hwid)
+	bank_type = smca_get_bank_type(bank);
+	if (bank_type >= N_SMCA_BANK_TYPES)
 		return NULL;

-	bank_type = smca_banks[bank].hwid->bank_type;
-
 	if (b && bank_type == SMCA_UMC) {
 		if (b->block < ARRAY_SIZE(smca_umc_block_names))
 			return smca_umc_block_names[b->block];
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c
index de58533d3664..2fa79e2e73ea 100644
--- a/arch/x86/xen/apic.c
+++ b/arch/x86/xen/apic.c
@@ -112,7 +112,7 @@ static int xen_madt_oem_check(char *oem_id, char *oem_table_id)
 	return xen_pv_domain();
 }

-static int xen_id_always_valid(int apicid)
+static int xen_id_always_valid(u32 apicid)
 {
 	return 1;
 }
diff --git a/block/blk-core.c b/block/blk-core.c
index 6d82c4f7fadd..3b489527c8f2 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -827,7 +827,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 		bool success = false;
 		int ret;

-		rcu_read_lock_sched();
+		rcu_read_lock();
 		if (percpu_ref_tryget_live(&q->q_usage_counter)) {
 			/*
 			 * The code that sets the PREEMPT_ONLY flag is
@@ -840,7 +840,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 				percpu_ref_put(&q->q_usage_counter);
 			}
 		}
-		rcu_read_unlock_sched();
+		rcu_read_unlock();

 		if (success)
 			return 0;
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index 9f8cffc8a701..3eb169f15842 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -16,11 +16,6 @@

 static int cpu_to_queue_index(unsigned int nr_queues, const int cpu)
 {
-	/*
-	 * Non present CPU will be mapped to queue index 0.
-	 */
-	if (!cpu_present(cpu))
-		return 0;
 	return cpu % nr_queues;
 }

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 16e83e6df404..56e0c3699f9e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1188,7 +1188,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 		struct blk_mq_queue_data bd;

 		rq = list_first_entry(list, struct request, queuelist);
-		if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
+
+		hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu);
+		if (!got_budget && !blk_mq_get_dispatch_budget(hctx))
+			break;
+
+		if (!blk_mq_get_driver_tag(rq, NULL, false)) {
 			/*
 			 * The initial allocation attempt failed, so we need to
 			 * rerun the hardware queue when a tag is freed. The
@@ -1197,8 +1202,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			 * we'll re-run it below.
 			 */
 			if (!blk_mq_mark_tag_wait(&hctx, rq)) {
-				if (got_budget)
-					blk_mq_put_dispatch_budget(hctx);
+				blk_mq_put_dispatch_budget(hctx);
 				/*
 				 * For non-shared tags, the RESTART check
 				 * will suffice.
@@ -1209,11 +1213,6 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 			}
 		}

-		if (!got_budget && !blk_mq_get_dispatch_budget(hctx)) {
-			blk_mq_put_driver_tag(rq);
-			break;
-		}
-
 		list_del_init(&rq->queuelist);

 		bd.rq = rq;
@@ -1812,11 +1811,11 @@ static blk_status_t __blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 	if (q->elevator && !bypass_insert)
 		goto insert;

-	if (!blk_mq_get_driver_tag(rq, NULL, false))
+	if (!blk_mq_get_dispatch_budget(hctx))
 		goto insert;

-	if (!blk_mq_get_dispatch_budget(hctx)) {
-		blk_mq_put_driver_tag(rq);
+	if (!blk_mq_get_driver_tag(rq, NULL, false)) {
+		blk_mq_put_dispatch_budget(hctx);
 		goto insert;
 	}

@@ -2440,6 +2439,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 		 */
 		hctx->next_cpu = cpumask_first_and(hctx->cpumask,
 				cpu_online_mask);
+		if (hctx->next_cpu >= nr_cpu_ids)
+			hctx->next_cpu = cpumask_first(hctx->cpumask);
 		hctx->next_cpu_batch = BLK_MQ_CPU_WORK_BATCH;
 	}
 }
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a05e3676d24a..f0e6e412891f 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -165,7 +165,7 @@ void blk_abort_request(struct request *req)
 		 * No need for fancy synchronizations.
 		 */
 		blk_rq_set_deadline(req, jiffies);
-		mod_timer(&req->q->timeout, 0);
+		kblockd_schedule_work(&req->q->timeout_work);
 	} else {
 		if (blk_mark_rq_complete(req))
 			return;
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index eb09ef55c38a..9f8f39d49396 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -3024,15 +3024,21 @@ static void acpi_nfit_scrub(struct work_struct *work)
 static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
 {
 	struct nfit_spa *nfit_spa;
-	int rc;

-	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
-		if (nfit_spa_type(nfit_spa->spa) == NFIT_SPA_DCR) {
-			/* BLK regions don't need to wait for ars results */
-			rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
-			if (rc)
-				return rc;
-		}
+	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
+		int rc, type = nfit_spa_type(nfit_spa->spa);
+
+		/* PMEM and VMEM will be registered by the ARS workqueue */
+		if (type == NFIT_SPA_PM || type == NFIT_SPA_VOLATILE)
+			continue;
+		/* BLK apertures belong to BLK region registration below */
+		if (type == NFIT_SPA_BDW)
+			continue;
+		/* BLK regions don't need to wait for ARS results */
+		rc = acpi_nfit_register_region(acpi_desc, nfit_spa);
+		if (rc)
+			return rc;
+	}

 	acpi_desc->ars_start_flags = 0;
 	if (!acpi_desc->cancel)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ee62d2d517bf..fe92cb972dd1 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1103,11 +1103,15 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;

-		if (type >= MAX_LO_CRYPT)
-			return -EINVAL;
+		if (type >= MAX_LO_CRYPT) {
+			err = -EINVAL;
+			goto exit;
+		}
 		xfer = xfer_funcs[type];
-		if (xfer == NULL)
-			return -EINVAL;
+		if (xfer == NULL) {
+			err = -EINVAL;
+			goto exit;
+		}
 	} else
 		xfer = NULL;

diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index 47a4127a6067..1a81f6b8c2ce 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -795,22 +795,6 @@ static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = {
 #ifdef CONFIG_ACPI
 /* IRQ polarity of some chipsets are not defined correctly in ACPI table. */
 static const struct dmi_system_id bcm_active_low_irq_dmi_table[] = {
-	{
-		.ident = "Asus T100TA",
-		.matches = {
-			DMI_EXACT_MATCH(DMI_SYS_VENDOR,
-					"ASUSTeK COMPUTER INC."),
-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"),
-		},
-	},
-	{
-		.ident = "Asus T100CHI",
-		.matches = {
-			DMI_EXACT_MATCH(DMI_SYS_VENDOR,
-					"ASUSTeK COMPUTER INC."),
-			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100CHI"),
-		},
-	},
 	{ /* Handle ThinkPad 8 tablets with BCM2E55 chipset ACPI ID */
 		.ident = "Lenovo ThinkPad 8",
 		.matches = {
@@ -838,7 +822,9 @@ static int bcm_resource(struct acpi_resource *ares, void *data)
 	switch (ares->type) {
 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
 		irq = &ares->data.extended_irq;
-		dev->irq_active_low = irq->polarity == ACPI_ACTIVE_LOW;
+		if (irq->polarity != ACPI_ACTIVE_LOW)
+			dev_info(dev->dev, "ACPI Interrupt resource is active-high, this is usually wrong, treating the IRQ as active-low\n");
+		dev->irq_active_low = true;
 		break;

 	case ACPI_RESOURCE_TYPE_GPIO:
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6768cb2dd740..f5b2d69316a1 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -252,6 +252,9 @@ struct smi_info {
 	/* Default driver model device. */
 	struct platform_device *pdev;

+	/* Have we added the device group to the device? */
+	bool dev_group_added;
+
 	/* Counters and things for the proc filesystem. */
 	atomic_t stats[SI_NUM_STATS];

@@ -2027,8 +2030,8 @@ int ipmi_si_add_smi(struct si_sm_io *io)
 	if (initialized) {
 		rv = try_smi_init(new_smi);
 		if (rv) {
-			mutex_unlock(&smi_infos_lock);
 			cleanup_one_si(new_smi);
+			mutex_unlock(&smi_infos_lock);
 			return rv;
 		}
 	}
@@ -2187,6 +2190,7 @@ static int try_smi_init(struct smi_info *new_smi)
 			rv);
 		goto out_err_stop_timer;
 	}
+	new_smi->dev_group_added = true;

 	rv = ipmi_register_smi(&handlers,
 			       new_smi,
@@ -2240,7 +2244,10 @@ static int try_smi_init(struct smi_info *new_smi)
 	return 0;

 out_err_remove_attrs:
-	device_remove_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+	if (new_smi->dev_group_added) {
+		device_remove_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
+		new_smi->dev_group_added = false;
+	}
 	dev_set_drvdata(new_smi->io.dev, NULL);

 out_err_stop_timer:
@@ -2288,6 +2295,7 @@ static int try_smi_init(struct smi_info *new_smi)
 		else
 			platform_device_put(new_smi->pdev);
 		new_smi->pdev = NULL;
+		new_smi->io.dev = NULL;
 	}

 	kfree(init_name);
@@ -2384,8 +2392,10 @@ static void cleanup_one_si(struct smi_info *to_clean)
 		}
 	}

-	device_remove_group(to_clean->io.dev, &ipmi_si_dev_attr_group);
-	dev_set_drvdata(to_clean->io.dev, NULL);
+	if (to_clean->dev_group_added)
+		device_remove_group(to_clean->io.dev, &ipmi_si_dev_attr_group);
+	if (to_clean->io.dev)
+		dev_set_drvdata(to_clean->io.dev, NULL);

 	list_del(&to_clean->link);

diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index a11a671c7a38..2ab4d61ee47e 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -854,21 +854,24 @@ static void decode_mc6_mce(struct mce *m)
 static void decode_smca_error(struct mce *m)
 {
 	struct smca_hwid *hwid;
-	unsigned int bank_type;
+	enum smca_bank_types bank_type;
 	const char *ip_name;
 	u8 xec = XEC(m->status, xec_mask);

 	if (m->bank >= ARRAY_SIZE(smca_banks))
 		return;

-	if (x86_family(m->cpuid) >= 0x17 && m->bank == 4)
-		pr_emerg(HW_ERR "Bank 4 is reserved on Fam17h.\n");
-
 	hwid = smca_banks[m->bank].hwid;
 	if (!hwid)
 		return;

 	bank_type = hwid->bank_type;
+
+	if (bank_type == SMCA_RESERVED) {
+		pr_emerg(HW_ERR "Bank %d is reserved.\n", m->bank);
+		return;
+	}
+
 	ip_name = smca_get_long_name(bank_type);

 	pr_emerg(HW_ERR "%s Extended Error Code: %d\n", ip_name, xec);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 31f5ad605e59..5b6aeccd3d90 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -240,9 +240,10 @@ int radeon_bo_create(struct radeon_device *rdev,
 	 * may be slow
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
 	 */
-
+#ifndef CONFIG_COMPILE_TEST
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining
+#endif

 	if (bo->flags & RADEON_GEM_GTT_WC)
 		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index c21020b69114..55ee5e87073a 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -71,7 +71,7 @@ static const struct vmbus_device vmbus_devs[] = {
 	/* PCIE */
 	{ .dev_type = HV_PCIE,
 	  HV_PCIE_GUID,
-	  .perf_device = true,
+	  .perf_device = false,
 	},

 	/* Synthetic Frame Buffer */
diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
index 4257451f1bd8..0b86ed01e85d 100644
--- a/drivers/media/platform/vsp1/vsp1_dl.c
+++ b/drivers/media/platform/vsp1/vsp1_dl.c
@@ -509,7 +509,8 @@ static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
 		return !!(vsp1_read(vsp1, VI6_DL_BODY_SIZE)
 			  & VI6_DL_BODY_SIZE_UPD);
 	else
-		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index) & VI6_CMD_UPDHDR));
+		return !!(vsp1_read(vsp1, VI6_CMD(dlm->index))
+			  & VI6_CMD_UPDHDR);
 }

 static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 5198c9eeb348..4312935f1dfc 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -101,7 +101,7 @@ static int get_v4l2_window32(struct v4l2_window __user *kp,
 static int put_v4l2_window32(struct v4l2_window __user *kp,
 			     struct v4l2_window32 __user *up)
 {
-	struct v4l2_clip __user *kclips = kp->clips;
+	struct v4l2_clip __user *kclips;
 	struct v4l2_clip32 __user *uclips;
 	compat_caddr_t p;
 	u32 clipcount;
@@ -116,6 +116,8 @@ static int put_v4l2_window32(struct v4l2_window __user *kp,
 	if (!clipcount)
 		return 0;

+	if (get_user(kclips, &kp->clips))
+		return -EFAULT;
 	if (get_user(p, &up->clips))
 		return -EFAULT;
 	uclips = compat_ptr(p);
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 0301fe426a43..1d0b2208e8fb 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -939,10 +939,14 @@ int __video_register_device(struct video_device *vdev,
 #endif
 	vdev->minor = i + minor_offset;
 	vdev->num = nr;
-	devnode_set(vdev);

 	/* Should not happen since we thought this minor was free */
-	WARN_ON(video_device[vdev->minor] != NULL);
+	if (WARN_ON(video_device[vdev->minor])) {
+		mutex_unlock(&videodev_lock);
+		printk(KERN_ERR "video_device not empty!\n");
+		return -ENFILE;
+	}
+	devnode_set(vdev);
 	vdev->index = get_index(vdev);
 	video_device[vdev->minor] = vdev;
 	mutex_unlock(&videodev_lock);
diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
index 5782733959f0..f4e93f5fc204 100644
--- a/drivers/net/slip/slhc.c
+++ b/drivers/net/slip/slhc.c
@@ -509,6 +509,10 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
 		if(x < 0 || x > comp->rslot_limit)
 			goto bad;

+		/* Check if the cstate is initialized */
+		if (!comp->rstate[x].initialized)
+			goto bad;
+
 		comp->flags &=~ SLF_TOSS;
 		comp->recv_current = x;
 	} else {
@@ -673,6 +677,7 @@ slhc_remember(struct slcompress *comp, unsigned char *icp, int isize)
 	if (cs->cs_tcp.doff > 5)
 		memcpy(cs->cs_tcpopt, icp + ihl*4 + sizeof(struct tcphdr), (cs->cs_tcp.doff - 5) * 4);
 	cs->cs_hsize = ihl*2 + cs->cs_tcp.doff*2;
+	cs->initialized = true;
 	/* Put headers back on packet
 	 * Neither header checksum is recalculated
 	 */
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fff4b13eece2..5c42cf81a08b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -901,6 +901,12 @@ static const struct usb_device_id products[] = {
 			USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long)&wwan_info,
+}, {
+	/* Cinterion AHS3 modem by GEMALTO */
+	USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0055, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET,
+			USB_CDC_PROTO_NONE),
+	.driver_info = (unsigned long)&wwan_info,
 }, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 55a78eb96961..32cf21716f19 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -928,7 +928,8 @@ static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
 			offset += 0x100;
 		else
 			ret = -EINVAL;
-		ret = lan78xx_read_raw_otp(dev, offset, length, data);
+		if (!ret)
+			ret = lan78xx_read_raw_otp(dev, offset, length, data);
 	}

 	return ret;
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 396bf05c6bf6..d8b041f48ca8 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2892,6 +2892,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 	struct ath_txq *txq;
 	int tidno;

+	rcu_read_lock();
+
 	for (tidno = 0; tidno < IEEE80211_NUM_TIDS; tidno++) {
 		tid = ath_node_to_tid(an, tidno);
 		txq = tid->txq;
@@ -2909,6 +2911,8 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
 		if (!an->sta)
 			break; /* just one multicast ath_atx_tid */
 	}
+
+	rcu_read_unlock();
 }

 #ifdef CONFIG_ATH9K_TX99
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e323d3abb6ac..959de2f8bb28 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -8,6 +8,7 @@
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016-2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -36,6 +37,7 @@
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  * Copyright(c) 2017 Intel Deutschland GmbH
+ * Copyright(c) 2018 Intel Corporation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -517,9 +519,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},

 	/* 9000 Series */
-	{IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
@@ -544,11 +546,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
-	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
@@ -569,16 +575,42 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
@@ -595,12 +627,94 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
+	{IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
 	{IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
@@ -626,11 +740,44 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
@@ -647,10 +794,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
 	{IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
+	{IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
 	{IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
+	{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},

 	/* 22000 Series */
 	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
index 121b94f09714..9a1d15b3ce45 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c
@@ -1450,6 +1450,7 @@ static int rtl8187_probe(struct usb_interface *intf,
 		goto err_free_dev;
 	}
 	mutex_init(&priv->io_mutex);
+	mutex_init(&priv->conf_mutex);

 	SET_IEEE80211_DEV(dev, &intf->dev);
 	usb_set_intfdata(intf, dev);
@@ -1625,7 +1626,6 @@ static int rtl8187_probe(struct usb_interface *intf,
 		printk(KERN_ERR "rtl8187: Cannot register device\n");
 		goto err_free_dmabuf;
 	}
-	mutex_init(&priv->conf_mutex);
 	skb_queue_head_init(&priv->b_tx_status.queue);

 	wiphy_info(dev->wiphy, "hwaddr %pM, %s V%d + %s, rfkill mask %d\n",
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7aeca5db7916..0b9e60861e53 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2793,6 +2793,7 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,

 	list_for_each_entry(h, &subsys->nsheads, entry) {
 		if (nvme_ns_ids_valid(&new->ids) &&
+		    !list_empty(&h->list) &&
 		    nvme_ns_ids_equal(&new->ids, &h->ids))
 			return -EINVAL;
 	}
diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
index 2faf38eab785..cb694d2a1228 100644
--- a/drivers/pci/host/pci-hyperv.c
+++ b/drivers/pci/host/pci-hyperv.c
@@ -447,7 +447,6 @@ struct hv_pcibus_device {
 	spinlock_t device_list_lock;	/* Protect lists below */
 	void __iomem *cfg_addr;

-	struct semaphore enum_sem;
 	struct list_head resources_for_children;

 	struct list_head children;
@@ -461,6 +460,8 @@ struct hv_pcibus_device {
 	struct retarget_msi_interrupt retarget_msi_interrupt_params;

 	spinlock_t retarget_msi_interrupt_lock;
+
+	struct workqueue_struct *wq;
 };

 /*
@@ -520,6 +521,8 @@ struct hv_pci_compl {
 	s32 completion_status;
 };

+static void hv_pci_onchannelcallback(void *context);
+
 /**
  * hv_pci_generic_compl() - Invoked for a completion packet
  * @context: Set up by the sender of the packet.
@@ -664,6 +667,31 @@ static void _hv_pcifront_read_config(struct hv_pci_dev *hpdev, int where,
 	}
 }

+static u16 hv_pcifront_get_vendor_id(struct hv_pci_dev *hpdev)
+{
+	u16 ret;
+	unsigned long flags;
+	void __iomem *addr = hpdev->hbus->cfg_addr + CFG_PAGE_OFFSET +
+			     PCI_VENDOR_ID;
+
+	spin_lock_irqsave(&hpdev->hbus->config_lock, flags);
+
+	/* Choose the function to be read. (See comment above) */
+	writel(hpdev->desc.win_slot.slot, hpdev->hbus->cfg_addr);
+	/* Make sure the function was chosen before we start reading. */
+	mb();
+	/* Read from that function's config space. */
+	ret = readw(addr);
+	/*
+	 * mb() is not required here, because the spin_unlock_irqrestore()
+	 * is a barrier.
+	 */
+
+	spin_unlock_irqrestore(&hpdev->hbus->config_lock, flags);
+
+	return ret;
+}
+
 /**
  * _hv_pcifront_write_config() - Internal PCI config write
  * @hpdev: The PCI driver's representation of the device
@@ -1106,8 +1134,37 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 	 * Since this function is called with IRQ locks held, can't
 	 * do normal wait for completion; instead poll.
 	 */
-	while (!try_wait_for_completion(&comp.comp_pkt.host_event))
+	while (!try_wait_for_completion(&comp.comp_pkt.host_event)) {
+		/* 0xFFFF means an invalid PCI VENDOR ID. */
+		if (hv_pcifront_get_vendor_id(hpdev) == 0xFFFF) {
+			dev_err_once(&hbus->hdev->device,
+				     "the device has gone\n");
+			goto free_int_desc;
+		}
+
+		/*
+		 * When the higher level interrupt code calls us with
+		 * interrupt disabled, we must poll the channel by calling
+		 * the channel callback directly when channel->target_cpu is
+		 * the current CPU. When the higher level interrupt code
+		 * calls us with interrupt enabled, let's add the
+		 * local_bh_disable()/enable() to avoid race.
+		 */
+		local_bh_disable();
+
+		if (hbus->hdev->channel->target_cpu == smp_processor_id())
+			hv_pci_onchannelcallback(hbus);
+
+		local_bh_enable();
+
+		if (hpdev->state == hv_pcichild_ejecting) {
+			dev_err_once(&hbus->hdev->device,
+				     "the device is being ejected\n");
+			goto free_int_desc;
+		}
+
 		udelay(100);
+	}

 	if (comp.comp_pkt.completion_status < 0) {
 		dev_err(&hbus->hdev->device,
@@ -1590,12 +1647,8 @@ static struct hv_pci_dev *get_pcichild_wslot(struct hv_pcibus_device *hbus,
 * It must also treat the omission of a previously observed device as
 * notification that the device no longer exists.
 *
- * Note that this function is a work item, and it may not be
- * invoked in the order that it was queued. Back to back
- * updates of the list of present devices may involve queuing
- * multiple work items, and this one may run before ones that
- * were sent later. As such, this function only does something
- * if is the last one in the queue.
+ * Note that this function is serialized with hv_eject_device_work(),
+ * because both are pushed to the ordered workqueue hbus->wq.
 */
 static void pci_devices_present_work(struct work_struct *work)
 {
@@ -1616,11 +1669,6 @@ static void pci_devices_present_work(struct work_struct *work)

 	INIT_LIST_HEAD(&removed);

-	if (down_interruptible(&hbus->enum_sem)) {
-		put_hvpcibus(hbus);
-		return;
-	}
-
 	/* Pull this off the queue and process it if it was the last one. */
 	spin_lock_irqsave(&hbus->device_list_lock, flags);
 	while (!list_empty(&hbus->dr_list)) {
@@ -1637,7 +1685,6 @@ static void pci_devices_present_work(struct work_struct *work)
 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

 	if (!dr) {
-		up(&hbus->enum_sem);
 		put_hvpcibus(hbus);
 		return;
 	}
@@ -1724,7 +1771,6 @@ static void pci_devices_present_work(struct work_struct *work)
 		break;
 	}

-	up(&hbus->enum_sem);
 	put_hvpcibus(hbus);
 	kfree(dr);
 }
@@ -1770,7 +1816,7 @@ static void hv_pci_devices_present(struct hv_pcibus_device *hbus,
 	spin_unlock_irqrestore(&hbus->device_list_lock, flags);

 	get_hvpcibus(hbus);
-	schedule_work(&dr_wrk->wrk);
+	queue_work(hbus->wq, &dr_wrk->wrk);
 }

 /**
@@ -1848,7 +1894,7 @@ static void hv_pci_eject_device(struct hv_pci_dev *hpdev)
 	get_pcichild(hpdev, hv_pcidev_ref_pnp);
 	INIT_WORK(&hpdev->wrk, hv_eject_device_work);
 	get_hvpcibus(hpdev->hbus);
-	schedule_work(&hpdev->wrk);
+	queue_work(hpdev->hbus->wq, &hpdev->wrk);
 }

 /**
@@ -2461,13 +2507,18 @@ static int hv_pci_probe(struct hv_device *hdev,
 	spin_lock_init(&hbus->config_lock);
 	spin_lock_init(&hbus->device_list_lock);
 	spin_lock_init(&hbus->retarget_msi_interrupt_lock);
-	sema_init(&hbus->enum_sem, 1);
 	init_completion(&hbus->remove_event);
+	hbus->wq = alloc_ordered_workqueue("hv_pci_%x", 0,
+					   hbus->sysdata.domain);
+	if (!hbus->wq) {
+		ret = -ENOMEM;
+		goto free_bus;
+	}

 	ret = vmbus_open(hdev->channel, pci_ring_size, pci_ring_size, NULL, 0,
 			 hv_pci_onchannelcallback, hbus);
 	if (ret)
-		goto free_bus;
+		goto destroy_wq;

 	hv_set_drvdata(hdev, hbus);
@@ -2536,6 +2587,8 @@ static int hv_pci_probe(struct hv_device *hdev,
 	hv_free_config_window(hbus);
 close:
 	vmbus_close(hdev->channel);
+destroy_wq:
+	destroy_workqueue(hbus->wq);
 free_bus:
 	free_page((unsigned long)hbus);
 	return ret;
@@ -2615,6 +2668,7 @@ static int hv_pci_remove(struct hv_device *hdev)
 	irq_domain_free_fwnode(hbus->sysdata.fwnode);
 	put_hvpcibus(hbus);
 	wait_for_completion(&hbus->remove_event);
+	destroy_workqueue(hbus->wq);
 	free_page((unsigned long)hbus);
 	return 0;
 }
1406 diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
1407 index d5b02de02a3a..bfad63b5a13d 100644
1408 --- a/drivers/s390/cio/qdio_main.c
1409 +++ b/drivers/s390/cio/qdio_main.c
1410 @@ -128,7 +128,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
1411 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
1412 int start, int count, int auto_ack)
1413 {
1414 - int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0;
1415 + int rc, tmp_count = count, tmp_start = start, nr = q->nr;
1416 unsigned int ccq = 0;
1417
1418 qperf_inc(q, eqbs);
1419 @@ -151,14 +151,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
1420 qperf_inc(q, eqbs_partial);
1421 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS part:%02x",
1422 tmp_count);
1423 - /*
1424 - * Retry once, if that fails bail out and process the
1425 - * extracted buffers before trying again.
1426 - */
1427 - if (!retried++)
1428 - goto again;
1429 - else
1430 - return count - tmp_count;
1431 + return count - tmp_count;
1432 }
1433
1434 DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
1435 @@ -214,7 +207,10 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
1436 return 0;
1437 }
1438
1439 -/* returns number of examined buffers and their common state in *state */
1440 +/*
1441 + * Returns number of examined buffers and their common state in *state.
1442 + * Requested number of buffers-to-examine must be > 0.
1443 + */
1444 static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
1445 unsigned char *state, unsigned int count,
1446 int auto_ack, int merge_pending)
1447 @@ -225,17 +221,23 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
1448 if (is_qebsm(q))
1449 return qdio_do_eqbs(q, state, bufnr, count, auto_ack);
1450
1451 - for (i = 0; i < count; i++) {
1452 - if (!__state) {
1453 - __state = q->slsb.val[bufnr];
1454 - if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
1455 - __state = SLSB_P_OUTPUT_EMPTY;
1456 - } else if (merge_pending) {
1457 - if ((q->slsb.val[bufnr] & __state) != __state)
1458 - break;
1459 - } else if (q->slsb.val[bufnr] != __state)
1460 - break;
1461 + /* get initial state: */
1462 + __state = q->slsb.val[bufnr];
1463 + if (merge_pending && __state == SLSB_P_OUTPUT_PENDING)
1464 + __state = SLSB_P_OUTPUT_EMPTY;
1465 +
1466 + for (i = 1; i < count; i++) {
1467 bufnr = next_buf(bufnr);
1468 +
1469 + /* merge PENDING into EMPTY: */
1470 + if (merge_pending &&
1471 + q->slsb.val[bufnr] == SLSB_P_OUTPUT_PENDING &&
1472 + __state == SLSB_P_OUTPUT_EMPTY)
1473 + continue;
1474 +
1475 + /* stop if next state differs from initial state: */
1476 + if (q->slsb.val[bufnr] != __state)
1477 + break;
1478 }
1479 *state = __state;
1480 return i;
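The qdio rework above replaces the "__state starts at zero" bootstrap with an explicit read of the first buffer's state, then walks forward while states match, folding OUTPUT_PENDING into an OUTPUT_EMPTY run when merge_pending is set; the new kerneldoc makes the count > 0 precondition explicit. A standalone userspace model of the new loop, where the state values and queue depth are stand-ins:

#include <stdio.h>

#define QDIO_MAX_BUFFERS_PER_Q 128              /* stand-in queue depth */
enum { OUT_EMPTY = 1, OUT_PENDING = 2, OUT_PRIMED = 3 }; /* stand-in states */

static int next_buf(int bufnr)
{
        return (bufnr + 1) % QDIO_MAX_BUFFERS_PER_Q;
}

/* Requested count must be > 0, as the new comment in the patch states. */
static int get_buf_states(const unsigned char *slsb, int bufnr, int count,
                          unsigned char *state, int merge_pending)
{
        unsigned char first = slsb[bufnr];
        int i;

        if (merge_pending && first == OUT_PENDING)
                first = OUT_EMPTY;

        for (i = 1; i < count; i++) {
                bufnr = next_buf(bufnr);

                /* merge PENDING into an EMPTY run: */
                if (merge_pending && slsb[bufnr] == OUT_PENDING &&
                    first == OUT_EMPTY)
                        continue;

                /* stop if the next state differs from the initial state: */
                if (slsb[bufnr] != first)
                        break;
        }
        *state = first;
        return i;
}

int main(void)
{
        unsigned char slsb[QDIO_MAX_BUFFERS_PER_Q] = {
                OUT_EMPTY, OUT_PENDING, OUT_EMPTY, OUT_PRIMED,
        };
        unsigned char state;
        int n = get_buf_states(slsb, 0, 4, &state, 1);

        printf("run of %d buffers in state %u\n", n, (unsigned)state); /* 3, EMPTY */
        return 0;
}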
1481 diff --git a/drivers/sbus/char/oradax.c b/drivers/sbus/char/oradax.c
1482 index 03dc04739225..c44d7c7ffc92 100644
1483 --- a/drivers/sbus/char/oradax.c
1484 +++ b/drivers/sbus/char/oradax.c
1485 @@ -880,7 +880,7 @@ static int dax_ccb_exec(struct dax_ctx *ctx, const char __user *buf,
1486 dax_dbg("args: ccb_buf_len=%ld, idx=%d", count, idx);
1487
1488 /* for given index and length, verify ca_buf range exists */
1489 - if (idx + nccbs >= DAX_CA_ELEMS) {
1490 + if (idx < 0 || idx > (DAX_CA_ELEMS - nccbs)) {
1491 ctx->result.exec.status = DAX_SUBMIT_ERR_NO_CA_AVAIL;
1492 return 0;
1493 }
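The oradax fix moves the arithmetic to the constant side of the comparison: idx + nccbs can wrap, and a negative user-supplied idx slipped past the old test, while idx <= DAX_CA_ELEMS - nccbs rejects both and also accepts the exact-fit case. A runnable illustration; the element count and the extra nccbs guards are stand-ins added for the standalone demo:

#include <limits.h>
#include <stdio.h>

#define DAX_CA_ELEMS 32         /* stand-in for the driver's real value */

/* Subtracting on the constant side avoids signed overflow in
 * idx + nccbs, rejects negative idx, and accepts the exact-fit
 * case idx + nccbs == DAX_CA_ELEMS. */
static int ca_range_ok(int idx, int nccbs)
{
        return idx >= 0 && nccbs >= 1 && nccbs <= DAX_CA_ELEMS &&
               idx <= DAX_CA_ELEMS - nccbs;
}

int main(void)
{
        printf("%d\n", ca_range_ok(-1, 1));      /* 0: negative index */
        printf("%d\n", ca_range_ok(INT_MAX, 2)); /* 0: idx + nccbs would wrap */
        printf("%d\n", ca_range_ok(28, 4));      /* 1: exactly fills the table */
        return 0;
}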
1494 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1495 index 5c5dcca4d1da..e1cf8c0d73dd 100644
1496 --- a/drivers/scsi/qla2xxx/qla_os.c
1497 +++ b/drivers/scsi/qla2xxx/qla_os.c
1498 @@ -471,9 +471,6 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
1499
1500 static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
1501 {
1502 - if (!ha->req_q_map)
1503 - return;
1504 -
1505 if (IS_QLAFX00(ha)) {
1506 if (req && req->ring_fx00)
1507 dma_free_coherent(&ha->pdev->dev,
1508 @@ -484,17 +481,14 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
1509 (req->length + 1) * sizeof(request_t),
1510 req->ring, req->dma);
1511
1512 - if (req) {
1513 + if (req)
1514 kfree(req->outstanding_cmds);
1515 - kfree(req);
1516 - }
1517 +
1518 + kfree(req);
1519 }
1520
1521 static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
1522 {
1523 - if (!ha->rsp_q_map)
1524 - return;
1525 -
1526 if (IS_QLAFX00(ha)) {
1527 if (rsp && rsp->ring)
1528 dma_free_coherent(&ha->pdev->dev,
1529 @@ -505,8 +499,7 @@ static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
1530 (rsp->length + 1) * sizeof(response_t),
1531 rsp->ring, rsp->dma);
1532 }
1533 - if (rsp)
1534 - kfree(rsp);
1535 + kfree(rsp);
1536 }
1537
1538 static void qla2x00_free_queues(struct qla_hw_data *ha)
1539 @@ -3107,7 +3100,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1540 goto probe_failed;
1541
1542 /* Alloc arrays of request and response ring ptrs */
1543 - if (qla2x00_alloc_queues(ha, req, rsp)) {
1544 + ret = qla2x00_alloc_queues(ha, req, rsp);
1545 + if (ret) {
1546 ql_log(ql_log_fatal, base_vha, 0x003d,
1547 "Failed to allocate memory for queue pointers..."
1548 "aborting.\n");
1549 @@ -3408,8 +3402,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1550 }
1551
1552 qla2x00_free_device(base_vha);
1553 -
1554 scsi_host_put(base_vha->host);
1555 + /*
1556 + * Need to NULL out local req/rsp after
1557 + * qla2x00_free_device => qla2x00_free_queues frees
1558 + * what these are pointing to. Or else we'll
1559 + * fall over below in qla2x00_free_req/rsp_que.
1560 + */
1561 + req = NULL;
1562 + rsp = NULL;
1563
1564 probe_hw_failed:
1565 qla2x00_mem_free(ha);
1566 @@ -4115,6 +4116,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
1567 (*rsp)->dma = 0;
1568 fail_rsp_ring:
1569 kfree(*rsp);
1570 + *rsp = NULL;
1571 fail_rsp:
1572 dma_free_coherent(&ha->pdev->dev, ((*req)->length + 1) *
1573 sizeof(request_t), (*req)->ring, (*req)->dma);
1574 @@ -4122,6 +4124,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
1575 (*req)->dma = 0;
1576 fail_req_ring:
1577 kfree(*req);
1578 + *req = NULL;
1579 fail_req:
1580 dma_free_coherent(&ha->pdev->dev, sizeof(struct ct_sns_pkt),
1581 ha->ct_sns, ha->ct_sns_dma);
1582 @@ -4509,16 +4512,11 @@ qla2x00_mem_free(struct qla_hw_data *ha)
1583 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
1584 ha->init_cb, ha->init_cb_dma);
1585
1586 - if (ha->optrom_buffer)
1587 - vfree(ha->optrom_buffer);
1588 - if (ha->nvram)
1589 - kfree(ha->nvram);
1590 - if (ha->npiv_info)
1591 - kfree(ha->npiv_info);
1592 - if (ha->swl)
1593 - kfree(ha->swl);
1594 - if (ha->loop_id_map)
1595 - kfree(ha->loop_id_map);
1596 + vfree(ha->optrom_buffer);
1597 + kfree(ha->nvram);
1598 + kfree(ha->npiv_info);
1599 + kfree(ha->swl);
1600 + kfree(ha->loop_id_map);
1601
1602 ha->srb_mempool = NULL;
1603 ha->ctx_mempool = NULL;
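The qla2xxx cleanups rely on kfree(NULL) and vfree(NULL) being defined no-ops, so the if (ptr) guards go away, and the failure paths now NULL *req/*rsp (and the probe-time locals) after freeing so a later unwind pass cannot double-free. A userspace model of the same idiom; free(NULL) is likewise a no-op:

#include <stdlib.h>

struct rsp_que {
        void *ring;
};

/* Mirrors the reworked qla2x00_free_rsp_que(): no NULL guards needed. */
static void free_rsp_que(struct rsp_que *rsp)
{
        if (rsp)
                free(rsp->ring);
        free(rsp);              /* free(NULL) is defined to do nothing */
}

int main(void)
{
        struct rsp_que *rsp = calloc(1, sizeof(*rsp));

        if (rsp)
                rsp->ring = malloc(64);
        free_rsp_que(rsp);
        rsp = NULL;             /* like the *rsp = NULL in the error path: */
        free_rsp_que(rsp);      /* a second cleanup pass is now harmless */
        return 0;
}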
1604 diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
1605 index b88b5dbbc444..188f30572aa1 100644
1606 --- a/drivers/scsi/scsi_dh.c
1607 +++ b/drivers/scsi/scsi_dh.c
1608 @@ -112,6 +112,9 @@ static struct scsi_device_handler *scsi_dh_lookup(const char *name)
1609 {
1610 struct scsi_device_handler *dh;
1611
1612 + if (!name || strlen(name) == 0)
1613 + return NULL;
1614 +
1615 dh = __scsi_dh_lookup(name);
1616 if (!dh) {
1617 request_module("scsi_dh_%s", name);
1618 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1619 index c84f931388f2..912eacdc2d83 100644
1620 --- a/drivers/scsi/scsi_lib.c
1621 +++ b/drivers/scsi/scsi_lib.c
1622 @@ -721,8 +721,6 @@ static blk_status_t __scsi_error_from_host_byte(struct scsi_cmnd *cmd,
1623 int result)
1624 {
1625 switch (host_byte(result)) {
1626 - case DID_OK:
1627 - return BLK_STS_OK;
1628 case DID_TRANSPORT_FAILFAST:
1629 return BLK_STS_TRANSPORT;
1630 case DID_TARGET_FAILURE:
1631 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1632 index 5320039671b7..be6a4b6a76c6 100644
1633 --- a/drivers/vhost/vhost.c
1634 +++ b/drivers/vhost/vhost.c
1635 @@ -744,7 +744,7 @@ static int vhost_copy_to_user(struct vhost_virtqueue *vq, void __user *to,
1636 struct iov_iter t;
1637 void __user *uaddr = vhost_vq_meta_fetch(vq,
1638 (u64)(uintptr_t)to, size,
1639 - VHOST_ADDR_DESC);
1640 + VHOST_ADDR_USED);
1641
1642 if (uaddr)
1643 return __copy_to_user(uaddr, from, size);
1644 @@ -1244,10 +1244,12 @@ static int vq_log_access_ok(struct vhost_virtqueue *vq,
1645 /* Caller should have vq mutex and device mutex */
1646 int vhost_vq_access_ok(struct vhost_virtqueue *vq)
1647 {
1648 - int ret = vq_log_access_ok(vq, vq->log_base);
1649 + if (!vq_log_access_ok(vq, vq->log_base))
1650 + return 0;
1651
1652 - if (ret || vq->iotlb)
1653 - return ret;
1654 + /* Access validation occurs at prefetch time with IOTLB */
1655 + if (vq->iotlb)
1656 + return 1;
1657
1658 return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used);
1659 }
1660 diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
1661 index a493e99bed21..81a84b3c1c50 100644
1662 --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
1663 +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
1664 @@ -365,7 +365,7 @@ void xenbus_dev_queue_reply(struct xb_req_data *req)
1665 if (WARN_ON(rc))
1666 goto out;
1667 }
1668 - } else if (req->msg.type == XS_TRANSACTION_END) {
1669 + } else if (req->type == XS_TRANSACTION_END) {
1670 trans = xenbus_get_transaction(u, req->msg.tx_id);
1671 if (WARN_ON(!trans))
1672 goto out;
1673 diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
1674 index aa720cc44509..b9d93fd532a9 100644
1675 --- a/fs/f2fs/gc.c
1676 +++ b/fs/f2fs/gc.c
1677 @@ -191,8 +191,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
1678 if (gc_type != FG_GC && p->max_search > sbi->max_victim_search)
1679 p->max_search = sbi->max_victim_search;
1680
1681 - /* let's select beginning hot/small space first */
1682 - if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
1683 +	/* let's select beginning hot/small space first in no_heap mode */
1684 + if (test_opt(sbi, NOHEAP) &&
1685 + (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
1686 p->offset = 0;
1687 else
1688 p->offset = SIT_I(sbi)->last_victim[p->gc_mode];
1689 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
1690 index b16a8e6625aa..205b0d934c44 100644
1691 --- a/fs/f2fs/segment.c
1692 +++ b/fs/f2fs/segment.c
1693 @@ -2164,7 +2164,8 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
1694 if (sbi->segs_per_sec != 1)
1695 return CURSEG_I(sbi, type)->segno;
1696
1697 - if (type == CURSEG_HOT_DATA || IS_NODESEG(type))
1698 + if (test_opt(sbi, NOHEAP) &&
1699 + (type == CURSEG_HOT_DATA || IS_NODESEG(type)))
1700 return 0;
1701
1702 if (SIT_I(sbi)->last_victim[ALLOC_NEXT])
1703 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
1704 index b9a254dcc0e7..d508c7844681 100644
1705 --- a/fs/hugetlbfs/inode.c
1706 +++ b/fs/hugetlbfs/inode.c
1707 @@ -138,10 +138,14 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
1708
1709 /*
1710 * page based offset in vm_pgoff could be sufficiently large to
1711 - * overflow a (l)off_t when converted to byte offset.
1712 + * overflow a loff_t when converted to byte offset. This can
1713 + * only happen on architectures where sizeof(loff_t) ==
1714 + * sizeof(unsigned long). So, only check in those instances.
1715 */
1716 - if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
1717 - return -EINVAL;
1718 + if (sizeof(unsigned long) == sizeof(loff_t)) {
1719 + if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
1720 + return -EINVAL;
1721 + }
1722
1723 /* must be huge page aligned */
1724 if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
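The hugetlbfs hunk wraps the PGOFF_LOFFT_MAX test in sizeof(unsigned long) == sizeof(loff_t): the shifted page offset can only overflow where the two types are the same width, the condition is a compile-time constant, and the compiler drops the dead branch on the other architectures. A tiny demonstration of the constant-folded guard, with long long standing in for loff_t:

#include <stdio.h>

int main(void)
{
        /* Evaluated at build time; only one branch is ever emitted,
         * so wrapping the check costs nothing at runtime. */
        if (sizeof(unsigned long) == sizeof(long long))
                printf("pgoff can overflow the byte offset: check enabled\n");
        else
                printf("byte offset is wider than pgoff: check compiled out\n");
        return 0;
}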
1725 diff --git a/fs/namei.c b/fs/namei.c
1726 index cafa365eeb70..b61d6aa9279d 100644
1727 --- a/fs/namei.c
1728 +++ b/fs/namei.c
1729 @@ -222,9 +222,10 @@ getname_kernel(const char * filename)
1730 if (len <= EMBEDDED_NAME_MAX) {
1731 result->name = (char *)result->iname;
1732 } else if (len <= PATH_MAX) {
1733 + const size_t size = offsetof(struct filename, iname[1]);
1734 struct filename *tmp;
1735
1736 - tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
1737 + tmp = kmalloc(size, GFP_KERNEL);
1738 if (unlikely(!tmp)) {
1739 __putname(result);
1740 return ERR_PTR(-ENOMEM);
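The namei change trims the second allocation in getname_kernel(): when the name is stored out of line, nothing past the first byte of the embedded iname[] tail is used, so offsetof(struct filename, iname[1]) is enough. A standalone sketch of the sizing trick; the struct layout and EMBEDDED_NAME_MAX value below are simplified stand-ins:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define EMBEDDED_NAME_MAX 63    /* stand-in for the kernel's value */

struct filename {
        const char *name;       /* points at out-of-line storage here */
        int refcnt;
        char iname[EMBEDDED_NAME_MAX + 1];
};

int main(void)
{
        /* Only iname[0] is ever touched when the name lives elsewhere,
         * so the allocation can stop right after it. */
        const size_t size = offsetof(struct filename, iname[1]);
        struct filename *f = malloc(size);

        if (!f)
                return 1;
        printf("sizeof = %zu bytes, trimmed allocation = %zu bytes\n",
               sizeof(struct filename), size);
        free(f);
        return 0;
}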
1741 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
1742 index a0bed2b2004d..7fce5c3540ce 100644
1743 --- a/fs/nfsd/nfs4proc.c
1744 +++ b/fs/nfsd/nfs4proc.c
1745 @@ -32,6 +32,7 @@
1746 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
1747 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1748 */
1749 +#include <linux/fs_struct.h>
1750 #include <linux/file.h>
1751 #include <linux/falloc.h>
1752 #include <linux/slab.h>
1753 @@ -252,11 +253,13 @@ do_open_lookup(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, stru
1754 * Note: create modes (UNCHECKED,GUARDED...) are the same
1755 * in NFSv4 as in v3 except EXCLUSIVE4_1.
1756 */
1757 + current->fs->umask = open->op_umask;
1758 status = do_nfsd_create(rqstp, current_fh, open->op_fname.data,
1759 open->op_fname.len, &open->op_iattr,
1760 *resfh, open->op_createmode,
1761 (u32 *)open->op_verf.data,
1762 &open->op_truncate, &open->op_created);
1763 + current->fs->umask = 0;
1764
1765 if (!status && open->op_label.len)
1766 nfsd4_security_inode_setsecctx(*resfh, &open->op_label, open->op_bmval);
1767 @@ -603,6 +606,7 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1768 if (status)
1769 return status;
1770
1771 + current->fs->umask = create->cr_umask;
1772 switch (create->cr_type) {
1773 case NF4LNK:
1774 status = nfsd_symlink(rqstp, &cstate->current_fh,
1775 @@ -611,20 +615,22 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1776 break;
1777
1778 case NF4BLK:
1779 + status = nfserr_inval;
1780 rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
1781 if (MAJOR(rdev) != create->cr_specdata1 ||
1782 MINOR(rdev) != create->cr_specdata2)
1783 - return nfserr_inval;
1784 + goto out_umask;
1785 status = nfsd_create(rqstp, &cstate->current_fh,
1786 create->cr_name, create->cr_namelen,
1787 &create->cr_iattr, S_IFBLK, rdev, &resfh);
1788 break;
1789
1790 case NF4CHR:
1791 + status = nfserr_inval;
1792 rdev = MKDEV(create->cr_specdata1, create->cr_specdata2);
1793 if (MAJOR(rdev) != create->cr_specdata1 ||
1794 MINOR(rdev) != create->cr_specdata2)
1795 - return nfserr_inval;
1796 + goto out_umask;
1797 status = nfsd_create(rqstp, &cstate->current_fh,
1798 create->cr_name, create->cr_namelen,
1799 &create->cr_iattr,S_IFCHR, rdev, &resfh);
1800 @@ -668,6 +674,8 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
1801 fh_dup2(&cstate->current_fh, &resfh);
1802 out:
1803 fh_put(&resfh);
1804 +out_umask:
1805 + current->fs->umask = 0;
1806 return status;
1807 }
1808
1809 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
1810 index e502fd16246b..45f0f0500ee4 100644
1811 --- a/fs/nfsd/nfs4xdr.c
1812 +++ b/fs/nfsd/nfs4xdr.c
1813 @@ -33,7 +33,6 @@
1814 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
1815 */
1816
1817 -#include <linux/fs_struct.h>
1818 #include <linux/file.h>
1819 #include <linux/slab.h>
1820 #include <linux/namei.h>
1821 @@ -682,7 +681,7 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
1822
1823 status = nfsd4_decode_fattr(argp, create->cr_bmval, &create->cr_iattr,
1824 &create->cr_acl, &create->cr_label,
1825 - &current->fs->umask);
1826 + &create->cr_umask);
1827 if (status)
1828 goto out;
1829
1830 @@ -927,7 +926,6 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
1831 case NFS4_OPEN_NOCREATE:
1832 break;
1833 case NFS4_OPEN_CREATE:
1834 - current->fs->umask = 0;
1835 READ_BUF(4);
1836 open->op_createmode = be32_to_cpup(p++);
1837 switch (open->op_createmode) {
1838 @@ -935,7 +933,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
1839 case NFS4_CREATE_GUARDED:
1840 status = nfsd4_decode_fattr(argp, open->op_bmval,
1841 &open->op_iattr, &open->op_acl, &open->op_label,
1842 - &current->fs->umask);
1843 + &open->op_umask);
1844 if (status)
1845 goto out;
1846 break;
1847 @@ -950,7 +948,7 @@ nfsd4_decode_open(struct nfsd4_compoundargs *argp, struct nfsd4_open *open)
1848 COPYMEM(open->op_verf.data, NFS4_VERIFIER_SIZE);
1849 status = nfsd4_decode_fattr(argp, open->op_bmval,
1850 &open->op_iattr, &open->op_acl, &open->op_label,
1851 - &current->fs->umask);
1852 + &open->op_umask);
1853 if (status)
1854 goto out;
1855 break;
1856 diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
1857 index bc29511b6405..f47c392cbd57 100644
1858 --- a/fs/nfsd/xdr4.h
1859 +++ b/fs/nfsd/xdr4.h
1860 @@ -118,6 +118,7 @@ struct nfsd4_create {
1861 } u;
1862 u32 cr_bmval[3]; /* request */
1863 struct iattr cr_iattr; /* request */
1864 + int cr_umask; /* request */
1865 struct nfsd4_change_info cr_cinfo; /* response */
1866 struct nfs4_acl *cr_acl;
1867 struct xdr_netobj cr_label;
1868 @@ -228,6 +229,7 @@ struct nfsd4_open {
1869 u32 op_why_no_deleg; /* response - DELEG_NONE_EXT only */
1870 u32 op_create; /* request */
1871 u32 op_createmode; /* request */
1872 + int op_umask; /* request */
1873 u32 op_bmval[3]; /* request */
1874 struct iattr op_iattr; /* UNCHECKED4, GUARDED4, EXCLUSIVE4_1 */
1875 nfs4_verifier op_verf __attribute__((aligned(32)));
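The nfsd hunks stop the decode path from writing the client's umask straight into current->fs->umask, where it lingered across operations: the value is parsed into the new cr_umask/op_umask fields, applied only around the create call, and reset to 0 on every exit path, including the new out_umask error label. A userspace analogue of the apply/reset bracket using the real umask(2) call; the path name is arbitrary:

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

/* Apply the request's umask only for the duration of the create,
 * then drop back to 0 on every return path. */
static int create_with_umask(const char *path, mode_t op_umask)
{
        int fd;

        umask(op_umask);        /* per-request value, like open->op_umask */
        fd = open(path, O_CREAT | O_EXCL | O_WRONLY, 0666);
        umask(0);               /* reset even when open() failed */
        return fd;
}

int main(void)
{
        int fd = create_with_umask("/tmp/umask_demo", 022);

        if (fd >= 0) {
                close(fd);
                unlink("/tmp/umask_demo");
        }
        return 0;
}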
1876 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
1877 index 3b1bd469accd..1d75b2e96c96 100644
1878 --- a/fs/overlayfs/inode.c
1879 +++ b/fs/overlayfs/inode.c
1880 @@ -118,13 +118,10 @@ int ovl_getattr(const struct path *path, struct kstat *stat,
1881 */
1882 if (ovl_test_flag(OVL_INDEX, d_inode(dentry)) ||
1883 (!ovl_verify_lower(dentry->d_sb) &&
1884 - (is_dir || lowerstat.nlink == 1)))
1885 + (is_dir || lowerstat.nlink == 1))) {
1886 stat->ino = lowerstat.ino;
1887 -
1888 - if (samefs)
1889 - WARN_ON_ONCE(stat->dev != lowerstat.dev);
1890 - else
1891 stat->dev = ovl_get_pseudo_dev(dentry);
1892 + }
1893 }
1894 if (samefs) {
1895 /*
1896 @@ -459,9 +456,20 @@ static inline void ovl_lockdep_annotate_inode_mutex_key(struct inode *inode)
1897 #endif
1898 }
1899
1900 -static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev)
1901 +static void ovl_fill_inode(struct inode *inode, umode_t mode, dev_t rdev,
1902 + unsigned long ino)
1903 {
1904 - inode->i_ino = get_next_ino();
1905 + /*
1906 + * When NFS export is enabled and d_ino is consistent with st_ino
1907 + * (samefs), set the same value to i_ino, because nfsd readdirplus
1908 + * compares d_ino values to i_ino values of child entries. When called
1909 + * from ovl_new_inode(), ino arg is 0, so i_ino will be updated to real
1910 + * upper inode i_ino on ovl_inode_init() or ovl_inode_update().
1911 + */
1912 + if (inode->i_sb->s_export_op && ovl_same_sb(inode->i_sb))
1913 + inode->i_ino = ino;
1914 + else
1915 + inode->i_ino = get_next_ino();
1916 inode->i_mode = mode;
1917 inode->i_flags |= S_NOCMTIME;
1918 #ifdef CONFIG_FS_POSIX_ACL
1919 @@ -597,7 +605,7 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev)
1920
1921 inode = new_inode(sb);
1922 if (inode)
1923 - ovl_fill_inode(inode, mode, rdev);
1924 + ovl_fill_inode(inode, mode, rdev, 0);
1925
1926 return inode;
1927 }
1928 @@ -710,6 +718,7 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
1929 struct inode *inode;
1930 bool bylower = ovl_hash_bylower(sb, upperdentry, lowerdentry, index);
1931 bool is_dir;
1932 + unsigned long ino = 0;
1933
1934 if (!realinode)
1935 realinode = d_inode(lowerdentry);
1936 @@ -748,13 +757,14 @@ struct inode *ovl_get_inode(struct super_block *sb, struct dentry *upperdentry,
1937 if (!is_dir)
1938 nlink = ovl_get_nlink(lowerdentry, upperdentry, nlink);
1939 set_nlink(inode, nlink);
1940 + ino = key->i_ino;
1941 } else {
1942 /* Lower hardlink that will be broken on copy up */
1943 inode = new_inode(sb);
1944 if (!inode)
1945 goto out_nomem;
1946 }
1947 - ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev);
1948 + ovl_fill_inode(inode, realinode->i_mode, realinode->i_rdev, ino);
1949 ovl_inode_init(inode, upperdentry, lowerdentry);
1950
1951 if (upperdentry && ovl_is_impuredir(upperdentry))
1952 diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
1953 index 70fcfcc684cc..35418317ecf2 100644
1954 --- a/fs/overlayfs/namei.c
1955 +++ b/fs/overlayfs/namei.c
1956 @@ -56,6 +56,15 @@ static int ovl_check_redirect(struct dentry *dentry, struct ovl_lookup_data *d,
1957 if (s == next)
1958 goto invalid;
1959 }
1960 + /*
1961 + * One of the ancestor path elements in an absolute path
1962 + * lookup in ovl_lookup_layer() could have been opaque and
1963 +		 * that will stop further lookup in lower layers (d->stop=true).
1964 +		 * But we have found an absolute redirect in a descendant path
1965 +		 * element and that should force lookup to continue in lower
1966 +		 * layers (reset d->stop).
1967 + */
1968 + d->stop = false;
1969 } else {
1970 if (strchr(buf, '/') != NULL)
1971 goto invalid;
1972 @@ -815,7 +824,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
1973 .is_dir = false,
1974 .opaque = false,
1975 .stop = false,
1976 - .last = !poe->numlower,
1977 + .last = ofs->config.redirect_follow ? false : !poe->numlower,
1978 .redirect = NULL,
1979 };
1980
1981 @@ -873,7 +882,11 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
1982 for (i = 0; !d.stop && i < poe->numlower; i++) {
1983 struct ovl_path lower = poe->lowerstack[i];
1984
1985 - d.last = i == poe->numlower - 1;
1986 + if (!ofs->config.redirect_follow)
1987 + d.last = i == poe->numlower - 1;
1988 + else
1989 + d.last = lower.layer->idx == roe->numlower;
1990 +
1991 err = ovl_lookup_layer(lower.dentry, &d, &this);
1992 if (err)
1993 goto out_put;
1994 diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
1995 index 930784a26623..493f9b76fbf6 100644
1996 --- a/fs/overlayfs/util.c
1997 +++ b/fs/overlayfs/util.c
1998 @@ -279,12 +279,16 @@ void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect)
1999 void ovl_inode_init(struct inode *inode, struct dentry *upperdentry,
2000 struct dentry *lowerdentry)
2001 {
2002 + struct inode *realinode = d_inode(upperdentry ?: lowerdentry);
2003 +
2004 if (upperdentry)
2005 OVL_I(inode)->__upperdentry = upperdentry;
2006 if (lowerdentry)
2007 OVL_I(inode)->lower = igrab(d_inode(lowerdentry));
2008
2009 - ovl_copyattr(d_inode(upperdentry ?: lowerdentry), inode);
2010 + ovl_copyattr(realinode, inode);
2011 + if (!inode->i_ino)
2012 + inode->i_ino = realinode->i_ino;
2013 }
2014
2015 void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
2016 @@ -299,6 +303,8 @@ void ovl_inode_update(struct inode *inode, struct dentry *upperdentry)
2017 smp_wmb();
2018 OVL_I(inode)->__upperdentry = upperdentry;
2019 if (inode_unhashed(inode)) {
2020 + if (!inode->i_ino)
2021 + inode->i_ino = upperinode->i_ino;
2022 inode->i_private = upperinode;
2023 __insert_inode_hash(inode, (unsigned long) upperinode);
2024 }
2025 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
2026 index 53f32022fabe..7f0bda760a58 100644
2027 --- a/include/media/v4l2-dev.h
2028 +++ b/include/media/v4l2-dev.h
2029 @@ -33,13 +33,13 @@
2030 */
2031 enum vfl_devnode_type {
2032 VFL_TYPE_GRABBER = 0,
2033 - VFL_TYPE_VBI = 1,
2034 - VFL_TYPE_RADIO = 2,
2035 - VFL_TYPE_SUBDEV = 3,
2036 - VFL_TYPE_SDR = 4,
2037 - VFL_TYPE_TOUCH = 5,
2038 + VFL_TYPE_VBI,
2039 + VFL_TYPE_RADIO,
2040 + VFL_TYPE_SUBDEV,
2041 + VFL_TYPE_SDR,
2042 + VFL_TYPE_TOUCH,
2043 + VFL_TYPE_MAX /* Shall be the last one */
2044 };
2045 -#define VFL_TYPE_MAX VFL_TYPE_TOUCH
2046
2047 /**
2048 * enum vfl_direction - Identifies if a &struct video_device corresponds
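The v4l2-dev.h change turns VFL_TYPE_MAX from a #define equal to the last type into a real sentinel enumerator one past it; with the old macro, an array declared [VFL_TYPE_MAX] had no slot for VFL_TYPE_TOUCH itself. The shape of the idiom, runnable standalone (the name table is illustrative):

#include <stdio.h>

enum vfl_devnode_type {
        VFL_TYPE_GRABBER = 0,
        VFL_TYPE_VBI,
        VFL_TYPE_RADIO,
        VFL_TYPE_SUBDEV,
        VFL_TYPE_SDR,
        VFL_TYPE_TOUCH,
        VFL_TYPE_MAX            /* shall be the last one */
};

/* Sized by the sentinel, the table automatically grows when a new
 * type is inserted before VFL_TYPE_MAX. */
static const char *const names[VFL_TYPE_MAX] = {
        "grabber", "vbi", "radio", "subdev", "sdr", "touch",
};

int main(void)
{
        int i;

        for (i = 0; i < VFL_TYPE_MAX; i++)
                printf("%d: %s\n", i, names[i]);
        return 0;
}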
2049 diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
2050 index 95ccc1eef558..b619a190ff12 100644
2051 --- a/include/net/bluetooth/hci_core.h
2052 +++ b/include/net/bluetooth/hci_core.h
2053 @@ -895,7 +895,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
2054 u16 conn_timeout);
2055 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
2056 u8 dst_type, u8 sec_level, u16 conn_timeout,
2057 - u8 role);
2058 + u8 role, bdaddr_t *direct_rpa);
2059 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
2060 u8 sec_level, u8 auth_type);
2061 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
2062 diff --git a/include/net/slhc_vj.h b/include/net/slhc_vj.h
2063 index 8716d5942b65..8fcf8908a694 100644
2064 --- a/include/net/slhc_vj.h
2065 +++ b/include/net/slhc_vj.h
2066 @@ -127,6 +127,7 @@ typedef __u32 int32;
2067 */
2068 struct cstate {
2069 byte_t cs_this; /* connection id number (xmit) */
2070 + bool initialized; /* true if initialized */
2071 struct cstate *next; /* next in ring (xmit) */
2072 struct iphdr cs_ip; /* ip/tcp hdr from most recent packet */
2073 struct tcphdr cs_tcp;
2074 diff --git a/kernel/events/core.c b/kernel/events/core.c
2075 index 709a55b9ad97..b32bc0698a2a 100644
2076 --- a/kernel/events/core.c
2077 +++ b/kernel/events/core.c
2078 @@ -4123,6 +4123,9 @@ static void _free_event(struct perf_event *event)
2079 if (event->ctx)
2080 put_ctx(event->ctx);
2081
2082 + if (event->hw.target)
2083 + put_task_struct(event->hw.target);
2084 +
2085 exclusive_event_destroy(event);
2086 module_put(event->pmu->module);
2087
2088 @@ -9488,6 +9491,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
2089 * and we cannot use the ctx information because we need the
2090 * pmu before we get a ctx.
2091 */
2092 + get_task_struct(task);
2093 event->hw.target = task;
2094 }
2095
2096 @@ -9603,6 +9607,8 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
2097 perf_detach_cgroup(event);
2098 if (event->ns)
2099 put_pid_ns(event->ns);
2100 + if (event->hw.target)
2101 + put_task_struct(event->hw.target);
2102 kfree(event);
2103
2104 return ERR_PTR(err);
2105 diff --git a/lib/bitmap.c b/lib/bitmap.c
2106 index 9e498c77ed0e..a42eff7e8c48 100644
2107 --- a/lib/bitmap.c
2108 +++ b/lib/bitmap.c
2109 @@ -607,7 +607,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
2110 /* if no digit is after '-', it's wrong*/
2111 if (at_start && in_range)
2112 return -EINVAL;
2113 - if (!(a <= b) || !(used_size <= group_size))
2114 + if (!(a <= b) || group_size == 0 || !(used_size <= group_size))
2115 return -EINVAL;
2116 if (b >= nmaskbits)
2117 return -ERANGE;
2118 diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
2119 index b3f235baa05d..413367cf569e 100644
2120 --- a/lib/test_bitmap.c
2121 +++ b/lib/test_bitmap.c
2122 @@ -255,6 +255,10 @@ static const struct test_bitmap_parselist parselist_tests[] __initconst = {
2123 {-EINVAL, "-1", NULL, 8, 0},
2124 {-EINVAL, "-0", NULL, 8, 0},
2125 {-EINVAL, "10-1", NULL, 8, 0},
2126 + {-EINVAL, "0-31:", NULL, 8, 0},
2127 + {-EINVAL, "0-31:0", NULL, 8, 0},
2128 + {-EINVAL, "0-31:0/0", NULL, 8, 0},
2129 + {-EINVAL, "0-31:1/0", NULL, 8, 0},
2130 {-EINVAL, "0-31:10/1", NULL, 8, 0},
2131 };
2132
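The bitmap hunks add group_size == 0 to the region checks for the start-end:used/group list syntax: 0 <= 0 satisfied the old used_size <= group_size test, after which the expansion loop could stall stepping by a zero stride, and the new test_bitmap vectors pin the rejected forms down. A model of the tightened validity predicate:

#include <stdio.h>

/* Model of the region check: a <= b, a nonzero group, and a used
 * count no larger than the group. */
static int region_valid(unsigned int a, unsigned int b,
                        unsigned int used_size, unsigned int group_size)
{
        return a <= b && group_size != 0 && used_size <= group_size;
}

int main(void)
{
        printf("0-31:0/0  -> %d\n", region_valid(0, 31, 0, 0));  /* 0 */
        printf("0-31:10/1 -> %d\n", region_valid(0, 31, 10, 1)); /* 0 */
        printf("0-31:4/8  -> %d\n", region_valid(0, 31, 4, 8));  /* 1 */
        return 0;
}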
2133 diff --git a/mm/gup.c b/mm/gup.c
2134 index 6afae32571ca..8f3a06408e28 100644
2135 --- a/mm/gup.c
2136 +++ b/mm/gup.c
2137 @@ -1806,9 +1806,12 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
2138 len = (unsigned long) nr_pages << PAGE_SHIFT;
2139 end = start + len;
2140
2141 + if (nr_pages <= 0)
2142 + return 0;
2143 +
2144 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
2145 (void __user *)start, len)))
2146 - return 0;
2147 + return -EFAULT;
2148
2149 if (gup_fast_permitted(start, nr_pages, write)) {
2150 local_irq_disable();
2151 diff --git a/mm/gup_benchmark.c b/mm/gup_benchmark.c
2152 index 5c8e2abeaa15..0f44759486e2 100644
2153 --- a/mm/gup_benchmark.c
2154 +++ b/mm/gup_benchmark.c
2155 @@ -23,7 +23,7 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
2156 struct page **pages;
2157
2158 nr_pages = gup->size / PAGE_SIZE;
2159 - pages = kvmalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
2160 + pages = kvzalloc(sizeof(void *) * nr_pages, GFP_KERNEL);
2161 if (!pages)
2162 return -ENOMEM;
2163
2164 @@ -41,6 +41,8 @@ static int __gup_benchmark_ioctl(unsigned int cmd,
2165 }
2166
2167 nr = get_user_pages_fast(addr, nr, gup->flags & 1, pages + i);
2168 + if (nr <= 0)
2169 + break;
2170 i += nr;
2171 }
2172 end_time = ktime_get();
2173 diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
2174 index a9682534c377..45ff5dc124cc 100644
2175 --- a/net/bluetooth/hci_conn.c
2176 +++ b/net/bluetooth/hci_conn.c
2177 @@ -749,18 +749,31 @@ static bool conn_use_rpa(struct hci_conn *conn)
2178 }
2179
2180 static void hci_req_add_le_create_conn(struct hci_request *req,
2181 - struct hci_conn *conn)
2182 + struct hci_conn *conn,
2183 + bdaddr_t *direct_rpa)
2184 {
2185 struct hci_cp_le_create_conn cp;
2186 struct hci_dev *hdev = conn->hdev;
2187 u8 own_addr_type;
2188
2189 - /* Update random address, but set require_privacy to false so
2190 - * that we never connect with an non-resolvable address.
2191 + /* If direct address was provided we use it instead of current
2192 + * address.
2193 */
2194 - if (hci_update_random_address(req, false, conn_use_rpa(conn),
2195 - &own_addr_type))
2196 - return;
2197 + if (direct_rpa) {
2198 + if (bacmp(&req->hdev->random_addr, direct_rpa))
2199 + hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
2200 + direct_rpa);
2201 +
2202 + /* direct address is always RPA */
2203 + own_addr_type = ADDR_LE_DEV_RANDOM;
2204 + } else {
2205 + /* Update random address, but set require_privacy to false so
2206 +		 * that we never connect with a non-resolvable address.
2207 + */
2208 + if (hci_update_random_address(req, false, conn_use_rpa(conn),
2209 + &own_addr_type))
2210 + return;
2211 + }
2212
2213 memset(&cp, 0, sizeof(cp));
2214
2215 @@ -825,7 +838,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
2216
2217 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
2218 u8 dst_type, u8 sec_level, u16 conn_timeout,
2219 - u8 role)
2220 + u8 role, bdaddr_t *direct_rpa)
2221 {
2222 struct hci_conn_params *params;
2223 struct hci_conn *conn;
2224 @@ -940,7 +953,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
2225 hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
2226 }
2227
2228 - hci_req_add_le_create_conn(&req, conn);
2229 + hci_req_add_le_create_conn(&req, conn, direct_rpa);
2230
2231 create_conn:
2232 err = hci_req_run(&req, create_le_conn_complete);
2233 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
2234 index cd3bbb766c24..139707cd9d35 100644
2235 --- a/net/bluetooth/hci_event.c
2236 +++ b/net/bluetooth/hci_event.c
2237 @@ -4648,7 +4648,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
2238 /* This function requires the caller holds hdev->lock */
2239 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
2240 bdaddr_t *addr,
2241 - u8 addr_type, u8 adv_type)
2242 + u8 addr_type, u8 adv_type,
2243 + bdaddr_t *direct_rpa)
2244 {
2245 struct hci_conn *conn;
2246 struct hci_conn_params *params;
2247 @@ -4699,7 +4700,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
2248 }
2249
2250 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
2251 - HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
2252 + HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
2253 + direct_rpa);
2254 if (!IS_ERR(conn)) {
2255 /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
2256 * by higher layer that tried to connect, if no then
2257 @@ -4808,8 +4810,13 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
2258 bdaddr_type = irk->addr_type;
2259 }
2260
2261 - /* Check if we have been requested to connect to this device */
2262 - conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
2263 + /* Check if we have been requested to connect to this device.
2264 + *
2265 + * direct_addr is set only for directed advertising reports (it is NULL
2266 + * for advertising reports) and is already verified to be RPA above.
2267 + */
2268 + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
2269 + direct_addr);
2270 if (conn && type == LE_ADV_IND) {
2271 /* Store report for later inclusion by
2272 * mgmt_device_connected
2273 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
2274 index fc6615d59165..9b7907ebfa01 100644
2275 --- a/net/bluetooth/l2cap_core.c
2276 +++ b/net/bluetooth/l2cap_core.c
2277 @@ -7156,7 +7156,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
2278 hcon = hci_connect_le(hdev, dst, dst_type,
2279 chan->sec_level,
2280 HCI_LE_CONN_TIMEOUT,
2281 - HCI_ROLE_SLAVE);
2282 + HCI_ROLE_SLAVE, NULL);
2283 else
2284 hcon = hci_connect_le_scan(hdev, dst, dst_type,
2285 chan->sec_level,
2286 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
2287 index 0901de42ed85..586a008b1642 100644
2288 --- a/net/ipv4/ip_gre.c
2289 +++ b/net/ipv4/ip_gre.c
2290 @@ -778,8 +778,14 @@ static void ipgre_link_update(struct net_device *dev, bool set_mtu)
2291 tunnel->encap.type == TUNNEL_ENCAP_NONE) {
2292 dev->features |= NETIF_F_GSO_SOFTWARE;
2293 dev->hw_features |= NETIF_F_GSO_SOFTWARE;
2294 + } else {
2295 + dev->features &= ~NETIF_F_GSO_SOFTWARE;
2296 + dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
2297 }
2298 dev->features |= NETIF_F_LLTX;
2299 + } else {
2300 + dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
2301 + dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
2302 }
2303 }
2304
2305 diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
2306 index 14b67dfacc4b..0fbd3ee26165 100644
2307 --- a/net/l2tp/l2tp_core.c
2308 +++ b/net/l2tp/l2tp_core.c
2309 @@ -335,26 +335,6 @@ int l2tp_session_register(struct l2tp_session *session,
2310 }
2311 EXPORT_SYMBOL_GPL(l2tp_session_register);
2312
2313 -/* Lookup a tunnel by id
2314 - */
2315 -struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id)
2316 -{
2317 - struct l2tp_tunnel *tunnel;
2318 - struct l2tp_net *pn = l2tp_pernet(net);
2319 -
2320 - rcu_read_lock_bh();
2321 - list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
2322 - if (tunnel->tunnel_id == tunnel_id) {
2323 - rcu_read_unlock_bh();
2324 - return tunnel;
2325 - }
2326 - }
2327 - rcu_read_unlock_bh();
2328 -
2329 - return NULL;
2330 -}
2331 -EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
2332 -
2333 struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth)
2334 {
2335 struct l2tp_net *pn = l2tp_pernet(net);
2336 @@ -1436,74 +1416,11 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
2337 {
2338 struct l2tp_tunnel *tunnel = NULL;
2339 int err;
2340 - struct socket *sock = NULL;
2341 - struct sock *sk = NULL;
2342 - struct l2tp_net *pn;
2343 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
2344
2345 - /* Get the tunnel socket from the fd, which was opened by
2346 - * the userspace L2TP daemon. If not specified, create a
2347 - * kernel socket.
2348 - */
2349 - if (fd < 0) {
2350 - err = l2tp_tunnel_sock_create(net, tunnel_id, peer_tunnel_id,
2351 - cfg, &sock);
2352 - if (err < 0)
2353 - goto err;
2354 - } else {
2355 - sock = sockfd_lookup(fd, &err);
2356 - if (!sock) {
2357 - pr_err("tunl %u: sockfd_lookup(fd=%d) returned %d\n",
2358 - tunnel_id, fd, err);
2359 - err = -EBADF;
2360 - goto err;
2361 - }
2362 -
2363 - /* Reject namespace mismatches */
2364 - if (!net_eq(sock_net(sock->sk), net)) {
2365 - pr_err("tunl %u: netns mismatch\n", tunnel_id);
2366 - err = -EINVAL;
2367 - goto err;
2368 - }
2369 - }
2370 -
2371 - sk = sock->sk;
2372 -
2373 if (cfg != NULL)
2374 encap = cfg->encap;
2375
2376 - /* Quick sanity checks */
2377 - err = -EPROTONOSUPPORT;
2378 - if (sk->sk_type != SOCK_DGRAM) {
2379 - pr_debug("tunl %hu: fd %d wrong socket type\n",
2380 - tunnel_id, fd);
2381 - goto err;
2382 - }
2383 - switch (encap) {
2384 - case L2TP_ENCAPTYPE_UDP:
2385 - if (sk->sk_protocol != IPPROTO_UDP) {
2386 - pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
2387 - tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
2388 - goto err;
2389 - }
2390 - break;
2391 - case L2TP_ENCAPTYPE_IP:
2392 - if (sk->sk_protocol != IPPROTO_L2TP) {
2393 - pr_err("tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
2394 - tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
2395 - goto err;
2396 - }
2397 - break;
2398 - }
2399 -
2400 - /* Check if this socket has already been prepped */
2401 - tunnel = l2tp_tunnel(sk);
2402 - if (tunnel != NULL) {
2403 - /* This socket has already been prepped */
2404 - err = -EBUSY;
2405 - goto err;
2406 - }
2407 -
2408 tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
2409 if (tunnel == NULL) {
2410 err = -ENOMEM;
2411 @@ -1520,72 +1437,126 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
2412 rwlock_init(&tunnel->hlist_lock);
2413 tunnel->acpt_newsess = true;
2414
2415 - /* The net we belong to */
2416 - tunnel->l2tp_net = net;
2417 - pn = l2tp_pernet(net);
2418 -
2419 if (cfg != NULL)
2420 tunnel->debug = cfg->debug;
2421
2422 - /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
2423 tunnel->encap = encap;
2424 - if (encap == L2TP_ENCAPTYPE_UDP) {
2425 - struct udp_tunnel_sock_cfg udp_cfg = { };
2426 -
2427 - udp_cfg.sk_user_data = tunnel;
2428 - udp_cfg.encap_type = UDP_ENCAP_L2TPINUDP;
2429 - udp_cfg.encap_rcv = l2tp_udp_encap_recv;
2430 - udp_cfg.encap_destroy = l2tp_udp_encap_destroy;
2431 -
2432 - setup_udp_tunnel_sock(net, sock, &udp_cfg);
2433 - } else {
2434 - sk->sk_user_data = tunnel;
2435 - }
2436
2437 - /* Bump the reference count. The tunnel context is deleted
2438 - * only when this drops to zero. A reference is also held on
2439 - * the tunnel socket to ensure that it is not released while
2440 - * the tunnel is extant. Must be done before sk_destruct is
2441 - * set.
2442 - */
2443 refcount_set(&tunnel->ref_count, 1);
2444 - sock_hold(sk);
2445 - tunnel->sock = sk;
2446 tunnel->fd = fd;
2447
2448 - /* Hook on the tunnel socket destructor so that we can cleanup
2449 - * if the tunnel socket goes away.
2450 - */
2451 - tunnel->old_sk_destruct = sk->sk_destruct;
2452 - sk->sk_destruct = &l2tp_tunnel_destruct;
2453 - lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock");
2454 -
2455 - sk->sk_allocation = GFP_ATOMIC;
2456 -
2457 /* Init delete workqueue struct */
2458 INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work);
2459
2460 - /* Add tunnel to our list */
2461 INIT_LIST_HEAD(&tunnel->list);
2462 - spin_lock_bh(&pn->l2tp_tunnel_list_lock);
2463 - list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
2464 - spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
2465
2466 err = 0;
2467 err:
2468 if (tunnelp)
2469 *tunnelp = tunnel;
2470
2471 - /* If tunnel's socket was created by the kernel, it doesn't
2472 - * have a file.
2473 - */
2474 - if (sock && sock->file)
2475 - sockfd_put(sock);
2476 -
2477 return err;
2478 }
2479 EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
2480
2481 +static int l2tp_validate_socket(const struct sock *sk, const struct net *net,
2482 + enum l2tp_encap_type encap)
2483 +{
2484 + if (!net_eq(sock_net(sk), net))
2485 + return -EINVAL;
2486 +
2487 + if (sk->sk_type != SOCK_DGRAM)
2488 + return -EPROTONOSUPPORT;
2489 +
2490 + if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) ||
2491 + (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP))
2492 + return -EPROTONOSUPPORT;
2493 +
2494 + if (sk->sk_user_data)
2495 + return -EBUSY;
2496 +
2497 + return 0;
2498 +}
2499 +
2500 +int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
2501 + struct l2tp_tunnel_cfg *cfg)
2502 +{
2503 + struct l2tp_tunnel *tunnel_walk;
2504 + struct l2tp_net *pn;
2505 + struct socket *sock;
2506 + struct sock *sk;
2507 + int ret;
2508 +
2509 + if (tunnel->fd < 0) {
2510 + ret = l2tp_tunnel_sock_create(net, tunnel->tunnel_id,
2511 + tunnel->peer_tunnel_id, cfg,
2512 + &sock);
2513 + if (ret < 0)
2514 + goto err;
2515 + } else {
2516 + sock = sockfd_lookup(tunnel->fd, &ret);
2517 + if (!sock)
2518 + goto err;
2519 +
2520 + ret = l2tp_validate_socket(sock->sk, net, tunnel->encap);
2521 + if (ret < 0)
2522 + goto err_sock;
2523 + }
2524 +
2525 + sk = sock->sk;
2526 +
2527 + sock_hold(sk);
2528 + tunnel->sock = sk;
2529 + tunnel->l2tp_net = net;
2530 +
2531 + pn = l2tp_pernet(net);
2532 +
2533 + spin_lock_bh(&pn->l2tp_tunnel_list_lock);
2534 + list_for_each_entry(tunnel_walk, &pn->l2tp_tunnel_list, list) {
2535 + if (tunnel_walk->tunnel_id == tunnel->tunnel_id) {
2536 + spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
2537 +
2538 + ret = -EEXIST;
2539 + goto err_sock;
2540 + }
2541 + }
2542 + list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
2543 + spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
2544 +
2545 + if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
2546 + struct udp_tunnel_sock_cfg udp_cfg = {
2547 + .sk_user_data = tunnel,
2548 + .encap_type = UDP_ENCAP_L2TPINUDP,
2549 + .encap_rcv = l2tp_udp_encap_recv,
2550 + .encap_destroy = l2tp_udp_encap_destroy,
2551 + };
2552 +
2553 + setup_udp_tunnel_sock(net, sock, &udp_cfg);
2554 + } else {
2555 + sk->sk_user_data = tunnel;
2556 + }
2557 +
2558 + tunnel->old_sk_destruct = sk->sk_destruct;
2559 + sk->sk_destruct = &l2tp_tunnel_destruct;
2560 + lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class,
2561 + "l2tp_sock");
2562 + sk->sk_allocation = GFP_ATOMIC;
2563 +
2564 + if (tunnel->fd >= 0)
2565 + sockfd_put(sock);
2566 +
2567 + return 0;
2568 +
2569 +err_sock:
2570 + if (tunnel->fd < 0)
2571 + sock_release(sock);
2572 + else
2573 + sockfd_put(sock);
2574 +err:
2575 + return ret;
2576 +}
2577 +EXPORT_SYMBOL_GPL(l2tp_tunnel_register);
2578 +
2579 /* This function is used by the netlink TUNNEL_DELETE command.
2580 */
2581 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
2582 diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
2583 index 2718d0b284d0..ba33cbec71eb 100644
2584 --- a/net/l2tp/l2tp_core.h
2585 +++ b/net/l2tp/l2tp_core.h
2586 @@ -220,12 +220,14 @@ struct l2tp_session *l2tp_session_get(const struct net *net,
2587 struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth);
2588 struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net,
2589 const char *ifname);
2590 -struct l2tp_tunnel *l2tp_tunnel_find(const struct net *net, u32 tunnel_id);
2591 struct l2tp_tunnel *l2tp_tunnel_find_nth(const struct net *net, int nth);
2592
2593 int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id,
2594 u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg,
2595 struct l2tp_tunnel **tunnelp);
2596 +int l2tp_tunnel_register(struct l2tp_tunnel *tunnel, struct net *net,
2597 + struct l2tp_tunnel_cfg *cfg);
2598 +
2599 void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
2600 void l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
2601 struct l2tp_session *l2tp_session_create(int priv_size,
2602 diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
2603 index e7ea9c4b89ff..b05dbd9ffcb2 100644
2604 --- a/net/l2tp/l2tp_netlink.c
2605 +++ b/net/l2tp/l2tp_netlink.c
2606 @@ -236,12 +236,6 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
2607 if (info->attrs[L2TP_ATTR_DEBUG])
2608 cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
2609
2610 - tunnel = l2tp_tunnel_find(net, tunnel_id);
2611 - if (tunnel != NULL) {
2612 - ret = -EEXIST;
2613 - goto out;
2614 - }
2615 -
2616 ret = -EINVAL;
2617 switch (cfg.encap) {
2618 case L2TP_ENCAPTYPE_UDP:
2619 @@ -251,9 +245,19 @@ static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info
2620 break;
2621 }
2622
2623 - if (ret >= 0)
2624 - ret = l2tp_tunnel_notify(&l2tp_nl_family, info,
2625 - tunnel, L2TP_CMD_TUNNEL_CREATE);
2626 + if (ret < 0)
2627 + goto out;
2628 +
2629 + l2tp_tunnel_inc_refcount(tunnel);
2630 + ret = l2tp_tunnel_register(tunnel, net, &cfg);
2631 + if (ret < 0) {
2632 + kfree(tunnel);
2633 + goto out;
2634 + }
2635 + ret = l2tp_tunnel_notify(&l2tp_nl_family, info, tunnel,
2636 + L2TP_CMD_TUNNEL_CREATE);
2637 + l2tp_tunnel_dec_refcount(tunnel);
2638 +
2639 out:
2640 return ret;
2641 }
2642 diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
2643 index 3b02f24ea9ec..3d7887cc599b 100644
2644 --- a/net/l2tp/l2tp_ppp.c
2645 +++ b/net/l2tp/l2tp_ppp.c
2646 @@ -698,6 +698,15 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
2647 error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
2648 if (error < 0)
2649 goto end;
2650 +
2651 + l2tp_tunnel_inc_refcount(tunnel);
2652 + error = l2tp_tunnel_register(tunnel, sock_net(sk),
2653 + &tcfg);
2654 + if (error < 0) {
2655 + kfree(tunnel);
2656 + goto end;
2657 + }
2658 + drop_tunnel = true;
2659 }
2660 } else {
2661 /* Error if we can't find the tunnel */
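The l2tp refactor splits tunnel setup into two phases: l2tp_tunnel_create() now only allocates and initializes an unpublished tunnel, while the new l2tp_tunnel_register() performs the socket checks (same netns, SOCK_DGRAM, matching protocol, sk_user_data unused) and inserts the tunnel into the per-net list while holding l2tp_tunnel_list_lock, returning -EEXIST for a duplicate id. That closes the window the old lockless l2tp_tunnel_find() pre-check left between lookup and insertion; both callers now register explicitly and free the tunnel on failure. A compact userspace model of the create/register split, with a pthread mutex standing in for the spinlock:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tunnel {
        unsigned int tunnel_id;
        struct tunnel *next;
};

static struct tunnel *tunnel_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Phase 1: allocate; the object is not reachable by anyone yet. */
static struct tunnel *tunnel_create(unsigned int id)
{
        struct tunnel *t = calloc(1, sizeof(*t));

        if (t)
                t->tunnel_id = id;
        return t;
}

/* Phase 2: check for duplicates and publish atomically, so two
 * concurrent creates with the same id cannot both succeed. */
static int tunnel_register(struct tunnel *t)
{
        struct tunnel *walk;

        pthread_mutex_lock(&list_lock);
        for (walk = tunnel_list; walk; walk = walk->next) {
                if (walk->tunnel_id == t->tunnel_id) {
                        pthread_mutex_unlock(&list_lock);
                        return -EEXIST;
                }
        }
        t->next = tunnel_list;
        tunnel_list = t;
        pthread_mutex_unlock(&list_lock);
        return 0;
}

int main(void)
{
        struct tunnel *a = tunnel_create(1);
        struct tunnel *b = tunnel_create(1);

        printf("first register:  %d\n", tunnel_register(a)); /* 0 */
        printf("second register: %d\n", tunnel_register(b)); /* -EEXIST */
        free(b);        /* register failed: caller frees the unpublished tunnel */
        return 0;
}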
2662 diff --git a/net/rds/send.c b/net/rds/send.c
2663 index b1b0022b8370..85734e5a018e 100644
2664 --- a/net/rds/send.c
2665 +++ b/net/rds/send.c
2666 @@ -1,5 +1,5 @@
2667 /*
2668 - * Copyright (c) 2006 Oracle. All rights reserved.
2669 + * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved.
2670 *
2671 * This software is available to you under a choice of one of two
2672 * licenses. You may choose to be licensed under the terms of the GNU
2673 @@ -997,10 +997,15 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
2674 if (conn->c_npaths == 0 && hash != 0) {
2675 rds_send_ping(conn, 0);
2676
2677 - if (conn->c_npaths == 0) {
2678 - wait_event_interruptible(conn->c_hs_waitq,
2679 - (conn->c_npaths != 0));
2680 - }
2681 + /* The underlying connection is not up yet. Need to wait
2682 + * until it is up to be sure that the non-zero c_path can be
2683 + * used. But if we are interrupted, we have to use the zero
2684 + * c_path in case the connection ends up being non-MP capable.
2685 + */
2686 + if (conn->c_npaths == 0)
2687 + if (wait_event_interruptible(conn->c_hs_waitq,
2688 + conn->c_npaths != 0))
2689 + hash = 0;
2690 if (conn->c_npaths == 1)
2691 hash = 0;
2692 }
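The rds change handles wait_event_interruptible() returning nonzero, meaning a signal arrived before the handshake filled in c_npaths: rather than proceeding with a lane that may not exist, the sender falls back to hash 0, the one path that is valid whether or not the peer turns out to be MP-capable. The decision shape, modeled standalone with stand-in helpers:

#include <stdio.h>

/* Stand-in for wait_event_interruptible(): returns 0 once the
 * handshake has filled in npaths, nonzero when "interrupted". */
static int wait_for_npaths(int *npaths, int interrupted)
{
        if (interrupted)
                return -512;    /* like -ERESTARTSYS */
        *npaths = 8;            /* pretend the peer negotiated 8 paths */
        return 0;
}

static unsigned int pick_path(unsigned int hash, int interrupted)
{
        int npaths = 0;

        if (npaths == 0 && hash != 0)
                if (wait_for_npaths(&npaths, interrupted))
                        hash = 0;       /* interrupted: path 0 is always safe */
        if (npaths == 1)
                hash = 0;               /* peer is not MP capable */
        return hash;
}

int main(void)
{
        printf("uninterrupted -> path %u\n", pick_path(3, 0)); /* 3 */
        printf("interrupted   -> path %u\n", pick_path(3, 1)); /* 0 */
        return 0;
}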
2693 diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
2694 index 12649c9fedab..8654494b4d0a 100644
2695 --- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
2696 +++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
2697 @@ -237,9 +237,6 @@ make_checksum_hmac_md5(struct krb5_ctx *kctx, char *header, int hdrlen,
2698
2699 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
2700
2701 - err = crypto_ahash_init(req);
2702 - if (err)
2703 - goto out;
2704 err = crypto_ahash_setkey(hmac_md5, cksumkey, kctx->gk5e->keylength);
2705 if (err)
2706 goto out;
2707 diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
2708 index a9428daa69f3..b28c55447e63 100644
2709 --- a/security/apparmor/apparmorfs.c
2710 +++ b/security/apparmor/apparmorfs.c
2711 @@ -1189,9 +1189,7 @@ static int seq_ns_level_show(struct seq_file *seq, void *v)
2712 static int seq_ns_name_show(struct seq_file *seq, void *v)
2713 {
2714 struct aa_label *label = begin_current_label_crit_section();
2715 -
2716 - seq_printf(seq, "%s\n", aa_ns_name(labels_ns(label),
2717 - labels_ns(label), true));
2718 + seq_printf(seq, "%s\n", labels_ns(label)->base.name);
2719 end_current_label_crit_section(label);
2720
2721 return 0;
2722 diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
2723 index 4ac095118717..2ebc00a579fd 100644
2724 --- a/security/apparmor/include/audit.h
2725 +++ b/security/apparmor/include/audit.h
2726 @@ -126,6 +126,10 @@ struct apparmor_audit_data {
2727 const char *target;
2728 kuid_t ouid;
2729 } fs;
2730 + struct {
2731 + int rlim;
2732 + unsigned long max;
2733 + } rlim;
2734 int signal;
2735 };
2736 };
2737 @@ -134,10 +138,6 @@ struct apparmor_audit_data {
2738 const char *ns;
2739 long pos;
2740 } iface;
2741 - struct {
2742 - int rlim;
2743 - unsigned long max;
2744 - } rlim;
2745 struct {
2746 const char *src_name;
2747 const char *type;
2748 diff --git a/security/apparmor/include/sig_names.h b/security/apparmor/include/sig_names.h
2749 index 92e62fe95292..5ca47c50dfa7 100644
2750 --- a/security/apparmor/include/sig_names.h
2751 +++ b/security/apparmor/include/sig_names.h
2752 @@ -2,6 +2,8 @@
2753
2754 #define SIGUNKNOWN 0
2755 #define MAXMAPPED_SIG 35
2756 +#define MAXMAPPED_SIGNAME (MAXMAPPED_SIG + 1)
2757 +
2758 /* provide a mapping of arch signal to internal signal # for mediation
2759 * those that are always an alias SIGCLD for SIGCLHD and SIGPOLL for SIGIO
2760 * map to the same entry those that may/or may not get a separate entry
2761 @@ -56,7 +58,7 @@ static const int sig_map[MAXMAPPED_SIG] = {
2762 };
2763
2764 /* this table is ordered post sig_map[sig] mapping */
2765 -static const char *const sig_names[MAXMAPPED_SIG + 1] = {
2766 +static const char *const sig_names[MAXMAPPED_SIGNAME] = {
2767 "unknown",
2768 "hup",
2769 "int",
2770 diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
2771 index b40678f3c1d5..586facd35f7c 100644
2772 --- a/security/apparmor/ipc.c
2773 +++ b/security/apparmor/ipc.c
2774 @@ -174,7 +174,7 @@ static void audit_signal_cb(struct audit_buffer *ab, void *va)
2775 audit_signal_mask(ab, aad(sa)->denied);
2776 }
2777 }
2778 - if (aad(sa)->signal < MAXMAPPED_SIG)
2779 + if (aad(sa)->signal < MAXMAPPED_SIGNAME)
2780 audit_log_format(ab, " signal=%s", sig_names[aad(sa)->signal]);
2781 else
2782 audit_log_format(ab, " signal=rtmin+%d",
2783 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
2784 index aa1593ce551d..f9157aed1289 100644
2785 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
2786 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
2787 @@ -1378,6 +1378,7 @@ static int intel_pt_overflow(struct intel_pt_decoder *decoder)
2788 intel_pt_clear_tx_flags(decoder);
2789 decoder->have_tma = false;
2790 decoder->cbr = 0;
2791 + decoder->timestamp_insn_cnt = 0;
2792 decoder->pkt_state = INTEL_PT_STATE_ERR_RESYNC;
2793 decoder->overflow = true;
2794 return -EOVERFLOW;
2795 @@ -1616,6 +1617,7 @@ static int intel_pt_walk_fup_tip(struct intel_pt_decoder *decoder)
2796 case INTEL_PT_PWRX:
2797 intel_pt_log("ERROR: Missing TIP after FUP\n");
2798 decoder->pkt_state = INTEL_PT_STATE_ERR3;
2799 + decoder->pkt_step = 0;
2800 return -ENOENT;
2801
2802 case INTEL_PT_OVF:
2803 @@ -2390,14 +2392,6 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
2804 return &decoder->state;
2805 }
2806
2807 -static bool intel_pt_at_psb(unsigned char *buf, size_t len)
2808 -{
2809 - if (len < INTEL_PT_PSB_LEN)
2810 - return false;
2811 - return memmem(buf, INTEL_PT_PSB_LEN, INTEL_PT_PSB_STR,
2812 - INTEL_PT_PSB_LEN);
2813 -}
2814 -
2815 /**
2816 * intel_pt_next_psb - move buffer pointer to the start of the next PSB packet.
2817 * @buf: pointer to buffer pointer
2818 @@ -2486,6 +2480,7 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
2819 * @buf: buffer
2820 * @len: size of buffer
2821 * @tsc: TSC value returned
2822 + * @rem: returns remaining size when TSC is found
2823 *
2824 * Find a TSC packet in @buf and return the TSC value. This function assumes
2825 * that @buf starts at a PSB and that PSB+ will contain TSC and so stops if a
2826 @@ -2493,7 +2488,8 @@ static unsigned char *intel_pt_last_psb(unsigned char *buf, size_t len)
2827 *
2828 * Return: %true if TSC is found, false otherwise.
2829 */
2830 -static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
2831 +static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc,
2832 + size_t *rem)
2833 {
2834 struct intel_pt_pkt packet;
2835 int ret;
2836 @@ -2504,6 +2500,7 @@ static bool intel_pt_next_tsc(unsigned char *buf, size_t len, uint64_t *tsc)
2837 return false;
2838 if (packet.type == INTEL_PT_TSC) {
2839 *tsc = packet.payload;
2840 + *rem = len;
2841 return true;
2842 }
2843 if (packet.type == INTEL_PT_PSBEND)
2844 @@ -2554,6 +2551,8 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
2845 * @len_a: size of first buffer
2846 * @buf_b: second buffer
2847 * @len_b: size of second buffer
2848 + * @consecutive: returns true if there is data in buf_b that is consecutive
2849 + * to buf_a
2850 *
2851 * If the trace contains TSC we can look at the last TSC of @buf_a and the
2852 * first TSC of @buf_b in order to determine if the buffers overlap, and then
2853 @@ -2566,33 +2565,41 @@ static int intel_pt_tsc_cmp(uint64_t tsc1, uint64_t tsc2)
2854 static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
2855 size_t len_a,
2856 unsigned char *buf_b,
2857 - size_t len_b)
2858 + size_t len_b, bool *consecutive)
2859 {
2860 uint64_t tsc_a, tsc_b;
2861 unsigned char *p;
2862 - size_t len;
2863 + size_t len, rem_a, rem_b;
2864
2865 p = intel_pt_last_psb(buf_a, len_a);
2866 if (!p)
2867 return buf_b; /* No PSB in buf_a => no overlap */
2868
2869 len = len_a - (p - buf_a);
2870 - if (!intel_pt_next_tsc(p, len, &tsc_a)) {
2871 + if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a)) {
2872 /* The last PSB+ in buf_a is incomplete, so go back one more */
2873 len_a -= len;
2874 p = intel_pt_last_psb(buf_a, len_a);
2875 if (!p)
2876 return buf_b; /* No full PSB+ => assume no overlap */
2877 len = len_a - (p - buf_a);
2878 - if (!intel_pt_next_tsc(p, len, &tsc_a))
2879 + if (!intel_pt_next_tsc(p, len, &tsc_a, &rem_a))
2880 return buf_b; /* No TSC in buf_a => assume no overlap */
2881 }
2882
2883 while (1) {
2884 /* Ignore PSB+ with no TSC */
2885 - if (intel_pt_next_tsc(buf_b, len_b, &tsc_b) &&
2886 - intel_pt_tsc_cmp(tsc_a, tsc_b) < 0)
2887 - return buf_b; /* tsc_a < tsc_b => no overlap */
2888 + if (intel_pt_next_tsc(buf_b, len_b, &tsc_b, &rem_b)) {
2889 + int cmp = intel_pt_tsc_cmp(tsc_a, tsc_b);
2890 +
2891 + /* Same TSC, so buffers are consecutive */
2892 + if (!cmp && rem_b >= rem_a) {
2893 + *consecutive = true;
2894 + return buf_b + len_b - (rem_b - rem_a);
2895 + }
2896 + if (cmp < 0)
2897 + return buf_b; /* tsc_a < tsc_b => no overlap */
2898 + }
2899
2900 if (!intel_pt_step_psb(&buf_b, &len_b))
2901 return buf_b + len_b; /* No PSB in buf_b => no data */
2902 @@ -2606,6 +2613,8 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
2903 * @buf_b: second buffer
2904 * @len_b: size of second buffer
2905 * @have_tsc: can use TSC packets to detect overlap
2906 + * @consecutive: returns true if there is data in buf_b that is consecutive
2907 + * to buf_a
2908 *
2909 * When trace samples or snapshots are recorded there is the possibility that
2910 * the data overlaps. Note that, for the purposes of decoding, data is only
2911 @@ -2616,7 +2625,7 @@ static unsigned char *intel_pt_find_overlap_tsc(unsigned char *buf_a,
2912 */
2913 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2914 unsigned char *buf_b, size_t len_b,
2915 - bool have_tsc)
2916 + bool have_tsc, bool *consecutive)
2917 {
2918 unsigned char *found;
2919
2920 @@ -2628,7 +2637,8 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2921 return buf_b; /* No overlap */
2922
2923 if (have_tsc) {
2924 - found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b);
2925 + found = intel_pt_find_overlap_tsc(buf_a, len_a, buf_b, len_b,
2926 + consecutive);
2927 if (found)
2928 return found;
2929 }
2930 @@ -2643,28 +2653,16 @@ unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2931 }
2932
2933 /* Now len_b >= len_a */
2934 - if (len_b > len_a) {
2935 - /* The leftover buffer 'b' must start at a PSB */
2936 - while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
2937 - if (!intel_pt_step_psb(&buf_a, &len_a))
2938 - return buf_b; /* No overlap */
2939 - }
2940 - }
2941 -
2942 while (1) {
2943 /* Potential overlap so check the bytes */
2944 found = memmem(buf_a, len_a, buf_b, len_a);
2945 - if (found)
2946 + if (found) {
2947 + *consecutive = true;
2948 return buf_b + len_a;
2949 + }
2950
2951 /* Try again at next PSB in buffer 'a' */
2952 if (!intel_pt_step_psb(&buf_a, &len_a))
2953 return buf_b; /* No overlap */
2954 -
2955 - /* The leftover buffer 'b' must start at a PSB */
2956 - while (!intel_pt_at_psb(buf_b + len_a, len_b - len_a)) {
2957 - if (!intel_pt_step_psb(&buf_a, &len_a))
2958 - return buf_b; /* No overlap */
2959 - }
2960 }
2961 }
2962 diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
2963 index 921b22e8ca0e..fc1752d50019 100644
2964 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
2965 +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.h
2966 @@ -117,7 +117,7 @@ const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder);
2967
2968 unsigned char *intel_pt_find_overlap(unsigned char *buf_a, size_t len_a,
2969 unsigned char *buf_b, size_t len_b,
2970 - bool have_tsc);
2971 + bool have_tsc, bool *consecutive);
2972
2973 int intel_pt__strerror(int code, char *buf, size_t buflen);
2974
2975 diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
2976 index 3773d9c54f45..0979a6e8b2b7 100644
2977 --- a/tools/perf/util/intel-pt.c
2978 +++ b/tools/perf/util/intel-pt.c
2979 @@ -143,6 +143,7 @@ struct intel_pt_queue {
2980 bool stop;
2981 bool step_through_buffers;
2982 bool use_buffer_pid_tid;
2983 + bool sync_switch;
2984 pid_t pid, tid;
2985 int cpu;
2986 int switch_state;
2987 @@ -207,14 +208,17 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
2988 static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
2989 struct auxtrace_buffer *b)
2990 {
2991 + bool consecutive = false;
2992 void *start;
2993
2994 start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
2995 - pt->have_tsc);
2996 + pt->have_tsc, &consecutive);
2997 if (!start)
2998 return -EINVAL;
2999 b->use_size = b->data + b->size - start;
3000 b->use_data = start;
3001 + if (b->use_size && consecutive)
3002 + b->consecutive = true;
3003 return 0;
3004 }
3005
3006 @@ -960,10 +964,12 @@ static int intel_pt_setup_queue(struct intel_pt *pt,
3007 if (pt->timeless_decoding || !pt->have_sched_switch)
3008 ptq->use_buffer_pid_tid = true;
3009 }
3010 +
3011 + ptq->sync_switch = pt->sync_switch;
3012 }
3013
3014 if (!ptq->on_heap &&
3015 - (!pt->sync_switch ||
3016 + (!ptq->sync_switch ||
3017 ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
3018 const struct intel_pt_state *state;
3019 int ret;
3020 @@ -1546,7 +1552,7 @@ static int intel_pt_sample(struct intel_pt_queue *ptq)
3021 if (pt->synth_opts.last_branch)
3022 intel_pt_update_last_branch_rb(ptq);
3023
3024 - if (!pt->sync_switch)
3025 + if (!ptq->sync_switch)
3026 return 0;
3027
3028 if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
3029 @@ -1627,6 +1633,21 @@ static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
3030 return switch_ip;
3031 }
3032
3033 +static void intel_pt_enable_sync_switch(struct intel_pt *pt)
3034 +{
3035 + unsigned int i;
3036 +
3037 + pt->sync_switch = true;
3038 +
3039 + for (i = 0; i < pt->queues.nr_queues; i++) {
3040 + struct auxtrace_queue *queue = &pt->queues.queue_array[i];
3041 + struct intel_pt_queue *ptq = queue->priv;
3042 +
3043 + if (ptq)
3044 + ptq->sync_switch = true;
3045 + }
3046 +}
3047 +
3048 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
3049 {
3050 const struct intel_pt_state *state = ptq->state;
3051 @@ -1643,7 +1664,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
3052 if (pt->switch_ip) {
3053 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
3054 pt->switch_ip, pt->ptss_ip);
3055 - pt->sync_switch = true;
3056 + intel_pt_enable_sync_switch(pt);
3057 }
3058 }
3059 }
3060 @@ -1659,9 +1680,9 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
3061 if (state->err) {
3062 if (state->err == INTEL_PT_ERR_NODATA)
3063 return 1;
3064 - if (pt->sync_switch &&
3065 + if (ptq->sync_switch &&
3066 state->from_ip >= pt->kernel_start) {
3067 - pt->sync_switch = false;
3068 + ptq->sync_switch = false;
3069 intel_pt_next_tid(pt, ptq);
3070 }
3071 if (pt->synth_opts.errors) {
3072 @@ -1687,7 +1708,7 @@ static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
3073 state->timestamp, state->est_timestamp);
3074 ptq->timestamp = state->est_timestamp;
3075 /* Use estimated TSC in unknown switch state */
3076 - } else if (pt->sync_switch &&
3077 + } else if (ptq->sync_switch &&
3078 ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
3079 intel_pt_is_switch_ip(ptq, state->to_ip) &&
3080 ptq->next_tid == -1) {
3081 @@ -1834,7 +1855,7 @@ static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
3082 return 1;
3083
3084 ptq = intel_pt_cpu_to_ptq(pt, cpu);
3085 - if (!ptq)
3086 + if (!ptq || !ptq->sync_switch)
3087 return 1;
3088
3089 switch (ptq->switch_state) {