Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0175-4.14.76-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 65675 byte(s)
-added up to patches-4.14.79
1 niro 3238 diff --git a/Makefile b/Makefile
2     index 7fc373c011c0..332dd011b3b9 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 14
9     -SUBLEVEL = 75
10     +SUBLEVEL = 76
11     EXTRAVERSION =
12     NAME = Petit Gorille
13    
14     diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
15     index 4674541eba3f..8ce6e7235915 100644
16     --- a/arch/arc/kernel/process.c
17     +++ b/arch/arc/kernel/process.c
18     @@ -241,6 +241,26 @@ int copy_thread(unsigned long clone_flags,
19     task_thread_info(current)->thr_ptr;
20     }
21    
22     +
23     + /*
24     + * setup usermode thread pointer #1:
25     + * when child is picked by scheduler, __switch_to() uses @c_callee to
26     + * populate usermode callee regs: this works (despite being in a kernel
27     + * function) since special return path for child @ret_from_fork()
28     + * ensures those regs are not clobbered all the way to RTIE to usermode
29     + */
30     + c_callee->r25 = task_thread_info(p)->thr_ptr;
31     +
32     +#ifdef CONFIG_ARC_CURR_IN_REG
33     + /*
34     + * setup usermode thread pointer #2:
35     + * however for this special use of r25 in kernel, __switch_to() sets
36     + * r25 for kernel needs and only in the final return path is usermode
37     + * r25 setup, from pt_regs->user_r25. So set that up as well
38     + */
39     + c_regs->user_r25 = c_callee->r25;
40     +#endif
41     +
42     return 0;
43     }
44    
45     diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
46     index bbcdf929be54..a5e919e34c42 100644
47     --- a/arch/powerpc/include/asm/setup.h
48     +++ b/arch/powerpc/include/asm/setup.h
49     @@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
50    
51     extern unsigned int rtas_data;
52     extern unsigned long long memory_limit;
53     +extern bool init_mem_is_free;
54     extern unsigned long klimit;
55     extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
56    
57     diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
58     index 096d4e4d31e6..882c750dc519 100644
59     --- a/arch/powerpc/lib/code-patching.c
60     +++ b/arch/powerpc/lib/code-patching.c
61     @@ -22,20 +22,28 @@
62     #include <asm/page.h>
63     #include <asm/code-patching.h>
64     #include <asm/setup.h>
65     +#include <asm/sections.h>
66    
67     -static int __patch_instruction(unsigned int *addr, unsigned int instr)
68     +static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
69     + unsigned int *patch_addr)
70     {
71     int err;
72    
73     - __put_user_size(instr, addr, 4, err);
74     + __put_user_size(instr, patch_addr, 4, err);
75     if (err)
76     return err;
77    
78     - asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" :: "r" (addr));
79     + asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
80     + "r" (exec_addr));
81    
82     return 0;
83     }
84    
85     +static int raw_patch_instruction(unsigned int *addr, unsigned int instr)
86     +{
87     + return __patch_instruction(addr, instr, addr);
88     +}
89     +
90     #ifdef CONFIG_STRICT_KERNEL_RWX
91     static DEFINE_PER_CPU(struct vm_struct *, text_poke_area);
92    
93     @@ -135,10 +143,10 @@ static inline int unmap_patch_area(unsigned long addr)
94     return 0;
95     }
96    
97     -int patch_instruction(unsigned int *addr, unsigned int instr)
98     +static int do_patch_instruction(unsigned int *addr, unsigned int instr)
99     {
100     int err;
101     - unsigned int *dest = NULL;
102     + unsigned int *patch_addr = NULL;
103     unsigned long flags;
104     unsigned long text_poke_addr;
105     unsigned long kaddr = (unsigned long)addr;
106     @@ -149,7 +157,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
107     * to allow patching. We just do the plain old patching
108     */
109     if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
110     - return __patch_instruction(addr, instr);
111     + return raw_patch_instruction(addr, instr);
112    
113     local_irq_save(flags);
114    
115     @@ -159,17 +167,10 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
116     goto out;
117     }
118    
119     - dest = (unsigned int *)(text_poke_addr) +
120     + patch_addr = (unsigned int *)(text_poke_addr) +
121     ((kaddr & ~PAGE_MASK) / sizeof(unsigned int));
122    
123     - /*
124     - * We use __put_user_size so that we can handle faults while
125     - * writing to dest and return err to handle faults gracefully
126     - */
127     - __put_user_size(instr, dest, 4, err);
128     - if (!err)
129     - asm ("dcbst 0, %0; sync; icbi 0,%0; icbi 0,%1; sync; isync"
130     - ::"r" (dest), "r"(addr));
131     + __patch_instruction(addr, instr, patch_addr);
132    
133     err = unmap_patch_area(text_poke_addr);
134     if (err)
135     @@ -182,12 +183,22 @@ out:
136     }
137     #else /* !CONFIG_STRICT_KERNEL_RWX */
138    
139     -int patch_instruction(unsigned int *addr, unsigned int instr)
140     +static int do_patch_instruction(unsigned int *addr, unsigned int instr)
141     {
142     - return __patch_instruction(addr, instr);
143     + return raw_patch_instruction(addr, instr);
144     }
145    
146     #endif /* CONFIG_STRICT_KERNEL_RWX */
147     +
148     +int patch_instruction(unsigned int *addr, unsigned int instr)
149     +{
150     + /* Make sure we aren't patching a freed init section */
151     + if (init_mem_is_free && init_section_contains(addr, 4)) {
152     + pr_debug("Skipping init section patching addr: 0x%px\n", addr);
153     + return 0;
154     + }
155     + return do_patch_instruction(addr, instr);
156     +}
157     NOKPROBE_SYMBOL(patch_instruction);
158    
159     int patch_branch(unsigned int *addr, unsigned long target, int flags)
160     diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
161     index 9c2f83331e5b..30bf13b72e5e 100644
162     --- a/arch/powerpc/mm/mem.c
163     +++ b/arch/powerpc/mm/mem.c
164     @@ -63,6 +63,7 @@
165     #endif
166    
167     unsigned long long memory_limit;
168     +bool init_mem_is_free;
169    
170     #ifdef CONFIG_HIGHMEM
171     pte_t *kmap_pte;
172     @@ -405,6 +406,7 @@ void free_initmem(void)
173     {
174     ppc_md.progress = ppc_printk_progress;
175     mark_initmem_nx();
176     + init_mem_is_free = true;
177     free_initmem_default(POISON_FREE_INITMEM);
178     }
179    
180     diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
181     index b545bf9d2328..0a550dc5c525 100644
182     --- a/arch/x86/entry/vdso/Makefile
183     +++ b/arch/x86/entry/vdso/Makefile
184     @@ -74,7 +74,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
185     CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
186     $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
187     -fno-omit-frame-pointer -foptimize-sibling-calls \
188     - -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
189     + -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO
190     +
191     +ifdef CONFIG_RETPOLINE
192     +ifneq ($(RETPOLINE_VDSO_CFLAGS),)
193     + CFL += $(RETPOLINE_VDSO_CFLAGS)
194     +endif
195     +endif
196    
197     $(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)
198    
199     @@ -153,7 +159,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
200     KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
201     KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
202     KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
203     -KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
204     +
205     +ifdef CONFIG_RETPOLINE
206     +ifneq ($(RETPOLINE_VDSO_CFLAGS),)
207     + KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
208     +endif
209     +endif
210     +
211     $(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
212    
213     $(obj)/vdso32.so.dbg: FORCE \
214     diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
215     index fa8dbfcf7ed3..9c35dc0a9d64 100644
216     --- a/arch/x86/entry/vdso/vclock_gettime.c
217     +++ b/arch/x86/entry/vdso/vclock_gettime.c
218     @@ -43,8 +43,9 @@ extern u8 hvclock_page
219     notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
220     {
221     long ret;
222     - asm("syscall" : "=a" (ret) :
223     - "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
224     + asm ("syscall" : "=a" (ret), "=m" (*ts) :
225     + "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
226     + "memory", "rcx", "r11");
227     return ret;
228     }
229    
230     @@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
231     {
232     long ret;
233    
234     - asm("syscall" : "=a" (ret) :
235     - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
236     + asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
237     + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
238     + "memory", "rcx", "r11");
239     return ret;
240     }
241    
242     @@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
243     {
244     long ret;
245    
246     - asm(
247     + asm (
248     "mov %%ebx, %%edx \n"
249     - "mov %2, %%ebx \n"
250     + "mov %[clock], %%ebx \n"
251     "call __kernel_vsyscall \n"
252     "mov %%edx, %%ebx \n"
253     - : "=a" (ret)
254     - : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
255     + : "=a" (ret), "=m" (*ts)
256     + : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
257     : "memory", "edx");
258     return ret;
259     }
260     @@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
261     {
262     long ret;
263    
264     - asm(
265     + asm (
266     "mov %%ebx, %%edx \n"
267     - "mov %2, %%ebx \n"
268     + "mov %[tv], %%ebx \n"
269     "call __kernel_vsyscall \n"
270     "mov %%edx, %%ebx \n"
271     - : "=a" (ret)
272     - : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
273     + : "=a" (ret), "=m" (*tv), "=m" (*tz)
274     + : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
275     : "memory", "edx");
276     return ret;
277     }
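
[Note] The vclock_gettime.c hunks above tighten the inline-asm constraints of the vDSO fallback paths: the x86-64 syscall instruction clobbers rcx and r11, and the structures the kernel writes (*ts, *tv, *tz) must appear as "=m" outputs so the compiler cannot cache or reorder accesses to them around the asm. A minimal userspace sketch with the same constraints (illustrative only, not kernel code; the helper name is made up, and 228 is the x86-64 __NR_clock_gettime number):

    #include <time.h>

    static long raw_clock_gettime(long clock, struct timespec *ts)
    {
            long ret;

            /* same constraint set the patch introduces: result in rax,
             * *ts as a memory output, rcx/r11 declared clobbered */
            asm volatile ("syscall"
                          : "=a" (ret), "=m" (*ts)
                          : "0" (228 /* __NR_clock_gettime, x86-64 */),
                            "D" (clock), "S" (ts)
                          : "memory", "rcx", "r11");
            return ret;
    }
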
278     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
279     index 1dfb808abd23..d755e0d44ac1 100644
280     --- a/arch/x86/kvm/mmu.c
281     +++ b/arch/x86/kvm/mmu.c
282     @@ -231,6 +231,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
283     */
284     static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
285    
286     +/*
287     + * In some cases, we need to preserve the GFN of a non-present or reserved
288     + * SPTE when we usurp the upper five bits of the physical address space to
289     + * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
290     + * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
291     + * left into the reserved bits, i.e. the GFN in the SPTE will be split into
292     + * high and low parts. This mask covers the lower bits of the GFN.
293     + */
294     +static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
295     +
296     +
297     static void mmu_spte_set(u64 *sptep, u64 spte);
298     static void mmu_free_roots(struct kvm_vcpu *vcpu);
299    
300     @@ -338,9 +349,7 @@ static bool is_mmio_spte(u64 spte)
301    
302     static gfn_t get_mmio_spte_gfn(u64 spte)
303     {
304     - u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
305     - shadow_nonpresent_or_rsvd_mask;
306     - u64 gpa = spte & ~mask;
307     + u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
308    
309     gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
310     & shadow_nonpresent_or_rsvd_mask;
311     @@ -404,6 +413,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
312    
313     static void kvm_mmu_reset_all_pte_masks(void)
314     {
315     + u8 low_phys_bits;
316     +
317     shadow_user_mask = 0;
318     shadow_accessed_mask = 0;
319     shadow_dirty_mask = 0;
320     @@ -418,12 +429,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
321     * appropriate mask to guard against L1TF attacks. Otherwise, it is
322     * assumed that the CPU is not vulnerable to L1TF.
323     */
324     + low_phys_bits = boot_cpu_data.x86_phys_bits;
325     if (boot_cpu_data.x86_phys_bits <
326     - 52 - shadow_nonpresent_or_rsvd_mask_len)
327     + 52 - shadow_nonpresent_or_rsvd_mask_len) {
328     shadow_nonpresent_or_rsvd_mask =
329     rsvd_bits(boot_cpu_data.x86_phys_bits -
330     shadow_nonpresent_or_rsvd_mask_len,
331     boot_cpu_data.x86_phys_bits - 1);
332     + low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
333     + }
334     + shadow_nonpresent_or_rsvd_lower_gfn_mask =
335     + GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
336     }
337    
338     static int is_cpuid_PSE36(void)
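
[Note] The mmu.c hunks above recombine an MMIO guest address whose top shadow_nonpresent_or_rsvd_mask_len (5) bits were shifted left into the SPTE's reserved bits as part of the L1TF mitigation. A standalone sketch of the split/recombine arithmetic, assuming a 36-bit physical address width and an open-coded GENMASK_ULL (illustrative only, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    #define MASK_LEN   5
    #define PHYS_BITS  36
    #define PAGE_SHIFT 12
    #define GENMASK64(h, l) (((~0ULL) >> (63 - (h))) & ~((1ULL << (l)) - 1))

    int main(void)
    {
            uint64_t rsvd_mask  = GENMASK64(PHYS_BITS - 1, PHYS_BITS - MASK_LEN);
            uint64_t lower_mask = GENMASK64(PHYS_BITS - MASK_LEN - 1, PAGE_SHIFT);
            uint64_t gpa  = 0xabcdef000ULL;   /* page-aligned guest address */

            /* encode: low GFN bits stay in place, the top MASK_LEN bits
             * move up into the reserved region of the SPTE */
            uint64_t spte = (gpa & lower_mask) | ((gpa & rsvd_mask) << MASK_LEN);

            /* decode, as get_mmio_spte_gfn() now does */
            uint64_t back = (spte & lower_mask) |
                            ((spte >> MASK_LEN) & rsvd_mask);

            printf("gpa=%#llx back=%#llx\n",
                   (unsigned long long)gpa, (unsigned long long)back);
            return 0;
    }
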
339     diff --git a/block/blk-mq.c b/block/blk-mq.c
340     index 49979c095f31..eac444804736 100644
341     --- a/block/blk-mq.c
342     +++ b/block/blk-mq.c
343     @@ -1512,7 +1512,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
344     BUG_ON(!rq->q);
345     if (rq->mq_ctx != this_ctx) {
346     if (this_ctx) {
347     - trace_block_unplug(this_q, depth, from_schedule);
348     + trace_block_unplug(this_q, depth, !from_schedule);
349     blk_mq_sched_insert_requests(this_q, this_ctx,
350     &ctx_list,
351     from_schedule);
352     @@ -1532,7 +1532,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
353     * on 'ctx_list'. Do those.
354     */
355     if (this_ctx) {
356     - trace_block_unplug(this_q, depth, from_schedule);
357     + trace_block_unplug(this_q, depth, !from_schedule);
358     blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
359     from_schedule);
360     }
361     diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
362     index 770b1539a083..d16b40cd26cc 100644
363     --- a/drivers/base/power/main.c
364     +++ b/drivers/base/power/main.c
365     @@ -1462,8 +1462,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
366    
367     dpm_wait_for_subordinate(dev, async);
368    
369     - if (async_error)
370     + if (async_error) {
371     + dev->power.direct_complete = false;
372     goto Complete;
373     + }
374    
375     /*
376     * If a device configured to wake up the system from sleep states
377     @@ -1475,6 +1477,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
378     pm_wakeup_event(dev, 0);
379    
380     if (pm_wakeup_pending()) {
381     + dev->power.direct_complete = false;
382     async_error = -EBUSY;
383     goto Complete;
384     }
385     diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c
386     index ec8a4376f74f..2fab18fae4fc 100644
387     --- a/drivers/clocksource/timer-atmel-pit.c
388     +++ b/drivers/clocksource/timer-atmel-pit.c
389     @@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
390     data->base = of_iomap(node, 0);
391     if (!data->base) {
392     pr_err("Could not map PIT address\n");
393     - return -ENXIO;
394     + ret = -ENXIO;
395     + goto exit;
396     }
397    
398     data->mck = of_clk_get(node, 0);
399     if (IS_ERR(data->mck)) {
400     pr_err("Unable to get mck clk\n");
401     - return PTR_ERR(data->mck);
402     + ret = PTR_ERR(data->mck);
403     + goto exit;
404     }
405    
406     ret = clk_prepare_enable(data->mck);
407     if (ret) {
408     pr_err("Unable to enable mck\n");
409     - return ret;
410     + goto exit;
411     }
412    
413     /* Get the interrupts property */
414     data->irq = irq_of_parse_and_map(node, 0);
415     if (!data->irq) {
416     pr_err("Unable to get IRQ from DT\n");
417     - return -EINVAL;
418     + ret = -EINVAL;
419     + goto exit;
420     }
421    
422     /*
423     @@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
424     ret = clocksource_register_hz(&data->clksrc, pit_rate);
425     if (ret) {
426     pr_err("Failed to register clocksource\n");
427     - return ret;
428     + goto exit;
429     }
430    
431     /* Set up irq handler */
432     @@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
433     "at91_tick", data);
434     if (ret) {
435     pr_err("Unable to setup IRQ\n");
436     - return ret;
437     + clocksource_unregister(&data->clksrc);
438     + goto exit;
439     }
440    
441     /* Set up and register clockevents */
442     @@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
443     clockevents_register_device(&data->clkevt);
444    
445     return 0;
446     +
447     +exit:
448     + kfree(data);
449     + return ret;
450     }
451     TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
452     at91sam926x_pit_dt_init);
453     diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
454     index 0e8160701833..bb7b59fc5c08 100644
455     --- a/drivers/crypto/chelsio/chcr_algo.c
456     +++ b/drivers/crypto/chelsio/chcr_algo.c
457     @@ -384,7 +384,8 @@ static inline int is_hmac(struct crypto_tfm *tfm)
458    
459     static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
460     struct scatterlist *sg,
461     - struct phys_sge_parm *sg_param)
462     + struct phys_sge_parm *sg_param,
463     + int pci_chan_id)
464     {
465     struct phys_sge_pairs *to;
466     unsigned int len = 0, left_size = sg_param->obsize;
467     @@ -402,6 +403,7 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
468     phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
469     phys_cpl->rss_hdr_int.qid = htons(sg_param->qid);
470     phys_cpl->rss_hdr_int.hash_val = 0;
471     + phys_cpl->rss_hdr_int.channel = pci_chan_id;
472     to = (struct phys_sge_pairs *)((unsigned char *)phys_cpl +
473     sizeof(struct cpl_rx_phys_dsgl));
474     for (i = 0; nents && left_size; to++) {
475     @@ -418,7 +420,8 @@ static void write_phys_cpl(struct cpl_rx_phys_dsgl *phys_cpl,
476     static inline int map_writesg_phys_cpl(struct device *dev,
477     struct cpl_rx_phys_dsgl *phys_cpl,
478     struct scatterlist *sg,
479     - struct phys_sge_parm *sg_param)
480     + struct phys_sge_parm *sg_param,
481     + int pci_chan_id)
482     {
483     if (!sg || !sg_param->nents)
484     return -EINVAL;
485     @@ -428,7 +431,7 @@ static inline int map_writesg_phys_cpl(struct device *dev,
486     pr_err("CHCR : DMA mapping failed\n");
487     return -EINVAL;
488     }
489     - write_phys_cpl(phys_cpl, sg, sg_param);
490     + write_phys_cpl(phys_cpl, sg, sg_param, pci_chan_id);
491     return 0;
492     }
493    
494     @@ -608,7 +611,7 @@ static inline void create_wreq(struct chcr_context *ctx,
495     is_iv ? iv_loc : IV_NOP, !!lcb,
496     ctx->tx_qidx);
497    
498     - chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
499     + chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
500     qid);
501     chcr_req->ulptx.len = htonl((DIV_ROUND_UP((calc_tx_flits_ofld(skb) * 8),
502     16) - ((sizeof(chcr_req->wreq)) >> 4)));
503     @@ -698,7 +701,8 @@ static struct sk_buff *create_cipher_wr(struct cipher_wr_param *wrparam)
504     sg_param.obsize = wrparam->bytes;
505     sg_param.qid = wrparam->qid;
506     error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
507     - reqctx->dst, &sg_param);
508     + reqctx->dst, &sg_param,
509     + ctx->pci_chan_id);
510     if (error)
511     goto map_fail1;
512    
513     @@ -1228,16 +1232,23 @@ static int chcr_device_init(struct chcr_context *ctx)
514     adap->vres.ncrypto_fc);
515     rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
516     txq_perchan = ntxq / u_ctx->lldi.nchan;
517     - rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
518     - rxq_idx += id % rxq_perchan;
519     - txq_idx = ctx->dev->tx_channel_id * txq_perchan;
520     - txq_idx += id % txq_perchan;
521     spin_lock(&ctx->dev->lock_chcr_dev);
522     - ctx->rx_qidx = rxq_idx;
523     - ctx->tx_qidx = txq_idx;
524     + ctx->tx_chan_id = ctx->dev->tx_channel_id;
525     ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
526     ctx->dev->rx_channel_id = 0;
527     spin_unlock(&ctx->dev->lock_chcr_dev);
528     + rxq_idx = ctx->tx_chan_id * rxq_perchan;
529     + rxq_idx += id % rxq_perchan;
530     + txq_idx = ctx->tx_chan_id * txq_perchan;
531     + txq_idx += id % txq_perchan;
532     + ctx->rx_qidx = rxq_idx;
533     + ctx->tx_qidx = txq_idx;
534     + /* Channel Id used by SGE to forward packet to Host.
535     + * Same value should be used in cpl_fw6_pld RSS_CH field
536     + * by FW. Driver programs PCI channel ID to be used in fw
537     + * at the time of queue allocation with value "pi->tx_chan"
538     + */
539     + ctx->pci_chan_id = txq_idx / txq_perchan;
540     }
541     out:
542     return err;
543     @@ -2066,7 +2077,8 @@ static struct sk_buff *create_authenc_wr(struct aead_request *req,
544     sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
545     sg_param.qid = qid;
546     error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
547     - reqctx->dst, &sg_param);
548     + reqctx->dst, &sg_param,
549     + ctx->pci_chan_id);
550     if (error)
551     goto dstmap_fail;
552    
553     @@ -2389,7 +2401,7 @@ static struct sk_buff *create_aead_ccm_wr(struct aead_request *req,
554     sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
555     sg_param.qid = qid;
556     error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
557     - reqctx->dst, &sg_param);
558     + reqctx->dst, &sg_param, ctx->pci_chan_id);
559     if (error)
560     goto dstmap_fail;
561    
562     @@ -2545,7 +2557,8 @@ static struct sk_buff *create_gcm_wr(struct aead_request *req,
563     sg_param.obsize = req->cryptlen + (op_type ? -authsize : authsize);
564     sg_param.qid = qid;
565     error = map_writesg_phys_cpl(&u_ctx->lldi.pdev->dev, phys_cpl,
566     - reqctx->dst, &sg_param);
567     + reqctx->dst, &sg_param,
568     + ctx->pci_chan_id);
569     if (error)
570     goto dstmap_fail;
571    
572     diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
573     index 30af1ee17b87..e039d9aeb651 100644
574     --- a/drivers/crypto/chelsio/chcr_crypto.h
575     +++ b/drivers/crypto/chelsio/chcr_crypto.h
576     @@ -222,6 +222,8 @@ struct chcr_context {
577     struct chcr_dev *dev;
578     unsigned char tx_qidx;
579     unsigned char rx_qidx;
580     + unsigned char tx_chan_id;
581     + unsigned char pci_chan_id;
582     struct __crypto_ctx crypto_ctx[0];
583     };
584    
585     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
586     index 9fc3d387eae3..fb36425e21ff 100644
587     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
588     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
589     @@ -231,6 +231,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
590     {
591     int i;
592    
593     + cancel_delayed_work_sync(&adev->vce.idle_work);
594     +
595     if (adev->vce.vcpu_bo == NULL)
596     return 0;
597    
598     @@ -241,7 +243,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
599     if (i == AMDGPU_MAX_VCE_HANDLES)
600     return 0;
601    
602     - cancel_delayed_work_sync(&adev->vce.idle_work);
603     /* TODO: suspending running encoding sessions isn't supported */
604     return -EINVAL;
605     }
606     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
607     index 1612d8aa6ad6..fca1b10628a6 100644
608     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
609     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
610     @@ -155,11 +155,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
611     unsigned size;
612     void *ptr;
613    
614     + cancel_delayed_work_sync(&adev->vcn.idle_work);
615     +
616     if (adev->vcn.vcpu_bo == NULL)
617     return 0;
618    
619     - cancel_delayed_work_sync(&adev->vcn.idle_work);
620     -
621     size = amdgpu_bo_size(adev->vcn.vcpu_bo);
622     ptr = adev->vcn.cpu_addr;
623    
624     diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
625     index 7bcf5702c91c..889c95d4feec 100644
626     --- a/drivers/gpu/drm/drm_syncobj.c
627     +++ b/drivers/gpu/drm/drm_syncobj.c
628     @@ -96,6 +96,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
629     {
630     int ret;
631    
632     + WARN_ON(*fence);
633     +
634     *fence = drm_syncobj_fence_get(syncobj);
635     if (*fence)
636     return 1;
637     @@ -656,6 +658,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
638    
639     if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
640     for (i = 0; i < count; ++i) {
641     + if (entries[i].fence)
642     + continue;
643     +
644     drm_syncobj_fence_get_or_add_callback(syncobjs[i],
645     &entries[i].fence,
646     &entries[i].syncobj_cb,
647     diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
648     index 16423d7ab599..17144a781aeb 100644
649     --- a/drivers/infiniband/core/ucma.c
650     +++ b/drivers/infiniband/core/ucma.c
651     @@ -1742,6 +1742,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
652     mutex_lock(&mut);
653     if (!ctx->closing) {
654     mutex_unlock(&mut);
655     + ucma_put_ctx(ctx);
656     + wait_for_completion(&ctx->comp);
657     /* rdma_destroy_id ensures that no event handlers are
658     * inflight for that id before releasing it.
659     */
660     diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
661     index 0a5a45f3ec5f..7f1c64c4ad24 100644
662     --- a/drivers/md/dm-cache-metadata.c
663     +++ b/drivers/md/dm-cache-metadata.c
664     @@ -1454,8 +1454,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
665     if (hints_valid) {
666     r = dm_array_cursor_next(&cmd->hint_cursor);
667     if (r) {
668     - DMERR("dm_array_cursor_next for hint failed");
669     - goto out;
670     + dm_array_cursor_end(&cmd->hint_cursor);
671     + hints_valid = false;
672     }
673     }
674    
675     diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
676     index a4b7c2698096..e2ea57d5376e 100644
677     --- a/drivers/md/dm-cache-target.c
678     +++ b/drivers/md/dm-cache-target.c
679     @@ -3097,8 +3097,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
680    
681     static bool can_resize(struct cache *cache, dm_cblock_t new_size)
682     {
683     - if (from_cblock(new_size) > from_cblock(cache->cache_size))
684     - return true;
685     + if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
686     + if (cache->sized) {
687     + DMERR("%s: unable to extend cache due to missing cache table reload",
688     + cache_device_name(cache));
689     + return false;
690     + }
691     + }
692    
693     /*
694     * We can't drop a dirty block when shrinking the cache.
695     diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
696     index df514507d3f1..22003895f854 100644
697     --- a/drivers/net/wireless/ath/ath10k/debug.c
698     +++ b/drivers/net/wireless/ath/ath10k/debug.c
699     @@ -1,6 +1,7 @@
700     /*
701     * Copyright (c) 2005-2011 Atheros Communications Inc.
702     * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
703     + * Copyright (c) 2018, The Linux Foundation. All rights reserved.
704     *
705     * Permission to use, copy, modify, and/or distribute this software for any
706     * purpose with or without fee is hereby granted, provided that the above
707     @@ -163,6 +164,8 @@ void ath10k_debug_print_hwfw_info(struct ath10k *ar)
708     void ath10k_debug_print_board_info(struct ath10k *ar)
709     {
710     char boardinfo[100];
711     + const struct firmware *board;
712     + u32 crc;
713    
714     if (ar->id.bmi_ids_valid)
715     scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
716     @@ -170,11 +173,16 @@ void ath10k_debug_print_board_info(struct ath10k *ar)
717     else
718     scnprintf(boardinfo, sizeof(boardinfo), "N/A");
719    
720     + board = ar->normal_mode_fw.board;
721     + if (!IS_ERR_OR_NULL(board))
722     + crc = crc32_le(0, board->data, board->size);
723     + else
724     + crc = 0;
725     +
726     ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
727     ar->bd_api,
728     boardinfo,
729     - crc32_le(0, ar->normal_mode_fw.board->data,
730     - ar->normal_mode_fw.board->size));
731     + crc);
732     }
733    
734     void ath10k_debug_print_boot_info(struct ath10k *ar)
735     diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
736     index e0d00cef0bd8..5b974bb76e6c 100644
737     --- a/drivers/net/wireless/ath/ath10k/trace.h
738     +++ b/drivers/net/wireless/ath/ath10k/trace.h
739     @@ -152,10 +152,9 @@ TRACE_EVENT(ath10k_log_dbg_dump,
740     );
741    
742     TRACE_EVENT(ath10k_wmi_cmd,
743     - TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
744     - int ret),
745     + TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
746    
747     - TP_ARGS(ar, id, buf, buf_len, ret),
748     + TP_ARGS(ar, id, buf, buf_len),
749    
750     TP_STRUCT__entry(
751     __string(device, dev_name(ar->dev))
752     @@ -163,7 +162,6 @@ TRACE_EVENT(ath10k_wmi_cmd,
753     __field(unsigned int, id)
754     __field(size_t, buf_len)
755     __dynamic_array(u8, buf, buf_len)
756     - __field(int, ret)
757     ),
758    
759     TP_fast_assign(
760     @@ -171,17 +169,15 @@ TRACE_EVENT(ath10k_wmi_cmd,
761     __assign_str(driver, dev_driver_string(ar->dev));
762     __entry->id = id;
763     __entry->buf_len = buf_len;
764     - __entry->ret = ret;
765     memcpy(__get_dynamic_array(buf), buf, buf_len);
766     ),
767    
768     TP_printk(
769     - "%s %s id %d len %zu ret %d",
770     + "%s %s id %d len %zu",
771     __get_str(driver),
772     __get_str(device),
773     __entry->id,
774     - __entry->buf_len,
775     - __entry->ret
776     + __entry->buf_len
777     )
778     );
779    
780     diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
781     index baec856af90f..b54001e97ced 100644
782     --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
783     +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
784     @@ -1486,10 +1486,10 @@ ath10k_wmi_tlv_op_gen_start_scan(struct ath10k *ar,
785     bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
786     ie_len = roundup(arg->ie_len, 4);
787     len = (sizeof(*tlv) + sizeof(*cmd)) +
788     - (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
789     - (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
790     - (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
791     - (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
792     + sizeof(*tlv) + chan_len +
793     + sizeof(*tlv) + ssid_len +
794     + sizeof(*tlv) + bssid_len +
795     + sizeof(*tlv) + ie_len;
796    
797     skb = ath10k_wmi_alloc_skb(ar, len);
798     if (!skb)
799     diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
800     index 38a97086708b..2ab5311659ea 100644
801     --- a/drivers/net/wireless/ath/ath10k/wmi.c
802     +++ b/drivers/net/wireless/ath/ath10k/wmi.c
803     @@ -1741,8 +1741,8 @@ int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
804     cmd_hdr->cmd_id = __cpu_to_le32(cmd);
805    
806     memset(skb_cb, 0, sizeof(*skb_cb));
807     + trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
808     ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
809     - trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
810    
811     if (ret)
812     goto err_pull;
813     diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
814     index 3c4c58b9fe76..3b6fb5b3bdb2 100644
815     --- a/drivers/net/xen-netback/hash.c
816     +++ b/drivers/net/xen-netback/hash.c
817     @@ -332,20 +332,22 @@ u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size)
818     u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
819     u32 off)
820     {
821     - u32 *mapping = &vif->hash.mapping[off];
822     + u32 *mapping = vif->hash.mapping;
823     struct gnttab_copy copy_op = {
824     .source.u.ref = gref,
825     .source.domid = vif->domid,
826     - .dest.u.gmfn = virt_to_gfn(mapping),
827     .dest.domid = DOMID_SELF,
828     - .dest.offset = xen_offset_in_page(mapping),
829     - .len = len * sizeof(u32),
830     + .len = len * sizeof(*mapping),
831     .flags = GNTCOPY_source_gref
832     };
833    
834     - if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
835     + if ((off + len < off) || (off + len > vif->hash.size) ||
836     + len > XEN_PAGE_SIZE / sizeof(*mapping))
837     return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
838    
839     + copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
840     + copy_op.dest.offset = xen_offset_in_page(mapping + off);
841     +
842     while (len-- != 0)
843     if (mapping[off++] >= vif->num_queues)
844     return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
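
[Note] The hash.c change above adds an unsigned-overflow check before the grant copy: with a u32 offset, off + len can wrap and slip past a naive bounds test (e.g. off = 0xffffff00, len = 0x200 wraps to 0x100), and the length is now capped in mapping entries rather than bytes. A sketch of the check pattern (the helper name is hypothetical):

    #include <stdint.h>

    /* nonzero when [off, off + len) fits inside 'size' entries,
     * rejecting unsigned wraparound first */
    static int range_ok(uint32_t off, uint32_t len, uint32_t size)
    {
            return off + len >= off && off + len <= size;
    }
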
845     diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
846     index 7deb7b5d8683..058d542647dd 100644
847     --- a/drivers/nvme/host/fc.c
848     +++ b/drivers/nvme/host/fc.c
849     @@ -2868,6 +2868,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
850     }
851    
852     if (ret) {
853     + nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
854     + cancel_work_sync(&ctrl->ctrl.reset_work);
855     + cancel_delayed_work_sync(&ctrl->connect_work);
856     +
857     /* couldn't schedule retry - fail out */
858     dev_err(ctrl->ctrl.device,
859     "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);
860     diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
861     index 985a85f281a8..7c6aff761800 100644
862     --- a/drivers/of/unittest.c
863     +++ b/drivers/of/unittest.c
864     @@ -614,6 +614,9 @@ static void __init of_unittest_parse_interrupts(void)
865     struct of_phandle_args args;
866     int i, rc;
867    
868     + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
869     + return;
870     +
871     np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
872     if (!np) {
873     pr_err("missing testcase data\n");
874     @@ -688,6 +691,9 @@ static void __init of_unittest_parse_interrupts_extended(void)
875     struct of_phandle_args args;
876     int i, rc;
877    
878     + if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
879     + return;
880     +
881     np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
882     if (!np) {
883     pr_err("missing testcase data\n");
884     @@ -844,15 +850,19 @@ static void __init of_unittest_platform_populate(void)
885     pdev = of_find_device_by_node(np);
886     unittest(pdev, "device 1 creation failed\n");
887    
888     - irq = platform_get_irq(pdev, 0);
889     - unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
890     + if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
891     + irq = platform_get_irq(pdev, 0);
892     + unittest(irq == -EPROBE_DEFER,
893     + "device deferred probe failed - %d\n", irq);
894    
895     - /* Test that a parsing failure does not return -EPROBE_DEFER */
896     - np = of_find_node_by_path("/testcase-data/testcase-device2");
897     - pdev = of_find_device_by_node(np);
898     - unittest(pdev, "device 2 creation failed\n");
899     - irq = platform_get_irq(pdev, 0);
900     - unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
901     + /* Test that a parsing failure does not return -EPROBE_DEFER */
902     + np = of_find_node_by_path("/testcase-data/testcase-device2");
903     + pdev = of_find_device_by_node(np);
904     + unittest(pdev, "device 2 creation failed\n");
905     + irq = platform_get_irq(pdev, 0);
906     + unittest(irq < 0 && irq != -EPROBE_DEFER,
907     + "device parsing error failed - %d\n", irq);
908     + }
909    
910     np = of_find_node_by_path("/testcase-data/platform-tests");
911     unittest(np, "No testcase data in device tree\n");
912     diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
913     index 22924629e64a..1af30c881566 100644
914     --- a/drivers/pci/pci.c
915     +++ b/drivers/pci/pci.c
916     @@ -1112,12 +1112,12 @@ int pci_save_state(struct pci_dev *dev)
917     EXPORT_SYMBOL(pci_save_state);
918    
919     static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
920     - u32 saved_val, int retry)
921     + u32 saved_val, int retry, bool force)
922     {
923     u32 val;
924    
925     pci_read_config_dword(pdev, offset, &val);
926     - if (val == saved_val)
927     + if (!force && val == saved_val)
928     return;
929    
930     for (;;) {
931     @@ -1136,25 +1136,36 @@ static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
932     }
933    
934     static void pci_restore_config_space_range(struct pci_dev *pdev,
935     - int start, int end, int retry)
936     + int start, int end, int retry,
937     + bool force)
938     {
939     int index;
940    
941     for (index = end; index >= start; index--)
942     pci_restore_config_dword(pdev, 4 * index,
943     pdev->saved_config_space[index],
944     - retry);
945     + retry, force);
946     }
947    
948     static void pci_restore_config_space(struct pci_dev *pdev)
949     {
950     if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
951     - pci_restore_config_space_range(pdev, 10, 15, 0);
952     + pci_restore_config_space_range(pdev, 10, 15, 0, false);
953     /* Restore BARs before the command register. */
954     - pci_restore_config_space_range(pdev, 4, 9, 10);
955     - pci_restore_config_space_range(pdev, 0, 3, 0);
956     + pci_restore_config_space_range(pdev, 4, 9, 10, false);
957     + pci_restore_config_space_range(pdev, 0, 3, 0, false);
958     + } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
959     + pci_restore_config_space_range(pdev, 12, 15, 0, false);
960     +
961     + /*
962     + * Force rewriting of prefetch registers to avoid S3 resume
963     + * issues on Intel PCI bridges that occur when these
964     + * registers are not explicitly written.
965     + */
966     + pci_restore_config_space_range(pdev, 9, 11, 0, true);
967     + pci_restore_config_space_range(pdev, 0, 8, 0, false);
968     } else {
969     - pci_restore_config_space_range(pdev, 0, 15, 0);
970     + pci_restore_config_space_range(pdev, 0, 15, 0, false);
971     }
972     }
973    
974     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
975     index 562d31073f9a..8d65b2f9ee80 100644
976     --- a/drivers/tty/tty_io.c
977     +++ b/drivers/tty/tty_io.c
978     @@ -1254,6 +1254,7 @@ static void tty_driver_remove_tty(struct tty_driver *driver, struct tty_struct *
979     static int tty_reopen(struct tty_struct *tty)
980     {
981     struct tty_driver *driver = tty->driver;
982     + int retval;
983    
984     if (driver->type == TTY_DRIVER_TYPE_PTY &&
985     driver->subtype == PTY_TYPE_MASTER)
986     @@ -1267,10 +1268,14 @@ static int tty_reopen(struct tty_struct *tty)
987    
988     tty->count++;
989    
990     - if (!tty->ldisc)
991     - return tty_ldisc_reinit(tty, tty->termios.c_line);
992     + if (tty->ldisc)
993     + return 0;
994    
995     - return 0;
996     + retval = tty_ldisc_reinit(tty, tty->termios.c_line);
997     + if (retval)
998     + tty->count--;
999     +
1000     + return retval;
1001     }
1002    
1003     /**
1004     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
1005     index feaa0d8f830a..9f6f402470ac 100644
1006     --- a/drivers/usb/class/cdc-acm.c
1007     +++ b/drivers/usb/class/cdc-acm.c
1008     @@ -1527,6 +1527,7 @@ static void acm_disconnect(struct usb_interface *intf)
1009     {
1010     struct acm *acm = usb_get_intfdata(intf);
1011     struct tty_struct *tty;
1012     + int i;
1013    
1014     /* sibling interface is already cleaning up */
1015     if (!acm)
1016     @@ -1557,6 +1558,11 @@ static void acm_disconnect(struct usb_interface *intf)
1017    
1018     tty_unregister_device(acm_tty_driver, acm->minor);
1019    
1020     + usb_free_urb(acm->ctrlurb);
1021     + for (i = 0; i < ACM_NW; i++)
1022     + usb_free_urb(acm->wb[i].urb);
1023     + for (i = 0; i < acm->rx_buflimit; i++)
1024     + usb_free_urb(acm->read_urbs[i]);
1025     acm_write_buffers_free(acm);
1026     usb_free_coherent(acm->dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
1027     acm_read_buffers_free(acm);
1028     diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
1029     index 8fb60657ed4f..510d28a9d190 100644
1030     --- a/drivers/usb/host/xhci-mtk.c
1031     +++ b/drivers/usb/host/xhci-mtk.c
1032     @@ -780,10 +780,10 @@ static int __maybe_unused xhci_mtk_resume(struct device *dev)
1033     xhci_mtk_host_enable(mtk);
1034    
1035     xhci_dbg(xhci, "%s: restart port polling\n", __func__);
1036     - set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1037     - usb_hcd_poll_rh_status(hcd);
1038     set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
1039     usb_hcd_poll_rh_status(xhci->shared_hcd);
1040     + set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
1041     + usb_hcd_poll_rh_status(hcd);
1042     return 0;
1043     }
1044    
1045     diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1046     index 838d37e79fa2..9218f506f8e3 100644
1047     --- a/drivers/usb/host/xhci-pci.c
1048     +++ b/drivers/usb/host/xhci-pci.c
1049     @@ -196,6 +196,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1050     }
1051     if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
1052     (pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
1053     + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
1054     + pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
1055     pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
1056     pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
1057     xhci->quirks |= XHCI_MISSING_CAS;
1058     diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
1059     index 2674da40d9cd..6d6acf2c07c3 100644
1060     --- a/drivers/usb/serial/usb-serial-simple.c
1061     +++ b/drivers/usb/serial/usb-serial-simple.c
1062     @@ -87,7 +87,8 @@ DEVICE(moto_modem, MOTO_IDS);
1063    
1064     /* Motorola Tetra driver */
1065     #define MOTOROLA_TETRA_IDS() \
1066     - { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
1067     + { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
1068     + { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
1069     DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
1070    
1071     /* Novatel Wireless GPS driver */
1072     diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1073     index ef69273074ba..a3edb20ea4c3 100644
1074     --- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1075     +++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
1076     @@ -496,6 +496,9 @@ static int omapfb_memory_read(struct fb_info *fbi,
1077     if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
1078     return -EFAULT;
1079    
1080     + if (mr->w > 4096 || mr->h > 4096)
1081     + return -EINVAL;
1082     +
1083     if (mr->w * mr->h * 3 > mr->buffer_size)
1084     return -EINVAL;
1085    
1086     @@ -509,7 +512,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
1087     mr->x, mr->y, mr->w, mr->h);
1088    
1089     if (r > 0) {
1090     - if (copy_to_user(mr->buffer, buf, mr->buffer_size))
1091     + if (copy_to_user(mr->buffer, buf, r))
1092     r = -EFAULT;
1093     }
1094    
1095     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
1096     index 36c9fbf70d44..d9873aa014a6 100644
1097     --- a/drivers/virtio/virtio_balloon.c
1098     +++ b/drivers/virtio/virtio_balloon.c
1099     @@ -143,16 +143,17 @@ static void set_page_pfns(struct virtio_balloon *vb,
1100    
1101     static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
1102     {
1103     - struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info;
1104     unsigned num_allocated_pages;
1105     + unsigned num_pfns;
1106     + struct page *page;
1107     + LIST_HEAD(pages);
1108    
1109     /* We can only do one array worth at a time. */
1110     num = min(num, ARRAY_SIZE(vb->pfns));
1111    
1112     - mutex_lock(&vb->balloon_lock);
1113     - for (vb->num_pfns = 0; vb->num_pfns < num;
1114     - vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
1115     - struct page *page = balloon_page_enqueue(vb_dev_info);
1116     + for (num_pfns = 0; num_pfns < num;
1117     + num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) {
1118     + struct page *page = balloon_page_alloc();
1119    
1120     if (!page) {
1121     dev_info_ratelimited(&vb->vdev->dev,
1122     @@ -162,11 +163,23 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
1123     msleep(200);
1124     break;
1125     }
1126     +
1127     + balloon_page_push(&pages, page);
1128     + }
1129     +
1130     + mutex_lock(&vb->balloon_lock);
1131     +
1132     + vb->num_pfns = 0;
1133     +
1134     + while ((page = balloon_page_pop(&pages))) {
1135     + balloon_page_enqueue(&vb->vb_dev_info, page);
1136     +
1137     set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
1138     vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
1139     if (!virtio_has_feature(vb->vdev,
1140     VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
1141     adjust_managed_page_count(page, -1);
1142     + vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE;
1143     }
1144    
1145     num_allocated_pages = vb->num_pfns;
1146     diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
1147     index c282e21f5b5e..41fce930f44c 100644
1148     --- a/fs/f2fs/checkpoint.c
1149     +++ b/fs/f2fs/checkpoint.c
1150     @@ -708,6 +708,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1151    
1152     crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
1153     if (crc_offset > (blk_size - sizeof(__le32))) {
1154     + f2fs_put_page(*cp_page, 1);
1155     f2fs_msg(sbi->sb, KERN_WARNING,
1156     "invalid crc_offset: %zu", crc_offset);
1157     return -EINVAL;
1158     @@ -715,6 +716,7 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
1159    
1160     crc = cur_cp_crc(*cp_block);
1161     if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
1162     + f2fs_put_page(*cp_page, 1);
1163     f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
1164     return -EINVAL;
1165     }
1166     @@ -734,14 +736,14 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1167     err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1168     &cp_page_1, version);
1169     if (err)
1170     - goto invalid_cp1;
1171     + return NULL;
1172     pre_version = *version;
1173    
1174     cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
1175     err = get_checkpoint_version(sbi, cp_addr, &cp_block,
1176     &cp_page_2, version);
1177     if (err)
1178     - goto invalid_cp2;
1179     + goto invalid_cp;
1180     cur_version = *version;
1181    
1182     if (cur_version == pre_version) {
1183     @@ -749,9 +751,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
1184     f2fs_put_page(cp_page_2, 1);
1185     return cp_page_1;
1186     }
1187     -invalid_cp2:
1188     f2fs_put_page(cp_page_2, 1);
1189     -invalid_cp1:
1190     +invalid_cp:
1191     f2fs_put_page(cp_page_1, 1);
1192     return NULL;
1193     }
1194     diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
1195     index e1cd3dcf5a03..ad827cf642fe 100644
1196     --- a/fs/ubifs/super.c
1197     +++ b/fs/ubifs/super.c
1198     @@ -1930,6 +1930,9 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
1199     int dev, vol;
1200     char *endptr;
1201    
1202     + if (!name || !*name)
1203     + return ERR_PTR(-EINVAL);
1204     +
1205     /* First, try to open using the device node path method */
1206     ubi = ubi_open_volume_path(name, mode);
1207     if (!IS_ERR(ubi))
1208     diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
1209     index fbbe6da40fed..53051f3d8f25 100644
1210     --- a/include/linux/balloon_compaction.h
1211     +++ b/include/linux/balloon_compaction.h
1212     @@ -50,6 +50,7 @@
1213     #include <linux/gfp.h>
1214     #include <linux/err.h>
1215     #include <linux/fs.h>
1216     +#include <linux/list.h>
1217    
1218     /*
1219     * Balloon device information descriptor.
1220     @@ -67,7 +68,9 @@ struct balloon_dev_info {
1221     struct inode *inode;
1222     };
1223    
1224     -extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
1225     +extern struct page *balloon_page_alloc(void);
1226     +extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
1227     + struct page *page);
1228     extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
1229    
1230     static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
1231     @@ -193,4 +196,34 @@ static inline gfp_t balloon_mapping_gfp_mask(void)
1232     }
1233    
1234     #endif /* CONFIG_BALLOON_COMPACTION */
1235     +
1236     +/*
1237     + * balloon_page_push - insert a page into a page list.
1238     + * @head : pointer to list
1239     + * @page : page to be added
1240     + *
1241     + * Caller must ensure the page is private and protect the list.
1242     + */
1243     +static inline void balloon_page_push(struct list_head *pages, struct page *page)
1244     +{
1245     + list_add(&page->lru, pages);
1246     +}
1247     +
1248     +/*
1249     + * balloon_page_pop - remove a page from a page list.
1250     + * @head : pointer to list
1251     + * @page : page to be added
1252     + *
1253     + * Caller must ensure the page is private and protect the list.
1254     + */
1255     +static inline struct page *balloon_page_pop(struct list_head *pages)
1256     +{
1257     + struct page *page = list_first_entry_or_null(pages, struct page, lru);
1258     +
1259     + if (!page)
1260     + return NULL;
1261     +
1262     + list_del(&page->lru);
1263     + return page;
1264     +}
1265     #endif /* _LINUX_BALLOON_COMPACTION_H */
1266     diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
1267     index 82a25880714a..7aa2de25c09c 100644
1268     --- a/include/linux/hugetlb.h
1269     +++ b/include/linux/hugetlb.h
1270     @@ -140,6 +140,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
1271     pte_t *huge_pte_offset(struct mm_struct *mm,
1272     unsigned long addr, unsigned long sz);
1273     int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
1274     +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1275     + unsigned long *start, unsigned long *end);
1276     struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
1277     int write);
1278     struct page *follow_huge_pd(struct vm_area_struct *vma,
1279     @@ -169,6 +171,18 @@ static inline unsigned long hugetlb_total_pages(void)
1280     return 0;
1281     }
1282    
1283     +static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
1284     + pte_t *ptep)
1285     +{
1286     + return 0;
1287     +}
1288     +
1289     +static inline void adjust_range_if_pmd_sharing_possible(
1290     + struct vm_area_struct *vma,
1291     + unsigned long *start, unsigned long *end)
1292     +{
1293     +}
1294     +
1295     #define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
1296     #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
1297     #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
1298     diff --git a/include/linux/mm.h b/include/linux/mm.h
1299     index a26cf767407e..58f2263de4de 100644
1300     --- a/include/linux/mm.h
1301     +++ b/include/linux/mm.h
1302     @@ -2322,6 +2322,12 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1303     return vma;
1304     }
1305    
1306     +static inline bool range_in_vma(struct vm_area_struct *vma,
1307     + unsigned long start, unsigned long end)
1308     +{
1309     + return (vma && vma->vm_start <= start && end <= vma->vm_end);
1310     +}
1311     +
1312     #ifdef CONFIG_MMU
1313     pgprot_t vm_get_page_prot(unsigned long vm_flags);
1314     void vma_set_page_prot(struct vm_area_struct *vma);
1315     diff --git a/kernel/events/core.c b/kernel/events/core.c
1316     index 812ebf1cbb87..4dbce29a9313 100644
1317     --- a/kernel/events/core.c
1318     +++ b/kernel/events/core.c
1319     @@ -3757,6 +3757,12 @@ int perf_event_read_local(struct perf_event *event, u64 *value)
1320     goto out;
1321     }
1322    
1323     + /* If this is a pinned event it must be running on this CPU */
1324     + if (event->attr.pinned && event->oncpu != smp_processor_id()) {
1325     + ret = -EBUSY;
1326     + goto out;
1327     + }
1328     +
1329     /*
1330     * If the event is currently on this CPU, its either a per-task event,
1331     * or local to this CPU. Furthermore it means its ACTIVE (otherwise
1332     diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
1333     index 68d28924ba79..ef858d547e2d 100644
1334     --- a/mm/balloon_compaction.c
1335     +++ b/mm/balloon_compaction.c
1336     @@ -10,23 +10,38 @@
1337     #include <linux/export.h>
1338     #include <linux/balloon_compaction.h>
1339    
1340     +/*
1341     + * balloon_page_alloc - allocates a new page for insertion into the balloon
1342     + * page list.
1343     + *
1344     + * Driver must call it to properly allocate a new enlisted balloon page.
1345     + * Driver must call balloon_page_enqueue before definitively removing it from
1346     + * the guest system. This function returns the page address for the recently
1347     + * allocated page or NULL in the case we fail to allocate a new page this turn.
1348     + */
1349     +struct page *balloon_page_alloc(void)
1350     +{
1351     + struct page *page = alloc_page(balloon_mapping_gfp_mask() |
1352     + __GFP_NOMEMALLOC | __GFP_NORETRY);
1353     + return page;
1354     +}
1355     +EXPORT_SYMBOL_GPL(balloon_page_alloc);
1356     +
1357     /*
1358     * balloon_page_enqueue - allocates a new page and inserts it into the balloon
1359     * page list.
1360     * @b_dev_info: balloon device descriptor where we will insert a new page to
1361     + * @page: new page to enqueue - allocated using balloon_page_alloc.
1362     *
1363     - * Driver must call it to properly allocate a new enlisted balloon page
1364     + * Driver must call it to properly enqueue a new allocated balloon page
1365     * before definitively removing it from the guest system.
1366     * This function returns the page address for the recently enqueued page or
1367     * NULL in the case we fail to allocate a new page this turn.
1368     */
1369     -struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
1370     +void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
1371     + struct page *page)
1372     {
1373     unsigned long flags;
1374     - struct page *page = alloc_page(balloon_mapping_gfp_mask() |
1375     - __GFP_NOMEMALLOC | __GFP_NORETRY);
1376     - if (!page)
1377     - return NULL;
1378    
1379     /*
1380     * Block others from accessing the 'page' when we get around to
1381     @@ -39,7 +54,6 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info)
1382     __count_vm_event(BALLOON_INFLATE);
1383     spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
1384     unlock_page(page);
1385     - return page;
1386     }
1387     EXPORT_SYMBOL_GPL(balloon_page_enqueue);
1388    
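
[Note] The balloon_compaction.c change above splits page allocation out of balloon_page_enqueue() so allocation can happen without holding the balloon lock; the virtio_balloon.c hunk earlier in this patch is the real caller. A condensed sketch of the calling pattern the new API enables (the struct and function names here are hypothetical stand-ins; error handling and PFN bookkeeping omitted):

    #include <linux/balloon_compaction.h>
    #include <linux/mutex.h>

    /* mirrors the struct virtio_balloon fields used by the fill path */
    struct my_balloon {
            struct mutex balloon_lock;
            struct balloon_dev_info vb_dev_info;
    };

    static void fill_example(struct my_balloon *vb, unsigned int want)
    {
            LIST_HEAD(pages);
            struct page *page;
            unsigned int n;

            /* allocate outside the balloon lock */
            for (n = 0; n < want; n++) {
                    page = balloon_page_alloc();
                    if (!page)
                            break;
                    balloon_page_push(&pages, page);
            }

            /* enqueue under the lock */
            mutex_lock(&vb->balloon_lock);
            while ((page = balloon_page_pop(&pages)))
                    balloon_page_enqueue(&vb->vb_dev_info, page);
            mutex_unlock(&vb->balloon_lock);
    }
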
1389     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
1390     index 255469f78217..174612f8339c 100644
1391     --- a/mm/huge_memory.c
1392     +++ b/mm/huge_memory.c
1393     @@ -2886,7 +2886,7 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new)
1394     flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE);
1395     page_add_anon_rmap(new, vma, mmun_start, true);
1396     set_pmd_at(mm, mmun_start, pvmw->pmd, pmde);
1397     - if (vma->vm_flags & VM_LOCKED)
1398     + if ((vma->vm_flags & VM_LOCKED) && !PageDoubleMap(new))
1399     mlock_vma_page(new);
1400     update_mmu_cache_pmd(vma, address, pvmw->pmd);
1401     }
1402     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1403     index dfd2947e046e..9801dc0250e2 100644
1404     --- a/mm/hugetlb.c
1405     +++ b/mm/hugetlb.c
1406     @@ -4517,12 +4517,40 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
1407     /*
1408     * check on proper vm_flags and page table alignment
1409     */
1410     - if (vma->vm_flags & VM_MAYSHARE &&
1411     - vma->vm_start <= base && end <= vma->vm_end)
1412     + if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
1413     return true;
1414     return false;
1415     }
1416    
1417     +/*
1418     + * Determine if start,end range within vma could be mapped by shared pmd.
1419     + * If yes, adjust start and end to cover range associated with possible
1420     + * shared pmd mappings.
1421     + */
1422     +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1423     + unsigned long *start, unsigned long *end)
1424     +{
1425     + unsigned long check_addr = *start;
1426     +
1427     + if (!(vma->vm_flags & VM_MAYSHARE))
1428     + return;
1429     +
1430     + for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
1431     + unsigned long a_start = check_addr & PUD_MASK;
1432     + unsigned long a_end = a_start + PUD_SIZE;
1433     +
1434     + /*
1435     + * If sharing is possible, adjust start/end if necessary.
1436     + */
1437     + if (range_in_vma(vma, a_start, a_end)) {
1438     + if (a_start < *start)
1439     + *start = a_start;
1440     + if (a_end > *end)
1441     + *end = a_end;
1442     + }
1443     + }
1444     +}
1445     +
1446     /*
1447     * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
1448     * and returns the corresponding pte. While this is not necessary for the
1449     @@ -4620,6 +4648,11 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
1450     {
1451     return 0;
1452     }
1453     +
1454     +void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
1455     + unsigned long *start, unsigned long *end)
1456     +{
1457     +}
1458     #define want_pmd_share() (0)
1459     #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
1460    
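
adjust_range_if_pmd_sharing_possible() widens the invalidation range to whole PUD_SIZE-aligned units whenever a shared PMD could be in play, so that a later huge_pmd_unshare() is fully covered by the mmu-notifier and TLB flushes issued over start/end. A small worked example of the rounding (x86-64 assumed, PUD_SIZE = 1 GiB; the addresses are made up for illustration):

/* Worked example, assuming PUD_SIZE = 0x40000000:
 *
 *   *start  = 0x7f4340200000, *end = 0x7f4340400000
 *   a_start = *start & PUD_MASK     = 0x7f4340000000
 *   a_end   = a_start + PUD_SIZE    = 0x7f4380000000
 *
 * If [a_start, a_end) lies entirely inside the VM_MAYSHARE vma, the
 * range is expanded to [0x7f4340000000, 0x7f4380000000); otherwise it
 * is left untouched, since sharing is impossible for that PUD.
 */
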
1461     diff --git a/mm/migrate.c b/mm/migrate.c
1462     index 1236449b4777..cbb025239071 100644
1463     --- a/mm/migrate.c
1464     +++ b/mm/migrate.c
1465     @@ -274,6 +274,9 @@ static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
1466     if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
1467     mlock_vma_page(new);
1468    
1469     + if (PageTransHuge(page) && PageMlocked(page))
1470     + clear_page_mlock(page);
1471     +
1472     /* No need to invalidate - it was non-present before */
1473     update_mmu_cache(vma, pvmw.address, pvmw.pte);
1474     }
1475     diff --git a/mm/rmap.c b/mm/rmap.c
1476     index 97edcf44d88c..8bd2ddd8febd 100644
1477     --- a/mm/rmap.c
1478     +++ b/mm/rmap.c
1479     @@ -1358,11 +1358,21 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1480     }
1481    
1482     /*
1483     - * We have to assume the worse case ie pmd for invalidation. Note that
1484     - * the page can not be free in this function as call of try_to_unmap()
1485     - * must hold a reference on the page.
1486     + * For THP, we have to assume the worse case ie pmd for invalidation.
1487     + * For hugetlb, it could be much worse if we need to do pud
1488     + * invalidation in the case of pmd sharing.
1489     + *
1490     + * Note that the page can not be free in this function as call of
1491     + * try_to_unmap() must hold a reference on the page.
1492     */
1493     end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page)));
1494     + if (PageHuge(page)) {
1495     + /*
1496     + * If sharing is possible, start and end will be adjusted
1497     + * accordingly.
1498     + */
1499     + adjust_range_if_pmd_sharing_possible(vma, &start, &end);
1500     + }
1501     mmu_notifier_invalidate_range_start(vma->vm_mm, start, end);
1502    
1503     while (page_vma_mapped_walk(&pvmw)) {
1504     @@ -1408,6 +1418,32 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
1505     subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte);
1506     address = pvmw.address;
1507    
1508     + if (PageHuge(page)) {
1509     + if (huge_pmd_unshare(mm, &address, pvmw.pte)) {
1510     + /*
1511     + * huge_pmd_unshare unmapped an entire PMD
1512     + * page. There is no way of knowing exactly
1513     + * which PMDs may be cached for this mm, so
1514     + * we must flush them all. start/end were
1515     + * already adjusted above to cover this range.
1516     + */
1517     + flush_cache_range(vma, start, end);
1518     + flush_tlb_range(vma, start, end);
1519     + mmu_notifier_invalidate_range(mm, start, end);
1520     +
1521     + /*
1522     + * The ref count of the PMD page was dropped
1523     + * which is part of the way map counting
1524     + * is done for shared PMDs. Return 'true'
1525     + * here. When there is no other sharing,
1526     + * huge_pmd_unshare returns false and we will
1527     + * unmap the actual page and drop map count
1528     + * to zero.
1529     + */
1530     + page_vma_mapped_walk_done(&pvmw);
1531     + break;
1532     + }
1533     + }
1534    
1535     if (IS_ENABLED(CONFIG_MIGRATION) &&
1536     (flags & TTU_MIGRATION) &&
1537     diff --git a/mm/vmstat.c b/mm/vmstat.c
1538     index 4bb13e72ac97..2bdc962b2dfe 100644
1539     --- a/mm/vmstat.c
1540     +++ b/mm/vmstat.c
1541     @@ -1203,6 +1203,9 @@ const char * const vmstat_text[] = {
1542     #ifdef CONFIG_SMP
1543     "nr_tlb_remote_flush",
1544     "nr_tlb_remote_flush_received",
1545     +#else
1546     + "", /* nr_tlb_remote_flush */
1547     + "", /* nr_tlb_remote_flush_received */
1548     #endif /* CONFIG_SMP */
1549     "nr_tlb_local_flush_all",
1550     "nr_tlb_local_flush_one",
1551     diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
1552     index b456b882a6ea..63558335e41e 100644
1553     --- a/net/mac80211/cfg.c
1554     +++ b/net/mac80211/cfg.c
1555     @@ -426,7 +426,7 @@ static int ieee80211_add_key(struct wiphy *wiphy, struct net_device *dev,
1556     case NL80211_IFTYPE_AP:
1557     case NL80211_IFTYPE_AP_VLAN:
1558     /* Keys without a station are used for TX only */
1559     - if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
1560     + if (sta && test_sta_flag(sta, WLAN_STA_MFP))
1561     key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
1562     break;
1563     case NL80211_IFTYPE_ADHOC:
1564     diff --git a/net/rds/ib.h b/net/rds/ib.h
1565     index 86a8578d95b8..7db93f7f5c61 100644
1566     --- a/net/rds/ib.h
1567     +++ b/net/rds/ib.h
1568     @@ -373,7 +373,7 @@ void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
1569     int rds_ib_recv_init(void);
1570     void rds_ib_recv_exit(void);
1571     int rds_ib_recv_path(struct rds_conn_path *conn);
1572     -int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic);
1573     +int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
1574     void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
1575     void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
1576     void rds_ib_inc_free(struct rds_incoming *inc);
1577     diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
1578     index 6e721c449c4b..e086395a2355 100644
1579     --- a/net/rds/ib_cm.c
1580     +++ b/net/rds/ib_cm.c
1581     @@ -946,7 +946,7 @@ int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp)
1582     if (!ic)
1583     return -ENOMEM;
1584    
1585     - ret = rds_ib_recv_alloc_caches(ic);
1586     + ret = rds_ib_recv_alloc_caches(ic, gfp);
1587     if (ret) {
1588     kfree(ic);
1589     return ret;
1590     diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
1591     index b4e421aa9727..918d2e676b9b 100644
1592     --- a/net/rds/ib_recv.c
1593     +++ b/net/rds/ib_recv.c
1594     @@ -98,12 +98,12 @@ static void rds_ib_cache_xfer_to_ready(struct rds_ib_refill_cache *cache)
1595     }
1596     }
1597    
1598     -static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
1599     +static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache, gfp_t gfp)
1600     {
1601     struct rds_ib_cache_head *head;
1602     int cpu;
1603    
1604     - cache->percpu = alloc_percpu(struct rds_ib_cache_head);
1605     + cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
1606     if (!cache->percpu)
1607     return -ENOMEM;
1608    
1609     @@ -118,13 +118,13 @@ static int rds_ib_recv_alloc_cache(struct rds_ib_refill_cache *cache)
1610     return 0;
1611     }
1612    
1613     -int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic)
1614     +int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp)
1615     {
1616     int ret;
1617    
1618     - ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs);
1619     + ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp);
1620     if (!ret) {
1621     - ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags);
1622     + ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp);
1623     if (ret)
1624     free_percpu(ic->i_cache_incs.percpu);
1625     }
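
The RDS change threads the caller's gfp mask down to the per-CPU cache allocation; alloc_percpu() always allocates with GFP_KERNEL, which is presumably unsafe on a connection-allocation path that passes a non-blocking mask. A minimal sketch of the difference (stand-alone fragment, variable names hypothetical):

#include <linux/percpu.h>

/* Illustration only: alloc_percpu(T) behaves like
 * alloc_percpu_gfp(T, GFP_KERNEL); forwarding the caller's gfp keeps a
 * non-sleeping allocation path from being forced into GFP_KERNEL here.
 */
struct rds_ib_cache_head __percpu *heads;

heads = alloc_percpu_gfp(struct rds_ib_cache_head, gfp);
if (!heads)
	return -ENOMEM;
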
1626     diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
1627     index 615fdc63452e..e37653b0f2d0 100644
1628     --- a/tools/perf/builtin-script.c
1629     +++ b/tools/perf/builtin-script.c
1630     @@ -25,6 +25,7 @@
1631     #include "util/string2.h"
1632     #include "util/thread-stack.h"
1633     #include "util/time-utils.h"
1634     +#include "util/path.h"
1635     #include "print_binary.h"
1636     #include <linux/bitmap.h>
1637     #include <linux/kernel.h>
1638     @@ -2129,19 +2130,6 @@ out:
1639     return rc;
1640     }
1641    
1642     -/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
1643     -static int is_directory(const char *base_path, const struct dirent *dent)
1644     -{
1645     - char path[PATH_MAX];
1646     - struct stat st;
1647     -
1648     - sprintf(path, "%s/%s", base_path, dent->d_name);
1649     - if (stat(path, &st))
1650     - return 0;
1651     -
1652     - return S_ISDIR(st.st_mode);
1653     -}
1654     -
1655     #define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
1656     while ((lang_dirent = readdir(scripts_dir)) != NULL) \
1657     if ((lang_dirent->d_type == DT_DIR || \
1658     diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
1659     index dac76ac117c1..398d4cc2f0e4 100644
1660     --- a/tools/perf/util/annotate.c
1661     +++ b/tools/perf/util/annotate.c
1662     @@ -1432,7 +1432,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1663     struct arch **parch, char *cpuid)
1664     {
1665     struct dso *dso = map->dso;
1666     - char command[PATH_MAX * 2];
1667     + char *command;
1668     struct arch *arch = NULL;
1669     FILE *file;
1670     char symfs_filename[PATH_MAX];
1671     @@ -1496,7 +1496,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1672     strcpy(symfs_filename, tmp);
1673     }
1674    
1675     - snprintf(command, sizeof(command),
1676     + err = asprintf(&command,
1677     "%s %s%s --start-address=0x%016" PRIx64
1678     " --stop-address=0x%016" PRIx64
1679     " -l -d %s %s -C \"%s\" 2>/dev/null|grep -v \"%s:\"|expand",
1680     @@ -1509,12 +1509,17 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1681     symbol_conf.annotate_src ? "-S" : "",
1682     symfs_filename, symfs_filename);
1683    
1684     + if (err < 0) {
1685     + pr_err("Failure allocating memory for the command to run\n");
1686     + goto out_remove_tmp;
1687     + }
1688     +
1689     pr_debug("Executing: %s\n", command);
1690    
1691     err = -1;
1692     if (pipe(stdout_fd) < 0) {
1693     pr_err("Failure creating the pipe to run %s\n", command);
1694     - goto out_remove_tmp;
1695     + goto out_free_command;
1696     }
1697    
1698     pid = fork();
1699     @@ -1541,7 +1546,7 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1700     * If we were using debug info should retry with
1701     * original binary.
1702     */
1703     - goto out_remove_tmp;
1704     + goto out_free_command;
1705     }
1706    
1707     nline = 0;
1708     @@ -1570,6 +1575,8 @@ int symbol__disassemble(struct symbol *sym, struct map *map,
1709    
1710     fclose(file);
1711     err = 0;
1712     +out_free_command:
1713     + free(command);
1714     out_remove_tmp:
1715     close(stdout_fd[0]);
1716    
1717     @@ -1583,7 +1590,7 @@ out:
1718    
1719     out_close_stdout:
1720     close(stdout_fd[1]);
1721     - goto out_remove_tmp;
1722     + goto out_free_command;
1723     }
1724    
1725     static void insert_source_line(struct rb_root *root, struct source_line *src_line)
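
The annotate change replaces a fixed char[PATH_MAX * 2] buffer with an asprintf()-allocated command string, adding a matching free() on every exit path. A stand-alone sketch of the same pattern (hypothetical function, not the perf code itself):

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

/* asprintf() sizes the buffer itself, so long objdump command lines can
 * no longer be silently truncated; on failure the pointer is undefined
 * and must not be freed.
 */
static char *build_objdump_cmd(const char *objdump, const char *file)
{
	char *cmd = NULL;

	if (asprintf(&cmd, "%s -d -l -C \"%s\"", objdump, file) < 0)
		return NULL;

	return cmd;	/* caller must free() */
}
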
1726     diff --git a/tools/perf/util/path.c b/tools/perf/util/path.c
1727     index 933f5c6bffb4..ca56ba2dd3da 100644
1728     --- a/tools/perf/util/path.c
1729     +++ b/tools/perf/util/path.c
1730     @@ -18,6 +18,7 @@
1731     #include <stdio.h>
1732     #include <sys/types.h>
1733     #include <sys/stat.h>
1734     +#include <dirent.h>
1735     #include <unistd.h>
1736    
1737     static char bad_path[] = "/bad-path/";
1738     @@ -77,3 +78,16 @@ bool is_regular_file(const char *file)
1739    
1740     return S_ISREG(st.st_mode);
1741     }
1742     +
1743     +/* Helper function for filesystems that return a dent->d_type DT_UNKNOWN */
1744     +bool is_directory(const char *base_path, const struct dirent *dent)
1745     +{
1746     + char path[PATH_MAX];
1747     + struct stat st;
1748     +
1749     + sprintf(path, "%s/%s", base_path, dent->d_name);
1750     + if (stat(path, &st))
1751     + return false;
1752     +
1753     + return S_ISDIR(st.st_mode);
1754     +}
1755     diff --git a/tools/perf/util/path.h b/tools/perf/util/path.h
1756     index 14a254ada7eb..f014f905df50 100644
1757     --- a/tools/perf/util/path.h
1758     +++ b/tools/perf/util/path.h
1759     @@ -2,9 +2,12 @@
1760     #ifndef _PERF_PATH_H
1761     #define _PERF_PATH_H
1762    
1763     +struct dirent;
1764     +
1765     int path__join(char *bf, size_t size, const char *path1, const char *path2);
1766     int path__join3(char *bf, size_t size, const char *path1, const char *path2, const char *path3);
1767    
1768     bool is_regular_file(const char *file);
1769     +bool is_directory(const char *base_path, const struct dirent *dent);
1770    
1771     #endif /* _PERF_PATH_H */
1772     diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py
1773     index af415febbc46..da4df7fd43a2 100644
1774     --- a/tools/perf/util/setup.py
1775     +++ b/tools/perf/util/setup.py
1776     @@ -28,6 +28,8 @@ class install_lib(_install_lib):
1777     cflags = getenv('CFLAGS', '').split()
1778     # switch off several checks (need to be at the end of cflags list)
1779     cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
1780     +if cc != "clang":
1781     + cflags += ['-Wno-cast-function-type' ]
1782    
1783     src_perf = getenv('srctree') + '/tools/perf'
1784     build_lib = getenv('PYTHON_EXTBUILD_LIB')
1785     diff --git a/tools/testing/selftests/x86/test_vdso.c b/tools/testing/selftests/x86/test_vdso.c
1786     index 235259011704..35edd61d1663 100644
1787     --- a/tools/testing/selftests/x86/test_vdso.c
1788     +++ b/tools/testing/selftests/x86/test_vdso.c
1789     @@ -17,6 +17,7 @@
1790     #include <errno.h>
1791     #include <sched.h>
1792     #include <stdbool.h>
1793     +#include <limits.h>
1794    
1795     #ifndef SYS_getcpu
1796     # ifdef __x86_64__
1797     @@ -31,6 +32,14 @@
1798    
1799     int nerrs = 0;
1800    
1801     +typedef int (*vgettime_t)(clockid_t, struct timespec *);
1802     +
1803     +vgettime_t vdso_clock_gettime;
1804     +
1805     +typedef long (*vgtod_t)(struct timeval *tv, struct timezone *tz);
1806     +
1807     +vgtod_t vdso_gettimeofday;
1808     +
1809     typedef long (*getcpu_t)(unsigned *, unsigned *, void *);
1810    
1811     getcpu_t vgetcpu;
1812     @@ -95,6 +104,15 @@ static void fill_function_pointers()
1813     printf("Warning: failed to find getcpu in vDSO\n");
1814    
1815     vgetcpu = (getcpu_t) vsyscall_getcpu();
1816     +
1817     + vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
1818     + if (!vdso_clock_gettime)
1819     + printf("Warning: failed to find clock_gettime in vDSO\n");
1820     +
1821     + vdso_gettimeofday = (vgtod_t)dlsym(vdso, "__vdso_gettimeofday");
1822     + if (!vdso_gettimeofday)
1823     + printf("Warning: failed to find gettimeofday in vDSO\n");
1824     +
1825     }
1826    
1827     static long sys_getcpu(unsigned * cpu, unsigned * node,
1828     @@ -103,6 +121,16 @@ static long sys_getcpu(unsigned * cpu, unsigned * node,
1829     return syscall(__NR_getcpu, cpu, node, cache);
1830     }
1831    
1832     +static inline int sys_clock_gettime(clockid_t id, struct timespec *ts)
1833     +{
1834     + return syscall(__NR_clock_gettime, id, ts);
1835     +}
1836     +
1837     +static inline int sys_gettimeofday(struct timeval *tv, struct timezone *tz)
1838     +{
1839     + return syscall(__NR_gettimeofday, tv, tz);
1840     +}
1841     +
1842     static void test_getcpu(void)
1843     {
1844     printf("[RUN]\tTesting getcpu...\n");
1845     @@ -155,10 +183,154 @@ static void test_getcpu(void)
1846     }
1847     }
1848    
1849     +static bool ts_leq(const struct timespec *a, const struct timespec *b)
1850     +{
1851     + if (a->tv_sec != b->tv_sec)
1852     + return a->tv_sec < b->tv_sec;
1853     + else
1854     + return a->tv_nsec <= b->tv_nsec;
1855     +}
1856     +
1857     +static bool tv_leq(const struct timeval *a, const struct timeval *b)
1858     +{
1859     + if (a->tv_sec != b->tv_sec)
1860     + return a->tv_sec < b->tv_sec;
1861     + else
1862     + return a->tv_usec <= b->tv_usec;
1863     +}
1864     +
1865     +static char const * const clocknames[] = {
1866     + [0] = "CLOCK_REALTIME",
1867     + [1] = "CLOCK_MONOTONIC",
1868     + [2] = "CLOCK_PROCESS_CPUTIME_ID",
1869     + [3] = "CLOCK_THREAD_CPUTIME_ID",
1870     + [4] = "CLOCK_MONOTONIC_RAW",
1871     + [5] = "CLOCK_REALTIME_COARSE",
1872     + [6] = "CLOCK_MONOTONIC_COARSE",
1873     + [7] = "CLOCK_BOOTTIME",
1874     + [8] = "CLOCK_REALTIME_ALARM",
1875     + [9] = "CLOCK_BOOTTIME_ALARM",
1876     + [10] = "CLOCK_SGI_CYCLE",
1877     + [11] = "CLOCK_TAI",
1878     +};
1879     +
1880     +static void test_one_clock_gettime(int clock, const char *name)
1881     +{
1882     + struct timespec start, vdso, end;
1883     + int vdso_ret, end_ret;
1884     +
1885     + printf("[RUN]\tTesting clock_gettime for clock %s (%d)...\n", name, clock);
1886     +
1887     + if (sys_clock_gettime(clock, &start) < 0) {
1888     + if (errno == EINVAL) {
1889     + vdso_ret = vdso_clock_gettime(clock, &vdso);
1890     + if (vdso_ret == -EINVAL) {
1891     + printf("[OK]\tNo such clock.\n");
1892     + } else {
1893     + printf("[FAIL]\tNo such clock, but __vdso_clock_gettime returned %d\n", vdso_ret);
1894     + nerrs++;
1895     + }
1896     + } else {
1897     + printf("[WARN]\t clock_gettime(%d) syscall returned error %d\n", clock, errno);
1898     + }
1899     + return;
1900     + }
1901     +
1902     + vdso_ret = vdso_clock_gettime(clock, &vdso);
1903     + end_ret = sys_clock_gettime(clock, &end);
1904     +
1905     + if (vdso_ret != 0 || end_ret != 0) {
1906     + printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
1907     + vdso_ret, errno);
1908     + nerrs++;
1909     + return;
1910     + }
1911     +
1912     + printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
1913     + (unsigned long long)start.tv_sec, start.tv_nsec,
1914     + (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
1915     + (unsigned long long)end.tv_sec, end.tv_nsec);
1916     +
1917     + if (!ts_leq(&start, &vdso) || !ts_leq(&vdso, &end)) {
1918     + printf("[FAIL]\tTimes are out of sequence\n");
1919     + nerrs++;
1920     + }
1921     +}
1922     +
1923     +static void test_clock_gettime(void)
1924     +{
1925     + for (int clock = 0; clock < sizeof(clocknames) / sizeof(clocknames[0]);
1926     + clock++) {
1927     + test_one_clock_gettime(clock, clocknames[clock]);
1928     + }
1929     +
1930     + /* Also test some invalid clock ids */
1931     + test_one_clock_gettime(-1, "invalid");
1932     + test_one_clock_gettime(INT_MIN, "invalid");
1933     + test_one_clock_gettime(INT_MAX, "invalid");
1934     +}
1935     +
1936     +static void test_gettimeofday(void)
1937     +{
1938     + struct timeval start, vdso, end;
1939     + struct timezone sys_tz, vdso_tz;
1940     + int vdso_ret, end_ret;
1941     +
1942     + if (!vdso_gettimeofday)
1943     + return;
1944     +
1945     + printf("[RUN]\tTesting gettimeofday...\n");
1946     +
1947     + if (sys_gettimeofday(&start, &sys_tz) < 0) {
1948     + printf("[FAIL]\tsys_gettimeofday failed (%d)\n", errno);
1949     + nerrs++;
1950     + return;
1951     + }
1952     +
1953     + vdso_ret = vdso_gettimeofday(&vdso, &vdso_tz);
1954     + end_ret = sys_gettimeofday(&end, NULL);
1955     +
1956     + if (vdso_ret != 0 || end_ret != 0) {
1957     + printf("[FAIL]\tvDSO returned %d, syscall errno=%d\n",
1958     + vdso_ret, errno);
1959     + nerrs++;
1960     + return;
1961     + }
1962     +
1963     + printf("\t%llu.%06ld %llu.%06ld %llu.%06ld\n",
1964     + (unsigned long long)start.tv_sec, start.tv_usec,
1965     + (unsigned long long)vdso.tv_sec, vdso.tv_usec,
1966     + (unsigned long long)end.tv_sec, end.tv_usec);
1967     +
1968     + if (!tv_leq(&start, &vdso) || !tv_leq(&vdso, &end)) {
1969     + printf("[FAIL]\tTimes are out of sequence\n");
1970     + nerrs++;
1971     + }
1972     +
1973     + if (sys_tz.tz_minuteswest == vdso_tz.tz_minuteswest &&
1974     + sys_tz.tz_dsttime == vdso_tz.tz_dsttime) {
1975     + printf("[OK]\ttimezones match: minuteswest=%d, dsttime=%d\n",
1976     + sys_tz.tz_minuteswest, sys_tz.tz_dsttime);
1977     + } else {
1978     + printf("[FAIL]\ttimezones do not match\n");
1979     + nerrs++;
1980     + }
1981     +
1982     + /* And make sure that passing NULL for tz doesn't crash. */
1983     + vdso_gettimeofday(&vdso, NULL);
1984     +}
1985     +
1986     int main(int argc, char **argv)
1987     {
1988     fill_function_pointers();
1989    
1990     + test_clock_gettime();
1991     + test_gettimeofday();
1992     +
1993     + /*
1994     + * Test getcpu() last so that, if something goes wrong setting affinity,
1995     + * we still run the other tests.
1996     + */
1997     test_getcpu();
1998    
1999     return nerrs ? 1 : 0;
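
The new selftest cases call __vdso_clock_gettime() and __vdso_gettimeofday() directly and sandwich each vDSO call between two real syscalls to check that the results are monotonically ordered. A minimal stand-alone sketch of the lookup-and-call pattern (assumes the vDSO can be dlopen()ed under the glibc name "linux-vdso.so.1"; build with -ldl on older glibc):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <stdio.h>
#include <time.h>

typedef int (*vgettime_t)(clockid_t, struct timespec *);

int main(void)
{
	/* The vDSO is already mapped into every process, so RTLD_NOLOAD
	 * only hands back a handle to the existing mapping.
	 */
	void *vdso = dlopen("linux-vdso.so.1", RTLD_LAZY | RTLD_NOLOAD);
	vgettime_t vdso_clock_gettime;
	struct timespec ts;

	if (!vdso)
		return 1;

	vdso_clock_gettime = (vgettime_t)dlsym(vdso, "__vdso_clock_gettime");
	if (vdso_clock_gettime && vdso_clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);

	return 0;
}
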