Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0145-4.9.46-all-fixes.patch

Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 104883 byte(s)
-added kernel-alx-legacy pkg
diff --git a/Makefile b/Makefile
index ccd6d91f616e..846ef1b57a02 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 45
+SUBLEVEL = 46
EXTRAVERSION =
NAME = Roaring Lionus

diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff6a62d..4fd6272e6c01 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -89,7 +89,9 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_SLC_FLUSH 0x904
#define ARC_REG_SLC_INVALIDATE 0x905
#define ARC_REG_SLC_RGN_START 0x914
+#define ARC_REG_SLC_RGN_START1 0x915
#define ARC_REG_SLC_RGN_END 0x916
+#define ARC_REG_SLC_RGN_END1 0x917

/* Bit val in SLC_CONTROL */
#define SLC_CTRL_IM 0x040
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index 8147583c4434..bbdfeb31dee6 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -562,6 +562,7 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned int ctrl;
+ phys_addr_t end;

spin_lock_irqsave(&lock, flags);

@@ -591,8 +592,16 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
* END needs to be setup before START (latter triggers the operation)
* END can't be same as START, so add (l2_line_sz - 1) to sz
*/
- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
- write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+ end = paddr + sz + l2_line_sz - 1;
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 0012f0353fd6..fe208b70b8b1 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -75,9 +75,27 @@ static inline void switch_mm_irqs_off(struct mm_struct *prev,
struct task_struct *tsk)
{
/* Mark this context has been used on the new CPU */
- if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next)))
+ if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(next))) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

+ /*
+ * This full barrier orders the store to the cpumask above vs
+ * a subsequent operation which allows this CPU to begin loading
+ * translations for next.
+ *
+ * When using the radix MMU that operation is the load of the
+ * MMU context id, which is then moved to SPRN_PID.
+ *
+ * For the hash MMU it is either the first load from slb_cache
+ * in switch_slb(), and/or the store of paca->mm_ctx_id in
+ * copy_mm_to_paca().
+ *
+ * On the read side the barrier is in pte_xchg(), which orders
+ * the store to the PTE vs the load of mm_cpumask.
+ */
+ smp_mb();
+ }
+
/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
tsk->thread.pgdir = next->pgd;
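
The smp_mb() added above pairs with the full barrier implied by the cmpxchg inside pte_xchg(). A schematic of the pairing, as a sketch rather than code from the patch:

/*
 * CPU0: switch_mm_irqs_off()          CPU1: PTE clear/flush path
 *   cpumask_set_cpu(..., mm_cpumask)    pte_xchg()  <- full barrier
 *   smp_mb()                            load mm_cpumask (pick IPI targets)
 *   load context id / slb_cache
 *
 * Either CPU1 observes the cpumask bit and targets CPU0 with the
 * flush, or CPU0 observes the updated PTE before it can cache a
 * stale translation.
 */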
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a80efa..68e087e807f8 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -87,6 +87,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
unsigned long *p = (unsigned long *)ptep;
__be64 prev;

+ /* See comment in switch_mm_irqs_off() */
prev = (__force __be64)__cmpxchg_u64(p, (__force unsigned long)pte_raw(old),
(__force unsigned long)pte_raw(new));

diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e0fcde..41e9d0a6cbeb 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -62,6 +62,7 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
{
unsigned long *p = (unsigned long *)ptep;

+ /* See comment in switch_mm_irqs_off() */
return pte_val(old) == __cmpxchg_u64(p, pte_val(old), pte_val(new));
}
#endif
diff --git a/arch/s390/kvm/sthyi.c b/arch/s390/kvm/sthyi.c
index 05c98bb853cf..2f04ad1ea01c 100644
--- a/arch/s390/kvm/sthyi.c
+++ b/arch/s390/kvm/sthyi.c
@@ -394,7 +394,7 @@ static int sthyi(u64 vaddr)
"srl %[cc],28\n"
: [cc] "=d" (cc)
: [code] "d" (code), [addr] "a" (addr)
- : "memory", "cc");
+ : "3", "memory", "cc");
return cc;
}

@@ -422,7 +422,7 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 3, "STHYI: fc: %llu addr: 0x%016llx", code, addr);
trace_kvm_s390_handle_sthyi(vcpu, code, addr);

- if (reg1 == reg2 || reg1 & 1 || reg2 & 1 || addr & ~PAGE_MASK)
+ if (reg1 == reg2 || reg1 & 1 || reg2 & 1)
return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

if (code & 0xffff) {
@@ -430,6 +430,9 @@ int handle_sthyi(struct kvm_vcpu *vcpu)
goto out;
}

+ if (addr & ~PAGE_MASK)
+ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
/*
* If the page has not yet been faulted in, we want to do that
* now and not after all the expensive calculations.
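
The "3" added to the clobber list in the first hunk tells gcc that the STHYI instruction writes general register 3 (its return code) behind the compiler's back; without it, gcc may keep a live value in r3 across the asm statement. As a general sketch of the rule:

/* Any register an asm statement modifies that is neither an input
 * nor an output operand must be named in the clobber list: */
asm volatile("..." /* insn that stores into r3 */
	: /* outputs */
	: /* inputs */
	: "3", "memory", "cc");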
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
index 06981cc716b6..d04111a5c615 100644
--- a/arch/sparc/kernel/pci_sun4v.c
+++ b/arch/sparc/kernel/pci_sun4v.c
@@ -1240,8 +1240,6 @@ static int pci_sun4v_probe(struct platform_device *op)
* ATU group, but ATU hcalls won't be available.
*/
hv_atu = false;
- pr_err(PFX "Could not register hvapi ATU err=%d\n",
- err);
} else {
pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
vatu_major, vatu_minor);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 970c1de3b86e..4c1b7ea18541 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -161,7 +161,13 @@ static u64 rapl_timer_ms;

static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
- return rapl_pmus->pmus[topology_logical_package_id(cpu)];
+ unsigned int pkgid = topology_logical_package_id(cpu);
+
+ /*
+ * The unsigned check also catches the '-1' return value for non
+ * existent mappings in the topology map.
+ */
+ return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}

static inline u64 rapl_read_counter(struct perf_event *event)
@@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)

/* must be done before validate_group */
pmu = cpu_to_rapl_pmu(event->cpu);
+ if (!pmu)
+ return -EINVAL;
event->cpu = pmu->cpu;
event->pmu_private = pmu;
event->hw.event_base = msr;
@@ -585,6 +593,19 @@ static int rapl_cpu_online(unsigned int cpu)
struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
int target;

+ if (!pmu) {
+ pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
+ if (!pmu)
+ return -ENOMEM;
+
+ raw_spin_lock_init(&pmu->lock);
+ INIT_LIST_HEAD(&pmu->active_list);
+ pmu->pmu = &rapl_pmus->pmu;
+ pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
+ rapl_hrtimer_init(pmu);
+
+ rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
+ }
/*
* Check if there is an online cpu in the package which collects rapl
* events already.
@@ -598,27 +619,6 @@ static int rapl_cpu_online(unsigned int cpu)
return 0;
}

-static int rapl_cpu_prepare(unsigned int cpu)
-{
- struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
-
- if (pmu)
- return 0;
-
- pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
- if (!pmu)
- return -ENOMEM;
-
- raw_spin_lock_init(&pmu->lock);
- INIT_LIST_HEAD(&pmu->active_list);
- pmu->pmu = &rapl_pmus->pmu;
- pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
- pmu->cpu = -1;
- rapl_hrtimer_init(pmu);
- rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
- return 0;
-}
-
static int rapl_check_hw_unit(bool apply_quirk)
{
u64 msr_rapl_power_unit_bits;
@@ -804,28 +804,21 @@ static int __init rapl_pmu_init(void)
* Install callbacks. Core will call them for each online cpu.
*/

- ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP",
- rapl_cpu_prepare, NULL);
- if (ret)
- goto out;
-
ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
"AP_PERF_X86_RAPL_ONLINE",
rapl_cpu_online, rapl_cpu_offline);
if (ret)
- goto out1;
+ goto out;

ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
if (ret)
- goto out2;
+ goto out1;

rapl_advertise();
return 0;

-out2:
- cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out1:
- cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
+ cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
pr_warn("Initialization failed (%d), disabled\n", ret);
cleanup_rapl_pmus();
@@ -836,7 +829,6 @@ module_init(rapl_pmu_init);
static void __exit intel_rapl_exit(void)
{
cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
- cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
perf_pmu_unregister(&rapl_pmus->pmu);
cleanup_rapl_pmus();
}
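
The unsigned comparison in cpu_to_rapl_pmu() above works because topology_logical_package_id() returns int, and -1 converted to unsigned int is the largest possible value. A minimal illustration of the idiom (hypothetical helper, not part of the patch):

static bool pkg_valid(unsigned int maxpkg)
{
	unsigned int pkgid = (unsigned int)-1; /* "no mapping" from topology */

	return pkgid < maxpkg; /* always false: UINT_MAX >= maxpkg */
}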
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 8e0a9fe86de4..f9dd22469388 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -116,9 +116,7 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.execute_only_pkey = -1;
}
#endif
- init_new_context_ldt(tsk, mm);
-
- return 0;
+ return init_new_context_ldt(tsk, mm);
}
static inline void destroy_context(struct mm_struct *mm)
{
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 649d8f2c1e40..91af75e37306 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -456,7 +456,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
cpuid_mask(&entry->ecx, CPUID_7_ECX);
/* PKU is not yet implemented for shadow paging. */
- if (!tdp_enabled)
+ if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
entry->ecx &= ~F(PKU);
} else {
entry->ebx = 0;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index e53bef6cf53c..0375c6024062 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1072,6 +1072,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
if (list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
+ synchronize_rcu();
break;
case ACPI_HEST_NOTIFY_NMI:
ghes_nmi_remove(ghes);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 79152dbc5528..51874695a730 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1728,7 +1728,7 @@ int __init acpi_ec_dsdt_probe(void)
* functioning ECDT EC first in order to handle the events.
* https://bugzilla.kernel.org/show_bug.cgi?id=115021
*/
-int __init acpi_ec_ecdt_start(void)
+static int __init acpi_ec_ecdt_start(void)
{
acpi_handle handle;

@@ -1959,20 +1959,17 @@ static inline void acpi_ec_query_exit(void)
int __init acpi_ec_init(void)
{
int result;
+ int ecdt_fail, dsdt_fail;

/* register workqueue for _Qxx evaluations */
result = acpi_ec_query_init();
if (result)
- goto err_exit;
- /* Now register the driver for the EC */
- result = acpi_bus_register_driver(&acpi_ec_driver);
- if (result)
- goto err_exit;
+ return result;

-err_exit:
- if (result)
- acpi_ec_query_exit();
- return result;
+ /* Drivers must be started after acpi_ec_query_init() */
+ ecdt_fail = acpi_ec_ecdt_start();
+ dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}

/* EC driver currently not unloadable */
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 219b90bc0922..08b3ca0ead69 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -185,7 +185,6 @@ typedef int (*acpi_ec_query_func) (void *data);
int acpi_ec_init(void);
int acpi_ec_ecdt_probe(void);
int acpi_ec_dsdt_probe(void);
-int acpi_ec_ecdt_start(void);
void acpi_ec_block_transactions(void);
void acpi_ec_unblock_transactions(void);
int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index 6d7ce6e12aaa..5e18ccf5ab57 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -45,6 +45,12 @@ static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
struct resource *res = data;
struct resource_win win;

+ /*
+ * We might assign this to 'res' later, make sure all pointers are
+ * cleared before the resource is added to the global list
+ */
+ memset(&win, 0, sizeof(win));
+
res->flags = 0;
if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM))
return AE_OK;
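
The memset() above matters because 'win' is a stack variable whose embedded struct resource may later be copied to *res and linked into the global resource tree; a sketch of the hazard, assuming the usual resource_win layout:

struct resource_win win;       /* stack: contents indeterminate */
/* win.res.parent, .sibling, .child hold stale stack values here */
memset(&win, 0, sizeof(win));  /* all pointers now NULL: safe to copy */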
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index dd3786acba89..cf725d581cae 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2051,7 +2051,6 @@ int __init acpi_scan_init(void)

acpi_gpe_apply_masked_gpes();
acpi_update_all_gpes();
- acpi_ec_ecdt_start();

acpi_scan_initialized = true;

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 3c71b982bf2a..15009b2b33c7 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1724,8 +1724,12 @@ static void binder_transaction(struct binder_proc *proc,
list_add_tail(&t->work.entry, target_list);
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
list_add_tail(&tcomplete->entry, &thread->todo);
- if (target_wait)
- wake_up_interruptible(target_wait);
+ if (target_wait) {
+ if (reply || !(t->flags & TF_ONE_WAY))
+ wake_up_interruptible_sync(target_wait);
+ else
+ wake_up_interruptible(target_wait);
+ }
return;

err_get_unused_fd_failed:
@@ -2760,10 +2764,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/*pr_info("binder_ioctl: %d:%d %x %lx\n",
proc->pid, current->pid, cmd, arg);*/

- if (unlikely(current->mm != proc->vma_vm_mm)) {
- pr_err("current mm mismatch proc mm\n");
- return -EINVAL;
- }
trace_binder_ioctl(cmd, arg);

ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -2875,7 +2875,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
const char *failure_string;
struct binder_buffer *buffer;

- if (proc->tsk != current)
+ if (proc->tsk != current->group_leader)
return -EINVAL;

if ((vma->vm_end - vma->vm_start) > SZ_4M)
@@ -2976,9 +2976,8 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc = kzalloc(sizeof(*proc), GFP_KERNEL);
if (proc == NULL)
return -ENOMEM;
- get_task_struct(current);
- proc->tsk = current;
- proc->vma_vm_mm = current->mm;
+ get_task_struct(current->group_leader);
+ proc->tsk = current->group_leader;
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->wait);
proc->default_priority = task_nice(current);
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 4e19bde4bbff..34adde169a78 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1386,6 +1386,9 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);

+ if (ret)
+ return ret;
+
if (!state->allow_modeset) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1396,7 +1399,7 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
}
}

- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);

diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 465bacd0a630..48e99ab525c3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -255,13 +255,13 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;

+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv);

- if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, file_priv);
-
drm_gem_object_handle_unreference_unlocked(obj);

return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 7316fc7fa0bd..a2ec6d8796a0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -149,8 +149,8 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);

/* Signal polarities */
- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| DSMR_DIPM_DISP | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);

@@ -172,7 +172,7 @@ static void rcar_du_crtc_set_display_timing(struct rcar_du_crtc *rcrtc)
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);

- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
+ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}

diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index cfc302c65b0b..c58602b638e4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -453,13 +453,13 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
}

ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
- of_node_put(encoder);
- of_node_put(connector);
-
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
- "failed to initialize encoder %s (%d), skipping\n",
- encoder->full_name, ret);
+ "failed to initialize encoder %s on output %u (%d), skipping\n",
+ of_node_full_name(encoder), output, ret);
+
+ of_node_put(encoder);
+ of_node_put(connector);

return ret;
}
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 0b42a12171f3..b42d95f09c68 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -319,7 +319,7 @@ static void dw_i2c_plat_complete(struct device *dev)
#endif

#ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -343,11 +343,21 @@ static int dw_i2c_plat_resume(struct device *dev)
return 0;
}

+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+ pm_runtime_resume(dev);
+ return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+ dw_i2c_plat_resume,
+ NULL)
};

#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 60829340a82e..b60e5d87c257 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,8 +36,6 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
s32 poll_value = 0;

if (state) {
- if (!atomic_read(&st->user_requested_state))
- return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;

@@ -86,6 +84,9 @@ static int _hid_sensor_power_state(struct hid_sensor_common *st, bool state)
&report_val);
}

+ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+ st->pdev->name, state_val, report_val);
+
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
@@ -107,6 +108,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
pm_runtime_mark_last_busy(&st->pdev->dev);
+ pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
if (ret < 0) {
@@ -201,8 +203,6 @@ int hid_sensor_setup_trigger(struct iio_dev *indio_dev, const char *name,
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
- pm_runtime_use_autosuspend(&attrb->pdev->dev);
-
return ret;
error_unreg_trigger:
iio_trigger_unregister(trig);
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 8cf84d3488b2..12898424d838 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -696,7 +696,7 @@ static const struct adis16480_chip_info adis16480_chip_info[] = {
.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500),
- .accel_max_scale = 5,
+ .accel_max_scale = 10,
},
[ADIS16485] = {
.channels = adis16485_channels,
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 518e8a7bd5f9..f26807c75be4 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1212,14 +1212,24 @@ static int alps_decode_ss4_v2(struct alps_fields *f,

case SS4_PACKET_ID_TWO:
if (priv->flags & ALPS_BUTTONPAD) {
- f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
+ }
f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
- f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
} else {
- f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ }
f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
- f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
}
f->pressure = SS4_MF_Z_V2(p, 0) ? 0x30 : 0;
@@ -1236,16 +1246,27 @@ static int alps_decode_ss4_v2(struct alps_fields *f,

case SS4_PACKET_ID_MULTI:
if (priv->flags & ALPS_BUTTONPAD) {
- f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_BTL_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_BTL_MF_X_V2(p, 1);
+ } else {
+ f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
+ f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
+ }
+
f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
- f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX_BL;
no_data_y = SS4_MFPACKET_NO_AY_BL;
} else {
- f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
+ if (IS_SS4PLUS_DEV(priv->dev_id)) {
+ f->mt[0].x = SS4_PLUS_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_PLUS_STD_MF_X_V2(p, 1);
+ } else {
+ f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
+ f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
+ }
f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
- f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
no_data_x = SS4_MFPACKET_NO_AX;
no_data_y = SS4_MFPACKET_NO_AY;
@@ -2535,8 +2556,8 @@ static int alps_set_defaults_ss4_v2(struct psmouse *psmouse,

memset(otp, 0, sizeof(otp));

- if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) ||
- alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]))
+ if (alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0]) ||
+ alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]))
return -1;

alps_update_device_area_ss4_v2(otp, priv);
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index dbfd26073e1a..793123717145 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -91,6 +91,10 @@ enum SS4_PACKET_ID {
((_b[1 + _i * 3] << 5) & 0x1F00) \
)

+#define SS4_PLUS_STD_MF_X_V2(_b, _i) (((_b[0 + (_i) * 3] << 4) & 0x0070) | \
+ ((_b[1 + (_i) * 3] << 4) & 0x0F80) \
+ )
+
#define SS4_STD_MF_Y_V2(_b, _i) (((_b[1 + (_i) * 3] << 3) & 0x0010) | \
((_b[2 + (_i) * 3] << 5) & 0x01E0) | \
((_b[2 + (_i) * 3] << 4) & 0x0E00) \
@@ -100,6 +104,10 @@ enum SS4_PACKET_ID {
((_b[0 + (_i) * 3] >> 3) & 0x0010) \
)

+#define SS4_PLUS_BTL_MF_X_V2(_b, _i) (SS4_PLUS_STD_MF_X_V2(_b, _i) | \
+ ((_b[0 + (_i) * 3] >> 4) & 0x0008) \
+ )
+
#define SS4_BTL_MF_Y_V2(_b, _i) (SS4_STD_MF_Y_V2(_b, _i) | \
((_b[0 + (_i) * 3] >> 3) & 0x0008) \
)
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 98d4e515587a..681dce15fbc8 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1234,6 +1234,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },
{ "ELAN0605", 0 },
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 354d47ecd66a..ce6ff9b301bb 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -265,7 +265,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, unsigned char *fir
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1;

- if (param[0] != TP_MAGIC_IDENT)
+ /* add new TP ID. */
+ if (!(param[0] & TP_MAGIC_IDENT))
return -1;

if (firmware_id)
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5617ed3a7d7a..88055755f82e 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,8 +21,9 @@
#define TP_COMMAND 0xE2 /* Commands start with this */

#define TP_READ_ID 0xE1 /* Sent for device identification */
-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
+#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
/* by the firmware ID */
+ /* Firmware ID includes 0x1, 0x2, 0x3 */


/*
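With TP_MAGIC_IDENT widened from 0x01 to 0x03, the equality test in trackpoint.c becomes a mask test accepting the three known firmware IDs. Worked out:

/* old: param[0] != 0x01   -> only ID 0x01 accepted
 * new: !(param[0] & 0x03) -> rejected only when both low bits are clear
 *   0x01 & 0x03 = 0x01  accept     0x02 & 0x03 = 0x02  accept
 *   0x03 & 0x03 = 0x03  accept     0x00 & 0x03 = 0x00  reject
 */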
diff --git a/drivers/leds/trigger/ledtrig-heartbeat.c b/drivers/leds/trigger/ledtrig-heartbeat.c
index c9f386213e9e..410c39c62dc7 100644
--- a/drivers/leds/trigger/ledtrig-heartbeat.c
+++ b/drivers/leds/trigger/ledtrig-heartbeat.c
@@ -19,7 +19,6 @@
#include <linux/sched.h>
#include <linux/leds.h>
#include <linux/reboot.h>
-#include <linux/suspend.h>
#include "../leds.h"

static int panic_heartbeats;
@@ -155,30 +154,6 @@ static struct led_trigger heartbeat_led_trigger = {
.deactivate = heartbeat_trig_deactivate,
};

-static int heartbeat_pm_notifier(struct notifier_block *nb,
- unsigned long pm_event, void *unused)
-{
- int rc;
-
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- case PM_HIBERNATION_PREPARE:
- case PM_RESTORE_PREPARE:
- led_trigger_unregister(&heartbeat_led_trigger);
- break;
- case PM_POST_SUSPEND:
- case PM_POST_HIBERNATION:
- case PM_POST_RESTORE:
- rc = led_trigger_register(&heartbeat_led_trigger);
- if (rc)
- pr_err("could not re-register heartbeat trigger\n");
- break;
- default:
- break;
- }
- return NOTIFY_DONE;
-}
-
static int heartbeat_reboot_notifier(struct notifier_block *nb,
unsigned long code, void *unused)
{
@@ -193,10 +168,6 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}

-static struct notifier_block heartbeat_pm_nb = {
- .notifier_call = heartbeat_pm_notifier,
-};
-
static struct notifier_block heartbeat_reboot_nb = {
.notifier_call = heartbeat_reboot_notifier,
};
@@ -213,14 +184,12 @@ static int __init heartbeat_trig_init(void)
atomic_notifier_chain_register(&panic_notifier_list,
&heartbeat_panic_nb);
register_reboot_notifier(&heartbeat_reboot_nb);
- register_pm_notifier(&heartbeat_pm_nb);
}
return rc;
}

static void __exit heartbeat_trig_exit(void)
{
- unregister_pm_notifier(&heartbeat_pm_nb);
unregister_reboot_notifier(&heartbeat_reboot_nb);
atomic_notifier_chain_unregister(&panic_notifier_list,
&heartbeat_panic_nb);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 551786f58e59..ba652d8a2b93 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -430,7 +430,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
/* Virtual PCI function needs to determine UAR page size from
* firmware. Only master PCI function can set the uar page size
*/
- if (enable_4k_uar)
+ if (enable_4k_uar || !dev->persist->num_vfs)
dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
else
dev->uar_page_shift = PAGE_SHIFT;
@@ -2269,7 +2269,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)

dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;

- if (enable_4k_uar) {
+ if (enable_4k_uar || !dev->persist->num_vfs) {
init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
PAGE_SHIFT - DEFAULT_UAR_PAGE_SHIFT;
init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index aee3fd2b6538..4ca82bd8c4f0 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -871,8 +871,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;

err_unmap:
- --f;
- while (f >= 0) {
+ while (--f >= 0) {
frag = &skb_shinfo(skb)->frags[f];
dma_unmap_page(&nn->pdev->dev,
tx_ring->txbufs[wr_idx].dma_addr,
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index c234ee43b6ef..24222a5d8df2 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -176,14 +176,12 @@ struct ntb_transport_qp {
u64 rx_err_ver;
u64 rx_memcpy;
u64 rx_async;
- u64 dma_rx_prep_err;
u64 tx_bytes;
u64 tx_pkts;
u64 tx_ring_full;
u64 tx_err_no_buf;
u64 tx_memcpy;
u64 tx_async;
- u64 dma_tx_prep_err;
};

struct ntb_transport_mw {
@@ -256,8 +254,6 @@ enum {
#define QP_TO_MW(nt, qp) ((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES 100
#define NTB_LINK_DOWN_TIMEOUT 10
-#define DMA_RETRIES 20
-#define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50)

static void ntb_transport_rxc_db(unsigned long data);
static const struct ntb_ctx_ops ntb_transport_ops;
@@ -518,12 +514,6 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
out_offset += snprintf(buf + out_offset, out_count - out_offset,
"free tx - \t%u\n",
ntb_transport_tx_free_entry(qp));
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "DMA tx prep err - \t%llu\n",
- qp->dma_tx_prep_err);
- out_offset += snprintf(buf + out_offset, out_count - out_offset,
- "DMA rx prep err - \t%llu\n",
- qp->dma_rx_prep_err);

out_offset += snprintf(buf + out_offset, out_count - out_offset,
"\n");
@@ -625,7 +615,7 @@ static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
if (!mw->virt_addr)
return -ENOMEM;

- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@@ -770,8 +760,6 @@ static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
qp->tx_err_no_buf = 0;
qp->tx_memcpy = 0;
qp->tx_async = 0;
- qp->dma_tx_prep_err = 0;
- qp->dma_rx_prep_err = 0;
}

static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
@@ -933,10 +921,8 @@ static void ntb_transport_link_work(struct work_struct *work)
ntb_free_mw(nt, i);

/* if there's an actual failure, we should just bail */
- if (rc < 0) {
- ntb_link_disable(ndev);
+ if (rc < 0)
return;
- }

out:
if (ntb_link_is_up(ndev, NULL, NULL) == 1)
@@ -1002,7 +988,7 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
qp->event_handler = NULL;
ntb_qp_link_down_reset(qp);

- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@@ -1125,8 +1111,8 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
qp_count = ilog2(qp_bitmap);
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
- else if (mw_count < qp_count)
- qp_count = mw_count;
+ else if (nt->mw_count < qp_count)
+ qp_count = nt->mw_count;

qp_bitmap &= BIT_ULL(qp_count) - 1;

@@ -1314,7 +1300,6 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)
struct dmaengine_unmap_data *unmap;
dma_cookie_t cookie;
void *buf = entry->buf;
- int retries = 0;

len = entry->len;
device = chan->device;
@@ -1343,22 +1328,11 @@ static int ntb_async_rx_submit(struct ntb_queue_entry *entry, void *offset)

unmap->from_cnt = 1;

- for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan,
- unmap->addr[1],
- unmap->addr[0], len,
- DMA_PREP_INTERRUPT);
- if (txd)
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(DMA_OUT_RESOURCE_TO);
- }
-
- if (!txd) {
- qp->dma_rx_prep_err++;
+ txd = device->device_prep_dma_memcpy(chan, unmap->addr[1],
+ unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
+ if (!txd)
goto err_get_unmap;
- }

txd->callback_result = ntb_rx_copy_callback;
txd->callback_param = entry;
@@ -1603,7 +1577,6 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,
struct dmaengine_unmap_data *unmap;
dma_addr_t dest;
dma_cookie_t cookie;
- int retries = 0;

device = chan->device;
dest = qp->tx_mw_phys + qp->tx_max_frame * entry->tx_index;
@@ -1625,21 +1598,10 @@ static int ntb_async_tx_submit(struct ntb_transport_qp *qp,

unmap->to_cnt = 1;

- for (retries = 0; retries < DMA_RETRIES; retries++) {
- txd = device->device_prep_dma_memcpy(chan, dest,
- unmap->addr[0], len,
- DMA_PREP_INTERRUPT);
- if (txd)
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(DMA_OUT_RESOURCE_TO);
- }
-
- if (!txd) {
- qp->dma_tx_prep_err++;
+ txd = device->device_prep_dma_memcpy(chan, dest, unmap->addr[0], len,
+ DMA_PREP_INTERRUPT);
+ if (!txd)
goto err_get_unmap;
- }

txd->callback_result = ntb_tx_copy_callback;
txd->callback_param = entry;
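
The corrected queue distribution above gives the first (qp_count % mw_count) memory windows one extra queue. A worked example with qp_count = 5 and mw_count = 2:

/* new test: mw_num < qp_count % mw_count
 *   mw 0: 0 < 1 -> num_qps_mw = 5/2 + 1 = 3
 *   mw 1: 1 < 1 -> num_qps_mw = 5/2     = 2   (3 + 2 = 5, none lost)
 * old test: qp_count % mw_count && mw_num + 1 < qp_count / mw_count
 *   mw 0: 1 < 2 is false -> 2 queues
 *   mw 1: 2 < 2 is false -> 2 queues          (only 4 of 5 assigned)
 */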
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index b432153a6c5a..0f63a36a519e 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -45,6 +45,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
{USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
};

diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 789ff1df2d8d..581712534c93 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -183,15 +183,20 @@ build_path_from_dentry(struct dentry *direntry)
}

/*
+ * Don't allow path components longer than the server max.
* Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix.
*/
static int
-check_name(struct dentry *direntry)
+check_name(struct dentry *direntry, struct cifs_tcon *tcon)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i;

+ if (unlikely(direntry->d_name.len >
+ tcon->fsAttrInfo.MaxPathNameComponentLength))
+ return -ENAMETOOLONG;
+
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
for (i = 0; i < direntry->d_name.len; i++) {
if (direntry->d_name.name[i] == '\\') {
@@ -489,10 +494,6 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
return finish_no_open(file, res);
}

- rc = check_name(direntry);
- if (rc)
- return rc;
-
xid = get_xid();

cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@@ -505,6 +506,11 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
}

tcon = tlink_tcon(tlink);
+
+ rc = check_name(direntry, tcon);
+ if (rc)
+ goto out_free_xid;
+
server = tcon->ses->server;

if (server->ops->new_lease_key)
@@ -765,7 +771,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
}
pTcon = tlink_tcon(tlink);

- rc = check_name(direntry);
+ rc = check_name(direntry, pTcon);
if (rc)
goto lookup_out;

diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 7c1c6c39d582..0437e5fdba56 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2930,8 +2930,8 @@ copy_fs_info_to_kstatfs(struct smb2_fs_full_size_info *pfs_inf,
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+ kst->f_bfree = kst->f_bavail =
+ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
return;
}

diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 4e7a56a0a9b6..2c4f7a22e128 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -129,7 +129,7 @@ static void next_decode_page(struct nfsd4_compoundargs *argp)
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = argp->p + (argp->pagelen>>2);
+ argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
argp->pagelen = 0;
} else {
argp->end = argp->p + (PAGE_SIZE>>2);
@@ -1246,9 +1246,7 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write)
argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE;

- argp->p = (__be32 *)page_address(argp->pagelist[0]);
- argp->pagelist++;
- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
+ next_decode_page(argp);
}
argp->p += XDR_QUADLEN(len);

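XDR_QUADLEN() rounds a byte count up to whole 4-byte XDR words, where the open-coded shift it replaces rounded down. Its definition (from include/linux/sunrpc/xdr.h) and the boundary case:

#define XDR_QUADLEN(l)	(((l) + 3) >> 2)

/* pagelen = 5:  5 >> 2        = 1 word  (last byte falls past argp->end)
 *               XDR_QUADLEN(5) = 2 words (padded tail stays reachable)
 */
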
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 31e1d639abed..dc81e5287ebf 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -59,6 +59,22 @@
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)

+/*
+ * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections, which
+ * generates .data.identifier sections, which need to be pulled in with
+ * .data. We don't want to pull in .data..other sections, which Linux
+ * has defined. Same for text and bss.
+ */
+#ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
+#define TEXT_MAIN .text .text.[0-9a-zA-Z_]*
+#define DATA_MAIN .data .data.[0-9a-zA-Z_]*
+#define BSS_MAIN .bss .bss.[0-9a-zA-Z_]*
+#else
+#define TEXT_MAIN .text
+#define DATA_MAIN .data
+#define BSS_MAIN .bss
+#endif
+
/*
* Align to a 32 byte boundary equal to the
* alignment gcc 4.5 uses for a struct
@@ -198,12 +214,9 @@

/*
* .data section
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -fdata-sections generates
- * .data.identifier which needs to be pulled in with .data, but don't want to
- * pull in .data..stuff which has its own requirements. Same for bss.
*/
#define DATA_DATA \
- *(.data .data.[0-9a-zA-Z_]*) \
+ *(DATA_MAIN) \
*(.ref.data) \
*(.data..shared_aligned) /* percpu related */ \
MEM_KEEP(init.data) \
@@ -436,16 +449,17 @@
VMLINUX_SYMBOL(__security_initcall_end) = .; \
}

-/* .text section. Map to function alignment to avoid address changes
+/*
+ * .text section. Map to function alignment to avoid address changes
* during second ld run in second ld pass when generating System.map
- * LD_DEAD_CODE_DATA_ELIMINATION option enables -ffunction-sections generates
- * .text.identifier which needs to be pulled in with .text , but some
- * architectures define .text.foo which is not intended to be pulled in here.
- * Those enabling LD_DEAD_CODE_DATA_ELIMINATION must ensure they don't have
- * conflicting section names, and must pull in .text.[0-9a-zA-Z_]* */
+ *
+ * TEXT_MAIN here will match .text.fixup and .text.unlikely if dead
+ * code elimination is enabled, so these sections should be converted
+ * to use ".." first.
+ */
#define TEXT_TEXT \
ALIGN_FUNCTION(); \
- *(.text.hot .text .text.fixup .text.unlikely) \
+ *(.text.hot TEXT_MAIN .text.fixup .text.unlikely) \
*(.ref.text) \
MEM_KEEP(init.text) \
MEM_KEEP(exit.text) \
@@ -613,7 +627,7 @@
BSS_FIRST_SECTIONS \
*(.bss..page_aligned) \
*(.dynbss) \
- *(.bss .bss.[0-9a-zA-Z_]*) \
+ *(BSS_MAIN) \
*(COMMON) \
}

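TEXT_MAIN/DATA_MAIN/BSS_MAIN expand to wildcard patterns only when dead code elimination is enabled, because -ffunction-sections/-fdata-sections give every symbol its own input section. A sketch of what the compiler emits and what the pattern then matches:

/* foo.c built with -ffunction-sections -fdata-sections: */
int counter = 1;        /* emitted into .data.counter */
void helper(void) { }   /* emitted into .text.helper  */

/* *(.text .text.[0-9a-zA-Z_]*) collects .text and .text.helper, but
 * not .text..refcount-style names: '.' is outside the character
 * class, so double-dot sections keep their own placement rules.
 */
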
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index a13b031dc6b8..3101141661a1 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -40,6 +40,7 @@ struct bpf_reg_state {
*/
s64 min_value;
u64 max_value;
+ bool value_from_signed;
};

enum bpf_stack_slot_type {
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index ba1cad7b97cf..965cc5693a46 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -10,7 +10,6 @@ enum cpuhp_state {
CPUHP_PERF_X86_PREPARE,
CPUHP_PERF_X86_UNCORE_PREP,
CPUHP_PERF_X86_AMD_UNCORE_PREP,
- CPUHP_PERF_X86_RAPL_PREP,
CPUHP_PERF_BFIN,
CPUHP_PERF_POWER,
CPUHP_PERF_SUPERH,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2f63d44368bd..dd88ded27fc8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -941,9 +941,9 @@ static inline struct file *get_file(struct file *f)
/* Page cache limit. The filesystems should put that into their s_maxbytes
limits, otherwise bad things can happen in VM. */
#if BITS_PER_LONG==32
-#define MAX_LFS_FILESIZE (((loff_t)PAGE_SIZE << (BITS_PER_LONG-1))-1)
+#define MAX_LFS_FILESIZE ((loff_t)ULONG_MAX << PAGE_SHIFT)
#elif BITS_PER_LONG==64
-#define MAX_LFS_FILESIZE ((loff_t)0x7fffffffffffffffLL)
+#define MAX_LFS_FILESIZE ((loff_t)LLONG_MAX)
#endif

#define FL_POSIX 1
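
The 32-bit MAX_LFS_FILESIZE change, worked out for 4 KiB pages: the page cache is indexed by an unsigned long, so the real limit is the number of indexable pages times the page size.

/* BITS_PER_LONG == 32, PAGE_SHIFT == 12:
 * old: ((loff_t)PAGE_SIZE << 31) - 1   = 2^43 - 1  (~8 TiB)
 * new: (loff_t)ULONG_MAX << PAGE_SHIFT = (2^32 - 1) << 12
 *                                     ~= 2^44      (~16 TiB)
 * i.e. the old definition halved the range the page cache can
 * actually index.
 */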
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6c70444da3b9..b83507c0640c 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -340,9 +340,9 @@ static inline void *ptr_ring_consume_bh(struct ptr_ring *r)
__PTR_RING_PEEK_CALL_v; \
})

-static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
+static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
{
- return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
+ return kcalloc(size, sizeof(void *), gfp);
}

static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
@@ -417,7 +417,8 @@ static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp,
* In particular if you consume ring in interrupt or BH context, you must
* disable interrupts/BH when doing so.
*/
-static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
+static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
+ unsigned int nrings,
int size,
gfp_t gfp, void (*destroy)(void *))
{
@@ -425,7 +426,7 @@ static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, int nrings,
void ***queues;
int i;

- queues = kmalloc(nrings * sizeof *queues, gfp);
+ queues = kmalloc_array(nrings, sizeof(*queues), gfp);
if (!queues)
goto noqueues;

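kcalloc() and kmalloc_array() check the n * size multiplication for overflow and return NULL if it would wrap, which is the point of the conversions above: the counts are (indirectly) user-influenced. A sketch of the difference on a 32-bit box (hypothetical helper, not part of the patch):

#include <linux/slab.h>

static void **alloc_queue(unsigned int size)
{
	/* kzalloc(size * sizeof(void *), ...) with size = 0x40000001
	 * would compute 0x40000001 * 4, which wraps to 4: a tiny
	 * buffer that later stores run off the end of. */
	return kcalloc(size, sizeof(void *), GFP_KERNEL); /* wrap -> NULL */
}
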
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h
index f4dfade428f0..be8b902b5845 100644
--- a/include/linux/skb_array.h
+++ b/include/linux/skb_array.h
@@ -162,7 +162,8 @@ static inline int skb_array_resize(struct skb_array *a, int size, gfp_t gfp)
}

static inline int skb_array_resize_multiple(struct skb_array **rings,
- int nrings, int size, gfp_t gfp)
+ int nrings, unsigned int size,
+ gfp_t gfp)
{
BUILD_BUG_ON(offsetof(struct skb_array, ring));
return ptr_ring_resize_multiple((struct ptr_ring **)rings,
diff --git a/include/net/ip.h b/include/net/ip.h
index d3a107850a41..51c6b9786c46 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -339,7 +339,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
!forwarding)
return dst_mtu(dst);

- return min(dst->dev->mtu, IP_MAX_MTU);
+ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}

static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
@@ -351,7 +351,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}

- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
+ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}

u32 ip_idents_reserve(u32 hash, int segs);
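
READ_ONCE() forces a single load of dev->mtu, which can change concurrently via the ioctl path. Schematically, the failure mode it rules out:

/* Without READ_ONCE the compiler may legally reload the field:
 *
 *   if (dst->dev->mtu < IP_MAX_MTU)   // loads 1500
 *           return dst->dev->mtu;     // reloads: may now be 65535
 *
 * min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU) compares and returns
 * the same sampled value.
 */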
1330     diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
1331     index e6aa0a249672..f18fc1a0321f 100644
1332     --- a/include/net/sch_generic.h
1333     +++ b/include/net/sch_generic.h
1334     @@ -768,8 +768,11 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1335     old = *pold;
1336     *pold = new;
1337     if (old != NULL) {
1338     - qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
1339     + unsigned int qlen = old->q.qlen;
1340     + unsigned int backlog = old->qstats.backlog;
1341     +
1342     qdisc_reset(old);
1343     + qdisc_tree_reduce_backlog(old, qlen, backlog);
1344     }
1345     sch_tree_unlock(sch);
1346    
1347     diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
1348     index 8ce679d36c58..779c871c5dcd 100644
1349     --- a/kernel/bpf/verifier.c
1350     +++ b/kernel/bpf/verifier.c
1351     @@ -139,7 +139,7 @@ struct bpf_verifier_stack_elem {
1352     struct bpf_verifier_stack_elem *next;
1353     };
1354    
1355     -#define BPF_COMPLEXITY_LIMIT_INSNS 65536
1356     +#define BPF_COMPLEXITY_LIMIT_INSNS 98304
1357     #define BPF_COMPLEXITY_LIMIT_STACK 1024
1358    
1359     struct bpf_call_arg_meta {
1360     @@ -682,12 +682,13 @@ static int check_ctx_access(struct bpf_verifier_env *env, int off, int size,
1361     return -EACCES;
1362     }
1363    
1364     -static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1365     +static bool __is_pointer_value(bool allow_ptr_leaks,
1366     + const struct bpf_reg_state *reg)
1367     {
1368     - if (env->allow_ptr_leaks)
1369     + if (allow_ptr_leaks)
1370     return false;
1371    
1372     - switch (env->cur_state.regs[regno].type) {
1373     + switch (reg->type) {
1374     case UNKNOWN_VALUE:
1375     case CONST_IMM:
1376     return false;
1377     @@ -696,6 +697,11 @@ static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1378     }
1379     }
1380    
1381     +static bool is_pointer_value(struct bpf_verifier_env *env, int regno)
1382     +{
1383     + return __is_pointer_value(env->allow_ptr_leaks, &env->cur_state.regs[regno]);
1384     +}
1385     +
1386     static int check_ptr_alignment(struct bpf_verifier_env *env,
1387     struct bpf_reg_state *reg, int off, int size)
1388     {
1389     @@ -1467,6 +1473,65 @@ static int evaluate_reg_alu(struct bpf_verifier_env *env, struct bpf_insn *insn)
1390     return 0;
1391     }
1392    
1393     +static int evaluate_reg_imm_alu_unknown(struct bpf_verifier_env *env,
1394     + struct bpf_insn *insn)
1395     +{
1396     + struct bpf_reg_state *regs = env->cur_state.regs;
1397     + struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
1398     + struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1399     + u8 opcode = BPF_OP(insn->code);
1400     + s64 imm_log2 = __ilog2_u64((long long)dst_reg->imm);
1401     +
1402     + /* BPF_X code with src_reg->type UNKNOWN_VALUE here. */
1403     + if (src_reg->imm > 0 && dst_reg->imm) {
1404     + switch (opcode) {
1405     + case BPF_ADD:
1406     + /* dreg += sreg
1407     + * where both have zero upper bits. Adding them
1408     + * can only result in making one more bit non-zero
1409     + * in the larger value.
1410     + * Ex. 0xffff (imm=48) + 1 (imm=63) = 0x10000 (imm=47)
1411     + * 0xffff (imm=48) + 0xffff = 0x1fffe (imm=47)
1412     + */
1413     + dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
1414     + dst_reg->imm--;
1415     + break;
1416     + case BPF_AND:
1417     + /* dreg &= sreg
1418     + * AND cannot extend zero bits, only shrink them
1419     + * Ex. 0x00..00ffffff
1420     + * & 0x0f..ffffffff
1421     + * ----------------
1422     + * 0x00..00ffffff
1423     + */
1424     + dst_reg->imm = max(src_reg->imm, 63 - imm_log2);
1425     + break;
1426     + case BPF_OR:
1427     + /* dreg |= sreg
1428     + * OR can only extend zero bits
1429     + * Ex. 0x00..00ffffff
1430     + * | 0x0f..ffffffff
1431     + * ----------------
1432     + * 0x0f..00ffffff
1433     + */
1434     + dst_reg->imm = min(src_reg->imm, 63 - imm_log2);
1435     + break;
1436     + case BPF_SUB:
1437     + case BPF_MUL:
1438     + case BPF_RSH:
1439     + case BPF_LSH:
1440     + /* These may be flushed out later */
1441     + default:
1442     + mark_reg_unknown_value(regs, insn->dst_reg);
1443     + }
1444     + } else {
1445     + mark_reg_unknown_value(regs, insn->dst_reg);
1446     + }
1447     +
1448     + dst_reg->type = UNKNOWN_VALUE;
1449     + return 0;
1450     +}
1451     +
1452     static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
1453     struct bpf_insn *insn)
1454     {
1455     @@ -1475,6 +1540,9 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
1456     struct bpf_reg_state *src_reg = &regs[insn->src_reg];
1457     u8 opcode = BPF_OP(insn->code);
1458    
1459     + if (BPF_SRC(insn->code) == BPF_X && src_reg->type == UNKNOWN_VALUE)
1460     + return evaluate_reg_imm_alu_unknown(env, insn);
1461     +
1462     /* dst_reg->type == CONST_IMM here, simulate execution of 'add' insn.
1463     * Don't care about overflow or negative values, just add them
1464     */
1465     @@ -1530,10 +1598,24 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1466     }
1467    
1468     /* We don't know anything about what was done to this register, mark it
1469     - * as unknown.
1470     + * as unknown. Also, if both derived bounds came from signed/unsigned
1471     + * mixed compares and one side is unbounded, we cannot really do anything
1472     + * with them as boundaries cannot be trusted. Thus, arithmetic of two
1473     + * with them, as the boundaries cannot be trusted. Thus, arithmetic on
1474     + * two such regs will get invalidated bounds on the dst side.
1475     - if (min_val == BPF_REGISTER_MIN_RANGE &&
1476     - max_val == BPF_REGISTER_MAX_RANGE) {
1477     + if ((min_val == BPF_REGISTER_MIN_RANGE &&
1478     + max_val == BPF_REGISTER_MAX_RANGE) ||
1479     + (BPF_SRC(insn->code) == BPF_X &&
1480     + ((min_val != BPF_REGISTER_MIN_RANGE &&
1481     + max_val == BPF_REGISTER_MAX_RANGE) ||
1482     + (min_val == BPF_REGISTER_MIN_RANGE &&
1483     + max_val != BPF_REGISTER_MAX_RANGE) ||
1484     + (dst_reg->min_value != BPF_REGISTER_MIN_RANGE &&
1485     + dst_reg->max_value == BPF_REGISTER_MAX_RANGE) ||
1486     + (dst_reg->min_value == BPF_REGISTER_MIN_RANGE &&
1487     + dst_reg->max_value != BPF_REGISTER_MAX_RANGE)) &&
1488     + regs[insn->dst_reg].value_from_signed !=
1489     + regs[insn->src_reg].value_from_signed)) {
1490     reset_reg_range_values(regs, insn->dst_reg);
1491     return;
1492     }
1493     @@ -1542,10 +1624,12 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1494     * do our normal operations to the register, we need to set the values
1495     * to the min/max since they are undefined.
1496     */
1497     - if (min_val == BPF_REGISTER_MIN_RANGE)
1498     - dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1499     - if (max_val == BPF_REGISTER_MAX_RANGE)
1500     - dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1501     + if (opcode != BPF_SUB) {
1502     + if (min_val == BPF_REGISTER_MIN_RANGE)
1503     + dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1504     + if (max_val == BPF_REGISTER_MAX_RANGE)
1505     + dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1506     + }
1507    
1508     switch (opcode) {
1509     case BPF_ADD:
1510     @@ -1555,10 +1639,17 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
1511     dst_reg->max_value += max_val;
1512     break;
1513     case BPF_SUB:
1514     + /* If one of our values was at the end of our ranges, then the
1515     + * _opposite_ value in the dst_reg goes to the end of our range.
1516     + */
1517     + if (min_val == BPF_REGISTER_MIN_RANGE)
1518     + dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
1519     + if (max_val == BPF_REGISTER_MAX_RANGE)
1520     + dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
1521     if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1522     - dst_reg->min_value -= min_val;
1523     + dst_reg->min_value -= max_val;
1524     if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
1525     - dst_reg->max_value -= max_val;
1526     + dst_reg->max_value -= min_val;
1527     break;
1528     case BPF_MUL:
1529     if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
1530     @@ -1808,6 +1899,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
1531     * register as unknown.
1532     */
1533     if (env->allow_ptr_leaks &&
1534     + BPF_CLASS(insn->code) == BPF_ALU64 && opcode == BPF_ADD &&
1535     (dst_reg->type == PTR_TO_MAP_VALUE ||
1536     dst_reg->type == PTR_TO_MAP_VALUE_ADJ))
1537     dst_reg->type = PTR_TO_MAP_VALUE_ADJ;
1538     @@ -1876,38 +1968,63 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
1539     struct bpf_reg_state *false_reg, u64 val,
1540     u8 opcode)
1541     {
1542     + bool value_from_signed = true;
1543     + bool is_range = true;
1544     +
1545     switch (opcode) {
1546     case BPF_JEQ:
1547     /* If this is false then we know nothing Jon Snow, but if it is
1548     * true then we know for sure.
1549     */
1550     true_reg->max_value = true_reg->min_value = val;
1551     + is_range = false;
1552     break;
1553     case BPF_JNE:
1554     /* If this is true we know nothing Jon Snow, but if it is false
1555     * we know the value for sure;
1556     */
1557     false_reg->max_value = false_reg->min_value = val;
1558     + is_range = false;
1559     break;
1560     case BPF_JGT:
1561     - /* Unsigned comparison, the minimum value is 0. */
1562     - false_reg->min_value = 0;
1563     + value_from_signed = false;
1564     + /* fallthrough */
1565     case BPF_JSGT:
1566     + if (true_reg->value_from_signed != value_from_signed)
1567     + reset_reg_range_values(true_reg, 0);
1568     + if (false_reg->value_from_signed != value_from_signed)
1569     + reset_reg_range_values(false_reg, 0);
1570     + if (opcode == BPF_JGT) {
1571     + /* Unsigned comparison, the minimum value is 0. */
1572     + false_reg->min_value = 0;
1573     + }
1574     /* If this is false then we know the maximum val is val,
1575     * otherwise we know the min val is val+1.
1576     */
1577     false_reg->max_value = val;
1578     + false_reg->value_from_signed = value_from_signed;
1579     true_reg->min_value = val + 1;
1580     + true_reg->value_from_signed = value_from_signed;
1581     break;
1582     case BPF_JGE:
1583     - /* Unsigned comparison, the minimum value is 0. */
1584     - false_reg->min_value = 0;
1585     + value_from_signed = false;
1586     + /* fallthrough */
1587     case BPF_JSGE:
1588     + if (true_reg->value_from_signed != value_from_signed)
1589     + reset_reg_range_values(true_reg, 0);
1590     + if (false_reg->value_from_signed != value_from_signed)
1591     + reset_reg_range_values(false_reg, 0);
1592     + if (opcode == BPF_JGE) {
1593     + /* Unsigned comparison, the minimum value is 0. */
1594     + false_reg->min_value = 0;
1595     + }
1596     /* If this is false then we know the maximum value is val - 1,
1597     * otherwise we know the minimum value is val.
1598     */
1599     false_reg->max_value = val - 1;
1600     + false_reg->value_from_signed = value_from_signed;
1601     true_reg->min_value = val;
1602     + true_reg->value_from_signed = value_from_signed;
1603     break;
1604     default:
1605     break;
1606     @@ -1915,6 +2032,12 @@ static void reg_set_min_max(struct bpf_reg_state *true_reg,
1607    
1608     check_reg_overflow(false_reg);
1609     check_reg_overflow(true_reg);
1610     + if (is_range) {
1611     + if (__is_pointer_value(false, false_reg))
1612     + reset_reg_range_values(false_reg, 0);
1613     + if (__is_pointer_value(false, true_reg))
1614     + reset_reg_range_values(true_reg, 0);
1615     + }
1616     }
1617    
1618     /* Same as above, but for the case that dst_reg is a CONST_IMM reg and src_reg
1619     @@ -1924,39 +2047,64 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
1620     struct bpf_reg_state *false_reg, u64 val,
1621     u8 opcode)
1622     {
1623     + bool value_from_signed = true;
1624     + bool is_range = true;
1625     +
1626     switch (opcode) {
1627     case BPF_JEQ:
1628     /* If this is false then we know nothing Jon Snow, but if it is
1629     * true then we know for sure.
1630     */
1631     true_reg->max_value = true_reg->min_value = val;
1632     + is_range = false;
1633     break;
1634     case BPF_JNE:
1635     /* If this is true we know nothing Jon Snow, but if it is false
1636     * we know the value for sure;
1637     */
1638     false_reg->max_value = false_reg->min_value = val;
1639     + is_range = false;
1640     break;
1641     case BPF_JGT:
1642     - /* Unsigned comparison, the minimum value is 0. */
1643     - true_reg->min_value = 0;
1644     + value_from_signed = false;
1645     + /* fallthrough */
1646     case BPF_JSGT:
1647     + if (true_reg->value_from_signed != value_from_signed)
1648     + reset_reg_range_values(true_reg, 0);
1649     + if (false_reg->value_from_signed != value_from_signed)
1650     + reset_reg_range_values(false_reg, 0);
1651     + if (opcode == BPF_JGT) {
1652     + /* Unsigned comparison, the minimum value is 0. */
1653     + true_reg->min_value = 0;
1654     + }
1655     /*
1656     * If this is false, then the val is <= the register; if it is
1657     * true, the register is <= the val.
1658     */
1659     false_reg->min_value = val;
1660     + false_reg->value_from_signed = value_from_signed;
1661     true_reg->max_value = val - 1;
1662     + true_reg->value_from_signed = value_from_signed;
1663     break;
1664     case BPF_JGE:
1665     - /* Unsigned comparison, the minimum value is 0. */
1666     - true_reg->min_value = 0;
1667     + value_from_signed = false;
1668     + /* fallthrough */
1669     case BPF_JSGE:
1670     + if (true_reg->value_from_signed != value_from_signed)
1671     + reset_reg_range_values(true_reg, 0);
1672     + if (false_reg->value_from_signed != value_from_signed)
1673     + reset_reg_range_values(false_reg, 0);
1674     + if (opcode == BPF_JGE) {
1675     + /* Unsigned comparison, the minimum value is 0. */
1676     + true_reg->min_value = 0;
1677     + }
1678     /* If this is false then constant < register; if it is true, then
1679     * the register < constant.
1680     */
1681     false_reg->min_value = val + 1;
1682     + false_reg->value_from_signed = value_from_signed;
1683     true_reg->max_value = val;
1684     + true_reg->value_from_signed = value_from_signed;
1685     break;
1686     default:
1687     break;
1688     @@ -1964,6 +2112,12 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg,
1689    
1690     check_reg_overflow(false_reg);
1691     check_reg_overflow(true_reg);
1692     + if (is_range) {
1693     + if (__is_pointer_value(false, false_reg))
1694     + reset_reg_range_values(false_reg, 0);
1695     + if (__is_pointer_value(false, true_reg))
1696     + reset_reg_range_values(true_reg, 0);
1697     + }
1698     }
1699    
1700     static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id,
1701     @@ -2390,6 +2544,7 @@ static int check_cfg(struct bpf_verifier_env *env)
1702     env->explored_states[t + 1] = STATE_LIST_MARK;
1703     } else {
1704     /* conditional jump with two edges */
1705     + env->explored_states[t] = STATE_LIST_MARK;
1706     ret = push_insn(t, t + 1, FALLTHROUGH, env);
1707     if (ret == 1)
1708     goto peek_stack;
1709     @@ -2548,6 +2703,12 @@ static bool states_equal(struct bpf_verifier_env *env,
1710     rcur->type != NOT_INIT))
1711     continue;
1712    
1713     + /* Don't care about the reg->id in this case. */
1714     + if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
1715     + rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
1716     + rold->map_ptr == rcur->map_ptr)
1717     + continue;
1718     +
1719     if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
1720     compare_ptrs_to_packet(rold, rcur))
1721     continue;
1722     @@ -2682,6 +2843,9 @@ static int do_check(struct bpf_verifier_env *env)
1723     goto process_bpf_exit;
1724     }
1725    
1726     + if (need_resched())
1727     + cond_resched();
1728     +
1729     if (log_level && do_print_state) {
1730     verbose("\nfrom %d to %d:", prev_insn_idx, insn_idx);
1731     print_verifier_state(&env->cur_state);
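
Most of the verifier changes above guard range tracking, and the BPF_SUB hunk in particular is plain interval arithmetic: for dst = dst - src, the new minimum comes from subtracting the source *maximum*, and vice versa, so the source bounds cross over; the old code subtracted min from min and max from max, producing bounds a program could escape. The corrected rule in plain C, under the simplifying assumption that neither range is unbounded:

    struct range { long long min, max; };

    /* Bounds of (a - b): the source bounds cross over. */
    static struct range range_sub(struct range a, struct range b)
    {
        struct range r = {
            .min = a.min - b.max,      /* smallest a minus largest b */
            .max = a.max - b.min,      /* largest a minus smallest b */
        };
        return r;
    }

    /* Example: [0,10] - [2,5] = [-5,8]. The old min-min/max-max rule
     * gave [-2,5], which the real result 10 - 2 = 8 escapes. */
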
1732     diff --git a/kernel/events/core.c b/kernel/events/core.c
1733     index f5a693589d66..c774773ac3a4 100644
1734     --- a/kernel/events/core.c
1735     +++ b/kernel/events/core.c
1736     @@ -9786,28 +9786,27 @@ SYSCALL_DEFINE5(perf_event_open,
1737     goto err_context;
1738    
1739     /*
1740     - * Do not allow to attach to a group in a different
1741     - * task or CPU context:
1742     + * Make sure we're both events for the same CPU;
1743     + * grouping events for different CPUs is broken, since
1744     + * you can never concurrently schedule them anyhow.
1745     */
1746     - if (move_group) {
1747     - /*
1748     - * Make sure we're both on the same task, or both
1749     - * per-cpu events.
1750     - */
1751     - if (group_leader->ctx->task != ctx->task)
1752     - goto err_context;
1753     + if (group_leader->cpu != event->cpu)
1754     + goto err_context;
1755    
1756     - /*
1757     - * Make sure we're both events for the same CPU;
1758     - * grouping events for different CPUs is broken; since
1759     - * you can never concurrently schedule them anyhow.
1760     - */
1761     - if (group_leader->cpu != event->cpu)
1762     - goto err_context;
1763     - } else {
1764     - if (group_leader->ctx != ctx)
1765     - goto err_context;
1766     - }
1767     + /*
1768     + * Make sure we're both on the same task, or both
1769     + * per-CPU events.
1770     + */
1771     + if (group_leader->ctx->task != ctx->task)
1772     + goto err_context;
1773     +
1774     + /*
1775     + * Do not allow attaching to a group in a different task
1776     + * or CPU context. If we're moving SW events, we'll fix
1777     + * this up later, so allow that.
1778     + */
1779     + if (!move_group && group_leader->ctx != ctx)
1780     + goto err_context;
1781    
1782     /*
1783     * Only a group leader can be exclusive or pinned
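
The perf_event_open() hunk flattens the group-leader validation: the CPU check and the task check now run unconditionally, and only the context-identity check is waived when move_group will re-home software events later. The resulting decision order, written out as a sketch with every name illustrative:

    static int validate_group(int leader_cpu, int ev_cpu,
                              const void *leader_task, const void *ev_task,
                              const void *leader_ctx, const void *ev_ctx,
                              int move_group)
    {
        if (leader_cpu != ev_cpu)
            return -1;                 /* always enforced now */
        if (leader_task != ev_task)
            return -1;                 /* always enforced now */
        if (!move_group && leader_ctx != ev_ctx)
            return -1;                 /* waived only when moving SW events */
        return 0;
    }
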
1784     diff --git a/kernel/fork.c b/kernel/fork.c
1785     index 59faac4de181..50bf262cc427 100644
1786     --- a/kernel/fork.c
1787     +++ b/kernel/fork.c
1788     @@ -766,6 +766,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p,
1789     mm_init_cpumask(mm);
1790     mm_init_aio(mm);
1791     mm_init_owner(mm, p);
1792     + RCU_INIT_POINTER(mm->exe_file, NULL);
1793     mmu_notifier_mm_init(mm);
1794     clear_tlb_flush_pending(mm);
1795     #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
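
mm_init() now NULLs mm->exe_file explicitly, and RCU_INIT_POINTER() is the right primitive for it: storing NULL, or storing into an object no other CPU can see yet, needs no publication barrier, unlike rcu_assign_pointer(). A C11 model of that distinction (names illustrative):

    #include <stdatomic.h>
    #include <stddef.h>

    struct mm_model { _Atomic(void *) exe_file; };

    /* Init-time store: the mm is not yet visible to any other thread,
     * so a relaxed store (RCU_INIT_POINTER() analogue) suffices. */
    static void mm_init_exe_file(struct mm_model *mm)
    {
        atomic_store_explicit(&mm->exe_file, NULL, memory_order_relaxed);
    }

    /* Publishing real data later needs release ordering, the
     * rcu_assign_pointer() analogue. */
    static void set_exe_file(struct mm_model *mm, void *file)
    {
        atomic_store_explicit(&mm->exe_file, file, memory_order_release);
    }
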
1796     diff --git a/kernel/time/timer.c b/kernel/time/timer.c
1797     index 944ad64277a6..df445cde8a1e 100644
1798     --- a/kernel/time/timer.c
1799     +++ b/kernel/time/timer.c
1800     @@ -201,6 +201,7 @@ struct timer_base {
1801     bool migration_enabled;
1802     bool nohz_active;
1803     bool is_idle;
1804     + bool must_forward_clk;
1805     DECLARE_BITMAP(pending_map, WHEEL_SIZE);
1806     struct hlist_head vectors[WHEEL_SIZE];
1807     } ____cacheline_aligned;
1808     @@ -891,13 +892,19 @@ get_target_base(struct timer_base *base, unsigned tflags)
1809    
1810     static inline void forward_timer_base(struct timer_base *base)
1811     {
1812     - unsigned long jnow = READ_ONCE(jiffies);
1813     + unsigned long jnow;
1814    
1815     /*
1816     - * We only forward the base when it's idle and we have a delta between
1817     - * base clock and jiffies.
1818     + * We only forward the base when we are idle or have just come out of
1819     + * idle (must_forward_clk logic), and have a delta between base clock
1820     + * and jiffies. In the common case, run_timers will take care of it.
1821     */
1822     - if (!base->is_idle || (long) (jnow - base->clk) < 2)
1823     + if (likely(!base->must_forward_clk))
1824     + return;
1825     +
1826     + jnow = READ_ONCE(jiffies);
1827     + base->must_forward_clk = base->is_idle;
1828     + if ((long)(jnow - base->clk) < 2)
1829     return;
1830    
1831     /*
1832     @@ -973,6 +980,11 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1833     * same array bucket then just return:
1834     */
1835     if (timer_pending(timer)) {
1836     + /*
1837     + * The downside of this optimization is that it can result in
1838     + * larger granularity than you would get from adding a new
1839     + * timer with this expiry.
1840     + */
1841     if (timer->expires == expires)
1842     return 1;
1843    
1844     @@ -983,6 +995,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1845     * dequeue/enqueue dance.
1846     */
1847     base = lock_timer_base(timer, &flags);
1848     + forward_timer_base(base);
1849    
1850     clk = base->clk;
1851     idx = calc_wheel_index(expires, clk);
1852     @@ -999,6 +1012,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1853     }
1854     } else {
1855     base = lock_timer_base(timer, &flags);
1856     + forward_timer_base(base);
1857     }
1858    
1859     timer_stats_timer_set_start_info(timer);
1860     @@ -1028,12 +1042,10 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
1861     spin_lock(&base->lock);
1862     WRITE_ONCE(timer->flags,
1863     (timer->flags & ~TIMER_BASEMASK) | base->cpu);
1864     + forward_timer_base(base);
1865     }
1866     }
1867    
1868     - /* Try to forward a stale timer base clock */
1869     - forward_timer_base(base);
1870     -
1871     timer->expires = expires;
1872     /*
1873     * If 'idx' was calculated above and the base time did not advance
1874     @@ -1150,6 +1162,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
1875     WRITE_ONCE(timer->flags,
1876     (timer->flags & ~TIMER_BASEMASK) | cpu);
1877     }
1878     + forward_timer_base(base);
1879    
1880     debug_activate(timer, timer->expires);
1881     internal_add_timer(base, timer);
1882     @@ -1538,10 +1551,16 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
1883     if (!is_max_delta)
1884     expires = basem + (u64)(nextevt - basej) * TICK_NSEC;
1885     /*
1886     - * If we expect to sleep more than a tick, mark the base idle:
1887     + * If we expect to sleep more than a tick, mark the base idle.
1888     + * Also, the tick is stopped, so any added timer must forward
1889     + * the base clk itself to keep granularity small. This idle
1890     + * logic is only maintained for the BASE_STD base; deferrable
1891     + * timers may still see large granularity skew (by design).
1892     */
1893     - if ((expires - basem) > TICK_NSEC)
1894     + if ((expires - basem) > TICK_NSEC) {
1895     + base->must_forward_clk = true;
1896     base->is_idle = true;
1897     + }
1898     }
1899     spin_unlock(&base->lock);
1900    
1901     @@ -1651,6 +1670,19 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h)
1902     {
1903     struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
1904    
1905     + /*
1906     + * must_forward_clk must be cleared before running timers so that any
1907     + * timer functions that call mod_timer will not try to forward the
1908     + * base. Idle tracking / clock forwarding logic is only used with
1909     + * BASE_STD timers.
1910     + *
1911     + * The deferrable base does not do idle tracking at all, so we do
1912     + * not forward it. This can result in very large variations in
1913     + * granularity for deferrable timers, but they can be deferred for
1914     + * long periods due to idle.
1915     + */
1916     + base->must_forward_clk = false;
1917     +
1918     __run_timers(base);
1919     if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
1920     __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
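
Taken together, the timer hunks replace the old "forward when idle" heuristic with an explicit must_forward_clk flag: it is set when the tick is stopped, cleared before expired timers run, and consumed under the base lock whenever a timer is (re)armed, so a timer queued after a long idle stretch is bucketed against a fresh base->clk rather than a stale one. A compact model of the consume side, assuming the final catch-up step simply jumps clk forward (the real code also clamps against the next pending expiry):

    struct base_model {
        unsigned long clk;             /* last wheel position processed */
        int is_idle;
        int must_forward_clk;
    };

    static void forward_base(struct base_model *b, unsigned long jnow)
    {
        if (!b->must_forward_clk)
            return;                        /* common case: run_timers keeps clk fresh */
        b->must_forward_clk = b->is_idle;  /* stay armed only while still idle */
        if ((long)(jnow - b->clk) < 2)
            return;                        /* no stale delta to skip over */
        b->clk = jnow;                     /* catch up; the kernel additionally
                                            * clamps this against next_expiry */
    }
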
1921     diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
1922     index 5dcb99281259..41805fb3c661 100644
1923     --- a/kernel/trace/bpf_trace.c
1924     +++ b/kernel/trace/bpf_trace.c
1925     @@ -203,10 +203,36 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
1926     fmt_cnt++;
1927     }
1928    
1929     - return __trace_printk(1/* fake ip will not be printed */, fmt,
1930     - mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
1931     - mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
1932     - mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
1933     +/* Horrid workaround for getting va_list handling working with different
1934     + * argument type combinations generically for 32 and 64 bit archs.
1935     + */
1936     +#define __BPF_TP_EMIT() __BPF_ARG3_TP()
1937     +#define __BPF_TP(...) \
1938     + __trace_printk(1 /* Fake ip will not be printed. */, \
1939     + fmt, ##__VA_ARGS__)
1940     +
1941     +#define __BPF_ARG1_TP(...) \
1942     + ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
1943     + ? __BPF_TP(arg1, ##__VA_ARGS__) \
1944     + : ((mod[0] == 1 || (mod[0] == 0 && __BITS_PER_LONG == 32)) \
1945     + ? __BPF_TP((long)arg1, ##__VA_ARGS__) \
1946     + : __BPF_TP((u32)arg1, ##__VA_ARGS__)))
1947     +
1948     +#define __BPF_ARG2_TP(...) \
1949     + ((mod[1] == 2 || (mod[1] == 1 && __BITS_PER_LONG == 64)) \
1950     + ? __BPF_ARG1_TP(arg2, ##__VA_ARGS__) \
1951     + : ((mod[1] == 1 || (mod[1] == 0 && __BITS_PER_LONG == 32)) \
1952     + ? __BPF_ARG1_TP((long)arg2, ##__VA_ARGS__) \
1953     + : __BPF_ARG1_TP((u32)arg2, ##__VA_ARGS__)))
1954     +
1955     +#define __BPF_ARG3_TP(...) \
1956     + ((mod[2] == 2 || (mod[2] == 1 && __BITS_PER_LONG == 64)) \
1957     + ? __BPF_ARG2_TP(arg3, ##__VA_ARGS__) \
1958     + : ((mod[2] == 1 || (mod[2] == 0 && __BITS_PER_LONG == 32)) \
1959     + ? __BPF_ARG2_TP((long)arg3, ##__VA_ARGS__) \
1960     + : __BPF_ARG2_TP((u32)arg3, ##__VA_ARGS__)))
1961     +
1962     + return __BPF_TP_EMIT();
1963     }
1964    
1965     static const struct bpf_func_proto bpf_trace_printk_proto = {
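
The macro cascade above exists because the three casts must appear literally in the __trace_printk() varargs call, so every combination is expanded at compile time; per argument, though, the choice reduces to one rule. That rule written as an ordinary function for readability (the enum and names are illustrative):

    enum arg_width { W_U64, W_LONG, W_U32 };

    /* mod[i] comes from parsing the format string: 2 for %ll*,
     * 1 for %l*, 0 for int-sized specifiers. */
    static enum arg_width bpf_printk_width(int mod, int bits_per_long)
    {
        if (mod == 2 || (mod == 1 && bits_per_long == 64))
            return W_U64;   /* pass the raw u64 through */
        if (mod == 1 || (mod == 0 && bits_per_long == 32))
            return W_LONG;  /* cast to (long) */
        return W_U32;       /* cast to (u32) */
    }
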
1966     diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
1967     index 4f7ea8446bb5..6e432ed7d0fe 100644
1968     --- a/kernel/trace/ftrace.c
1969     +++ b/kernel/trace/ftrace.c
1970     @@ -876,6 +876,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
1971    
1972     function_profile_call(trace->func, 0, NULL, NULL);
1973    
1974     + /* If function graph is shutting down, ret_stack can be NULL */
1975     + if (!current->ret_stack)
1976     + return 0;
1977     +
1978     if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
1979     current->ret_stack[index].subtime = 0;
1980    
1981     diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
1982     index 53c308068e39..7379f735a9f4 100644
1983     --- a/kernel/trace/trace.c
1984     +++ b/kernel/trace/trace.c
1985     @@ -7767,4 +7767,4 @@ __init static int clear_boot_tracer(void)
1986     }
1987    
1988     fs_initcall(tracer_init_tracefs);
1989     -late_initcall(clear_boot_tracer);
1990     +late_initcall_sync(clear_boot_tracer);
1991     diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
1992     index 9daa9b3bc6d9..0193f58c45f0 100644
1993     --- a/kernel/trace/trace_events_filter.c
1994     +++ b/kernel/trace/trace_events_filter.c
1995     @@ -1926,6 +1926,10 @@ static int create_filter(struct trace_event_call *call,
1996     if (err && set_str)
1997     append_filter_err(ps, filter);
1998     }
1999     + if (err && !set_str) {
2000     + free_event_filter(filter);
2001     + filter = NULL;
2002     + }
2003     create_filter_finish(ps);
2004    
2005     *filterp = filter;
2006     diff --git a/kernel/trace/tracing_map.c b/kernel/trace/tracing_map.c
2007     index 0a689bbb78ef..305039b122fa 100644
2008     --- a/kernel/trace/tracing_map.c
2009     +++ b/kernel/trace/tracing_map.c
2010     @@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
2011     if (!a)
2012     return;
2013    
2014     - if (!a->pages) {
2015     - kfree(a);
2016     - return;
2017     - }
2018     + if (!a->pages)
2019     + goto free;
2020    
2021     for (i = 0; i < a->n_pages; i++) {
2022     if (!a->pages[i])
2023     break;
2024     free_page((unsigned long)a->pages[i]);
2025     }
2026     +
2027     + kfree(a->pages);
2028     +
2029     + free:
2030     + kfree(a);
2031     }
2032    
2033     struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
2034     diff --git a/mm/madvise.c b/mm/madvise.c
2035     index 253b1533fba5..63a12162f4c6 100644
2036     --- a/mm/madvise.c
2037     +++ b/mm/madvise.c
2038     @@ -331,8 +331,8 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
2039     pte_offset_map_lock(mm, pmd, addr, &ptl);
2040     goto out;
2041     }
2042     - put_page(page);
2043     unlock_page(page);
2044     + put_page(page);
2045     pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
2046     pte--;
2047     addr -= PAGE_SIZE;
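
The madvise hunk is purely an ordering fix: put_page() may free the page once the refcount hits zero, so unlock_page() has to run first, while our reference still pins the page; the reversed order unlocks potentially freed memory. The general release pattern, sketched with toy types:

    struct page_model { int refcount; int locked; };

    static void page_unlock(struct page_model *p) { p->locked = 0; }

    static void page_put(struct page_model *p)
    {
        if (--p->refcount == 0) {
            /* freed here: p must not be touched again */
        }
    }

    /* Unlock while our reference still pins the page, then drop the
     * reference; the reversed order can unlock freed memory. */
    static void release_page(struct page_model *p)
    {
        page_unlock(p);
        page_put(p);
    }
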
2048     diff --git a/mm/memblock.c b/mm/memblock.c
2049     index ccec42c12ba8..42b98af6a415 100644
2050     --- a/mm/memblock.c
2051     +++ b/mm/memblock.c
2052     @@ -311,7 +311,7 @@ void __init memblock_discard(void)
2053     __memblock_free_late(addr, size);
2054     }
2055    
2056     - if (memblock.memory.regions == memblock_memory_init_regions) {
2057     + if (memblock.memory.regions != memblock_memory_init_regions) {
2058     addr = __pa(memblock.memory.regions);
2059     size = PAGE_ALIGN(sizeof(struct memblock_region) *
2060     memblock.memory.max);
2061     diff --git a/mm/shmem.c b/mm/shmem.c
2062     index 7ee5444ffb6d..004e0f87e8a8 100644
2063     --- a/mm/shmem.c
2064     +++ b/mm/shmem.c
2065     @@ -3810,7 +3810,7 @@ int __init shmem_init(void)
2066     }
2067    
2068     #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
2069     - if (has_transparent_hugepage() && shmem_huge < SHMEM_HUGE_DENY)
2070     + if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
2071     SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
2072     else
2073     shmem_huge = 0; /* just in case it was patched */
2074     @@ -3871,7 +3871,7 @@ static ssize_t shmem_enabled_store(struct kobject *kobj,
2075     return -EINVAL;
2076    
2077     shmem_huge = huge;
2078     - if (shmem_huge < SHMEM_HUGE_DENY)
2079     + if (shmem_huge > SHMEM_HUGE_DENY)
2080     SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
2081     return count;
2082     }
2083     diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
2084     index fbf251fef70f..4d6b94d7ce5f 100644
2085     --- a/net/bluetooth/bnep/core.c
2086     +++ b/net/bluetooth/bnep/core.c
2087     @@ -484,16 +484,16 @@ static int bnep_session(void *arg)
2088     struct net_device *dev = s->dev;
2089     struct sock *sk = s->sock->sk;
2090     struct sk_buff *skb;
2091     - wait_queue_t wait;
2092     + DEFINE_WAIT_FUNC(wait, woken_wake_function);
2093    
2094     BT_DBG("");
2095    
2096     set_user_nice(current, -15);
2097    
2098     - init_waitqueue_entry(&wait, current);
2099     add_wait_queue(sk_sleep(sk), &wait);
2100     while (1) {
2101     - set_current_state(TASK_INTERRUPTIBLE);
2102     + /* Ensure session->terminate is updated */
2103     + smp_mb__before_atomic();
2104    
2105     if (atomic_read(&s->terminate))
2106     break;
2107     @@ -515,9 +515,8 @@ static int bnep_session(void *arg)
2108     break;
2109     netif_wake_queue(dev);
2110    
2111     - schedule();
2112     + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2113     }
2114     - __set_current_state(TASK_RUNNING);
2115     remove_wait_queue(sk_sleep(sk), &wait);
2116    
2117     /* Cleanup session */
2118     @@ -666,7 +665,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
2119     s = __bnep_get_session(req->dst);
2120     if (s) {
2121     atomic_inc(&s->terminate);
2122     - wake_up_process(s->task);
2123     + wake_up_interruptible(sk_sleep(s->sock->sk));
2124     } else
2125     err = -ENOENT;
2126    
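
The bnep change (like the cmtp and hidp changes after it) replaces the open-coded set_current_state()/schedule() sleep with DEFINE_WAIT_FUNC(wait, woken_wake_function) plus wait_woken(), because the wakeup side now uses a plain waitqueue wakeup instead of wake_up_process(): wait_woken() records a "woken" flag in the wait entry, so a wakeup that lands between the terminate check and the sleep is consumed instead of lost. A userspace analogue of that handshake, using a flag under a mutex (the kernel version achieves the same with memory barriers and no lock):

    #include <pthread.h>
    #include <stdbool.h>

    struct woken_wait {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            woken;          /* WQ_FLAG_WOKEN analogue */
    };

    /* Waker: record the wakeup before signalling, so a waiter that has
     * already checked its condition still finds 'woken' set. */
    static void wake(struct woken_wait *w)
    {
        pthread_mutex_lock(&w->lock);
        w->woken = true;
        pthread_cond_signal(&w->cond);
        pthread_mutex_unlock(&w->lock);
    }

    /* Waiter: sleep only while no wakeup has been recorded, then
     * consume the flag. */
    static void wait_woken_once(struct woken_wait *w)
    {
        pthread_mutex_lock(&w->lock);
        while (!w->woken)
            pthread_cond_wait(&w->cond, &w->lock);
        w->woken = false;
        pthread_mutex_unlock(&w->lock);
    }
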
2127     diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
2128     index 9e59b6654126..1152ce34dad4 100644
2129     --- a/net/bluetooth/cmtp/core.c
2130     +++ b/net/bluetooth/cmtp/core.c
2131     @@ -280,16 +280,16 @@ static int cmtp_session(void *arg)
2132     struct cmtp_session *session = arg;
2133     struct sock *sk = session->sock->sk;
2134     struct sk_buff *skb;
2135     - wait_queue_t wait;
2136     + DEFINE_WAIT_FUNC(wait, woken_wake_function);
2137    
2138     BT_DBG("session %p", session);
2139    
2140     set_user_nice(current, -15);
2141    
2142     - init_waitqueue_entry(&wait, current);
2143     add_wait_queue(sk_sleep(sk), &wait);
2144     while (1) {
2145     - set_current_state(TASK_INTERRUPTIBLE);
2146     + /* Ensure session->terminate is updated */
2147     + smp_mb__before_atomic();
2148    
2149     if (atomic_read(&session->terminate))
2150     break;
2151     @@ -306,9 +306,8 @@ static int cmtp_session(void *arg)
2152    
2153     cmtp_process_transmit(session);
2154    
2155     - schedule();
2156     + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2157     }
2158     - __set_current_state(TASK_RUNNING);
2159     remove_wait_queue(sk_sleep(sk), &wait);
2160    
2161     down_write(&cmtp_session_sem);
2162     @@ -393,7 +392,7 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
2163     err = cmtp_attach_device(session);
2164     if (err < 0) {
2165     atomic_inc(&session->terminate);
2166     - wake_up_process(session->task);
2167     + wake_up_interruptible(sk_sleep(session->sock->sk));
2168     up_write(&cmtp_session_sem);
2169     return err;
2170     }
2171     @@ -431,7 +430,11 @@ int cmtp_del_connection(struct cmtp_conndel_req *req)
2172    
2173     /* Stop session thread */
2174     atomic_inc(&session->terminate);
2175     - wake_up_process(session->task);
2176     +
2177     + /* Ensure session->terminate is updated */
2178     + smp_mb__after_atomic();
2179     +
2180     + wake_up_interruptible(sk_sleep(session->sock->sk));
2181     } else
2182     err = -ENOENT;
2183    
2184     diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
2185     index 0bec4588c3c8..1fc076420d1e 100644
2186     --- a/net/bluetooth/hidp/core.c
2187     +++ b/net/bluetooth/hidp/core.c
2188     @@ -36,6 +36,7 @@
2189     #define VERSION "1.2"
2190    
2191     static DECLARE_RWSEM(hidp_session_sem);
2192     +static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
2193     static LIST_HEAD(hidp_session_list);
2194    
2195     static unsigned char hidp_keycode[256] = {
2196     @@ -1068,12 +1069,12 @@ static int hidp_session_start_sync(struct hidp_session *session)
2197     * Wake up session thread and notify it to stop. This is asynchronous and
2198     * returns immediately. Call this whenever a runtime error occurs and you want
2199     * the session to stop.
2200     - * Note: wake_up_process() performs any necessary memory-barriers for us.
2201     + * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
2202     */
2203     static void hidp_session_terminate(struct hidp_session *session)
2204     {
2205     atomic_inc(&session->terminate);
2206     - wake_up_process(session->task);
2207     + wake_up_interruptible(&hidp_session_wq);
2208     }
2209    
2210     /*
2211     @@ -1180,7 +1181,9 @@ static void hidp_session_run(struct hidp_session *session)
2212     struct sock *ctrl_sk = session->ctrl_sock->sk;
2213     struct sock *intr_sk = session->intr_sock->sk;
2214     struct sk_buff *skb;
2215     + DEFINE_WAIT_FUNC(wait, woken_wake_function);
2216    
2217     + add_wait_queue(&hidp_session_wq, &wait);
2218     for (;;) {
2219     /*
2220     * This thread can be woken up two ways:
2221     @@ -1188,12 +1191,10 @@ static void hidp_session_run(struct hidp_session *session)
2222     * session->terminate flag and wakes this thread up.
2223     * - Via modifying the socket state of ctrl/intr_sock. This
2224     * thread is woken up by ->sk_state_changed().
2225     - *
2226     - * Note: set_current_state() performs any necessary
2227     - * memory-barriers for us.
2228     */
2229     - set_current_state(TASK_INTERRUPTIBLE);
2230    
2231     + /* Ensure session->terminate is updated */
2232     + smp_mb__before_atomic();
2233     if (atomic_read(&session->terminate))
2234     break;
2235    
2236     @@ -1227,11 +1228,22 @@ static void hidp_session_run(struct hidp_session *session)
2237     hidp_process_transmit(session, &session->ctrl_transmit,
2238     session->ctrl_sock);
2239    
2240     - schedule();
2241     + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2242     }
2243     + remove_wait_queue(&hidp_session_wq, &wait);
2244    
2245     atomic_inc(&session->terminate);
2246     - set_current_state(TASK_RUNNING);
2247     +
2248     + /* Ensure session->terminate is updated */
2249     + smp_mb__after_atomic();
2250     +}
2251     +
2252     +static int hidp_session_wake_function(wait_queue_t *wait,
2253     + unsigned int mode,
2254     + int sync, void *key)
2255     +{
2256     + wake_up_interruptible(&hidp_session_wq);
2257     + return false;
2258     }
2259    
2260     /*
2261     @@ -1244,7 +1256,8 @@ static void hidp_session_run(struct hidp_session *session)
2262     static int hidp_session_thread(void *arg)
2263     {
2264     struct hidp_session *session = arg;
2265     - wait_queue_t ctrl_wait, intr_wait;
2266     + DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
2267     + DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
2268    
2269     BT_DBG("session %p", session);
2270    
2271     @@ -1254,8 +1267,6 @@ static int hidp_session_thread(void *arg)
2272     set_user_nice(current, -15);
2273     hidp_set_timer(session);
2274    
2275     - init_waitqueue_entry(&ctrl_wait, current);
2276     - init_waitqueue_entry(&intr_wait, current);
2277     add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
2278     add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
2279     /* This memory barrier is paired with wq_has_sleeper(). See
2280     diff --git a/net/dccp/proto.c b/net/dccp/proto.c
2281     index 9fe25bf63296..b68168fcc06a 100644
2282     --- a/net/dccp/proto.c
2283     +++ b/net/dccp/proto.c
2284     @@ -24,6 +24,7 @@
2285     #include <net/checksum.h>
2286    
2287     #include <net/inet_sock.h>
2288     +#include <net/inet_common.h>
2289     #include <net/sock.h>
2290     #include <net/xfrm.h>
2291    
2292     @@ -170,6 +171,15 @@ const char *dccp_packet_name(const int type)
2293    
2294     EXPORT_SYMBOL_GPL(dccp_packet_name);
2295    
2296     +static void dccp_sk_destruct(struct sock *sk)
2297     +{
2298     + struct dccp_sock *dp = dccp_sk(sk);
2299     +
2300     + ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
2301     + dp->dccps_hc_tx_ccid = NULL;
2302     + inet_sock_destruct(sk);
2303     +}
2304     +
2305     int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
2306     {
2307     struct dccp_sock *dp = dccp_sk(sk);
2308     @@ -179,6 +189,7 @@ int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
2309     icsk->icsk_syn_retries = sysctl_dccp_request_retries;
2310     sk->sk_state = DCCP_CLOSED;
2311     sk->sk_write_space = dccp_write_space;
2312     + sk->sk_destruct = dccp_sk_destruct;
2313     icsk->icsk_sync_mss = dccp_sync_mss;
2314     dp->dccps_mss_cache = 536;
2315     dp->dccps_rate_last = jiffies;
2316     @@ -201,10 +212,7 @@ void dccp_destroy_sock(struct sock *sk)
2317     {
2318     struct dccp_sock *dp = dccp_sk(sk);
2319    
2320     - /*
2321     - * DCCP doesn't use sk_write_queue, just sk_send_head
2322     - * for retransmissions
2323     - */
2324     + __skb_queue_purge(&sk->sk_write_queue);
2325     if (sk->sk_send_head != NULL) {
2326     kfree_skb(sk->sk_send_head);
2327     sk->sk_send_head = NULL;
2328     @@ -222,8 +230,7 @@ void dccp_destroy_sock(struct sock *sk)
2329     dp->dccps_hc_rx_ackvec = NULL;
2330     }
2331     ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
2332     - ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
2333     - dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
2334     + dp->dccps_hc_rx_ccid = NULL;
2335    
2336     /* clean up feature negotiation state */
2337     dccp_feat_list_purge(&dp->dccps_featneg);
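
Moving ccid_hc_tx_delete() out of dccp_destroy_sock() and into an sk_destruct callback defers freeing the TX CCID until the last socket reference is gone, so timers or in-flight packets that still hold a reference cannot touch freed CCID state; dccp_destroy_sock() keeps only the RX side and now also purges sk_write_queue. The shape of the destructor pattern, with toy types and illustrative names:

    #include <stddef.h>

    struct sock_model {
        void (*sk_destruct)(struct sock_model *sk);
        void *tx_ccid;                 /* stand-in for dccps_hc_tx_ccid */
    };

    static void ccid_delete(void *ccid) { (void)ccid; /* free CCID state */ }

    /* Runs only once the last reference is dropped, so a timer that
     * still holds a socket reference cannot race with this free. */
    static void dccp_destruct_model(struct sock_model *sk)
    {
        ccid_delete(sk->tx_ccid);
        sk->tx_ccid = NULL;
    }

    static void dccp_init_model(struct sock_model *sk)
    {
        sk->sk_destruct = dccp_destruct_model;
    }
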
2338     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
2339     index 7563831fa432..38c1c979ecb1 100644
2340     --- a/net/ipv4/fib_semantics.c
2341     +++ b/net/ipv4/fib_semantics.c
2342     @@ -1044,15 +1044,17 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
2343     fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
2344     if (!fi)
2345     goto failure;
2346     - fib_info_cnt++;
2347     if (cfg->fc_mx) {
2348     fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
2349     - if (!fi->fib_metrics)
2350     - goto failure;
2351     + if (unlikely(!fi->fib_metrics)) {
2352     + kfree(fi);
2353     + return ERR_PTR(err);
2354     + }
2355     atomic_set(&fi->fib_metrics->refcnt, 1);
2356     - } else
2357     + } else {
2358     fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
2359     -
2360     + }
2361     + fib_info_cnt++;
2362     fi->fib_net = net;
2363     fi->fib_protocol = cfg->fc_protocol;
2364     fi->fib_scope = cfg->fc_scope;
2365     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
2366     index 6cd49fd17ac0..6a5b7783932e 100644
2367     --- a/net/ipv4/route.c
2368     +++ b/net/ipv4/route.c
2369     @@ -1247,7 +1247,7 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
2370     if (mtu)
2371     return mtu;
2372    
2373     - mtu = dst->dev->mtu;
2374     + mtu = READ_ONCE(dst->dev->mtu);
2375    
2376     if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
2377     if (rt->rt_uses_gateway && mtu > 576)
2378     diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
2379     index 32c540145c17..c03850771a4e 100644
2380     --- a/net/ipv4/tcp_input.c
2381     +++ b/net/ipv4/tcp_input.c
2382     @@ -3036,8 +3036,7 @@ void tcp_rearm_rto(struct sock *sk)
2383     /* delta may not be positive if the socket is locked
2384     * when the retrans timer fires and is rescheduled.
2385     */
2386     - if (delta > 0)
2387     - rto = delta;
2388     + rto = max(delta, 1);
2389     }
2390     inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
2391     TCP_RTO_MAX);
2392     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
2393     index 4345ee39f180..ff389591a340 100644
2394     --- a/net/ipv6/ip6_fib.c
2395     +++ b/net/ipv6/ip6_fib.c
2396     @@ -897,6 +897,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2397     }
2398     nsiblings = iter->rt6i_nsiblings;
2399     fib6_purge_rt(iter, fn, info->nl_net);
2400     + if (fn->rr_ptr == iter)
2401     + fn->rr_ptr = NULL;
2402     rt6_release(iter);
2403    
2404     if (nsiblings) {
2405     @@ -909,6 +911,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
2406     if (rt6_qualify_for_ecmp(iter)) {
2407     *ins = iter->dst.rt6_next;
2408     fib6_purge_rt(iter, fn, info->nl_net);
2409     + if (fn->rr_ptr == iter)
2410     + fn->rr_ptr = NULL;
2411     rt6_release(iter);
2412     nsiblings--;
2413     } else {
2414     @@ -997,7 +1001,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2415     /* Create subtree root node */
2416     sfn = node_alloc();
2417     if (!sfn)
2418     - goto st_failure;
2419     + goto failure;
2420    
2421     sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
2422     atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
2423     @@ -1013,12 +1017,12 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2424    
2425     if (IS_ERR(sn)) {
2426     /* If it fails, discard the just-allocated
2427     - root, and then (in st_failure) stale node
2428     + root, and then (in failure) stale node
2429     in main tree.
2430     */
2431     node_free(sfn);
2432     err = PTR_ERR(sn);
2433     - goto st_failure;
2434     + goto failure;
2435     }
2436    
2437     /* Now link new subtree to main tree */
2438     @@ -1032,7 +1036,7 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2439    
2440     if (IS_ERR(sn)) {
2441     err = PTR_ERR(sn);
2442     - goto st_failure;
2443     + goto failure;
2444     }
2445     }
2446    
2447     @@ -1074,22 +1078,22 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt,
2448     atomic_inc(&pn->leaf->rt6i_ref);
2449     }
2450     #endif
2451     - if (!(rt->dst.flags & DST_NOCACHE))
2452     - dst_free(&rt->dst);
2453     + goto failure;
2454     }
2455     return err;
2456    
2457     -#ifdef CONFIG_IPV6_SUBTREES
2458     - /* Subtree creation failed, probably main tree node
2459     - is orphan. If it is, shoot it.
2460     +failure:
2461     + /* fn->leaf could be NULL if fn is an intermediate node and we
2462     + * failed to add the new route to it, both in the subtree creation
2463     + * failure case and in the fib6_add_rt2node() failure case.
2464     + * In both cases, fib6_repair_tree() should be called to fix
2465     + * fn->leaf.
2466     */
2467     -st_failure:
2468     if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
2469     fib6_repair_tree(info->nl_net, fn);
2470     if (!(rt->dst.flags & DST_NOCACHE))
2471     dst_free(&rt->dst);
2472     return err;
2473     -#endif
2474     }
2475    
2476     /*
2477     diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
2478     index 391c3cbd2eed..101ed6c42808 100644
2479     --- a/net/irda/af_irda.c
2480     +++ b/net/irda/af_irda.c
2481     @@ -2223,7 +2223,7 @@ static int irda_getsockopt(struct socket *sock, int level, int optname,
2482     {
2483     struct sock *sk = sock->sk;
2484     struct irda_sock *self = irda_sk(sk);
2485     - struct irda_device_list list;
2486     + struct irda_device_list list = { 0 };
2487     struct irda_device_info *discoveries;
2488     struct irda_ias_set * ias_opt; /* IAS get/query params */
2489     struct ias_object * ias_obj; /* Object in IAS */
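
The `= { 0 }` initializer turns a stack structure that irda_getsockopt() later copies to userspace into fully defined bytes; without it, unwritten fields and padding leak kernel stack contents. The same idiom in plain C; note that memset() is the strictly portable way to clear padding, though in practice `= { 0 }` zeroes the whole object too:

    #include <string.h>

    struct reply {
        unsigned int len;
        char         name[22];         /* layout with trailing padding */
    };

    /* A stack struct copied out wholesale must have every byte
     * defined before the copy. */
    static void build_reply(unsigned char *user_buf)
    {
        struct reply r = { 0 };            /* every field (and, with
                                            * mainstream compilers, the
                                            * padding) cleared */
        r.len = 0;
        memcpy(user_buf, &r, sizeof(r));   /* copy_to_user() stand-in */
    }
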
2490     diff --git a/net/key/af_key.c b/net/key/af_key.c
2491     index 2e1050ec2cf0..94bf810ad242 100644
2492     --- a/net/key/af_key.c
2493     +++ b/net/key/af_key.c
2494     @@ -228,7 +228,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
2495     #define BROADCAST_ONE 1
2496     #define BROADCAST_REGISTERED 2
2497     #define BROADCAST_PROMISC_ONLY 4
2498     -static int pfkey_broadcast(struct sk_buff *skb,
2499     +static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
2500     int broadcast_flags, struct sock *one_sk,
2501     struct net *net)
2502     {
2503     @@ -278,7 +278,7 @@ static int pfkey_broadcast(struct sk_buff *skb,
2504     rcu_read_unlock();
2505    
2506     if (one_sk != NULL)
2507     - err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
2508     + err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
2509    
2510     kfree_skb(skb2);
2511     kfree_skb(skb);
2512     @@ -311,7 +311,7 @@ static int pfkey_do_dump(struct pfkey_sock *pfk)
2513     hdr = (struct sadb_msg *) pfk->dump.skb->data;
2514     hdr->sadb_msg_seq = 0;
2515     hdr->sadb_msg_errno = rc;
2516     - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
2517     + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
2518     &pfk->sk, sock_net(&pfk->sk));
2519     pfk->dump.skb = NULL;
2520     }
2521     @@ -355,7 +355,7 @@ static int pfkey_error(const struct sadb_msg *orig, int err, struct sock *sk)
2522     hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
2523     sizeof(uint64_t));
2524    
2525     - pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
2526     + pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
2527    
2528     return 0;
2529     }
2530     @@ -1396,7 +1396,7 @@ static int pfkey_getspi(struct sock *sk, struct sk_buff *skb, const struct sadb_
2531    
2532     xfrm_state_put(x);
2533    
2534     - pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
2535     + pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
2536    
2537     return 0;
2538     }
2539     @@ -1483,7 +1483,7 @@ static int key_notify_sa(struct xfrm_state *x, const struct km_event *c)
2540     hdr->sadb_msg_seq = c->seq;
2541     hdr->sadb_msg_pid = c->portid;
2542    
2543     - pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
2544     + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
2545    
2546     return 0;
2547     }
2548     @@ -1596,7 +1596,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, const struct sadb_msg
2549     out_hdr->sadb_msg_reserved = 0;
2550     out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
2551     out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
2552     - pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
2553     + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
2554    
2555     return 0;
2556     }
2557     @@ -1701,8 +1701,8 @@ static int pfkey_register(struct sock *sk, struct sk_buff *skb, const struct sad
2558     return -ENOBUFS;
2559     }
2560    
2561     - pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
2562     -
2563     + pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
2564     + sock_net(sk));
2565     return 0;
2566     }
2567    
2568     @@ -1720,7 +1720,8 @@ static int unicast_flush_resp(struct sock *sk, const struct sadb_msg *ihdr)
2569     hdr->sadb_msg_errno = (uint8_t) 0;
2570     hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2571    
2572     - return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
2573     + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
2574     + sock_net(sk));
2575     }
2576    
2577     static int key_notify_sa_flush(const struct km_event *c)
2578     @@ -1741,7 +1742,7 @@ static int key_notify_sa_flush(const struct km_event *c)
2579     hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2580     hdr->sadb_msg_reserved = 0;
2581    
2582     - pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
2583     + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2584    
2585     return 0;
2586     }
2587     @@ -1798,7 +1799,7 @@ static int dump_sa(struct xfrm_state *x, int count, void *ptr)
2588     out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
2589    
2590     if (pfk->dump.skb)
2591     - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
2592     + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
2593     &pfk->sk, sock_net(&pfk->sk));
2594     pfk->dump.skb = out_skb;
2595    
2596     @@ -1886,7 +1887,7 @@ static int pfkey_promisc(struct sock *sk, struct sk_buff *skb, const struct sadb
2597     new_hdr->sadb_msg_errno = 0;
2598     }
2599    
2600     - pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
2601     + pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
2602     return 0;
2603     }
2604    
2605     @@ -2219,7 +2220,7 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, const struct km_ev
2606     out_hdr->sadb_msg_errno = 0;
2607     out_hdr->sadb_msg_seq = c->seq;
2608     out_hdr->sadb_msg_pid = c->portid;
2609     - pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
2610     + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
2611     return 0;
2612    
2613     }
2614     @@ -2439,7 +2440,7 @@ static int key_pol_get_resp(struct sock *sk, struct xfrm_policy *xp, const struc
2615     out_hdr->sadb_msg_errno = 0;
2616     out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
2617     out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
2618     - pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
2619     + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
2620     err = 0;
2621    
2622     out:
2623     @@ -2695,7 +2696,7 @@ static int dump_sp(struct xfrm_policy *xp, int dir, int count, void *ptr)
2624     out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
2625    
2626     if (pfk->dump.skb)
2627     - pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
2628     + pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
2629     &pfk->sk, sock_net(&pfk->sk));
2630     pfk->dump.skb = out_skb;
2631    
2632     @@ -2752,7 +2753,7 @@ static int key_notify_policy_flush(const struct km_event *c)
2633     hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
2634     hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
2635     hdr->sadb_msg_reserved = 0;
2636     - pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
2637     + pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
2638     return 0;
2639    
2640     }
2641     @@ -2814,7 +2815,7 @@ static int pfkey_process(struct sock *sk, struct sk_buff *skb, const struct sadb
2642     void *ext_hdrs[SADB_EXT_MAX];
2643     int err;
2644    
2645     - pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
2646     + pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
2647     BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
2648    
2649     memset(ext_hdrs, 0, sizeof(ext_hdrs));
2650     @@ -3036,7 +3037,8 @@ static int key_notify_sa_expire(struct xfrm_state *x, const struct km_event *c)
2651     out_hdr->sadb_msg_seq = 0;
2652     out_hdr->sadb_msg_pid = 0;
2653    
2654     - pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
2655     + pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
2656     + xs_net(x));
2657     return 0;
2658     }
2659    
2660     @@ -3226,7 +3228,8 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct
2661     xfrm_ctx->ctx_len);
2662     }
2663    
2664     - return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
2665     + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
2666     + xs_net(x));
2667     }
2668    
2669     static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
2670     @@ -3424,7 +3427,8 @@ static int pfkey_send_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
2671     n_port->sadb_x_nat_t_port_port = sport;
2672     n_port->sadb_x_nat_t_port_reserved = 0;
2673    
2674     - return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
2675     + return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
2676     + xs_net(x));
2677     }
2678    
2679     #ifdef CONFIG_NET_KEY_MIGRATE
2680     @@ -3616,7 +3620,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
2681     }
2682    
2683     /* broadcast migrate message to sockets */
2684     - pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
2685     + pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
2686    
2687     return 0;
2688    
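
Every pfkey_broadcast() caller now states its own allocation context: GFP_KERNEL where sleeping is allowed (e.g. syscall paths) and GFP_ATOMIC where it is not (e.g. state notifications and dump completion), because the helper itself cannot tell how it was reached. The plumbing pattern, reduced to a sketch with stand-in gfp_t values:

    typedef unsigned int gfp_t;
    #define GFP_KERNEL 1u   /* caller may sleep: syscall paths        */
    #define GFP_ATOMIC 2u   /* caller must not sleep: timers, softirq */

    static int deliver_copy(const void *msg, gfp_t allocation)
    {
        (void)msg; (void)allocation;   /* clone the message with 'allocation' */
        return 0;
    }

    /* The helper cannot know how it was reached, so the caller, which
     * does know its context, passes the allocation class down. */
    static int broadcast(const void *msg, gfp_t allocation)
    {
        return deliver_copy(msg, allocation);
    }
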
2689     diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
2690     index 5b9c884a452e..dde64c4565d2 100644
2691     --- a/net/netfilter/nf_nat_core.c
2692     +++ b/net/netfilter/nf_nat_core.c
2693     @@ -225,20 +225,21 @@ find_appropriate_src(struct net *net,
2694     .tuple = tuple,
2695     .zone = zone
2696     };
2697     - struct rhlist_head *hl;
2698     + struct rhlist_head *hl, *h;
2699    
2700     hl = rhltable_lookup(&nf_nat_bysource_table, &key,
2701     nf_nat_bysource_params);
2702     - if (!hl)
2703     - return 0;
2704    
2705     - ct = container_of(hl, typeof(*ct), nat_bysource);
2706     + rhl_for_each_entry_rcu(ct, h, hl, nat_bysource) {
2707     + nf_ct_invert_tuplepr(result,
2708     + &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
2709     + result->dst = tuple->dst;
2710    
2711     - nf_ct_invert_tuplepr(result,
2712     - &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
2713     - result->dst = tuple->dst;
2714     + if (in_range(l3proto, l4proto, result, range))
2715     + return 1;
2716     + }
2717    
2718     - return in_range(l3proto, l4proto, result, range);
2719     + return 0;
2720     }
2721    
2722     /* For [FUTURE] fragmentation handling, we want the least-used
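
find_appropriate_src() previously used only the first node returned by rhltable_lookup(), but an rhltable bucket can hold several conntrack entries for the same (source, zone) key; the fix iterates the whole bucket with rhl_for_each_entry_rcu() and succeeds on the first entry that also passes in_range(). The generic shape of lookup-plus-predicate over a duplicate-key bucket, with a toy list and illustrative names:

    struct node {
        int          key;
        int          in_range;         /* stands in for in_range() result */
        struct node *next_same_key;
    };

    /* Walk every entry that shares the key and return the first one that
     * also passes the secondary predicate - not just the bucket head. */
    static struct node *lookup_matching(struct node *bucket_head)
    {
        struct node *n;

        for (n = bucket_head; n; n = n->next_same_key)
            if (n->in_range)
                return n;
        return 0;                      /* no entry satisfied the range */
    }
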
2723     diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
2724     index 4e03f64709bc..05d9f42fc309 100644
2725     --- a/net/openvswitch/actions.c
2726     +++ b/net/openvswitch/actions.c
2727     @@ -1240,6 +1240,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb,
2728     goto out;
2729     }
2730    
2731     + OVS_CB(skb)->acts_origlen = acts->orig_len;
2732     err = do_execute_actions(dp, skb, key,
2733     acts->actions, acts->actions_len);
2734    
2735     diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
2736     index 4d67ea856067..453f806afe6e 100644
2737     --- a/net/openvswitch/datapath.c
2738     +++ b/net/openvswitch/datapath.c
2739     @@ -383,7 +383,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
2740     }
2741    
2742     static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
2743     - unsigned int hdrlen)
2744     + unsigned int hdrlen, int actions_attrlen)
2745     {
2746     size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
2747     + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
2748     @@ -400,7 +400,7 @@ static size_t upcall_msg_size(const struct dp_upcall_info *upcall_info,
2749    
2750     /* OVS_PACKET_ATTR_ACTIONS */
2751     if (upcall_info->actions_len)
2752     - size += nla_total_size(upcall_info->actions_len);
2753     + size += nla_total_size(actions_attrlen);
2754    
2755     /* OVS_PACKET_ATTR_MRU */
2756     if (upcall_info->mru)
2757     @@ -467,7 +467,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
2758     else
2759     hlen = skb->len;
2760    
2761     - len = upcall_msg_size(upcall_info, hlen - cutlen);
2762     + len = upcall_msg_size(upcall_info, hlen - cutlen,
2763     + OVS_CB(skb)->acts_origlen);
2764     user_skb = genlmsg_new(len, GFP_ATOMIC);
2765     if (!user_skb) {
2766     err = -ENOMEM;
2767     diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
2768     index ab85c1cae255..e19ace428e38 100644
2769     --- a/net/openvswitch/datapath.h
2770     +++ b/net/openvswitch/datapath.h
2771     @@ -100,12 +100,14 @@ struct datapath {
2772     * @input_vport: The original vport packet came in on. This value is cached
2773     * when a packet is received by OVS.
2774     * @mru: The maximum received fragment size; 0 if the packet is not
2775     + * @acts_origlen: The netlink size of the flow actions applied to this skb.
2776     * @cutlen: The number of bytes from the packet end to be removed.
2777     * fragmented.
2778     */
2779     struct ovs_skb_cb {
2780     struct vport *input_vport;
2781     u16 mru;
2782     + u16 acts_origlen;
2783     u32 cutlen;
2784     };
2785     #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb)
2786     diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
2787     index a1aec0a6c789..50030519a89b 100644
2788     --- a/net/sched/act_ipt.c
2789     +++ b/net/sched/act_ipt.c
2790     @@ -41,6 +41,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
2791     {
2792     struct xt_tgchk_param par;
2793     struct xt_target *target;
2794     + struct ipt_entry e = {};
2795     int ret = 0;
2796    
2797     target = xt_request_find_target(AF_INET, t->u.user.name,
2798     @@ -51,6 +52,7 @@ static int ipt_init_target(struct xt_entry_target *t, char *table,
2799     t->u.kernel.target = target;
2800     memset(&par, 0, sizeof(par));
2801     par.table = table;
2802     + par.entryinfo = &e;
2803     par.target = target;
2804     par.targinfo = t->data;
2805     par.hook_mask = hook;
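The act_ipt change addresses a NULL dereference: tc's ipt action builds an xt_tgchk_param by hand and left par.entryinfo NULL, while xtables targets are written against iptables, which always supplies a struct ipt_entry there, and some checkentry hooks dereference it unconditionally. Passing a zeroed dummy entry satisfies that contract. The defensive pattern, reduced to plain C with hypothetical type names:

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical miniature of the xt target-check contract. */
    struct ipt_entry_like { unsigned int ip_flags; };
    struct check_param    { const struct ipt_entry_like *entryinfo; };

    /* Callee written for iptables: dereferences entryinfo unconditionally. */
    static int target_check(const struct check_param *par)
    {
        return par->entryinfo->ip_flags == 0;  /* crashes if entryinfo is NULL */
    }

    int main(void)
    {
        struct ipt_entry_like e;
        struct check_param par;

        memset(&e, 0, sizeof(e));      /* the fix: a zeroed dummy entry */
        memset(&par, 0, sizeof(par));
        par.entryinfo = &e;            /* instead of a NULL entryinfo   */

        printf("check: %d\n", target_check(&par));
        return 0;
    }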
2806     diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
2807     index ff27a85a71a9..195a3b2d9afc 100644
2808     --- a/net/sched/sch_api.c
2809     +++ b/net/sched/sch_api.c
2810     @@ -277,9 +277,6 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
2811     void qdisc_hash_add(struct Qdisc *q)
2812     {
2813     if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
2814     - struct Qdisc *root = qdisc_dev(q)->qdisc;
2815     -
2816     - WARN_ON_ONCE(root == &noop_qdisc);
2817     ASSERT_RTNL();
2818     hash_add_rcu(qdisc_dev(q)->qdisc_hash, &q->hash, q->handle);
2819     }
2820     diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2821     index bc5e99584e41..ea8a56f76b32 100644
2822     --- a/net/sched/sch_sfq.c
2823     +++ b/net/sched/sch_sfq.c
2824     @@ -434,6 +434,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
2825     qdisc_drop(head, sch, to_free);
2826    
2827     slot_queue_add(slot, skb);
2828     + qdisc_tree_reduce_backlog(sch, 0, delta);
2829     return NET_XMIT_CN;
2830     }
2831    
2832     @@ -465,8 +466,10 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
2833     /* Return Congestion Notification only if we dropped a packet
2834     * from this flow.
2835     */
2836     - if (qlen != slot->qlen)
2837     + if (qlen != slot->qlen) {
2838     + qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
2839     return NET_XMIT_CN;
2840     + }
2841    
2842     /* As we dropped a packet, better let upper stack know this */
2843     qdisc_tree_reduce_backlog(sch, 1, dropped);
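The sch_sfq hunks fix backlog bookkeeping: sfq can return NET_XMIT_CN after dropping a packet from the same flow as the one just enqueued, and NET_XMIT_CN tells the parent the new packet was not accepted, yet bytes already charged to the ancestors were dropped. The added qdisc_tree_reduce_backlog() calls reconcile the hierarchy by the net byte delta, with zero packets since one went in as one came out. A toy model of that accounting, with invented field names and the parent walk simplified to a single chain:

    #include <stdio.h>

    /* Invented stand-in for a qdisc hierarchy's byte backlog. */
    struct qdisc_like { unsigned int backlog; struct qdisc_like *parent; };

    /* Rough counterpart of qdisc_tree_reduce_backlog(), bytes only. */
    static void tree_reduce_backlog(struct qdisc_like *q, unsigned int bytes)
    {
        for (q = q->parent; q; q = q->parent)
            q->backlog -= bytes;
    }

    int main(void)
    {
        struct qdisc_like root = { 1500, NULL };
        struct qdisc_like sfq  = { 1500, &root };
        unsigned int enq = 200, dropped = 500;

        /* One packet in, one bigger packet dropped: the child is square,
         * but every ancestor still carries the 300-byte difference. */
        sfq.backlog += enq;
        sfq.backlog -= dropped;
        tree_reduce_backlog(&sfq, dropped - enq);

        printf("root backlog: %u (expect 1200)\n", root.backlog);
        return 0;
    }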
2844     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
2845     index 0c090600f377..ca4a63e3eadd 100644
2846     --- a/net/sctp/ipv6.c
2847     +++ b/net/sctp/ipv6.c
2848     @@ -512,7 +512,9 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
2849     {
2850     addr->sa.sa_family = AF_INET6;
2851     addr->v6.sin6_port = port;
2852     + addr->v6.sin6_flowinfo = 0;
2853     addr->v6.sin6_addr = *saddr;
2854     + addr->v6.sin6_scope_id = 0;
2855     }
2856    
2857     /* Compare addresses exactly.
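The sctp hunk cures an uninitialized-data bug: sctp_v6_to_addr() filled in only family, port and address, leaving sin6_flowinfo and sin6_scope_id as whatever happened to be on the stack, so any consumer that compares or copies the whole sockaddr acts on garbage (the kind of thing KMSAN-style checkers flag). The fix initializes every field. A userspace analogue showing two logically equal addresses disagreeing under a whole-struct compare:

    #include <stdio.h>
    #include <string.h>
    #include <netinet/in.h>

    /* Buggy shape: partial init leaves flowinfo/scope_id untouched. */
    static void to_addr_partial(struct sockaddr_in6 *a,
                                const struct in6_addr *sa, unsigned short port)
    {
        a->sin6_family = AF_INET6;
        a->sin6_port   = port;
        a->sin6_addr   = *sa;
    }

    int main(void)
    {
        struct in6_addr ip = IN6ADDR_LOOPBACK_INIT;
        struct sockaddr_in6 a, b;

        memset(&a, 0x00, sizeof(a));
        memset(&b, 0xff, sizeof(b));   /* simulate dirty stack contents */
        to_addr_partial(&a, &ip, 9);
        to_addr_partial(&b, &ip, 9);

        /* Same logical address, but the exact compare sees a mismatch. */
        printf("differs: %d\n", memcmp(&a, &b, sizeof(a)) != 0);
        return 0;
    }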
2858     diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
2859     index a4bc98265d88..266a30c8b88b 100644
2860     --- a/net/sunrpc/svcsock.c
2861     +++ b/net/sunrpc/svcsock.c
2862     @@ -408,6 +408,9 @@ static void svc_data_ready(struct sock *sk)
2863     dprintk("svc: socket %p(inet %p), busy=%d\n",
2864     svsk, sk,
2865     test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
2866     +
2867     + /* Refer to svc_setup_socket() for details. */
2868     + rmb();
2869     svsk->sk_odata(sk);
2870     if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags))
2871     svc_xprt_enqueue(&svsk->sk_xprt);
2872     @@ -424,6 +427,9 @@ static void svc_write_space(struct sock *sk)
2873     if (svsk) {
2874     dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
2875     svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
2876     +
2877     + /* Refer to svc_setup_socket() for details. */
2878     + rmb();
2879     svsk->sk_owspace(sk);
2880     svc_xprt_enqueue(&svsk->sk_xprt);
2881     }
2882     @@ -748,8 +754,12 @@ static void svc_tcp_listen_data_ready(struct sock *sk)
2883     dprintk("svc: socket %p TCP (listen) state change %d\n",
2884     sk, sk->sk_state);
2885    
2886     - if (svsk)
2887     + if (svsk) {
2888     + /* Refer to svc_setup_socket() for details. */
2889     + rmb();
2890     svsk->sk_odata(sk);
2891     + }
2892     +
2893     /*
2894     * This callback may called twice when a new connection
2895     * is established as a child socket inherits everything
2896     @@ -782,6 +792,8 @@ static void svc_tcp_state_change(struct sock *sk)
2897     if (!svsk)
2898     printk("svc: socket %p: no user data\n", sk);
2899     else {
2900     + /* Refer to svc_setup_socket() for details. */
2901     + rmb();
2902     svsk->sk_ostate(sk);
2903     if (sk->sk_state != TCP_ESTABLISHED) {
2904     set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
2905     @@ -1368,12 +1380,18 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
2906     return ERR_PTR(err);
2907     }
2908    
2909     - inet->sk_user_data = svsk;
2910     svsk->sk_sock = sock;
2911     svsk->sk_sk = inet;
2912     svsk->sk_ostate = inet->sk_state_change;
2913     svsk->sk_odata = inet->sk_data_ready;
2914     svsk->sk_owspace = inet->sk_write_space;
2915     + /*
2916     + * This barrier is necessary in order to prevent race condition
2917     + * with svc_data_ready(), svc_listen_data_ready() and others
2918     + * when calling callbacks above.
2919     + */
2920     + wmb();
2921     + inet->sk_user_data = svsk;
2922    
2923     /* Initialize the socket */
2924     if (sock->type == SOCK_DGRAM)
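The sunrpc hunks are a textbook publish/subscribe ordering fix: svc_setup_socket() used to install sk_user_data before saving the original callbacks, so a callback racing in on another CPU could observe the svc_sock while sk_odata/sk_owspace/sk_ostate were still unwritten. The patch publishes the pointer last, behind wmb(), and every reader issues rmb() after fetching it. In portable C11 the same guarantee is usually expressed with release/acquire atomics; a minimal single-threaded sketch of the API under that substitution, with invented names:

    #include <stdatomic.h>
    #include <stdio.h>

    struct svc_sock_like {
        void (*saved_data_ready)(void);   /* stand-in for sk_odata etc. */
    };

    static _Atomic(struct svc_sock_like *) published;

    static void orig_data_ready(void) { puts("original callback"); }

    /* Writer: initialize everything, then publish with release order
     * (plays the role of wmb() followed by the sk_user_data store). */
    static void setup(struct svc_sock_like *s)
    {
        s->saved_data_ready = orig_data_ready;
        atomic_store_explicit(&published, s, memory_order_release);
    }

    /* Reader: the acquire load (playing the role of rmb()) guarantees the
     * fields written before publication are visible here. */
    static void data_ready(void)
    {
        struct svc_sock_like *s =
            atomic_load_explicit(&published, memory_order_acquire);
        if (s)
            s->saved_data_ready();
    }

    int main(void)
    {
        struct svc_sock_like s;
        setup(&s);
        data_ready();
        return 0;
    }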
2925     diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
2926     index 1fd464764765..aedc476fac02 100644
2927     --- a/net/tipc/netlink_compat.c
2928     +++ b/net/tipc/netlink_compat.c
2929     @@ -258,13 +258,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
2930     arg = nlmsg_new(0, GFP_KERNEL);
2931     if (!arg) {
2932     kfree_skb(msg->rep);
2933     + msg->rep = NULL;
2934     return -ENOMEM;
2935     }
2936    
2937     err = __tipc_nl_compat_dumpit(cmd, msg, arg);
2938     - if (err)
2939     + if (err) {
2940     kfree_skb(msg->rep);
2941     -
2942     + msg->rep = NULL;
2943     + }
2944     kfree_skb(arg);
2945    
2946     return err;
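The tipc hunk stops a double free: both error paths in tipc_nl_compat_dumpit() freed msg->rep but left the stale pointer in place, and the caller also frees msg->rep on failure, so the same skb was released twice. Clearing the pointer after the free makes the caller's later kfree_skb() a harmless no-op, since kfree_skb(NULL), like free(NULL), does nothing. The ownership idiom in plain C:

    #include <stdio.h>
    #include <stdlib.h>

    struct msg_like { char *rep; };

    /* Callee: on failure, release rep AND clear the pointer so the
     * caller's cleanup cannot free it a second time. */
    static int dumpit(struct msg_like *m)
    {
        m->rep = malloc(64);
        if (!m->rep)
            return -1;
        /* ... pretend a later step failed ... */
        free(m->rep);
        m->rep = NULL;        /* the fix */
        return -1;
    }

    int main(void)
    {
        struct msg_like m = { NULL };

        if (dumpit(&m) < 0)
            free(m.rep);      /* caller cleanup: free(NULL) is a no-op */
        puts("no double free");
        return 0;
    }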
2947     diff --git a/sound/core/control.c b/sound/core/control.c
2948     index fb096cb20a80..995cde48c1be 100644
2949     --- a/sound/core/control.c
2950     +++ b/sound/core/control.c
2951     @@ -1156,7 +1156,7 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
2952     mutex_lock(&ue->card->user_ctl_lock);
2953     change = ue->tlv_data_size != size;
2954     if (!change)
2955     - change = memcmp(ue->tlv_data, new_data, size);
2956     + change = memcmp(ue->tlv_data, new_data, size) != 0;
2957     kfree(ue->tlv_data);
2958     ue->tlv_data = new_data;
2959     ue->tlv_data_size = size;
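The control.c one-liner normalizes a memcmp() result: memcmp() returns any negative, zero or positive value, and snd_ctl_elem_user_tlv() stores it in change, which ends up as the 0-or-1 "value changed" return of the put handler, so a negative result could propagate as an error code instead of "changed". Comparing against zero first makes it a clean boolean. The hazard in isolation:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char old_tlv[4] = { 1, 2, 3, 4 };
        const char new_tlv[4] = { 9, 2, 3, 4 };

        int change_buggy = memcmp(old_tlv, new_tlv, 4);        /* may be < 0 */
        int change_fixed = memcmp(old_tlv, new_tlv, 4) != 0;   /* always 0/1 */

        /* A caller that treats negative returns as errors misreads the
         * un-normalized value as a failure instead of "changed". */
        printf("buggy: %d  fixed: %d\n", change_buggy, change_fixed);
        return 0;
    }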
2960     diff --git a/sound/firewire/iso-resources.c b/sound/firewire/iso-resources.c
2961     index f0e4d502d604..066b5df666f4 100644
2962     --- a/sound/firewire/iso-resources.c
2963     +++ b/sound/firewire/iso-resources.c
2964     @@ -210,9 +210,14 @@ EXPORT_SYMBOL(fw_iso_resources_update);
2965     */
2966     void fw_iso_resources_free(struct fw_iso_resources *r)
2967     {
2968     - struct fw_card *card = fw_parent_device(r->unit)->card;
2969     + struct fw_card *card;
2970     int bandwidth, channel;
2971    
2972     + /* Not initialized. */
2973     + if (r->unit == NULL)
2974     + return;
2975     + card = fw_parent_device(r->unit)->card;
2976     +
2977     mutex_lock(&r->mutex);
2978    
2979     if (r->allocated) {
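The firewire hunk makes the destructor tolerant of a half-constructed object: fw_iso_resources_free() dereferenced r->unit before checking whether the resource had ever been initialized, which crashed in driver error-unwind paths. Bailing out on the uninitialized state gives it free(NULL)-like semantics. A compact version of the pattern, with an invented resource type:

    #include <stdio.h>
    #include <stdlib.h>

    struct iso_res_like {
        void *unit;        /* NULL until the resource is initialized */
        int   allocated;
    };

    static void res_free(struct iso_res_like *r)
    {
        if (!r->unit)      /* the fix: no-op on a never-initialized one */
            return;
        if (r->allocated)
            puts("releasing bandwidth/channel");
        free(r->unit);
        r->unit = NULL;
    }

    int main(void)
    {
        struct iso_res_like fresh = { NULL, 0 };

        res_free(&fresh);  /* safe no-op instead of a NULL dereference */
        puts("ok");
        return 0;
    }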
2980     diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
2981     index c15c51bea26d..f2e4e99ce651 100644
2982     --- a/sound/pci/hda/patch_conexant.c
2983     +++ b/sound/pci/hda/patch_conexant.c
2984     @@ -854,6 +854,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
2985     SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
2986     SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
2987     SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
2988     + SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
2989     SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
2990     SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
2991     SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
2992     diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
2993     index 95c2749ac8a3..286efc3a6116 100644
2994     --- a/sound/usb/quirks.c
2995     +++ b/sound/usb/quirks.c
2996     @@ -1309,10 +1309,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
2997     && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
2998     mdelay(20);
2999    
3000     - /* Zoom R16/24 needs a tiny delay here, otherwise requests like
3001     - * get/set frequency return as failed despite actually succeeding.
3002     + /* Zoom R16/24, Logitech H650e, Jabra 550a needs a tiny delay here,
3003     + * otherwise requests like get/set frequency return as failed despite
3004     + * actually succeeding.
3005     */
3006     - if (chip->usb_id == USB_ID(0x1686, 0x00dd) &&
3007     + if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
3008     + chip->usb_id == USB_ID(0x046d, 0x0a46) ||
3009     + chip->usb_id == USB_ID(0x0b0e, 0x0349)) &&
3010     (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
3011     mdelay(1);
3012     }
3013     diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
3014     index 6c50d9f8e210..6a6f44dd594b 100644
3015     --- a/tools/perf/util/probe-event.c
3016     +++ b/tools/perf/util/probe-event.c
3017     @@ -163,7 +163,7 @@ static struct map *kernel_get_module_map(const char *module)
3018    
3019     /* A file path -- this is an offline module */
3020     if (module && strchr(module, '/'))
3021     - return machine__findnew_module_map(host_machine, 0, module);
3022     + return dso__new_map(module);
3023    
3024     if (!module)
3025     module = "kernel";
3026     @@ -173,6 +173,7 @@ static struct map *kernel_get_module_map(const char *module)
3027     if (strncmp(pos->dso->short_name + 1, module,
3028     pos->dso->short_name_len - 2) == 0 &&
3029     module[pos->dso->short_name_len - 2] == '\0') {
3030     + map__get(pos);
3031     return pos;
3032     }
3033     }
3034     @@ -188,15 +189,6 @@ struct map *get_target_map(const char *target, bool user)
3035     return kernel_get_module_map(target);
3036     }
3037    
3038     -static void put_target_map(struct map *map, bool user)
3039     -{
3040     - if (map && user) {
3041     - /* Only the user map needs to be released */
3042     - map__put(map);
3043     - }
3044     -}
3045     -
3046     -
3047     static int convert_exec_to_group(const char *exec, char **result)
3048     {
3049     char *ptr1, *ptr2, *exec_copy;
3050     @@ -412,7 +404,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
3051     }
3052    
3053     out:
3054     - put_target_map(map, uprobes);
3055     + map__put(map);
3056     return ret;
3057    
3058     }
3059     @@ -2944,7 +2936,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
3060     }
3061    
3062     out:
3063     - put_target_map(map, pev->uprobes);
3064     + map__put(map);
3065     free(syms);
3066     return ret;
3067    
3068     @@ -3437,10 +3429,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
3069     return ret;
3070    
3071     /* Get a symbol map */
3072     - if (user)
3073     - map = dso__new_map(target);
3074     - else
3075     - map = kernel_get_module_map(target);
3076     + map = get_target_map(target, user);
3077     if (!map) {
3078     pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
3079     return -EINVAL;
3080     @@ -3472,9 +3461,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
3081     }
3082    
3083     end:
3084     - if (user) {
3085     - map__put(map);
3086     - }
3087     + map__put(map);
3088     exit_probe_symbol_maps();
3089    
3090     return ret;
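The perf changes make map ownership uniform: kernel_get_module_map() now takes a reference (map__get()) on the cached module map and returns a freshly created map for offline modules, so get_target_map() always hands back a counted reference and every caller can simply map__put(). That lets the asymmetric put_target_map() helper and the user-versus-kernel special cases go away. A bare-bones sketch of that get/put discipline, with invented names:

    #include <stdio.h>

    struct map_like { int refcnt; };

    /* Getter always returns a counted reference ... */
    static struct map_like *map_get(struct map_like *m)
    {
        m->refcnt++;
        return m;
    }

    /* ... so the caller can put unconditionally, cached or not. */
    static void map_put(struct map_like *m)
    {
        if (--m->refcnt == 0)
            puts("map destroyed");
    }

    int main(void)
    {
        struct map_like cached = { 1 };   /* owned by a global cache */

        struct map_like *m = map_get(&cached);
        /* ... use the map ... */
        map_put(m);                       /* no user-vs-kernel branching */

        printf("cache still holds it: refcnt=%d\n", cached.refcnt);
        return 0;
    }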
3091     diff --git a/tools/testing/selftests/ntb/ntb_test.sh b/tools/testing/selftests/ntb/ntb_test.sh
3092     index a676d3eefefb..b3c48fc6ea4b 100755
3093     --- a/tools/testing/selftests/ntb/ntb_test.sh
3094     +++ b/tools/testing/selftests/ntb/ntb_test.sh
3095     @@ -305,7 +305,7 @@ function perf_test()
3096     echo "Running remote perf test $WITH DMA"
3097     write_file "" $REMOTE_PERF/run
3098     echo -n " "
3099     - read_file $LOCAL_PERF/run
3100     + read_file $REMOTE_PERF/run
3101     echo " Passed"
3102    
3103     _modprobe -r ntb_perf
3104     @@ -326,6 +326,10 @@ function ntb_tool_tests()
3105     link_test $LOCAL_TOOL $REMOTE_TOOL
3106     link_test $REMOTE_TOOL $LOCAL_TOOL
3107    
3108     + #Ensure the link is up on both sides before continuing
3109     + write_file Y $LOCAL_TOOL/link_event
3110     + write_file Y $REMOTE_TOOL/link_event
3111     +
3112     for PEER_TRANS in $(ls $LOCAL_TOOL/peer_trans*); do
3113     PT=$(basename $PEER_TRANS)
3114     write_file $MW_SIZE $LOCAL_TOOL/$PT