Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0136-4.14.37-all-fixes.patch

Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 271556 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
2     index 1699a55b7b70..ef639960b272 100644
3     --- a/Documentation/device-mapper/thin-provisioning.txt
4     +++ b/Documentation/device-mapper/thin-provisioning.txt
5     @@ -112,9 +112,11 @@ $low_water_mark is expressed in blocks of size $data_block_size. If
6     free space on the data device drops below this level then a dm event
7     will be triggered which a userspace daemon should catch allowing it to
8     extend the pool device. Only one such event will be sent.
9     -Resuming a device with a new table itself triggers an event so the
10     -userspace daemon can use this to detect a situation where a new table
11     -already exceeds the threshold.
12     +
13     +No special event is triggered if a just resumed device's free space is below
14     +the low water mark. However, resuming a device always triggers an
15     +event; a userspace daemon should verify that free space exceeds the low
16     +water mark when handling this event.
17    
18     A low water mark for the metadata device is maintained in the kernel and
19     will trigger a dm event if free space on the metadata device drops below
20     diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
21     index e63a35fafef0..0f9089416b4c 100644
22     --- a/Documentation/virtual/kvm/api.txt
23     +++ b/Documentation/virtual/kvm/api.txt
24     @@ -1837,6 +1837,7 @@ registers, find a list below:
25     PPC | KVM_REG_PPC_DBSR | 32
26     PPC | KVM_REG_PPC_TIDR | 64
27     PPC | KVM_REG_PPC_PSSCR | 64
28     + PPC | KVM_REG_PPC_DEC_EXPIRY | 64
29     PPC | KVM_REG_PPC_TM_GPR0 | 64
30     ...
31     PPC | KVM_REG_PPC_TM_GPR31 | 64
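
Editor's note: the register added above is accessed through KVM's generic one-reg interface. A minimal userspace sketch of reading it, assuming a vcpu file descriptor already obtained via the usual /dev/kvm, KVM_CREATE_VM and KVM_CREATE_VCPU sequence (error handling trimmed; the helper name is illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>   /* pulls in asm/kvm.h, where this patch defines the ID */

    static uint64_t read_dec_expiry(int vcpu_fd)
    {
        uint64_t val = 0;
        struct kvm_one_reg reg = {
            .id   = KVM_REG_PPC_DEC_EXPIRY,
            .addr = (uintptr_t)&val,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
            perror("KVM_GET_ONE_REG");
        return val;   /* guest timebase value at which the decrementer expires */
    }
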
32     diff --git a/Makefile b/Makefile
33     index 0a1f941899f4..ee330f5449e6 100644
34     --- a/Makefile
35     +++ b/Makefile
36     @@ -1,7 +1,7 @@
37     # SPDX-License-Identifier: GPL-2.0
38     VERSION = 4
39     PATCHLEVEL = 14
40     -SUBLEVEL = 36
41     +SUBLEVEL = 37
42     EXTRAVERSION =
43     NAME = Petit Gorille
44    
45     diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
46     index 95ad7102b63c..82375b896be5 100644
47     --- a/arch/arm64/include/asm/spinlock.h
48     +++ b/arch/arm64/include/asm/spinlock.h
49     @@ -89,8 +89,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
50     " cbnz %w1, 1f\n"
51     " add %w1, %w0, %3\n"
52     " casa %w0, %w1, %2\n"
53     - " and %w1, %w1, #0xffff\n"
54     - " eor %w1, %w1, %w0, lsr #16\n"
55     + " sub %w1, %w1, %3\n"
56     + " eor %w1, %w1, %w0\n"
57     "1:")
58     : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
59     : "I" (1 << TICKET_SHIFT)
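
Editor's note: this trylock implements a ticket lock, where (per the #0xffff mask and lsr #16 in the removed lines) the low half of the word is the owner ticket and the high half is the next ticket, and the CAS only counts as a lock acquisition if the ticket we took equals the current owner. A plain-C model of that logic, a sketch rather than the kernel code:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define TICKET_SHIFT 16   /* assumed from the asm above */

    static bool ticket_trylock(_Atomic uint32_t *lock)
    {
        uint32_t old = atomic_load_explicit(lock, memory_order_relaxed);
        uint16_t owner = (uint16_t)old;
        uint16_t next  = (uint16_t)(old >> TICKET_SHIFT);

        if (owner != next)           /* someone holds the lock */
            return false;

        uint32_t newval = old + (1u << TICKET_SHIFT);   /* take the next ticket */
        /* corresponds to the casa: succeed only if nobody raced us */
        return atomic_compare_exchange_strong_explicit(lock, &old, newval,
                                                       memory_order_acquire,
                                                       memory_order_relaxed);
    }
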
60     diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
61     index c675eece389a..adce180f3ee4 100644
62     --- a/arch/mips/boot/compressed/Makefile
63     +++ b/arch/mips/boot/compressed/Makefile
64     @@ -133,4 +133,8 @@ vmlinuz.srec: vmlinuz
65     uzImage.bin: vmlinuz.bin FORCE
66     $(call if_changed,uimage,none)
67    
68     -clean-files := $(objtree)/vmlinuz $(objtree)/vmlinuz.{32,ecoff,bin,srec}
69     +clean-files += $(objtree)/vmlinuz
70     +clean-files += $(objtree)/vmlinuz.32
71     +clean-files += $(objtree)/vmlinuz.ecoff
72     +clean-files += $(objtree)/vmlinuz.bin
73     +clean-files += $(objtree)/vmlinuz.srec
74     diff --git a/arch/mips/generic/irq.c b/arch/mips/generic/irq.c
75     index 394f8161e462..cb7fdaeef426 100644
76     --- a/arch/mips/generic/irq.c
77     +++ b/arch/mips/generic/irq.c
78     @@ -22,10 +22,10 @@ int get_c0_fdc_int(void)
79     {
80     int mips_cpu_fdc_irq;
81    
82     - if (cpu_has_veic)
83     - panic("Unimplemented!");
84     - else if (mips_gic_present())
85     + if (mips_gic_present())
86     mips_cpu_fdc_irq = gic_get_c0_fdc_int();
87     + else if (cpu_has_veic)
88     + panic("Unimplemented!");
89     else if (cp0_fdc_irq >= 0)
90     mips_cpu_fdc_irq = MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
91     else
92     @@ -38,10 +38,10 @@ int get_c0_perfcount_int(void)
93     {
94     int mips_cpu_perf_irq;
95    
96     - if (cpu_has_veic)
97     - panic("Unimplemented!");
98     - else if (mips_gic_present())
99     + if (mips_gic_present())
100     mips_cpu_perf_irq = gic_get_c0_perfcount_int();
101     + else if (cpu_has_veic)
102     + panic("Unimplemented!");
103     else if (cp0_perfcount_irq >= 0)
104     mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
105     else
106     @@ -54,10 +54,10 @@ unsigned int get_c0_compare_int(void)
107     {
108     int mips_cpu_timer_irq;
109    
110     - if (cpu_has_veic)
111     - panic("Unimplemented!");
112     - else if (mips_gic_present())
113     + if (mips_gic_present())
114     mips_cpu_timer_irq = gic_get_c0_compare_int();
115     + else if (cpu_has_veic)
116     + panic("Unimplemented!");
117     else
118     mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
119    
120     diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
121     index 8b937300fb7f..fd26fadc8617 100644
122     --- a/arch/mips/txx9/rbtx4939/setup.c
123     +++ b/arch/mips/txx9/rbtx4939/setup.c
124     @@ -186,7 +186,7 @@ static void __init rbtx4939_update_ioc_pen(void)
125    
126     #define RBTX4939_MAX_7SEGLEDS 8
127    
128     -#if IS_ENABLED(CONFIG_LEDS_CLASS)
129     +#if IS_BUILTIN(CONFIG_LEDS_CLASS)
130     static u8 led_val[RBTX4939_MAX_7SEGLEDS];
131     struct rbtx4939_led_data {
132     struct led_classdev cdev;
133     @@ -261,7 +261,7 @@ static inline void rbtx4939_led_setup(void)
134    
135     static void __rbtx4939_7segled_putc(unsigned int pos, unsigned char val)
136     {
137     -#if IS_ENABLED(CONFIG_LEDS_CLASS)
138     +#if IS_BUILTIN(CONFIG_LEDS_CLASS)
139     unsigned long flags;
140     local_irq_save(flags);
141     /* bit7: reserved for LED class */
142     diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
143     index 1fcfa425cefa..f326b40b7c7b 100644
144     --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
145     +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
146     @@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
147    
148     static inline pgd_t *pgd_alloc(struct mm_struct *mm)
149     {
150     + pgd_t *pgd;
151     +
152     if (radix_enabled())
153     return radix__pgd_alloc(mm);
154     - return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
155     - pgtable_gfp_flags(mm, GFP_KERNEL));
156     +
157     + pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
158     + pgtable_gfp_flags(mm, GFP_KERNEL));
159     + memset(pgd, 0, PGD_TABLE_SIZE);
160     +
161     + return pgd;
162     }
163    
164     static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
165     diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
166     index 61d6049f4c1e..8aaec831053a 100644
167     --- a/arch/powerpc/include/uapi/asm/kvm.h
168     +++ b/arch/powerpc/include/uapi/asm/kvm.h
169     @@ -607,6 +607,8 @@ struct kvm_ppc_rmmu_info {
170     #define KVM_REG_PPC_TIDR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbc)
171     #define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
172    
173     +#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
174     +
175     /* Transactional Memory checkpointed state:
176     * This is all GPRs, all VSX regs and a subset of SPRs
177     */
178     diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
179     index 13c9dcdcba69..d17007451f62 100644
180     --- a/arch/powerpc/kernel/traps.c
181     +++ b/arch/powerpc/kernel/traps.c
182     @@ -336,7 +336,7 @@ void system_reset_exception(struct pt_regs *regs)
183     * No debugger or crash dump registered, print logs then
184     * panic.
185     */
186     - __die("System Reset", regs, SIGABRT);
187     + die("System Reset", regs, SIGABRT);
188    
189     mdelay(2*MSEC_PER_SEC); /* Wait a little while for others to print */
190     add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
191     diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
192     index 2645d484e945..df9b53f40b1e 100644
193     --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
194     +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
195     @@ -1348,12 +1348,8 @@ static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize,
196     }
197    
198     new_pteg = hash & new_hash_mask;
199     - if (vpte & HPTE_V_SECONDARY) {
200     - BUG_ON(~pteg != (hash & old_hash_mask));
201     - new_pteg = ~new_pteg;
202     - } else {
203     - BUG_ON(pteg != (hash & old_hash_mask));
204     - }
205     + if (vpte & HPTE_V_SECONDARY)
206     + new_pteg = ~hash & new_hash_mask;
207    
208     new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP);
209     new_hptep = (__be64 *)(new->virt + (new_idx << 4));
210     diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
211     index f48e3379a18a..e094dc90ff1b 100644
212     --- a/arch/powerpc/kvm/book3s_hv.c
213     +++ b/arch/powerpc/kvm/book3s_hv.c
214     @@ -1497,6 +1497,10 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
215     case KVM_REG_PPC_ARCH_COMPAT:
216     *val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
217     break;
218     + case KVM_REG_PPC_DEC_EXPIRY:
219     + *val = get_reg_val(id, vcpu->arch.dec_expires +
220     + vcpu->arch.vcore->tb_offset);
221     + break;
222     default:
223     r = -EINVAL;
224     break;
225     @@ -1724,6 +1728,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
226     case KVM_REG_PPC_ARCH_COMPAT:
227     r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
228     break;
229     + case KVM_REG_PPC_DEC_EXPIRY:
230     + vcpu->arch.dec_expires = set_reg_val(id, *val) -
231     + vcpu->arch.vcore->tb_offset;
232     + break;
233     default:
234     r = -EINVAL;
235     break;
236     diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
237     index 2b02d51d14d8..ecb45361095b 100644
238     --- a/arch/powerpc/kvm/powerpc.c
239     +++ b/arch/powerpc/kvm/powerpc.c
240     @@ -758,7 +758,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
241    
242     hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
243     vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
244     - vcpu->arch.dec_expires = ~(u64)0;
245     + vcpu->arch.dec_expires = get_tb();
246    
247     #ifdef CONFIG_KVM_EXIT_TIMING
248     mutex_init(&vcpu->arch.exit_timing_lock);
249     diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
250     index a81279249bfb..9fead0796364 100644
251     --- a/arch/powerpc/mm/numa.c
252     +++ b/arch/powerpc/mm/numa.c
253     @@ -546,7 +546,7 @@ static int numa_setup_cpu(unsigned long lcpu)
254     nid = of_node_to_nid_single(cpu);
255    
256     out_present:
257     - if (nid < 0 || !node_online(nid))
258     + if (nid < 0 || !node_possible(nid))
259     nid = first_online_node;
260    
261     map_cpu_to_node(lcpu, nid);
262     @@ -887,6 +887,32 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
263     NODE_DATA(nid)->node_spanned_pages = spanned_pages;
264     }
265    
266     +static void __init find_possible_nodes(void)
267     +{
268     + struct device_node *rtas;
269     + u32 numnodes, i;
270     +
271     + if (min_common_depth <= 0)
272     + return;
273     +
274     + rtas = of_find_node_by_path("/rtas");
275     + if (!rtas)
276     + return;
277     +
278     + if (of_property_read_u32_index(rtas,
279     + "ibm,max-associativity-domains",
280     + min_common_depth, &numnodes))
281     + goto out;
282     +
283     + for (i = 0; i < numnodes; i++) {
284     + if (!node_possible(i))
285     + node_set(i, node_possible_map);
286     + }
287     +
288     +out:
289     + of_node_put(rtas);
290     +}
291     +
292     void __init initmem_init(void)
293     {
294     int nid, cpu;
295     @@ -900,12 +926,15 @@ void __init initmem_init(void)
296     memblock_dump_all();
297    
298     /*
299     - * Reduce the possible NUMA nodes to the online NUMA nodes,
300     - * since we do not support node hotplug. This ensures that we
301     - * lower the maximum NUMA node ID to what is actually present.
302     + * Modify the set of possible NUMA nodes to reflect information
303     + * available about the set of online nodes, and the set of nodes
304     + * that we expect to make use of for this platform's affinity
305     + * calculations.
306     */
307     nodes_and(node_possible_map, node_possible_map, node_online_map);
308    
309     + find_possible_nodes();
310     +
311     for_each_online_node(nid) {
312     unsigned long start_pfn, end_pfn;
313    
314     @@ -1246,6 +1275,40 @@ static long vphn_get_associativity(unsigned long cpu,
315     return rc;
316     }
317    
318     +static inline int find_and_online_cpu_nid(int cpu)
319     +{
320     + __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
321     + int new_nid;
322     +
323     + /* Use associativity from first thread for all siblings */
324     + vphn_get_associativity(cpu, associativity);
325     + new_nid = associativity_to_nid(associativity);
326     + if (new_nid < 0 || !node_possible(new_nid))
327     + new_nid = first_online_node;
328     +
329     + if (NODE_DATA(new_nid) == NULL) {
330     +#ifdef CONFIG_MEMORY_HOTPLUG
331     + /*
332     + * Need to ensure that NODE_DATA is initialized for a node from
333     + * available memory (see memblock_alloc_try_nid). If unable to
334     + * init the node, then default to nearest node that has memory
335     + * installed.
336     + */
337     + if (try_online_node(new_nid))
338     + new_nid = first_online_node;
339     +#else
340     + /*
341     + * Default to using the nearest node that has memory installed.
342     + * Otherwise, it would be necessary to patch the kernel MM code
343     + * to deal with more memoryless-node error conditions.
344     + */
345     + new_nid = first_online_node;
346     +#endif
347     + }
348     +
349     + return new_nid;
350     +}
351     +
352     /*
353     * Update the CPU maps and sysfs entries for a single CPU when its NUMA
354     * characteristics change. This function doesn't perform any locking and is
355     @@ -1313,7 +1376,6 @@ int numa_update_cpu_topology(bool cpus_locked)
356     {
357     unsigned int cpu, sibling, changed = 0;
358     struct topology_update_data *updates, *ud;
359     - __be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
360     cpumask_t updated_cpus;
361     struct device *dev;
362     int weight, new_nid, i = 0;
363     @@ -1348,11 +1410,7 @@ int numa_update_cpu_topology(bool cpus_locked)
364     continue;
365     }
366    
367     - /* Use associativity from first thread for all siblings */
368     - vphn_get_associativity(cpu, associativity);
369     - new_nid = associativity_to_nid(associativity);
370     - if (new_nid < 0 || !node_online(new_nid))
371     - new_nid = first_online_node;
372     + new_nid = find_and_online_cpu_nid(cpu);
373    
374     if (new_nid == numa_cpu_lookup_table[cpu]) {
375     cpumask_andnot(&cpu_associativity_changes_mask,
376     diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
377     index b150f4deaccf..6914b289c86b 100644
378     --- a/arch/powerpc/platforms/powernv/opal-imc.c
379     +++ b/arch/powerpc/platforms/powernv/opal-imc.c
380     @@ -126,9 +126,11 @@ static void disable_nest_pmu_counters(void)
381     const struct cpumask *l_cpumask;
382    
383     get_online_cpus();
384     - for_each_online_node(nid) {
385     + for_each_node_with_cpus(nid) {
386     l_cpumask = cpumask_of_node(nid);
387     - cpu = cpumask_first(l_cpumask);
388     + cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
389     + if (cpu >= nr_cpu_ids)
390     + continue;
391     opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
392     get_hard_smp_processor_id(cpu));
393     }
394     diff --git a/arch/s390/include/asm/eadm.h b/arch/s390/include/asm/eadm.h
395     index eb5323161f11..bb63b2afdf6f 100644
396     --- a/arch/s390/include/asm/eadm.h
397     +++ b/arch/s390/include/asm/eadm.h
398     @@ -4,7 +4,7 @@
399    
400     #include <linux/types.h>
401     #include <linux/device.h>
402     -#include <linux/blkdev.h>
403     +#include <linux/blk_types.h>
404    
405     struct arqb {
406     u64 data;
407     diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
408     index 0fa3a788dd20..0bce918db11a 100644
409     --- a/arch/s390/kvm/kvm-s390.c
410     +++ b/arch/s390/kvm/kvm-s390.c
411     @@ -601,7 +601,7 @@ static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
412     case KVM_CAP_S390_GS:
413     r = -EINVAL;
414     mutex_lock(&kvm->lock);
415     - if (atomic_read(&kvm->online_vcpus)) {
416     + if (kvm->created_vcpus) {
417     r = -EBUSY;
418     } else if (test_facility(133)) {
419     set_kvm_facility(kvm->arch.model.fac_mask, 133);
420     @@ -1121,7 +1121,7 @@ static int kvm_s390_set_processor_feat(struct kvm *kvm,
421     return -EINVAL;
422    
423     mutex_lock(&kvm->lock);
424     - if (!atomic_read(&kvm->online_vcpus)) {
425     + if (!kvm->created_vcpus) {
426     bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
427     KVM_S390_VM_CPU_FEAT_NR_BITS);
428     ret = 0;
429     diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
430     index b18b5652e5c5..a74204db759b 100644
431     --- a/arch/s390/kvm/vsie.c
432     +++ b/arch/s390/kvm/vsie.c
433     @@ -31,7 +31,11 @@ struct vsie_page {
434     * the same offset as that in struct sie_page!
435     */
436     struct mcck_volatile_info mcck_info; /* 0x0200 */
437     - /* the pinned originial scb */
438     + /*
439     + * The pinned original scb. Be aware that other VCPUs can modify
440     + * it while we read from it. Values that are used for conditions or
441     + * are reused conditionally, should be accessed via READ_ONCE.
442     + */
443     struct kvm_s390_sie_block *scb_o; /* 0x0218 */
444     /* the shadow gmap in use by the vsie_page */
445     struct gmap *gmap; /* 0x0220 */
446     @@ -143,12 +147,13 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
447     {
448     struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
449     struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
450     - u32 crycb_addr = scb_o->crycbd & 0x7ffffff8U;
451     + const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
452     + const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
453     unsigned long *b1, *b2;
454     u8 ecb3_flags;
455    
456     scb_s->crycbd = 0;
457     - if (!(scb_o->crycbd & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
458     + if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
459     return 0;
460     /* format-1 is supported with message-security-assist extension 3 */
461     if (!test_kvm_facility(vcpu->kvm, 76))
462     @@ -186,12 +191,15 @@ static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
463     {
464     struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
465     struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
466     + /* READ_ONCE does not work on bitfields - use a temporary variable */
467     + const uint32_t __new_ibc = scb_o->ibc;
468     + const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
469     __u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;
470    
471     scb_s->ibc = 0;
472     /* ibc installed in g2 and requested for g3 */
473     - if (vcpu->kvm->arch.model.ibc && (scb_o->ibc & 0x0fffU)) {
474     - scb_s->ibc = scb_o->ibc & 0x0fffU;
475     + if (vcpu->kvm->arch.model.ibc && new_ibc) {
476     + scb_s->ibc = new_ibc;
477     /* takte care of the minimum ibc level of the machine */
478     if (scb_s->ibc < min_ibc)
479     scb_s->ibc = min_ibc;
480     @@ -256,6 +264,10 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
481     {
482     struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
483     struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
484     + /* READ_ONCE does not work on bitfields - use a temporary variable */
485     + const uint32_t __new_prefix = scb_o->prefix;
486     + const uint32_t new_prefix = READ_ONCE(__new_prefix);
487     + const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
488     bool had_tx = scb_s->ecb & ECB_TE;
489     unsigned long new_mso = 0;
490     int rc;
491     @@ -302,14 +314,14 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
492     scb_s->icpua = scb_o->icpua;
493    
494     if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
495     - new_mso = scb_o->mso & 0xfffffffffff00000UL;
496     + new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
497     /* if the hva of the prefix changes, we have to remap the prefix */
498     - if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
499     + if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
500     prefix_unmapped(vsie_page);
501     /* SIE will do mso/msl validity and exception checks for us */
502     scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
503     scb_s->mso = new_mso;
504     - scb_s->prefix = scb_o->prefix;
505     + scb_s->prefix = new_prefix;
506    
507     /* We have to definetly flush the tlb if this scb never ran */
508     if (scb_s->ihcpu != 0xffffU)
509     @@ -321,11 +333,11 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
510     if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
511     scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
512     /* transactional execution */
513     - if (test_kvm_facility(vcpu->kvm, 73)) {
514     + if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
515     /* remap the prefix is tx is toggled on */
516     - if ((scb_o->ecb & ECB_TE) && !had_tx)
517     + if (!had_tx)
518     prefix_unmapped(vsie_page);
519     - scb_s->ecb |= scb_o->ecb & ECB_TE;
520     + scb_s->ecb |= ECB_TE;
521     }
522     /* SIMD */
523     if (test_kvm_facility(vcpu->kvm, 129)) {
524     @@ -544,9 +556,9 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
525     gpa_t gpa;
526     int rc = 0;
527    
528     - gpa = scb_o->scaol & ~0xfUL;
529     + gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
530     if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
531     - gpa |= (u64) scb_o->scaoh << 32;
532     + gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
533     if (gpa) {
534     if (!(gpa & ~0x1fffUL))
535     rc = set_validity_icpt(scb_s, 0x0038U);
536     @@ -566,7 +578,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
537     scb_s->scaol = (u32)(u64)hpa;
538     }
539    
540     - gpa = scb_o->itdba & ~0xffUL;
541     + gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
542     if (gpa && (scb_s->ecb & ECB_TE)) {
543     if (!(gpa & ~0x1fffU)) {
544     rc = set_validity_icpt(scb_s, 0x0080U);
545     @@ -581,7 +593,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
546     scb_s->itdba = hpa;
547     }
548    
549     - gpa = scb_o->gvrd & ~0x1ffUL;
550     + gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
551     if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
552     if (!(gpa & ~0x1fffUL)) {
553     rc = set_validity_icpt(scb_s, 0x1310U);
554     @@ -599,7 +611,7 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
555     scb_s->gvrd = hpa;
556     }
557    
558     - gpa = scb_o->riccbd & ~0x3fUL;
559     + gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
560     if (gpa && (scb_s->ecb3 & ECB3_RI)) {
561     if (!(gpa & ~0x1fffUL)) {
562     rc = set_validity_icpt(scb_s, 0x0043U);
563     @@ -617,8 +629,8 @@ static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
564     if ((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
565     unsigned long sdnxc;
566    
567     - gpa = scb_o->sdnxo & ~0xfUL;
568     - sdnxc = scb_o->sdnxo & 0xfUL;
569     + gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
570     + sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
571     if (!gpa || !(gpa & ~0x1fffUL)) {
572     rc = set_validity_icpt(scb_s, 0x10b0U);
573     goto unpin;
574     @@ -785,7 +797,7 @@ static void retry_vsie_icpt(struct vsie_page *vsie_page)
575     static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
576     {
577     struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
578     - __u32 fac = vsie_page->scb_o->fac & 0x7ffffff8U;
579     + __u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;
580    
581     if (fac && test_kvm_facility(vcpu->kvm, 7)) {
582     retry_vsie_icpt(vsie_page);
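
Editor's note: the vsie hunks above all apply one pattern: a field that other VCPUs can modify concurrently is fetched exactly once into a local, and every later test or reuse works on that snapshot instead of re-reading guest-writable memory (which could yield two different values). A small standalone sketch of the pattern, with an illustrative struct and bit layout rather than KVM's:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct shared_ctl {
        _Atomic uint32_t crycbd;     /* another thread may update this at any time */
    };

    static bool shadow_crycb_addr(struct shared_ctl *o, uint32_t *addr_out)
    {
        /* one load, playing the role of READ_ONCE(scb_o->crycbd) */
        uint32_t crycbd = atomic_load_explicit(&o->crycbd, memory_order_relaxed);

        if (!(crycbd & 0x1))                 /* hypothetical "format supported" bit */
            return false;

        *addr_out = crycbd & 0x7ffffff8u;    /* derived from the same snapshot */
        return true;
    }
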
583     diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
584     index fd9d9bac7cfa..79c3bdaaa0b4 100644
585     --- a/arch/sparc/include/asm/pgtable_64.h
586     +++ b/arch/sparc/include/asm/pgtable_64.h
587     @@ -980,7 +980,7 @@ void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
588     pmd_t *pmd);
589    
590     #define __HAVE_ARCH_PMDP_INVALIDATE
591     -extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
592     +extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
593     pmd_t *pmdp);
594    
595     #define __HAVE_ARCH_PGTABLE_DEPOSIT
596     diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
597     index 4ae86bc0d35c..847ddffbf38a 100644
598     --- a/arch/sparc/mm/tlb.c
599     +++ b/arch/sparc/mm/tlb.c
600     @@ -219,17 +219,28 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
601     }
602     }
603    
604     +static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
605     + unsigned long address, pmd_t *pmdp, pmd_t pmd)
606     +{
607     + pmd_t old;
608     +
609     + do {
610     + old = *pmdp;
611     + } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
612     +
613     + return old;
614     +}
615     +
616     /*
617     * This routine is only called when splitting a THP
618     */
619     -void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
620     +pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
621     pmd_t *pmdp)
622     {
623     - pmd_t entry = *pmdp;
624     -
625     - pmd_val(entry) &= ~_PAGE_VALID;
626     + pmd_t old, entry;
627    
628     - set_pmd_at(vma->vm_mm, address, pmdp, entry);
629     + entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
630     + old = pmdp_establish(vma, address, pmdp, entry);
631     flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
632    
633     /*
634     @@ -240,6 +251,8 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
635     if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
636     !is_huge_zero_page(pmd_page(entry)))
637     (vma->vm_mm)->context.thp_pte_count--;
638     +
639     + return old;
640     }
641    
642     void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
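
Editor's note: the new pmdp_establish() is a cmpxchg loop that installs a replacement value and hands back the one it displaced, which is what lets pmdp_invalidate() now return the old pmd to generic code. A generic C sketch of that shape (the 64-bit slot stands in for sparc's pmd_t; this is not the kernel helper):

    #include <stdatomic.h>
    #include <stdint.h>

    static uint64_t establish(_Atomic uint64_t *slot, uint64_t newval)
    {
        uint64_t old = atomic_load_explicit(slot, memory_order_relaxed);

        /* retry until the slot still holds what we last observed */
        while (!atomic_compare_exchange_weak_explicit(slot, &old, newval,
                                                      memory_order_acq_rel,
                                                      memory_order_relaxed))
            ;   /* a failed CAS refreshes 'old' with the current contents */

        return old;   /* the displaced value, like the pmd_t returned above */
    }
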
643     diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
644     index a0b86cf486e0..2e9d58cc371e 100644
645     --- a/arch/x86/hyperv/hv_init.c
646     +++ b/arch/x86/hyperv/hv_init.c
647     @@ -110,12 +110,19 @@ static int hv_cpu_init(unsigned int cpu)
648     */
649     void hyperv_init(void)
650     {
651     - u64 guest_id;
652     + u64 guest_id, required_msrs;
653     union hv_x64_msr_hypercall_contents hypercall_msr;
654    
655     if (x86_hyper_type != X86_HYPER_MS_HYPERV)
656     return;
657    
658     + /* Absolutely required MSRs */
659     + required_msrs = HV_X64_MSR_HYPERCALL_AVAILABLE |
660     + HV_X64_MSR_VP_INDEX_AVAILABLE;
661     +
662     + if ((ms_hyperv.features & required_msrs) != required_msrs)
663     + return;
664     +
665     /* Allocate percpu VP index */
666     hv_vp_index = kmalloc_array(num_possible_cpus(), sizeof(*hv_vp_index),
667     GFP_KERNEL);
668     diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
669     index 9cc9e1c1e2db..56c9ebac946f 100644
670     --- a/arch/x86/hyperv/mmu.c
671     +++ b/arch/x86/hyperv/mmu.c
672     @@ -137,7 +137,12 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
673     }
674    
675     if (info->mm) {
676     + /*
677     + * AddressSpace argument must match the CR3 with PCID bits
678     + * stripped out.
679     + */
680     flush->address_space = virt_to_phys(info->mm->pgd);
681     + flush->address_space &= CR3_ADDR_MASK;
682     flush->flags = 0;
683     } else {
684     flush->address_space = 0;
685     @@ -219,7 +224,12 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus,
686     }
687    
688     if (info->mm) {
689     + /*
690     + * AddressSpace argument must match the CR3 with PCID bits
691     + * stripped out.
692     + */
693     flush->address_space = virt_to_phys(info->mm->pgd);
694     + flush->address_space &= CR3_ADDR_MASK;
695     flush->flags = 0;
696     } else {
697     flush->address_space = 0;
698     @@ -278,8 +288,6 @@ void hyperv_setup_mmu_ops(void)
699     if (!(ms_hyperv.hints & HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED))
700     return;
701    
702     - setup_clear_cpu_cap(X86_FEATURE_PCID);
703     -
704     if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) {
705     pr_info("Using hypercall for remote TLB flush\n");
706     pv_mmu_ops.flush_tlb_others = hyperv_flush_tlb_others;
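
Editor's note: both flush paths above now strip the PCID bits from the CR3-derived value, since the Hyper-V AddressSpace argument is expected to be the bare page-table base. A one-function sketch of that masking; the constant is assumed to match x86's CR3_ADDR_MASK (the low 12 bits of CR3 carry PCID/flags):

    #include <stdint.h>

    /* assumption: mirrors x86's CR3_ADDR_MASK, i.e. keep only the base-address bits */
    #define CR3_ADDR_MASK 0x7FFFFFFFFFFFF000ull

    static uint64_t hv_address_space(uint64_t cr3)
    {
        return cr3 & CR3_ADDR_MASK;   /* drop PCID and flag bits */
    }
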
707     diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
708     index c8376b40e882..5cdcdbd4d892 100644
709     --- a/arch/x86/include/asm/i8259.h
710     +++ b/arch/x86/include/asm/i8259.h
711     @@ -69,6 +69,11 @@ struct legacy_pic {
712     extern struct legacy_pic *legacy_pic;
713     extern struct legacy_pic null_legacy_pic;
714    
715     +static inline bool has_legacy_pic(void)
716     +{
717     + return legacy_pic != &null_legacy_pic;
718     +}
719     +
720     static inline int nr_legacy_irqs(void)
721     {
722     return legacy_pic->nr_legacy_irqs;
723     diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
724     index 9c2a002d9297..6dda3595acf8 100644
725     --- a/arch/x86/kernel/acpi/boot.c
726     +++ b/arch/x86/kernel/acpi/boot.c
727     @@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end)
728     apic_id = processor->local_apic_id;
729     enabled = processor->lapic_flags & ACPI_MADT_ENABLED;
730    
731     + /* Ignore invalid ID */
732     + if (apic_id == 0xffffffff)
733     + return 0;
734     +
735     /*
736     * We need to register disabled CPU as well to permit
737     * counting disabled CPUs. This allows us to size
738     diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
739     index c0b694810ff4..02cfc615e3fb 100644
740     --- a/arch/x86/kernel/apic/x2apic_uv_x.c
741     +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
742     @@ -1140,16 +1140,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
743    
744     uv_gre_table = gre;
745     for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) {
746     + unsigned long size = ((unsigned long)(gre->limit - lgre)
747     + << UV_GAM_RANGE_SHFT);
748     + int order = 0;
749     + char suffix[] = " KMGTPE";
750     +
751     + while (size > 9999 && order < sizeof(suffix)) {
752     + size /= 1024;
753     + order++;
754     + }
755     +
756     if (!index) {
757     pr_info("UV: GAM Range Table...\n");
758     pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
759     }
760     - pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n",
761     + pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
762     index++,
763     (unsigned long)lgre << UV_GAM_RANGE_SHFT,
764     (unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
765     - ((unsigned long)(gre->limit - lgre)) >>
766     - (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */
767     + size, suffix[order],
768     gre->type, gre->nasid, gre->sockid, gre->pnode);
769    
770     lgre = gre->limit;
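
Editor's note: the table printout now scales each range size into a human-readable unit instead of always shifting to gigabytes. The same scaling loop as a standalone function (the " KMGTPE" table and the 9999 cutoff mirror the hunk; the starting unit, which depends on UV_GAM_RANGE_SHFT, is left out):

    #include <stdio.h>

    static void print_scaled(unsigned long size)
    {
        const char suffix[] = " KMGTPE";
        int order = 0;

        while (size > 9999 && order < (int)sizeof(suffix)) {
            size /= 1024;
            order++;
        }
        printf("%5lu%c\n", size, suffix[order]);
    }
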
771     diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
772     index afbecff161d1..a2d8a3908670 100644
773     --- a/arch/x86/kernel/dumpstack.c
774     +++ b/arch/x86/kernel/dumpstack.c
775     @@ -109,7 +109,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
776     struct stack_info stack_info = {0};
777     unsigned long visit_mask = 0;
778     int graph_idx = 0;
779     - bool partial;
780     + bool partial = false;
781    
782     printk("%sCall Trace:\n", log_lvl);
783    
784     diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
785     index 47506567435e..0bf06fa3027e 100644
786     --- a/arch/x86/kernel/tsc.c
787     +++ b/arch/x86/kernel/tsc.c
788     @@ -25,6 +25,7 @@
789     #include <asm/geode.h>
790     #include <asm/apic.h>
791     #include <asm/intel-family.h>
792     +#include <asm/i8259.h>
793    
794     unsigned int __read_mostly cpu_khz; /* TSC clocks / usec, not used here */
795     EXPORT_SYMBOL(cpu_khz);
796     @@ -316,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
797     hpet2 -= hpet1;
798     tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
799     do_div(tmp, 1000000);
800     - do_div(deltatsc, tmp);
801     + deltatsc = div64_u64(deltatsc, tmp);
802    
803     return (unsigned long) deltatsc;
804     }
805     @@ -363,6 +364,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
806     unsigned long tscmin, tscmax;
807     int pitcnt;
808    
809     + if (!has_legacy_pic()) {
810     + /*
811     + * Relies on tsc_early_delay_calibrate() to have given us semi
812     + * usable udelay(), wait for the same 50ms we would have with
813     + * the PIT loop below.
814     + */
815     + udelay(10 * USEC_PER_MSEC);
816     + udelay(10 * USEC_PER_MSEC);
817     + udelay(10 * USEC_PER_MSEC);
818     + udelay(10 * USEC_PER_MSEC);
819     + udelay(10 * USEC_PER_MSEC);
820     + return ULONG_MAX;
821     + }
822     +
823     /* Set the Gate high, disable speaker */
824     outb((inb(0x61) & ~0x02) | 0x01, 0x61);
825    
826     @@ -487,6 +502,9 @@ static unsigned long quick_pit_calibrate(void)
827     u64 tsc, delta;
828     unsigned long d1, d2;
829    
830     + if (!has_legacy_pic())
831     + return 0;
832     +
833     /* Set the Gate high, disable speaker */
834     outb((inb(0x61) & ~0x02) | 0x01, 0x61);
835    
836     diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
837     index f438e0c4aa8c..43bbece92632 100644
838     --- a/arch/x86/kvm/mmu.c
839     +++ b/arch/x86/kvm/mmu.c
840     @@ -3019,7 +3019,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn)
841     return RET_PF_RETRY;
842     }
843    
844     - return RET_PF_EMULATE;
845     + return -EFAULT;
846     }
847    
848     static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
849     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
850     index ae4803b213d0..bdd84ce4491e 100644
851     --- a/arch/x86/kvm/vmx.c
852     +++ b/arch/x86/kvm/vmx.c
853     @@ -6765,7 +6765,21 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
854     if (!is_guest_mode(vcpu) &&
855     !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
856     trace_kvm_fast_mmio(gpa);
857     - return kvm_skip_emulated_instruction(vcpu);
858     + /*
859     + * Doing kvm_skip_emulated_instruction() depends on undefined
860     + * behavior: Intel's manual doesn't mandate
861     + * VM_EXIT_INSTRUCTION_LEN to be set in VMCS when EPT MISCONFIG
862     + * occurs and while on real hardware it was observed to be set,
863     + * other hypervisors (namely Hyper-V) don't set it, we end up
864     + * advancing IP with some random value. Disable fast mmio when
865     + * running nested and keep it for real hardware in hope that
866     + * VM_EXIT_INSTRUCTION_LEN will always be set correctly.
867     + */
868     + if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
869     + return kvm_skip_emulated_instruction(vcpu);
870     + else
871     + return x86_emulate_instruction(vcpu, gpa, EMULTYPE_SKIP,
872     + NULL, 0) == EMULATE_DONE;
873     }
874    
875     ret = kvm_mmu_page_fault(vcpu, gpa, PFERR_RSVD_MASK, NULL, 0);
876     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
877     index b9afb4784d12..3b2c3aa2cd07 100644
878     --- a/arch/x86/kvm/x86.c
879     +++ b/arch/x86/kvm/x86.c
880     @@ -4225,13 +4225,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
881     mutex_unlock(&kvm->lock);
882     break;
883     case KVM_XEN_HVM_CONFIG: {
884     + struct kvm_xen_hvm_config xhc;
885     r = -EFAULT;
886     - if (copy_from_user(&kvm->arch.xen_hvm_config, argp,
887     - sizeof(struct kvm_xen_hvm_config)))
888     + if (copy_from_user(&xhc, argp, sizeof(xhc)))
889     goto out;
890     r = -EINVAL;
891     - if (kvm->arch.xen_hvm_config.flags)
892     + if (xhc.flags)
893     goto out;
894     + memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc));
895     r = 0;
896     break;
897     }
898     @@ -5698,7 +5699,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
899     * handle watchpoints yet, those would be handled in
900     * the emulate_ops.
901     */
902     - if (kvm_vcpu_check_breakpoint(vcpu, &r))
903     + if (!(emulation_type & EMULTYPE_SKIP) &&
904     + kvm_vcpu_check_breakpoint(vcpu, &r))
905     return r;
906    
907     ctxt->interruptibility = 0;
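
Editor's note: the KVM_XEN_HVM_CONFIG fix stops copying user data straight into kvm->arch: the ioctl payload is copied into a local struct, validated, and only then committed, so a rejected call can no longer leave half-applied state behind. A minimal sketch of that copy/validate/commit shape (plain memcpy stands in for copy_from_user, and the struct is illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <string.h>

    struct cfg { uint32_t flags; uint64_t blob_addr; };

    static int set_cfg(struct cfg *live, const struct cfg *user_copy)
    {
        struct cfg tmp;

        memcpy(&tmp, user_copy, sizeof(tmp));   /* stand-in for copy_from_user() */
        if (tmp.flags)                          /* reject before touching live state */
            return -EINVAL;
        *live = tmp;                            /* commit only validated data */
        return 0;
    }
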
908     diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
909     index fe85d1204db8..642357aff216 100644
910     --- a/arch/x86/mm/init_64.c
911     +++ b/arch/x86/mm/init_64.c
912     @@ -1180,8 +1180,7 @@ void __init mem_init(void)
913     after_bootmem = 1;
914    
915     /* Register memory areas for /proc/kcore */
916     - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
917     - PAGE_SIZE, KCORE_OTHER);
918     + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER);
919    
920     mem_init_print_info(NULL);
921     }
922     diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
923     index c35fdb585c68..afc4ed7b1578 100644
924     --- a/arch/x86/power/hibernate_32.c
925     +++ b/arch/x86/power/hibernate_32.c
926     @@ -145,7 +145,7 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
927     #endif
928     }
929    
930     -int swsusp_arch_resume(void)
931     +asmlinkage int swsusp_arch_resume(void)
932     {
933     int error;
934    
935     diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
936     index f910c514438f..0ef5e5204968 100644
937     --- a/arch/x86/power/hibernate_64.c
938     +++ b/arch/x86/power/hibernate_64.c
939     @@ -174,7 +174,7 @@ static int relocate_restore_code(void)
940     return 0;
941     }
942    
943     -int swsusp_arch_resume(void)
944     +asmlinkage int swsusp_arch_resume(void)
945     {
946     int error;
947    
948     diff --git a/block/bio.c b/block/bio.c
949     index dbaa82c967f4..90f19d7df66c 100644
950     --- a/block/bio.c
951     +++ b/block/bio.c
952     @@ -1893,7 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
953     bio_advance(bio, split->bi_iter.bi_size);
954    
955     if (bio_flagged(bio, BIO_TRACE_COMPLETION))
956     - bio_set_flag(bio, BIO_TRACE_COMPLETION);
957     + bio_set_flag(split, BIO_TRACE_COMPLETION);
958    
959     return split;
960     }
961     diff --git a/block/blk-core.c b/block/blk-core.c
962     index c01f4907dbbc..1feeb1a8aad9 100644
963     --- a/block/blk-core.c
964     +++ b/block/blk-core.c
965     @@ -3065,6 +3065,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
966     {
967     if (bio_has_data(bio))
968     rq->nr_phys_segments = bio_phys_segments(q, bio);
969     + else if (bio_op(bio) == REQ_OP_DISCARD)
970     + rq->nr_phys_segments = 1;
971    
972     rq->__data_len = bio->bi_iter.bi_size;
973     rq->bio = rq->biotail = bio;
974     diff --git a/block/blk-merge.c b/block/blk-merge.c
975     index f5dedd57dff6..8d60a5bbcef9 100644
976     --- a/block/blk-merge.c
977     +++ b/block/blk-merge.c
978     @@ -551,6 +551,24 @@ static bool req_no_special_merge(struct request *req)
979     return !q->mq_ops && req->special;
980     }
981    
982     +static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
983     + struct request *next)
984     +{
985     + unsigned short segments = blk_rq_nr_discard_segments(req);
986     +
987     + if (segments >= queue_max_discard_segments(q))
988     + goto no_merge;
989     + if (blk_rq_sectors(req) + bio_sectors(next->bio) >
990     + blk_rq_get_max_sectors(req, blk_rq_pos(req)))
991     + goto no_merge;
992     +
993     + req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
994     + return true;
995     +no_merge:
996     + req_set_nomerge(q, req);
997     + return false;
998     +}
999     +
1000     static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
1001     struct request *next)
1002     {
1003     @@ -684,9 +702,13 @@ static struct request *attempt_merge(struct request_queue *q,
1004     * If we are allowed to merge, then append bio list
1005     * from next to rq and release next. merge_requests_fn
1006     * will have updated segment counts, update sector
1007     - * counts here.
1008     + * counts here. Handle DISCARDs separately, as they
1009     + * have separate settings.
1010     */
1011     - if (!ll_merge_requests_fn(q, req, next))
1012     + if (req_op(req) == REQ_OP_DISCARD) {
1013     + if (!req_attempt_discard_merge(q, req, next))
1014     + return NULL;
1015     + } else if (!ll_merge_requests_fn(q, req, next))
1016     return NULL;
1017    
1018     /*
1019     @@ -716,7 +738,8 @@ static struct request *attempt_merge(struct request_queue *q,
1020    
1021     req->__data_len += blk_rq_bytes(next);
1022    
1023     - elv_merge_requests(q, req, next);
1024     + if (req_op(req) != REQ_OP_DISCARD)
1025     + elv_merge_requests(q, req, next);
1026    
1027     /*
1028     * 'next' is going away, so update stats accordingly
1029     diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
1030     index de294d775acf..d95439154556 100644
1031     --- a/block/blk-mq-debugfs.c
1032     +++ b/block/blk-mq-debugfs.c
1033     @@ -704,7 +704,11 @@ static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
1034     const struct blk_mq_debugfs_attr *attr = m->private;
1035     void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
1036    
1037     - if (!attr->write)
1038     + /*
1039     + * Attributes that only implement .seq_ops are read-only and 'attr' is
1040     + * the same with 'data' in this case.
1041     + */
1042     + if (attr == data || !attr->write)
1043     return -EPERM;
1044    
1045     return attr->write(data, buf, count, ppos);
1046     diff --git a/block/blk-mq.c b/block/blk-mq.c
1047     index 6f899669cbdd..007f96611364 100644
1048     --- a/block/blk-mq.c
1049     +++ b/block/blk-mq.c
1050     @@ -1143,9 +1143,27 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
1051     /*
1052     * We should be running this queue from one of the CPUs that
1053     * are mapped to it.
1054     + *
1055     + * There are at least two related races now between setting
1056     + * hctx->next_cpu from blk_mq_hctx_next_cpu() and running
1057     + * __blk_mq_run_hw_queue():
1058     + *
1059     + * - hctx->next_cpu is found offline in blk_mq_hctx_next_cpu(),
1060     + * but later it becomes online, then this warning is harmless
1061     + * at all
1062     + *
1063     + * - hctx->next_cpu is found online in blk_mq_hctx_next_cpu(),
1064     + * but later it becomes offline, then the warning can't be
1065     + * triggered, and we depend on blk-mq timeout handler to
1066     + * handle dispatched requests to this hctx
1067     */
1068     - WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1069     - cpu_online(hctx->next_cpu));
1070     + if (!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
1071     + cpu_online(hctx->next_cpu)) {
1072     + printk(KERN_WARNING "run queue from wrong CPU %d, hctx %s\n",
1073     + raw_smp_processor_id(),
1074     + cpumask_empty(hctx->cpumask) ? "inactive": "active");
1075     + dump_stack();
1076     + }
1077    
1078     /*
1079     * We can't run the queue inline with ints disabled. Ensure that
1080     diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
1081     index 032ae44710e5..a2be3fd2c72b 100644
1082     --- a/drivers/acpi/acpi_lpss.c
1083     +++ b/drivers/acpi/acpi_lpss.c
1084     @@ -465,6 +465,8 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
1085     acpi_dev_free_resource_list(&resource_list);
1086    
1087     if (!pdata->mmio_base) {
1088     + /* Avoid acpi_bus_attach() instantiating a pdev for this dev. */
1089     + adev->pnp.type.platform_id = 0;
1090     /* Skip the device, but continue the namespace scan. */
1091     ret = 0;
1092     goto err_out;
1093     diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
1094     index b6d58cc58f5f..f0348e388d01 100644
1095     --- a/drivers/acpi/bus.c
1096     +++ b/drivers/acpi/bus.c
1097     @@ -146,6 +146,12 @@ int acpi_bus_get_status(struct acpi_device *device)
1098     return 0;
1099     }
1100    
1101     + /* Battery devices must have their deps met before calling _STA */
1102     + if (acpi_device_is_battery(device) && device->dep_unmet) {
1103     + acpi_set_device_status(device, 0);
1104     + return 0;
1105     + }
1106     +
1107     status = acpi_bus_get_status_handle(device->handle, &sta);
1108     if (ACPI_FAILURE(status))
1109     return -ENODEV;
1110     diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
1111     index 6adcda057b36..58bc28aff3aa 100644
1112     --- a/drivers/acpi/ec.c
1113     +++ b/drivers/acpi/ec.c
1114     @@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev)
1115     ec->reference_count >= 1)
1116     acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);
1117    
1118     + if (acpi_sleep_no_ec_events())
1119     + acpi_ec_enter_noirq(ec);
1120     +
1121     return 0;
1122     }
1123    
1124     @@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev)
1125     {
1126     struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));
1127    
1128     + if (acpi_sleep_no_ec_events())
1129     + acpi_ec_leave_noirq(ec);
1130     +
1131     if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
1132     ec->reference_count >= 1)
1133     acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
1134     diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
1135     index 18b72eec3507..c7cf48ad5cb9 100644
1136     --- a/drivers/acpi/processor_perflib.c
1137     +++ b/drivers/acpi/processor_perflib.c
1138     @@ -159,7 +159,7 @@ void acpi_processor_ppc_has_changed(struct acpi_processor *pr, int event_flag)
1139     {
1140     int ret;
1141    
1142     - if (ignore_ppc) {
1143     + if (ignore_ppc || !pr->performance) {
1144     /*
1145     * Only when it is notification event, the _OST object
1146     * will be evaluated. Otherwise it is skipped.
1147     diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
1148     index 2f2f50322ffb..c0984d33c4c8 100644
1149     --- a/drivers/acpi/scan.c
1150     +++ b/drivers/acpi/scan.c
1151     @@ -1568,6 +1568,8 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1152     device_initialize(&device->dev);
1153     dev_set_uevent_suppress(&device->dev, true);
1154     acpi_init_coherency(device);
1155     + /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
1156     + device->dep_unmet = 1;
1157     }
1158    
1159     void acpi_device_add_finalize(struct acpi_device *device)
1160     @@ -1591,6 +1593,14 @@ static int acpi_add_single_object(struct acpi_device **child,
1161     }
1162    
1163     acpi_init_device_object(device, handle, type, sta);
1164     + /*
1165     + * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
1166     + * that we can call acpi_bus_get_status() and use its quirk handling.
1167     + * Note this must be done before the get power-/wakeup_dev-flags calls.
1168     + */
1169     + if (type == ACPI_BUS_TYPE_DEVICE)
1170     + acpi_bus_get_status(device);
1171     +
1172     acpi_bus_get_power_flags(device);
1173     acpi_bus_get_wakeup_device_flags(device);
1174    
1175     @@ -1663,9 +1673,11 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1176     return -ENODEV;
1177    
1178     *type = ACPI_BUS_TYPE_DEVICE;
1179     - status = acpi_bus_get_status_handle(handle, sta);
1180     - if (ACPI_FAILURE(status))
1181     - *sta = 0;
1182     + /*
1183     + * acpi_add_single_object updates this once we've an acpi_device
1184     + * so that acpi_bus_get_status' quirk handling can be used.
1185     + */
1186     + *sta = 0;
1187     break;
1188     case ACPI_TYPE_PROCESSOR:
1189     *type = ACPI_BUS_TYPE_PROCESSOR;
1190     @@ -1763,6 +1775,8 @@ static void acpi_device_dep_initialize(struct acpi_device *adev)
1191     acpi_status status;
1192     int i;
1193    
1194     + adev->dep_unmet = 0;
1195     +
1196     if (!acpi_has_method(adev->handle, "_DEP"))
1197     return;
1198    
1199     diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
1200     index 70f8904f46a3..b3b78079aa9f 100644
1201     --- a/drivers/base/power/domain.c
1202     +++ b/drivers/base/power/domain.c
1203     @@ -2206,6 +2206,38 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
1204     return 0;
1205     }
1206    
1207     +static int genpd_iterate_idle_states(struct device_node *dn,
1208     + struct genpd_power_state *states)
1209     +{
1210     + int ret;
1211     + struct of_phandle_iterator it;
1212     + struct device_node *np;
1213     + int i = 0;
1214     +
1215     + ret = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
1216     + if (ret <= 0)
1217     + return ret;
1218     +
1219     + /* Loop over the phandles until all the requested entry is found */
1220     + of_for_each_phandle(&it, ret, dn, "domain-idle-states", NULL, 0) {
1221     + np = it.node;
1222     + if (!of_match_node(idle_state_match, np))
1223     + continue;
1224     + if (states) {
1225     + ret = genpd_parse_state(&states[i], np);
1226     + if (ret) {
1227     + pr_err("Parsing idle state node %pOF failed with err %d\n",
1228     + np, ret);
1229     + of_node_put(np);
1230     + return ret;
1231     + }
1232     + }
1233     + i++;
1234     + }
1235     +
1236     + return i;
1237     +}
1238     +
1239     /**
1240     * of_genpd_parse_idle_states: Return array of idle states for the genpd.
1241     *
1242     @@ -2215,49 +2247,31 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state,
1243     *
1244     * Returns the device states parsed from the OF node. The memory for the states
1245     * is allocated by this function and is the responsibility of the caller to
1246     - * free the memory after use.
1247     + * free the memory after use. If no domain idle states is found it returns
1248     + * -EINVAL and in case of errors, a negative error code.
1249     */
1250     int of_genpd_parse_idle_states(struct device_node *dn,
1251     struct genpd_power_state **states, int *n)
1252     {
1253     struct genpd_power_state *st;
1254     - struct device_node *np;
1255     - int i = 0;
1256     - int err, ret;
1257     - int count;
1258     - struct of_phandle_iterator it;
1259     - const struct of_device_id *match_id;
1260     + int ret;
1261    
1262     - count = of_count_phandle_with_args(dn, "domain-idle-states", NULL);
1263     - if (count <= 0)
1264     - return -EINVAL;
1265     + ret = genpd_iterate_idle_states(dn, NULL);
1266     + if (ret <= 0)
1267     + return ret < 0 ? ret : -EINVAL;
1268    
1269     - st = kcalloc(count, sizeof(*st), GFP_KERNEL);
1270     + st = kcalloc(ret, sizeof(*st), GFP_KERNEL);
1271     if (!st)
1272     return -ENOMEM;
1273    
1274     - /* Loop over the phandles until all the requested entry is found */
1275     - of_for_each_phandle(&it, err, dn, "domain-idle-states", NULL, 0) {
1276     - np = it.node;
1277     - match_id = of_match_node(idle_state_match, np);
1278     - if (!match_id)
1279     - continue;
1280     - ret = genpd_parse_state(&st[i++], np);
1281     - if (ret) {
1282     - pr_err
1283     - ("Parsing idle state node %pOF failed with err %d\n",
1284     - np, ret);
1285     - of_node_put(np);
1286     - kfree(st);
1287     - return ret;
1288     - }
1289     + ret = genpd_iterate_idle_states(dn, st);
1290     + if (ret <= 0) {
1291     + kfree(st);
1292     + return ret < 0 ? ret : -EINVAL;
1293     }
1294    
1295     - *n = i;
1296     - if (!i)
1297     - kfree(st);
1298     - else
1299     - *states = st;
1300     + *states = st;
1301     + *n = ret;
1302    
1303     return 0;
1304     }
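
Editor's note: the rewritten of_genpd_parse_idle_states() calls the same iterator twice: a first pass with a NULL array only counts matching nodes, the allocation is then sized from that count, and a second pass fills it. A generic sketch of that count-then-fill shape (the odd/even test stands in for of_match_node() and the error codes are simplified):

    #include <stdlib.h>

    static int iterate(const int *input, int len, int *out)
    {
        int n = 0;

        for (int i = 0; i < len; i++) {
            if (input[i] % 2)        /* stand-in for "node does not match" */
                continue;
            if (out)
                out[n] = input[i];   /* second pass: fill */
            n++;                     /* both passes: count */
        }
        return n;
    }

    static int parse(const int *input, int len, int **states, int *n)
    {
        int count = iterate(input, len, NULL);
        if (count <= 0)
            return count < 0 ? count : -1;

        int *st = calloc(count, sizeof(*st));
        if (!st)
            return -1;

        *n = iterate(input, len, st);
        *states = st;
        return 0;
    }
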
1305     diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
1306     index ae0429827f31..67c50738834b 100644
1307     --- a/drivers/base/power/wakeirq.c
1308     +++ b/drivers/base/power/wakeirq.c
1309     @@ -323,7 +323,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
1310     return;
1311    
1312     if (device_may_wakeup(wirq->dev)) {
1313     - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
1314     + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
1315     + !pm_runtime_status_suspended(wirq->dev))
1316     enable_irq(wirq->irq);
1317    
1318     enable_irq_wake(wirq->irq);
1319     @@ -345,7 +346,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
1320     if (device_may_wakeup(wirq->dev)) {
1321     disable_irq_wake(wirq->irq);
1322    
1323     - if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
1324     + if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
1325     + !pm_runtime_status_suspended(wirq->dev))
1326     disable_irq_nosync(wirq->irq);
1327     }
1328     }
1329     diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
1330     index b338a4becbf8..845efa0f724f 100644
1331     --- a/drivers/char/ipmi/ipmi_powernv.c
1332     +++ b/drivers/char/ipmi/ipmi_powernv.c
1333     @@ -251,8 +251,9 @@ static int ipmi_powernv_probe(struct platform_device *pdev)
1334     ipmi->irq = opal_event_request(prop);
1335     }
1336    
1337     - if (request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
1338     - "opal-ipmi", ipmi)) {
1339     + rc = request_irq(ipmi->irq, ipmi_opal_event, IRQ_TYPE_LEVEL_HIGH,
1340     + "opal-ipmi", ipmi);
1341     + if (rc) {
1342     dev_warn(dev, "Unable to request irq\n");
1343     goto err_dispose;
1344     }
1345     diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c
1346     index 21bffdcb2f20..557ed25b42e3 100644
1347     --- a/drivers/clocksource/timer-imx-tpm.c
1348     +++ b/drivers/clocksource/timer-imx-tpm.c
1349     @@ -105,7 +105,7 @@ static int tpm_set_next_event(unsigned long delta,
1350     * of writing CNT registers which may cause the min_delta event got
1351     * missed, so we need add a ETIME check here in case it happened.
1352     */
1353     - return (int)((next - now) <= 0) ? -ETIME : 0;
1354     + return (int)(next - now) <= 0 ? -ETIME : 0;
1355     }
1356    
1357     static int tpm_set_state_oneshot(struct clock_event_device *evt)
1358     diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
1359     index 93a0e88bef76..20226d4243f2 100644
1360     --- a/drivers/cpufreq/intel_pstate.c
1361     +++ b/drivers/cpufreq/intel_pstate.c
1362     @@ -779,6 +779,8 @@ static int intel_pstate_hwp_save_state(struct cpufreq_policy *policy)
1363     return 0;
1364     }
1365    
1366     +static void intel_pstate_hwp_enable(struct cpudata *cpudata);
1367     +
1368     static int intel_pstate_resume(struct cpufreq_policy *policy)
1369     {
1370     if (!hwp_active)
1371     @@ -786,6 +788,9 @@ static int intel_pstate_resume(struct cpufreq_policy *policy)
1372    
1373     mutex_lock(&intel_pstate_limits_lock);
1374    
1375     + if (policy->cpu == 0)
1376     + intel_pstate_hwp_enable(all_cpu_data[policy->cpu]);
1377     +
1378     all_cpu_data[policy->cpu]->epp_policy = 0;
1379     intel_pstate_hwp_set(policy->cpu);
1380    
1381     diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
1382     index fe33c199fc1a..143f8bc403b9 100644
1383     --- a/drivers/crypto/Kconfig
1384     +++ b/drivers/crypto/Kconfig
1385     @@ -721,7 +721,6 @@ config CRYPTO_DEV_ARTPEC6
1386     select CRYPTO_HASH
1387     select CRYPTO_SHA1
1388     select CRYPTO_SHA256
1389     - select CRYPTO_SHA384
1390     select CRYPTO_SHA512
1391     help
1392     Enables the driver for the on-chip crypto accelerator
1393     diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
1394     index 8bf89267dc25..d731b413cb2c 100644
1395     --- a/drivers/firewire/ohci.c
1396     +++ b/drivers/firewire/ohci.c
1397     @@ -1130,7 +1130,13 @@ static int context_add_buffer(struct context *ctx)
1398     return -ENOMEM;
1399    
1400     offset = (void *)&desc->buffer - (void *)desc;
1401     - desc->buffer_size = PAGE_SIZE - offset;
1402     + /*
1403     + * Some controllers, like JMicron ones, always issue 0x20-byte DMA reads
1404     + * for descriptors, even 0x10-byte ones. This can cause page faults when
1405     + * an IOMMU is in use and the oversized read crosses a page boundary.
1406     + * Work around this by always leaving at least 0x10 bytes of padding.
1407     + */
1408     + desc->buffer_size = PAGE_SIZE - offset - 0x10;
1409     desc->buffer_bus = bus_addr + offset;
1410     desc->used = 0;
1411    
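As a worked example of the padding arithmetic above (numbers assumed for illustration only): with PAGE_SIZE = 4096 and, say, a 16-byte offset to the descriptor array, the old limit let 0x10-byte descriptors be packed right up to the end of the page, so a controller that always fetches 0x20 bytes for the last descriptor would read past the page and fault under an IOMMU; the new limit of PAGE_SIZE - offset - 0x10 = 4064 keeps at least 0x10 bytes of in-page slack after the final descriptor, so the oversized read stays inside the mapped page.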
1412     diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
1413     index 783041964439..e8db9659a36b 100644
1414     --- a/drivers/firmware/dmi_scan.c
1415     +++ b/drivers/firmware/dmi_scan.c
1416     @@ -18,7 +18,7 @@ EXPORT_SYMBOL_GPL(dmi_kobj);
1417     * of and an antecedent to, SMBIOS, which stands for System
1418     * Management BIOS. See further: http://www.dmtf.org/standards
1419     */
1420     -static const char dmi_empty_string[] = " ";
1421     +static const char dmi_empty_string[] = "";
1422    
1423     static u32 dmi_ver __initdata;
1424     static u32 dmi_len;
1425     @@ -44,25 +44,21 @@ static int dmi_memdev_nr;
1426     static const char * __init dmi_string_nosave(const struct dmi_header *dm, u8 s)
1427     {
1428     const u8 *bp = ((u8 *) dm) + dm->length;
1429     + const u8 *nsp;
1430    
1431     if (s) {
1432     - s--;
1433     - while (s > 0 && *bp) {
1434     + while (--s > 0 && *bp)
1435     bp += strlen(bp) + 1;
1436     - s--;
1437     - }
1438     -
1439     - if (*bp != 0) {
1440     - size_t len = strlen(bp)+1;
1441     - size_t cmp_len = len > 8 ? 8 : len;
1442    
1443     - if (!memcmp(bp, dmi_empty_string, cmp_len))
1444     - return dmi_empty_string;
1445     + /* Strings containing only spaces are considered empty */
1446     + nsp = bp;
1447     + while (*nsp == ' ')
1448     + nsp++;
1449     + if (*nsp != '\0')
1450     return bp;
1451     - }
1452     }
1453    
1454     - return "";
1455     + return dmi_empty_string;
1456     }
1457    
1458     static const char * __init dmi_string(const struct dmi_header *dm, u8 s)
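As an illustrative aside (not part of the patch), here is a minimal standalone C sketch of the "strings containing only spaces are considered empty" rule introduced above; dmi_lookup() is a made-up stand-in for dmi_string_nosave() and the sample strings are invented.

        /* Sketch only: space-only DMI strings collapse to the shared empty string. */
        #include <stdio.h>

        static const char dmi_empty_string[] = "";

        /* Hypothetical stand-in for dmi_string_nosave(): bp points at the string. */
        static const char *dmi_lookup(const char *bp)
        {
                const char *nsp = bp;

                while (*nsp == ' ')             /* skip leading spaces */
                        nsp++;
                if (*nsp != '\0')               /* something other than spaces */
                        return bp;
                return dmi_empty_string;        /* empty or all spaces */
        }

        int main(void)
        {
                printf("[%s]\n", dmi_lookup("   "));          /* prints [] */
                printf("[%s]\n", dmi_lookup(" ACME Corp"));   /* prints [ ACME Corp] */
                return 0;
        }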
1459     diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1460     index 02a50929af67..e7f4fe2848a5 100644
1461     --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1462     +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
1463     @@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
1464     {
1465     uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
1466     ssize_t ret;
1467     + int retry;
1468    
1469     if (type < DRM_DP_DUAL_MODE_TYPE2_DVI)
1470     return 0;
1471    
1472     - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
1473     - &tmds_oen, sizeof(tmds_oen));
1474     - if (ret) {
1475     - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n",
1476     - enable ? "enable" : "disable");
1477     - return ret;
1478     + /*
1479     + * LSPCON adapters in low-power state may ignore the first write, so
1480     + * read back and verify the written value a few times.
1481     + */
1482     + for (retry = 0; retry < 3; retry++) {
1483     + uint8_t tmp;
1484     +
1485     + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
1486     + &tmds_oen, sizeof(tmds_oen));
1487     + if (ret) {
1488     + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
1489     + enable ? "enable" : "disable",
1490     + retry + 1);
1491     + return ret;
1492     + }
1493     +
1494     + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
1495     + &tmp, sizeof(tmp));
1496     + if (ret) {
1497     + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
1498     + enable ? "enabling" : "disabling",
1499     + retry + 1);
1500     + return ret;
1501     + }
1502     +
1503     + if (tmp == tmds_oen)
1504     + return 0;
1505     }
1506    
1507     - return 0;
1508     + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
1509     + enable ? "enabling" : "disabling");
1510     +
1511     + return -EIO;
1512     }
1513     EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output);
1514    
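As an illustrative aside (not part of the patch), the following standalone C sketch mirrors the write/read-back/retry pattern added above for LSPCON adapters; reg_write() and reg_read() are hypothetical stand-ins for drm_dp_dual_mode_write()/drm_dp_dual_mode_read(), and the "sleepy adapter" behaviour is simulated.

        /* Sketch only: retry a register write until a read-back confirms it stuck. */
        #include <stdio.h>
        #include <stdint.h>
        #include <errno.h>

        static uint8_t reg = 0x01;      /* simulated TMDS_OEN, outputs disabled */
        static int dropped_writes = 1;  /* pretend the first write is ignored */

        static int reg_write(uint8_t val)
        {
                if (dropped_writes-- > 0)
                        return 0;       /* I2C ack'd, but adapter ignored it */
                reg = val;
                return 0;
        }

        static int reg_read(uint8_t *val)
        {
                *val = reg;
                return 0;
        }

        static int set_tmds_oen(uint8_t want)
        {
                for (int retry = 0; retry < 3; retry++) {
                        uint8_t tmp;
                        int ret;

                        ret = reg_write(want);
                        if (ret)
                                return ret;     /* real I2C failure: give up */
                        ret = reg_read(&tmp);
                        if (ret)
                                return ret;
                        if (tmp == want)
                                return 0;       /* value latched: success */
                }
                return -EIO;                    /* never latched after 3 tries */
        }

        int main(void)
        {
                printf("set_tmds_oen(0x00) -> %d\n", set_tmds_oen(0x00));
                return 0;
        }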
1515     diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
1516     index 83e88c70272a..9bf4045cd679 100644
1517     --- a/drivers/gpu/drm/i915/gvt/kvmgt.c
1518     +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
1519     @@ -1153,7 +1153,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
1520     return 0;
1521     }
1522    
1523     - return 0;
1524     + return -ENOTTY;
1525     }
1526    
1527     static ssize_t
1528     diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
1529     index 3f818412765c..51411894d2cd 100644
1530     --- a/drivers/gpu/drm/i915/i915_drv.h
1531     +++ b/drivers/gpu/drm/i915/i915_drv.h
1532     @@ -3995,7 +3995,11 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
1533     struct intel_display_error_state *error);
1534    
1535     int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
1536     -int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
1537     +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv, u32 mbox,
1538     + u32 val, int timeout_us);
1539     +#define sandybridge_pcode_write(dev_priv, mbox, val) \
1540     + sandybridge_pcode_write_timeout(dev_priv, mbox, val, 500)
1541     +
1542     int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
1543     u32 reply_mask, u32 reply, int timeout_base_ms);
1544    
1545     diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1546     index de8ca5f1dd2e..4cc9ce4b5b16 100644
1547     --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1548     +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
1549     @@ -722,7 +722,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
1550    
1551     err = radix_tree_insert(handles_vma, handle, vma);
1552     if (unlikely(err)) {
1553     - kfree(lut);
1554     + kmem_cache_free(eb->i915->luts, lut);
1555     goto err_obj;
1556     }
1557    
1558     diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
1559     index 27743be5b768..9240fa79de7c 100644
1560     --- a/drivers/gpu/drm/i915/intel_audio.c
1561     +++ b/drivers/gpu/drm/i915/intel_audio.c
1562     @@ -704,7 +704,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
1563     struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
1564     u32 tmp;
1565    
1566     - if (!IS_GEN9_BC(dev_priv))
1567     + if (!IS_GEN9(dev_priv))
1568     return;
1569    
1570     i915_audio_component_get_power(kdev);
1571     diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c
1572     index 26a8dcd2c549..47ad24229c78 100644
1573     --- a/drivers/gpu/drm/i915/intel_cdclk.c
1574     +++ b/drivers/gpu/drm/i915/intel_cdclk.c
1575     @@ -1289,10 +1289,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
1576     break;
1577     }
1578    
1579     - /* Inform power controller of upcoming frequency change */
1580     mutex_lock(&dev_priv->rps.hw_lock);
1581     - ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
1582     - 0x80000000);
1583     + /*
1584     + * Inform power controller of upcoming frequency change. BSpec
1585     + * requires us to wait up to 150usec, but that leads to timeouts;
1586     + * the 2ms used here is based on experiment.
1587     + */
1588     + ret = sandybridge_pcode_write_timeout(dev_priv,
1589     + HSW_PCODE_DE_WRITE_FREQ_REQ,
1590     + 0x80000000, 2000);
1591     mutex_unlock(&dev_priv->rps.hw_lock);
1592    
1593     if (ret) {
1594     @@ -1323,8 +1328,15 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
1595     I915_WRITE(CDCLK_CTL, val);
1596    
1597     mutex_lock(&dev_priv->rps.hw_lock);
1598     - ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
1599     - DIV_ROUND_UP(cdclk, 25000));
1600     + /*
1601     + * The timeout isn't specified, the 2ms used here is based on
1602     + * experiment.
1603     + * FIXME: Waiting for the request completion could be delayed until
1604     + * the next PCODE request based on BSpec.
1605     + */
1606     + ret = sandybridge_pcode_write_timeout(dev_priv,
1607     + HSW_PCODE_DE_WRITE_FREQ_REQ,
1608     + DIV_ROUND_UP(cdclk, 25000), 2000);
1609     mutex_unlock(&dev_priv->rps.hw_lock);
1610    
1611     if (ret) {
1612     diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1613     index 014e5c08571a..87cccb5f8c5d 100644
1614     --- a/drivers/gpu/drm/i915/intel_pm.c
1615     +++ b/drivers/gpu/drm/i915/intel_pm.c
1616     @@ -8941,8 +8941,8 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
1617     return 0;
1618     }
1619    
1620     -int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
1621     - u32 mbox, u32 val)
1622     +int sandybridge_pcode_write_timeout(struct drm_i915_private *dev_priv,
1623     + u32 mbox, u32 val, int timeout_us)
1624     {
1625     int status;
1626    
1627     @@ -8965,7 +8965,7 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
1628    
1629     if (__intel_wait_for_register_fw(dev_priv,
1630     GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
1631     - 500, 0, NULL)) {
1632     + timeout_us, 0, NULL)) {
1633     DRM_ERROR("timeout waiting for pcode write of 0x%08x to mbox %x to finish for %ps\n",
1634     val, mbox, __builtin_return_address(0));
1635     return -ETIMEDOUT;
1636     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
1637     index 53d01fb00a8b..1dbe593e5960 100644
1638     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
1639     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gf100.fuc3.h
1640     @@ -47,8 +47,8 @@ static uint32_t gf100_pmu_data[] = {
1641     0x00000000,
1642     0x00000000,
1643     0x584d454d,
1644     - 0x00000756,
1645     - 0x00000748,
1646     + 0x00000754,
1647     + 0x00000746,
1648     0x00000000,
1649     0x00000000,
1650     0x00000000,
1651     @@ -69,8 +69,8 @@ static uint32_t gf100_pmu_data[] = {
1652     0x00000000,
1653     0x00000000,
1654     0x46524550,
1655     - 0x0000075a,
1656     0x00000758,
1657     + 0x00000756,
1658     0x00000000,
1659     0x00000000,
1660     0x00000000,
1661     @@ -91,8 +91,8 @@ static uint32_t gf100_pmu_data[] = {
1662     0x00000000,
1663     0x00000000,
1664     0x5f433249,
1665     - 0x00000b8a,
1666     - 0x00000a2d,
1667     + 0x00000b88,
1668     + 0x00000a2b,
1669     0x00000000,
1670     0x00000000,
1671     0x00000000,
1672     @@ -113,8 +113,8 @@ static uint32_t gf100_pmu_data[] = {
1673     0x00000000,
1674     0x00000000,
1675     0x54534554,
1676     - 0x00000bb3,
1677     - 0x00000b8c,
1678     + 0x00000bb1,
1679     + 0x00000b8a,
1680     0x00000000,
1681     0x00000000,
1682     0x00000000,
1683     @@ -135,8 +135,8 @@ static uint32_t gf100_pmu_data[] = {
1684     0x00000000,
1685     0x00000000,
1686     0x454c4449,
1687     - 0x00000bbf,
1688     0x00000bbd,
1689     + 0x00000bbb,
1690     0x00000000,
1691     0x00000000,
1692     0x00000000,
1693     @@ -237,19 +237,19 @@ static uint32_t gf100_pmu_data[] = {
1694     0x000005d3,
1695     0x00000003,
1696     0x00000002,
1697     - 0x0000069d,
1698     + 0x0000069b,
1699     0x00040004,
1700     0x00000000,
1701     - 0x000006b9,
1702     + 0x000006b7,
1703     0x00010005,
1704     0x00000000,
1705     - 0x000006d6,
1706     + 0x000006d4,
1707     0x00010006,
1708     0x00000000,
1709     0x0000065b,
1710     0x00000007,
1711     0x00000000,
1712     - 0x000006e1,
1713     + 0x000006df,
1714     /* 0x03c4: memx_func_tail */
1715     /* 0x03c4: memx_ts_start */
1716     0x00000000,
1717     @@ -1373,432 +1373,432 @@ static uint32_t gf100_pmu_code[] = {
1718     /* 0x065b: memx_func_wait_vblank */
1719     0x9800f840,
1720     0x66b00016,
1721     - 0x130bf400,
1722     + 0x120bf400,
1723     0xf40166b0,
1724     0x0ef4060b,
1725     /* 0x066d: memx_func_wait_vblank_head1 */
1726     - 0x2077f12e,
1727     - 0x070ef400,
1728     -/* 0x0674: memx_func_wait_vblank_head0 */
1729     - 0x000877f1,
1730     -/* 0x0678: memx_func_wait_vblank_0 */
1731     - 0x07c467f1,
1732     - 0xcf0664b6,
1733     - 0x67fd0066,
1734     - 0xf31bf404,
1735     -/* 0x0688: memx_func_wait_vblank_1 */
1736     - 0x07c467f1,
1737     - 0xcf0664b6,
1738     - 0x67fd0066,
1739     - 0xf30bf404,
1740     -/* 0x0698: memx_func_wait_vblank_fini */
1741     - 0xf80410b6,
1742     -/* 0x069d: memx_func_wr32 */
1743     - 0x00169800,
1744     - 0xb6011598,
1745     - 0x60f90810,
1746     - 0xd0fc50f9,
1747     - 0x21f4e0fc,
1748     - 0x0242b640,
1749     - 0xf8e91bf4,
1750     -/* 0x06b9: memx_func_wait */
1751     - 0x2c87f000,
1752     - 0xcf0684b6,
1753     - 0x1e980088,
1754     - 0x011d9800,
1755     - 0x98021c98,
1756     - 0x10b6031b,
1757     - 0xa321f410,
1758     -/* 0x06d6: memx_func_delay */
1759     - 0x1e9800f8,
1760     - 0x0410b600,
1761     - 0xf87e21f4,
1762     -/* 0x06e1: memx_func_train */
1763     -/* 0x06e3: memx_exec */
1764     - 0xf900f800,
1765     - 0xb9d0f9e0,
1766     - 0xb2b902c1,
1767     -/* 0x06ed: memx_exec_next */
1768     - 0x00139802,
1769     - 0xe70410b6,
1770     - 0xe701f034,
1771     - 0xb601e033,
1772     - 0x30f00132,
1773     - 0xde35980c,
1774     - 0x12b855f9,
1775     - 0xe41ef406,
1776     - 0x98f10b98,
1777     - 0xcbbbf20c,
1778     - 0xc4b7f102,
1779     - 0x06b4b607,
1780     - 0xfc00bbcf,
1781     - 0xf5e0fcd0,
1782     - 0xf8033621,
1783     -/* 0x0729: memx_info */
1784     - 0x01c67000,
1785     -/* 0x072f: memx_info_data */
1786     - 0xf10e0bf4,
1787     - 0xf103ccc7,
1788     - 0xf40800b7,
1789     -/* 0x073a: memx_info_train */
1790     - 0xc7f10b0e,
1791     - 0xb7f10bcc,
1792     -/* 0x0742: memx_info_send */
1793     - 0x21f50100,
1794     - 0x00f80336,
1795     -/* 0x0748: memx_recv */
1796     - 0xf401d6b0,
1797     - 0xd6b0980b,
1798     - 0xd80bf400,
1799     -/* 0x0756: memx_init */
1800     - 0x00f800f8,
1801     -/* 0x0758: perf_recv */
1802     -/* 0x075a: perf_init */
1803     + 0x2077f02c,
1804     +/* 0x0673: memx_func_wait_vblank_head0 */
1805     + 0xf0060ef4,
1806     +/* 0x0676: memx_func_wait_vblank_0 */
1807     + 0x67f10877,
1808     + 0x64b607c4,
1809     + 0x0066cf06,
1810     + 0xf40467fd,
1811     +/* 0x0686: memx_func_wait_vblank_1 */
1812     + 0x67f1f31b,
1813     + 0x64b607c4,
1814     + 0x0066cf06,
1815     + 0xf40467fd,
1816     +/* 0x0696: memx_func_wait_vblank_fini */
1817     + 0x10b6f30b,
1818     +/* 0x069b: memx_func_wr32 */
1819     + 0x9800f804,
1820     + 0x15980016,
1821     + 0x0810b601,
1822     + 0x50f960f9,
1823     + 0xe0fcd0fc,
1824     + 0xb64021f4,
1825     + 0x1bf40242,
1826     +/* 0x06b7: memx_func_wait */
1827     + 0xf000f8e9,
1828     + 0x84b62c87,
1829     + 0x0088cf06,
1830     + 0x98001e98,
1831     + 0x1c98011d,
1832     + 0x031b9802,
1833     + 0xf41010b6,
1834     + 0x00f8a321,
1835     +/* 0x06d4: memx_func_delay */
1836     + 0xb6001e98,
1837     + 0x21f40410,
1838     +/* 0x06df: memx_func_train */
1839     + 0xf800f87e,
1840     +/* 0x06e1: memx_exec */
1841     + 0xf9e0f900,
1842     + 0x02c1b9d0,
1843     +/* 0x06eb: memx_exec_next */
1844     + 0x9802b2b9,
1845     + 0x10b60013,
1846     + 0xf034e704,
1847     + 0xe033e701,
1848     + 0x0132b601,
1849     + 0x980c30f0,
1850     + 0x55f9de35,
1851     + 0xf40612b8,
1852     + 0x0b98e41e,
1853     + 0xf20c98f1,
1854     + 0xf102cbbb,
1855     + 0xb607c4b7,
1856     + 0xbbcf06b4,
1857     + 0xfcd0fc00,
1858     + 0x3621f5e0,
1859     +/* 0x0727: memx_info */
1860     + 0x7000f803,
1861     + 0x0bf401c6,
1862     +/* 0x072d: memx_info_data */
1863     + 0xccc7f10e,
1864     + 0x00b7f103,
1865     + 0x0b0ef408,
1866     +/* 0x0738: memx_info_train */
1867     + 0x0bccc7f1,
1868     + 0x0100b7f1,
1869     +/* 0x0740: memx_info_send */
1870     + 0x033621f5,
1871     +/* 0x0746: memx_recv */
1872     + 0xd6b000f8,
1873     + 0x980bf401,
1874     + 0xf400d6b0,
1875     + 0x00f8d80b,
1876     +/* 0x0754: memx_init */
1877     +/* 0x0756: perf_recv */
1878     0x00f800f8,
1879     -/* 0x075c: i2c_drive_scl */
1880     - 0xf40036b0,
1881     - 0x07f1110b,
1882     - 0x04b607e0,
1883     - 0x0001d006,
1884     - 0x00f804bd,
1885     -/* 0x0770: i2c_drive_scl_lo */
1886     - 0x07e407f1,
1887     - 0xd00604b6,
1888     - 0x04bd0001,
1889     -/* 0x077e: i2c_drive_sda */
1890     +/* 0x0758: perf_init */
1891     +/* 0x075a: i2c_drive_scl */
1892     0x36b000f8,
1893     0x110bf400,
1894     0x07e007f1,
1895     0xd00604b6,
1896     - 0x04bd0002,
1897     -/* 0x0792: i2c_drive_sda_lo */
1898     + 0x04bd0001,
1899     +/* 0x076e: i2c_drive_scl_lo */
1900     0x07f100f8,
1901     0x04b607e4,
1902     + 0x0001d006,
1903     + 0x00f804bd,
1904     +/* 0x077c: i2c_drive_sda */
1905     + 0xf40036b0,
1906     + 0x07f1110b,
1907     + 0x04b607e0,
1908     0x0002d006,
1909     0x00f804bd,
1910     -/* 0x07a0: i2c_sense_scl */
1911     - 0xf10132f4,
1912     - 0xb607c437,
1913     - 0x33cf0634,
1914     - 0x0431fd00,
1915     - 0xf4060bf4,
1916     -/* 0x07b6: i2c_sense_scl_done */
1917     - 0x00f80131,
1918     -/* 0x07b8: i2c_sense_sda */
1919     - 0xf10132f4,
1920     - 0xb607c437,
1921     - 0x33cf0634,
1922     - 0x0432fd00,
1923     - 0xf4060bf4,
1924     -/* 0x07ce: i2c_sense_sda_done */
1925     - 0x00f80131,
1926     -/* 0x07d0: i2c_raise_scl */
1927     - 0x47f140f9,
1928     - 0x37f00898,
1929     - 0x5c21f501,
1930     -/* 0x07dd: i2c_raise_scl_wait */
1931     - 0xe8e7f107,
1932     - 0x7e21f403,
1933     - 0x07a021f5,
1934     - 0xb60901f4,
1935     - 0x1bf40142,
1936     -/* 0x07f1: i2c_raise_scl_done */
1937     - 0xf840fcef,
1938     -/* 0x07f5: i2c_start */
1939     - 0xa021f500,
1940     - 0x0d11f407,
1941     - 0x07b821f5,
1942     - 0xf40611f4,
1943     -/* 0x0806: i2c_start_rep */
1944     - 0x37f0300e,
1945     - 0x5c21f500,
1946     - 0x0137f007,
1947     - 0x077e21f5,
1948     - 0xb60076bb,
1949     - 0x50f90465,
1950     - 0xbb046594,
1951     - 0x50bd0256,
1952     - 0xfc0475fd,
1953     - 0xd021f550,
1954     - 0x0464b607,
1955     -/* 0x0833: i2c_start_send */
1956     - 0xf01f11f4,
1957     +/* 0x0790: i2c_drive_sda_lo */
1958     + 0x07e407f1,
1959     + 0xd00604b6,
1960     + 0x04bd0002,
1961     +/* 0x079e: i2c_sense_scl */
1962     + 0x32f400f8,
1963     + 0xc437f101,
1964     + 0x0634b607,
1965     + 0xfd0033cf,
1966     + 0x0bf40431,
1967     + 0x0131f406,
1968     +/* 0x07b4: i2c_sense_scl_done */
1969     +/* 0x07b6: i2c_sense_sda */
1970     + 0x32f400f8,
1971     + 0xc437f101,
1972     + 0x0634b607,
1973     + 0xfd0033cf,
1974     + 0x0bf40432,
1975     + 0x0131f406,
1976     +/* 0x07cc: i2c_sense_sda_done */
1977     +/* 0x07ce: i2c_raise_scl */
1978     + 0x40f900f8,
1979     + 0x089847f1,
1980     + 0xf50137f0,
1981     +/* 0x07db: i2c_raise_scl_wait */
1982     + 0xf1075a21,
1983     + 0xf403e8e7,
1984     + 0x21f57e21,
1985     + 0x01f4079e,
1986     + 0x0142b609,
1987     +/* 0x07ef: i2c_raise_scl_done */
1988     + 0xfcef1bf4,
1989     +/* 0x07f3: i2c_start */
1990     + 0xf500f840,
1991     + 0xf4079e21,
1992     + 0x21f50d11,
1993     + 0x11f407b6,
1994     + 0x300ef406,
1995     +/* 0x0804: i2c_start_rep */
1996     + 0xf50037f0,
1997     + 0xf0075a21,
1998     + 0x21f50137,
1999     + 0x76bb077c,
2000     + 0x0465b600,
2001     + 0x659450f9,
2002     + 0x0256bb04,
2003     + 0x75fd50bd,
2004     + 0xf550fc04,
2005     + 0xb607ce21,
2006     + 0x11f40464,
2007     +/* 0x0831: i2c_start_send */
2008     + 0x0037f01f,
2009     + 0x077c21f5,
2010     + 0x1388e7f1,
2011     + 0xf07e21f4,
2012     0x21f50037,
2013     - 0xe7f1077e,
2014     + 0xe7f1075a,
2015     0x21f41388,
2016     - 0x0037f07e,
2017     - 0x075c21f5,
2018     - 0x1388e7f1,
2019     -/* 0x084f: i2c_start_out */
2020     - 0xf87e21f4,
2021     -/* 0x0851: i2c_stop */
2022     - 0x0037f000,
2023     - 0x075c21f5,
2024     - 0xf50037f0,
2025     - 0xf1077e21,
2026     - 0xf403e8e7,
2027     - 0x37f07e21,
2028     - 0x5c21f501,
2029     - 0x88e7f107,
2030     - 0x7e21f413,
2031     +/* 0x084d: i2c_start_out */
2032     +/* 0x084f: i2c_stop */
2033     + 0xf000f87e,
2034     + 0x21f50037,
2035     + 0x37f0075a,
2036     + 0x7c21f500,
2037     + 0xe8e7f107,
2038     + 0x7e21f403,
2039     0xf50137f0,
2040     - 0xf1077e21,
2041     + 0xf1075a21,
2042     0xf41388e7,
2043     - 0x00f87e21,
2044     -/* 0x0884: i2c_bitw */
2045     - 0x077e21f5,
2046     - 0x03e8e7f1,
2047     - 0xbb7e21f4,
2048     - 0x65b60076,
2049     - 0x9450f904,
2050     - 0x56bb0465,
2051     - 0xfd50bd02,
2052     - 0x50fc0475,
2053     - 0x07d021f5,
2054     - 0xf40464b6,
2055     - 0xe7f11811,
2056     - 0x21f41388,
2057     - 0x0037f07e,
2058     - 0x075c21f5,
2059     - 0x1388e7f1,
2060     -/* 0x08c3: i2c_bitw_out */
2061     - 0xf87e21f4,
2062     -/* 0x08c5: i2c_bitr */
2063     - 0x0137f000,
2064     - 0x077e21f5,
2065     - 0x03e8e7f1,
2066     - 0xbb7e21f4,
2067     - 0x65b60076,
2068     - 0x9450f904,
2069     - 0x56bb0465,
2070     - 0xfd50bd02,
2071     - 0x50fc0475,
2072     - 0x07d021f5,
2073     - 0xf40464b6,
2074     - 0x21f51b11,
2075     - 0x37f007b8,
2076     - 0x5c21f500,
2077     + 0x37f07e21,
2078     + 0x7c21f501,
2079     0x88e7f107,
2080     0x7e21f413,
2081     - 0xf4013cf0,
2082     -/* 0x090a: i2c_bitr_done */
2083     - 0x00f80131,
2084     -/* 0x090c: i2c_get_byte */
2085     - 0xf00057f0,
2086     -/* 0x0912: i2c_get_byte_next */
2087     - 0x54b60847,
2088     - 0x0076bb01,
2089     +/* 0x0882: i2c_bitw */
2090     + 0x21f500f8,
2091     + 0xe7f1077c,
2092     + 0x21f403e8,
2093     + 0x0076bb7e,
2094     0xf90465b6,
2095     0x04659450,
2096     0xbd0256bb,
2097     0x0475fd50,
2098     0x21f550fc,
2099     - 0x64b608c5,
2100     - 0x2b11f404,
2101     - 0xb60553fd,
2102     - 0x1bf40142,
2103     - 0x0137f0d8,
2104     - 0xb60076bb,
2105     - 0x50f90465,
2106     - 0xbb046594,
2107     - 0x50bd0256,
2108     - 0xfc0475fd,
2109     - 0x8421f550,
2110     - 0x0464b608,
2111     -/* 0x095c: i2c_get_byte_done */
2112     -/* 0x095e: i2c_put_byte */
2113     - 0x47f000f8,
2114     -/* 0x0961: i2c_put_byte_next */
2115     - 0x0142b608,
2116     - 0xbb3854ff,
2117     + 0x64b607ce,
2118     + 0x1811f404,
2119     + 0x1388e7f1,
2120     + 0xf07e21f4,
2121     + 0x21f50037,
2122     + 0xe7f1075a,
2123     + 0x21f41388,
2124     +/* 0x08c1: i2c_bitw_out */
2125     +/* 0x08c3: i2c_bitr */
2126     + 0xf000f87e,
2127     + 0x21f50137,
2128     + 0xe7f1077c,
2129     + 0x21f403e8,
2130     + 0x0076bb7e,
2131     + 0xf90465b6,
2132     + 0x04659450,
2133     + 0xbd0256bb,
2134     + 0x0475fd50,
2135     + 0x21f550fc,
2136     + 0x64b607ce,
2137     + 0x1b11f404,
2138     + 0x07b621f5,
2139     + 0xf50037f0,
2140     + 0xf1075a21,
2141     + 0xf41388e7,
2142     + 0x3cf07e21,
2143     + 0x0131f401,
2144     +/* 0x0908: i2c_bitr_done */
2145     +/* 0x090a: i2c_get_byte */
2146     + 0x57f000f8,
2147     + 0x0847f000,
2148     +/* 0x0910: i2c_get_byte_next */
2149     + 0xbb0154b6,
2150     0x65b60076,
2151     0x9450f904,
2152     0x56bb0465,
2153     0xfd50bd02,
2154     0x50fc0475,
2155     - 0x088421f5,
2156     + 0x08c321f5,
2157     0xf40464b6,
2158     - 0x46b03411,
2159     - 0xd81bf400,
2160     - 0xb60076bb,
2161     - 0x50f90465,
2162     - 0xbb046594,
2163     - 0x50bd0256,
2164     - 0xfc0475fd,
2165     - 0xc521f550,
2166     - 0x0464b608,
2167     - 0xbb0f11f4,
2168     - 0x36b00076,
2169     - 0x061bf401,
2170     -/* 0x09b7: i2c_put_byte_done */
2171     - 0xf80132f4,
2172     -/* 0x09b9: i2c_addr */
2173     - 0x0076bb00,
2174     + 0x53fd2b11,
2175     + 0x0142b605,
2176     + 0xf0d81bf4,
2177     + 0x76bb0137,
2178     + 0x0465b600,
2179     + 0x659450f9,
2180     + 0x0256bb04,
2181     + 0x75fd50bd,
2182     + 0xf550fc04,
2183     + 0xb6088221,
2184     +/* 0x095a: i2c_get_byte_done */
2185     + 0x00f80464,
2186     +/* 0x095c: i2c_put_byte */
2187     +/* 0x095f: i2c_put_byte_next */
2188     + 0xb60847f0,
2189     + 0x54ff0142,
2190     + 0x0076bb38,
2191     0xf90465b6,
2192     0x04659450,
2193     0xbd0256bb,
2194     0x0475fd50,
2195     0x21f550fc,
2196     - 0x64b607f5,
2197     - 0x2911f404,
2198     - 0x012ec3e7,
2199     - 0xfd0134b6,
2200     - 0x76bb0553,
2201     + 0x64b60882,
2202     + 0x3411f404,
2203     + 0xf40046b0,
2204     + 0x76bbd81b,
2205     0x0465b600,
2206     0x659450f9,
2207     0x0256bb04,
2208     0x75fd50bd,
2209     0xf550fc04,
2210     - 0xb6095e21,
2211     -/* 0x09fe: i2c_addr_done */
2212     - 0x00f80464,
2213     -/* 0x0a00: i2c_acquire_addr */
2214     - 0xb6f8cec7,
2215     - 0xe0b702e4,
2216     - 0xee980d1c,
2217     -/* 0x0a0f: i2c_acquire */
2218     - 0xf500f800,
2219     - 0xf40a0021,
2220     - 0xd9f00421,
2221     - 0x4021f403,
2222     -/* 0x0a1e: i2c_release */
2223     - 0x21f500f8,
2224     - 0x21f40a00,
2225     - 0x03daf004,
2226     - 0xf84021f4,
2227     -/* 0x0a2d: i2c_recv */
2228     - 0x0132f400,
2229     - 0xb6f8c1c7,
2230     - 0x16b00214,
2231     - 0x3a1ff528,
2232     - 0xf413a001,
2233     - 0x0032980c,
2234     - 0x0ccc13a0,
2235     - 0xf4003198,
2236     - 0xd0f90231,
2237     - 0xd0f9e0f9,
2238     - 0x000067f1,
2239     - 0x100063f1,
2240     - 0xbb016792,
2241     + 0xb608c321,
2242     + 0x11f40464,
2243     + 0x0076bb0f,
2244     + 0xf40136b0,
2245     + 0x32f4061b,
2246     +/* 0x09b5: i2c_put_byte_done */
2247     +/* 0x09b7: i2c_addr */
2248     + 0xbb00f801,
2249     0x65b60076,
2250     0x9450f904,
2251     0x56bb0465,
2252     0xfd50bd02,
2253     0x50fc0475,
2254     - 0x0a0f21f5,
2255     - 0xfc0464b6,
2256     - 0x00d6b0d0,
2257     - 0x00b31bf5,
2258     - 0xbb0057f0,
2259     - 0x65b60076,
2260     - 0x9450f904,
2261     - 0x56bb0465,
2262     - 0xfd50bd02,
2263     - 0x50fc0475,
2264     - 0x09b921f5,
2265     - 0xf50464b6,
2266     - 0xc700d011,
2267     - 0x76bbe0c5,
2268     - 0x0465b600,
2269     - 0x659450f9,
2270     - 0x0256bb04,
2271     - 0x75fd50bd,
2272     - 0xf550fc04,
2273     - 0xb6095e21,
2274     - 0x11f50464,
2275     - 0x57f000ad,
2276     + 0x07f321f5,
2277     + 0xf40464b6,
2278     + 0xc3e72911,
2279     + 0x34b6012e,
2280     + 0x0553fd01,
2281     + 0xb60076bb,
2282     + 0x50f90465,
2283     + 0xbb046594,
2284     + 0x50bd0256,
2285     + 0xfc0475fd,
2286     + 0x5c21f550,
2287     + 0x0464b609,
2288     +/* 0x09fc: i2c_addr_done */
2289     +/* 0x09fe: i2c_acquire_addr */
2290     + 0xcec700f8,
2291     + 0x02e4b6f8,
2292     + 0x0d1ce0b7,
2293     + 0xf800ee98,
2294     +/* 0x0a0d: i2c_acquire */
2295     + 0xfe21f500,
2296     + 0x0421f409,
2297     + 0xf403d9f0,
2298     + 0x00f84021,
2299     +/* 0x0a1c: i2c_release */
2300     + 0x09fe21f5,
2301     + 0xf00421f4,
2302     + 0x21f403da,
2303     +/* 0x0a2b: i2c_recv */
2304     + 0xf400f840,
2305     + 0xc1c70132,
2306     + 0x0214b6f8,
2307     + 0xf52816b0,
2308     + 0xa0013a1f,
2309     + 0x980cf413,
2310     + 0x13a00032,
2311     + 0x31980ccc,
2312     + 0x0231f400,
2313     + 0xe0f9d0f9,
2314     + 0x67f1d0f9,
2315     + 0x63f10000,
2316     + 0x67921000,
2317     0x0076bb01,
2318     0xf90465b6,
2319     0x04659450,
2320     0xbd0256bb,
2321     0x0475fd50,
2322     0x21f550fc,
2323     - 0x64b609b9,
2324     - 0x8a11f504,
2325     + 0x64b60a0d,
2326     + 0xb0d0fc04,
2327     + 0x1bf500d6,
2328     + 0x57f000b3,
2329     0x0076bb00,
2330     0xf90465b6,
2331     0x04659450,
2332     0xbd0256bb,
2333     0x0475fd50,
2334     0x21f550fc,
2335     - 0x64b6090c,
2336     - 0x6a11f404,
2337     - 0xbbe05bcb,
2338     + 0x64b609b7,
2339     + 0xd011f504,
2340     + 0xe0c5c700,
2341     + 0xb60076bb,
2342     + 0x50f90465,
2343     + 0xbb046594,
2344     + 0x50bd0256,
2345     + 0xfc0475fd,
2346     + 0x5c21f550,
2347     + 0x0464b609,
2348     + 0x00ad11f5,
2349     + 0xbb0157f0,
2350     0x65b60076,
2351     0x9450f904,
2352     0x56bb0465,
2353     0xfd50bd02,
2354     0x50fc0475,
2355     - 0x085121f5,
2356     - 0xb90464b6,
2357     - 0x74bd025b,
2358     -/* 0x0b33: i2c_recv_not_rd08 */
2359     - 0xb0430ef4,
2360     - 0x1bf401d6,
2361     - 0x0057f03d,
2362     - 0x09b921f5,
2363     - 0xc73311f4,
2364     - 0x21f5e0c5,
2365     - 0x11f4095e,
2366     - 0x0057f029,
2367     - 0x09b921f5,
2368     - 0xc71f11f4,
2369     - 0x21f5e0b5,
2370     - 0x11f4095e,
2371     - 0x5121f515,
2372     - 0xc774bd08,
2373     - 0x1bf408c5,
2374     - 0x0232f409,
2375     -/* 0x0b73: i2c_recv_not_wr08 */
2376     -/* 0x0b73: i2c_recv_done */
2377     - 0xc7030ef4,
2378     - 0x21f5f8ce,
2379     - 0xe0fc0a1e,
2380     - 0x12f4d0fc,
2381     - 0x027cb90a,
2382     - 0x033621f5,
2383     -/* 0x0b88: i2c_recv_exit */
2384     -/* 0x0b8a: i2c_init */
2385     - 0x00f800f8,
2386     -/* 0x0b8c: test_recv */
2387     - 0x05d817f1,
2388     + 0x09b721f5,
2389     + 0xf50464b6,
2390     + 0xbb008a11,
2391     + 0x65b60076,
2392     + 0x9450f904,
2393     + 0x56bb0465,
2394     + 0xfd50bd02,
2395     + 0x50fc0475,
2396     + 0x090a21f5,
2397     + 0xf40464b6,
2398     + 0x5bcb6a11,
2399     + 0x0076bbe0,
2400     + 0xf90465b6,
2401     + 0x04659450,
2402     + 0xbd0256bb,
2403     + 0x0475fd50,
2404     + 0x21f550fc,
2405     + 0x64b6084f,
2406     + 0x025bb904,
2407     + 0x0ef474bd,
2408     +/* 0x0b31: i2c_recv_not_rd08 */
2409     + 0x01d6b043,
2410     + 0xf03d1bf4,
2411     + 0x21f50057,
2412     + 0x11f409b7,
2413     + 0xe0c5c733,
2414     + 0x095c21f5,
2415     + 0xf02911f4,
2416     + 0x21f50057,
2417     + 0x11f409b7,
2418     + 0xe0b5c71f,
2419     + 0x095c21f5,
2420     + 0xf51511f4,
2421     + 0xbd084f21,
2422     + 0x08c5c774,
2423     + 0xf4091bf4,
2424     + 0x0ef40232,
2425     +/* 0x0b71: i2c_recv_not_wr08 */
2426     +/* 0x0b71: i2c_recv_done */
2427     + 0xf8cec703,
2428     + 0x0a1c21f5,
2429     + 0xd0fce0fc,
2430     + 0xb90a12f4,
2431     + 0x21f5027c,
2432     +/* 0x0b86: i2c_recv_exit */
2433     + 0x00f80336,
2434     +/* 0x0b88: i2c_init */
2435     +/* 0x0b8a: test_recv */
2436     + 0x17f100f8,
2437     + 0x14b605d8,
2438     + 0x0011cf06,
2439     + 0xf10110b6,
2440     + 0xb605d807,
2441     + 0x01d00604,
2442     + 0xf104bd00,
2443     + 0xf1d900e7,
2444     + 0xf5134fe3,
2445     + 0xf8025621,
2446     +/* 0x0bb1: test_init */
2447     + 0x00e7f100,
2448     + 0x5621f508,
2449     +/* 0x0bbb: idle_recv */
2450     + 0xf800f802,
2451     +/* 0x0bbd: idle */
2452     + 0x0031f400,
2453     + 0x05d417f1,
2454     0xcf0614b6,
2455     0x10b60011,
2456     - 0xd807f101,
2457     + 0xd407f101,
2458     0x0604b605,
2459     0xbd0001d0,
2460     - 0x00e7f104,
2461     - 0x4fe3f1d9,
2462     - 0x5621f513,
2463     -/* 0x0bb3: test_init */
2464     - 0xf100f802,
2465     - 0xf50800e7,
2466     - 0xf8025621,
2467     -/* 0x0bbd: idle_recv */
2468     -/* 0x0bbf: idle */
2469     - 0xf400f800,
2470     - 0x17f10031,
2471     - 0x14b605d4,
2472     - 0x0011cf06,
2473     - 0xf10110b6,
2474     - 0xb605d407,
2475     - 0x01d00604,
2476     -/* 0x0bdb: idle_loop */
2477     - 0xf004bd00,
2478     - 0x32f45817,
2479     -/* 0x0be1: idle_proc */
2480     -/* 0x0be1: idle_proc_exec */
2481     - 0xb910f902,
2482     - 0x21f5021e,
2483     - 0x10fc033f,
2484     - 0xf40911f4,
2485     - 0x0ef40231,
2486     -/* 0x0bf5: idle_proc_next */
2487     - 0x5810b6ef,
2488     - 0xf4061fb8,
2489     - 0x02f4e61b,
2490     - 0x0028f4dd,
2491     - 0x00bb0ef4,
2492     +/* 0x0bd9: idle_loop */
2493     + 0x5817f004,
2494     +/* 0x0bdf: idle_proc */
2495     +/* 0x0bdf: idle_proc_exec */
2496     + 0xf90232f4,
2497     + 0x021eb910,
2498     + 0x033f21f5,
2499     + 0x11f410fc,
2500     + 0x0231f409,
2501     +/* 0x0bf3: idle_proc_next */
2502     + 0xb6ef0ef4,
2503     + 0x1fb85810,
2504     + 0xe61bf406,
2505     + 0xf4dd02f4,
2506     + 0x0ef40028,
2507     + 0x000000bb,
2508     0x00000000,
2509     0x00000000,
2510     0x00000000,
2511     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
2512     index c4edbc79e41a..e0222cb832fb 100644
2513     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
2514     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gk208.fuc5.h
2515     @@ -47,8 +47,8 @@ static uint32_t gk208_pmu_data[] = {
2516     0x00000000,
2517     0x00000000,
2518     0x584d454d,
2519     - 0x000005f3,
2520     - 0x000005e5,
2521     + 0x000005ee,
2522     + 0x000005e0,
2523     0x00000000,
2524     0x00000000,
2525     0x00000000,
2526     @@ -69,8 +69,8 @@ static uint32_t gk208_pmu_data[] = {
2527     0x00000000,
2528     0x00000000,
2529     0x46524550,
2530     - 0x000005f7,
2531     - 0x000005f5,
2532     + 0x000005f2,
2533     + 0x000005f0,
2534     0x00000000,
2535     0x00000000,
2536     0x00000000,
2537     @@ -91,8 +91,8 @@ static uint32_t gk208_pmu_data[] = {
2538     0x00000000,
2539     0x00000000,
2540     0x5f433249,
2541     - 0x000009f8,
2542     - 0x000008a2,
2543     + 0x000009f3,
2544     + 0x0000089d,
2545     0x00000000,
2546     0x00000000,
2547     0x00000000,
2548     @@ -113,8 +113,8 @@ static uint32_t gk208_pmu_data[] = {
2549     0x00000000,
2550     0x00000000,
2551     0x54534554,
2552     - 0x00000a16,
2553     - 0x000009fa,
2554     + 0x00000a11,
2555     + 0x000009f5,
2556     0x00000000,
2557     0x00000000,
2558     0x00000000,
2559     @@ -135,8 +135,8 @@ static uint32_t gk208_pmu_data[] = {
2560     0x00000000,
2561     0x00000000,
2562     0x454c4449,
2563     - 0x00000a21,
2564     - 0x00000a1f,
2565     + 0x00000a1c,
2566     + 0x00000a1a,
2567     0x00000000,
2568     0x00000000,
2569     0x00000000,
2570     @@ -234,22 +234,22 @@ static uint32_t gk208_pmu_data[] = {
2571     /* 0x037c: memx_func_next */
2572     0x00000002,
2573     0x00000000,
2574     - 0x000004cf,
2575     + 0x000004cc,
2576     0x00000003,
2577     0x00000002,
2578     - 0x00000546,
2579     + 0x00000541,
2580     0x00040004,
2581     0x00000000,
2582     - 0x00000563,
2583     + 0x0000055e,
2584     0x00010005,
2585     0x00000000,
2586     - 0x0000057d,
2587     + 0x00000578,
2588     0x00010006,
2589     0x00000000,
2590     - 0x00000541,
2591     + 0x0000053c,
2592     0x00000007,
2593     0x00000000,
2594     - 0x00000589,
2595     + 0x00000584,
2596     /* 0x03c4: memx_func_tail */
2597     /* 0x03c4: memx_ts_start */
2598     0x00000000,
2599     @@ -1239,454 +1239,454 @@ static uint32_t gk208_pmu_code[] = {
2600     0x0001f604,
2601     0x00f804bd,
2602     /* 0x045c: memx_func_enter */
2603     - 0x162067f1,
2604     - 0xf55d77f1,
2605     - 0x047e6eb2,
2606     - 0xd8b20000,
2607     - 0xf90487fd,
2608     - 0xfc80f960,
2609     - 0x7ee0fcd0,
2610     - 0x0700002d,
2611     - 0x7e6eb2fe,
2612     + 0x47162046,
2613     + 0x6eb2f55d,
2614     + 0x0000047e,
2615     + 0x87fdd8b2,
2616     + 0xf960f904,
2617     + 0xfcd0fc80,
2618     + 0x002d7ee0,
2619     + 0xb2fe0700,
2620     + 0x00047e6e,
2621     + 0xfdd8b200,
2622     + 0x60f90487,
2623     + 0xd0fc80f9,
2624     + 0x2d7ee0fc,
2625     + 0xf0460000,
2626     + 0x7e6eb226,
2627     0xb2000004,
2628     0x0487fdd8,
2629     0x80f960f9,
2630     0xe0fcd0fc,
2631     0x00002d7e,
2632     - 0x26f067f1,
2633     - 0x047e6eb2,
2634     - 0xd8b20000,
2635     - 0xf90487fd,
2636     - 0xfc80f960,
2637     - 0x7ee0fcd0,
2638     - 0x0600002d,
2639     - 0x07e04004,
2640     - 0xbd0006f6,
2641     -/* 0x04b9: memx_func_enter_wait */
2642     - 0x07c04604,
2643     - 0xf00066cf,
2644     - 0x0bf40464,
2645     - 0xcf2c06f7,
2646     - 0x06b50066,
2647     -/* 0x04cf: memx_func_leave */
2648     - 0x0600f8f1,
2649     - 0x0066cf2c,
2650     - 0x06f206b5,
2651     - 0x07e44004,
2652     - 0xbd0006f6,
2653     -/* 0x04e1: memx_func_leave_wait */
2654     - 0x07c04604,
2655     - 0xf00066cf,
2656     - 0x1bf40464,
2657     - 0xf067f1f7,
2658     + 0xe0400406,
2659     + 0x0006f607,
2660     +/* 0x04b6: memx_func_enter_wait */
2661     + 0xc04604bd,
2662     + 0x0066cf07,
2663     + 0xf40464f0,
2664     + 0x2c06f70b,
2665     + 0xb50066cf,
2666     + 0x00f8f106,
2667     +/* 0x04cc: memx_func_leave */
2668     + 0x66cf2c06,
2669     + 0xf206b500,
2670     + 0xe4400406,
2671     + 0x0006f607,
2672     +/* 0x04de: memx_func_leave_wait */
2673     + 0xc04604bd,
2674     + 0x0066cf07,
2675     + 0xf40464f0,
2676     + 0xf046f71b,
2677     0xb2010726,
2678     0x00047e6e,
2679     0xfdd8b200,
2680     0x60f90587,
2681     0xd0fc80f9,
2682     0x2d7ee0fc,
2683     - 0x67f10000,
2684     - 0x6eb21620,
2685     - 0x0000047e,
2686     - 0x87fdd8b2,
2687     - 0xf960f905,
2688     - 0xfcd0fc80,
2689     - 0x002d7ee0,
2690     - 0x0aa24700,
2691     - 0x047e6eb2,
2692     - 0xd8b20000,
2693     - 0xf90587fd,
2694     - 0xfc80f960,
2695     - 0x7ee0fcd0,
2696     - 0xf800002d,
2697     -/* 0x0541: memx_func_wait_vblank */
2698     + 0x20460000,
2699     + 0x7e6eb216,
2700     + 0xb2000004,
2701     + 0x0587fdd8,
2702     + 0x80f960f9,
2703     + 0xe0fcd0fc,
2704     + 0x00002d7e,
2705     + 0xb20aa247,
2706     + 0x00047e6e,
2707     + 0xfdd8b200,
2708     + 0x60f90587,
2709     + 0xd0fc80f9,
2710     + 0x2d7ee0fc,
2711     + 0x00f80000,
2712     +/* 0x053c: memx_func_wait_vblank */
2713     + 0xf80410b6,
2714     +/* 0x0541: memx_func_wr32 */
2715     + 0x00169800,
2716     + 0xb6011598,
2717     + 0x60f90810,
2718     + 0xd0fc50f9,
2719     + 0x2d7ee0fc,
2720     + 0x42b60000,
2721     + 0xe81bf402,
2722     +/* 0x055e: memx_func_wait */
2723     + 0x2c0800f8,
2724     + 0x980088cf,
2725     + 0x1d98001e,
2726     + 0x021c9801,
2727     + 0xb6031b98,
2728     + 0x747e1010,
2729     + 0x00f80000,
2730     +/* 0x0578: memx_func_delay */
2731     + 0xb6001e98,
2732     + 0x587e0410,
2733     + 0x00f80000,
2734     +/* 0x0584: memx_func_train */
2735     +/* 0x0586: memx_exec */
2736     + 0xe0f900f8,
2737     + 0xc1b2d0f9,
2738     +/* 0x058e: memx_exec_next */
2739     + 0x1398b2b2,
2740     0x0410b600,
2741     -/* 0x0546: memx_func_wr32 */
2742     - 0x169800f8,
2743     - 0x01159800,
2744     - 0xf90810b6,
2745     - 0xfc50f960,
2746     + 0x01f034e7,
2747     + 0x01e033e7,
2748     + 0xf00132b6,
2749     + 0x35980c30,
2750     + 0xa655f9de,
2751     + 0xe51ef412,
2752     + 0x98f10b98,
2753     + 0xcbbbf20c,
2754     + 0x07c44b02,
2755     + 0xfc00bbcf,
2756     0x7ee0fcd0,
2757     - 0xb600002d,
2758     - 0x1bf40242,
2759     -/* 0x0563: memx_func_wait */
2760     - 0x0800f8e8,
2761     - 0x0088cf2c,
2762     - 0x98001e98,
2763     - 0x1c98011d,
2764     - 0x031b9802,
2765     - 0x7e1010b6,
2766     - 0xf8000074,
2767     -/* 0x057d: memx_func_delay */
2768     - 0x001e9800,
2769     - 0x7e0410b6,
2770     - 0xf8000058,
2771     -/* 0x0589: memx_func_train */
2772     -/* 0x058b: memx_exec */
2773     - 0xf900f800,
2774     - 0xb2d0f9e0,
2775     -/* 0x0593: memx_exec_next */
2776     - 0x98b2b2c1,
2777     - 0x10b60013,
2778     - 0xf034e704,
2779     - 0xe033e701,
2780     - 0x0132b601,
2781     - 0x980c30f0,
2782     - 0x55f9de35,
2783     - 0x1ef412a6,
2784     - 0xf10b98e5,
2785     - 0xbbf20c98,
2786     - 0xc44b02cb,
2787     - 0x00bbcf07,
2788     - 0xe0fcd0fc,
2789     - 0x00029f7e,
2790     -/* 0x05ca: memx_info */
2791     - 0xc67000f8,
2792     - 0x0c0bf401,
2793     -/* 0x05d0: memx_info_data */
2794     - 0x4b03cc4c,
2795     - 0x0ef40800,
2796     -/* 0x05d9: memx_info_train */
2797     - 0x0bcc4c09,
2798     -/* 0x05df: memx_info_send */
2799     - 0x7e01004b,
2800     0xf800029f,
2801     -/* 0x05e5: memx_recv */
2802     - 0x01d6b000,
2803     - 0xb0a30bf4,
2804     - 0x0bf400d6,
2805     -/* 0x05f3: memx_init */
2806     - 0xf800f8dc,
2807     -/* 0x05f5: perf_recv */
2808     -/* 0x05f7: perf_init */
2809     - 0xf800f800,
2810     -/* 0x05f9: i2c_drive_scl */
2811     - 0x0036b000,
2812     - 0x400d0bf4,
2813     - 0x01f607e0,
2814     - 0xf804bd00,
2815     -/* 0x0609: i2c_drive_scl_lo */
2816     - 0x07e44000,
2817     - 0xbd0001f6,
2818     -/* 0x0613: i2c_drive_sda */
2819     - 0xb000f804,
2820     - 0x0bf40036,
2821     - 0x07e0400d,
2822     - 0xbd0002f6,
2823     -/* 0x0623: i2c_drive_sda_lo */
2824     - 0x4000f804,
2825     - 0x02f607e4,
2826     - 0xf804bd00,
2827     -/* 0x062d: i2c_sense_scl */
2828     - 0x0132f400,
2829     - 0xcf07c443,
2830     - 0x31fd0033,
2831     - 0x060bf404,
2832     -/* 0x063f: i2c_sense_scl_done */
2833     - 0xf80131f4,
2834     -/* 0x0641: i2c_sense_sda */
2835     - 0x0132f400,
2836     - 0xcf07c443,
2837     - 0x32fd0033,
2838     - 0x060bf404,
2839     -/* 0x0653: i2c_sense_sda_done */
2840     - 0xf80131f4,
2841     -/* 0x0655: i2c_raise_scl */
2842     - 0x4440f900,
2843     - 0x01030898,
2844     - 0x0005f97e,
2845     -/* 0x0660: i2c_raise_scl_wait */
2846     - 0x7e03e84e,
2847     - 0x7e000058,
2848     - 0xf400062d,
2849     - 0x42b60901,
2850     - 0xef1bf401,
2851     -/* 0x0674: i2c_raise_scl_done */
2852     - 0x00f840fc,
2853     -/* 0x0678: i2c_start */
2854     - 0x00062d7e,
2855     - 0x7e0d11f4,
2856     - 0xf4000641,
2857     - 0x0ef40611,
2858     -/* 0x0689: i2c_start_rep */
2859     - 0x7e00032e,
2860     - 0x030005f9,
2861     - 0x06137e01,
2862     +/* 0x05c5: memx_info */
2863     + 0x01c67000,
2864     +/* 0x05cb: memx_info_data */
2865     + 0x4c0c0bf4,
2866     + 0x004b03cc,
2867     + 0x090ef408,
2868     +/* 0x05d4: memx_info_train */
2869     + 0x4b0bcc4c,
2870     +/* 0x05da: memx_info_send */
2871     + 0x9f7e0100,
2872     + 0x00f80002,
2873     +/* 0x05e0: memx_recv */
2874     + 0xf401d6b0,
2875     + 0xd6b0a30b,
2876     + 0xdc0bf400,
2877     +/* 0x05ee: memx_init */
2878     + 0x00f800f8,
2879     +/* 0x05f0: perf_recv */
2880     +/* 0x05f2: perf_init */
2881     + 0x00f800f8,
2882     +/* 0x05f4: i2c_drive_scl */
2883     + 0xf40036b0,
2884     + 0xe0400d0b,
2885     + 0x0001f607,
2886     + 0x00f804bd,
2887     +/* 0x0604: i2c_drive_scl_lo */
2888     + 0xf607e440,
2889     + 0x04bd0001,
2890     +/* 0x060e: i2c_drive_sda */
2891     + 0x36b000f8,
2892     + 0x0d0bf400,
2893     + 0xf607e040,
2894     + 0x04bd0002,
2895     +/* 0x061e: i2c_drive_sda_lo */
2896     + 0xe44000f8,
2897     + 0x0002f607,
2898     + 0x00f804bd,
2899     +/* 0x0628: i2c_sense_scl */
2900     + 0x430132f4,
2901     + 0x33cf07c4,
2902     + 0x0431fd00,
2903     + 0xf4060bf4,
2904     +/* 0x063a: i2c_sense_scl_done */
2905     + 0x00f80131,
2906     +/* 0x063c: i2c_sense_sda */
2907     + 0x430132f4,
2908     + 0x33cf07c4,
2909     + 0x0432fd00,
2910     + 0xf4060bf4,
2911     +/* 0x064e: i2c_sense_sda_done */
2912     + 0x00f80131,
2913     +/* 0x0650: i2c_raise_scl */
2914     + 0x984440f9,
2915     + 0x7e010308,
2916     +/* 0x065b: i2c_raise_scl_wait */
2917     + 0x4e0005f4,
2918     + 0x587e03e8,
2919     + 0x287e0000,
2920     + 0x01f40006,
2921     + 0x0142b609,
2922     +/* 0x066f: i2c_raise_scl_done */
2923     + 0xfcef1bf4,
2924     +/* 0x0673: i2c_start */
2925     + 0x7e00f840,
2926     + 0xf4000628,
2927     + 0x3c7e0d11,
2928     + 0x11f40006,
2929     + 0x2e0ef406,
2930     +/* 0x0684: i2c_start_rep */
2931     + 0xf47e0003,
2932     + 0x01030005,
2933     + 0x00060e7e,
2934     + 0xb60076bb,
2935     + 0x50f90465,
2936     + 0xbb046594,
2937     + 0x50bd0256,
2938     + 0xfc0475fd,
2939     + 0x06507e50,
2940     + 0x0464b600,
2941     +/* 0x06af: i2c_start_send */
2942     + 0x031d11f4,
2943     + 0x060e7e00,
2944     + 0x13884e00,
2945     + 0x0000587e,
2946     + 0xf47e0003,
2947     + 0x884e0005,
2948     + 0x00587e13,
2949     +/* 0x06c9: i2c_start_out */
2950     +/* 0x06cb: i2c_stop */
2951     + 0x0300f800,
2952     + 0x05f47e00,
2953     + 0x7e000300,
2954     + 0x4e00060e,
2955     + 0x587e03e8,
2956     + 0x01030000,
2957     + 0x0005f47e,
2958     + 0x7e13884e,
2959     + 0x03000058,
2960     + 0x060e7e01,
2961     + 0x13884e00,
2962     + 0x0000587e,
2963     +/* 0x06fa: i2c_bitw */
2964     + 0x0e7e00f8,
2965     + 0xe84e0006,
2966     + 0x00587e03,
2967     0x0076bb00,
2968     0xf90465b6,
2969     0x04659450,
2970     0xbd0256bb,
2971     0x0475fd50,
2972     - 0x557e50fc,
2973     + 0x507e50fc,
2974     0x64b60006,
2975     - 0x1d11f404,
2976     -/* 0x06b4: i2c_start_send */
2977     - 0x137e0003,
2978     - 0x884e0006,
2979     - 0x00587e13,
2980     - 0x7e000300,
2981     - 0x4e0005f9,
2982     - 0x587e1388,
2983     -/* 0x06ce: i2c_start_out */
2984     - 0x00f80000,
2985     -/* 0x06d0: i2c_stop */
2986     - 0xf97e0003,
2987     - 0x00030005,
2988     - 0x0006137e,
2989     - 0x7e03e84e,
2990     + 0x1711f404,
2991     + 0x7e13884e,
2992     0x03000058,
2993     - 0x05f97e01,
2994     + 0x05f47e00,
2995     0x13884e00,
2996     0x0000587e,
2997     - 0x137e0103,
2998     - 0x884e0006,
2999     - 0x00587e13,
3000     -/* 0x06ff: i2c_bitw */
3001     - 0x7e00f800,
3002     - 0x4e000613,
3003     - 0x587e03e8,
3004     - 0x76bb0000,
3005     +/* 0x0738: i2c_bitw_out */
3006     +/* 0x073a: i2c_bitr */
3007     + 0x010300f8,
3008     + 0x00060e7e,
3009     + 0x7e03e84e,
3010     + 0xbb000058,
3011     + 0x65b60076,
3012     + 0x9450f904,
3013     + 0x56bb0465,
3014     + 0xfd50bd02,
3015     + 0x50fc0475,
3016     + 0x0006507e,
3017     + 0xf40464b6,
3018     + 0x3c7e1a11,
3019     + 0x00030006,
3020     + 0x0005f47e,
3021     + 0x7e13884e,
3022     + 0xf0000058,
3023     + 0x31f4013c,
3024     +/* 0x077d: i2c_bitr_done */
3025     +/* 0x077f: i2c_get_byte */
3026     + 0x0500f801,
3027     +/* 0x0783: i2c_get_byte_next */
3028     + 0xb6080400,
3029     + 0x76bb0154,
3030     0x0465b600,
3031     0x659450f9,
3032     0x0256bb04,
3033     0x75fd50bd,
3034     0x7e50fc04,
3035     - 0xb6000655,
3036     + 0xb600073a,
3037     0x11f40464,
3038     - 0x13884e17,
3039     - 0x0000587e,
3040     - 0xf97e0003,
3041     - 0x884e0005,
3042     - 0x00587e13,
3043     -/* 0x073d: i2c_bitw_out */
3044     -/* 0x073f: i2c_bitr */
3045     - 0x0300f800,
3046     - 0x06137e01,
3047     - 0x03e84e00,
3048     - 0x0000587e,
3049     + 0x0553fd2a,
3050     + 0xf40142b6,
3051     + 0x0103d81b,
3052     0xb60076bb,
3053     0x50f90465,
3054     0xbb046594,
3055     0x50bd0256,
3056     0xfc0475fd,
3057     - 0x06557e50,
3058     + 0x06fa7e50,
3059     0x0464b600,
3060     - 0x7e1a11f4,
3061     - 0x03000641,
3062     - 0x05f97e00,
3063     - 0x13884e00,
3064     - 0x0000587e,
3065     - 0xf4013cf0,
3066     -/* 0x0782: i2c_bitr_done */
3067     - 0x00f80131,
3068     -/* 0x0784: i2c_get_byte */
3069     - 0x08040005,
3070     -/* 0x0788: i2c_get_byte_next */
3071     - 0xbb0154b6,
3072     - 0x65b60076,
3073     - 0x9450f904,
3074     - 0x56bb0465,
3075     - 0xfd50bd02,
3076     - 0x50fc0475,
3077     - 0x00073f7e,
3078     - 0xf40464b6,
3079     - 0x53fd2a11,
3080     - 0x0142b605,
3081     - 0x03d81bf4,
3082     - 0x0076bb01,
3083     - 0xf90465b6,
3084     - 0x04659450,
3085     - 0xbd0256bb,
3086     - 0x0475fd50,
3087     - 0xff7e50fc,
3088     - 0x64b60006,
3089     -/* 0x07d1: i2c_get_byte_done */
3090     -/* 0x07d3: i2c_put_byte */
3091     - 0x0400f804,
3092     -/* 0x07d5: i2c_put_byte_next */
3093     - 0x0142b608,
3094     - 0xbb3854ff,
3095     +/* 0x07cc: i2c_get_byte_done */
3096     +/* 0x07ce: i2c_put_byte */
3097     + 0x080400f8,
3098     +/* 0x07d0: i2c_put_byte_next */
3099     + 0xff0142b6,
3100     + 0x76bb3854,
3101     + 0x0465b600,
3102     + 0x659450f9,
3103     + 0x0256bb04,
3104     + 0x75fd50bd,
3105     + 0x7e50fc04,
3106     + 0xb60006fa,
3107     + 0x11f40464,
3108     + 0x0046b034,
3109     + 0xbbd81bf4,
3110     0x65b60076,
3111     0x9450f904,
3112     0x56bb0465,
3113     0xfd50bd02,
3114     0x50fc0475,
3115     - 0x0006ff7e,
3116     + 0x00073a7e,
3117     0xf40464b6,
3118     - 0x46b03411,
3119     - 0xd81bf400,
3120     + 0x76bb0f11,
3121     + 0x0136b000,
3122     + 0xf4061bf4,
3123     +/* 0x0826: i2c_put_byte_done */
3124     + 0x00f80132,
3125     +/* 0x0828: i2c_addr */
3126     0xb60076bb,
3127     0x50f90465,
3128     0xbb046594,
3129     0x50bd0256,
3130     0xfc0475fd,
3131     - 0x073f7e50,
3132     + 0x06737e50,
3133     0x0464b600,
3134     - 0xbb0f11f4,
3135     - 0x36b00076,
3136     - 0x061bf401,
3137     -/* 0x082b: i2c_put_byte_done */
3138     - 0xf80132f4,
3139     -/* 0x082d: i2c_addr */
3140     - 0x0076bb00,
3141     + 0xe72911f4,
3142     + 0xb6012ec3,
3143     + 0x53fd0134,
3144     + 0x0076bb05,
3145     0xf90465b6,
3146     0x04659450,
3147     0xbd0256bb,
3148     0x0475fd50,
3149     - 0x787e50fc,
3150     - 0x64b60006,
3151     - 0x2911f404,
3152     - 0x012ec3e7,
3153     - 0xfd0134b6,
3154     - 0x76bb0553,
3155     - 0x0465b600,
3156     - 0x659450f9,
3157     - 0x0256bb04,
3158     - 0x75fd50bd,
3159     - 0x7e50fc04,
3160     - 0xb60007d3,
3161     -/* 0x0872: i2c_addr_done */
3162     - 0x00f80464,
3163     -/* 0x0874: i2c_acquire_addr */
3164     - 0xb6f8cec7,
3165     - 0xe0b705e4,
3166     - 0x00f8d014,
3167     -/* 0x0880: i2c_acquire */
3168     - 0x0008747e,
3169     + 0xce7e50fc,
3170     + 0x64b60007,
3171     +/* 0x086d: i2c_addr_done */
3172     +/* 0x086f: i2c_acquire_addr */
3173     + 0xc700f804,
3174     + 0xe4b6f8ce,
3175     + 0x14e0b705,
3176     +/* 0x087b: i2c_acquire */
3177     + 0x7e00f8d0,
3178     + 0x7e00086f,
3179     + 0xf0000004,
3180     + 0x2d7e03d9,
3181     + 0x00f80000,
3182     +/* 0x088c: i2c_release */
3183     + 0x00086f7e,
3184     0x0000047e,
3185     - 0x7e03d9f0,
3186     + 0x7e03daf0,
3187     0xf800002d,
3188     -/* 0x0891: i2c_release */
3189     - 0x08747e00,
3190     - 0x00047e00,
3191     - 0x03daf000,
3192     - 0x00002d7e,
3193     -/* 0x08a2: i2c_recv */
3194     - 0x32f400f8,
3195     - 0xf8c1c701,
3196     - 0xb00214b6,
3197     - 0x1ff52816,
3198     - 0x13b80134,
3199     - 0x98000cf4,
3200     - 0x13b80032,
3201     - 0x98000ccc,
3202     - 0x31f40031,
3203     - 0xf9d0f902,
3204     - 0xd6d0f9e0,
3205     - 0x10000000,
3206     - 0xbb016792,
3207     - 0x65b60076,
3208     - 0x9450f904,
3209     - 0x56bb0465,
3210     - 0xfd50bd02,
3211     - 0x50fc0475,
3212     - 0x0008807e,
3213     - 0xfc0464b6,
3214     - 0x00d6b0d0,
3215     - 0x00b01bf5,
3216     - 0x76bb0005,
3217     +/* 0x089d: i2c_recv */
3218     + 0x0132f400,
3219     + 0xb6f8c1c7,
3220     + 0x16b00214,
3221     + 0x341ff528,
3222     + 0xf413b801,
3223     + 0x3298000c,
3224     + 0xcc13b800,
3225     + 0x3198000c,
3226     + 0x0231f400,
3227     + 0xe0f9d0f9,
3228     + 0x00d6d0f9,
3229     + 0x92100000,
3230     + 0x76bb0167,
3231     0x0465b600,
3232     0x659450f9,
3233     0x0256bb04,
3234     0x75fd50bd,
3235     0x7e50fc04,
3236     - 0xb600082d,
3237     - 0x11f50464,
3238     - 0xc5c700cc,
3239     - 0x0076bbe0,
3240     - 0xf90465b6,
3241     - 0x04659450,
3242     - 0xbd0256bb,
3243     - 0x0475fd50,
3244     - 0xd37e50fc,
3245     - 0x64b60007,
3246     - 0xa911f504,
3247     - 0xbb010500,
3248     - 0x65b60076,
3249     - 0x9450f904,
3250     - 0x56bb0465,
3251     - 0xfd50bd02,
3252     - 0x50fc0475,
3253     - 0x00082d7e,
3254     - 0xf50464b6,
3255     - 0xbb008711,
3256     - 0x65b60076,
3257     - 0x9450f904,
3258     - 0x56bb0465,
3259     - 0xfd50bd02,
3260     - 0x50fc0475,
3261     - 0x0007847e,
3262     - 0xf40464b6,
3263     - 0x5bcb6711,
3264     - 0x0076bbe0,
3265     + 0xb600087b,
3266     + 0xd0fc0464,
3267     + 0xf500d6b0,
3268     + 0x0500b01b,
3269     + 0x0076bb00,
3270     0xf90465b6,
3271     0x04659450,
3272     0xbd0256bb,
3273     0x0475fd50,
3274     - 0xd07e50fc,
3275     - 0x64b60006,
3276     - 0xbd5bb204,
3277     - 0x410ef474,
3278     -/* 0x09a4: i2c_recv_not_rd08 */
3279     - 0xf401d6b0,
3280     - 0x00053b1b,
3281     - 0x00082d7e,
3282     - 0xc73211f4,
3283     - 0xd37ee0c5,
3284     - 0x11f40007,
3285     - 0x7e000528,
3286     - 0xf400082d,
3287     - 0xb5c71f11,
3288     - 0x07d37ee0,
3289     - 0x1511f400,
3290     - 0x0006d07e,
3291     - 0xc5c774bd,
3292     - 0x091bf408,
3293     - 0xf40232f4,
3294     -/* 0x09e2: i2c_recv_not_wr08 */
3295     -/* 0x09e2: i2c_recv_done */
3296     - 0xcec7030e,
3297     - 0x08917ef8,
3298     - 0xfce0fc00,
3299     - 0x0912f4d0,
3300     - 0x9f7e7cb2,
3301     -/* 0x09f6: i2c_recv_exit */
3302     - 0x00f80002,
3303     -/* 0x09f8: i2c_init */
3304     -/* 0x09fa: test_recv */
3305     - 0x584100f8,
3306     - 0x0011cf04,
3307     - 0x400110b6,
3308     - 0x01f60458,
3309     - 0xde04bd00,
3310     - 0x134fd900,
3311     - 0x0001de7e,
3312     -/* 0x0a16: test_init */
3313     - 0x004e00f8,
3314     - 0x01de7e08,
3315     -/* 0x0a1f: idle_recv */
3316     + 0x287e50fc,
3317     + 0x64b60008,
3318     + 0xcc11f504,
3319     + 0xe0c5c700,
3320     + 0xb60076bb,
3321     + 0x50f90465,
3322     + 0xbb046594,
3323     + 0x50bd0256,
3324     + 0xfc0475fd,
3325     + 0x07ce7e50,
3326     + 0x0464b600,
3327     + 0x00a911f5,
3328     + 0x76bb0105,
3329     + 0x0465b600,
3330     + 0x659450f9,
3331     + 0x0256bb04,
3332     + 0x75fd50bd,
3333     + 0x7e50fc04,
3334     + 0xb6000828,
3335     + 0x11f50464,
3336     + 0x76bb0087,
3337     + 0x0465b600,
3338     + 0x659450f9,
3339     + 0x0256bb04,
3340     + 0x75fd50bd,
3341     + 0x7e50fc04,
3342     + 0xb600077f,
3343     + 0x11f40464,
3344     + 0xe05bcb67,
3345     + 0xb60076bb,
3346     + 0x50f90465,
3347     + 0xbb046594,
3348     + 0x50bd0256,
3349     + 0xfc0475fd,
3350     + 0x06cb7e50,
3351     + 0x0464b600,
3352     + 0x74bd5bb2,
3353     +/* 0x099f: i2c_recv_not_rd08 */
3354     + 0xb0410ef4,
3355     + 0x1bf401d6,
3356     + 0x7e00053b,
3357     + 0xf4000828,
3358     + 0xc5c73211,
3359     + 0x07ce7ee0,
3360     + 0x2811f400,
3361     + 0x287e0005,
3362     + 0x11f40008,
3363     + 0xe0b5c71f,
3364     + 0x0007ce7e,
3365     + 0x7e1511f4,
3366     + 0xbd0006cb,
3367     + 0x08c5c774,
3368     + 0xf4091bf4,
3369     + 0x0ef40232,
3370     +/* 0x09dd: i2c_recv_not_wr08 */
3371     +/* 0x09dd: i2c_recv_done */
3372     + 0xf8cec703,
3373     + 0x00088c7e,
3374     + 0xd0fce0fc,
3375     + 0xb20912f4,
3376     + 0x029f7e7c,
3377     +/* 0x09f1: i2c_recv_exit */
3378     +/* 0x09f3: i2c_init */
3379     0xf800f800,
3380     -/* 0x0a21: idle */
3381     - 0x0031f400,
3382     - 0xcf045441,
3383     - 0x10b60011,
3384     - 0x04544001,
3385     - 0xbd0001f6,
3386     -/* 0x0a35: idle_loop */
3387     - 0xf4580104,
3388     -/* 0x0a3a: idle_proc */
3389     -/* 0x0a3a: idle_proc_exec */
3390     - 0x10f90232,
3391     - 0xa87e1eb2,
3392     - 0x10fc0002,
3393     - 0xf40911f4,
3394     - 0x0ef40231,
3395     -/* 0x0a4d: idle_proc_next */
3396     - 0x5810b6f0,
3397     - 0x1bf41fa6,
3398     - 0xe002f4e8,
3399     - 0xf40028f4,
3400     - 0x0000c60e,
3401     +/* 0x09f5: test_recv */
3402     + 0x04584100,
3403     + 0xb60011cf,
3404     + 0x58400110,
3405     + 0x0001f604,
3406     + 0x00de04bd,
3407     + 0x7e134fd9,
3408     + 0xf80001de,
3409     +/* 0x0a11: test_init */
3410     + 0x08004e00,
3411     + 0x0001de7e,
3412     +/* 0x0a1a: idle_recv */
3413     + 0x00f800f8,
3414     +/* 0x0a1c: idle */
3415     + 0x410031f4,
3416     + 0x11cf0454,
3417     + 0x0110b600,
3418     + 0xf6045440,
3419     + 0x04bd0001,
3420     +/* 0x0a30: idle_loop */
3421     + 0x32f45801,
3422     +/* 0x0a35: idle_proc */
3423     +/* 0x0a35: idle_proc_exec */
3424     + 0xb210f902,
3425     + 0x02a87e1e,
3426     + 0xf410fc00,
3427     + 0x31f40911,
3428     + 0xf00ef402,
3429     +/* 0x0a48: idle_proc_next */
3430     + 0xa65810b6,
3431     + 0xe81bf41f,
3432     + 0xf4e002f4,
3433     + 0x0ef40028,
3434     + 0x000000c6,
3435     + 0x00000000,
3436     0x00000000,
3437     0x00000000,
3438     0x00000000,
3439     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
3440     index 6a2572e8945a..defddf5957ee 100644
3441     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
3442     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/gt215.fuc3.h
3443     @@ -47,8 +47,8 @@ static uint32_t gt215_pmu_data[] = {
3444     0x00000000,
3445     0x00000000,
3446     0x584d454d,
3447     - 0x0000083a,
3448     - 0x0000082c,
3449     + 0x00000833,
3450     + 0x00000825,
3451     0x00000000,
3452     0x00000000,
3453     0x00000000,
3454     @@ -69,8 +69,8 @@ static uint32_t gt215_pmu_data[] = {
3455     0x00000000,
3456     0x00000000,
3457     0x46524550,
3458     - 0x0000083e,
3459     - 0x0000083c,
3460     + 0x00000837,
3461     + 0x00000835,
3462     0x00000000,
3463     0x00000000,
3464     0x00000000,
3465     @@ -91,8 +91,8 @@ static uint32_t gt215_pmu_data[] = {
3466     0x00000000,
3467     0x00000000,
3468     0x5f433249,
3469     - 0x00000c6e,
3470     - 0x00000b11,
3471     + 0x00000c67,
3472     + 0x00000b0a,
3473     0x00000000,
3474     0x00000000,
3475     0x00000000,
3476     @@ -113,8 +113,8 @@ static uint32_t gt215_pmu_data[] = {
3477     0x00000000,
3478     0x00000000,
3479     0x54534554,
3480     - 0x00000c97,
3481     - 0x00000c70,
3482     + 0x00000c90,
3483     + 0x00000c69,
3484     0x00000000,
3485     0x00000000,
3486     0x00000000,
3487     @@ -135,8 +135,8 @@ static uint32_t gt215_pmu_data[] = {
3488     0x00000000,
3489     0x00000000,
3490     0x454c4449,
3491     - 0x00000ca3,
3492     - 0x00000ca1,
3493     + 0x00000c9c,
3494     + 0x00000c9a,
3495     0x00000000,
3496     0x00000000,
3497     0x00000000,
3498     @@ -234,22 +234,22 @@ static uint32_t gt215_pmu_data[] = {
3499     /* 0x037c: memx_func_next */
3500     0x00000002,
3501     0x00000000,
3502     - 0x000005a0,
3503     + 0x0000059f,
3504     0x00000003,
3505     0x00000002,
3506     - 0x00000632,
3507     + 0x0000062f,
3508     0x00040004,
3509     0x00000000,
3510     - 0x0000064e,
3511     + 0x0000064b,
3512     0x00010005,
3513     0x00000000,
3514     - 0x0000066b,
3515     + 0x00000668,
3516     0x00010006,
3517     0x00000000,
3518     - 0x000005f0,
3519     + 0x000005ef,
3520     0x00000007,
3521     0x00000000,
3522     - 0x00000676,
3523     + 0x00000673,
3524     /* 0x03c4: memx_func_tail */
3525     /* 0x03c4: memx_ts_start */
3526     0x00000000,
3527     @@ -1305,560 +1305,560 @@ static uint32_t gt215_pmu_code[] = {
3528     0x67f102d7,
3529     0x63f1fffc,
3530     0x76fdffff,
3531     - 0x0267f104,
3532     - 0x0576fd00,
3533     - 0x70f980f9,
3534     - 0xe0fcd0fc,
3535     - 0xf04021f4,
3536     + 0x0267f004,
3537     + 0xf90576fd,
3538     + 0xfc70f980,
3539     + 0xf4e0fcd0,
3540     + 0x67f04021,
3541     + 0xe007f104,
3542     + 0x0604b607,
3543     + 0xbd0006d0,
3544     +/* 0x0581: memx_func_enter_wait */
3545     + 0xc067f104,
3546     + 0x0664b607,
3547     + 0xf00066cf,
3548     + 0x0bf40464,
3549     + 0x2c67f0f3,
3550     + 0xcf0664b6,
3551     + 0x06800066,
3552     +/* 0x059f: memx_func_leave */
3553     + 0xf000f8f1,
3554     + 0x64b62c67,
3555     + 0x0066cf06,
3556     + 0xf0f20680,
3557     0x07f10467,
3558     - 0x04b607e0,
3559     + 0x04b607e4,
3560     0x0006d006,
3561     -/* 0x0582: memx_func_enter_wait */
3562     +/* 0x05ba: memx_func_leave_wait */
3563     0x67f104bd,
3564     0x64b607c0,
3565     0x0066cf06,
3566     0xf40464f0,
3567     - 0x67f0f30b,
3568     - 0x0664b62c,
3569     - 0x800066cf,
3570     - 0x00f8f106,
3571     -/* 0x05a0: memx_func_leave */
3572     - 0xb62c67f0,
3573     - 0x66cf0664,
3574     - 0xf2068000,
3575     - 0xf10467f0,
3576     - 0xb607e407,
3577     - 0x06d00604,
3578     -/* 0x05bb: memx_func_leave_wait */
3579     - 0xf104bd00,
3580     - 0xb607c067,
3581     - 0x66cf0664,
3582     - 0x0464f000,
3583     - 0xf1f31bf4,
3584     - 0xb9161087,
3585     - 0x21f4028e,
3586     - 0x02d7b904,
3587     - 0xffcc67f1,
3588     - 0xffff63f1,
3589     - 0xf90476fd,
3590     - 0xfc70f980,
3591     - 0xf4e0fcd0,
3592     - 0x00f84021,
3593     -/* 0x05f0: memx_func_wait_vblank */
3594     - 0xb0001698,
3595     - 0x0bf40066,
3596     - 0x0166b013,
3597     - 0xf4060bf4,
3598     -/* 0x0602: memx_func_wait_vblank_head1 */
3599     - 0x77f12e0e,
3600     - 0x0ef40020,
3601     -/* 0x0609: memx_func_wait_vblank_head0 */
3602     - 0x0877f107,
3603     -/* 0x060d: memx_func_wait_vblank_0 */
3604     - 0xc467f100,
3605     - 0x0664b607,
3606     - 0xfd0066cf,
3607     - 0x1bf40467,
3608     -/* 0x061d: memx_func_wait_vblank_1 */
3609     - 0xc467f1f3,
3610     - 0x0664b607,
3611     - 0xfd0066cf,
3612     - 0x0bf40467,
3613     -/* 0x062d: memx_func_wait_vblank_fini */
3614     - 0x0410b6f3,
3615     -/* 0x0632: memx_func_wr32 */
3616     - 0x169800f8,
3617     - 0x01159800,
3618     - 0xf90810b6,
3619     - 0xfc50f960,
3620     - 0xf4e0fcd0,
3621     - 0x42b64021,
3622     - 0xe91bf402,
3623     -/* 0x064e: memx_func_wait */
3624     - 0x87f000f8,
3625     - 0x0684b62c,
3626     - 0x980088cf,
3627     - 0x1d98001e,
3628     - 0x021c9801,
3629     - 0xb6031b98,
3630     - 0x21f41010,
3631     -/* 0x066b: memx_func_delay */
3632     - 0x9800f8a3,
3633     - 0x10b6001e,
3634     - 0x7e21f404,
3635     -/* 0x0676: memx_func_train */
3636     - 0x57f100f8,
3637     - 0x77f10003,
3638     - 0x97f10000,
3639     - 0x93f00000,
3640     - 0x029eb970,
3641     - 0xb90421f4,
3642     - 0xe7f102d8,
3643     - 0x21f42710,
3644     -/* 0x0695: memx_func_train_loop_outer */
3645     - 0x0158e07e,
3646     - 0x0083f101,
3647     - 0xe097f102,
3648     - 0x1193f011,
3649     - 0x80f990f9,
3650     + 0x87f1f31b,
3651     + 0x8eb91610,
3652     + 0x0421f402,
3653     + 0xf102d7b9,
3654     + 0xf1ffcc67,
3655     + 0xfdffff63,
3656     + 0x80f90476,
3657     + 0xd0fc70f9,
3658     + 0x21f4e0fc,
3659     +/* 0x05ef: memx_func_wait_vblank */
3660     + 0x9800f840,
3661     + 0x66b00016,
3662     + 0x120bf400,
3663     + 0xf40166b0,
3664     + 0x0ef4060b,
3665     +/* 0x0601: memx_func_wait_vblank_head1 */
3666     + 0x2077f02c,
3667     +/* 0x0607: memx_func_wait_vblank_head0 */
3668     + 0xf0060ef4,
3669     +/* 0x060a: memx_func_wait_vblank_0 */
3670     + 0x67f10877,
3671     + 0x64b607c4,
3672     + 0x0066cf06,
3673     + 0xf40467fd,
3674     +/* 0x061a: memx_func_wait_vblank_1 */
3675     + 0x67f1f31b,
3676     + 0x64b607c4,
3677     + 0x0066cf06,
3678     + 0xf40467fd,
3679     +/* 0x062a: memx_func_wait_vblank_fini */
3680     + 0x10b6f30b,
3681     +/* 0x062f: memx_func_wr32 */
3682     + 0x9800f804,
3683     + 0x15980016,
3684     + 0x0810b601,
3685     + 0x50f960f9,
3686     0xe0fcd0fc,
3687     - 0xf94021f4,
3688     - 0x0067f150,
3689     -/* 0x06b5: memx_func_train_loop_inner */
3690     - 0x1187f100,
3691     - 0x9068ff11,
3692     - 0xfd109894,
3693     - 0x97f10589,
3694     - 0x93f00720,
3695     - 0xf990f910,
3696     - 0xfcd0fc80,
3697     - 0x4021f4e0,
3698     - 0x008097f1,
3699     - 0xb91093f0,
3700     - 0x21f4029e,
3701     - 0x02d8b904,
3702     - 0xf92088c5,
3703     + 0xb64021f4,
3704     + 0x1bf40242,
3705     +/* 0x064b: memx_func_wait */
3706     + 0xf000f8e9,
3707     + 0x84b62c87,
3708     + 0x0088cf06,
3709     + 0x98001e98,
3710     + 0x1c98011d,
3711     + 0x031b9802,
3712     + 0xf41010b6,
3713     + 0x00f8a321,
3714     +/* 0x0668: memx_func_delay */
3715     + 0xb6001e98,
3716     + 0x21f40410,
3717     +/* 0x0673: memx_func_train */
3718     + 0xf000f87e,
3719     + 0x77f00357,
3720     + 0x0097f100,
3721     + 0x7093f000,
3722     + 0xf4029eb9,
3723     + 0xd8b90421,
3724     + 0x10e7f102,
3725     + 0x7e21f427,
3726     +/* 0x0690: memx_func_train_loop_outer */
3727     + 0x010158e0,
3728     + 0x020083f1,
3729     + 0x11e097f1,
3730     + 0xf91193f0,
3731     + 0xfc80f990,
3732     + 0xf4e0fcd0,
3733     + 0x50f94021,
3734     +/* 0x06af: memx_func_train_loop_inner */
3735     + 0xf10067f0,
3736     + 0xff111187,
3737     + 0x98949068,
3738     + 0x0589fd10,
3739     + 0x072097f1,
3740     + 0xf91093f0,
3741     0xfc80f990,
3742     0xf4e0fcd0,
3743     0x97f14021,
3744     - 0x93f0053c,
3745     - 0x0287f110,
3746     - 0x0083f130,
3747     - 0xf990f980,
3748     + 0x93f00080,
3749     + 0x029eb910,
3750     + 0xb90421f4,
3751     + 0x88c502d8,
3752     + 0xf990f920,
3753     0xfcd0fc80,
3754     0x4021f4e0,
3755     - 0x0560e7f1,
3756     - 0xf110e3f0,
3757     - 0xf10000d7,
3758     - 0x908000d3,
3759     - 0xb7f100dc,
3760     - 0xb3f08480,
3761     - 0xa321f41e,
3762     - 0x000057f1,
3763     - 0xffff97f1,
3764     - 0x830093f1,
3765     -/* 0x0734: memx_func_train_loop_4x */
3766     - 0x0080a7f1,
3767     - 0xb910a3f0,
3768     - 0x21f402ae,
3769     - 0x02d8b904,
3770     - 0xffdfb7f1,
3771     - 0xffffb3f1,
3772     - 0xf9048bfd,
3773     - 0xfc80f9a0,
3774     + 0x053c97f1,
3775     + 0xf11093f0,
3776     + 0xf1300287,
3777     + 0xf9800083,
3778     + 0xfc80f990,
3779     0xf4e0fcd0,
3780     - 0xa7f14021,
3781     - 0xa3f0053c,
3782     - 0x0287f110,
3783     - 0x0083f130,
3784     - 0xf9a0f980,
3785     - 0xfcd0fc80,
3786     - 0x4021f4e0,
3787     - 0x0560e7f1,
3788     - 0xf110e3f0,
3789     - 0xf10000d7,
3790     - 0xb98000d3,
3791     - 0xb7f102dc,
3792     - 0xb3f02710,
3793     - 0xa321f400,
3794     - 0xf402eeb9,
3795     - 0xddb90421,
3796     - 0x949dff02,
3797     + 0xe7f14021,
3798     + 0xe3f00560,
3799     + 0x00d7f110,
3800     + 0x00d3f100,
3801     + 0x00dc9080,
3802     + 0x8480b7f1,
3803     + 0xf41eb3f0,
3804     + 0x57f0a321,
3805     + 0xff97f100,
3806     + 0x0093f1ff,
3807     +/* 0x072d: memx_func_train_loop_4x */
3808     + 0x80a7f183,
3809     + 0x10a3f000,
3810     + 0xf402aeb9,
3811     + 0xd8b90421,
3812     + 0xdfb7f102,
3813     + 0xffb3f1ff,
3814     + 0x048bfdff,
3815     + 0x80f9a0f9,
3816     + 0xe0fcd0fc,
3817     + 0xf14021f4,
3818     + 0xf0053ca7,
3819     + 0x87f110a3,
3820     + 0x83f13002,
3821     + 0xa0f98000,
3822     + 0xd0fc80f9,
3823     + 0x21f4e0fc,
3824     + 0x60e7f140,
3825     + 0x10e3f005,
3826     + 0x0000d7f1,
3827     + 0x8000d3f1,
3828     + 0xf102dcb9,
3829     + 0xf02710b7,
3830     + 0x21f400b3,
3831     + 0x02eeb9a3,
3832     + 0xb90421f4,
3833     + 0x9dff02dd,
3834     + 0x0150b694,
3835     + 0xf4045670,
3836     + 0x7aa0921e,
3837     + 0xa9800bcc,
3838     + 0x0160b600,
3839     + 0x700470b6,
3840     + 0x1ef51066,
3841     + 0x50fcff01,
3842     0x700150b6,
3843     - 0x1ef40456,
3844     - 0xcc7aa092,
3845     - 0x00a9800b,
3846     - 0xb60160b6,
3847     - 0x66700470,
3848     - 0x001ef510,
3849     - 0xb650fcff,
3850     - 0x56700150,
3851     - 0xd41ef507,
3852     -/* 0x07c7: memx_exec */
3853     - 0xf900f8fe,
3854     - 0xb9d0f9e0,
3855     - 0xb2b902c1,
3856     -/* 0x07d1: memx_exec_next */
3857     - 0x00139802,
3858     - 0xe70410b6,
3859     - 0xe701f034,
3860     - 0xb601e033,
3861     - 0x30f00132,
3862     - 0xde35980c,
3863     - 0x12b855f9,
3864     - 0xe41ef406,
3865     - 0x98f10b98,
3866     - 0xcbbbf20c,
3867     - 0xc4b7f102,
3868     - 0x06b4b607,
3869     - 0xfc00bbcf,
3870     - 0xf5e0fcd0,
3871     + 0x1ef50756,
3872     + 0x00f8fed6,
3873     +/* 0x07c0: memx_exec */
3874     + 0xd0f9e0f9,
3875     + 0xb902c1b9,
3876     +/* 0x07ca: memx_exec_next */
3877     + 0x139802b2,
3878     + 0x0410b600,
3879     + 0x01f034e7,
3880     + 0x01e033e7,
3881     + 0xf00132b6,
3882     + 0x35980c30,
3883     + 0xb855f9de,
3884     + 0x1ef40612,
3885     + 0xf10b98e4,
3886     + 0xbbf20c98,
3887     + 0xb7f102cb,
3888     + 0xb4b607c4,
3889     + 0x00bbcf06,
3890     + 0xe0fcd0fc,
3891     + 0x033621f5,
3892     +/* 0x0806: memx_info */
3893     + 0xc67000f8,
3894     + 0x0e0bf401,
3895     +/* 0x080c: memx_info_data */
3896     + 0x03ccc7f1,
3897     + 0x0800b7f1,
3898     +/* 0x0817: memx_info_train */
3899     + 0xf10b0ef4,
3900     + 0xf10bccc7,
3901     +/* 0x081f: memx_info_send */
3902     + 0xf50100b7,
3903     0xf8033621,
3904     -/* 0x080d: memx_info */
3905     - 0x01c67000,
3906     -/* 0x0813: memx_info_data */
3907     - 0xf10e0bf4,
3908     - 0xf103ccc7,
3909     - 0xf40800b7,
3910     -/* 0x081e: memx_info_train */
3911     - 0xc7f10b0e,
3912     - 0xb7f10bcc,
3913     -/* 0x0826: memx_info_send */
3914     - 0x21f50100,
3915     - 0x00f80336,
3916     -/* 0x082c: memx_recv */
3917     - 0xf401d6b0,
3918     - 0xd6b0980b,
3919     - 0xd80bf400,
3920     -/* 0x083a: memx_init */
3921     - 0x00f800f8,
3922     -/* 0x083c: perf_recv */
3923     -/* 0x083e: perf_init */
3924     - 0x00f800f8,
3925     -/* 0x0840: i2c_drive_scl */
3926     - 0xf40036b0,
3927     - 0x07f1110b,
3928     - 0x04b607e0,
3929     - 0x0001d006,
3930     - 0x00f804bd,
3931     -/* 0x0854: i2c_drive_scl_lo */
3932     - 0x07e407f1,
3933     - 0xd00604b6,
3934     - 0x04bd0001,
3935     -/* 0x0862: i2c_drive_sda */
3936     - 0x36b000f8,
3937     - 0x110bf400,
3938     - 0x07e007f1,
3939     - 0xd00604b6,
3940     - 0x04bd0002,
3941     -/* 0x0876: i2c_drive_sda_lo */
3942     - 0x07f100f8,
3943     - 0x04b607e4,
3944     - 0x0002d006,
3945     - 0x00f804bd,
3946     -/* 0x0884: i2c_sense_scl */
3947     - 0xf10132f4,
3948     - 0xb607c437,
3949     - 0x33cf0634,
3950     - 0x0431fd00,
3951     - 0xf4060bf4,
3952     -/* 0x089a: i2c_sense_scl_done */
3953     - 0x00f80131,
3954     -/* 0x089c: i2c_sense_sda */
3955     - 0xf10132f4,
3956     - 0xb607c437,
3957     - 0x33cf0634,
3958     - 0x0432fd00,
3959     - 0xf4060bf4,
3960     -/* 0x08b2: i2c_sense_sda_done */
3961     - 0x00f80131,
3962     -/* 0x08b4: i2c_raise_scl */
3963     - 0x47f140f9,
3964     - 0x37f00898,
3965     - 0x4021f501,
3966     -/* 0x08c1: i2c_raise_scl_wait */
3967     +/* 0x0825: memx_recv */
3968     + 0x01d6b000,
3969     + 0xb0980bf4,
3970     + 0x0bf400d6,
3971     +/* 0x0833: memx_init */
3972     + 0xf800f8d8,
3973     +/* 0x0835: perf_recv */
3974     +/* 0x0837: perf_init */
3975     + 0xf800f800,
3976     +/* 0x0839: i2c_drive_scl */
3977     + 0x0036b000,
3978     + 0xf1110bf4,
3979     + 0xb607e007,
3980     + 0x01d00604,
3981     + 0xf804bd00,
3982     +/* 0x084d: i2c_drive_scl_lo */
3983     + 0xe407f100,
3984     + 0x0604b607,
3985     + 0xbd0001d0,
3986     +/* 0x085b: i2c_drive_sda */
3987     + 0xb000f804,
3988     + 0x0bf40036,
3989     + 0xe007f111,
3990     + 0x0604b607,
3991     + 0xbd0002d0,
3992     +/* 0x086f: i2c_drive_sda_lo */
3993     + 0xf100f804,
3994     + 0xb607e407,
3995     + 0x02d00604,
3996     + 0xf804bd00,
3997     +/* 0x087d: i2c_sense_scl */
3998     + 0x0132f400,
3999     + 0x07c437f1,
4000     + 0xcf0634b6,
4001     + 0x31fd0033,
4002     + 0x060bf404,
4003     +/* 0x0893: i2c_sense_scl_done */
4004     + 0xf80131f4,
4005     +/* 0x0895: i2c_sense_sda */
4006     + 0x0132f400,
4007     + 0x07c437f1,
4008     + 0xcf0634b6,
4009     + 0x32fd0033,
4010     + 0x060bf404,
4011     +/* 0x08ab: i2c_sense_sda_done */
4012     + 0xf80131f4,
4013     +/* 0x08ad: i2c_raise_scl */
4014     + 0xf140f900,
4015     + 0xf0089847,
4016     + 0x21f50137,
4017     +/* 0x08ba: i2c_raise_scl_wait */
4018     + 0xe7f10839,
4019     + 0x21f403e8,
4020     + 0x7d21f57e,
4021     + 0x0901f408,
4022     + 0xf40142b6,
4023     +/* 0x08ce: i2c_raise_scl_done */
4024     + 0x40fcef1b,
4025     +/* 0x08d2: i2c_start */
4026     + 0x21f500f8,
4027     + 0x11f4087d,
4028     + 0x9521f50d,
4029     + 0x0611f408,
4030     +/* 0x08e3: i2c_start_rep */
4031     + 0xf0300ef4,
4032     + 0x21f50037,
4033     + 0x37f00839,
4034     + 0x5b21f501,
4035     + 0x0076bb08,
4036     + 0xf90465b6,
4037     + 0x04659450,
4038     + 0xbd0256bb,
4039     + 0x0475fd50,
4040     + 0x21f550fc,
4041     + 0x64b608ad,
4042     + 0x1f11f404,
4043     +/* 0x0910: i2c_start_send */
4044     + 0xf50037f0,
4045     + 0xf1085b21,
4046     + 0xf41388e7,
4047     + 0x37f07e21,
4048     + 0x3921f500,
4049     + 0x88e7f108,
4050     + 0x7e21f413,
4051     +/* 0x092c: i2c_start_out */
4052     +/* 0x092e: i2c_stop */
4053     + 0x37f000f8,
4054     + 0x3921f500,
4055     + 0x0037f008,
4056     + 0x085b21f5,
4057     + 0x03e8e7f1,
4058     + 0xf07e21f4,
4059     + 0x21f50137,
4060     + 0xe7f10839,
4061     + 0x21f41388,
4062     + 0x0137f07e,
4063     + 0x085b21f5,
4064     + 0x1388e7f1,
4065     + 0xf87e21f4,
4066     +/* 0x0961: i2c_bitw */
4067     + 0x5b21f500,
4068     0xe8e7f108,
4069     0x7e21f403,
4070     - 0x088421f5,
4071     - 0xb60901f4,
4072     - 0x1bf40142,
4073     -/* 0x08d5: i2c_raise_scl_done */
4074     - 0xf840fcef,
4075     -/* 0x08d9: i2c_start */
4076     - 0x8421f500,
4077     - 0x0d11f408,
4078     - 0x089c21f5,
4079     - 0xf40611f4,
4080     -/* 0x08ea: i2c_start_rep */
4081     - 0x37f0300e,
4082     - 0x4021f500,
4083     - 0x0137f008,
4084     - 0x086221f5,
4085     0xb60076bb,
4086     0x50f90465,
4087     0xbb046594,
4088     0x50bd0256,
4089     0xfc0475fd,
4090     - 0xb421f550,
4091     + 0xad21f550,
4092     0x0464b608,
4093     -/* 0x0917: i2c_start_send */
4094     - 0xf01f11f4,
4095     - 0x21f50037,
4096     - 0xe7f10862,
4097     - 0x21f41388,
4098     - 0x0037f07e,
4099     - 0x084021f5,
4100     - 0x1388e7f1,
4101     -/* 0x0933: i2c_start_out */
4102     - 0xf87e21f4,
4103     -/* 0x0935: i2c_stop */
4104     - 0x0037f000,
4105     - 0x084021f5,
4106     - 0xf50037f0,
4107     - 0xf1086221,
4108     - 0xf403e8e7,
4109     + 0xf11811f4,
4110     + 0xf41388e7,
4111     0x37f07e21,
4112     - 0x4021f501,
4113     + 0x3921f500,
4114     0x88e7f108,
4115     0x7e21f413,
4116     - 0xf50137f0,
4117     - 0xf1086221,
4118     - 0xf41388e7,
4119     - 0x00f87e21,
4120     -/* 0x0968: i2c_bitw */
4121     - 0x086221f5,
4122     - 0x03e8e7f1,
4123     - 0xbb7e21f4,
4124     - 0x65b60076,
4125     - 0x9450f904,
4126     - 0x56bb0465,
4127     - 0xfd50bd02,
4128     - 0x50fc0475,
4129     - 0x08b421f5,
4130     - 0xf40464b6,
4131     - 0xe7f11811,
4132     +/* 0x09a0: i2c_bitw_out */
4133     +/* 0x09a2: i2c_bitr */
4134     + 0x37f000f8,
4135     + 0x5b21f501,
4136     + 0xe8e7f108,
4137     + 0x7e21f403,
4138     + 0xb60076bb,
4139     + 0x50f90465,
4140     + 0xbb046594,
4141     + 0x50bd0256,
4142     + 0xfc0475fd,
4143     + 0xad21f550,
4144     + 0x0464b608,
4145     + 0xf51b11f4,
4146     + 0xf0089521,
4147     + 0x21f50037,
4148     + 0xe7f10839,
4149     0x21f41388,
4150     - 0x0037f07e,
4151     - 0x084021f5,
4152     - 0x1388e7f1,
4153     -/* 0x09a7: i2c_bitw_out */
4154     - 0xf87e21f4,
4155     -/* 0x09a9: i2c_bitr */
4156     - 0x0137f000,
4157     - 0x086221f5,
4158     - 0x03e8e7f1,
4159     - 0xbb7e21f4,
4160     - 0x65b60076,
4161     - 0x9450f904,
4162     - 0x56bb0465,
4163     - 0xfd50bd02,
4164     - 0x50fc0475,
4165     - 0x08b421f5,
4166     - 0xf40464b6,
4167     - 0x21f51b11,
4168     - 0x37f0089c,
4169     - 0x4021f500,
4170     - 0x88e7f108,
4171     - 0x7e21f413,
4172     - 0xf4013cf0,
4173     -/* 0x09ee: i2c_bitr_done */
4174     - 0x00f80131,
4175     -/* 0x09f0: i2c_get_byte */
4176     - 0xf00057f0,
4177     -/* 0x09f6: i2c_get_byte_next */
4178     - 0x54b60847,
4179     + 0x013cf07e,
4180     +/* 0x09e7: i2c_bitr_done */
4181     + 0xf80131f4,
4182     +/* 0x09e9: i2c_get_byte */
4183     + 0x0057f000,
4184     +/* 0x09ef: i2c_get_byte_next */
4185     + 0xb60847f0,
4186     + 0x76bb0154,
4187     + 0x0465b600,
4188     + 0x659450f9,
4189     + 0x0256bb04,
4190     + 0x75fd50bd,
4191     + 0xf550fc04,
4192     + 0xb609a221,
4193     + 0x11f40464,
4194     + 0x0553fd2b,
4195     + 0xf40142b6,
4196     + 0x37f0d81b,
4197     0x0076bb01,
4198     0xf90465b6,
4199     0x04659450,
4200     0xbd0256bb,
4201     0x0475fd50,
4202     0x21f550fc,
4203     - 0x64b609a9,
4204     - 0x2b11f404,
4205     - 0xb60553fd,
4206     - 0x1bf40142,
4207     - 0x0137f0d8,
4208     - 0xb60076bb,
4209     - 0x50f90465,
4210     - 0xbb046594,
4211     - 0x50bd0256,
4212     - 0xfc0475fd,
4213     - 0x6821f550,
4214     - 0x0464b609,
4215     -/* 0x0a40: i2c_get_byte_done */
4216     -/* 0x0a42: i2c_put_byte */
4217     - 0x47f000f8,
4218     -/* 0x0a45: i2c_put_byte_next */
4219     - 0x0142b608,
4220     - 0xbb3854ff,
4221     - 0x65b60076,
4222     - 0x9450f904,
4223     - 0x56bb0465,
4224     - 0xfd50bd02,
4225     - 0x50fc0475,
4226     - 0x096821f5,
4227     - 0xf40464b6,
4228     - 0x46b03411,
4229     - 0xd81bf400,
4230     + 0x64b60961,
4231     +/* 0x0a39: i2c_get_byte_done */
4232     +/* 0x0a3b: i2c_put_byte */
4233     + 0xf000f804,
4234     +/* 0x0a3e: i2c_put_byte_next */
4235     + 0x42b60847,
4236     + 0x3854ff01,
4237     0xb60076bb,
4238     0x50f90465,
4239     0xbb046594,
4240     0x50bd0256,
4241     0xfc0475fd,
4242     - 0xa921f550,
4243     + 0x6121f550,
4244     0x0464b609,
4245     - 0xbb0f11f4,
4246     - 0x36b00076,
4247     - 0x061bf401,
4248     -/* 0x0a9b: i2c_put_byte_done */
4249     - 0xf80132f4,
4250     -/* 0x0a9d: i2c_addr */
4251     - 0x0076bb00,
4252     + 0xb03411f4,
4253     + 0x1bf40046,
4254     + 0x0076bbd8,
4255     0xf90465b6,
4256     0x04659450,
4257     0xbd0256bb,
4258     0x0475fd50,
4259     0x21f550fc,
4260     - 0x64b608d9,
4261     - 0x2911f404,
4262     - 0x012ec3e7,
4263     - 0xfd0134b6,
4264     - 0x76bb0553,
4265     + 0x64b609a2,
4266     + 0x0f11f404,
4267     + 0xb00076bb,
4268     + 0x1bf40136,
4269     + 0x0132f406,
4270     +/* 0x0a94: i2c_put_byte_done */
4271     +/* 0x0a96: i2c_addr */
4272     + 0x76bb00f8,
4273     0x0465b600,
4274     0x659450f9,
4275     0x0256bb04,
4276     0x75fd50bd,
4277     0xf550fc04,
4278     - 0xb60a4221,
4279     -/* 0x0ae2: i2c_addr_done */
4280     - 0x00f80464,
4281     -/* 0x0ae4: i2c_acquire_addr */
4282     - 0xb6f8cec7,
4283     - 0xe0b702e4,
4284     - 0xee980d1c,
4285     -/* 0x0af3: i2c_acquire */
4286     - 0xf500f800,
4287     - 0xf40ae421,
4288     - 0xd9f00421,
4289     - 0x4021f403,
4290     -/* 0x0b02: i2c_release */
4291     - 0x21f500f8,
4292     - 0x21f40ae4,
4293     - 0x03daf004,
4294     - 0xf84021f4,
4295     -/* 0x0b11: i2c_recv */
4296     - 0x0132f400,
4297     - 0xb6f8c1c7,
4298     - 0x16b00214,
4299     - 0x3a1ff528,
4300     - 0xf413a001,
4301     - 0x0032980c,
4302     - 0x0ccc13a0,
4303     - 0xf4003198,
4304     - 0xd0f90231,
4305     - 0xd0f9e0f9,
4306     - 0x000067f1,
4307     - 0x100063f1,
4308     - 0xbb016792,
4309     + 0xb608d221,
4310     + 0x11f40464,
4311     + 0x2ec3e729,
4312     + 0x0134b601,
4313     + 0xbb0553fd,
4314     0x65b60076,
4315     0x9450f904,
4316     0x56bb0465,
4317     0xfd50bd02,
4318     0x50fc0475,
4319     - 0x0af321f5,
4320     - 0xfc0464b6,
4321     - 0x00d6b0d0,
4322     - 0x00b31bf5,
4323     - 0xbb0057f0,
4324     + 0x0a3b21f5,
4325     +/* 0x0adb: i2c_addr_done */
4326     + 0xf80464b6,
4327     +/* 0x0add: i2c_acquire_addr */
4328     + 0xf8cec700,
4329     + 0xb702e4b6,
4330     + 0x980d1ce0,
4331     + 0x00f800ee,
4332     +/* 0x0aec: i2c_acquire */
4333     + 0x0add21f5,
4334     + 0xf00421f4,
4335     + 0x21f403d9,
4336     +/* 0x0afb: i2c_release */
4337     + 0xf500f840,
4338     + 0xf40add21,
4339     + 0xdaf00421,
4340     + 0x4021f403,
4341     +/* 0x0b0a: i2c_recv */
4342     + 0x32f400f8,
4343     + 0xf8c1c701,
4344     + 0xb00214b6,
4345     + 0x1ff52816,
4346     + 0x13a0013a,
4347     + 0x32980cf4,
4348     + 0xcc13a000,
4349     + 0x0031980c,
4350     + 0xf90231f4,
4351     + 0xf9e0f9d0,
4352     + 0x0067f1d0,
4353     + 0x0063f100,
4354     + 0x01679210,
4355     + 0xb60076bb,
4356     + 0x50f90465,
4357     + 0xbb046594,
4358     + 0x50bd0256,
4359     + 0xfc0475fd,
4360     + 0xec21f550,
4361     + 0x0464b60a,
4362     + 0xd6b0d0fc,
4363     + 0xb31bf500,
4364     + 0x0057f000,
4365     + 0xb60076bb,
4366     + 0x50f90465,
4367     + 0xbb046594,
4368     + 0x50bd0256,
4369     + 0xfc0475fd,
4370     + 0x9621f550,
4371     + 0x0464b60a,
4372     + 0x00d011f5,
4373     + 0xbbe0c5c7,
4374     0x65b60076,
4375     0x9450f904,
4376     0x56bb0465,
4377     0xfd50bd02,
4378     0x50fc0475,
4379     - 0x0a9d21f5,
4380     + 0x0a3b21f5,
4381     0xf50464b6,
4382     - 0xc700d011,
4383     - 0x76bbe0c5,
4384     + 0xf000ad11,
4385     + 0x76bb0157,
4386     0x0465b600,
4387     0x659450f9,
4388     0x0256bb04,
4389     0x75fd50bd,
4390     0xf550fc04,
4391     - 0xb60a4221,
4392     + 0xb60a9621,
4393     0x11f50464,
4394     - 0x57f000ad,
4395     - 0x0076bb01,
4396     - 0xf90465b6,
4397     - 0x04659450,
4398     - 0xbd0256bb,
4399     - 0x0475fd50,
4400     - 0x21f550fc,
4401     - 0x64b60a9d,
4402     - 0x8a11f504,
4403     - 0x0076bb00,
4404     - 0xf90465b6,
4405     - 0x04659450,
4406     - 0xbd0256bb,
4407     - 0x0475fd50,
4408     - 0x21f550fc,
4409     - 0x64b609f0,
4410     - 0x6a11f404,
4411     - 0xbbe05bcb,
4412     - 0x65b60076,
4413     - 0x9450f904,
4414     - 0x56bb0465,
4415     - 0xfd50bd02,
4416     - 0x50fc0475,
4417     - 0x093521f5,
4418     - 0xb90464b6,
4419     - 0x74bd025b,
4420     -/* 0x0c17: i2c_recv_not_rd08 */
4421     - 0xb0430ef4,
4422     - 0x1bf401d6,
4423     - 0x0057f03d,
4424     - 0x0a9d21f5,
4425     - 0xc73311f4,
4426     - 0x21f5e0c5,
4427     - 0x11f40a42,
4428     - 0x0057f029,
4429     - 0x0a9d21f5,
4430     - 0xc71f11f4,
4431     - 0x21f5e0b5,
4432     - 0x11f40a42,
4433     - 0x3521f515,
4434     - 0xc774bd09,
4435     - 0x1bf408c5,
4436     - 0x0232f409,
4437     -/* 0x0c57: i2c_recv_not_wr08 */
4438     -/* 0x0c57: i2c_recv_done */
4439     - 0xc7030ef4,
4440     - 0x21f5f8ce,
4441     - 0xe0fc0b02,
4442     - 0x12f4d0fc,
4443     - 0x027cb90a,
4444     - 0x033621f5,
4445     -/* 0x0c6c: i2c_recv_exit */
4446     -/* 0x0c6e: i2c_init */
4447     + 0x76bb008a,
4448     + 0x0465b600,
4449     + 0x659450f9,
4450     + 0x0256bb04,
4451     + 0x75fd50bd,
4452     + 0xf550fc04,
4453     + 0xb609e921,
4454     + 0x11f40464,
4455     + 0xe05bcb6a,
4456     + 0xb60076bb,
4457     + 0x50f90465,
4458     + 0xbb046594,
4459     + 0x50bd0256,
4460     + 0xfc0475fd,
4461     + 0x2e21f550,
4462     + 0x0464b609,
4463     + 0xbd025bb9,
4464     + 0x430ef474,
4465     +/* 0x0c10: i2c_recv_not_rd08 */
4466     + 0xf401d6b0,
4467     + 0x57f03d1b,
4468     + 0x9621f500,
4469     + 0x3311f40a,
4470     + 0xf5e0c5c7,
4471     + 0xf40a3b21,
4472     + 0x57f02911,
4473     + 0x9621f500,
4474     + 0x1f11f40a,
4475     + 0xf5e0b5c7,
4476     + 0xf40a3b21,
4477     + 0x21f51511,
4478     + 0x74bd092e,
4479     + 0xf408c5c7,
4480     + 0x32f4091b,
4481     + 0x030ef402,
4482     +/* 0x0c50: i2c_recv_not_wr08 */
4483     +/* 0x0c50: i2c_recv_done */
4484     + 0xf5f8cec7,
4485     + 0xfc0afb21,
4486     + 0xf4d0fce0,
4487     + 0x7cb90a12,
4488     + 0x3621f502,
4489     +/* 0x0c65: i2c_recv_exit */
4490     +/* 0x0c67: i2c_init */
4491     + 0xf800f803,
4492     +/* 0x0c69: test_recv */
4493     + 0xd817f100,
4494     + 0x0614b605,
4495     + 0xb60011cf,
4496     + 0x07f10110,
4497     + 0x04b605d8,
4498     + 0x0001d006,
4499     + 0xe7f104bd,
4500     + 0xe3f1d900,
4501     + 0x21f5134f,
4502     + 0x00f80256,
4503     +/* 0x0c90: test_init */
4504     + 0x0800e7f1,
4505     + 0x025621f5,
4506     +/* 0x0c9a: idle_recv */
4507     0x00f800f8,
4508     -/* 0x0c70: test_recv */
4509     - 0x05d817f1,
4510     - 0xcf0614b6,
4511     - 0x10b60011,
4512     - 0xd807f101,
4513     - 0x0604b605,
4514     - 0xbd0001d0,
4515     - 0x00e7f104,
4516     - 0x4fe3f1d9,
4517     - 0x5621f513,
4518     -/* 0x0c97: test_init */
4519     - 0xf100f802,
4520     - 0xf50800e7,
4521     - 0xf8025621,
4522     -/* 0x0ca1: idle_recv */
4523     -/* 0x0ca3: idle */
4524     - 0xf400f800,
4525     - 0x17f10031,
4526     - 0x14b605d4,
4527     - 0x0011cf06,
4528     - 0xf10110b6,
4529     - 0xb605d407,
4530     - 0x01d00604,
4531     -/* 0x0cbf: idle_loop */
4532     - 0xf004bd00,
4533     - 0x32f45817,
4534     -/* 0x0cc5: idle_proc */
4535     -/* 0x0cc5: idle_proc_exec */
4536     - 0xb910f902,
4537     - 0x21f5021e,
4538     - 0x10fc033f,
4539     - 0xf40911f4,
4540     - 0x0ef40231,
4541     -/* 0x0cd9: idle_proc_next */
4542     - 0x5810b6ef,
4543     - 0xf4061fb8,
4544     - 0x02f4e61b,
4545     - 0x0028f4dd,
4546     - 0x00bb0ef4,
4547     +/* 0x0c9c: idle */
4548     + 0xf10031f4,
4549     + 0xb605d417,
4550     + 0x11cf0614,
4551     + 0x0110b600,
4552     + 0x05d407f1,
4553     + 0xd00604b6,
4554     + 0x04bd0001,
4555     +/* 0x0cb8: idle_loop */
4556     + 0xf45817f0,
4557     +/* 0x0cbe: idle_proc */
4558     +/* 0x0cbe: idle_proc_exec */
4559     + 0x10f90232,
4560     + 0xf5021eb9,
4561     + 0xfc033f21,
4562     + 0x0911f410,
4563     + 0xf40231f4,
4564     +/* 0x0cd2: idle_proc_next */
4565     + 0x10b6ef0e,
4566     + 0x061fb858,
4567     + 0xf4e61bf4,
4568     + 0x28f4dd02,
4569     + 0xbb0ef400,
4570     + 0x00000000,
4571     + 0x00000000,
4572     0x00000000,
4573     0x00000000,
4574     0x00000000,
4575     diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
4576     index ec03f9a4290b..1663bf943d77 100644
4577     --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
4578     +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/fuc/memx.fuc
4579     @@ -82,15 +82,15 @@ memx_train_tail:
4580     // $r0 - zero
4581     memx_func_enter:
4582     #if NVKM_PPWR_CHIPSET == GT215
4583     - movw $r8 0x1610
4584     + mov $r8 0x1610
4585     nv_rd32($r7, $r8)
4586     imm32($r6, 0xfffffffc)
4587     and $r7 $r6
4588     - movw $r6 0x2
4589     + mov $r6 0x2
4590     or $r7 $r6
4591     nv_wr32($r8, $r7)
4592     #else
4593     - movw $r6 0x001620
4594     + mov $r6 0x001620
4595     imm32($r7, ~0x00000aa2);
4596     nv_rd32($r8, $r6)
4597     and $r8 $r7
4598     @@ -101,7 +101,7 @@ memx_func_enter:
4599     and $r8 $r7
4600     nv_wr32($r6, $r8)
4601    
4602     - movw $r6 0x0026f0
4603     + mov $r6 0x0026f0
4604     nv_rd32($r8, $r6)
4605     and $r8 $r7
4606     nv_wr32($r6, $r8)
4607     @@ -136,19 +136,19 @@ memx_func_leave:
4608     bra nz #memx_func_leave_wait
4609    
4610     #if NVKM_PPWR_CHIPSET == GT215
4611     - movw $r8 0x1610
4612     + mov $r8 0x1610
4613     nv_rd32($r7, $r8)
4614     imm32($r6, 0xffffffcc)
4615     and $r7 $r6
4616     nv_wr32($r8, $r7)
4617     #else
4618     - movw $r6 0x0026f0
4619     + mov $r6 0x0026f0
4620     imm32($r7, 0x00000001)
4621     nv_rd32($r8, $r6)
4622     or $r8 $r7
4623     nv_wr32($r6, $r8)
4624    
4625     - movw $r6 0x001620
4626     + mov $r6 0x001620
4627     nv_rd32($r8, $r6)
4628     or $r8 $r7
4629     nv_wr32($r6, $r8)
4630     @@ -177,11 +177,11 @@ memx_func_wait_vblank:
4631     bra #memx_func_wait_vblank_fini
4632    
4633     memx_func_wait_vblank_head1:
4634     - movw $r7 0x20
4635     + mov $r7 0x20
4636     bra #memx_func_wait_vblank_0
4637    
4638     memx_func_wait_vblank_head0:
4639     - movw $r7 0x8
4640     + mov $r7 0x8
4641    
4642     memx_func_wait_vblank_0:
4643     nv_iord($r6, NV_PPWR_INPUT)
4644     @@ -273,13 +273,13 @@ memx_func_train:
4645     // $r5 - outer loop counter
4646     // $r6 - inner loop counter
4647     // $r7 - entry counter (#memx_train_head + $r7)
4648     - movw $r5 0x3
4649     - movw $r7 0x0
4650     + mov $r5 0x3
4651     + mov $r7 0x0
4652    
4653     // Read random memory to wake up... things
4654     imm32($r9, 0x700000)
4655     nv_rd32($r8,$r9)
4656     - movw $r14 0x2710
4657     + mov $r14 0x2710
4658     call(nsec)
4659    
4660     memx_func_train_loop_outer:
4661     @@ -289,9 +289,9 @@ memx_func_train:
4662     nv_wr32($r9, $r8)
4663     push $r5
4664    
4665     - movw $r6 0x0
4666     + mov $r6 0x0
4667     memx_func_train_loop_inner:
4668     - movw $r8 0x1111
4669     + mov $r8 0x1111
4670     mulu $r9 $r6 $r8
4671     shl b32 $r8 $r9 0x10
4672     or $r8 $r9
4673     @@ -315,7 +315,7 @@ memx_func_train:
4674    
4675     // $r5 - inner inner loop counter
4676     // $r9 - result
4677     - movw $r5 0
4678     + mov $r5 0
4679     imm32($r9, 0x8300ffff)
4680     memx_func_train_loop_4x:
4681     imm32($r10, 0x100080)
4682     diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
4683     index 3afdbf4bc10b..eff0a8ece8bc 100644
4684     --- a/drivers/gpu/drm/vc4/vc4_bo.c
4685     +++ b/drivers/gpu/drm/vc4/vc4_bo.c
4686     @@ -173,6 +173,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo)
4687     vc4_bo_set_label(obj, -1);
4688    
4689     if (bo->validated_shader) {
4690     + kfree(bo->validated_shader->uniform_addr_offsets);
4691     kfree(bo->validated_shader->texture_samples);
4692     kfree(bo->validated_shader);
4693     bo->validated_shader = NULL;
4694     @@ -432,6 +433,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo)
4695     }
4696    
4697     if (bo->validated_shader) {
4698     + kfree(bo->validated_shader->uniform_addr_offsets);
4699     kfree(bo->validated_shader->texture_samples);
4700     kfree(bo->validated_shader);
4701     bo->validated_shader = NULL;
4702     diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
4703     index d3f15bf60900..7cf82b071de2 100644
4704     --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
4705     +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
4706     @@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
4707     fail:
4708     kfree(validation_state.branch_targets);
4709     if (validated_shader) {
4710     + kfree(validated_shader->uniform_addr_offsets);
4711     kfree(validated_shader->texture_samples);
4712     kfree(validated_shader);
4713     }
4714     diff --git a/drivers/hid/hid-roccat-kovaplus.c b/drivers/hid/hid-roccat-kovaplus.c
4715     index 43617fb28b87..317c9c2c0a7c 100644
4716     --- a/drivers/hid/hid-roccat-kovaplus.c
4717     +++ b/drivers/hid/hid-roccat-kovaplus.c
4718     @@ -37,6 +37,8 @@ static uint kovaplus_convert_event_cpi(uint value)
4719     static void kovaplus_profile_activated(struct kovaplus_device *kovaplus,
4720     uint new_profile_index)
4721     {
4722     + if (new_profile_index >= ARRAY_SIZE(kovaplus->profile_settings))
4723     + return;
4724     kovaplus->actual_profile = new_profile_index;
4725     kovaplus->actual_cpi = kovaplus->profile_settings[new_profile_index].cpi_startup_level;
4726     kovaplus->actual_x_sensitivity = kovaplus->profile_settings[new_profile_index].sensitivity_x;
4727     diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
4728     index 25de7cc9f49f..6c725c435f5d 100644
4729     --- a/drivers/infiniband/core/cma.c
4730     +++ b/drivers/infiniband/core/cma.c
4731     @@ -624,11 +624,13 @@ static inline int cma_validate_port(struct ib_device *device, u8 port,
4732     if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
4733     return ret;
4734    
4735     - if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port))
4736     + if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
4737     ndev = dev_get_by_index(&init_net, bound_if_index);
4738     - else
4739     + if (!ndev)
4740     + return ret;
4741     + } else {
4742     gid_type = IB_GID_TYPE_IB;
4743     -
4744     + }
4745    
4746     ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,
4747     ndev, NULL);
4748     diff --git a/drivers/infiniband/core/cq.c b/drivers/infiniband/core/cq.c
4749     index f2ae75fa3128..757d308bebe8 100644
4750     --- a/drivers/infiniband/core/cq.c
4751     +++ b/drivers/infiniband/core/cq.c
4752     @@ -17,6 +17,7 @@
4753    
4754     /* # of WCs to poll for with a single call to ib_poll_cq */
4755     #define IB_POLL_BATCH 16
4756     +#define IB_POLL_BATCH_DIRECT 8
4757    
4758     /* # of WCs to iterate over before yielding */
4759     #define IB_POLL_BUDGET_IRQ 256
4760     @@ -25,7 +26,8 @@
4761     #define IB_POLL_FLAGS \
4762     (IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)
4763    
4764     -static int __ib_process_cq(struct ib_cq *cq, int budget)
4765     +static int __ib_process_cq(struct ib_cq *cq, int budget, struct ib_wc *wcs,
4766     + int batch)
4767     {
4768     int i, n, completed = 0;
4769    
4770     @@ -34,10 +36,10 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
4771     * want to bound this call, thus we need unsigned
4772     * minimum here.
4773     */
4774     - while ((n = ib_poll_cq(cq, min_t(u32, IB_POLL_BATCH,
4775     - budget - completed), cq->wc)) > 0) {
4776     + while ((n = ib_poll_cq(cq, min_t(u32, batch,
4777     + budget - completed), wcs)) > 0) {
4778     for (i = 0; i < n; i++) {
4779     - struct ib_wc *wc = &cq->wc[i];
4780     + struct ib_wc *wc = &wcs[i];
4781    
4782     if (wc->wr_cqe)
4783     wc->wr_cqe->done(cq, wc);
4784     @@ -47,8 +49,7 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
4785    
4786     completed += n;
4787    
4788     - if (n != IB_POLL_BATCH ||
4789     - (budget != -1 && completed >= budget))
4790     + if (n != batch || (budget != -1 && completed >= budget))
4791     break;
4792     }
4793    
4794     @@ -60,18 +61,20 @@ static int __ib_process_cq(struct ib_cq *cq, int budget)
4795     * @cq: CQ to process
4796     * @budget: number of CQEs to poll for
4797     *
4798     - * This function is used to process all outstanding CQ entries on a
4799     - * %IB_POLL_DIRECT CQ. It does not offload CQ processing to a different
4800     - * context and does not ask for completion interrupts from the HCA.
4801     + * This function is used to process all outstanding CQ entries.
4802     + * It does not offload CQ processing to a different context and does
4803     + * not ask for completion interrupts from the HCA.
4804     + * Using direct processing on a CQ with a non-IB_POLL_DIRECT type may
4805     + * trigger concurrent processing.
4806     *
4807     * Note: do not pass -1 as %budget unless it is guaranteed that the number
4808     * of completions that will be processed is small.
4809     */
4810     int ib_process_cq_direct(struct ib_cq *cq, int budget)
4811     {
4812     - WARN_ON_ONCE(cq->poll_ctx != IB_POLL_DIRECT);
4813     + struct ib_wc wcs[IB_POLL_BATCH_DIRECT];
4814    
4815     - return __ib_process_cq(cq, budget);
4816     + return __ib_process_cq(cq, budget, wcs, IB_POLL_BATCH_DIRECT);
4817     }
4818     EXPORT_SYMBOL(ib_process_cq_direct);
4819    
4820     @@ -85,7 +88,7 @@ static int ib_poll_handler(struct irq_poll *iop, int budget)
4821     struct ib_cq *cq = container_of(iop, struct ib_cq, iop);
4822     int completed;
4823    
4824     - completed = __ib_process_cq(cq, budget);
4825     + completed = __ib_process_cq(cq, budget, cq->wc, IB_POLL_BATCH);
4826     if (completed < budget) {
4827     irq_poll_complete(&cq->iop);
4828     if (ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
4829     @@ -105,7 +108,8 @@ static void ib_cq_poll_work(struct work_struct *work)
4830     struct ib_cq *cq = container_of(work, struct ib_cq, work);
4831     int completed;
4832    
4833     - completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE);
4834     + completed = __ib_process_cq(cq, IB_POLL_BUDGET_WORKQUEUE, cq->wc,
4835     + IB_POLL_BATCH);
4836     if (completed >= IB_POLL_BUDGET_WORKQUEUE ||
4837     ib_req_notify_cq(cq, IB_POLL_FLAGS) > 0)
4838     queue_work(ib_comp_wq, &cq->work);
4839     diff --git a/drivers/infiniband/core/uverbs_ioctl.c b/drivers/infiniband/core/uverbs_ioctl.c
4840     index 5286ad57d903..8f2dc79ad4ec 100644
4841     --- a/drivers/infiniband/core/uverbs_ioctl.c
4842     +++ b/drivers/infiniband/core/uverbs_ioctl.c
4843     @@ -245,16 +245,13 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
4844     uintptr_t data[UVERBS_OPTIMIZE_USING_STACK_SZ / sizeof(uintptr_t)];
4845     #endif
4846    
4847     - if (hdr->reserved)
4848     - return -EINVAL;
4849     -
4850     object_spec = uverbs_get_object(ib_dev, hdr->object_id);
4851     if (!object_spec)
4852     - return -EOPNOTSUPP;
4853     + return -EPROTONOSUPPORT;
4854    
4855     method_spec = uverbs_get_method(object_spec, hdr->method_id);
4856     if (!method_spec)
4857     - return -EOPNOTSUPP;
4858     + return -EPROTONOSUPPORT;
4859    
4860     if ((method_spec->flags & UVERBS_ACTION_FLAG_CREATE_ROOT) ^ !file->ucontext)
4861     return -EINVAL;
4862     @@ -310,6 +307,16 @@ static long ib_uverbs_cmd_verbs(struct ib_device *ib_dev,
4863    
4864     err = uverbs_handle_method(buf, ctx->uattrs, hdr->num_attrs, ib_dev,
4865     file, method_spec, ctx->uverbs_attr_bundle);
4866     +
4867     + /*
4868     + * EPROTONOSUPPORT is ONLY to be returned if the ioctl framework can
4869     + * not invoke the method because the request is not supported. No
4870     + * other cases should return this code.
4871     + */
4872     + if (unlikely(err == -EPROTONOSUPPORT)) {
4873     + WARN_ON_ONCE(err == -EPROTONOSUPPORT);
4874     + err = -EINVAL;
4875     + }
4876     out:
4877     #ifdef UVERBS_OPTIMIZE_USING_STACK_SZ
4878     if (ctx_size > UVERBS_OPTIMIZE_USING_STACK_SZ)
4879     @@ -348,7 +355,7 @@ long ib_uverbs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4880     }
4881    
4882     if (hdr.reserved) {
4883     - err = -EOPNOTSUPP;
4884     + err = -EPROTONOSUPPORT;
4885     goto out;
4886     }
4887    
4888     diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
4889     index 0e17d03ef1cb..82114ba86041 100644
4890     --- a/drivers/infiniband/hw/hfi1/chip.c
4891     +++ b/drivers/infiniband/hw/hfi1/chip.c
4892     @@ -8294,8 +8294,8 @@ static irqreturn_t sdma_interrupt(int irq, void *data)
4893     /* handle the interrupt(s) */
4894     sdma_engine_interrupt(sde, status);
4895     } else {
4896     - dd_dev_err_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
4897     - sde->this_idx);
4898     + dd_dev_info_ratelimited(dd, "SDMA engine %u interrupt, but no status bits set\n",
4899     + sde->this_idx);
4900     }
4901     return IRQ_HANDLED;
4902     }
4903     @@ -12967,7 +12967,14 @@ static void disable_intx(struct pci_dev *pdev)
4904     pci_intx(pdev, 0);
4905     }
4906    
4907     -static void clean_up_interrupts(struct hfi1_devdata *dd)
4908     +/**
4909     + * hfi1_clean_up_interrupts() - Free all IRQ resources
4910     + * @dd: valid device data structure
4911     + *
4912     + * Free the MSI or INTx IRQs and associated PCI resources,
4913     + * if they have been allocated.
4914     + */
4915     +void hfi1_clean_up_interrupts(struct hfi1_devdata *dd)
4916     {
4917     int i;
4918    
4919     @@ -13344,7 +13351,7 @@ static int set_up_interrupts(struct hfi1_devdata *dd)
4920     return 0;
4921    
4922     fail:
4923     - clean_up_interrupts(dd);
4924     + hfi1_clean_up_interrupts(dd);
4925     return ret;
4926     }
4927    
4928     @@ -14770,7 +14777,6 @@ void hfi1_start_cleanup(struct hfi1_devdata *dd)
4929     aspm_exit(dd);
4930     free_cntrs(dd);
4931     free_rcverr(dd);
4932     - clean_up_interrupts(dd);
4933     finish_chip_resources(dd);
4934     }
4935    
4936     @@ -15229,7 +15235,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev,
4937     bail_free_cntrs:
4938     free_cntrs(dd);
4939     bail_clear_intr:
4940     - clean_up_interrupts(dd);
4941     + hfi1_clean_up_interrupts(dd);
4942     bail_cleanup:
4943     hfi1_pcie_ddcleanup(dd);
4944     bail_free:
4945     diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
4946     index fd28f09b4445..ee2253d06984 100644
4947     --- a/drivers/infiniband/hw/hfi1/file_ops.c
4948     +++ b/drivers/infiniband/hw/hfi1/file_ops.c
4949     @@ -191,9 +191,6 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
4950     if (!atomic_inc_not_zero(&dd->user_refcount))
4951     return -ENXIO;
4952    
4953     - /* Just take a ref now. Not all opens result in a context assign */
4954     - kobject_get(&dd->kobj);
4955     -
4956     /* The real work is performed later in assign_ctxt() */
4957    
4958     fd = kzalloc(sizeof(*fd), GFP_KERNEL);
4959     @@ -203,6 +200,7 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
4960     fd->mm = current->mm;
4961     mmgrab(fd->mm);
4962     fd->dd = dd;
4963     + kobject_get(&fd->dd->kobj);
4964     fp->private_data = fd;
4965     } else {
4966     fp->private_data = NULL;
4967     diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
4968     index 3409eee16092..dc9c951ef946 100644
4969     --- a/drivers/infiniband/hw/hfi1/hfi.h
4970     +++ b/drivers/infiniband/hw/hfi1/hfi.h
4971     @@ -1954,6 +1954,7 @@ void hfi1_verbs_unregister_sysfs(struct hfi1_devdata *dd);
4972     int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len);
4973    
4974     int hfi1_pcie_init(struct pci_dev *pdev, const struct pci_device_id *ent);
4975     +void hfi1_clean_up_interrupts(struct hfi1_devdata *dd);
4976     void hfi1_pcie_cleanup(struct pci_dev *pdev);
4977     int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev);
4978     void hfi1_pcie_ddcleanup(struct hfi1_devdata *);
4979     diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
4980     index fba77001c3a7..d4fc8795cdf6 100644
4981     --- a/drivers/infiniband/hw/hfi1/init.c
4982     +++ b/drivers/infiniband/hw/hfi1/init.c
4983     @@ -1039,8 +1039,9 @@ static void shutdown_device(struct hfi1_devdata *dd)
4984     }
4985     dd->flags &= ~HFI1_INITTED;
4986    
4987     - /* mask interrupts, but not errors */
4988     + /* mask and clean up interrupts, but not errors */
4989     set_intr_state(dd, 0);
4990     + hfi1_clean_up_interrupts(dd);
4991    
4992     for (pidx = 0; pidx < dd->num_pports; ++pidx) {
4993     ppd = dd->pport + pidx;
4994     @@ -1696,6 +1697,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
4995     dd_dev_err(dd, "Failed to create /dev devices: %d\n", -j);
4996    
4997     if (initfail || ret) {
4998     + hfi1_clean_up_interrupts(dd);
4999     stop_timers(dd);
5000     flush_workqueue(ib_wq);
5001     for (pidx = 0; pidx < dd->num_pports; ++pidx) {
5002     diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c
5003     index 14d38d733cb4..27a2d782f6d9 100644
5004     --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c
5005     +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c
5006     @@ -48,7 +48,6 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid);
5007     static void i40iw_ilq_putback_rcvbuf(struct i40iw_sc_qp *qp, u32 wqe_idx);
5008     static enum i40iw_status_code i40iw_puda_replenish_rq(struct i40iw_puda_rsrc
5009     *rsrc, bool initial);
5010     -static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
5011     /**
5012     * i40iw_puda_get_listbuf - get buffer from puda list
5013     * @list: list to use for buffers (ILQ or IEQ)
5014     @@ -1480,7 +1479,7 @@ static void i40iw_ieq_tx_compl(struct i40iw_sc_vsi *vsi, void *sqwrid)
5015     * @ieq: ieq resource
5016     * @qp: all pending fpdu buffers
5017     */
5018     -static void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
5019     +void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp)
5020     {
5021     struct i40iw_puda_buf *buf;
5022     struct i40iw_pfpdu *pfpdu = &qp->pfpdu;
5023     diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.h b/drivers/infiniband/hw/i40iw/i40iw_puda.h
5024     index dba05ce7d392..ebe37f157d90 100644
5025     --- a/drivers/infiniband/hw/i40iw/i40iw_puda.h
5026     +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.h
5027     @@ -186,4 +186,5 @@ enum i40iw_status_code i40iw_cqp_qp_create_cmd(struct i40iw_sc_dev *dev, struct
5028     enum i40iw_status_code i40iw_cqp_cq_create_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
5029     void i40iw_cqp_qp_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
5030     void i40iw_cqp_cq_destroy_cmd(struct i40iw_sc_dev *dev, struct i40iw_sc_cq *cq);
5031     +void i40iw_ieq_cleanup_qp(struct i40iw_puda_rsrc *ieq, struct i40iw_sc_qp *qp);
5032     #endif
5033     diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
5034     index 62be0a41ad0b..b7961f21b555 100644
5035     --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
5036     +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
5037     @@ -428,6 +428,7 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev,
5038     {
5039     struct i40iw_pbl *iwpbl = &iwqp->iwpbl;
5040    
5041     + i40iw_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
5042     i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp);
5043     if (qp_num)
5044     i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num);
5045     @@ -1655,6 +1656,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
5046     err_code = -EOVERFLOW;
5047     goto err;
5048     }
5049     + stag &= ~I40IW_CQPSQ_STAG_KEY_MASK;
5050     iwmr->stag = stag;
5051     iwmr->ibmr.rkey = stag;
5052     iwmr->ibmr.lkey = stag;
5053     diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
5054     index c4d8cc1c2b1d..464c78f8cec9 100644
5055     --- a/drivers/infiniband/hw/mlx5/qp.c
5056     +++ b/drivers/infiniband/hw/mlx5/qp.c
5057     @@ -2923,7 +2923,8 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
5058     * If we moved a kernel QP to RESET, clean up all old CQ
5059     * entries and reinitialize the QP.
5060     */
5061     - if (new_state == IB_QPS_RESET && !ibqp->uobject) {
5062     + if (new_state == IB_QPS_RESET &&
5063     + !ibqp->uobject && ibqp->qp_type != IB_QPT_XRC_TGT) {
5064     mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
5065     ibqp->srq ? to_msrq(ibqp->srq) : NULL);
5066     if (send_cq != recv_cq)
5067     @@ -4636,13 +4637,10 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
5068     int err;
5069    
5070     err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
5071     - if (err) {
5072     + if (err)
5073     mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
5074     - return err;
5075     - }
5076    
5077     kfree(xrcd);
5078     -
5079     return 0;
5080     }
5081    
5082     diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
5083     index a009e943362a..6bc9a768f721 100644
5084     --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
5085     +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
5086     @@ -2273,6 +2273,9 @@ static struct net_device *ipoib_add_port(const char *format,
5087     priv->ca, ipoib_event);
5088     ib_register_event_handler(&priv->event_handler);
5089    
5090     + /* call event handler to ensure pkey in sync */
5091     + queue_work(ipoib_workqueue, &priv->flush_heavy);
5092     +
5093     result = register_netdev(priv->dev);
5094     if (result) {
5095     printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
5096     diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
5097     index 6a5649e52eed..8ac9e03c05b4 100644
5098     --- a/drivers/input/mouse/psmouse-base.c
5099     +++ b/drivers/input/mouse/psmouse-base.c
5100     @@ -975,6 +975,21 @@ static void psmouse_apply_defaults(struct psmouse *psmouse)
5101     psmouse->pt_deactivate = NULL;
5102     }
5103    
5104     +static bool psmouse_do_detect(int (*detect)(struct psmouse *, bool),
5105     + struct psmouse *psmouse, bool allow_passthrough,
5106     + bool set_properties)
5107     +{
5108     + if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
5109     + !allow_passthrough) {
5110     + return false;
5111     + }
5112     +
5113     + if (set_properties)
5114     + psmouse_apply_defaults(psmouse);
5115     +
5116     + return detect(psmouse, set_properties) == 0;
5117     +}
5118     +
5119     static bool psmouse_try_protocol(struct psmouse *psmouse,
5120     enum psmouse_type type,
5121     unsigned int *max_proto,
5122     @@ -986,15 +1001,8 @@ static bool psmouse_try_protocol(struct psmouse *psmouse,
5123     if (!proto)
5124     return false;
5125    
5126     - if (psmouse->ps2dev.serio->id.type == SERIO_PS_PSTHRU &&
5127     - !proto->try_passthru) {
5128     - return false;
5129     - }
5130     -
5131     - if (set_properties)
5132     - psmouse_apply_defaults(psmouse);
5133     -
5134     - if (proto->detect(psmouse, set_properties) != 0)
5135     + if (!psmouse_do_detect(proto->detect, psmouse, proto->try_passthru,
5136     + set_properties))
5137     return false;
5138    
5139     if (set_properties && proto->init && init_allowed) {
5140     @@ -1027,8 +1035,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
5141     * Always check for focaltech, this is safe as it uses pnp-id
5142     * matching.
5143     */
5144     - if (psmouse_try_protocol(psmouse, PSMOUSE_FOCALTECH,
5145     - &max_proto, set_properties, false)) {
5146     + if (psmouse_do_detect(focaltech_detect,
5147     + psmouse, false, set_properties)) {
5148     if (max_proto > PSMOUSE_IMEX &&
5149     IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH) &&
5150     (!set_properties || focaltech_init(psmouse) == 0)) {
5151     @@ -1074,8 +1082,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
5152     * probing for IntelliMouse.
5153     */
5154     if (max_proto > PSMOUSE_PS2 &&
5155     - psmouse_try_protocol(psmouse, PSMOUSE_SYNAPTICS, &max_proto,
5156     - set_properties, false)) {
5157     + psmouse_do_detect(synaptics_detect,
5158     + psmouse, false, set_properties)) {
5159     synaptics_hardware = true;
5160    
5161     if (max_proto > PSMOUSE_IMEX) {
5162     diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
5163     index ee5466a374bf..a246fc686bb7 100644
5164     --- a/drivers/input/mouse/synaptics.c
5165     +++ b/drivers/input/mouse/synaptics.c
5166     @@ -1280,6 +1280,16 @@ static void set_input_params(struct psmouse *psmouse,
5167     INPUT_MT_POINTER |
5168     (cr48_profile_sensor ?
5169     INPUT_MT_TRACK : INPUT_MT_SEMI_MT));
5170     +
5171     + /*
5172     + * For semi-mt devices we send ABS_X/Y ourselves instead of
5173     + * input_mt_report_pointer_emulation. But
5174     + * input_mt_init_slots() resets the fuzz to 0, leading to a
5175     + * filtered ABS_MT_POSITION_X but an unfiltered ABS_X
5176     + * position. Let's re-initialize ABS_X/Y here.
5177     + */
5178     + if (!cr48_profile_sensor)
5179     + set_abs_position_params(dev, &priv->info, ABS_X, ABS_Y);
5180     }
5181    
5182     if (SYN_CAP_PALMDETECT(info->capabilities))
5183     diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c
5184     index 8c6c6178ec12..025bae3853cc 100644
5185     --- a/drivers/input/touchscreen/stmfts.c
5186     +++ b/drivers/input/touchscreen/stmfts.c
5187     @@ -687,6 +687,14 @@ static int stmfts_probe(struct i2c_client *client,
5188    
5189     input_set_drvdata(sdata->input, sdata);
5190    
5191     + /*
5192     + * stmfts_power_on expects interrupt to be disabled, but
5193     + * at this point the device is still off and I do not trust
5194     + * the status of the irq line that can generate some spurious
5195     + * interrupts. To be on the safe side it's better to not enable
5196     + * the interrupts during their request.
5197     + */
5198     + irq_set_status_flags(client->irq, IRQ_NOAUTOEN);
5199     err = devm_request_threaded_irq(&client->dev, client->irq,
5200     NULL, stmfts_irq_handler,
5201     IRQF_ONESHOT,
5202     @@ -694,9 +702,6 @@ static int stmfts_probe(struct i2c_client *client,
5203     if (err)
5204     return err;
5205    
5206     - /* stmfts_power_on expects interrupt to be disabled */
5207     - disable_irq(client->irq);
5208     -
5209     dev_dbg(&client->dev, "initializing ST-Microelectronics FTS...\n");
5210    
5211     err = stmfts_power_on(sdata);
5212     diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
5213     index 25c2c75f5332..13485a40dd46 100644
5214     --- a/drivers/iommu/exynos-iommu.c
5215     +++ b/drivers/iommu/exynos-iommu.c
5216     @@ -1344,8 +1344,15 @@ static const struct iommu_ops exynos_iommu_ops = {
5217    
5218     static int __init exynos_iommu_init(void)
5219     {
5220     + struct device_node *np;
5221     int ret;
5222    
5223     + np = of_find_matching_node(NULL, sysmmu_of_match);
5224     + if (!np)
5225     + return 0;
5226     +
5227     + of_node_put(np);
5228     +
5229     lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
5230     LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
5231     if (!lv2table_kmem_cache) {
5232     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
5233     index 83f3d4831f94..e8414bcf8390 100644
5234     --- a/drivers/iommu/intel-iommu.c
5235     +++ b/drivers/iommu/intel-iommu.c
5236     @@ -1603,8 +1603,7 @@ static void iommu_flush_iotlb_psi(struct intel_iommu *iommu,
5237     * flush. However, device IOTLB doesn't need to be flushed in this case.
5238     */
5239     if (!cap_caching_mode(iommu->cap) || !map)
5240     - iommu_flush_dev_iotlb(get_iommu_domain(iommu, did),
5241     - addr, mask);
5242     + iommu_flush_dev_iotlb(domain, addr, mask);
5243     }
5244    
5245     static void iommu_flush_iova(struct iova_domain *iovad)
5246     diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
5247     index 14a8c0a7e095..25a98de5cfb2 100644
5248     --- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
5249     +++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
5250     @@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void)
5251    
5252     for (np = of_find_matching_node(NULL, its_device_id); np;
5253     np = of_find_matching_node(np, its_device_id)) {
5254     + if (!of_device_is_available(np))
5255     + continue;
5256     if (!of_property_read_bool(np, "msi-controller"))
5257     continue;
5258    
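
The same of_device_is_available() check is added to the other ITS init loops below
(platform MSI, the main ITS probe, and the fsl-mc MSI code later in this patch), so that
nodes marked status = "disabled" in the device tree are skipped. A small hedged sketch of
the loop shape (example_match is an invented match table, the pr_info() stands in for the
real per-node setup):

#include <linux/of.h>
#include <linux/printk.h>

extern const struct of_device_id example_match[];

/* Sketch only: walk all matching nodes but ignore the ones the device
 * tree marks as disabled (status other than "okay"/"ok"). */
static void __init example_probe_all(void)
{
        struct device_node *np;

        for (np = of_find_matching_node(NULL, example_match); np;
             np = of_find_matching_node(np, example_match)) {
                if (!of_device_is_available(np))
                        continue;

                pr_info("setting up %pOF\n", np); /* per-node setup goes here */
        }
}
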
5259     diff --git a/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
5260     index 833a90fe33ae..8881a053c173 100644
5261     --- a/drivers/irqchip/irq-gic-v3-its-platform-msi.c
5262     +++ b/drivers/irqchip/irq-gic-v3-its-platform-msi.c
5263     @@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void)
5264    
5265     for (np = of_find_matching_node(NULL, its_device_id); np;
5266     np = of_find_matching_node(np, its_device_id)) {
5267     + if (!of_device_is_available(np))
5268     + continue;
5269     if (!of_property_read_bool(np, "msi-controller"))
5270     continue;
5271    
5272     diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
5273     index af57f8473a88..13f195c9743e 100644
5274     --- a/drivers/irqchip/irq-gic-v3-its.c
5275     +++ b/drivers/irqchip/irq-gic-v3-its.c
5276     @@ -3083,6 +3083,8 @@ static int __init its_of_probe(struct device_node *node)
5277    
5278     for (np = of_find_matching_node(node, its_device_id); np;
5279     np = of_find_matching_node(np, its_device_id)) {
5280     + if (!of_device_is_available(np))
5281     + continue;
5282     if (!of_property_read_bool(np, "msi-controller")) {
5283     pr_warn("%pOF: no msi-controller property, ITS ignored\n",
5284     np);
5285     diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
5286     index 848fcdf6a112..3d7374655587 100644
5287     --- a/drivers/irqchip/irq-gic-v3.c
5288     +++ b/drivers/irqchip/irq-gic-v3.c
5289     @@ -645,7 +645,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
5290     MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
5291     tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
5292    
5293     - pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
5294     + pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
5295     gic_write_sgi1r(val);
5296     }
5297    
5298     diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
5299     index f0dc8e2aee65..8c13a9036d07 100644
5300     --- a/drivers/md/bcache/alloc.c
5301     +++ b/drivers/md/bcache/alloc.c
5302     @@ -287,8 +287,10 @@ do { \
5303     break; \
5304     \
5305     mutex_unlock(&(ca)->set->bucket_lock); \
5306     - if (kthread_should_stop()) \
5307     + if (kthread_should_stop()) { \
5308     + set_current_state(TASK_RUNNING); \
5309     return 0; \
5310     + } \
5311     \
5312     schedule(); \
5313     mutex_lock(&(ca)->set->bucket_lock); \
5314     diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
5315     index abd31e847f96..e4a3f692057b 100644
5316     --- a/drivers/md/bcache/bcache.h
5317     +++ b/drivers/md/bcache/bcache.h
5318     @@ -906,7 +906,7 @@ void bcache_write_super(struct cache_set *);
5319    
5320     int bch_flash_dev_create(struct cache_set *c, uint64_t size);
5321    
5322     -int bch_cached_dev_attach(struct cached_dev *, struct cache_set *);
5323     +int bch_cached_dev_attach(struct cached_dev *, struct cache_set *, uint8_t *);
5324     void bch_cached_dev_detach(struct cached_dev *);
5325     void bch_cached_dev_run(struct cached_dev *);
5326     void bcache_device_stop(struct bcache_device *);
5327     diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
5328     index 1598d1e04989..89d088cf95d9 100644
5329     --- a/drivers/md/bcache/btree.c
5330     +++ b/drivers/md/bcache/btree.c
5331     @@ -1868,14 +1868,17 @@ void bch_initial_gc_finish(struct cache_set *c)
5332     */
5333     for_each_cache(ca, c, i) {
5334     for_each_bucket(b, ca) {
5335     - if (fifo_full(&ca->free[RESERVE_PRIO]))
5336     + if (fifo_full(&ca->free[RESERVE_PRIO]) &&
5337     + fifo_full(&ca->free[RESERVE_BTREE]))
5338     break;
5339    
5340     if (bch_can_invalidate_bucket(ca, b) &&
5341     !GC_MARK(b)) {
5342     __bch_invalidate_one_bucket(ca, b);
5343     - fifo_push(&ca->free[RESERVE_PRIO],
5344     - b - ca->buckets);
5345     + if (!fifo_push(&ca->free[RESERVE_PRIO],
5346     + b - ca->buckets))
5347     + fifo_push(&ca->free[RESERVE_BTREE],
5348     + b - ca->buckets);
5349     }
5350     }
5351     }
5352     diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
5353     index 5d0430777dda..fe6e4c319b7c 100644
5354     --- a/drivers/md/bcache/super.c
5355     +++ b/drivers/md/bcache/super.c
5356     @@ -939,7 +939,8 @@ void bch_cached_dev_detach(struct cached_dev *dc)
5357     cached_dev_put(dc);
5358     }
5359    
5360     -int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
5361     +int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
5362     + uint8_t *set_uuid)
5363     {
5364     uint32_t rtime = cpu_to_le32(get_seconds());
5365     struct uuid_entry *u;
5366     @@ -948,7 +949,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c)
5367    
5368     bdevname(dc->bdev, buf);
5369    
5370     - if (memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16))
5371     + if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
5372     + (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
5373     return -ENOENT;
5374    
5375     if (dc->disk.c) {
5376     @@ -1190,7 +1192,7 @@ static void register_bdev(struct cache_sb *sb, struct page *sb_page,
5377    
5378     list_add(&dc->list, &uncached_devices);
5379     list_for_each_entry(c, &bch_cache_sets, list)
5380     - bch_cached_dev_attach(dc, c);
5381     + bch_cached_dev_attach(dc, c, NULL);
5382    
5383     if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
5384     BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
5385     @@ -1712,7 +1714,7 @@ static void run_cache_set(struct cache_set *c)
5386     bcache_write_super(c);
5387    
5388     list_for_each_entry_safe(dc, t, &uncached_devices, list)
5389     - bch_cached_dev_attach(dc, c);
5390     + bch_cached_dev_attach(dc, c, NULL);
5391    
5392     flash_devs_run(c);
5393    
5394     @@ -1829,6 +1831,7 @@ void bch_cache_release(struct kobject *kobj)
5395     static int cache_alloc(struct cache *ca)
5396     {
5397     size_t free;
5398     + size_t btree_buckets;
5399     struct bucket *b;
5400    
5401     __module_get(THIS_MODULE);
5402     @@ -1836,9 +1839,19 @@ static int cache_alloc(struct cache *ca)
5403    
5404     bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
5405    
5406     + /*
5407     + * When ca->sb.njournal_buckets is not zero, a journal exists
5408     + * and btree nodes may split during bch_journal_replay(), so
5409     + * buckets of type RESERVE_BTREE are needed. In the worst case
5410     + * every journal bucket holds valid journal data and all of the
5411     + * keys need to be replayed, so the number of RESERVE_BTREE
5412     + * buckets should be at least as large as the number of
5413     + * journal buckets.
5414     + */
5415     + btree_buckets = ca->sb.njournal_buckets ?: 8;
5416     free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
5417    
5418     - if (!init_fifo(&ca->free[RESERVE_BTREE], 8, GFP_KERNEL) ||
5419     + if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
5420     !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
5421     !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
5422     !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
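
The btree_buckets assignment in the hunk above uses the GNU C "?:" shorthand: "a ?: b"
evaluates to a unless a is zero, in which case it yields b, i.e. one reserve bucket per
journal bucket, falling back to 8 when there is no journal. A stand-alone illustration
(values invented):

#include <stdio.h>

int main(void)
{
        unsigned int njournal_buckets = 0;
        unsigned int btree_buckets;

        /* GNU extension: "x ?: y" is "x ? x : y" with x evaluated once */
        btree_buckets = njournal_buckets ?: 8;
        printf("njournal=0   -> reserve %u\n", btree_buckets);  /* 8 */

        njournal_buckets = 256;
        btree_buckets = njournal_buckets ?: 8;
        printf("njournal=256 -> reserve %u\n", btree_buckets);  /* 256 */

        return 0;
}
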
5423     diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
5424     index 234b2f5b286d..5d81cd06af00 100644
5425     --- a/drivers/md/bcache/sysfs.c
5426     +++ b/drivers/md/bcache/sysfs.c
5427     @@ -193,7 +193,7 @@ STORE(__cached_dev)
5428     {
5429     struct cached_dev *dc = container_of(kobj, struct cached_dev,
5430     disk.kobj);
5431     - ssize_t v = size;
5432     + ssize_t v;
5433     struct cache_set *c;
5434     struct kobj_uevent_env *env;
5435    
5436     @@ -265,17 +265,20 @@ STORE(__cached_dev)
5437     }
5438    
5439     if (attr == &sysfs_attach) {
5440     - if (bch_parse_uuid(buf, dc->sb.set_uuid) < 16)
5441     + uint8_t set_uuid[16];
5442     +
5443     + if (bch_parse_uuid(buf, set_uuid) < 16)
5444     return -EINVAL;
5445    
5446     + v = -ENOENT;
5447     list_for_each_entry(c, &bch_cache_sets, list) {
5448     - v = bch_cached_dev_attach(dc, c);
5449     + v = bch_cached_dev_attach(dc, c, set_uuid);
5450     if (!v)
5451     return size;
5452     }
5453    
5454     pr_err("Can't attach %s: cache set not found", buf);
5455     - size = v;
5456     + return v;
5457     }
5458    
5459     if (attr == &sysfs_detach && dc->disk.c)
5460     diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
5461     index 70454f2ad2fa..f046dedc59ab 100644
5462     --- a/drivers/md/bcache/writeback.c
5463     +++ b/drivers/md/bcache/writeback.c
5464     @@ -420,18 +420,21 @@ static int bch_writeback_thread(void *arg)
5465    
5466     while (!kthread_should_stop()) {
5467     down_write(&dc->writeback_lock);
5468     + set_current_state(TASK_INTERRUPTIBLE);
5469     if (!atomic_read(&dc->has_dirty) ||
5470     (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
5471     !dc->writeback_running)) {
5472     up_write(&dc->writeback_lock);
5473     - set_current_state(TASK_INTERRUPTIBLE);
5474    
5475     - if (kthread_should_stop())
5476     + if (kthread_should_stop()) {
5477     + set_current_state(TASK_RUNNING);
5478     return 0;
5479     + }
5480    
5481     schedule();
5482     continue;
5483     }
5484     + set_current_state(TASK_RUNNING);
5485    
5486     searched_full_index = refill_dirty(dc);
5487    
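
Both bcache hunks above follow the standard kthread sleep pattern: mark the task
TASK_INTERRUPTIBLE before testing the wake-up condition, and put it back to TASK_RUNNING
before returning or doing real work, so a wake-up racing with the check is not lost and the
thread never exits while nominally asleep. A hedged sketch of the loop;
example_work_available() and example_do_work() are stand-ins, not bcache functions.

#include <linux/kthread.h>
#include <linux/sched.h>

static bool example_work_available(void *arg) { return false; }  /* stub */
static void example_do_work(void *arg) { }                       /* stub */

/* Sketch only: canonical wait loop for a kernel thread. */
static int example_thread(void *arg)
{
        while (!kthread_should_stop()) {
                /* Set the state before the check so a wake-up that arrives
                 * between the check and schedule() is not lost. */
                set_current_state(TASK_INTERRUPTIBLE);

                if (!example_work_available(arg)) {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                return 0;
                        }
                        schedule();
                        continue;
                }
                set_current_state(TASK_RUNNING);

                example_do_work(arg);
        }
        return 0;
}
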
5488     diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
5489     index 2704a55f8b6e..8b7328666eaa 100644
5490     --- a/drivers/md/dm-mpath.c
5491     +++ b/drivers/md/dm-mpath.c
5492     @@ -502,8 +502,20 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
5493     if (queue_dying) {
5494     atomic_inc(&m->pg_init_in_progress);
5495     activate_or_offline_path(pgpath);
5496     + return DM_MAPIO_DELAY_REQUEUE;
5497     }
5498     - return DM_MAPIO_DELAY_REQUEUE;
5499     +
5500     + /*
5501     + * blk-mq's SCHED_RESTART already covers this requeue, so we
5502     + * don't need to handle it with DELAY_REQUEUE. More importantly,
5503     + * we have to return DM_MAPIO_REQUEUE so that blk-mq gets the
5504     + * queue-busy feedback (via BLK_STS_RESOURCE); otherwise I/O
5505     + * merging can suffer.
5506     + */
5507     + if (q->mq_ops)
5508     + return DM_MAPIO_REQUEUE;
5509     + else
5510     + return DM_MAPIO_DELAY_REQUEUE;
5511     }
5512     clone->bio = clone->biotail = NULL;
5513     clone->rq_disk = bdev->bd_disk;
5514     diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
5515     index 7f837006bb6a..3bdeb295514b 100644
5516     --- a/drivers/net/ethernet/freescale/gianfar.c
5517     +++ b/drivers/net/ethernet/freescale/gianfar.c
5518     @@ -2932,7 +2932,7 @@ static irqreturn_t gfar_transmit(int irq, void *grp_id)
5519     static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
5520     struct sk_buff *skb, bool first)
5521     {
5522     - unsigned int size = lstatus & BD_LENGTH_MASK;
5523     + int size = lstatus & BD_LENGTH_MASK;
5524     struct page *page = rxb->page;
5525     bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
5526    
5527     @@ -2947,11 +2947,16 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
5528     if (last)
5529     size -= skb->len;
5530    
5531     - /* in case the last fragment consisted only of the FCS */
5532     + /* Add the last fragment if it contains something other than
5533     + * the FCS, otherwise drop it and trim off any part of the FCS
5534     + * that was already received.
5535     + */
5536     if (size > 0)
5537     skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
5538     rxb->page_offset + RXBUF_ALIGNMENT,
5539     size, GFAR_RXB_TRUESIZE);
5540     + else if (size < 0)
5541     + pskb_trim(skb, skb->len + size);
5542     }
5543    
5544     /* try reuse page */
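
The gianfar change above also turns size into a signed int: when the last fragment holds
only part of the FCS, "size -= skb->len" legitimately goes negative, which an unsigned
variable would silently wrap instead of allowing the "size < 0" trim. A tiny user-space
illustration of the difference (numbers invented):

#include <stdio.h>

int main(void)
{
        unsigned int usize = 2; /* bytes left in this fragment */
        int ssize = 2;
        int consumed = 4;       /* bytes of the frame already accounted for */

        usize -= consumed;      /* wraps around to a huge positive value */
        ssize -= consumed;      /* goes negative and can be tested with < 0 */

        printf("unsigned: %u\n", usize);
        printf("signed:   %d (negative => trim that many bytes)\n", ssize);
        return 0;
}
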
5545     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
5546     index e69d49d91d67..914258310ddd 100644
5547     --- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
5548     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
5549     @@ -815,8 +815,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
5550     if (vid >= VLAN_N_VID)
5551     return -EINVAL;
5552    
5553     - /* Verify we have permission to add VLANs */
5554     - if (hw->mac.vlan_override)
5555     + /* Verify that we have permission to add VLANs. If this is a request
5556     + * to remove a VLAN, we still want to allow the user to remove the
5557     + * VLAN device. In that case, we need to clear the bit in the
5558     + * active_vlans bitmask.
5559     + */
5560     + if (set && hw->mac.vlan_override)
5561     return -EACCES;
5562    
5563     /* update active_vlans bitmask */
5564     @@ -835,6 +839,12 @@ static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
5565     rx_ring->vid &= ~FM10K_VLAN_CLEAR;
5566     }
5567    
5568     + /* If our VLAN has been overridden, there is no reason to send VLAN
5569     + * removal requests as they will be silently ignored.
5570     + */
5571     + if (hw->mac.vlan_override)
5572     + return 0;
5573     +
5574     /* Do not remove default VLAN ID related entries from VLAN and MAC
5575     * tables
5576     */
5577     diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
5578     index 05e89864f781..ef22793d6a03 100644
5579     --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
5580     +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
5581     @@ -2588,16 +2588,16 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
5582    
5583     no_input_set:
5584     if (input_set & I40E_L3_SRC_MASK)
5585     - fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFF);
5586     + fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF);
5587    
5588     if (input_set & I40E_L3_DST_MASK)
5589     - fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFF);
5590     + fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF);
5591    
5592     if (input_set & I40E_L4_SRC_MASK)
5593     - fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFFFFFF);
5594     + fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF);
5595    
5596     if (input_set & I40E_L4_DST_MASK)
5597     - fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFFFFFF);
5598     + fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF);
5599    
5600     if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
5601     fsp->ring_cookie = RX_CLS_FLOW_DISC;
5602     @@ -3648,6 +3648,16 @@ static int i40e_check_fdir_input_set(struct i40e_vsi *vsi,
5603    
5604     i40e_write_fd_input_set(pf, index, new_mask);
5605    
5606     + /* IP_USER_FLOW filters match both IPv4/Other and IPv4/Fragmented
5607     + * frames. If we're programming the input set for IPv4/Other, we also
5608     + * need to program the IPv4/Fragmented input set. Since we don't have
5609     + * separate support, we'll always assume and enforce that the two flow
5610     + * types must have matching input sets.
5611     + */
5612     + if (index == I40E_FILTER_PCTYPE_NONF_IPV4_OTHER)
5613     + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
5614     + new_mask);
5615     +
5616     /* Add the new offset and update table, if necessary */
5617     if (new_flex_offset) {
5618     err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset,
5619     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
5620     index b1cde1b051a4..d36b799116e4 100644
5621     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
5622     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
5623     @@ -5828,6 +5828,9 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5624     /* Reprogram the default input set for Other/IPv4 */
5625     i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_NONF_IPV4_OTHER,
5626     I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
5627     +
5628     + i40e_write_fd_input_set(pf, I40E_FILTER_PCTYPE_FRAG_IPV4,
5629     + I40E_L3_SRC_MASK | I40E_L3_DST_MASK);
5630     }
5631    
5632     /**
5633     diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
5634     index 82f69031e5cd..2ef32ab1dfae 100644
5635     --- a/drivers/net/ethernet/intel/i40evf/i40evf.h
5636     +++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
5637     @@ -186,6 +186,7 @@ enum i40evf_state_t {
5638     enum i40evf_critical_section_t {
5639     __I40EVF_IN_CRITICAL_TASK, /* cannot be interrupted */
5640     __I40EVF_IN_CLIENT_TASK,
5641     + __I40EVF_IN_REMOVE_TASK, /* device being removed */
5642     };
5643    
5644     /* board specific private data structure */
5645     diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
5646     index 4eb6ff60e8fc..1b5d204c57c1 100644
5647     --- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
5648     +++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
5649     @@ -1839,6 +1839,12 @@ static void i40evf_reset_task(struct work_struct *work)
5650     int i = 0, err;
5651     bool running;
5652    
5653     + /* When device is being removed it doesn't make sense to run the reset
5654     + * task, just return in such a case.
5655     + */
5656     + if (test_bit(__I40EVF_IN_REMOVE_TASK, &adapter->crit_section))
5657     + return;
5658     +
5659     while (test_and_set_bit(__I40EVF_IN_CLIENT_TASK,
5660     &adapter->crit_section))
5661     usleep_range(500, 1000);
5662     @@ -3022,7 +3028,8 @@ static void i40evf_remove(struct pci_dev *pdev)
5663     struct i40evf_mac_filter *f, *ftmp;
5664     struct i40e_hw *hw = &adapter->hw;
5665     int err;
5666     -
5667     + /* Indicate we are in remove and not to run reset_task */
5668     + /* Indicate that we are in remove so reset_task will not run */
5669     cancel_delayed_work_sync(&adapter->init_task);
5670     cancel_work_sync(&adapter->reset_task);
5671     cancel_delayed_work_sync(&adapter->client_task);
5672     diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
5673     index 85876f4fb1fb..46bf11afba08 100644
5674     --- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
5675     +++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
5676     @@ -937,23 +937,34 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
5677     if (v_opcode == VIRTCHNL_OP_EVENT) {
5678     struct virtchnl_pf_event *vpe =
5679     (struct virtchnl_pf_event *)msg;
5680     + bool link_up = vpe->event_data.link_event.link_status;
5681     switch (vpe->event) {
5682     case VIRTCHNL_EVENT_LINK_CHANGE:
5683     adapter->link_speed =
5684     vpe->event_data.link_event.link_speed;
5685     - if (adapter->link_up !=
5686     - vpe->event_data.link_event.link_status) {
5687     - adapter->link_up =
5688     - vpe->event_data.link_event.link_status;
5689     - if (adapter->link_up) {
5690     - netif_tx_start_all_queues(netdev);
5691     - netif_carrier_on(netdev);
5692     - } else {
5693     - netif_tx_stop_all_queues(netdev);
5694     - netif_carrier_off(netdev);
5695     - }
5696     - i40evf_print_link_message(adapter);
5697     +
5698     + /* we've already got the right link status, bail */
5699     + if (adapter->link_up == link_up)
5700     + break;
5701     +
5702     + /* If we get a link-up message and start the queues before
5703     + * they are configured, it will trigger a TX hang. In that
5704     + * case, just ignore the link status message; we'll get
5705     + * another one after we enable the queues and are actually
5706     + * prepared to send traffic.
5707     + */
5708     + if (link_up && adapter->state != __I40EVF_RUNNING)
5709     + break;
5710     +
5711     + adapter->link_up = link_up;
5712     + if (link_up) {
5713     + netif_tx_start_all_queues(netdev);
5714     + netif_carrier_on(netdev);
5715     + } else {
5716     + netif_tx_stop_all_queues(netdev);
5717     + netif_carrier_off(netdev);
5718     }
5719     + i40evf_print_link_message(adapter);
5720     break;
5721     case VIRTCHNL_EVENT_RESET_IMPENDING:
5722     dev_info(&adapter->pdev->dev, "PF reset warning received\n");
5723     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
5724     index d1a44a84c97e..6ca580cdfd84 100644
5725     --- a/drivers/net/ethernet/intel/igb/igb_main.c
5726     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
5727     @@ -8373,7 +8373,8 @@ static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
5728    
5729     /* Indicate to hardware the Address is Valid. */
5730     if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
5731     - rar_high |= E1000_RAH_AV;
5732     + if (is_valid_ether_addr(addr))
5733     + rar_high |= E1000_RAH_AV;
5734    
5735     if (hw->mac.type == e1000_82575)
5736     rar_high |= E1000_RAH_POOL_1 *
5737     @@ -8411,17 +8412,36 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5738     static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
5739     {
5740     struct igb_adapter *adapter = netdev_priv(netdev);
5741     - if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
5742     +
5743     + if (vf >= adapter->vfs_allocated_count)
5744     + return -EINVAL;
5745     +
5746     + /* Setting the VF MAC to 0 clears the IGB_VF_FLAG_PF_SET_MAC
5747     + * flag and allows the MAC to be overwritten via the VF netdev.
5748     + * This is necessary to give libvirt a way to restore the original
5749     + * MAC after unbinding vfio-pci and reloading igbvf after shutting
5750     + * down a VM.
5751     + */
5752     + if (is_zero_ether_addr(mac)) {
5753     + adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
5754     + dev_info(&adapter->pdev->dev,
5755     + "remove administratively set MAC on VF %d\n",
5756     + vf);
5757     + } else if (is_valid_ether_addr(mac)) {
5758     + adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
5759     + dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
5760     + mac, vf);
5761     + dev_info(&adapter->pdev->dev,
5762     + "Reload the VF driver to make this change effective.");
5763     + /* Generate additional warning if PF is down */
5764     + if (test_bit(__IGB_DOWN, &adapter->state)) {
5765     + dev_warn(&adapter->pdev->dev,
5766     + "The VF MAC address has been set, but the PF device is not up.\n");
5767     + dev_warn(&adapter->pdev->dev,
5768     + "Bring the PF device up before attempting to use the VF device.\n");
5769     + }
5770     + } else {
5771     return -EINVAL;
5772     - adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
5773     - dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
5774     - dev_info(&adapter->pdev->dev,
5775     - "Reload the VF driver to make this change effective.");
5776     - if (test_bit(__IGB_DOWN, &adapter->state)) {
5777     - dev_warn(&adapter->pdev->dev,
5778     - "The VF MAC address has been set, but the PF device is not up.\n");
5779     - dev_warn(&adapter->pdev->dev,
5780     - "Bring the PF device up before attempting to use the VF device.\n");
5781     }
5782     return igb_set_vf_mac(adapter, vf, mac);
5783     }
5784     diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
5785     index 841c2a083349..0746b19ec6d3 100644
5786     --- a/drivers/net/ethernet/intel/igb/igb_ptp.c
5787     +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
5788     @@ -643,6 +643,10 @@ static void igb_ptp_tx_work(struct work_struct *work)
5789     adapter->ptp_tx_skb = NULL;
5790     clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
5791     adapter->tx_hwtstamp_timeouts++;
5792     + /* Clear the tx valid bit in the TSYNCTXCTL register to enable
5793     + * the interrupt
5794     + */
5795     + rd32(E1000_TXSTMPH);
5796     dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
5797     return;
5798     }
5799     @@ -717,6 +721,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
5800     */
5801     void igb_ptp_tx_hang(struct igb_adapter *adapter)
5802     {
5803     + struct e1000_hw *hw = &adapter->hw;
5804     bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
5805     IGB_PTP_TX_TIMEOUT);
5806    
5807     @@ -736,6 +741,10 @@ void igb_ptp_tx_hang(struct igb_adapter *adapter)
5808     adapter->ptp_tx_skb = NULL;
5809     clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
5810     adapter->tx_hwtstamp_timeouts++;
5811     + /* Clear the tx valid bit in the TSYNCTXCTL register to enable
5812     + * the interrupt
5813     + */
5814     + rd32(E1000_TXSTMPH);
5815     dev_warn(&adapter->pdev->dev, "clearing Tx timestamp hang\n");
5816     }
5817     }
5818     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5819     index 29f600fd6977..9e30cfeac04b 100644
5820     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5821     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
5822     @@ -3987,11 +3987,15 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
5823     rxdctl &= ~0x3FFFFF;
5824     rxdctl |= 0x080420;
5825     #if (PAGE_SIZE < 8192)
5826     - } else {
5827     + /* RXDCTL.RLPML does not work on 82599 */
5828     + } else if (hw->mac.type != ixgbe_mac_82599EB) {
5829     rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
5830     IXGBE_RXDCTL_RLPML_EN);
5831    
5832     - /* Limit the maximum frame size so we don't overrun the skb */
5833     + /* Limit the maximum frame size so we don't overrun the skb.
5834     + * This can happen in SRIOV mode when the MTU of the VF is
5835     + * higher than the MTU of the PF.
5836     + */
5837     if (ring_uses_build_skb(ring) &&
5838     !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
5839     rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB |
5840     diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
5841     index f8fa63b66739..a1a15e0c2245 100644
5842     --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
5843     +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
5844     @@ -492,6 +492,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
5845     dev_err(&pdev->dev,
5846     "Error: %d VFs already enabled, but loaded FW can only support %d\n",
5847     pf->num_vfs, pf->limit_vfs);
5848     + err = -EINVAL;
5849     goto err_fw_unload;
5850     }
5851    
5852     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
5853     index 4404650b32c5..8be4b32544ef 100644
5854     --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
5855     +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
5856     @@ -116,7 +116,7 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
5857     snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
5858     init.name = clk_name;
5859     init.ops = &clk_mux_ops;
5860     - init.flags = 0;
5861     + init.flags = CLK_SET_RATE_PARENT;
5862     init.parent_names = mux_parent_names;
5863     init.num_parents = MUX_CLK_NUM_PARENTS;
5864    
5865     @@ -144,7 +144,9 @@ static int meson8b_init_clk(struct meson8b_dwmac *dwmac)
5866     dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
5867     dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
5868     dwmac->m250_div.hw.init = &init;
5869     - dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO;
5870     + dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
5871     + CLK_DIVIDER_ALLOW_ZERO |
5872     + CLK_DIVIDER_ROUND_CLOSEST;
5873    
5874     dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
5875     if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
5876     diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
5877     index 2f7d7ec59962..e1d03489ae63 100644
5878     --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
5879     +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
5880     @@ -562,10 +562,12 @@ static int dwmac4_irq_status(struct mac_device_info *hw,
5881     struct stmmac_extra_stats *x)
5882     {
5883     void __iomem *ioaddr = hw->pcsr;
5884     - u32 intr_status;
5885     + u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
5886     + u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
5887     int ret = 0;
5888    
5889     - intr_status = readl(ioaddr + GMAC_INT_STATUS);
5890     + /* Discard disabled bits */
5891     + intr_status &= intr_enable;
5892    
5893     /* Not used events (e.g. MMC interrupts) are not handled. */
5894     if ((intr_status & mmc_tx_irq))
5895     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
5896     index d148dbf3beeb..d686ba10fecc 100644
5897     --- a/drivers/net/wireless/mac80211_hwsim.c
5898     +++ b/drivers/net/wireless/mac80211_hwsim.c
5899     @@ -3153,8 +3153,10 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
5900     if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) {
5901     u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]);
5902    
5903     - if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom))
5904     + if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) {
5905     + kfree(hwname);
5906     return -EINVAL;
5907     + }
5908     param.regd = hwsim_world_regdom_custom[idx];
5909     }
5910    
5911     @@ -3425,8 +3427,11 @@ static void __net_exit hwsim_exit_net(struct net *net)
5912     continue;
5913    
5914     list_del(&data->list);
5915     - INIT_WORK(&data->destroy_work, destroy_radio);
5916     - schedule_work(&data->destroy_work);
5917     + spin_unlock_bh(&hwsim_radio_lock);
5918     + mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
5919     + NULL);
5920     + spin_lock_bh(&hwsim_radio_lock);
5921     +
5922     }
5923     spin_unlock_bh(&hwsim_radio_lock);
5924     }
5925     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
5926     index a9ba9fe263ca..f07b9c9bb5ba 100644
5927     --- a/drivers/net/xen-netfront.c
5928     +++ b/drivers/net/xen-netfront.c
5929     @@ -351,6 +351,9 @@ static int xennet_open(struct net_device *dev)
5930     unsigned int i = 0;
5931     struct netfront_queue *queue = NULL;
5932    
5933     + if (!np->queues)
5934     + return -ENODEV;
5935     +
5936     for (i = 0; i < num_queues; ++i) {
5937     queue = &np->queues[i];
5938     napi_enable(&queue->napi);
5939     @@ -1358,18 +1361,8 @@ static int netfront_probe(struct xenbus_device *dev,
5940     #ifdef CONFIG_SYSFS
5941     info->netdev->sysfs_groups[0] = &xennet_dev_group;
5942     #endif
5943     - err = register_netdev(info->netdev);
5944     - if (err) {
5945     - pr_warn("%s: register_netdev err=%d\n", __func__, err);
5946     - goto fail;
5947     - }
5948    
5949     return 0;
5950     -
5951     - fail:
5952     - xennet_free_netdev(netdev);
5953     - dev_set_drvdata(&dev->dev, NULL);
5954     - return err;
5955     }
5956    
5957     static void xennet_end_access(int ref, void *page)
5958     @@ -1738,8 +1731,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
5959     {
5960     unsigned int i;
5961    
5962     - rtnl_lock();
5963     -
5964     for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
5965     struct netfront_queue *queue = &info->queues[i];
5966    
5967     @@ -1748,8 +1739,6 @@ static void xennet_destroy_queues(struct netfront_info *info)
5968     netif_napi_del(&queue->napi);
5969     }
5970    
5971     - rtnl_unlock();
5972     -
5973     kfree(info->queues);
5974     info->queues = NULL;
5975     }
5976     @@ -1765,8 +1754,6 @@ static int xennet_create_queues(struct netfront_info *info,
5977     if (!info->queues)
5978     return -ENOMEM;
5979    
5980     - rtnl_lock();
5981     -
5982     for (i = 0; i < *num_queues; i++) {
5983     struct netfront_queue *queue = &info->queues[i];
5984    
5985     @@ -1775,7 +1762,7 @@ static int xennet_create_queues(struct netfront_info *info,
5986    
5987     ret = xennet_init_queue(queue);
5988     if (ret < 0) {
5989     - dev_warn(&info->netdev->dev,
5990     + dev_warn(&info->xbdev->dev,
5991     "only created %d queues\n", i);
5992     *num_queues = i;
5993     break;
5994     @@ -1789,10 +1776,8 @@ static int xennet_create_queues(struct netfront_info *info,
5995    
5996     netif_set_real_num_tx_queues(info->netdev, *num_queues);
5997    
5998     - rtnl_unlock();
5999     -
6000     if (*num_queues == 0) {
6001     - dev_err(&info->netdev->dev, "no queues\n");
6002     + dev_err(&info->xbdev->dev, "no queues\n");
6003     return -EINVAL;
6004     }
6005     return 0;
6006     @@ -1829,6 +1814,7 @@ static int talk_to_netback(struct xenbus_device *dev,
6007     goto out;
6008     }
6009    
6010     + rtnl_lock();
6011     if (info->queues)
6012     xennet_destroy_queues(info);
6013    
6014     @@ -1839,6 +1825,7 @@ static int talk_to_netback(struct xenbus_device *dev,
6015     info->queues = NULL;
6016     goto out;
6017     }
6018     + rtnl_unlock();
6019    
6020     /* Create shared ring, alloc event channel -- for each queue */
6021     for (i = 0; i < num_queues; ++i) {
6022     @@ -1935,8 +1922,10 @@ static int talk_to_netback(struct xenbus_device *dev,
6023     xenbus_transaction_end(xbt, 1);
6024     destroy_ring:
6025     xennet_disconnect_backend(info);
6026     + rtnl_lock();
6027     xennet_destroy_queues(info);
6028     out:
6029     + rtnl_unlock();
6030     device_unregister(&dev->dev);
6031     return err;
6032     }
6033     @@ -1966,6 +1955,15 @@ static int xennet_connect(struct net_device *dev)
6034     netdev_update_features(dev);
6035     rtnl_unlock();
6036    
6037     + if (dev->reg_state == NETREG_UNINITIALIZED) {
6038     + err = register_netdev(dev);
6039     + if (err) {
6040     + pr_warn("%s: register_netdev err=%d\n", __func__, err);
6041     + device_unregister(&np->xbdev->dev);
6042     + return err;
6043     + }
6044     + }
6045     +
6046     /*
6047     * All public and private state should now be sane. Get
6048     * ready to start sending and receiving packets and give the driver
6049     @@ -2156,10 +2154,14 @@ static int xennet_remove(struct xenbus_device *dev)
6050    
6051     xennet_disconnect_backend(info);
6052    
6053     - unregister_netdev(info->netdev);
6054     + if (info->netdev->reg_state == NETREG_REGISTERED)
6055     + unregister_netdev(info->netdev);
6056    
6057     - if (info->queues)
6058     + if (info->queues) {
6059     + rtnl_lock();
6060     xennet_destroy_queues(info);
6061     + rtnl_unlock();
6062     + }
6063     xennet_free_netdev(info->netdev);
6064    
6065     return 0;
6066     diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
6067     index f58d8e305323..18339b7e88a4 100644
6068     --- a/drivers/ntb/ntb_transport.c
6069     +++ b/drivers/ntb/ntb_transport.c
6070     @@ -998,6 +998,9 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
6071     mw_base = nt->mw_vec[mw_num].phys_addr;
6072     mw_size = nt->mw_vec[mw_num].phys_size;
6073    
6074     + if (max_mw_size && mw_size > max_mw_size)
6075     + mw_size = max_mw_size;
6076     +
6077     tx_size = (unsigned int)mw_size / num_qps_mw;
6078     qp_offset = tx_size * (qp_num / mw_count);
6079    
6080     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
6081     index 5c5a8af66829..116127a0accb 100644
6082     --- a/drivers/pci/quirks.c
6083     +++ b/drivers/pci/quirks.c
6084     @@ -3879,6 +3879,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9120,
6085     quirk_dma_func1_alias);
6086     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9123,
6087     quirk_dma_func1_alias);
6088     +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9128,
6089     + quirk_dma_func1_alias);
6090     /* https://bugzilla.kernel.org/show_bug.cgi?id=42679#c14 */
6091     DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MARVELL_EXT, 0x9130,
6092     quirk_dma_func1_alias);
6093     diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
6094     index 7424e53157b0..dd5043a6a114 100644
6095     --- a/drivers/platform/x86/dell-laptop.c
6096     +++ b/drivers/platform/x86/dell-laptop.c
6097     @@ -1177,6 +1177,7 @@ static u8 kbd_previous_mode_bit;
6098    
6099     static bool kbd_led_present;
6100     static DEFINE_MUTEX(kbd_led_mutex);
6101     +static enum led_brightness kbd_led_level;
6102    
6103     /*
6104     * NOTE: there are three ways to set the keyboard backlight level.
6105     @@ -2020,6 +2021,7 @@ static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
6106     static int kbd_led_level_set(struct led_classdev *led_cdev,
6107     enum led_brightness value)
6108     {
6109     + enum led_brightness new_value = value;
6110     struct kbd_state state;
6111     struct kbd_state new_state;
6112     u16 num;
6113     @@ -2049,6 +2051,9 @@ static int kbd_led_level_set(struct led_classdev *led_cdev,
6114     }
6115    
6116     out:
6117     + if (ret == 0)
6118     + kbd_led_level = new_value;
6119     +
6120     mutex_unlock(&kbd_led_mutex);
6121     return ret;
6122     }
6123     @@ -2076,6 +2081,9 @@ static int __init kbd_led_init(struct device *dev)
6124     if (kbd_led.max_brightness)
6125     kbd_led.max_brightness--;
6126     }
6127     +
6128     + kbd_led_level = kbd_led_level_get(NULL);
6129     +
6130     ret = led_classdev_register(dev, &kbd_led);
6131     if (ret)
6132     kbd_led_present = false;
6133     @@ -2100,13 +2108,25 @@ static void kbd_led_exit(void)
6134     static int dell_laptop_notifier_call(struct notifier_block *nb,
6135     unsigned long action, void *data)
6136     {
6137     + bool changed = false;
6138     + enum led_brightness new_kbd_led_level;
6139     +
6140     switch (action) {
6141     case DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED:
6142     if (!kbd_led_present)
6143     break;
6144    
6145     - led_classdev_notify_brightness_hw_changed(&kbd_led,
6146     - kbd_led_level_get(&kbd_led));
6147     + mutex_lock(&kbd_led_mutex);
6148     + new_kbd_led_level = kbd_led_level_get(&kbd_led);
6149     + if (kbd_led_level != new_kbd_led_level) {
6150     + kbd_led_level = new_kbd_led_level;
6151     + changed = true;
6152     + }
6153     + mutex_unlock(&kbd_led_mutex);
6154     +
6155     + if (changed)
6156     + led_classdev_notify_brightness_hw_changed(&kbd_led,
6157     + kbd_led_level);
6158     break;
6159     }
6160    
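
The dell-laptop hunk caches the last keyboard-backlight level and only emits a hw_changed
event when the value actually differs, comparing under the existing mutex and calling the
notifier outside it. A hedged sketch of that shape (the example_* names are stand-ins for
the driver's own objects):

#include <linux/leds.h>
#include <linux/mutex.h>

static enum led_brightness example_cached_level;

/* Sketch only: notify user space about a hardware-initiated brightness
 * change only when the cached value really changed. */
static void example_notify_if_changed(struct led_classdev *example_led,
                                      struct mutex *example_lock,
                                      enum led_brightness new_level)
{
        bool changed = false;

        mutex_lock(example_lock);
        if (example_cached_level != new_level) {
                example_cached_level = new_level;
                changed = true;
        }
        mutex_unlock(example_lock);

        /* Call the notifier outside the lock, as the hunk above does. */
        if (changed)
                led_classdev_notify_brightness_hw_changed(example_led,
                                                          new_level);
}
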
6161     diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
6162     index 2242d6035d9e..c407d52ef7cf 100644
6163     --- a/drivers/platform/x86/thinkpad_acpi.c
6164     +++ b/drivers/platform/x86/thinkpad_acpi.c
6165     @@ -214,6 +214,10 @@ enum tpacpi_hkey_event_t {
6166     /* AC-related events */
6167     TP_HKEY_EV_AC_CHANGED = 0x6040, /* AC status changed */
6168    
6169     + /* Further user-interface events */
6170     + TP_HKEY_EV_PALM_DETECTED = 0x60b0, /* palm hovers over keyboard */
6171     + TP_HKEY_EV_PALM_UNDETECTED = 0x60b1, /* palm removed */
6172     +
6173     /* Misc */
6174     TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
6175     };
6176     @@ -3973,6 +3977,12 @@ static bool hotkey_notify_6xxx(const u32 hkey,
6177     *send_acpi_ev = false;
6178     break;
6179    
6180     + case TP_HKEY_EV_PALM_DETECTED:
6181     + case TP_HKEY_EV_PALM_UNDETECTED:
6182     + /* palm detected hovering over the keyboard, forward to user-space
6183     + * via netlink for consumption */
6184     + return true;
6185     +
6186     default:
6187     pr_warn("unknown possible thermal alarm or keyboard event received\n");
6188     known = false;
6189     diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c
6190     index 24388795ee9a..936e8c735656 100644
6191     --- a/drivers/scsi/arm/fas216.c
6192     +++ b/drivers/scsi/arm/fas216.c
6193     @@ -2011,7 +2011,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt,
6194     * have valid data in the sense buffer that could
6195     * confuse the higher levels.
6196     */
6197     - memset(SCpnt->sense_buffer, 0, sizeof(SCpnt->sense_buffer));
6198     + memset(SCpnt->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6199     //printk("scsi%d.%c: sense buffer: ", info->host->host_no, '0' + SCpnt->device->id);
6200     //{ int i; for (i = 0; i < 32; i++) printk("%02x ", SCpnt->sense_buffer[i]); printk("\n"); }
6201     /*
6202     diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
6203     index 2300c02ab5e6..e24f57946a17 100644
6204     --- a/drivers/scsi/qla2xxx/qla_init.c
6205     +++ b/drivers/scsi/qla2xxx/qla_init.c
6206     @@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
6207    
6208     switch (sp->type) {
6209     case SRB_LOGIN_CMD:
6210     + if (!fcport)
6211     + break;
6212     /* Retry as needed. */
6213     lio->u.logio.data[0] = MBS_COMMAND_ERROR;
6214     lio->u.logio.data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
6215     @@ -128,6 +130,8 @@ qla2x00_async_iocb_timeout(void *data)
6216     qla24xx_handle_plogi_done_event(fcport->vha, &ea);
6217     break;
6218     case SRB_LOGOUT_CMD:
6219     + if (!fcport)
6220     + break;
6221     qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
6222     break;
6223     case SRB_CT_PTHRU_CMD:
6224     diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
6225     index cfc095f45e26..ea947a7c2596 100644
6226     --- a/drivers/scsi/scsi_devinfo.c
6227     +++ b/drivers/scsi/scsi_devinfo.c
6228     @@ -109,8 +109,8 @@ static struct {
6229     * seagate controller, which causes SCSI code to reset bus.
6230     */
6231     {"HP", "C1750A", "3226", BLIST_NOLUN}, /* scanjet iic */
6232     - {"HP", "C1790A", "", BLIST_NOLUN}, /* scanjet iip */
6233     - {"HP", "C2500A", "", BLIST_NOLUN}, /* scanjet iicx */
6234     + {"HP", "C1790A", NULL, BLIST_NOLUN}, /* scanjet iip */
6235     + {"HP", "C2500A", NULL, BLIST_NOLUN}, /* scanjet iicx */
6236     {"MEDIAVIS", "CDR-H93MV", "1.31", BLIST_NOLUN}, /* locks up */
6237     {"MICROTEK", "ScanMaker II", "5.61", BLIST_NOLUN}, /* responds to all lun */
6238     {"MITSUMI", "CD-R CR-2201CS", "6119", BLIST_NOLUN}, /* locks up */
6239     @@ -120,7 +120,7 @@ static struct {
6240     {"QUANTUM", "FIREBALL ST4.3S", "0F0C", BLIST_NOLUN}, /* locks up */
6241     {"RELISYS", "Scorpio", NULL, BLIST_NOLUN}, /* responds to all lun */
6242     {"SANKYO", "CP525", "6.64", BLIST_NOLUN}, /* causes failed REQ SENSE, extra reset */
6243     - {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN},
6244     + {"TEXEL", "CD-ROM", "1.06", BLIST_NOLUN | BLIST_BORKEN},
6245     {"transtec", "T5008", "0001", BLIST_NOREPORTLUN },
6246     {"YAMAHA", "CDR100", "1.00", BLIST_NOLUN}, /* locks up */
6247     {"YAMAHA", "CDR102", "1.00", BLIST_NOLUN}, /* locks up */
6248     @@ -255,7 +255,6 @@ static struct {
6249     {"ST650211", "CF", NULL, BLIST_RETRY_HWERROR},
6250     {"SUN", "T300", "*", BLIST_SPARSELUN},
6251     {"SUN", "T4", "*", BLIST_SPARSELUN},
6252     - {"TEXEL", "CD-ROM", "1.06", BLIST_BORKEN},
6253     {"Tornado-", "F4", "*", BLIST_NOREPORTLUN},
6254     {"TOSHIBA", "CDROM", NULL, BLIST_ISROM},
6255     {"TOSHIBA", "CD-ROM", NULL, BLIST_ISROM},
6256     diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
6257     index fe3fa1e8517a..4903f15177cf 100644
6258     --- a/drivers/spi/spi-armada-3700.c
6259     +++ b/drivers/spi/spi-armada-3700.c
6260     @@ -624,6 +624,11 @@ static int a3700_spi_transfer_one(struct spi_master *master,
6261     a3700_spi_header_set(a3700_spi);
6262    
6263     if (xfer->rx_buf) {
6264     + /* Clear WFIFO, since its last 2 bytes are shifted out during
6265     + * a read operation
6266     + */
6267     + spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, 0);
6268     +
6269     /* Set read data length */
6270     spireg_write(a3700_spi, A3700_SPI_IF_DIN_CNT_REG,
6271     a3700_spi->buf_len);
6272     diff --git a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
6273     index 123e4af58408..50260cb5056d 100644
6274     --- a/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
6275     +++ b/drivers/staging/fsl-mc/bus/irq-gic-v3-its-fsl-mc-msi.c
6276     @@ -75,6 +75,8 @@ int __init its_fsl_mc_msi_init(void)
6277    
6278     for (np = of_find_matching_node(NULL, its_device_id); np;
6279     np = of_find_matching_node(np, its_device_id)) {
6280     + if (!of_device_is_available(np))
6281     + continue;
6282     if (!of_property_read_bool(np, "msi-controller"))
6283     continue;
6284    
6285     diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
6286     index c55624703fdf..e0aa5f03004c 100644
6287     --- a/drivers/tty/serial/8250/8250_exar.c
6288     +++ b/drivers/tty/serial/8250/8250_exar.c
6289     @@ -37,6 +37,7 @@
6290     #define PCI_DEVICE_ID_EXAR_XR17V4358 0x4358
6291     #define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
6292    
6293     +#define UART_EXAR_INT0 0x80
6294     #define UART_EXAR_8XMODE 0x88 /* 8X sampling rate select */
6295    
6296     #define UART_EXAR_FCTR 0x08 /* Feature Control Register */
6297     @@ -124,6 +125,7 @@ struct exar8250_board {
6298     struct exar8250 {
6299     unsigned int nr;
6300     struct exar8250_board *board;
6301     + void __iomem *virt;
6302     int line[0];
6303     };
6304    
6305     @@ -134,12 +136,9 @@ static int default_setup(struct exar8250 *priv, struct pci_dev *pcidev,
6306     const struct exar8250_board *board = priv->board;
6307     unsigned int bar = 0;
6308    
6309     - if (!pcim_iomap_table(pcidev)[bar] && !pcim_iomap(pcidev, bar, 0))
6310     - return -ENOMEM;
6311     -
6312     port->port.iotype = UPIO_MEM;
6313     port->port.mapbase = pci_resource_start(pcidev, bar) + offset;
6314     - port->port.membase = pcim_iomap_table(pcidev)[bar] + offset;
6315     + port->port.membase = priv->virt + offset;
6316     port->port.regshift = board->reg_shift;
6317    
6318     return 0;
6319     @@ -423,6 +422,25 @@ static void pci_xr17v35x_exit(struct pci_dev *pcidev)
6320     port->port.private_data = NULL;
6321     }
6322    
6323     +/*
6324     + * These Exar UARTs have an extra interrupt indicator that could fire for a
6325     + * few interrupts that are not presented/cleared through IIR, one of which
6326     + * is a wakeup interrupt when coming out of sleep. These interrupts are only
6327     + * cleared by reading the global INT0 or INT1 registers, as the interrupts are
6328     + * associated with channel 0. The INT[3:0] registers _are_ accessible from each
6329     + * channel's address space, but for the sake of bus efficiency we register a
6330     + * dedicated handler at the PCI device level to handle them.
6331     + */
6332     +static irqreturn_t exar_misc_handler(int irq, void *data)
6333     +{
6334     + struct exar8250 *priv = data;
6335     +
6336     + /* Clear all PCI interrupts by reading INT0. No effect on IIR */
6337     + ioread8(priv->virt + UART_EXAR_INT0);
6338     +
6339     + return IRQ_HANDLED;
6340     +}
6341     +
6342     static int
6343     exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
6344     {
6345     @@ -451,6 +469,9 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
6346     return -ENOMEM;
6347    
6348     priv->board = board;
6349     + priv->virt = pcim_iomap(pcidev, bar, 0);
6350     + if (!priv->virt)
6351     + return -ENOMEM;
6352    
6353     pci_set_master(pcidev);
6354    
6355     @@ -464,6 +485,11 @@ exar_pci_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
6356     uart.port.irq = pci_irq_vector(pcidev, 0);
6357     uart.port.dev = &pcidev->dev;
6358    
6359     + rc = devm_request_irq(&pcidev->dev, uart.port.irq, exar_misc_handler,
6360     + IRQF_SHARED, "exar_uart", priv);
6361     + if (rc)
6362     + return rc;
6363     +
6364     for (i = 0; i < nr_ports && i < maxnr; i++) {
6365     rc = board->setup(priv, pcidev, &uart, i);
6366     if (rc) {
6367     diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
6368     index fde34c84e707..e32c51d549c3 100644
6369     --- a/drivers/tty/serial/8250/8250_port.c
6370     +++ b/drivers/tty/serial/8250/8250_port.c
6371     @@ -458,7 +458,6 @@ static void io_serial_out(struct uart_port *p, int offset, int value)
6372     }
6373    
6374     static int serial8250_default_handle_irq(struct uart_port *port);
6375     -static int exar_handle_irq(struct uart_port *port);
6376    
6377     static void set_io_from_upio(struct uart_port *p)
6378     {
6379     @@ -1903,26 +1902,6 @@ static int serial8250_default_handle_irq(struct uart_port *port)
6380     return ret;
6381     }
6382    
6383     -/*
6384     - * These Exar UARTs have an extra interrupt indicator that could
6385     - * fire for a few unimplemented interrupts. One of which is a
6386     - * wakeup event when coming out of sleep. Put this here just
6387     - * to be on the safe side that these interrupts don't go unhandled.
6388     - */
6389     -static int exar_handle_irq(struct uart_port *port)
6390     -{
6391     - unsigned int iir = serial_port_in(port, UART_IIR);
6392     - int ret = 0;
6393     -
6394     - if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) &&
6395     - serial_port_in(port, UART_EXAR_INT0) != 0)
6396     - ret = 1;
6397     -
6398     - ret |= serial8250_handle_irq(port, iir);
6399     -
6400     - return ret;
6401     -}
6402     -
6403     /*
6404     * Newer 16550 compatible parts such as the SC16C650 & Altera 16550 Soft IP
6405     * have a programmable TX threshold that triggers the THRE interrupt in
6406     @@ -3107,11 +3086,6 @@ static void serial8250_config_port(struct uart_port *port, int flags)
6407     if (port->type == PORT_UNKNOWN)
6408     serial8250_release_std_resource(up);
6409    
6410     - /* Fixme: probably not the best place for this */
6411     - if ((port->type == PORT_XR17V35X) ||
6412     - (port->type == PORT_XR17D15X))
6413     - port->handle_irq = exar_handle_irq;
6414     -
6415     register_dev_spec_attr_grp(up);
6416     up->fcr = uart_config[up->port.type].fcr;
6417     }
6418     diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
6419     index ff5a1a8989d5..ff17e94ef465 100644
6420     --- a/drivers/usb/musb/musb_core.c
6421     +++ b/drivers/usb/musb/musb_core.c
6422     @@ -1780,6 +1780,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
6423     int vbus;
6424     u8 devctl;
6425    
6426     + pm_runtime_get_sync(dev);
6427     spin_lock_irqsave(&musb->lock, flags);
6428     val = musb->a_wait_bcon;
6429     vbus = musb_platform_get_vbus_status(musb);
6430     @@ -1793,6 +1794,7 @@ musb_vbus_show(struct device *dev, struct device_attribute *attr, char *buf)
6431     vbus = 0;
6432     }
6433     spin_unlock_irqrestore(&musb->lock, flags);
6434     + pm_runtime_put_sync(dev);
6435    
6436     return sprintf(buf, "Vbus %s, timeout %lu msec\n",
6437     vbus ? "on" : "off", val);
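
The musb change above brackets the sysfs read with pm_runtime_get_sync() /
pm_runtime_put_sync() so the controller is guaranteed to be powered while its registers are
sampled. A hedged sketch of that bracket in a generic show() callback
(example_read_hw_state() is a made-up helper, not a musb function):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>

static int example_read_hw_state(struct device *dev)
{
        return 0;       /* stand-in for real register reads */
}

/* Sketch only: resume the device around hardware access in sysfs. */
static ssize_t example_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        int state;

        pm_runtime_get_sync(dev);       /* resume if runtime-suspended */
        state = example_read_hw_state(dev);
        pm_runtime_put_sync(dev);       /* drop the reference, may suspend */

        return sprintf(buf, "%d\n", state);
}
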
6438     @@ -2496,11 +2498,11 @@ static int musb_remove(struct platform_device *pdev)
6439     musb_disable_interrupts(musb);
6440     musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
6441     spin_unlock_irqrestore(&musb->lock, flags);
6442     + musb_platform_exit(musb);
6443    
6444     pm_runtime_dont_use_autosuspend(musb->controller);
6445     pm_runtime_put_sync(musb->controller);
6446     pm_runtime_disable(musb->controller);
6447     - musb_platform_exit(musb);
6448     musb_phy_callback = NULL;
6449     if (musb->dma_controller)
6450     musb_dma_controller_destroy(musb->dma_controller);
6451     @@ -2733,7 +2735,8 @@ static int musb_resume(struct device *dev)
6452     if ((devctl & mask) != (musb->context.devctl & mask))
6453     musb->port1_status = 0;
6454    
6455     - musb_start(musb);
6456     + musb_enable_interrupts(musb);
6457     + musb_platform_enable(musb);
6458    
6459     spin_lock_irqsave(&musb->lock, flags);
6460     error = musb_run_resume_work(musb);
6461     diff --git a/drivers/watchdog/sp5100_tco.h b/drivers/watchdog/sp5100_tco.h
6462     index 1af4dee71337..0e242d1110ce 100644
6463     --- a/drivers/watchdog/sp5100_tco.h
6464     +++ b/drivers/watchdog/sp5100_tco.h
6465     @@ -55,7 +55,7 @@
6466     #define SB800_PM_WATCHDOG_CONFIG 0x4C
6467    
6468     #define SB800_PCI_WATCHDOG_DECODE_EN (1 << 0)
6469     -#define SB800_PM_WATCHDOG_DISABLE (1 << 2)
6470     +#define SB800_PM_WATCHDOG_DISABLE (1 << 1)
6471     #define SB800_PM_WATCHDOG_SECOND_RES (3 << 0)
6472     #define SB800_ACPI_MMIO_DECODE_EN (1 << 0)
6473     #define SB800_ACPI_MMIO_SEL (1 << 1)
6474     diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
6475     index 2c6a9114d332..1fb374466e84 100644
6476     --- a/drivers/xen/grant-table.c
6477     +++ b/drivers/xen/grant-table.c
6478     @@ -328,7 +328,7 @@ static void gnttab_handle_deferred(unsigned long unused)
6479     if (entry->page) {
6480     pr_debug("freeing g.e. %#x (pfn %#lx)\n",
6481     entry->ref, page_to_pfn(entry->page));
6482     - __free_page(entry->page);
6483     + put_page(entry->page);
6484     } else
6485     pr_info("freeing g.e. %#x\n", entry->ref);
6486     kfree(entry);
6487     @@ -384,7 +384,7 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
6488     if (gnttab_end_foreign_access_ref(ref, readonly)) {
6489     put_free_entry(ref);
6490     if (page != 0)
6491     - free_page(page);
6492     + put_page(virt_to_page(page));
6493     } else
6494     gnttab_add_deferred(ref, readonly,
6495     page ? virt_to_page(page) : NULL);
6496     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
6497     index e2bb2a065741..21cc27509993 100644
6498     --- a/fs/btrfs/ctree.c
6499     +++ b/fs/btrfs/ctree.c
6500     @@ -2774,6 +2774,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6501     * contention with the cow code
6502     */
6503     if (cow) {
6504     + bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
6505     +
6506     /*
6507     * if we don't really need to cow this block
6508     * then we don't want to set the path blocking,
6509     @@ -2798,9 +2800,13 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6510     }
6511    
6512     btrfs_set_path_blocking(p);
6513     - err = btrfs_cow_block(trans, root, b,
6514     - p->nodes[level + 1],
6515     - p->slots[level + 1], &b);
6516     + if (last_level)
6517     + err = btrfs_cow_block(trans, root, b, NULL, 0,
6518     + &b);
6519     + else
6520     + err = btrfs_cow_block(trans, root, b,
6521     + p->nodes[level + 1],
6522     + p->slots[level + 1], &b);
6523     if (err) {
6524     ret = err;
6525     goto done;
6526     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
6527     index 167ce43cabe8..79f0f282a0ef 100644
6528     --- a/fs/btrfs/disk-io.c
6529     +++ b/fs/btrfs/disk-io.c
6530     @@ -4063,9 +4063,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info)
6531     btrfs_err(fs_info, "no valid FS found");
6532     ret = -EINVAL;
6533     }
6534     - if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)
6535     - btrfs_warn(fs_info, "unrecognized super flag: %llu",
6536     + if (btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP) {
6537     + btrfs_err(fs_info, "unrecognized or unsupported super flag: %llu",
6538     btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP);
6539     + ret = -EINVAL;
6540     + }
6541     if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
6542     btrfs_err(fs_info, "tree_root level too big: %d >= %d",
6543     btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
6544     diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
6545     index d564a7049d7f..5690feded0de 100644
6546     --- a/fs/btrfs/file.c
6547     +++ b/fs/btrfs/file.c
6548     @@ -2018,10 +2018,19 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
6549     static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
6550     {
6551     int ret;
6552     + struct blk_plug plug;
6553    
6554     + /*
6555     + * This is only called in fsync, which would do synchronous writes, so
6556     + * a plug can merge adjacent IOs as much as possible. Esp. in case of
6557     + * multiple disks using raid profile, a large IO can be split to
6558     + * several segments of stripe length (currently 64K).
6559     + */
6560     + blk_start_plug(&plug);
6561     atomic_inc(&BTRFS_I(inode)->sync_writers);
6562     ret = btrfs_fdatawrite_range(inode, start, end);
6563     atomic_dec(&BTRFS_I(inode)->sync_writers);
6564     + blk_finish_plug(&plug);
6565    
6566     return ret;
6567     }
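
The blk_start_plug()/blk_finish_plug() pair added above batches the writes issued in between so adjacent requests can be merged before reaching the device. A rough user-space analogue of that idea is coalescing adjacent ranges while a plug is open and submitting them as one request at the end (the helper names below are made up for illustration):

#include <stdio.h>

/* One pending, mergeable extent -- a stand-in for a plugged request list. */
static long pend_off = -1, pend_len;

static void flush_pending(void)
{
	if (pend_off >= 0)
		printf("submit write: off=%ld len=%ld\n", pend_off, pend_len);
	pend_off = -1;
}

/* Queue a write; merge it with the pending one if the ranges are adjacent. */
static void queue_write(long off, long len)
{
	if (pend_off >= 0 && pend_off + pend_len == off) {
		pend_len += len;          /* contiguous: grow the pending request */
		return;
	}
	flush_pending();                  /* not mergeable: submit what we had */
	pend_off = off;
	pend_len = len;
}

int main(void)
{
	/* "Plugged" section: three adjacent 64K chunks become one submission. */
	queue_write(0, 65536);
	queue_write(65536, 65536);
	queue_write(131072, 65536);
	flush_pending();                  /* the blk_finish_plug() moment */
	return 0;
}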
6568     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
6569     index dfa360d18ae2..9f21c29d0259 100644
6570     --- a/fs/btrfs/inode.c
6571     +++ b/fs/btrfs/inode.c
6572     @@ -42,6 +42,7 @@
6573     #include <linux/blkdev.h>
6574     #include <linux/posix_acl_xattr.h>
6575     #include <linux/uio.h>
6576     +#include <asm/unaligned.h>
6577     #include "ctree.h"
6578     #include "disk-io.h"
6579     #include "transaction.h"
6580     @@ -5980,11 +5981,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
6581     struct dir_entry *entry = addr;
6582     char *name = (char *)(entry + 1);
6583    
6584     - ctx->pos = entry->offset;
6585     - if (!dir_emit(ctx, name, entry->name_len, entry->ino,
6586     - entry->type))
6587     + ctx->pos = get_unaligned(&entry->offset);
6588     + if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
6589     + get_unaligned(&entry->ino),
6590     + get_unaligned(&entry->type)))
6591     return 1;
6592     - addr += sizeof(struct dir_entry) + entry->name_len;
6593     + addr += sizeof(struct dir_entry) +
6594     + get_unaligned(&entry->name_len);
6595     ctx->pos++;
6596     }
6597     return 0;
6598     @@ -6078,14 +6081,15 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
6599     }
6600    
6601     entry = addr;
6602     - entry->name_len = name_len;
6603     + put_unaligned(name_len, &entry->name_len);
6604     name_ptr = (char *)(entry + 1);
6605     read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
6606     name_len);
6607     - entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
6608     + put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)],
6609     + &entry->type);
6610     btrfs_dir_item_key_to_cpu(leaf, di, &location);
6611     - entry->ino = location.objectid;
6612     - entry->offset = found_key.offset;
6613     + put_unaligned(location.objectid, &entry->ino);
6614     + put_unaligned(found_key.offset, &entry->offset);
6615     entries++;
6616     addr += sizeof(struct dir_entry) + name_len;
6617     total_len += sizeof(struct dir_entry) + name_len;
6618     @@ -7261,19 +7265,12 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6619     * existing will always be non-NULL, since there must be
6620     * extent causing the -EEXIST.
6621     */
6622     - if (existing->start == em->start &&
6623     - extent_map_end(existing) >= extent_map_end(em) &&
6624     - em->block_start == existing->block_start) {
6625     - /*
6626     - * The existing extent map already encompasses the
6627     - * entire extent map we tried to add.
6628     - */
6629     + if (start >= existing->start &&
6630     + start < extent_map_end(existing)) {
6631     free_extent_map(em);
6632     em = existing;
6633     err = 0;
6634     -
6635     - } else if (start >= extent_map_end(existing) ||
6636     - start <= existing->start) {
6637     + } else {
6638     /*
6639     * The existing extent map is the one nearest to
6640     * the [start, start + len) range which overlaps
6641     @@ -7285,10 +7282,6 @@ struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
6642     free_extent_map(em);
6643     em = NULL;
6644     }
6645     - } else {
6646     - free_extent_map(em);
6647     - em = existing;
6648     - err = 0;
6649     }
6650     }
6651     write_unlock(&em_tree->lock);
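
The readdir hunks above switch to get_unaligned()/put_unaligned() because the packed dir_entry records copied into the temporary buffer are not guaranteed to be naturally aligned. On strict-alignment architectures the generic helpers amount to a memcpy-style load and store, which can be sketched portably like this:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Read a u64 from a possibly misaligned address without faulting. */
static uint64_t load_unaligned_u64(const void *p)
{
	uint64_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

/* Store a u64 to a possibly misaligned address. */
static void store_unaligned_u64(void *p, uint64_t v)
{
	memcpy(p, &v, sizeof(v));
}

int main(void)
{
	unsigned char buf[16];

	/* Offset 3 is deliberately misaligned for an 8-byte value. */
	store_unaligned_u64(buf + 3, 0x1122334455667788ULL);
	printf("%llx\n", (unsigned long long)load_unaligned_u64(buf + 3));
	return 0;
}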
6652     diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
6653     index 32b186c5694c..2e995e565633 100644
6654     --- a/fs/btrfs/raid56.c
6655     +++ b/fs/btrfs/raid56.c
6656     @@ -858,10 +858,17 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
6657     kfree(rbio);
6658     }
6659    
6660     -static void free_raid_bio(struct btrfs_raid_bio *rbio)
6661     +static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
6662     {
6663     - unlock_stripe(rbio);
6664     - __free_raid_bio(rbio);
6665     + struct bio *next;
6666     +
6667     + while (cur) {
6668     + next = cur->bi_next;
6669     + cur->bi_next = NULL;
6670     + cur->bi_status = err;
6671     + bio_endio(cur);
6672     + cur = next;
6673     + }
6674     }
6675    
6676     /*
6677     @@ -871,20 +878,26 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio)
6678     static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
6679     {
6680     struct bio *cur = bio_list_get(&rbio->bio_list);
6681     - struct bio *next;
6682     + struct bio *extra;
6683    
6684     if (rbio->generic_bio_cnt)
6685     btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
6686    
6687     - free_raid_bio(rbio);
6688     + /*
6689     + * At this moment, rbio->bio_list is empty, however since rbio does not
6690     + * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
6691     + * hash list, rbio may be merged with others so that rbio->bio_list
6692     + * becomes non-empty.
6693     + * Once unlock_stripe() is done, rbio->bio_list will not be updated any
6694     + * more and we can call bio_endio() on all queued bios.
6695     + */
6696     + unlock_stripe(rbio);
6697     + extra = bio_list_get(&rbio->bio_list);
6698     + __free_raid_bio(rbio);
6699    
6700     - while (cur) {
6701     - next = cur->bi_next;
6702     - cur->bi_next = NULL;
6703     - cur->bi_status = err;
6704     - bio_endio(cur);
6705     - cur = next;
6706     - }
6707     + rbio_endio_bio_list(cur, err);
6708     + if (extra)
6709     + rbio_endio_bio_list(extra, err);
6710     }
6711    
6712     /*
6713     @@ -2159,11 +2172,21 @@ int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
6714     }
6715    
6716     /*
6717     - * reconstruct from the q stripe if they are
6718     - * asking for mirror 3
6719     + * Loop retry:
6720     + * for 'mirror == 2', reconstruct from all other stripes.
6721     + * for 'mirror_num > 2', select a stripe to fail on every retry.
6722     */
6723     - if (mirror_num == 3)
6724     - rbio->failb = rbio->real_stripes - 2;
6725     + if (mirror_num > 2) {
6726     + /*
6727     + * 'mirror == 3' is to fail the p stripe and
6728     + * reconstruct from the q stripe. 'mirror > 3' is to
6729     + * fail a data stripe and reconstruct from p+q stripe.
6730     + */
6731     + rbio->failb = rbio->real_stripes - (mirror_num - 1);
6732     + ASSERT(rbio->failb > 0);
6733     + if (rbio->failb <= rbio->faila)
6734     + rbio->failb--;
6735     + }
6736    
6737     ret = lock_stripe_add(rbio);
6738    
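
The retry logic added above maps mirror_num onto an extra stripe to treat as failed: mirror 3 fails the P stripe, higher mirror numbers each fail one data stripe, and the already-bad stripe (faila) is skipped. The same selection arithmetic, restated as a small standalone function for clarity (illustrative, not the kernel code):

#include <assert.h>
#include <stdio.h>

/*
 * Pick the extra stripe to treat as failed for a RAID6 retry, mirroring the
 * arithmetic in the hunk above: mirror 3 fails the P stripe, higher mirror
 * numbers fail one data stripe each, skipping the already-failed stripe.
 */
static int pick_failb(int real_stripes, int faila, int mirror_num)
{
	int failb = real_stripes - (mirror_num - 1);

	assert(failb > 0);
	if (failb <= faila)
		failb--;        /* don't pick the stripe that is already bad */
	return failb;
}

int main(void)
{
	/* 6 stripes total: data 0-3, P at index 4, Q at index 5; faila = 2. */
	for (int mirror = 3; mirror <= 6; mirror++)
		printf("mirror %d -> failb %d\n", mirror,
		       pick_failb(6, 2, mirror));
	return 0;
}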
6739     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
6740     index 71b3cd634436..b983e7fb200b 100644
6741     --- a/fs/btrfs/volumes.c
6742     +++ b/fs/btrfs/volumes.c
6743     @@ -5101,7 +5101,14 @@ int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
6744     else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
6745     ret = 2;
6746     else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6747     - ret = 3;
6748     + /*
6749     + * There could be two corrupted data stripes, we need
6750     + * to loop retry in order to rebuild the correct data.
6751     + *
6752     + * Fail a stripe at a time on every retry except the
6753     + * stripe under reconstruction.
6754     + */
6755     + ret = map->num_stripes;
6756     else
6757     ret = 1;
6758     free_extent_map(em);
6759     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
6760     index 35dc5bf01ee2..7fd39ea6e22e 100644
6761     --- a/fs/cifs/cifssmb.c
6762     +++ b/fs/cifs/cifssmb.c
6763     @@ -6331,9 +6331,7 @@ CIFSSMBSetEA(const unsigned int xid, struct cifs_tcon *tcon,
6764     pSMB->InformationLevel =
6765     cpu_to_le16(SMB_SET_FILE_EA);
6766    
6767     - parm_data =
6768     - (struct fealist *) (((char *) &pSMB->hdr.Protocol) +
6769     - offset);
6770     + parm_data = (void *)pSMB + offsetof(struct smb_hdr, Protocol) + offset;
6771     pSMB->ParameterOffset = cpu_to_le16(param_offset);
6772     pSMB->DataOffset = cpu_to_le16(offset);
6773     pSMB->SetupCount = 1;
6774     diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
6775     index 81ba6e0d88d8..925844343038 100644
6776     --- a/fs/cifs/dir.c
6777     +++ b/fs/cifs/dir.c
6778     @@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
6779     goto mknod_out;
6780     }
6781    
6782     + if (!S_ISCHR(mode) && !S_ISBLK(mode))
6783     + goto mknod_out;
6784     +
6785     if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
6786     goto mknod_out;
6787    
6788     @@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
6789    
6790     buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
6791     if (buf == NULL) {
6792     - kfree(full_path);
6793     rc = -ENOMEM;
6794     - free_xid(xid);
6795     - return rc;
6796     + goto mknod_out;
6797     }
6798    
6799     if (backup_cred(cifs_sb))
6800     @@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
6801     pdev->minor = cpu_to_le64(MINOR(device_number));
6802     rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
6803     &bytes_written, iov, 1);
6804     - } /* else if (S_ISFIFO) */
6805     + }
6806     tcon->ses->server->ops->close(xid, tcon, &fid);
6807     d_drop(direntry);
6808    
6809     diff --git a/fs/dax.c b/fs/dax.c
6810     index 191306cd8b6b..ddb4981ae32e 100644
6811     --- a/fs/dax.c
6812     +++ b/fs/dax.c
6813     @@ -630,8 +630,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
6814     set_pmd_at(vma->vm_mm, address, pmdp, pmd);
6815     mmu_notifier_invalidate_range(vma->vm_mm, start, end);
6816     unlock_pmd:
6817     - spin_unlock(ptl);
6818     #endif
6819     + spin_unlock(ptl);
6820     } else {
6821     if (pfn != pte_pfn(*ptep))
6822     goto unlock_pte;
6823     diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
6824     index d5021ba69e7a..e5673a9b2619 100644
6825     --- a/fs/f2fs/gc.c
6826     +++ b/fs/f2fs/gc.c
6827     @@ -696,7 +696,12 @@ static void move_data_block(struct inode *inode, block_t bidx,
6828     fio.op = REQ_OP_WRITE;
6829     fio.op_flags = REQ_SYNC;
6830     fio.new_blkaddr = newaddr;
6831     - f2fs_submit_page_write(&fio);
6832     + err = f2fs_submit_page_write(&fio);
6833     + if (err) {
6834     + if (PageWriteback(fio.encrypted_page))
6835     + end_page_writeback(fio.encrypted_page);
6836     + goto put_page_out;
6837     + }
6838    
6839     f2fs_update_iostat(fio.sbi, FS_GC_DATA_IO, F2FS_BLKSIZE);
6840    
6841     diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
6842     index e96c6b05e43e..3c96f4bdc549 100644
6843     --- a/fs/jffs2/fs.c
6844     +++ b/fs/jffs2/fs.c
6845     @@ -362,7 +362,6 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
6846     ret = -EIO;
6847     error:
6848     mutex_unlock(&f->sem);
6849     - jffs2_do_clear_inode(c, f);
6850     iget_failed(inode);
6851     return ERR_PTR(ret);
6852     }
6853     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
6854     index 2241d52710f7..ae8f43d270d6 100644
6855     --- a/fs/nfs/nfs4proc.c
6856     +++ b/fs/nfs/nfs4proc.c
6857     @@ -1885,7 +1885,7 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
6858     return ret;
6859     }
6860    
6861     -static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, int err)
6862     +static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct nfs4_state *state, const nfs4_stateid *stateid, struct file_lock *fl, int err)
6863     {
6864     switch (err) {
6865     default:
6866     @@ -1932,7 +1932,11 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
6867     return -EAGAIN;
6868     case -ENOMEM:
6869     case -NFS4ERR_DENIED:
6870     - /* kill_proc(fl->fl_pid, SIGLOST, 1); */
6871     + if (fl) {
6872     + struct nfs4_lock_state *lsp = fl->fl_u.nfs4_fl.owner;
6873     + if (lsp)
6874     + set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
6875     + }
6876     return 0;
6877     }
6878     return err;
6879     @@ -1968,7 +1972,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
6880     err = nfs4_open_recover_helper(opendata, FMODE_READ);
6881     }
6882     nfs4_opendata_put(opendata);
6883     - return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6884     + return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
6885     }
6886    
6887     static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
6888     @@ -6595,7 +6599,7 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state,
6889     if (err != 0)
6890     return err;
6891     err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW);
6892     - return nfs4_handle_delegation_recall_error(server, state, stateid, err);
6893     + return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err);
6894     }
6895    
6896     struct nfs_release_lockowner_data {
6897     diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
6898     index 0378e2257ca7..45873ed92057 100644
6899     --- a/fs/nfs/nfs4state.c
6900     +++ b/fs/nfs/nfs4state.c
6901     @@ -1447,6 +1447,7 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
6902     struct inode *inode = state->inode;
6903     struct nfs_inode *nfsi = NFS_I(inode);
6904     struct file_lock *fl;
6905     + struct nfs4_lock_state *lsp;
6906     int status = 0;
6907     struct file_lock_context *flctx = inode->i_flctx;
6908     struct list_head *list;
6909     @@ -1487,7 +1488,9 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
6910     case -NFS4ERR_DENIED:
6911     case -NFS4ERR_RECLAIM_BAD:
6912     case -NFS4ERR_RECLAIM_CONFLICT:
6913     - /* kill_proc(fl->fl_pid, SIGLOST, 1); */
6914     + lsp = fl->fl_u.nfs4_fl.owner;
6915     + if (lsp)
6916     + set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
6917     status = 0;
6918     }
6919     spin_lock(&flctx->flc_lock);
6920     diff --git a/fs/nfs/nfs4sysctl.c b/fs/nfs/nfs4sysctl.c
6921     index 0d91d84e5822..c394e4447100 100644
6922     --- a/fs/nfs/nfs4sysctl.c
6923     +++ b/fs/nfs/nfs4sysctl.c
6924     @@ -32,7 +32,7 @@ static struct ctl_table nfs4_cb_sysctls[] = {
6925     .data = &nfs_idmap_cache_timeout,
6926     .maxlen = sizeof(int),
6927     .mode = 0644,
6928     - .proc_handler = proc_dointvec_jiffies,
6929     + .proc_handler = proc_dointvec,
6930     },
6931     { }
6932     };
6933     diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
6934     index 40b5cc97f7b0..917fadca8a7b 100644
6935     --- a/fs/ocfs2/acl.c
6936     +++ b/fs/ocfs2/acl.c
6937     @@ -311,7 +311,9 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type)
6938     if (had_lock < 0)
6939     return ERR_PTR(had_lock);
6940    
6941     + down_read(&OCFS2_I(inode)->ip_xattr_sem);
6942     acl = ocfs2_get_acl_nolock(inode, type, di_bh);
6943     + up_read(&OCFS2_I(inode)->ip_xattr_sem);
6944    
6945     ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
6946     brelse(di_bh);
6947     @@ -330,7 +332,9 @@ int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh)
6948     if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL))
6949     return 0;
6950    
6951     + down_read(&OCFS2_I(inode)->ip_xattr_sem);
6952     acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh);
6953     + up_read(&OCFS2_I(inode)->ip_xattr_sem);
6954     if (IS_ERR(acl) || !acl)
6955     return PTR_ERR(acl);
6956     ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode);
6957     @@ -361,8 +365,10 @@ int ocfs2_init_acl(handle_t *handle,
6958    
6959     if (!S_ISLNK(inode->i_mode)) {
6960     if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
6961     + down_read(&OCFS2_I(dir)->ip_xattr_sem);
6962     acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT,
6963     dir_bh);
6964     + up_read(&OCFS2_I(dir)->ip_xattr_sem);
6965     if (IS_ERR(acl))
6966     return PTR_ERR(acl);
6967     }
6968     diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
6969     index 36304434eacf..e5dcea6cee5f 100644
6970     --- a/fs/ocfs2/journal.c
6971     +++ b/fs/ocfs2/journal.c
6972     @@ -666,23 +666,24 @@ static int __ocfs2_journal_access(handle_t *handle,
6973     /* we can safely remove this assertion after testing. */
6974     if (!buffer_uptodate(bh)) {
6975     mlog(ML_ERROR, "giving me a buffer that's not uptodate!\n");
6976     - mlog(ML_ERROR, "b_blocknr=%llu\n",
6977     - (unsigned long long)bh->b_blocknr);
6978     + mlog(ML_ERROR, "b_blocknr=%llu, b_state=0x%lx\n",
6979     + (unsigned long long)bh->b_blocknr, bh->b_state);
6980    
6981     lock_buffer(bh);
6982     /*
6983     - * A previous attempt to write this buffer head failed.
6984     - * Nothing we can do but to retry the write and hope for
6985     - * the best.
6986     + * A previous transaction with a couple of buffer heads fail
6987     + * to checkpoint, so all the bhs are marked as BH_Write_EIO.
6988     + * For current transaction, the bh is just among those error
6989     + * bhs which previous transaction handle. We can't just clear
6990     + * its BH_Write_EIO and reuse directly, since other bhs are
6991     + * not written to disk yet and that will cause metadata
6992     + * inconsistency. So we should set fs read-only to avoid
6993     + * further damage.
6994     */
6995     if (buffer_write_io_error(bh) && !buffer_uptodate(bh)) {
6996     - clear_buffer_write_io_error(bh);
6997     - set_buffer_uptodate(bh);
6998     - }
6999     -
7000     - if (!buffer_uptodate(bh)) {
7001     unlock_buffer(bh);
7002     - return -EIO;
7003     + return ocfs2_error(osb->sb, "A previous attempt to "
7004     + "write this buffer head failed\n");
7005     }
7006     unlock_buffer(bh);
7007     }
7008     diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
7009     index 80733496b22a..24ab735d91dd 100644
7010     --- a/fs/ocfs2/super.c
7011     +++ b/fs/ocfs2/super.c
7012     @@ -474,9 +474,8 @@ static int ocfs2_init_global_system_inodes(struct ocfs2_super *osb)
7013     new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
7014     if (!new) {
7015     ocfs2_release_system_inodes(osb);
7016     - status = -EINVAL;
7017     + status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
7018     mlog_errno(status);
7019     - /* FIXME: Should ERROR_RO_FS */
7020     mlog(ML_ERROR, "Unable to load system inode %d, "
7021     "possibly corrupt fs?", i);
7022     goto bail;
7023     @@ -505,7 +504,7 @@ static int ocfs2_init_local_system_inodes(struct ocfs2_super *osb)
7024     new = ocfs2_get_system_file_inode(osb, i, osb->slot_num);
7025     if (!new) {
7026     ocfs2_release_system_inodes(osb);
7027     - status = -EINVAL;
7028     + status = ocfs2_is_soft_readonly(osb) ? -EROFS : -EINVAL;
7029     mlog(ML_ERROR, "status=%d, sysfile=%d, slot=%d\n",
7030     status, i, osb->slot_num);
7031     goto bail;
7032     diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
7033     index 5fdf269ba82e..fb0a4eec310c 100644
7034     --- a/fs/ocfs2/xattr.c
7035     +++ b/fs/ocfs2/xattr.c
7036     @@ -638,9 +638,11 @@ int ocfs2_calc_xattr_init(struct inode *dir,
7037     si->value_len);
7038    
7039     if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
7040     + down_read(&OCFS2_I(dir)->ip_xattr_sem);
7041     acl_len = ocfs2_xattr_get_nolock(dir, dir_bh,
7042     OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT,
7043     "", NULL, 0);
7044     + up_read(&OCFS2_I(dir)->ip_xattr_sem);
7045     if (acl_len > 0) {
7046     a_size = ocfs2_xattr_entry_real_size(0, acl_len);
7047     if (S_ISDIR(mode))
7048     diff --git a/fs/proc/base.c b/fs/proc/base.c
7049     index 9d357b2ea6cb..2ff11a693360 100644
7050     --- a/fs/proc/base.c
7051     +++ b/fs/proc/base.c
7052     @@ -100,6 +100,8 @@
7053     #include "internal.h"
7054     #include "fd.h"
7055    
7056     +#include "../../lib/kstrtox.h"
7057     +
7058     /* NOTE:
7059     * Implementing inode permission operations in /proc is almost
7060     * certainly an error. Permission checks need to happen during
7061     @@ -1908,8 +1910,33 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx,
7062     static int dname_to_vma_addr(struct dentry *dentry,
7063     unsigned long *start, unsigned long *end)
7064     {
7065     - if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
7066     + const char *str = dentry->d_name.name;
7067     + unsigned long long sval, eval;
7068     + unsigned int len;
7069     +
7070     + len = _parse_integer(str, 16, &sval);
7071     + if (len & KSTRTOX_OVERFLOW)
7072     + return -EINVAL;
7073     + if (sval != (unsigned long)sval)
7074     + return -EINVAL;
7075     + str += len;
7076     +
7077     + if (*str != '-')
7078     return -EINVAL;
7079     + str++;
7080     +
7081     + len = _parse_integer(str, 16, &eval);
7082     + if (len & KSTRTOX_OVERFLOW)
7083     + return -EINVAL;
7084     + if (eval != (unsigned long)eval)
7085     + return -EINVAL;
7086     + str += len;
7087     +
7088     + if (*str != '\0')
7089     + return -EINVAL;
7090     +
7091     + *start = sval;
7092     + *end = eval;
7093    
7094     return 0;
7095     }
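
The dname_to_vma_addr() change above drops sscanf() in favour of _parse_integer() so the "start-end" name is parsed strictly: overflow, a missing separator and trailing characters are all rejected. Comparable strictness in ordinary user-space C looks roughly like this (sketch only; strtoull() is slightly laxer about leading whitespace and signs):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse "<hex>-<hex>" strictly; reject junk, overflow and empty fields. */
static int parse_vma_range(const char *s, unsigned long *start,
			   unsigned long *end)
{
	char *p;
	unsigned long long v;

	errno = 0;
	v = strtoull(s, &p, 16);
	if (p == s || errno == ERANGE || v != (unsigned long)v || *p != '-')
		return -1;
	*start = (unsigned long)v;

	s = p + 1;
	errno = 0;
	v = strtoull(s, &p, 16);
	if (p == s || errno == ERANGE || v != (unsigned long)v || *p != '\0')
		return -1;
	*end = (unsigned long)v;
	return 0;
}

int main(void)
{
	unsigned long start, end;

	if (parse_vma_range("7f3a10000000-7f3a10021000", &start, &end) == 0)
		printf("%lx-%lx\n", start, end);
	if (parse_vma_range("dead-beef junk", &start, &end) != 0)
		printf("rejected trailing junk\n");
	return 0;
}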
7096     diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
7097     index e8a93bc8285d..d1e82761de81 100644
7098     --- a/fs/proc/kcore.c
7099     +++ b/fs/proc/kcore.c
7100     @@ -510,6 +510,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
7101     /* we have to zero-fill user buffer even if no read */
7102     if (copy_to_user(buffer, buf, tsz))
7103     return -EFAULT;
7104     + } else if (m->type == KCORE_USER) {
7105     + /* User page is handled prior to normal kernel page: */
7106     + if (copy_to_user(buffer, (char *)start, tsz))
7107     + return -EFAULT;
7108     } else {
7109     if (kern_addr_valid(start)) {
7110     /*
7111     diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
7112     index 0a213dcba2a1..ba3d0e0f8615 100644
7113     --- a/fs/ubifs/tnc.c
7114     +++ b/fs/ubifs/tnc.c
7115     @@ -1890,35 +1890,28 @@ static int search_dh_cookie(struct ubifs_info *c, const union ubifs_key *key,
7116     union ubifs_key *dkey;
7117    
7118     for (;;) {
7119     - if (!err) {
7120     - err = tnc_next(c, &znode, n);
7121     - if (err)
7122     - goto out;
7123     - }
7124     -
7125     zbr = &znode->zbranch[*n];
7126     dkey = &zbr->key;
7127    
7128     if (key_inum(c, dkey) != key_inum(c, key) ||
7129     key_type(c, dkey) != key_type(c, key)) {
7130     - err = -ENOENT;
7131     - goto out;
7132     + return -ENOENT;
7133     }
7134    
7135     err = tnc_read_hashed_node(c, zbr, dent);
7136     if (err)
7137     - goto out;
7138     + return err;
7139    
7140     if (key_hash(c, key) == key_hash(c, dkey) &&
7141     le32_to_cpu(dent->cookie) == cookie) {
7142     *zn = znode;
7143     - goto out;
7144     + return 0;
7145     }
7146     - }
7147     -
7148     -out:
7149    
7150     - return err;
7151     + err = tnc_next(c, &znode, n);
7152     + if (err)
7153     + return err;
7154     + }
7155     }
7156    
7157     static int do_lookup_dh(struct ubifs_info *c, const union ubifs_key *key,
7158     diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
7159     index 77b891a8f191..2142bceaeb75 100644
7160     --- a/include/asm-generic/pgtable.h
7161     +++ b/include/asm-generic/pgtable.h
7162     @@ -309,6 +309,21 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
7163     extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
7164     #endif
7165    
7166     +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7167     +/*
7168     + * This is an implementation of pmdp_establish() that is only suitable for an
7169     + * architecture that doesn't have hardware dirty/accessed bits. In this case we
7170     + * can't race with CPU which sets these bits and non-atomic approach is fine.

7171     + */
7172     +static inline pmd_t generic_pmdp_establish(struct vm_area_struct *vma,
7173     + unsigned long address, pmd_t *pmdp, pmd_t pmd)
7174     +{
7175     + pmd_t old_pmd = *pmdp;
7176     + set_pmd_at(vma->vm_mm, address, pmdp, pmd);
7177     + return old_pmd;
7178     +}
7179     +#endif
7180     +
7181     #ifndef __HAVE_ARCH_PMDP_INVALIDATE
7182     extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
7183     pmd_t *pmdp);
7184     diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
7185     index 8d3125c493b2..db461a07bf38 100644
7186     --- a/include/linux/cpumask.h
7187     +++ b/include/linux/cpumask.h
7188     @@ -165,6 +165,8 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
7189     for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
7190     #define for_each_cpu_not(cpu, mask) \
7191     for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
7192     +#define for_each_cpu_wrap(cpu, mask, start) \
7193     + for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)(start))
7194     #define for_each_cpu_and(cpu, mask, and) \
7195     for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask, (void)and)
7196     #else
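
The for_each_cpu_wrap() stub added above is the uniprocessor degenerate case; on SMP builds the real macro visits every CPU in the mask exactly once, starting at "start" and wrapping around. The traversal order it provides can be sketched in plain C (not the kernel macro):

#include <stdio.h>

/* Visit every set bit of `mask` exactly once, starting at `start`
 * and wrapping around -- the order for_each_cpu_wrap() provides. */
static void walk_wrap(unsigned int mask, unsigned int nbits,
		      unsigned int start)
{
	for (unsigned int i = 0; i < nbits; i++) {
		unsigned int cpu = (start + i) % nbits;

		if (mask & (1u << cpu))
			printf("cpu %u\n", cpu);
	}
}

int main(void)
{
	/* CPUs 0,1,3,5 present; start scanning at CPU 4 -> visits 5,0,1,3. */
	walk_wrap(0x2b, 6, 4);
	return 0;
}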
7197     diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
7198     index 2d9f80848d4b..c643cc7fefb5 100644
7199     --- a/include/linux/etherdevice.h
7200     +++ b/include/linux/etherdevice.h
7201     @@ -31,7 +31,7 @@
7202     #ifdef __KERNEL__
7203     struct device;
7204     int eth_platform_get_mac_address(struct device *dev, u8 *mac_addr);
7205     -unsigned char *arch_get_platform_get_mac_address(void);
7206     +unsigned char *arch_get_platform_mac_address(void);
7207     u32 eth_get_headlen(void *data, unsigned int max_len);
7208     __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
7209     extern const struct header_ops eth_header_ops;
7210     diff --git a/include/linux/kcore.h b/include/linux/kcore.h
7211     index 7ff25a808fef..80db19d3a505 100644
7212     --- a/include/linux/kcore.h
7213     +++ b/include/linux/kcore.h
7214     @@ -10,6 +10,7 @@ enum kcore_type {
7215     KCORE_VMALLOC,
7216     KCORE_RAM,
7217     KCORE_VMEMMAP,
7218     + KCORE_USER,
7219     KCORE_OTHER,
7220     };
7221    
7222     diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
7223     index 8e46c35d654b..54f346a45cd0 100644
7224     --- a/include/linux/netfilter/x_tables.h
7225     +++ b/include/linux/netfilter/x_tables.h
7226     @@ -301,6 +301,7 @@ int xt_data_to_user(void __user *dst, const void *src,
7227    
7228     void *xt_copy_counters_from_user(const void __user *user, unsigned int len,
7229     struct xt_counters_info *info, bool compat);
7230     +struct xt_counters *xt_counters_alloc(unsigned int counters);
7231    
7232     struct xt_table *xt_register_table(struct net *net,
7233     const struct xt_table *table,
7234     @@ -507,7 +508,7 @@ void xt_compat_unlock(u_int8_t af);
7235    
7236     int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
7237     void xt_compat_flush_offsets(u_int8_t af);
7238     -void xt_compat_init_offsets(u_int8_t af, unsigned int number);
7239     +int xt_compat_init_offsets(u8 af, unsigned int number);
7240     int xt_compat_calc_jump(u_int8_t af, unsigned int offset);
7241    
7242     int xt_compat_match_offset(const struct xt_match *match);
7243     diff --git a/include/linux/pci.h b/include/linux/pci.h
7244     index d16a7c037ec0..727e309baa5e 100644
7245     --- a/include/linux/pci.h
7246     +++ b/include/linux/pci.h
7247     @@ -1688,6 +1688,13 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
7248     #define dev_is_pf(d) (false)
7249     static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
7250     { return false; }
7251     +static inline int pci_irqd_intx_xlate(struct irq_domain *d,
7252     + struct device_node *node,
7253     + const u32 *intspec,
7254     + unsigned int intsize,
7255     + unsigned long *out_hwirq,
7256     + unsigned int *out_type)
7257     +{ return -EINVAL; }
7258     #endif /* CONFIG_PCI */
7259    
7260     /* Include architecture-dependent settings and functions */
7261     diff --git a/include/linux/property.h b/include/linux/property.h
7262     index 6bebee13c5e0..89d94b349912 100644
7263     --- a/include/linux/property.h
7264     +++ b/include/linux/property.h
7265     @@ -206,7 +206,7 @@ struct property_entry {
7266     */
7267    
7268     #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \
7269     -{ \
7270     +(struct property_entry) { \
7271     .name = _name_, \
7272     .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
7273     .is_array = true, \
7274     @@ -224,7 +224,7 @@ struct property_entry {
7275     PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_)
7276    
7277     #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
7278     -{ \
7279     +(struct property_entry) { \
7280     .name = _name_, \
7281     .length = ARRAY_SIZE(_val_) * sizeof(const char *), \
7282     .is_array = true, \
7283     @@ -233,7 +233,7 @@ struct property_entry {
7284     }
7285    
7286     #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \
7287     -{ \
7288     +(struct property_entry) { \
7289     .name = _name_, \
7290     .length = sizeof(_type_), \
7291     .is_string = false, \
7292     @@ -250,7 +250,7 @@ struct property_entry {
7293     PROPERTY_ENTRY_INTEGER(_name_, u64, _val_)
7294    
7295     #define PROPERTY_ENTRY_STRING(_name_, _val_) \
7296     -{ \
7297     +(struct property_entry) { \
7298     .name = _name_, \
7299     .length = sizeof(_val_), \
7300     .is_string = true, \
7301     @@ -258,7 +258,7 @@ struct property_entry {
7302     }
7303    
7304     #define PROPERTY_ENTRY_BOOL(_name_) \
7305     -{ \
7306     +(struct property_entry) { \
7307     .name = _name_, \
7308     }
7309    
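
Prefixing each initializer above with (struct property_entry) turns the macros into C99 compound literals, so they still work inside array initializers and additionally become usable as standalone expressions, for example as function arguments. A minimal demonstration with a stand-in struct (names are illustrative):

#include <stdio.h>

struct prop {
	const char *name;
	int value;
};

/* Expands to a compound literal: valid both inside an initializer list
 * and as an ordinary expression. */
#define PROP_INT(_name_, _val_) \
	(struct prop) { .name = _name_, .value = _val_ }

static void print_prop(struct prop p)
{
	printf("%s = %d\n", p.name, p.value);
}

int main(void)
{
	/* Usable in an aggregate initializer... */
	struct prop table[] = {
		PROP_INT("width", 800),
		PROP_INT("height", 600),
	};

	/* ...and, thanks to the compound literal, as a plain expression too. */
	print_prop(PROP_INT("depth", 24));
	print_prop(table[0]);
	return 0;
}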
7310     diff --git a/include/linux/suspend.h b/include/linux/suspend.h
7311     index d60b0f5c38d5..8544357d92d0 100644
7312     --- a/include/linux/suspend.h
7313     +++ b/include/linux/suspend.h
7314     @@ -384,6 +384,8 @@ extern int swsusp_page_is_forbidden(struct page *);
7315     extern void swsusp_set_page_free(struct page *);
7316     extern void swsusp_unset_page_free(struct page *);
7317     extern unsigned long get_safe_page(gfp_t gfp_mask);
7318     +extern asmlinkage int swsusp_arch_suspend(void);
7319     +extern asmlinkage int swsusp_arch_resume(void);
7320    
7321     extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
7322     extern int hibernate(void);
7323     diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
7324     index 6533aa64f009..08f3d8699a27 100644
7325     --- a/include/rdma/ib_verbs.h
7326     +++ b/include/rdma/ib_verbs.h
7327     @@ -866,6 +866,7 @@ struct ib_mr_status {
7328     __attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
7329    
7330     enum rdma_ah_attr_type {
7331     + RDMA_AH_ATTR_TYPE_UNDEFINED,
7332     RDMA_AH_ATTR_TYPE_IB,
7333     RDMA_AH_ATTR_TYPE_ROCE,
7334     RDMA_AH_ATTR_TYPE_OPA,
7335     @@ -3762,18 +3763,24 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
7336     grh->traffic_class = traffic_class;
7337     }
7338    
7339     -/*Get AH type */
7340     +/**
7341     + * rdma_ah_find_type - Return address handle type.
7342     + *
7343     + * @dev: Device to be checked
7344     + * @port_num: Port number
7345     + */
7346     static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
7347     - u32 port_num)
7348     + u8 port_num)
7349     {
7350     - if ((rdma_protocol_roce(dev, port_num)) ||
7351     - (rdma_protocol_iwarp(dev, port_num)))
7352     + if (rdma_protocol_roce(dev, port_num))
7353     return RDMA_AH_ATTR_TYPE_ROCE;
7354     - else if ((rdma_protocol_ib(dev, port_num)) &&
7355     - (rdma_cap_opa_ah(dev, port_num)))
7356     - return RDMA_AH_ATTR_TYPE_OPA;
7357     - else
7358     + if (rdma_protocol_ib(dev, port_num)) {
7359     + if (rdma_cap_opa_ah(dev, port_num))
7360     + return RDMA_AH_ATTR_TYPE_OPA;
7361     return RDMA_AH_ATTR_TYPE_IB;
7362     + }
7363     +
7364     + return RDMA_AH_ATTR_TYPE_UNDEFINED;
7365     }
7366    
7367     /**
7368     diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
7369     index 16e305e69f34..c6f728037c53 100644
7370     --- a/include/trace/events/timer.h
7371     +++ b/include/trace/events/timer.h
7372     @@ -136,6 +136,20 @@ DEFINE_EVENT(timer_class, timer_cancel,
7373     TP_ARGS(timer)
7374     );
7375    
7376     +#define decode_clockid(type) \
7377     + __print_symbolic(type, \
7378     + { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
7379     + { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
7380     + { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
7381     + { CLOCK_TAI, "CLOCK_TAI" })
7382     +
7383     +#define decode_hrtimer_mode(mode) \
7384     + __print_symbolic(mode, \
7385     + { HRTIMER_MODE_ABS, "ABS" }, \
7386     + { HRTIMER_MODE_REL, "REL" }, \
7387     + { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
7388     + { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
7389     +
7390     /**
7391     * hrtimer_init - called when the hrtimer is initialized
7392     * @hrtimer: pointer to struct hrtimer
7393     @@ -162,10 +176,8 @@ TRACE_EVENT(hrtimer_init,
7394     ),
7395    
7396     TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
7397     - __entry->clockid == CLOCK_REALTIME ?
7398     - "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
7399     - __entry->mode == HRTIMER_MODE_ABS ?
7400     - "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
7401     + decode_clockid(__entry->clockid),
7402     + decode_hrtimer_mode(__entry->mode))
7403     );
7404    
7405     /**
7406     diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
7407     index 1890be7ea9cd..53a4787c08d8 100644
7408     --- a/kernel/bpf/sockmap.c
7409     +++ b/kernel/bpf/sockmap.c
7410     @@ -601,11 +601,6 @@ static void sock_map_free(struct bpf_map *map)
7411     }
7412     rcu_read_unlock();
7413    
7414     - if (stab->bpf_verdict)
7415     - bpf_prog_put(stab->bpf_verdict);
7416     - if (stab->bpf_parse)
7417     - bpf_prog_put(stab->bpf_parse);
7418     -
7419     sock_map_remove_complete(stab);
7420     }
7421    
7422     @@ -877,6 +872,19 @@ static int sock_map_update_elem(struct bpf_map *map,
7423     return err;
7424     }
7425    
7426     +static void sock_map_release(struct bpf_map *map, struct file *map_file)
7427     +{
7428     + struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
7429     + struct bpf_prog *orig;
7430     +
7431     + orig = xchg(&stab->bpf_parse, NULL);
7432     + if (orig)
7433     + bpf_prog_put(orig);
7434     + orig = xchg(&stab->bpf_verdict, NULL);
7435     + if (orig)
7436     + bpf_prog_put(orig);
7437     +}
7438     +
7439     const struct bpf_map_ops sock_map_ops = {
7440     .map_alloc = sock_map_alloc,
7441     .map_free = sock_map_free,
7442     @@ -884,6 +892,7 @@ const struct bpf_map_ops sock_map_ops = {
7443     .map_get_next_key = sock_map_get_next_key,
7444     .map_update_elem = sock_map_update_elem,
7445     .map_delete_elem = sock_map_delete_elem,
7446     + .map_release = sock_map_release,
7447     };
7448    
7449     BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
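
sock_map_release() above detaches each program pointer with xchg() before dropping its reference, so the reference is released exactly once even if release paths race. The same idiom in portable C11 atomics, with placeholder types rather than the BPF structures:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct prog {
	int refcount;
};

static void prog_put(struct prog *p)
{
	/* Placeholder for bpf_prog_put(): drop a reference, free at zero. */
	if (--p->refcount == 0)
		free(p);
}

static _Atomic(struct prog *) attached_prog;

static void map_release(void)
{
	/* Atomically detach the pointer; only one caller can see non-NULL,
	 * so the reference is dropped exactly once. */
	struct prog *orig = atomic_exchange(&attached_prog, NULL);

	if (orig)
		prog_put(orig);
}

int main(void)
{
	struct prog *p = malloc(sizeof(*p));

	p->refcount = 1;
	atomic_store(&attached_prog, p);

	map_release();   /* releases the reference */
	map_release();   /* sees NULL, does nothing */
	printf("done\n");
	return 0;
}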
7450     diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
7451     index 1b2be63c8528..fa4f47a0a631 100644
7452     --- a/kernel/events/callchain.c
7453     +++ b/kernel/events/callchain.c
7454     @@ -119,19 +119,22 @@ int get_callchain_buffers(int event_max_stack)
7455     goto exit;
7456     }
7457    
7458     + /*
7459     + * If requesting per event more than the global cap,
7460     + * return a different error to help userspace figure
7461     + * this out.
7462     + *
7463     + * And also do it here so that we have &callchain_mutex held.
7464     + */
7465     + if (event_max_stack > sysctl_perf_event_max_stack) {
7466     + err = -EOVERFLOW;
7467     + goto exit;
7468     + }
7469     +
7470     if (count > 1) {
7471     /* If the allocation failed, give up */
7472     if (!callchain_cpus_entries)
7473     err = -ENOMEM;
7474     - /*
7475     - * If requesting per event more than the global cap,
7476     - * return a different error to help userspace figure
7477     - * this out.
7478     - *
7479     - * And also do it here so that we have &callchain_mutex held.
7480     - */
7481     - if (event_max_stack > sysctl_perf_event_max_stack)
7482     - err = -EOVERFLOW;
7483     goto exit;
7484     }
7485    
7486     diff --git a/kernel/events/core.c b/kernel/events/core.c
7487     index e9b0beca830f..cb8274d7824f 100644
7488     --- a/kernel/events/core.c
7489     +++ b/kernel/events/core.c
7490     @@ -9750,9 +9750,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
7491     * __u16 sample size limit.
7492     */
7493     if (attr->sample_stack_user >= USHRT_MAX)
7494     - ret = -EINVAL;
7495     + return -EINVAL;
7496     else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
7497     - ret = -EINVAL;
7498     + return -EINVAL;
7499     }
7500    
7501     if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
7502     diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
7503     index 294294c71ba4..50dc42aeaa56 100644
7504     --- a/kernel/locking/qspinlock.c
7505     +++ b/kernel/locking/qspinlock.c
7506     @@ -379,6 +379,14 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
7507     tail = encode_tail(smp_processor_id(), idx);
7508    
7509     node += idx;
7510     +
7511     + /*
7512     + * Ensure that we increment the head node->count before initialising
7513     + * the actual node. If the compiler is kind enough to reorder these
7514     + * stores, then an IRQ could overwrite our assignments.
7515     + */
7516     + barrier();
7517     +
7518     node->locked = 0;
7519     node->next = NULL;
7520     pv_init_node(node);
7521     diff --git a/kernel/power/power.h b/kernel/power/power.h
7522     index f29cd178df90..9e58bdc8a562 100644
7523     --- a/kernel/power/power.h
7524     +++ b/kernel/power/power.h
7525     @@ -104,9 +104,6 @@ extern int in_suspend;
7526     extern dev_t swsusp_resume_device;
7527     extern sector_t swsusp_resume_block;
7528    
7529     -extern asmlinkage int swsusp_arch_suspend(void);
7530     -extern asmlinkage int swsusp_arch_resume(void);
7531     -
7532     extern int create_basic_memory_bitmaps(void);
7533     extern void free_basic_memory_bitmaps(void);
7534     extern int hibernate_preallocate_memory(void);
7535     diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
7536     index ec09ce9a6012..639321bf2e39 100644
7537     --- a/kernel/time/alarmtimer.c
7538     +++ b/kernel/time/alarmtimer.c
7539     @@ -326,6 +326,17 @@ static int alarmtimer_resume(struct device *dev)
7540     }
7541     #endif
7542    
7543     +static void
7544     +__alarm_init(struct alarm *alarm, enum alarmtimer_type type,
7545     + enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
7546     +{
7547     + timerqueue_init(&alarm->node);
7548     + alarm->timer.function = alarmtimer_fired;
7549     + alarm->function = function;
7550     + alarm->type = type;
7551     + alarm->state = ALARMTIMER_STATE_INACTIVE;
7552     +}
7553     +
7554     /**
7555     * alarm_init - Initialize an alarm structure
7556     * @alarm: ptr to alarm to be initialized
7557     @@ -335,13 +346,9 @@ static int alarmtimer_resume(struct device *dev)
7558     void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
7559     enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
7560     {
7561     - timerqueue_init(&alarm->node);
7562     hrtimer_init(&alarm->timer, alarm_bases[type].base_clockid,
7563     - HRTIMER_MODE_ABS);
7564     - alarm->timer.function = alarmtimer_fired;
7565     - alarm->function = function;
7566     - alarm->type = type;
7567     - alarm->state = ALARMTIMER_STATE_INACTIVE;
7568     + HRTIMER_MODE_ABS);
7569     + __alarm_init(alarm, type, function);
7570     }
7571     EXPORT_SYMBOL_GPL(alarm_init);
7572    
7573     @@ -719,6 +726,8 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
7574    
7575     __set_current_state(TASK_RUNNING);
7576    
7577     + destroy_hrtimer_on_stack(&alarm->timer);
7578     +
7579     if (!alarm->data)
7580     return 0;
7581    
7582     @@ -740,6 +749,15 @@ static int alarmtimer_do_nsleep(struct alarm *alarm, ktime_t absexp,
7583     return -ERESTART_RESTARTBLOCK;
7584     }
7585    
7586     +static void
7587     +alarm_init_on_stack(struct alarm *alarm, enum alarmtimer_type type,
7588     + enum alarmtimer_restart (*function)(struct alarm *, ktime_t))
7589     +{
7590     + hrtimer_init_on_stack(&alarm->timer, alarm_bases[type].base_clockid,
7591     + HRTIMER_MODE_ABS);
7592     + __alarm_init(alarm, type, function);
7593     +}
7594     +
7595     /**
7596     * alarm_timer_nsleep_restart - restartblock alarmtimer nsleep
7597     * @restart: ptr to restart block
7598     @@ -752,7 +770,7 @@ static long __sched alarm_timer_nsleep_restart(struct restart_block *restart)
7599     ktime_t exp = restart->nanosleep.expires;
7600     struct alarm alarm;
7601    
7602     - alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
7603     + alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
7604    
7605     return alarmtimer_do_nsleep(&alarm, exp, type);
7606     }
7607     @@ -784,7 +802,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
7608     if (!capable(CAP_WAKE_ALARM))
7609     return -EPERM;
7610    
7611     - alarm_init(&alarm, type, alarmtimer_nsleep_wakeup);
7612     + alarm_init_on_stack(&alarm, type, alarmtimer_nsleep_wakeup);
7613    
7614     exp = timespec64_to_ktime(*tsreq);
7615     /* Convert (if necessary) to absolute time */
7616     diff --git a/lib/test_bpf.c b/lib/test_bpf.c
7617     index 6fbb73f3f531..64701b4c9900 100644
7618     --- a/lib/test_bpf.c
7619     +++ b/lib/test_bpf.c
7620     @@ -83,6 +83,7 @@ struct bpf_test {
7621     __u32 result;
7622     } test[MAX_SUBTESTS];
7623     int (*fill_helper)(struct bpf_test *self);
7624     + int expected_errcode; /* used when FLAG_EXPECTED_FAIL is set in the aux */
7625     __u8 frag_data[MAX_DATA];
7626     int stack_depth; /* for eBPF only, since tests don't call verifier */
7627     };
7628     @@ -1987,7 +1988,9 @@ static struct bpf_test tests[] = {
7629     },
7630     CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
7631     { },
7632     - { }
7633     + { },
7634     + .fill_helper = NULL,
7635     + .expected_errcode = -EINVAL,
7636     },
7637     {
7638     "check: div_k_0",
7639     @@ -1997,7 +2000,9 @@ static struct bpf_test tests[] = {
7640     },
7641     CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
7642     { },
7643     - { }
7644     + { },
7645     + .fill_helper = NULL,
7646     + .expected_errcode = -EINVAL,
7647     },
7648     {
7649     "check: unknown insn",
7650     @@ -2008,7 +2013,9 @@ static struct bpf_test tests[] = {
7651     },
7652     CLASSIC | FLAG_EXPECTED_FAIL,
7653     { },
7654     - { }
7655     + { },
7656     + .fill_helper = NULL,
7657     + .expected_errcode = -EINVAL,
7658     },
7659     {
7660     "check: out of range spill/fill",
7661     @@ -2018,7 +2025,9 @@ static struct bpf_test tests[] = {
7662     },
7663     CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
7664     { },
7665     - { }
7666     + { },
7667     + .fill_helper = NULL,
7668     + .expected_errcode = -EINVAL,
7669     },
7670     {
7671     "JUMPS + HOLES",
7672     @@ -2110,6 +2119,8 @@ static struct bpf_test tests[] = {
7673     CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
7674     { },
7675     { },
7676     + .fill_helper = NULL,
7677     + .expected_errcode = -EINVAL,
7678     },
7679     {
7680     "check: LDX + RET X",
7681     @@ -2120,6 +2131,8 @@ static struct bpf_test tests[] = {
7682     CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
7683     { },
7684     { },
7685     + .fill_helper = NULL,
7686     + .expected_errcode = -EINVAL,
7687     },
7688     { /* Mainly checking JIT here. */
7689     "M[]: alt STX + LDX",
7690     @@ -2294,6 +2307,8 @@ static struct bpf_test tests[] = {
7691     CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
7692     { },
7693     { },
7694     + .fill_helper = NULL,
7695     + .expected_errcode = -EINVAL,
7696     },
7697     { /* Passes checker but fails during runtime. */
7698     "LD [SKF_AD_OFF-1]",
7699     @@ -5356,6 +5371,7 @@ static struct bpf_test tests[] = {
7700     { },
7701     { },
7702     .fill_helper = bpf_fill_maxinsns4,
7703     + .expected_errcode = -EINVAL,
7704     },
7705     { /* Mainly checking JIT here. */
7706     "BPF_MAXINSNS: Very long jump",
7707     @@ -5411,10 +5427,15 @@ static struct bpf_test tests[] = {
7708     {
7709     "BPF_MAXINSNS: Jump, gap, jump, ...",
7710     { },
7711     +#ifdef CONFIG_BPF_JIT_ALWAYS_ON
7712     + CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
7713     +#else
7714     CLASSIC | FLAG_NO_DATA,
7715     +#endif
7716     { },
7717     { { 0, 0xababcbac } },
7718     .fill_helper = bpf_fill_maxinsns11,
7719     + .expected_errcode = -ENOTSUPP,
7720     },
7721     {
7722     "BPF_MAXINSNS: ld_abs+get_processor_id",
7723     @@ -6193,7 +6214,7 @@ static struct bpf_prog *generate_filter(int which, int *err)
7724    
7725     *err = bpf_prog_create(&fp, &fprog);
7726     if (tests[which].aux & FLAG_EXPECTED_FAIL) {
7727     - if (*err == -EINVAL) {
7728     + if (*err == tests[which].expected_errcode) {
7729     pr_cont("PASS\n");
7730     /* Verifier rejected filter as expected. */
7731     *err = 0;
7732     diff --git a/mm/fadvise.c b/mm/fadvise.c
7733     index ec70d6e4b86d..767887f5f3bf 100644
7734     --- a/mm/fadvise.c
7735     +++ b/mm/fadvise.c
7736     @@ -127,7 +127,15 @@ SYSCALL_DEFINE4(fadvise64_64, int, fd, loff_t, offset, loff_t, len, int, advice)
7737     */
7738     start_index = (offset+(PAGE_SIZE-1)) >> PAGE_SHIFT;
7739     end_index = (endbyte >> PAGE_SHIFT);
7740     - if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK) {
7741     + /*
7742     + * The page at end_index will be inclusively discarded by
7743     + * invalidate_mapping_pages(), so subtracting 1 from
7744     + * end_index means we will skip the last page. But if endbyte
7745     + * is page aligned or is at the end of file, we should not skip
7746     + * that page - discarding the last page is safe enough.
7747     + */
7748     + if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK &&
7749     + endbyte != inode->i_size - 1) {
7750     /* First page is tricky as 0 - 1 = -1, but pgoff_t
7751     * is unsigned, so the end_index >= start_index
7752     * check below would be true and we'll discard the whole
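
The extra condition above keeps the page containing endbyte eligible for invalidation when endbyte is either the last byte of a page or the last byte of the file; otherwise end_index is decremented so a partially covered page is not dropped. A small arithmetic walk-through with 4 KiB pages (illustrative; the end_index == 0 guard from the kernel code is omitted):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static void show(unsigned long offset, unsigned long endbyte,
		 unsigned long i_size)
{
	unsigned long start_index = (offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long end_index = endbyte >> PAGE_SHIFT;

	/* Skip the final, partially covered page unless endbyte is the last
	 * byte of a page or the last byte of the file. */
	if ((endbyte & ~PAGE_MASK) != ~PAGE_MASK && endbyte != i_size - 1)
		end_index--;

	printf("endbyte=%lu -> invalidate pages [%lu, %lu]\n",
	       endbyte, start_index, end_index);
}

int main(void)
{
	unsigned long i_size = 10000;          /* file is 10000 bytes long */

	show(0, 8191, i_size);   /* ends exactly on a page boundary: [0, 1] */
	show(0, 6000, i_size);   /* mid-page end: last page is kept, [0, 0] */
	show(0, 9999, i_size);   /* end of file: tail page may go, [0, 2] */
	return 0;
}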
7753     diff --git a/mm/khugepaged.c b/mm/khugepaged.c
7754     index 2fe26634e1a2..29221602d802 100644
7755     --- a/mm/khugepaged.c
7756     +++ b/mm/khugepaged.c
7757     @@ -1679,10 +1679,14 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
7758     spin_unlock(&khugepaged_mm_lock);
7759    
7760     mm = mm_slot->mm;
7761     - down_read(&mm->mmap_sem);
7762     - if (unlikely(khugepaged_test_exit(mm)))
7763     - vma = NULL;
7764     - else
7765     + /*
7766     + * Don't wait for semaphore (to avoid long wait times). Just move to
7767     + * the next mm on the list.
7768     + */
7769     + vma = NULL;
7770     + if (unlikely(!down_read_trylock(&mm->mmap_sem)))
7771     + goto breakouterloop_mmap_sem;
7772     + if (likely(!khugepaged_test_exit(mm)))
7773     vma = find_vma(mm, khugepaged_scan.address);
7774    
7775     progress++;
7776     diff --git a/mm/mempolicy.c b/mm/mempolicy.c
7777     index a2af6d58a68f..2d3077ce50cd 100644
7778     --- a/mm/mempolicy.c
7779     +++ b/mm/mempolicy.c
7780     @@ -1262,6 +1262,7 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
7781     unsigned long maxnode)
7782     {
7783     unsigned long k;
7784     + unsigned long t;
7785     unsigned long nlongs;
7786     unsigned long endmask;
7787    
7788     @@ -1278,13 +1279,19 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
7789     else
7790     endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
7791    
7792     - /* When the user specified more nodes than supported just check
7793     - if the non supported part is all zero. */
7794     + /*
7795     + * When the user specified more nodes than supported just check
7796     + * if the non supported part is all zero.
7797     + *
7798     + * If maxnode have more longs than MAX_NUMNODES, check
7799     + * the bits in that area first. And then go through to
7800     + * check the rest bits which equal or bigger than MAX_NUMNODES.
7801     + * Otherwise, just check bits [MAX_NUMNODES, maxnode).
7802     + */
7803     if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
7804     if (nlongs > PAGE_SIZE/sizeof(long))
7805     return -EINVAL;
7806     for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
7807     - unsigned long t;
7808     if (get_user(t, nmask + k))
7809     return -EFAULT;
7810     if (k == nlongs - 1) {
7811     @@ -1297,6 +1304,16 @@ static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
7812     endmask = ~0UL;
7813     }
7814    
7815     + if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
7816     + unsigned long valid_mask = endmask;
7817     +
7818     + valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
7819     + if (get_user(t, nmask + nlongs - 1))
7820     + return -EFAULT;
7821     + if (t & valid_mask)
7822     + return -EINVAL;
7823     + }
7824     +
7825     if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
7826     return -EFAULT;
7827     nodes_addr(*nodes)[nlongs-1] &= endmask;
7828     @@ -1423,10 +1440,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
7829     goto out_put;
7830     }
7831    
7832     - if (!nodes_subset(*new, node_states[N_MEMORY])) {
7833     - err = -EINVAL;
7834     + task_nodes = cpuset_mems_allowed(current);
7835     + nodes_and(*new, *new, task_nodes);
7836     + if (nodes_empty(*new))
7837     + goto out_put;
7838     +
7839     + nodes_and(*new, *new, node_states[N_MEMORY]);
7840     + if (nodes_empty(*new))
7841     goto out_put;
7842     - }
7843    
7844     err = security_task_movememory(task);
7845     if (err)
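
The new get_nodes() check above rejects a request whose bitmap sets any bit at or above MAX_NUMNODES in the final long it copies, instead of silently ignoring those bits. The masking can be sketched in user space like this (max_nodes stands in for MAX_NUMNODES; constants chosen for illustration):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/*
 * Reject a bitmap word that sets bits at or above max_nodes inside the
 * last long handed in -- the same idea as the valid_mask check above.
 */
static int check_tail_bits(unsigned long last_word, unsigned long max_nodes,
			   unsigned long endmask)
{
	unsigned long valid_mask = endmask;

	valid_mask &= ~((1UL << (max_nodes % BITS_PER_LONG)) - 1);
	return (last_word & valid_mask) ? -1 : 0;
}

int main(void)
{
	/* Pretend the kernel supports 4 nodes and the caller passed 8 bits. */
	unsigned long max_nodes = 4, endmask = (1UL << 8) - 1;

	printf("%d\n", check_tail_bits(0x05, max_nodes, endmask)); /* ok */
	printf("%d\n", check_tail_bits(0x15, max_nodes, endmask)); /* bit 4 set: rejected */
	return 0;
}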
7846     diff --git a/mm/vmscan.c b/mm/vmscan.c
7847     index a8a3729bfaa9..b3f5e337b64a 100644
7848     --- a/mm/vmscan.c
7849     +++ b/mm/vmscan.c
7850     @@ -1436,14 +1436,24 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
7851    
7852     if (PageDirty(page)) {
7853     struct address_space *mapping;
7854     + bool migrate_dirty;
7855    
7856     /*
7857     * Only pages without mappings or that have a
7858     * ->migratepage callback are possible to migrate
7859     - * without blocking
7860     + * without blocking. However, we can be racing with
7861     + * truncation so it's necessary to lock the page
7862     + * to stabilise the mapping as truncation holds
7863     + * the page lock until after the page is removed
7864     + * from the page cache.
7865     */
7866     + if (!trylock_page(page))
7867     + return ret;
7868     +
7869     mapping = page_mapping(page);
7870     - if (mapping && !mapping->a_ops->migratepage)
7871     + migrate_dirty = mapping && mapping->a_ops->migratepage;
7872     + unlock_page(page);
7873     + if (!migrate_dirty)
7874     return ret;
7875     }
7876     }
7877     diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
7878     index 16eb99458df4..014a73b46064 100644
7879     --- a/net/bridge/netfilter/ebtables.c
7880     +++ b/net/bridge/netfilter/ebtables.c
7881     @@ -1819,10 +1819,14 @@ static int compat_table_info(const struct ebt_table_info *info,
7882     {
7883     unsigned int size = info->entries_size;
7884     const void *entries = info->entries;
7885     + int ret;
7886    
7887     newinfo->entries_size = size;
7888    
7889     - xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
7890     + ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
7891     + if (ret)
7892     + return ret;
7893     +
7894     return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
7895     entries, newinfo);
7896     }
7897     @@ -2257,7 +2261,9 @@ static int compat_do_replace(struct net *net, void __user *user,
7898    
7899     xt_compat_lock(NFPROTO_BRIDGE);
7900    
7901     - xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
7902     + ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
7903     + if (ret < 0)
7904     + goto out_unlock;
7905     ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
7906     if (ret < 0)
7907     goto out_unlock;
7908     diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
7909     index aa4c3b7f7da4..356ae7da4f16 100644
7910     --- a/net/ipv4/netfilter/arp_tables.c
7911     +++ b/net/ipv4/netfilter/arp_tables.c
7912     @@ -768,7 +768,9 @@ static int compat_table_info(const struct xt_table_info *info,
7913     memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
7914     newinfo->initial_entries = 0;
7915     loc_cpu_entry = info->entries;
7916     - xt_compat_init_offsets(NFPROTO_ARP, info->number);
7917     + ret = xt_compat_init_offsets(NFPROTO_ARP, info->number);
7918     + if (ret)
7919     + return ret;
7920     xt_entry_foreach(iter, loc_cpu_entry, info->size) {
7921     ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
7922     if (ret != 0)
7923     @@ -883,7 +885,7 @@ static int __do_replace(struct net *net, const char *name,
7924     struct arpt_entry *iter;
7925    
7926     ret = 0;
7927     - counters = vzalloc(num_counters * sizeof(struct xt_counters));
7928     + counters = xt_counters_alloc(num_counters);
7929     if (!counters) {
7930     ret = -ENOMEM;
7931     goto out;
7932     @@ -1157,7 +1159,7 @@ static int translate_compat_table(struct xt_table_info **pinfo,
7933     struct compat_arpt_entry *iter0;
7934     struct arpt_replace repl;
7935     unsigned int size;
7936     - int ret = 0;
7937     + int ret;
7938    
7939     info = *pinfo;
7940     entry0 = *pentry0;
7941     @@ -1166,7 +1168,9 @@ static int translate_compat_table(struct xt_table_info **pinfo,
7942    
7943     j = 0;
7944     xt_compat_lock(NFPROTO_ARP);
7945     - xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
7946     + ret = xt_compat_init_offsets(NFPROTO_ARP, compatr->num_entries);
7947     + if (ret)
7948     + goto out_unlock;
7949     /* Walk through entries, checking offsets. */
7950     xt_entry_foreach(iter0, entry0, compatr->size) {
7951     ret = check_compat_entry_size_and_hooks(iter0, info, &size,
7952     diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
7953     index cadb82a906b8..1a925f2394ad 100644
7954     --- a/net/ipv4/netfilter/ip_tables.c
7955     +++ b/net/ipv4/netfilter/ip_tables.c
7956     @@ -931,7 +931,9 @@ static int compat_table_info(const struct xt_table_info *info,
7957     memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
7958     newinfo->initial_entries = 0;
7959     loc_cpu_entry = info->entries;
7960     - xt_compat_init_offsets(AF_INET, info->number);
7961     + ret = xt_compat_init_offsets(AF_INET, info->number);
7962     + if (ret)
7963     + return ret;
7964     xt_entry_foreach(iter, loc_cpu_entry, info->size) {
7965     ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
7966     if (ret != 0)
7967     @@ -1044,7 +1046,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
7968     struct ipt_entry *iter;
7969    
7970     ret = 0;
7971     - counters = vzalloc(num_counters * sizeof(struct xt_counters));
7972     + counters = xt_counters_alloc(num_counters);
7973     if (!counters) {
7974     ret = -ENOMEM;
7975     goto out;
7976     @@ -1407,7 +1409,9 @@ translate_compat_table(struct net *net,
7977    
7978     j = 0;
7979     xt_compat_lock(AF_INET);
7980     - xt_compat_init_offsets(AF_INET, compatr->num_entries);
7981     + ret = xt_compat_init_offsets(AF_INET, compatr->num_entries);
7982     + if (ret)
7983     + goto out_unlock;
7984     /* Walk through entries, checking offsets. */
7985     xt_entry_foreach(iter0, entry0, compatr->size) {
7986     ret = check_compat_entry_size_and_hooks(iter0, info, &size,
7987     diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
7988     index 125fc1450b01..09f8773fd769 100644
7989     --- a/net/ipv4/tcp_nv.c
7990     +++ b/net/ipv4/tcp_nv.c
7991     @@ -327,7 +327,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
7992     */
7993     cwnd_by_slope = (u32)
7994     div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
7995     - (u64)(80000 * tp->mss_cache));
7996     + 80000ULL * tp->mss_cache);
7997     max_win = cwnd_by_slope + nv_pad;
7998    
7999     /* If cwnd > max_win, decrease cwnd
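
The tcp_nv hunk above replaces a cast of a 32-bit product with a 64-bit constant, so the multiplication itself is carried out in 64 bits instead of possibly wrapping first. A minimal userspace sketch of the difference (the MSS value is illustrative, not taken from the driver):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mss = 65000;                    /* hypothetical large MSS */

        /* (u64)(80000 * mss): the multiply is done in 32 bits and may wrap
         * before the cast widens the result. */
        uint64_t wrapped = (uint64_t)(80000u * mss);

        /* 80000ULL * mss: the ULL constant forces 64-bit arithmetic. */
        uint64_t exact = 80000ULL * mss;

        printf("32-bit multiply then cast: %llu\n", (unsigned long long)wrapped);
        printf("64-bit multiply:           %llu\n", (unsigned long long)exact);
        return 0;
    }
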
8000     diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
8001     index a0a31972fc75..c5fe42e6b7f7 100644
8002     --- a/net/ipv6/netfilter/ip6_tables.c
8003     +++ b/net/ipv6/netfilter/ip6_tables.c
8004     @@ -949,7 +949,9 @@ static int compat_table_info(const struct xt_table_info *info,
8005     memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
8006     newinfo->initial_entries = 0;
8007     loc_cpu_entry = info->entries;
8008     - xt_compat_init_offsets(AF_INET6, info->number);
8009     + ret = xt_compat_init_offsets(AF_INET6, info->number);
8010     + if (ret)
8011     + return ret;
8012     xt_entry_foreach(iter, loc_cpu_entry, info->size) {
8013     ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
8014     if (ret != 0)
8015     @@ -1063,7 +1065,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
8016     struct ip6t_entry *iter;
8017    
8018     ret = 0;
8019     - counters = vzalloc(num_counters * sizeof(struct xt_counters));
8020     + counters = xt_counters_alloc(num_counters);
8021     if (!counters) {
8022     ret = -ENOMEM;
8023     goto out;
8024     @@ -1415,7 +1417,7 @@ translate_compat_table(struct net *net,
8025     struct compat_ip6t_entry *iter0;
8026     struct ip6t_replace repl;
8027     unsigned int size;
8028     - int ret = 0;
8029     + int ret;
8030    
8031     info = *pinfo;
8032     entry0 = *pentry0;
8033     @@ -1424,7 +1426,9 @@ translate_compat_table(struct net *net,
8034    
8035     j = 0;
8036     xt_compat_lock(AF_INET6);
8037     - xt_compat_init_offsets(AF_INET6, compatr->num_entries);
8038     + ret = xt_compat_init_offsets(AF_INET6, compatr->num_entries);
8039     + if (ret)
8040     + goto out_unlock;
8041     /* Walk through entries, checking offsets. */
8042     xt_entry_foreach(iter0, entry0, compatr->size) {
8043     ret = check_compat_entry_size_and_hooks(iter0, info, &size,
8044     diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
8045     index b263bf3a19f7..64ec23388450 100644
8046     --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
8047     +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
8048     @@ -230,7 +230,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
8049    
8050     if ((unsigned int)end > IPV6_MAXPLEN) {
8051     pr_debug("offset is too large.\n");
8052     - return -1;
8053     + return -EINVAL;
8054     }
8055    
8056     ecn = ip6_frag_ecn(ipv6_hdr(skb));
8057     @@ -263,7 +263,8 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
8058     * this case. -DaveM
8059     */
8060     pr_debug("end of fragment not rounded to 8 bytes.\n");
8061     - return -1;
8062     + inet_frag_kill(&fq->q, &nf_frags);
8063     + return -EPROTO;
8064     }
8065     if (end > fq->q.len) {
8066     /* Some bits beyond end -> corruption. */
8067     @@ -357,7 +358,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
8068     discard_fq:
8069     inet_frag_kill(&fq->q, &nf_frags);
8070     err:
8071     - return -1;
8072     + return -EINVAL;
8073     }
8074    
8075     /*
8076     @@ -566,6 +567,7 @@ find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
8077    
8078     int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
8079     {
8080     + u16 savethdr = skb->transport_header;
8081     struct net_device *dev = skb->dev;
8082     int fhoff, nhoff, ret;
8083     struct frag_hdr *fhdr;
8084     @@ -599,8 +601,12 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
8085    
8086     spin_lock_bh(&fq->q.lock);
8087    
8088     - if (nf_ct_frag6_queue(fq, skb, fhdr, nhoff) < 0) {
8089     - ret = -EINVAL;
8090     + ret = nf_ct_frag6_queue(fq, skb, fhdr, nhoff);
8091     + if (ret < 0) {
8092     + if (ret == -EPROTO) {
8093     + skb->transport_header = savethdr;
8094     + ret = 0;
8095     + }
8096     goto out_unlock;
8097     }
8098    
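
The reassembly hunks above switch nf_ct_frag6_queue() from a bare -1 to distinct errno values, so the caller can tell a fragment that should be silently consumed (-EPROTO: fragment queue killed, transport header restored, treated as success) from a genuine error (-EINVAL). A small userspace sketch of that caller pattern, with stand-in names and thresholds:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for nf_ct_frag6_queue(): 0 on success, or a negative errno
     * describing *why* the fragment was rejected. */
    static int queue_fragment(int frag_len)
    {
        if (frag_len > 65535)
            return -EINVAL;        /* corrupt: drop and report an error */
        if (frag_len & 7)
            return -EPROTO;        /* malformed but harmless: just consume it */
        return 0;
    }

    static int gather(int frag_len)
    {
        int ret = queue_fragment(frag_len);

        if (ret < 0) {
            if (ret == -EPROTO) {
                /* restore any state touched so far and report success */
                ret = 0;
            }
            return ret;
        }
        /* ... normal reassembly continues here ... */
        return 0;
    }

    int main(void)
    {
        printf("%d %d %d\n", gather(1280), gather(1281), gather(70000));
        return 0;
    }
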
8099     diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
8100     index a450a1c8804b..8e054c63b54e 100644
8101     --- a/net/netfilter/x_tables.c
8102     +++ b/net/netfilter/x_tables.c
8103     @@ -40,6 +40,7 @@ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
8104     MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");
8105    
8106     #define XT_PCPU_BLOCK_SIZE 4096
8107     +#define XT_MAX_TABLE_SIZE (512 * 1024 * 1024)
8108    
8109     struct compat_delta {
8110     unsigned int offset; /* offset in kernel */
8111     @@ -554,14 +555,8 @@ int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta)
8112     {
8113     struct xt_af *xp = &xt[af];
8114    
8115     - if (!xp->compat_tab) {
8116     - if (!xp->number)
8117     - return -EINVAL;
8118     - xp->compat_tab = vmalloc(sizeof(struct compat_delta) * xp->number);
8119     - if (!xp->compat_tab)
8120     - return -ENOMEM;
8121     - xp->cur = 0;
8122     - }
8123     + if (WARN_ON(!xp->compat_tab))
8124     + return -ENOMEM;
8125    
8126     if (xp->cur >= xp->number)
8127     return -EINVAL;
8128     @@ -604,10 +599,28 @@ int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
8129     }
8130     EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
8131    
8132     -void xt_compat_init_offsets(u_int8_t af, unsigned int number)
8133     +int xt_compat_init_offsets(u8 af, unsigned int number)
8134     {
8135     + size_t mem;
8136     +
8137     + if (!number || number > (INT_MAX / sizeof(struct compat_delta)))
8138     + return -EINVAL;
8139     +
8140     + if (WARN_ON(xt[af].compat_tab))
8141     + return -EINVAL;
8142     +
8143     + mem = sizeof(struct compat_delta) * number;
8144     + if (mem > XT_MAX_TABLE_SIZE)
8145     + return -ENOMEM;
8146     +
8147     + xt[af].compat_tab = vmalloc(mem);
8148     + if (!xt[af].compat_tab)
8149     + return -ENOMEM;
8150     +
8151     xt[af].number = number;
8152     xt[af].cur = 0;
8153     +
8154     + return 0;
8155     }
8156     EXPORT_SYMBOL(xt_compat_init_offsets);
8157    
8158     @@ -806,6 +819,9 @@ EXPORT_SYMBOL(xt_check_entry_offsets);
8159     */
8160     unsigned int *xt_alloc_entry_offsets(unsigned int size)
8161     {
8162     + if (size > XT_MAX_TABLE_SIZE / sizeof(unsigned int))
8163     + return NULL;
8164     +
8165     return kvmalloc_array(size, sizeof(unsigned int), GFP_KERNEL | __GFP_ZERO);
8166    
8167     }
8168     @@ -1031,7 +1047,7 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
8169     struct xt_table_info *info = NULL;
8170     size_t sz = sizeof(*info) + size;
8171    
8172     - if (sz < sizeof(*info))
8173     + if (sz < sizeof(*info) || sz >= XT_MAX_TABLE_SIZE)
8174     return NULL;
8175    
8176     /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
8177     @@ -1186,6 +1202,21 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
8178     return 0;
8179     }
8180    
8181     +struct xt_counters *xt_counters_alloc(unsigned int counters)
8182     +{
8183     + struct xt_counters *mem;
8184     +
8185     + if (counters == 0 || counters > INT_MAX / sizeof(*mem))
8186     + return NULL;
8187     +
8188     + counters *= sizeof(*mem);
8189     + if (counters > XT_MAX_TABLE_SIZE)
8190     + return NULL;
8191     +
8192     + return vzalloc(counters);
8193     +}
8194     +EXPORT_SYMBOL(xt_counters_alloc);
8195     +
8196     struct xt_table_info *
8197     xt_replace_table(struct xt_table *table,
8198     unsigned int num_counters,
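
Taken together, the x_tables hunks above make every userspace-controlled allocation bounded: xt_compat_init_offsets() now allocates the compat table itself (and returns an error instead of being void), xt_counters_alloc() validates the counter count, and table sizes are capped at XT_MAX_TABLE_SIZE. The arp_tables/ip_tables/ip6_tables hunks earlier in this patch are the matching call-site updates. A minimal userspace sketch of the overflow-checked, capped allocation idea (the 512 MB cap mirrors the patch; the structure and function names are illustrative):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_TABLE_SIZE (512 * 1024 * 1024)   /* same cap as XT_MAX_TABLE_SIZE */

    struct counter { unsigned long long packets, bytes; };

    /* Reject zero, multiplication overflow and anything above the cap
     * before touching the allocator. */
    static void *counters_alloc(unsigned int n)
    {
        size_t bytes;

        if (n == 0 || n > INT_MAX / sizeof(struct counter))
            return NULL;

        bytes = (size_t)n * sizeof(struct counter);
        if (bytes > MAX_TABLE_SIZE)
            return NULL;

        return calloc(n, sizeof(struct counter));
    }

    int main(void)
    {
        void *ok  = counters_alloc(1024);
        void *bad = counters_alloc(0x40000000);  /* would exceed the cap */

        printf("ok=%p bad=%p\n", ok, bad);
        free(ok);
        return 0;
    }
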
8199     diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
8200     index bb5d6a058fb7..1141f08810b6 100644
8201     --- a/net/netfilter/xt_IDLETIMER.c
8202     +++ b/net/netfilter/xt_IDLETIMER.c
8203     @@ -256,6 +256,7 @@ static struct xt_target idletimer_tg __read_mostly = {
8204     .family = NFPROTO_UNSPEC,
8205     .target = idletimer_tg_target,
8206     .targetsize = sizeof(struct idletimer_tg_info),
8207     + .usersize = offsetof(struct idletimer_tg_info, timer),
8208     .checkentry = idletimer_tg_checkentry,
8209     .destroy = idletimer_tg_destroy,
8210     .me = THIS_MODULE,
8211     diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
8212     index 0858fe17e14a..2d1c5c169a26 100644
8213     --- a/net/netfilter/xt_LED.c
8214     +++ b/net/netfilter/xt_LED.c
8215     @@ -198,6 +198,7 @@ static struct xt_target led_tg_reg __read_mostly = {
8216     .family = NFPROTO_UNSPEC,
8217     .target = led_tg,
8218     .targetsize = sizeof(struct xt_led_info),
8219     + .usersize = offsetof(struct xt_led_info, internal_data),
8220     .checkentry = led_tg_check,
8221     .destroy = led_tg_destroy,
8222     .me = THIS_MODULE,
8223     diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
8224     index d27b5f1ea619..61403b77361c 100644
8225     --- a/net/netfilter/xt_limit.c
8226     +++ b/net/netfilter/xt_limit.c
8227     @@ -193,9 +193,8 @@ static struct xt_match limit_mt_reg __read_mostly = {
8228     .compatsize = sizeof(struct compat_xt_rateinfo),
8229     .compat_from_user = limit_mt_compat_from_user,
8230     .compat_to_user = limit_mt_compat_to_user,
8231     -#else
8232     - .usersize = offsetof(struct xt_rateinfo, prev),
8233     #endif
8234     + .usersize = offsetof(struct xt_rateinfo, prev),
8235     .me = THIS_MODULE,
8236     };
8237    
8238     diff --git a/net/netfilter/xt_nfacct.c b/net/netfilter/xt_nfacct.c
8239     index cc0518fe598e..6f92d25590a8 100644
8240     --- a/net/netfilter/xt_nfacct.c
8241     +++ b/net/netfilter/xt_nfacct.c
8242     @@ -62,6 +62,7 @@ static struct xt_match nfacct_mt_reg __read_mostly = {
8243     .match = nfacct_mt,
8244     .destroy = nfacct_mt_destroy,
8245     .matchsize = sizeof(struct xt_nfacct_match_info),
8246     + .usersize = offsetof(struct xt_nfacct_match_info, nfacct),
8247     .me = THIS_MODULE,
8248     };
8249    
8250     diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
8251     index 11de55e7a868..8710fdba2ae2 100644
8252     --- a/net/netfilter/xt_statistic.c
8253     +++ b/net/netfilter/xt_statistic.c
8254     @@ -84,6 +84,7 @@ static struct xt_match xt_statistic_mt_reg __read_mostly = {
8255     .checkentry = statistic_mt_check,
8256     .destroy = statistic_mt_destroy,
8257     .matchsize = sizeof(struct xt_statistic_info),
8258     + .usersize = offsetof(struct xt_statistic_info, master),
8259     .me = THIS_MODULE,
8260     };
8261    
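
The .usersize additions above (and the xt_limit entry moved outside the compat #ifdef) all follow the same pattern: the match/target private structure ends with kernel-internal state (a timer, LED trigger data, an nfacct handle, a master pointer), and setting .usersize to the offset of the first private member keeps x_tables from copying that state back to userspace or comparing it on table replace. A userspace sketch of the idea, using a made-up info structure:

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical match info: fields up to 'internal' come from userspace,
     * 'internal' is kernel-only state. */
    struct demo_match_info {
        unsigned int threshold;
        char         name[16];
        void        *internal;           /* never exposed to userspace */
    };

    /* Copy only the user-visible prefix and zero-fill the rest, the way the
     * x_tables usersize handling limits the copy back to userspace. */
    static void copy_to_user_buf(char *dst, const struct demo_match_info *src,
                                 size_t usersize)
    {
        memset(dst, 0, sizeof(*src));
        memcpy(dst, src, usersize);
    }

    int main(void)
    {
        struct demo_match_info info = { 42, "example", (void *)&info };
        char buf[sizeof(info)];

        copy_to_user_buf(buf, &info, offsetof(struct demo_match_info, internal));
        printf("usersize = %zu of %zu bytes copied\n",
               offsetof(struct demo_match_info, internal), sizeof(info));
        return 0;
    }
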
8262     diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
8263     index d558e882ca0c..285f8797c26a 100644
8264     --- a/net/openvswitch/conntrack.c
8265     +++ b/net/openvswitch/conntrack.c
8266     @@ -1097,6 +1097,36 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key,
8267     return 0;
8268     }
8269    
8270     +/* Trim the skb to the length specified by the IP/IPv6 header,
8271     + * removing any trailing lower-layer padding. This prepares the skb
8272     + * for higher-layer processing that assumes skb->len excludes padding
8273     + * (such as nf_ip_checksum). The caller needs to pull the skb to the
8274     + * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
8275     + */
8276     +static int ovs_skb_network_trim(struct sk_buff *skb)
8277     +{
8278     + unsigned int len;
8279     + int err;
8280     +
8281     + switch (skb->protocol) {
8282     + case htons(ETH_P_IP):
8283     + len = ntohs(ip_hdr(skb)->tot_len);
8284     + break;
8285     + case htons(ETH_P_IPV6):
8286     + len = sizeof(struct ipv6hdr)
8287     + + ntohs(ipv6_hdr(skb)->payload_len);
8288     + break;
8289     + default:
8290     + len = skb->len;
8291     + }
8292     +
8293     + err = pskb_trim_rcsum(skb, len);
8294     + if (err)
8295     + kfree_skb(skb);
8296     +
8297     + return err;
8298     +}
8299     +
8300     /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
8301     * value if 'skb' is freed.
8302     */
8303     @@ -1111,6 +1141,10 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
8304     nh_ofs = skb_network_offset(skb);
8305     skb_pull_rcsum(skb, nh_ofs);
8306    
8307     + err = ovs_skb_network_trim(skb);
8308     + if (err)
8309     + return err;
8310     +
8311     if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
8312     err = handle_fragments(net, key, info->zone.id, skb);
8313     if (err)
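
The new ovs_skb_network_trim() above derives the real L3 length from the IP/IPv6 header and trims anything beyond it (typically Ethernet padding) before conntrack processing. A userspace sketch of the length calculation alone, with a hand-rolled minimal IPv4 header (the real code naturally works on skbs and the kernel header structs):

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Just the fields needed to read the total length. */
    struct mini_iphdr {
        uint8_t  ver_ihl;
        uint8_t  tos;
        uint16_t tot_len;      /* network byte order */
    };

    /* Return how many bytes of the frame actually belong to the IP packet;
     * anything past that is lower-layer padding and should be trimmed. */
    static unsigned int ip_payload_len(const struct mini_iphdr *iph,
                                       unsigned int frame_len)
    {
        unsigned int len = ntohs(iph->tot_len);

        return len < frame_len ? len : frame_len;
    }

    int main(void)
    {
        /* 46-byte IP packet padded out to a 60-byte minimum Ethernet payload */
        struct mini_iphdr iph = { 0x45, 0, htons(46) };

        printf("trim frame from 60 to %u bytes\n", ip_payload_len(&iph, 60));
        return 0;
    }
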
8314     diff --git a/net/rds/ib.c b/net/rds/ib.c
8315     index a0954ace3774..c21eb4850b9d 100644
8316     --- a/net/rds/ib.c
8317     +++ b/net/rds/ib.c
8318     @@ -346,7 +346,8 @@ static int rds_ib_laddr_check(struct net *net, __be32 addr)
8319     /* Create a CMA ID and try to bind it. This catches both
8320     * IB and iWARP capable NICs.
8321     */
8322     - cm_id = rdma_create_id(&init_net, NULL, NULL, RDMA_PS_TCP, IB_QPT_RC);
8323     + cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,
8324     + NULL, RDMA_PS_TCP, IB_QPT_RC);
8325     if (IS_ERR(cm_id))
8326     return PTR_ERR(cm_id);
8327    
8328     diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
8329     index 59a51a56e7c8..0435c4167a1a 100644
8330     --- a/net/rxrpc/conn_event.c
8331     +++ b/net/rxrpc/conn_event.c
8332     @@ -404,6 +404,7 @@ void rxrpc_process_connection(struct work_struct *work)
8333     case -EKEYEXPIRED:
8334     case -EKEYREJECTED:
8335     goto protocol_error;
8336     + case -ENOMEM:
8337     case -EAGAIN:
8338     goto requeue_and_leave;
8339     case -ECONNABORTED:
8340     diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
8341     index c38b3a1de56c..77cb23c7bd0a 100644
8342     --- a/net/rxrpc/rxkad.c
8343     +++ b/net/rxrpc/rxkad.c
8344     @@ -773,8 +773,7 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
8345     {
8346     const struct rxrpc_key_token *token;
8347     struct rxkad_challenge challenge;
8348     - struct rxkad_response resp
8349     - __attribute__((aligned(8))); /* must be aligned for crypto */
8350     + struct rxkad_response *resp;
8351     struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
8352     const char *eproto;
8353     u32 version, nonce, min_level, abort_code;
8354     @@ -818,26 +817,29 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
8355     token = conn->params.key->payload.data[0];
8356    
8357     /* build the response packet */
8358     - memset(&resp, 0, sizeof(resp));
8359     -
8360     - resp.version = htonl(RXKAD_VERSION);
8361     - resp.encrypted.epoch = htonl(conn->proto.epoch);
8362     - resp.encrypted.cid = htonl(conn->proto.cid);
8363     - resp.encrypted.securityIndex = htonl(conn->security_ix);
8364     - resp.encrypted.inc_nonce = htonl(nonce + 1);
8365     - resp.encrypted.level = htonl(conn->params.security_level);
8366     - resp.kvno = htonl(token->kad->kvno);
8367     - resp.ticket_len = htonl(token->kad->ticket_len);
8368     -
8369     - resp.encrypted.call_id[0] = htonl(conn->channels[0].call_counter);
8370     - resp.encrypted.call_id[1] = htonl(conn->channels[1].call_counter);
8371     - resp.encrypted.call_id[2] = htonl(conn->channels[2].call_counter);
8372     - resp.encrypted.call_id[3] = htonl(conn->channels[3].call_counter);
8373     + resp = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
8374     + if (!resp)
8375     + return -ENOMEM;
8376     +
8377     + resp->version = htonl(RXKAD_VERSION);
8378     + resp->encrypted.epoch = htonl(conn->proto.epoch);
8379     + resp->encrypted.cid = htonl(conn->proto.cid);
8380     + resp->encrypted.securityIndex = htonl(conn->security_ix);
8381     + resp->encrypted.inc_nonce = htonl(nonce + 1);
8382     + resp->encrypted.level = htonl(conn->params.security_level);
8383     + resp->kvno = htonl(token->kad->kvno);
8384     + resp->ticket_len = htonl(token->kad->ticket_len);
8385     + resp->encrypted.call_id[0] = htonl(conn->channels[0].call_counter);
8386     + resp->encrypted.call_id[1] = htonl(conn->channels[1].call_counter);
8387     + resp->encrypted.call_id[2] = htonl(conn->channels[2].call_counter);
8388     + resp->encrypted.call_id[3] = htonl(conn->channels[3].call_counter);
8389    
8390     /* calculate the response checksum and then do the encryption */
8391     - rxkad_calc_response_checksum(&resp);
8392     - rxkad_encrypt_response(conn, &resp, token->kad);
8393     - return rxkad_send_response(conn, &sp->hdr, &resp, token->kad);
8394     + rxkad_calc_response_checksum(resp);
8395     + rxkad_encrypt_response(conn, resp, token->kad);
8396     + ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad);
8397     + kfree(resp);
8398     + return ret;
8399    
8400     protocol_error:
8401     trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
8402     @@ -1048,8 +1050,7 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
8403     struct sk_buff *skb,
8404     u32 *_abort_code)
8405     {
8406     - struct rxkad_response response
8407     - __attribute__((aligned(8))); /* must be aligned for crypto */
8408     + struct rxkad_response *response;
8409     struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
8410     struct rxrpc_crypt session_key;
8411     const char *eproto;
8412     @@ -1061,17 +1062,22 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
8413    
8414     _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
8415    
8416     + ret = -ENOMEM;
8417     + response = kzalloc(sizeof(struct rxkad_response), GFP_NOFS);
8418     + if (!response)
8419     + goto temporary_error;
8420     +
8421     eproto = tracepoint_string("rxkad_rsp_short");
8422     abort_code = RXKADPACKETSHORT;
8423     if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
8424     - &response, sizeof(response)) < 0)
8425     + response, sizeof(*response)) < 0)
8426     goto protocol_error;
8427     - if (!pskb_pull(skb, sizeof(response)))
8428     + if (!pskb_pull(skb, sizeof(*response)))
8429     BUG();
8430    
8431     - version = ntohl(response.version);
8432     - ticket_len = ntohl(response.ticket_len);
8433     - kvno = ntohl(response.kvno);
8434     + version = ntohl(response->version);
8435     + ticket_len = ntohl(response->ticket_len);
8436     + kvno = ntohl(response->kvno);
8437     _proto("Rx RESPONSE %%%u { v=%u kv=%u tl=%u }",
8438     sp->hdr.serial, version, kvno, ticket_len);
8439    
8440     @@ -1105,31 +1111,31 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
8441     ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key,
8442     &expiry, _abort_code);
8443     if (ret < 0)
8444     - goto temporary_error_free;
8445     + goto temporary_error_free_resp;
8446    
8447     /* use the session key from inside the ticket to decrypt the
8448     * response */
8449     - rxkad_decrypt_response(conn, &response, &session_key);
8450     + rxkad_decrypt_response(conn, response, &session_key);
8451    
8452     eproto = tracepoint_string("rxkad_rsp_param");
8453     abort_code = RXKADSEALEDINCON;
8454     - if (ntohl(response.encrypted.epoch) != conn->proto.epoch)
8455     + if (ntohl(response->encrypted.epoch) != conn->proto.epoch)
8456     goto protocol_error_free;
8457     - if (ntohl(response.encrypted.cid) != conn->proto.cid)
8458     + if (ntohl(response->encrypted.cid) != conn->proto.cid)
8459     goto protocol_error_free;
8460     - if (ntohl(response.encrypted.securityIndex) != conn->security_ix)
8461     + if (ntohl(response->encrypted.securityIndex) != conn->security_ix)
8462     goto protocol_error_free;
8463     - csum = response.encrypted.checksum;
8464     - response.encrypted.checksum = 0;
8465     - rxkad_calc_response_checksum(&response);
8466     + csum = response->encrypted.checksum;
8467     + response->encrypted.checksum = 0;
8468     + rxkad_calc_response_checksum(response);
8469     eproto = tracepoint_string("rxkad_rsp_csum");
8470     - if (response.encrypted.checksum != csum)
8471     + if (response->encrypted.checksum != csum)
8472     goto protocol_error_free;
8473    
8474     spin_lock(&conn->channel_lock);
8475     for (i = 0; i < RXRPC_MAXCALLS; i++) {
8476     struct rxrpc_call *call;
8477     - u32 call_id = ntohl(response.encrypted.call_id[i]);
8478     + u32 call_id = ntohl(response->encrypted.call_id[i]);
8479    
8480     eproto = tracepoint_string("rxkad_rsp_callid");
8481     if (call_id > INT_MAX)
8482     @@ -1153,12 +1159,12 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
8483    
8484     eproto = tracepoint_string("rxkad_rsp_seq");
8485     abort_code = RXKADOUTOFSEQUENCE;
8486     - if (ntohl(response.encrypted.inc_nonce) != conn->security_nonce + 1)
8487     + if (ntohl(response->encrypted.inc_nonce) != conn->security_nonce + 1)
8488     goto protocol_error_free;
8489    
8490     eproto = tracepoint_string("rxkad_rsp_level");
8491     abort_code = RXKADLEVELFAIL;
8492     - level = ntohl(response.encrypted.level);
8493     + level = ntohl(response->encrypted.level);
8494     if (level > RXRPC_SECURITY_ENCRYPT)
8495     goto protocol_error_free;
8496     conn->params.security_level = level;
8497     @@ -1168,9 +1174,10 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
8498     * as for a client connection */
8499     ret = rxrpc_get_server_data_key(conn, &session_key, expiry, kvno);
8500     if (ret < 0)
8501     - goto temporary_error_free;
8502     + goto temporary_error_free_ticket;
8503    
8504     kfree(ticket);
8505     + kfree(response);
8506     _leave(" = 0");
8507     return 0;
8508    
8509     @@ -1179,12 +1186,15 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
8510     protocol_error_free:
8511     kfree(ticket);
8512     protocol_error:
8513     + kfree(response);
8514     trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto);
8515     *_abort_code = abort_code;
8516     return -EPROTO;
8517    
8518     -temporary_error_free:
8519     +temporary_error_free_ticket:
8520     kfree(ticket);
8521     +temporary_error_free_resp:
8522     + kfree(response);
8523     temporary_error:
8524     /* Ignore the response packet if we got a temporary error such as
8525     * ENOMEM. We just want to send the challenge again. Note that we
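
The rxkad hunks above move struct rxkad_response off the kernel stack and onto the heap via kzalloc(), shrinking the stack frame and guaranteeing the alignment the crypto layer needs; the added -ENOMEM case in conn_event.c lets an allocation failure requeue the work instead of aborting the connection. A compressed userspace sketch of the allocate/use/free-on-every-path shape, with stand-in types:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct response { unsigned int version, kvno, ticket_len; };

    static int send_response(const struct response *r) { (void)r; return 0; }

    /* Heap-allocate the (potentially large) response instead of placing it
     * on the caller's stack; every exit path frees it. */
    static int respond_to_challenge(void)
    {
        struct response *resp;
        int ret;

        resp = calloc(1, sizeof(*resp));
        if (!resp)
            return -ENOMEM;     /* caller may requeue and retry on this */

        resp->version = 2;
        resp->kvno = 1;
        resp->ticket_len = 0;

        ret = send_response(resp);
        free(resp);
        return ret;
    }

    int main(void)
    {
        printf("respond_to_challenge() = %d\n", respond_to_challenge());
        return 0;
    }
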
8526     diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
8527     index 823a781ec89c..25e3602aa41f 100644
8528     --- a/net/sunrpc/xprtrdma/backchannel.c
8529     +++ b/net/sunrpc/xprtrdma/backchannel.c
8530     @@ -74,21 +74,13 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
8531     static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
8532     unsigned int count)
8533     {
8534     - struct rpcrdma_rep *rep;
8535     int rc = 0;
8536    
8537     while (count--) {
8538     - rep = rpcrdma_create_rep(r_xprt);
8539     - if (IS_ERR(rep)) {
8540     - pr_err("RPC: %s: reply buffer alloc failed\n",
8541     - __func__);
8542     - rc = PTR_ERR(rep);
8543     + rc = rpcrdma_create_rep(r_xprt);
8544     + if (rc)
8545     break;
8546     - }
8547     -
8548     - rpcrdma_recv_buffer_put(rep);
8549     }
8550     -
8551     return rc;
8552     }
8553    
8554     diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
8555     index 9bd04549a1ad..12b9a7e0b6d2 100644
8556     --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
8557     +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
8558     @@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp,
8559     head->arg.head[0].iov_len - info->ri_position;
8560     head->arg.head[0].iov_len = info->ri_position;
8561    
8562     - /* Read chunk may need XDR roundup (see RFC 5666, s. 3.7).
8563     + /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2).
8564     *
8565     - * NFSv2/3 write decoders need the length of the tail to
8566     - * contain the size of the roundup padding.
8567     + * If the client already rounded up the chunk length, the
8568     + * length does not change. Otherwise, the length of the page
8569     + * list is increased to include XDR round-up.
8570     + *
8571     + * Currently these chunks always start at page offset 0,
8572     + * thus the rounded-up length never crosses a page boundary.
8573     */
8574     - head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3);
8575     + info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2;
8576    
8577     head->arg.page_len = info->ri_chunklen;
8578     head->arg.len += info->ri_chunklen;
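
The svc_rdma hunk above replaces the old "add padding to the tail" logic with a straight XDR round-up of the chunk length: XDR_QUADLEN(n) << 2 rounds n up to the next multiple of 4 and is a no-op when the client already sent a rounded length. A short worked example (the macro below mirrors the kernel's XDR_QUADLEN definition):

    #include <stdio.h>

    #define XDR_QUADLEN(l)  (((l) + 3) >> 2)     /* length in 4-byte quads */

    int main(void)
    {
        unsigned int lens[] = { 10, 12, 4097 };

        for (unsigned int i = 0; i < 3; i++)
            printf("%u -> %u\n", lens[i], XDR_QUADLEN(lens[i]) << 2);
        /* prints: 10 -> 12, 12 -> 12 (already rounded), 4097 -> 4100 */
        return 0;
    }
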
8579     diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
8580     index 9e8e1de19b2e..97b9d4f671ac 100644
8581     --- a/net/sunrpc/xprtrdma/verbs.c
8582     +++ b/net/sunrpc/xprtrdma/verbs.c
8583     @@ -951,10 +951,17 @@ rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
8584     return req;
8585     }
8586    
8587     -struct rpcrdma_rep *
8588     +/**
8589     + * rpcrdma_create_rep - Allocate an rpcrdma_rep object
8590     + * @r_xprt: controlling transport
8591     + *
8592     + * Returns 0 on success or a negative errno on failure.
8593     + */
8594     +int
8595     rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
8596     {
8597     struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
8598     + struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
8599     struct rpcrdma_rep *rep;
8600     int rc;
8601    
8602     @@ -979,12 +986,18 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
8603     rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
8604     rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
8605     rep->rr_recv_wr.num_sge = 1;
8606     - return rep;
8607     +
8608     + spin_lock(&buf->rb_lock);
8609     + list_add(&rep->rr_list, &buf->rb_recv_bufs);
8610     + spin_unlock(&buf->rb_lock);
8611     + return 0;
8612    
8613     out_free:
8614     kfree(rep);
8615     out:
8616     - return ERR_PTR(rc);
8617     + dprintk("RPC: %s: reply buffer %d alloc failed\n",
8618     + __func__, rc);
8619     + return rc;
8620     }
8621    
8622     int
8623     @@ -1027,17 +1040,10 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
8624     }
8625    
8626     INIT_LIST_HEAD(&buf->rb_recv_bufs);
8627     - for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
8628     - struct rpcrdma_rep *rep;
8629     -
8630     - rep = rpcrdma_create_rep(r_xprt);
8631     - if (IS_ERR(rep)) {
8632     - dprintk("RPC: %s: reply buffer %d alloc failed\n",
8633     - __func__, i);
8634     - rc = PTR_ERR(rep);
8635     + for (i = 0; i <= buf->rb_max_requests; i++) {
8636     + rc = rpcrdma_create_rep(r_xprt);
8637     + if (rc)
8638     goto out;
8639     - }
8640     - list_add(&rep->rr_list, &buf->rb_recv_bufs);
8641     }
8642    
8643     return 0;
8644     diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
8645     index e26a97d2f922..fcb0b3227ee1 100644
8646     --- a/net/sunrpc/xprtrdma/xprt_rdma.h
8647     +++ b/net/sunrpc/xprtrdma/xprt_rdma.h
8648     @@ -550,8 +550,8 @@ int rpcrdma_ep_post_recv(struct rpcrdma_ia *, struct rpcrdma_rep *);
8649     * Buffer calls - xprtrdma/verbs.c
8650     */
8651     struct rpcrdma_req *rpcrdma_create_req(struct rpcrdma_xprt *);
8652     -struct rpcrdma_rep *rpcrdma_create_rep(struct rpcrdma_xprt *);
8653     void rpcrdma_destroy_req(struct rpcrdma_req *);
8654     +int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt);
8655     int rpcrdma_buffer_create(struct rpcrdma_xprt *);
8656     void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
8657    
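
The verbs.c, backchannel.c and xprt_rdma.h hunks above change rpcrdma_create_rep() from returning a pointer (or ERR_PTR) to returning an int and adding the new rep to the receive-buffer list itself, so both call sites collapse to a plain error check. A small userspace sketch of that constructor-registers-itself pattern, with invented names:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct rep  { struct rep *next; };
    struct pool { struct rep *recv_bufs; };

    /* Allocate one rep and link it into the pool; the caller only ever sees
     * 0 or a negative errno, never the object itself. */
    static int create_rep(struct pool *pool)
    {
        struct rep *rep = calloc(1, sizeof(*rep));

        if (!rep)
            return -ENOMEM;

        rep->next = pool->recv_bufs;
        pool->recv_bufs = rep;
        return 0;
    }

    int main(void)
    {
        struct pool pool = { NULL };
        int rc = 0;

        for (int i = 0; i < 8 && rc == 0; i++)
            rc = create_rep(&pool);

        printf("rc = %d\n", rc);
        return 0;
    }
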
8658     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
8659     index 8cb40f8ffa5b..30192abfdc3b 100644
8660     --- a/net/sunrpc/xprtsock.c
8661     +++ b/net/sunrpc/xprtsock.c
8662     @@ -1069,18 +1069,18 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
8663    
8664     /* Suck it into the iovec, verify checksum if not done by hw. */
8665     if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
8666     - __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
8667     spin_lock(&xprt->recv_lock);
8668     + __UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
8669     goto out_unpin;
8670     }
8671    
8672     - __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
8673    
8674     spin_lock_bh(&xprt->transport_lock);
8675     xprt_adjust_cwnd(xprt, task, copied);
8676     spin_unlock_bh(&xprt->transport_lock);
8677     spin_lock(&xprt->recv_lock);
8678     xprt_complete_rqst(task, copied);
8679     + __UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
8680     out_unpin:
8681     xprt_unpin_rqst(rovr);
8682     out_unlock:
8683     diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
8684     index 9b4a66e3363e..c1dc632d4ea4 100644
8685     --- a/samples/bpf/Makefile
8686     +++ b/samples/bpf/Makefile
8687     @@ -179,13 +179,16 @@ LLC ?= llc
8688     CLANG ?= clang
8689    
8690     # Trick to allow make to be run from this directory
8691     -all:
8692     +all: $(LIBBPF)
8693     $(MAKE) -C ../../ $(CURDIR)/
8694    
8695     clean:
8696     $(MAKE) -C ../../ M=$(CURDIR) clean
8697     @rm -f *~
8698    
8699     +$(LIBBPF): FORCE
8700     + $(MAKE) -C $(dir $@) $(notdir $@)
8701     +
8702     $(obj)/syscall_nrs.s: $(src)/syscall_nrs.c
8703     $(call if_changed_dep,cc_s_c)
8704    
8705     diff --git a/scripts/kconfig/expr.c b/scripts/kconfig/expr.c
8706     index cbf4996dd9c1..ed29bad1f03a 100644
8707     --- a/scripts/kconfig/expr.c
8708     +++ b/scripts/kconfig/expr.c
8709     @@ -113,7 +113,7 @@ void expr_free(struct expr *e)
8710     break;
8711     case E_NOT:
8712     expr_free(e->left.expr);
8713     - return;
8714     + break;
8715     case E_EQUAL:
8716     case E_GEQ:
8717     case E_GTH:
8718     diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
8719     index e9357931b47d..749c2bd5fc51 100644
8720     --- a/scripts/kconfig/menu.c
8721     +++ b/scripts/kconfig/menu.c
8722     @@ -372,6 +372,7 @@ void menu_finalize(struct menu *parent)
8723     menu->parent = parent;
8724     last_menu = menu;
8725     }
8726     + expr_free(basedep);
8727     if (last_menu) {
8728     parent->list = parent->next;
8729     parent->next = last_menu->next;
8730     diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
8731     index c8f396c3b190..20d9caa4be99 100644
8732     --- a/scripts/kconfig/zconf.y
8733     +++ b/scripts/kconfig/zconf.y
8734     @@ -108,7 +108,27 @@ static struct menu *current_menu, *current_entry;
8735     %%
8736     input: nl start | start;
8737    
8738     -start: mainmenu_stmt stmt_list | stmt_list;
8739     +start: mainmenu_stmt stmt_list | no_mainmenu_stmt stmt_list;
8740     +
8741     +/* mainmenu entry */
8742     +
8743     +mainmenu_stmt: T_MAINMENU prompt nl
8744     +{
8745     + menu_add_prompt(P_MENU, $2, NULL);
8746     +};
8747     +
8748     +/* Default main menu, if there's no mainmenu entry */
8749     +
8750     +no_mainmenu_stmt: /* empty */
8751     +{
8752     + /*
8753     + * Hack: Keep the main menu title on the heap so we can safely free it
8754     + * later regardless of whether it comes from the 'prompt' in
8755     + * mainmenu_stmt or here
8756     + */
8757     + menu_add_prompt(P_MENU, strdup("Linux Kernel Configuration"), NULL);
8758     +};
8759     +
8760    
8761     stmt_list:
8762     /* empty */
8763     @@ -351,13 +371,6 @@ if_block:
8764     | if_block choice_stmt
8765     ;
8766    
8767     -/* mainmenu entry */
8768     -
8769     -mainmenu_stmt: T_MAINMENU prompt nl
8770     -{
8771     - menu_add_prompt(P_MENU, $2, NULL);
8772     -};
8773     -
8774     /* menu entry */
8775    
8776     menu: T_MENU prompt T_EOL
8777     @@ -502,6 +515,7 @@ word_opt: /* empty */ { $$ = NULL; }
8778    
8779     void conf_parse(const char *name)
8780     {
8781     + const char *tmp;
8782     struct symbol *sym;
8783     int i;
8784    
8785     @@ -509,7 +523,6 @@ void conf_parse(const char *name)
8786    
8787     sym_init();
8788     _menu_init();
8789     - rootmenu.prompt = menu_add_prompt(P_MENU, "Linux Kernel Configuration", NULL);
8790    
8791     if (getenv("ZCONF_DEBUG"))
8792     zconfdebug = 1;
8793     @@ -519,8 +532,10 @@ void conf_parse(const char *name)
8794     if (!modules_sym)
8795     modules_sym = sym_find( "n" );
8796    
8797     + tmp = rootmenu.prompt->text;
8798     rootmenu.prompt->text = _(rootmenu.prompt->text);
8799     rootmenu.prompt->text = sym_expand_string_value(rootmenu.prompt->text);
8800     + free((char*)tmp);
8801    
8802     menu_finalize(&rootmenu);
8803     for_all_symbols(i, sym) {
8804     diff --git a/sound/pci/hda/Kconfig b/sound/pci/hda/Kconfig
8805     index 7f3b5ed81995..f7a492c382d9 100644
8806     --- a/sound/pci/hda/Kconfig
8807     +++ b/sound/pci/hda/Kconfig
8808     @@ -88,7 +88,6 @@ config SND_HDA_PATCH_LOADER
8809     config SND_HDA_CODEC_REALTEK
8810     tristate "Build Realtek HD-audio codec support"
8811     select SND_HDA_GENERIC
8812     - select INPUT
8813     help
8814     Say Y or M here to include Realtek HD-audio codec support in
8815     snd-hda-intel driver, such as ALC880.
8816     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8817     index b1b28c6928a7..590887d9b7a1 100644
8818     --- a/sound/pci/hda/patch_realtek.c
8819     +++ b/sound/pci/hda/patch_realtek.c
8820     @@ -3721,6 +3721,7 @@ static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
8821     }
8822     }
8823    
8824     +#if IS_REACHABLE(INPUT)
8825     static void gpio2_mic_hotkey_event(struct hda_codec *codec,
8826     struct hda_jack_callback *event)
8827     {
8828     @@ -3853,6 +3854,10 @@ static void alc233_fixup_lenovo_line2_mic_hotkey(struct hda_codec *codec,
8829     spec->kb_dev = NULL;
8830     }
8831     }
8832     +#else /* INPUT */
8833     +#define alc280_fixup_hp_gpio2_mic_hotkey NULL
8834     +#define alc233_fixup_lenovo_line2_mic_hotkey NULL
8835     +#endif /* INPUT */
8836    
8837     static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
8838     const struct hda_fixup *fix, int action)
8839     diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
8840     index 29a97d52e8ad..66d6c52e7761 100644
8841     --- a/sound/soc/au1x/ac97c.c
8842     +++ b/sound/soc/au1x/ac97c.c
8843     @@ -91,8 +91,8 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
8844     do {
8845     mutex_lock(&ctx->lock);
8846    
8847     - tmo = 5;
8848     - while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
8849     + tmo = 6;
8850     + while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
8851     udelay(21); /* wait an ac97 frame time */
8852     if (!tmo) {
8853     pr_debug("ac97rd timeout #1\n");
8854     @@ -105,7 +105,7 @@ static unsigned short au1xac97c_ac97_read(struct snd_ac97 *ac97,
8855     * poll, Forrest, poll...
8856     */
8857     tmo = 0x10000;
8858     - while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--)
8859     + while ((RD(ctx, AC97_STATUS) & STAT_CP) && --tmo)
8860     asm volatile ("nop");
8861     data = RD(ctx, AC97_CMDRESP);
8862    
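
The ac97c hunk above is the classic post- versus pre-decrement timeout bug: with while (busy && tmo--) the counter ends at -1 when the loop times out, so the following if (!tmo) never fires; --tmo (with the initial value bumped by one) leaves tmo == 0 exactly on timeout while polling the same number of times. A tiny demonstration:

    #include <stdio.h>

    /* A device that never becomes ready, to force the timeout path. */
    static int still_busy(void) { return 1; }

    int main(void)
    {
        int tmo;

        tmo = 5;
        while (still_busy() && tmo--)
            ;
        printf("post-decrement: tmo = %d, timeout %sdetected\n",
               tmo, !tmo ? "" : "NOT ");

        tmo = 6;                       /* one extra, as in the patch */
        while (still_busy() && --tmo)
            ;
        printf("pre-decrement:  tmo = %d, timeout %sdetected\n",
               tmo, !tmo ? "" : "NOT ");
        return 0;
    }
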
8863     diff --git a/sound/soc/rockchip/rk3399_gru_sound.c b/sound/soc/rockchip/rk3399_gru_sound.c
8864     index 0513fe480353..21ac8d6cce3a 100644
8865     --- a/sound/soc/rockchip/rk3399_gru_sound.c
8866     +++ b/sound/soc/rockchip/rk3399_gru_sound.c
8867     @@ -387,7 +387,8 @@ static const struct snd_soc_dai_link rockchip_dais[] = {
8868     [DAILINK_RT5514_DSP] = {
8869     .name = "RT5514 DSP",
8870     .stream_name = "Wake on Voice",
8871     - .codec_dai_name = "rt5514-dsp-cpu-dai",
8872     + .codec_name = "snd-soc-dummy",
8873     + .codec_dai_name = "snd-soc-dummy-dai",
8874     },
8875     };
8876    
8877     @@ -432,7 +433,18 @@ static int rockchip_sound_of_parse_dais(struct device *dev,
8878     if (index < 0)
8879     continue;
8880    
8881     - np_cpu = (index == DAILINK_CDNDP) ? np_cpu1 : np_cpu0;
8882     + switch (index) {
8883     + case DAILINK_CDNDP:
8884     + np_cpu = np_cpu1;
8885     + break;
8886     + case DAILINK_RT5514_DSP:
8887     + np_cpu = np_codec;
8888     + break;
8889     + default:
8890     + np_cpu = np_cpu0;
8891     + break;
8892     + }
8893     +
8894     if (!np_cpu) {
8895     dev_err(dev, "Missing 'rockchip,cpu' for %s\n",
8896     rockchip_dais[index].name);
8897     @@ -442,7 +454,8 @@ static int rockchip_sound_of_parse_dais(struct device *dev,
8898     dai = &card->dai_link[card->num_links++];
8899     *dai = rockchip_dais[index];
8900    
8901     - dai->codec_of_node = np_codec;
8902     + if (!dai->codec_name)
8903     + dai->codec_of_node = np_codec;
8904     dai->platform_of_node = np_cpu;
8905     dai->cpu_of_node = np_cpu;
8906     }
8907     diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
8908     index 4555304dc18e..f02448e86d38 100644
8909     --- a/tools/lib/bpf/Makefile
8910     +++ b/tools/lib/bpf/Makefile
8911     @@ -183,7 +183,7 @@ define do_install
8912     if [ ! -d '$(DESTDIR_SQ)$2' ]; then \
8913     $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$2'; \
8914     fi; \
8915     - $(INSTALL) $1 '$(DESTDIR_SQ)$2'
8916     + $(INSTALL) $1 $(if $3,-m $3,) '$(DESTDIR_SQ)$2'
8917     endef
8918    
8919     install_lib: all_cmd
8920     diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
8921     index 35f6dfcdc565..701d29c8364f 100644
8922     --- a/tools/lib/bpf/libbpf.c
8923     +++ b/tools/lib/bpf/libbpf.c
8924     @@ -661,6 +661,24 @@ bpf_object__init_maps(struct bpf_object *obj)
8925     return bpf_object__validate_maps(obj);
8926     }
8927    
8928     +static bool section_have_execinstr(struct bpf_object *obj, int idx)
8929     +{
8930     + Elf_Scn *scn;
8931     + GElf_Shdr sh;
8932     +
8933     + scn = elf_getscn(obj->efile.elf, idx);
8934     + if (!scn)
8935     + return false;
8936     +
8937     + if (gelf_getshdr(scn, &sh) != &sh)
8938     + return false;
8939     +
8940     + if (sh.sh_flags & SHF_EXECINSTR)
8941     + return true;
8942     +
8943     + return false;
8944     +}
8945     +
8946     static int bpf_object__elf_collect(struct bpf_object *obj)
8947     {
8948     Elf *elf = obj->efile.elf;
8949     @@ -742,6 +760,14 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
8950     } else if (sh.sh_type == SHT_REL) {
8951     void *reloc = obj->efile.reloc;
8952     int nr_reloc = obj->efile.nr_reloc + 1;
8953     + int sec = sh.sh_info; /* points to other section */
8954     +
8955     + /* Only do relo for section with exec instructions */
8956     + if (!section_have_execinstr(obj, sec)) {
8957     + pr_debug("skip relo %s(%d) for section(%d)\n",
8958     + name, idx, sec);
8959     + continue;
8960     + }
8961    
8962     reloc = realloc(reloc,
8963     sizeof(*obj->efile.reloc) * nr_reloc);
8964     diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
8965     index 7ce724fc0544..9a17bc27296e 100644
8966     --- a/tools/lib/traceevent/event-parse.c
8967     +++ b/tools/lib/traceevent/event-parse.c
8968     @@ -4949,21 +4949,22 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
8969     else
8970     ls = 2;
8971    
8972     - if (*(ptr+1) == 'F' || *(ptr+1) == 'f' ||
8973     - *(ptr+1) == 'S' || *(ptr+1) == 's') {
8974     + if (isalnum(ptr[1]))
8975     ptr++;
8976     +
8977     + if (*ptr == 'F' || *ptr == 'f' ||
8978     + *ptr == 'S' || *ptr == 's') {
8979     show_func = *ptr;
8980     - } else if (*(ptr+1) == 'M' || *(ptr+1) == 'm') {
8981     - print_mac_arg(s, *(ptr+1), data, size, event, arg);
8982     - ptr++;
8983     + } else if (*ptr == 'M' || *ptr == 'm') {
8984     + print_mac_arg(s, *ptr, data, size, event, arg);
8985     arg = arg->next;
8986     break;
8987     - } else if (*(ptr+1) == 'I' || *(ptr+1) == 'i') {
8988     + } else if (*ptr == 'I' || *ptr == 'i') {
8989     int n;
8990    
8991     - n = print_ip_arg(s, ptr+1, data, size, event, arg);
8992     + n = print_ip_arg(s, ptr, data, size, event, arg);
8993     if (n > 0) {
8994     - ptr += n;
8995     + ptr += n - 1;
8996     arg = arg->next;
8997     break;
8998     }
8999     diff --git a/tools/lib/traceevent/parse-filter.c b/tools/lib/traceevent/parse-filter.c
9000     index 7c214ceb9386..5e10ba796a6f 100644
9001     --- a/tools/lib/traceevent/parse-filter.c
9002     +++ b/tools/lib/traceevent/parse-filter.c
9003     @@ -1879,17 +1879,25 @@ static const char *get_field_str(struct filter_arg *arg, struct pevent_record *r
9004     struct pevent *pevent;
9005     unsigned long long addr;
9006     const char *val = NULL;
9007     + unsigned int size;
9008     char hex[64];
9009    
9010     /* If the field is not a string convert it */
9011     if (arg->str.field->flags & FIELD_IS_STRING) {
9012     val = record->data + arg->str.field->offset;
9013     + size = arg->str.field->size;
9014     +
9015     + if (arg->str.field->flags & FIELD_IS_DYNAMIC) {
9016     + addr = *(unsigned int *)val;
9017     + val = record->data + (addr & 0xffff);
9018     + size = addr >> 16;
9019     + }
9020    
9021     /*
9022     * We need to copy the data since we can't be sure the field
9023     * is null terminated.
9024     */
9025     - if (*(val + arg->str.field->size - 1)) {
9026     + if (*(val + size - 1)) {
9027     /* copy it */
9028     memcpy(arg->str.buffer, val, arg->str.field->size);
9029     /* the buffer is already NULL terminated */
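
The parse-filter.c hunk above handles dynamic (__data_loc) string fields: for those, the 4 bytes at the field offset are not the string itself but a descriptor whose low 16 bits give the offset of the real data within the record and whose high 16 bits give its length. A userspace sketch of that decoding (the record layout here is fabricated for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Fake trace record: 8 bytes of fixed fields, then the string data. */
        unsigned char record[32] = { 0 };
        const char *payload = "openat";

        memcpy(record + 8, payload, strlen(payload) + 1);

        /* __data_loc descriptor stored in the fixed part of the record:
         * low 16 bits = offset, high 16 bits = length (incl. NUL here). */
        uint32_t data_loc = (uint32_t)(strlen(payload) + 1) << 16 | 8;
        memcpy(record + 4, &data_loc, sizeof(data_loc));

        uint32_t desc;
        memcpy(&desc, record + 4, sizeof(desc));

        const char *val = (const char *)record + (desc & 0xffff);
        unsigned int size = desc >> 16;

        printf("offset=%u size=%u value=\"%s\"\n",
               (unsigned int)(desc & 0xffff), size, val);
        return 0;
    }
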
9030     diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c
9031     index 33027c5e6f92..c6b5204e0280 100644
9032     --- a/tools/perf/arch/x86/util/header.c
9033     +++ b/tools/perf/arch/x86/util/header.c
9034     @@ -70,7 +70,7 @@ get_cpuid_str(void)
9035     {
9036     char *buf = malloc(128);
9037    
9038     - if (__get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
9039     + if (buf && __get_cpuid(buf, 128, "%s-%u-%X$") < 0) {
9040     free(buf);
9041     return NULL;
9042     }
9043     diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
9044     index d00aac51130d..3479a1bc7caa 100644
9045     --- a/tools/perf/builtin-c2c.c
9046     +++ b/tools/perf/builtin-c2c.c
9047     @@ -2393,9 +2393,10 @@ static int setup_callchain(struct perf_evlist *evlist)
9048     enum perf_call_graph_mode mode = CALLCHAIN_NONE;
9049    
9050     if ((sample_type & PERF_SAMPLE_REGS_USER) &&
9051     - (sample_type & PERF_SAMPLE_STACK_USER))
9052     + (sample_type & PERF_SAMPLE_STACK_USER)) {
9053     mode = CALLCHAIN_DWARF;
9054     - else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
9055     + dwarf_callchain_users = true;
9056     + } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
9057     mode = CALLCHAIN_LBR;
9058     else if (sample_type & PERF_SAMPLE_CALLCHAIN)
9059     mode = CALLCHAIN_FP;
9060     diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
9061     index 1957abc1c8cf..b205c1340456 100644
9062     --- a/tools/perf/builtin-record.c
9063     +++ b/tools/perf/builtin-record.c
9064     @@ -1611,7 +1611,8 @@ static struct option __record_options[] = {
9065     OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
9066     &record.opts.sample_time_set,
9067     "Record the sample timestamps"),
9068     - OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
9069     + OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
9070     + "Record the sample period"),
9071     OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
9072     "don't sample"),
9073     OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
9074     diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
9075     index 183c3ed56e08..4ddb0726eebc 100644
9076     --- a/tools/perf/builtin-report.c
9077     +++ b/tools/perf/builtin-report.c
9078     @@ -328,9 +328,10 @@ static int report__setup_sample_type(struct report *rep)
9079    
9080     if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
9081     if ((sample_type & PERF_SAMPLE_REGS_USER) &&
9082     - (sample_type & PERF_SAMPLE_STACK_USER))
9083     + (sample_type & PERF_SAMPLE_STACK_USER)) {
9084     callchain_param.record_mode = CALLCHAIN_DWARF;
9085     - else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
9086     + dwarf_callchain_users = true;
9087     + } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
9088     callchain_param.record_mode = CALLCHAIN_LBR;
9089     else
9090     callchain_param.record_mode = CALLCHAIN_FP;
9091     diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
9092     index 0fe02758de7d..615fdc63452e 100644
9093     --- a/tools/perf/builtin-script.c
9094     +++ b/tools/perf/builtin-script.c
9095     @@ -2574,9 +2574,10 @@ static void script__setup_sample_type(struct perf_script *script)
9096    
9097     if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) {
9098     if ((sample_type & PERF_SAMPLE_REGS_USER) &&
9099     - (sample_type & PERF_SAMPLE_STACK_USER))
9100     + (sample_type & PERF_SAMPLE_STACK_USER)) {
9101     callchain_param.record_mode = CALLCHAIN_DWARF;
9102     - else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
9103     + dwarf_callchain_users = true;
9104     + } else if (sample_type & PERF_SAMPLE_BRANCH_STACK)
9105     callchain_param.record_mode = CALLCHAIN_LBR;
9106     else
9107     callchain_param.record_mode = CALLCHAIN_FP;
9108     diff --git a/tools/perf/perf.h b/tools/perf/perf.h
9109     index f75f3dec7485..55086389fc06 100644
9110     --- a/tools/perf/perf.h
9111     +++ b/tools/perf/perf.h
9112     @@ -50,6 +50,7 @@ struct record_opts {
9113     bool sample_time_set;
9114     bool sample_cpu;
9115     bool period;
9116     + bool period_set;
9117     bool running_time;
9118     bool full_auxtrace;
9119     bool auxtrace_snapshot_mode;
9120     diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
9121     index ac40e05bcab4..260418969120 100644
9122     --- a/tools/perf/tests/dwarf-unwind.c
9123     +++ b/tools/perf/tests/dwarf-unwind.c
9124     @@ -173,6 +173,7 @@ int test__dwarf_unwind(struct test *test __maybe_unused, int subtest __maybe_unu
9125     }
9126    
9127     callchain_param.record_mode = CALLCHAIN_DWARF;
9128     + dwarf_callchain_users = true;
9129    
9130     if (init_live_machine(machine)) {
9131     pr_err("Could not init machine\n");
9132     diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
9133     index 7a84d73324e3..a2f757da49d9 100755
9134     --- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
9135     +++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh
9136     @@ -22,10 +22,23 @@ trace_libc_inet_pton_backtrace() {
9137     expected[4]="rtt min.*"
9138     expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)"
9139     expected[6]=".*inet_pton[[:space:]]\($libc\)$"
9140     - expected[7]="getaddrinfo[[:space:]]\($libc\)$"
9141     - expected[8]=".*\(.*/bin/ping.*\)$"
9142     + case "$(uname -m)" in
9143     + s390x)
9144     + eventattr='call-graph=dwarf'
9145     + expected[7]="gaih_inet[[:space:]]\(inlined\)$"
9146     + expected[8]="__GI_getaddrinfo[[:space:]]\(inlined\)$"
9147     + expected[9]="main[[:space:]]\(.*/bin/ping.*\)$"
9148     + expected[10]="__libc_start_main[[:space:]]\($libc\)$"
9149     + expected[11]="_start[[:space:]]\(.*/bin/ping.*\)$"
9150     + ;;
9151     + *)
9152     + eventattr='max-stack=3'
9153     + expected[7]="getaddrinfo[[:space:]]\($libc\)$"
9154     + expected[8]=".*\(.*/bin/ping.*\)$"
9155     + ;;
9156     + esac
9157    
9158     - perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
9159     + perf trace --no-syscalls -e probe_libc:inet_pton/$eventattr/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do
9160     echo $line
9161     echo "$line" | egrep -q "${expected[$idx]}"
9162     if [ $? -ne 0 ] ; then
9163     @@ -33,7 +46,7 @@ trace_libc_inet_pton_backtrace() {
9164     exit 1
9165     fi
9166     let idx+=1
9167     - [ $idx -eq 9 ] && break
9168     + [ -z "${expected[$idx]}" ] && break
9169     done
9170     }
9171    
9172     diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
9173     index 6031933d811c..146683b1c28d 100644
9174     --- a/tools/perf/util/callchain.c
9175     +++ b/tools/perf/util/callchain.c
9176     @@ -37,6 +37,15 @@ struct callchain_param callchain_param = {
9177     CALLCHAIN_PARAM_DEFAULT
9178     };
9179    
9180     +/*
9181     + * Are there any events using DWARF callchains?
9182     + *
9183     + * I.e.
9184     + *
9185     + * -e cycles/call-graph=dwarf/
9186     + */
9187     +bool dwarf_callchain_users;
9188     +
9189     struct callchain_param callchain_param_default = {
9190     CALLCHAIN_PARAM_DEFAULT
9191     };
9192     @@ -265,6 +274,7 @@ int parse_callchain_record(const char *arg, struct callchain_param *param)
9193     ret = 0;
9194     param->record_mode = CALLCHAIN_DWARF;
9195     param->dump_size = default_stack_dump_size;
9196     + dwarf_callchain_users = true;
9197    
9198     tok = strtok_r(NULL, ",", &saveptr);
9199     if (tok) {
9200     diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
9201     index f967aa47d0a1..9ba5903c8d3e 100644
9202     --- a/tools/perf/util/callchain.h
9203     +++ b/tools/perf/util/callchain.h
9204     @@ -89,6 +89,8 @@ enum chain_value {
9205     CCVAL_COUNT,
9206     };
9207    
9208     +extern bool dwarf_callchain_users;
9209     +
9210     struct callchain_param {
9211     bool enabled;
9212     enum perf_call_graph_mode record_mode;
9213     diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
9214     index ac19130c14d8..226a9245d1db 100644
9215     --- a/tools/perf/util/evsel.c
9216     +++ b/tools/perf/util/evsel.c
9217     @@ -722,26 +722,28 @@ static void apply_config_terms(struct perf_evsel *evsel,
9218     struct perf_evsel_config_term *term;
9219     struct list_head *config_terms = &evsel->config_terms;
9220     struct perf_event_attr *attr = &evsel->attr;
9221     - struct callchain_param param;
9222     + /* callgraph default */
9223     + struct callchain_param param = {
9224     + .record_mode = callchain_param.record_mode,
9225     + };
9226     u32 dump_size = 0;
9227     int max_stack = 0;
9228     const char *callgraph_buf = NULL;
9229    
9230     - /* callgraph default */
9231     - param.record_mode = callchain_param.record_mode;
9232     -
9233     list_for_each_entry(term, config_terms, list) {
9234     switch (term->type) {
9235     case PERF_EVSEL__CONFIG_TERM_PERIOD:
9236     if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
9237     attr->sample_period = term->val.period;
9238     attr->freq = 0;
9239     + perf_evsel__reset_sample_bit(evsel, PERIOD);
9240     }
9241     break;
9242     case PERF_EVSEL__CONFIG_TERM_FREQ:
9243     if (!(term->weak && opts->user_freq != UINT_MAX)) {
9244     attr->sample_freq = term->val.freq;
9245     attr->freq = 1;
9246     + perf_evsel__set_sample_bit(evsel, PERIOD);
9247     }
9248     break;
9249     case PERF_EVSEL__CONFIG_TERM_TIME:
9250     @@ -943,9 +945,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
9251     if (target__has_cpu(&opts->target) || opts->sample_cpu)
9252     perf_evsel__set_sample_bit(evsel, CPU);
9253    
9254     - if (opts->period)
9255     - perf_evsel__set_sample_bit(evsel, PERIOD);
9256     -
9257     /*
9258     * When the user explicitly disabled time don't force it here.
9259     */
9260     @@ -1047,6 +1046,14 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts,
9261     apply_config_terms(evsel, opts);
9262    
9263     evsel->ignore_missing_thread = opts->ignore_missing_thread;
9264     +
9265     + /* The --period option takes the precedence. */
9266     + if (opts->period_set) {
9267     + if (opts->period)
9268     + perf_evsel__set_sample_bit(evsel, PERIOD);
9269     + else
9270     + perf_evsel__reset_sample_bit(evsel, PERIOD);
9271     + }
9272     }
9273    
9274     static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
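
The builtin-record.c, perf.h and evsel.c hunks above turn --period into a tri-state: period_set records whether --period/--no-period was given at all, per-event config terms may now set or clear the PERIOD sample bit themselves, and the explicit command-line choice is applied last so it wins. A small sketch of the unset/true/false option pattern, detached from perf's option parser:

    #include <stdbool.h>
    #include <stdio.h>

    struct opts {
        bool period;        /* value requested on the command line    */
        bool period_set;    /* was --period/--no-period given at all? */
    };

    /* Per-event default computed elsewhere (e.g. from config terms). */
    static bool apply(const struct opts *o, bool per_event_default)
    {
        bool sample_period = per_event_default;

        /* The explicit command-line option takes precedence, but only
         * if it was actually given. */
        if (o->period_set)
            sample_period = o->period;
        return sample_period;
    }

    int main(void)
    {
        struct opts unset = { false, false };
        struct opts yes   = { true,  true  };
        struct opts no    = { false, true  };

        printf("default kept: %d, forced on: %d, forced off: %d\n",
               apply(&unset, true), apply(&yes, false), apply(&no, true));
        return 0;
    }
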
9275     diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
9276     index 7a42f703e858..af873044d33a 100644
9277     --- a/tools/perf/util/unwind-libunwind-local.c
9278     +++ b/tools/perf/util/unwind-libunwind-local.c
9279     @@ -631,9 +631,8 @@ static unw_accessors_t accessors = {
9280    
9281     static int _unwind__prepare_access(struct thread *thread)
9282     {
9283     - if (callchain_param.record_mode != CALLCHAIN_DWARF)
9284     + if (!dwarf_callchain_users)
9285     return 0;
9286     -
9287     thread->addr_space = unw_create_addr_space(&accessors, 0);
9288     if (!thread->addr_space) {
9289     pr_err("unwind: Can't create unwind address space.\n");
9290     @@ -646,17 +645,15 @@ static int _unwind__prepare_access(struct thread *thread)
9291    
9292     static void _unwind__flush_access(struct thread *thread)
9293     {
9294     - if (callchain_param.record_mode != CALLCHAIN_DWARF)
9295     + if (!dwarf_callchain_users)
9296     return;
9297     -
9298     unw_flush_cache(thread->addr_space, 0, 0);
9299     }
9300    
9301     static void _unwind__finish_access(struct thread *thread)
9302     {
9303     - if (callchain_param.record_mode != CALLCHAIN_DWARF)
9304     + if (!dwarf_callchain_users)
9305     return;
9306     -
9307     unw_destroy_addr_space(thread->addr_space);
9308     }
9309    
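
The callchain and unwind hunks above replace checks of callchain_param.record_mode with a single global flag, dwarf_callchain_users, that is set whenever any event asks for DWARF call graphs (for example -e cycles/call-graph=dwarf/); the per-thread libunwind address space is then created only when at least one event actually needs it. A condensed userspace sketch of that flag-gated setup, with stub types in place of the perf and libunwind ones:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool dwarf_callchain_users;     /* set while parsing events */

    struct thread { void *addr_space; };

    static void parse_call_graph(const char *mode)
    {
        if (strcmp(mode, "dwarf") == 0)
            dwarf_callchain_users = true;
    }

    /* Only pay for the unwind address space when some event uses DWARF. */
    static void prepare_unwind(struct thread *t)
    {
        if (!dwarf_callchain_users)
            return;
        t->addr_space = (void *)0x1;   /* stands in for unw_create_addr_space() */
    }

    int main(void)
    {
        struct thread t = { NULL };

        parse_call_graph("lbr");
        prepare_unwind(&t);
        printf("after lbr:   addr_space=%p\n", t.addr_space);

        parse_call_graph("dwarf");
        prepare_unwind(&t);
        printf("after dwarf: addr_space=%p\n", t.addr_space);
        return 0;
    }
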
9310     diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
9311     index 50ce52d2013d..8b9470b5af6d 100644
9312     --- a/tools/testing/selftests/bpf/test_maps.c
9313     +++ b/tools/testing/selftests/bpf/test_maps.c
9314     @@ -463,7 +463,7 @@ static void test_devmap(int task, void *data)
9315     #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
9316     static void test_sockmap(int tasks, void *data)
9317     {
9318     - int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc;
9319     + int one = 1, map_fd_rx = 0, map_fd_tx = 0, map_fd_break, s, sc, rc;
9320     struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break;
9321     int ports[] = {50200, 50201, 50202, 50204};
9322     int err, i, fd, udp, sfd[6] = {0xdeadbeef};
9323     @@ -868,9 +868,12 @@ static void test_sockmap(int tasks, void *data)
9324     goto out_sockmap;
9325     }
9326    
9327     - /* Test map close sockets */
9328     - for (i = 0; i < 6; i++)
9329     + /* Test map close sockets and empty maps */
9330     + for (i = 0; i < 6; i++) {
9331     + bpf_map_delete_elem(map_fd_tx, &i);
9332     + bpf_map_delete_elem(map_fd_rx, &i);
9333     close(sfd[i]);
9334     + }
9335     close(fd);
9336     close(map_fd_rx);
9337     bpf_object__close(obj);
9338     @@ -881,8 +884,13 @@ static void test_sockmap(int tasks, void *data)
9339     printf("Failed to create sockmap '%i:%s'!\n", i, strerror(errno));
9340     exit(1);
9341     out_sockmap:
9342     - for (i = 0; i < 6; i++)
9343     + for (i = 0; i < 6; i++) {
9344     + if (map_fd_tx)
9345     + bpf_map_delete_elem(map_fd_tx, &i);
9346     + if (map_fd_rx)
9347     + bpf_map_delete_elem(map_fd_rx, &i);
9348     close(sfd[i]);
9349     + }
9350     close(fd);
9351     exit(1);
9352     }
9353     diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
9354     index 589d52b211b7..27a54a17da65 100644
9355     --- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
9356     +++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
9357     @@ -29,6 +29,12 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
9358     # filter by *, end match
9359     ftrace_filter_check 'schedule*' '^schedule.*$'
9360    
9361     +# filter by *mid*end
9362     +ftrace_filter_check '*aw*lock' '.*aw.*lock$'
9363     +
9364     +# filter by start*mid*
9365     +ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
9366     +
9367     # Advanced full-glob matching feature is recently supported.
9368     # Skip the tests if we are sure the kernel does not support it.
9369     if grep -q 'accepts: .* glob-matching-pattern' README ; then
9370     diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
9371     index bb16cf91f1b5..e297bd7a2e79 100644
9372     --- a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
9373     +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc
9374     @@ -12,8 +12,8 @@ case `uname -m` in
9375     *) OFFS=0;;
9376     esac
9377    
9378     -echo "Setup up to 256 kprobes"
9379     -grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
9380     +echo "Setup up kprobes on first 256 text symbols"
9381     +grep -i " t " /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \
9382     head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||:
9383    
9384     echo 1 > events/kprobes/enable
9385     diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c
9386     index 4a8217448f20..cad14cd0ea92 100644
9387     --- a/tools/testing/selftests/net/reuseport_bpf.c
9388     +++ b/tools/testing/selftests/net/reuseport_bpf.c
9389     @@ -21,6 +21,7 @@
9390     #include <sys/epoll.h>
9391     #include <sys/types.h>
9392     #include <sys/socket.h>
9393     +#include <sys/resource.h>
9394     #include <unistd.h>
9395    
9396     #ifndef ARRAY_SIZE
9397     @@ -190,11 +191,14 @@ static void send_from(struct test_params p, uint16_t sport, char *buf,
9398     struct sockaddr * const saddr = new_any_sockaddr(p.send_family, sport);
9399     struct sockaddr * const daddr =
9400     new_loopback_sockaddr(p.send_family, p.recv_port);
9401     - const int fd = socket(p.send_family, p.protocol, 0);
9402     + const int fd = socket(p.send_family, p.protocol, 0), one = 1;
9403    
9404     if (fd < 0)
9405     error(1, errno, "failed to create send socket");
9406    
9407     + if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)))
9408     + error(1, errno, "failed to set reuseaddr");
9409     +
9410     if (bind(fd, saddr, sockaddr_size()))
9411     error(1, errno, "failed to bind send socket");
9412    
9413     @@ -433,6 +437,21 @@ void enable_fastopen(void)
9414     }
9415     }
9416    
9417     +static struct rlimit rlim_old, rlim_new;
9418     +
9419     +static __attribute__((constructor)) void main_ctor(void)
9420     +{
9421     + getrlimit(RLIMIT_MEMLOCK, &rlim_old);
9422     + rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
9423     + rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
9424     + setrlimit(RLIMIT_MEMLOCK, &rlim_new);
9425     +}
9426     +
9427     +static __attribute__((destructor)) void main_dtor(void)
9428     +{
9429     + setrlimit(RLIMIT_MEMLOCK, &rlim_old);
9430     +}
9431     +
9432     int main(void)
9433     {
9434     fprintf(stderr, "---- IPv4 UDP ----\n");
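The constructor/destructor pair added above bumps RLIMIT_MEMLOCK by 1 MiB before main() runs, because the BPF programs and maps this test loads are charged against the caller's locked-memory limit, and it restores the old limit on exit. The patch itself ignores setrlimit() failures; a standalone variant of the same pattern with error reporting might look like the sketch below (the 1 MiB bump mirrors the test, the error handling and the rlim_saved guard are additions for illustration).

/* Sketch of the RLIMIT_MEMLOCK bump/restore pattern used above,
 * with error reporting added for illustration. */
#include <stdio.h>
#include <sys/resource.h>

static struct rlimit rlim_old;
static int rlim_saved;

static __attribute__((constructor)) void memlock_bump(void)
{
	struct rlimit rlim_new;

	if (getrlimit(RLIMIT_MEMLOCK, &rlim_old)) {
		perror("getrlimit(RLIMIT_MEMLOCK)");
		return;
	}
	rlim_saved = 1;
	rlim_new = rlim_old;
	rlim_new.rlim_cur += 1UL << 20;	/* +1 MiB, mirroring the test */
	rlim_new.rlim_max += 1UL << 20;	/* raising the hard limit needs privilege */
	if (setrlimit(RLIMIT_MEMLOCK, &rlim_new))
		perror("setrlimit(RLIMIT_MEMLOCK)");
}

static __attribute__((destructor)) void memlock_restore(void)
{
	/* Only restore a limit we actually saved. */
	if (rlim_saved && setrlimit(RLIMIT_MEMLOCK, &rlim_old))
		perror("setrlimit(RLIMIT_MEMLOCK) restore");
}

int main(void)
{
	/* BPF program/map setup that charges locked memory would go here. */
	return 0;
}
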
9435     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
9436     index d81af263f50b..4f35f0dfe681 100644
9437     --- a/virt/kvm/kvm_main.c
9438     +++ b/virt/kvm/kvm_main.c
9439     @@ -1434,7 +1434,8 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
9440    
9441     static int hva_to_pfn_remapped(struct vm_area_struct *vma,
9442     unsigned long addr, bool *async,
9443     - bool write_fault, kvm_pfn_t *p_pfn)
9444     + bool write_fault, bool *writable,
9445     + kvm_pfn_t *p_pfn)
9446     {
9447     unsigned long pfn;
9448     int r;
9449     @@ -1460,6 +1461,8 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
9450    
9451     }
9452    
9453     + if (writable)
9454     + *writable = true;
9455    
9456     /*
9457     * Get a reference here because callers of *hva_to_pfn* and
9458     @@ -1525,7 +1528,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
9459     if (vma == NULL)
9460     pfn = KVM_PFN_ERR_FAULT;
9461     else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
9462     - r = hva_to_pfn_remapped(vma, addr, async, write_fault, &pfn);
9463     + r = hva_to_pfn_remapped(vma, addr, async, write_fault, writable, &pfn);
9464     if (r == -EAGAIN)
9465     goto retry;
9466     if (r < 0)