Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.2/0104-4.2.5-all-fixes.patch



Revision 2710
Mon Nov 2 12:49:35 2015 UTC by niro
File size: 62340 byte(s)
-linux-4.2.5
1 niro 2710 diff --git a/Makefile b/Makefile
2     index a952801a6cd5..96076dcad18e 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 2
8     -SUBLEVEL = 4
9     +SUBLEVEL = 5
10     EXTRAVERSION =
11     NAME = Hurr durr I'ma sheep
12    
13     diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile
14     index 4418a5078833..c8643ac5db71 100644
15     --- a/arch/arm/mach-ux500/Makefile
16     +++ b/arch/arm/mach-ux500/Makefile
17     @@ -7,7 +7,7 @@ obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o
18     obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o
19     obj-$(CONFIG_MACH_MOP500) += board-mop500-regulators.o \
20     board-mop500-audio.o
21     -obj-$(CONFIG_SMP) += platsmp.o headsmp.o
22     +obj-$(CONFIG_SMP) += platsmp.o
23     obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
24     obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
25    
26     diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
27     index 16913800bbf9..ba708ce08616 100644
28     --- a/arch/arm/mach-ux500/cpu-db8500.c
29     +++ b/arch/arm/mach-ux500/cpu-db8500.c
30     @@ -154,7 +154,6 @@ static const char * stericsson_dt_platform_compat[] = {
31     };
32    
33     DT_MACHINE_START(U8500_DT, "ST-Ericsson Ux5x0 platform (Device Tree Support)")
34     - .smp = smp_ops(ux500_smp_ops),
35     .map_io = u8500_map_io,
36     .init_irq = ux500_init_irq,
37     /* we re-use nomadik timer here */
38     diff --git a/arch/arm/mach-ux500/headsmp.S b/arch/arm/mach-ux500/headsmp.S
39     deleted file mode 100644
40     index 9cdea049485d..000000000000
41     --- a/arch/arm/mach-ux500/headsmp.S
42     +++ /dev/null
43     @@ -1,37 +0,0 @@
44     -/*
45     - * Copyright (c) 2009 ST-Ericsson
46     - * This file is based ARM Realview platform
47     - * Copyright (c) 2003 ARM Limited
48     - * All Rights Reserved
49     - *
50     - * This program is free software; you can redistribute it and/or modify
51     - * it under the terms of the GNU General Public License version 2 as
52     - * published by the Free Software Foundation.
53     - */
54     -#include <linux/linkage.h>
55     -#include <linux/init.h>
56     -
57     -/*
58     - * U8500 specific entry point for secondary CPUs.
59     - */
60     -ENTRY(u8500_secondary_startup)
61     - mrc p15, 0, r0, c0, c0, 5
62     - and r0, r0, #15
63     - adr r4, 1f
64     - ldmia r4, {r5, r6}
65     - sub r4, r4, r5
66     - add r6, r6, r4
67     -pen: ldr r7, [r6]
68     - cmp r7, r0
69     - bne pen
70     -
71     - /*
72     - * we've been released from the holding pen: secondary_stack
73     - * should now contain the SVC stack for this core
74     - */
75     - b secondary_startup
76     -ENDPROC(u8500_secondary_startup)
77     -
78     - .align 2
79     -1: .long .
80     - .long pen_release
81     diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
82     index 62b1de922bd8..70766b963758 100644
83     --- a/arch/arm/mach-ux500/platsmp.c
84     +++ b/arch/arm/mach-ux500/platsmp.c
85     @@ -28,135 +28,81 @@
86     #include "db8500-regs.h"
87     #include "id.h"
88    
89     -static void __iomem *scu_base;
90     -static void __iomem *backupram;
91     -
92     -/* This is called from headsmp.S to wakeup the secondary core */
93     -extern void u8500_secondary_startup(void);
94     -
95     -/*
96     - * Write pen_release in a way that is guaranteed to be visible to all
97     - * observers, irrespective of whether they're taking part in coherency
98     - * or not. This is necessary for the hotplug code to work reliably.
99     - */
100     -static void write_pen_release(int val)
101     -{
102     - pen_release = val;
103     - smp_wmb();
104     - sync_cache_w(&pen_release);
105     -}
106     -
107     -static DEFINE_SPINLOCK(boot_lock);
108     -
109     -static void ux500_secondary_init(unsigned int cpu)
110     -{
111     - /*
112     - * let the primary processor know we're out of the
113     - * pen, then head off into the C entry point
114     - */
115     - write_pen_release(-1);
116     -
117     - /*
118     - * Synchronise with the boot thread.
119     - */
120     - spin_lock(&boot_lock);
121     - spin_unlock(&boot_lock);
122     -}
123     +/* Magic triggers in backup RAM */
124     +#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
125     +#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
126    
127     -static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
128     +static void wakeup_secondary(void)
129     {
130     - unsigned long timeout;
131     -
132     - /*
133     - * set synchronisation state between this boot processor
134     - * and the secondary one
135     - */
136     - spin_lock(&boot_lock);
137     -
138     - /*
139     - * The secondary processor is waiting to be released from
140     - * the holding pen - release it, then wait for it to flag
141     - * that it has been released by resetting pen_release.
142     - */
143     - write_pen_release(cpu_logical_map(cpu));
144     -
145     - arch_send_wakeup_ipi_mask(cpumask_of(cpu));
146     + struct device_node *np;
147     + static void __iomem *backupram;
148    
149     - timeout = jiffies + (1 * HZ);
150     - while (time_before(jiffies, timeout)) {
151     - if (pen_release == -1)
152     - break;
153     + np = of_find_compatible_node(NULL, NULL, "ste,dbx500-backupram");
154     + if (!np) {
155     + pr_err("No backupram base address\n");
156     + return;
157     + }
158     + backupram = of_iomap(np, 0);
159     + of_node_put(np);
160     + if (!backupram) {
161     + pr_err("No backupram remap\n");
162     + return;
163     }
164    
165     /*
166     - * now the secondary core is starting up let it run its
167     - * calibrations, then wait for it to finish
168     - */
169     - spin_unlock(&boot_lock);
170     -
171     - return pen_release != -1 ? -ENOSYS : 0;
172     -}
173     -
174     -static void __init wakeup_secondary(void)
175     -{
176     - /*
177     * write the address of secondary startup into the backup ram register
178     * at offset 0x1FF4, then write the magic number 0xA1FEED01 to the
179     * backup ram register at offset 0x1FF0, which is what boot rom code
180     - * is waiting for. This would wake up the secondary core from WFE
181     + * is waiting for. This will wake up the secondary core from WFE.
182     */
183     -#define UX500_CPU1_JUMPADDR_OFFSET 0x1FF4
184     - __raw_writel(virt_to_phys(u8500_secondary_startup),
185     - backupram + UX500_CPU1_JUMPADDR_OFFSET);
186     -
187     -#define UX500_CPU1_WAKEMAGIC_OFFSET 0x1FF0
188     - __raw_writel(0xA1FEED01,
189     - backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
190     + writel(virt_to_phys(secondary_startup),
191     + backupram + UX500_CPU1_JUMPADDR_OFFSET);
192     + writel(0xA1FEED01,
193     + backupram + UX500_CPU1_WAKEMAGIC_OFFSET);
194    
195     /* make sure write buffer is drained */
196     mb();
197     + iounmap(backupram);
198     }
199    
200     -/*
201     - * Initialise the CPU possible map early - this describes the CPUs
202     - * which may be present or become present in the system.
203     - */
204     -static void __init ux500_smp_init_cpus(void)
205     +static void __init ux500_smp_prepare_cpus(unsigned int max_cpus)
206     {
207     - unsigned int i, ncores;
208     struct device_node *np;
209     + static void __iomem *scu_base;
210     + unsigned int ncores;
211     + int i;
212    
213     np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-scu");
214     + if (!np) {
215     + pr_err("No SCU base address\n");
216     + return;
217     + }
218     scu_base = of_iomap(np, 0);
219     of_node_put(np);
220     - if (!scu_base)
221     + if (!scu_base) {
222     + pr_err("No SCU remap\n");
223     return;
224     - backupram = ioremap(U8500_BACKUPRAM0_BASE, SZ_8K);
225     - ncores = scu_get_core_count(scu_base);
226     -
227     - /* sanity check */
228     - if (ncores > nr_cpu_ids) {
229     - pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
230     - ncores, nr_cpu_ids);
231     - ncores = nr_cpu_ids;
232     }
233    
234     + scu_enable(scu_base);
235     + ncores = scu_get_core_count(scu_base);
236     for (i = 0; i < ncores; i++)
237     set_cpu_possible(i, true);
238     + iounmap(scu_base);
239     }
240    
241     -static void __init ux500_smp_prepare_cpus(unsigned int max_cpus)
242     +static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
243     {
244     - scu_enable(scu_base);
245     wakeup_secondary();
246     + arch_send_wakeup_ipi_mask(cpumask_of(cpu));
247     + return 0;
248     }
249    
250     struct smp_operations ux500_smp_ops __initdata = {
251     - .smp_init_cpus = ux500_smp_init_cpus,
252     .smp_prepare_cpus = ux500_smp_prepare_cpus,
253     - .smp_secondary_init = ux500_secondary_init,
254     .smp_boot_secondary = ux500_boot_secondary,
255     #ifdef CONFIG_HOTPLUG_CPU
256     .cpu_die = ux500_cpu_die,
257     #endif
258     };
259     +CPU_METHOD_OF_DECLARE(ux500_smp, "ste,dbx500-smp", &ux500_smp_ops);
260     diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
261     index 1fb6ad2789f1..65876eac0761 100644
262     --- a/arch/arm/mach-ux500/setup.h
263     +++ b/arch/arm/mach-ux500/setup.h
264     @@ -26,7 +26,6 @@ extern struct device *ux500_soc_device_init(const char *soc_id);
265    
266     extern void ux500_timer_init(void);
267    
268     -extern struct smp_operations ux500_smp_ops;
269     extern void ux500_cpu_die(unsigned int cpu);
270    
271     #endif /* __ASM_ARCH_SETUP_H */
272     diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
273     index 81151663ef38..3258174e6152 100644
274     --- a/arch/arm64/Makefile
275     +++ b/arch/arm64/Makefile
276     @@ -31,7 +31,7 @@ endif
277     CHECKFLAGS += -D__aarch64__
278    
279     ifeq ($(CONFIG_ARM64_ERRATUM_843419), y)
280     -CFLAGS_MODULE += -mcmodel=large
281     +KBUILD_CFLAGS_MODULE += -mcmodel=large
282     endif
283    
284     # Default value
285     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
286     index 56283f8a675c..cf7319422768 100644
287     --- a/arch/arm64/include/asm/pgtable.h
288     +++ b/arch/arm64/include/asm/pgtable.h
289     @@ -80,7 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
290     #define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
291     #define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDONLY | PTE_UXN)
292    
293     -#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
294     +#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
295     #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
296     #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
297     #define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
298     @@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
299     static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
300     {
301     const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
302     - PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK;
303     + PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
304     pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
305     return pte;
306     }
307     diff --git a/arch/sparc/crypto/aes_glue.c b/arch/sparc/crypto/aes_glue.c
308     index 2e48eb8813ff..c90930de76ba 100644
309     --- a/arch/sparc/crypto/aes_glue.c
310     +++ b/arch/sparc/crypto/aes_glue.c
311     @@ -433,6 +433,7 @@ static struct crypto_alg algs[] = { {
312     .blkcipher = {
313     .min_keysize = AES_MIN_KEY_SIZE,
314     .max_keysize = AES_MAX_KEY_SIZE,
315     + .ivsize = AES_BLOCK_SIZE,
316     .setkey = aes_set_key,
317     .encrypt = cbc_encrypt,
318     .decrypt = cbc_decrypt,
319     @@ -452,6 +453,7 @@ static struct crypto_alg algs[] = { {
320     .blkcipher = {
321     .min_keysize = AES_MIN_KEY_SIZE,
322     .max_keysize = AES_MAX_KEY_SIZE,
323     + .ivsize = AES_BLOCK_SIZE,
324     .setkey = aes_set_key,
325     .encrypt = ctr_crypt,
326     .decrypt = ctr_crypt,
327     diff --git a/arch/sparc/crypto/camellia_glue.c b/arch/sparc/crypto/camellia_glue.c
328     index 6bf2479a12fb..561a84d93cf6 100644
329     --- a/arch/sparc/crypto/camellia_glue.c
330     +++ b/arch/sparc/crypto/camellia_glue.c
331     @@ -274,6 +274,7 @@ static struct crypto_alg algs[] = { {
332     .blkcipher = {
333     .min_keysize = CAMELLIA_MIN_KEY_SIZE,
334     .max_keysize = CAMELLIA_MAX_KEY_SIZE,
335     + .ivsize = CAMELLIA_BLOCK_SIZE,
336     .setkey = camellia_set_key,
337     .encrypt = cbc_encrypt,
338     .decrypt = cbc_decrypt,
339     diff --git a/arch/sparc/crypto/des_glue.c b/arch/sparc/crypto/des_glue.c
340     index dd6a34fa6e19..61af794aa2d3 100644
341     --- a/arch/sparc/crypto/des_glue.c
342     +++ b/arch/sparc/crypto/des_glue.c
343     @@ -429,6 +429,7 @@ static struct crypto_alg algs[] = { {
344     .blkcipher = {
345     .min_keysize = DES_KEY_SIZE,
346     .max_keysize = DES_KEY_SIZE,
347     + .ivsize = DES_BLOCK_SIZE,
348     .setkey = des_set_key,
349     .encrypt = cbc_encrypt,
350     .decrypt = cbc_decrypt,
351     @@ -485,6 +486,7 @@ static struct crypto_alg algs[] = { {
352     .blkcipher = {
353     .min_keysize = DES3_EDE_KEY_SIZE,
354     .max_keysize = DES3_EDE_KEY_SIZE,
355     + .ivsize = DES3_EDE_BLOCK_SIZE,
356     .setkey = des3_ede_set_key,
357     .encrypt = cbc3_encrypt,
358     .decrypt = cbc3_decrypt,
359     diff --git a/arch/x86/crypto/camellia_aesni_avx_glue.c b/arch/x86/crypto/camellia_aesni_avx_glue.c
360     index 80a0e4389c9a..bacaa13acac5 100644
361     --- a/arch/x86/crypto/camellia_aesni_avx_glue.c
362     +++ b/arch/x86/crypto/camellia_aesni_avx_glue.c
363     @@ -554,6 +554,11 @@ static int __init camellia_aesni_init(void)
364     {
365     const char *feature_name;
366    
367     + if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
368     + pr_info("AVX or AES-NI instructions are not detected.\n");
369     + return -ENODEV;
370     + }
371     +
372     if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
373     pr_info("CPU feature '%s' is not supported.\n", feature_name);
374     return -ENODEV;
375     diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
376     index e7a4fde5d631..2392541a96e6 100644
377     --- a/arch/x86/kvm/emulate.c
378     +++ b/arch/x86/kvm/emulate.c
379     @@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
380     u64 val, cr0, cr4;
381     u32 base3;
382     u16 selector;
383     - int i;
384     + int i, r;
385    
386     for (i = 0; i < 16; i++)
387     *reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
388     @@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
389     dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
390     ctxt->ops->set_gdt(ctxt, &dt);
391    
392     + r = rsm_enter_protected_mode(ctxt, cr0, cr4);
393     + if (r != X86EMUL_CONTINUE)
394     + return r;
395     +
396     for (i = 0; i < 6; i++) {
397     - int r = rsm_load_seg_64(ctxt, smbase, i);
398     + r = rsm_load_seg_64(ctxt, smbase, i);
399     if (r != X86EMUL_CONTINUE)
400     return r;
401     }
402    
403     - return rsm_enter_protected_mode(ctxt, cr0, cr4);
404     + return X86EMUL_CONTINUE;
405     }
406    
407     static int em_rsm(struct x86_emulate_ctxt *ctxt)
408     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
409     index 32c6e6ac5964..373328b71599 100644
410     --- a/arch/x86/kvm/x86.c
411     +++ b/arch/x86/kvm/x86.c
412     @@ -6706,6 +6706,12 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
413     return 1;
414     }
415    
416     +static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
417     +{
418     + return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
419     + !vcpu->arch.apf.halted);
420     +}
421     +
422     static int vcpu_run(struct kvm_vcpu *vcpu)
423     {
424     int r;
425     @@ -6714,8 +6720,7 @@ static int vcpu_run(struct kvm_vcpu *vcpu)
426     vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
427    
428     for (;;) {
429     - if (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
430     - !vcpu->arch.apf.halted)
431     + if (kvm_vcpu_running(vcpu))
432     r = vcpu_enter_guest(vcpu);
433     else
434     r = vcpu_block(kvm, vcpu);
435     @@ -8011,19 +8016,36 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
436     kvm_mmu_invalidate_zap_all_pages(kvm);
437     }
438    
439     +static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
440     +{
441     + if (!list_empty_careful(&vcpu->async_pf.done))
442     + return true;
443     +
444     + if (kvm_apic_has_events(vcpu))
445     + return true;
446     +
447     + if (vcpu->arch.pv.pv_unhalted)
448     + return true;
449     +
450     + if (atomic_read(&vcpu->arch.nmi_queued))
451     + return true;
452     +
453     + if (test_bit(KVM_REQ_SMI, &vcpu->requests))
454     + return true;
455     +
456     + if (kvm_arch_interrupt_allowed(vcpu) &&
457     + kvm_cpu_has_interrupt(vcpu))
458     + return true;
459     +
460     + return false;
461     +}
462     +
463     int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
464     {
465     if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events)
466     kvm_x86_ops->check_nested_events(vcpu, false);
467    
468     - return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
469     - !vcpu->arch.apf.halted)
470     - || !list_empty_careful(&vcpu->async_pf.done)
471     - || kvm_apic_has_events(vcpu)
472     - || vcpu->arch.pv.pv_unhalted
473     - || atomic_read(&vcpu->arch.nmi_queued) ||
474     - (kvm_arch_interrupt_allowed(vcpu) &&
475     - kvm_cpu_has_interrupt(vcpu));
476     + return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
477     }
478    
479     int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
480     diff --git a/crypto/ahash.c b/crypto/ahash.c
481     index 8acb886032ae..9c1dc8d6106a 100644
482     --- a/crypto/ahash.c
483     +++ b/crypto/ahash.c
484     @@ -544,7 +544,8 @@ static int ahash_prepare_alg(struct ahash_alg *alg)
485     struct crypto_alg *base = &alg->halg.base;
486    
487     if (alg->halg.digestsize > PAGE_SIZE / 8 ||
488     - alg->halg.statesize > PAGE_SIZE / 8)
489     + alg->halg.statesize > PAGE_SIZE / 8 ||
490     + alg->halg.statesize == 0)
491     return -EINVAL;
492    
493     base->cra_type = &crypto_ahash_type;
494     diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
495     index bc67a93aa4f4..324bf35ec4dd 100644
496     --- a/drivers/block/rbd.c
497     +++ b/drivers/block/rbd.c
498     @@ -5201,7 +5201,6 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
499     out_err:
500     if (parent) {
501     rbd_dev_unparent(rbd_dev);
502     - kfree(rbd_dev->header_name);
503     rbd_dev_destroy(parent);
504     } else {
505     rbd_put_client(rbdc);
506     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
507     index b16b9256883e..4c4035fdeb6f 100644
508     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
509     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
510     @@ -76,8 +76,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
511     /* We borrow the event spin lock for protecting flip_status */
512     spin_lock_irqsave(&crtc->dev->event_lock, flags);
513    
514     - /* set the proper interrupt */
515     - amdgpu_irq_get(adev, &adev->pageflip_irq, work->crtc_id);
516     /* do the flip (mmio) */
517     adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
518     /* set the flip status */
519     diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
520     index 82e8d0730517..a1a35a5df8e7 100644
521     --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
522     +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
523     @@ -6185,6 +6185,11 @@ static int ci_dpm_late_init(void *handle)
524     if (!amdgpu_dpm)
525     return 0;
526    
527     + /* init the sysfs and debugfs files late */
528     + ret = amdgpu_pm_sysfs_init(adev);
529     + if (ret)
530     + return ret;
531     +
532     ret = ci_set_temperature_range(adev);
533     if (ret)
534     return ret;
535     @@ -6232,9 +6237,6 @@ static int ci_dpm_sw_init(void *handle)
536     adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
537     if (amdgpu_dpm == 1)
538     amdgpu_pm_print_power_states(adev);
539     - ret = amdgpu_pm_sysfs_init(adev);
540     - if (ret)
541     - goto dpm_failed;
542     mutex_unlock(&adev->pm.mutex);
543     DRM_INFO("amdgpu: dpm initialized\n");
544    
545     diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
546     index 341c56681841..519fa515c4d8 100644
547     --- a/drivers/gpu/drm/amd/amdgpu/cik.c
548     +++ b/drivers/gpu/drm/amd/amdgpu/cik.c
549     @@ -1565,6 +1565,9 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
550     int ret, i;
551     u16 tmp16;
552    
553     + if (pci_is_root_bus(adev->pdev->bus))
554     + return;
555     +
556     if (amdgpu_pcie_gen2 == 0)
557     return;
558    
559     diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
560     index ace870afc7d4..fd29c18fc14e 100644
561     --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
562     +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
563     @@ -596,6 +596,12 @@ static int cz_dpm_late_init(void *handle)
564     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
565    
566     if (amdgpu_dpm) {
567     + int ret;
568     + /* init the sysfs and debugfs files late */
569     + ret = amdgpu_pm_sysfs_init(adev);
570     + if (ret)
571     + return ret;
572     +
573     /* powerdown unused blocks for now */
574     cz_dpm_powergate_uvd(adev, true);
575     cz_dpm_powergate_vce(adev, true);
576     @@ -632,10 +638,6 @@ static int cz_dpm_sw_init(void *handle)
577     if (amdgpu_dpm == 1)
578     amdgpu_pm_print_power_states(adev);
579    
580     - ret = amdgpu_pm_sysfs_init(adev);
581     - if (ret)
582     - goto dpm_init_failed;
583     -
584     mutex_unlock(&adev->pm.mutex);
585     DRM_INFO("amdgpu: dpm initialized\n");
586    
587     diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
588     index e774a437dd65..ef36467c7e34 100644
589     --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
590     +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
591     @@ -233,6 +233,24 @@ static u32 dce_v10_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
592     return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
593     }
594    
595     +static void dce_v10_0_pageflip_interrupt_init(struct amdgpu_device *adev)
596     +{
597     + unsigned i;
598     +
599     + /* Enable pflip interrupts */
600     + for (i = 0; i < adev->mode_info.num_crtc; i++)
601     + amdgpu_irq_get(adev, &adev->pageflip_irq, i);
602     +}
603     +
604     +static void dce_v10_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
605     +{
606     + unsigned i;
607     +
608     + /* Disable pflip interrupts */
609     + for (i = 0; i < adev->mode_info.num_crtc; i++)
610     + amdgpu_irq_put(adev, &adev->pageflip_irq, i);
611     +}
612     +
613     /**
614     * dce_v10_0_page_flip - pageflip callback.
615     *
616     @@ -2641,9 +2659,10 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
617     dce_v10_0_vga_enable(crtc, true);
618     amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
619     dce_v10_0_vga_enable(crtc, false);
620     - /* Make sure VBLANK interrupt is still enabled */
621     + /* Make sure VBLANK and PFLIP interrupts are still enabled */
622     type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
623     amdgpu_irq_update(adev, &adev->crtc_irq, type);
624     + amdgpu_irq_update(adev, &adev->pageflip_irq, type);
625     drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
626     dce_v10_0_crtc_load_lut(crtc);
627     break;
628     @@ -3002,6 +3021,8 @@ static int dce_v10_0_hw_init(void *handle)
629     dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
630     }
631    
632     + dce_v10_0_pageflip_interrupt_init(adev);
633     +
634     return 0;
635     }
636    
637     @@ -3016,6 +3037,8 @@ static int dce_v10_0_hw_fini(void *handle)
638     dce_v10_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
639     }
640    
641     + dce_v10_0_pageflip_interrupt_fini(adev);
642     +
643     return 0;
644     }
645    
646     @@ -3027,6 +3050,8 @@ static int dce_v10_0_suspend(void *handle)
647    
648     dce_v10_0_hpd_fini(adev);
649    
650     + dce_v10_0_pageflip_interrupt_fini(adev);
651     +
652     return 0;
653     }
654    
655     @@ -3052,6 +3077,8 @@ static int dce_v10_0_resume(void *handle)
656     /* initialize hpd */
657     dce_v10_0_hpd_init(adev);
658    
659     + dce_v10_0_pageflip_interrupt_init(adev);
660     +
661     return 0;
662     }
663    
664     @@ -3346,7 +3373,6 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
665     spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
666    
667     drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
668     - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
669     queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
670    
671     return 0;
672     diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
673     index c4a21a7afd68..329bca0f1331 100644
674     --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
675     +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
676     @@ -233,6 +233,24 @@ static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
677     return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
678     }
679    
680     +static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev)
681     +{
682     + unsigned i;
683     +
684     + /* Enable pflip interrupts */
685     + for (i = 0; i < adev->mode_info.num_crtc; i++)
686     + amdgpu_irq_get(adev, &adev->pageflip_irq, i);
687     +}
688     +
689     +static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
690     +{
691     + unsigned i;
692     +
693     + /* Disable pflip interrupts */
694     + for (i = 0; i < adev->mode_info.num_crtc; i++)
695     + amdgpu_irq_put(adev, &adev->pageflip_irq, i);
696     +}
697     +
698     /**
699     * dce_v11_0_page_flip - pageflip callback.
700     *
701     @@ -2640,9 +2658,10 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
702     dce_v11_0_vga_enable(crtc, true);
703     amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
704     dce_v11_0_vga_enable(crtc, false);
705     - /* Make sure VBLANK interrupt is still enabled */
706     + /* Make sure VBLANK and PFLIP interrupts are still enabled */
707     type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
708     amdgpu_irq_update(adev, &adev->crtc_irq, type);
709     + amdgpu_irq_update(adev, &adev->pageflip_irq, type);
710     drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
711     dce_v11_0_crtc_load_lut(crtc);
712     break;
713     @@ -2888,7 +2907,7 @@ static int dce_v11_0_early_init(void *handle)
714    
715     switch (adev->asic_type) {
716     case CHIP_CARRIZO:
717     - adev->mode_info.num_crtc = 4;
718     + adev->mode_info.num_crtc = 3;
719     adev->mode_info.num_hpd = 6;
720     adev->mode_info.num_dig = 9;
721     break;
722     @@ -3000,6 +3019,8 @@ static int dce_v11_0_hw_init(void *handle)
723     dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
724     }
725    
726     + dce_v11_0_pageflip_interrupt_init(adev);
727     +
728     return 0;
729     }
730    
731     @@ -3014,6 +3035,8 @@ static int dce_v11_0_hw_fini(void *handle)
732     dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
733     }
734    
735     + dce_v11_0_pageflip_interrupt_fini(adev);
736     +
737     return 0;
738     }
739    
740     @@ -3025,6 +3048,8 @@ static int dce_v11_0_suspend(void *handle)
741    
742     dce_v11_0_hpd_fini(adev);
743    
744     + dce_v11_0_pageflip_interrupt_fini(adev);
745     +
746     return 0;
747     }
748    
749     @@ -3051,6 +3076,8 @@ static int dce_v11_0_resume(void *handle)
750     /* initialize hpd */
751     dce_v11_0_hpd_init(adev);
752    
753     + dce_v11_0_pageflip_interrupt_init(adev);
754     +
755     return 0;
756     }
757    
758     @@ -3345,7 +3372,6 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
759     spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
760    
761     drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
762     - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
763     queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
764    
765     return 0;
766     diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
767     index cc050a329c49..937879ed86bc 100644
768     --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
769     +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
770     @@ -204,6 +204,24 @@ static u32 dce_v8_0_vblank_get_counter(struct amdgpu_device *adev, int crtc)
771     return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
772     }
773    
774     +static void dce_v8_0_pageflip_interrupt_init(struct amdgpu_device *adev)
775     +{
776     + unsigned i;
777     +
778     + /* Enable pflip interrupts */
779     + for (i = 0; i < adev->mode_info.num_crtc; i++)
780     + amdgpu_irq_get(adev, &adev->pageflip_irq, i);
781     +}
782     +
783     +static void dce_v8_0_pageflip_interrupt_fini(struct amdgpu_device *adev)
784     +{
785     + unsigned i;
786     +
787     + /* Disable pflip interrupts */
788     + for (i = 0; i < adev->mode_info.num_crtc; i++)
789     + amdgpu_irq_put(adev, &adev->pageflip_irq, i);
790     +}
791     +
792     /**
793     * dce_v8_0_page_flip - pageflip callback.
794     *
795     @@ -2575,9 +2593,10 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
796     dce_v8_0_vga_enable(crtc, true);
797     amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
798     dce_v8_0_vga_enable(crtc, false);
799     - /* Make sure VBLANK interrupt is still enabled */
800     + /* Make sure VBLANK and PFLIP interrupts are still enabled */
801     type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
802     amdgpu_irq_update(adev, &adev->crtc_irq, type);
803     + amdgpu_irq_update(adev, &adev->pageflip_irq, type);
804     drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
805     dce_v8_0_crtc_load_lut(crtc);
806     break;
807     @@ -2933,6 +2952,8 @@ static int dce_v8_0_hw_init(void *handle)
808     dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
809     }
810    
811     + dce_v8_0_pageflip_interrupt_init(adev);
812     +
813     return 0;
814     }
815    
816     @@ -2947,6 +2968,8 @@ static int dce_v8_0_hw_fini(void *handle)
817     dce_v8_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false);
818     }
819    
820     + dce_v8_0_pageflip_interrupt_fini(adev);
821     +
822     return 0;
823     }
824    
825     @@ -2958,6 +2981,8 @@ static int dce_v8_0_suspend(void *handle)
826    
827     dce_v8_0_hpd_fini(adev);
828    
829     + dce_v8_0_pageflip_interrupt_fini(adev);
830     +
831     return 0;
832     }
833    
834     @@ -2981,6 +3006,8 @@ static int dce_v8_0_resume(void *handle)
835     /* initialize hpd */
836     dce_v8_0_hpd_init(adev);
837    
838     + dce_v8_0_pageflip_interrupt_init(adev);
839     +
840     return 0;
841     }
842    
843     @@ -3376,7 +3403,6 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
844     spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
845    
846     drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id);
847     - amdgpu_irq_put(adev, &adev->pageflip_irq, crtc_id);
848     queue_work(amdgpu_crtc->pflip_queue, &works->unpin_work);
849    
850     return 0;
851     diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
852     index 94ec04a9c4d5..9745ed3a9aef 100644
853     --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
854     +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
855     @@ -2995,6 +2995,12 @@ static int kv_dpm_late_init(void *handle)
856     {
857     /* powerdown unused blocks for now */
858     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
859     + int ret;
860     +
861     + /* init the sysfs and debugfs files late */
862     + ret = amdgpu_pm_sysfs_init(adev);
863     + if (ret)
864     + return ret;
865    
866     kv_dpm_powergate_acp(adev, true);
867     kv_dpm_powergate_samu(adev, true);
868     @@ -3038,9 +3044,6 @@ static int kv_dpm_sw_init(void *handle)
869     adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
870     if (amdgpu_dpm == 1)
871     amdgpu_pm_print_power_states(adev);
872     - ret = amdgpu_pm_sysfs_init(adev);
873     - if (ret)
874     - goto dpm_failed;
875     mutex_unlock(&adev->pm.mutex);
876     DRM_INFO("amdgpu: dpm initialized\n");
877    
878     diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
879     index 4f58a1e18de6..9ffa56cebdbc 100644
880     --- a/drivers/gpu/drm/amd/amdgpu/vi.c
881     +++ b/drivers/gpu/drm/amd/amdgpu/vi.c
882     @@ -968,6 +968,9 @@ static void vi_pcie_gen3_enable(struct amdgpu_device *adev)
883     u32 mask;
884     int ret;
885    
886     + if (pci_is_root_bus(adev->pdev->bus))
887     + return;
888     +
889     if (amdgpu_pcie_gen2 == 0)
890     return;
891    
892     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
893     index 969e7898a7ed..27a2426c3daa 100644
894     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
895     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
896     @@ -2789,12 +2789,13 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
897     if (msgs[num - 1].flags & I2C_M_RD)
898     reading = true;
899    
900     - if (!reading) {
901     + if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
902     DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
903     ret = -EIO;
904     goto out;
905     }
906    
907     + memset(&msg, 0, sizeof(msg));
908     msg.req_type = DP_REMOTE_I2C_READ;
909     msg.u.i2c_read.num_transactions = num - 1;
910     msg.u.i2c_read.port_number = port->port_num;
911     diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
912     index 0f6cd33b531f..684bd4a13843 100644
913     --- a/drivers/gpu/drm/drm_sysfs.c
914     +++ b/drivers/gpu/drm/drm_sysfs.c
915     @@ -235,18 +235,12 @@ static ssize_t dpms_show(struct device *device,
916     char *buf)
917     {
918     struct drm_connector *connector = to_drm_connector(device);
919     - struct drm_device *dev = connector->dev;
920     - uint64_t dpms_status;
921     - int ret;
922     + int dpms;
923    
924     - ret = drm_object_property_get_value(&connector->base,
925     - dev->mode_config.dpms_property,
926     - &dpms_status);
927     - if (ret)
928     - return 0;
929     + dpms = READ_ONCE(connector->dpms);
930    
931     return snprintf(buf, PAGE_SIZE, "%s\n",
932     - drm_get_dpms_name((int)dpms_status));
933     + drm_get_dpms_name(dpms));
934     }
935    
936     static ssize_t enabled_show(struct device *device,
937     diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
938     index 6751553abe4a..567791b27d6d 100644
939     --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
940     +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
941     @@ -178,8 +178,30 @@ nouveau_fbcon_sync(struct fb_info *info)
942     return 0;
943     }
944    
945     +static int
946     +nouveau_fbcon_open(struct fb_info *info, int user)
947     +{
948     + struct nouveau_fbdev *fbcon = info->par;
949     + struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
950     + int ret = pm_runtime_get_sync(drm->dev->dev);
951     + if (ret < 0 && ret != -EACCES)
952     + return ret;
953     + return 0;
954     +}
955     +
956     +static int
957     +nouveau_fbcon_release(struct fb_info *info, int user)
958     +{
959     + struct nouveau_fbdev *fbcon = info->par;
960     + struct nouveau_drm *drm = nouveau_drm(fbcon->dev);
961     + pm_runtime_put(drm->dev->dev);
962     + return 0;
963     +}
964     +
965     static struct fb_ops nouveau_fbcon_ops = {
966     .owner = THIS_MODULE,
967     + .fb_open = nouveau_fbcon_open,
968     + .fb_release = nouveau_fbcon_release,
969     .fb_check_var = drm_fb_helper_check_var,
970     .fb_set_par = drm_fb_helper_set_par,
971     .fb_fillrect = nouveau_fbcon_fillrect,
972     @@ -195,6 +217,8 @@ static struct fb_ops nouveau_fbcon_ops = {
973    
974     static struct fb_ops nouveau_fbcon_sw_ops = {
975     .owner = THIS_MODULE,
976     + .fb_open = nouveau_fbcon_open,
977     + .fb_release = nouveau_fbcon_release,
978     .fb_check_var = drm_fb_helper_check_var,
979     .fb_set_par = drm_fb_helper_set_par,
980     .fb_fillrect = cfb_fillrect,
981     diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
982     index 6b6e57e8c2d6..847a902e7385 100644
983     --- a/drivers/gpu/drm/qxl/qxl_fb.c
984     +++ b/drivers/gpu/drm/qxl/qxl_fb.c
985     @@ -144,14 +144,17 @@ static void qxl_dirty_update(struct qxl_fbdev *qfbdev,
986    
987     spin_lock_irqsave(&qfbdev->dirty.lock, flags);
988    
989     - if (qfbdev->dirty.y1 < y)
990     - y = qfbdev->dirty.y1;
991     - if (qfbdev->dirty.y2 > y2)
992     - y2 = qfbdev->dirty.y2;
993     - if (qfbdev->dirty.x1 < x)
994     - x = qfbdev->dirty.x1;
995     - if (qfbdev->dirty.x2 > x2)
996     - x2 = qfbdev->dirty.x2;
997     + if ((qfbdev->dirty.y2 - qfbdev->dirty.y1) &&
998     + (qfbdev->dirty.x2 - qfbdev->dirty.x1)) {
999     + if (qfbdev->dirty.y1 < y)
1000     + y = qfbdev->dirty.y1;
1001     + if (qfbdev->dirty.y2 > y2)
1002     + y2 = qfbdev->dirty.y2;
1003     + if (qfbdev->dirty.x1 < x)
1004     + x = qfbdev->dirty.x1;
1005     + if (qfbdev->dirty.x2 > x2)
1006     + x2 = qfbdev->dirty.x2;
1007     + }
1008    
1009     qfbdev->dirty.x1 = x;
1010     qfbdev->dirty.x2 = x2;
1011     diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
1012     index d2e9e9efc159..6743174acdbc 100644
1013     --- a/drivers/gpu/drm/radeon/radeon_display.c
1014     +++ b/drivers/gpu/drm/radeon/radeon_display.c
1015     @@ -1633,18 +1633,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
1016     radeon_fbdev_init(rdev);
1017     drm_kms_helper_poll_init(rdev->ddev);
1018    
1019     - if (rdev->pm.dpm_enabled) {
1020     - /* do dpm late init */
1021     - ret = radeon_pm_late_init(rdev);
1022     - if (ret) {
1023     - rdev->pm.dpm_enabled = false;
1024     - DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1025     - }
1026     - /* set the dpm state for PX since there won't be
1027     - * a modeset to call this.
1028     - */
1029     - radeon_pm_compute_clocks(rdev);
1030     - }
1031     + /* do pm late init */
1032     + ret = radeon_pm_late_init(rdev);
1033    
1034     return 0;
1035     }
1036     diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
1037     index 257b10be5cda..42986130cc63 100644
1038     --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
1039     +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
1040     @@ -283,6 +283,7 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
1041     radeon_connector->mst_encoder = radeon_dp_create_fake_mst_encoder(master);
1042    
1043     drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
1044     + drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
1045     drm_mode_connector_set_path_property(connector, pathprop);
1046     drm_reinit_primary_mode_group(dev);
1047    
1048     diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1049     index c1ba83a8dd8c..948c33105801 100644
1050     --- a/drivers/gpu/drm/radeon/radeon_pm.c
1051     +++ b/drivers/gpu/drm/radeon/radeon_pm.c
1052     @@ -1331,14 +1331,6 @@ static int radeon_pm_init_old(struct radeon_device *rdev)
1053     INIT_DELAYED_WORK(&rdev->pm.dynpm_idle_work, radeon_dynpm_idle_work_handler);
1054    
1055     if (rdev->pm.num_power_states > 1) {
1056     - /* where's the best place to put these? */
1057     - ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1058     - if (ret)
1059     - DRM_ERROR("failed to create device file for power profile\n");
1060     - ret = device_create_file(rdev->dev, &dev_attr_power_method);
1061     - if (ret)
1062     - DRM_ERROR("failed to create device file for power method\n");
1063     -
1064     if (radeon_debugfs_pm_init(rdev)) {
1065     DRM_ERROR("Failed to register debugfs file for PM!\n");
1066     }
1067     @@ -1396,20 +1388,6 @@ static int radeon_pm_init_dpm(struct radeon_device *rdev)
1068     goto dpm_failed;
1069     rdev->pm.dpm_enabled = true;
1070    
1071     - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1072     - if (ret)
1073     - DRM_ERROR("failed to create device file for dpm state\n");
1074     - ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1075     - if (ret)
1076     - DRM_ERROR("failed to create device file for dpm state\n");
1077     - /* XXX: these are noops for dpm but are here for backwards compat */
1078     - ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1079     - if (ret)
1080     - DRM_ERROR("failed to create device file for power profile\n");
1081     - ret = device_create_file(rdev->dev, &dev_attr_power_method);
1082     - if (ret)
1083     - DRM_ERROR("failed to create device file for power method\n");
1084     -
1085     if (radeon_debugfs_pm_init(rdev)) {
1086     DRM_ERROR("Failed to register debugfs file for dpm!\n");
1087     }
1088     @@ -1550,9 +1528,44 @@ int radeon_pm_late_init(struct radeon_device *rdev)
1089     int ret = 0;
1090    
1091     if (rdev->pm.pm_method == PM_METHOD_DPM) {
1092     - mutex_lock(&rdev->pm.mutex);
1093     - ret = radeon_dpm_late_enable(rdev);
1094     - mutex_unlock(&rdev->pm.mutex);
1095     + if (rdev->pm.dpm_enabled) {
1096     + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_state);
1097     + if (ret)
1098     + DRM_ERROR("failed to create device file for dpm state\n");
1099     + ret = device_create_file(rdev->dev, &dev_attr_power_dpm_force_performance_level);
1100     + if (ret)
1101     + DRM_ERROR("failed to create device file for dpm state\n");
1102     + /* XXX: these are noops for dpm but are here for backwards compat */
1103     + ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1104     + if (ret)
1105     + DRM_ERROR("failed to create device file for power profile\n");
1106     + ret = device_create_file(rdev->dev, &dev_attr_power_method);
1107     + if (ret)
1108     + DRM_ERROR("failed to create device file for power method\n");
1109     +
1110     + mutex_lock(&rdev->pm.mutex);
1111     + ret = radeon_dpm_late_enable(rdev);
1112     + mutex_unlock(&rdev->pm.mutex);
1113     + if (ret) {
1114     + rdev->pm.dpm_enabled = false;
1115     + DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1116     + } else {
1117     + /* set the dpm state for PX since there won't be
1118     + * a modeset to call this.
1119     + */
1120     + radeon_pm_compute_clocks(rdev);
1121     + }
1122     + }
1123     + } else {
1124     + if (rdev->pm.num_power_states > 1) {
1125     + /* where's the best place to put these? */
1126     + ret = device_create_file(rdev->dev, &dev_attr_power_profile);
1127     + if (ret)
1128     + DRM_ERROR("failed to create device file for power profile\n");
1129     + ret = device_create_file(rdev->dev, &dev_attr_power_method);
1130     + if (ret)
1131     + DRM_ERROR("failed to create device file for power method\n");
1132     + }
1133     }
1134     return ret;
1135     }
1136     diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
1137     index 3dd2de31a2f8..472b88285c75 100644
1138     --- a/drivers/i2c/busses/i2c-designware-platdrv.c
1139     +++ b/drivers/i2c/busses/i2c-designware-platdrv.c
1140     @@ -24,6 +24,7 @@
1141     #include <linux/kernel.h>
1142     #include <linux/module.h>
1143     #include <linux/delay.h>
1144     +#include <linux/dmi.h>
1145     #include <linux/i2c.h>
1146     #include <linux/clk.h>
1147     #include <linux/clk-provider.h>
1148     @@ -51,6 +52,22 @@ static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev)
1149     }
1150    
1151     #ifdef CONFIG_ACPI
1152     +/*
1153     + * The HCNT/LCNT information coming from ACPI should be the most accurate
1154     + * for given platform. However, some systems get it wrong. On such systems
1155     + * we get better results by calculating those based on the input clock.
1156     + */
1157     +static const struct dmi_system_id dw_i2c_no_acpi_params[] = {
1158     + {
1159     + .ident = "Dell Inspiron 7348",
1160     + .matches = {
1161     + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1162     + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7348"),
1163     + },
1164     + },
1165     + { }
1166     +};
1167     +
1168     static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
1169     u16 *hcnt, u16 *lcnt, u32 *sda_hold)
1170     {
1171     @@ -58,6 +75,9 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
1172     acpi_handle handle = ACPI_HANDLE(&pdev->dev);
1173     union acpi_object *obj;
1174    
1175     + if (dmi_check_system(dw_i2c_no_acpi_params))
1176     + return;
1177     +
1178     if (ACPI_FAILURE(acpi_evaluate_object(handle, method, NULL, &buf)))
1179     return;
1180    
1181     @@ -253,12 +273,6 @@ static int dw_i2c_probe(struct platform_device *pdev)
1182     adap->dev.parent = &pdev->dev;
1183     adap->dev.of_node = pdev->dev.of_node;
1184    
1185     - r = i2c_add_numbered_adapter(adap);
1186     - if (r) {
1187     - dev_err(&pdev->dev, "failure adding adapter\n");
1188     - return r;
1189     - }
1190     -
1191     if (dev->pm_runtime_disabled) {
1192     pm_runtime_forbid(&pdev->dev);
1193     } else {
1194     @@ -268,6 +282,13 @@ static int dw_i2c_probe(struct platform_device *pdev)
1195     pm_runtime_enable(&pdev->dev);
1196     }
1197    
1198     + r = i2c_add_numbered_adapter(adap);
1199     + if (r) {
1200     + dev_err(&pdev->dev, "failure adding adapter\n");
1201     + pm_runtime_disable(&pdev->dev);
1202     + return r;
1203     + }
1204     +
1205     return 0;
1206     }
1207    
1208     diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
1209     index d8361dada584..d8b5a8fee1e6 100644
1210     --- a/drivers/i2c/busses/i2c-rcar.c
1211     +++ b/drivers/i2c/busses/i2c-rcar.c
1212     @@ -690,15 +690,16 @@ static int rcar_i2c_probe(struct platform_device *pdev)
1213     return ret;
1214     }
1215    
1216     + pm_runtime_enable(dev);
1217     + platform_set_drvdata(pdev, priv);
1218     +
1219     ret = i2c_add_numbered_adapter(adap);
1220     if (ret < 0) {
1221     dev_err(dev, "reg adap failed: %d\n", ret);
1222     + pm_runtime_disable(dev);
1223     return ret;
1224     }
1225    
1226     - pm_runtime_enable(dev);
1227     - platform_set_drvdata(pdev, priv);
1228     -
1229     dev_info(dev, "probed\n");
1230    
1231     return 0;
1232     diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
1233     index 50bfd8cef5f2..5df819610d52 100644
1234     --- a/drivers/i2c/busses/i2c-s3c2410.c
1235     +++ b/drivers/i2c/busses/i2c-s3c2410.c
1236     @@ -1243,17 +1243,19 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1237     i2c->adap.nr = i2c->pdata->bus_num;
1238     i2c->adap.dev.of_node = pdev->dev.of_node;
1239    
1240     + platform_set_drvdata(pdev, i2c);
1241     +
1242     + pm_runtime_enable(&pdev->dev);
1243     +
1244     ret = i2c_add_numbered_adapter(&i2c->adap);
1245     if (ret < 0) {
1246     dev_err(&pdev->dev, "failed to add bus to i2c core\n");
1247     + pm_runtime_disable(&pdev->dev);
1248     s3c24xx_i2c_deregister_cpufreq(i2c);
1249     clk_unprepare(i2c->clk);
1250     return ret;
1251     }
1252    
1253     - platform_set_drvdata(pdev, i2c);
1254     -
1255     - pm_runtime_enable(&pdev->dev);
1256     pm_runtime_enable(&i2c->adap.dev);
1257    
1258     dev_info(&pdev->dev, "%s: S3C I2C adapter\n", dev_name(&i2c->adap.dev));
1259     diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1260     index 75aef240c2d1..493c38e08bd2 100644
1261     --- a/drivers/md/dm-thin.c
1262     +++ b/drivers/md/dm-thin.c
1263     @@ -3255,7 +3255,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1264     metadata_low_callback,
1265     pool);
1266     if (r)
1267     - goto out_free_pt;
1268     + goto out_flags_changed;
1269    
1270     pt->callbacks.congested_fn = pool_is_congested;
1271     dm_table_add_target_callbacks(ti->table, &pt->callbacks);
1272     diff --git a/drivers/mfd/max77843.c b/drivers/mfd/max77843.c
1273     index a354ac677ec7..1074a0d68680 100644
1274     --- a/drivers/mfd/max77843.c
1275     +++ b/drivers/mfd/max77843.c
1276     @@ -79,7 +79,7 @@ static int max77843_chg_init(struct max77843 *max77843)
1277     if (!max77843->i2c_chg) {
1278     dev_err(&max77843->i2c->dev,
1279     "Cannot allocate I2C device for Charger\n");
1280     - return PTR_ERR(max77843->i2c_chg);
1281     + return -ENODEV;
1282     }
1283     i2c_set_clientdata(max77843->i2c_chg, max77843);
1284    
1285     diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
1286     index 28df37420da9..ac02c675c59c 100644
1287     --- a/drivers/net/ethernet/ibm/emac/core.h
1288     +++ b/drivers/net/ethernet/ibm/emac/core.h
1289     @@ -460,8 +460,8 @@ struct emac_ethtool_regs_subhdr {
1290     u32 index;
1291     };
1292    
1293     -#define EMAC_ETHTOOL_REGS_VER 0
1294     -#define EMAC4_ETHTOOL_REGS_VER 1
1295     -#define EMAC4SYNC_ETHTOOL_REGS_VER 2
1296     +#define EMAC_ETHTOOL_REGS_VER 3
1297     +#define EMAC4_ETHTOOL_REGS_VER 4
1298     +#define EMAC4SYNC_ETHTOOL_REGS_VER 5
1299    
1300     #endif /* __IBM_NEWEMAC_CORE_H */
1301     diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
1302     index 3837ae344f63..2ed75060da50 100644
1303     --- a/drivers/net/ppp/pppoe.c
1304     +++ b/drivers/net/ppp/pppoe.c
1305     @@ -313,7 +313,6 @@ static void pppoe_flush_dev(struct net_device *dev)
1306     if (po->pppoe_dev == dev &&
1307     sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
1308     pppox_unbind_sock(sk);
1309     - sk->sk_state = PPPOX_ZOMBIE;
1310     sk->sk_state_change(sk);
1311     po->pppoe_dev = NULL;
1312     dev_put(dev);
1313     diff --git a/drivers/pinctrl/freescale/pinctrl-imx25.c b/drivers/pinctrl/freescale/pinctrl-imx25.c
1314     index faf635654312..293ed4381cc0 100644
1315     --- a/drivers/pinctrl/freescale/pinctrl-imx25.c
1316     +++ b/drivers/pinctrl/freescale/pinctrl-imx25.c
1317     @@ -26,7 +26,8 @@
1318     #include "pinctrl-imx.h"
1319    
1320     enum imx25_pads {
1321     - MX25_PAD_RESERVE0 = 1,
1322     + MX25_PAD_RESERVE0 = 0,
1323     + MX25_PAD_RESERVE1 = 1,
1324     MX25_PAD_A10 = 2,
1325     MX25_PAD_A13 = 3,
1326     MX25_PAD_A14 = 4,
1327     @@ -169,6 +170,7 @@ enum imx25_pads {
1328     /* Pad names for the pinmux subsystem */
1329     static const struct pinctrl_pin_desc imx25_pinctrl_pads[] = {
1330     IMX_PINCTRL_PIN(MX25_PAD_RESERVE0),
1331     + IMX_PINCTRL_PIN(MX25_PAD_RESERVE1),
1332     IMX_PINCTRL_PIN(MX25_PAD_A10),
1333     IMX_PINCTRL_PIN(MX25_PAD_A13),
1334     IMX_PINCTRL_PIN(MX25_PAD_A14),
1335     diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
1336     index 802fabb30e15..34cbe3505dac 100644
1337     --- a/fs/btrfs/backref.c
1338     +++ b/fs/btrfs/backref.c
1339     @@ -1809,7 +1809,6 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
1340     int found = 0;
1341     struct extent_buffer *eb;
1342     struct btrfs_inode_extref *extref;
1343     - struct extent_buffer *leaf;
1344     u32 item_size;
1345     u32 cur_offset;
1346     unsigned long ptr;
1347     @@ -1837,9 +1836,8 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
1348     btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1349     btrfs_release_path(path);
1350    
1351     - leaf = path->nodes[0];
1352     - item_size = btrfs_item_size_nr(leaf, slot);
1353     - ptr = btrfs_item_ptr_offset(leaf, slot);
1354     + item_size = btrfs_item_size_nr(eb, slot);
1355     + ptr = btrfs_item_ptr_offset(eb, slot);
1356     cur_offset = 0;
1357    
1358     while (cur_offset < item_size) {
1359     @@ -1853,7 +1851,7 @@ static int iterate_inode_extrefs(u64 inum, struct btrfs_root *fs_root,
1360     if (ret)
1361     break;
1362    
1363     - cur_offset += btrfs_inode_extref_name_len(leaf, extref);
1364     + cur_offset += btrfs_inode_extref_name_len(eb, extref);
1365     cur_offset += sizeof(*extref);
1366     }
1367     btrfs_tree_read_unlock_blocking(eb);
1368     diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
1369     index 0770c91586ca..f490b6155091 100644
1370     --- a/fs/btrfs/ioctl.c
1371     +++ b/fs/btrfs/ioctl.c
1372     @@ -4647,6 +4647,11 @@ locked:
1373     bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
1374     }
1375    
1376     + if (bctl->flags & ~(BTRFS_BALANCE_ARGS_MASK | BTRFS_BALANCE_TYPE_MASK)) {
1377     + ret = -EINVAL;
1378     + goto out_bargs;
1379     + }
1380     +
1381     do_balance:
1382     /*
1383     * Ownership of bctl and mutually_exclusive_operation_running
1384     diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
1385     index 95842a909e7f..2ac5f8cd701a 100644
1386     --- a/fs/btrfs/volumes.h
1387     +++ b/fs/btrfs/volumes.h
1388     @@ -376,6 +376,14 @@ struct map_lookup {
1389     #define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4)
1390     #define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5)
1391    
1392     +#define BTRFS_BALANCE_ARGS_MASK \
1393     + (BTRFS_BALANCE_ARGS_PROFILES | \
1394     + BTRFS_BALANCE_ARGS_USAGE | \
1395     + BTRFS_BALANCE_ARGS_DEVID | \
1396     + BTRFS_BALANCE_ARGS_DRANGE | \
1397     + BTRFS_BALANCE_ARGS_VRANGE | \
1398     + BTRFS_BALANCE_ARGS_LIMIT)
1399     +
1400     /*
1401     * Profile changing flags. When SOFT is set we won't relocate chunk if
1402     * it already has the target profile (even though it may be
1403     diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
1404     index cdefaa331a07..c29d9421bd5e 100644
1405     --- a/fs/nfsd/blocklayout.c
1406     +++ b/fs/nfsd/blocklayout.c
1407     @@ -56,14 +56,6 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp,
1408     u32 device_generation = 0;
1409     int error;
1410    
1411     - /*
1412     - * We do not attempt to support I/O smaller than the fs block size,
1413     - * or not aligned to it.
1414     - */
1415     - if (args->lg_minlength < block_size) {
1416     - dprintk("pnfsd: I/O too small\n");
1417     - goto out_layoutunavailable;
1418     - }
1419     if (seg->offset & (block_size - 1)) {
1420     dprintk("pnfsd: I/O misaligned\n");
1421     goto out_layoutunavailable;
1422     diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
1423     index 86d0b25ed054..a89f505c856b 100644
1424     --- a/include/drm/drm_dp_mst_helper.h
1425     +++ b/include/drm/drm_dp_mst_helper.h
1426     @@ -253,6 +253,7 @@ struct drm_dp_remote_dpcd_write {
1427     u8 *bytes;
1428     };
1429    
1430     +#define DP_REMOTE_I2C_READ_MAX_TRANSACTIONS 4
1431     struct drm_dp_remote_i2c_read {
1432     u8 num_transactions;
1433     u8 port_number;
1434     @@ -262,7 +263,7 @@ struct drm_dp_remote_i2c_read {
1435     u8 *bytes;
1436     u8 no_stop_bit;
1437     u8 i2c_transaction_delay;
1438     - } transactions[4];
1439     + } transactions[DP_REMOTE_I2C_READ_MAX_TRANSACTIONS];
1440     u8 read_i2c_device_id;
1441     u8 num_bytes_read;
1442     };
1443     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
1444     index 9b88536487e6..275158803824 100644
1445     --- a/include/linux/skbuff.h
1446     +++ b/include/linux/skbuff.h
1447     @@ -2601,6 +2601,9 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
1448     {
1449     if (skb->ip_summed == CHECKSUM_COMPLETE)
1450     skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
1451     + else if (skb->ip_summed == CHECKSUM_PARTIAL &&
1452     + skb_checksum_start_offset(skb) < 0)
1453     + skb->ip_summed = CHECKSUM_NONE;
1454     }
1455    
1456     unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
1457     diff --git a/include/net/af_unix.h b/include/net/af_unix.h
1458     index 4a167b30a12f..cb1b9bbda332 100644
1459     --- a/include/net/af_unix.h
1460     +++ b/include/net/af_unix.h
1461     @@ -63,7 +63,11 @@ struct unix_sock {
1462     #define UNIX_GC_MAYBE_CYCLE 1
1463     struct socket_wq peer_wq;
1464     };
1465     -#define unix_sk(__sk) ((struct unix_sock *)__sk)
1466     +
1467     +static inline struct unix_sock *unix_sk(struct sock *sk)
1468     +{
1469     + return (struct unix_sock *)sk;
1470     +}
1471    
1472     #define peer_wait peer_wq.wait
1473    
1474     diff --git a/include/net/sock.h b/include/net/sock.h
1475     index f21f0708ec59..4ca4c3fe446f 100644
1476     --- a/include/net/sock.h
1477     +++ b/include/net/sock.h
1478     @@ -826,6 +826,14 @@ static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *s
1479     if (sk_rcvqueues_full(sk, limit))
1480     return -ENOBUFS;
1481    
1482     + /*
1483     + * If the skb was allocated from pfmemalloc reserves, only
1484     + * allow SOCK_MEMALLOC sockets to use it as this socket is
1485     + * helping free memory
1486     + */
1487     + if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC))
1488     + return -ENOMEM;
1489     +
1490     __sk_add_backlog(sk, skb);
1491     sk->sk_backlog.len += skb->truesize;
1492     return 0;
1493     diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
1494     index a20d4110e871..3688f1e07ebd 100644
1495     --- a/kernel/time/timekeeping.c
1496     +++ b/kernel/time/timekeeping.c
1497     @@ -1244,7 +1244,7 @@ void __init timekeeping_init(void)
1498     set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
1499     tk_set_wall_to_mono(tk, tmp);
1500    
1501     - timekeeping_update(tk, TK_MIRROR);
1502     + timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1503    
1504     write_seqcount_end(&tk_core.seq);
1505     raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1506     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
1507     index a413acb59a07..1de0f5fabb98 100644
1508     --- a/kernel/workqueue.c
1509     +++ b/kernel/workqueue.c
1510     @@ -1458,13 +1458,13 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1511     timer_stats_timer_set_start_info(&dwork->timer);
1512    
1513     dwork->wq = wq;
1514     + /* timer isn't guaranteed to run in this cpu, record earlier */
1515     + if (cpu == WORK_CPU_UNBOUND)
1516     + cpu = raw_smp_processor_id();
1517     dwork->cpu = cpu;
1518     timer->expires = jiffies + delay;
1519    
1520     - if (unlikely(cpu != WORK_CPU_UNBOUND))
1521     - add_timer_on(timer, cpu);
1522     - else
1523     - add_timer(timer);
1524     + add_timer_on(timer, cpu);
1525     }
1526    
1527     /**
1528     diff --git a/mm/memcontrol.c b/mm/memcontrol.c
1529     index 237d4686482d..03a6f7506cf3 100644
1530     --- a/mm/memcontrol.c
1531     +++ b/mm/memcontrol.c
1532     @@ -3687,6 +3687,7 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg,
1533     ret = page_counter_memparse(args, "-1", &threshold);
1534     if (ret)
1535     return ret;
1536     + threshold <<= PAGE_SHIFT;
1537    
1538     mutex_lock(&memcg->thresholds_lock);
1539    
1540     diff --git a/net/core/ethtool.c b/net/core/ethtool.c
1541     index b495ab1797fa..29edf74846fc 100644
1542     --- a/net/core/ethtool.c
1543     +++ b/net/core/ethtool.c
1544     @@ -1284,7 +1284,7 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1545    
1546     gstrings.len = ret;
1547    
1548     - data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
1549     + data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
1550     if (!data)
1551     return -ENOMEM;
1552    
1553     diff --git a/net/core/filter.c b/net/core/filter.c
1554     index be3098fb65e4..8dcdd86b68dd 100644
1555     --- a/net/core/filter.c
1556     +++ b/net/core/filter.c
1557     @@ -1412,6 +1412,7 @@ static u64 bpf_clone_redirect(u64 r1, u64 ifindex, u64 flags, u64 r4, u64 r5)
1558     return dev_forward_skb(dev, skb2);
1559    
1560     skb2->dev = dev;
1561     + skb_sender_cpu_clear(skb2);
1562     return dev_queue_xmit(skb2);
1563     }
1564    
1565     @@ -1701,9 +1702,13 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
1566     goto out;
1567    
1568     /* We're copying the filter that has been originally attached,
1569     - * so no conversion/decode needed anymore.
1570     + * so no conversion/decode needed anymore. eBPF programs that
1571     + * have no original program cannot be dumped through this.
1572     */
1573     + ret = -EACCES;
1574     fprog = filter->prog->orig_prog;
1575     + if (!fprog)
1576     + goto out;
1577    
1578     ret = fprog->len;
1579     if (!len)
1580     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1581     index 7b84330e5d30..7bfa18746681 100644
1582     --- a/net/core/skbuff.c
1583     +++ b/net/core/skbuff.c
1584     @@ -2958,11 +2958,12 @@ EXPORT_SYMBOL_GPL(skb_append_pagefrags);
1585     */
1586     unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
1587     {
1588     + unsigned char *data = skb->data;
1589     +
1590     BUG_ON(len > skb->len);
1591     - skb->len -= len;
1592     - BUG_ON(skb->len < skb->data_len);
1593     - skb_postpull_rcsum(skb, skb->data, len);
1594     - return skb->data += len;
1595     + __skb_pull(skb, len);
1596     + skb_postpull_rcsum(skb, data, len);
1597     + return skb->data;
1598     }
1599     EXPORT_SYMBOL_GPL(skb_pull_rcsum);
1600    
1601     diff --git a/net/dsa/slave.c b/net/dsa/slave.c
1602     index 35c47ddd04f0..25dbb91e1bc0 100644
1603     --- a/net/dsa/slave.c
1604     +++ b/net/dsa/slave.c
1605     @@ -348,12 +348,17 @@ static int dsa_slave_stp_update(struct net_device *dev, u8 state)
1606     static int dsa_slave_port_attr_set(struct net_device *dev,
1607     struct switchdev_attr *attr)
1608     {
1609     - int ret = 0;
1610     + struct dsa_slave_priv *p = netdev_priv(dev);
1611     + struct dsa_switch *ds = p->parent;
1612     + int ret;
1613    
1614     switch (attr->id) {
1615     case SWITCHDEV_ATTR_PORT_STP_STATE:
1616     - if (attr->trans == SWITCHDEV_TRANS_COMMIT)
1617     - ret = dsa_slave_stp_update(dev, attr->u.stp_state);
1618     + if (attr->trans == SWITCHDEV_TRANS_PREPARE)
1619     + ret = ds->drv->port_stp_update ? 0 : -EOPNOTSUPP;
1620     + else
1621     + ret = ds->drv->port_stp_update(ds, p->port,
1622     + attr->u.stp_state);
1623     break;
1624     default:
1625     ret = -EOPNOTSUPP;
1626     diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
1627     index 134957159c27..61b45a17fc73 100644
1628     --- a/net/ipv4/inet_connection_sock.c
1629     +++ b/net/ipv4/inet_connection_sock.c
1630     @@ -577,21 +577,22 @@ EXPORT_SYMBOL(inet_rtx_syn_ack);
1631     static bool reqsk_queue_unlink(struct request_sock_queue *queue,
1632     struct request_sock *req)
1633     {
1634     - struct listen_sock *lopt = queue->listen_opt;
1635     struct request_sock **prev;
1636     + struct listen_sock *lopt;
1637     bool found = false;
1638    
1639     spin_lock(&queue->syn_wait_lock);
1640     -
1641     - for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
1642     - prev = &(*prev)->dl_next) {
1643     - if (*prev == req) {
1644     - *prev = req->dl_next;
1645     - found = true;
1646     - break;
1647     + lopt = queue->listen_opt;
1648     + if (lopt) {
1649     + for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
1650     + prev = &(*prev)->dl_next) {
1651     + if (*prev == req) {
1652     + *prev = req->dl_next;
1653     + found = true;
1654     + break;
1655     + }
1656     }
1657     }
1658     -
1659     spin_unlock(&queue->syn_wait_lock);
1660     if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
1661     reqsk_put(req);
1662     @@ -685,20 +686,20 @@ void reqsk_queue_hash_req(struct request_sock_queue *queue,
1663     req->num_timeout = 0;
1664     req->sk = NULL;
1665    
1666     + setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
1667     + mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
1668     + req->rsk_hash = hash;
1669     +
1670     /* before letting lookups find us, make sure all req fields
1671     * are committed to memory and refcnt initialized.
1672     */
1673     smp_wmb();
1674     atomic_set(&req->rsk_refcnt, 2);
1675     - setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
1676     - req->rsk_hash = hash;
1677    
1678     spin_lock(&queue->syn_wait_lock);
1679     req->dl_next = lopt->syn_table[hash];
1680     lopt->syn_table[hash] = req;
1681     spin_unlock(&queue->syn_wait_lock);
1682     -
1683     - mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
1684     }
1685     EXPORT_SYMBOL(reqsk_queue_hash_req);
1686    
1687     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1688     index 00b64d402a57..dd6ebba5846c 100644
1689     --- a/net/ipv6/route.c
1690     +++ b/net/ipv6/route.c
1691     @@ -139,6 +139,9 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
1692     struct net_device *loopback_dev = net->loopback_dev;
1693     int cpu;
1694    
1695     + if (dev == loopback_dev)
1696     + return;
1697     +
1698     for_each_possible_cpu(cpu) {
1699     struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
1700     struct rt6_info *rt;
1701     @@ -148,14 +151,12 @@ static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
1702     struct inet6_dev *rt_idev = rt->rt6i_idev;
1703     struct net_device *rt_dev = rt->dst.dev;
1704    
1705     - if (rt_idev && (rt_idev->dev == dev || !dev) &&
1706     - rt_idev->dev != loopback_dev) {
1707     + if (rt_idev->dev == dev) {
1708     rt->rt6i_idev = in6_dev_get(loopback_dev);
1709     in6_dev_put(rt_idev);
1710     }
1711    
1712     - if (rt_dev && (rt_dev == dev || !dev) &&
1713     - rt_dev != loopback_dev) {
1714     + if (rt_dev == dev) {
1715     rt->dst.dev = loopback_dev;
1716     dev_hold(rt->dst.dev);
1717     dev_put(rt_dev);
1718     @@ -2577,7 +2578,8 @@ void rt6_ifdown(struct net *net, struct net_device *dev)
1719    
1720     fib6_clean_all(net, fib6_ifdown, &adn);
1721     icmp6_clean_all(fib6_ifdown, &adn);
1722     - rt6_uncached_list_flush_dev(net, dev);
1723     + if (dev)
1724     + rt6_uncached_list_flush_dev(net, dev);
1725     }
1726    
1727     struct rt6_mtu_change_arg {
1728     diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
1729     index f6b090df3930..afca2eb4dfa7 100644
1730     --- a/net/l2tp/l2tp_core.c
1731     +++ b/net/l2tp/l2tp_core.c
1732     @@ -1319,7 +1319,7 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1733     tunnel = container_of(work, struct l2tp_tunnel, del_work);
1734     sk = l2tp_tunnel_sock_lookup(tunnel);
1735     if (!sk)
1736     - return;
1737     + goto out;
1738    
1739     sock = sk->sk_socket;
1740    
1741     @@ -1341,6 +1341,8 @@ static void l2tp_tunnel_del_work(struct work_struct *work)
1742     }
1743    
1744     l2tp_tunnel_sock_put(sk);
1745     +out:
1746     + l2tp_tunnel_dec_refcount(tunnel);
1747     }
1748    
1749     /* Create a socket for the tunnel, if one isn't set up by
1750     @@ -1636,8 +1638,13 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
1751     */
1752     int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1753     {
1754     + l2tp_tunnel_inc_refcount(tunnel);
1755     l2tp_tunnel_closeall(tunnel);
1756     - return (false == queue_work(l2tp_wq, &tunnel->del_work));
1757     + if (false == queue_work(l2tp_wq, &tunnel->del_work)) {
1758     + l2tp_tunnel_dec_refcount(tunnel);
1759     + return 1;
1760     + }
1761     + return 0;
1762     }
1763     EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1764    
1765     diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
1766     index 0857f7243797..a133d16eb053 100644
1767     --- a/net/netlink/af_netlink.c
1768     +++ b/net/netlink/af_netlink.c
1769     @@ -2750,6 +2750,7 @@ static int netlink_dump(struct sock *sk)
1770     struct sk_buff *skb = NULL;
1771     struct nlmsghdr *nlh;
1772     int len, err = -ENOBUFS;
1773     + int alloc_min_size;
1774     int alloc_size;
1775    
1776     mutex_lock(nlk->cb_mutex);
1777     @@ -2758,9 +2759,6 @@ static int netlink_dump(struct sock *sk)
1778     goto errout_skb;
1779     }
1780    
1781     - cb = &nlk->cb;
1782     - alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1783     -
1784     if (!netlink_rx_is_mmaped(sk) &&
1785     atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
1786     goto errout_skb;
1787     @@ -2770,23 +2768,35 @@ static int netlink_dump(struct sock *sk)
1788     * to reduce number of system calls on dump operations, if user
1789     * ever provided a big enough buffer.
1790     */
1791     - if (alloc_size < nlk->max_recvmsg_len) {
1792     - skb = netlink_alloc_skb(sk,
1793     - nlk->max_recvmsg_len,
1794     - nlk->portid,
1795     + cb = &nlk->cb;
1796     + alloc_min_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);
1797     +
1798     + if (alloc_min_size < nlk->max_recvmsg_len) {
1799     + alloc_size = nlk->max_recvmsg_len;
1800     + skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
1801     GFP_KERNEL |
1802     __GFP_NOWARN |
1803     __GFP_NORETRY);
1804     - /* available room should be exact amount to avoid MSG_TRUNC */
1805     - if (skb)
1806     - skb_reserve(skb, skb_tailroom(skb) -
1807     - nlk->max_recvmsg_len);
1808     }
1809     - if (!skb)
1810     + if (!skb) {
1811     + alloc_size = alloc_min_size;
1812     skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
1813     GFP_KERNEL);
1814     + }
1815     if (!skb)
1816     goto errout_skb;
1817     +
1818     + /* Trim skb to allocated size. User is expected to provide buffer as
1819     + * large as max(min_dump_alloc, 16KiB (mac_recvmsg_len capped at
1820     + * netlink_recvmsg())). dump will pack as many smaller messages as
1821     + * could fit within the allocated skb. skb is typically allocated
1822     + * with larger space than required (could be as much as near 2x the
1823     + * requested size with align to next power of 2 approach). Allowing
1824     + * dump to use the excess space makes it difficult for a user to have a
1825     + * reasonable static buffer based on the expected largest dump of a
1826     + * single netdev. The outcome is MSG_TRUNC error.
1827     + */
1828     + skb_reserve(skb, skb_tailroom(skb) - alloc_size);
1829     netlink_skb_set_owner_r(skb, sk);
1830    
1831     len = cb->dump(skb, cb);
1832     diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
1833     index b5c3bba87fc8..af08e6fc9860 100644
1834     --- a/net/openvswitch/flow_table.c
1835     +++ b/net/openvswitch/flow_table.c
1836     @@ -92,7 +92,8 @@ struct sw_flow *ovs_flow_alloc(void)
1837    
1838     /* Initialize the default stat node. */
1839     stats = kmem_cache_alloc_node(flow_stats_cache,
1840     - GFP_KERNEL | __GFP_ZERO, 0);
1841     + GFP_KERNEL | __GFP_ZERO,
1842     + node_online(0) ? 0 : NUMA_NO_NODE);
1843     if (!stats)
1844     goto err;
1845    
1846     diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
1847     index 268545050ddb..b1768198ad59 100644
1848     --- a/net/sched/act_mirred.c
1849     +++ b/net/sched/act_mirred.c
1850     @@ -168,6 +168,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
1851    
1852     skb2->skb_iif = skb->dev->ifindex;
1853     skb2->dev = dev;
1854     + skb_sender_cpu_clear(skb2);
1855     err = dev_queue_xmit(skb2);
1856    
1857     out:
1858     diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1859     index 2e1348bde325..96d886a866e9 100644
1860     --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1861     +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
1862     @@ -146,7 +146,8 @@ int rdma_read_chunk_lcl(struct svcxprt_rdma *xprt,
1863     ctxt->read_hdr = head;
1864     pages_needed =
1865     min_t(int, pages_needed, rdma_read_max_sge(xprt, pages_needed));
1866     - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
1867     + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
1868     + rs_length);
1869    
1870     for (pno = 0; pno < pages_needed; pno++) {
1871     int len = min_t(int, rs_length, PAGE_SIZE - pg_off);
1872     @@ -245,7 +246,8 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
1873     ctxt->direction = DMA_FROM_DEVICE;
1874     ctxt->frmr = frmr;
1875     pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len);
1876     - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length);
1877     + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset,
1878     + rs_length);
1879    
1880     frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]);
1881     frmr->direction = DMA_FROM_DEVICE;
1882     diff --git a/net/tipc/msg.h b/net/tipc/msg.h
1883     index 19c45fb66238..49f9a9648aa9 100644
1884     --- a/net/tipc/msg.h
1885     +++ b/net/tipc/msg.h
1886     @@ -357,7 +357,7 @@ static inline u32 msg_importance(struct tipc_msg *m)
1887     if (likely((usr <= TIPC_CRITICAL_IMPORTANCE) && !msg_errcode(m)))
1888     return usr;
1889     if ((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER))
1890     - return msg_bits(m, 5, 13, 0x7);
1891     + return msg_bits(m, 9, 0, 0x7);
1892     return TIPC_SYSTEM_IMPORTANCE;
1893     }
1894    
1895     @@ -366,7 +366,7 @@ static inline void msg_set_importance(struct tipc_msg *m, u32 i)
1896     int usr = msg_user(m);
1897    
1898     if (likely((usr == MSG_FRAGMENTER) || (usr == MSG_BUNDLER)))
1899     - msg_set_bits(m, 5, 13, 0x7, i);
1900     + msg_set_bits(m, 9, 0, 0x7, i);
1901     else if (i < TIPC_SYSTEM_IMPORTANCE)
1902     msg_set_user(m, i);
1903     else
1904     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1905     index 03ee4d359f6a..94f658235fb4 100644
1906     --- a/net/unix/af_unix.c
1907     +++ b/net/unix/af_unix.c
1908     @@ -2064,6 +2064,11 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state)
1909     goto out;
1910     }
1911    
1912     + if (flags & MSG_PEEK)
1913     + skip = sk_peek_offset(sk, flags);
1914     + else
1915     + skip = 0;
1916     +
1917     do {
1918     int chunk;
1919     struct sk_buff *skb, *last;
1920     @@ -2112,7 +2117,6 @@ unlock:
1921     break;
1922     }
1923    
1924     - skip = sk_peek_offset(sk, flags);
1925     while (skip >= unix_skb_len(skb)) {
1926     skip -= unix_skb_len(skb);
1927     last = skb;
1928     @@ -2181,6 +2185,17 @@ unlock:
1929    
1930     sk_peek_offset_fwd(sk, chunk);
1931    
1932     + if (UNIXCB(skb).fp)
1933     + break;
1934     +
1935     + skip = 0;
1936     + last = skb;
1937     + last_len = skb->len;
1938     + unix_state_lock(sk);
1939     + skb = skb_peek_next(skb, &sk->sk_receive_queue);
1940     + if (skb)
1941     + goto again;
1942     + unix_state_unlock(sk);
1943     break;
1944     }
1945     } while (size);