Magellan Linux

Contents of /trunk/kernel-alx/patches-4.4/0115-4.4.16-all-fixes.patch

Revision 2824
Tue Sep 13 07:18:05 2016 UTC by niro
File size: 190231 bytes
-linux-4.4.16
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
index 6708c5e264aa..33e96f740639 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -1,4 +1,4 @@
-What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
Date: March 2014
KernelVersion: 3.15
Contact: Matt Ranostay <mranostay@gmail.com>
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index 8638f61c8c9d..37eca00796ee 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -263,19 +263,23 @@ scmd->allowed.

3. scmd recovered
ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
- - shost->host_failed--
- clear scmd->eh_eflags
- scsi_setup_cmd_retry()
- move from local eh_work_q to local eh_done_q
LOCKING: none
+ CONCURRENCY: at most one thread per separate eh_work_q to
+ keep queue manipulation lockless

4. EH completes
ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
- layer of failure.
+ layer of failure. May be called concurrently but must have
+ a no more than one thread per separate eh_work_q to
+ manipulate the queue locklessly
- scmd is removed from eh_done_q and scmd->eh_entry is cleared
- if retry is necessary, scmd is requeued using
scsi_queue_insert()
- otherwise, scsi_finish_command() is invoked for scmd
+ - zero shost->host_failed
LOCKING: queue or finish function performs appropriate locking


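The host_failed accounting above moves from a per-command shost->host_failed-- in scsi_eh_finish_cmd() to a single zeroing once the done queue is flushed: concurrent EH paths decrementing a shared counter without locking can lose updates. A minimal userspace sketch of that failure mode, assuming a plain int standing in for shost->host_failed (build with -pthread):

#include <pthread.h>
#include <stdio.h>

static int host_failed = 1000000;   /* stand-in for shost->host_failed */

static void *eh_worker(void *arg)
{
        for (int i = 0; i < 500000; i++)
                host_failed--;       /* non-atomic read-modify-write */
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, eh_worker, NULL);
        pthread_create(&b, NULL, eh_worker, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);

        /* Usually ends above zero: interleaved decrements get lost. */
        printf("host_failed = %d (0 expected)\n", host_failed);
        return 0;
}
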
diff --git a/Makefile b/Makefile
index 979088079338..da7621cadc8e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 15
+SUBLEVEL = 16
EXTRAVERSION =
NAME = Blurry Fish Butt

diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 6312f607932f..2d785f5a3041 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -387,7 +387,7 @@ config ARC_HAS_LLSC

config ARC_STAR_9000923308
bool "Workaround for llock/scond livelock"
- default y
+ default n
depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC

config ARC_HAS_SWAPE
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index e1b87444ea9a..05131805aa33 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -332,10 +332,6 @@ static void arc_chk_core_config(void)
pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
else if (!cpu->extn.fpu_dp && fpu_enabled)
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
-
- if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
- !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
- panic("llock/scond livelock workaround missing\n");
}

/*
diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
index 8450944b28e6..22f7a13e20b4 100644
--- a/arch/arm/boot/dts/armada-385-linksys.dtsi
+++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
@@ -58,8 +58,8 @@
soc {
ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
- MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
- MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
+ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
+ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;

internal-regs {

diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
index 530ab28e9ca2..d21f50ba3172 100644
--- a/arch/arm/boot/dts/sun5i-r8-chip.dts
+++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
@@ -52,7 +52,7 @@

/ {
model = "NextThing C.H.I.P.";
- compatible = "nextthing,chip", "allwinner,sun5i-r8";
+ compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";

aliases {
i2c0 = &i2c0;
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index aeddd28b3595..92fd2c8a9af0 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)

#define pmd_large(pmd) (pmd_val(pmd) & 2)
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
+#define pmd_present(pmd) (pmd_val(pmd))

#define copy_pmd(pmdpd,pmdps) \
do { \
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index a745a2a53853..fd929b5ded9e 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -212,6 +212,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
: !!(pmd_val(pmd) & (val)))
#define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))

+#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
#define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
#define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
static inline pte_t pte_mkspecial(pte_t pte)
@@ -257,10 +258,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)

-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
- return __pmd(0);
+ return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 348caabb7625..d62204060cbe 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)

#define pmd_none(pmd) (!pmd_val(pmd))
-#define pmd_present(pmd) (pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
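Taken together, the pgtable hunks change how ARM encodes a not-present huge PMD: pmdp_invalidate() used to zero the entry, making an invalidated entry indistinguishable from an empty one, whereas now only the valid bit is cleared and pmd_present() tests exactly that bit. A hedged userspace sketch of the encoding, with VALID_BIT standing in for L_PMD_SECT_VALID:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t pmd_t;
#define VALID_BIT (1ULL << 0)            /* stand-in for L_PMD_SECT_VALID */

static pmd_t pmd_mknotpresent(pmd_t pmd) { return pmd & ~VALID_BIT; }
static int pmd_present(pmd_t pmd)        { return (pmd & VALID_BIT) != 0; }
static int pmd_none(pmd_t pmd)           { return pmd == 0; }

int main(void)
{
        pmd_t huge = 0x40000001ULL;           /* some populated entry      */
        pmd_t inv  = pmd_mknotpresent(huge);  /* invalidated, not erased   */

        /* Not present, yet not none: the other bits survive invalidation. */
        printf("present=%d none=%d bits=%#llx\n",
               pmd_present(inv), pmd_none(inv), (unsigned long long)inv);
        return 0;
}
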
diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
index acaf7056efa5..e08d02667c81 100644
--- a/arch/arm/mach-imx/mach-imx6ul.c
+++ b/arch/arm/mach-imx/mach-imx6ul.c
@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
static void __init imx6ul_enet_phy_init(void)
{
if (IS_BUILTIN(CONFIG_PHYLIB))
- phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+ phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
ksz8081_phy_fixup);
}

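phylib applies a fixup when (phy_id & mask) == (uid & mask), so MICREL_PHY_ID_MASK (0x00fffff0) makes the match ignore the low revision bits that an exact 0xffffffff mask would have required to be identical. A small sketch of the matching rule, with the ID and mask values as defined in include/linux/micrel_phy.h:

#include <stdio.h>

#define PHY_ID_KSZ8081     0x00221560
#define MICREL_PHY_ID_MASK 0x00fffff0

static int fixup_matches(unsigned int phy_id, unsigned int uid,
                         unsigned int mask)
{
        return (phy_id & mask) == (uid & mask);
}

int main(void)
{
        /* A later silicon revision differs only in the low nibble. */
        unsigned int rev1 = 0x00221561;

        printf("%d\n", fixup_matches(rev1, PHY_ID_KSZ8081,
                                     MICREL_PHY_ID_MASK)); /* 1: matched */
        printf("%d\n", fixup_matches(rev1, PHY_ID_KSZ8081,
                                     0xffffffff));         /* 0: missed  */
        return 0;
}
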
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 55348ee5a352..feed36b32ff6 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -162,22 +162,16 @@ exit:
}

/*
- * This ioremap hook is used on Armada 375/38x to ensure that PCIe
- * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
- * is needed as a workaround for a deadlock issue between the PCIe
- * interface and the cache controller.
+ * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
+ * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
+ * needed for the HW I/O coherency mechanism to work properly without
+ * deadlock.
*/
static void __iomem *
-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
- unsigned int mtype, void *caller)
+armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
+ unsigned int mtype, void *caller)
{
- struct resource pcie_mem;
-
- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
-
- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
- mtype = MT_UNCACHED;
-
+ mtype = MT_UNCACHED;
return __arm_ioremap_caller(phys_addr, size, mtype, caller);
}

@@ -186,7 +180,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
struct device_node *cache_dn;

coherency_cpu_base = of_iomap(np, 0);
- arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
+ arch_ioremap_caller = armada_wa_ioremap_caller;

/*
* We should switch the PL310 to I/O coherency mode only if
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index e9e5467e0bf4..a307eb6e7fa8 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -58,6 +58,7 @@
#define COMPAT_PSR_Z_BIT 0x40000000
#define COMPAT_PSR_N_BIT 0x80000000
#define COMPAT_PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
+#define COMPAT_PSR_GE_MASK 0x000f0000

#ifdef CONFIG_CPU_BIG_ENDIAN
#define COMPAT_PSR_ENDSTATE COMPAT_PSR_E_BIT
@@ -151,35 +152,9 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
return regs->regs[0];
}

-/*
- * Are the current registers suitable for user mode? (used to maintain
- * security in signal handlers)
- */
-static inline int valid_user_regs(struct user_pt_regs *regs)
-{
- if (user_mode(regs) && (regs->pstate & PSR_I_BIT) == 0) {
- regs->pstate &= ~(PSR_F_BIT | PSR_A_BIT);
-
- /* The T bit is reserved for AArch64 */
- if (!(regs->pstate & PSR_MODE32_BIT))
- regs->pstate &= ~COMPAT_PSR_T_BIT;
-
- return 1;
- }
-
- /*
- * Force PSR to something logical...
- */
- regs->pstate &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | \
- COMPAT_PSR_T_BIT | PSR_MODE32_BIT;
-
- if (!(regs->pstate & PSR_MODE32_BIT)) {
- regs->pstate &= ~COMPAT_PSR_T_BIT;
- regs->pstate |= PSR_MODE_EL0t;
- }
-
- return 0;
-}
+/* We must avoid circular header include via sched.h */
+struct task_struct;
+int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);

#define instruction_pointer(regs) ((unsigned long)(regs)->pc)

diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index ff7f13239515..fc779ec6f051 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -39,6 +39,7 @@
#include <linux/elf.h>

#include <asm/compat.h>
+#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/syscall.h>
@@ -500,7 +501,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
if (ret)
return ret;

- if (!valid_user_regs(&newregs))
+ if (!valid_user_regs(&newregs, target))
return -EINVAL;

task_pt_regs(target)->user_regs = newregs;
@@ -770,7 +771,7 @@ static int compat_gpr_set(struct task_struct *target,

}

- if (valid_user_regs(&newregs.user_regs))
+ if (valid_user_regs(&newregs.user_regs, target))
*task_pt_regs(target) = newregs;
else
ret = -EINVAL;
@@ -1272,3 +1273,79 @@ asmlinkage void syscall_trace_exit(struct pt_regs *regs)
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
}
+
+/*
+ * Bits which are always architecturally RES0 per ARM DDI 0487A.h
+ * Userspace cannot use these until they have an architectural meaning.
+ * We also reserve IL for the kernel; SS is handled dynamically.
+ */
+#define SPSR_EL1_AARCH64_RES0_BITS \
+ (GENMASK_ULL(63,32) | GENMASK_ULL(27, 22) | GENMASK_ULL(20, 10) | \
+ GENMASK_ULL(5, 5))
+#define SPSR_EL1_AARCH32_RES0_BITS \
+ (GENMASK_ULL(63,32) | GENMASK_ULL(24, 22) | GENMASK_ULL(20,20))
+
+static int valid_compat_regs(struct user_pt_regs *regs)
+{
+ regs->pstate &= ~SPSR_EL1_AARCH32_RES0_BITS;
+
+ if (!system_supports_mixed_endian_el0()) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ regs->pstate |= COMPAT_PSR_E_BIT;
+ else
+ regs->pstate &= ~COMPAT_PSR_E_BIT;
+ }
+
+ if (user_mode(regs) && (regs->pstate & PSR_MODE32_BIT) &&
+ (regs->pstate & COMPAT_PSR_A_BIT) == 0 &&
+ (regs->pstate & COMPAT_PSR_I_BIT) == 0 &&
+ (regs->pstate & COMPAT_PSR_F_BIT) == 0) {
+ return 1;
+ }
+
+ /*
+ * Force PSR to a valid 32-bit EL0t, preserving the same bits as
+ * arch/arm.
+ */
+ regs->pstate &= COMPAT_PSR_N_BIT | COMPAT_PSR_Z_BIT |
+ COMPAT_PSR_C_BIT | COMPAT_PSR_V_BIT |
+ COMPAT_PSR_Q_BIT | COMPAT_PSR_IT_MASK |
+ COMPAT_PSR_GE_MASK | COMPAT_PSR_E_BIT |
+ COMPAT_PSR_T_BIT;
+ regs->pstate |= PSR_MODE32_BIT;
+
+ return 0;
+}
+
+static int valid_native_regs(struct user_pt_regs *regs)
+{
+ regs->pstate &= ~SPSR_EL1_AARCH64_RES0_BITS;
+
+ if (user_mode(regs) && !(regs->pstate & PSR_MODE32_BIT) &&
+ (regs->pstate & PSR_D_BIT) == 0 &&
+ (regs->pstate & PSR_A_BIT) == 0 &&
+ (regs->pstate & PSR_I_BIT) == 0 &&
+ (regs->pstate & PSR_F_BIT) == 0) {
+ return 1;
+ }
+
+ /* Force PSR to a valid 64-bit EL0t */
+ regs->pstate &= PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT;
+
+ return 0;
+}
+
+/*
+ * Are the current registers suitable for user mode? (used to maintain
+ * security in signal handlers)
+ */
+int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
+{
+ if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
+ regs->pstate &= ~DBG_SPSR_SS;
+
+ if (is_compat_thread(task_thread_info(task)))
+ return valid_compat_regs(regs);
+ else
+ return valid_native_regs(regs);
+}
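SPSR_EL1_AARCH64_RES0_BITS above is assembled from GENMASK_ULL ranges and stripped from pstate before the validity checks, so userspace can never smuggle reserved SPSR bits in through ptrace or sigreturn. A userspace re-derivation of the mask, with GENMASK_ULL reimplemented locally (a stand-in for the kernel macro):

#include <stdio.h>

/* Local stand-in for the kernel's GENMASK_ULL(h, l): bits h..l set. */
#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
        unsigned long long res0 =
                GENMASK_ULL(63, 32) | GENMASK_ULL(27, 22) |
                GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5);

        /* Prints 0xffffffff0fdffc20: the pstate bits a debugger may
         * never set; everything outside the mask survives untouched. */
        printf("AArch64 RES0 mask: %#llx\n", res0);
        return 0;
}
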
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index e18c48cb6db1..a8eafdbc7cb8 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -115,7 +115,7 @@ static int restore_sigframe(struct pt_regs *regs,
*/
regs->syscallno = ~0UL;

- err |= !valid_user_regs(&regs->user_regs);
+ err |= !valid_user_regs(&regs->user_regs, current);

if (err == 0) {
struct fpsimd_context *fpsimd_ctx =
@@ -307,7 +307,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
/*
* Check that the resulting registers are actually sane.
*/
- ret |= !valid_user_regs(&regs->user_regs);
+ ret |= !valid_user_regs(&regs->user_regs, current);

/*
* Fast forward the stepping logic so we step into the signal
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 71ef6dc89ae5..107335637390 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -356,7 +356,7 @@ static int compat_restore_sigframe(struct pt_regs *regs,
*/
regs->syscallno = ~0UL;

- err |= !valid_user_regs(&regs->user_regs);
+ err |= !valid_user_regs(&regs->user_regs, current);

aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
if (err == 0)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 4e956b3e16f5..dd7cee795709 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -372,6 +372,7 @@ struct kvm_mips_tlb {
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
void *host_ebase, *guest_ebase;
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;

diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 4ab4bdfad703..2143884709e4 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,6 +28,7 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */

+extern char __kvm_mips_vcpu_run_end[];
extern char mips32_exception[], mips32_exceptionEnd[];
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];

diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
index 7e2210846b8b..77706433651b 100644
--- a/arch/mips/kvm/locore.S
+++ b/arch/mips/kvm/locore.S
@@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)

/* Jump to guest */
eret
+EXPORT(__kvm_mips_vcpu_run_end)

VECTOR(MIPSX(exception), unknown)
/* Find out what mode we came from and jump to the proper handler. */
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 2683d04fdda5..e86b7499921a 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
memcpy(gebase + offset, mips32_GuestException,
mips32_GuestExceptionEnd - mips32_GuestException);

+#ifdef MODULE
+ offset += mips32_GuestExceptionEnd - mips32_GuestException;
+ memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+ __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+ vcpu->arch.vcpu_run = gebase + offset;
+#else
+ vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
(unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
/* Disable hardware page table walking while in guest */
htw_stop();

- r = __kvm_mips_vcpu_run(run, vcpu);
+ r = vcpu->arch.vcpu_run(run, vcpu);

/* Re-enable HTW before enabling interrupts */
htw_start();
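When KVM is built as a module, the run code must execute from gebase, so the hunk above copies __kvm_mips_vcpu_run there; the new __kvm_mips_vcpu_run_end label exists purely so C code can compute the number of bytes to copy as end minus start. A userspace analogue of the start/end-marker sizing (hypothetical names, a data blob standing in for the code):

#include <stdio.h>
#include <string.h>

/* Stand-ins for the start and end labels exported from locore.S. */
static const unsigned char region[] = { 0x01, 0x02, 0x03, 0x04, 0x05 };
static const unsigned char *const region_start = region;
static const unsigned char *const region_end = region + sizeof(region);

int main(void)
{
        unsigned char gebase[16];                 /* relocation target */
        size_t len = (size_t)(region_end - region_start);

        memcpy(gebase, region_start, len);        /* relocate the blob */
        printf("copied %zu bytes\n", len);        /* copied 5 bytes    */
        return 0;
}
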
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 646bf4d222c1..cf788d7d7e56 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1239,6 +1239,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
current->thread.regs = regs - 1;
}

+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /*
+ * Clear any transactional state, we're exec()ing. The cause is
+ * not important as there will never be a recheckpoint so it's not
+ * user visible.
+ */
+ if (MSR_TM_SUSPENDED(mfmsr()))
+ tm_reclaim_current(0);
+#endif
+
memset(regs->gpr, 0, sizeof(regs->gpr));
regs->ctr = 0;
regs->link = 0;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index e52b82b71d79..b7e86e00048f 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -718,7 +718,7 @@ unsigned char ibm_architecture_vec[] = {
* must match by the macro below. Update the definition if
* the structure layout changes.
*/
-#define IBM_ARCH_VEC_NRCORES_OFFSET 125
+#define IBM_ARCH_VEC_NRCORES_OFFSET 133
W(NR_CPUS), /* number of cores supported */
0,
0,
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index bd98ce2be17b..3e8865b187de 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -912,7 +912,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_query_response *query)
{
- struct eeh_dev *edev;
+ struct device_node *dn;
+ struct pci_dn *pdn;
u32 cfg_addr;
u64 buid;
int ret;
@@ -923,11 +924,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
- edev = pci_dev_to_eeh_dev(dev);
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
+ dn = pci_device_to_OF_node(dev);
+ pdn = PCI_DN(dn);
+ buid = pdn->phb->buid;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
cfg_addr, BUID_HI(buid), BUID_LO(buid));
@@ -941,7 +941,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
struct ddw_create_response *create, int page_shift,
int window_shift)
{
- struct eeh_dev *edev;
+ struct device_node *dn;
+ struct pci_dn *pdn;
u32 cfg_addr;
u64 buid;
int ret;
@@ -952,11 +953,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
* Retrieve them from the pci device, not the node with the
* dma-window property
*/
- edev = pci_dev_to_eeh_dev(dev);
- cfg_addr = edev->config_addr;
- if (edev->pe_config_addr)
- cfg_addr = edev->pe_config_addr;
- buid = edev->phb->buid;
+ dn = pci_device_to_OF_node(dev);
+ pdn = PCI_DN(dn);
+ buid = pdn->phb->buid;
+ cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

do {
/* extra outputs are LIOBN and dma-addr (hi, lo) */
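Both helpers now build the RTAS config address from pci_dn rather than the EEH device, which does not exist when EEH is disabled. The packing puts the bus number in bits 23:16 and devfn (device << 3 | function) in bits 15:8. A quick sketch of the encoding:

#include <stdio.h>

int main(void)
{
        unsigned int busno = 0x42;
        unsigned int devfn = (3 << 3) | 1;       /* device 3, function 1 */
        unsigned int cfg_addr = (busno << 16) | (devfn << 8);

        printf("cfg_addr = %#x\n", cfg_addr);    /* 0x421900 */
        return 0;
}
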
diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
index 5e04f3cbd320..8ae236b0f80b 100644
--- a/arch/s390/include/asm/fpu/api.h
+++ b/arch/s390/include/asm/fpu/api.h
@@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
" la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
- : "=d" (rc), "=d" (orig_fpc)
+ : "=d" (rc), "=&d" (orig_fpc)
: "d" (fpc), "0" (-EINVAL));
return rc;
}
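The only change is the '&' (earlyclobber) on orig_fpc. In test_fp_ctl() the efpc instruction writes orig_fpc before sfpc reads the fpc input, so without the earlyclobber GCC may allocate both to the same register and clobber the input first. A stripped-down, compile-only s390 sketch of the same asm with the hazard annotated (exception table omitted):

static inline int test_fp_ctl_sketch(unsigned int fpc)
{
        unsigned int orig_fpc;
        int rc;

        asm volatile(
                "       efpc    %1\n"   /* writes %1 (orig_fpc) first...   */
                "       sfpc    %2\n"   /* ...then reads %2 (fpc): without */
                "       la      %0,0\n" /* '&' they may share a register   */
                : "=d" (rc), "=&d" (orig_fpc)
                : "d" (fpc), "0" (-22 /* -EINVAL */));
        return rc;
}
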
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 2ee62dba0373..c0cc2a6be0bf 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -162,6 +162,9 @@ isoimage: $(obj)/bzImage
for i in lib lib64 share end ; do \
if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
+ if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
+ cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
+ fi ; \
break ; \
fi ; \
if [ $$i = end ] ; then exit 1 ; fi ; \
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 29fa475ec518..c986d0b3bc35 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
i++;

- if (i == 0)
- return 0;
+ if (!i)
+ return -ENODEV;

nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
if (!nb)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 078de2e86b7a..5f82cd59f0e5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -3601,7 +3601,7 @@ __init int intel_pmu_init(void)
c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
}
c->idxmsk64 &=
- ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+ ~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
c->weight = hweight64(c->idxmsk64);
}
}
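c->idxmsk64 is 64 bits wide and INTEL_PMC_IDX_FIXED is 32: on a 32-bit kernel ~0UL is only a 32-bit value, so shifting it left by 32 or more is undefined and the fixed-counter bits above bit 31 never reach the mask; ~0ULL keeps the whole computation in 64 bits. A quick illustration of the width difference:

#include <stdio.h>

int main(void)
{
        /* On i386, unsigned long is 32 bits: ~0UL << 35 is undefined. */
        printf("UL is %zu bits, ULL is %zu bits\n",
               sizeof(unsigned long) * 8, sizeof(unsigned long long) * 8);

        /* The 64-bit form always works: bits 0..34 set. */
        printf("mask = %#llx\n", ~(~0ULL << 35));
        return 0;
}
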
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 1deffe6cc873..023c442c33bb 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -959,7 +959,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
* normal page fault.
*/
regs->ip = (unsigned long)cur->addr;
+ /*
+ * Trap flag (TF) has been set here because this fault
+ * happened where the single stepping will be done.
+ * So clear it by resetting the current kprobe:
+ */
+ regs->flags &= ~X86_EFLAGS_TF;
+
+ /*
+ * If the TF flag was set before the kprobe hit,
+ * don't touch it:
+ */
regs->flags |= kcb->kprobe_old_flags;
+
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index f314e9b9660b..41e7943004fe 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6579,7 +6579,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,

/* Checks for #GP/#SS exceptions. */
exn = false;
- if (is_protmode(vcpu)) {
+ if (is_long_mode(vcpu)) {
+ /* Long mode: #GP(0)/#SS(0) if the memory address is in a
+ * non-canonical form. This is the only check on the memory
+ * destination for long mode!
+ */
+ exn = is_noncanonical_address(*ret);
+ } else if (is_protmode(vcpu)) {
/* Protected mode: apply checks for segment validity in the
* following order:
* - segment type check (#GP(0) may be thrown)
@@ -6596,17 +6602,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
* execute-only code segment
*/
exn = ((s.type & 0xa) == 8);
- }
- if (exn) {
- kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
- return 1;
- }
- if (is_long_mode(vcpu)) {
- /* Long mode: #GP(0)/#SS(0) if the memory address is in a
- * non-canonical form. This is an only check for long mode.
- */
- exn = is_noncanonical_address(*ret);
- } else if (is_protmode(vcpu)) {
+ if (exn) {
+ kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+ return 1;
+ }
/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
*/
exn = (s.unusable != 0);
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 961acc788f44..91a9e6af2ec4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
ata_scsi_port_error_handler(host, ap);

/* finish or retry handled scmd's and clean up */
- WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+ WARN_ON(!list_empty(&eh_work_q));

DPRINTK("EXIT\n");
}
diff --git a/drivers/base/module.c b/drivers/base/module.c
index db930d3ee312..2a215780eda2 100644
--- a/drivers/base/module.c
+++ b/drivers/base/module.c
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)

static void module_create_drivers_dir(struct module_kobject *mk)
{
- if (!mk || mk->drivers_dir)
- return;
+ static DEFINE_MUTEX(drivers_dir_mutex);

- mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_lock(&drivers_dir_mutex);
+ if (mk && !mk->drivers_dir)
+ mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+ mutex_unlock(&drivers_dir_mutex);
}

void module_add_driver(struct module *mod, struct device_driver *drv)
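module_create_drivers_dir() used an unlocked check-then-create: two devices of the same module probing in parallel could both observe drivers_dir as NULL and both call kobject_create_and_add(), and the static mutex serializes the test and the creation. A runnable userspace model of the pattern (build with -pthread; malloc stands in for kobject_create_and_add):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *drivers_dir;        /* stand-in for mk->drivers_dir */
static pthread_mutex_t dir_mutex = PTHREAD_MUTEX_INITIALIZER;
static int creations;

static void create_dir_once(void)
{
        pthread_mutex_lock(&dir_mutex);  /* without this, two threads can */
        if (!drivers_dir) {              /* both see NULL and both create */
                drivers_dir = malloc(1);
                creations++;
        }
        pthread_mutex_unlock(&dir_mutex);
}

static void *worker(void *arg)
{
        create_dir_once();
        return NULL;
}

int main(void)
{
        pthread_t t[8];

        for (int i = 0; i < 8; i++)
                pthread_create(&t[i], NULL, worker, NULL);
        for (int i = 0; i < 8; i++)
                pthread_join(t[i], NULL);

        printf("creations = %d (must be 1)\n", creations);
        return 0;
}
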
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e3536da05c88..a084a4751fa9 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3819,6 +3819,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
while (!list_empty(&intf->waiting_rcv_msgs)) {
smi_msg = list_entry(intf->waiting_rcv_msgs.next,
struct ipmi_smi_msg, link);
+ list_del(&smi_msg->link);
if (!run_to_completion)
spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
flags);
@@ -3828,11 +3829,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
if (rv > 0) {
/*
* To preserve message order, quit if we
- * can't handle a message.
+ * can't handle a message. Add the message
+ * back at the head, this is safe because this
+ * tasklet is the only thing that pulls the
+ * messages.
*/
+ list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
break;
} else {
- list_del(&smi_msg->link);
if (rv == 0)
/* Message handled */
ipmi_free_smi_msg(smi_msg);
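The message is now unlinked from waiting_rcv_msgs while the lock is held and, if the handler is still busy (rv > 0), re-added at the head rather than the tail, so delivery order is preserved; this is safe only because the tasklet is the sole consumer of the list. A toy single-threaded sketch of the head re-add keeping order:

#include <stdio.h>

/* Toy singly linked queue standing in for waiting_rcv_msgs. */
struct msg { int id; struct msg *next; };

static struct msg *pop(struct msg **q)
{
        struct msg *m = *q;
        if (m)
                *q = m->next;
        return m;
}

static void push_head(struct msg **q, struct msg *m)
{
        m->next = *q;
        *q = m;
}

int main(void)
{
        struct msg c = {3, NULL}, b = {2, &c}, a = {1, &b};
        struct msg *q = &a;

        struct msg *m = pop(&q);  /* unlink before processing        */
        int busy = 1;             /* pretend the handler is busy     */
        if (busy)
                push_head(&q, m); /* back at the head: order kept    */
                                  /* (a tail re-add would give 2 3 1) */
        for (m = q; m; m = m->next)
                printf("%d ", m->id);     /* prints: 1 2 3 */
        printf("\n");
        return 0;
}
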
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
index 9e9e196c6d51..45b5adaafa6f 100644
--- a/drivers/crypto/qat/qat_common/Makefile
+++ b/drivers/crypto/qat/qat_common/Makefile
@@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
$(obj)/qat_rsapubkey-asn1.h
$(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
$(obj)/qat_rsaprivkey-asn1.h
+$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h

clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
clean-files += qat_rsaprivkey-asn1.c qat_rsapvivkey-asn1.h
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 37649221f81c..ca64b174f8a3 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -218,8 +218,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};

-#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
-#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
+#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
+
+#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
+ GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))

/* Device 16, functions 2-7 */

@@ -1175,14 +1178,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
pci_read_config_dword(pvt->pci_tad[i],
rir_offset[j][k],
&reg);
- tmp_mb = RIR_OFFSET(reg) << 6;
+ tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;

gb = div_u64_rem(tmp_mb, 1024, &mb);
edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
i, j, k,
gb, (mb*1000)/1024,
((u64)tmp_mb) << 20L,
- (u32)RIR_RNK_TGT(reg),
+ (u32)RIR_RNK_TGT(pvt->info.type, reg),
reg);
}
}
@@ -1512,7 +1515,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
rir_offset[n_rir][idx],
&reg);
- *rank = RIR_RNK_TGT(reg);
+ *rank = RIR_RNK_TGT(pvt->info.type, reg);

edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
n_rir,
diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
index 3a5c7011ad3b..8b830996fe02 100644
--- a/drivers/gpio/gpiolib-legacy.c
+++ b/drivers/gpio/gpiolib-legacy.c
@@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (!desc && gpio_is_valid(gpio))
return -EPROBE_DEFER;

+ err = gpiod_request(desc, label);
+ if (err)
+ return err;
+
if (flags & GPIOF_OPEN_DRAIN)
set_bit(FLAG_OPEN_DRAIN, &desc->flags);

@@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
if (flags & GPIOF_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);

- err = gpiod_request(desc, label);
- if (err)
- return err;
-
if (flags & GPIOF_DIR_IN)
err = gpiod_direction_input(desc);
else
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 4e4c3083ae56..06d345b087f8 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -927,14 +927,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
spin_lock_irqsave(&gpio_lock, flags);
}
done:
- if (status < 0) {
- /* Clear flags that might have been set by the caller before
- * requesting the GPIO.
- */
- clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
- clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
- clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
- }
spin_unlock_irqrestore(&gpio_lock, flags);
return status;
}
@@ -2062,28 +2054,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
}
EXPORT_SYMBOL_GPL(gpiod_get_optional);

-/**
- * gpiod_parse_flags - helper function to parse GPIO lookup flags
- * @desc: gpio to be setup
- * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
- * of_get_gpio_hog()
- *
- * Set the GPIO descriptor flags based on the given GPIO lookup flags.
- */
-static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
-{
- if (lflags & GPIO_ACTIVE_LOW)
- set_bit(FLAG_ACTIVE_LOW, &desc->flags);
- if (lflags & GPIO_OPEN_DRAIN)
- set_bit(FLAG_OPEN_DRAIN, &desc->flags);
- if (lflags & GPIO_OPEN_SOURCE)
- set_bit(FLAG_OPEN_SOURCE, &desc->flags);
-}

/**
* gpiod_configure_flags - helper function to configure a given GPIO
* @desc: gpio whose value will be assigned
* @con_id: function within the GPIO consumer
+ * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
+ * of_get_gpio_hog()
* @dflags: gpiod_flags - optional GPIO initialization flags
*
* Return 0 on success, -ENOENT if no GPIO has been assigned to the
@@ -2091,10 +2068,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
* occurred while trying to acquire the GPIO.
*/
static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
- enum gpiod_flags dflags)
+ unsigned long lflags, enum gpiod_flags dflags)
{
int status;

+ if (lflags & GPIO_ACTIVE_LOW)
+ set_bit(FLAG_ACTIVE_LOW, &desc->flags);
+ if (lflags & GPIO_OPEN_DRAIN)
+ set_bit(FLAG_OPEN_DRAIN, &desc->flags);
+ if (lflags & GPIO_OPEN_SOURCE)
+ set_bit(FLAG_OPEN_SOURCE, &desc->flags);
+
/* No particular flag request, return here... */
if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
pr_debug("no flags found for %s\n", con_id);
@@ -2161,13 +2145,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
return desc;
}

- gpiod_parse_flags(desc, lookupflags);
-
status = gpiod_request(desc, con_id);
if (status < 0)
return ERR_PTR(status);

- status = gpiod_configure_flags(desc, con_id, flags);
+ status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
if (status < 0) {
dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
gpiod_put(desc);
@@ -2223,6 +2205,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
if (IS_ERR(desc))
return desc;

+ ret = gpiod_request(desc, NULL);
+ if (ret)
+ return ERR_PTR(ret);
+
if (active_low)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);

@@ -2233,10 +2219,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
set_bit(FLAG_OPEN_SOURCE, &desc->flags);
}

- ret = gpiod_request(desc, NULL);
- if (ret)
- return ERR_PTR(ret);
-
return desc;
}
EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
@@ -2289,8 +2271,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
chip = gpiod_to_chip(desc);
hwnum = gpio_chip_hwgpio(desc);

- gpiod_parse_flags(desc, lflags);
-
local_desc = gpiochip_request_own_desc(chip, hwnum, name);
if (IS_ERR(local_desc)) {
pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
@@ -2298,7 +2278,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
return PTR_ERR(local_desc);
}

- status = gpiod_configure_flags(desc, name, dflags);
+ status = gpiod_configure_flags(desc, name, lflags, dflags);
if (status < 0) {
pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
name, chip->label, hwnum);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 946300764609..b57fffc2d4af 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -5463,7 +5463,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
ring = &adev->gfx.compute_ring[i];
- if ((ring->me == me_id) & (ring->pipe == pipe_id))
+ if ((ring->me == me_id) && (ring->pipe == pipe_id))
amdgpu_fence_process(ring);
}
break;
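The gfx_v7_0 change swaps a bitwise '&' for the intended logical '&&'. For the 0/1 results of '==' the two happen to coincide, which is why the bug was harmless here, but bitwise AND on general integers tests overlapping bits rather than truth. A tiny demo of the hazard class:

#include <stdio.h>

int main(void)
{
        printf("%d\n", 2 & 1);    /* 0: the bit patterns share no bits */
        printf("%d\n", 2 && 1);   /* 1: both values are truthy         */

        int me_id = 1, pipe_id = 2;
        /* With == results (0 or 1) the two forms agree, by luck: */
        printf("%d %d\n", (me_id == 1) & (pipe_id == 2),
                          (me_id == 1) && (pipe_id == 2));
        return 0;
}
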
966     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
967     index 9be007081b72..eb1da83c9902 100644
968     --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
969     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
970     @@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
971     pqm_uninit(&p->pqm);
972    
973     /* Iterate over all process device data structure and check
974     - * if we should reset all wavefronts */
975     - list_for_each_entry(pdd, &p->per_device_data, per_device_list)
976     + * if we should delete debug managers and reset all wavefronts
977     + */
978     + list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
979     + if ((pdd->dev->dbgmgr) &&
980     + (pdd->dev->dbgmgr->pasid == p->pasid))
981     + kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
982     +
983     if (pdd->reset_wavefronts) {
984     pr_warn("amdkfd: Resetting all wave fronts\n");
985     dbgdev_wave_reset_wavefronts(pdd->dev, p);
986     pdd->reset_wavefronts = false;
987     }
988     + }
989    
990     mutex_unlock(&p->mutex);
991    
992     @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
993    
994     idx = srcu_read_lock(&kfd_processes_srcu);
995    
996     + /*
997     + * Look for the process that matches the pasid. If there is no such
998     + * process, we either released it in amdkfd's own notifier, or there
999     + * is a bug. Unfortunately, there is no way to tell...
1000     + */
1001     hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
1002     - if (p->pasid == pasid)
1003     - break;
1004     + if (p->pasid == pasid) {
1005    
1006     - srcu_read_unlock(&kfd_processes_srcu, idx);
1007     + srcu_read_unlock(&kfd_processes_srcu, idx);
1008    
1009     - BUG_ON(p->pasid != pasid);
1010     + pr_debug("Unbinding process %d from IOMMU\n", pasid);
1011    
1012     - mutex_lock(&p->mutex);
1013     + mutex_lock(&p->mutex);
1014    
1015     - if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1016     - kfd_dbgmgr_destroy(dev->dbgmgr);
1017     + if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1018     + kfd_dbgmgr_destroy(dev->dbgmgr);
1019    
1020     - pqm_uninit(&p->pqm);
1021     + pqm_uninit(&p->pqm);
1022    
1023     - pdd = kfd_get_process_device_data(dev, p);
1024     + pdd = kfd_get_process_device_data(dev, p);
1025    
1026     - if (!pdd) {
1027     - mutex_unlock(&p->mutex);
1028     - return;
1029     - }
1030     + if (!pdd) {
1031     + mutex_unlock(&p->mutex);
1032     + return;
1033     + }
1034    
1035     - if (pdd->reset_wavefronts) {
1036     - dbgdev_wave_reset_wavefronts(pdd->dev, p);
1037     - pdd->reset_wavefronts = false;
1038     - }
1039     + if (pdd->reset_wavefronts) {
1040     + dbgdev_wave_reset_wavefronts(pdd->dev, p);
1041     + pdd->reset_wavefronts = false;
1042     + }
1043    
1044     - /*
1045     - * Just mark pdd as unbound, because we still need it to call
1046     - * amd_iommu_unbind_pasid() in when the process exits.
1047     - * We don't call amd_iommu_unbind_pasid() here
1048     - * because the IOMMU called us.
1049     - */
1050     - pdd->bound = false;
1051     + /*
1052     + * Just mark pdd as unbound, because we still need it
1053     + * to call amd_iommu_unbind_pasid() in when the
1054     + * process exits.
1055     + * We don't call amd_iommu_unbind_pasid() here
1056     + * because the IOMMU called us.
1057     + */
1058     + pdd->bound = false;
1059    
1060     - mutex_unlock(&p->mutex);
1061     + mutex_unlock(&p->mutex);
1062     +
1063     + return;
1064     + }
1065     +
1066     + srcu_read_unlock(&kfd_processes_srcu, idx);
1067     }
1068    
1069     struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
1070     diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1071     index d0299aed517e..59d1269626b1 100644
1072     --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1073     +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1074     @@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
1075    
1076     atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
1077     factor_reg);
1078     + } else {
1079     + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
1080     }
1081     }
1082    
1083     diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
1084     index aed2e3f8a1a2..6253775b8d9c 100644
1085     --- a/drivers/gpu/drm/drm_atomic.c
1086     +++ b/drivers/gpu/drm/drm_atomic.c
1087     @@ -367,6 +367,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1088     drm_property_unreference_blob(state->mode_blob);
1089     state->mode_blob = NULL;
1090    
1091     + memset(&state->mode, 0, sizeof(state->mode));
1092     +
1093     if (blob) {
1094     if (blob->length != sizeof(struct drm_mode_modeinfo) ||
1095     drm_mode_convert_umode(&state->mode,
1096     @@ -379,7 +381,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1097     DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
1098     state->mode.name, state);
1099     } else {
1100     - memset(&state->mode, 0, sizeof(state->mode));
1101     state->enable = false;
1102     DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
1103     state);
1104     diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1105     index a02238c85e18..dc84003f694e 100644
1106     --- a/drivers/gpu/drm/drm_crtc.c
1107     +++ b/drivers/gpu/drm/drm_crtc.c
1108     @@ -2682,8 +2682,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1109     goto out;
1110     }
1111    
1112     - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1113     -
1114     /*
1115     * Check whether the primary plane supports the fb pixel format.
1116     * Drivers not implementing the universal planes API use a
1117     diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1118     index d268bf18a662..2485fb652716 100644
1119     --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1120     +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1121     @@ -2874,11 +2874,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
1122     drm_dp_port_teardown_pdt(port, port->pdt);
1123    
1124     if (!port->input && port->vcpi.vcpi > 0) {
1125     - if (mgr->mst_state) {
1126     - drm_dp_mst_reset_vcpi_slots(mgr, port);
1127     - drm_dp_update_payload_part1(mgr);
1128     - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1129     - }
1130     + drm_dp_mst_reset_vcpi_slots(mgr, port);
1131     + drm_dp_update_payload_part1(mgr);
1132     + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1133     }
1134    
1135     kref_put(&port->kref, drm_dp_free_mst_port);
1136     diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
1137     index cd74a0953f42..39e30abddf08 100644
1138     --- a/drivers/gpu/drm/drm_modes.c
1139     +++ b/drivers/gpu/drm/drm_modes.c
1140     @@ -1487,6 +1487,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
1141     if (out->status != MODE_OK)
1142     goto out;
1143    
1144     + drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
1145     +
1146     ret = 0;
1147    
1148     out:
1149     diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1150     index f7df54a8ee2b..c0a96f1ee18e 100644
1151     --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
1152     +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1153     @@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
1154     if (!mutex_is_locked(mutex))
1155     return false;
1156    
1157     -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
1158     +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
1159     return mutex->owner == task;
1160     #else
1161     /* Since UP may be pre-empted, we cannot assume that we own the lock */
1162     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1163     index 7e461dca564c..9ed9f6dde86f 100644
1164     --- a/drivers/gpu/drm/i915/i915_reg.h
1165     +++ b/drivers/gpu/drm/i915/i915_reg.h
1166     @@ -7357,6 +7357,8 @@ enum skl_disp_power_wells {
1167     #define TRANS_CLK_SEL_DISABLED (0x0<<29)
1168     #define TRANS_CLK_SEL_PORT(x) (((x)+1)<<29)
1169    
1170     +#define CDCLK_FREQ 0x46200
1171     +
1172     #define TRANSA_MSA_MISC 0x60410
1173     #define TRANSB_MSA_MISC 0x61410
1174     #define TRANSC_MSA_MISC 0x62410
1175     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1176     index afa81691163d..c41bc42b6fa7 100644
1177     --- a/drivers/gpu/drm/i915/intel_display.c
1178     +++ b/drivers/gpu/drm/i915/intel_display.c
1179     @@ -8228,12 +8228,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1180     {
1181     struct drm_i915_private *dev_priv = dev->dev_private;
1182     struct intel_encoder *encoder;
1183     + int i;
1184     u32 val, final;
1185     bool has_lvds = false;
1186     bool has_cpu_edp = false;
1187     bool has_panel = false;
1188     bool has_ck505 = false;
1189     bool can_ssc = false;
1190     + bool using_ssc_source = false;
1191    
1192     /* We need to take the global config into account */
1193     for_each_intel_encoder(dev, encoder) {
1194     @@ -8260,8 +8262,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1195     can_ssc = true;
1196     }
1197    
1198     - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
1199     - has_panel, has_lvds, has_ck505);
1200     + /* Check if any DPLLs are using the SSC source */
1201     + for (i = 0; i < dev_priv->num_shared_dpll; i++) {
1202     + u32 temp = I915_READ(PCH_DPLL(i));
1203     +
1204     + if (!(temp & DPLL_VCO_ENABLE))
1205     + continue;
1206     +
1207     + if ((temp & PLL_REF_INPUT_MASK) ==
1208     + PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1209     + using_ssc_source = true;
1210     + break;
1211     + }
1212     + }
1213     +
1214     + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
1215     + has_panel, has_lvds, has_ck505, using_ssc_source);
1216    
1217     /* Ironlake: try to setup display ref clock before DPLL
1218     * enabling. This is only under driver's control after
1219     @@ -8298,9 +8314,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1220     final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
1221     } else
1222     final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1223     - } else {
1224     - final |= DREF_SSC_SOURCE_DISABLE;
1225     - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1226     + } else if (using_ssc_source) {
1227     + final |= DREF_SSC_SOURCE_ENABLE;
1228     + final |= DREF_SSC1_ENABLE;
1229     }
1230    
1231     if (final == val)
1232     @@ -8346,7 +8362,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1233     POSTING_READ(PCH_DREF_CONTROL);
1234     udelay(200);
1235     } else {
1236     - DRM_DEBUG_KMS("Disabling SSC entirely\n");
1237     + DRM_DEBUG_KMS("Disabling CPU source output\n");
1238    
1239     val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1240    
1241     @@ -8357,16 +8373,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1242     POSTING_READ(PCH_DREF_CONTROL);
1243     udelay(200);
1244    
1245     - /* Turn off the SSC source */
1246     - val &= ~DREF_SSC_SOURCE_MASK;
1247     - val |= DREF_SSC_SOURCE_DISABLE;
1248     + if (!using_ssc_source) {
1249     + DRM_DEBUG_KMS("Disabling SSC source\n");
1250    
1251     - /* Turn off SSC1 */
1252     - val &= ~DREF_SSC1_ENABLE;
1253     + /* Turn off the SSC source */
1254     + val &= ~DREF_SSC_SOURCE_MASK;
1255     + val |= DREF_SSC_SOURCE_DISABLE;
1256    
1257     - I915_WRITE(PCH_DREF_CONTROL, val);
1258     - POSTING_READ(PCH_DREF_CONTROL);
1259     - udelay(200);
1260     + /* Turn off SSC1 */
1261     + val &= ~DREF_SSC1_ENABLE;
1262     +
1263     + I915_WRITE(PCH_DREF_CONTROL, val);
1264     + POSTING_READ(PCH_DREF_CONTROL);
1265     + udelay(200);
1266     + }
1267     }
1268    
1269     BUG_ON(val != final);
1270     @@ -9669,6 +9689,8 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
1271     sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
1272     mutex_unlock(&dev_priv->rps.hw_lock);
1273    
1274     + I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
1275     +
1276     intel_update_cdclk(dev);
1277    
1278     WARN(cdclk != dev_priv->cdclk_freq,
1279     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1280     index e55a82a99e7f..8e1d6d74c203 100644
1281     --- a/drivers/gpu/drm/i915/intel_dp.c
1282     +++ b/drivers/gpu/drm/i915/intel_dp.c
1283     @@ -3628,8 +3628,7 @@ static bool
1284     intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
1285     uint8_t dp_train_pat)
1286     {
1287     - if (!intel_dp->train_set_valid)
1288     - memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1289     + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1290     intel_dp_set_signal_levels(intel_dp, DP);
1291     return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
1292     }
1293     @@ -3746,22 +3745,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
1294     break;
1295     }
1296    
1297     - /*
1298     - * if we used previously trained voltage and pre-emphasis values
1299     - * and we don't get clock recovery, reset link training values
1300     - */
1301     - if (intel_dp->train_set_valid) {
1302     - DRM_DEBUG_KMS("clock recovery not ok, reset");
1303     - /* clear the flag as we are not reusing train set */
1304     - intel_dp->train_set_valid = false;
1305     - if (!intel_dp_reset_link_train(intel_dp, &DP,
1306     - DP_TRAINING_PATTERN_1 |
1307     - DP_LINK_SCRAMBLING_DISABLE)) {
1308     - DRM_ERROR("failed to enable link training\n");
1309     - return;
1310     - }
1311     - continue;
1312     - }
1313    
1314     /* Check to see if we've tried the max voltage */
1315     for (i = 0; i < intel_dp->lane_count; i++)
1316     @@ -3854,7 +3837,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1317     /* Make sure clock is still ok */
1318     if (!drm_dp_clock_recovery_ok(link_status,
1319     intel_dp->lane_count)) {
1320     - intel_dp->train_set_valid = false;
1321     intel_dp_link_training_clock_recovery(intel_dp);
1322     intel_dp_set_link_train(intel_dp, &DP,
1323     training_pattern |
1324     @@ -3871,7 +3853,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1325    
1326     /* Try 5 times, then try clock recovery if that fails */
1327     if (tries > 5) {
1328     - intel_dp->train_set_valid = false;
1329     intel_dp_link_training_clock_recovery(intel_dp);
1330     intel_dp_set_link_train(intel_dp, &DP,
1331     training_pattern |
1332     @@ -3893,10 +3874,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1333    
1334     intel_dp->DP = DP;
1335    
1336     - if (channel_eq) {
1337     - intel_dp->train_set_valid = true;
1338     + if (channel_eq)
1339     DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
1340     - }
1341     }
1342    
1343     void intel_dp_stop_link_train(struct intel_dp *intel_dp)
1344     @@ -5079,13 +5058,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
1345    
1346     void intel_dp_encoder_reset(struct drm_encoder *encoder)
1347     {
1348     - struct intel_dp *intel_dp;
1349     + struct drm_i915_private *dev_priv = to_i915(encoder->dev);
1350     + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1351     +
1352     + if (!HAS_DDI(dev_priv))
1353     + intel_dp->DP = I915_READ(intel_dp->output_reg);
1354    
1355     if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
1356     return;
1357    
1358     - intel_dp = enc_to_intel_dp(encoder);
1359     -
1360     pps_lock(intel_dp);
1361    
1362     /*
1363     @@ -5157,9 +5138,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
1364     intel_display_power_get(dev_priv, power_domain);
1365    
1366     if (long_hpd) {
1367     - /* indicate that we need to restart link training */
1368     - intel_dp->train_set_valid = false;
1369     -
1370     if (!intel_digital_port_connected(dev_priv, intel_dig_port))
1371     goto mst_fail;
1372    
1373     diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1374     index f34a219ec5c4..c5f11e0c5d5b 100644
1375     --- a/drivers/gpu/drm/i915/intel_drv.h
1376     +++ b/drivers/gpu/drm/i915/intel_drv.h
1377     @@ -783,7 +783,6 @@ struct intel_dp {
1378     bool has_aux_irq,
1379     int send_bytes,
1380     uint32_t aux_clock_divider);
1381     - bool train_set_valid;
1382    
1383     /* Displayport compliance testing */
1384     unsigned long compliance_test_type;
1385     diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
1386     index c99d3fe12881..e5bb40e58020 100644
1387     --- a/drivers/gpu/drm/mgag200/mgag200_mode.c
1388     +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
1389     @@ -194,7 +194,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1390     }
1391     }
1392    
1393     - fvv = pllreffreq * testn / testm;
1394     + fvv = pllreffreq * (n + 1) / (m + 1);
1395     fvv = (fvv - 800000) / 50000;
1396    
1397     if (fvv > 15)
1398     @@ -214,6 +214,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1399     WREG_DAC(MGA1064_PIX_PLLC_M, m);
1400     WREG_DAC(MGA1064_PIX_PLLC_N, n);
1401     WREG_DAC(MGA1064_PIX_PLLC_P, p);
1402     +
1403     + if (mdev->unique_rev_id >= 0x04) {
1404     + WREG_DAC(0x1a, 0x09);
1405     + msleep(20);
1406     + WREG_DAC(0x1a, 0x01);
1407     +
1408     + }
1409     +
1410     return 0;
1411     }
1412    
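The fvv fix above matters because testm/testn hold whatever divider pair the search loop tried last, not the pair that was actually selected; deriving the VCO feedback value from the chosen (m, n) reflects what the hardware will really run. A minimal userspace sketch of the corrected arithmetic (the 25000 kHz reference and the sample dividers are illustrative assumptions, not values taken from the patch):

    #include <stdio.h>

    /* Sketch: derive the 4-bit fvv field from the chosen dividers (m, n).
     * The register fields store divider-1, hence the +1 on each. */
    static long fvv_field(long pllreffreq_khz, unsigned m, unsigned n)
    {
        long fvv = pllreffreq_khz * (n + 1) / (m + 1);  /* VCO feedback, kHz */

        fvv = (fvv - 800000) / 50000;   /* map the 800 MHz+ range into 0..15 */
        return fvv > 15 ? 15 : fvv;
    }

    int main(void)
    {
        /* 25 MHz reference and dividers are assumptions for this sketch */
        printf("fvv = %ld\n", fvv_field(25000, 1, 100));
        return 0;
    }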
1413     diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1414     index 59f27e774acb..e40a1b07a014 100644
1415     --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1416     +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1417     @@ -557,6 +557,8 @@ nouveau_fbcon_init(struct drm_device *dev)
1418     if (ret)
1419     goto fini;
1420    
1421     + if (fbcon->helper.fbdev)
1422     + fbcon->helper.fbdev->pixmap.buf_align = 4;
1423     return 0;
1424    
1425     fini:
1426     diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1427     index 789dc2993b0d..8f715feadf56 100644
1428     --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
1429     +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1430     @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1431     uint32_t fg;
1432     uint32_t bg;
1433     uint32_t dsize;
1434     - uint32_t width;
1435     uint32_t *data = (uint32_t *)image->data;
1436     int ret;
1437    
1438     @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1439     if (ret)
1440     return ret;
1441    
1442     - width = ALIGN(image->width, 8);
1443     - dsize = ALIGN(width * image->height, 32) >> 5;
1444     -
1445     if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
1446     info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
1447     fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
1448     @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1449     ((image->dx + image->width) & 0xffff));
1450     OUT_RING(chan, bg);
1451     OUT_RING(chan, fg);
1452     - OUT_RING(chan, (image->height << 16) | width);
1453     + OUT_RING(chan, (image->height << 16) | image->width);
1454     OUT_RING(chan, (image->height << 16) | image->width);
1455     OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
1456    
1457     + dsize = ALIGN(image->width * image->height, 32) >> 5;
1458     while (dsize) {
1459     int iter_len = dsize > 128 ? 128 : dsize;
1460    
1461     diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
1462     index e05499d6ed83..a4e259a00430 100644
1463     --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
1464     +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
1465     @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1466     struct nouveau_fbdev *nfbdev = info->par;
1467     struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
1468     struct nouveau_channel *chan = drm->channel;
1469     - uint32_t width, dwords, *data = (uint32_t *)image->data;
1470     + uint32_t dwords, *data = (uint32_t *)image->data;
1471     uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
1472     uint32_t *palette = info->pseudo_palette;
1473     int ret;
1474     @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1475     if (ret)
1476     return ret;
1477    
1478     - width = ALIGN(image->width, 32);
1479     - dwords = (width * image->height) >> 5;
1480     -
1481     BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
1482     if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
1483     info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
1484     @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1485     OUT_RING(chan, 0);
1486     OUT_RING(chan, image->dy);
1487    
1488     + dwords = ALIGN(image->width * image->height, 32) >> 5;
1489     while (dwords) {
1490     int push = dwords > 2047 ? 2047 : dwords;
1491    
1492     diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
1493     index c97395b4a312..f28315e865a5 100644
1494     --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
1495     +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
1496     @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1497     struct nouveau_fbdev *nfbdev = info->par;
1498     struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
1499     struct nouveau_channel *chan = drm->channel;
1500     - uint32_t width, dwords, *data = (uint32_t *)image->data;
1501     + uint32_t dwords, *data = (uint32_t *)image->data;
1502     uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
1503     uint32_t *palette = info->pseudo_palette;
1504     int ret;
1505     @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1506     if (ret)
1507     return ret;
1508    
1509     - width = ALIGN(image->width, 32);
1510     - dwords = (width * image->height) >> 5;
1511     -
1512     BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
1513     if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
1514     info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
1515     @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1516     OUT_RING (chan, 0);
1517     OUT_RING (chan, image->dy);
1518    
1519     + dwords = ALIGN(image->width * image->height, 32) >> 5;
1520     while (dwords) {
1521     int push = dwords > 2047 ? 2047 : dwords;
1522    
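The three nouveau hunks above share one fix: the number of 32-bit words pushed for a 1-bpp glyph image is now derived by rounding the total bit count (width * height) up to a word, instead of padding the width first (to 32 bits in the nv50/nvc0 variants), which over-counted and made the FIFO loop push data from past the end of the image. A standalone sketch of the difference:

    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned w = 10, h = 20;    /* 200 bits of 1-bpp image data */

        /* old: pad the width to 32 bits first, then count words */
        printf("old: %u dwords\n", (ALIGN_UP(w, 32) * h) >> 5);   /* 20 */
        /* new: pad the total bit count once at the end */
        printf("new: %u dwords\n", ALIGN_UP(w * h, 32) >> 5);     /*  7 */
        return 0;
    }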
1523     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
1524     index b4b41b135643..2aaf0dd19a55 100644
1525     --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
1526     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
1527     @@ -40,8 +40,8 @@ static int
1528     gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
1529     {
1530     struct nvkm_device *device = outp->base.disp->engine.subdev.device;
1531     - const u32 loff = gf119_sor_loff(outp);
1532     - nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
1533     + const u32 soff = gf119_sor_soff(outp);
1534     + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
1535     return 0;
1536     }
1537    
1538     diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1539     index 36655a74c538..eeeea1c2ca23 100644
1540     --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1541     +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
1542     @@ -874,22 +874,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
1543     }
1544    
1545     static const struct nvkm_enum gf100_mp_warp_error[] = {
1546     - { 0x00, "NO_ERROR" },
1547     - { 0x01, "STACK_MISMATCH" },
1548     + { 0x01, "STACK_ERROR" },
1549     + { 0x02, "API_STACK_ERROR" },
1550     + { 0x03, "RET_EMPTY_STACK_ERROR" },
1551     + { 0x04, "PC_WRAP" },
1552     { 0x05, "MISALIGNED_PC" },
1553     - { 0x08, "MISALIGNED_GPR" },
1554     - { 0x09, "INVALID_OPCODE" },
1555     - { 0x0d, "GPR_OUT_OF_BOUNDS" },
1556     - { 0x0e, "MEM_OUT_OF_BOUNDS" },
1557     - { 0x0f, "UNALIGNED_MEM_ACCESS" },
1558     + { 0x06, "PC_OVERFLOW" },
1559     + { 0x07, "MISALIGNED_IMMC_ADDR" },
1560     + { 0x08, "MISALIGNED_REG" },
1561     + { 0x09, "ILLEGAL_INSTR_ENCODING" },
1562     + { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
1563     + { 0x0b, "ILLEGAL_INSTR_PARAM" },
1564     + { 0x0c, "INVALID_CONST_ADDR" },
1565     + { 0x0d, "OOR_REG" },
1566     + { 0x0e, "OOR_ADDR" },
1567     + { 0x0f, "MISALIGNED_ADDR" },
1568     { 0x10, "INVALID_ADDR_SPACE" },
1569     - { 0x11, "INVALID_PARAM" },
1570     + { 0x11, "ILLEGAL_INSTR_PARAM2" },
1571     + { 0x12, "INVALID_CONST_ADDR_LDC" },
1572     + { 0x13, "GEOMETRY_SM_ERROR" },
1573     + { 0x14, "DIVERGENT" },
1574     + { 0x15, "WARP_EXIT" },
1575     {}
1576     };
1577    
1578     static const struct nvkm_bitfield gf100_mp_global_error[] = {
1579     + { 0x00000001, "SM_TO_SM_FAULT" },
1580     + { 0x00000002, "L1_ERROR" },
1581     { 0x00000004, "MULTIPLE_WARP_ERRORS" },
1582     - { 0x00000008, "OUT_OF_STACK_SPACE" },
1583     + { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
1584     + { 0x00000010, "BPT_INT" },
1585     + { 0x00000020, "BPT_PAUSE" },
1586     + { 0x00000040, "SINGLE_STEP_COMPLETE" },
1587     + { 0x20000000, "ECC_SEC_ERROR" },
1588     + { 0x40000000, "ECC_DED_ERROR" },
1589     + { 0x80000000, "TIMEOUT" },
1590     {}
1591     };
1592    
1593     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1594     index c566993a2ec3..e2dd5d19c32c 100644
1595     --- a/drivers/gpu/drm/radeon/radeon_device.c
1596     +++ b/drivers/gpu/drm/radeon/radeon_device.c
1597     @@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
1598     /*
1599     * GPU helpers function.
1600     */
1601     +
1602     +/**
1603     + * radeon_device_is_virtual - check if we are running in a virtual environment
1604     + *
1605     + * Check if the asic has been passed through to a VM (all asics).
1606     + * Used at driver startup.
1607     + * Returns true if virtual or false if not.
1608     + */
1609     +static bool radeon_device_is_virtual(void)
1610     +{
1611     +#ifdef CONFIG_X86
1612     + return boot_cpu_has(X86_FEATURE_HYPERVISOR);
1613     +#else
1614     + return false;
1615     +#endif
1616     +}
1617     +
1618     /**
1619     * radeon_card_posted - check if the hw has already been initialized
1620     *
1621     @@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
1622     {
1623     uint32_t reg;
1624    
1625     + /* for pass through, always force asic_init */
1626     + if (radeon_device_is_virtual())
1627     + return false;
1628     +
1629     /* required for EFI mode on macbook2,1 which uses an r5xx asic */
1630     if (efi_enabled(EFI_BOOT) &&
1631     (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
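X86_FEATURE_HYPERVISOR, which radeon_device_is_virtual() tests, mirrors CPUID leaf 1, ECX bit 31, a bit hypervisors set for their guests; when it is set the driver re-posts the card rather than trusting whatever state the host left behind. A userspace sketch of the same detection (illustrative only, not driver code):

    #include <stdio.h>
    #include <cpuid.h>

    static int running_in_vm(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
            return 0;               /* CPUID leaf 1 unsupported */
        return (ecx >> 31) & 1;     /* hypervisor-present bit */
    }

    int main(void)
    {
        printf("virtual environment: %s\n", running_in_vm() ? "yes" : "no");
        return 0;
    }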
1632     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
1633     index 745e996d2dbc..4ae8b56b1847 100644
1634     --- a/drivers/gpu/drm/ttm/ttm_bo.c
1635     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
1636     @@ -1004,9 +1004,9 @@ out_unlock:
1637     return ret;
1638     }
1639    
1640     -static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1641     - struct ttm_mem_reg *mem,
1642     - uint32_t *new_flags)
1643     +bool ttm_bo_mem_compat(struct ttm_placement *placement,
1644     + struct ttm_mem_reg *mem,
1645     + uint32_t *new_flags)
1646     {
1647     int i;
1648    
1649     @@ -1038,6 +1038,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1650    
1651     return false;
1652     }
1653     +EXPORT_SYMBOL(ttm_bo_mem_compat);
1654    
1655     int ttm_bo_validate(struct ttm_buffer_object *bo,
1656     struct ttm_placement *placement,
1657     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
1658     index 299925a1f6c6..eadc981ee79a 100644
1659     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
1660     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
1661     @@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
1662     {
1663     struct ttm_buffer_object *bo = &buf->base;
1664     int ret;
1665     + uint32_t new_flags;
1666    
1667     ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1668     if (unlikely(ret != 0))
1669     @@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
1670     if (unlikely(ret != 0))
1671     goto err;
1672    
1673     - ret = ttm_bo_validate(bo, placement, interruptible, false);
1674     + if (buf->pin_count > 0)
1675     + ret = ttm_bo_mem_compat(placement, &bo->mem,
1676     + &new_flags) == true ? 0 : -EINVAL;
1677     + else
1678     + ret = ttm_bo_validate(bo, placement, interruptible, false);
1679     +
1680     if (!ret)
1681     vmw_bo_pin_reserved(buf, true);
1682    
1683     @@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
1684     {
1685     struct ttm_buffer_object *bo = &buf->base;
1686     int ret;
1687     + uint32_t new_flags;
1688    
1689     ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
1690     if (unlikely(ret != 0))
1691     @@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
1692     if (unlikely(ret != 0))
1693     goto err;
1694    
1695     + if (buf->pin_count > 0) {
1696     + ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
1697     + &new_flags) == true ? 0 : -EINVAL;
1698     + goto out_unreserve;
1699     + }
1700     +
1701     ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
1702     false);
1703     if (likely(ret == 0) || ret == -ERESTARTSYS)
1704     @@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
1705     struct ttm_placement placement;
1706     struct ttm_place place;
1707     int ret = 0;
1708     + uint32_t new_flags;
1709    
1710     place = vmw_vram_placement.placement[0];
1711     place.lpfn = bo->num_pages;
1712     @@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
1713     */
1714     if (bo->mem.mem_type == TTM_PL_VRAM &&
1715     bo->mem.start < bo->num_pages &&
1716     - bo->mem.start > 0)
1717     + bo->mem.start > 0 &&
1718     + buf->pin_count == 0)
1719     (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
1720    
1721     - ret = ttm_bo_validate(bo, &placement, interruptible, false);
1722     + if (buf->pin_count > 0)
1723     + ret = ttm_bo_mem_compat(&placement, &bo->mem,
1724     + &new_flags) == true ? 0 : -EINVAL;
1725     + else
1726     + ret = ttm_bo_validate(bo, &placement, interruptible, false);
1727    
1728     /* For some reason we didn't end up at the start of vram */
1729     WARN_ON(ret == 0 && bo->offset != 0);
1730     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1731     index 24fb348a44e1..f3f31f995878 100644
1732     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1733     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
1734     @@ -227,6 +227,7 @@ static int vmw_force_iommu;
1735     static int vmw_restrict_iommu;
1736     static int vmw_force_coherent;
1737     static int vmw_restrict_dma_mask;
1738     +static int vmw_assume_16bpp;
1739    
1740     static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
1741     static void vmw_master_init(struct vmw_master *);
1742     @@ -243,6 +244,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
1743     module_param_named(force_coherent, vmw_force_coherent, int, 0600);
1744     MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
1745     module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
1746     +MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
1747     +module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
1748    
1749    
1750     static void vmw_print_capabilities(uint32_t capabilities)
1751     @@ -652,6 +655,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
1752     dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
1753     dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
1754    
1755     + dev_priv->assume_16bpp = !!vmw_assume_16bpp;
1756     +
1757     dev_priv->enable_fb = enable_fbdev;
1758    
1759     vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
1760     @@ -698,6 +703,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
1761     vmw_read(dev_priv,
1762     SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
1763    
1764     + /*
1765     + * Workaround for low-memory 2D VMs to compensate for the
1766     + * allocation taken by fbdev
1767     + */
1768     + if (!(dev_priv->capabilities & SVGA_CAP_3D))
1769     + mem_size *= 2;
1770     +
1771     dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
1772     dev_priv->prim_bb_mem =
1773     vmw_read(dev_priv,
1774     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1775     index 469cdd520615..2e94fe27b3f6 100644
1776     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1777     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
1778     @@ -387,6 +387,7 @@ struct vmw_private {
1779     spinlock_t hw_lock;
1780     spinlock_t cap_lock;
1781     bool has_dx;
1782     + bool assume_16bpp;
1783    
1784     /*
1785     * VGA registers.
1786     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
1787     index 679a4cb98ee3..d2d93959b119 100644
1788     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
1789     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
1790     @@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
1791    
1792     par->set_fb = &vfb->base;
1793    
1794     - if (!par->bo_ptr) {
1795     - /*
1796     - * Pin before mapping. Since we don't know in what placement
1797     - * to pin, call into KMS to do it for us.
1798     - */
1799     - ret = vfb->pin(vfb);
1800     - if (ret) {
1801     - DRM_ERROR("Could not pin the fbdev framebuffer.\n");
1802     - return ret;
1803     - }
1804     -
1805     - ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
1806     - par->vmw_bo->base.num_pages, &par->map);
1807     - if (ret) {
1808     - vfb->unpin(vfb);
1809     - DRM_ERROR("Could not map the fbdev framebuffer.\n");
1810     - return ret;
1811     - }
1812     -
1813     - par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
1814     - }
1815     -
1816     return 0;
1817     }
1818    
1819     @@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
1820     if (ret)
1821     goto out_unlock;
1822    
1823     + if (!par->bo_ptr) {
1824     + struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
1825     +
1826     + /*
1827     + * Pin before mapping. Since we don't know in what placement
1828     + * to pin, call into KMS to do it for us.
1829     + */
1830     + ret = vfb->pin(vfb);
1831     + if (ret) {
1832     + DRM_ERROR("Could not pin the fbdev framebuffer.\n");
1833     + goto out_unlock;
1834     + }
1835     +
1836     + ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
1837     + par->vmw_bo->base.num_pages, &par->map);
1838     + if (ret) {
1839     + vfb->unpin(vfb);
1840     + DRM_ERROR("Could not map the fbdev framebuffer.\n");
1841     + goto out_unlock;
1842     + }
1843     +
1844     + par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
1845     + }
1846     +
1847     +
1848     vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
1849     par->set_fb->width, par->set_fb->height);
1850    
1851     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1852     index 7c2e118a77b0..060e5c6f4446 100644
1853     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1854     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
1855     @@ -1538,14 +1538,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1856     DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
1857     };
1858     int i;
1859     - u32 assumed_bpp = 2;
1860     + u32 assumed_bpp = 4;
1861    
1862     - /*
1863     - * If using screen objects, then assume 32-bpp because that's what the
1864     - * SVGA device is assuming
1865     - */
1866     - if (dev_priv->active_display_unit == vmw_du_screen_object)
1867     - assumed_bpp = 4;
1868     + if (dev_priv->assume_16bpp)
1869     + assumed_bpp = 2;
1870    
1871     if (dev_priv->active_display_unit == vmw_du_screen_target) {
1872     max_width = min(max_width, dev_priv->stdu_max_width);
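assumed_bpp feeds the mode filter: roughly, a display mode survives only if its framebuffer at the assumed depth fits the device's primary surface memory, so assuming 2 bpp advertised modes that a 32-bpp guest could not actually scan out. A toy illustration with a made-up 4 MiB limit (neither the limit nor the mode is a value from the driver):

    #include <stdio.h>

    int main(void)
    {
        unsigned long prim_mem = 4UL * 1024 * 1024;     /* assumed limit */
        unsigned long w = 1920, h = 1080;

        printf("at 4 bpp: %s\n", w * h * 4 <= prim_mem ? "kept" : "dropped");
        printf("at 2 bpp: %s\n", w * h * 2 <= prim_mem ? "kept" : "dropped");
        return 0;
    }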
1873     diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
1874     index aad8c162a825..0cd4f7216239 100644
1875     --- a/drivers/hid/hid-elo.c
1876     +++ b/drivers/hid/hid-elo.c
1877     @@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
1878     struct elo_priv *priv = hid_get_drvdata(hdev);
1879    
1880     hid_hw_stop(hdev);
1881     - flush_workqueue(wq);
1882     + cancel_delayed_work_sync(&priv->work);
1883     kfree(priv);
1884     }
1885    
1886     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1887     index c5ec4f915594..f62a9d6601cc 100644
1888     --- a/drivers/hid/hid-multitouch.c
1889     +++ b/drivers/hid/hid-multitouch.c
1890     @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
1891     #define MT_QUIRK_ALWAYS_VALID (1 << 4)
1892     #define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
1893     #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
1894     +#define MT_QUIRK_CONFIDENCE (1 << 7)
1895     #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
1896     #define MT_QUIRK_NO_AREA (1 << 9)
1897     #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
1898     @@ -78,6 +79,7 @@ struct mt_slot {
1899     __s32 contactid; /* the device ContactID assigned to this slot */
1900     bool touch_state; /* is the touch valid? */
1901     bool inrange_state; /* is the finger in proximity of the sensor? */
1902     + bool confidence_state; /* is the touch made by a finger? */
1903     };
1904    
1905     struct mt_class {
1906     @@ -502,6 +504,9 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
1907     mt_store_field(usage, td, hi);
1908     return 1;
1909     case HID_DG_CONFIDENCE:
1910     + if (cls->name == MT_CLS_WIN_8 &&
1911     + field->application == HID_DG_TOUCHPAD)
1912     + cls->quirks |= MT_QUIRK_CONFIDENCE;
1913     mt_store_field(usage, td, hi);
1914     return 1;
1915     case HID_DG_TIPSWITCH:
1916     @@ -614,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
1917     return;
1918    
1919     if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
1920     + int active;
1921     int slotnum = mt_compute_slot(td, input);
1922     struct mt_slot *s = &td->curdata;
1923     struct input_mt *mt = input->mt;
1924     @@ -628,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
1925     return;
1926     }
1927    
1928     + if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
1929     + s->confidence_state = 1;
1930     + active = (s->touch_state || s->inrange_state) &&
1931     + s->confidence_state;
1932     +
1933     input_mt_slot(input, slotnum);
1934     - input_mt_report_slot_state(input, MT_TOOL_FINGER,
1935     - s->touch_state || s->inrange_state);
1936     - if (s->touch_state || s->inrange_state) {
1937     + input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
1938     + if (active) {
1939     /* this finger is in proximity of the sensor */
1940     int wide = (s->w > s->h);
1941     /* divided by two to match visual scale of touch */
1942     @@ -696,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
1943     td->curdata.touch_state = value;
1944     break;
1945     case HID_DG_CONFIDENCE:
1946     + if (quirks & MT_QUIRK_CONFIDENCE)
1947     + td->curdata.confidence_state = value;
1948     if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
1949     td->curvalid = value;
1950     break;
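The net effect of MT_QUIRK_CONFIDENCE is palm rejection: a slot is reported as an active finger only when the contact is present (touching or in range) and the device is confident it is a finger. The predicate, as a trivial standalone sketch:

    #include <stdio.h>

    static int slot_active(int touch, int inrange, int confidence)
    {
        return (touch || inrange) && confidence;
    }

    int main(void)
    {
        printf("finger down, confident:   %d\n", slot_active(1, 0, 1)); /* 1 */
        printf("palm down, no confidence: %d\n", slot_active(1, 0, 0)); /* 0 */
        return 0;
    }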
1951     diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
1952     index 2f1ddca6f2e0..700145b15088 100644
1953     --- a/drivers/hid/usbhid/hiddev.c
1954     +++ b/drivers/hid/usbhid/hiddev.c
1955     @@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
1956     goto inval;
1957     } else if (uref->usage_index >= field->report_count)
1958     goto inval;
1959     -
1960     - else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
1961     - (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
1962     - uref->usage_index + uref_multi->num_values > field->report_count))
1963     - goto inval;
1964     }
1965    
1966     + if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
1967     + (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
1968     + uref->usage_index + uref_multi->num_values > field->report_count))
1969     + goto inval;
1970     +
1971     switch (cmd) {
1972     case HIDIOCGUSAGE:
1973     uref->value = field->value[uref->usage_index];
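The hiddev change hoists the multi-usage bounds check out of the else branch so it runs on every HIDIOCGUSAGES/HIDIOCSUSAGES path; before, one branch could reach the value copy with an unchecked num_values. The shape of the check, sketched standalone (MAX_MULTI is a stand-in for HID_MAX_MULTI_USAGES):

    #include <stdio.h>

    #define MAX_MULTI 1024  /* stand-in for HID_MAX_MULTI_USAGES */

    static int usages_in_bounds(unsigned index, unsigned count,
                                unsigned report_count)
    {
        return count <= MAX_MULTI && index <= report_count &&
               count <= report_count - index;  /* no overrun, no overflow */
    }

    int main(void)
    {
        printf("%d\n", usages_in_bounds(4, 10, 8));  /* 0: would overrun */
        printf("%d\n", usages_in_bounds(2, 4, 8));   /* 1: fits */
        return 0;
    }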
1974     diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
1975     index c43318d3416e..a9356a3dea92 100644
1976     --- a/drivers/hwmon/dell-smm-hwmon.c
1977     +++ b/drivers/hwmon/dell-smm-hwmon.c
1978     @@ -66,11 +66,13 @@
1979    
1980     static DEFINE_MUTEX(i8k_mutex);
1981     static char bios_version[4];
1982     +static char bios_machineid[16];
1983     static struct device *i8k_hwmon_dev;
1984     static u32 i8k_hwmon_flags;
1985     static uint i8k_fan_mult = I8K_FAN_MULT;
1986     static uint i8k_pwm_mult;
1987     static uint i8k_fan_max = I8K_FAN_HIGH;
1988     +static bool disallow_fan_type_call;
1989    
1990     #define I8K_HWMON_HAVE_TEMP1 (1 << 0)
1991     #define I8K_HWMON_HAVE_TEMP2 (1 << 1)
1992     @@ -94,13 +96,13 @@ module_param(ignore_dmi, bool, 0);
1993     MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
1994    
1995     #if IS_ENABLED(CONFIG_I8K)
1996     -static bool restricted;
1997     +static bool restricted = true;
1998     module_param(restricted, bool, 0);
1999     -MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
2000     +MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
2001    
2002     static bool power_status;
2003     module_param(power_status, bool, 0600);
2004     -MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
2005     +MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
2006     #endif
2007    
2008     static uint fan_mult;
2009     @@ -235,14 +237,28 @@ static int i8k_get_fan_speed(int fan)
2010     /*
2011     * Read the fan type.
2012     */
2013     -static int i8k_get_fan_type(int fan)
2014     +static int _i8k_get_fan_type(int fan)
2015     {
2016     struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
2017    
2018     + if (disallow_fan_type_call)
2019     + return -EINVAL;
2020     +
2021     regs.ebx = fan & 0xff;
2022     return i8k_smm(&regs) ? : regs.eax & 0xff;
2023     }
2024    
2025     +static int i8k_get_fan_type(int fan)
2026     +{
2027     + /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
2028     + static int types[2] = { INT_MIN, INT_MIN };
2029     +
2030     + if (types[fan] == INT_MIN)
2031     + types[fan] = _i8k_get_fan_type(fan);
2032     +
2033     + return types[fan];
2034     +}
2035     +
2036     /*
2037     * Read the fan nominal rpm for specific fan speed.
2038     */
2039     @@ -392,9 +408,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
2040     break;
2041    
2042     case I8K_MACHINE_ID:
2043     - memset(buff, 0, 16);
2044     - strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2045     - sizeof(buff));
2046     + if (restricted && !capable(CAP_SYS_ADMIN))
2047     + return -EPERM;
2048     +
2049     + memset(buff, 0, sizeof(buff));
2050     + strlcpy(buff, bios_machineid, sizeof(buff));
2051     break;
2052    
2053     case I8K_FN_STATUS:
2054     @@ -511,7 +529,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
2055     seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
2056     I8K_PROC_FMT,
2057     bios_version,
2058     - i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2059     + (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
2060     cpu_temp,
2061     left_fan, right_fan, left_speed, right_speed,
2062     ac_power, fn_key);
2063     @@ -718,6 +736,9 @@ static struct attribute *i8k_attrs[] = {
2064     static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
2065     int index)
2066     {
2067     + if (disallow_fan_type_call &&
2068     + (index == 9 || index == 12))
2069     + return 0;
2070     if (index >= 0 && index <= 1 &&
2071     !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
2072     return 0;
2073     @@ -767,13 +788,17 @@ static int __init i8k_init_hwmon(void)
2074     if (err >= 0)
2075     i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
2076    
2077     - /* First fan attributes, if fan type is OK */
2078     - err = i8k_get_fan_type(0);
2079     + /* First fan attributes, if fan status or type is OK */
2080     + err = i8k_get_fan_status(0);
2081     + if (err < 0)
2082     + err = i8k_get_fan_type(0);
2083     if (err >= 0)
2084     i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
2085    
2086     - /* Second fan attributes, if fan type is OK */
2087     - err = i8k_get_fan_type(1);
2088     + /* Second fan attributes, if fan status or type is OK */
2089     + err = i8k_get_fan_status(1);
2090     + if (err < 0)
2091     + err = i8k_get_fan_type(1);
2092     if (err >= 0)
2093     i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
2094    
2095     @@ -929,12 +954,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
2096    
2097     MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
2098    
2099     -static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
2100     +/*
2101     + * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
2102     + * starts going up and down randomly due to a bug in the Dell SMM or BIOS.
2103     + * Here is a blacklist of affected Dell machines for which we disallow the call.
2104     + * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
2105     + */
2106     +static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
2107     {
2108     - /*
2109     - * CPU fan speed going up and down on Dell Studio XPS 8000
2110     - * for unknown reasons.
2111     - */
2112     .ident = "Dell Studio XPS 8000",
2113     .matches = {
2114     DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2115     @@ -942,16 +969,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
2116     },
2117     },
2118     {
2119     - /*
2120     - * CPU fan speed going up and down on Dell Studio XPS 8100
2121     - * for unknown reasons.
2122     - */
2123     .ident = "Dell Studio XPS 8100",
2124     .matches = {
2125     DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2126     DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
2127     },
2128     },
2129     + {
2130     + .ident = "Dell Inspiron 580",
2131     + .matches = {
2132     + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2133     + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
2134     + },
2135     + },
2136     { }
2137     };
2138    
2139     @@ -966,8 +996,7 @@ static int __init i8k_probe(void)
2140     /*
2141     * Get DMI information
2142     */
2143     - if (!dmi_check_system(i8k_dmi_table) ||
2144     - dmi_check_system(i8k_blacklist_dmi_table)) {
2145     + if (!dmi_check_system(i8k_dmi_table)) {
2146     if (!ignore_dmi && !force)
2147     return -ENODEV;
2148    
2149     @@ -978,8 +1007,13 @@ static int __init i8k_probe(void)
2150     i8k_get_dmi_data(DMI_BIOS_VERSION));
2151     }
2152    
2153     + if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
2154     + disallow_fan_type_call = true;
2155     +
2156     strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
2157     sizeof(bios_version));
2158     + strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2159     + sizeof(bios_machineid));
2160    
2161     /*
2162     * Get SMM Dell signature
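The new i8k_get_fan_type() wrapper above is plain memoization: the expensive SMM call runs at most once per fan, with INT_MIN as the "not fetched yet" sentinel (note that even an error result is cached, which is intentional here). The pattern in isolation:

    #include <limits.h>
    #include <stdio.h>

    static int expensive_call(int fan)      /* stand-in for the SMM call */
    {
        printf("expensive_call(%d)\n", fan);
        return fan + 1;
    }

    static int cached_call(int fan)
    {
        static int cache[2] = { INT_MIN, INT_MIN };

        if (cache[fan] == INT_MIN)
            cache[fan] = expensive_call(fan);
        return cache[fan];
    }

    int main(void)
    {
        cached_call(0);
        cached_call(0);     /* served from the cache, no second call */
        return 0;
    }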
2163     diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
2164     index 923f56598d4b..3a9f106787d2 100644
2165     --- a/drivers/iio/accel/kxsd9.c
2166     +++ b/drivers/iio/accel/kxsd9.c
2167     @@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
2168    
2169     mutex_lock(&st->buf_lock);
2170     ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
2171     - if (ret)
2172     + if (ret < 0)
2173     goto error_ret;
2174     st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
2175     st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
2176     @@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
2177     break;
2178     case IIO_CHAN_INFO_SCALE:
2179     ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
2180     - if (ret)
2181     + if (ret < 0)
2182     goto error_ret;
2183     *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
2184     ret = IIO_VAL_INT_PLUS_MICRO;
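The kxsd9 change fixes a classic pattern error: spi_w8r8() returns the byte it read (0..255) on success and a negative errno on failure, so "if (ret)" treated every nonzero register value as an error. A sketch of the corrected check (fake_spi_w8r8 is a stand-in, not a real API):

    #include <stdio.h>

    static int fake_spi_w8r8(void)
    {
        return 0x5a;    /* success: the register byte that was read */
    }

    int main(void)
    {
        int ret = fake_spi_w8r8();

        if (ret < 0)    /* only negative values are errors */
            return 1;
        printf("ctrl reg = 0x%02x\n", ret);
        return 0;
    }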
2185     diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
2186     index 21e19b60e2b9..2123f0ac2e2a 100644
2187     --- a/drivers/iio/adc/ad7266.c
2188     +++ b/drivers/iio/adc/ad7266.c
2189     @@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
2190    
2191     st = iio_priv(indio_dev);
2192    
2193     - st->reg = devm_regulator_get(&spi->dev, "vref");
2194     - if (!IS_ERR_OR_NULL(st->reg)) {
2195     + st->reg = devm_regulator_get_optional(&spi->dev, "vref");
2196     + if (!IS_ERR(st->reg)) {
2197     ret = regulator_enable(st->reg);
2198     if (ret)
2199     return ret;
2200     @@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
2201    
2202     st->vref_mv = ret / 1000;
2203     } else {
2204     + /* Any other error indicates that the regulator does exist */
2205     + if (PTR_ERR(st->reg) != -ENODEV)
2206     + return PTR_ERR(st->reg);
2207     /* Use internal reference */
2208     st->vref_mv = 2500;
2209     }
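devm_regulator_get_optional() lets the probe distinguish "no vref wired up" (-ENODEV, fall back to the internal 2.5 V reference) from a real failure, which must still abort. The decision tree, sketched with a stand-in getter (userspace has no -EPROBE_DEFER, so -EAGAIN stands in for a real error):

    #include <stdio.h>
    #include <errno.h>

    static int get_optional_vref_mv(int present, int broken)  /* stand-in */
    {
        if (!present)
            return -ENODEV;     /* regulator simply not there */
        if (broken)
            return -EAGAIN;     /* real error: must propagate */
        return 2500;
    }

    int main(void)
    {
        int ret = get_optional_vref_mv(0, 0);

        if (ret == -ENODEV)
            printf("using internal 2500 mV reference\n");
        else if (ret < 0)
            printf("probe fails with %d\n", ret);
        else
            printf("external vref = %d mV\n", ret);
        return 0;
    }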
2210     diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
2211     index a7f61e881a49..dc5e7e70f951 100644
2212     --- a/drivers/iio/humidity/hdc100x.c
2213     +++ b/drivers/iio/humidity/hdc100x.c
2214     @@ -55,7 +55,7 @@ static const struct {
2215     },
2216     { /* IIO_HUMIDITYRELATIVE channel */
2217     .shift = 8,
2218     - .mask = 2,
2219     + .mask = 3,
2220     },
2221     };
2222    
2223     @@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
2224     dev_err(&client->dev, "cannot read high byte measurement");
2225     return ret;
2226     }
2227     - val = ret << 6;
2228     + val = ret << 8;
2229    
2230     ret = i2c_smbus_read_byte(client);
2231     if (ret < 0) {
2232     dev_err(&client->dev, "cannot read low byte measurement");
2233     return ret;
2234     }
2235     - val |= ret >> 2;
2236     + val |= ret;
2237    
2238     return val;
2239     }
2240     @@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
2241     return IIO_VAL_INT_PLUS_MICRO;
2242     case IIO_CHAN_INFO_SCALE:
2243     if (chan->type == IIO_TEMP) {
2244     - *val = 165;
2245     - *val2 = 65536 >> 2;
2246     + *val = 165000;
2247     + *val2 = 65536;
2248     return IIO_VAL_FRACTIONAL;
2249     } else {
2250     - *val = 0;
2251     - *val2 = 10000;
2252     - return IIO_VAL_INT_PLUS_MICRO;
2253     + *val = 100;
2254     + *val2 = 65536;
2255     + return IIO_VAL_FRACTIONAL;
2256     }
2257     break;
2258     case IIO_CHAN_INFO_OFFSET:
2259     - *val = -3971;
2260     - *val2 = 879096;
2261     + *val = -15887;
2262     + *val2 = 515151;
2263     return IIO_VAL_INT_PLUS_MICRO;
2264     default:
2265     return -EINVAL;
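The new hdc100x numbers follow from the device's transfer functions T = raw/2^16 * 165 - 40 (degrees C) and RH = raw/2^16 * 100 (%): a fractional scale of 165000/65536 milli-degrees per LSB plus an offset of -15887.515151 LSB makes userspace's (raw + offset) * scale land exactly on the datasheet formula. A quick numerical check:

    #include <stdio.h>

    int main(void)
    {
        int raw = 0x6666;                           /* example 16-bit reading */
        double scale = 165000.0 / 65536.0;          /* milli-deg C per LSB */
        double offset = -40.0 / (165.0 / 65536.0);  /* = -15887.515151 LSB */

        printf("iio:       %.1f mC\n", (raw + offset) * scale);
        printf("datasheet: %.1f mC\n", (raw / 65536.0 * 165.0 - 40.0) * 1000.0);
        return 0;
    }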
2266     diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
2267     index ae2806aafb72..0c52dfe64977 100644
2268     --- a/drivers/iio/industrialio-trigger.c
2269     +++ b/drivers/iio/industrialio-trigger.c
2270     @@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
2271    
2272     /* Prevent the module from being removed whilst attached to a trigger */
2273     __module_get(pf->indio_dev->info->driver_module);
2274     +
2275     + /* Get irq number */
2276     pf->irq = iio_trigger_get_irq(trig);
2277     + if (pf->irq < 0)
2278     + goto out_put_module;
2279     +
2280     + /* Request irq */
2281     ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
2282     pf->type, pf->name,
2283     pf);
2284     - if (ret < 0) {
2285     - module_put(pf->indio_dev->info->driver_module);
2286     - return ret;
2287     - }
2288     + if (ret < 0)
2289     + goto out_put_irq;
2290    
2291     + /* Enable trigger in driver */
2292     if (trig->ops && trig->ops->set_trigger_state && notinuse) {
2293     ret = trig->ops->set_trigger_state(trig, true);
2294     if (ret < 0)
2295     - module_put(pf->indio_dev->info->driver_module);
2296     + goto out_free_irq;
2297     }
2298    
2299     return ret;
2300     +
2301     +out_free_irq:
2302     + free_irq(pf->irq, pf);
2303     +out_put_irq:
2304     + iio_trigger_put_irq(trig, pf->irq);
2305     +out_put_module:
2306     + module_put(pf->indio_dev->info->driver_module);
2307     + return ret;
2308     }
2309    
2310     static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
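The reworked attach path is the standard kernel unwind ladder: acquire resources in order and, on any failure, jump to the label that releases everything obtained so far in reverse order (the old code leaked the module reference and the virtual irq on the later failure paths). The pattern in miniature, reusing the patch's label names:

    #include <stdio.h>

    static void get(const char *what) { printf("get %s\n", what); }
    static void put(const char *what) { printf("put %s\n", what); }

    static int attach(int fail_at)
    {
        get("module ref");
        if (fail_at == 1)
            goto out_put_module;
        get("irq number");
        if (fail_at == 2)
            goto out_put_irq;
        get("irq handler");
        if (fail_at == 3)
            goto out_free_irq;
        return 0;

    out_free_irq:
        put("irq handler");
    out_put_irq:
        put("irq number");
    out_put_module:
        put("module ref");
        return -1;
    }

    int main(void)
    {
        attach(3);      /* unwinds all three acquisitions in reverse order */
        return 0;
    }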
2311     diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
2312     index f6a07dc32ae4..4a6d9670e4cd 100644
2313     --- a/drivers/iio/light/apds9960.c
2314     +++ b/drivers/iio/light/apds9960.c
2315     @@ -1005,6 +1005,7 @@ static int apds9960_probe(struct i2c_client *client,
2316    
2317     iio_device_attach_buffer(indio_dev, buffer);
2318    
2319     + indio_dev->dev.parent = &client->dev;
2320     indio_dev->info = &apds9960_info;
2321     indio_dev->name = APDS9960_DRV_NAME;
2322     indio_dev->channels = apds9960_channels;
2323     diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
2324     index b39a2fb0671c..5056bd68573f 100644
2325     --- a/drivers/iio/pressure/st_pressure_core.c
2326     +++ b/drivers/iio/pressure/st_pressure_core.c
2327     @@ -28,15 +28,21 @@
2328     #include <linux/iio/common/st_sensors.h>
2329     #include "st_pressure.h"
2330    
2331     +#define MCELSIUS_PER_CELSIUS 1000
2332     +
2333     +/* Default pressure sensitivity */
2334     #define ST_PRESS_LSB_PER_MBAR 4096UL
2335     #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
2336     ST_PRESS_LSB_PER_MBAR)
2337     +
2338     +/* Default temperature sensitivity */
2339     #define ST_PRESS_LSB_PER_CELSIUS 480UL
2340     -#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
2341     - ST_PRESS_LSB_PER_CELSIUS)
2342     +#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
2343     +
2344     #define ST_PRESS_NUMBER_DATA_CHANNELS 1
2345    
2346     /* FULLSCALE */
2347     +#define ST_PRESS_FS_AVL_1100MB 1100
2348     #define ST_PRESS_FS_AVL_1260MB 1260
2349    
2350     #define ST_PRESS_1_OUT_XL_ADDR 0x28
2351     @@ -54,18 +60,20 @@
2352     #define ST_PRESS_LPS331AP_PW_MASK 0x80
2353     #define ST_PRESS_LPS331AP_FS_ADDR 0x23
2354     #define ST_PRESS_LPS331AP_FS_MASK 0x30
2355     -#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
2356     -#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
2357     -#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
2358     #define ST_PRESS_LPS331AP_BDU_ADDR 0x20
2359     #define ST_PRESS_LPS331AP_BDU_MASK 0x04
2360     #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
2361     #define ST_PRESS_LPS331AP_DRDY_IRQ_INT1_MASK 0x04
2362     #define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
2363     #define ST_PRESS_LPS331AP_MULTIREAD_BIT true
2364     -#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
2365    
2366     /* CUSTOM VALUES FOR LPS001WP SENSOR */
2367     +
2368     +/* LPS001WP pressure resolution */
2369     +#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
2370     +/* LPS001WP temperature resolution */
2371     +#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
2372     +
2373     #define ST_PRESS_LPS001WP_WAI_EXP 0xba
2374     #define ST_PRESS_LPS001WP_ODR_ADDR 0x20
2375     #define ST_PRESS_LPS001WP_ODR_MASK 0x30
2376     @@ -74,6 +82,8 @@
2377     #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
2378     #define ST_PRESS_LPS001WP_PW_ADDR 0x20
2379     #define ST_PRESS_LPS001WP_PW_MASK 0x40
2380     +#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
2381     + (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
2382     #define ST_PRESS_LPS001WP_BDU_ADDR 0x20
2383     #define ST_PRESS_LPS001WP_BDU_MASK 0x04
2384     #define ST_PRESS_LPS001WP_MULTIREAD_BIT true
2385     @@ -90,18 +100,12 @@
2386     #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
2387     #define ST_PRESS_LPS25H_PW_ADDR 0x20
2388     #define ST_PRESS_LPS25H_PW_MASK 0x80
2389     -#define ST_PRESS_LPS25H_FS_ADDR 0x00
2390     -#define ST_PRESS_LPS25H_FS_MASK 0x00
2391     -#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
2392     -#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
2393     -#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
2394     #define ST_PRESS_LPS25H_BDU_ADDR 0x20
2395     #define ST_PRESS_LPS25H_BDU_MASK 0x04
2396     #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
2397     #define ST_PRESS_LPS25H_DRDY_IRQ_INT1_MASK 0x01
2398     #define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
2399     #define ST_PRESS_LPS25H_MULTIREAD_BIT true
2400     -#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
2401     #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
2402     #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
2403    
2404     @@ -153,7 +157,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
2405     .storagebits = 16,
2406     .endianness = IIO_LE,
2407     },
2408     - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
2409     + .info_mask_separate =
2410     + BIT(IIO_CHAN_INFO_RAW) |
2411     + BIT(IIO_CHAN_INFO_SCALE),
2412     .modified = 0,
2413     },
2414     {
2415     @@ -169,7 +175,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
2416     },
2417     .info_mask_separate =
2418     BIT(IIO_CHAN_INFO_RAW) |
2419     - BIT(IIO_CHAN_INFO_OFFSET),
2420     + BIT(IIO_CHAN_INFO_SCALE),
2421     .modified = 0,
2422     },
2423     IIO_CHAN_SOFT_TIMESTAMP(1)
2424     @@ -204,11 +210,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
2425     .addr = ST_PRESS_LPS331AP_FS_ADDR,
2426     .mask = ST_PRESS_LPS331AP_FS_MASK,
2427     .fs_avl = {
2428     + /*
2429     + * Pressure and temperature sensitivity values
2430     + * as defined in table 3 of LPS331AP datasheet.
2431     + */
2432     [0] = {
2433     .num = ST_PRESS_FS_AVL_1260MB,
2434     - .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
2435     - .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
2436     - .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
2437     + .gain = ST_PRESS_KPASCAL_NANO_SCALE,
2438     + .gain2 = ST_PRESS_LSB_PER_CELSIUS,
2439     },
2440     },
2441     },
2442     @@ -248,7 +257,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
2443     .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
2444     },
2445     .fs = {
2446     - .addr = 0,
2447     + .fs_avl = {
2448     + /*
2449     + * Pressure and temperature resolution values
2450     + * as defined in table 3 of LPS001WP datasheet.
2451     + */
2452     + [0] = {
2453     + .num = ST_PRESS_FS_AVL_1100MB,
2454     + .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
2455     + .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
2456     + },
2457     + },
2458     },
2459     .bdu = {
2460     .addr = ST_PRESS_LPS001WP_BDU_ADDR,
2461     @@ -285,14 +304,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
2462     .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
2463     },
2464     .fs = {
2465     - .addr = ST_PRESS_LPS25H_FS_ADDR,
2466     - .mask = ST_PRESS_LPS25H_FS_MASK,
2467     .fs_avl = {
2468     + /*
2469     + * Pressure and temperature sensitivity values
2470     + * as defined in table 3 of LPS25H datasheet.
2471     + */
2472     [0] = {
2473     .num = ST_PRESS_FS_AVL_1260MB,
2474     - .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
2475     - .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
2476     - .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
2477     + .gain = ST_PRESS_KPASCAL_NANO_SCALE,
2478     + .gain2 = ST_PRESS_LSB_PER_CELSIUS,
2479     },
2480     },
2481     },
2482     @@ -346,26 +366,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
2483    
2484     return IIO_VAL_INT;
2485     case IIO_CHAN_INFO_SCALE:
2486     - *val = 0;
2487     -
2488     switch (ch->type) {
2489     case IIO_PRESSURE:
2490     + *val = 0;
2491     *val2 = press_data->current_fullscale->gain;
2492     - break;
2493     + return IIO_VAL_INT_PLUS_NANO;
2494     case IIO_TEMP:
2495     + *val = MCELSIUS_PER_CELSIUS;
2496     *val2 = press_data->current_fullscale->gain2;
2497     - break;
2498     + return IIO_VAL_FRACTIONAL;
2499     default:
2500     err = -EINVAL;
2501     goto read_error;
2502     }
2503    
2504     - return IIO_VAL_INT_PLUS_NANO;
2505     case IIO_CHAN_INFO_OFFSET:
2506     switch (ch->type) {
2507     case IIO_TEMP:
2508     - *val = 425;
2509     - *val2 = 10;
2510     + *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
2511     + press_data->current_fullscale->gain2;
2512     + *val2 = MCELSIUS_PER_CELSIUS;
2513     break;
2514     default:
2515     err = -EINVAL;
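For the temperature channel the patch swaps a hard-coded nano-scale for a fractional one: scale = 1000/480 milli-degrees per LSB (a 480 LSB/degree sensitivity) and offset = 42500 * 480 / 1000 LSB, so userspace's (raw + offset) * scale reproduces the datasheet formula T = 42.5 C + raw/480. A quick numerical check:

    #include <stdio.h>

    int main(void)
    {
        int raw = 1200;                             /* example counts */
        double scale = 1000.0 / 480.0;              /* milli-deg C per LSB */
        double offset = 42500.0 * 480.0 / 1000.0;   /* = 20400 LSB */

        printf("iio:       %.1f mC\n", (raw + offset) * scale);
        printf("datasheet: %.1f mC\n", (42.5 + raw / 480.0) * 1000.0);
        return 0;
    }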
2516     diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
2517     index f4d29d5dbd5f..e2f926cdcad2 100644
2518     --- a/drivers/iio/proximity/as3935.c
2519     +++ b/drivers/iio/proximity/as3935.c
2520     @@ -64,6 +64,7 @@ struct as3935_state {
2521     struct delayed_work work;
2522    
2523     u32 tune_cap;
2524     + u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
2525     u8 buf[2] ____cacheline_aligned;
2526     };
2527    
2528     @@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
2529     .type = IIO_PROXIMITY,
2530     .info_mask_separate =
2531     BIT(IIO_CHAN_INFO_RAW) |
2532     - BIT(IIO_CHAN_INFO_PROCESSED),
2533     + BIT(IIO_CHAN_INFO_PROCESSED) |
2534     + BIT(IIO_CHAN_INFO_SCALE),
2535     .scan_index = 0,
2536     .scan_type = {
2537     .sign = 'u',
2538     @@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
2539     /* storm out of range */
2540     if (*val == AS3935_DATA_MASK)
2541     return -EINVAL;
2542     - *val *= 1000;
2543     +
2544     + if (m == IIO_CHAN_INFO_PROCESSED)
2545     + *val *= 1000;
2546     + break;
2547     + case IIO_CHAN_INFO_SCALE:
2548     + *val = 1000;
2549     break;
2550     default:
2551     return -EINVAL;
2552     @@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
2553     ret = as3935_read(st, AS3935_DATA, &val);
2554     if (ret)
2555     goto err_read;
2556     - val &= AS3935_DATA_MASK;
2557     - val *= 1000;
2558    
2559     - iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
2560     + st->buffer[0] = val & AS3935_DATA_MASK;
2561     + iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
2562     + pf->timestamp);
2563     err_read:
2564     iio_trigger_notify_done(indio_dev->trig);
2565    
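The 16-byte buffer exists because iio_push_to_buffers_with_timestamp() stores a 64-bit timestamp at the next 8-byte-aligned offset after the scan data; pushing a bare local int, as before, let the core read past it. For a 1-byte sample that means 8 bytes of data plus padding followed by an 8-byte timestamp, as this small sketch of the layout computes:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        size_t sample_bytes = 1;                          /* u8 distance datum */
        size_t ts_off = (sample_bytes + 7) & ~(size_t)7;  /* align up to 8 */

        printf("timestamp at offset %zu, buffer needs %zu bytes\n",
               ts_off, ts_off + sizeof(int64_t));         /* 8 and 16 */
        return 0;
    }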
2566     diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
2567     index d6d2b3582910..4d8e7f18a9af 100644
2568     --- a/drivers/infiniband/core/cm.c
2569     +++ b/drivers/infiniband/core/cm.c
2570     @@ -3430,14 +3430,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
2571     work->cm_event.event = IB_CM_USER_ESTABLISHED;
2572    
2573     /* Check if the device started its remove_one */
2574     - spin_lock_irq(&cm.lock);
2575     + spin_lock_irqsave(&cm.lock, flags);
2576     if (!cm_dev->going_down) {
2577     queue_delayed_work(cm.wq, &work->work, 0);
2578     } else {
2579     kfree(work);
2580     ret = -ENODEV;
2581     }
2582     - spin_unlock_irq(&cm.lock);
2583     + spin_unlock_irqrestore(&cm.lock, flags);
2584    
2585     out:
2586     return ret;
2587     diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
2588     index 86af71351d9a..06da56bda201 100644
2589     --- a/drivers/infiniband/hw/mlx4/ah.c
2590     +++ b/drivers/infiniband/hw/mlx4/ah.c
2591     @@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
2592    
2593     ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
2594     ah->av.ib.g_slid = ah_attr->src_path_bits;
2595     + ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
2596     if (ah_attr->ah_flags & IB_AH_GRH) {
2597     ah->av.ib.g_slid |= 0x80;
2598     ah->av.ib.gid_index = ah_attr->grh.sgid_index;
2599     @@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
2600     !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
2601     --ah->av.ib.stat_rate;
2602     }
2603     - ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
2604    
2605     return &ah->ibah;
2606     }
2607     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
2608     index bf4959f4225b..94f1bf772ec9 100644
2609     --- a/drivers/iommu/amd_iommu_init.c
2610     +++ b/drivers/iommu/amd_iommu_init.c
2611     @@ -1363,13 +1363,23 @@ static int __init amd_iommu_init_pci(void)
2612     break;
2613     }
2614    
2615     + /*
2616     + * Order is important here to make sure any unity map requirements are
2617     + * fulfilled. The unity mappings are created and written to the device
2618     + * table during the amd_iommu_init_api() call.
2619     + *
2620     + * After that we call init_device_table_dma() to make sure any
2621     + * uninitialized DTE will block DMA, and in the end we flush the caches
2622     + * of all IOMMUs to make sure the changes to the device table are
2623     + * active.
2624     + */
2625     + ret = amd_iommu_init_api();
2626     +
2627     init_device_table_dma();
2628    
2629     for_each_iommu(iommu)
2630     iommu_flush_all_caches(iommu);
2631    
2632     - ret = amd_iommu_init_api();
2633     -
2634     if (!ret)
2635     print_iommu_info();
2636    
2637     diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
2638     index 4e5118a4cd30..8487987458a1 100644
2639     --- a/drivers/iommu/arm-smmu-v3.c
2640     +++ b/drivers/iommu/arm-smmu-v3.c
2641     @@ -1919,6 +1919,7 @@ static struct iommu_ops arm_smmu_ops = {
2642     .detach_dev = arm_smmu_detach_dev,
2643     .map = arm_smmu_map,
2644     .unmap = arm_smmu_unmap,
2645     + .map_sg = default_iommu_map_sg,
2646     .iova_to_phys = arm_smmu_iova_to_phys,
2647     .add_device = arm_smmu_add_device,
2648     .remove_device = arm_smmu_remove_device,
2649     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2650     index a2e1b7f14df2..6763a4dfed94 100644
2651     --- a/drivers/iommu/intel-iommu.c
2652     +++ b/drivers/iommu/intel-iommu.c
2653     @@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
2654     }
2655     }
2656    
2657     - iommu_flush_write_buffer(iommu);
2658     - iommu_set_root_entry(iommu);
2659     - iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2660     - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2661     -
2662     if (!ecap_pass_through(iommu->ecap))
2663     hw_pass_through = 0;
2664     #ifdef CONFIG_INTEL_IOMMU_SVM
2665     @@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
2666     #endif
2667     }
2668    
2669     + /*
2670     + * Now that qi is enabled on all iommus, set the root entry and flush
2671     + * caches. This is required on some Intel X58 chipsets; otherwise the
2672     + * flush_context function will loop forever and the boot hangs.
2673     + */
2674     + for_each_active_iommu(iommu, drhd) {
2675     + iommu_flush_write_buffer(iommu);
2676     + iommu_set_root_entry(iommu);
2677     + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
2678     + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
2679     + }
2680     +
2681     if (iommu_pass_through)
2682     iommu_identity_mapping |= IDENTMAP_ALL;
2683    
2684     diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
2685     index 2764f43607c1..0e7d16fe84d4 100644
2686     --- a/drivers/media/usb/uvc/uvc_v4l2.c
2687     +++ b/drivers/media/usb/uvc/uvc_v4l2.c
2688     @@ -1388,47 +1388,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
2689     static long uvc_v4l2_compat_ioctl32(struct file *file,
2690     unsigned int cmd, unsigned long arg)
2691     {
2692     + struct uvc_fh *handle = file->private_data;
2693     union {
2694     struct uvc_xu_control_mapping xmap;
2695     struct uvc_xu_control_query xqry;
2696     } karg;
2697     void __user *up = compat_ptr(arg);
2698     - mm_segment_t old_fs;
2699     long ret;
2700    
2701     switch (cmd) {
2702     case UVCIOC_CTRL_MAP32:
2703     - cmd = UVCIOC_CTRL_MAP;
2704     ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
2705     + if (ret)
2706     + return ret;
2707     + ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
2708     + if (ret)
2709     + return ret;
2710     + ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
2711     + if (ret)
2712     + return ret;
2713     +
2714     break;
2715    
2716     case UVCIOC_CTRL_QUERY32:
2717     - cmd = UVCIOC_CTRL_QUERY;
2718     ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
2719     + if (ret)
2720     + return ret;
2721     + ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
2722     + if (ret)
2723     + return ret;
2724     + ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
2725     + if (ret)
2726     + return ret;
2727     break;
2728    
2729     default:
2730     return -ENOIOCTLCMD;
2731     }
2732    
2733     - old_fs = get_fs();
2734     - set_fs(KERNEL_DS);
2735     - ret = video_ioctl2(file, cmd, (unsigned long)&karg);
2736     - set_fs(old_fs);
2737     -
2738     - if (ret < 0)
2739     - return ret;
2740     -
2741     - switch (cmd) {
2742     - case UVCIOC_CTRL_MAP:
2743     - ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
2744     - break;
2745     -
2746     - case UVCIOC_CTRL_QUERY:
2747     - ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
2748     - break;
2749     - }
2750     -
2751     return ret;
2752     }
2753     #endif
2754     diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
2755     index 6515dfc2b805..55cba89dbdb8 100644
2756     --- a/drivers/memory/omap-gpmc.c
2757     +++ b/drivers/memory/omap-gpmc.c
2758     @@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
2759     gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
2760     GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
2761     gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
2762     - GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
2763     + GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
2764     gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
2765     GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
2766     p->cycle2cyclesamecsen);
2767     diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
2768     index 96fddb016bf1..4dd0391d2942 100644
2769     --- a/drivers/mtd/ubi/eba.c
2770     +++ b/drivers/mtd/ubi/eba.c
2771     @@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
2772     int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
2773     struct ubi_volume *vol = ubi->volumes[idx];
2774     struct ubi_vid_hdr *vid_hdr;
2775     + uint32_t crc;
2776    
2777     vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
2778     if (!vid_hdr)
2779     @@ -599,14 +600,8 @@ retry:
2780     goto out_put;
2781     }
2782    
2783     - vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
2784     - err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
2785     - if (err) {
2786     - up_read(&ubi->fm_eba_sem);
2787     - goto write_error;
2788     - }
2789     + ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
2790    
2791     - data_size = offset + len;
2792     mutex_lock(&ubi->buf_mutex);
2793     memset(ubi->peb_buf + offset, 0xFF, len);
2794    
2795     @@ -621,6 +616,19 @@ retry:
2796    
2797     memcpy(ubi->peb_buf + offset, buf, len);
2798    
2799     + data_size = offset + len;
2800     + crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
2801     + vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
2802     + vid_hdr->copy_flag = 1;
2803     + vid_hdr->data_size = cpu_to_be32(data_size);
2804     + vid_hdr->data_crc = cpu_to_be32(crc);
2805     + err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
2806     + if (err) {
2807     + mutex_unlock(&ubi->buf_mutex);
2808     + up_read(&ubi->fm_eba_sem);
2809     + goto write_error;
2810     + }
2811     +
2812     err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
2813     if (err) {
2814     mutex_unlock(&ubi->buf_mutex);
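The reordering above matters: the VID header is now stamped only after the recovered payload has been assembled in ubi->peb_buf, so copy_flag, data_size and data_crc describe exactly the bytes that get written. Condensed, with error handling elided and names as in the hunk:

    memcpy(ubi->peb_buf + offset, buf, len);            /* 1. assemble payload */
    data_size = offset + len;
    crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
    vid_hdr->copy_flag = 1;                             /* 2. stamp the header */
    vid_hdr->data_size = cpu_to_be32(data_size);
    vid_hdr->data_crc = cpu_to_be32(crc);
    err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr); /* 3. header first */
    err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size); /* 4. then data */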
2815     diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
2816     index 8c2bb77db049..a790d5f90b83 100644
2817     --- a/drivers/net/usb/cdc_ncm.c
2818     +++ b/drivers/net/usb/cdc_ncm.c
2819     @@ -809,6 +809,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
2820     if (cdc_ncm_init(dev))
2821     goto error2;
2822    
2823     + /* Some firmwares need a pause here or they will silently fail
2824     + * to set up the interface properly. This value was decided
2825     + * empirically on a Sierra Wireless MC7455 running 02.08.02.00
2826     + * firmware.
2827     + */
2828     + usleep_range(10000, 20000);
2829     +
2830     /* configure data interface */
2831     temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
2832     if (temp) {
2833     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2834     index c00a7daaa4bc..0cd95120bc78 100644
2835     --- a/drivers/net/wireless/mac80211_hwsim.c
2836     +++ b/drivers/net/wireless/mac80211_hwsim.c
2837     @@ -2723,6 +2723,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
2838     if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
2839     !info->attrs[HWSIM_ATTR_FLAGS] ||
2840     !info->attrs[HWSIM_ATTR_COOKIE] ||
2841     + !info->attrs[HWSIM_ATTR_SIGNAL] ||
2842     !info->attrs[HWSIM_ATTR_TX_INFO])
2843     goto out;
2844    
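The added HWSIM_ATTR_SIGNAL test closes a NULL dereference: later in the same handler the signal strength is fetched with nla_get_u32(), which would be handed a NULL attribute if userspace omitted it. The general netlink rule is to validate every required attribute before fetching it, roughly:

    u32 signal;

    if (!info->attrs[HWSIM_ATTR_SIGNAL])
        goto out;                          /* reject the malformed message */
    signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]);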
2845     diff --git a/drivers/of/irq.c b/drivers/of/irq.c
2846     index 72a2c1969646..28da6242eb84 100644
2847     --- a/drivers/of/irq.c
2848     +++ b/drivers/of/irq.c
2849     @@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
2850     EXPORT_SYMBOL_GPL(of_irq_to_resource);
2851    
2852     /**
2853     - * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
2854     + * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
2855     * @dev: pointer to device tree node
2856     - * @index: zero-based index of the irq
2857     - *
2858     - * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
2859     - * is not yet created.
2860     + * @index: zero-based index of the IRQ
2861     *
2862     + * Returns the Linux IRQ number on success, or 0 if the IRQ mapping failed, or
2863     + * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code in
2864     + * case of any other failure.
2865     */
2866     int of_irq_get(struct device_node *dev, int index)
2867     {
2868     @@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
2869     EXPORT_SYMBOL_GPL(of_irq_get);
2870    
2871     /**
2872     - * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
2873     + * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
2874     * @dev: pointer to device tree node
2875     - * @name: irq name
2876     + * @name: IRQ name
2877     *
2878     - * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
2879     - * is not yet created, or error code in case of any other failure.
2880     + * Returns the Linux IRQ number on success, or 0 if the IRQ mapping failed, or
2881     + * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code in
2882     + * case of any other failure.
2883     */
2884     int of_irq_get_byname(struct device_node *dev, const char *name)
2885     {
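The corrected kernel-doc spells out the full return contract of of_irq_get()/of_irq_get_byname(), which callers must handle case by case. A plausible caller-side sketch (folding 0 and non-deferral errors together as "no usable IRQ" is an assumption about the caller's policy, not part of the API):

    int irq = of_irq_get(np, 0);

    if (irq == -EPROBE_DEFER)
        return irq;        /* retry once the irqchip has probed */
    if (irq <= 0)
        irq = 0;           /* 0 = mapping failed, < 0 = other error */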
2886     diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
2887     index d4c285688ce9..3ddc85e6efd6 100644
2888     --- a/drivers/scsi/53c700.c
2889     +++ b/drivers/scsi/53c700.c
2890     @@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
2891     } else {
2892     struct scsi_cmnd *SCp;
2893    
2894     - SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
2895     + SCp = SDp->current_cmnd;
2896     if(unlikely(SCp == NULL)) {
2897     sdev_printk(KERN_ERR, SDp,
2898     "no saved request for untagged cmd\n");
2899     @@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
2900     slot->tag, slot);
2901     } else {
2902     slot->tag = SCSI_NO_TAG;
2903     - /* must populate current_cmnd for scsi_host_find_tag to work */
2904     + /* save current command for reselection */
2905     SCp->device->current_cmnd = SCp;
2906     }
2907     /* sanity check: some of the commands generated by the mid-layer
2908     diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
2909     index 984ddcb4786d..1b9c049bd5c5 100644
2910     --- a/drivers/scsi/scsi_error.c
2911     +++ b/drivers/scsi/scsi_error.c
2912     @@ -1127,7 +1127,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
2913     */
2914     void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
2915     {
2916     - scmd->device->host->host_failed--;
2917     scmd->eh_eflags = 0;
2918     list_move_tail(&scmd->eh_entry, done_q);
2919     }
2920     @@ -2226,6 +2225,9 @@ int scsi_error_handler(void *data)
2921     else
2922     scsi_unjam_host(shost);
2923    
2924     + /* All scmds have been handled */
2925     + shost->host_failed = 0;
2926     +
2927     /*
2928     * Note - if the above fails completely, the action is to take
2929     * individual devices offline and flush the queue of any
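Taken together, the two scsi_error.c hunks change how shost->host_failed is maintained: instead of a per-command decrement inside scsi_eh_finish_cmd(), which forced serialization on the counter, the single error-handler thread now resets it once after scsi_unjam_host() has processed every failed command, so eh_done_q manipulation can stay lockless. Schematically:

    scsi_unjam_host(shost);    /* recovers or finishes every failed scmd */
    shost->host_failed = 0;    /* one reset replaces the per-scmd decrement */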
2930     diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
2931     index 02e930c55570..e4839ee4ca61 100644
2932     --- a/drivers/staging/iio/accel/sca3000_core.c
2933     +++ b/drivers/staging/iio/accel/sca3000_core.c
2934     @@ -595,7 +595,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
2935     goto error_ret_mut;
2936     ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
2937     mutex_unlock(&st->lock);
2938     - if (ret)
2939     + if (ret < 0)
2940     goto error_ret;
2941     val = ret;
2942     if (base_freq > 0)
2943     diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
2944     index 6ceac4f2d4b2..5b4b47ed948b 100644
2945     --- a/drivers/thermal/cpu_cooling.c
2946     +++ b/drivers/thermal/cpu_cooling.c
2947     @@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
2948     goto free_power_table;
2949     }
2950    
2951     - snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
2952     - cpufreq_dev->id);
2953     -
2954     - cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
2955     - &cpufreq_cooling_ops);
2956     - if (IS_ERR(cool_dev))
2957     - goto remove_idr;
2958     -
2959     /* Fill freq-table in descending order of frequencies */
2960     for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
2961     freq = find_next_max(table, freq);
2962     @@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
2963     pr_debug("%s: freq:%u KHz\n", __func__, freq);
2964     }
2965    
2966     + snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
2967     + cpufreq_dev->id);
2968     +
2969     + cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
2970     + &cpufreq_cooling_ops);
2971     + if (IS_ERR(cool_dev))
2972     + goto remove_idr;
2973     +
2974     cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
2975     cpufreq_dev->cool_dev = cool_dev;
2976    
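The reorder above fixes a use-before-init window: thermal_of_cooling_device_register() publishes the cooling device immediately, so the thermal core may invoke the cpufreq_cooling_ops callbacks, which read freq_table, before the table is populated. The safe pattern is data first, registration last; fill_freq_table() below is a hypothetical stand-in for the loop in the hunk:

    fill_freq_table(cpufreq_dev);          /* everything the callbacks read, ready first */
    cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
                                                  &cpufreq_cooling_ops); /* go live */
    if (IS_ERR(cool_dev))
        goto remove_idr;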
2977     diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
2978     index 6f0336fff501..41987a55a538 100644
2979     --- a/drivers/tty/vt/keyboard.c
2980     +++ b/drivers/tty/vt/keyboard.c
2981     @@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
2982    
2983     static void do_compute_shiftstate(void)
2984     {
2985     - unsigned int i, j, k, sym, val;
2986     + unsigned int k, sym, val;
2987    
2988     shift_state = 0;
2989     memset(shift_down, 0, sizeof(shift_down));
2990    
2991     - for (i = 0; i < ARRAY_SIZE(key_down); i++) {
2992     -
2993     - if (!key_down[i])
2994     + for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
2995     + sym = U(key_maps[0][k]);
2996     + if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
2997     continue;
2998    
2999     - k = i * BITS_PER_LONG;
3000     -
3001     - for (j = 0; j < BITS_PER_LONG; j++, k++) {
3002     -
3003     - if (!test_bit(k, key_down))
3004     - continue;
3005     + val = KVAL(sym);
3006     + if (val == KVAL(K_CAPSSHIFT))
3007     + val = KVAL(K_SHIFT);
3008    
3009     - sym = U(key_maps[0][k]);
3010     - if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
3011     - continue;
3012     -
3013     - val = KVAL(sym);
3014     - if (val == KVAL(K_CAPSSHIFT))
3015     - val = KVAL(K_SHIFT);
3016     -
3017     - shift_down[val]++;
3018     - shift_state |= (1 << val);
3019     - }
3020     + shift_down[val]++;
3021     + shift_state |= BIT(val);
3022     }
3023     }
3024    
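The keyboard rewrite collapses the hand-rolled word-and-bit scan into for_each_set_bit(), which visits exactly the set bits of a bitmap, and bounds the walk with min(NR_KEYS, KEY_CNT) so an out-of-range keycode can never index key_maps[]. The two forms are equivalent; visit() is a stand-in:

    /* before: nested loops over longs and bit positions */
    for (i = 0; i < ARRAY_SIZE(bitmap); i++)
        for (j = 0; j < BITS_PER_LONG; j++)
            if (test_bit(i * BITS_PER_LONG + j, bitmap))
                visit(i * BITS_PER_LONG + j);

    /* after: one iterator, same set of visits */
    for_each_set_bit(k, bitmap, nbits)
        visit(k);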
3025     diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
3026     index cf20282f79f0..136ebaaa9cc0 100644
3027     --- a/drivers/tty/vt/vt.c
3028     +++ b/drivers/tty/vt/vt.c
3029     @@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
3030     vc->vc_complement_mask = 0;
3031     vc->vc_can_do_color = 0;
3032     vc->vc_panic_force_write = false;
3033     + vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
3034     vc->vc_sw->con_init(vc, init);
3035     if (!vc->vc_complement_mask)
3036     vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
3037     diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
3038     index 61d538aa2346..4f4f06a5889f 100644
3039     --- a/drivers/usb/common/usb-otg-fsm.c
3040     +++ b/drivers/usb/common/usb-otg-fsm.c
3041     @@ -21,6 +21,7 @@
3042     * 675 Mass Ave, Cambridge, MA 02139, USA.
3043     */
3044    
3045     +#include <linux/module.h>
3046     #include <linux/kernel.h>
3047     #include <linux/types.h>
3048     #include <linux/mutex.h>
3049     @@ -365,3 +366,4 @@ int otg_statemachine(struct otg_fsm *fsm)
3050     return state_changed;
3051     }
3052     EXPORT_SYMBOL_GPL(otg_statemachine);
3053     +MODULE_LICENSE("GPL");
3054     diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
3055     index a66d3cb62b65..a738a68d2292 100644
3056     --- a/drivers/usb/dwc2/core.h
3057     +++ b/drivers/usb/dwc2/core.h
3058     @@ -44,6 +44,17 @@
3059     #include <linux/usb/phy.h>
3060     #include "hw.h"
3061    
3062     +#ifdef CONFIG_MIPS
3063     +/*
3064     + * There are some MIPS machines that can run in either big-endian
3065     + * or little-endian mode and that access the dwc2 registers without
3066     + * a byteswap in either mode.
3067     + * Unlike other architectures, MIPS apparently does not require a
3068     + * barrier before the __raw_writel() to synchronize with DMA but does
3069     + * require the barrier after the __raw_writel() to serialize a set of
3070     + * writes. This set of operations was added specifically for MIPS and
3071     + * should only be used there.
3072     + */
3073     static inline u32 dwc2_readl(const void __iomem *addr)
3074     {
3075     u32 value = __raw_readl(addr);
3076     @@ -70,6 +81,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
3077     pr_info("INFO:: wrote %08x to %p\n", value, addr);
3078     #endif
3079     }
3080     +#else
3081     +/* Normal architectures just use readl/writel */
3082     +static inline u32 dwc2_readl(const void __iomem *addr)
3083     +{
3084     + return readl(addr);
3085     +}
3086     +
3087     +static inline void dwc2_writel(u32 value, void __iomem *addr)
3088     +{
3089     + writel(value, addr);
3090     +
3091     +#ifdef DWC2_LOG_WRITES
3092     + pr_info("info:: wrote %08x to %p\n", value, addr);
3093     +#endif
3094     +}
3095     +#endif
3096    
3097     /* Maximum number of Endpoints/HostChannels */
3098     #define MAX_EPS_CHANNELS 16
3099     diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
3100     index 7d3e5d0e9aa4..8ab6238c9299 100644
3101     --- a/drivers/virtio/virtio_balloon.c
3102     +++ b/drivers/virtio/virtio_balloon.c
3103     @@ -73,7 +73,7 @@ struct virtio_balloon {
3104    
3105     /* The array of pfns we tell the Host about. */
3106     unsigned int num_pfns;
3107     - u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
3108     + __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
3109    
3110     /* Memory statistics */
3111     int need_stats_update;
3112     @@ -125,14 +125,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
3113     wait_event(vb->acked, virtqueue_get_buf(vq, &len));
3114     }
3115    
3116     -static void set_page_pfns(u32 pfns[], struct page *page)
3117     +static void set_page_pfns(struct virtio_balloon *vb,
3118     + __virtio32 pfns[], struct page *page)
3119     {
3120     unsigned int i;
3121    
3122     /* Set balloon pfns pointing at this page.
3123     * Note that the first pfn points at start of the page. */
3124     for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
3125     - pfns[i] = page_to_balloon_pfn(page) + i;
3126     + pfns[i] = cpu_to_virtio32(vb->vdev,
3127     + page_to_balloon_pfn(page) + i);
3128     }
3129    
3130     static void fill_balloon(struct virtio_balloon *vb, size_t num)
3131     @@ -155,7 +157,7 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
3132     msleep(200);
3133     break;
3134     }
3135     - set_page_pfns(vb->pfns + vb->num_pfns, page);
3136     + set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
3137     vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
3138     if (!virtio_has_feature(vb->vdev,
3139     VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
3140     @@ -171,10 +173,12 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
3141     static void release_pages_balloon(struct virtio_balloon *vb)
3142     {
3143     unsigned int i;
3144     + struct page *page;
3145    
3146     /* Find pfns pointing at start of each page, get pages and free them. */
3147     for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
3148     - struct page *page = balloon_pfn_to_page(vb->pfns[i]);
3149     + page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
3150     + vb->pfns[i]));
3151     if (!virtio_has_feature(vb->vdev,
3152     VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
3153     adjust_managed_page_count(page, 1);
3154     @@ -197,7 +201,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
3155     page = balloon_page_dequeue(vb_dev_info);
3156     if (!page)
3157     break;
3158     - set_page_pfns(vb->pfns + vb->num_pfns, page);
3159     + set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
3160     vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
3161     }
3162    
3163     @@ -465,13 +469,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
3164     __count_vm_event(BALLOON_MIGRATE);
3165     spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
3166     vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
3167     - set_page_pfns(vb->pfns, newpage);
3168     + set_page_pfns(vb, vb->pfns, newpage);
3169     tell_host(vb, vb->inflate_vq);
3170    
3171     /* balloon's page migration 2nd step -- deflate "page" */
3172     balloon_page_delete(page);
3173     vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
3174     - set_page_pfns(vb->pfns, page);
3175     + set_page_pfns(vb, vb->pfns, page);
3176     tell_host(vb, vb->deflate_vq);
3177    
3178     mutex_unlock(&vb->balloon_lock);
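Typing pfns[] as __virtio32 and converting through the vdev makes the balloon's page-frame array match the byte order negotiated with the device, so big-endian guests stop handing the host byte-swapped PFNs. The conversion is a simple round-trip keyed on the virtio device:

    pfns[i] = cpu_to_virtio32(vb->vdev, pfn);   /* guest CPU -> device order */
    pfn = virtio32_to_cpu(vb->vdev, pfns[i]);   /* device order -> guest CPU */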
3179     diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
3180     index 364bc44610c1..cfab1d24e4bc 100644
3181     --- a/drivers/xen/balloon.c
3182     +++ b/drivers/xen/balloon.c
3183     @@ -152,8 +152,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
3184     static void balloon_process(struct work_struct *work);
3185     static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
3186    
3187     -static void release_memory_resource(struct resource *resource);
3188     -
3189     /* When ballooning out (allocating memory to return to Xen) we don't really
3190     want the kernel to try too hard since that can trigger the oom killer. */
3191     #define GFP_BALLOON \
3192     @@ -249,6 +247,19 @@ static enum bp_state update_schedule(enum bp_state state)
3193     }
3194    
3195     #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
3196     +static void release_memory_resource(struct resource *resource)
3197     +{
3198     + if (!resource)
3199     + return;
3200     +
3201     + /*
3202     + * No need to reset region to identity mapped since we now
3203     + * know that no I/O can be in this region
3204     + */
3205     + release_resource(resource);
3206     + kfree(resource);
3207     +}
3208     +
3209     static struct resource *additional_memory_resource(phys_addr_t size)
3210     {
3211     struct resource *res;
3212     @@ -287,19 +298,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
3213     return res;
3214     }
3215    
3216     -static void release_memory_resource(struct resource *resource)
3217     -{
3218     - if (!resource)
3219     - return;
3220     -
3221     - /*
3222     - * No need to reset region to identity mapped since we now
3223     - * know that no I/O can be in this region
3224     - */
3225     - release_resource(resource);
3226     - kfree(resource);
3227     -}
3228     -
3229     static enum bp_state reserve_additional_memory(void)
3230     {
3231     long credit;
3232     diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
3233     index 70fa438000af..611f9c11da85 100644
3234     --- a/drivers/xen/xen-acpi-processor.c
3235     +++ b/drivers/xen/xen-acpi-processor.c
3236     @@ -423,36 +423,7 @@ upload:
3237    
3238     return 0;
3239     }
3240     -static int __init check_prereq(void)
3241     -{
3242     - struct cpuinfo_x86 *c = &cpu_data(0);
3243     -
3244     - if (!xen_initial_domain())
3245     - return -ENODEV;
3246     -
3247     - if (!acpi_gbl_FADT.smi_command)
3248     - return -ENODEV;
3249     -
3250     - if (c->x86_vendor == X86_VENDOR_INTEL) {
3251     - if (!cpu_has(c, X86_FEATURE_EST))
3252     - return -ENODEV;
3253    
3254     - return 0;
3255     - }
3256     - if (c->x86_vendor == X86_VENDOR_AMD) {
3257     - /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
3258     - * as we get compile warnings for the static functions.
3259     - */
3260     -#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
3261     -#define USE_HW_PSTATE 0x00000080
3262     - u32 eax, ebx, ecx, edx;
3263     - cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
3264     - if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
3265     - return -ENODEV;
3266     - return 0;
3267     - }
3268     - return -ENODEV;
3269     -}
3270     /* acpi_perf_data is a pointer to percpu data. */
3271     static struct acpi_processor_performance __percpu *acpi_perf_data;
3272    
3273     @@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
3274     static int __init xen_acpi_processor_init(void)
3275     {
3276     unsigned int i;
3277     - int rc = check_prereq();
3278     + int rc;
3279    
3280     - if (rc)
3281     - return rc;
3282     + if (!xen_initial_domain())
3283     + return -ENODEV;
3284    
3285     nr_acpi_bits = get_max_acpi_id() + 1;
3286     acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
3287     diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
3288     index 5b8e235c4b6d..0f2b7c622ce3 100644
3289     --- a/fs/btrfs/ctree.c
3290     +++ b/fs/btrfs/ctree.c
3291     @@ -1551,6 +1551,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
3292     trans->transid, root->fs_info->generation);
3293    
3294     if (!should_cow_block(trans, root, buf)) {
3295     + trans->dirty = true;
3296     *cow_ret = buf;
3297     return 0;
3298     }
3299     @@ -2773,8 +2774,10 @@ again:
3300     * then we don't want to set the path blocking,
3301     * so we test it here
3302     */
3303     - if (!should_cow_block(trans, root, b))
3304     + if (!should_cow_block(trans, root, b)) {
3305     + trans->dirty = true;
3306     goto cow_done;
3307     + }
3308    
3309     /*
3310     * must have write locks on this node and the
3311     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
3312     index 2368cac1115a..47cdc6f3390b 100644
3313     --- a/fs/btrfs/extent-tree.c
3314     +++ b/fs/btrfs/extent-tree.c
3315     @@ -7856,7 +7856,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3316     set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3317     buf->start + buf->len - 1, GFP_NOFS);
3318     }
3319     - trans->blocks_used++;
3320     + trans->dirty = true;
3321     /* this returns a buffer locked for blocking */
3322     return buf;
3323     }
3324     diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
3325     index fe609b81dd1b..5d34a062ca4f 100644
3326     --- a/fs/btrfs/super.c
3327     +++ b/fs/btrfs/super.c
3328     @@ -239,7 +239,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
3329     trans->aborted = errno;
3330     /* Nothing used. The other threads that have joined this
3331     * transaction may be able to continue. */
3332     - if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
3333     + if (!trans->dirty && list_empty(&trans->new_bgs)) {
3334     const char *errstr;
3335    
3336     errstr = btrfs_decode_error(errno);
3337     diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
3338     index 64c8221b6165..1e872923ec2c 100644
3339     --- a/fs/btrfs/transaction.h
3340     +++ b/fs/btrfs/transaction.h
3341     @@ -110,7 +110,6 @@ struct btrfs_trans_handle {
3342     u64 chunk_bytes_reserved;
3343     unsigned long use_count;
3344     unsigned long blocks_reserved;
3345     - unsigned long blocks_used;
3346     unsigned long delayed_ref_updates;
3347     struct btrfs_transaction *transaction;
3348     struct btrfs_block_rsv *block_rsv;
3349     @@ -121,6 +120,7 @@ struct btrfs_trans_handle {
3350     bool can_flush_pending_bgs;
3351     bool reloc_reserved;
3352     bool sync;
3353     + bool dirty;
3354     unsigned int type;
3355     /*
3356     * this root is only needed to validate that the root passed to
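The new trans->dirty flag replaces blocks_used as the abort-time question "did this handle modify anything?": it is set on every path that COWs or allocates a tree block, and __btrfs_abort_transaction() lets other joined handles continue only when the aborting handle is clean. Roughly:

    /* on any modification path */
    trans->dirty = true;

    /* on abort */
    if (!trans->dirty && list_empty(&trans->new_bgs))
        return;        /* nothing touched: joined handles may continue */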
3357     diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
3358     index 5a53ac6b1e02..02b071bf3732 100644
3359     --- a/fs/cifs/cifs_unicode.c
3360     +++ b/fs/cifs/cifs_unicode.c
3361     @@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
3362     case SFM_SLASH:
3363     *target = '\\';
3364     break;
3365     + case SFM_SPACE:
3366     + *target = ' ';
3367     + break;
3368     + case SFM_PERIOD:
3369     + *target = '.';
3370     + break;
3371     default:
3372     return false;
3373     }
3374     @@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
3375     return dest_char;
3376     }
3377    
3378     -static __le16 convert_to_sfm_char(char src_char)
3379     +static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
3380     {
3381     __le16 dest_char;
3382    
3383     @@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
3384     case '|':
3385     dest_char = cpu_to_le16(SFM_PIPE);
3386     break;
3387     + case '.':
3388     + if (end_of_string)
3389     + dest_char = cpu_to_le16(SFM_PERIOD);
3390     + else
3391     + dest_char = 0;
3392     + break;
3393     + case ' ':
3394     + if (end_of_string)
3395     + dest_char = cpu_to_le16(SFM_SPACE);
3396     + else
3397     + dest_char = 0;
3398     + break;
3399     default:
3400     dest_char = 0;
3401     }
3402     @@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
3403     /* see if we must remap this char */
3404     if (map_chars == SFU_MAP_UNI_RSVD)
3405     dst_char = convert_to_sfu_char(src_char);
3406     - else if (map_chars == SFM_MAP_UNI_RSVD)
3407     - dst_char = convert_to_sfm_char(src_char);
3408     - else
3409     + else if (map_chars == SFM_MAP_UNI_RSVD) {
3410     + bool end_of_string;
3411     +
3412     + if (i == srclen - 1)
3413     + end_of_string = true;
3414     + else
3415     + end_of_string = false;
3416     +
3417     + dst_char = convert_to_sfm_char(src_char, end_of_string);
3418     + } else
3419     dst_char = 0;
3420     /*
3421     * FIXME: We can not handle remapping backslash (UNI_SLASH)
3422     diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
3423     index bdc52cb9a676..479bc0a941f3 100644
3424     --- a/fs/cifs/cifs_unicode.h
3425     +++ b/fs/cifs/cifs_unicode.h
3426     @@ -64,6 +64,8 @@
3427     #define SFM_LESSTHAN ((__u16) 0xF023)
3428     #define SFM_PIPE ((__u16) 0xF027)
3429     #define SFM_SLASH ((__u16) 0xF026)
3430     +#define SFM_PERIOD ((__u16) 0xF028)
3431     +#define SFM_SPACE ((__u16) 0xF029)
3432    
3433     /*
3434     * Mapping mechanism to use when one of the seven reserved characters is
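With the two new code points, only a '.' or ' ' in the last position of the source string is remapped, matching the Services-for-Mac convention of round-tripping names that would otherwise lose a trailing dot or space. Using the helper from the hunk above:

    convert_to_sfm_char('.', false);   /* mid-name dot: returns 0, no remap  */
    convert_to_sfm_char('.', true);    /* trailing dot: SFM_PERIOD (0xF028)  */
    convert_to_sfm_char(' ', true);    /* trailing space: SFM_SPACE (0xF029) */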
3435     diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3436     index 3c194ff0d2f0..5481a6eb9a95 100644
3437     --- a/fs/cifs/connect.c
3438     +++ b/fs/cifs/connect.c
3439     @@ -425,7 +425,9 @@ cifs_echo_request(struct work_struct *work)
3440     * server->ops->need_neg() == true. Also, no need to ping if
3441     * we got a response recently.
3442     */
3443     - if (!server->ops->need_neg || server->ops->need_neg(server) ||
3444     +
3445     + if (server->tcpStatus == CifsNeedReconnect ||
3446     + server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
3447     (server->ops->can_echo && !server->ops->can_echo(server)) ||
3448     time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
3449     goto requeue_echo;
3450     diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
3451     index 848249fa120f..3079b38f0afb 100644
3452     --- a/fs/cifs/ntlmssp.h
3453     +++ b/fs/cifs/ntlmssp.h
3454     @@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
3455    
3456     int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
3457     void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
3458     -int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
3459     +int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
3460     struct cifs_ses *ses,
3461     const struct nls_table *nls_cp);
3462     diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
3463     index af0ec2d5ad0e..e88ffe1da045 100644
3464     --- a/fs/cifs/sess.c
3465     +++ b/fs/cifs/sess.c
3466     @@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
3467     sec_blob->DomainName.MaximumLength = 0;
3468     }
3469    
3470     -/* We do not malloc the blob, it is passed in pbuffer, because its
3471     - maximum possible size is fixed and small, making this approach cleaner.
3472     - This function returns the length of the data in the blob */
3473     -int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3474     +static int size_of_ntlmssp_blob(struct cifs_ses *ses)
3475     +{
3476     + int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
3477     + - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
3478     +
3479     + if (ses->domainName)
3480     + sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
3481     + else
3482     + sz += 2;
3483     +
3484     + if (ses->user_name)
3485     + sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
3486     + else
3487     + sz += 2;
3488     +
3489     + return sz;
3490     +}
3491     +
3492     +int build_ntlmssp_auth_blob(unsigned char **pbuffer,
3493     u16 *buflen,
3494     struct cifs_ses *ses,
3495     const struct nls_table *nls_cp)
3496     {
3497     int rc;
3498     - AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
3499     + AUTHENTICATE_MESSAGE *sec_blob;
3500     __u32 flags;
3501     unsigned char *tmp;
3502    
3503     + rc = setup_ntlmv2_rsp(ses, nls_cp);
3504     + if (rc) {
3505     + cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
3506     + *buflen = 0;
3507     + goto setup_ntlmv2_ret;
3508     + }
3509     + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
3510     + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
3511     +
3512     memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
3513     sec_blob->MessageType = NtLmAuthenticate;
3514    
3515     @@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3516     flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
3517     }
3518    
3519     - tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
3520     + tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
3521     sec_blob->NegotiateFlags = cpu_to_le32(flags);
3522    
3523     sec_blob->LmChallengeResponse.BufferOffset =
3524     @@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3525     sec_blob->LmChallengeResponse.Length = 0;
3526     sec_blob->LmChallengeResponse.MaximumLength = 0;
3527    
3528     - sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
3529     + sec_blob->NtChallengeResponse.BufferOffset =
3530     + cpu_to_le32(tmp - *pbuffer);
3531     if (ses->user_name != NULL) {
3532     - rc = setup_ntlmv2_rsp(ses, nls_cp);
3533     - if (rc) {
3534     - cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
3535     - goto setup_ntlmv2_ret;
3536     - }
3537     memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
3538     ses->auth_key.len - CIFS_SESS_KEY_SIZE);
3539     tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
3540     @@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3541     }
3542    
3543     if (ses->domainName == NULL) {
3544     - sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3545     + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3546     sec_blob->DomainName.Length = 0;
3547     sec_blob->DomainName.MaximumLength = 0;
3548     tmp += 2;
3549     @@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3550     len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
3551     CIFS_MAX_USERNAME_LEN, nls_cp);
3552     len *= 2; /* unicode is 2 bytes each */
3553     - sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3554     + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3555     sec_blob->DomainName.Length = cpu_to_le16(len);
3556     sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
3557     tmp += len;
3558     }
3559    
3560     if (ses->user_name == NULL) {
3561     - sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3562     + sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3563     sec_blob->UserName.Length = 0;
3564     sec_blob->UserName.MaximumLength = 0;
3565     tmp += 2;
3566     @@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3567     len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
3568     CIFS_MAX_USERNAME_LEN, nls_cp);
3569     len *= 2; /* unicode is 2 bytes each */
3570     - sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3571     + sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3572     sec_blob->UserName.Length = cpu_to_le16(len);
3573     sec_blob->UserName.MaximumLength = cpu_to_le16(len);
3574     tmp += len;
3575     }
3576    
3577     - sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
3578     + sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3579     sec_blob->WorkstationName.Length = 0;
3580     sec_blob->WorkstationName.MaximumLength = 0;
3581     tmp += 2;
3582     @@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
3583     (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
3584     && !calc_seckey(ses)) {
3585     memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
3586     - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
3587     + sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3588     sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
3589     sec_blob->SessionKey.MaximumLength =
3590     cpu_to_le16(CIFS_CPHTXT_SIZE);
3591     tmp += CIFS_CPHTXT_SIZE;
3592     } else {
3593     - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
3594     + sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
3595     sec_blob->SessionKey.Length = 0;
3596     sec_blob->SessionKey.MaximumLength = 0;
3597     }
3598    
3599     + *buflen = tmp - *pbuffer;
3600     setup_ntlmv2_ret:
3601     - *buflen = tmp - pbuffer;
3602     return rc;
3603     }
3604    
3605     @@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
3606     struct cifs_ses *ses = sess_data->ses;
3607     __u16 bytes_remaining;
3608     char *bcc_ptr;
3609     - char *ntlmsspblob = NULL;
3610     + unsigned char *ntlmsspblob = NULL;
3611     u16 blob_len;
3612    
3613     cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
3614     @@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
3615     /* Build security blob before we assemble the request */
3616     pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
3617     smb_buf = (struct smb_hdr *)pSMB;
3618     - /*
3619     - * 5 is an empirical value, large enough to hold
3620     - * authenticate message plus max 10 av pairs,
3621     - * domain, user, workstation names, flags, etc.
3622     - */
3623     - ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
3624     - GFP_KERNEL);
3625     - if (!ntlmsspblob) {
3626     - rc = -ENOMEM;
3627     - goto out;
3628     - }
3629     -
3630     - rc = build_ntlmssp_auth_blob(ntlmsspblob,
3631     + rc = build_ntlmssp_auth_blob(&ntlmsspblob,
3632     &blob_len, ses, sess_data->nls_cp);
3633     if (rc)
3634     goto out_free_ntlmsspblob;
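build_ntlmssp_auth_blob() now sizes and allocates the blob itself from the real domain and user-name lengths, instead of trusting callers to guess with 5 * sizeof(struct _AUTHENTICATE_MESSAGE). Note that the kmalloc() result is used unchecked in this version of the patch; the defensive shape (a NULL check was added by a later upstream fix) would be:

    *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
    if (!*pbuffer) {
        rc = -ENOMEM;
        *buflen = 0;
        goto setup_ntlmv2_ret;
    }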
3635     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3636     index 82c5f57382b2..0b6dc1942bdc 100644
3637     --- a/fs/cifs/smb2pdu.c
3638     +++ b/fs/cifs/smb2pdu.c
3639     @@ -591,7 +591,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
3640     u16 blob_length = 0;
3641     struct key *spnego_key = NULL;
3642     char *security_blob = NULL;
3643     - char *ntlmssp_blob = NULL;
3644     + unsigned char *ntlmssp_blob = NULL;
3645     bool use_spnego = false; /* else use raw ntlmssp */
3646    
3647     cifs_dbg(FYI, "Session Setup\n");
3648     @@ -716,13 +716,7 @@ ssetup_ntlmssp_authenticate:
3649     iov[1].iov_len = blob_length;
3650     } else if (phase == NtLmAuthenticate) {
3651     req->hdr.SessionId = ses->Suid;
3652     - ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
3653     - GFP_KERNEL);
3654     - if (ntlmssp_blob == NULL) {
3655     - rc = -ENOMEM;
3656     - goto ssetup_exit;
3657     - }
3658     - rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
3659     + rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
3660     nls_cp);
3661     if (rc) {
3662     cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
3663     @@ -1820,6 +1814,33 @@ SMB2_echo(struct TCP_Server_Info *server)
3664    
3665     cifs_dbg(FYI, "In echo request\n");
3666    
3667     + if (server->tcpStatus == CifsNeedNegotiate) {
3668     + struct list_head *tmp, *tmp2;
3669     + struct cifs_ses *ses;
3670     + struct cifs_tcon *tcon;
3671     +
3672     + cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
3673     + spin_lock(&cifs_tcp_ses_lock);
3674     + list_for_each(tmp, &server->smb_ses_list) {
3675     + ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
3676     + list_for_each(tmp2, &ses->tcon_list) {
3677     + tcon = list_entry(tmp2, struct cifs_tcon,
3678     + tcon_list);
3679     + /* add check for persistent handle reconnect */
3680     + if (tcon && tcon->need_reconnect) {
3681     + spin_unlock(&cifs_tcp_ses_lock);
3682     + rc = smb2_reconnect(SMB2_ECHO, tcon);
3683     + spin_lock(&cifs_tcp_ses_lock);
3684     + }
3685     + }
3686     + }
3687     + spin_unlock(&cifs_tcp_ses_lock);
3688     + }
3689     +
3690     + /* if no session, renegotiate failed above */
3691     + if (server->tcpStatus == CifsNeedNegotiate)
3692     + return -EIO;
3693     +
3694     rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
3695     if (rc)
3696     return rc;
3697     diff --git a/fs/namespace.c b/fs/namespace.c
3698     index 0570729c87fd..33064fcbfff9 100644
3699     --- a/fs/namespace.c
3700     +++ b/fs/namespace.c
3701     @@ -2401,8 +2401,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
3702     mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
3703     }
3704     if (type->fs_flags & FS_USERNS_VISIBLE) {
3705     - if (!fs_fully_visible(type, &mnt_flags))
3706     + if (!fs_fully_visible(type, &mnt_flags)) {
3707     + put_filesystem(type);
3708     return -EPERM;
3709     + }
3710     }
3711     }
3712    
3713     @@ -3236,6 +3238,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3714     if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
3715     mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
3716    
3717     + /* Don't miss readonly hidden in the superblock flags */
3718     + if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
3719     + mnt_flags |= MNT_LOCK_READONLY;
3720     +
3721     /* Verify the mount flags are equal to or more permissive
3722     * than the proposed new mount.
3723     */
3724     @@ -3262,7 +3268,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
3725     list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
3726     struct inode *inode = child->mnt_mountpoint->d_inode;
3727     /* Only worry about locked mounts */
3728     - if (!(mnt_flags & MNT_LOCKED))
3729     + if (!(child->mnt.mnt_flags & MNT_LOCKED))
3730     continue;
3731     /* Is the directory permanently empty? */
3732     if (!is_empty_dir_inode(inode))
3733     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
3734     index 5fc2162afb67..46cfed63d229 100644
3735     --- a/fs/nfs/dir.c
3736     +++ b/fs/nfs/dir.c
3737     @@ -1531,9 +1531,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
3738     err = PTR_ERR(inode);
3739     trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
3740     put_nfs_open_context(ctx);
3741     + d_drop(dentry);
3742     switch (err) {
3743     case -ENOENT:
3744     - d_drop(dentry);
3745     d_add(dentry, NULL);
3746     nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
3747     break;
3748     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3749     index 98a44157353a..fc215ab4dcd5 100644
3750     --- a/fs/nfs/nfs4proc.c
3751     +++ b/fs/nfs/nfs4proc.c
3752     @@ -2854,12 +2854,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
3753     call_close |= is_wronly;
3754     else if (is_wronly)
3755     calldata->arg.fmode |= FMODE_WRITE;
3756     + if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
3757     + call_close |= is_rdwr;
3758     } else if (is_rdwr)
3759     calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
3760    
3761     - if (calldata->arg.fmode == 0)
3762     - call_close |= is_rdwr;
3763     -
3764     if (!nfs4_valid_open_stateid(state))
3765     call_close = 0;
3766     spin_unlock(&state->owner->so_lock);
3767     diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
3768     index 1580ea6fd64d..d08cd88155c7 100644
3769     --- a/fs/nfsd/nfs2acl.c
3770     +++ b/fs/nfsd/nfs2acl.c
3771     @@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
3772     goto out;
3773    
3774     inode = d_inode(fh->fh_dentry);
3775     - if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
3776     - error = -EOPNOTSUPP;
3777     - goto out_errno;
3778     - }
3779    
3780     error = fh_want_write(fh);
3781     if (error)
3782     goto out_errno;
3783    
3784     - error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
3785     + fh_lock(fh);
3786     +
3787     + error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
3788     if (error)
3789     - goto out_drop_write;
3790     - error = inode->i_op->set_acl(inode, argp->acl_default,
3791     - ACL_TYPE_DEFAULT);
3792     + goto out_drop_lock;
3793     + error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
3794     if (error)
3795     - goto out_drop_write;
3796     + goto out_drop_lock;
3797     +
3798     + fh_unlock(fh);
3799    
3800     fh_drop_write(fh);
3801    
3802     @@ -131,7 +130,8 @@ out:
3803     posix_acl_release(argp->acl_access);
3804     posix_acl_release(argp->acl_default);
3805     return nfserr;
3806     -out_drop_write:
3807     +out_drop_lock:
3808     + fh_unlock(fh);
3809     fh_drop_write(fh);
3810     out_errno:
3811     nfserr = nfserrno(error);
3812     diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
3813     index 01df4cd7c753..0c890347cde3 100644
3814     --- a/fs/nfsd/nfs3acl.c
3815     +++ b/fs/nfsd/nfs3acl.c
3816     @@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
3817     goto out;
3818    
3819     inode = d_inode(fh->fh_dentry);
3820     - if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
3821     - error = -EOPNOTSUPP;
3822     - goto out_errno;
3823     - }
3824    
3825     error = fh_want_write(fh);
3826     if (error)
3827     goto out_errno;
3828    
3829     - error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
3830     + fh_lock(fh);
3831     +
3832     + error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
3833     if (error)
3834     - goto out_drop_write;
3835     - error = inode->i_op->set_acl(inode, argp->acl_default,
3836     - ACL_TYPE_DEFAULT);
3837     + goto out_drop_lock;
3838     + error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
3839    
3840     -out_drop_write:
3841     +out_drop_lock:
3842     + fh_unlock(fh);
3843     fh_drop_write(fh);
3844     out_errno:
3845     nfserr = nfserrno(error);
3846     diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
3847     index 6adabd6049b7..71292a0d6f09 100644
3848     --- a/fs/nfsd/nfs4acl.c
3849     +++ b/fs/nfsd/nfs4acl.c
3850     @@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
3851     dentry = fhp->fh_dentry;
3852     inode = d_inode(dentry);
3853    
3854     - if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
3855     - return nfserr_attrnotsupp;
3856     -
3857     if (S_ISDIR(inode->i_mode))
3858     flags = NFS4_ACL_DIR;
3859    
3860     @@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
3861     if (host_error < 0)
3862     goto out_nfserr;
3863    
3864     - host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
3865     + fh_lock(fhp);
3866     +
3867     + host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
3868     if (host_error < 0)
3869     - goto out_release;
3870     + goto out_drop_lock;
3871    
3872     if (S_ISDIR(inode->i_mode)) {
3873     - host_error = inode->i_op->set_acl(inode, dpacl,
3874     - ACL_TYPE_DEFAULT);
3875     + host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
3876     }
3877    
3878     -out_release:
3879     +out_drop_lock:
3880     + fh_unlock(fhp);
3881     +
3882     posix_acl_release(pacl);
3883     posix_acl_release(dpacl);
3884     out_nfserr:
3885     diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
3886     index e7f50c4081d6..15bdc2d48cfe 100644
3887     --- a/fs/nfsd/nfs4callback.c
3888     +++ b/fs/nfsd/nfs4callback.c
3889     @@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
3890     }
3891     }
3892    
3893     -static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
3894     -{
3895     - struct rpc_xprt *xprt;
3896     -
3897     - if (args->protocol != XPRT_TRANSPORT_BC_TCP)
3898     - return rpc_create(args);
3899     -
3900     - xprt = args->bc_xprt->xpt_bc_xprt;
3901     - if (xprt) {
3902     - xprt_get(xprt);
3903     - return rpc_create_xprt(args, xprt);
3904     - }
3905     -
3906     - return rpc_create(args);
3907     -}
3908     -
3909     static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
3910     {
3911     int maxtime = max_cb_time(clp->net);
3912     @@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
3913     args.authflavor = ses->se_cb_sec.flavor;
3914     }
3915     /* Create RPC client */
3916     - client = create_backchannel_client(&args);
3917     + client = rpc_create(&args);
3918     if (IS_ERR(client)) {
3919     dprintk("NFSD: couldn't create callback client: %ld\n",
3920     PTR_ERR(client));
3921     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3922     index 6b800b5b8fed..ed2f64ca49de 100644
3923     --- a/fs/nfsd/nfs4state.c
3924     +++ b/fs/nfsd/nfs4state.c
3925     @@ -3452,6 +3452,10 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3926     struct nfs4_openowner *oo = open->op_openowner;
3927     struct nfs4_ol_stateid *retstp = NULL;
3928    
3929     + /* Init and lock the mutex before taking the spinlocks: mutex_lock() can sleep */
3930     + mutex_init(&stp->st_mutex);
3931     + mutex_lock(&stp->st_mutex);
3932     +
3933     spin_lock(&oo->oo_owner.so_client->cl_lock);
3934     spin_lock(&fp->fi_lock);
3935    
3936     @@ -3467,13 +3471,17 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
3937     stp->st_access_bmap = 0;
3938     stp->st_deny_bmap = 0;
3939     stp->st_openstp = NULL;
3940     - init_rwsem(&stp->st_rwsem);
3941     list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3942     list_add(&stp->st_perfile, &fp->fi_stateids);
3943    
3944     out_unlock:
3945     spin_unlock(&fp->fi_lock);
3946     spin_unlock(&oo->oo_owner.so_client->cl_lock);
3947     + if (retstp) {
3948     + mutex_lock(&retstp->st_mutex);
3949     + /* Not that we need to, just for neatness */
3950     + mutex_unlock(&stp->st_mutex);
3951     + }
3952     return retstp;
3953     }
3954    
3955     @@ -4300,32 +4308,34 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
3956     */
3957     if (stp) {
3958     /* Stateid was found, this is an OPEN upgrade */
3959     - down_read(&stp->st_rwsem);
3960     + mutex_lock(&stp->st_mutex);
3961     status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
3962     if (status) {
3963     - up_read(&stp->st_rwsem);
3964     + mutex_unlock(&stp->st_mutex);
3965     goto out;
3966     }
3967     } else {
3968     stp = open->op_stp;
3969     open->op_stp = NULL;
3970     + /*
3971     + * init_open_stateid() either returns a locked stateid
3972     + * it found, or initializes and locks the new one we passed in
3973     + */
3974     swapstp = init_open_stateid(stp, fp, open);
3975     if (swapstp) {
3976     nfs4_put_stid(&stp->st_stid);
3977     stp = swapstp;
3978     - down_read(&stp->st_rwsem);
3979     status = nfs4_upgrade_open(rqstp, fp, current_fh,
3980     stp, open);
3981     if (status) {
3982     - up_read(&stp->st_rwsem);
3983     + mutex_unlock(&stp->st_mutex);
3984     goto out;
3985     }
3986     goto upgrade_out;
3987     }
3988     - down_read(&stp->st_rwsem);
3989     status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
3990     if (status) {
3991     - up_read(&stp->st_rwsem);
3992     + mutex_unlock(&stp->st_mutex);
3993     release_open_stateid(stp);
3994     goto out;
3995     }
3996     @@ -4337,7 +4347,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
3997     }
3998     upgrade_out:
3999     nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
4000     - up_read(&stp->st_rwsem);
4001     + mutex_unlock(&stp->st_mutex);
4002    
4003     if (nfsd4_has_session(&resp->cstate)) {
4004     if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
4005     @@ -4950,12 +4960,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4006     * revoked delegations are kept only for free_stateid.
4007     */
4008     return nfserr_bad_stateid;
4009     - down_write(&stp->st_rwsem);
4010     + mutex_lock(&stp->st_mutex);
4011     status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4012     if (status == nfs_ok)
4013     status = nfs4_check_fh(current_fh, &stp->st_stid);
4014     if (status != nfs_ok)
4015     - up_write(&stp->st_rwsem);
4016     + mutex_unlock(&stp->st_mutex);
4017     return status;
4018     }
4019    
4020     @@ -5003,7 +5013,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
4021     return status;
4022     oo = openowner(stp->st_stateowner);
4023     if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
4024     - up_write(&stp->st_rwsem);
4025     + mutex_unlock(&stp->st_mutex);
4026     nfs4_put_stid(&stp->st_stid);
4027     return nfserr_bad_stateid;
4028     }
4029     @@ -5035,12 +5045,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4030     oo = openowner(stp->st_stateowner);
4031     status = nfserr_bad_stateid;
4032     if (oo->oo_flags & NFS4_OO_CONFIRMED) {
4033     - up_write(&stp->st_rwsem);
4034     + mutex_unlock(&stp->st_mutex);
4035     goto put_stateid;
4036     }
4037     oo->oo_flags |= NFS4_OO_CONFIRMED;
4038     nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
4039     - up_write(&stp->st_rwsem);
4040     + mutex_unlock(&stp->st_mutex);
4041     dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
4042     __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
4043    
4044     @@ -5116,7 +5126,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
4045     nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
4046     status = nfs_ok;
4047     put_stateid:
4048     - up_write(&stp->st_rwsem);
4049     + mutex_unlock(&stp->st_mutex);
4050     nfs4_put_stid(&stp->st_stid);
4051     out:
4052     nfsd4_bump_seqid(cstate, status);
4053     @@ -5169,7 +5179,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4054     if (status)
4055     goto out;
4056     nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
4057     - up_write(&stp->st_rwsem);
4058     + mutex_unlock(&stp->st_mutex);
4059    
4060     nfsd4_close_open_stateid(stp);
4061    
4062     @@ -5395,7 +5405,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
4063     stp->st_access_bmap = 0;
4064     stp->st_deny_bmap = open_stp->st_deny_bmap;
4065     stp->st_openstp = open_stp;
4066     - init_rwsem(&stp->st_rwsem);
4067     + mutex_init(&stp->st_mutex);
4068     list_add(&stp->st_locks, &open_stp->st_locks);
4069     list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
4070     spin_lock(&fp->fi_lock);
4071     @@ -5564,7 +5574,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4072     &open_stp, nn);
4073     if (status)
4074     goto out;
4075     - up_write(&open_stp->st_rwsem);
4076     + mutex_unlock(&open_stp->st_mutex);
4077     open_sop = openowner(open_stp->st_stateowner);
4078     status = nfserr_bad_stateid;
4079     if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4080     @@ -5573,7 +5583,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4081     status = lookup_or_create_lock_state(cstate, open_stp, lock,
4082     &lock_stp, &new);
4083     if (status == nfs_ok)
4084     - down_write(&lock_stp->st_rwsem);
4085     + mutex_lock(&lock_stp->st_mutex);
4086     } else {
4087     status = nfs4_preprocess_seqid_op(cstate,
4088     lock->lk_old_lock_seqid,
4089     @@ -5677,7 +5687,7 @@ out:
4090     seqid_mutating_err(ntohl(status)))
4091     lock_sop->lo_owner.so_seqid++;
4092    
4093     - up_write(&lock_stp->st_rwsem);
4094     + mutex_unlock(&lock_stp->st_mutex);
4095    
4096     /*
4097     * If this is a new, never-before-used stateid, and we are
4098     @@ -5847,7 +5857,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4099     fput:
4100     fput(filp);
4101     put_stateid:
4102     - up_write(&stp->st_rwsem);
4103     + mutex_unlock(&stp->st_mutex);
4104     nfs4_put_stid(&stp->st_stid);
4105     out:
4106     nfsd4_bump_seqid(cstate, status);
4107     diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
4108     index 77fdf4de91ba..77860b75da9d 100644
4109     --- a/fs/nfsd/state.h
4110     +++ b/fs/nfsd/state.h
4111     @@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
4112     unsigned char st_access_bmap;
4113     unsigned char st_deny_bmap;
4114     struct nfs4_ol_stateid *st_openstp;
4115     - struct rw_semaphore st_rwsem;
4116     + struct mutex st_mutex;
4117     };
4118    
4119     static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
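Replacing st_rwsem with st_mutex reflects that the open/lock stateid is now always locked exclusively, and it lets init_open_stateid() return whichever stateid won the race already locked. The caller-side protocol from the nfs4state.c hunks, condensed:

    swapstp = init_open_stateid(stp, fp, open);   /* result comes back locked */
    if (swapstp) {
        nfs4_put_stid(&stp->st_stid);             /* lost the race: drop ours */
        stp = swapstp;
    }
    /* ... open or upgrade work under st_mutex ... */
    mutex_unlock(&stp->st_mutex);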
4120     diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
4121     index a2b1d7ce3e1a..ba5ef733951f 100644
4122     --- a/fs/overlayfs/dir.c
4123     +++ b/fs/overlayfs/dir.c
4124     @@ -511,6 +511,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
4125     struct dentry *upper;
4126     struct dentry *opaquedir = NULL;
4127     int err;
4128     + int flags = 0;
4129    
4130     if (WARN_ON(!workdir))
4131     return -EROFS;
4132     @@ -540,46 +541,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
4133     if (err)
4134     goto out_dput;
4135    
4136     - whiteout = ovl_whiteout(workdir, dentry);
4137     - err = PTR_ERR(whiteout);
4138     - if (IS_ERR(whiteout))
4139     + upper = lookup_one_len(dentry->d_name.name, upperdir,
4140     + dentry->d_name.len);
4141     + err = PTR_ERR(upper);
4142     + if (IS_ERR(upper))
4143     goto out_unlock;
4144    
4145     - upper = ovl_dentry_upper(dentry);
4146     - if (!upper) {
4147     - upper = lookup_one_len(dentry->d_name.name, upperdir,
4148     - dentry->d_name.len);
4149     - err = PTR_ERR(upper);
4150     - if (IS_ERR(upper))
4151     - goto kill_whiteout;
4152     -
4153     - err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
4154     - dput(upper);
4155     - if (err)
4156     - goto kill_whiteout;
4157     - } else {
4158     - int flags = 0;
4159     + err = -ESTALE;
4160     + if ((opaquedir && upper != opaquedir) ||
4161     + (!opaquedir && ovl_dentry_upper(dentry) &&
4162     + upper != ovl_dentry_upper(dentry))) {
4163     + goto out_dput_upper;
4164     + }
4165    
4166     - if (opaquedir)
4167     - upper = opaquedir;
4168     - err = -ESTALE;
4169     - if (upper->d_parent != upperdir)
4170     - goto kill_whiteout;
4171     + whiteout = ovl_whiteout(workdir, dentry);
4172     + err = PTR_ERR(whiteout);
4173     + if (IS_ERR(whiteout))
4174     + goto out_dput_upper;
4175    
4176     - if (is_dir)
4177     - flags |= RENAME_EXCHANGE;
4178     + if (d_is_dir(upper))
4179     + flags = RENAME_EXCHANGE;
4180    
4181     - err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
4182     - if (err)
4183     - goto kill_whiteout;
4184     + err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
4185     + if (err)
4186     + goto kill_whiteout;
4187     + if (flags)
4188     + ovl_cleanup(wdir, upper);
4189    
4190     - if (is_dir)
4191     - ovl_cleanup(wdir, upper);
4192     - }
4193     ovl_dentry_version_inc(dentry->d_parent);
4194     out_d_drop:
4195     d_drop(dentry);
4196     dput(whiteout);
4197     +out_dput_upper:
4198     + dput(upper);
4199     out_unlock:
4200     unlock_rename(workdir, upperdir);
4201     out_dput:
4202     @@ -596,21 +590,25 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
4203     {
4204     struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
4205     struct inode *dir = upperdir->d_inode;
4206     - struct dentry *upper = ovl_dentry_upper(dentry);
4207     + struct dentry *upper;
4208     int err;
4209    
4210     mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
4211     + upper = lookup_one_len(dentry->d_name.name, upperdir,
4212     + dentry->d_name.len);
4213     + err = PTR_ERR(upper);
4214     + if (IS_ERR(upper))
4215     + goto out_unlock;
4216     +
4217     err = -ESTALE;
4218     - if (upper->d_parent == upperdir) {
4219     - /* Don't let d_delete() think it can reset d_inode */
4220     - dget(upper);
4221     + if (upper == ovl_dentry_upper(dentry)) {
4222     if (is_dir)
4223     err = vfs_rmdir(dir, upper);
4224     else
4225     err = vfs_unlink(dir, upper, NULL);
4226     - dput(upper);
4227     ovl_dentry_version_inc(dentry->d_parent);
4228     }
4229     + dput(upper);
4230    
4231     /*
4232     * Keeping this dentry hashed would mean having to release
4233     @@ -620,6 +618,7 @@ static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
4234     */
4235     if (!err)
4236     d_drop(dentry);
4237     +out_unlock:
4238     mutex_unlock(&dir->i_mutex);
4239    
4240     return err;
4241     @@ -840,29 +839,39 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
4242    
4243     trap = lock_rename(new_upperdir, old_upperdir);
4244    
4245     - olddentry = ovl_dentry_upper(old);
4246     - newdentry = ovl_dentry_upper(new);
4247     - if (newdentry) {
4248     +
4249     + olddentry = lookup_one_len(old->d_name.name, old_upperdir,
4250     + old->d_name.len);
4251     + err = PTR_ERR(olddentry);
4252     + if (IS_ERR(olddentry))
4253     + goto out_unlock;
4254     +
4255     + err = -ESTALE;
4256     + if (olddentry != ovl_dentry_upper(old))
4257     + goto out_dput_old;
4258     +
4259     + newdentry = lookup_one_len(new->d_name.name, new_upperdir,
4260     + new->d_name.len);
4261     + err = PTR_ERR(newdentry);
4262     + if (IS_ERR(newdentry))
4263     + goto out_dput_old;
4264     +
4265     + err = -ESTALE;
4266     + if (ovl_dentry_upper(new)) {
4267     if (opaquedir) {
4268     - newdentry = opaquedir;
4269     - opaquedir = NULL;
4270     + if (newdentry != opaquedir)
4271     + goto out_dput;
4272     } else {
4273     - dget(newdentry);
4274     + if (newdentry != ovl_dentry_upper(new))
4275     + goto out_dput;
4276     }
4277     } else {
4278     new_create = true;
4279     - newdentry = lookup_one_len(new->d_name.name, new_upperdir,
4280     - new->d_name.len);
4281     - err = PTR_ERR(newdentry);
4282     - if (IS_ERR(newdentry))
4283     - goto out_unlock;
4284     + if (!d_is_negative(newdentry) &&
4285     + (!new_opaque || !ovl_is_whiteout(newdentry)))
4286     + goto out_dput;
4287     }
4288    
4289     - err = -ESTALE;
4290     - if (olddentry->d_parent != old_upperdir)
4291     - goto out_dput;
4292     - if (newdentry->d_parent != new_upperdir)
4293     - goto out_dput;
4294     if (olddentry == trap)
4295     goto out_dput;
4296     if (newdentry == trap)
4297     @@ -925,6 +934,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
4298    
4299     out_dput:
4300     dput(newdentry);
4301     +out_dput_old:
4302     + dput(olddentry);
4303     out_unlock:
4304     unlock_rename(new_upperdir, old_upperdir);
4305     out_revert_creds:
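
The rewritten ovl_remove_and_whiteout(), ovl_remove_upper() and ovl_rename2() all follow the same discipline: re-look the name up under the locked parent and compare the result against the cached upper dentry, returning -ESTALE on any mismatch, instead of trusting a dentry obtained before the lock was taken. A minimal sketch of that check-under-lock pattern, with illustrative names (remove_checked() and cached_upper are not kernel API):

	/*
	 * Sketch of the re-lookup-and-compare pattern used above, mirroring
	 * ovl_remove_upper(); names are illustrative.
	 */
	static int remove_checked(struct dentry *parent, struct dentry *cached_upper,
				  const char *name, int len)
	{
		struct inode *dir = parent->d_inode;
		struct dentry *upper;
		int err;

		mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
		upper = lookup_one_len(name, parent, len);	/* fresh lookup under lock */
		if (IS_ERR(upper)) {
			err = PTR_ERR(upper);
			goto out_unlock;
		}
		err = -ESTALE;
		if (upper == cached_upper)			/* nobody raced the lookup */
			err = vfs_unlink(dir, upper, NULL);
		dput(upper);
	out_unlock:
		mutex_unlock(&dir->i_mutex);
		return err;
	}
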
4306     diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
4307     index 05ac9a95e881..0597820f5d9d 100644
4308     --- a/fs/overlayfs/inode.c
4309     +++ b/fs/overlayfs/inode.c
4310     @@ -412,12 +412,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
4311     if (!inode)
4312     return NULL;
4313    
4314     - mode &= S_IFMT;
4315     -
4316     inode->i_ino = get_next_ino();
4317     inode->i_mode = mode;
4318     inode->i_flags |= S_NOATIME | S_NOCMTIME;
4319    
4320     + mode &= S_IFMT;
4321     switch (mode) {
4322     case S_IFDIR:
4323     inode->i_private = oe;
4324     diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
4325     index e17154aeaae4..735e1d49b301 100644
4326     --- a/fs/overlayfs/overlayfs.h
4327     +++ b/fs/overlayfs/overlayfs.h
4328     @@ -181,6 +181,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
4329     {
4330     to->i_uid = from->i_uid;
4331     to->i_gid = from->i_gid;
4332     + to->i_mode = from->i_mode;
4333     }
4334    
4335     /* dir.c */
4336     diff --git a/fs/posix_acl.c b/fs/posix_acl.c
4337     index 4adde1e2cbec..34bd1bd354e6 100644
4338     --- a/fs/posix_acl.c
4339     +++ b/fs/posix_acl.c
4340     @@ -788,6 +788,28 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
4341     return error;
4342     }
4343    
4344     +int
4345     +set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
4346     +{
4347     + if (!IS_POSIXACL(inode))
4348     + return -EOPNOTSUPP;
4349     + if (!inode->i_op->set_acl)
4350     + return -EOPNOTSUPP;
4351     +
4352     + if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
4353     + return acl ? -EACCES : 0;
4354     + if (!inode_owner_or_capable(inode))
4355     + return -EPERM;
4356     +
4357     + if (acl) {
4358     + int ret = posix_acl_valid(acl);
4359     + if (ret)
4360     + return ret;
4361     + }
4362     + return inode->i_op->set_acl(inode, acl, type);
4363     +}
4364     +EXPORT_SYMBOL(set_posix_acl);
4365     +
4366     static int
4367     posix_acl_xattr_set(const struct xattr_handler *handler,
4368     struct dentry *dentry, const char *name,
4369     @@ -799,30 +821,13 @@ posix_acl_xattr_set(const struct xattr_handler *handler,
4370    
4371     if (strcmp(name, "") != 0)
4372     return -EINVAL;
4373     - if (!IS_POSIXACL(inode))
4374     - return -EOPNOTSUPP;
4375     - if (!inode->i_op->set_acl)
4376     - return -EOPNOTSUPP;
4377     -
4378     - if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
4379     - return value ? -EACCES : 0;
4380     - if (!inode_owner_or_capable(inode))
4381     - return -EPERM;
4382    
4383     if (value) {
4384     acl = posix_acl_from_xattr(&init_user_ns, value, size);
4385     if (IS_ERR(acl))
4386     return PTR_ERR(acl);
4387     -
4388     - if (acl) {
4389     - ret = posix_acl_valid(acl);
4390     - if (ret)
4391     - goto out;
4392     - }
4393     }
4394     -
4395     - ret = inode->i_op->set_acl(inode, acl, handler->flags);
4396     -out:
4397     + ret = set_posix_acl(inode, handler->flags, acl);
4398     posix_acl_release(acl);
4399     return ret;
4400     }
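
Factoring the checks out of posix_acl_xattr_set() into an exported set_posix_acl() lets in-kernel users apply an ACL with the same IS_POSIXACL, ownership, validity and ACL_TYPE_DEFAULT-on-directories checks, without round-tripping through an xattr blob. A hypothetical caller (give_default_acl() is illustrative, not kernel API):

	/*
	 * Apply a freshly built default ACL to a directory; set_posix_acl()
	 * performs the permission and validity checks, the caller still owns
	 * and releases its own reference.
	 */
	static int give_default_acl(struct inode *dir, struct posix_acl *acl)
	{
		int err = set_posix_acl(dir, ACL_TYPE_DEFAULT, acl);

		posix_acl_release(acl);
		return err;
	}
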
4401     diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
4402     index 0edc12856147..b895af7d8d80 100644
4403     --- a/fs/ubifs/file.c
4404     +++ b/fs/ubifs/file.c
4405     @@ -52,6 +52,7 @@
4406     #include "ubifs.h"
4407     #include <linux/mount.h>
4408     #include <linux/slab.h>
4409     +#include <linux/migrate.h>
4410    
4411     static int read_block(struct inode *inode, void *addr, unsigned int block,
4412     struct ubifs_data_node *dn)
4413     @@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
4414     return ret;
4415     }
4416    
4417     +#ifdef CONFIG_MIGRATION
4418     +static int ubifs_migrate_page(struct address_space *mapping,
4419     + struct page *newpage, struct page *page, enum migrate_mode mode)
4420     +{
4421     + int rc;
4422     +
4423     + rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
4424     + if (rc != MIGRATEPAGE_SUCCESS)
4425     + return rc;
4426     +
4427     + if (PagePrivate(page)) {
4428     + ClearPagePrivate(page);
4429     + SetPagePrivate(newpage);
4430     + }
4431     +
4432     + migrate_page_copy(newpage, page);
4433     + return MIGRATEPAGE_SUCCESS;
4434     +}
4435     +#endif
4436     +
4437     static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
4438     {
4439     /*
4440     @@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
4441     .write_end = ubifs_write_end,
4442     .invalidatepage = ubifs_invalidatepage,
4443     .set_page_dirty = ubifs_set_page_dirty,
4444     +#ifdef CONFIG_MIGRATION
4445     + .migratepage = ubifs_migrate_page,
4446     +#endif
4447     .releasepage = ubifs_releasepage,
4448     };
4449    
4450     diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
4451     index 7d633f19e38a..1885fc44b1bc 100644
4452     --- a/include/asm-generic/qspinlock.h
4453     +++ b/include/asm-generic/qspinlock.h
4454     @@ -21,37 +21,33 @@
4455     #include <asm-generic/qspinlock_types.h>
4456    
4457     /**
4458     + * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
4459     + * @lock : Pointer to queued spinlock structure
4460     + *
4461     + * There is a very slight possibility of live-lock if the lockers keep coming
4462     + * and the waiter is just unfortunate enough to not see any unlock state.
4463     + */
4464     +#ifndef queued_spin_unlock_wait
4465     +extern void queued_spin_unlock_wait(struct qspinlock *lock);
4466     +#endif
4467     +
4468     +/**
4469     * queued_spin_is_locked - is the spinlock locked?
4470     * @lock: Pointer to queued spinlock structure
4471     * Return: 1 if it is locked, 0 otherwise
4472     */
4473     +#ifndef queued_spin_is_locked
4474     static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
4475     {
4476     /*
4477     - * queued_spin_lock_slowpath() can ACQUIRE the lock before
4478     - * issuing the unordered store that sets _Q_LOCKED_VAL.
4479     - *
4480     - * See both smp_cond_acquire() sites for more detail.
4481     - *
4482     - * This however means that in code like:
4483     - *
4484     - * spin_lock(A) spin_lock(B)
4485     - * spin_unlock_wait(B) spin_is_locked(A)
4486     - * do_something() do_something()
4487     - *
4488     - * Both CPUs can end up running do_something() because the store
4489     - * setting _Q_LOCKED_VAL will pass through the loads in
4490     - * spin_unlock_wait() and/or spin_is_locked().
4491     + * See queued_spin_unlock_wait().
4492     *
4493     - * Avoid this by issuing a full memory barrier between the spin_lock()
4494     - * and the loads in spin_unlock_wait() and spin_is_locked().
4495     - *
4496     - * Note that regular mutual exclusion doesn't care about this
4497     - * delayed store.
4498     + * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
4499     + * isn't immediately observable.
4500     */
4501     - smp_mb();
4502     - return atomic_read(&lock->val) & _Q_LOCKED_MASK;
4503     + return atomic_read(&lock->val);
4504     }
4505     +#endif
4506    
4507     /**
4508     * queued_spin_value_unlocked - is the spinlock structure unlocked?
4509     @@ -121,21 +117,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
4510     }
4511     #endif
4512    
4513     -/**
4514     - * queued_spin_unlock_wait - wait until current lock holder releases the lock
4515     - * @lock : Pointer to queued spinlock structure
4516     - *
4517     - * There is a very slight possibility of live-lock if the lockers keep coming
4518     - * and the waiter is just unfortunate enough to not see any unlock state.
4519     - */
4520     -static inline void queued_spin_unlock_wait(struct qspinlock *lock)
4521     -{
4522     - /* See queued_spin_is_locked() */
4523     - smp_mb();
4524     - while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
4525     - cpu_relax();
4526     -}
4527     -
4528     #ifndef virt_spin_lock
4529     static __always_inline bool virt_spin_lock(struct qspinlock *lock)
4530     {
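
The deleted header comment describes a classic store-buffering race: the unordered store that sets _Q_LOCKED_VAL can pass the loads in spin_unlock_wait()/spin_is_locked(), so two CPUs can each conclude the other's lock is free. A userspace model of that shape using C11 relaxed atomics (illustrative only, not kernel code):

	/*
	 * Store-buffering litmus test: with relaxed ordering, r0 == 0 and
	 * r1 == 0 is a permitted outcome, i.e. both threads "see the other
	 * lock free" -- exactly what the old smp_mb() (and now the
	 * wait-for-any-!0 scheme in qspinlock.c) rules out.
	 */
	#include <stdatomic.h>
	#include <pthread.h>
	#include <stdio.h>

	static atomic_int A, B;
	static int r0, r1;

	static void *cpu0(void *arg)
	{
		atomic_store_explicit(&A, 1, memory_order_relaxed);	/* "lock A" */
		r0 = atomic_load_explicit(&B, memory_order_relaxed);	/* "is B locked?" */
		return NULL;
	}

	static void *cpu1(void *arg)
	{
		atomic_store_explicit(&B, 1, memory_order_relaxed);	/* "lock B" */
		r1 = atomic_load_explicit(&A, memory_order_relaxed);	/* "is A locked?" */
		return NULL;
	}

	int main(void)
	{
		pthread_t t0, t1;

		pthread_create(&t0, NULL, cpu0, NULL);
		pthread_create(&t1, NULL, cpu1, NULL);
		pthread_join(t0, NULL);
		pthread_join(t1, NULL);
		printf("r0=%d r1=%d\n", r0, r1);	/* r0==0 && r1==0 is possible */
		return 0;
	}
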
4531     diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
4532     index c768ddfbe53c..b7bfa513e6ed 100644
4533     --- a/include/drm/ttm/ttm_bo_api.h
4534     +++ b/include/drm/ttm/ttm_bo_api.h
4535     @@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
4536     */
4537     extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
4538     bool interruptible, bool no_wait);
4539     +
4540     +/**
4541     + * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
4542     + *
4543     + * @placement: The set of proposed placements to check @mem against.
4544     + * @mem: The struct ttm_mem_reg indicating the region where the bo resides
4545     + * @new_flags: Describes compatible placement found
4546     + *
4547     + * Returns true if the placement is compatible
4548     + */
4549     +extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
4550     + struct ttm_mem_reg *mem,
4551     + uint32_t *new_flags);
4552     +
4553     /**
4554     * ttm_bo_validate
4555     *
4556     diff --git a/include/linux/dcache.h b/include/linux/dcache.h
4557     index d81746d3b2da..8d7151eb6ceb 100644
4558     --- a/include/linux/dcache.h
4559     +++ b/include/linux/dcache.h
4560     @@ -603,5 +603,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
4561     return inode;
4562     }
4563    
4564     +/**
4565     + * d_real_inode - Return the real inode
4566     + * @dentry: The dentry to query
4567     + *
4568     + * If dentry is on a union/overlay, then return the underlying, real inode.
4569     + * Otherwise return d_inode().
4570     + */
4571     +static inline struct inode *d_real_inode(struct dentry *dentry)
4572     +{
4573     + return d_backing_inode(d_real(dentry));
4574     +}
4575     +
4576    
4577     #endif /* __LINUX_DCACHE_H */
4578     diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
4579     index 0536524bb9eb..68904469fba1 100644
4580     --- a/include/linux/jump_label.h
4581     +++ b/include/linux/jump_label.h
4582     @@ -117,13 +117,18 @@ struct module;
4583    
4584     #include <linux/atomic.h>
4585    
4586     +#ifdef HAVE_JUMP_LABEL
4587     +
4588     static inline int static_key_count(struct static_key *key)
4589     {
4590     - return atomic_read(&key->enabled);
4591     + /*
4592     + * -1 means the first static_key_slow_inc() is in progress.
4593     + * static_key_enabled() must return true, so return 1 here.
4594     + */
4595     + int n = atomic_read(&key->enabled);
4596     + return n >= 0 ? n : 1;
4597     }
4598    
4599     -#ifdef HAVE_JUMP_LABEL
4600     -
4601     #define JUMP_TYPE_FALSE 0UL
4602     #define JUMP_TYPE_TRUE 1UL
4603     #define JUMP_TYPE_MASK 1UL
4604     @@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
4605    
4606     #else /* !HAVE_JUMP_LABEL */
4607    
4608     +static inline int static_key_count(struct static_key *key)
4609     +{
4610     + return atomic_read(&key->enabled);
4611     +}
4612     +
4613     static __always_inline void jump_label_init(void)
4614     {
4615     static_key_initialized = true;
4616     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
4617     index 1716f9395010..d443d9ab0236 100644
4618     --- a/include/linux/skbuff.h
4619     +++ b/include/linux/skbuff.h
4620     @@ -982,6 +982,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
4621     }
4622    
4623     void __skb_get_hash(struct sk_buff *skb);
4624     +u32 __skb_get_hash_symmetric(struct sk_buff *skb);
4625     u32 skb_get_poff(const struct sk_buff *skb);
4626     u32 __skb_get_poff(const struct sk_buff *skb, void *data,
4627     const struct flow_keys *keys, int hlen);
4628     @@ -2773,6 +2774,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
4629     }
4630    
4631     /**
4632     + * skb_push_rcsum - push skb and update receive checksum
4633     + * @skb: buffer to update
4634     + * @len: length of data pulled
4635     + * @len: length of data pushed
4636     + * This function performs an skb_push on the packet and updates
4637     + * the CHECKSUM_COMPLETE checksum. It should be used on
4638     + * receive path processing instead of skb_push unless you know
4639     + * that the checksum difference is zero (e.g., a valid IP header)
4640     + * or you are setting ip_summed to CHECKSUM_NONE.
4641     + */
4642     +static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
4643     + unsigned int len)
4644     +{
4645     + skb_push(skb, len);
4646     + skb_postpush_rcsum(skb, skb->data, len);
4647     + return skb->data;
4648     +}
4649     +
4650     +/**
4651     * pskb_trim_rcsum - trim received skb and update checksum
4652     * @skb: buffer to trim
4653     * @len: new length
4654     diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
4655     index 131032f15cc1..9b6027c51736 100644
4656     --- a/include/linux/sunrpc/clnt.h
4657     +++ b/include/linux/sunrpc/clnt.h
4658     @@ -135,8 +135,6 @@ struct rpc_create_args {
4659     #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
4660    
4661     struct rpc_clnt *rpc_create(struct rpc_create_args *args);
4662     -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
4663     - struct rpc_xprt *xprt);
4664     struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
4665     const struct rpc_program *, u32);
4666     void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt);
4667     diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
4668     index 966889a20ea3..e479033bd782 100644
4669     --- a/include/linux/usb/ehci_def.h
4670     +++ b/include/linux/usb/ehci_def.h
4671     @@ -180,11 +180,11 @@ struct ehci_regs {
4672     * PORTSCx
4673     */
4674     /* HOSTPC: offset 0x84 */
4675     - u32 hostpc[1]; /* HOSTPC extension */
4676     + u32 hostpc[0]; /* HOSTPC extension */
4677     #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
4678     #define HOSTPC_PSPD (3<<25) /* Port speed detection */
4679    
4680     - u32 reserved5[16];
4681     + u32 reserved5[17];
4682    
4683     /* USBMODE_EX: offset 0xc8 */
4684     u32 usbmode_ex; /* USB Device mode extension */
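
The two array sizes are coupled: hostpc sits at offset 0x84 and usbmode_ex must stay at its hardware offset 0xc8. Declaring hostpc as a zero-length array lets controllers with one HOSTPC register per port index hostpc[port], and reserved5 grows from 16 to 17 words to compensate: 1*4 + 16*4 and 0 + 17*4 are both 68 (0x44) bytes. A compile-time check of that arithmetic (GNU C; the struct is an illustrative tail of ehci_regs, not the real declaration):

	#include <assert.h>
	#include <stddef.h>

	struct ehci_regs_tail {
		unsigned int hostpc[0];		/* offset 0x84 in the real struct */
		unsigned int reserved5[17];
		unsigned int usbmode_ex;	/* 0x84 + 0x44 = 0xc8, as before */
	};

	static_assert(offsetof(struct ehci_regs_tail, usbmode_ex) == 0x44,
		      "usbmode_ex moved; hostpc/reserved5 sizes are coupled");
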
4685     diff --git a/kernel/jump_label.c b/kernel/jump_label.c
4686     index 05254eeb4b4e..4b353e0be121 100644
4687     --- a/kernel/jump_label.c
4688     +++ b/kernel/jump_label.c
4689     @@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
4690    
4691     void static_key_slow_inc(struct static_key *key)
4692     {
4693     + int v, v1;
4694     +
4695     STATIC_KEY_CHECK_USE();
4696     - if (atomic_inc_not_zero(&key->enabled))
4697     - return;
4698     +
4699     + /*
4700     + * Careful if we get concurrent static_key_slow_inc() calls;
4701     + * later calls must wait for the first one to _finish_ the
4702     + * jump_label_update() process. At the same time, however,
4703     + * the jump_label_update() call below wants to see
4704     + * static_key_enabled(&key) for jumps to be updated properly.
4705     + *
4706     + * So give a special meaning to negative key->enabled: it sends
4707     + * static_key_slow_inc() down the slow path, and it is non-zero
4708     + * so it counts as "enabled" in jump_label_update(). Note that
4709     + * atomic_inc_unless_negative() checks >= 0, so roll our own.
4710     + */
4711     + for (v = atomic_read(&key->enabled); v > 0; v = v1) {
4712     + v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
4713     + if (likely(v1 == v))
4714     + return;
4715     + }
4716    
4717     jump_label_lock();
4718     - if (atomic_inc_return(&key->enabled) == 1)
4719     + if (atomic_read(&key->enabled) == 0) {
4720     + atomic_set(&key->enabled, -1);
4721     jump_label_update(key);
4722     + atomic_set(&key->enabled, 1);
4723     + } else {
4724     + atomic_inc(&key->enabled);
4725     + }
4726     jump_label_unlock();
4727     }
4728     EXPORT_SYMBOL_GPL(static_key_slow_inc);
4729     @@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
4730     static void __static_key_slow_dec(struct static_key *key,
4731     unsigned long rate_limit, struct delayed_work *work)
4732     {
4733     + /*
4734     + * The negative count check is valid even when a negative
4735     + * key->enabled is in use by static_key_slow_inc(); a
4736     + * __static_key_slow_dec() before the first static_key_slow_inc()
4737     + * returns is unbalanced, because all other static_key_slow_inc()
4738     + * instances block while the update is in progress.
4739     + */
4740     if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
4741     WARN(atomic_read(&key->enabled) < 0,
4742     "jump label: negative count!\n");
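
The fast path above only increments strictly positive counts; both 0 (disabled) and the new -1 sentinel (first enable still inside jump_label_update()) push callers into the locked slow path, so no caller can return from static_key_slow_inc() while the branches are still being patched. A simplified model of that loop (fast_inc() is illustrative):

	/*
	 * Lockless fast path: bump the count only while it is > 0.
	 * Returning false means the caller must take jump_label_lock()
	 * and re-check, as the patched code above does.
	 */
	static bool fast_inc(atomic_t *enabled)
	{
		int v = atomic_read(enabled);

		while (v > 0) {
			int old = atomic_cmpxchg(enabled, v, v + 1);

			if (old == v)
				return true;	/* bumped; branches already patched */
			v = old;		/* lost a race; retry with fresh value */
		}
		return false;			/* disabled or first enable in flight */
	}
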
4743     diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
4744     index 0551c219c40e..89350f924c85 100644
4745     --- a/kernel/locking/mutex.c
4746     +++ b/kernel/locking/mutex.c
4747     @@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
4748     if (!hold_ctx)
4749     return 0;
4750    
4751     - if (unlikely(ctx == hold_ctx))
4752     - return -EALREADY;
4753     -
4754     if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
4755     (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
4756     #ifdef CONFIG_DEBUG_MUTEXES
4757     @@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
4758     unsigned long flags;
4759     int ret;
4760    
4761     + if (use_ww_ctx) {
4762     + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
4763     + if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
4764     + return -EALREADY;
4765     + }
4766     +
4767     preempt_disable();
4768     mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
4769    
4770     diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
4771     index 87e9ce6a63c5..8173bc7fec92 100644
4772     --- a/kernel/locking/qspinlock.c
4773     +++ b/kernel/locking/qspinlock.c
4774     @@ -255,6 +255,66 @@ static __always_inline void __pv_wait_head(struct qspinlock *lock,
4775     #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
4776     #endif
4777    
4778     +/*
4779     + * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
4780     + * issuing an _unordered_ store to set _Q_LOCKED_VAL.
4781     + *
4782     + * This means that the store can be delayed, but no later than the
4783     + * store-release from the unlock. This means that simply observing
4784     + * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
4785     + *
4786     + * There are two paths that can issue the unordered store:
4787     + *
4788     + * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
4789     + *
4790     + * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
4791     + * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
4792     + *
4793     + * However, in both cases we have other !0 state we've set before to queue
4794     + * ourselves:
4795     + *
4796     + * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
4797     + * load is constrained by that ACQUIRE to not pass before that, and thus must
4798     + * observe the store.
4799     + *
4800     + * For (2) we have a more interesting scenario. We enqueue ourselves using
4801     + * xchg_tail(), which ends up being a RELEASE. This in itself is not
4802     + * sufficient, however that is followed by an smp_cond_acquire() on the same
4803     + * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
4804     + * guarantees we must observe that store.
4805     + *
4806     + * Therefore both cases have other !0 state that is observable before the
4807     + * unordered locked byte store comes through. This means we can use that to
4808     + * wait for the lock store, and then wait for an unlock.
4809     + */
4810     +#ifndef queued_spin_unlock_wait
4811     +void queued_spin_unlock_wait(struct qspinlock *lock)
4812     +{
4813     + u32 val;
4814     +
4815     + for (;;) {
4816     + val = atomic_read(&lock->val);
4817     +
4818     + if (!val) /* not locked, we're done */
4819     + goto done;
4820     +
4821     + if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
4822     + break;
4823     +
4824     + /* not locked, but pending, wait until we observe the lock */
4825     + cpu_relax();
4826     + }
4827     +
4828     + /* any unlock is good */
4829     + while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
4830     + cpu_relax();
4831     +
4832     +done:
4833     + smp_rmb(); /* CTRL + RMB -> ACQUIRE */
4834     +}
4835     +EXPORT_SYMBOL(queued_spin_unlock_wait);
4836     +#endif
4837     +
4838     #endif /* _GEN_PV_LOCK_SLOWPATH */
4839    
4840     /**
4841     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
4842     index cfdc0e61066c..51c615279b23 100644
4843     --- a/kernel/sched/fair.c
4844     +++ b/kernel/sched/fair.c
4845     @@ -2682,6 +2682,23 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
4846    
4847     static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
4848    
4849     +/*
4850     + * Unsigned subtract and clamp on underflow.
4851     + *
4852     + * Explicitly do a load-store to ensure the intermediate value never hits
4853     + * memory. This allows lockless observations without ever seeing the negative
4854     + * values.
4855     + */
4856     +#define sub_positive(_ptr, _val) do { \
4857     + typeof(_ptr) ptr = (_ptr); \
4858     + typeof(*ptr) val = (_val); \
4859     + typeof(*ptr) res, var = READ_ONCE(*ptr); \
4860     + res = var - val; \
4861     + if (res > var) \
4862     + res = 0; \
4863     + WRITE_ONCE(*ptr, res); \
4864     +} while (0)
4865     +
4866     /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
4867     static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4868     {
4869     @@ -2690,15 +2707,15 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
4870    
4871     if (atomic_long_read(&cfs_rq->removed_load_avg)) {
4872     s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
4873     - sa->load_avg = max_t(long, sa->load_avg - r, 0);
4874     - sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
4875     + sub_positive(&sa->load_avg, r);
4876     + sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
4877     removed = 1;
4878     }
4879    
4880     if (atomic_long_read(&cfs_rq->removed_util_avg)) {
4881     long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
4882     - sa->util_avg = max_t(long, sa->util_avg - r, 0);
4883     - sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
4884     + sub_positive(&sa->util_avg, r);
4885     + sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
4886     }
4887    
4888     decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
4889     @@ -2764,10 +2781,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
4890     &se->avg, se->on_rq * scale_load_down(se->load.weight),
4891     cfs_rq->curr == se, NULL);
4892    
4893     - cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
4894     - cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
4895     - cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
4896     - cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
4897     + sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
4898     + sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
4899     + sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
4900     + sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
4901     }
4902    
4903     /* Add the load generated by se into cfs_rq's load average */
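
The max_t() expressions this replaces compute the same clamp, but nothing stops the compiler from storing the wrapped-around difference to memory before fixing it up, and lockless readers of the load/util averages could observe that bogus intermediate. sub_positive() keeps the intermediate in a local and publishes the clamped result once. A userspace demonstration of the clamp (illustrative):

	#include <stdio.h>

	int main(void)
	{
		unsigned long load_avg = 100, removed = 130;
		unsigned long var = load_avg;		/* READ_ONCE() in the kernel */
		unsigned long res = var - removed;	/* wraps to a huge value */

		if (res > var)				/* unsigned underflow detected */
			res = 0;
		load_avg = res;				/* single WRITE_ONCE() publish */

		printf("clamped load_avg = %lu\n", load_avg);	/* prints 0 */
		return 0;
	}
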
4904     diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
4905     index f96f0383f6c6..ad1d6164e946 100644
4906     --- a/kernel/trace/trace_printk.c
4907     +++ b/kernel/trace/trace_printk.c
4908     @@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
4909     static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
4910     {
4911     struct trace_bprintk_fmt *pos;
4912     +
4913     + if (!fmt)
4914     + return ERR_PTR(-EINVAL);
4915     +
4916     list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
4917     if (!strcmp(pos->fmt, fmt))
4918     return pos;
4919     @@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
4920     for (iter = start; iter < end; iter++) {
4921     struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
4922     if (tb_fmt) {
4923     - *iter = tb_fmt->fmt;
4924     + if (!IS_ERR(tb_fmt))
4925     + *iter = tb_fmt->fmt;
4926     continue;
4927     }
4928    
4929     diff --git a/mm/migrate.c b/mm/migrate.c
4930     index bbeb0b71fcf4..72c09dea6526 100644
4931     --- a/mm/migrate.c
4932     +++ b/mm/migrate.c
4933     @@ -429,6 +429,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
4934    
4935     return MIGRATEPAGE_SUCCESS;
4936     }
4937     +EXPORT_SYMBOL(migrate_page_move_mapping);
4938    
4939     /*
4940     * The expected number of remaining references is the same as that
4941     @@ -579,6 +580,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
4942     if (PageWriteback(newpage))
4943     end_page_writeback(newpage);
4944     }
4945     +EXPORT_SYMBOL(migrate_page_copy);
4946    
4947     /************************************************************
4948     * Migration functions
4949     diff --git a/mm/page-writeback.c b/mm/page-writeback.c
4950     index e40c9364582d..fd51ebfc423f 100644
4951     --- a/mm/page-writeback.c
4952     +++ b/mm/page-writeback.c
4953     @@ -359,8 +359,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
4954     struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
4955     unsigned long bytes = vm_dirty_bytes;
4956     unsigned long bg_bytes = dirty_background_bytes;
4957     - unsigned long ratio = vm_dirty_ratio;
4958     - unsigned long bg_ratio = dirty_background_ratio;
4959     + /* convert ratios to per-PAGE_SIZE for higher precision */
4960     + unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
4961     + unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
4962     unsigned long thresh;
4963     unsigned long bg_thresh;
4964     struct task_struct *tsk;
4965     @@ -372,26 +373,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
4966     /*
4967     * The byte settings can't be applied directly to memcg
4968     * domains. Convert them to ratios by scaling against
4969     - * globally available memory.
4970     + * globally available memory. As the ratios are in
4971     + * per-PAGE_SIZE, they can be obtained by dividing bytes by
4972     + * number of pages.
4973     */
4974     if (bytes)
4975     - ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
4976     - global_avail, 100UL);
4977     + ratio = min(DIV_ROUND_UP(bytes, global_avail),
4978     + PAGE_SIZE);
4979     if (bg_bytes)
4980     - bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
4981     - global_avail, 100UL);
4982     + bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
4983     + PAGE_SIZE);
4984     bytes = bg_bytes = 0;
4985     }
4986    
4987     if (bytes)
4988     thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
4989     else
4990     - thresh = (ratio * available_memory) / 100;
4991     + thresh = (ratio * available_memory) / PAGE_SIZE;
4992    
4993     if (bg_bytes)
4994     bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
4995     else
4996     - bg_thresh = (bg_ratio * available_memory) / 100;
4997     + bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
4998    
4999     if (bg_thresh >= thresh)
5000     bg_thresh = thresh / 2;
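
Worked numbers for the precision gain on the memcg path that converts dirty_bytes to a ratio: 1 GiB of dirty_bytes against 64 GiB of available memory is 1.5625%, which the old whole-percent ratio truncated to 1%; in per-PAGE_SIZE units the threshold comes out exact. An illustrative program (assumes 4 KiB pages and 64-bit arithmetic):

	#include <stdio.h>

	#define PAGE_SIZE	4096ULL
	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned long long bytes = 1ULL << 30;			/* 1 GiB dirty_bytes */
		unsigned long long avail = (64ULL << 30) / PAGE_SIZE;	/* pages */

		unsigned long long old_ratio = DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 / avail;
		unsigned long long new_ratio = DIV_ROUND_UP(bytes, avail);	/* per-PAGE_SIZE */

		printf("old: %llu pages\n", old_ratio * avail / 100);		/* 167772, ~0.64 GiB */
		printf("new: %llu pages\n", new_ratio * avail / PAGE_SIZE);	/* 262144, exactly 1 GiB */
		return 0;
	}
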
5001     diff --git a/mm/percpu.c b/mm/percpu.c
5002     index 8a943b97a053..1f376bce413c 100644
5003     --- a/mm/percpu.c
5004     +++ b/mm/percpu.c
5005     @@ -110,7 +110,7 @@ struct pcpu_chunk {
5006     int map_used; /* # of map entries used before the sentry */
5007     int map_alloc; /* # of map entries allocated */
5008     int *map; /* allocation map */
5009     - struct work_struct map_extend_work;/* async ->map[] extension */
5010     + struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
5011    
5012     void *data; /* chunk data */
5013     int first_free; /* no free below this */
5014     @@ -160,10 +160,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
5015     static int pcpu_reserved_chunk_limit;
5016    
5017     static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
5018     -static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
5019     +static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
5020    
5021     static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
5022    
5023     +/* chunks which need their map areas extended, protected by pcpu_lock */
5024     +static LIST_HEAD(pcpu_map_extend_chunks);
5025     +
5026     /*
5027     * The number of empty populated pages, protected by pcpu_lock. The
5028     * reserved chunk doesn't contribute to the count.
5029     @@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
5030     {
5031     int margin, new_alloc;
5032    
5033     + lockdep_assert_held(&pcpu_lock);
5034     +
5035     if (is_atomic) {
5036     margin = 3;
5037    
5038     if (chunk->map_alloc <
5039     - chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
5040     - pcpu_async_enabled)
5041     - schedule_work(&chunk->map_extend_work);
5042     + chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
5043     + if (list_empty(&chunk->map_extend_list)) {
5044     + list_add_tail(&chunk->map_extend_list,
5045     + &pcpu_map_extend_chunks);
5046     + pcpu_schedule_balance_work();
5047     + }
5048     + }
5049     } else {
5050     margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
5051     }
5052     @@ -437,6 +446,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
5053     size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
5054     unsigned long flags;
5055    
5056     + lockdep_assert_held(&pcpu_alloc_mutex);
5057     +
5058     new = pcpu_mem_zalloc(new_size);
5059     if (!new)
5060     return -ENOMEM;
5061     @@ -469,20 +480,6 @@ out_unlock:
5062     return 0;
5063     }
5064    
5065     -static void pcpu_map_extend_workfn(struct work_struct *work)
5066     -{
5067     - struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
5068     - map_extend_work);
5069     - int new_alloc;
5070     -
5071     - spin_lock_irq(&pcpu_lock);
5072     - new_alloc = pcpu_need_to_extend(chunk, false);
5073     - spin_unlock_irq(&pcpu_lock);
5074     -
5075     - if (new_alloc)
5076     - pcpu_extend_area_map(chunk, new_alloc);
5077     -}
5078     -
5079     /**
5080     * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
5081     * @chunk: chunk the candidate area belongs to
5082     @@ -742,7 +739,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
5083     chunk->map_used = 1;
5084    
5085     INIT_LIST_HEAD(&chunk->list);
5086     - INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
5087     + INIT_LIST_HEAD(&chunk->map_extend_list);
5088     chunk->free_size = pcpu_unit_size;
5089     chunk->contig_hint = pcpu_unit_size;
5090    
5091     @@ -897,6 +894,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
5092     return NULL;
5093     }
5094    
5095     + if (!is_atomic)
5096     + mutex_lock(&pcpu_alloc_mutex);
5097     +
5098     spin_lock_irqsave(&pcpu_lock, flags);
5099    
5100     /* serve reserved allocations from the reserved chunk if available */
5101     @@ -969,12 +969,9 @@ restart:
5102     if (is_atomic)
5103     goto fail;
5104    
5105     - mutex_lock(&pcpu_alloc_mutex);
5106     -
5107     if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
5108     chunk = pcpu_create_chunk();
5109     if (!chunk) {
5110     - mutex_unlock(&pcpu_alloc_mutex);
5111     err = "failed to allocate new chunk";
5112     goto fail;
5113     }
5114     @@ -985,7 +982,6 @@ restart:
5115     spin_lock_irqsave(&pcpu_lock, flags);
5116     }
5117    
5118     - mutex_unlock(&pcpu_alloc_mutex);
5119     goto restart;
5120    
5121     area_found:
5122     @@ -995,8 +991,6 @@ area_found:
5123     if (!is_atomic) {
5124     int page_start, page_end, rs, re;
5125    
5126     - mutex_lock(&pcpu_alloc_mutex);
5127     -
5128     page_start = PFN_DOWN(off);
5129     page_end = PFN_UP(off + size);
5130    
5131     @@ -1007,7 +1001,6 @@ area_found:
5132    
5133     spin_lock_irqsave(&pcpu_lock, flags);
5134     if (ret) {
5135     - mutex_unlock(&pcpu_alloc_mutex);
5136     pcpu_free_area(chunk, off, &occ_pages);
5137     err = "failed to populate";
5138     goto fail_unlock;
5139     @@ -1047,6 +1040,8 @@ fail:
5140     /* see the flag handling in pcpu_blance_workfn() */
5141     pcpu_atomic_alloc_failed = true;
5142     pcpu_schedule_balance_work();
5143     + } else {
5144     + mutex_unlock(&pcpu_alloc_mutex);
5145     }
5146     return NULL;
5147     }
5148     @@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
5149     if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
5150     continue;
5151    
5152     + list_del_init(&chunk->map_extend_list);
5153     list_move(&chunk->list, &to_free);
5154     }
5155    
5156     @@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
5157     pcpu_destroy_chunk(chunk);
5158     }
5159    
5160     + /* service chunks which requested async area map extension */
5161     + do {
5162     + int new_alloc = 0;
5163     +
5164     + spin_lock_irq(&pcpu_lock);
5165     +
5166     + chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
5167     + struct pcpu_chunk, map_extend_list);
5168     + if (chunk) {
5169     + list_del_init(&chunk->map_extend_list);
5170     + new_alloc = pcpu_need_to_extend(chunk, false);
5171     + }
5172     +
5173     + spin_unlock_irq(&pcpu_lock);
5174     +
5175     + if (new_alloc)
5176     + pcpu_extend_area_map(chunk, new_alloc);
5177     + } while (chunk);
5178     +
5179     /*
5180     * Ensure there are certain number of free populated pages for
5181     * atomic allocs. Fill up from the most packed so that atomic
5182     @@ -1646,7 +1661,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
5183     */
5184     schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
5185     INIT_LIST_HEAD(&schunk->list);
5186     - INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
5187     + INIT_LIST_HEAD(&schunk->map_extend_list);
5188     schunk->base_addr = base_addr;
5189     schunk->map = smap;
5190     schunk->map_alloc = ARRAY_SIZE(smap);
5191     @@ -1675,7 +1690,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
5192     if (dyn_size) {
5193     dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
5194     INIT_LIST_HEAD(&dchunk->list);
5195     - INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
5196     + INIT_LIST_HEAD(&dchunk->map_extend_list);
5197     dchunk->base_addr = base_addr;
5198     dchunk->map = dmap;
5199     dchunk->map_alloc = ARRAY_SIZE(dmap);
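
The per-chunk work_struct is replaced by one global list drained from the existing balance worker: chunks enqueue themselves at most once (the list_empty() test, under pcpu_lock), and the worker pops a single entry at a time, holding the spinlock only for the pop so the sleeping map extension runs unlocked. The generic shape of that pattern (struct item, request_service() and service() are illustrative):

	struct item {
		struct list_head node;		/* list_del_init'd, so empty iff not queued */
	};

	static LIST_HEAD(pending);
	static DEFINE_SPINLOCK(pending_lock);

	static void service(struct item *it);	/* sleeping work, e.g. map extension */

	static void request_service(struct item *it)
	{
		/* caller holds pending_lock (pcpu_lock in the patch above) */
		if (list_empty(&it->node))
			list_add_tail(&it->node, &pending);
	}

	static void worker(void)		/* process context */
	{
		struct item *it;

		do {
			spin_lock_irq(&pending_lock);
			it = list_first_entry_or_null(&pending, struct item, node);
			if (it)
				list_del_init(&it->node);
			spin_unlock_irq(&pending_lock);

			if (it)
				service(it);	/* lock dropped, may sleep */
		} while (it);
	}
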
5200     diff --git a/mm/shmem.c b/mm/shmem.c
5201     index ea5a70cfc1d8..1b11ccc0a3b7 100644
5202     --- a/mm/shmem.c
5203     +++ b/mm/shmem.c
5204     @@ -2153,9 +2153,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
5205     NULL);
5206     if (error) {
5207     /* Remove the !PageUptodate pages we added */
5208     - shmem_undo_range(inode,
5209     - (loff_t)start << PAGE_CACHE_SHIFT,
5210     - (loff_t)index << PAGE_CACHE_SHIFT, true);
5211     + if (index > start) {
5212     + shmem_undo_range(inode,
5213     + (loff_t)start << PAGE_CACHE_SHIFT,
5214     + ((loff_t)index << PAGE_CACHE_SHIFT) - 1, true);
5215     + }
5216     goto undone;
5217     }
5218    
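
The undo now covers exactly the pages inserted before the failure: with start = 2 and a failure at index = 5, pages 2-4 were added, and the new inclusive end (5 << PAGE_CACHE_SHIFT) - 1 is the last byte of page 4, where the old end 5 << PAGE_CACHE_SHIFT also reached into page 5; when index == start nothing was inserted and the call is skipped entirely. The range arithmetic (illustrative; 4 KiB pages, inclusive end as shmem_undo_range() expects):

	#include <stdio.h>

	#define SHIFT	12	/* PAGE_CACHE_SHIFT for 4 KiB pages */

	int main(void)
	{
		long long start = 2, index = 5;		/* pages 2..4 were inserted */

		printf("undo [%#llx, %#llx]\n",
		       start << SHIFT,			/* 0x2000 */
		       (index << SHIFT) - 1);		/* 0x4fff: last byte of page 4 */
		printf("old end %#llx is the first byte of page %lld\n",
		       index << SHIFT, index);		/* 0x5000: one page too far */
		return 0;
	}
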
5219     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
5220     index 12e700332010..4ab6ead3d8ee 100644
5221     --- a/net/core/flow_dissector.c
5222     +++ b/net/core/flow_dissector.c
5223     @@ -662,6 +662,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
5224     }
5225     EXPORT_SYMBOL(make_flow_keys_digest);
5226    
5227     +static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
5228     +
5229     +u32 __skb_get_hash_symmetric(struct sk_buff *skb)
5230     +{
5231     + struct flow_keys keys;
5232     +
5233     + __flow_hash_secret_init();
5234     +
5235     + memset(&keys, 0, sizeof(keys));
5236     + __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
5237     + NULL, 0, 0, 0,
5238     + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
5239     +
5240     + return __flow_hash_from_keys(&keys, hashrnd);
5241     +}
5242     +EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
5243     +
5244     /**
5245     * __skb_get_hash: calculate a flow hash
5246     * @skb: sk_buff to calculate flow hash from
5247     @@ -874,6 +891,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
5248     },
5249     };
5250    
5251     +static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
5252     + {
5253     + .key_id = FLOW_DISSECTOR_KEY_CONTROL,
5254     + .offset = offsetof(struct flow_keys, control),
5255     + },
5256     + {
5257     + .key_id = FLOW_DISSECTOR_KEY_BASIC,
5258     + .offset = offsetof(struct flow_keys, basic),
5259     + },
5260     + {
5261     + .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
5262     + .offset = offsetof(struct flow_keys, addrs.v4addrs),
5263     + },
5264     + {
5265     + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
5266     + .offset = offsetof(struct flow_keys, addrs.v6addrs),
5267     + },
5268     + {
5269     + .key_id = FLOW_DISSECTOR_KEY_PORTS,
5270     + .offset = offsetof(struct flow_keys, ports),
5271     + },
5272     +};
5273     +
5274     static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
5275     {
5276     .key_id = FLOW_DISSECTOR_KEY_CONTROL,
5277     @@ -895,6 +935,9 @@ static int __init init_default_flow_dissectors(void)
5278     skb_flow_dissector_init(&flow_keys_dissector,
5279     flow_keys_dissector_keys,
5280     ARRAY_SIZE(flow_keys_dissector_keys));
5281     + skb_flow_dissector_init(&flow_keys_dissector_symmetric,
5282     + flow_keys_dissector_symmetric_keys,
5283     + ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
5284     skb_flow_dissector_init(&flow_keys_buf_dissector,
5285     flow_keys_buf_dissector_keys,
5286     ARRAY_SIZE(flow_keys_buf_dissector_keys));
5287     diff --git a/net/core/skbuff.c b/net/core/skbuff.c
5288     index 9835d9a8a7a4..4968b5ddea69 100644
5289     --- a/net/core/skbuff.c
5290     +++ b/net/core/skbuff.c
5291     @@ -2948,24 +2948,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
5292     EXPORT_SYMBOL_GPL(skb_append_pagefrags);
5293    
5294     /**
5295     - * skb_push_rcsum - push skb and update receive checksum
5296     - * @skb: buffer to update
5297     - * @len: length of data pulled
5298     - *
5299     - * This function performs an skb_push on the packet and updates
5300     - * the CHECKSUM_COMPLETE checksum. It should be used on
5301     - * receive path processing instead of skb_push unless you know
5302     - * that the checksum difference is zero (e.g., a valid IP header)
5303     - * or you are setting ip_summed to CHECKSUM_NONE.
5304     - */
5305     -static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
5306     -{
5307     - skb_push(skb, len);
5308     - skb_postpush_rcsum(skb, skb->data, len);
5309     - return skb->data;
5310     -}
5311     -
5312     -/**
5313     * skb_pull_rcsum - pull skb and update receive checksum
5314     * @skb: buffer to update
5315     * @len: length of data pulled
5316     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
5317     index 0c7e276c230e..34cf46d74554 100644
5318     --- a/net/ipv6/ip6_fib.c
5319     +++ b/net/ipv6/ip6_fib.c
5320     @@ -179,6 +179,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
5321     }
5322     }
5323    
5324     + free_percpu(non_pcpu_rt->rt6i_pcpu);
5325     non_pcpu_rt->rt6i_pcpu = NULL;
5326     }
5327    
5328     diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
5329     index 6f85b6ab8e51..f7bb6829b415 100644
5330     --- a/net/mac80211/mesh.c
5331     +++ b/net/mac80211/mesh.c
5332     @@ -151,19 +151,26 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
5333     void mesh_sta_cleanup(struct sta_info *sta)
5334     {
5335     struct ieee80211_sub_if_data *sdata = sta->sdata;
5336     - u32 changed;
5337     + u32 changed = 0;
5338    
5339     /*
5340     * maybe userspace handles peer allocation and peering, but in either
5341     * case the beacon is still generated by the kernel and we might need
5342     * an update.
5343     */
5344     - changed = mesh_accept_plinks_update(sdata);
5345     + if (sdata->u.mesh.user_mpm &&
5346     + sta->mesh->plink_state == NL80211_PLINK_ESTAB)
5347     + changed |= mesh_plink_dec_estab_count(sdata);
5348     + changed |= mesh_accept_plinks_update(sdata);
5349     if (!sdata->u.mesh.user_mpm) {
5350     changed |= mesh_plink_deactivate(sta);
5351     del_timer_sync(&sta->mesh->plink_timer);
5352     }
5353    
5354     + /* make sure no readers can access nexthop sta from here on */
5355     + mesh_path_flush_by_nexthop(sta);
5356     + synchronize_net();
5357     +
5358     if (changed)
5359     ieee80211_mbss_info_change_notify(sdata, changed);
5360     }
5361     diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
5362     index 2cafb21b422f..15b0150283b6 100644
5363     --- a/net/mac80211/sta_info.h
5364     +++ b/net/mac80211/sta_info.h
5365     @@ -269,7 +269,7 @@ struct ieee80211_fast_tx {
5366     u8 sa_offs, da_offs, pn_offs;
5367     u8 band;
5368     u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
5369     - sizeof(rfc1042_header)];
5370     + sizeof(rfc1042_header)] __aligned(2);
5371    
5372     struct rcu_head rcu_head;
5373     };
5374     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
5375     index 9cc7b512b472..a86f26d05bc2 100644
5376     --- a/net/packet/af_packet.c
5377     +++ b/net/packet/af_packet.c
5378     @@ -1341,7 +1341,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
5379     struct sk_buff *skb,
5380     unsigned int num)
5381     {
5382     - return reciprocal_scale(skb_get_hash(skb), num);
5383     + return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
5384     }
5385    
5386     static unsigned int fanout_demux_lb(struct packet_fanout *f,
5387     diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
5388     index 32fcdecdb9e2..e384d6aefa3a 100644
5389     --- a/net/sched/act_mirred.c
5390     +++ b/net/sched/act_mirred.c
5391     @@ -170,7 +170,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
5392    
5393     if (!(at & AT_EGRESS)) {
5394     if (m->tcfm_ok_push)
5395     - skb_push(skb2, skb->mac_len);
5396     + skb_push_rcsum(skb2, skb->mac_len);
5397     }
5398    
5399     /* mirror is always swallowed */
5400     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
5401     index 23608eb0ded2..7a93922457ff 100644
5402     --- a/net/sunrpc/clnt.c
5403     +++ b/net/sunrpc/clnt.c
5404     @@ -442,7 +442,7 @@ out_no_rpciod:
5405     return ERR_PTR(err);
5406     }
5407    
5408     -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5409     +static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5410     struct rpc_xprt *xprt)
5411     {
5412     struct rpc_clnt *clnt = NULL;
5413     @@ -474,7 +474,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5414    
5415     return clnt;
5416     }
5417     -EXPORT_SYMBOL_GPL(rpc_create_xprt);
5418    
5419     /**
5420     * rpc_create - create an RPC client and transport with one call
5421     @@ -500,6 +499,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
5422     };
5423     char servername[48];
5424    
5425     + if (args->bc_xprt) {
5426     + WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
5427     + xprt = args->bc_xprt->xpt_bc_xprt;
5428     + if (xprt) {
5429     + xprt_get(xprt);
5430     + return rpc_create_xprt(args, xprt);
5431     + }
5432     + }
5433     +
5434     if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
5435     xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
5436     if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
5437     diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
5438     index 898a53a562b8..6579fd6e7459 100644
5439     --- a/net/unix/af_unix.c
5440     +++ b/net/unix/af_unix.c
5441     @@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
5442     &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
5443     struct dentry *dentry = unix_sk(s)->path.dentry;
5444    
5445     - if (dentry && d_backing_inode(dentry) == i) {
5446     + if (dentry && d_real_inode(dentry) == i) {
5447     sock_hold(s);
5448     goto found;
5449     }
5450     @@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
5451     err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
5452     if (err)
5453     goto fail;
5454     - inode = d_backing_inode(path.dentry);
5455     + inode = d_real_inode(path.dentry);
5456     err = inode_permission(inode, MAY_WRITE);
5457     if (err)
5458     goto put_fail;
5459     @@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
5460     goto out_up;
5461     }
5462     addr->hash = UNIX_HASH_SIZE;
5463     - hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
5464     + hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
5465     spin_lock(&unix_table_lock);
5466     u->path = u_path;
5467     list = &unix_socket_table[hash];
5468     diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
5469     index 5b96206e9aab..9f5cdd49ff0b 100644
5470     --- a/scripts/mod/file2alias.c
5471     +++ b/scripts/mod/file2alias.c
5472     @@ -695,7 +695,7 @@ static int do_of_entry (const char *filename, void *symval, char *alias)
5473     len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
5474     (*type)[0] ? *type : "*");
5475    
5476     - if (compatible[0])
5477     + if ((*compatible)[0])
5478     sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
5479     *compatible);
5480    
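
DEF_FIELD_ADDR() yields a pointer to the fixed-size compatible array, so the old compatible[0] tested the array itself, which decays to a never-NULL pointer, rather than its first character; (*compatible)[0] reads the byte. A short demonstration (illustrative; compilers even warn that the old test is always true):

	#include <stdio.h>

	int main(void)
	{
		char buf[128] = "";			/* empty "compatible" string */
		char (*compatible)[128] = &buf;

		if (compatible[0])			/* char[128], decays to non-NULL */
			printf("old test: always taken\n");
		if ((*compatible)[0])			/* first byte: '\0' here */
			printf("new test: taken only for non-empty strings\n");
		return 0;
	}
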
5481     diff --git a/security/keys/key.c b/security/keys/key.c
5482     index ab7997ded725..534808915371 100644
5483     --- a/security/keys/key.c
5484     +++ b/security/keys/key.c
5485     @@ -578,7 +578,7 @@ int key_reject_and_link(struct key *key,
5486    
5487     mutex_unlock(&key_construction_mutex);
5488    
5489     - if (keyring)
5490     + if (keyring && link_ret == 0)
5491     __key_link_end(keyring, &key->index_key, edit);
5492    
5493     /* wake up anyone waiting for a key to be constructed */
5494     diff --git a/sound/core/control.c b/sound/core/control.c
5495     index a85d45595d02..b4fe9b002512 100644
5496     --- a/sound/core/control.c
5497     +++ b/sound/core/control.c
5498     @@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
5499    
5500     if (snd_BUG_ON(!card || !id))
5501     return;
5502     + if (card->shutdown)
5503     + return;
5504     read_lock(&card->ctl_files_rwlock);
5505     #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
5506     card->mixer_oss_change_count++;
5507     diff --git a/sound/core/pcm.c b/sound/core/pcm.c
5508     index 308c9ecf73db..8e980aa678d0 100644
5509     --- a/sound/core/pcm.c
5510     +++ b/sound/core/pcm.c
5511     @@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
5512     }
5513     EXPORT_SYMBOL(snd_pcm_new_internal);
5514    
5515     +static void free_chmap(struct snd_pcm_str *pstr)
5516     +{
5517     + if (pstr->chmap_kctl) {
5518     + snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
5519     + pstr->chmap_kctl = NULL;
5520     + }
5521     +}
5522     +
5523     static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
5524     {
5525     struct snd_pcm_substream *substream, *substream_next;
5526     @@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
5527     kfree(setup);
5528     }
5529     #endif
5530     + free_chmap(pstr);
5531     if (pstr->substream_count)
5532     put_device(&pstr->dev);
5533     }
5534     @@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
5535     for (cidx = 0; cidx < 2; cidx++) {
5536     if (!pcm->internal)
5537     snd_unregister_device(&pcm->streams[cidx].dev);
5538     - if (pcm->streams[cidx].chmap_kctl) {
5539     - snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
5540     - pcm->streams[cidx].chmap_kctl = NULL;
5541     - }
5542     + free_chmap(&pcm->streams[cidx]);
5543     }
5544     mutex_unlock(&pcm->open_mutex);
5545     mutex_unlock(&register_mutex);
5546     diff --git a/sound/core/timer.c b/sound/core/timer.c
5547     index b982d1b089bd..7c6155f5865b 100644
5548     --- a/sound/core/timer.c
5549     +++ b/sound/core/timer.c
5550     @@ -1961,6 +1961,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
5551    
5552     qhead = tu->qhead++;
5553     tu->qhead %= tu->queue_size;
5554     + tu->qused--;
5555     spin_unlock_irq(&tu->qlock);
5556    
5557     if (tu->tread) {
5558     @@ -1974,7 +1975,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
5559     }
5560    
5561     spin_lock_irq(&tu->qlock);
5562     - tu->qused--;
5563     if (err < 0)
5564     goto _error;
5565     result += unit;
5566     diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
5567     index a9f7a75702d2..67628616506e 100644
5568     --- a/sound/drivers/dummy.c
5569     +++ b/sound/drivers/dummy.c
5570     @@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
5571    
5572     static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
5573     {
5574     + hrtimer_cancel(&dpcm->timer);
5575     tasklet_kill(&dpcm->tasklet);
5576     }
5577    
5578     diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
5579     index 4667c3232b7f..74177189063c 100644
5580     --- a/sound/pci/au88x0/au88x0_core.c
5581     +++ b/sound/pci/au88x0/au88x0_core.c
5582     @@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
5583     int page, p, pp, delta, i;
5584    
5585     page =
5586     - (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
5587     - WT_SUBBUF_MASK)
5588     - >> WT_SUBBUF_SHIFT;
5589     + (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
5590     + >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
5591     if (dma->nr_periods >= 4)
5592     delta = (page - dma->period_real) & 3;
5593     else {
5594     diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
5595     index 1cb85aeb0cea..286f5e3686a3 100644
5596     --- a/sound/pci/echoaudio/echoaudio.c
5597     +++ b/sound/pci/echoaudio/echoaudio.c
5598     @@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
5599     u32 pipe_alloc_mask;
5600     int err;
5601    
5602     - commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
5603     + commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
5604     if (commpage_bak == NULL)
5605     return -ENOMEM;
5606     commpage = chip->comm_page;
5607     - memcpy(commpage_bak, commpage, sizeof(struct comm_page));
5608     + memcpy(commpage_bak, commpage, sizeof(*commpage));
5609    
5610     err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
5611     if (err < 0) {
5612     diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
5613     index 367dbf0d285e..dc2fa576d60d 100644
5614     --- a/sound/pci/hda/hda_generic.c
5615     +++ b/sound/pci/hda/hda_generic.c
5616     @@ -3994,6 +3994,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
5617    
5618     for (n = 0; n < spec->paths.used; n++) {
5619     path = snd_array_elem(&spec->paths, n);
5620     + if (!path->depth)
5621     + continue;
5622     if (path->path[0] == nid ||
5623     path->path[path->depth - 1] == nid) {
5624     bool pin_old = path->pin_enabled;
5625     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
5626     index 1475440b70aa..8218cace8fea 100644
5627     --- a/sound/pci/hda/hda_intel.c
5628     +++ b/sound/pci/hda/hda_intel.c
5629     @@ -361,9 +361,10 @@ enum {
5630     #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
5631     #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
5632     #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
5633     +#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
5634     #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
5635     #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
5636     - IS_KBL(pci) || IS_KBL_LP(pci)
5637     + IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
5638    
5639     static char *driver_short_names[] = {
5640     [AZX_DRIVER_ICH] = "HDA Intel",
5641     @@ -1255,8 +1256,10 @@ static int azx_free(struct azx *chip)
5642     if (use_vga_switcheroo(hda)) {
5643     if (chip->disabled && hda->probe_continued)
5644     snd_hda_unlock_devices(&chip->bus);
5645     - if (hda->vga_switcheroo_registered)
5646     + if (hda->vga_switcheroo_registered) {
5647     vga_switcheroo_unregister_client(chip->pci);
5648     + vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
5649     + }
5650     }
5651    
5652     if (bus->chip_init) {
5653     @@ -2213,6 +2216,9 @@ static const struct pci_device_id azx_ids[] = {
5654     /* Kabylake-LP */
5655     { PCI_DEVICE(0x8086, 0x9d71),
5656     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
5657     + /* Kabylake-H */
5658     + { PCI_DEVICE(0x8086, 0xa2f0),
5659     + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
5660     /* Broxton-P(Apollolake) */
5661     { PCI_DEVICE(0x8086, 0x5a98),
5662     .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
5663     @@ -2286,6 +2292,8 @@ static const struct pci_device_id azx_ids[] = {
5664     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5665     { PCI_DEVICE(0x1002, 0x157a),
5666     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5667     + { PCI_DEVICE(0x1002, 0x15b3),
5668     + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
5669     { PCI_DEVICE(0x1002, 0x793b),
5670     .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
5671     { PCI_DEVICE(0x1002, 0x7919),
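Note: besides the new Kabylake-H (0xa2f0) and AMD (0x15b3) device IDs, the hda_intel change above pairs vga_switcheroo_unregister_client() with vga_switcheroo_fini_domain_pm_ops(), so the PM domain ops installed at registration time do not outlive the client. A minimal sketch of keeping setup and teardown symmetric (all names below are hypothetical stand-ins, not the switcheroo API):

    #include <stdbool.h>

    static bool registered;

    static void register_client(void)   {}  /* hypothetical setup pair    */
    static void install_pm_ops(void)    {}
    static void unregister_client(void) {}  /* hypothetical teardown pair */
    static void uninstall_pm_ops(void)  {}

    static void probe(void)
    {
            register_client();
            install_pm_ops();
            registered = true;
    }

    static void remove_dev(void)
    {
            if (registered) {
                    /* undo both halves under the same condition that
                     * guarded the setup; dropping only the client would
                     * leave the pm ops dangling */
                    unregister_client();
                    uninstall_pm_ops();
                    registered = false;
            }
    }
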
5672     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5673     index 0fe18ede3e85..abcb5a6a1cd9 100644
5674     --- a/sound/pci/hda/patch_realtek.c
5675     +++ b/sound/pci/hda/patch_realtek.c
5676     @@ -5650,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5677     SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
5678     SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
5679     SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
5680     + SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
5681     + SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
5682     + SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
5683     SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
5684     SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
5685     SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
5686     @@ -5735,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5687     {}
5688     };
5689     #define ALC225_STANDARD_PINS \
5690     - {0x12, 0xb7a60130}, \
5691     {0x21, 0x04211020}
5692    
5693     #define ALC256_STANDARD_PINS \
5694     @@ -5760,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5695     static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5696     SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5697     ALC225_STANDARD_PINS,
5698     + {0x12, 0xb7a60130},
5699     {0x14, 0x901701a0}),
5700     SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5701     ALC225_STANDARD_PINS,
5702     + {0x12, 0xb7a60130},
5703     {0x14, 0x901701b0}),
5704     + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5705     + ALC225_STANDARD_PINS,
5706     + {0x12, 0xb7a60150},
5707     + {0x14, 0x901701a0}),
5708     + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5709     + ALC225_STANDARD_PINS,
5710     + {0x12, 0xb7a60150},
5711     + {0x14, 0x901701b0}),
5712     + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
5713     + ALC225_STANDARD_PINS,
5714     + {0x12, 0xb7a60130},
5715     + {0x1b, 0x90170110}),
5716     SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
5717     {0x14, 0x90170110},
5718     {0x21, 0x02211020}),
5719     @@ -5832,6 +5848,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5720     {0x14, 0x90170120},
5721     {0x21, 0x02211030}),
5722     SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5723     + {0x12, 0x90a60170},
5724     + {0x14, 0x90170120},
5725     + {0x21, 0x02211030}),
5726     + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5727     ALC256_STANDARD_PINS),
5728     SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5729     {0x12, 0x90a60130},
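Note: the patch_realtek changes are mostly table data, but the ALC225_STANDARD_PINS edit is the enabler: the 0x12 pin was dropped from the shared macro and spelled out per entry, so new Dell variants that wire 0x12 to 0xb7a60150 (or use pin 0x1b instead of 0x14) can still reuse the macro. A minimal sketch of factoring only truly common pins into a shared macro (the variant pin values are invented):

    struct pin { int nid, cfg; };

    #define COMMON_PINS { 0x21, 0x04211020 }   /* shared by every variant */

    static const struct pin variant_a[] = { COMMON_PINS, { 0x12, 0x130 } };
    static const struct pin variant_b[] = { COMMON_PINS, { 0x12, 0x150 } };
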
5730     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5731     index fefbf2d148ef..510df220d1b5 100644
5732     --- a/virt/kvm/kvm_main.c
5733     +++ b/virt/kvm/kvm_main.c
5734     @@ -2861,7 +2861,7 @@ static long kvm_vm_ioctl(struct file *filp,
5735     if (copy_from_user(&routing, argp, sizeof(routing)))
5736     goto out;
5737     r = -EINVAL;
5738     - if (routing.nr >= KVM_MAX_IRQ_ROUTES)
5739     + if (routing.nr > KVM_MAX_IRQ_ROUTES)
5740     goto out;
5741     if (routing.flags)
5742     goto out;
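Note: the kvm_main change above relaxes an off-by-one: KVM_MAX_IRQ_ROUTES is the largest entry count userspace may pass, so a request for exactly the limit is valid and only counts strictly above it should fail with -EINVAL. A minimal sketch of the inclusive-limit check (MAX_ROUTES is made up):

    #include <stdio.h>

    #define MAX_ROUTES 8

    static int check_old(unsigned int nr) { return nr >= MAX_ROUTES ? -1 : 0; }
    static int check_new(unsigned int nr) { return nr >  MAX_ROUTES ? -1 : 0; }

    int main(void)
    {
            /* nr == MAX_ROUTES asks for exactly the maximum: the old
             * check rejects it, the fixed one accepts it */
            printf("old=%d new=%d\n",
                   check_old(MAX_ROUTES), check_new(MAX_ROUTES));
            return 0;   /* prints: old=-1 new=0 */
    }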