Magellan Linux

Contents of /trunk/kernel-magellan/patches-4.6/0104-4.6.5-all-fixes.patch



Revision 2819
Thu Aug 4 13:24:58 2016 UTC by niro
File size: 243812 bytes
-linux-4.6.5
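
For reference, this file is the upstream 4.6.4 -> 4.6.5 stable update (note the Makefile hunk below bumping SUBLEVEL from 4 to 5), and its a/ b/ path prefixes mean it applies with a -p1 strip from the root of the kernel source tree. A minimal sketch of applying it, assuming a vanilla linux-4.6.4 tree (paths and filenames here are illustrative):

    # Illustrative only: assumes a pristine linux-4.6.4 source tree.
    cd linux-4.6.4
    patch -p1 --dry-run < 0104-4.6.5-all-fixes.patch  # check every hunk applies cleanly first
    patch -p1 < 0104-4.6.5-all-fixes.patch
    grep -E '^(VERSION|PATCHLEVEL|SUBLEVEL)' Makefile  # should now report 4, 6, 5
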
1 diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
2 index 6708c5e264aa..33e96f740639 100644
3 --- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
4 +++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
5 @@ -1,4 +1,4 @@
6 -What /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
7 +What /sys/bus/iio/devices/iio:deviceX/in_proximity_input
8 Date: March 2014
9 KernelVersion: 3.15
10 Contact: Matt Ranostay <mranostay@gmail.com>
11 diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
12 index 8638f61c8c9d..37eca00796ee 100644
13 --- a/Documentation/scsi/scsi_eh.txt
14 +++ b/Documentation/scsi/scsi_eh.txt
15 @@ -263,19 +263,23 @@ scmd->allowed.
16
17 3. scmd recovered
18 ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
19 - - shost->host_failed--
20 - clear scmd->eh_eflags
21 - scsi_setup_cmd_retry()
22 - move from local eh_work_q to local eh_done_q
23 LOCKING: none
24 + CONCURRENCY: at most one thread per separate eh_work_q to
25 + keep queue manipulation lockless
26
27 4. EH completes
28 ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
29 - layer of failure.
30 + layer of failure. May be called concurrently but must have
31 + a no more than one thread per separate eh_work_q to
32 + manipulate the queue locklessly
33 - scmd is removed from eh_done_q and scmd->eh_entry is cleared
34 - if retry is necessary, scmd is requeued using
35 scsi_queue_insert()
36 - otherwise, scsi_finish_command() is invoked for scmd
37 + - zero shost->host_failed
38 LOCKING: queue or finish function performs appropriate locking
39
40
41 diff --git a/Makefile b/Makefile
42 index cd374426114a..7d693a825fc7 100644
43 --- a/Makefile
44 +++ b/Makefile
45 @@ -1,6 +1,6 @@
46 VERSION = 4
47 PATCHLEVEL = 6
48 -SUBLEVEL = 4
49 +SUBLEVEL = 5
50 EXTRAVERSION =
51 NAME = Charred Weasel
52
53 diff --git a/arch/arm/boot/dts/armada-385-linksys.dtsi b/arch/arm/boot/dts/armada-385-linksys.dtsi
54 index 8450944b28e6..22f7a13e20b4 100644
55 --- a/arch/arm/boot/dts/armada-385-linksys.dtsi
56 +++ b/arch/arm/boot/dts/armada-385-linksys.dtsi
57 @@ -58,8 +58,8 @@
58 soc {
59 ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000
60 MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000
61 - MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000
62 - MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>;
63 + MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000
64 + MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>;
65
66 internal-regs {
67
68 diff --git a/arch/arm/boot/dts/sun5i-r8-chip.dts b/arch/arm/boot/dts/sun5i-r8-chip.dts
69 index f6898c6b84d4..c937c85ffb45 100644
70 --- a/arch/arm/boot/dts/sun5i-r8-chip.dts
71 +++ b/arch/arm/boot/dts/sun5i-r8-chip.dts
72 @@ -52,7 +52,7 @@
73
74 / {
75 model = "NextThing C.H.I.P.";
76 - compatible = "nextthing,chip", "allwinner,sun5i-r8";
77 + compatible = "nextthing,chip", "allwinner,sun5i-r8", "allwinner,sun5i-a13";
78
79 aliases {
80 i2c0 = &i2c0;
81 diff --git a/arch/arm/boot/dts/sun6i-a31s-primo81.dts b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
82 index 68b479b8772c..73c133f5e79c 100644
83 --- a/arch/arm/boot/dts/sun6i-a31s-primo81.dts
84 +++ b/arch/arm/boot/dts/sun6i-a31s-primo81.dts
85 @@ -176,8 +176,6 @@
86 };
87
88 &reg_dc1sw {
89 - regulator-min-microvolt = <3000000>;
90 - regulator-max-microvolt = <3000000>;
91 regulator-name = "vcc-lcd";
92 };
93
94 diff --git a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
95 index 360adfb1e9ca..d6ad6196a768 100644
96 --- a/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
97 +++ b/arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
98 @@ -135,8 +135,6 @@
99
100 &reg_dc1sw {
101 regulator-name = "vcc-lcd-usb2";
102 - regulator-min-microvolt = <3000000>;
103 - regulator-max-microvolt = <3000000>;
104 };
105
106 &reg_dc5ldo {
107 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
108 index aeddd28b3595..92fd2c8a9af0 100644
109 --- a/arch/arm/include/asm/pgtable-2level.h
110 +++ b/arch/arm/include/asm/pgtable-2level.h
111 @@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
112
113 #define pmd_large(pmd) (pmd_val(pmd) & 2)
114 #define pmd_bad(pmd) (pmd_val(pmd) & 2)
115 +#define pmd_present(pmd) (pmd_val(pmd))
116
117 #define copy_pmd(pmdpd,pmdps) \
118 do { \
119 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
120 index dc46398bc3a5..74114667d116 100644
121 --- a/arch/arm/include/asm/pgtable-3level.h
122 +++ b/arch/arm/include/asm/pgtable-3level.h
123 @@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
124 : !!(pmd_val(pmd) & (val)))
125 #define pmd_isclear(pmd, val) (!(pmd_val(pmd) & (val)))
126
127 +#define pmd_present(pmd) (pmd_isset((pmd), L_PMD_SECT_VALID))
128 #define pmd_young(pmd) (pmd_isset((pmd), PMD_SECT_AF))
129 #define pte_special(pte) (pte_isset((pte), L_PTE_SPECIAL))
130 static inline pte_t pte_mkspecial(pte_t pte)
131 @@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung, |= PMD_SECT_AF);
132 #define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
133 #define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
134
135 -/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
136 +/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
137 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
138 {
139 - return __pmd(0);
140 + return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
141 }
142
143 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
144 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
145 index 348caabb7625..d62204060cbe 100644
146 --- a/arch/arm/include/asm/pgtable.h
147 +++ b/arch/arm/include/asm/pgtable.h
148 @@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
149 #define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
150
151 #define pmd_none(pmd) (!pmd_val(pmd))
152 -#define pmd_present(pmd) (pmd_val(pmd))
153
154 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
155 {
156 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
157 index dded1b763c16..72b11d91ede2 100644
158 --- a/arch/arm/kvm/arm.c
159 +++ b/arch/arm/kvm/arm.c
160 @@ -267,6 +267,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
161 kvm_timer_vcpu_terminate(vcpu);
162 kvm_vgic_vcpu_destroy(vcpu);
163 kvm_pmu_vcpu_destroy(vcpu);
164 + kvm_vcpu_uninit(vcpu);
165 kmem_cache_free(kvm_vcpu_cache, vcpu);
166 }
167
168 diff --git a/arch/arm/mach-imx/mach-imx6ul.c b/arch/arm/mach-imx/mach-imx6ul.c
169 index a38b16b69923..b56de4b8cdf2 100644
170 --- a/arch/arm/mach-imx/mach-imx6ul.c
171 +++ b/arch/arm/mach-imx/mach-imx6ul.c
172 @@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
173 static void __init imx6ul_enet_phy_init(void)
174 {
175 if (IS_BUILTIN(CONFIG_PHYLIB))
176 - phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
177 + phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
178 ksz8081_phy_fixup);
179 }
180
181 diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
182 index 7e989d61159c..474abff7e855 100644
183 --- a/arch/arm/mach-mvebu/coherency.c
184 +++ b/arch/arm/mach-mvebu/coherency.c
185 @@ -162,22 +162,16 @@ exit:
186 }
187
188 /*
189 - * This ioremap hook is used on Armada 375/38x to ensure that PCIe
190 - * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This
191 - * is needed as a workaround for a deadlock issue between the PCIe
192 - * interface and the cache controller.
193 + * This ioremap hook is used on Armada 375/38x to ensure that all MMIO
194 + * areas are mapped as MT_UNCACHED instead of MT_DEVICE. This is
195 + * needed for the HW I/O coherency mechanism to work properly without
196 + * deadlock.
197 */
198 static void __iomem *
199 -armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
200 - unsigned int mtype, void *caller)
201 +armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size,
202 + unsigned int mtype, void *caller)
203 {
204 - struct resource pcie_mem;
205 -
206 - mvebu_mbus_get_pcie_mem_aperture(&pcie_mem);
207 -
208 - if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end)
209 - mtype = MT_UNCACHED;
210 -
211 + mtype = MT_UNCACHED;
212 return __arm_ioremap_caller(phys_addr, size, mtype, caller);
213 }
214
215 @@ -186,7 +180,7 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
216 struct device_node *cache_dn;
217
218 coherency_cpu_base = of_iomap(np, 0);
219 - arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
220 + arch_ioremap_caller = armada_wa_ioremap_caller;
221
222 /*
223 * We should switch the PL310 to I/O coherency mode only if
224 diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
225 index a307eb6e7fa8..7f94755089e2 100644
226 --- a/arch/arm64/include/asm/ptrace.h
227 +++ b/arch/arm64/include/asm/ptrace.h
228 @@ -117,6 +117,8 @@ struct pt_regs {
229 };
230 u64 orig_x0;
231 u64 syscallno;
232 + u64 orig_addr_limit;
233 + u64 unused; // maintain 16 byte alignment
234 };
235
236 #define arch_has_single_step() (1)
237 diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
238 index 3ae6b310ac9b..1abcd8829f3b 100644
239 --- a/arch/arm64/kernel/asm-offsets.c
240 +++ b/arch/arm64/kernel/asm-offsets.c
241 @@ -59,6 +59,7 @@ int main(void)
242 DEFINE(S_PC, offsetof(struct pt_regs, pc));
243 DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0));
244 DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno));
245 + DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit));
246 DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs));
247 BLANK();
248 DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter));
249 diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
250 index 12e8d2bcb3f9..6c3b7345a6c4 100644
251 --- a/arch/arm64/kernel/entry.S
252 +++ b/arch/arm64/kernel/entry.S
253 @@ -28,6 +28,7 @@
254 #include <asm/errno.h>
255 #include <asm/esr.h>
256 #include <asm/irq.h>
257 +#include <asm/memory.h>
258 #include <asm/thread_info.h>
259 #include <asm/unistd.h>
260
261 @@ -97,7 +98,14 @@
262 mov x29, xzr // fp pointed to user-space
263 .else
264 add x21, sp, #S_FRAME_SIZE
265 - .endif
266 + get_thread_info tsk
267 + /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
268 + ldr x20, [tsk, #TI_ADDR_LIMIT]
269 + str x20, [sp, #S_ORIG_ADDR_LIMIT]
270 + mov x20, #TASK_SIZE_64
271 + str x20, [tsk, #TI_ADDR_LIMIT]
272 + ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
273 + .endif /* \el == 0 */
274 mrs x22, elr_el1
275 mrs x23, spsr_el1
276 stp lr, x21, [sp, #S_LR]
277 @@ -128,6 +136,14 @@
278 .endm
279
280 .macro kernel_exit, el
281 + .if \el != 0
282 + /* Restore the task's original addr_limit. */
283 + ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
284 + str x20, [tsk, #TI_ADDR_LIMIT]
285 +
286 + /* No need to restore UAO, it will be restored from SPSR_EL1 */
287 + .endif
288 +
289 ldp x21, x22, [sp, #S_PC] // load ELR, SPSR
290 .if \el == 0
291 ct_user_enter
292 @@ -406,7 +422,6 @@ el1_irq:
293 bl trace_hardirqs_off
294 #endif
295
296 - get_thread_info tsk
297 irq_handler
298
299 #ifdef CONFIG_PREEMPT
300 diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
301 index c5392081b49b..58651a9dfcf8 100644
302 --- a/arch/arm64/kernel/traps.c
303 +++ b/arch/arm64/kernel/traps.c
304 @@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
305
306 /*
307 * We need to switch to kernel mode so that we can use __get_user
308 - * to safely read from kernel space. Note that we now dump the
309 - * code first, just in case the backtrace kills us.
310 + * to safely read from kernel space.
311 */
312 fs = get_fs();
313 set_fs(KERNEL_DS);
314 @@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
315 print_ip_sym(where);
316 }
317
318 -static void dump_instr(const char *lvl, struct pt_regs *regs)
319 +static void __dump_instr(const char *lvl, struct pt_regs *regs)
320 {
321 unsigned long addr = instruction_pointer(regs);
322 - mm_segment_t fs;
323 char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
324 int i;
325
326 - /*
327 - * We need to switch to kernel mode so that we can use __get_user
328 - * to safely read from kernel space. Note that we now dump the
329 - * code first, just in case the backtrace kills us.
330 - */
331 - fs = get_fs();
332 - set_fs(KERNEL_DS);
333 -
334 for (i = -4; i < 1; i++) {
335 unsigned int val, bad;
336
337 @@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
338 }
339 }
340 printk("%sCode: %s\n", lvl, str);
341 +}
342
343 - set_fs(fs);
344 +static void dump_instr(const char *lvl, struct pt_regs *regs)
345 +{
346 + if (!user_mode(regs)) {
347 + mm_segment_t fs = get_fs();
348 + set_fs(KERNEL_DS);
349 + __dump_instr(lvl, regs);
350 + set_fs(fs);
351 + } else {
352 + __dump_instr(lvl, regs);
353 + }
354 }
355
356 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
357 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
358 index 10b79e9e87d1..e22849a90557 100644
359 --- a/arch/arm64/mm/fault.c
360 +++ b/arch/arm64/mm/fault.c
361 @@ -284,7 +284,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
362 }
363
364 if (permission_fault(esr) && (addr < USER_DS)) {
365 - if (get_fs() == KERNEL_DS)
366 + /* regs->orig_addr_limit may be 0 if we entered from EL0 */
367 + if (regs->orig_addr_limit == KERNEL_DS)
368 die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
369
370 if (!search_exception_tables(regs->pc))
371 diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
372 index dbd12ea8ce68..43a76b07eb32 100644
373 --- a/arch/arm64/mm/flush.c
374 +++ b/arch/arm64/mm/flush.c
375 @@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
376 {
377 struct page *page = pte_page(pte);
378
379 - /* no flushing needed for anonymous pages */
380 - if (!page_mapping(page))
381 - return;
382 -
383 if (!test_and_set_bit(PG_dcache_clean, &page->flags))
384 sync_icache_aliases(page_address(page),
385 PAGE_SIZE << compound_order(page));
386 diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
387 index 942b8f6bf35b..1907ab379fad 100644
388 --- a/arch/mips/include/asm/kvm_host.h
389 +++ b/arch/mips/include/asm/kvm_host.h
390 @@ -336,6 +336,7 @@ struct kvm_mips_tlb {
391 #define KVM_MIPS_GUEST_TLB_SIZE 64
392 struct kvm_vcpu_arch {
393 void *host_ebase, *guest_ebase;
394 + int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
395 unsigned long host_stack;
396 unsigned long host_gp;
397
398 diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
399 index 4ab4bdfad703..2143884709e4 100644
400 --- a/arch/mips/kvm/interrupt.h
401 +++ b/arch/mips/kvm/interrupt.h
402 @@ -28,6 +28,7 @@
403 #define MIPS_EXC_MAX 12
404 /* XXXSL More to follow */
405
406 +extern char __kvm_mips_vcpu_run_end[];
407 extern char mips32_exception[], mips32_exceptionEnd[];
408 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
409
410 diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
411 index 81687ab1b523..fc93a08b6954 100644
412 --- a/arch/mips/kvm/locore.S
413 +++ b/arch/mips/kvm/locore.S
414 @@ -227,6 +227,7 @@ FEXPORT(__kvm_mips_load_k0k1)
415
416 /* Jump to guest */
417 eret
418 +EXPORT(__kvm_mips_vcpu_run_end)
419
420 VECTOR(MIPSX(exception), unknown)
421 /* Find out what mode we came from and jump to the proper handler. */
422 diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
423 index 70ef1a43c114..e223cb3d9e81 100644
424 --- a/arch/mips/kvm/mips.c
425 +++ b/arch/mips/kvm/mips.c
426 @@ -314,6 +314,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
427 memcpy(gebase + offset, mips32_GuestException,
428 mips32_GuestExceptionEnd - mips32_GuestException);
429
430 +#ifdef MODULE
431 + offset += mips32_GuestExceptionEnd - mips32_GuestException;
432 + memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
433 + __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
434 + vcpu->arch.vcpu_run = gebase + offset;
435 +#else
436 + vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
437 +#endif
438 +
439 /* Invalidate the icache for these ranges */
440 local_flush_icache_range((unsigned long)gebase,
441 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
442 @@ -403,7 +412,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
443 /* Disable hardware page table walking while in guest */
444 htw_stop();
445
446 - r = __kvm_mips_vcpu_run(run, vcpu);
447 + r = vcpu->arch.vcpu_run(run, vcpu);
448
449 /* Re-enable HTW before enabling interrupts */
450 htw_start();
451 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
452 index b8500b4ac7fe..bec85055fc42 100644
453 --- a/arch/powerpc/kernel/process.c
454 +++ b/arch/powerpc/kernel/process.c
455 @@ -1501,6 +1501,16 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
456 current->thread.regs = regs - 1;
457 }
458
459 +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
460 + /*
461 + * Clear any transactional state, we're exec()ing. The cause is
462 + * not important as there will never be a recheckpoint so it's not
463 + * user visible.
464 + */
465 + if (MSR_TM_SUSPENDED(mfmsr()))
466 + tm_reclaim_current(0);
467 +#endif
468 +
469 memset(regs->gpr, 0, sizeof(regs->gpr));
470 regs->ctr = 0;
471 regs->link = 0;
472 diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
473 index ccd2037c797f..6ee4b72cda42 100644
474 --- a/arch/powerpc/kernel/prom_init.c
475 +++ b/arch/powerpc/kernel/prom_init.c
476 @@ -719,7 +719,7 @@ unsigned char ibm_architecture_vec[] = {
477 * must match by the macro below. Update the definition if
478 * the structure layout changes.
479 */
480 -#define IBM_ARCH_VEC_NRCORES_OFFSET 125
481 +#define IBM_ARCH_VEC_NRCORES_OFFSET 133
482 W(NR_CPUS), /* number of cores supported */
483 0,
484 0,
485 diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
486 index bd98ce2be17b..3e8865b187de 100644
487 --- a/arch/powerpc/platforms/pseries/iommu.c
488 +++ b/arch/powerpc/platforms/pseries/iommu.c
489 @@ -912,7 +912,8 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
490 static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
491 struct ddw_query_response *query)
492 {
493 - struct eeh_dev *edev;
494 + struct device_node *dn;
495 + struct pci_dn *pdn;
496 u32 cfg_addr;
497 u64 buid;
498 int ret;
499 @@ -923,11 +924,10 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
500 * Retrieve them from the pci device, not the node with the
501 * dma-window property
502 */
503 - edev = pci_dev_to_eeh_dev(dev);
504 - cfg_addr = edev->config_addr;
505 - if (edev->pe_config_addr)
506 - cfg_addr = edev->pe_config_addr;
507 - buid = edev->phb->buid;
508 + dn = pci_device_to_OF_node(dev);
509 + pdn = PCI_DN(dn);
510 + buid = pdn->phb->buid;
511 + cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
512
513 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
514 cfg_addr, BUID_HI(buid), BUID_LO(buid));
515 @@ -941,7 +941,8 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
516 struct ddw_create_response *create, int page_shift,
517 int window_shift)
518 {
519 - struct eeh_dev *edev;
520 + struct device_node *dn;
521 + struct pci_dn *pdn;
522 u32 cfg_addr;
523 u64 buid;
524 int ret;
525 @@ -952,11 +953,10 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
526 * Retrieve them from the pci device, not the node with the
527 * dma-window property
528 */
529 - edev = pci_dev_to_eeh_dev(dev);
530 - cfg_addr = edev->config_addr;
531 - if (edev->pe_config_addr)
532 - cfg_addr = edev->pe_config_addr;
533 - buid = edev->phb->buid;
534 + dn = pci_device_to_OF_node(dev);
535 + pdn = PCI_DN(dn);
536 + buid = pdn->phb->buid;
537 + cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));
538
539 do {
540 /* extra outputs are LIOBN and dma-addr (hi, lo) */
541 diff --git a/arch/s390/include/asm/fpu/api.h b/arch/s390/include/asm/fpu/api.h
542 index 5e04f3cbd320..8ae236b0f80b 100644
543 --- a/arch/s390/include/asm/fpu/api.h
544 +++ b/arch/s390/include/asm/fpu/api.h
545 @@ -22,7 +22,7 @@ static inline int test_fp_ctl(u32 fpc)
546 " la %0,0\n"
547 "1:\n"
548 EX_TABLE(0b,1b)
549 - : "=d" (rc), "=d" (orig_fpc)
550 + : "=d" (rc), "=&d" (orig_fpc)
551 : "d" (fpc), "0" (-EINVAL));
552 return rc;
553 }
554 diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
555 index f20abdb5630a..d14069d4b88d 100644
556 --- a/arch/s390/kernel/ipl.c
557 +++ b/arch/s390/kernel/ipl.c
558 @@ -2064,12 +2064,5 @@ void s390_reset_system(void)
559 S390_lowcore.program_new_psw.addr =
560 (unsigned long) s390_base_pgm_handler;
561
562 - /*
563 - * Clear subchannel ID and number to signal new kernel that no CCW or
564 - * SCSI IPL has been done (for kexec and kdump)
565 - */
566 - S390_lowcore.subchannel_id = 0;
567 - S390_lowcore.subchannel_nr = 0;
568 -
569 do_reset_calls();
570 }
571 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
572 index 4324b87f9398..9f0ce0e6eeb4 100644
573 --- a/arch/s390/mm/pgtable.c
574 +++ b/arch/s390/mm/pgtable.c
575 @@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
576 pgste = pgste_get_lock(ptep);
577 pgstev = pgste_val(pgste);
578 pte = *ptep;
579 - if (pte_swap(pte) &&
580 + if (!reset && pte_swap(pte) &&
581 ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
582 (pgstev & _PGSTE_GPS_ZERO))) {
583 ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
584 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
585 index b1ef9e489084..b67f9e8b93a6 100644
586 --- a/arch/x86/boot/Makefile
587 +++ b/arch/x86/boot/Makefile
588 @@ -171,6 +171,9 @@ isoimage: $(obj)/bzImage
589 for i in lib lib64 share end ; do \
590 if [ -f /usr/$$i/syslinux/isolinux.bin ] ; then \
591 cp /usr/$$i/syslinux/isolinux.bin $(obj)/isoimage ; \
592 + if [ -f /usr/$$i/syslinux/ldlinux.c32 ]; then \
593 + cp /usr/$$i/syslinux/ldlinux.c32 $(obj)/isoimage ; \
594 + fi ; \
595 break ; \
596 fi ; \
597 if [ $$i = end ] ; then exit 1 ; fi ; \
598 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
599 index 041e442a3e28..7eb806ca6b03 100644
600 --- a/arch/x86/events/core.c
601 +++ b/arch/x86/events/core.c
602 @@ -2313,7 +2313,7 @@ void
603 perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
604 {
605 struct stack_frame frame;
606 - const void __user *fp;
607 + const unsigned long __user *fp;
608
609 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
610 /* TODO: We don't support guest os callchain now */
611 @@ -2326,7 +2326,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
612 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
613 return;
614
615 - fp = (void __user *)regs->bp;
616 + fp = (unsigned long __user *)regs->bp;
617
618 perf_callchain_store(entry, regs->ip);
619
620 @@ -2339,16 +2339,17 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
621 pagefault_disable();
622 while (entry->nr < PERF_MAX_STACK_DEPTH) {
623 unsigned long bytes;
624 +
625 frame.next_frame = NULL;
626 frame.return_address = 0;
627
628 - if (!access_ok(VERIFY_READ, fp, 16))
629 + if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
630 break;
631
632 - bytes = __copy_from_user_nmi(&frame.next_frame, fp, 8);
633 + bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
634 if (bytes != 0)
635 break;
636 - bytes = __copy_from_user_nmi(&frame.return_address, fp+8, 8);
637 + bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
638 if (bytes != 0)
639 break;
640
641 diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
642 index 1705c9d75e44..78ee9ebe38dd 100644
643 --- a/arch/x86/events/intel/rapl.c
644 +++ b/arch/x86/events/intel/rapl.c
645 @@ -665,7 +665,7 @@ static void __init cleanup_rapl_pmus(void)
646 int i;
647
648 for (i = 0; i < rapl_pmus->maxpkg; i++)
649 - kfree(rapl_pmus->pmus + i);
650 + kfree(rapl_pmus->pmus[i]);
651 kfree(rapl_pmus);
652 }
653
654 diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
655 index 7a79ee2778b3..33c709ca2666 100644
656 --- a/arch/x86/include/asm/msr.h
657 +++ b/arch/x86/include/asm/msr.h
658 @@ -112,7 +112,7 @@ static inline void native_write_msr(unsigned int msr,
659 unsigned low, unsigned high)
660 {
661 asm volatile("wrmsr" : : "c" (msr), "a"(low), "d" (high) : "memory");
662 - if (msr_tracepoint_active(__tracepoint_read_msr))
663 + if (msr_tracepoint_active(__tracepoint_write_msr))
664 do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
665 }
666
667 @@ -131,7 +131,7 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
668 : "c" (msr), "0" (low), "d" (high),
669 [fault] "i" (-EIO)
670 : "memory");
671 - if (msr_tracepoint_active(__tracepoint_read_msr))
672 + if (msr_tracepoint_active(__tracepoint_write_msr))
673 do_trace_write_msr(msr, ((u64)high << 32 | low), err);
674 return err;
675 }
676 diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
677 index a147e676fc7b..e991d5c8bb3a 100644
678 --- a/arch/x86/kernel/amd_nb.c
679 +++ b/arch/x86/kernel/amd_nb.c
680 @@ -71,8 +71,8 @@ int amd_cache_northbridges(void)
681 while ((misc = next_northbridge(misc, amd_nb_misc_ids)) != NULL)
682 i++;
683
684 - if (i == 0)
685 - return 0;
686 + if (!i)
687 + return -ENODEV;
688
689 nb = kzalloc(i * sizeof(struct amd_northbridge), GFP_KERNEL);
690 if (!nb)
691 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
692 index ae703acb85c1..44bcd5779ec1 100644
693 --- a/arch/x86/kernel/kprobes/core.c
694 +++ b/arch/x86/kernel/kprobes/core.c
695 @@ -960,7 +960,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
696 * normal page fault.
697 */
698 regs->ip = (unsigned long)cur->addr;
699 + /*
700 + * Trap flag (TF) has been set here because this fault
701 + * happened where the single stepping will be done.
702 + * So clear it by resetting the current kprobe:
703 + */
704 + regs->flags &= ~X86_EFLAGS_TF;
705 +
706 + /*
707 + * If the TF flag was set before the kprobe hit,
708 + * don't touch it:
709 + */
710 regs->flags |= kcb->kprobe_old_flags;
711 +
712 if (kcb->kprobe_status == KPROBE_REENTER)
713 restore_previous_kprobe(kcb);
714 else
715 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
716 index faf52bac1416..c4217a23a98d 100644
717 --- a/arch/x86/kvm/vmx.c
718 +++ b/arch/x86/kvm/vmx.c
719 @@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
720 unsigned int dest;
721
722 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
723 - !irq_remapping_cap(IRQ_POSTING_CAP))
724 + !irq_remapping_cap(IRQ_POSTING_CAP) ||
725 + !kvm_vcpu_apicv_active(vcpu))
726 return;
727
728 do {
729 @@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
730 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
731
732 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
733 - !irq_remapping_cap(IRQ_POSTING_CAP))
734 + !irq_remapping_cap(IRQ_POSTING_CAP) ||
735 + !kvm_vcpu_apicv_active(vcpu))
736 return;
737
738 /* Set SN when the vCPU is preempted */
739 @@ -6657,7 +6659,13 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
740
741 /* Checks for #GP/#SS exceptions. */
742 exn = false;
743 - if (is_protmode(vcpu)) {
744 + if (is_long_mode(vcpu)) {
745 + /* Long mode: #GP(0)/#SS(0) if the memory address is in a
746 + * non-canonical form. This is the only check on the memory
747 + * destination for long mode!
748 + */
749 + exn = is_noncanonical_address(*ret);
750 + } else if (is_protmode(vcpu)) {
751 /* Protected mode: apply checks for segment validity in the
752 * following order:
753 * - segment type check (#GP(0) may be thrown)
754 @@ -6674,17 +6682,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
755 * execute-only code segment
756 */
757 exn = ((s.type & 0xa) == 8);
758 - }
759 - if (exn) {
760 - kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
761 - return 1;
762 - }
763 - if (is_long_mode(vcpu)) {
764 - /* Long mode: #GP(0)/#SS(0) if the memory address is in a
765 - * non-canonical form. This is an only check for long mode.
766 - */
767 - exn = is_noncanonical_address(*ret);
768 - } else if (is_protmode(vcpu)) {
769 + if (exn) {
770 + kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
771 + return 1;
772 + }
773 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
774 */
775 exn = (s.unusable != 0);
776 @@ -10702,7 +10703,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
777 struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
778
779 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
780 - !irq_remapping_cap(IRQ_POSTING_CAP))
781 + !irq_remapping_cap(IRQ_POSTING_CAP) ||
782 + !kvm_vcpu_apicv_active(vcpu))
783 return 0;
784
785 vcpu->pre_pcpu = vcpu->cpu;
786 @@ -10768,7 +10770,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
787 unsigned long flags;
788
789 if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
790 - !irq_remapping_cap(IRQ_POSTING_CAP))
791 + !irq_remapping_cap(IRQ_POSTING_CAP) ||
792 + !kvm_vcpu_apicv_active(vcpu))
793 return;
794
795 do {
796 @@ -10821,7 +10824,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
797 int idx, ret = -EINVAL;
798
799 if (!kvm_arch_has_assigned_device(kvm) ||
800 - !irq_remapping_cap(IRQ_POSTING_CAP))
801 + !irq_remapping_cap(IRQ_POSTING_CAP) ||
802 + !kvm_vcpu_apicv_active(kvm->vcpus[0]))
803 return 0;
804
805 idx = srcu_read_lock(&kvm->irq_srcu);
806 diff --git a/crypto/rsa-pkcs1pad.c b/crypto/rsa-pkcs1pad.c
807 index ead8dc0d084e..8ba426635b1b 100644
808 --- a/crypto/rsa-pkcs1pad.c
809 +++ b/crypto/rsa-pkcs1pad.c
810 @@ -102,10 +102,10 @@ struct pkcs1pad_inst_ctx {
811 };
812
813 struct pkcs1pad_request {
814 - struct akcipher_request child_req;
815 -
816 struct scatterlist in_sg[3], out_sg[2];
817 uint8_t *in_buf, *out_buf;
818 +
819 + struct akcipher_request child_req;
820 };
821
822 static int pkcs1pad_set_pub_key(struct crypto_akcipher *tfm, const void *key,
823 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
824 index 961acc788f44..91a9e6af2ec4 100644
825 --- a/drivers/ata/libata-eh.c
826 +++ b/drivers/ata/libata-eh.c
827 @@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
828 ata_scsi_port_error_handler(host, ap);
829
830 /* finish or retry handled scmd's and clean up */
831 - WARN_ON(host->host_failed || !list_empty(&eh_work_q));
832 + WARN_ON(!list_empty(&eh_work_q));
833
834 DPRINTK("EXIT\n");
835 }
836 diff --git a/drivers/base/module.c b/drivers/base/module.c
837 index db930d3ee312..2a215780eda2 100644
838 --- a/drivers/base/module.c
839 +++ b/drivers/base/module.c
840 @@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
841
842 static void module_create_drivers_dir(struct module_kobject *mk)
843 {
844 - if (!mk || mk->drivers_dir)
845 - return;
846 + static DEFINE_MUTEX(drivers_dir_mutex);
847
848 - mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
849 + mutex_lock(&drivers_dir_mutex);
850 + if (mk && !mk->drivers_dir)
851 + mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
852 + mutex_unlock(&drivers_dir_mutex);
853 }
854
855 void module_add_driver(struct module *mod, struct device_driver *drv)
856 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
857 index 94fb407d8561..44b1bd6baa38 100644
858 --- a/drivers/char/ipmi/ipmi_msghandler.c
859 +++ b/drivers/char/ipmi/ipmi_msghandler.c
860 @@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
861 while (!list_empty(&intf->waiting_rcv_msgs)) {
862 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
863 struct ipmi_smi_msg, link);
864 + list_del(&smi_msg->link);
865 if (!run_to_completion)
866 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
867 flags);
868 @@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
869 if (rv > 0) {
870 /*
871 * To preserve message order, quit if we
872 - * can't handle a message.
873 + * can't handle a message. Add the message
874 + * back at the head, this is safe because this
875 + * tasklet is the only thing that pulls the
876 + * messages.
877 */
878 + list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
879 break;
880 } else {
881 - list_del(&smi_msg->link);
882 if (rv == 0)
883 /* Message handled */
884 ipmi_free_smi_msg(smi_msg);
885 diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
886 index 29c7c53d2845..92561c87f349 100644
887 --- a/drivers/crypto/qat/qat_common/Makefile
888 +++ b/drivers/crypto/qat/qat_common/Makefile
889 @@ -2,6 +2,7 @@ $(obj)/qat_rsapubkey-asn1.o: $(obj)/qat_rsapubkey-asn1.c \
890 $(obj)/qat_rsapubkey-asn1.h
891 $(obj)/qat_rsaprivkey-asn1.o: $(obj)/qat_rsaprivkey-asn1.c \
892 $(obj)/qat_rsaprivkey-asn1.h
893 +$(obj)/qat_asym_algs.o: $(obj)/qat_rsapubkey-asn1.h $(obj)/qat_rsaprivkey-asn1.h
894
895 clean-files += qat_rsapubkey-asn1.c qat_rsapubkey-asn1.h
896 clean-files += qat_rsaprivkey-asn1.c qat_rsaprivkey-asn1.h
897 diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
898 index 1472f48c8ac6..ff51b51d2fd4 100644
899 --- a/drivers/edac/edac_mc.c
900 +++ b/drivers/edac/edac_mc.c
901 @@ -565,7 +565,8 @@ void edac_mc_reset_delay_period(unsigned long value)
902 list_for_each(item, &mc_devices) {
903 mci = list_entry(item, struct mem_ctl_info, link);
904
905 - edac_mod_work(&mci->work, value);
906 + if (mci->op_state == OP_RUNNING_POLL)
907 + edac_mod_work(&mci->work, value);
908 }
909 mutex_unlock(&mem_ctls_mutex);
910 }
911 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
912 index 8bf745d2da7e..b274fa2ffdec 100644
913 --- a/drivers/edac/sb_edac.c
914 +++ b/drivers/edac/sb_edac.c
915 @@ -239,8 +239,11 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
916 { 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
917 };
918
919 -#define RIR_RNK_TGT(reg) GET_BITFIELD(reg, 16, 19)
920 -#define RIR_OFFSET(reg) GET_BITFIELD(reg, 2, 14)
921 +#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
922 + GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))
923 +
924 +#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
925 + GET_BITFIELD(reg, 2, 15) : GET_BITFIELD(reg, 2, 14))
926
927 /* Device 16, functions 2-7 */
928
929 @@ -1916,14 +1919,14 @@ static void get_memory_layout(const struct mem_ctl_info *mci)
930 pci_read_config_dword(pvt->pci_tad[i],
931 rir_offset[j][k],
932 &reg);
933 - tmp_mb = RIR_OFFSET(reg) << 6;
934 + tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
935
936 gb = div_u64_rem(tmp_mb, 1024, &mb);
937 edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
938 i, j, k,
939 gb, (mb*1000)/1024,
940 ((u64)tmp_mb) << 20L,
941 - (u32)RIR_RNK_TGT(reg),
942 + (u32)RIR_RNK_TGT(pvt->info.type, reg),
943 reg);
944 }
945 }
946 @@ -2256,7 +2259,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
947 pci_read_config_dword(pvt->pci_tad[ch_add + base_ch],
948 rir_offset[n_rir][idx],
949 &reg);
950 - *rank = RIR_RNK_TGT(reg);
951 + *rank = RIR_RNK_TGT(pvt->info.type, reg);
952
953 edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
954 n_rir,
955 diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
956 index 8b3226dca1d9..caff46c0e214 100644
957 --- a/drivers/extcon/extcon-palmas.c
958 +++ b/drivers/extcon/extcon-palmas.c
959 @@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
960
961 palmas_enable_irq(palmas_usb);
962 /* perform initial detection */
963 + if (palmas_usb->enable_gpio_vbus_detection)
964 + palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
965 palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
966 device_set_wakeup_capable(&pdev->dev, true);
967 return 0;
968 diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
969 index e85e7539cf5d..eb43ae4835c1 100644
970 --- a/drivers/gpio/gpio-sch.c
971 +++ b/drivers/gpio/gpio-sch.c
972 @@ -61,9 +61,8 @@ static unsigned sch_gpio_bit(struct sch_gpio *sch, unsigned gpio)
973 return gpio % 8;
974 }
975
976 -static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
977 +static int sch_gpio_reg_get(struct sch_gpio *sch, unsigned gpio, unsigned reg)
978 {
979 - struct sch_gpio *sch = gpiochip_get_data(gc);
980 unsigned short offset, bit;
981 u8 reg_val;
982
983 @@ -75,10 +74,9 @@ static int sch_gpio_reg_get(struct gpio_chip *gc, unsigned gpio, unsigned reg)
984 return reg_val;
985 }
986
987 -static void sch_gpio_reg_set(struct gpio_chip *gc, unsigned gpio, unsigned reg,
988 +static void sch_gpio_reg_set(struct sch_gpio *sch, unsigned gpio, unsigned reg,
989 int val)
990 {
991 - struct sch_gpio *sch = gpiochip_get_data(gc);
992 unsigned short offset, bit;
993 u8 reg_val;
994
995 @@ -98,14 +96,15 @@ static int sch_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
996 struct sch_gpio *sch = gpiochip_get_data(gc);
997
998 spin_lock(&sch->lock);
999 - sch_gpio_reg_set(gc, gpio_num, GIO, 1);
1000 + sch_gpio_reg_set(sch, gpio_num, GIO, 1);
1001 spin_unlock(&sch->lock);
1002 return 0;
1003 }
1004
1005 static int sch_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
1006 {
1007 - return sch_gpio_reg_get(gc, gpio_num, GLV);
1008 + struct sch_gpio *sch = gpiochip_get_data(gc);
1009 + return sch_gpio_reg_get(sch, gpio_num, GLV);
1010 }
1011
1012 static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
1013 @@ -113,7 +112,7 @@ static void sch_gpio_set(struct gpio_chip *gc, unsigned gpio_num, int val)
1014 struct sch_gpio *sch = gpiochip_get_data(gc);
1015
1016 spin_lock(&sch->lock);
1017 - sch_gpio_reg_set(gc, gpio_num, GLV, val);
1018 + sch_gpio_reg_set(sch, gpio_num, GLV, val);
1019 spin_unlock(&sch->lock);
1020 }
1021
1022 @@ -123,7 +122,7 @@ static int sch_gpio_direction_out(struct gpio_chip *gc, unsigned gpio_num,
1023 struct sch_gpio *sch = gpiochip_get_data(gc);
1024
1025 spin_lock(&sch->lock);
1026 - sch_gpio_reg_set(gc, gpio_num, GIO, 0);
1027 + sch_gpio_reg_set(sch, gpio_num, GIO, 0);
1028 spin_unlock(&sch->lock);
1029
1030 /*
1031 @@ -182,13 +181,13 @@ static int sch_gpio_probe(struct platform_device *pdev)
1032 * GPIO7 is configured by the CMC as SLPIOVR
1033 * Enable GPIO[9:8] core powered gpios explicitly
1034 */
1035 - sch_gpio_reg_set(&sch->chip, 8, GEN, 1);
1036 - sch_gpio_reg_set(&sch->chip, 9, GEN, 1);
1037 + sch_gpio_reg_set(sch, 8, GEN, 1);
1038 + sch_gpio_reg_set(sch, 9, GEN, 1);
1039 /*
1040 * SUS_GPIO[2:0] enabled by default
1041 * Enable SUS_GPIO3 resume powered gpio explicitly
1042 */
1043 - sch_gpio_reg_set(&sch->chip, 13, GEN, 1);
1044 + sch_gpio_reg_set(sch, 13, GEN, 1);
1045 break;
1046
1047 case PCI_DEVICE_ID_INTEL_ITC_LPC:
1048 diff --git a/drivers/gpio/gpiolib-legacy.c b/drivers/gpio/gpiolib-legacy.c
1049 index 3a5c7011ad3b..8b830996fe02 100644
1050 --- a/drivers/gpio/gpiolib-legacy.c
1051 +++ b/drivers/gpio/gpiolib-legacy.c
1052 @@ -28,6 +28,10 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
1053 if (!desc && gpio_is_valid(gpio))
1054 return -EPROBE_DEFER;
1055
1056 + err = gpiod_request(desc, label);
1057 + if (err)
1058 + return err;
1059 +
1060 if (flags & GPIOF_OPEN_DRAIN)
1061 set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1062
1063 @@ -37,10 +41,6 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
1064 if (flags & GPIOF_ACTIVE_LOW)
1065 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1066
1067 - err = gpiod_request(desc, label);
1068 - if (err)
1069 - return err;
1070 -
1071 if (flags & GPIOF_DIR_IN)
1072 err = gpiod_direction_input(desc);
1073 else
1074 diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1075 index cf3e71243d6d..996a73390bba 100644
1076 --- a/drivers/gpio/gpiolib.c
1077 +++ b/drivers/gpio/gpiolib.c
1078 @@ -1324,14 +1324,6 @@ static int __gpiod_request(struct gpio_desc *desc, const char *label)
1079 spin_lock_irqsave(&gpio_lock, flags);
1080 }
1081 done:
1082 - if (status < 0) {
1083 - /* Clear flags that might have been set by the caller before
1084 - * requesting the GPIO.
1085 - */
1086 - clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
1087 - clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
1088 - clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
1089 - }
1090 spin_unlock_irqrestore(&gpio_lock, flags);
1091 return status;
1092 }
1093 @@ -1345,8 +1337,12 @@ done:
1094 #define VALIDATE_DESC(desc) do { \
1095 if (!desc) \
1096 return 0; \
1097 + if (IS_ERR(desc)) { \
1098 + pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1099 + return PTR_ERR(desc); \
1100 + } \
1101 if (!desc->gdev) { \
1102 - pr_warn("%s: invalid GPIO\n", __func__); \
1103 + pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1104 return -EINVAL; \
1105 } \
1106 if ( !desc->gdev->chip ) { \
1107 @@ -1358,8 +1354,12 @@ done:
1108 #define VALIDATE_DESC_VOID(desc) do { \
1109 if (!desc) \
1110 return; \
1111 + if (IS_ERR(desc)) { \
1112 + pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
1113 + return; \
1114 + } \
1115 if (!desc->gdev) { \
1116 - pr_warn("%s: invalid GPIO\n", __func__); \
1117 + pr_warn("%s: invalid GPIO (no device)\n", __func__); \
1118 return; \
1119 } \
1120 if (!desc->gdev->chip) { \
1121 @@ -2011,7 +2011,7 @@ int gpiod_to_irq(const struct gpio_desc *desc)
1122 * requires this function to not return zero on an invalid descriptor
1123 * but rather a negative error number.
1124 */
1125 - if (!desc || !desc->gdev || !desc->gdev->chip)
1126 + if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
1127 return -EINVAL;
1128
1129 chip = desc->gdev->chip;
1130 @@ -2507,28 +2507,13 @@ struct gpio_desc *__must_check gpiod_get_optional(struct device *dev,
1131 }
1132 EXPORT_SYMBOL_GPL(gpiod_get_optional);
1133
1134 -/**
1135 - * gpiod_parse_flags - helper function to parse GPIO lookup flags
1136 - * @desc: gpio to be setup
1137 - * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
1138 - * of_get_gpio_hog()
1139 - *
1140 - * Set the GPIO descriptor flags based on the given GPIO lookup flags.
1141 - */
1142 -static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
1143 -{
1144 - if (lflags & GPIO_ACTIVE_LOW)
1145 - set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1146 - if (lflags & GPIO_OPEN_DRAIN)
1147 - set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1148 - if (lflags & GPIO_OPEN_SOURCE)
1149 - set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1150 -}
1151
1152 /**
1153 * gpiod_configure_flags - helper function to configure a given GPIO
1154 * @desc: gpio whose value will be assigned
1155 * @con_id: function within the GPIO consumer
1156 + * @lflags: gpio_lookup_flags - returned from of_find_gpio() or
1157 + * of_get_gpio_hog()
1158 * @dflags: gpiod_flags - optional GPIO initialization flags
1159 *
1160 * Return 0 on success, -ENOENT if no GPIO has been assigned to the
1161 @@ -2536,10 +2521,17 @@ static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags)
1162 * occurred while trying to acquire the GPIO.
1163 */
1164 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id,
1165 - enum gpiod_flags dflags)
1166 + unsigned long lflags, enum gpiod_flags dflags)
1167 {
1168 int status;
1169
1170 + if (lflags & GPIO_ACTIVE_LOW)
1171 + set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1172 + if (lflags & GPIO_OPEN_DRAIN)
1173 + set_bit(FLAG_OPEN_DRAIN, &desc->flags);
1174 + if (lflags & GPIO_OPEN_SOURCE)
1175 + set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1176 +
1177 /* No particular flag request, return here... */
1178 if (!(dflags & GPIOD_FLAGS_BIT_DIR_SET)) {
1179 pr_debug("no flags found for %s\n", con_id);
1180 @@ -2606,13 +2598,11 @@ struct gpio_desc *__must_check gpiod_get_index(struct device *dev,
1181 return desc;
1182 }
1183
1184 - gpiod_parse_flags(desc, lookupflags);
1185 -
1186 status = gpiod_request(desc, con_id);
1187 if (status < 0)
1188 return ERR_PTR(status);
1189
1190 - status = gpiod_configure_flags(desc, con_id, flags);
1191 + status = gpiod_configure_flags(desc, con_id, lookupflags, flags);
1192 if (status < 0) {
1193 dev_dbg(dev, "setup of GPIO %s failed\n", con_id);
1194 gpiod_put(desc);
1195 @@ -2668,6 +2658,10 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
1196 if (IS_ERR(desc))
1197 return desc;
1198
1199 + ret = gpiod_request(desc, NULL);
1200 + if (ret)
1201 + return ERR_PTR(ret);
1202 +
1203 if (active_low)
1204 set_bit(FLAG_ACTIVE_LOW, &desc->flags);
1205
1206 @@ -2678,10 +2672,6 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
1207 set_bit(FLAG_OPEN_SOURCE, &desc->flags);
1208 }
1209
1210 - ret = gpiod_request(desc, NULL);
1211 - if (ret)
1212 - return ERR_PTR(ret);
1213 -
1214 return desc;
1215 }
1216 EXPORT_SYMBOL_GPL(fwnode_get_named_gpiod);
1217 @@ -2734,8 +2724,6 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
1218 chip = gpiod_to_chip(desc);
1219 hwnum = gpio_chip_hwgpio(desc);
1220
1221 - gpiod_parse_flags(desc, lflags);
1222 -
1223 local_desc = gpiochip_request_own_desc(chip, hwnum, name);
1224 if (IS_ERR(local_desc)) {
1225 pr_err("requesting hog GPIO %s (chip %s, offset %d) failed\n",
1226 @@ -2743,7 +2731,7 @@ int gpiod_hog(struct gpio_desc *desc, const char *name,
1227 return PTR_ERR(local_desc);
1228 }
1229
1230 - status = gpiod_configure_flags(desc, name, dflags);
1231 + status = gpiod_configure_flags(desc, name, lflags, dflags);
1232 if (status < 0) {
1233 pr_err("setup of hog GPIO %s (chip %s, offset %d) failed\n",
1234 name, chip->label, hwnum);
1235 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
1236 index 6043dc7c3a94..3e21732f22e3 100644
1237 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
1238 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
1239 @@ -880,7 +880,7 @@ static int amdgpu_cgs_acpi_eval_object(void *cgs_device,
1240 struct cgs_acpi_method_argument *argument = NULL;
1241 uint32_t i, count;
1242 acpi_status status;
1243 - int result;
1244 + int result = 0;
1245 uint32_t func_no = 0xFFFFFFFF;
1246
1247 handle = ACPI_HANDLE(&adev->pdev->dev);
1248 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1249 index b04337de65d1..d78739d2952d 100644
1250 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1251 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1252 @@ -448,7 +448,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
1253 dev_info.max_memory_clock = adev->pm.default_mclk * 10;
1254 }
1255 dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
1256 - dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
1257 + dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
1258 + adev->gfx.config.max_shader_engines;
1259 dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
1260 dev_info._pad = 0;
1261 dev_info.ids_flags = 0;
1262 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1263 index bb8709066fd8..d2216f83bd7a 100644
1264 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1265 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
1266 @@ -5074,7 +5074,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
1267 case 2:
1268 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
1269 ring = &adev->gfx.compute_ring[i];
1270 - if ((ring->me == me_id) & (ring->pipe == pipe_id))
1271 + if ((ring->me == me_id) && (ring->pipe == pipe_id))
1272 amdgpu_fence_process(ring);
1273 }
1274 break;
1275 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1276 index ac005796b71c..7708d90b9da9 100644
1277 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1278 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
1279 @@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
1280 pqm_uninit(&p->pqm);
1281
1282 /* Iterate over all process device data structure and check
1283 - * if we should reset all wavefronts */
1284 - list_for_each_entry(pdd, &p->per_device_data, per_device_list)
1285 + * if we should delete debug managers and reset all wavefronts
1286 + */
1287 + list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
1288 + if ((pdd->dev->dbgmgr) &&
1289 + (pdd->dev->dbgmgr->pasid == p->pasid))
1290 + kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
1291 +
1292 if (pdd->reset_wavefronts) {
1293 pr_warn("amdkfd: Resetting all wave fronts\n");
1294 dbgdev_wave_reset_wavefronts(pdd->dev, p);
1295 pdd->reset_wavefronts = false;
1296 }
1297 + }
1298
1299 mutex_unlock(&p->mutex);
1300
1301 @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
1302
1303 idx = srcu_read_lock(&kfd_processes_srcu);
1304
1305 + /*
1306 + * Look for the process that matches the pasid. If there is no such
1307 + * process, we either released it in amdkfd's own notifier, or there
1308 + * is a bug. Unfortunately, there is no way to tell...
1309 + */
1310 hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
1311 - if (p->pasid == pasid)
1312 - break;
1313 + if (p->pasid == pasid) {
1314
1315 - srcu_read_unlock(&kfd_processes_srcu, idx);
1316 + srcu_read_unlock(&kfd_processes_srcu, idx);
1317
1318 - BUG_ON(p->pasid != pasid);
1319 + pr_debug("Unbinding process %d from IOMMU\n", pasid);
1320
1321 - mutex_lock(&p->mutex);
1322 + mutex_lock(&p->mutex);
1323
1324 - if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1325 - kfd_dbgmgr_destroy(dev->dbgmgr);
1326 + if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
1327 + kfd_dbgmgr_destroy(dev->dbgmgr);
1328
1329 - pqm_uninit(&p->pqm);
1330 + pqm_uninit(&p->pqm);
1331
1332 - pdd = kfd_get_process_device_data(dev, p);
1333 + pdd = kfd_get_process_device_data(dev, p);
1334
1335 - if (!pdd) {
1336 - mutex_unlock(&p->mutex);
1337 - return;
1338 - }
1339 + if (!pdd) {
1340 + mutex_unlock(&p->mutex);
1341 + return;
1342 + }
1343
1344 - if (pdd->reset_wavefronts) {
1345 - dbgdev_wave_reset_wavefronts(pdd->dev, p);
1346 - pdd->reset_wavefronts = false;
1347 - }
1348 + if (pdd->reset_wavefronts) {
1349 + dbgdev_wave_reset_wavefronts(pdd->dev, p);
1350 + pdd->reset_wavefronts = false;
1351 + }
1352
1353 - /*
1354 - * Just mark pdd as unbound, because we still need it to call
1355 - * amd_iommu_unbind_pasid() in when the process exits.
1356 - * We don't call amd_iommu_unbind_pasid() here
1357 - * because the IOMMU called us.
1358 - */
1359 - pdd->bound = false;
1360 + /*
1361 + * Just mark pdd as unbound, because we still need it
1362 + * to call amd_iommu_unbind_pasid() in when the
1363 + * process exits.
1364 + * We don't call amd_iommu_unbind_pasid() here
1365 + * because the IOMMU called us.
1366 + */
1367 + pdd->bound = false;
1368
1369 - mutex_unlock(&p->mutex);
1370 + mutex_unlock(&p->mutex);
1371 +
1372 + return;
1373 + }
1374 +
1375 + srcu_read_unlock(&kfd_processes_srcu, idx);
1376 }
1377
1378 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
1379 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
1380 index fa208ada6892..efb77eda7508 100644
1381 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
1382 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
1383 @@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
1384 {
1385 PHM_FUNC_CHECK(hwmgr);
1386
1387 - if (hwmgr->hwmgr_func->store_cc6_data == NULL)
1388 + if (display_config == NULL)
1389 return -EINVAL;
1390
1391 hwmgr->display_config = *display_config;
1392 +
1393 + if (hwmgr->hwmgr_func->store_cc6_data == NULL)
1394 + return -EINVAL;
1395 +
1396 /* to do pass other display configuration in furture */
1397
1398 if (hwmgr->hwmgr_func->store_cc6_data)
1399 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
1400 index 7b2d5000292d..7cce483b0859 100644
1401 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
1402 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
1403 @@ -21,6 +21,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
1404 return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
1405 }
1406
1407 +bool acpi_atcs_notify_pcie_device_ready(void *device)
1408 +{
1409 + int32_t temp_buffer = 1;
1410 +
1411 + return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
1412 + ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
1413 + &temp_buffer,
1414 + NULL,
1415 + 0,
1416 + sizeof(temp_buffer),
1417 + 0);
1418 +}
1419 +
1420 +
1421 int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
1422 {
1423 struct atcs_pref_req_input atcs_input;
1424 @@ -29,7 +43,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
1425 int result;
1426 struct cgs_system_info info = {0};
1427
1428 - if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
1429 + if( 0 != acpi_atcs_notify_pcie_device_ready(device))
1430 return -EINVAL;
1431
1432 info.size = sizeof(struct cgs_system_info);
1433 @@ -54,7 +68,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
1434 ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
1435 &atcs_input,
1436 &atcs_output,
1437 - 0,
1438 + 1,
1439 sizeof(atcs_input),
1440 sizeof(atcs_output));
1441 if (result != 0)
1442 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
1443 index 0d5d8372953e..aae2e8ec0542 100644
1444 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
1445 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
1446 @@ -1298,7 +1298,7 @@ static int tonga_populate_smc_mvdd_table(struct pp_hwmgr *hwmgr,
1447 table->Smio[count] |=
1448 data->mvdd_voltage_table.entries[count].smio_low;
1449 }
1450 - table->SmioMask2 = data->vddci_voltage_table.mask_low;
1451 + table->SmioMask2 = data->mvdd_voltage_table.mask_low;
1452
1453 CONVERT_FROM_HOST_TO_SMC_UL(table->MvddLevelCount);
1454 }
1455 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
1456 index b156481b50e8..17766e8da0ca 100644
1457 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
1458 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
1459 @@ -299,7 +299,7 @@ static int init_dpm_2_parameters(
1460 (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usPPMTableOffset));
1461
1462 if (0 != powerplay_table->usPPMTableOffset) {
1463 - if (1 == get_platform_power_management_table(hwmgr, atom_ppm_table)) {
1464 + if (get_platform_power_management_table(hwmgr, atom_ppm_table) == 0) {
1465 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
1466 PHM_PlatformCaps_EnablePlatformPowerManagement);
1467 }
1468 diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
1469 index 3bd5e69b9045..3df5de2cdab0 100644
1470 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
1471 +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
1472 @@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
1473 extern int acpi_pcie_perf_request(void *device,
1474 uint8_t perf_req,
1475 bool advertise);
1476 +extern bool acpi_atcs_notify_pcie_device_ready(void *device);
1477 diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1478 index d65dcaee3832..6d9c0f5bcba6 100644
1479 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1480 +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
1481 @@ -335,6 +335,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
1482
1483 atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
1484 factor_reg);
1485 + } else {
1486 + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
1487 }
1488 }
1489
1490 diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
1491 index d307d9627887..080a09014580 100644
1492 --- a/drivers/gpu/drm/drm_atomic.c
1493 +++ b/drivers/gpu/drm/drm_atomic.c
1494 @@ -354,6 +354,8 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1495 drm_property_unreference_blob(state->mode_blob);
1496 state->mode_blob = NULL;
1497
1498 + memset(&state->mode, 0, sizeof(state->mode));
1499 +
1500 if (blob) {
1501 if (blob->length != sizeof(struct drm_mode_modeinfo) ||
1502 drm_mode_convert_umode(&state->mode,
1503 @@ -366,7 +368,6 @@ int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
1504 DRM_DEBUG_ATOMIC("Set [MODE:%s] for CRTC state %p\n",
1505 state->mode.name, state);
1506 } else {
1507 - memset(&state->mode, 0, sizeof(state->mode));
1508 state->enable = false;
1509 DRM_DEBUG_ATOMIC("Set [NOMODE] for CRTC state %p\n",
1510 state);
1511 @@ -1287,14 +1288,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
1512 */
1513 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
1514 {
1515 + struct drm_device *dev = state->dev;
1516 + unsigned crtc_mask = 0;
1517 + struct drm_crtc *crtc;
1518 int ret;
1519 + bool global = false;
1520 +
1521 + drm_for_each_crtc(crtc, dev) {
1522 + if (crtc->acquire_ctx != state->acquire_ctx)
1523 + continue;
1524 +
1525 + crtc_mask |= drm_crtc_mask(crtc);
1526 + crtc->acquire_ctx = NULL;
1527 + }
1528 +
1529 + if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
1530 + global = true;
1531 +
1532 + dev->mode_config.acquire_ctx = NULL;
1533 + }
1534
1535 retry:
1536 drm_modeset_backoff(state->acquire_ctx);
1537
1538 - ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
1539 + ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
1540 if (ret)
1541 goto retry;
1542 +
1543 + drm_for_each_crtc(crtc, dev)
1544 + if (drm_crtc_mask(crtc) & crtc_mask)
1545 + crtc->acquire_ctx = state->acquire_ctx;
1546 +
1547 + if (global)
1548 + dev->mode_config.acquire_ctx = state->acquire_ctx;
1549 }
1550 EXPORT_SYMBOL(drm_atomic_legacy_backoff);
1551
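The two drm_atomic.c hunks above work together: zeroing state->mode up front (instead of only in the NOMODE branch) keeps stale timing data from surviving a failed blob conversion, and drm_atomic_legacy_backoff() now detaches its acquire context from every CRTC (and, when it held the global lock, from mode_config) before dropping the locks, then reattaches after reacquiring — so other lock takers never see a context whose locks have just been released. A condensed sketch of that detach/reacquire/reattach pattern, names taken from the hunk, error handling elided:

	unsigned crtc_mask = 0;
	struct drm_crtc *crtc;

	drm_for_each_crtc(crtc, dev)
		if (crtc->acquire_ctx == state->acquire_ctx) {
			crtc_mask |= drm_crtc_mask(crtc);    /* remember the holders */
			crtc->acquire_ctx = NULL;            /* detach before sleeping */
		}

	drm_modeset_backoff(state->acquire_ctx);             /* drop and reorder locks */
	drm_modeset_lock_all_ctx(dev, state->acquire_ctx);   /* retake everything */

	drm_for_each_crtc(crtc, dev)
		if (drm_crtc_mask(crtc) & crtc_mask)
			crtc->acquire_ctx = state->acquire_ctx;  /* reattach */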
1552 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
1553 index f30de8053545..691a1b939c1c 100644
1554 --- a/drivers/gpu/drm/drm_crtc.c
1555 +++ b/drivers/gpu/drm/drm_crtc.c
1556 @@ -2800,8 +2800,6 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
1557 goto out;
1558 }
1559
1560 - drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
1561 -
1562 /*
1563 * Check whether the primary plane supports the fb pixel format.
1564 * Drivers not implementing the universal planes API use a
1565 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1566 index 71ea0521ea96..ccfe7e72d8fc 100644
1567 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1568 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1569 @@ -2908,11 +2908,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
1570 drm_dp_port_teardown_pdt(port, port->pdt);
1571
1572 if (!port->input && port->vcpi.vcpi > 0) {
1573 - if (mgr->mst_state) {
1574 - drm_dp_mst_reset_vcpi_slots(mgr, port);
1575 - drm_dp_update_payload_part1(mgr);
1576 - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1577 - }
1578 + drm_dp_mst_reset_vcpi_slots(mgr, port);
1579 + drm_dp_update_payload_part1(mgr);
1580 + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
1581 }
1582
1583 kref_put(&port->kref, drm_dp_free_mst_port);
1584 diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
1585 index bb88e3df9257..e619b00c7343 100644
1586 --- a/drivers/gpu/drm/drm_fb_cma_helper.c
1587 +++ b/drivers/gpu/drm/drm_fb_cma_helper.c
1588 @@ -301,7 +301,7 @@ static int drm_fbdev_cma_create(struct drm_fb_helper *helper,
1589 err_fb_info_destroy:
1590 drm_fb_helper_release_fbi(helper);
1591 err_gem_free_object:
1592 - dev->driver->gem_free_object(&obj->base);
1593 + drm_gem_object_unreference_unlocked(&obj->base);
1594 return ret;
1595 }
1596
1597 diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
1598 index 1f500a1b9969..d988ca0b597a 100644
1599 --- a/drivers/gpu/drm/drm_gem_cma_helper.c
1600 +++ b/drivers/gpu/drm/drm_gem_cma_helper.c
1601 @@ -121,7 +121,7 @@ struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
1602 return cma_obj;
1603
1604 error:
1605 - drm->driver->gem_free_object(&cma_obj->base);
1606 + drm_gem_object_unreference_unlocked(&cma_obj->base);
1607 return ERR_PTR(ret);
1608 }
1609 EXPORT_SYMBOL_GPL(drm_gem_cma_create);
1610 @@ -162,18 +162,12 @@ drm_gem_cma_create_with_handle(struct drm_file *file_priv,
1611 * and handle has the id what user can see.
1612 */
1613 ret = drm_gem_handle_create(file_priv, gem_obj, handle);
1614 - if (ret)
1615 - goto err_handle_create;
1616 -
1617 /* drop reference from allocate - handle holds it now. */
1618 drm_gem_object_unreference_unlocked(gem_obj);
1619 + if (ret)
1620 + return ERR_PTR(ret);
1621
1622 return cma_obj;
1623 -
1624 -err_handle_create:
1625 - drm->driver->gem_free_object(gem_obj);
1626 -
1627 - return ERR_PTR(ret);
1628 }
1629
1630 /**
1631 diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
1632 index f7448a5e95a9..5d0fc2644352 100644
1633 --- a/drivers/gpu/drm/drm_modes.c
1634 +++ b/drivers/gpu/drm/drm_modes.c
1635 @@ -1518,6 +1518,8 @@ int drm_mode_convert_umode(struct drm_display_mode *out,
1636 if (out->status != MODE_OK)
1637 goto out;
1638
1639 + drm_mode_set_crtcinfo(out, CRTC_INTERLACE_HALVE_V);
1640 +
1641 ret = 0;
1642
1643 out:
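These two hunks relocate the CRTC timing derivation: drm_mode_setcrtc() no longer calls drm_mode_set_crtcinfo() itself; drm_mode_convert_umode() now does it for every successfully converted user mode, so both the legacy and atomic paths receive crtc_* timing fields that are already filled in. A short usage sketch under that reading:

	struct drm_display_mode mode;

	ret = drm_mode_convert_umode(&mode, umode);
	if (ret == 0) {
		/* crtc_hdisplay, crtc_vtotal, ... are valid here; vertical
		 * timings are halved for interlaced modes, per the
		 * CRTC_INTERLACE_HALVE_V flag passed in the hunk above */
	}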
1644 diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
1645 index e8d9337a66d8..77886f1182f1 100644
1646 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
1647 +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
1648 @@ -40,9 +40,10 @@ static const struct regmap_config fsl_dcu_regmap_config = {
1649 .reg_bits = 32,
1650 .reg_stride = 4,
1651 .val_bits = 32,
1652 - .cache_type = REGCACHE_RBTREE,
1653 + .cache_type = REGCACHE_FLAT,
1654
1655 .volatile_reg = fsl_dcu_drm_is_volatile_reg,
1656 + .max_register = 0x11fc,
1657 };
1658
1659 static int fsl_dcu_drm_irq_init(struct drm_device *dev)
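Switching the fsl-dcu register cache from REGCACHE_RBTREE to REGCACHE_FLAT trades memory for lookup speed: the flat cache is a plain array indexed by register offset, which is presumably why the same hunk also has to bound the range with .max_register. A hedged sketch of the resulting shape (example_config is an illustrative name):

	static const struct regmap_config example_config = {
		.reg_bits   = 32,
		.reg_stride = 4,
		.val_bits   = 32,
		/* FLAT allocates one cache slot per possible register, so
		 * the upper bound must be stated explicitly */
		.cache_type   = REGCACHE_FLAT,
		.max_register = 0x11fc,
	};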
1660 diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1661 index d3c473ffb90a..3af40616bf8b 100644
1662 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
1663 +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
1664 @@ -39,7 +39,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
1665 if (!mutex_is_locked(mutex))
1666 return false;
1667
1668 -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
1669 +#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
1670 return mutex->owner == task;
1671 #else
1672 /* Since UP may be pre-empted, we cannot assume that we own the lock */
1673 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
1674 index 7741efbd5e57..e5db9e1f623f 100644
1675 --- a/drivers/gpu/drm/i915/intel_display.c
1676 +++ b/drivers/gpu/drm/i915/intel_display.c
1677 @@ -8229,12 +8229,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1678 {
1679 struct drm_i915_private *dev_priv = dev->dev_private;
1680 struct intel_encoder *encoder;
1681 + int i;
1682 u32 val, final;
1683 bool has_lvds = false;
1684 bool has_cpu_edp = false;
1685 bool has_panel = false;
1686 bool has_ck505 = false;
1687 bool can_ssc = false;
1688 + bool using_ssc_source = false;
1689
1690 /* We need to take the global config into account */
1691 for_each_intel_encoder(dev, encoder) {
1692 @@ -8261,8 +8263,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1693 can_ssc = true;
1694 }
1695
1696 - DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
1697 - has_panel, has_lvds, has_ck505);
1698 + /* Check if any DPLLs are using the SSC source */
1699 + for (i = 0; i < dev_priv->num_shared_dpll; i++) {
1700 + u32 temp = I915_READ(PCH_DPLL(i));
1701 +
1702 + if (!(temp & DPLL_VCO_ENABLE))
1703 + continue;
1704 +
1705 + if ((temp & PLL_REF_INPUT_MASK) ==
1706 + PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1707 + using_ssc_source = true;
1708 + break;
1709 + }
1710 + }
1711 +
1712 + DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
1713 + has_panel, has_lvds, has_ck505, using_ssc_source);
1714
1715 /* Ironlake: try to setup display ref clock before DPLL
1716 * enabling. This is only under driver's control after
1717 @@ -8299,9 +8315,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1718 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
1719 } else
1720 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1721 - } else {
1722 - final |= DREF_SSC_SOURCE_DISABLE;
1723 - final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1724 + } else if (using_ssc_source) {
1725 + final |= DREF_SSC_SOURCE_ENABLE;
1726 + final |= DREF_SSC1_ENABLE;
1727 }
1728
1729 if (final == val)
1730 @@ -8347,7 +8363,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1731 POSTING_READ(PCH_DREF_CONTROL);
1732 udelay(200);
1733 } else {
1734 - DRM_DEBUG_KMS("Disabling SSC entirely\n");
1735 + DRM_DEBUG_KMS("Disabling CPU source output\n");
1736
1737 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1738
1739 @@ -8358,16 +8374,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
1740 POSTING_READ(PCH_DREF_CONTROL);
1741 udelay(200);
1742
1743 - /* Turn off the SSC source */
1744 - val &= ~DREF_SSC_SOURCE_MASK;
1745 - val |= DREF_SSC_SOURCE_DISABLE;
1746 + if (!using_ssc_source) {
1747 + DRM_DEBUG_KMS("Disabling SSC source\n");
1748
1749 - /* Turn off SSC1 */
1750 - val &= ~DREF_SSC1_ENABLE;
1751 + /* Turn off the SSC source */
1752 + val &= ~DREF_SSC_SOURCE_MASK;
1753 + val |= DREF_SSC_SOURCE_DISABLE;
1754
1755 - I915_WRITE(PCH_DREF_CONTROL, val);
1756 - POSTING_READ(PCH_DREF_CONTROL);
1757 - udelay(200);
1758 + /* Turn off SSC1 */
1759 + val &= ~DREF_SSC1_ENABLE;
1760 +
1761 + I915_WRITE(PCH_DREF_CONTROL, val);
1762 + POSTING_READ(PCH_DREF_CONTROL);
1763 + udelay(200);
1764 + }
1765 }
1766
1767 BUG_ON(val != final);
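The ironlake_init_pch_refclk() hunks add a using_ssc_source guard: before deciding to shut the spread-spectrum reference down, the function scans the PCH DPLLs and keeps the SSC source enabled if any running PLL still selects it as reference input. The detection step, condensed from the hunk:

	bool using_ssc_source = false;
	int i;

	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		u32 temp = I915_READ(PCH_DPLL(i));

		if ((temp & DPLL_VCO_ENABLE) &&            /* PLL is running */
		    (temp & PLL_REF_INPUT_MASK) ==
		    PLLB_REF_INPUT_SPREADSPECTRUMIN) {     /* ...off the SSC ref */
			using_ssc_source = true;
			break;
		}
	}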
1768 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
1769 index 412a34c39522..69054ef978fa 100644
1770 --- a/drivers/gpu/drm/i915/intel_dp.c
1771 +++ b/drivers/gpu/drm/i915/intel_dp.c
1772 @@ -4942,13 +4942,15 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
1773
1774 void intel_dp_encoder_reset(struct drm_encoder *encoder)
1775 {
1776 - struct intel_dp *intel_dp;
1777 + struct drm_i915_private *dev_priv = to_i915(encoder->dev);
1778 + struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1779 +
1780 + if (!HAS_DDI(dev_priv))
1781 + intel_dp->DP = I915_READ(intel_dp->output_reg);
1782
1783 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
1784 return;
1785
1786 - intel_dp = enc_to_intel_dp(encoder);
1787 -
1788 pps_lock(intel_dp);
1789
1790 /*
1791 @@ -5020,9 +5022,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
1792 intel_display_power_get(dev_priv, power_domain);
1793
1794 if (long_hpd) {
1795 - /* indicate that we need to restart link training */
1796 - intel_dp->train_set_valid = false;
1797 -
1798 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
1799 goto mst_fail;
1800
1801 diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
1802 index 0b8eefc2acc5..926a1e6ea2f6 100644
1803 --- a/drivers/gpu/drm/i915/intel_dp_link_training.c
1804 +++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
1805 @@ -85,8 +85,7 @@ static bool
1806 intel_dp_reset_link_train(struct intel_dp *intel_dp,
1807 uint8_t dp_train_pat)
1808 {
1809 - if (!intel_dp->train_set_valid)
1810 - memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1811 + memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
1812 intel_dp_set_signal_levels(intel_dp);
1813 return intel_dp_set_link_train(intel_dp, dp_train_pat);
1814 }
1815 @@ -161,22 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
1816 break;
1817 }
1818
1819 - /*
1820 - * if we used previously trained voltage and pre-emphasis values
1821 - * and we don't get clock recovery, reset link training values
1822 - */
1823 - if (intel_dp->train_set_valid) {
1824 - DRM_DEBUG_KMS("clock recovery not ok, reset");
1825 - /* clear the flag as we are not reusing train set */
1826 - intel_dp->train_set_valid = false;
1827 - if (!intel_dp_reset_link_train(intel_dp,
1828 - DP_TRAINING_PATTERN_1 |
1829 - DP_LINK_SCRAMBLING_DISABLE)) {
1830 - DRM_ERROR("failed to enable link training\n");
1831 - return;
1832 - }
1833 - continue;
1834 - }
1835
1836 /* Check to see if we've tried the max voltage */
1837 for (i = 0; i < intel_dp->lane_count; i++)
1838 @@ -284,7 +267,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1839 /* Make sure clock is still ok */
1840 if (!drm_dp_clock_recovery_ok(link_status,
1841 intel_dp->lane_count)) {
1842 - intel_dp->train_set_valid = false;
1843 intel_dp_link_training_clock_recovery(intel_dp);
1844 intel_dp_set_link_train(intel_dp,
1845 training_pattern |
1846 @@ -301,7 +283,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1847
1848 /* Try 5 times, then try clock recovery if that fails */
1849 if (tries > 5) {
1850 - intel_dp->train_set_valid = false;
1851 intel_dp_link_training_clock_recovery(intel_dp);
1852 intel_dp_set_link_train(intel_dp,
1853 training_pattern |
1854 @@ -322,10 +303,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
1855
1856 intel_dp_set_idle_link_train(intel_dp);
1857
1858 - if (channel_eq) {
1859 - intel_dp->train_set_valid = true;
1860 + if (channel_eq)
1861 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
1862 - }
1863 }
1864
1865 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
1866 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
1867 index 3a30b37d6885..8dd2cc56451f 100644
1868 --- a/drivers/gpu/drm/i915/intel_drv.h
1869 +++ b/drivers/gpu/drm/i915/intel_drv.h
1870 @@ -811,8 +811,6 @@ struct intel_dp {
1871 /* This is called before a link training is starterd */
1872 void (*prepare_link_retrain)(struct intel_dp *intel_dp);
1873
1874 - bool train_set_valid;
1875 -
1876 /* Displayport compliance testing */
1877 unsigned long compliance_test_type;
1878 unsigned long compliance_test_data;
1879 diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
1880 index 0f0492f4a357..28f4407722a1 100644
1881 --- a/drivers/gpu/drm/i915/intel_fbc.c
1882 +++ b/drivers/gpu/drm/i915/intel_fbc.c
1883 @@ -823,8 +823,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
1884 {
1885 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1886 struct intel_fbc *fbc = &dev_priv->fbc;
1887 - bool enable_by_default = IS_HASWELL(dev_priv) ||
1888 - IS_BROADWELL(dev_priv);
1889 + bool enable_by_default = IS_BROADWELL(dev_priv);
1890
1891 if (intel_vgpu_active(dev_priv->dev)) {
1892 fbc->no_fbc_reason = "VGPU is active";
1893 diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
1894 index 14e64e08909e..d347dca17267 100644
1895 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c
1896 +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
1897 @@ -182,7 +182,7 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1898 }
1899 }
1900
1901 - fvv = pllreffreq * testn / testm;
1902 + fvv = pllreffreq * (n + 1) / (m + 1);
1903 fvv = (fvv - 800000) / 50000;
1904
1905 if (fvv > 15)
1906 @@ -202,6 +202,14 @@ static int mga_g200se_set_plls(struct mga_device *mdev, long clock)
1907 WREG_DAC(MGA1064_PIX_PLLC_M, m);
1908 WREG_DAC(MGA1064_PIX_PLLC_N, n);
1909 WREG_DAC(MGA1064_PIX_PLLC_P, p);
1910 +
1911 + if (mdev->unique_rev_id >= 0x04) {
1912 + WREG_DAC(0x1a, 0x09);
1913 + msleep(20);
1914 + WREG_DAC(0x1a, 0x01);
1915 +
1916 + }
1917 +
1918 return 0;
1919 }
1920
1921 diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
1922 index db10c11f0595..c5a6ebd5a478 100644
1923 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
1924 +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h
1925 @@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask,
1926 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *);
1927
1928 struct nvbios_ocfg {
1929 - u16 match;
1930 + u8 proto;
1931 + u8 flags;
1932 u16 clkcmp[2];
1933 };
1934
1935 @@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx,
1936 u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
1937 u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx,
1938 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
1939 -u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type,
1940 +u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags,
1941 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *);
1942 u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz);
1943 #endif
1944 diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1945 index 59f27e774acb..e40a1b07a014 100644
1946 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1947 +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
1948 @@ -557,6 +557,8 @@ nouveau_fbcon_init(struct drm_device *dev)
1949 if (ret)
1950 goto fini;
1951
1952 + if (fbcon->helper.fbdev)
1953 + fbcon->helper.fbdev->pixmap.buf_align = 4;
1954 return 0;
1955
1956 fini:
1957 diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1958 index 789dc2993b0d..8f715feadf56 100644
1959 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
1960 +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
1961 @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1962 uint32_t fg;
1963 uint32_t bg;
1964 uint32_t dsize;
1965 - uint32_t width;
1966 uint32_t *data = (uint32_t *)image->data;
1967 int ret;
1968
1969 @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1970 if (ret)
1971 return ret;
1972
1973 - width = ALIGN(image->width, 8);
1974 - dsize = ALIGN(width * image->height, 32) >> 5;
1975 -
1976 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
1977 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
1978 fg = ((uint32_t *) info->pseudo_palette)[image->fg_color];
1979 @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1980 ((image->dx + image->width) & 0xffff));
1981 OUT_RING(chan, bg);
1982 OUT_RING(chan, fg);
1983 - OUT_RING(chan, (image->height << 16) | width);
1984 + OUT_RING(chan, (image->height << 16) | image->width);
1985 OUT_RING(chan, (image->height << 16) | image->width);
1986 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
1987
1988 + dsize = ALIGN(image->width * image->height, 32) >> 5;
1989 while (dsize) {
1990 int iter_len = dsize > 128 ? 128 : dsize;
1991
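The nv04_fbcon_imageblit() change above (and the matching nv50/nvc0 hunks below) stops padding the per-scanline width before sizing the upload. The apparent reasoning, hedged: the source glyph bitmap holds image->width * image->height bits with no per-line padding, so deriving the dword count from ALIGN(width, 8 or 32) read past the end of the buffer; only the grand total needs rounding up to a 32-bit boundary:

	/* dwords of 1-bpp image data actually present in image->data */
	dsize = ALIGN(image->width * image->height, 32) >> 5;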
1992 diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
1993 index e05499d6ed83..a4e259a00430 100644
1994 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
1995 +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
1996 @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
1997 struct nouveau_fbdev *nfbdev = info->par;
1998 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
1999 struct nouveau_channel *chan = drm->channel;
2000 - uint32_t width, dwords, *data = (uint32_t *)image->data;
2001 + uint32_t dwords, *data = (uint32_t *)image->data;
2002 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
2003 uint32_t *palette = info->pseudo_palette;
2004 int ret;
2005 @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2006 if (ret)
2007 return ret;
2008
2009 - width = ALIGN(image->width, 32);
2010 - dwords = (width * image->height) >> 5;
2011 -
2012 BEGIN_NV04(chan, NvSub2D, 0x0814, 2);
2013 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
2014 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
2015 @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2016 OUT_RING(chan, 0);
2017 OUT_RING(chan, image->dy);
2018
2019 + dwords = ALIGN(image->width * image->height, 32) >> 5;
2020 while (dwords) {
2021 int push = dwords > 2047 ? 2047 : dwords;
2022
2023 diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2024 index c97395b4a312..f28315e865a5 100644
2025 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2026 +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
2027 @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2028 struct nouveau_fbdev *nfbdev = info->par;
2029 struct nouveau_drm *drm = nouveau_drm(nfbdev->dev);
2030 struct nouveau_channel *chan = drm->channel;
2031 - uint32_t width, dwords, *data = (uint32_t *)image->data;
2032 + uint32_t dwords, *data = (uint32_t *)image->data;
2033 uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel));
2034 uint32_t *palette = info->pseudo_palette;
2035 int ret;
2036 @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2037 if (ret)
2038 return ret;
2039
2040 - width = ALIGN(image->width, 32);
2041 - dwords = (width * image->height) >> 5;
2042 -
2043 BEGIN_NVC0(chan, NvSub2D, 0x0814, 2);
2044 if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
2045 info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
2046 @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
2047 OUT_RING (chan, 0);
2048 OUT_RING (chan, image->dy);
2049
2050 + dwords = ALIGN(image->width * image->height, 32) >> 5;
2051 while (dwords) {
2052 int push = dwords > 2047 ? 2047 : dwords;
2053
2054 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
2055 index 18fab3973ce5..62ad0300cfa5 100644
2056 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
2057 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
2058 @@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
2059 .fini = nvkm_device_pci_fini,
2060 .resource_addr = nvkm_device_pci_resource_addr,
2061 .resource_size = nvkm_device_pci_resource_size,
2062 - .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
2063 + .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
2064 };
2065
2066 int
2067 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
2068 index a74c5dd27dc0..e2a64ed14b22 100644
2069 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
2070 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
2071 @@ -18,6 +18,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o
2072 nvkm-y += nvkm/engine/disp/sornv50.o
2073 nvkm-y += nvkm/engine/disp/sorg94.o
2074 nvkm-y += nvkm/engine/disp/sorgf119.o
2075 +nvkm-y += nvkm/engine/disp/sorgm107.o
2076 nvkm-y += nvkm/engine/disp/sorgm200.o
2077 nvkm-y += nvkm/engine/disp/dport.o
2078
2079 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
2080 index f0314664349c..5dd34382f55a 100644
2081 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
2082 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
2083 @@ -76,6 +76,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
2084 mask |= 0x0001 << or;
2085 mask |= 0x0100 << head;
2086
2087 +
2088 list_for_each_entry(outp, &disp->base.outp, head) {
2089 if ((outp->info.hasht & 0xff) == type &&
2090 (outp->info.hashm & mask) == mask) {
2091 @@ -155,25 +156,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
2092 if (!outp)
2093 return NULL;
2094
2095 + *conf = (ctrl & 0x00000f00) >> 8;
2096 switch (outp->info.type) {
2097 case DCB_OUTPUT_TMDS:
2098 - *conf = (ctrl & 0x00000f00) >> 8;
2099 if (*conf == 5)
2100 *conf |= 0x0100;
2101 break;
2102 case DCB_OUTPUT_LVDS:
2103 - *conf = disp->sor.lvdsconf;
2104 - break;
2105 - case DCB_OUTPUT_DP:
2106 - *conf = (ctrl & 0x00000f00) >> 8;
2107 + *conf |= disp->sor.lvdsconf;
2108 break;
2109 - case DCB_OUTPUT_ANALOG:
2110 default:
2111 - *conf = 0x00ff;
2112 break;
2113 }
2114
2115 - data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
2116 + data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
2117 + &ver, &hdr, &cnt, &len, &info2);
2118 if (data && id < 0xff) {
2119 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
2120 if (data) {
2121 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
2122 index b6944142d616..f4b9cf8574be 100644
2123 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
2124 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
2125 @@ -36,7 +36,7 @@ gm107_disp = {
2126 .outp.internal.crt = nv50_dac_output_new,
2127 .outp.internal.tmds = nv50_sor_output_new,
2128 .outp.internal.lvds = nv50_sor_output_new,
2129 - .outp.internal.dp = gf119_sor_dp_new,
2130 + .outp.internal.dp = gm107_sor_dp_new,
2131 .dac.nr = 3,
2132 .dac.power = nv50_dac_power,
2133 .dac.sense = nv50_dac_sense,
2134 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
2135 index 4226d2153b9c..fcb1b0c46d64 100644
2136 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
2137 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
2138 @@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
2139 if (!outp)
2140 return NULL;
2141
2142 + *conf = (ctrl & 0x00000f00) >> 8;
2143 if (outp->info.location == 0) {
2144 switch (outp->info.type) {
2145 case DCB_OUTPUT_TMDS:
2146 - *conf = (ctrl & 0x00000f00) >> 8;
2147 if (*conf == 5)
2148 *conf |= 0x0100;
2149 break;
2150 case DCB_OUTPUT_LVDS:
2151 - *conf = disp->sor.lvdsconf;
2152 + *conf |= disp->sor.lvdsconf;
2153 break;
2154 - case DCB_OUTPUT_DP:
2155 - *conf = (ctrl & 0x00000f00) >> 8;
2156 - break;
2157 - case DCB_OUTPUT_ANALOG:
2158 default:
2159 - *conf = 0x00ff;
2160 break;
2161 }
2162 } else {
2163 @@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
2164 pclk = pclk / 2;
2165 }
2166
2167 - data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2);
2168 + data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8,
2169 + &ver, &hdr, &cnt, &len, &info2);
2170 if (data && id < 0xff) {
2171 data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk);
2172 if (data) {
2173 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
2174 index e9067ba4e179..4e983f6d7032 100644
2175 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
2176 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h
2177 @@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
2178 int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2179 struct nvkm_output **);
2180 int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
2181 +int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int);
2182
2183 -int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2184 - struct nvkm_output **);
2185 +int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2186 + struct nvkm_output **);
2187 +int gm107_sor_dp_pattern(struct nvkm_output_dp *, int);
2188 +
2189 +int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *,
2190 + struct nvkm_output **);
2191 #endif
2192 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
2193 index b4b41b135643..49bd5da194e1 100644
2194 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
2195 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c
2196 @@ -40,8 +40,8 @@ static int
2197 gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
2198 {
2199 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2200 - const u32 loff = gf119_sor_loff(outp);
2201 - nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern);
2202 + const u32 soff = gf119_sor_soff(outp);
2203 + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, 0x01010101 * pattern);
2204 return 0;
2205 }
2206
2207 @@ -64,7 +64,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
2208 return 0;
2209 }
2210
2211 -static int
2212 +int
2213 gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
2214 int ln, int vs, int pe, int pc)
2215 {
2216 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
2217 new file mode 100644
2218 index 000000000000..37790b2617c5
2219 --- /dev/null
2220 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c
2221 @@ -0,0 +1,53 @@
2222 +/*
2223 + * Copyright 2016 Red Hat Inc.
2224 + *
2225 + * Permission is hereby granted, free of charge, to any person obtaining a
2226 + * copy of this software and associated documentation files (the "Software"),
2227 + * to deal in the Software without restriction, including without limitation
2228 + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
2229 + * and/or sell copies of the Software, and to permit persons to whom the
2230 + * Software is furnished to do so, subject to the following conditions:
2231 + *
2232 + * The above copyright notice and this permission notice shall be included in
2233 + * all copies or substantial portions of the Software.
2234 + *
2235 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
2236 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
2237 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
2238 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
2239 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
2240 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
2241 + * OTHER DEALINGS IN THE SOFTWARE.
2242 + *
2243 + * Authors: Ben Skeggs <bskeggs@redhat.com>
2244 + */
2245 +#include "nv50.h"
2246 +#include "outpdp.h"
2247 +
2248 +int
2249 +gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
2250 +{
2251 + struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2252 + const u32 soff = outp->base.or * 0x800;
2253 + const u32 data = 0x01010101 * pattern;
2254 + if (outp->base.info.sorconf.link & 1)
2255 + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
2256 + else
2257 + nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
2258 + return 0;
2259 +}
2260 +
2261 +static const struct nvkm_output_dp_func
2262 +gm107_sor_dp_func = {
2263 + .pattern = gm107_sor_dp_pattern,
2264 + .lnk_pwr = g94_sor_dp_lnk_pwr,
2265 + .lnk_ctl = gf119_sor_dp_lnk_ctl,
2266 + .drv_ctl = gf119_sor_dp_drv_ctl,
2267 +};
2268 +
2269 +int
2270 +gm107_sor_dp_new(struct nvkm_disp *disp, int index,
2271 + struct dcb_output *dcbE, struct nvkm_output **poutp)
2272 +{
2273 + return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp);
2274 +}
2275 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
2276 index 2cfbef9c344f..c44fa7ea672a 100644
2277 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
2278 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c
2279 @@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane)
2280 }
2281
2282 static int
2283 -gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
2284 -{
2285 - struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2286 - const u32 soff = gm200_sor_soff(outp);
2287 - const u32 data = 0x01010101 * pattern;
2288 - if (outp->base.info.sorconf.link & 1)
2289 - nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data);
2290 - else
2291 - nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data);
2292 - return 0;
2293 -}
2294 -
2295 -static int
2296 gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
2297 {
2298 struct nvkm_device *device = outp->base.disp->engine.subdev.device;
2299 @@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp,
2300
2301 static const struct nvkm_output_dp_func
2302 gm200_sor_dp_func = {
2303 - .pattern = gm200_sor_dp_pattern,
2304 + .pattern = gm107_sor_dp_pattern,
2305 .lnk_pwr = gm200_sor_dp_lnk_pwr,
2306 .lnk_ctl = gf119_sor_dp_lnk_ctl,
2307 .drv_ctl = gm200_sor_dp_drv_ctl,
2308 diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2309 index b2de290da16f..b0c721616c4e 100644
2310 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2311 +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
2312 @@ -942,22 +942,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc)
2313 }
2314
2315 static const struct nvkm_enum gf100_mp_warp_error[] = {
2316 - { 0x00, "NO_ERROR" },
2317 - { 0x01, "STACK_MISMATCH" },
2318 + { 0x01, "STACK_ERROR" },
2319 + { 0x02, "API_STACK_ERROR" },
2320 + { 0x03, "RET_EMPTY_STACK_ERROR" },
2321 + { 0x04, "PC_WRAP" },
2322 { 0x05, "MISALIGNED_PC" },
2323 - { 0x08, "MISALIGNED_GPR" },
2324 - { 0x09, "INVALID_OPCODE" },
2325 - { 0x0d, "GPR_OUT_OF_BOUNDS" },
2326 - { 0x0e, "MEM_OUT_OF_BOUNDS" },
2327 - { 0x0f, "UNALIGNED_MEM_ACCESS" },
2328 + { 0x06, "PC_OVERFLOW" },
2329 + { 0x07, "MISALIGNED_IMMC_ADDR" },
2330 + { 0x08, "MISALIGNED_REG" },
2331 + { 0x09, "ILLEGAL_INSTR_ENCODING" },
2332 + { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" },
2333 + { 0x0b, "ILLEGAL_INSTR_PARAM" },
2334 + { 0x0c, "INVALID_CONST_ADDR" },
2335 + { 0x0d, "OOR_REG" },
2336 + { 0x0e, "OOR_ADDR" },
2337 + { 0x0f, "MISALIGNED_ADDR" },
2338 { 0x10, "INVALID_ADDR_SPACE" },
2339 - { 0x11, "INVALID_PARAM" },
2340 + { 0x11, "ILLEGAL_INSTR_PARAM2" },
2341 + { 0x12, "INVALID_CONST_ADDR_LDC" },
2342 + { 0x13, "GEOMETRY_SM_ERROR" },
2343 + { 0x14, "DIVERGENT" },
2344 + { 0x15, "WARP_EXIT" },
2345 {}
2346 };
2347
2348 static const struct nvkm_bitfield gf100_mp_global_error[] = {
2349 + { 0x00000001, "SM_TO_SM_FAULT" },
2350 + { 0x00000002, "L1_ERROR" },
2351 { 0x00000004, "MULTIPLE_WARP_ERRORS" },
2352 - { 0x00000008, "OUT_OF_STACK_SPACE" },
2353 + { 0x00000008, "PHYSICAL_STACK_OVERFLOW" },
2354 + { 0x00000010, "BPT_INT" },
2355 + { 0x00000020, "BPT_PAUSE" },
2356 + { 0x00000040, "SINGLE_STEP_COMPLETE" },
2357 + { 0x20000000, "ECC_SEC_ERROR" },
2358 + { 0x40000000, "ECC_DED_ERROR" },
2359 + { 0x80000000, "TIMEOUT" },
2360 {}
2361 };
2362
2363 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
2364 index a5e92135cd77..9efb1b48cd54 100644
2365 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
2366 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c
2367 @@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
2368 {
2369 u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len);
2370 if (data) {
2371 - info->match = nvbios_rd16(bios, data + 0x00);
2372 + info->proto = nvbios_rd08(bios, data + 0x00);
2373 + info->flags = nvbios_rd16(bios, data + 0x01);
2374 info->clkcmp[0] = nvbios_rd16(bios, data + 0x02);
2375 info->clkcmp[1] = nvbios_rd16(bios, data + 0x04);
2376 }
2377 @@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
2378 }
2379
2380 u16
2381 -nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type,
2382 +nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags,
2383 u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info)
2384 {
2385 u16 data, idx = 0;
2386 while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) {
2387 - if (info->match == type)
2388 + if ((info->proto == proto || info->proto == 0xff) &&
2389 + (info->flags == flags))
2390 break;
2391 }
2392 return data;
2393 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
2394 index e292f5679418..389fb13a1998 100644
2395 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
2396 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
2397 @@ -69,11 +69,11 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
2398 }
2399
2400 static void
2401 -gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s)
2402 +gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
2403 {
2404 struct nvkm_subdev *subdev = &ltc->subdev;
2405 struct nvkm_device *device = subdev->device;
2406 - u32 base = 0x140000 + (c * 0x2000) + (s * 0x200);
2407 + u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
2408 u32 stat = nvkm_rd32(device, base + 0x00c);
2409
2410 if (stat) {
2411 @@ -92,7 +92,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc)
2412 while (mask) {
2413 u32 s, c = __ffs(mask);
2414 for (s = 0; s < ltc->lts_nr; s++)
2415 - gm107_ltc_lts_isr(ltc, c, s);
2416 + gm107_ltc_intr_lts(ltc, c, s);
2417 mask &= ~(1 << c);
2418 }
2419 }
2420 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
2421 index 2a29bfd5125a..e18e0dc19ec8 100644
2422 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
2423 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c
2424 @@ -46,7 +46,7 @@ static const struct nvkm_ltc_func
2425 gm200_ltc = {
2426 .oneinit = gm200_ltc_oneinit,
2427 .init = gm200_ltc_init,
2428 - .intr = gm107_ltc_intr, /*XXX: not validated */
2429 + .intr = gm107_ltc_intr,
2430 .cbc_clear = gm107_ltc_cbc_clear,
2431 .cbc_wait = gm107_ltc_cbc_wait,
2432 .zbc = 16,
2433 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
2434 index d0826fb0434c..cb2986876738 100644
2435 --- a/drivers/gpu/drm/radeon/radeon_device.c
2436 +++ b/drivers/gpu/drm/radeon/radeon_device.c
2437 @@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
2438 /*
2439 * GPU helpers function.
2440 */
2441 +
2442 +/**
2443 + * radeon_device_is_virtual - check if we are running in a virtual environment
2444 + *
2445 + * Check if the asic has been passed through to a VM (all asics).
2446 + * Used at driver startup.
2447 + * Returns true if virtual or false if not.
2448 + */
2449 +static bool radeon_device_is_virtual(void)
2450 +{
2451 +#ifdef CONFIG_X86
2452 + return boot_cpu_has(X86_FEATURE_HYPERVISOR);
2453 +#else
2454 + return false;
2455 +#endif
2456 +}
2457 +
2458 /**
2459 * radeon_card_posted - check if the hw has already been initialized
2460 *
2461 @@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
2462 {
2463 uint32_t reg;
2464
2465 + /* for pass through, always force asic_init */
2466 + if (radeon_device_is_virtual())
2467 + return false;
2468 +
2469 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
2470 if (efi_enabled(EFI_BOOT) &&
2471 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
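radeon_card_posted() now short-circuits for virtualized setups, since a guest cannot rely on the host BIOS having POSTed a passed-through GPU. The detection rests on boot_cpu_has(X86_FEATURE_HYPERVISOR), which reflects the CPUID hypervisor-present bit (leaf 1, ECX bit 31) that well-behaved VMMs set. A minimal sketch of the same check outside this driver:

	#ifdef CONFIG_X86
		if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
			pr_info("running under a hypervisor\n");
	#endif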
2472 diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
2473 index e3daafa1be13..3e7c9ac50ccd 100644
2474 --- a/drivers/gpu/drm/ttm/ttm_bo.c
2475 +++ b/drivers/gpu/drm/ttm/ttm_bo.c
2476 @@ -1016,9 +1016,9 @@ out_unlock:
2477 return ret;
2478 }
2479
2480 -static bool ttm_bo_mem_compat(struct ttm_placement *placement,
2481 - struct ttm_mem_reg *mem,
2482 - uint32_t *new_flags)
2483 +bool ttm_bo_mem_compat(struct ttm_placement *placement,
2484 + struct ttm_mem_reg *mem,
2485 + uint32_t *new_flags)
2486 {
2487 int i;
2488
2489 @@ -1050,6 +1050,7 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
2490
2491 return false;
2492 }
2493 +EXPORT_SYMBOL(ttm_bo_mem_compat);
2494
2495 int ttm_bo_validate(struct ttm_buffer_object *bo,
2496 struct ttm_placement *placement,
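Un-staticing and exporting ttm_bo_mem_compat() enables the vmwgfx hunks that follow: a buffer that is already pinned must not be migrated, so instead of calling ttm_bo_validate() (which may move it), the driver merely checks whether the current placement already satisfies the request. The call pattern, as adopted below:

	uint32_t new_flags;

	if (buf->pin_count > 0)
		/* pinned: only verify compatibility, never move */
		ret = ttm_bo_mem_compat(placement, &bo->mem, &new_flags) ?
			0 : -EINVAL;
	else
		ret = ttm_bo_validate(bo, placement, interruptible, false);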
2497 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
2498 index 299925a1f6c6..eadc981ee79a 100644
2499 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
2500 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c
2501 @@ -49,6 +49,7 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
2502 {
2503 struct ttm_buffer_object *bo = &buf->base;
2504 int ret;
2505 + uint32_t new_flags;
2506
2507 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
2508 if (unlikely(ret != 0))
2509 @@ -60,7 +61,12 @@ int vmw_dmabuf_pin_in_placement(struct vmw_private *dev_priv,
2510 if (unlikely(ret != 0))
2511 goto err;
2512
2513 - ret = ttm_bo_validate(bo, placement, interruptible, false);
2514 + if (buf->pin_count > 0)
2515 + ret = ttm_bo_mem_compat(placement, &bo->mem,
2516 + &new_flags) == true ? 0 : -EINVAL;
2517 + else
2518 + ret = ttm_bo_validate(bo, placement, interruptible, false);
2519 +
2520 if (!ret)
2521 vmw_bo_pin_reserved(buf, true);
2522
2523 @@ -91,6 +97,7 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
2524 {
2525 struct ttm_buffer_object *bo = &buf->base;
2526 int ret;
2527 + uint32_t new_flags;
2528
2529 ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
2530 if (unlikely(ret != 0))
2531 @@ -102,6 +109,12 @@ int vmw_dmabuf_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
2532 if (unlikely(ret != 0))
2533 goto err;
2534
2535 + if (buf->pin_count > 0) {
2536 + ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, &bo->mem,
2537 + &new_flags) == true ? 0 : -EINVAL;
2538 + goto out_unreserve;
2539 + }
2540 +
2541 ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, interruptible,
2542 false);
2543 if (likely(ret == 0) || ret == -ERESTARTSYS)
2544 @@ -161,6 +174,7 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
2545 struct ttm_placement placement;
2546 struct ttm_place place;
2547 int ret = 0;
2548 + uint32_t new_flags;
2549
2550 place = vmw_vram_placement.placement[0];
2551 place.lpfn = bo->num_pages;
2552 @@ -185,10 +199,15 @@ int vmw_dmabuf_pin_in_start_of_vram(struct vmw_private *dev_priv,
2553 */
2554 if (bo->mem.mem_type == TTM_PL_VRAM &&
2555 bo->mem.start < bo->num_pages &&
2556 - bo->mem.start > 0)
2557 + bo->mem.start > 0 &&
2558 + buf->pin_count == 0)
2559 (void) ttm_bo_validate(bo, &vmw_sys_placement, false, false);
2560
2561 - ret = ttm_bo_validate(bo, &placement, interruptible, false);
2562 + if (buf->pin_count > 0)
2563 + ret = ttm_bo_mem_compat(&placement, &bo->mem,
2564 + &new_flags) == true ? 0 : -EINVAL;
2565 + else
2566 + ret = ttm_bo_validate(bo, &placement, interruptible, false);
2567
2568 /* For some reason we didn't end up at the start of vram */
2569 WARN_ON(ret == 0 && bo->offset != 0);
2570 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2571 index f2cf9231872a..2a505464c50f 100644
2572 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2573 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
2574 @@ -227,6 +227,7 @@ static int vmw_force_iommu;
2575 static int vmw_restrict_iommu;
2576 static int vmw_force_coherent;
2577 static int vmw_restrict_dma_mask;
2578 +static int vmw_assume_16bpp;
2579
2580 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
2581 static void vmw_master_init(struct vmw_master *);
2582 @@ -243,6 +244,8 @@ MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
2583 module_param_named(force_coherent, vmw_force_coherent, int, 0600);
2584 MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
2585 module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
2586 +MODULE_PARM_DESC(assume_16bpp, "Assume 16-bpp when filtering modes");
2587 +module_param_named(assume_16bpp, vmw_assume_16bpp, int, 0600);
2588
2589
2590 static void vmw_print_capabilities(uint32_t capabilities)
2591 @@ -653,6 +656,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
2592 dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
2593 dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
2594
2595 + dev_priv->assume_16bpp = !!vmw_assume_16bpp;
2596 +
2597 dev_priv->enable_fb = enable_fbdev;
2598
2599 vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
2600 @@ -699,6 +704,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
2601 vmw_read(dev_priv,
2602 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);
2603
2604 + /*
2605 + * Workaround for low memory 2D VMs to compensate for the
2606 + * allocation taken by fbdev
2607 + */
2608 + if (!(dev_priv->capabilities & SVGA_CAP_3D))
2609 + mem_size *= 2;
2610 +
2611 dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
2612 dev_priv->prim_bb_mem =
2613 vmw_read(dev_priv,
2614 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2615 index 6db358a85b46..cab0c54b46ae 100644
2616 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2617 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
2618 @@ -386,6 +386,7 @@ struct vmw_private {
2619 spinlock_t hw_lock;
2620 spinlock_t cap_lock;
2621 bool has_dx;
2622 + bool assume_16bpp;
2623
2624 /*
2625 * VGA registers.
2626 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
2627 index 679a4cb98ee3..d2d93959b119 100644
2628 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
2629 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
2630 @@ -517,28 +517,6 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info)
2631
2632 par->set_fb = &vfb->base;
2633
2634 - if (!par->bo_ptr) {
2635 - /*
2636 - * Pin before mapping. Since we don't know in what placement
2637 - * to pin, call into KMS to do it for us.
2638 - */
2639 - ret = vfb->pin(vfb);
2640 - if (ret) {
2641 - DRM_ERROR("Could not pin the fbdev framebuffer.\n");
2642 - return ret;
2643 - }
2644 -
2645 - ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
2646 - par->vmw_bo->base.num_pages, &par->map);
2647 - if (ret) {
2648 - vfb->unpin(vfb);
2649 - DRM_ERROR("Could not map the fbdev framebuffer.\n");
2650 - return ret;
2651 - }
2652 -
2653 - par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
2654 - }
2655 -
2656 return 0;
2657 }
2658
2659 @@ -601,6 +579,31 @@ static int vmw_fb_set_par(struct fb_info *info)
2660 if (ret)
2661 goto out_unlock;
2662
2663 + if (!par->bo_ptr) {
2664 + struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(set.fb);
2665 +
2666 + /*
2667 + * Pin before mapping. Since we don't know in what placement
2668 + * to pin, call into KMS to do it for us.
2669 + */
2670 + ret = vfb->pin(vfb);
2671 + if (ret) {
2672 + DRM_ERROR("Could not pin the fbdev framebuffer.\n");
2673 + goto out_unlock;
2674 + }
2675 +
2676 + ret = ttm_bo_kmap(&par->vmw_bo->base, 0,
2677 + par->vmw_bo->base.num_pages, &par->map);
2678 + if (ret) {
2679 + vfb->unpin(vfb);
2680 + DRM_ERROR("Could not map the fbdev framebuffer.\n");
2681 + goto out_unlock;
2682 + }
2683 +
2684 + par->bo_ptr = ttm_kmap_obj_virtual(&par->map, &par->bo_iowrite);
2685 + }
2686 +
2687 +
2688 vmw_fb_dirty_mark(par, par->fb_x, par->fb_y,
2689 par->set_fb->width, par->set_fb->height);
2690
2691 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2692 index b07543b5cea4..6ccd61d37b78 100644
2693 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2694 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
2695 @@ -1553,14 +1553,10 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
2696 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
2697 };
2698 int i;
2699 - u32 assumed_bpp = 2;
2700 + u32 assumed_bpp = 4;
2701
2702 - /*
2703 - * If using screen objects, then assume 32-bpp because that's what the
2704 - * SVGA device is assuming
2705 - */
2706 - if (dev_priv->active_display_unit == vmw_du_screen_object)
2707 - assumed_bpp = 4;
2708 + if (dev_priv->assume_16bpp)
2709 + assumed_bpp = 2;
2710
2711 if (dev_priv->active_display_unit == vmw_du_screen_target) {
2712 max_width = min(max_width, dev_priv->stdu_max_width);
2713 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
2714 index 9ca818fb034c..41932a7c4f79 100644
2715 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
2716 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
2717 @@ -399,8 +399,10 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
2718
2719 WARN_ON_ONCE(!stdu->defined);
2720
2721 - if (!vfb->dmabuf && new_fb->width == mode->hdisplay &&
2722 - new_fb->height == mode->vdisplay)
2723 + new_vfbs = (vfb->dmabuf) ? NULL : vmw_framebuffer_to_vfbs(new_fb);
2724 +
2725 + if (new_vfbs && new_vfbs->surface->base_size.width == mode->hdisplay &&
2726 + new_vfbs->surface->base_size.height == mode->vdisplay)
2727 new_content_type = SAME_AS_DISPLAY;
2728 else if (vfb->dmabuf)
2729 new_content_type = SEPARATE_DMA;
2730 @@ -444,7 +446,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
2731 content_srf.mip_levels[0] = 1;
2732 content_srf.multisample_count = 0;
2733 } else {
2734 - new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
2735 content_srf = *new_vfbs->surface;
2736 }
2737
2738 @@ -464,7 +465,6 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv,
2739 return ret;
2740 }
2741 } else if (new_content_type == SAME_AS_DISPLAY) {
2742 - new_vfbs = vmw_framebuffer_to_vfbs(new_fb);
2743 new_display_srf = vmw_surface_reference(new_vfbs->surface);
2744 }
2745
2746 diff --git a/drivers/hid/hid-elo.c b/drivers/hid/hid-elo.c
2747 index aad8c162a825..0cd4f7216239 100644
2748 --- a/drivers/hid/hid-elo.c
2749 +++ b/drivers/hid/hid-elo.c
2750 @@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
2751 struct elo_priv *priv = hid_get_drvdata(hdev);
2752
2753 hid_hw_stop(hdev);
2754 - flush_workqueue(wq);
2755 + cancel_delayed_work_sync(&priv->work);
2756 kfree(priv);
2757 }
2758
2759 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
2760 index c741f5e50a66..0088979f7281 100644
2761 --- a/drivers/hid/hid-multitouch.c
2762 +++ b/drivers/hid/hid-multitouch.c
2763 @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL");
2764 #define MT_QUIRK_ALWAYS_VALID (1 << 4)
2765 #define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
2766 #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
2767 +#define MT_QUIRK_CONFIDENCE (1 << 7)
2768 #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
2769 #define MT_QUIRK_NO_AREA (1 << 9)
2770 #define MT_QUIRK_IGNORE_DUPLICATES (1 << 10)
2771 @@ -78,6 +79,7 @@ struct mt_slot {
2772 __s32 contactid; /* the device ContactID assigned to this slot */
2773 bool touch_state; /* is the touch valid? */
2774 bool inrange_state; /* is the finger in proximity of the sensor? */
2775 + bool confidence_state; /* is the touch made by a finger? */
2776 };
2777
2778 struct mt_class {
2779 @@ -503,10 +505,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
2780 return 1;
2781 case HID_DG_CONFIDENCE:
2782 if (cls->name == MT_CLS_WIN_8 &&
2783 - field->application == HID_DG_TOUCHPAD) {
2784 - cls->quirks &= ~MT_QUIRK_ALWAYS_VALID;
2785 - cls->quirks |= MT_QUIRK_VALID_IS_CONFIDENCE;
2786 - }
2787 + field->application == HID_DG_TOUCHPAD)
2788 + cls->quirks |= MT_QUIRK_CONFIDENCE;
2789 mt_store_field(usage, td, hi);
2790 return 1;
2791 case HID_DG_TIPSWITCH:
2792 @@ -619,6 +619,7 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
2793 return;
2794
2795 if (td->curvalid || (td->mtclass.quirks & MT_QUIRK_ALWAYS_VALID)) {
2796 + int active;
2797 int slotnum = mt_compute_slot(td, input);
2798 struct mt_slot *s = &td->curdata;
2799 struct input_mt *mt = input->mt;
2800 @@ -633,10 +634,14 @@ static void mt_complete_slot(struct mt_device *td, struct input_dev *input)
2801 return;
2802 }
2803
2804 + if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
2805 + s->confidence_state = 1;
2806 + active = (s->touch_state || s->inrange_state) &&
2807 + s->confidence_state;
2808 +
2809 input_mt_slot(input, slotnum);
2810 - input_mt_report_slot_state(input, MT_TOOL_FINGER,
2811 - s->touch_state || s->inrange_state);
2812 - if (s->touch_state || s->inrange_state) {
2813 + input_mt_report_slot_state(input, MT_TOOL_FINGER, active);
2814 + if (active) {
2815 /* this finger is in proximity of the sensor */
2816 int wide = (s->w > s->h);
2817 /* divided by two to match visual scale of touch */
2818 @@ -701,6 +706,8 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
2819 td->curdata.touch_state = value;
2820 break;
2821 case HID_DG_CONFIDENCE:
2822 + if (quirks & MT_QUIRK_CONFIDENCE)
2823 + td->curdata.confidence_state = value;
2824 if (quirks & MT_QUIRK_VALID_IS_CONFIDENCE)
2825 td->curvalid = value;
2826 break;
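The hid-multitouch hunks replace the old rewrite of the validity quirks with a dedicated MT_QUIRK_CONFIDENCE flag and a per-slot confidence_state, so a Win8 touchpad contact is reported only while the device asserts it is a real finger rather than, say, a palm; devices without the quirk get the bit forced on. The resulting reporting rule, condensed:

	if (!(td->mtclass.quirks & MT_QUIRK_CONFIDENCE))
		s->confidence_state = 1;     /* no confidence usage: trust it */

	active = (s->touch_state || s->inrange_state) && s->confidence_state;
	input_mt_report_slot_state(input, MT_TOOL_FINGER, active);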
2827 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
2828 index 2f1ddca6f2e0..700145b15088 100644
2829 --- a/drivers/hid/usbhid/hiddev.c
2830 +++ b/drivers/hid/usbhid/hiddev.c
2831 @@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
2832 goto inval;
2833 } else if (uref->usage_index >= field->report_count)
2834 goto inval;
2835 -
2836 - else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
2837 - (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
2838 - uref->usage_index + uref_multi->num_values > field->report_count))
2839 - goto inval;
2840 }
2841
2842 + if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
2843 + (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
2844 + uref->usage_index + uref_multi->num_values > field->report_count))
2845 + goto inval;
2846 +
2847 switch (cmd) {
2848 case HIDIOCGUSAGE:
2849 uref->value = field->value[uref->usage_index];
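The hiddev change is a bounds-check fix (commonly tracked as CVE-2016-5829): the multi-usage range check used to sit inside an else chain that not every request path reached, letting HIDIOCGUSAGES/HIDIOCSUSAGES index past field->value[]. Hoisted out of the chain, it now runs before every usage access:

	if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
	    (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
	     uref->usage_index + uref_multi->num_values > field->report_count))
		goto inval;          /* reject out-of-range multi access */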
2850 diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
2851 index c43318d3416e..a9356a3dea92 100644
2852 --- a/drivers/hwmon/dell-smm-hwmon.c
2853 +++ b/drivers/hwmon/dell-smm-hwmon.c
2854 @@ -66,11 +66,13 @@
2855
2856 static DEFINE_MUTEX(i8k_mutex);
2857 static char bios_version[4];
2858 +static char bios_machineid[16];
2859 static struct device *i8k_hwmon_dev;
2860 static u32 i8k_hwmon_flags;
2861 static uint i8k_fan_mult = I8K_FAN_MULT;
2862 static uint i8k_pwm_mult;
2863 static uint i8k_fan_max = I8K_FAN_HIGH;
2864 +static bool disallow_fan_type_call;
2865
2866 #define I8K_HWMON_HAVE_TEMP1 (1 << 0)
2867 #define I8K_HWMON_HAVE_TEMP2 (1 << 1)
2868 @@ -94,13 +96,13 @@ module_param(ignore_dmi, bool, 0);
2869 MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
2870
2871 #if IS_ENABLED(CONFIG_I8K)
2872 -static bool restricted;
2873 +static bool restricted = true;
2874 module_param(restricted, bool, 0);
2875 -MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
2876 +MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
2877
2878 static bool power_status;
2879 module_param(power_status, bool, 0600);
2880 -MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
2881 +MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
2882 #endif
2883
2884 static uint fan_mult;
2885 @@ -235,14 +237,28 @@ static int i8k_get_fan_speed(int fan)
2886 /*
2887 * Read the fan type.
2888 */
2889 -static int i8k_get_fan_type(int fan)
2890 +static int _i8k_get_fan_type(int fan)
2891 {
2892 struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
2893
2894 + if (disallow_fan_type_call)
2895 + return -EINVAL;
2896 +
2897 regs.ebx = fan & 0xff;
2898 return i8k_smm(&regs) ? : regs.eax & 0xff;
2899 }
2900
2901 +static int i8k_get_fan_type(int fan)
2902 +{
2903 + /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
2904 + static int types[2] = { INT_MIN, INT_MIN };
2905 +
2906 + if (types[fan] == INT_MIN)
2907 + types[fan] = _i8k_get_fan_type(fan);
2908 +
2909 + return types[fan];
2910 +}
2911 +
2912 /*
2913 * Read the fan nominal rpm for specific fan speed.
2914 */
2915 @@ -392,9 +408,11 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
2916 break;
2917
2918 case I8K_MACHINE_ID:
2919 - memset(buff, 0, 16);
2920 - strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2921 - sizeof(buff));
2922 + if (restricted && !capable(CAP_SYS_ADMIN))
2923 + return -EPERM;
2924 +
2925 + memset(buff, 0, sizeof(buff));
2926 + strlcpy(buff, bios_machineid, sizeof(buff));
2927 break;
2928
2929 case I8K_FN_STATUS:
2930 @@ -511,7 +529,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
2931 seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
2932 I8K_PROC_FMT,
2933 bios_version,
2934 - i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
2935 + (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
2936 cpu_temp,
2937 left_fan, right_fan, left_speed, right_speed,
2938 ac_power, fn_key);
2939 @@ -718,6 +736,9 @@ static struct attribute *i8k_attrs[] = {
2940 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
2941 int index)
2942 {
2943 + if (disallow_fan_type_call &&
2944 + (index == 9 || index == 12))
2945 + return 0;
2946 if (index >= 0 && index <= 1 &&
2947 !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
2948 return 0;
2949 @@ -767,13 +788,17 @@ static int __init i8k_init_hwmon(void)
2950 if (err >= 0)
2951 i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
2952
2953 - /* First fan attributes, if fan type is OK */
2954 - err = i8k_get_fan_type(0);
2955 + /* First fan attributes, if fan status or type is OK */
2956 + err = i8k_get_fan_status(0);
2957 + if (err < 0)
2958 + err = i8k_get_fan_type(0);
2959 if (err >= 0)
2960 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
2961
2962 - /* Second fan attributes, if fan type is OK */
2963 - err = i8k_get_fan_type(1);
2964 + /* Second fan attributes, if fan status or type is OK */
2965 + err = i8k_get_fan_status(1);
2966 + if (err < 0)
2967 + err = i8k_get_fan_type(1);
2968 if (err >= 0)
2969 i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
2970
2971 @@ -929,12 +954,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
2972
2973 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
2974
2975 -static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
2976 +/*
2977 + * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
2978 + * starts going up and down randomly due to a bug in the Dell SMM or BIOS.
2979 + * Below is a blacklist of Dell machines for which we disallow that SMM call.
2980 + * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
2981 + */
2982 +static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
2983 {
2984 - /*
2985 - * CPU fan speed going up and down on Dell Studio XPS 8000
2986 - * for unknown reasons.
2987 - */
2988 .ident = "Dell Studio XPS 8000",
2989 .matches = {
2990 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2991 @@ -942,16 +969,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
2992 },
2993 },
2994 {
2995 - /*
2996 - * CPU fan speed going up and down on Dell Studio XPS 8100
2997 - * for unknown reasons.
2998 - */
2999 .ident = "Dell Studio XPS 8100",
3000 .matches = {
3001 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3002 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
3003 },
3004 },
3005 + {
3006 + .ident = "Dell Inspiron 580",
3007 + .matches = {
3008 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
3009 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
3010 + },
3011 + },
3012 { }
3013 };
3014
3015 @@ -966,8 +996,7 @@ static int __init i8k_probe(void)
3016 /*
3017 * Get DMI information
3018 */
3019 - if (!dmi_check_system(i8k_dmi_table) ||
3020 - dmi_check_system(i8k_blacklist_dmi_table)) {
3021 + if (!dmi_check_system(i8k_dmi_table)) {
3022 if (!ignore_dmi && !force)
3023 return -ENODEV;
3024
3025 @@ -978,8 +1007,13 @@ static int __init i8k_probe(void)
3026 i8k_get_dmi_data(DMI_BIOS_VERSION));
3027 }
3028
3029 + if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
3030 + disallow_fan_type_call = true;
3031 +
3032 strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
3033 sizeof(bios_version));
3034 + strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
3035 + sizeof(bios_machineid));
3036
3037 /*
3038 * Get SMM Dell signature
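
The caching added above is a common kernel idiom: a static array primed with an
impossible sentinel value, filled on first use. It also means a permanent error
(such as -EINVAL from a blacklisted machine) is cached rather than retried on
every read. A minimal userspace C sketch of the same idea (function names
hypothetical, not from the driver):

#include <limits.h>
#include <stdio.h>

/* Hypothetical stand-in for the expensive SMM query. */
static int query_fan_type_hw(int fan)
{
        printf("expensive hardware query for fan %d\n", fan);
        return fan % 2;                 /* pretend the hardware answered */
}

/* Run the expensive query at most once per fan.  INT_MIN doubles as
 * the "not asked yet" sentinel, so negative error codes are cached
 * too instead of being retried on every call. */
static int get_fan_type_cached(int fan)
{
        static int types[2] = { INT_MIN, INT_MIN };

        if (types[fan] == INT_MIN)
                types[fan] = query_fan_type_hw(fan);

        return types[fan];
}

int main(void)
{
        get_fan_type_cached(0);
        get_fan_type_cached(0);         /* served from the cache */
        return 0;
}
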
3039 diff --git a/drivers/iio/accel/kxsd9.c b/drivers/iio/accel/kxsd9.c
3040 index 923f56598d4b..3a9f106787d2 100644
3041 --- a/drivers/iio/accel/kxsd9.c
3042 +++ b/drivers/iio/accel/kxsd9.c
3043 @@ -81,7 +81,7 @@ static int kxsd9_write_scale(struct iio_dev *indio_dev, int micro)
3044
3045 mutex_lock(&st->buf_lock);
3046 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
3047 - if (ret)
3048 + if (ret < 0)
3049 goto error_ret;
3050 st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C);
3051 st->tx[1] = (ret & ~KXSD9_FS_MASK) | i;
3052 @@ -163,7 +163,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
3053 break;
3054 case IIO_CHAN_INFO_SCALE:
3055 ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
3056 - if (ret)
3057 + if (ret < 0)
3058 goto error_ret;
3059 *val2 = kxsd9_micro_scales[ret & KXSD9_FS_MASK];
3060 ret = IIO_VAL_INT_PLUS_MICRO;
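
Both kxsd9 hunks fix the same return-value pitfall: spi_w8r8() returns a
negative errno on failure and the register byte otherwise, so "if (ret)"
treats any nonzero register value as an error. A standalone sketch of the
failure mode (read_reg() is a made-up stand-in for spi_w8r8()):

#include <stdio.h>

/* Stand-in for spi_w8r8(): negative errno on failure, otherwise the
 * register value that was read -- which is usually nonzero. */
static int read_reg(void)
{
        return 0x42;            /* a perfectly valid register value */
}

int main(void)
{
        int ret = read_reg();

        if (ret)                /* BUG: bails out on any nonzero byte */
                printf("wrongly treated 0x%02x as an error\n", ret);

        if (ret < 0)            /* correct: only negative means error */
                return 1;

        printf("register value: 0x%02x\n", ret);
        return 0;
}
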
3061 diff --git a/drivers/iio/adc/ad7266.c b/drivers/iio/adc/ad7266.c
3062 index 21e19b60e2b9..2123f0ac2e2a 100644
3063 --- a/drivers/iio/adc/ad7266.c
3064 +++ b/drivers/iio/adc/ad7266.c
3065 @@ -396,8 +396,8 @@ static int ad7266_probe(struct spi_device *spi)
3066
3067 st = iio_priv(indio_dev);
3068
3069 - st->reg = devm_regulator_get(&spi->dev, "vref");
3070 - if (!IS_ERR_OR_NULL(st->reg)) {
3071 + st->reg = devm_regulator_get_optional(&spi->dev, "vref");
3072 + if (!IS_ERR(st->reg)) {
3073 ret = regulator_enable(st->reg);
3074 if (ret)
3075 return ret;
3076 @@ -408,6 +408,9 @@ static int ad7266_probe(struct spi_device *spi)
3077
3078 st->vref_mv = ret / 1000;
3079 } else {
3080 + /* Any error other than -ENODEV means the regulator does exist */
3081 + if (PTR_ERR(st->reg) != -ENODEV)
3082 + return PTR_ERR(st->reg);
3083 /* Use internal reference */
3084 st->vref_mv = 2500;
3085 }
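
The ad7266 hunk switches to devm_regulator_get_optional() so that an absent
"vref" supply comes back as ERR_PTR(-ENODEV) and can be told apart from real
errors such as -EPROBE_DEFER, which must be propagated rather than silently
turned into "use the internal reference". A userspace sketch of the three-way
split, with toy ERR_PTR helpers standing in for the kernel's:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Toy versions of the kernel's ERR_PTR helpers. */
#define MAX_ERRNO 4095
static void *ERR_PTR(long err) { return (void *)err; }
static long PTR_ERR(const void *p) { return (long)p; }
static int IS_ERR(const void *p)
{
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical optional-resource getter: -ENODEV means "not wired up
 * in this design", anything else is a genuine failure. */
static void *get_optional_supply(int present)
{
        return present ? (void *)0x1000 : ERR_PTR(-ENODEV);
}

int main(void)
{
        void *reg = get_optional_supply(0);

        if (!IS_ERR(reg)) {
                puts("external reference present: enable and use it");
        } else if (PTR_ERR(reg) != -ENODEV) {
                return (int)-PTR_ERR(reg);  /* e.g. propagate -EPROBE_DEFER */
        } else {
                puts("no external supply: fall back to internal 2500 mV");
        }
        return 0;
}
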
3086 diff --git a/drivers/iio/humidity/hdc100x.c b/drivers/iio/humidity/hdc100x.c
3087 index fa4767613173..a03832a5fc95 100644
3088 --- a/drivers/iio/humidity/hdc100x.c
3089 +++ b/drivers/iio/humidity/hdc100x.c
3090 @@ -55,7 +55,7 @@ static const struct {
3091 },
3092 { /* IIO_HUMIDITYRELATIVE channel */
3093 .shift = 8,
3094 - .mask = 2,
3095 + .mask = 3,
3096 },
3097 };
3098
3099 @@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
3100 dev_err(&client->dev, "cannot read high byte measurement");
3101 return ret;
3102 }
3103 - val = ret << 6;
3104 + val = ret << 8;
3105
3106 ret = i2c_smbus_read_byte(client);
3107 if (ret < 0) {
3108 dev_err(&client->dev, "cannot read low byte measurement");
3109 return ret;
3110 }
3111 - val |= ret >> 2;
3112 + val |= ret;
3113
3114 return val;
3115 }
3116 @@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
3117 return IIO_VAL_INT_PLUS_MICRO;
3118 case IIO_CHAN_INFO_SCALE:
3119 if (chan->type == IIO_TEMP) {
3120 - *val = 165;
3121 - *val2 = 65536 >> 2;
3122 + *val = 165000;
3123 + *val2 = 65536;
3124 return IIO_VAL_FRACTIONAL;
3125 } else {
3126 - *val = 0;
3127 - *val2 = 10000;
3128 - return IIO_VAL_INT_PLUS_MICRO;
3129 + *val = 100;
3130 + *val2 = 65536;
3131 + return IIO_VAL_FRACTIONAL;
3132 }
3133 break;
3134 case IIO_CHAN_INFO_OFFSET:
3135 - *val = -3971;
3136 - *val2 = 879096;
3137 + *val = -15887;
3138 + *val2 = 515151;
3139 return IIO_VAL_INT_PLUS_MICRO;
3140 default:
3141 return -EINVAL;
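
The new hdc100x numbers follow from the datasheet formula
T(degC) = raw / 2^16 * 165 - 40 once the 14-bit samples are kept left-aligned
instead of being shifted down. Since IIO computes value = (raw + offset) *
scale, the offset in counts is -40000 / (165000/65536) = -15887.515151...,
matching the hunk. A quick arithmetic check (sketch only):

#include <stdio.h>

int main(void)
{
        /* HDC100x datasheet: T(C) = raw / 2^16 * 165 - 40 */
        double scale_mC = 165000.0 / 65536.0;   /* milli-degC per count */
        double offset   = -40000.0 / scale_mC;  /* in raw counts */

        printf("offset = %.6f counts\n", offset);       /* -15887.515151 */

        /* sanity check at mid-scale raw = 0x8000: expect 42.5 C */
        printf("T(0x8000) = %.1f mC\n", (0x8000 + offset) * scale_mC);
        return 0;
}
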
3142 diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c
3143 index ae2806aafb72..0c52dfe64977 100644
3144 --- a/drivers/iio/industrialio-trigger.c
3145 +++ b/drivers/iio/industrialio-trigger.c
3146 @@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
3147
3148 /* Prevent the module from being removed whilst attached to a trigger */
3149 __module_get(pf->indio_dev->info->driver_module);
3150 +
3151 + /* Get irq number */
3152 pf->irq = iio_trigger_get_irq(trig);
3153 + if (pf->irq < 0)
3154 + goto out_put_module;
3155 +
3156 + /* Request irq */
3157 ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
3158 pf->type, pf->name,
3159 pf);
3160 - if (ret < 0) {
3161 - module_put(pf->indio_dev->info->driver_module);
3162 - return ret;
3163 - }
3164 + if (ret < 0)
3165 + goto out_put_irq;
3166
3167 + /* Enable trigger in driver */
3168 if (trig->ops && trig->ops->set_trigger_state && notinuse) {
3169 ret = trig->ops->set_trigger_state(trig, true);
3170 if (ret < 0)
3171 - module_put(pf->indio_dev->info->driver_module);
3172 + goto out_free_irq;
3173 }
3174
3175 return ret;
3176 +
3177 +out_free_irq:
3178 + free_irq(pf->irq, pf);
3179 +out_put_irq:
3180 + iio_trigger_put_irq(trig, pf->irq);
3181 +out_put_module:
3182 + module_put(pf->indio_dev->info->driver_module);
3183 + return ret;
3184 }
3185
3186 static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
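
The trigger-attach rewrite above converts piecemeal cleanup into the kernel's
canonical goto ladder: every acquired resource gets a label, and a failure
jumps to the label that unwinds exactly what was acquired before it, in
reverse order. A compilable userspace sketch of the idiom (resource names
hypothetical, malloc() standing in for the real acquisitions):

#include <stdlib.h>

static int attach(void)
{
        void *module, *irq, *state;

        module = malloc(1);
        if (!module)
                return -1;

        irq = malloc(1);
        if (!irq)
                goto out_put_module;

        state = malloc(1);
        if (!state)
                goto out_free_irq;

        /* success -- in the driver these stay held until detach;
         * freed here only to keep the sketch leak-free */
        free(state);
        free(irq);
        free(module);
        return 0;

out_free_irq:
        free(irq);
out_put_module:
        free(module);
        return -1;
}

int main(void)
{
        return attach() ? 1 : 0;
}
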
3187 diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
3188 index a6af56ad10e1..6443aad809b2 100644
3189 --- a/drivers/iio/light/apds9960.c
3190 +++ b/drivers/iio/light/apds9960.c
3191 @@ -1006,6 +1006,7 @@ static int apds9960_probe(struct i2c_client *client,
3192
3193 iio_device_attach_buffer(indio_dev, buffer);
3194
3195 + indio_dev->dev.parent = &client->dev;
3196 indio_dev->info = &apds9960_info;
3197 indio_dev->name = APDS9960_DRV_NAME;
3198 indio_dev->channels = apds9960_channels;
3199 diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
3200 index 172393ad34af..d3ca3207935d 100644
3201 --- a/drivers/iio/pressure/st_pressure_core.c
3202 +++ b/drivers/iio/pressure/st_pressure_core.c
3203 @@ -28,15 +28,21 @@
3204 #include <linux/iio/common/st_sensors.h>
3205 #include "st_pressure.h"
3206
3207 +#define MCELSIUS_PER_CELSIUS 1000
3208 +
3209 +/* Default pressure sensitivity */
3210 #define ST_PRESS_LSB_PER_MBAR 4096UL
3211 #define ST_PRESS_KPASCAL_NANO_SCALE (100000000UL / \
3212 ST_PRESS_LSB_PER_MBAR)
3213 +
3214 +/* Default temperature sensitivity */
3215 #define ST_PRESS_LSB_PER_CELSIUS 480UL
3216 -#define ST_PRESS_CELSIUS_NANO_SCALE (1000000000UL / \
3217 - ST_PRESS_LSB_PER_CELSIUS)
3218 +#define ST_PRESS_MILLI_CELSIUS_OFFSET 42500UL
3219 +
3220 #define ST_PRESS_NUMBER_DATA_CHANNELS 1
3221
3222 /* FULLSCALE */
3223 +#define ST_PRESS_FS_AVL_1100MB 1100
3224 #define ST_PRESS_FS_AVL_1260MB 1260
3225
3226 #define ST_PRESS_1_OUT_XL_ADDR 0x28
3227 @@ -54,9 +60,6 @@
3228 #define ST_PRESS_LPS331AP_PW_MASK 0x80
3229 #define ST_PRESS_LPS331AP_FS_ADDR 0x23
3230 #define ST_PRESS_LPS331AP_FS_MASK 0x30
3231 -#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL 0x00
3232 -#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
3233 -#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
3234 #define ST_PRESS_LPS331AP_BDU_ADDR 0x20
3235 #define ST_PRESS_LPS331AP_BDU_MASK 0x04
3236 #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR 0x22
3237 @@ -65,9 +68,14 @@
3238 #define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
3239 #define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
3240 #define ST_PRESS_LPS331AP_MULTIREAD_BIT true
3241 -#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
3242
3243 /* CUSTOM VALUES FOR LPS001WP SENSOR */
3244 +
3245 +/* LPS001WP pressure resolution */
3246 +#define ST_PRESS_LPS001WP_LSB_PER_MBAR 16UL
3247 +/* LPS001WP temperature resolution */
3248 +#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS 64UL
3249 +
3250 #define ST_PRESS_LPS001WP_WAI_EXP 0xba
3251 #define ST_PRESS_LPS001WP_ODR_ADDR 0x20
3252 #define ST_PRESS_LPS001WP_ODR_MASK 0x30
3253 @@ -76,6 +84,8 @@
3254 #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL 0x03
3255 #define ST_PRESS_LPS001WP_PW_ADDR 0x20
3256 #define ST_PRESS_LPS001WP_PW_MASK 0x40
3257 +#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
3258 + (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
3259 #define ST_PRESS_LPS001WP_BDU_ADDR 0x20
3260 #define ST_PRESS_LPS001WP_BDU_MASK 0x04
3261 #define ST_PRESS_LPS001WP_MULTIREAD_BIT true
3262 @@ -92,11 +102,6 @@
3263 #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL 0x04
3264 #define ST_PRESS_LPS25H_PW_ADDR 0x20
3265 #define ST_PRESS_LPS25H_PW_MASK 0x80
3266 -#define ST_PRESS_LPS25H_FS_ADDR 0x00
3267 -#define ST_PRESS_LPS25H_FS_MASK 0x00
3268 -#define ST_PRESS_LPS25H_FS_AVL_1260_VAL 0x00
3269 -#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN ST_PRESS_KPASCAL_NANO_SCALE
3270 -#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN ST_PRESS_CELSIUS_NANO_SCALE
3271 #define ST_PRESS_LPS25H_BDU_ADDR 0x20
3272 #define ST_PRESS_LPS25H_BDU_MASK 0x04
3273 #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR 0x23
3274 @@ -105,7 +110,6 @@
3275 #define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
3276 #define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
3277 #define ST_PRESS_LPS25H_MULTIREAD_BIT true
3278 -#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
3279 #define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
3280 #define ST_TEMP_LPS25H_OUT_L_ADDR 0x2b
3281
3282 @@ -157,7 +161,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
3283 .storagebits = 16,
3284 .endianness = IIO_LE,
3285 },
3286 - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
3287 + .info_mask_separate =
3288 + BIT(IIO_CHAN_INFO_RAW) |
3289 + BIT(IIO_CHAN_INFO_SCALE),
3290 .modified = 0,
3291 },
3292 {
3293 @@ -173,7 +179,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
3294 },
3295 .info_mask_separate =
3296 BIT(IIO_CHAN_INFO_RAW) |
3297 - BIT(IIO_CHAN_INFO_OFFSET),
3298 + BIT(IIO_CHAN_INFO_SCALE),
3299 .modified = 0,
3300 },
3301 IIO_CHAN_SOFT_TIMESTAMP(1)
3302 @@ -208,11 +214,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
3303 .addr = ST_PRESS_LPS331AP_FS_ADDR,
3304 .mask = ST_PRESS_LPS331AP_FS_MASK,
3305 .fs_avl = {
3306 + /*
3307 + * Pressure and temperature sensitivity values
3308 + * as defined in table 3 of LPS331AP datasheet.
3309 + */
3310 [0] = {
3311 .num = ST_PRESS_FS_AVL_1260MB,
3312 - .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
3313 - .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
3314 - .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
3315 + .gain = ST_PRESS_KPASCAL_NANO_SCALE,
3316 + .gain2 = ST_PRESS_LSB_PER_CELSIUS,
3317 },
3318 },
3319 },
3320 @@ -254,7 +263,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
3321 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
3322 },
3323 .fs = {
3324 - .addr = 0,
3325 + .fs_avl = {
3326 + /*
3327 + * Pressure and temperature resolution values
3328 + * as defined in table 3 of LPS001WP datasheet.
3329 + */
3330 + [0] = {
3331 + .num = ST_PRESS_FS_AVL_1100MB,
3332 + .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
3333 + .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
3334 + },
3335 + },
3336 },
3337 .bdu = {
3338 .addr = ST_PRESS_LPS001WP_BDU_ADDR,
3339 @@ -291,14 +310,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
3340 .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
3341 },
3342 .fs = {
3343 - .addr = ST_PRESS_LPS25H_FS_ADDR,
3344 - .mask = ST_PRESS_LPS25H_FS_MASK,
3345 .fs_avl = {
3346 + /*
3347 + * Pressure and temperature sensitivity values
3348 + * as defined in table 3 of LPS25H datasheet.
3349 + */
3350 [0] = {
3351 .num = ST_PRESS_FS_AVL_1260MB,
3352 - .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
3353 - .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
3354 - .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
3355 + .gain = ST_PRESS_KPASCAL_NANO_SCALE,
3356 + .gain2 = ST_PRESS_LSB_PER_CELSIUS,
3357 },
3358 },
3359 },
3360 @@ -354,26 +374,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
3361
3362 return IIO_VAL_INT;
3363 case IIO_CHAN_INFO_SCALE:
3364 - *val = 0;
3365 -
3366 switch (ch->type) {
3367 case IIO_PRESSURE:
3368 + *val = 0;
3369 *val2 = press_data->current_fullscale->gain;
3370 - break;
3371 + return IIO_VAL_INT_PLUS_NANO;
3372 case IIO_TEMP:
3373 + *val = MCELSIUS_PER_CELSIUS;
3374 *val2 = press_data->current_fullscale->gain2;
3375 - break;
3376 + return IIO_VAL_FRACTIONAL;
3377 default:
3378 err = -EINVAL;
3379 goto read_error;
3380 }
3381
3382 - return IIO_VAL_INT_PLUS_NANO;
3383 case IIO_CHAN_INFO_OFFSET:
3384 switch (ch->type) {
3385 case IIO_TEMP:
3386 - *val = 425;
3387 - *val2 = 10;
3388 + *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
3389 + press_data->current_fullscale->gain2;
3390 + *val2 = MCELSIUS_PER_CELSIUS;
3391 break;
3392 default:
3393 err = -EINVAL;
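
With gain2 now holding the raw sensitivity in LSB per degree instead of a
precomputed nano-scale, st_press_read_raw() reports the temperature scale as
IIO_VAL_FRACTIONAL (val/val2 = 1000/gain2 milli-degC per count) and derives
the offset as 42500 * gain2 / 1000 counts for the sensors that expose one
(the LPS001WP channel drops the offset and gains scale only). A check of the
arithmetic (sketch):

#include <stdio.h>

int main(void)
{
        /* IIO_VAL_FRACTIONAL: scale = val / val2 = 1000 / gain2 */
        printf("lps331ap/lps25h: scale = %.4f mC/LSB\n", 1000.0 / 480);
        printf("lps001wp:        scale = %.4f mC/LSB\n", 1000.0 / 64);

        /* offset reported for gain2 = 480: (raw + 20400) * 1000/480
         * puts zero raw counts at the 42.5 C reference point */
        printf("offset = %lu counts\n", 42500UL * 480 / 1000);
        return 0;
}
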
3394 diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
3395 index f4d29d5dbd5f..e2f926cdcad2 100644
3396 --- a/drivers/iio/proximity/as3935.c
3397 +++ b/drivers/iio/proximity/as3935.c
3398 @@ -64,6 +64,7 @@ struct as3935_state {
3399 struct delayed_work work;
3400
3401 u32 tune_cap;
3402 + u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
3403 u8 buf[2] ____cacheline_aligned;
3404 };
3405
3406 @@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
3407 .type = IIO_PROXIMITY,
3408 .info_mask_separate =
3409 BIT(IIO_CHAN_INFO_RAW) |
3410 - BIT(IIO_CHAN_INFO_PROCESSED),
3411 + BIT(IIO_CHAN_INFO_PROCESSED) |
3412 + BIT(IIO_CHAN_INFO_SCALE),
3413 .scan_index = 0,
3414 .scan_type = {
3415 .sign = 'u',
3416 @@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
3417 /* storm out of range */
3418 if (*val == AS3935_DATA_MASK)
3419 return -EINVAL;
3420 - *val *= 1000;
3421 +
3422 + if (m == IIO_CHAN_INFO_PROCESSED)
3423 + *val *= 1000;
3424 + break;
3425 + case IIO_CHAN_INFO_SCALE:
3426 + *val = 1000;
3427 break;
3428 default:
3429 return -EINVAL;
3430 @@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
3431 ret = as3935_read(st, AS3935_DATA, &val);
3432 if (ret)
3433 goto err_read;
3434 - val &= AS3935_DATA_MASK;
3435 - val *= 1000;
3436
3437 - iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
3438 + st->buffer[0] = val & AS3935_DATA_MASK;
3439 + iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
3440 + pf->timestamp);
3441 err_read:
3442 iio_trigger_notify_done(indio_dev->trig);
3443
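
The new 16-byte st->buffer exists because iio_push_to_buffers_with_timestamp()
expects the scan bytes padded out so the s64 timestamp starts on the next
8-byte boundary; handing it the address of a bare int, as the old code did,
lets the core write the timestamp past the end of the variable. A sketch of
the layout it assumes for a single u8 channel:

#include <stdint.h>
#include <stdio.h>

/* Layout consumed by iio_push_to_buffers_with_timestamp() for one
 * 8-bit channel: data byte, pad to 8 bytes, then the s64 timestamp. */
struct scan {
        uint8_t data;
        uint8_t pad[7];
        int64_t timestamp;
};

int main(void)
{
        printf("scan size = %zu bytes\n", sizeof(struct scan));  /* 16 */
        return 0;
}
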
3444 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
3445 index 1d92e091e22e..c99525512b34 100644
3446 --- a/drivers/infiniband/core/cm.c
3447 +++ b/drivers/infiniband/core/cm.c
3448 @@ -3452,14 +3452,14 @@ static int cm_establish(struct ib_cm_id *cm_id)
3449 work->cm_event.event = IB_CM_USER_ESTABLISHED;
3450
3451 /* Check if the device started its remove_one */
3452 - spin_lock_irq(&cm.lock);
3453 + spin_lock_irqsave(&cm.lock, flags);
3454 if (!cm_dev->going_down) {
3455 queue_delayed_work(cm.wq, &work->work, 0);
3456 } else {
3457 kfree(work);
3458 ret = -ENODEV;
3459 }
3460 - spin_unlock_irq(&cm.lock);
3461 + spin_unlock_irqrestore(&cm.lock, flags);
3462
3463 out:
3464 return ret;
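
The cm_establish() fix swaps spin_lock_irq() for spin_lock_irqsave() because
the _irq variant unconditionally re-enables interrupts on unlock, which is
wrong when the caller may already hold them disabled; the irqsave variant
records the previous state and restores exactly that. A toy userspace model
of the difference (the flag below stands in for the CPU interrupt state):

#include <stdio.h>

static int irqs_enabled;                /* toy model of the CPU flag */

int main(void)
{
        unsigned long flags;

        irqs_enabled = 0;               /* caller entered with IRQs off */

        /* spin_unlock_irq() behaviour: unconditional re-enable */
        irqs_enabled = 1;               /* BUG: IRQs on behind caller's back */

        irqs_enabled = 0;               /* again: caller has IRQs off */

        /* spin_lock_irqsave()/spin_unlock_irqrestore() behaviour */
        flags = irqs_enabled;           /* save previous state */
        irqs_enabled = 0;               /* disable for the critical section */
        irqs_enabled = flags;           /* restore: still off afterwards */

        printf("irqs_enabled = %d\n", irqs_enabled);    /* prints 0 */
        return 0;
}
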
3465 diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
3466 index 105246fba2e7..5fc623362731 100644
3467 --- a/drivers/infiniband/hw/mlx4/ah.c
3468 +++ b/drivers/infiniband/hw/mlx4/ah.c
3469 @@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
3470
3471 ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
3472 ah->av.ib.g_slid = ah_attr->src_path_bits;
3473 + ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
3474 if (ah_attr->ah_flags & IB_AH_GRH) {
3475 ah->av.ib.g_slid |= 0x80;
3476 ah->av.ib.gid_index = ah_attr->grh.sgid_index;
3477 @@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
3478 !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
3479 --ah->av.ib.stat_rate;
3480 }
3481 - ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
3482
3483 return &ah->ibah;
3484 }
3485 diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
3486 index a9e3bcc522c4..a0ecf08b2b86 100644
3487 --- a/drivers/infiniband/sw/rdmavt/qp.c
3488 +++ b/drivers/infiniband/sw/rdmavt/qp.c
3489 @@ -683,8 +683,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
3490 * initialization that is needed.
3491 */
3492 priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
3493 - if (!priv)
3494 + if (IS_ERR(priv)) {
3495 + ret = priv;
3496 goto bail_qp;
3497 + }
3498 qp->priv = priv;
3499 qp->timeout_jiffies =
3500 usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
3501 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
3502 index bf4959f4225b..94f1bf772ec9 100644
3503 --- a/drivers/iommu/amd_iommu_init.c
3504 +++ b/drivers/iommu/amd_iommu_init.c
3505 @@ -1363,13 +1363,23 @@ static int __init amd_iommu_init_pci(void)
3506 break;
3507 }
3508
3509 + /*
3510 + * Order is important here to make sure any unity map requirements are
3511 + * fulfilled. The unity mappings are created and written to the device
3512 + * table during the amd_iommu_init_api() call.
3513 + *
3514 + * After that we call init_device_table_dma() to make sure any
3515 + * uninitialized DTE will block DMA, and in the end we flush the caches
3516 + * of all IOMMUs to make sure the changes to the device table are
3517 + * active.
3518 + */
3519 + ret = amd_iommu_init_api();
3520 +
3521 init_device_table_dma();
3522
3523 for_each_iommu(iommu)
3524 iommu_flush_all_caches(iommu);
3525
3526 - ret = amd_iommu_init_api();
3527 -
3528 if (!ret)
3529 print_iommu_info();
3530
3531 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
3532 index 4ff73ff64e49..3e20208d6fdb 100644
3533 --- a/drivers/iommu/arm-smmu-v3.c
3534 +++ b/drivers/iommu/arm-smmu-v3.c
3535 @@ -1942,6 +1942,7 @@ static struct iommu_ops arm_smmu_ops = {
3536 .attach_dev = arm_smmu_attach_dev,
3537 .map = arm_smmu_map,
3538 .unmap = arm_smmu_unmap,
3539 + .map_sg = default_iommu_map_sg,
3540 .iova_to_phys = arm_smmu_iova_to_phys,
3541 .add_device = arm_smmu_add_device,
3542 .remove_device = arm_smmu_remove_device,
3543 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
3544 index e1852e845d21..ae364e07840c 100644
3545 --- a/drivers/iommu/intel-iommu.c
3546 +++ b/drivers/iommu/intel-iommu.c
3547 @@ -3169,11 +3169,6 @@ static int __init init_dmars(void)
3548 }
3549 }
3550
3551 - iommu_flush_write_buffer(iommu);
3552 - iommu_set_root_entry(iommu);
3553 - iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3554 - iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3555 -
3556 if (!ecap_pass_through(iommu->ecap))
3557 hw_pass_through = 0;
3558 #ifdef CONFIG_INTEL_IOMMU_SVM
3559 @@ -3182,6 +3177,18 @@ static int __init init_dmars(void)
3560 #endif
3561 }
3562
3563 + /*
3564 + * Now that qi is enabled on all iommus, set the root entry and flush
3565 + * caches. This is required on some Intel X58 chipsets, otherwise the
3566 + * flush_context function will loop forever and the boot hangs.
3567 + */
3568 + for_each_active_iommu(iommu, drhd) {
3569 + iommu_flush_write_buffer(iommu);
3570 + iommu_set_root_entry(iommu);
3571 + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3572 + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3573 + }
3574 +
3575 if (iommu_pass_through)
3576 iommu_identity_mapping |= IDENTMAP_ALL;
3577
3578 diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
3579 index 5710a06c3049..0ea8d9a24de0 100644
3580 --- a/drivers/iommu/rockchip-iommu.c
3581 +++ b/drivers/iommu/rockchip-iommu.c
3582 @@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
3583 dte_addr = virt_to_phys(rk_domain->dt);
3584 for (i = 0; i < iommu->num_mmu; i++) {
3585 rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
3586 - rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
3587 + rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
3588 rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
3589 }
3590
3591 diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
3592 index 4dffccf532a2..40fb1209d512 100644
3593 --- a/drivers/irqchip/irq-mips-gic.c
3594 +++ b/drivers/irqchip/irq-mips-gic.c
3595 @@ -734,6 +734,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
3596 /* verify that it doesn't conflict with an IPI irq */
3597 if (test_bit(spec->hwirq, ipi_resrv))
3598 return -EBUSY;
3599 +
3600 + hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
3601 +
3602 + return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
3603 + &gic_level_irq_controller,
3604 + NULL);
3605 } else {
3606 base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
3607 if (base_hwirq == gic_shared_intrs) {
3608 @@ -855,10 +861,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
3609 &gic_level_irq_controller,
3610 NULL);
3611 if (ret)
3612 - return ret;
3613 + goto error;
3614 }
3615
3616 return 0;
3617 +
3618 +error:
3619 + irq_domain_free_irqs_parent(d, virq, nr_irqs);
3620 + return ret;
3621 }
3622
3623 void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
3624 diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
3625 index d7723ce772b3..12690c1ea8f8 100644
3626 --- a/drivers/media/usb/uvc/uvc_v4l2.c
3627 +++ b/drivers/media/usb/uvc/uvc_v4l2.c
3628 @@ -1408,47 +1408,44 @@ static int uvc_v4l2_put_xu_query(const struct uvc_xu_control_query *kp,
3629 static long uvc_v4l2_compat_ioctl32(struct file *file,
3630 unsigned int cmd, unsigned long arg)
3631 {
3632 + struct uvc_fh *handle = file->private_data;
3633 union {
3634 struct uvc_xu_control_mapping xmap;
3635 struct uvc_xu_control_query xqry;
3636 } karg;
3637 void __user *up = compat_ptr(arg);
3638 - mm_segment_t old_fs;
3639 long ret;
3640
3641 switch (cmd) {
3642 case UVCIOC_CTRL_MAP32:
3643 - cmd = UVCIOC_CTRL_MAP;
3644 ret = uvc_v4l2_get_xu_mapping(&karg.xmap, up);
3645 + if (ret)
3646 + return ret;
3647 + ret = uvc_ioctl_ctrl_map(handle->chain, &karg.xmap);
3648 + if (ret)
3649 + return ret;
3650 + ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
3651 + if (ret)
3652 + return ret;
3653 +
3654 break;
3655
3656 case UVCIOC_CTRL_QUERY32:
3657 - cmd = UVCIOC_CTRL_QUERY;
3658 ret = uvc_v4l2_get_xu_query(&karg.xqry, up);
3659 + if (ret)
3660 + return ret;
3661 + ret = uvc_xu_ctrl_query(handle->chain, &karg.xqry);
3662 + if (ret)
3663 + return ret;
3664 + ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
3665 + if (ret)
3666 + return ret;
3667 break;
3668
3669 default:
3670 return -ENOIOCTLCMD;
3671 }
3672
3673 - old_fs = get_fs();
3674 - set_fs(KERNEL_DS);
3675 - ret = video_ioctl2(file, cmd, (unsigned long)&karg);
3676 - set_fs(old_fs);
3677 -
3678 - if (ret < 0)
3679 - return ret;
3680 -
3681 - switch (cmd) {
3682 - case UVCIOC_CTRL_MAP:
3683 - ret = uvc_v4l2_put_xu_mapping(&karg.xmap, up);
3684 - break;
3685 -
3686 - case UVCIOC_CTRL_QUERY:
3687 - ret = uvc_v4l2_put_xu_query(&karg.xqry, up);
3688 - break;
3689 - }
3690 -
3691 return ret;
3692 }
3693 #endif
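
The uvc compat handler no longer round-trips through video_ioctl2() under
set_fs(KERNEL_DS); it converts the 32-bit user layout into the native struct,
calls the native handler directly, and copies the result back out. A userspace
sketch of that shim shape (the struct fields and handler are hypothetical):

#include <stdio.h>

struct native_arg { unsigned long ptr; int len; };
struct compat_arg { unsigned int ptr32; int len; };

/* Hypothetical native handler, called directly instead of
 * re-entering the ioctl dispatcher with a kernel pointer. */
static int native_handler(struct native_arg *n)
{
        n->len = 42;
        return 0;
}

static int compat_shim(struct compat_arg *u)
{
        struct native_arg k;
        int ret;

        k.ptr = u->ptr32;               /* widen the 32-bit pointer */
        k.len = u->len;

        ret = native_handler(&k);       /* kernel-internal call */
        if (ret)
                return ret;

        u->len = k.len;                 /* copy results back out */
        return 0;
}

int main(void)
{
        struct compat_arg c = { 0x1000, 0 };

        compat_shim(&c);
        printf("len = %d\n", c.len);
        return 0;
}
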
3694 diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
3695 index 21825ddce4a3..859b4a1d11e4 100644
3696 --- a/drivers/memory/omap-gpmc.c
3697 +++ b/drivers/memory/omap-gpmc.c
3698 @@ -394,7 +394,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
3699 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
3700 GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
3701 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
3702 - GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
3703 + GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
3704 gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
3705 GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
3706 p->cycle2cyclesamecsen);
3707 diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
3708 index 96fddb016bf1..4dd0391d2942 100644
3709 --- a/drivers/mtd/ubi/eba.c
3710 +++ b/drivers/mtd/ubi/eba.c
3711 @@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
3712 int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
3713 struct ubi_volume *vol = ubi->volumes[idx];
3714 struct ubi_vid_hdr *vid_hdr;
3715 + uint32_t crc;
3716
3717 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
3718 if (!vid_hdr)
3719 @@ -599,14 +600,8 @@ retry:
3720 goto out_put;
3721 }
3722
3723 - vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
3724 - err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
3725 - if (err) {
3726 - up_read(&ubi->fm_eba_sem);
3727 - goto write_error;
3728 - }
3729 + ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
3730
3731 - data_size = offset + len;
3732 mutex_lock(&ubi->buf_mutex);
3733 memset(ubi->peb_buf + offset, 0xFF, len);
3734
3735 @@ -621,6 +616,19 @@ retry:
3736
3737 memcpy(ubi->peb_buf + offset, buf, len);
3738
3739 + data_size = offset + len;
3740 + crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
3741 + vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
3742 + vid_hdr->copy_flag = 1;
3743 + vid_hdr->data_size = cpu_to_be32(data_size);
3744 + vid_hdr->data_crc = cpu_to_be32(crc);
3745 + err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
3746 + if (err) {
3747 + mutex_unlock(&ubi->buf_mutex);
3748 + up_read(&ubi->fm_eba_sem);
3749 + goto write_error;
3750 + }
3751 +
3752 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
3753 if (err) {
3754 mutex_unlock(&ubi->buf_mutex);
3755 diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
3756 index 9fcb4898fb68..c70e51567eed 100644
3757 --- a/drivers/net/geneve.c
3758 +++ b/drivers/net/geneve.c
3759 @@ -1092,12 +1092,17 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
3760
3761 static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
3762 {
3763 + struct geneve_dev *geneve = netdev_priv(dev);
3764 /* The max_mtu calculation does not take account of GENEVE
3765 * options, to avoid excluding potentially valid
3766 * configurations.
3767 */
3768 - int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
3769 - - dev->hard_header_len;
3770 + int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - dev->hard_header_len;
3771 +
3772 + if (geneve->remote.sa.sa_family == AF_INET6)
3773 + max_mtu -= sizeof(struct ipv6hdr);
3774 + else
3775 + max_mtu -= sizeof(struct iphdr);
3776
3777 if (new_mtu < 68)
3778 return -EINVAL;
3779 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
3780 index 9e803bbcc0b6..8f3c55d03d5d 100644
3781 --- a/drivers/net/macsec.c
3782 +++ b/drivers/net/macsec.c
3783 @@ -2564,6 +2564,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3784 u64_stats_update_begin(&secy_stats->syncp);
3785 secy_stats->stats.OutPktsUntagged++;
3786 u64_stats_update_end(&secy_stats->syncp);
3787 + skb->dev = macsec->real_dev;
3788 len = skb->len;
3789 ret = dev_queue_xmit(skb);
3790 count_tx(dev, ret, len);
3791 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
3792 index 2fb31edab125..d4425c565839 100644
3793 --- a/drivers/net/usb/cdc_ncm.c
3794 +++ b/drivers/net/usb/cdc_ncm.c
3795 @@ -852,6 +852,13 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_
3796 if (cdc_ncm_init(dev))
3797 goto error2;
3798
3799 + /* Some firmware versions need a pause here or they will silently fail
3800 + * to set up the interface properly. This value was decided
3801 + * empirically on a Sierra Wireless MC7455 running 02.08.02.00
3802 + * firmware.
3803 + */
3804 + usleep_range(10000, 20000);
3805 +
3806 /* configure data interface */
3807 temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
3808 if (temp) {
3809 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
3810 index e85e0737771c..06664baa43d9 100644
3811 --- a/drivers/net/wireless/mac80211_hwsim.c
3812 +++ b/drivers/net/wireless/mac80211_hwsim.c
3813 @@ -2771,6 +2771,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
3814 if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] ||
3815 !info->attrs[HWSIM_ATTR_FLAGS] ||
3816 !info->attrs[HWSIM_ATTR_COOKIE] ||
3817 + !info->attrs[HWSIM_ATTR_SIGNAL] ||
3818 !info->attrs[HWSIM_ATTR_TX_INFO])
3819 goto out;
3820
3821 diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
3822 index 0f48048b8654..3a0faa8fe9d4 100644
3823 --- a/drivers/net/wireless/realtek/rtlwifi/core.c
3824 +++ b/drivers/net/wireless/realtek/rtlwifi/core.c
3825 @@ -54,7 +54,7 @@ EXPORT_SYMBOL(channel5g_80m);
3826 void rtl_addr_delay(u32 addr)
3827 {
3828 if (addr == 0xfe)
3829 - msleep(50);
3830 + mdelay(50);
3831 else if (addr == 0xfd)
3832 msleep(5);
3833 else if (addr == 0xfc)
3834 @@ -75,7 +75,7 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
3835 rtl_addr_delay(addr);
3836 } else {
3837 rtl_set_rfreg(hw, rfpath, addr, mask, data);
3838 - usleep_range(1, 2);
3839 + udelay(1);
3840 }
3841 }
3842 EXPORT_SYMBOL(rtl_rfreg_delay);
3843 @@ -86,7 +86,7 @@ void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data)
3844 rtl_addr_delay(addr);
3845 } else {
3846 rtl_set_bbreg(hw, addr, MASKDWORD, data);
3847 - usleep_range(1, 2);
3848 + udelay(1);
3849 }
3850 }
3851 EXPORT_SYMBOL(rtl_bb_delay);
3852 diff --git a/drivers/of/irq.c b/drivers/of/irq.c
3853 index e7bfc175b8e1..6ec743faabe8 100644
3854 --- a/drivers/of/irq.c
3855 +++ b/drivers/of/irq.c
3856 @@ -386,13 +386,13 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
3857 EXPORT_SYMBOL_GPL(of_irq_to_resource);
3858
3859 /**
3860 - * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
3861 + * of_irq_get - Decode a node's IRQ and return it as a Linux IRQ number
3862 * @dev: pointer to device tree node
3863 - * @index: zero-based index of the irq
3864 - *
3865 - * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
3866 - * is not yet created.
3867 + * @index: zero-based index of the IRQ
3868 *
3869 + * Returns the Linux IRQ number on success, 0 if the IRQ mapping failed,
3870 + * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code
3871 + * for any other failure.
3872 */
3873 int of_irq_get(struct device_node *dev, int index)
3874 {
3875 @@ -413,12 +413,13 @@ int of_irq_get(struct device_node *dev, int index)
3876 EXPORT_SYMBOL_GPL(of_irq_get);
3877
3878 /**
3879 - * of_irq_get_byname - Decode a node's IRQ and return it as a Linux irq number
3880 + * of_irq_get_byname - Decode a node's IRQ and return it as a Linux IRQ number
3881 * @dev: pointer to device tree node
3882 - * @name: irq name
3883 + * @name: IRQ name
3884 *
3885 - * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
3886 - * is not yet created, or error code in case of any other failure.
3887 + * Returns the Linux IRQ number on success, 0 if the IRQ mapping failed,
3888 + * -EPROBE_DEFER if the IRQ domain is not yet created, or an error code
3889 + * for any other failure.
3890 */
3891 int of_irq_get_byname(struct device_node *dev, const char *name)
3892 {
3893 diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c
3894 index dfbab61a1b47..1fa3a3219c45 100644
3895 --- a/drivers/pci/vc.c
3896 +++ b/drivers/pci/vc.c
3897 @@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
3898 else
3899 pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
3900 *(u16 *)buf);
3901 - buf += 2;
3902 + buf += 4;
3903 }
3904 - len += 2;
3905 + len += 4;
3906
3907 /*
3908 * If we have any Low Priority VCs and a VC Arbitration Table Offset
3909 diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
3910 index 56a17ec5b5ef..6c7fe4778793 100644
3911 --- a/drivers/regulator/qcom_smd-regulator.c
3912 +++ b/drivers/regulator/qcom_smd-regulator.c
3913 @@ -140,6 +140,18 @@ static const struct regulator_ops rpm_smps_ldo_ops = {
3914 .enable = rpm_reg_enable,
3915 .disable = rpm_reg_disable,
3916 .is_enabled = rpm_reg_is_enabled,
3917 + .list_voltage = regulator_list_voltage_linear_range,
3918 +
3919 + .get_voltage = rpm_reg_get_voltage,
3920 + .set_voltage = rpm_reg_set_voltage,
3921 +
3922 + .set_load = rpm_reg_set_load,
3923 +};
3924 +
3925 +static const struct regulator_ops rpm_smps_ldo_ops_fixed = {
3926 + .enable = rpm_reg_enable,
3927 + .disable = rpm_reg_disable,
3928 + .is_enabled = rpm_reg_is_enabled,
3929
3930 .get_voltage = rpm_reg_get_voltage,
3931 .set_voltage = rpm_reg_set_voltage,
3932 @@ -247,7 +259,7 @@ static const struct regulator_desc pm8941_nldo = {
3933 static const struct regulator_desc pm8941_lnldo = {
3934 .fixed_uV = 1740000,
3935 .n_voltages = 1,
3936 - .ops = &rpm_smps_ldo_ops,
3937 + .ops = &rpm_smps_ldo_ops_fixed,
3938 };
3939
3940 static const struct regulator_desc pm8941_switch = {
3941 diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
3942 index d4c285688ce9..3ddc85e6efd6 100644
3943 --- a/drivers/scsi/53c700.c
3944 +++ b/drivers/scsi/53c700.c
3945 @@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
3946 } else {
3947 struct scsi_cmnd *SCp;
3948
3949 - SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
3950 + SCp = SDp->current_cmnd;
3951 if(unlikely(SCp == NULL)) {
3952 sdev_printk(KERN_ERR, SDp,
3953 "no saved request for untagged cmd\n");
3954 @@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
3955 slot->tag, slot);
3956 } else {
3957 slot->tag = SCSI_NO_TAG;
3958 - /* must populate current_cmnd for scsi_host_find_tag to work */
3959 + /* save current command for reselection */
3960 SCp->device->current_cmnd = SCp;
3961 }
3962 /* sanity check: some of the commands generated by the mid-layer
3963 diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
3964 index 984ddcb4786d..1b9c049bd5c5 100644
3965 --- a/drivers/scsi/scsi_error.c
3966 +++ b/drivers/scsi/scsi_error.c
3967 @@ -1127,7 +1127,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
3968 */
3969 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
3970 {
3971 - scmd->device->host->host_failed--;
3972 scmd->eh_eflags = 0;
3973 list_move_tail(&scmd->eh_entry, done_q);
3974 }
3975 @@ -2226,6 +2225,9 @@ int scsi_error_handler(void *data)
3976 else
3977 scsi_unjam_host(shost);
3978
3979 + /* All scmds have been handled */
3980 + shost->host_failed = 0;
3981 +
3982 /*
3983 * Note - if the above fails completely, the action is to take
3984 * individual devices offline and flush the queue of any
3985 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
3986 index f52b74cf8d1e..41c3a2c4f112 100644
3987 --- a/drivers/scsi/sd.c
3988 +++ b/drivers/scsi/sd.c
3989 @@ -2862,10 +2862,10 @@ static int sd_revalidate_disk(struct gendisk *disk)
3990 if (sdkp->opt_xfer_blocks &&
3991 sdkp->opt_xfer_blocks <= dev_max &&
3992 sdkp->opt_xfer_blocks <= SD_DEF_XFER_BLOCKS &&
3993 - sdkp->opt_xfer_blocks * sdp->sector_size >= PAGE_SIZE)
3994 - rw_max = q->limits.io_opt =
3995 - sdkp->opt_xfer_blocks * sdp->sector_size;
3996 - else
3997 + logical_to_bytes(sdp, sdkp->opt_xfer_blocks) >= PAGE_SIZE) {
3998 + q->limits.io_opt = logical_to_bytes(sdp, sdkp->opt_xfer_blocks);
3999 + rw_max = logical_to_sectors(sdp, sdkp->opt_xfer_blocks);
4000 + } else
4001 rw_max = BLK_DEF_MAX_SECTORS;
4002
4003 /* Combine with controller limits */
4004 diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
4005 index 654630bb7d0e..765a6f1ac1b7 100644
4006 --- a/drivers/scsi/sd.h
4007 +++ b/drivers/scsi/sd.h
4008 @@ -151,6 +151,11 @@ static inline sector_t logical_to_sectors(struct scsi_device *sdev, sector_t blo
4009 return blocks << (ilog2(sdev->sector_size) - 9);
4010 }
4011
4012 +static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
4013 +{
4014 + return blocks * sdev->sector_size;
4015 +}
4016 +
4017 /*
4018 * A DIF-capable target device can be formatted with different
4019 * protection schemes. Currently 0 through 3 are defined:
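
The sd_revalidate_disk() hunk routes both unit conversions through explicit
helpers: logical_to_bytes() for q->limits.io_opt (which wants bytes) and
logical_to_sectors() for rw_max (which wants 512-byte sectors, i.e.
blocks << (ilog2(sector_size) - 9)). A worked example with 4 KiB logical
blocks (sketch):

#include <stdio.h>

int main(void)
{
        unsigned int sector_size = 4096;
        unsigned long opt_xfer_blocks = 256;

        /* logical_to_bytes(): io_opt is expressed in bytes */
        unsigned long bytes = opt_xfer_blocks * sector_size;

        /* logical_to_sectors(): rw_max is in 512-byte sectors;
         * ilog2(4096) - 9 == 3, so this is blocks << 3 */
        unsigned long sectors = opt_xfer_blocks << 3;

        printf("io_opt = %lu bytes, rw_max = %lu sectors\n",
               bytes, sectors);         /* 1048576 bytes, 2048 sectors */
        return 0;
}
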
4020 diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
4021 index a8f533af9eca..ec12181822e6 100644
4022 --- a/drivers/staging/iio/accel/sca3000_core.c
4023 +++ b/drivers/staging/iio/accel/sca3000_core.c
4024 @@ -594,7 +594,7 @@ static ssize_t sca3000_read_frequency(struct device *dev,
4025 goto error_ret_mut;
4026 ret = sca3000_read_ctrl_reg(st, SCA3000_REG_CTRL_SEL_OUT_CTRL);
4027 mutex_unlock(&st->lock);
4028 - if (ret)
4029 + if (ret < 0)
4030 goto error_ret;
4031 val = ret;
4032 if (base_freq > 0)
4033 diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
4034 index 6ceac4f2d4b2..5b4b47ed948b 100644
4035 --- a/drivers/thermal/cpu_cooling.c
4036 +++ b/drivers/thermal/cpu_cooling.c
4037 @@ -857,14 +857,6 @@ __cpufreq_cooling_register(struct device_node *np,
4038 goto free_power_table;
4039 }
4040
4041 - snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
4042 - cpufreq_dev->id);
4043 -
4044 - cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
4045 - &cpufreq_cooling_ops);
4046 - if (IS_ERR(cool_dev))
4047 - goto remove_idr;
4048 -
4049 /* Fill freq-table in descending order of frequencies */
4050 for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
4051 freq = find_next_max(table, freq);
4052 @@ -877,6 +869,14 @@ __cpufreq_cooling_register(struct device_node *np,
4053 pr_debug("%s: freq:%u KHz\n", __func__, freq);
4054 }
4055
4056 + snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
4057 + cpufreq_dev->id);
4058 +
4059 + cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
4060 + &cpufreq_cooling_ops);
4061 + if (IS_ERR(cool_dev))
4062 + goto remove_idr;
4063 +
4064 cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0];
4065 cpufreq_dev->cool_dev = cool_dev;
4066
4067 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
4068 index f973bfce5d08..1e93a37e27f0 100644
4069 --- a/drivers/tty/vt/keyboard.c
4070 +++ b/drivers/tty/vt/keyboard.c
4071 @@ -366,34 +366,22 @@ static void to_utf8(struct vc_data *vc, uint c)
4072
4073 static void do_compute_shiftstate(void)
4074 {
4075 - unsigned int i, j, k, sym, val;
4076 + unsigned int k, sym, val;
4077
4078 shift_state = 0;
4079 memset(shift_down, 0, sizeof(shift_down));
4080
4081 - for (i = 0; i < ARRAY_SIZE(key_down); i++) {
4082 -
4083 - if (!key_down[i])
4084 + for_each_set_bit(k, key_down, min(NR_KEYS, KEY_CNT)) {
4085 + sym = U(key_maps[0][k]);
4086 + if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
4087 continue;
4088
4089 - k = i * BITS_PER_LONG;
4090 -
4091 - for (j = 0; j < BITS_PER_LONG; j++, k++) {
4092 -
4093 - if (!test_bit(k, key_down))
4094 - continue;
4095 + val = KVAL(sym);
4096 + if (val == KVAL(K_CAPSSHIFT))
4097 + val = KVAL(K_SHIFT);
4098
4099 - sym = U(key_maps[0][k]);
4100 - if (KTYP(sym) != KT_SHIFT && KTYP(sym) != KT_SLOCK)
4101 - continue;
4102 -
4103 - val = KVAL(sym);
4104 - if (val == KVAL(K_CAPSSHIFT))
4105 - val = KVAL(K_SHIFT);
4106 -
4107 - shift_down[val]++;
4108 - shift_state |= (1 << val);
4109 - }
4110 + shift_down[val]++;
4111 + shift_state |= BIT(val);
4112 }
4113 }
4114
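
The keyboard.c rewrite collapses the hand-rolled word-and-bit double loop
into for_each_set_bit(), which visits only the set bits (and, in the real
helper, skips all-zero words entirely). A simplified single-word userspace
equivalent of what the new loop does:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

int main(void)
{
        unsigned long key_down = (1UL << 3) | (1UL << 17);

        /* sketch of for_each_set_bit(k, key_down, nbits) */
        for (unsigned long k = 0; k < BITS_PER_LONG; k++) {
                if (!(key_down & (1UL << k)))
                        continue;
                printf("key %lu is down\n", k);  /* 3, then 17 */
        }
        return 0;
}
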
4115 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
4116 index bd523adb9643..e9e29ded3f30 100644
4117 --- a/drivers/tty/vt/vt.c
4118 +++ b/drivers/tty/vt/vt.c
4119 @@ -750,6 +750,7 @@ static void visual_init(struct vc_data *vc, int num, int init)
4120 vc->vc_complement_mask = 0;
4121 vc->vc_can_do_color = 0;
4122 vc->vc_panic_force_write = false;
4123 + vc->vc_cur_blink_ms = DEFAULT_CURSOR_BLINK_MS;
4124 vc->vc_sw->con_init(vc, init);
4125 if (!vc->vc_complement_mask)
4126 vc->vc_complement_mask = vc->vc_can_do_color ? 0x7700 : 0x0800;
4127 diff --git a/drivers/usb/common/usb-otg-fsm.c b/drivers/usb/common/usb-otg-fsm.c
4128 index 504708f59b93..6c6040c22c7a 100644
4129 --- a/drivers/usb/common/usb-otg-fsm.c
4130 +++ b/drivers/usb/common/usb-otg-fsm.c
4131 @@ -21,6 +21,7 @@
4132 * 675 Mass Ave, Cambridge, MA 02139, USA.
4133 */
4134
4135 +#include <linux/module.h>
4136 #include <linux/kernel.h>
4137 #include <linux/types.h>
4138 #include <linux/mutex.h>
4139 @@ -452,3 +453,4 @@ int otg_statemachine(struct otg_fsm *fsm)
4140 return state_changed;
4141 }
4142 EXPORT_SYMBOL_GPL(otg_statemachine);
4143 +MODULE_LICENSE("GPL");
4144 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
4145 index 980fc5774151..2d107d0f61b0 100644
4146 --- a/drivers/usb/core/hcd.c
4147 +++ b/drivers/usb/core/hcd.c
4148 @@ -2597,26 +2597,23 @@ EXPORT_SYMBOL_GPL(usb_create_hcd);
4149 * Don't deallocate the bandwidth_mutex until the last shared usb_hcd is
4150 * deallocated.
4151 *
4152 - * Make sure to only deallocate the bandwidth_mutex when the primary HCD is
4153 - * freed. When hcd_release() is called for either hcd in a peer set
4154 - * invalidate the peer's ->shared_hcd and ->primary_hcd pointers to
4155 - * block new peering attempts
4156 + * Make sure to deallocate the bandwidth_mutex only when the last HCD is
4157 + * freed. When hcd_release() is called for either hcd in a peer set,
4158 + * invalidate the peer's ->shared_hcd and ->primary_hcd pointers.
4159 */
4160 static void hcd_release(struct kref *kref)
4161 {
4162 struct usb_hcd *hcd = container_of (kref, struct usb_hcd, kref);
4163
4164 mutex_lock(&usb_port_peer_mutex);
4165 - if (usb_hcd_is_primary_hcd(hcd)) {
4166 - kfree(hcd->address0_mutex);
4167 - kfree(hcd->bandwidth_mutex);
4168 - }
4169 if (hcd->shared_hcd) {
4170 struct usb_hcd *peer = hcd->shared_hcd;
4171
4172 peer->shared_hcd = NULL;
4173 - if (peer->primary_hcd == hcd)
4174 - peer->primary_hcd = NULL;
4175 + peer->primary_hcd = NULL;
4176 + } else {
4177 + kfree(hcd->address0_mutex);
4178 + kfree(hcd->bandwidth_mutex);
4179 }
4180 mutex_unlock(&usb_port_peer_mutex);
4181 kfree(hcd);
4182 diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
4183 index 3c58d633ce80..dec0b21fc626 100644
4184 --- a/drivers/usb/dwc2/core.h
4185 +++ b/drivers/usb/dwc2/core.h
4186 @@ -64,6 +64,17 @@
4187 DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt), \
4188 dev_name(hsotg->dev), ##__VA_ARGS__)
4189
4190 +#ifdef CONFIG_MIPS
4191 +/*
4192 + * There are some MIPS machines that can run in either big-endian
4193 + * or little-endian mode and that access the dwc2 registers without
4194 + * a byteswap in either mode.
4195 + * Unlike other architectures, MIPS apparently does not require a
4196 + * barrier before the __raw_writel() to synchronize with DMA but does
4197 + * require the barrier after the __raw_writel() to serialize a set of
4198 + * writes. This set of operations was added specifically for MIPS and
4199 + * should only be used there.
4200 + */
4201 static inline u32 dwc2_readl(const void __iomem *addr)
4202 {
4203 u32 value = __raw_readl(addr);
4204 @@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
4205 pr_info("INFO:: wrote %08x to %p\n", value, addr);
4206 #endif
4207 }
4208 +#else
4209 +/* Normal architectures just use readl/writel */
4210 +static inline u32 dwc2_readl(const void __iomem *addr)
4211 +{
4212 + return readl(addr);
4213 +}
4214 +
4215 +static inline void dwc2_writel(u32 value, void __iomem *addr)
4216 +{
4217 + writel(value, addr);
4218 +
4219 +#ifdef DWC2_LOG_WRITES
4220 + pr_info("info:: wrote %08x to %p\n", value, addr);
4221 +#endif
4222 +}
4223 +#endif
4224
4225 /* Maximum number of Endpoints/HostChannels */
4226 #define MAX_EPS_CHANNELS 16
4227 diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
4228 index 7b6d74f0c72f..476c0e3a7150 100644
4229 --- a/drivers/virtio/virtio_balloon.c
4230 +++ b/drivers/virtio/virtio_balloon.c
4231 @@ -75,7 +75,7 @@ struct virtio_balloon {
4232
4233 /* The array of pfns we tell the Host about. */
4234 unsigned int num_pfns;
4235 - u32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
4236 + __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];
4237
4238 /* Memory statistics */
4239 struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR];
4240 @@ -127,14 +127,16 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
4241
4242 }
4243
4244 -static void set_page_pfns(u32 pfns[], struct page *page)
4245 +static void set_page_pfns(struct virtio_balloon *vb,
4246 + __virtio32 pfns[], struct page *page)
4247 {
4248 unsigned int i;
4249
4250 /* Set balloon pfns pointing at this page.
4251 * Note that the first pfn points at start of the page. */
4252 for (i = 0; i < VIRTIO_BALLOON_PAGES_PER_PAGE; i++)
4253 - pfns[i] = page_to_balloon_pfn(page) + i;
4254 + pfns[i] = cpu_to_virtio32(vb->vdev,
4255 + page_to_balloon_pfn(page) + i);
4256 }
4257
4258 static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
4259 @@ -158,7 +160,7 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
4260 msleep(200);
4261 break;
4262 }
4263 - set_page_pfns(vb->pfns + vb->num_pfns, page);
4264 + set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
4265 vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
4266 if (!virtio_has_feature(vb->vdev,
4267 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
4268 @@ -177,10 +179,12 @@ static unsigned fill_balloon(struct virtio_balloon *vb, size_t num)
4269 static void release_pages_balloon(struct virtio_balloon *vb)
4270 {
4271 unsigned int i;
4272 + struct page *page;
4273
4274 /* Find pfns pointing at start of each page, get pages and free them. */
4275 for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
4276 - struct page *page = balloon_pfn_to_page(vb->pfns[i]);
4277 + page = balloon_pfn_to_page(virtio32_to_cpu(vb->vdev,
4278 + vb->pfns[i]));
4279 if (!virtio_has_feature(vb->vdev,
4280 VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
4281 adjust_managed_page_count(page, 1);
4282 @@ -203,7 +207,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
4283 page = balloon_page_dequeue(vb_dev_info);
4284 if (!page)
4285 break;
4286 - set_page_pfns(vb->pfns + vb->num_pfns, page);
4287 + set_page_pfns(vb, vb->pfns + vb->num_pfns, page);
4288 vb->num_pages -= VIRTIO_BALLOON_PAGES_PER_PAGE;
4289 }
4290
4291 @@ -471,13 +475,13 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
4292 __count_vm_event(BALLOON_MIGRATE);
4293 spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags);
4294 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
4295 - set_page_pfns(vb->pfns, newpage);
4296 + set_page_pfns(vb, vb->pfns, newpage);
4297 tell_host(vb, vb->inflate_vq);
4298
4299 /* balloon's page migration 2nd step -- deflate "page" */
4300 balloon_page_delete(page);
4301 vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE;
4302 - set_page_pfns(vb->pfns, page);
4303 + set_page_pfns(vb, vb->pfns, page);
4304 tell_host(vb, vb->deflate_vq);
4305
4306 mutex_unlock(&vb->balloon_lock);
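
pfns[] becomes __virtio32 because virtio fields use device endianness
(little-endian for modern devices), so every store has to pass through
cpu_to_virtio32() and every load through virtio32_to_cpu(). A standalone
sketch of what the conversion guarantees -- the helper below is a stand-in
for illustration, not the kernel's implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for cpu_to_virtio32() in the modern-device case: produce
 * a value whose in-memory byte order is little-endian on any host. */
static uint32_t cpu_to_virtio32_sketch(uint32_t v)
{
        uint8_t b[4] = {
                (uint8_t)v, (uint8_t)(v >> 8),
                (uint8_t)(v >> 16), (uint8_t)(v >> 24),
        };
        uint32_t wire;

        memcpy(&wire, b, sizeof(wire));
        return wire;
}

int main(void)
{
        uint32_t pfn = 0x12345678;

        /* no-op on a little-endian host, a byte swap on big-endian --
         * exactly the property the fix makes explicit in the types */
        printf("cpu 0x%08x -> wire 0x%08x\n", pfn,
               cpu_to_virtio32_sketch(pfn));
        return 0;
}
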
4307 diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
4308 index d46839f51e73..e4db19e88ab1 100644
4309 --- a/drivers/xen/balloon.c
4310 +++ b/drivers/xen/balloon.c
4311 @@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
4312 static void balloon_process(struct work_struct *work);
4313 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
4314
4315 -static void release_memory_resource(struct resource *resource);
4316 -
4317 /* When ballooning out (allocating memory to return to Xen) we don't really
4318 want the kernel to try too hard since that can trigger the oom killer. */
4319 #define GFP_BALLOON \
4320 @@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
4321 }
4322
4323 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
4324 +static void release_memory_resource(struct resource *resource)
4325 +{
4326 + if (!resource)
4327 + return;
4328 +
4329 + /*
4330 + * No need to reset region to identity mapped since we now
4331 + * know that no I/O can be in this region
4332 + */
4333 + release_resource(resource);
4334 + kfree(resource);
4335 +}
4336 +
4337 static struct resource *additional_memory_resource(phys_addr_t size)
4338 {
4339 struct resource *res;
4340 @@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
4341 return res;
4342 }
4343
4344 -static void release_memory_resource(struct resource *resource)
4345 -{
4346 - if (!resource)
4347 - return;
4348 -
4349 - /*
4350 - * No need to reset region to identity mapped since we now
4351 - * know that no I/O can be in this region
4352 - */
4353 - release_resource(resource);
4354 - kfree(resource);
4355 -}
4356 -
4357 static enum bp_state reserve_additional_memory(void)
4358 {
4359 long credit;
4360 diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
4361 index 076970a54f89..4ce10bcca18b 100644
4362 --- a/drivers/xen/xen-acpi-processor.c
4363 +++ b/drivers/xen/xen-acpi-processor.c
4364 @@ -423,36 +423,7 @@ upload:
4365
4366 return 0;
4367 }
4368 -static int __init check_prereq(void)
4369 -{
4370 - struct cpuinfo_x86 *c = &cpu_data(0);
4371 -
4372 - if (!xen_initial_domain())
4373 - return -ENODEV;
4374 -
4375 - if (!acpi_gbl_FADT.smi_command)
4376 - return -ENODEV;
4377 -
4378 - if (c->x86_vendor == X86_VENDOR_INTEL) {
4379 - if (!cpu_has(c, X86_FEATURE_EST))
4380 - return -ENODEV;
4381
4382 - return 0;
4383 - }
4384 - if (c->x86_vendor == X86_VENDOR_AMD) {
4385 - /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
4386 - * as we get compile warnings for the static functions.
4387 - */
4388 -#define CPUID_FREQ_VOLT_CAPABILITIES 0x80000007
4389 -#define USE_HW_PSTATE 0x00000080
4390 - u32 eax, ebx, ecx, edx;
4391 - cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
4392 - if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
4393 - return -ENODEV;
4394 - return 0;
4395 - }
4396 - return -ENODEV;
4397 -}
4398 /* acpi_perf_data is a pointer to percpu data. */
4399 static struct acpi_processor_performance __percpu *acpi_perf_data;
4400
4401 @@ -509,10 +480,10 @@ struct notifier_block xen_acpi_processor_resume_nb = {
4402 static int __init xen_acpi_processor_init(void)
4403 {
4404 unsigned int i;
4405 - int rc = check_prereq();
4406 + int rc;
4407
4408 - if (rc)
4409 - return rc;
4410 + if (!xen_initial_domain())
4411 + return -ENODEV;
4412
4413 nr_acpi_bits = get_max_acpi_id() + 1;
4414 acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits), sizeof(unsigned long), GFP_KERNEL);
4415 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
4416 index ec7928a27aaa..234707cc419c 100644
4417 --- a/fs/btrfs/ctree.c
4418 +++ b/fs/btrfs/ctree.c
4419 @@ -1552,6 +1552,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
4420 trans->transid, root->fs_info->generation);
4421
4422 if (!should_cow_block(trans, root, buf)) {
4423 + trans->dirty = true;
4424 *cow_ret = buf;
4425 return 0;
4426 }
4427 @@ -2773,8 +2774,10 @@ again:
4428 * then we don't want to set the path blocking,
4429 * so we test it here
4430 */
4431 - if (!should_cow_block(trans, root, b))
4432 + if (!should_cow_block(trans, root, b)) {
4433 + trans->dirty = true;
4434 goto cow_done;
4435 + }
4436
4437 /*
4438 * must have write locks on this node and the
4439 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4440 index 84e060eb0de8..78f1b57d0b46 100644
4441 --- a/fs/btrfs/extent-tree.c
4442 +++ b/fs/btrfs/extent-tree.c
4443 @@ -7929,7 +7929,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4444 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4445 buf->start + buf->len - 1, GFP_NOFS);
4446 }
4447 - trans->blocks_used++;
4448 + trans->dirty = true;
4449 /* this returns a buffer locked for blocking */
4450 return buf;
4451 }
4452 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
4453 index 00b8f37cc306..d7c138f42bdf 100644
4454 --- a/fs/btrfs/super.c
4455 +++ b/fs/btrfs/super.c
4456 @@ -239,7 +239,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
4457 trans->aborted = errno;
4458 /* Nothing used. The other threads that have joined this
4459 * transaction may be able to continue. */
4460 - if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
4461 + if (!trans->dirty && list_empty(&trans->new_bgs)) {
4462 const char *errstr;
4463
4464 errstr = btrfs_decode_error(errno);
4465 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
4466 index 72be51f7ca2f..c0b501a5a353 100644
4467 --- a/fs/btrfs/transaction.h
4468 +++ b/fs/btrfs/transaction.h
4469 @@ -110,7 +110,6 @@ struct btrfs_trans_handle {
4470 u64 chunk_bytes_reserved;
4471 unsigned long use_count;
4472 unsigned long blocks_reserved;
4473 - unsigned long blocks_used;
4474 unsigned long delayed_ref_updates;
4475 struct btrfs_transaction *transaction;
4476 struct btrfs_block_rsv *block_rsv;
4477 @@ -121,6 +120,7 @@ struct btrfs_trans_handle {
4478 bool can_flush_pending_bgs;
4479 bool reloc_reserved;
4480 bool sync;
4481 + bool dirty;
4482 unsigned int type;
4483 /*
4484 * this root is only needed to validate that the root passed to
4485 diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
4486 index 5a53ac6b1e02..02b071bf3732 100644
4487 --- a/fs/cifs/cifs_unicode.c
4488 +++ b/fs/cifs/cifs_unicode.c
4489 @@ -101,6 +101,12 @@ convert_sfm_char(const __u16 src_char, char *target)
4490 case SFM_SLASH:
4491 *target = '\\';
4492 break;
4493 + case SFM_SPACE:
4494 + *target = ' ';
4495 + break;
4496 + case SFM_PERIOD:
4497 + *target = '.';
4498 + break;
4499 default:
4500 return false;
4501 }
4502 @@ -404,7 +410,7 @@ static __le16 convert_to_sfu_char(char src_char)
4503 return dest_char;
4504 }
4505
4506 -static __le16 convert_to_sfm_char(char src_char)
4507 +static __le16 convert_to_sfm_char(char src_char, bool end_of_string)
4508 {
4509 __le16 dest_char;
4510
4511 @@ -427,6 +433,18 @@ static __le16 convert_to_sfm_char(char src_char)
4512 case '|':
4513 dest_char = cpu_to_le16(SFM_PIPE);
4514 break;
4515 + case '.':
4516 + if (end_of_string)
4517 + dest_char = cpu_to_le16(SFM_PERIOD);
4518 + else
4519 + dest_char = 0;
4520 + break;
4521 + case ' ':
4522 + if (end_of_string)
4523 + dest_char = cpu_to_le16(SFM_SPACE);
4524 + else
4525 + dest_char = 0;
4526 + break;
4527 default:
4528 dest_char = 0;
4529 }
4530 @@ -469,9 +487,16 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
4531 /* see if we must remap this char */
4532 if (map_chars == SFU_MAP_UNI_RSVD)
4533 dst_char = convert_to_sfu_char(src_char);
4534 - else if (map_chars == SFM_MAP_UNI_RSVD)
4535 - dst_char = convert_to_sfm_char(src_char);
4536 - else
4537 + else if (map_chars == SFM_MAP_UNI_RSVD) {
4538 + bool end_of_string;
4539 +
4540 + if (i == srclen - 1)
4541 + end_of_string = true;
4542 + else
4543 + end_of_string = false;
4544 +
4545 + dst_char = convert_to_sfm_char(src_char, end_of_string);
4546 + } else
4547 dst_char = 0;
4548 /*
4549 * FIXME: We can not handle remapping backslash (UNI_SLASH)
4550 diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
4551 index bdc52cb9a676..479bc0a941f3 100644
4552 --- a/fs/cifs/cifs_unicode.h
4553 +++ b/fs/cifs/cifs_unicode.h
4554 @@ -64,6 +64,8 @@
4555 #define SFM_LESSTHAN ((__u16) 0xF023)
4556 #define SFM_PIPE ((__u16) 0xF027)
4557 #define SFM_SLASH ((__u16) 0xF026)
4558 +#define SFM_PERIOD ((__u16) 0xF028)
4559 +#define SFM_SPACE ((__u16) 0xF029)
4560
4561 /*
4562 * Mapping mechanism to use when one of the seven reserved characters is
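
The cifs change remaps '.' and ' ' only when they are the final character of
a name, following the SFM (Services for Mac) convention: Windows servers
reject names with trailing dots or spaces, so those are carried in the
private-use codepoints SFM_PERIOD (0xF028) and SFM_SPACE (0xF029) and mapped
back by convert_sfm_char() on the way in. A sketch of the end-of-string test:

#include <stdio.h>

#define SFM_PERIOD 0xF028
#define SFM_SPACE  0xF029

/* Remap '.' and ' ' only in the final position; 0 = no remapping. */
static unsigned int map_char(char c, int end_of_string)
{
        switch (c) {
        case '.':
                return end_of_string ? SFM_PERIOD : 0;
        case ' ':
                return end_of_string ? SFM_SPACE : 0;
        default:
                return 0;
        }
}

int main(void)
{
        const char *name = "notes.";
        int len = 6;

        for (int i = 0; i < len; i++)
                printf("'%c' -> %#06x\n", name[i],
                       map_char(name[i], i == len - 1));
        return 0;               /* only the trailing '.' maps to 0xf028 */
}
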
4563 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
4564 index 6f62ac821a84..34cbc582e8d7 100644
4565 --- a/fs/cifs/connect.c
4566 +++ b/fs/cifs/connect.c
4567 @@ -428,7 +428,9 @@ cifs_echo_request(struct work_struct *work)
4568 * server->ops->need_neg() == true. Also, no need to ping if
4569 * we got a response recently.
4570 */
4571 - if (!server->ops->need_neg || server->ops->need_neg(server) ||
4572 +
4573 + if (server->tcpStatus == CifsNeedReconnect ||
4574 + server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
4575 (server->ops->can_echo && !server->ops->can_echo(server)) ||
4576 time_before(jiffies, server->lstrp + echo_interval - HZ))
4577 goto requeue_echo;
4578 diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
4579 index 848249fa120f..3079b38f0afb 100644
4580 --- a/fs/cifs/ntlmssp.h
4581 +++ b/fs/cifs/ntlmssp.h
4582 @@ -133,6 +133,6 @@ typedef struct _AUTHENTICATE_MESSAGE {
4583
4584 int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
4585 void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
4586 -int build_ntlmssp_auth_blob(unsigned char *pbuffer, u16 *buflen,
4587 +int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
4588 struct cifs_ses *ses,
4589 const struct nls_table *nls_cp);
4590 diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
4591 index af0ec2d5ad0e..e88ffe1da045 100644
4592 --- a/fs/cifs/sess.c
4593 +++ b/fs/cifs/sess.c
4594 @@ -364,19 +364,43 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
4595 sec_blob->DomainName.MaximumLength = 0;
4596 }
4597
4598 -/* We do not malloc the blob, it is passed in pbuffer, because its
4599 - maximum possible size is fixed and small, making this approach cleaner.
4600 - This function returns the length of the data in the blob */
4601 -int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4602 +static int size_of_ntlmssp_blob(struct cifs_ses *ses)
4603 +{
4604 + int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
4605 + - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
4606 +
4607 + if (ses->domainName)
4608 + sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
4609 + else
4610 + sz += 2;
4611 +
4612 + if (ses->user_name)
4613 + sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
4614 + else
4615 + sz += 2;
4616 +
4617 + return sz;
4618 +}
4619 +
4620 +int build_ntlmssp_auth_blob(unsigned char **pbuffer,
4621 u16 *buflen,
4622 struct cifs_ses *ses,
4623 const struct nls_table *nls_cp)
4624 {
4625 int rc;
4626 - AUTHENTICATE_MESSAGE *sec_blob = (AUTHENTICATE_MESSAGE *)pbuffer;
4627 + AUTHENTICATE_MESSAGE *sec_blob;
4628 __u32 flags;
4629 unsigned char *tmp;
4630
4631 + rc = setup_ntlmv2_rsp(ses, nls_cp);
4632 + if (rc) {
4633 + cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
4634 + *buflen = 0;
4635 + goto setup_ntlmv2_ret;
4636 + }
4637 + *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
4638 + sec_blob = (AUTHENTICATE_MESSAGE *)*pbuffer;
4639 +
4640 memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
4641 sec_blob->MessageType = NtLmAuthenticate;
4642
4643 @@ -391,7 +415,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4644 flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
4645 }
4646
4647 - tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE);
4648 + tmp = *pbuffer + sizeof(AUTHENTICATE_MESSAGE);
4649 sec_blob->NegotiateFlags = cpu_to_le32(flags);
4650
4651 sec_blob->LmChallengeResponse.BufferOffset =
4652 @@ -399,13 +423,9 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4653 sec_blob->LmChallengeResponse.Length = 0;
4654 sec_blob->LmChallengeResponse.MaximumLength = 0;
4655
4656 - sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer);
4657 + sec_blob->NtChallengeResponse.BufferOffset =
4658 + cpu_to_le32(tmp - *pbuffer);
4659 if (ses->user_name != NULL) {
4660 - rc = setup_ntlmv2_rsp(ses, nls_cp);
4661 - if (rc) {
4662 - cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc);
4663 - goto setup_ntlmv2_ret;
4664 - }
4665 memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE,
4666 ses->auth_key.len - CIFS_SESS_KEY_SIZE);
4667 tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE;
4668 @@ -423,7 +443,7 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4669 }
4670
4671 if (ses->domainName == NULL) {
4672 - sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4673 + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4674 sec_blob->DomainName.Length = 0;
4675 sec_blob->DomainName.MaximumLength = 0;
4676 tmp += 2;
4677 @@ -432,14 +452,14 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4678 len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
4679 CIFS_MAX_USERNAME_LEN, nls_cp);
4680 len *= 2; /* unicode is 2 bytes each */
4681 - sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4682 + sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4683 sec_blob->DomainName.Length = cpu_to_le16(len);
4684 sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
4685 tmp += len;
4686 }
4687
4688 if (ses->user_name == NULL) {
4689 - sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4690 + sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4691 sec_blob->UserName.Length = 0;
4692 sec_blob->UserName.MaximumLength = 0;
4693 tmp += 2;
4694 @@ -448,13 +468,13 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4695 len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
4696 CIFS_MAX_USERNAME_LEN, nls_cp);
4697 len *= 2; /* unicode is 2 bytes each */
4698 - sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4699 + sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4700 sec_blob->UserName.Length = cpu_to_le16(len);
4701 sec_blob->UserName.MaximumLength = cpu_to_le16(len);
4702 tmp += len;
4703 }
4704
4705 - sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - pbuffer);
4706 + sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4707 sec_blob->WorkstationName.Length = 0;
4708 sec_blob->WorkstationName.MaximumLength = 0;
4709 tmp += 2;
4710 @@ -463,19 +483,19 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer,
4711 (ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
4712 && !calc_seckey(ses)) {
4713 memcpy(tmp, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE);
4714 - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
4715 + sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4716 sec_blob->SessionKey.Length = cpu_to_le16(CIFS_CPHTXT_SIZE);
4717 sec_blob->SessionKey.MaximumLength =
4718 cpu_to_le16(CIFS_CPHTXT_SIZE);
4719 tmp += CIFS_CPHTXT_SIZE;
4720 } else {
4721 - sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - pbuffer);
4722 + sec_blob->SessionKey.BufferOffset = cpu_to_le32(tmp - *pbuffer);
4723 sec_blob->SessionKey.Length = 0;
4724 sec_blob->SessionKey.MaximumLength = 0;
4725 }
4726
4727 + *buflen = tmp - *pbuffer;
4728 setup_ntlmv2_ret:
4729 - *buflen = tmp - pbuffer;
4730 return rc;
4731 }
4732
4733 @@ -1266,7 +1286,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
4734 struct cifs_ses *ses = sess_data->ses;
4735 __u16 bytes_remaining;
4736 char *bcc_ptr;
4737 - char *ntlmsspblob = NULL;
4738 + unsigned char *ntlmsspblob = NULL;
4739 u16 blob_len;
4740
4741 cifs_dbg(FYI, "rawntlmssp session setup authenticate phase\n");
4742 @@ -1279,19 +1299,7 @@ sess_auth_rawntlmssp_authenticate(struct sess_data *sess_data)
4743 /* Build security blob before we assemble the request */
4744 pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
4745 smb_buf = (struct smb_hdr *)pSMB;
4746 - /*
4747 - * 5 is an empirical value, large enough to hold
4748 - * authenticate message plus max 10 of av paris,
4749 - * domain, user, workstation names, flags, etc.
4750 - */
4751 - ntlmsspblob = kzalloc(5*sizeof(struct _AUTHENTICATE_MESSAGE),
4752 - GFP_KERNEL);
4753 - if (!ntlmsspblob) {
4754 - rc = -ENOMEM;
4755 - goto out;
4756 - }
4757 -
4758 - rc = build_ntlmssp_auth_blob(ntlmsspblob,
4759 + rc = build_ntlmssp_auth_blob(&ntlmsspblob,
4760 &blob_len, ses, sess_data->nls_cp);
4761 if (rc)
4762 goto out_free_ntlmsspblob;
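
build_ntlmssp_auth_blob() above now computes the exact blob size from the session's user and domain names and allocates the buffer itself, instead of every caller guessing with 5 * sizeof(AUTHENTICATE_MESSAGE); note the hunk uses the kmalloc() result without a NULL check. A userspace sketch of the size-then-allocate pattern (header layout and names are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative "compute exact size, then allocate": a fixed header
 * plus two UTF-16 strings (2 bytes per char, no terminator). */
struct hdr { char signature[8]; unsigned type; };

static size_t blob_size(const char *user, const char *domain)
{
    size_t sz = sizeof(struct hdr);

    sz += 2 * (user   ? strlen(user)   : 1);
    sz += 2 * (domain ? strlen(domain) : 1);
    return sz;
}

static unsigned char *build_blob(const char *user, const char *domain,
                                 size_t *len)
{
    unsigned char *buf = malloc(blob_size(user, domain));

    if (!buf)               /* unlike the hunk above, check it */
        return NULL;
    memcpy(buf, "SKETCH\0", 8);
    /* ... append UTF-16 user and domain fields here ... */
    *len = blob_size(user, domain);
    return buf;
}

int main(void)
{
    size_t len;
    unsigned char *b = build_blob("alice", "EXAMPLE", &len);

    if (b) { printf("blob of %zu bytes\n", len); free(b); }
    return 0;
}
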
4763 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4764 index 8f38e33d365b..29e06db5f187 100644
4765 --- a/fs/cifs/smb2pdu.c
4766 +++ b/fs/cifs/smb2pdu.c
4767 @@ -588,7 +588,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses,
4768 u16 blob_length = 0;
4769 struct key *spnego_key = NULL;
4770 char *security_blob = NULL;
4771 - char *ntlmssp_blob = NULL;
4772 + unsigned char *ntlmssp_blob = NULL;
4773 bool use_spnego = false; /* else use raw ntlmssp */
4774
4775 cifs_dbg(FYI, "Session Setup\n");
4776 @@ -713,13 +713,7 @@ ssetup_ntlmssp_authenticate:
4777 iov[1].iov_len = blob_length;
4778 } else if (phase == NtLmAuthenticate) {
4779 req->hdr.SessionId = ses->Suid;
4780 - ntlmssp_blob = kzalloc(sizeof(struct _NEGOTIATE_MESSAGE) + 500,
4781 - GFP_KERNEL);
4782 - if (ntlmssp_blob == NULL) {
4783 - rc = -ENOMEM;
4784 - goto ssetup_exit;
4785 - }
4786 - rc = build_ntlmssp_auth_blob(ntlmssp_blob, &blob_length, ses,
4787 + rc = build_ntlmssp_auth_blob(&ntlmssp_blob, &blob_length, ses,
4788 nls_cp);
4789 if (rc) {
4790 cifs_dbg(FYI, "build_ntlmssp_auth_blob failed %d\n",
4791 @@ -1818,6 +1812,33 @@ SMB2_echo(struct TCP_Server_Info *server)
4792
4793 cifs_dbg(FYI, "In echo request\n");
4794
4795 + if (server->tcpStatus == CifsNeedNegotiate) {
4796 + struct list_head *tmp, *tmp2;
4797 + struct cifs_ses *ses;
4798 + struct cifs_tcon *tcon;
4799 +
4800 + cifs_dbg(FYI, "Need negotiate, reconnecting tcons\n");
4801 + spin_lock(&cifs_tcp_ses_lock);
4802 + list_for_each(tmp, &server->smb_ses_list) {
4803 + ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
4804 + list_for_each(tmp2, &ses->tcon_list) {
4805 + tcon = list_entry(tmp2, struct cifs_tcon,
4806 + tcon_list);
4807 + /* add check for persistent handle reconnect */
4808 + if (tcon && tcon->need_reconnect) {
4809 + spin_unlock(&cifs_tcp_ses_lock);
4810 + rc = smb2_reconnect(SMB2_ECHO, tcon);
4811 + spin_lock(&cifs_tcp_ses_lock);
4812 + }
4813 + }
4814 + }
4815 + spin_unlock(&cifs_tcp_ses_lock);
4816 + }
4817 +
4818 + /* if no session, renegotiate failed above */
4819 + if (server->tcpStatus == CifsNeedNegotiate)
4820 + return -EIO;
4821 +
4822 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req);
4823 if (rc)
4824 return rc;
4825 diff --git a/fs/namei.c b/fs/namei.c
4826 index 30145f8f21ed..aaa3b693ec0b 100644
4827 --- a/fs/namei.c
4828 +++ b/fs/namei.c
4829 @@ -3173,6 +3173,10 @@ retry_lookup:
4830 got_write = false;
4831 }
4832
4833 + error = follow_managed(&path, nd);
4834 + if (unlikely(error < 0))
4835 + return error;
4836 +
4837 if (unlikely(d_is_negative(path.dentry))) {
4838 path_to_nameidata(&path, nd);
4839 return -ENOENT;
4840 @@ -3188,10 +3192,6 @@ retry_lookup:
4841 return -EEXIST;
4842 }
4843
4844 - error = follow_managed(&path, nd);
4845 - if (unlikely(error < 0))
4846 - return error;
4847 -
4848 seq = 0; /* out of RCU mode, so the value doesn't matter */
4849 inode = d_backing_inode(path.dentry);
4850 finish_lookup:
4851 diff --git a/fs/namespace.c b/fs/namespace.c
4852 index 4fb1691b4355..783004af5707 100644
4853 --- a/fs/namespace.c
4854 +++ b/fs/namespace.c
4855 @@ -2409,8 +2409,10 @@ static int do_new_mount(struct path *path, const char *fstype, int flags,
4856 mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
4857 }
4858 if (type->fs_flags & FS_USERNS_VISIBLE) {
4859 - if (!fs_fully_visible(type, &mnt_flags))
4860 + if (!fs_fully_visible(type, &mnt_flags)) {
4861 + put_filesystem(type);
4862 return -EPERM;
4863 + }
4864 }
4865 }
4866
4867 @@ -3245,6 +3247,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
4868 if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
4869 mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
4870
4871 + /* Don't miss readonly hidden in the superblock flags */
4872 + if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
4873 + mnt_flags |= MNT_LOCK_READONLY;
4874 +
4875 /* Verify the mount flags are equal to or more permissive
4876 * than the proposed new mount.
4877 */
4878 @@ -3271,7 +3277,7 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
4879 list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
4880 struct inode *inode = child->mnt_mountpoint->d_inode;
4881 /* Only worry about locked mounts */
4882 - if (!(mnt_flags & MNT_LOCKED))
4883 + if (!(child->mnt.mnt_flags & MNT_LOCKED))
4884 continue;
4885 /* Is the directory permanetly empty? */
4886 if (!is_empty_dir_inode(inode))
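
Two distinct fixes sit in fs_fully_visible() above: a filesystem mounted read-only at the superblock level (MS_RDONLY) carries no per-mount read-only flag, so that restriction must be folded in before comparing against the proposed mount, and the child-mount loop was testing the parent's mnt_flags instead of each child's own. A simplified sketch of the fold-then-compare step (flag names are stand-ins, not the kernel's MNT_LOCK_* bits):

#include <stdbool.h>
#include <stdio.h>

#define F_RDONLY 0x1
#define F_NODEV  0x2

/* The mount may stay visible only if the proposed new mount carries
 * every restriction of the existing one, including read-only state
 * hidden in the superblock rather than the mount flags. */
static bool visible_ok(unsigned existing, bool sb_rdonly, unsigned proposed)
{
    if (sb_rdonly)
        existing |= F_RDONLY;   /* the fix: fold MS_RDONLY in */
    return (existing & ~proposed) == 0;
}

int main(void)
{
    printf("%d\n", visible_ok(0, true, F_NODEV));            /* 0 */
    printf("%d\n", visible_ok(0, true, F_NODEV | F_RDONLY)); /* 1 */
    return 0;
}
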
4887 diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
4888 index 33eb81738d03..a7dd1fee8f13 100644
4889 --- a/fs/nfs/dir.c
4890 +++ b/fs/nfs/dir.c
4891 @@ -1527,9 +1527,9 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
4892 err = PTR_ERR(inode);
4893 trace_nfs_atomic_open_exit(dir, ctx, open_flags, err);
4894 put_nfs_open_context(ctx);
4895 + d_drop(dentry);
4896 switch (err) {
4897 case -ENOENT:
4898 - d_drop(dentry);
4899 d_add(dentry, NULL);
4900 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
4901 break;
4902 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
4903 index 327b8c34d360..de2523f5e7c9 100644
4904 --- a/fs/nfs/nfs4proc.c
4905 +++ b/fs/nfs/nfs4proc.c
4906 @@ -2860,12 +2860,11 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
4907 call_close |= is_wronly;
4908 else if (is_wronly)
4909 calldata->arg.fmode |= FMODE_WRITE;
4910 + if (calldata->arg.fmode != (FMODE_READ|FMODE_WRITE))
4911 + call_close |= is_rdwr;
4912 } else if (is_rdwr)
4913 calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
4914
4915 - if (calldata->arg.fmode == 0)
4916 - call_close |= is_rdwr;
4917 -
4918 if (!nfs4_valid_open_stateid(state))
4919 call_close = 0;
4920 spin_unlock(&state->owner->so_lock);
4921 diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
4922 index 776dccbc306d..dcb70001ae2c 100644
4923 --- a/fs/nfs/pnfs_nfs.c
4924 +++ b/fs/nfs/pnfs_nfs.c
4925 @@ -247,7 +247,11 @@ void pnfs_fetch_commit_bucket_list(struct list_head *pages,
4926 }
4927
4928 /* Helper function for pnfs_generic_commit_pagelist to catch an empty
4929 - * page list. This can happen when two commits race. */
4930 + * page list. This can happen when two commits race.
4931 + *
4932 + * This must be called instead of nfs_init_commit - call one or the other, but
4933 + * not both!
4934 + */
4935 static bool
4936 pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
4937 struct nfs_commit_data *data,
4938 @@ -256,7 +260,11 @@ pnfs_generic_commit_cancel_empty_pagelist(struct list_head *pages,
4939 if (list_empty(pages)) {
4940 if (atomic_dec_and_test(&cinfo->mds->rpcs_out))
4941 wake_up_atomic_t(&cinfo->mds->rpcs_out);
4942 - nfs_commitdata_release(data);
4943 + /* don't call nfs_commitdata_release - it tries to put
4944 + * the open_context which is not acquired until nfs_init_commit
4945 + * which has not been called on @data */
4946 + WARN_ON_ONCE(data->context);
4947 + nfs_commit_free(data);
4948 return true;
4949 }
4950
4951 diff --git a/fs/nfs/read.c b/fs/nfs/read.c
4952 index 6776d7a7839e..572e5b3b06f1 100644
4953 --- a/fs/nfs/read.c
4954 +++ b/fs/nfs/read.c
4955 @@ -367,13 +367,13 @@ readpage_async_filler(void *data, struct page *page)
4956 nfs_list_remove_request(new);
4957 nfs_readpage_release(new);
4958 error = desc->pgio->pg_error;
4959 - goto out_unlock;
4960 + goto out;
4961 }
4962 return 0;
4963 out_error:
4964 error = PTR_ERR(new);
4965 -out_unlock:
4966 unlock_page(page);
4967 +out:
4968 return error;
4969 }
4970
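
The readpage_async_filler() fix above is a goto-unwind ordering bug: after nfs_readpage_release() the page must not be unlocked again, so the error path now jumps past unlock_page() instead of through it. A generic, self-contained illustration of the shape of the fix (hypothetical function):

#include <stdio.h>
#include <stdlib.h>

/* An early failure still owns the buffer and unwinds through
 * out_free; a failure after the helper has already released it
 * must jump past the release label. */
static int process(int early_fail, int late_fail)
{
    int error = 0;
    char *buf = calloc(1, 16);

    if (!buf)
        return -1;

    if (early_fail) {
        error = -22;
        goto out_free;      /* we still own buf */
    }

    if (late_fail) {
        free(buf);          /* helper consumed and released buf */
        error = -5;
        goto out;           /* NOT out_free: that would double-free */
    }

out_free:
    free(buf);
out:
    return error;
}

int main(void)
{
    printf("%d %d %d\n", process(0, 0), process(1, 0), process(0, 1));
    return 0;
}
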
4971 diff --git a/fs/nfsd/nfs2acl.c b/fs/nfsd/nfs2acl.c
4972 index 1580ea6fd64d..d08cd88155c7 100644
4973 --- a/fs/nfsd/nfs2acl.c
4974 +++ b/fs/nfsd/nfs2acl.c
4975 @@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
4976 goto out;
4977
4978 inode = d_inode(fh->fh_dentry);
4979 - if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
4980 - error = -EOPNOTSUPP;
4981 - goto out_errno;
4982 - }
4983
4984 error = fh_want_write(fh);
4985 if (error)
4986 goto out_errno;
4987
4988 - error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
4989 + fh_lock(fh);
4990 +
4991 + error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
4992 if (error)
4993 - goto out_drop_write;
4994 - error = inode->i_op->set_acl(inode, argp->acl_default,
4995 - ACL_TYPE_DEFAULT);
4996 + goto out_drop_lock;
4997 + error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
4998 if (error)
4999 - goto out_drop_write;
5000 + goto out_drop_lock;
5001 +
5002 + fh_unlock(fh);
5003
5004 fh_drop_write(fh);
5005
5006 @@ -131,7 +130,8 @@ out:
5007 posix_acl_release(argp->acl_access);
5008 posix_acl_release(argp->acl_default);
5009 return nfserr;
5010 -out_drop_write:
5011 +out_drop_lock:
5012 + fh_unlock(fh);
5013 fh_drop_write(fh);
5014 out_errno:
5015 nfserr = nfserrno(error);
5016 diff --git a/fs/nfsd/nfs3acl.c b/fs/nfsd/nfs3acl.c
5017 index 01df4cd7c753..0c890347cde3 100644
5018 --- a/fs/nfsd/nfs3acl.c
5019 +++ b/fs/nfsd/nfs3acl.c
5020 @@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
5021 goto out;
5022
5023 inode = d_inode(fh->fh_dentry);
5024 - if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
5025 - error = -EOPNOTSUPP;
5026 - goto out_errno;
5027 - }
5028
5029 error = fh_want_write(fh);
5030 if (error)
5031 goto out_errno;
5032
5033 - error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
5034 + fh_lock(fh);
5035 +
5036 + error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
5037 if (error)
5038 - goto out_drop_write;
5039 - error = inode->i_op->set_acl(inode, argp->acl_default,
5040 - ACL_TYPE_DEFAULT);
5041 + goto out_drop_lock;
5042 + error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
5043
5044 -out_drop_write:
5045 +out_drop_lock:
5046 + fh_unlock(fh);
5047 fh_drop_write(fh);
5048 out_errno:
5049 nfserr = nfserrno(error);
5050 diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
5051 index 6adabd6049b7..71292a0d6f09 100644
5052 --- a/fs/nfsd/nfs4acl.c
5053 +++ b/fs/nfsd/nfs4acl.c
5054 @@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
5055 dentry = fhp->fh_dentry;
5056 inode = d_inode(dentry);
5057
5058 - if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
5059 - return nfserr_attrnotsupp;
5060 -
5061 if (S_ISDIR(inode->i_mode))
5062 flags = NFS4_ACL_DIR;
5063
5064 @@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
5065 if (host_error < 0)
5066 goto out_nfserr;
5067
5068 - host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
5069 + fh_lock(fhp);
5070 +
5071 + host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
5072 if (host_error < 0)
5073 - goto out_release;
5074 + goto out_drop_lock;
5075
5076 if (S_ISDIR(inode->i_mode)) {
5077 - host_error = inode->i_op->set_acl(inode, dpacl,
5078 - ACL_TYPE_DEFAULT);
5079 + host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
5080 }
5081
5082 -out_release:
5083 +out_drop_lock:
5084 + fh_unlock(fhp);
5085 +
5086 posix_acl_release(pacl);
5087 posix_acl_release(dpacl);
5088 out_nfserr:
5089 diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
5090 index 7389cb1d7409..04c68d900324 100644
5091 --- a/fs/nfsd/nfs4callback.c
5092 +++ b/fs/nfsd/nfs4callback.c
5093 @@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
5094 }
5095 }
5096
5097 -static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
5098 -{
5099 - struct rpc_xprt *xprt;
5100 -
5101 - if (args->protocol != XPRT_TRANSPORT_BC_TCP)
5102 - return rpc_create(args);
5103 -
5104 - xprt = args->bc_xprt->xpt_bc_xprt;
5105 - if (xprt) {
5106 - xprt_get(xprt);
5107 - return rpc_create_xprt(args, xprt);
5108 - }
5109 -
5110 - return rpc_create(args);
5111 -}
5112 -
5113 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
5114 {
5115 int maxtime = max_cb_time(clp->net);
5116 @@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
5117 args.authflavor = ses->se_cb_sec.flavor;
5118 }
5119 /* Create RPC client */
5120 - client = create_backchannel_client(&args);
5121 + client = rpc_create(&args);
5122 if (IS_ERR(client)) {
5123 dprintk("NFSD: couldn't create callback client: %ld\n",
5124 PTR_ERR(client));
5125 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
5126 index 0462eeddfff9..9e04e49df681 100644
5127 --- a/fs/nfsd/nfs4state.c
5128 +++ b/fs/nfsd/nfs4state.c
5129 @@ -3487,6 +3487,10 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
5130 struct nfs4_openowner *oo = open->op_openowner;
5131 struct nfs4_ol_stateid *retstp = NULL;
5132
5133 + /* We are moving these outside of the spinlocks to avoid the warnings */
5134 + mutex_init(&stp->st_mutex);
5135 + mutex_lock(&stp->st_mutex);
5136 +
5137 spin_lock(&oo->oo_owner.so_client->cl_lock);
5138 spin_lock(&fp->fi_lock);
5139
5140 @@ -3502,13 +3506,17 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
5141 stp->st_access_bmap = 0;
5142 stp->st_deny_bmap = 0;
5143 stp->st_openstp = NULL;
5144 - init_rwsem(&stp->st_rwsem);
5145 list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
5146 list_add(&stp->st_perfile, &fp->fi_stateids);
5147
5148 out_unlock:
5149 spin_unlock(&fp->fi_lock);
5150 spin_unlock(&oo->oo_owner.so_client->cl_lock);
5151 + if (retstp) {
5152 + mutex_lock(&retstp->st_mutex);
5153 + /* Not that we need to, just for neatness */
5154 + mutex_unlock(&stp->st_mutex);
5155 + }
5156 return retstp;
5157 }
5158
5159 @@ -4335,32 +4343,34 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
5160 */
5161 if (stp) {
5162 /* Stateid was found, this is an OPEN upgrade */
5163 - down_read(&stp->st_rwsem);
5164 + mutex_lock(&stp->st_mutex);
5165 status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
5166 if (status) {
5167 - up_read(&stp->st_rwsem);
5168 + mutex_unlock(&stp->st_mutex);
5169 goto out;
5170 }
5171 } else {
5172 stp = open->op_stp;
5173 open->op_stp = NULL;
5174 + /*
5175 + * init_open_stateid() either returns a locked stateid
5176 + * it found, or initializes and locks the new one we passed in
5177 + */
5178 swapstp = init_open_stateid(stp, fp, open);
5179 if (swapstp) {
5180 nfs4_put_stid(&stp->st_stid);
5181 stp = swapstp;
5182 - down_read(&stp->st_rwsem);
5183 status = nfs4_upgrade_open(rqstp, fp, current_fh,
5184 stp, open);
5185 if (status) {
5186 - up_read(&stp->st_rwsem);
5187 + mutex_unlock(&stp->st_mutex);
5188 goto out;
5189 }
5190 goto upgrade_out;
5191 }
5192 - down_read(&stp->st_rwsem);
5193 status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
5194 if (status) {
5195 - up_read(&stp->st_rwsem);
5196 + mutex_unlock(&stp->st_mutex);
5197 release_open_stateid(stp);
5198 goto out;
5199 }
5200 @@ -4372,7 +4382,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
5201 }
5202 upgrade_out:
5203 nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
5204 - up_read(&stp->st_rwsem);
5205 + mutex_unlock(&stp->st_mutex);
5206
5207 if (nfsd4_has_session(&resp->cstate)) {
5208 if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
5209 @@ -4983,12 +4993,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
5210 * revoked delegations are kept only for free_stateid.
5211 */
5212 return nfserr_bad_stateid;
5213 - down_write(&stp->st_rwsem);
5214 + mutex_lock(&stp->st_mutex);
5215 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
5216 if (status == nfs_ok)
5217 status = nfs4_check_fh(current_fh, &stp->st_stid);
5218 if (status != nfs_ok)
5219 - up_write(&stp->st_rwsem);
5220 + mutex_unlock(&stp->st_mutex);
5221 return status;
5222 }
5223
5224 @@ -5036,7 +5046,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
5225 return status;
5226 oo = openowner(stp->st_stateowner);
5227 if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
5228 - up_write(&stp->st_rwsem);
5229 + mutex_unlock(&stp->st_mutex);
5230 nfs4_put_stid(&stp->st_stid);
5231 return nfserr_bad_stateid;
5232 }
5233 @@ -5068,12 +5078,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5234 oo = openowner(stp->st_stateowner);
5235 status = nfserr_bad_stateid;
5236 if (oo->oo_flags & NFS4_OO_CONFIRMED) {
5237 - up_write(&stp->st_rwsem);
5238 + mutex_unlock(&stp->st_mutex);
5239 goto put_stateid;
5240 }
5241 oo->oo_flags |= NFS4_OO_CONFIRMED;
5242 nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
5243 - up_write(&stp->st_rwsem);
5244 + mutex_unlock(&stp->st_mutex);
5245 dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
5246 __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
5247
5248 @@ -5149,7 +5159,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
5249 nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
5250 status = nfs_ok;
5251 put_stateid:
5252 - up_write(&stp->st_rwsem);
5253 + mutex_unlock(&stp->st_mutex);
5254 nfs4_put_stid(&stp->st_stid);
5255 out:
5256 nfsd4_bump_seqid(cstate, status);
5257 @@ -5202,7 +5212,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5258 if (status)
5259 goto out;
5260 nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
5261 - up_write(&stp->st_rwsem);
5262 + mutex_unlock(&stp->st_mutex);
5263
5264 nfsd4_close_open_stateid(stp);
5265
5266 @@ -5428,7 +5438,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
5267 stp->st_access_bmap = 0;
5268 stp->st_deny_bmap = open_stp->st_deny_bmap;
5269 stp->st_openstp = open_stp;
5270 - init_rwsem(&stp->st_rwsem);
5271 + mutex_init(&stp->st_mutex);
5272 list_add(&stp->st_locks, &open_stp->st_locks);
5273 list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
5274 spin_lock(&fp->fi_lock);
5275 @@ -5597,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5276 &open_stp, nn);
5277 if (status)
5278 goto out;
5279 - up_write(&open_stp->st_rwsem);
5280 + mutex_unlock(&open_stp->st_mutex);
5281 open_sop = openowner(open_stp->st_stateowner);
5282 status = nfserr_bad_stateid;
5283 if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5284 @@ -5606,7 +5616,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5285 status = lookup_or_create_lock_state(cstate, open_stp, lock,
5286 &lock_stp, &new);
5287 if (status == nfs_ok)
5288 - down_write(&lock_stp->st_rwsem);
5289 + mutex_lock(&lock_stp->st_mutex);
5290 } else {
5291 status = nfs4_preprocess_seqid_op(cstate,
5292 lock->lk_old_lock_seqid,
5293 @@ -5710,7 +5720,7 @@ out:
5294 seqid_mutating_err(ntohl(status)))
5295 lock_sop->lo_owner.so_seqid++;
5296
5297 - up_write(&lock_stp->st_rwsem);
5298 + mutex_unlock(&lock_stp->st_mutex);
5299
5300 /*
5301 * If this is a new, never-before-used stateid, and we are
5302 @@ -5880,7 +5890,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5303 fput:
5304 fput(filp);
5305 put_stateid:
5306 - up_write(&stp->st_rwsem);
5307 + mutex_unlock(&stp->st_mutex);
5308 nfs4_put_stid(&stp->st_stid);
5309 out:
5310 nfsd4_bump_seqid(cstate, status);
5311 diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
5312 index c050c53036a6..c89d7b55fb9a 100644
5313 --- a/fs/nfsd/state.h
5314 +++ b/fs/nfsd/state.h
5315 @@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
5316 unsigned char st_access_bmap;
5317 unsigned char st_deny_bmap;
5318 struct nfs4_ol_stateid *st_openstp;
5319 - struct rw_semaphore st_rwsem;
5320 + struct mutex st_mutex;
5321 };
5322
5323 static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
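
The st_rwsem to st_mutex conversion (this and the nfs4state.c hunks above) drops the reader/writer distinction, since every site now takes the lock exclusively, and lets init_open_stateid() initialize and lock the stateid before publishing it under the spinlocks, so a racing OPEN that finds the stateid must wait for setup to finish. A pthread sketch of that publish-locked pattern (types and names are stand-ins; build with -pthread):

#include <pthread.h>
#include <stdio.h>

struct stateid {
    pthread_mutex_t mu;
    int ready;
};

static struct stateid *published;   /* stands in for the hash table */
static pthread_mutex_t table = PTHREAD_MUTEX_INITIALIZER;

static void publish(struct stateid *st)
{
    pthread_mutex_init(&st->mu, NULL);
    pthread_mutex_lock(&st->mu);    /* locked before it is visible */

    pthread_mutex_lock(&table);
    published = st;
    pthread_mutex_unlock(&table);

    st->ready = 1;                  /* finish setup under the lock */
    pthread_mutex_unlock(&st->mu);
}

static void *lookup(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&table);
    struct stateid *st = published;
    pthread_mutex_unlock(&table);

    if (st) {
        pthread_mutex_lock(&st->mu);    /* waits out the setup */
        printf("ready=%d\n", st->ready); /* always 1 when printed */
        pthread_mutex_unlock(&st->mu);
    }
    return NULL;
}

int main(void)
{
    struct stateid st = { .ready = 0 };
    pthread_t t;

    pthread_create(&t, NULL, lookup, NULL);
    publish(&st);
    pthread_join(t, NULL);
    return 0;
}
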
5324 diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
5325 index b3fc0a35bf62..fb35aa21b34b 100644
5326 --- a/fs/overlayfs/dir.c
5327 +++ b/fs/overlayfs/dir.c
5328 @@ -511,6 +511,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
5329 struct dentry *upper;
5330 struct dentry *opaquedir = NULL;
5331 int err;
5332 + int flags = 0;
5333
5334 if (WARN_ON(!workdir))
5335 return -EROFS;
5336 @@ -540,46 +541,39 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
5337 if (err)
5338 goto out_dput;
5339
5340 - whiteout = ovl_whiteout(workdir, dentry);
5341 - err = PTR_ERR(whiteout);
5342 - if (IS_ERR(whiteout))
5343 + upper = lookup_one_len(dentry->d_name.name, upperdir,
5344 + dentry->d_name.len);
5345 + err = PTR_ERR(upper);
5346 + if (IS_ERR(upper))
5347 goto out_unlock;
5348
5349 - upper = ovl_dentry_upper(dentry);
5350 - if (!upper) {
5351 - upper = lookup_one_len(dentry->d_name.name, upperdir,
5352 - dentry->d_name.len);
5353 - err = PTR_ERR(upper);
5354 - if (IS_ERR(upper))
5355 - goto kill_whiteout;
5356 -
5357 - err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
5358 - dput(upper);
5359 - if (err)
5360 - goto kill_whiteout;
5361 - } else {
5362 - int flags = 0;
5363 + err = -ESTALE;
5364 + if ((opaquedir && upper != opaquedir) ||
5365 + (!opaquedir && ovl_dentry_upper(dentry) &&
5366 + upper != ovl_dentry_upper(dentry))) {
5367 + goto out_dput_upper;
5368 + }
5369
5370 - if (opaquedir)
5371 - upper = opaquedir;
5372 - err = -ESTALE;
5373 - if (upper->d_parent != upperdir)
5374 - goto kill_whiteout;
5375 + whiteout = ovl_whiteout(workdir, dentry);
5376 + err = PTR_ERR(whiteout);
5377 + if (IS_ERR(whiteout))
5378 + goto out_dput_upper;
5379
5380 - if (is_dir)
5381 - flags |= RENAME_EXCHANGE;
5382 + if (d_is_dir(upper))
5383 + flags = RENAME_EXCHANGE;
5384
5385 - err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
5386 - if (err)
5387 - goto kill_whiteout;
5388 + err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
5389 + if (err)
5390 + goto kill_whiteout;
5391 + if (flags)
5392 + ovl_cleanup(wdir, upper);
5393
5394 - if (is_dir)
5395 - ovl_cleanup(wdir, upper);
5396 - }
5397 ovl_dentry_version_inc(dentry->d_parent);
5398 out_d_drop:
5399 d_drop(dentry);
5400 dput(whiteout);
5401 +out_dput_upper:
5402 + dput(upper);
5403 out_unlock:
5404 unlock_rename(workdir, upperdir);
5405 out_dput:
5406 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
5407 index a4ff5d0d7db9..d46fa609e803 100644
5408 --- a/fs/overlayfs/inode.c
5409 +++ b/fs/overlayfs/inode.c
5410 @@ -59,16 +59,40 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr)
5411 if (err)
5412 goto out;
5413
5414 + if (attr->ia_valid & ATTR_SIZE) {
5415 + struct inode *realinode = d_inode(ovl_dentry_real(dentry));
5416 +
5417 + err = -ETXTBSY;
5418 + if (atomic_read(&realinode->i_writecount) < 0)
5419 + goto out_drop_write;
5420 + }
5421 +
5422 err = ovl_copy_up(dentry);
5423 if (!err) {
5424 + struct inode *winode = NULL;
5425 +
5426 upperdentry = ovl_dentry_upper(dentry);
5427
5428 + if (attr->ia_valid & ATTR_SIZE) {
5429 + winode = d_inode(upperdentry);
5430 + err = get_write_access(winode);
5431 + if (err)
5432 + goto out_drop_write;
5433 + }
5434 +
5435 + if (attr->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID))
5436 + attr->ia_valid &= ~ATTR_MODE;
5437 +
5438 inode_lock(upperdentry->d_inode);
5439 err = notify_change(upperdentry, attr, NULL);
5440 if (!err)
5441 ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
5442 inode_unlock(upperdentry->d_inode);
5443 +
5444 + if (winode)
5445 + put_write_access(winode);
5446 }
5447 +out_drop_write:
5448 ovl_drop_write(dentry);
5449 out:
5450 return err;
5451 @@ -121,16 +145,18 @@ int ovl_permission(struct inode *inode, int mask)
5452
5453 err = vfs_getattr(&realpath, &stat);
5454 if (err)
5455 - return err;
5456 + goto out_dput;
5457
5458 + err = -ESTALE;
5459 if ((stat.mode ^ inode->i_mode) & S_IFMT)
5460 - return -ESTALE;
5461 + goto out_dput;
5462
5463 inode->i_mode = stat.mode;
5464 inode->i_uid = stat.uid;
5465 inode->i_gid = stat.gid;
5466
5467 - return generic_permission(inode, mask);
5468 + err = generic_permission(inode, mask);
5469 + goto out_dput;
5470 }
5471
5472 /* Careful in RCU walk mode */
5473 @@ -400,12 +426,11 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
5474 if (!inode)
5475 return NULL;
5476
5477 - mode &= S_IFMT;
5478 -
5479 inode->i_ino = get_next_ino();
5480 inode->i_mode = mode;
5481 inode->i_flags |= S_NOATIME | S_NOCMTIME;
5482
5483 + mode &= S_IFMT;
5484 switch (mode) {
5485 case S_IFDIR:
5486 inode->i_private = oe;
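
The ovl_setattr() change above makes truncate through an overlay honor deny-write semantics on the real inode: a negative i_writecount means the file is mapped for execute, so truncation must fail with -ETXTBSY, and write access is then pinned on the upper inode around notify_change(). A C11-atomics analog of that counter convention (a userspace stand-in, not the VFS API):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* i_writecount convention: > 0 writers present, < 0 deny-write
 * holders (e.g. a running executable); the states are exclusive. */
static atomic_int writecount;

static bool get_write_access(void)
{
    int v = atomic_load(&writecount);

    while (v >= 0)
        if (atomic_compare_exchange_weak(&writecount, &v, v + 1))
            return true;        /* write access granted */
    return false;               /* -ETXTBSY in the kernel */
}

static void put_write_access(void)
{
    atomic_fetch_sub(&writecount, 1);
}

int main(void)
{
    printf("%d\n", get_write_access());     /* 1 */
    put_write_access();
    atomic_store(&writecount, -1);          /* file is being run */
    printf("%d\n", get_write_access());     /* 0 -> ETXTBSY */
    return 0;
}
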
5487 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
5488 index 6a7090f4a441..294ccc0c1fc7 100644
5489 --- a/fs/overlayfs/overlayfs.h
5490 +++ b/fs/overlayfs/overlayfs.h
5491 @@ -185,6 +185,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to)
5492 {
5493 to->i_uid = from->i_uid;
5494 to->i_gid = from->i_gid;
5495 + to->i_mode = from->i_mode;
5496 }
5497
5498 /* dir.c */
5499 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
5500 index 791235e03d17..7952a50f0a72 100644
5501 --- a/fs/overlayfs/super.c
5502 +++ b/fs/overlayfs/super.c
5503 @@ -1064,16 +1064,21 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
5504 /*
5505 * Upper should support d_type, else whiteouts are visible.
5506 * Given workdir and upper are on same fs, we can do
5507 - * iterate_dir() on workdir.
5508 + * iterate_dir() on workdir. This check requires successful
5509 + * creation of workdir in previous step.
5510 */
5511 - err = ovl_check_d_type_supported(&workpath);
5512 - if (err < 0)
5513 - goto out_put_workdir;
5514 + if (ufs->workdir) {
5515 + err = ovl_check_d_type_supported(&workpath);
5516 + if (err < 0)
5517 + goto out_put_workdir;
5518
5519 - if (!err) {
5520 - pr_err("overlayfs: upper fs needs to support d_type.\n");
5521 - err = -EINVAL;
5522 - goto out_put_workdir;
5523 + /*
5524 + * We allowed this configuration and don't want to
5525 + * break users over kernel upgrade. So warn instead
5526 + * of erroring out.
5527 + */
5528 + if (!err)
5529 + pr_warn("overlayfs: upper fs needs to support d_type.\n");
5530 }
5531 }
5532
5533 diff --git a/fs/posix_acl.c b/fs/posix_acl.c
5534 index 711dd5170376..e11ea5fb1bad 100644
5535 --- a/fs/posix_acl.c
5536 +++ b/fs/posix_acl.c
5537 @@ -786,39 +786,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
5538 return error;
5539 }
5540
5541 -static int
5542 -posix_acl_xattr_set(const struct xattr_handler *handler,
5543 - struct dentry *dentry, const char *name,
5544 - const void *value, size_t size, int flags)
5545 +int
5546 +set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
5547 {
5548 - struct inode *inode = d_backing_inode(dentry);
5549 - struct posix_acl *acl = NULL;
5550 - int ret;
5551 -
5552 if (!IS_POSIXACL(inode))
5553 return -EOPNOTSUPP;
5554 if (!inode->i_op->set_acl)
5555 return -EOPNOTSUPP;
5556
5557 - if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
5558 - return value ? -EACCES : 0;
5559 + if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
5560 + return acl ? -EACCES : 0;
5561 if (!inode_owner_or_capable(inode))
5562 return -EPERM;
5563
5564 + if (acl) {
5565 + int ret = posix_acl_valid(acl);
5566 + if (ret)
5567 + return ret;
5568 + }
5569 + return inode->i_op->set_acl(inode, acl, type);
5570 +}
5571 +EXPORT_SYMBOL(set_posix_acl);
5572 +
5573 +static int
5574 +posix_acl_xattr_set(const struct xattr_handler *handler,
5575 + struct dentry *dentry, const char *name,
5576 + const void *value, size_t size, int flags)
5577 +{
5578 + struct inode *inode = d_backing_inode(dentry);
5579 + struct posix_acl *acl = NULL;
5580 + int ret;
5581 +
5582 if (value) {
5583 acl = posix_acl_from_xattr(&init_user_ns, value, size);
5584 if (IS_ERR(acl))
5585 return PTR_ERR(acl);
5586 -
5587 - if (acl) {
5588 - ret = posix_acl_valid(acl);
5589 - if (ret)
5590 - goto out;
5591 - }
5592 }
5593 -
5594 - ret = inode->i_op->set_acl(inode, acl, handler->flags);
5595 -out:
5596 + ret = set_posix_acl(inode, handler->flags, acl);
5597 posix_acl_release(acl);
5598 return ret;
5599 }
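
The posix_acl.c hunk lifts the permission and validity checks out of the xattr handler into an exported set_posix_acl(), which is what the nfsd hunks above switch to, so in-kernel ACL setters get the same gating as the xattr path. A sketch of the check ordering with stand-in types (error numbers follow the usual errno values):

#include <stdbool.h>
#include <stdio.h>

struct acl { bool valid; };
struct inode {
    bool posixacl_enabled, is_dir, caller_owns;
    int (*set_acl)(struct inode *, struct acl *, int type);
};

enum { ACL_TYPE_ACCESS, ACL_TYPE_DEFAULT };

static int set_posix_acl(struct inode *i, int type, struct acl *a)
{
    if (!i->posixacl_enabled || !i->set_acl)
        return -95;                  /* -EOPNOTSUPP */
    if (type == ACL_TYPE_DEFAULT && !i->is_dir)
        return a ? -13 : 0;          /* -EACCES, or a no-op */
    if (!i->caller_owns)
        return -1;                   /* -EPERM */
    if (a && !a->valid)
        return -22;                  /* -EINVAL */
    return i->set_acl(i, a, type);   /* delegate to the fs */
}

static int fake_set_acl(struct inode *i, struct acl *a, int t)
{
    (void)i; (void)a; (void)t;
    return 0;
}

int main(void)
{
    struct acl a = { .valid = true };
    struct inode ino = { true, false, true, fake_set_acl };

    printf("%d\n", set_posix_acl(&ino, ACL_TYPE_ACCESS, &a));   /* 0 */
    printf("%d\n", set_posix_acl(&ino, ACL_TYPE_DEFAULT, &a));  /* -13 */
    return 0;
}
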
5600 diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
5601 index 446753d8ac34..5b5ec8d0f324 100644
5602 --- a/fs/ubifs/file.c
5603 +++ b/fs/ubifs/file.c
5604 @@ -52,6 +52,7 @@
5605 #include "ubifs.h"
5606 #include <linux/mount.h>
5607 #include <linux/slab.h>
5608 +#include <linux/migrate.h>
5609
5610 static int read_block(struct inode *inode, void *addr, unsigned int block,
5611 struct ubifs_data_node *dn)
5612 @@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
5613 return ret;
5614 }
5615
5616 +#ifdef CONFIG_MIGRATION
5617 +static int ubifs_migrate_page(struct address_space *mapping,
5618 + struct page *newpage, struct page *page, enum migrate_mode mode)
5619 +{
5620 + int rc;
5621 +
5622 + rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
5623 + if (rc != MIGRATEPAGE_SUCCESS)
5624 + return rc;
5625 +
5626 + if (PagePrivate(page)) {
5627 + ClearPagePrivate(page);
5628 + SetPagePrivate(newpage);
5629 + }
5630 +
5631 + migrate_page_copy(newpage, page);
5632 + return MIGRATEPAGE_SUCCESS;
5633 +}
5634 +#endif
5635 +
5636 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
5637 {
5638 /*
5639 @@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
5640 .write_end = ubifs_write_end,
5641 .invalidatepage = ubifs_invalidatepage,
5642 .set_page_dirty = ubifs_set_page_dirty,
5643 +#ifdef CONFIG_MIGRATION
5644 + .migratepage = ubifs_migrate_page,
5645 +#endif
5646 .releasepage = ubifs_releasepage,
5647 };
5648
5649 diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
5650 index 6bd05700d8c9..05f05f17a7c2 100644
5651 --- a/include/asm-generic/qspinlock.h
5652 +++ b/include/asm-generic/qspinlock.h
5653 @@ -22,37 +22,33 @@
5654 #include <asm-generic/qspinlock_types.h>
5655
5656 /**
5657 + * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
5658 + * @lock : Pointer to queued spinlock structure
5659 + *
5660 + * There is a very slight possibility of live-lock if the lockers keep coming
5661 + * and the waiter is just unfortunate enough to not see any unlock state.
5662 + */
5663 +#ifndef queued_spin_unlock_wait
5664 +extern void queued_spin_unlock_wait(struct qspinlock *lock);
5665 +#endif
5666 +
5667 +/**
5668 * queued_spin_is_locked - is the spinlock locked?
5669 * @lock: Pointer to queued spinlock structure
5670 * Return: 1 if it is locked, 0 otherwise
5671 */
5672 +#ifndef queued_spin_is_locked
5673 static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
5674 {
5675 /*
5676 - * queued_spin_lock_slowpath() can ACQUIRE the lock before
5677 - * issuing the unordered store that sets _Q_LOCKED_VAL.
5678 - *
5679 - * See both smp_cond_acquire() sites for more detail.
5680 - *
5681 - * This however means that in code like:
5682 - *
5683 - * spin_lock(A) spin_lock(B)
5684 - * spin_unlock_wait(B) spin_is_locked(A)
5685 - * do_something() do_something()
5686 - *
5687 - * Both CPUs can end up running do_something() because the store
5688 - * setting _Q_LOCKED_VAL will pass through the loads in
5689 - * spin_unlock_wait() and/or spin_is_locked().
5690 + * See queued_spin_unlock_wait().
5691 *
5692 - * Avoid this by issuing a full memory barrier between the spin_lock()
5693 - * and the loads in spin_unlock_wait() and spin_is_locked().
5694 - *
5695 - * Note that regular mutual exclusion doesn't care about this
5696 - * delayed store.
5697 + * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
5698 + * isn't immediately observable.
5699 */
5700 - smp_mb();
5701 - return atomic_read(&lock->val) & _Q_LOCKED_MASK;
5702 + return atomic_read(&lock->val);
5703 }
5704 +#endif
5705
5706 /**
5707 * queued_spin_value_unlocked - is the spinlock structure unlocked?
5708 @@ -122,21 +118,6 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
5709 }
5710 #endif
5711
5712 -/**
5713 - * queued_spin_unlock_wait - wait until current lock holder releases the lock
5714 - * @lock : Pointer to queued spinlock structure
5715 - *
5716 - * There is a very slight possibility of live-lock if the lockers keep coming
5717 - * and the waiter is just unfortunate enough to not see any unlock state.
5718 - */
5719 -static inline void queued_spin_unlock_wait(struct qspinlock *lock)
5720 -{
5721 - /* See queued_spin_is_locked() */
5722 - smp_mb();
5723 - while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
5724 - cpu_relax();
5725 -}
5726 -
5727 #ifndef virt_spin_lock
5728 static __always_inline bool virt_spin_lock(struct qspinlock *lock)
5729 {
5730 diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
5731 index 055a08ddac02..a74c49d7c0fc 100644
5732 --- a/include/drm/ttm/ttm_bo_api.h
5733 +++ b/include/drm/ttm/ttm_bo_api.h
5734 @@ -316,6 +316,20 @@ ttm_bo_reference(struct ttm_buffer_object *bo)
5735 */
5736 extern int ttm_bo_wait(struct ttm_buffer_object *bo, bool lazy,
5737 bool interruptible, bool no_wait);
5738 +
5739 +/**
5740 + * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo
5741 + *
5742 + * @placement: Proposed placement for the buffer object
5743 + * @mem: The struct ttm_mem_reg indicating the region where the bo resides
5744 + * @new_flags: Describes compatible placement found
5745 + *
5746 + * Returns true if the placement is compatible
5747 + */
5748 +extern bool ttm_bo_mem_compat(struct ttm_placement *placement,
5749 + struct ttm_mem_reg *mem,
5750 + uint32_t *new_flags);
5751 +
5752 /**
5753 * ttm_bo_validate
5754 *
5755 diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
5756 index 786ad32631a6..07b83d32f66c 100644
5757 --- a/include/linux/cpuidle.h
5758 +++ b/include/linux/cpuidle.h
5759 @@ -152,6 +152,8 @@ extern void cpuidle_disable_device(struct cpuidle_device *dev);
5760 extern int cpuidle_play_dead(void);
5761
5762 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
5763 +static inline struct cpuidle_device *cpuidle_get_device(void)
5764 +{return __this_cpu_read(cpuidle_devices); }
5765 #else
5766 static inline void disable_cpuidle(void) { }
5767 static inline bool cpuidle_not_available(struct cpuidle_driver *drv,
5768 @@ -187,6 +189,7 @@ static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
5769 static inline int cpuidle_play_dead(void) {return -ENODEV; }
5770 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
5771 struct cpuidle_device *dev) {return NULL; }
5772 +static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
5773 #endif
5774
5775 #if defined(CONFIG_CPU_IDLE) && defined(CONFIG_SUSPEND)
5776 diff --git a/include/linux/dcache.h b/include/linux/dcache.h
5777 index 7e9422cb5989..ad5d582f9b14 100644
5778 --- a/include/linux/dcache.h
5779 +++ b/include/linux/dcache.h
5780 @@ -576,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
5781 return inode;
5782 }
5783
5784 +/**
5785 + * d_real_inode - Return the real inode
5786 + * @dentry: The dentry to query
5787 + *
5788 + * If the dentry is on a union/overlay, return the underlying, real inode.
5789 + * Otherwise return d_inode().
5790 + */
5791 +static inline struct inode *d_real_inode(struct dentry *dentry)
5792 +{
5793 + return d_backing_inode(d_real(dentry));
5794 +}
5795 +
5796
5797 #endif /* __LINUX_DCACHE_H */
5798 diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
5799 index 0536524bb9eb..68904469fba1 100644
5800 --- a/include/linux/jump_label.h
5801 +++ b/include/linux/jump_label.h
5802 @@ -117,13 +117,18 @@ struct module;
5803
5804 #include <linux/atomic.h>
5805
5806 +#ifdef HAVE_JUMP_LABEL
5807 +
5808 static inline int static_key_count(struct static_key *key)
5809 {
5810 - return atomic_read(&key->enabled);
5811 + /*
5812 + * -1 means the first static_key_slow_inc() is in progress.
5813 + * static_key_enabled() must return true, so return 1 here.
5814 + */
5815 + int n = atomic_read(&key->enabled);
5816 + return n >= 0 ? n : 1;
5817 }
5818
5819 -#ifdef HAVE_JUMP_LABEL
5820 -
5821 #define JUMP_TYPE_FALSE 0UL
5822 #define JUMP_TYPE_TRUE 1UL
5823 #define JUMP_TYPE_MASK 1UL
5824 @@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
5825
5826 #else /* !HAVE_JUMP_LABEL */
5827
5828 +static inline int static_key_count(struct static_key *key)
5829 +{
5830 + return atomic_read(&key->enabled);
5831 +}
5832 +
5833 static __always_inline void jump_label_init(void)
5834 {
5835 static_key_initialized = true;
5836 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
5837 index 15d0df943466..794b924e9669 100644
5838 --- a/include/linux/skbuff.h
5839 +++ b/include/linux/skbuff.h
5840 @@ -1062,6 +1062,7 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4)
5841 }
5842
5843 void __skb_get_hash(struct sk_buff *skb);
5844 +u32 __skb_get_hash_symmetric(struct sk_buff *skb);
5845 u32 skb_get_poff(const struct sk_buff *skb);
5846 u32 __skb_get_poff(const struct sk_buff *skb, void *data,
5847 const struct flow_keys *keys, int hlen);
5848 @@ -2860,6 +2861,25 @@ static inline void skb_postpush_rcsum(struct sk_buff *skb,
5849 }
5850
5851 /**
5852 + * skb_push_rcsum - push skb and update receive checksum
5853 + * @skb: buffer to update
5854 + * @len: length of data pushed
5855 + *
5856 + * This function performs an skb_push on the packet and updates
5857 + * the CHECKSUM_COMPLETE checksum. It should be used on
5858 + * receive path processing instead of skb_push unless you know
5859 + * that the checksum difference is zero (e.g., a valid IP header)
5860 + * or you are setting ip_summed to CHECKSUM_NONE.
5861 + */
5862 +static inline unsigned char *skb_push_rcsum(struct sk_buff *skb,
5863 + unsigned int len)
5864 +{
5865 + skb_push(skb, len);
5866 + skb_postpush_rcsum(skb, skb->data, len);
5867 + return skb->data;
5868 +}
5869 +
5870 +/**
5871 * pskb_trim_rcsum - trim received skb and update checksum
5872 * @skb: buffer to trim
5873 * @len: new length
5874 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
5875 index 9a7ddbaf116e..14d70f59f0c2 100644
5876 --- a/include/linux/sunrpc/clnt.h
5877 +++ b/include/linux/sunrpc/clnt.h
5878 @@ -137,8 +137,6 @@ struct rpc_create_args {
5879 #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
5880
5881 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
5882 -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
5883 - struct rpc_xprt *xprt);
5884 struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
5885 const struct rpc_program *, u32);
5886 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
5887 diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
5888 index b7dabc4baafd..79ba50856707 100644
5889 --- a/include/linux/sunrpc/svc_xprt.h
5890 +++ b/include/linux/sunrpc/svc_xprt.h
5891 @@ -84,6 +84,7 @@ struct svc_xprt {
5892
5893 struct net *xpt_net;
5894 struct rpc_xprt *xpt_bc_xprt; /* NFSv4.1 backchannel */
5895 + struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
5896 };
5897
5898 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
5899 diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
5900 index fb0d212e0d3a..9f51e1df3023 100644
5901 --- a/include/linux/sunrpc/xprt.h
5902 +++ b/include/linux/sunrpc/xprt.h
5903 @@ -296,6 +296,7 @@ struct xprt_create {
5904 size_t addrlen;
5905 const char *servername;
5906 struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
5907 + struct rpc_xprt_switch *bc_xps;
5908 unsigned int flags;
5909 };
5910
5911 diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
5912 index 966889a20ea3..e479033bd782 100644
5913 --- a/include/linux/usb/ehci_def.h
5914 +++ b/include/linux/usb/ehci_def.h
5915 @@ -180,11 +180,11 @@ struct ehci_regs {
5916 * PORTSCx
5917 */
5918 /* HOSTPC: offset 0x84 */
5919 - u32 hostpc[1]; /* HOSTPC extension */
5920 + u32 hostpc[0]; /* HOSTPC extension */
5921 #define HOSTPC_PHCD (1<<22) /* Phy clock disable */
5922 #define HOSTPC_PSPD (3<<25) /* Port speed detection */
5923
5924 - u32 reserved5[16];
5925 + u32 reserved5[17];
5926
5927 /* USBMODE_EX: offset 0xc8 */
5928 u32 usbmode_ex; /* USB Device mode extension */
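
Shrinking hostpc to a zero-length array while growing reserved5 from 16 to 17 words keeps every later register of struct ehci_regs at the same offset, yet lets hostpc[n] index the per-port HOSTPC registers that actually occupy that reserved space. A compile-check sketch of the layout equivalence (zero-length arrays are a GNU C extension, as in the kernel):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Tail of struct ehci_regs before and after the hunk, trimmed to
 * the fields it touches; usbmode_ex must stay at the same offset. */
struct regs_old { uint32_t hostpc[1]; uint32_t reserved5[16]; uint32_t usbmode_ex; };
struct regs_new { uint32_t hostpc[0]; uint32_t reserved5[17]; uint32_t usbmode_ex; };

int main(void)
{
    printf("old usbmode_ex at %zu, new at %zu\n",
           offsetof(struct regs_old, usbmode_ex),
           offsetof(struct regs_new, usbmode_ex));  /* both 68 */
    return 0;
}
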
5929 diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
5930 index fb2cef4e9747..b8334a637095 100644
5931 --- a/include/rdma/ib_verbs.h
5932 +++ b/include/rdma/ib_verbs.h
5933 @@ -217,7 +217,7 @@ enum ib_device_cap_flags {
5934 IB_DEVICE_CROSS_CHANNEL = (1 << 27),
5935 IB_DEVICE_MANAGED_FLOW_STEERING = (1 << 29),
5936 IB_DEVICE_SIGNATURE_HANDOVER = (1 << 30),
5937 - IB_DEVICE_ON_DEMAND_PAGING = (1 << 31),
5938 + IB_DEVICE_ON_DEMAND_PAGING = (1ULL << 31),
5939 IB_DEVICE_SG_GAPS_REG = (1ULL << 32),
5940 IB_DEVICE_VIRTUAL_FUNCTION = ((u64)1 << 33),
5941 };
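
IB_DEVICE_ON_DEMAND_PAGING as (1 << 31) overflows a signed int; when that enum value is widened to the 64-bit capability mask it sign-extends and sets the top 33 bits, clobbering comparisons with the genuinely 64-bit flags around it. 1ULL << 31 keeps it a single bit. A small demonstration (the casts reproduce the sign extension without re-triggering the undefined shift itself):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* What the old value becomes once widened to 64 bits:
     * (1 << 31) is INT_MIN and sign-extends. */
    uint64_t bad  = (int32_t)((uint32_t)1 << 31);
    uint64_t good = 1ULL << 31;

    printf("bad  = %#018llx\n", (unsigned long long)bad);
    printf("good = %#018llx\n", (unsigned long long)good);
    return 0;
}
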
5942 diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
5943 index a8696551abb1..6ee9d97004d5 100644
5944 --- a/include/rdma/rdma_vt.h
5945 +++ b/include/rdma/rdma_vt.h
5946 @@ -203,7 +203,9 @@ struct rvt_driver_provided {
5947
5948 /*
5949 * Allocate a private queue pair data structure for driver specific
5950 - * information which is opaque to rdmavt.
5951 + * information which is opaque to rdmavt. Errors are returned via
5952 + * ERR_PTR(err). The driver is free to return NULL or a valid
5953 + * pointer.
5954 */
5955 void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
5956 gfp_t gfp);
5957 diff --git a/kernel/futex.c b/kernel/futex.c
5958 index c20f06f38ef3..6555d5459e98 100644
5959 --- a/kernel/futex.c
5960 +++ b/kernel/futex.c
5961 @@ -469,7 +469,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
5962 {
5963 unsigned long address = (unsigned long)uaddr;
5964 struct mm_struct *mm = current->mm;
5965 - struct page *page;
5966 + struct page *page, *tail;
5967 struct address_space *mapping;
5968 int err, ro = 0;
5969
5970 @@ -530,7 +530,15 @@ again:
5971 * considered here and page lock forces unnecessarily serialization
5972 * From this point on, mapping will be re-verified if necessary and
5973 * page lock will be acquired only if it is unavoidable
5974 - */
5975 + *
5976 + * Mapping checks require the head page for any compound page so the
5977 + * head page and mapping is looked up now. For anonymous pages, it
5978 + * does not matter if the page splits in the future as the key is
5979 + * based on the address. For filesystem-backed pages, the tail is
5980 + * required as the index of the page determines the key. For
5981 + * base pages, there is no tail page and tail == page.
5982 + */
5983 + tail = page;
5984 page = compound_head(page);
5985 mapping = READ_ONCE(page->mapping);
5986
5987 @@ -654,7 +662,7 @@ again:
5988
5989 key->both.offset |= FUT_OFF_INODE; /* inode-based key */
5990 key->shared.inode = inode;
5991 - key->shared.pgoff = basepage_index(page);
5992 + key->shared.pgoff = basepage_index(tail);
5993 rcu_read_unlock();
5994 }
5995
5996 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
5997 index 05254eeb4b4e..4b353e0be121 100644
5998 --- a/kernel/jump_label.c
5999 +++ b/kernel/jump_label.c
6000 @@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
6001
6002 void static_key_slow_inc(struct static_key *key)
6003 {
6004 + int v, v1;
6005 +
6006 STATIC_KEY_CHECK_USE();
6007 - if (atomic_inc_not_zero(&key->enabled))
6008 - return;
6009 +
6010 + /*
6011 + * Careful if we get concurrent static_key_slow_inc() calls;
6012 + * later calls must wait for the first one to _finish_ the
6013 + * jump_label_update() process. At the same time, however,
6014 + * the jump_label_update() call below wants to see
6015 + * static_key_enabled(&key) for jumps to be updated properly.
6016 + *
6017 + * So give a special meaning to negative key->enabled: it sends
6018 + * static_key_slow_inc() down the slow path, and it is non-zero
6019 + * so it counts as "enabled" in jump_label_update(). Note that
6020 + * atomic_inc_unless_negative() checks >= 0, so roll our own.
6021 + */
6022 + for (v = atomic_read(&key->enabled); v > 0; v = v1) {
6023 + v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
6024 + if (likely(v1 == v))
6025 + return;
6026 + }
6027
6028 jump_label_lock();
6029 - if (atomic_inc_return(&key->enabled) == 1)
6030 + if (atomic_read(&key->enabled) == 0) {
6031 + atomic_set(&key->enabled, -1);
6032 jump_label_update(key);
6033 + atomic_set(&key->enabled, 1);
6034 + } else {
6035 + atomic_inc(&key->enabled);
6036 + }
6037 jump_label_unlock();
6038 }
6039 EXPORT_SYMBOL_GPL(static_key_slow_inc);
6040 @@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
6041 static void __static_key_slow_dec(struct static_key *key,
6042 unsigned long rate_limit, struct delayed_work *work)
6043 {
6044 + /*
6045 + * The negative count check is valid even when a negative
6046 + * key->enabled is in use by static_key_slow_inc(); a
6047 + * __static_key_slow_dec() before the first static_key_slow_inc()
6048 + * returns is unbalanced, because all other static_key_slow_inc()
6049 + * instances block while the update is in progress.
6050 + */
6051 if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
6052 WARN(atomic_read(&key->enabled) < 0,
6053 "jump label: negative count!\n");
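
The jump_label hunks give key->enabled a third state: -1 means the first static_key_slow_inc() is still patching code, which static_key_count() must already report as enabled, while later incrementers are forced onto the slow path to wait behind the lock. A userspace analog with C11 atomics and a pthread mutex (names are illustrative; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int enabled;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static int key_count(void)              /* static_key_count() */
{
    int n = atomic_load(&enabled);
    return n >= 0 ? n : 1;              /* -1 still reads as on */
}

static void slow_inc(void)
{
    int v = atomic_load(&enabled);

    /* Fast path: only bump when already fully enabled (> 0). */
    while (v > 0)
        if (atomic_compare_exchange_weak(&enabled, &v, v + 1))
            return;

    pthread_mutex_lock(&update_lock);
    /* Under the lock, enabled is never -1: the -1 window only
     * exists while some thread holds this lock. */
    if (atomic_load(&enabled) == 0) {
        atomic_store(&enabled, -1);     /* visible as "on" */
        /* ... expensive one-time update happens here ... */
        atomic_store(&enabled, 1);
    } else {
        atomic_fetch_add(&enabled, 1);
    }
    pthread_mutex_unlock(&update_lock);
}

int main(void)
{
    slow_inc();
    slow_inc();
    printf("count=%d\n", key_count());  /* 2 */
    return 0;
}
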
6054 diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
6055 index e364b424b019..79d2d765a75f 100644
6056 --- a/kernel/locking/mutex.c
6057 +++ b/kernel/locking/mutex.c
6058 @@ -486,9 +486,6 @@ __ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
6059 if (!hold_ctx)
6060 return 0;
6061
6062 - if (unlikely(ctx == hold_ctx))
6063 - return -EALREADY;
6064 -
6065 if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
6066 (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
6067 #ifdef CONFIG_DEBUG_MUTEXES
6068 @@ -514,6 +511,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
6069 unsigned long flags;
6070 int ret;
6071
6072 + if (use_ww_ctx) {
6073 + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
6074 + if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
6075 + return -EALREADY;
6076 + }
6077 +
6078 preempt_disable();
6079 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
6080
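
The -EALREADY self-deadlock check moves from the ww_mutex slowpath to the common entry: with optimistic spinning, a task re-locking a ww_mutex it already holds with the same context could spin forever without ever reaching the old check. A minimal sketch of the relocated test (stand-in types, no real spinning):

#include <stdio.h>

/* Before any spinning or sleeping, compare the acquiring context
 * against the one already recorded in the lock. */
struct ww_ctx { int stamp; };
struct ww_mutex { struct ww_ctx *ctx; };

static int ww_lock(struct ww_mutex *m, struct ww_ctx *ctx)
{
    if (ctx && m->ctx == ctx)
        return -114;        /* -EALREADY: caller already holds it */
    /* ... normal acquisition would go here ... */
    m->ctx = ctx;
    return 0;
}

int main(void)
{
    struct ww_ctx c = { 1 };
    struct ww_mutex m = { 0 };

    printf("%d\n", ww_lock(&m, &c));    /* 0 */
    printf("%d\n", ww_lock(&m, &c));    /* -114 */
    return 0;
}
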
6081 diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
6082 index ce2f75e32ae1..5fc8c311b8fe 100644
6083 --- a/kernel/locking/qspinlock.c
6084 +++ b/kernel/locking/qspinlock.c
6085 @@ -267,6 +267,66 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
6086 #define queued_spin_lock_slowpath native_queued_spin_lock_slowpath
6087 #endif
6088
6089 +/*
6090 + * queued_spin_lock_slowpath() can (load-)ACQUIRE the lock before
6091 + * issuing an _unordered_ store to set _Q_LOCKED_VAL.
6092 + *
6093 + * This means that the store can be delayed, but no later than the
6094 + * store-release from the unlock. This means that simply observing
6095 + * _Q_LOCKED_VAL is not sufficient to determine if the lock is acquired.
6096 + *
6097 + * There are two paths that can issue the unordered store:
6098 + *
6099 + * (1) clear_pending_set_locked(): *,1,0 -> *,0,1
6100 + *
6101 + * (2) set_locked(): t,0,0 -> t,0,1 ; t != 0
6102 + * atomic_cmpxchg_relaxed(): t,0,0 -> 0,0,1
6103 + *
6104 + * However, in both cases we have other !0 state we've set before to queue
6105 + * ourselves:
6106 + *
6107 + * For (1) we have the atomic_cmpxchg_acquire() that set _Q_PENDING_VAL, our
6108 + * load is constrained by that ACQUIRE to not pass before that, and thus must
6109 + * observe the store.
6110 + *
6111 + * For (2) we have a more interesting scenario. We enqueue ourselves using
6112 + * xchg_tail(), which ends up being a RELEASE. This in itself is not
6113 + * sufficient, however that is followed by an smp_cond_acquire() on the same
6114 + * word, giving a RELEASE->ACQUIRE ordering. This again constrains our load and
6115 + * guarantees we must observe that store.
6116 + *
6117 + * Therefore both cases have other !0 state that is observable before the
6118 + * unordered locked byte store comes through. This means we can use that to
6119 + * wait for the lock store, and then wait for an unlock.
6120 + */
6121 +#ifndef queued_spin_unlock_wait
6122 +void queued_spin_unlock_wait(struct qspinlock *lock)
6123 +{
6124 + u32 val;
6125 +
6126 + for (;;) {
6127 + val = atomic_read(&lock->val);
6128 +
6129 + if (!val) /* not locked, we're done */
6130 + goto done;
6131 +
6132 + if (val & _Q_LOCKED_MASK) /* locked, go wait for unlock */
6133 + break;
6134 +
6135 + /* not locked, but pending, wait until we observe the lock */
6136 + cpu_relax();
6137 + }
6138 +
6139 + /* any unlock is good */
6140 + while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
6141 + cpu_relax();
6142 +
6143 +done:
6144 + smp_rmb(); /* CTRL + RMB -> ACQUIRE */
6145 +}
6146 +EXPORT_SYMBOL(queued_spin_unlock_wait);
6147 +#endif
6148 +
6149 #endif /* _GEN_PV_LOCK_SLOWPATH */
6150
6151 /**
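
queued_spin_unlock_wait() is now out of line and waits in two phases, because a locker can be observable through the pending or tail bits before its unordered store of _Q_LOCKED_VAL lands: first wait for the word to show either zero or the locked bit, then wait for any unlock. A loose userspace analog with C11 atomics (two flag bits stand in for the real lock-word layout; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define LOCKED  0x1u
#define PENDING 0x2u

static atomic_uint val;

/* If the word is nonzero but the locked bit is not yet visible,
 * a locker is mid-acquisition: wait for the lock, then the unlock. */
static void unlock_wait(void)
{
    unsigned v;

    for (;;) {
        v = atomic_load(&val);
        if (!v)                 /* unlocked: done */
            goto done;
        if (v & LOCKED)         /* locked: wait for release */
            break;
        /* pending only: the locked store is still in flight */
    }
    while (atomic_load(&val) & LOCKED)
        ;                       /* spin for any unlock */
done:
    atomic_thread_fence(memory_order_acquire);
}

static void *owner(void *arg)
{
    (void)arg;
    atomic_store(&val, PENDING);
    atomic_store(&val, LOCKED); /* acquisition completes */
    atomic_store(&val, 0);      /* release */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, owner, NULL);
    unlock_wait();              /* returns once a release is seen */
    pthread_join(t, NULL);
    puts("saw an unlock");
    return 0;
}
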
6152 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6153 index e7dd0ec169be..eeaf920f46b9 100644
6154 --- a/kernel/sched/fair.c
6155 +++ b/kernel/sched/fair.c
6156 @@ -2821,6 +2821,23 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
6157
6158 static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq);
6159
6160 +/*
6161 + * Unsigned subtract and clamp on underflow.
6162 + *
6163 + * Explicitly do a load-store to ensure the intermediate value never hits
6164 + * memory. This allows lockless observations without ever seeing the negative
6165 + * values.
6166 + */
6167 +#define sub_positive(_ptr, _val) do { \
6168 + typeof(_ptr) ptr = (_ptr); \
6169 + typeof(*ptr) val = (_val); \
6170 + typeof(*ptr) res, var = READ_ONCE(*ptr); \
6171 + res = var - val; \
6172 + if (res > var) \
6173 + res = 0; \
6174 + WRITE_ONCE(*ptr, res); \
6175 +} while (0)
6176 +
6177 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
6178 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
6179 {
6180 @@ -2829,15 +2846,15 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
6181
6182 if (atomic_long_read(&cfs_rq->removed_load_avg)) {
6183 s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
6184 - sa->load_avg = max_t(long, sa->load_avg - r, 0);
6185 - sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
6186 + sub_positive(&sa->load_avg, r);
6187 + sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
6188 removed = 1;
6189 }
6190
6191 if (atomic_long_read(&cfs_rq->removed_util_avg)) {
6192 long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
6193 - sa->util_avg = max_t(long, sa->util_avg - r, 0);
6194 - sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
6195 + sub_positive(&sa->util_avg, r);
6196 + sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
6197 }
6198
6199 decayed = __update_load_avg(now, cpu_of(rq_of(cfs_rq)), sa,
6200 @@ -2927,10 +2944,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
6201 &se->avg, se->on_rq * scale_load_down(se->load.weight),
6202 cfs_rq->curr == se, NULL);
6203
6204 - cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
6205 - cfs_rq->avg.load_sum = max_t(s64, cfs_rq->avg.load_sum - se->avg.load_sum, 0);
6206 - cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
6207 - cfs_rq->avg.util_sum = max_t(s32, cfs_rq->avg.util_sum - se->avg.util_sum, 0);
6208 + sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
6209 + sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
6210 + sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
6211 + sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
6212 }
6213
6214 /* Add the load generated by se into cfs_rq's load average */
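
sub_positive() replaces the max_t()-based clamping above: it computes the unsigned difference once, detects wrap-around by comparing the result against the original value, and publishes only the final value (the kernel version also uses READ_ONCE()/WRITE_ONCE() so lockless readers never observe an intermediate negative). A standalone demonstration of the clamp (GNU C, mirroring the macro without the _ONCE accessors):

    #include <stdio.h>

    /* Same shape as the patch's macro: unsigned subtract that clamps
     * to zero on underflow, writing the result in a single store. */
    #define sub_positive(_ptr, _val) do {        \
        typeof(*(_ptr)) res, var = *(_ptr);      \
        res = var - (_val);                      \
        if (res > var)   /* wrapped around */    \
            res = 0;                             \
        *(_ptr) = res;                           \
    } while (0)

    int main(void)
    {
        unsigned long avg = 100;

        sub_positive(&avg, 30UL);   /* 70 */
        sub_positive(&avg, 500UL);  /* would wrap; clamps to 0 */
        printf("%lu\n", avg);       /* prints 0 */
        return 0;
    }
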
6215 diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
6216 index bd12c6c714ec..c5aeedf4e93a 100644
6217 --- a/kernel/sched/idle.c
6218 +++ b/kernel/sched/idle.c
6219 @@ -127,7 +127,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
6220 */
6221 static void cpuidle_idle_call(void)
6222 {
6223 - struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
6224 + struct cpuidle_device *dev = cpuidle_get_device();
6225 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
6226 int next_state, entered_state;
6227
6228 diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
6229 index f96f0383f6c6..ad1d6164e946 100644
6230 --- a/kernel/trace/trace_printk.c
6231 +++ b/kernel/trace/trace_printk.c
6232 @@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
6233 static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
6234 {
6235 struct trace_bprintk_fmt *pos;
6236 +
6237 + if (!fmt)
6238 + return ERR_PTR(-EINVAL);
6239 +
6240 list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
6241 if (!strcmp(pos->fmt, fmt))
6242 return pos;
6243 @@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
6244 for (iter = start; iter < end; iter++) {
6245 struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
6246 if (tb_fmt) {
6247 - *iter = tb_fmt->fmt;
6248 + if (!IS_ERR(tb_fmt))
6249 + *iter = tb_fmt->fmt;
6250 continue;
6251 }
6252
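
The fix distinguishes "format not found" (NULL, so the caller may allocate one) from "invalid argument" by encoding the errno into the pointer with ERR_PTR() and testing it with IS_ERR() before dereferencing. A userspace re-creation of the idiom (the constants mirror the kernel's convention of reserving the top 4095 pointer values):

    #include <errno.h>
    #include <stdio.h>

    /* Userspace re-creation of the kernel's ERR_PTR idiom: errno
     * values are encoded into the top of the pointer range. */
    #define MAX_ERRNO 4095
    #define ERR_PTR(err)  ((void *)(long)(err))
    #define PTR_ERR(ptr)  ((long)(ptr))
    #define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static void *lookup(const char *fmt)
    {
        if (!fmt)
            return ERR_PTR(-EINVAL);  /* error, not "absent" */
        return NULL;                  /* absent: caller may create one */
    }

    int main(void)
    {
        void *p = lookup(NULL);
        if (IS_ERR(p))
            printf("error %ld\n", PTR_ERR(p)); /* error -22 */
        return 0;
    }
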
6253 diff --git a/mm/migrate.c b/mm/migrate.c
6254 index f9dfb18a4eba..bdf3410bb4fa 100644
6255 --- a/mm/migrate.c
6256 +++ b/mm/migrate.c
6257 @@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
6258
6259 return MIGRATEPAGE_SUCCESS;
6260 }
6261 +EXPORT_SYMBOL(migrate_page_move_mapping);
6262
6263 /*
6264 * The expected number of remaining references is the same as that
6265 @@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
6266
6267 mem_cgroup_migrate(page, newpage);
6268 }
6269 +EXPORT_SYMBOL(migrate_page_copy);
6270
6271 /************************************************************
6272 * Migration functions
6273 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
6274 index bc5149d5ec38..e389f0a998f1 100644
6275 --- a/mm/page-writeback.c
6276 +++ b/mm/page-writeback.c
6277 @@ -369,8 +369,9 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
6278 struct dirty_throttle_control *gdtc = mdtc_gdtc(dtc);
6279 unsigned long bytes = vm_dirty_bytes;
6280 unsigned long bg_bytes = dirty_background_bytes;
6281 - unsigned long ratio = vm_dirty_ratio;
6282 - unsigned long bg_ratio = dirty_background_ratio;
6283 + /* convert ratios to per-PAGE_SIZE for higher precision */
6284 + unsigned long ratio = (vm_dirty_ratio * PAGE_SIZE) / 100;
6285 + unsigned long bg_ratio = (dirty_background_ratio * PAGE_SIZE) / 100;
6286 unsigned long thresh;
6287 unsigned long bg_thresh;
6288 struct task_struct *tsk;
6289 @@ -382,26 +383,28 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc)
6290 /*
6291 * The byte settings can't be applied directly to memcg
6292 * domains. Convert them to ratios by scaling against
6293 - * globally available memory.
6294 + * globally available memory. As the ratios are in
6295 + * per-PAGE_SIZE, they can be obtained by dividing bytes by
6296 + * number of pages.
6297 */
6298 if (bytes)
6299 - ratio = min(DIV_ROUND_UP(bytes, PAGE_SIZE) * 100 /
6300 - global_avail, 100UL);
6301 + ratio = min(DIV_ROUND_UP(bytes, global_avail),
6302 + PAGE_SIZE);
6303 if (bg_bytes)
6304 - bg_ratio = min(DIV_ROUND_UP(bg_bytes, PAGE_SIZE) * 100 /
6305 - global_avail, 100UL);
6306 + bg_ratio = min(DIV_ROUND_UP(bg_bytes, global_avail),
6307 + PAGE_SIZE);
6308 bytes = bg_bytes = 0;
6309 }
6310
6311 if (bytes)
6312 thresh = DIV_ROUND_UP(bytes, PAGE_SIZE);
6313 else
6314 - thresh = (ratio * available_memory) / 100;
6315 + thresh = (ratio * available_memory) / PAGE_SIZE;
6316
6317 if (bg_bytes)
6318 bg_thresh = DIV_ROUND_UP(bg_bytes, PAGE_SIZE);
6319 else
6320 - bg_thresh = (bg_ratio * available_memory) / 100;
6321 + bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
6322
6323 if (bg_thresh >= thresh)
6324 bg_thresh = thresh / 2;
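
Expressing the ratios in units of 1/PAGE_SIZE instead of whole percent shrinks the quantization step from available_memory/100 to available_memory/PAGE_SIZE, roughly 40x finer with 4 KiB pages. A quick check of the arithmetic with assumed numbers (1,000,000 available pages, vm_dirty_bytes = 123456789):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096, avail = 1000000; /* pages */
        unsigned long bytes = 123456789UL;               /* vm_dirty_bytes */

        /* old: ratio in whole percent, heavily quantized */
        unsigned long pages = (bytes + page_size - 1) / page_size;
        unsigned long old_ratio = pages * 100 / avail;            /* 3 (%) */
        unsigned long old_thresh = old_ratio * avail / 100;       /* 30000 */

        /* new: ratio in units of 1/PAGE_SIZE */
        unsigned long new_ratio = (bytes + avail - 1) / avail;    /* 124 */
        unsigned long new_thresh = new_ratio * avail / page_size; /* 30273 */

        printf("%lu vs %lu pages\n", old_thresh, new_thresh);
        return 0;
    }
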
6325 diff --git a/mm/percpu.c b/mm/percpu.c
6326 index 0c59684f1ff2..9903830aaebb 100644
6327 --- a/mm/percpu.c
6328 +++ b/mm/percpu.c
6329 @@ -112,7 +112,7 @@ struct pcpu_chunk {
6330 int map_used; /* # of map entries used before the sentry */
6331 int map_alloc; /* # of map entries allocated */
6332 int *map; /* allocation map */
6333 - struct work_struct map_extend_work;/* async ->map[] extension */
6334 + struct list_head map_extend_list;/* on pcpu_map_extend_chunks */
6335
6336 void *data; /* chunk data */
6337 int first_free; /* no free below this */
6338 @@ -162,10 +162,13 @@ static struct pcpu_chunk *pcpu_reserved_chunk;
6339 static int pcpu_reserved_chunk_limit;
6340
6341 static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
6342 -static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
6343 +static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
6344
6345 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
6346
6347 +/* chunks which need their map areas extended, protected by pcpu_lock */
6348 +static LIST_HEAD(pcpu_map_extend_chunks);
6349 +
6350 /*
6351 * The number of empty populated pages, protected by pcpu_lock. The
6352 * reserved chunk doesn't contribute to the count.
6353 @@ -395,13 +398,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
6354 {
6355 int margin, new_alloc;
6356
6357 + lockdep_assert_held(&pcpu_lock);
6358 +
6359 if (is_atomic) {
6360 margin = 3;
6361
6362 if (chunk->map_alloc <
6363 - chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
6364 - pcpu_async_enabled)
6365 - schedule_work(&chunk->map_extend_work);
6366 + chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
6367 + if (list_empty(&chunk->map_extend_list)) {
6368 + list_add_tail(&chunk->map_extend_list,
6369 + &pcpu_map_extend_chunks);
6370 + pcpu_schedule_balance_work();
6371 + }
6372 + }
6373 } else {
6374 margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
6375 }
6376 @@ -435,6 +444,8 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
6377 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
6378 unsigned long flags;
6379
6380 + lockdep_assert_held(&pcpu_alloc_mutex);
6381 +
6382 new = pcpu_mem_zalloc(new_size);
6383 if (!new)
6384 return -ENOMEM;
6385 @@ -467,20 +478,6 @@ out_unlock:
6386 return 0;
6387 }
6388
6389 -static void pcpu_map_extend_workfn(struct work_struct *work)
6390 -{
6391 - struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
6392 - map_extend_work);
6393 - int new_alloc;
6394 -
6395 - spin_lock_irq(&pcpu_lock);
6396 - new_alloc = pcpu_need_to_extend(chunk, false);
6397 - spin_unlock_irq(&pcpu_lock);
6398 -
6399 - if (new_alloc)
6400 - pcpu_extend_area_map(chunk, new_alloc);
6401 -}
6402 -
6403 /**
6404 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
6405 * @chunk: chunk the candidate area belongs to
6406 @@ -740,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
6407 chunk->map_used = 1;
6408
6409 INIT_LIST_HEAD(&chunk->list);
6410 - INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
6411 + INIT_LIST_HEAD(&chunk->map_extend_list);
6412 chunk->free_size = pcpu_unit_size;
6413 chunk->contig_hint = pcpu_unit_size;
6414
6415 @@ -895,6 +892,9 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
6416 return NULL;
6417 }
6418
6419 + if (!is_atomic)
6420 + mutex_lock(&pcpu_alloc_mutex);
6421 +
6422 spin_lock_irqsave(&pcpu_lock, flags);
6423
6424 /* serve reserved allocations from the reserved chunk if available */
6425 @@ -967,12 +967,9 @@ restart:
6426 if (is_atomic)
6427 goto fail;
6428
6429 - mutex_lock(&pcpu_alloc_mutex);
6430 -
6431 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
6432 chunk = pcpu_create_chunk();
6433 if (!chunk) {
6434 - mutex_unlock(&pcpu_alloc_mutex);
6435 err = "failed to allocate new chunk";
6436 goto fail;
6437 }
6438 @@ -983,7 +980,6 @@ restart:
6439 spin_lock_irqsave(&pcpu_lock, flags);
6440 }
6441
6442 - mutex_unlock(&pcpu_alloc_mutex);
6443 goto restart;
6444
6445 area_found:
6446 @@ -993,8 +989,6 @@ area_found:
6447 if (!is_atomic) {
6448 int page_start, page_end, rs, re;
6449
6450 - mutex_lock(&pcpu_alloc_mutex);
6451 -
6452 page_start = PFN_DOWN(off);
6453 page_end = PFN_UP(off + size);
6454
6455 @@ -1005,7 +999,6 @@ area_found:
6456
6457 spin_lock_irqsave(&pcpu_lock, flags);
6458 if (ret) {
6459 - mutex_unlock(&pcpu_alloc_mutex);
6460 pcpu_free_area(chunk, off, &occ_pages);
6461 err = "failed to populate";
6462 goto fail_unlock;
6463 @@ -1045,6 +1038,8 @@ fail:
6464 /* see the flag handling in pcpu_balance_workfn() */
6465 pcpu_atomic_alloc_failed = true;
6466 pcpu_schedule_balance_work();
6467 + } else {
6468 + mutex_unlock(&pcpu_alloc_mutex);
6469 }
6470 return NULL;
6471 }
6472 @@ -1129,6 +1124,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
6473 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
6474 continue;
6475
6476 + list_del_init(&chunk->map_extend_list);
6477 list_move(&chunk->list, &to_free);
6478 }
6479
6480 @@ -1146,6 +1142,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
6481 pcpu_destroy_chunk(chunk);
6482 }
6483
6484 + /* service chunks which requested async area map extension */
6485 + do {
6486 + int new_alloc = 0;
6487 +
6488 + spin_lock_irq(&pcpu_lock);
6489 +
6490 + chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
6491 + struct pcpu_chunk, map_extend_list);
6492 + if (chunk) {
6493 + list_del_init(&chunk->map_extend_list);
6494 + new_alloc = pcpu_need_to_extend(chunk, false);
6495 + }
6496 +
6497 + spin_unlock_irq(&pcpu_lock);
6498 +
6499 + if (new_alloc)
6500 + pcpu_extend_area_map(chunk, new_alloc);
6501 + } while (chunk);
6502 +
6503 /*
6504 * Ensure there are certain number of free populated pages for
6505 * atomic allocs. Fill up from the most packed so that atomic
6506 @@ -1644,7 +1659,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
6507 */
6508 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
6509 INIT_LIST_HEAD(&schunk->list);
6510 - INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
6511 + INIT_LIST_HEAD(&schunk->map_extend_list);
6512 schunk->base_addr = base_addr;
6513 schunk->map = smap;
6514 schunk->map_alloc = ARRAY_SIZE(smap);
6515 @@ -1673,7 +1688,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
6516 if (dyn_size) {
6517 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
6518 INIT_LIST_HEAD(&dchunk->list);
6519 - INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
6520 + INIT_LIST_HEAD(&dchunk->map_extend_list);
6521 dchunk->base_addr = base_addr;
6522 dchunk->map = dmap;
6523 dchunk->map_alloc = ARRAY_SIZE(dmap);
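
The patch replaces the per-chunk work item with a global list: the atomic allocation path queues the chunk under pcpu_lock, and pcpu_balance_workfn() later drains the list and performs the sleeping extension with pcpu_alloc_mutex held. A miniature of that producer/worker shape (hypothetical names, a plain singly linked list instead of list_head):

    #include <pthread.h>
    #include <stdio.h>

    /* Hypothetical miniature of the deferred-extension pattern:
     * producers enqueue work under a short-lived lock; one worker
     * drains the queue and performs the slow work unlocked. */
    struct chunk {
        struct chunk *next;
        int needs_extend;
    };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct chunk *extend_q;

    static void request_extend(struct chunk *c)   /* fast path */
    {
        pthread_mutex_lock(&q_lock);
        if (!c->needs_extend) {          /* avoid double-queueing */
            c->needs_extend = 1;
            c->next = extend_q;
            extend_q = c;
        }
        pthread_mutex_unlock(&q_lock);
    }

    static void balance_work(void)                /* worker */
    {
        for (;;) {
            struct chunk *c;

            pthread_mutex_lock(&q_lock);
            c = extend_q;
            if (c) {
                extend_q = c->next;
                c->needs_extend = 0;
            }
            pthread_mutex_unlock(&q_lock);
            if (!c)
                break;
            /* the slow, sleeping allocation would happen here */
            printf("extending chunk %p\n", (void *)c);
        }
    }

    int main(void)
    {
        struct chunk a = {0}, b = {0};

        request_extend(&a);
        request_extend(&b);
        request_extend(&a);   /* already queued: ignored */
        balance_work();       /* drains b, then a */
        return 0;
    }
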
6524 diff --git a/mm/shmem.c b/mm/shmem.c
6525 index 719bd6b88d98..9ca09f52fef5 100644
6526 --- a/mm/shmem.c
6527 +++ b/mm/shmem.c
6528 @@ -2236,9 +2236,11 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
6529 NULL);
6530 if (error) {
6531 /* Remove the !PageUptodate pages we added */
6532 - shmem_undo_range(inode,
6533 - (loff_t)start << PAGE_SHIFT,
6534 - (loff_t)index << PAGE_SHIFT, true);
6535 + if (index > start) {
6536 + shmem_undo_range(inode,
6537 + (loff_t)start << PAGE_SHIFT,
6538 + ((loff_t)index << PAGE_SHIFT) - 1, true);
6539 + }
6540 goto undone;
6541 }
6542
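
The undo range is inclusive on both ends, and index points at the first page that was not instantiated, so the correct span is [start << PAGE_SHIFT, (index << PAGE_SHIFT) - 1], taken only when index > start. A worked example of the byte ranges (assuming 4 KiB pages):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long start = 3, index = 5;  /* pages 3 and 4 were added */

        /* inclusive byte range covering exactly pages start..index-1 */
        long long lstart = (long long)start << PAGE_SHIFT;
        long long lend   = ((long long)index << PAGE_SHIFT) - 1;

        printf("undo [%lld, %lld]\n", lstart, lend); /* [12288, 20479] */

        /* the old code passed index << PAGE_SHIFT (20480), touching the
         * first byte of page 5, which was never instantiated; and with
         * index == start it would "undo" a page that was never added */
        return 0;
    }
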
6543 diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
6544 index a669dea146c6..61ad43f61c5e 100644
6545 --- a/net/core/flow_dissector.c
6546 +++ b/net/core/flow_dissector.c
6547 @@ -651,6 +651,23 @@ void make_flow_keys_digest(struct flow_keys_digest *digest,
6548 }
6549 EXPORT_SYMBOL(make_flow_keys_digest);
6550
6551 +static struct flow_dissector flow_keys_dissector_symmetric __read_mostly;
6552 +
6553 +u32 __skb_get_hash_symmetric(struct sk_buff *skb)
6554 +{
6555 + struct flow_keys keys;
6556 +
6557 + __flow_hash_secret_init();
6558 +
6559 + memset(&keys, 0, sizeof(keys));
6560 + __skb_flow_dissect(skb, &flow_keys_dissector_symmetric, &keys,
6561 + NULL, 0, 0, 0,
6562 + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
6563 +
6564 + return __flow_hash_from_keys(&keys, hashrnd);
6565 +}
6566 +EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric);
6567 +
6568 /**
6569 * __skb_get_hash: calculate a flow hash
6570 * @skb: sk_buff to calculate flow hash from
6571 @@ -868,6 +885,29 @@ static const struct flow_dissector_key flow_keys_dissector_keys[] = {
6572 },
6573 };
6574
6575 +static const struct flow_dissector_key flow_keys_dissector_symmetric_keys[] = {
6576 + {
6577 + .key_id = FLOW_DISSECTOR_KEY_CONTROL,
6578 + .offset = offsetof(struct flow_keys, control),
6579 + },
6580 + {
6581 + .key_id = FLOW_DISSECTOR_KEY_BASIC,
6582 + .offset = offsetof(struct flow_keys, basic),
6583 + },
6584 + {
6585 + .key_id = FLOW_DISSECTOR_KEY_IPV4_ADDRS,
6586 + .offset = offsetof(struct flow_keys, addrs.v4addrs),
6587 + },
6588 + {
6589 + .key_id = FLOW_DISSECTOR_KEY_IPV6_ADDRS,
6590 + .offset = offsetof(struct flow_keys, addrs.v6addrs),
6591 + },
6592 + {
6593 + .key_id = FLOW_DISSECTOR_KEY_PORTS,
6594 + .offset = offsetof(struct flow_keys, ports),
6595 + },
6596 +};
6597 +
6598 static const struct flow_dissector_key flow_keys_buf_dissector_keys[] = {
6599 {
6600 .key_id = FLOW_DISSECTOR_KEY_CONTROL,
6601 @@ -889,6 +929,9 @@ static int __init init_default_flow_dissectors(void)
6602 skb_flow_dissector_init(&flow_keys_dissector,
6603 flow_keys_dissector_keys,
6604 ARRAY_SIZE(flow_keys_dissector_keys));
6605 + skb_flow_dissector_init(&flow_keys_dissector_symmetric,
6606 + flow_keys_dissector_symmetric_keys,
6607 + ARRAY_SIZE(flow_keys_dissector_symmetric_keys));
6608 skb_flow_dissector_init(&flow_keys_buf_dissector,
6609 flow_keys_buf_dissector_keys,
6610 ARRAY_SIZE(flow_keys_buf_dissector_keys));
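
The symmetric dissector drops direction-dependent keys so that, together with the flow-hash core's canonical ordering of addresses and ports, both directions of a connection produce the same hash; fanout_demux_hash() in af_packet relies on this further below to keep a flow on one socket. A conceptual sketch of the canonicalize-then-hash idea (not the kernel's actual hash function or key layout):

    #include <stdint.h>
    #include <stdio.h>

    /* Conceptual sketch of symmetric flow hashing: canonicalize the
     * (addr, port) pairs so A->B and B->A hash identically. */
    struct flow { uint32_t saddr, daddr; uint16_t sport, dport; };

    static uint32_t mix(uint32_t h, uint32_t v)
    {
        h ^= v;
        h *= 0x9e3779b1u;  /* arbitrary odd multiplier */
        return h;
    }

    static uint32_t symmetric_hash(struct flow f)
    {
        if (f.saddr > f.daddr || (f.saddr == f.daddr && f.sport > f.dport)) {
            uint32_t a = f.saddr; f.saddr = f.daddr; f.daddr = a;
            uint16_t p = f.sport; f.sport = f.dport; f.dport = p;
        }
        return mix(mix(mix(mix(1u, f.saddr), f.daddr), f.sport), f.dport);
    }

    int main(void)
    {
        struct flow fwd = { 0x0a000001, 0x0a000002, 12345, 80 };
        struct flow rev = { 0x0a000002, 0x0a000001, 80, 12345 };

        printf("%08x %08x\n", symmetric_hash(fwd), symmetric_hash(rev));
        return 0;  /* both hashes match */
    }
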
6611 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
6612 index e561f9f07d6d..59bf4d77154f 100644
6613 --- a/net/core/skbuff.c
6614 +++ b/net/core/skbuff.c
6615 @@ -3016,24 +3016,6 @@ int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
6616 EXPORT_SYMBOL_GPL(skb_append_pagefrags);
6617
6618 /**
6619 - * skb_push_rcsum - push skb and update receive checksum
6620 - * @skb: buffer to update
6621 - * @len: length of data pulled
6622 - *
6623 - * This function performs an skb_push on the packet and updates
6624 - * the CHECKSUM_COMPLETE checksum. It should be used on
6625 - * receive path processing instead of skb_push unless you know
6626 - * that the checksum difference is zero (e.g., a valid IP header)
6627 - * or you are setting ip_summed to CHECKSUM_NONE.
6628 - */
6629 -static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
6630 -{
6631 - skb_push(skb, len);
6632 - skb_postpush_rcsum(skb, skb->data, len);
6633 - return skb->data;
6634 -}
6635 -
6636 -/**
6637 * skb_pull_rcsum - pull skb and update receive checksum
6638 * @skb: buffer to update
6639 * @len: length of data pulled
6640 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
6641 index ea071fad67a0..c26fac26b23c 100644
6642 --- a/net/ipv6/ip6_fib.c
6643 +++ b/net/ipv6/ip6_fib.c
6644 @@ -177,6 +177,7 @@ static void rt6_free_pcpu(struct rt6_info *non_pcpu_rt)
6645 }
6646 }
6647
6648 + free_percpu(non_pcpu_rt->rt6i_pcpu);
6649 non_pcpu_rt->rt6i_pcpu = NULL;
6650 }
6651
6652 diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
6653 index d32cefcb63b0..34a5712d467f 100644
6654 --- a/net/mac80211/mesh.c
6655 +++ b/net/mac80211/mesh.c
6656 @@ -150,19 +150,26 @@ u32 mesh_accept_plinks_update(struct ieee80211_sub_if_data *sdata)
6657 void mesh_sta_cleanup(struct sta_info *sta)
6658 {
6659 struct ieee80211_sub_if_data *sdata = sta->sdata;
6660 - u32 changed;
6661 + u32 changed = 0;
6662
6663 /*
6664 * maybe userspace handles peer allocation and peering, but in either
6665 * case the beacon is still generated by the kernel and we might need
6666 * an update.
6667 */
6668 - changed = mesh_accept_plinks_update(sdata);
6669 + if (sdata->u.mesh.user_mpm &&
6670 + sta->mesh->plink_state == NL80211_PLINK_ESTAB)
6671 + changed |= mesh_plink_dec_estab_count(sdata);
6672 + changed |= mesh_accept_plinks_update(sdata);
6673 if (!sdata->u.mesh.user_mpm) {
6674 changed |= mesh_plink_deactivate(sta);
6675 del_timer_sync(&sta->mesh->plink_timer);
6676 }
6677
6678 + /* make sure no readers can access nexthop sta from here on */
6679 + mesh_path_flush_by_nexthop(sta);
6680 + synchronize_net();
6681 +
6682 if (changed)
6683 ieee80211_mbss_info_change_notify(sdata, changed);
6684 }
6685 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
6686 index 62193f4bc37b..ba7ce53ec615 100644
6687 --- a/net/mac80211/sta_info.h
6688 +++ b/net/mac80211/sta_info.h
6689 @@ -275,7 +275,7 @@ struct ieee80211_fast_tx {
6690 u8 sa_offs, da_offs, pn_offs;
6691 u8 band;
6692 u8 hdr[30 + 2 + IEEE80211_FAST_XMIT_MAX_IV +
6693 - sizeof(rfc1042_header)];
6694 + sizeof(rfc1042_header)] __aligned(2);
6695
6696 struct rcu_head rcu_head;
6697 };
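
The __aligned(2) keeps the cached header template halfword-aligned: it is assembled with helpers such as ether_addr_copy() that copy in 16-bit units and can fault or corrupt on strict-alignment CPUs if the array starts on an odd address. A small illustration of the attribute's effect on struct layout (field sizes are made up):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct fast_tx {
        uint8_t sa_offs;
        /* without the attribute, hdr[] could start on an odd address,
         * breaking 2-byte copies on strict-alignment machines */
        uint8_t hdr[32] __attribute__((aligned(2)));
    };

    int main(void)
    {
        printf("%zu\n", offsetof(struct fast_tx, hdr)); /* even: 2 */
        return 0;
    }
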
6698 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
6699 index 18d0becbc46d..8012f67ca5ae 100644
6700 --- a/net/packet/af_packet.c
6701 +++ b/net/packet/af_packet.c
6702 @@ -1340,7 +1340,7 @@ static unsigned int fanout_demux_hash(struct packet_fanout *f,
6703 struct sk_buff *skb,
6704 unsigned int num)
6705 {
6706 - return reciprocal_scale(skb_get_hash(skb), num);
6707 + return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
6708 }
6709
6710 static unsigned int fanout_demux_lb(struct packet_fanout *f,
6711 diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
6712 index 8f3948dd38b8..934336e12a65 100644
6713 --- a/net/sched/act_mirred.c
6714 +++ b/net/sched/act_mirred.c
6715 @@ -180,7 +180,7 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
6716
6717 if (!(at & AT_EGRESS)) {
6718 if (m->tcfm_ok_push)
6719 - skb_push(skb2, skb->mac_len);
6720 + skb_push_rcsum(skb2, skb->mac_len);
6721 }
6722
6723 /* mirror is always swallowed */
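
skb_push_rcsum() pairs skb_push() with skb_postpush_rcsum() so a CHECKSUM_COMPLETE value stays correct after header bytes are pushed back in front of the data; the mirred action above was pushing without the fixup. A toy model of why extending the sum over the pushed bytes is enough (16-bit ones'-complement sum, even chunk lengths; the kernel additionally handles odd offsets, which this sketch omits):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy 16-bit ones'-complement sum standing in for skb->csum. */
    static uint32_t csum_bytes(uint32_t sum, const uint8_t *p, int len)
    {
        for (int i = 0; i < len; i += 2)
            sum += (uint32_t)(p[i] << 8 | p[i + 1]);
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return sum;
    }

    int main(void)
    {
        uint8_t pkt[24];
        int hdr = 8, len = 24;

        for (int i = 0; i < len; i++)
            pkt[i] = (uint8_t)(i * 37 + 1);

        /* CHECKSUM_COMPLETE currently covers the payload only */
        uint32_t sum = csum_bytes(0, pkt + hdr, len - hdr);

        /* "push" the header back: extend the stored sum over the
         * pushed bytes, which is what skb_postpush_rcsum() does */
        sum = csum_bytes(sum, pkt, hdr);

        printf("%04x %04x\n", sum, csum_bytes(0, pkt, len)); /* equal */
        return 0;
    }
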
6724 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
6725 index 7e0c9bf22df8..837dd910a252 100644
6726 --- a/net/sunrpc/clnt.c
6727 +++ b/net/sunrpc/clnt.c
6728 @@ -446,16 +446,27 @@ out_no_rpciod:
6729 return ERR_PTR(err);
6730 }
6731
6732 -struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
6733 +static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
6734 struct rpc_xprt *xprt)
6735 {
6736 struct rpc_clnt *clnt = NULL;
6737 struct rpc_xprt_switch *xps;
6738
6739 - xps = xprt_switch_alloc(xprt, GFP_KERNEL);
6740 - if (xps == NULL)
6741 - return ERR_PTR(-ENOMEM);
6742 -
6743 + if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
6744 + WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
6745 + xps = args->bc_xprt->xpt_bc_xps;
6746 + xprt_switch_get(xps);
6747 + } else {
6748 + xps = xprt_switch_alloc(xprt, GFP_KERNEL);
6749 + if (xps == NULL) {
6750 + xprt_put(xprt);
6751 + return ERR_PTR(-ENOMEM);
6752 + }
6753 + if (xprt->bc_xprt) {
6754 + xprt_switch_get(xps);
6755 + xprt->bc_xprt->xpt_bc_xps = xps;
6756 + }
6757 + }
6758 clnt = rpc_new_client(args, xps, xprt, NULL);
6759 if (IS_ERR(clnt))
6760 return clnt;
6761 @@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
6762
6763 return clnt;
6764 }
6765 -EXPORT_SYMBOL_GPL(rpc_create_xprt);
6766
6767 /**
6768 * rpc_create - create an RPC client and transport with one call
6769 @@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
6770 };
6771 char servername[48];
6772
6773 + if (args->bc_xprt) {
6774 + WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
6775 + xprt = args->bc_xprt->xpt_bc_xprt;
6776 + if (xprt) {
6777 + xprt_get(xprt);
6778 + return rpc_create_xprt(args, xprt);
6779 + }
6780 + }
6781 +
6782 if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
6783 xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
6784 if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
6785 diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
6786 index 7422f28818b2..7231cb413a2c 100644
6787 --- a/net/sunrpc/svc_xprt.c
6788 +++ b/net/sunrpc/svc_xprt.c
6789 @@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
6790 /* See comment on corresponding get in xs_setup_bc_tcp(): */
6791 if (xprt->xpt_bc_xprt)
6792 xprt_put(xprt->xpt_bc_xprt);
6793 + if (xprt->xpt_bc_xps)
6794 + xprt_switch_put(xprt->xpt_bc_xps);
6795 xprt->xpt_ops->xpo_free(xprt);
6796 module_put(owner);
6797 }
6798 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
6799 index 65e759569e48..e9e5dd0dc8f4 100644
6800 --- a/net/sunrpc/xprtsock.c
6801 +++ b/net/sunrpc/xprtsock.c
6802 @@ -3050,6 +3050,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
6803 return xprt;
6804
6805 args->bc_xprt->xpt_bc_xprt = NULL;
6806 + args->bc_xprt->xpt_bc_xps = NULL;
6807 xprt_put(xprt);
6808 ret = ERR_PTR(-EINVAL);
6809 out_err:
6810 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
6811 index 8269da73e9e5..7748199b3568 100644
6812 --- a/net/unix/af_unix.c
6813 +++ b/net/unix/af_unix.c
6814 @@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
6815 &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
6816 struct dentry *dentry = unix_sk(s)->path.dentry;
6817
6818 - if (dentry && d_backing_inode(dentry) == i) {
6819 + if (dentry && d_real_inode(dentry) == i) {
6820 sock_hold(s);
6821 goto found;
6822 }
6823 @@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
6824 err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
6825 if (err)
6826 goto fail;
6827 - inode = d_backing_inode(path.dentry);
6828 + inode = d_real_inode(path.dentry);
6829 err = inode_permission(inode, MAY_WRITE);
6830 if (err)
6831 goto put_fail;
6832 @@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
6833 goto out_up;
6834 }
6835 addr->hash = UNIX_HASH_SIZE;
6836 - hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
6837 + hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
6838 spin_lock(&unix_table_lock);
6839 u->path = u_path;
6840 list = &unix_socket_table[hash];
6841 diff --git a/net/wireless/core.c b/net/wireless/core.c
6842 index 9f1c4aa851ef..c878045d146a 100644
6843 --- a/net/wireless/core.c
6844 +++ b/net/wireless/core.c
6845 @@ -360,8 +360,6 @@ struct wiphy *wiphy_new_nm(const struct cfg80211_ops *ops, int sizeof_priv,
6846 WARN_ON(ops->remain_on_channel && !ops->cancel_remain_on_channel);
6847 WARN_ON(ops->tdls_channel_switch && !ops->tdls_cancel_channel_switch);
6848 WARN_ON(ops->add_tx_ts && !ops->del_tx_ts);
6849 - WARN_ON(ops->set_tx_power && !ops->get_tx_power);
6850 - WARN_ON(ops->set_antenna && !ops->get_antenna);
6851
6852 alloc_size = sizeof(*rdev) + sizeof_priv;
6853
6854 diff --git a/net/wireless/util.c b/net/wireless/util.c
6855 index 9f440a9de63b..47b917841623 100644
6856 --- a/net/wireless/util.c
6857 +++ b/net/wireless/util.c
6858 @@ -509,7 +509,7 @@ static int __ieee80211_data_to_8023(struct sk_buff *skb, struct ethhdr *ehdr,
6859 * replace EtherType */
6860 hdrlen += ETH_ALEN + 2;
6861 else
6862 - tmp.h_proto = htons(skb->len);
6863 + tmp.h_proto = htons(skb->len - hdrlen);
6864
6865 pskb_pull(skb, hdrlen);
6866
6867 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
6868 index a9155077feef..fec75786f75b 100644
6869 --- a/scripts/mod/file2alias.c
6870 +++ b/scripts/mod/file2alias.c
6871 @@ -384,7 +384,7 @@ static void do_of_entry_multi(void *symval, struct module *mod)
6872 len = sprintf(alias, "of:N%sT%s", (*name)[0] ? *name : "*",
6873 (*type)[0] ? *type : "*");
6874
6875 - if (compatible[0])
6876 + if ((*compatible)[0])
6877 sprintf(&alias[len], "%sC%s", (*type)[0] ? "*" : "",
6878 *compatible);
6879
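
In file2alias.c, compatible is a pointer to a fixed-size char array, so the old test compatible[0] evaluated a whole array, which decays to a never-NULL pointer and is always true; the first character is (*compatible)[0]. A minimal demonstration of the bug class:

    #include <stdio.h>

    int main(void)
    {
        char buf[128] = "";           /* empty 'compatible' string */
        char (*compatible)[128] = &buf;

        /* compatible[0] decays to a pointer to the array: never NULL,
         * so the old test was taken even for an empty string */
        if (compatible[0])
            printf("old test: taken even when empty\n");

        /* the fixed test looks at the first character */
        if ((*compatible)[0])
            printf("new test: taken\n");
        else
            printf("new test: skipped for empty string\n");
        return 0;
    }
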
6880 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
6881 index dec607c17b64..5ee820111027 100644
6882 --- a/security/apparmor/lsm.c
6883 +++ b/security/apparmor/lsm.c
6884 @@ -523,34 +523,34 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
6885 {
6886 struct common_audit_data sa;
6887 struct apparmor_audit_data aad = {0,};
6888 - char *command, *args = value;
6889 + char *command, *largs = NULL, *args = value;
6890 size_t arg_size;
6891 int error;
6892
6893 if (size == 0)
6894 return -EINVAL;
6895 - /* args points to a PAGE_SIZE buffer, AppArmor requires that
6896 - * the buffer must be null terminated or have size <= PAGE_SIZE -1
6897 - * so that AppArmor can null terminate them
6898 - */
6899 - if (args[size - 1] != '\0') {
6900 - if (size == PAGE_SIZE)
6901 - return -EINVAL;
6902 - args[size] = '\0';
6903 - }
6904 -
6905 /* task can only write its own attributes */
6906 if (current != task)
6907 return -EACCES;
6908
6909 - args = value;
6910 + /* AppArmor currently requires that the buffer be null terminated */
6911 + if (args[size - 1] != '\0') {
6912 + /* null terminate */
6913 + largs = args = kmalloc(size + 1, GFP_KERNEL);
6914 + if (!args)
6915 + return -ENOMEM;
6916 + memcpy(args, value, size);
6917 + args[size] = '\0';
6918 + }
6919 +
6920 + error = -EINVAL;
6921 args = strim(args);
6922 command = strsep(&args, " ");
6923 if (!args)
6924 - return -EINVAL;
6925 + goto out;
6926 args = skip_spaces(args);
6927 if (!*args)
6928 - return -EINVAL;
6929 + goto out;
6930
6931 arg_size = size - (args - (char *) value);
6932 if (strcmp(name, "current") == 0) {
6933 @@ -576,10 +576,12 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
6934 goto fail;
6935 } else
6936 /* only support the "current" and "exec" process attributes */
6937 - return -EINVAL;
6938 + goto fail;
6939
6940 if (!error)
6941 error = size;
6942 +out:
6943 + kfree(largs);
6944 return error;
6945
6946 fail:
6947 @@ -588,9 +590,9 @@ fail:
6948 aad.profile = aa_current_profile();
6949 aad.op = OP_SETPROCATTR;
6950 aad.info = name;
6951 - aad.error = -EINVAL;
6952 + aad.error = error = -EINVAL;
6953 aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
6954 - return -EINVAL;
6955 + goto out;
6956 }
6957
6958 static int apparmor_task_setrlimit(struct task_struct *task,
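
Instead of writing a terminator into the caller-supplied page (impossible when the value already fills PAGE_SIZE), the fix duplicates an unterminated buffer into a kmalloc'd copy one byte larger, parses that, and frees it on every exit path. A sketch of the same pattern in plain C (hypothetical helper, malloc in place of kmalloc):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* If the input is not NUL-terminated, duplicate it into a buffer
     * one byte larger rather than writing past the caller's buffer.
     * On success *out is safe to parse and *to_free must be freed. */
    static int null_terminate(const char *value, size_t size,
                              char **out, char **to_free)
    {
        *to_free = NULL;
        *out = (char *)value;
        if (size == 0)
            return -1;
        if (value[size - 1] != '\0') {
            char *buf = malloc(size + 1);
            if (!buf)
                return -1;
            memcpy(buf, value, size);
            buf[size] = '\0';
            *out = *to_free = buf;
        }
        return 0;
    }

    int main(void)
    {
        char raw[4] = { 'e', 'x', 'e', 'c' };   /* no terminator */
        char *args, *largs;

        if (!null_terminate(raw, sizeof(raw), &args, &largs))
            puts(args);                          /* safe: "exec" */
        free(largs);                             /* free(NULL) is a no-op */
        return 0;
    }
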
6959 diff --git a/security/keys/key.c b/security/keys/key.c
6960 index b28755131687..af7f6821d26b 100644
6961 --- a/security/keys/key.c
6962 +++ b/security/keys/key.c
6963 @@ -584,7 +584,7 @@ int key_reject_and_link(struct key *key,
6964
6965 mutex_unlock(&key_construction_mutex);
6966
6967 - if (keyring)
6968 + if (keyring && link_ret == 0)
6969 __key_link_end(keyring, &key->index_key, edit);
6970
6971 /* wake up anyone waiting for a key to be constructed */
6972 diff --git a/sound/core/control.c b/sound/core/control.c
6973 index a85d45595d02..b4fe9b002512 100644
6974 --- a/sound/core/control.c
6975 +++ b/sound/core/control.c
6976 @@ -160,6 +160,8 @@ void snd_ctl_notify(struct snd_card *card, unsigned int mask,
6977
6978 if (snd_BUG_ON(!card || !id))
6979 return;
6980 + if (card->shutdown)
6981 + return;
6982 read_lock(&card->ctl_files_rwlock);
6983 #if IS_ENABLED(CONFIG_SND_MIXER_OSS)
6984 card->mixer_oss_change_count++;
6985 diff --git a/sound/core/pcm.c b/sound/core/pcm.c
6986 index 308c9ecf73db..8e980aa678d0 100644
6987 --- a/sound/core/pcm.c
6988 +++ b/sound/core/pcm.c
6989 @@ -849,6 +849,14 @@ int snd_pcm_new_internal(struct snd_card *card, const char *id, int device,
6990 }
6991 EXPORT_SYMBOL(snd_pcm_new_internal);
6992
6993 +static void free_chmap(struct snd_pcm_str *pstr)
6994 +{
6995 + if (pstr->chmap_kctl) {
6996 + snd_ctl_remove(pstr->pcm->card, pstr->chmap_kctl);
6997 + pstr->chmap_kctl = NULL;
6998 + }
6999 +}
7000 +
7001 static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
7002 {
7003 struct snd_pcm_substream *substream, *substream_next;
7004 @@ -871,6 +879,7 @@ static void snd_pcm_free_stream(struct snd_pcm_str * pstr)
7005 kfree(setup);
7006 }
7007 #endif
7008 + free_chmap(pstr);
7009 if (pstr->substream_count)
7010 put_device(&pstr->dev);
7011 }
7012 @@ -1135,10 +1144,7 @@ static int snd_pcm_dev_disconnect(struct snd_device *device)
7013 for (cidx = 0; cidx < 2; cidx++) {
7014 if (!pcm->internal)
7015 snd_unregister_device(&pcm->streams[cidx].dev);
7016 - if (pcm->streams[cidx].chmap_kctl) {
7017 - snd_ctl_remove(pcm->card, pcm->streams[cidx].chmap_kctl);
7018 - pcm->streams[cidx].chmap_kctl = NULL;
7019 - }
7020 + free_chmap(&pcm->streams[cidx]);
7021 }
7022 mutex_unlock(&pcm->open_mutex);
7023 mutex_unlock(&register_mutex);
7024 diff --git a/sound/core/timer.c b/sound/core/timer.c
7025 index 6469bedda2f3..23b73f6ac040 100644
7026 --- a/sound/core/timer.c
7027 +++ b/sound/core/timer.c
7028 @@ -1954,6 +1954,7 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7029
7030 qhead = tu->qhead++;
7031 tu->qhead %= tu->queue_size;
7032 + tu->qused--;
7033 spin_unlock_irq(&tu->qlock);
7034
7035 if (tu->tread) {
7036 @@ -1967,7 +1968,6 @@ static ssize_t snd_timer_user_read(struct file *file, char __user *buffer,
7037 }
7038
7039 spin_lock_irq(&tu->qlock);
7040 - tu->qused--;
7041 if (err < 0)
7042 goto _error;
7043 result += unit;
7044 diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
7045 index c0f8f613f1f1..172dacd925f5 100644
7046 --- a/sound/drivers/dummy.c
7047 +++ b/sound/drivers/dummy.c
7048 @@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
7049
7050 static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
7051 {
7052 + hrtimer_cancel(&dpcm->timer);
7053 tasklet_kill(&dpcm->tasklet);
7054 }
7055
7056 diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c
7057 index 87041ddd29cb..47a358fab132 100644
7058 --- a/sound/hda/hdac_regmap.c
7059 +++ b/sound/hda/hdac_regmap.c
7060 @@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
7061 err = reg_raw_write(codec, reg, val);
7062 if (err == -EAGAIN) {
7063 err = snd_hdac_power_up_pm(codec);
7064 - if (!err)
7065 + if (err >= 0)
7066 err = reg_raw_write(codec, reg, val);
7067 snd_hdac_power_down_pm(codec);
7068 }
7069 @@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
7070 err = reg_raw_read(codec, reg, val, uncached);
7071 if (err == -EAGAIN) {
7072 err = snd_hdac_power_up_pm(codec);
7073 - if (!err)
7074 + if (err >= 0)
7075 err = reg_raw_read(codec, reg, val, uncached);
7076 snd_hdac_power_down_pm(codec);
7077 }
7078 diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
7079 index 4667c3232b7f..74177189063c 100644
7080 --- a/sound/pci/au88x0/au88x0_core.c
7081 +++ b/sound/pci/au88x0/au88x0_core.c
7082 @@ -1444,9 +1444,8 @@ static int vortex_wtdma_bufshift(vortex_t * vortex, int wtdma)
7083 int page, p, pp, delta, i;
7084
7085 page =
7086 - (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2)) &
7087 - WT_SUBBUF_MASK)
7088 - >> WT_SUBBUF_SHIFT;
7089 + (hwread(vortex->mmio, VORTEX_WTDMA_STAT + (wtdma << 2))
7090 + >> WT_SUBBUF_SHIFT) & WT_SUBBUF_MASK;
7091 if (dma->nr_periods >= 4)
7092 delta = (page - dma->period_real) & 3;
7093 else {
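
WT_SUBBUF_MASK describes the field after shifting, so masking the raw register first cleared the bits the shift was about to extract; the fix shifts first, then masks. A minimal illustration (the mask and shift values here are assumed, not the driver's actual constants):

    #include <stdio.h>

    #define SUBBUF_MASK  0x3   /* assumed: mask for the field *after* shifting */
    #define SUBBUF_SHIFT 21    /* assumed: position of the field in the register */

    int main(void)
    {
        unsigned int reg = 2u << SUBBUF_SHIFT;  /* field holds 2 */

        /* buggy: mask (defined post-shift) applied pre-shift -> always 0 */
        printf("%u\n", (reg & SUBBUF_MASK) >> SUBBUF_SHIFT);

        /* fixed: shift first, then mask -> 2 */
        printf("%u\n", (reg >> SUBBUF_SHIFT) & SUBBUF_MASK);
        return 0;
    }
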
7094 diff --git a/sound/pci/echoaudio/echoaudio.c b/sound/pci/echoaudio/echoaudio.c
7095 index 1cb85aeb0cea..286f5e3686a3 100644
7096 --- a/sound/pci/echoaudio/echoaudio.c
7097 +++ b/sound/pci/echoaudio/echoaudio.c
7098 @@ -2200,11 +2200,11 @@ static int snd_echo_resume(struct device *dev)
7099 u32 pipe_alloc_mask;
7100 int err;
7101
7102 - commpage_bak = kmalloc(sizeof(struct echoaudio), GFP_KERNEL);
7103 + commpage_bak = kmalloc(sizeof(*commpage), GFP_KERNEL);
7104 if (commpage_bak == NULL)
7105 return -ENOMEM;
7106 commpage = chip->comm_page;
7107 - memcpy(commpage_bak, commpage, sizeof(struct comm_page));
7108 + memcpy(commpage_bak, commpage, sizeof(*commpage));
7109
7110 err = init_hw(chip, chip->pci->device, chip->pci->subsystem_device);
7111 if (err < 0) {
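
The resume path allocated sizeof(struct echoaudio) but copied sizeof(struct comm_page) into it; whenever the comm page is the larger type, the memcpy overruns the allocation. The sizeof(*ptr) idiom in the fix derives both sizes from the destination pointer so they cannot drift apart. A before/after sketch with made-up struct sizes:

    #include <stdlib.h>
    #include <string.h>

    struct comm_page { char data[4096]; };   /* hypothetical sizes */
    struct chip      { char data[512];  };

    int main(void)
    {
        struct comm_page src = { {0} };

        /* buggy shape: allocation sized by the wrong type */
        void *bad = malloc(sizeof(struct chip));
        /* memcpy(bad, &src, sizeof(struct comm_page)); -- heap overrun */

        /* fixed shape: both sizes derive from the destination pointer */
        struct comm_page *bak = malloc(sizeof(*bak));
        if (bak)
            memcpy(bak, &src, sizeof(*bak));

        free(bad);
        free(bak);
        return 0;
    }
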
7112 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
7113 index dfaf1a93fb8a..d77cc76aadab 100644
7114 --- a/sound/pci/hda/hda_generic.c
7115 +++ b/sound/pci/hda/hda_generic.c
7116 @@ -3977,6 +3977,8 @@ static hda_nid_t set_path_power(struct hda_codec *codec, hda_nid_t nid,
7117
7118 for (n = 0; n < spec->paths.used; n++) {
7119 path = snd_array_elem(&spec->paths, n);
7120 + if (!path->depth)
7121 + continue;
7122 if (path->path[0] == nid ||
7123 path->path[path->depth - 1] == nid) {
7124 bool pin_old = path->pin_enabled;
7125 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7126 index 94089fc71884..6f8ea13323c1 100644
7127 --- a/sound/pci/hda/hda_intel.c
7128 +++ b/sound/pci/hda/hda_intel.c
7129 @@ -367,9 +367,10 @@ enum {
7130 #define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
7131 #define IS_KBL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa171)
7132 #define IS_KBL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d71)
7133 +#define IS_KBL_H(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa2f0)
7134 #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
7135 #define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci)) || \
7136 - IS_KBL(pci) || IS_KBL_LP(pci)
7137 + IS_KBL(pci) || IS_KBL_LP(pci) || IS_KBL_H(pci)
7138
7139 static char *driver_short_names[] = {
7140 [AZX_DRIVER_ICH] = "HDA Intel",
7141 @@ -1217,8 +1218,10 @@ static int azx_free(struct azx *chip)
7142 if (use_vga_switcheroo(hda)) {
7143 if (chip->disabled && hda->probe_continued)
7144 snd_hda_unlock_devices(&chip->bus);
7145 - if (hda->vga_switcheroo_registered)
7146 + if (hda->vga_switcheroo_registered) {
7147 vga_switcheroo_unregister_client(chip->pci);
7148 + vga_switcheroo_fini_domain_pm_ops(chip->card->dev);
7149 + }
7150 }
7151
7152 if (bus->chip_init) {
7153 @@ -2190,6 +2193,9 @@ static const struct pci_device_id azx_ids[] = {
7154 /* Kabylake-LP */
7155 { PCI_DEVICE(0x8086, 0x9d71),
7156 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
7157 + /* Kabylake-H */
7158 + { PCI_DEVICE(0x8086, 0xa2f0),
7159 + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
7160 /* Broxton-P(Apollolake) */
7161 { PCI_DEVICE(0x8086, 0x5a98),
7162 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_BROXTON },
7163 @@ -2263,6 +2269,8 @@ static const struct pci_device_id azx_ids[] = {
7164 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7165 { PCI_DEVICE(0x1002, 0x157a),
7166 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7167 + { PCI_DEVICE(0x1002, 0x15b3),
7168 + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
7169 { PCI_DEVICE(0x1002, 0x793b),
7170 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
7171 { PCI_DEVICE(0x1002, 0x7919),
7172 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7173 index 0fe18ede3e85..abcb5a6a1cd9 100644
7174 --- a/sound/pci/hda/patch_realtek.c
7175 +++ b/sound/pci/hda/patch_realtek.c
7176 @@ -5650,6 +5650,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7177 SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
7178 SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
7179 SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
7180 + SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
7181 + SND_PCI_QUIRK(0x17aa, 0x5051, "Thinkpad L460", ALC292_FIXUP_TPT460),
7182 + SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
7183 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
7184 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
7185 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
7186 @@ -5735,7 +5738,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7187 {}
7188 };
7189 #define ALC225_STANDARD_PINS \
7190 - {0x12, 0xb7a60130}, \
7191 {0x21, 0x04211020}
7192
7193 #define ALC256_STANDARD_PINS \
7194 @@ -5760,10 +5762,24 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7195 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7196 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7197 ALC225_STANDARD_PINS,
7198 + {0x12, 0xb7a60130},
7199 {0x14, 0x901701a0}),
7200 SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7201 ALC225_STANDARD_PINS,
7202 + {0x12, 0xb7a60130},
7203 {0x14, 0x901701b0}),
7204 + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7205 + ALC225_STANDARD_PINS,
7206 + {0x12, 0xb7a60150},
7207 + {0x14, 0x901701a0}),
7208 + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7209 + ALC225_STANDARD_PINS,
7210 + {0x12, 0xb7a60150},
7211 + {0x14, 0x901701b0}),
7212 + SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
7213 + ALC225_STANDARD_PINS,
7214 + {0x12, 0xb7a60130},
7215 + {0x1b, 0x90170110}),
7216 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
7217 {0x14, 0x90170110},
7218 {0x21, 0x02211020}),
7219 @@ -5832,6 +5848,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
7220 {0x14, 0x90170120},
7221 {0x21, 0x02211030}),
7222 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
7223 + {0x12, 0x90a60170},
7224 + {0x14, 0x90170120},
7225 + {0x21, 0x02211030}),
7226 + SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
7227 ALC256_STANDARD_PINS),
7228 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
7229 {0x12, 0x90a60130},
7230 diff --git a/sound/usb/card.c b/sound/usb/card.c
7231 index 3fc63583a537..2d493501b7f6 100644
7232 --- a/sound/usb/card.c
7233 +++ b/sound/usb/card.c
7234 @@ -552,7 +552,6 @@ static int usb_audio_probe(struct usb_interface *intf,
7235 goto __error;
7236 }
7237 chip = usb_chip[i];
7238 - dev_set_drvdata(&dev->dev, chip);
7239 atomic_inc(&chip->active); /* avoid autopm */
7240 break;
7241 }
7242 @@ -578,6 +577,7 @@ static int usb_audio_probe(struct usb_interface *intf,
7243 goto __error;
7244 }
7245 }
7246 + dev_set_drvdata(&dev->dev, chip);
7247
7248 /*
7249 * For devices with more than one control interface, we assume the
7250 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
7251 index 4fd482fb9260..7cb12249baa5 100644
7252 --- a/virt/kvm/kvm_main.c
7253 +++ b/virt/kvm/kvm_main.c
7254 @@ -2868,7 +2868,7 @@ static long kvm_vm_ioctl(struct file *filp,
7255 if (copy_from_user(&routing, argp, sizeof(routing)))
7256 goto out;
7257 r = -EINVAL;
7258 - if (routing.nr >= KVM_MAX_IRQ_ROUTES)
7259 + if (routing.nr > KVM_MAX_IRQ_ROUTES)
7260 goto out;
7261 if (routing.flags)
7262 goto out;
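
KVM_MAX_IRQ_ROUTES is a capacity, so a request for exactly that many routes is valid; the old >= comparison rejected the boundary case. The same inclusive-limit check in isolation (MAX_ROUTES is a stand-in value, not the kernel's constant):

    #include <stdio.h>

    #define MAX_ROUTES 4096   /* stand-in for KVM_MAX_IRQ_ROUTES */

    /* The limit is a capacity: requesting exactly MAX_ROUTES entries
     * is legal; only nr > MAX_ROUTES is out of range. */
    static int check_nr(unsigned int nr)
    {
        return nr > MAX_ROUTES ? -1 : 0;
    }

    int main(void)
    {
        printf("%d %d\n", check_nr(4096), check_nr(4097)); /* 0 -1 */
        return 0;
    }
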