Magellan Linux

Contents of /trunk/kernel-alx/patches-3.18/0113-3.18.14-all-fixes.patch



Revision 2614
Mon Jul 13 08:28:49 2015 UTC by niro
File size: 299270 bytes
-linux-3.18.14
1 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
2 index f4c71d4a9ba3..61f9273d0c46 100644
3 --- a/Documentation/kernel-parameters.txt
4 +++ b/Documentation/kernel-parameters.txt
5 @@ -3644,6 +3644,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
6 READ_CAPACITY_16 command);
7 f = NO_REPORT_OPCODES (don't use report opcodes
8 command, uas only);
9 + g = MAX_SECTORS_240 (don't transfer more than
10 + 240 sectors at a time, uas only);
11 h = CAPACITY_HEURISTICS (decrease the
12 reported device capacity by one
13 sector if the number is odd);
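
[Editor's note: these single-letter flags are fed to the driver through its quirks module parameter as VID:PID:flags triplets. A hedged example of enabling the new MAX_SECTORS_240 flag for one device (abcd:1234 is a placeholder ID, not a real device):

    usb-storage.quirks=abcd:1234:g

passed on the kernel command line, or written at runtime to /sys/module/usb_storage/parameters/quirks.]
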
14 diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
15 index 7610eaa4d491..702bb2557db8 100644
16 --- a/Documentation/virtual/kvm/api.txt
17 +++ b/Documentation/virtual/kvm/api.txt
18 @@ -2455,7 +2455,8 @@ should be created before this ioctl is invoked.
19
20 Possible features:
21 - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
22 - Depends on KVM_CAP_ARM_PSCI.
23 + Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
24 + and execute guest code when KVM_RUN is called.
25 - KVM_ARM_VCPU_EL1_32BIT: Starts the CPU in a 32bit mode.
26 Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
27 - KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
28 @@ -2951,6 +2952,15 @@ HVC instruction based PSCI call from the vcpu. The 'type' field describes
29 the system-level event type. The 'flags' field describes architecture
30 specific flags for the system-level event.
31
32 +Valid values for 'type' are:
33 + KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
34 + VM. Userspace is not obliged to honour this, and if it does honour
35 + this does not need to destroy the VM synchronously (ie it may call
36 + KVM_RUN again before shutdown finally occurs).
37 + KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
38 + As with SHUTDOWN, userspace can choose to ignore the request, or
39 + to schedule the reset to occur in the future and may call KVM_RUN again.
40 +
41 /* Fix the size of the union. */
42 char padding[256];
43 };
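
[Editor's note: these semantics leave shutdown/reset policy entirely to userspace. A minimal sketch of the VMM side, not part of this patch: handle_vcpu_exit, request_shutdown and request_reset are invented names, and vcpu_fd plus the mmap'd run structure are assumed to come from the usual KVM_CREATE_VCPU setup.

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    extern void request_shutdown(void);	/* hypothetical VMM helpers */
    extern void request_reset(void);

    static int handle_vcpu_exit(int vcpu_fd, struct kvm_run *run)
    {
    	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
    		return -1;

    	if (run->exit_reason == KVM_EXIT_SYSTEM_EVENT) {
    		switch (run->system_event.type) {
    		case KVM_SYSTEM_EVENT_SHUTDOWN:
    			request_shutdown();	/* may be deferred; KVM_RUN remains legal */
    			break;
    		case KVM_SYSTEM_EVENT_RESET:
    			request_reset();	/* e.g. re-run KVM_ARM_VCPU_INIT everywhere */
    			break;
    		}
    	}
    	return 0;
    }
]
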
44 diff --git a/Documentation/virtual/kvm/devices/s390_flic.txt b/Documentation/virtual/kvm/devices/s390_flic.txt
45 index 4ceef53164b0..d1ad9d5cae46 100644
46 --- a/Documentation/virtual/kvm/devices/s390_flic.txt
47 +++ b/Documentation/virtual/kvm/devices/s390_flic.txt
48 @@ -27,6 +27,9 @@ Groups:
49 Copies all floating interrupts into a buffer provided by userspace.
50 When the buffer is too small it returns -ENOMEM, which is the indication
51 for userspace to try again with a bigger buffer.
52 + -ENOBUFS is returned when the allocation of a kernelspace buffer has
53 + failed.
54 + -EFAULT is returned when copying data to userspace failed.
55 All interrupts remain pending, i.e. are not deleted from the list of
56 currently pending interrupts.
57 attr->addr contains the userspace address of the buffer into which all
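
[Editor's note: the error contract above implies a grow-and-retry loop on the userspace side. A minimal sketch, assuming flic_fd came from KVM_CREATE_DEVICE with KVM_DEV_TYPE_FLIC and that attr->attr carries the buffer size in bytes; get_all_irqs is an invented helper name.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void *get_all_irqs(int flic_fd, uint64_t *len)
    {
    	uint64_t size = 4096;

    	for (;;) {
    		void *buf = malloc(size);
    		struct kvm_device_attr attr = {
    			.group = KVM_DEV_FLIC_GET_ALL_IRQS,
    			.attr  = size,			/* buffer size in bytes */
    			.addr  = (uint64_t)(uintptr_t)buf,
    		};

    		if (!buf)
    			return NULL;
    		if (ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr) == 0) {
    			*len = size;
    			return buf;
    		}
    		free(buf);
    		if (errno != ENOMEM)		/* -ENOBUFS/-EFAULT: give up */
    			return NULL;
    		size *= 2;			/* buffer too small: retry bigger */
    	}
    }
]
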
58 diff --git a/Makefile b/Makefile
59 index 9cd08d55f557..43377eb8a4e5 100644
60 --- a/Makefile
61 +++ b/Makefile
62 @@ -1,6 +1,6 @@
63 VERSION = 3
64 PATCHLEVEL = 18
65 -SUBLEVEL = 13
66 +SUBLEVEL = 14
67 EXTRAVERSION =
68 NAME = Diseased Newt
69
70 diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
71 index a5441d5482a6..3cc8b8320345 100644
72 --- a/arch/arm/boot/dts/dove.dtsi
73 +++ b/arch/arm/boot/dts/dove.dtsi
74 @@ -154,7 +154,7 @@
75
76 uart2: serial@12200 {
77 compatible = "ns16550a";
78 - reg = <0x12000 0x100>;
79 + reg = <0x12200 0x100>;
80 reg-shift = <2>;
81 interrupts = <9>;
82 clocks = <&core_clk 0>;
83 @@ -163,7 +163,7 @@
84
85 uart3: serial@12300 {
86 compatible = "ns16550a";
87 - reg = <0x12100 0x100>;
88 + reg = <0x12300 0x100>;
89 reg-shift = <2>;
90 interrupts = <10>;
91 clocks = <&core_clk 0>;
92 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
93 index afb9cafd3786..674d03f4ba15 100644
94 --- a/arch/arm/include/asm/elf.h
95 +++ b/arch/arm/include/asm/elf.h
96 @@ -115,7 +115,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
97 the loader. We need to make sure that it is out of the way of the program
98 that it will "exec", and that there is sufficient room for the brk. */
99
100 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
101 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
102
103 /* When the program starts, a1 contains a pointer to a function to be
104 registered with atexit, as per the SVR4 ABI. A value of 0 means we
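
[Editor's note: the reordering above looks cosmetic, but with integer arithmetic it is not: 2 * TASK_SIZE can wrap in a 32-bit calculation before the division, while dividing first cannot. A standalone illustration; 0xbf000000 is used as a representative 32-bit TASK_SIZE, not a value taken from this patch.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t task_size = 0xbf000000u;	/* representative 32-bit TASK_SIZE */

    	uint32_t old_base = 2 * task_size / 3;	/* 2*TASK_SIZE wraps to 0x7e000000 first */
    	uint32_t new_base = task_size / 3 * 2;	/* divide first: no wrap */

    	printf("old=%#x new=%#x\n", old_base, new_base);	/* old=0x2a000000 new=0x7f555554 */
    	return 0;
    }
]
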
105 diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
106 index b9db269c6e61..66ce17655bb9 100644
107 --- a/arch/arm/include/asm/kvm_emulate.h
108 +++ b/arch/arm/include/asm/kvm_emulate.h
109 @@ -33,6 +33,11 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
110 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
111 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
112
113 +static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
114 +{
115 + vcpu->arch.hcr = HCR_GUEST_MASK;
116 +}
117 +
118 static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
119 {
120 return 1;
121 diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
122 index acb0d5712716..16d9d788d0b8 100644
123 --- a/arch/arm/include/asm/kvm_mmu.h
124 +++ b/arch/arm/include/asm/kvm_mmu.h
125 @@ -44,6 +44,7 @@
126
127 #ifndef __ASSEMBLY__
128
129 +#include <linux/highmem.h>
130 #include <asm/cacheflush.h>
131 #include <asm/pgalloc.h>
132
133 @@ -52,6 +53,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
134 void free_boot_hyp_pgd(void);
135 void free_hyp_pgds(void);
136
137 +void stage2_unmap_vm(struct kvm *kvm);
138 int kvm_alloc_stage2_pgd(struct kvm *kvm);
139 void kvm_free_stage2_pgd(struct kvm *kvm);
140 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
141 @@ -126,29 +128,28 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
142 (__boundary - 1 < (end) - 1)? __boundary: (end); \
143 })
144
145 +#define kvm_pgd_index(addr) pgd_index(addr)
146 +
147 static inline bool kvm_page_empty(void *ptr)
148 {
149 struct page *ptr_page = virt_to_page(ptr);
150 return page_count(ptr_page) == 1;
151 }
152
153 -
154 #define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
155 #define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
156 #define kvm_pud_table_empty(kvm, pudp) (0)
157
158 #define KVM_PREALLOC_LEVEL 0
159
160 -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
161 +static inline void *kvm_get_hwpgd(struct kvm *kvm)
162 {
163 - return 0;
164 + return kvm->arch.pgd;
165 }
166
167 -static inline void kvm_free_hwpgd(struct kvm *kvm) { }
168 -
169 -static inline void *kvm_get_hwpgd(struct kvm *kvm)
170 +static inline unsigned int kvm_get_hwpgd_size(void)
171 {
172 - return kvm->arch.pgd;
173 + return PTRS_PER_S2_PGD * sizeof(pgd_t);
174 }
175
176 struct kvm;
177 @@ -160,12 +161,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
178 return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
179 }
180
181 -static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
182 - unsigned long size)
183 +static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
184 + unsigned long size,
185 + bool ipa_uncached)
186 {
187 - if (!vcpu_has_cache_enabled(vcpu))
188 - kvm_flush_dcache_to_poc((void *)hva, size);
189 -
190 /*
191 * If we are going to insert an instruction page and the icache is
192 * either VIPT or PIPT, there is a potential problem where the host
193 @@ -177,15 +176,73 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
194 *
195 * VIVT caches are tagged using both the ASID and the VMID and doesn't
196 * need any kind of flushing (DDI 0406C.b - Page B3-1392).
197 + *
198 + * We need to do this through a kernel mapping (using the
199 + * user-space mapping has proved to be the wrong
200 + * solution). For that, we need to kmap one page at a time,
201 + * and iterate over the range.
202 */
203 - if (icache_is_pipt()) {
204 - __cpuc_coherent_user_range(hva, hva + size);
205 - } else if (!icache_is_vivt_asid_tagged()) {
206 +
207 + bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
208 +
209 + VM_BUG_ON(size & ~PAGE_MASK);
210 +
211 + if (!need_flush && !icache_is_pipt())
212 + goto vipt_cache;
213 +
214 + while (size) {
215 + void *va = kmap_atomic_pfn(pfn);
216 +
217 + if (need_flush)
218 + kvm_flush_dcache_to_poc(va, PAGE_SIZE);
219 +
220 + if (icache_is_pipt())
221 + __cpuc_coherent_user_range((unsigned long)va,
222 + (unsigned long)va + PAGE_SIZE);
223 +
224 + size -= PAGE_SIZE;
225 + pfn++;
226 +
227 + kunmap_atomic(va);
228 + }
229 +
230 +vipt_cache:
231 + if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
232 /* any kind of VIPT cache */
233 __flush_icache_all();
234 }
235 }
236
237 +static inline void __kvm_flush_dcache_pte(pte_t pte)
238 +{
239 + void *va = kmap_atomic(pte_page(pte));
240 +
241 + kvm_flush_dcache_to_poc(va, PAGE_SIZE);
242 +
243 + kunmap_atomic(va);
244 +}
245 +
246 +static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
247 +{
248 + unsigned long size = PMD_SIZE;
249 + pfn_t pfn = pmd_pfn(pmd);
250 +
251 + while (size) {
252 + void *va = kmap_atomic_pfn(pfn);
253 +
254 + kvm_flush_dcache_to_poc(va, PAGE_SIZE);
255 +
256 + pfn++;
257 + size -= PAGE_SIZE;
258 +
259 + kunmap_atomic(va);
260 + }
261 +}
262 +
263 +static inline void __kvm_flush_dcache_pud(pud_t pud)
264 +{
265 +}
266 +
267 #define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
268
269 void stage2_flush_vm(struct kvm *kvm);
270 diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
271 index 09ee408c1a67..b404cf886029 100644
272 --- a/arch/arm/include/uapi/asm/kvm.h
273 +++ b/arch/arm/include/uapi/asm/kvm.h
274 @@ -193,8 +193,14 @@ struct kvm_arch_memory_slot {
275 #define KVM_ARM_IRQ_CPU_IRQ 0
276 #define KVM_ARM_IRQ_CPU_FIQ 1
277
278 -/* Highest supported SPI, from VGIC_NR_IRQS */
279 +/*
280 + * This used to hold the highest supported SPI, but it is now obsolete
281 + * and only here to provide source code level compatibility with older
282 + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
283 + */
284 +#ifndef __KERNEL__
285 #define KVM_ARM_IRQ_GIC_MAX 127
286 +#endif
287
288 /* PSCI interface */
289 #define KVM_PSCI_FN_BASE 0x95c1ba5e
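
[Editor's note: since the constant is now purely informational, the IRQ count is configured through the vgic device instead. A rough sketch of that call; set_nr_irqs is an invented name, and vgic_fd is assumed to come from KVM_CREATE_DEVICE.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_nr_irqs(int vgic_fd, uint32_t nr_irqs)
    {
    	struct kvm_device_attr attr = {
    		.group = KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
    		.addr  = (uint64_t)(uintptr_t)&nr_irqs,	/* e.g. 128 */
    	};

    	return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
    }
]
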
290 diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
291 index c4cc50e58c13..cfb354ff2a60 100644
292 --- a/arch/arm/kernel/hibernate.c
293 +++ b/arch/arm/kernel/hibernate.c
294 @@ -22,6 +22,7 @@
295 #include <asm/suspend.h>
296 #include <asm/memory.h>
297 #include <asm/sections.h>
298 +#include "reboot.h"
299
300 int pfn_is_nosave(unsigned long pfn)
301 {
302 @@ -61,7 +62,7 @@ static int notrace arch_save_image(unsigned long unused)
303
304 ret = swsusp_save();
305 if (ret == 0)
306 - soft_restart(virt_to_phys(cpu_resume));
307 + _soft_restart(virt_to_phys(cpu_resume), false);
308 return ret;
309 }
310
311 @@ -86,7 +87,7 @@ static void notrace arch_restore_image(void *unused)
312 for (pbe = restore_pblist; pbe; pbe = pbe->next)
313 copy_page(pbe->orig_address, pbe->address);
314
315 - soft_restart(virt_to_phys(cpu_resume));
316 + _soft_restart(virt_to_phys(cpu_resume), false);
317 }
318
319 static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
320 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
321 index fe972a2f3df3..ecefea4e2929 100644
322 --- a/arch/arm/kernel/process.c
323 +++ b/arch/arm/kernel/process.c
324 @@ -41,6 +41,7 @@
325 #include <asm/system_misc.h>
326 #include <asm/mach/time.h>
327 #include <asm/tls.h>
328 +#include "reboot.h"
329
330 #ifdef CONFIG_CC_STACKPROTECTOR
331 #include <linux/stackprotector.h>
332 @@ -95,7 +96,7 @@ static void __soft_restart(void *addr)
333 BUG();
334 }
335
336 -void soft_restart(unsigned long addr)
337 +void _soft_restart(unsigned long addr, bool disable_l2)
338 {
339 u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
340
341 @@ -104,7 +105,7 @@ void soft_restart(unsigned long addr)
342 local_fiq_disable();
343
344 /* Disable the L2 if we're the last man standing. */
345 - if (num_online_cpus() == 1)
346 + if (disable_l2)
347 outer_disable();
348
349 /* Change to the new stack and continue with the reset. */
350 @@ -114,6 +115,11 @@ void soft_restart(unsigned long addr)
351 BUG();
352 }
353
354 +void soft_restart(unsigned long addr)
355 +{
356 + _soft_restart(addr, num_online_cpus() == 1);
357 +}
358 +
359 /*
360 * Function pointers to optional machine specific functions
361 */
362 diff --git a/arch/arm/kernel/reboot.h b/arch/arm/kernel/reboot.h
363 new file mode 100644
364 index 000000000000..c87f05816d6b
365 --- /dev/null
366 +++ b/arch/arm/kernel/reboot.h
367 @@ -0,0 +1,6 @@
368 +#ifndef REBOOT_H
369 +#define REBOOT_H
370 +
371 +extern void _soft_restart(unsigned long addr, bool disable_l2);
372 +
373 +#endif
374 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
375 index 9e193c8a959e..ed5834e8e2ac 100644
376 --- a/arch/arm/kvm/arm.c
377 +++ b/arch/arm/kvm/arm.c
378 @@ -213,6 +213,11 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
379 int err;
380 struct kvm_vcpu *vcpu;
381
382 + if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) {
383 + err = -EBUSY;
384 + goto out;
385 + }
386 +
387 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
388 if (!vcpu) {
389 err = -ENOMEM;
390 @@ -419,6 +424,7 @@ static void update_vttbr(struct kvm *kvm)
391
392 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
393 {
394 + struct kvm *kvm = vcpu->kvm;
395 int ret;
396
397 if (likely(vcpu->arch.has_run_once))
398 @@ -427,15 +433,23 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
399 vcpu->arch.has_run_once = true;
400
401 /*
402 - * Initialize the VGIC before running a vcpu the first time on
403 - * this VM.
404 + * Map the VGIC hardware resources before running a vcpu the first
405 + * time on this VM.
406 */
407 - if (unlikely(!vgic_initialized(vcpu->kvm))) {
408 - ret = kvm_vgic_init(vcpu->kvm);
409 + if (unlikely(!vgic_initialized(kvm))) {
410 + ret = kvm_vgic_map_resources(kvm);
411 if (ret)
412 return ret;
413 }
414
415 + /*
416 + * Enable the arch timers only if we have an in-kernel VGIC
417 + * and it has been properly initialized, since we cannot handle
418 + * interrupts from the virtual timer with a userspace gic.
419 + */
420 + if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
421 + kvm_timer_enable(kvm);
422 +
423 return 0;
424 }
425
426 @@ -639,8 +653,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
427 if (!irqchip_in_kernel(kvm))
428 return -ENXIO;
429
430 - if (irq_num < VGIC_NR_PRIVATE_IRQS ||
431 - irq_num > KVM_ARM_IRQ_GIC_MAX)
432 + if (irq_num < VGIC_NR_PRIVATE_IRQS)
433 return -EINVAL;
434
435 return kvm_vgic_inject_irq(kvm, 0, irq_num, level);
436 @@ -659,10 +672,21 @@ static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu,
437 return ret;
438
439 /*
440 + * Ensure a rebooted VM will fault in RAM pages and detect if the
441 + * guest MMU is turned off and flush the caches as needed.
442 + */
443 + if (vcpu->arch.has_run_once)
444 + stage2_unmap_vm(vcpu->kvm);
445 +
446 + vcpu_reset_hcr(vcpu);
447 +
448 + /*
449 * Handle the "start in power-off" case by marking the VCPU as paused.
450 */
451 - if (__test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
452 + if (test_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features))
453 vcpu->arch.pause = true;
454 + else
455 + vcpu->arch.pause = false;
456
457 return 0;
458 }
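
[Editor's note: together with the api.txt change earlier in this patch, this is what allows a VM to be rebooted by re-running KVM_ARM_VCPU_INIT on each vcpu. A hedged sketch of that userspace call; init_vcpu is an invented name, and vm_fd/vcpu_fd are assumed to exist.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int init_vcpu(int vm_fd, int vcpu_fd, int start_powered_off)
    {
    	struct kvm_vcpu_init init;

    	memset(&init, 0, sizeof(init));
    	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
    		return -1;
    	if (start_powered_off)
    		init.features[0] |= 1u << KVM_ARM_VCPU_POWER_OFF;

    	/* Re-running this on a booted vcpu now also unmaps stage-2 RAM */
    	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
    }
]
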
459 diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
460 index cc0b78769bd8..8c97208b9b97 100644
461 --- a/arch/arm/kvm/guest.c
462 +++ b/arch/arm/kvm/guest.c
463 @@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
464
465 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
466 {
467 - vcpu->arch.hcr = HCR_GUEST_MASK;
468 return 0;
469 }
470
471 diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
472 index 8664ff17cbbe..cba52cf6ed3f 100644
473 --- a/arch/arm/kvm/mmu.c
474 +++ b/arch/arm/kvm/mmu.c
475 @@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
476 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
477 }
478
479 +/*
480 + * D-Cache management functions. They take the page table entries by
481 + * value, as they are flushing the cache using the kernel mapping (or
482 + * kmap on 32bit).
483 + */
484 +static void kvm_flush_dcache_pte(pte_t pte)
485 +{
486 + __kvm_flush_dcache_pte(pte);
487 +}
488 +
489 +static void kvm_flush_dcache_pmd(pmd_t pmd)
490 +{
491 + __kvm_flush_dcache_pmd(pmd);
492 +}
493 +
494 +static void kvm_flush_dcache_pud(pud_t pud)
495 +{
496 + __kvm_flush_dcache_pud(pud);
497 +}
498 +
499 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
500 int min, int max)
501 {
502 @@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
503 put_page(virt_to_page(pmd));
504 }
505
506 +/*
507 + * Unmapping vs dcache management:
508 + *
509 + * If a guest maps certain memory pages as uncached, all writes will
510 + * bypass the data cache and go directly to RAM. However, the CPUs
511 + * can still speculate reads (not writes) and fill cache lines with
512 + * data.
513 + *
514 + * Those cache lines will be *clean* cache lines though, so a
515 + * clean+invalidate operation is equivalent to an invalidate
516 + * operation, because no cache lines are marked dirty.
517 + *
518 + * Those clean cache lines could be filled prior to an uncached write
519 + * by the guest, and the cache coherent IO subsystem would therefore
520 + * end up writing old data to disk.
521 + *
522 + * This is why right after unmapping a page/section and invalidating
523 + * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
524 + * the IO subsystem will never hit in the cache.
525 + */
526 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
527 phys_addr_t addr, phys_addr_t end)
528 {
529 @@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
530 start_pte = pte = pte_offset_kernel(pmd, addr);
531 do {
532 if (!pte_none(*pte)) {
533 + pte_t old_pte = *pte;
534 +
535 kvm_set_pte(pte, __pte(0));
536 - put_page(virt_to_page(pte));
537 kvm_tlb_flush_vmid_ipa(kvm, addr);
538 +
539 + /* No need to invalidate the cache for device mappings */
540 + if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
541 + kvm_flush_dcache_pte(old_pte);
542 +
543 + put_page(virt_to_page(pte));
544 }
545 } while (pte++, addr += PAGE_SIZE, addr != end);
546
547 @@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
548 next = kvm_pmd_addr_end(addr, end);
549 if (!pmd_none(*pmd)) {
550 if (kvm_pmd_huge(*pmd)) {
551 + pmd_t old_pmd = *pmd;
552 +
553 pmd_clear(pmd);
554 kvm_tlb_flush_vmid_ipa(kvm, addr);
555 +
556 + kvm_flush_dcache_pmd(old_pmd);
557 +
558 put_page(virt_to_page(pmd));
559 } else {
560 unmap_ptes(kvm, pmd, addr, next);
561 @@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
562 next = kvm_pud_addr_end(addr, end);
563 if (!pud_none(*pud)) {
564 if (pud_huge(*pud)) {
565 + pud_t old_pud = *pud;
566 +
567 pud_clear(pud);
568 kvm_tlb_flush_vmid_ipa(kvm, addr);
569 +
570 + kvm_flush_dcache_pud(old_pud);
571 +
572 put_page(virt_to_page(pud));
573 } else {
574 unmap_pmds(kvm, pud, addr, next);
575 @@ -194,7 +251,7 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
576 phys_addr_t addr = start, end = start + size;
577 phys_addr_t next;
578
579 - pgd = pgdp + pgd_index(addr);
580 + pgd = pgdp + kvm_pgd_index(addr);
581 do {
582 next = kvm_pgd_addr_end(addr, end);
583 if (!pgd_none(*pgd))
584 @@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
585
586 pte = pte_offset_kernel(pmd, addr);
587 do {
588 - if (!pte_none(*pte)) {
589 - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
590 - kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
591 - }
592 + if (!pte_none(*pte) &&
593 + (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
594 + kvm_flush_dcache_pte(*pte);
595 } while (pte++, addr += PAGE_SIZE, addr != end);
596 }
597
598 @@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
599 do {
600 next = kvm_pmd_addr_end(addr, end);
601 if (!pmd_none(*pmd)) {
602 - if (kvm_pmd_huge(*pmd)) {
603 - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
604 - kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
605 - } else {
606 + if (kvm_pmd_huge(*pmd))
607 + kvm_flush_dcache_pmd(*pmd);
608 + else
609 stage2_flush_ptes(kvm, pmd, addr, next);
610 - }
611 }
612 } while (pmd++, addr = next, addr != end);
613 }
614 @@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
615 do {
616 next = kvm_pud_addr_end(addr, end);
617 if (!pud_none(*pud)) {
618 - if (pud_huge(*pud)) {
619 - hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
620 - kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
621 - } else {
622 + if (pud_huge(*pud))
623 + kvm_flush_dcache_pud(*pud);
624 + else
625 stage2_flush_pmds(kvm, pud, addr, next);
626 - }
627 }
628 } while (pud++, addr = next, addr != end);
629 }
630 @@ -264,7 +316,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
631 phys_addr_t next;
632 pgd_t *pgd;
633
634 - pgd = kvm->arch.pgd + pgd_index(addr);
635 + pgd = kvm->arch.pgd + kvm_pgd_index(addr);
636 do {
637 next = kvm_pgd_addr_end(addr, end);
638 stage2_flush_puds(kvm, pgd, addr, next);
639 @@ -541,6 +593,20 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
640 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
641 }
642
643 +/* Free the HW pgd, one page at a time */
644 +static void kvm_free_hwpgd(void *hwpgd)
645 +{
646 + free_pages_exact(hwpgd, kvm_get_hwpgd_size());
647 +}
648 +
649 +/* Allocate the HW PGD, making sure that each page gets its own refcount */
650 +static void *kvm_alloc_hwpgd(void)
651 +{
652 + unsigned int size = kvm_get_hwpgd_size();
653 +
654 + return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
655 +}
656 +
657 /**
658 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
659 * @kvm: The KVM struct pointer for the VM.
660 @@ -554,15 +620,31 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
661 */
662 int kvm_alloc_stage2_pgd(struct kvm *kvm)
663 {
664 - int ret;
665 pgd_t *pgd;
666 + void *hwpgd;
667
668 if (kvm->arch.pgd != NULL) {
669 kvm_err("kvm_arch already initialized?\n");
670 return -EINVAL;
671 }
672
673 + hwpgd = kvm_alloc_hwpgd();
674 + if (!hwpgd)
675 + return -ENOMEM;
676 +
677 + /* When the kernel uses more levels of page tables than the
678 + * guest, we allocate a fake PGD and pre-populate it to point
679 + * to the next-level page table, which will be the real
680 + * initial page table pointed to by the VTTBR.
681 + *
682 + * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
683 + * the PMD and the kernel will use folded pud.
684 + * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
685 + * pages.
686 + */
687 if (KVM_PREALLOC_LEVEL > 0) {
688 + int i;
689 +
690 /*
691 * Allocate fake pgd for the page table manipulation macros to
692 * work. This is not used by the hardware and we have no
693 @@ -570,30 +652,32 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
694 */
695 pgd = (pgd_t *)kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
696 GFP_KERNEL | __GFP_ZERO);
697 +
698 + if (!pgd) {
699 + kvm_free_hwpgd(hwpgd);
700 + return -ENOMEM;
701 + }
702 +
703 + /* Plug the HW PGD into the fake one. */
704 + for (i = 0; i < PTRS_PER_S2_PGD; i++) {
705 + if (KVM_PREALLOC_LEVEL == 1)
706 + pgd_populate(NULL, pgd + i,
707 + (pud_t *)hwpgd + i * PTRS_PER_PUD);
708 + else if (KVM_PREALLOC_LEVEL == 2)
709 + pud_populate(NULL, pud_offset(pgd, 0) + i,
710 + (pmd_t *)hwpgd + i * PTRS_PER_PMD);
711 + }
712 } else {
713 /*
714 * Allocate actual first-level Stage-2 page table used by the
715 * hardware for Stage-2 page table walks.
716 */
717 - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, S2_PGD_ORDER);
718 + pgd = (pgd_t *)hwpgd;
719 }
720
721 - if (!pgd)
722 - return -ENOMEM;
723 -
724 - ret = kvm_prealloc_hwpgd(kvm, pgd);
725 - if (ret)
726 - goto out_err;
727 -
728 kvm_clean_pgd(pgd);
729 kvm->arch.pgd = pgd;
730 return 0;
731 -out_err:
732 - if (KVM_PREALLOC_LEVEL > 0)
733 - kfree(pgd);
734 - else
735 - free_pages((unsigned long)pgd, S2_PGD_ORDER);
736 - return ret;
737 }
738
739 /**
740 @@ -612,6 +696,71 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
741 unmap_range(kvm, kvm->arch.pgd, start, size);
742 }
743
744 +static void stage2_unmap_memslot(struct kvm *kvm,
745 + struct kvm_memory_slot *memslot)
746 +{
747 + hva_t hva = memslot->userspace_addr;
748 + phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
749 + phys_addr_t size = PAGE_SIZE * memslot->npages;
750 + hva_t reg_end = hva + size;
751 +
752 + /*
753 + * A memory region could potentially cover multiple VMAs, and any holes
754 + * between them, so iterate over all of them to find out if we should
755 + * unmap any of them.
756 + *
757 + * +--------------------------------------------+
758 + * +---------------+----------------+ +----------------+
759 + * | : VMA 1 | VMA 2 | | VMA 3 : |
760 + * +---------------+----------------+ +----------------+
761 + * | memory region |
762 + * +--------------------------------------------+
763 + */
764 + do {
765 + struct vm_area_struct *vma = find_vma(current->mm, hva);
766 + hva_t vm_start, vm_end;
767 +
768 + if (!vma || vma->vm_start >= reg_end)
769 + break;
770 +
771 + /*
772 + * Take the intersection of this VMA with the memory region
773 + */
774 + vm_start = max(hva, vma->vm_start);
775 + vm_end = min(reg_end, vma->vm_end);
776 +
777 + if (!(vma->vm_flags & VM_PFNMAP)) {
778 + gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
779 + unmap_stage2_range(kvm, gpa, vm_end - vm_start);
780 + }
781 + hva = vm_end;
782 + } while (hva < reg_end);
783 +}
784 +
785 +/**
786 + * stage2_unmap_vm - Unmap Stage-2 RAM mappings
787 + * @kvm: The struct kvm pointer
788 + *
790 + * Go through the memregions and unmap any regular RAM
790 + * backing memory already mapped to the VM.
791 + */
792 +void stage2_unmap_vm(struct kvm *kvm)
793 +{
794 + struct kvm_memslots *slots;
795 + struct kvm_memory_slot *memslot;
796 + int idx;
797 +
798 + idx = srcu_read_lock(&kvm->srcu);
799 + spin_lock(&kvm->mmu_lock);
800 +
801 + slots = kvm_memslots(kvm);
802 + kvm_for_each_memslot(memslot, slots)
803 + stage2_unmap_memslot(kvm, memslot);
804 +
805 + spin_unlock(&kvm->mmu_lock);
806 + srcu_read_unlock(&kvm->srcu, idx);
807 +}
808 +
809 /**
810 * kvm_free_stage2_pgd - free all stage-2 tables
811 * @kvm: The KVM struct pointer for the VM.
812 @@ -629,11 +778,10 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
813 return;
814
815 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
816 - kvm_free_hwpgd(kvm);
817 + kvm_free_hwpgd(kvm_get_hwpgd(kvm));
818 if (KVM_PREALLOC_LEVEL > 0)
819 kfree(kvm->arch.pgd);
820 - else
821 - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER);
822 +
823 kvm->arch.pgd = NULL;
824 }
825
826 @@ -643,7 +791,7 @@ static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache
827 pgd_t *pgd;
828 pud_t *pud;
829
830 - pgd = kvm->arch.pgd + pgd_index(addr);
831 + pgd = kvm->arch.pgd + kvm_pgd_index(addr);
832 if (WARN_ON(pgd_none(*pgd))) {
833 if (!cache)
834 return NULL;
835 @@ -840,6 +988,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
836 return !pfn_valid(pfn);
837 }
838
839 +static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
840 + unsigned long size, bool uncached)
841 +{
842 + __coherent_cache_guest_page(vcpu, pfn, size, uncached);
843 +}
844 +
845 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
846 struct kvm_memory_slot *memslot, unsigned long hva,
847 unsigned long fault_status)
848 @@ -853,6 +1007,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
849 struct vm_area_struct *vma;
850 pfn_t pfn;
851 pgprot_t mem_type = PAGE_S2;
852 + bool fault_ipa_uncached;
853
854 write_fault = kvm_is_write_fault(vcpu);
855 if (fault_status == FSC_PERM && !write_fault) {
856 @@ -919,6 +1074,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
857 if (!hugetlb && !force_pte)
858 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
859
860 + fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
861 +
862 if (hugetlb) {
863 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
864 new_pmd = pmd_mkhuge(new_pmd);
865 @@ -926,7 +1083,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
866 kvm_set_s2pmd_writable(&new_pmd);
867 kvm_set_pfn_dirty(pfn);
868 }
869 - coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE);
870 + coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
871 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
872 } else {
873 pte_t new_pte = pfn_pte(pfn, mem_type);
874 @@ -934,7 +1091,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
875 kvm_set_s2pte_writable(&new_pte);
876 kvm_set_pfn_dirty(pfn);
877 }
878 - coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);
879 + coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
880 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
881 pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
882 }
883 @@ -1294,11 +1451,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
884 hva = vm_end;
885 } while (hva < reg_end);
886
887 - if (ret) {
888 - spin_lock(&kvm->mmu_lock);
889 + spin_lock(&kvm->mmu_lock);
890 + if (ret)
891 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
892 - spin_unlock(&kvm->mmu_lock);
893 - }
894 + else
895 + stage2_flush_memslot(kvm, memslot);
896 + spin_unlock(&kvm->mmu_lock);
897 return ret;
898 }
899
900 @@ -1310,6 +1468,15 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
901 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
902 unsigned long npages)
903 {
904 + /*
905 + * Readonly memslots are not incoherent with the caches by definition,
906 + * but in practice, they are used mostly to emulate ROMs or NOR flashes
907 + * that the guest may consider devices and hence map as uncached.
908 + * To prevent incoherency issues in these cases, tag all readonly
909 + * regions as incoherent.
910 + */
911 + if (slot->flags & KVM_MEM_READONLY)
912 + slot->flags |= KVM_MEMSLOT_INCOHERENT;
913 return 0;
914 }
915
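
[Editor's note: stripped of the KVM specifics, the memslot walk diagrammed above reduces to clipping each VMA against the region and skipping empty intersections. A standalone illustration; the names are invented for the example.

    #include <stdbool.h>
    #include <stdint.h>

    /* Intersect [a_start, a_end) with [b_start, b_end); false if disjoint. */
    static bool clip_range(uint64_t a_start, uint64_t a_end,
    		       uint64_t b_start, uint64_t b_end,
    		       uint64_t *start, uint64_t *end)
    {
    	uint64_t s = a_start > b_start ? a_start : b_start;	/* max of starts */
    	uint64_t e = a_end < b_end ? a_end : b_end;		/* min of ends */

    	if (s >= e)
    		return false;
    	*start = s;
    	*end = e;
    	return true;
    }
]
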
916 diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
917 index 09cf37737ee2..58cb3248d277 100644
918 --- a/arch/arm/kvm/psci.c
919 +++ b/arch/arm/kvm/psci.c
920 @@ -15,6 +15,7 @@
921 * along with this program. If not, see <http://www.gnu.org/licenses/>.
922 */
923
924 +#include <linux/preempt.h>
925 #include <linux/kvm_host.h>
926 #include <linux/wait.h>
927
928 @@ -166,6 +167,23 @@ static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
929
930 static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
931 {
932 + int i;
933 + struct kvm_vcpu *tmp;
934 +
935 + /*
936 + * The KVM ABI specifies that a system event exit may call KVM_RUN
937 + * again and may perform shutdown/reboot at a later time than when the
938 + * actual request is made. Since we are implementing PSCI and a
939 + * caller of PSCI reboot and shutdown expects that the system shuts
940 + * down or reboots immediately, let's make sure that VCPUs are not run
941 + * after this call is handled and before the VCPUs have been
942 + * re-initialized.
943 + */
944 + kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
945 + tmp->arch.pause = true;
946 + kvm_vcpu_kick(tmp);
947 + }
948 +
949 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
950 vcpu->run->system_event.type = type;
951 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
952 diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
953 index bbd8664d1bac..6f8a85c5965c 100644
954 --- a/arch/arm/mach-mvebu/pmsu.c
955 +++ b/arch/arm/mach-mvebu/pmsu.c
956 @@ -415,6 +415,9 @@ static __init int armada_38x_cpuidle_init(void)
957 void __iomem *mpsoc_base;
958 u32 reg;
959
960 + pr_warn("CPU idle is currently broken on Armada 38x: disabling");
961 + return 0;
962 +
963 np = of_find_compatible_node(NULL, NULL,
964 "marvell,armada-380-coherency-fabric");
965 if (!np)
966 @@ -476,6 +479,16 @@ static int __init mvebu_v7_cpu_pm_init(void)
967 return 0;
968 of_node_put(np);
969
970 + /*
971 + * Currently the CPU idle support for Armada 38x is broken, as
972 + * the CPU hotplug uses some of the CPU idle functions it is
973 + * broken too, so let's disable it
974 + */
975 + if (of_machine_is_compatible("marvell,armada380")) {
976 + cpu_hotplug_disable();
977 + pr_warn("CPU hotplug support is currently broken on Armada 38x: disabling");
978 + }
979 +
980 if (of_machine_is_compatible("marvell,armadaxp"))
981 ret = armada_xp_cpuidle_init();
982 else if (of_machine_is_compatible("marvell,armada370"))
983 @@ -489,7 +502,8 @@ static int __init mvebu_v7_cpu_pm_init(void)
984 return ret;
985
986 mvebu_v7_pmsu_enable_l2_powerdown_onidle();
987 - platform_device_register(&mvebu_v7_cpuidle_device);
988 + if (mvebu_v7_cpuidle_device.name)
989 + platform_device_register(&mvebu_v7_cpuidle_device);
990 cpu_pm_register_notifier(&mvebu_v7_cpu_pm_notifier);
991
992 return 0;
993 diff --git a/arch/arm/mach-s3c64xx/crag6410.h b/arch/arm/mach-s3c64xx/crag6410.h
994 index 7bc66682687e..dcbe17f5e5f8 100644
995 --- a/arch/arm/mach-s3c64xx/crag6410.h
996 +++ b/arch/arm/mach-s3c64xx/crag6410.h
997 @@ -14,6 +14,7 @@
998 #include <mach/gpio-samsung.h>
999
1000 #define GLENFARCLAS_PMIC_IRQ_BASE IRQ_BOARD_START
1001 +#define BANFF_PMIC_IRQ_BASE (IRQ_BOARD_START + 64)
1002
1003 #define PCA935X_GPIO_BASE GPIO_BOARD_START
1004 #define CODEC_GPIO_BASE (GPIO_BOARD_START + 8)
1005 diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
1006 index 10b913baab28..65c426bc45f7 100644
1007 --- a/arch/arm/mach-s3c64xx/mach-crag6410.c
1008 +++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
1009 @@ -554,6 +554,7 @@ static struct wm831x_touch_pdata touch_pdata = {
1010
1011 static struct wm831x_pdata crag_pmic_pdata = {
1012 .wm831x_num = 1,
1013 + .irq_base = BANFF_PMIC_IRQ_BASE,
1014 .gpio_base = BANFF_PMIC_GPIO_BASE,
1015 .soft_shutdown = true,
1016
1017 diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
1018 index 66781bf34077..c72412415093 100644
1019 --- a/arch/arm/mm/hugetlbpage.c
1020 +++ b/arch/arm/mm/hugetlbpage.c
1021 @@ -36,12 +36,6 @@
1022 * of type casting from pmd_t * to pte_t *.
1023 */
1024
1025 -struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
1026 - int write)
1027 -{
1028 - return ERR_PTR(-EINVAL);
1029 -}
1030 -
1031 int pud_huge(pud_t pud)
1032 {
1033 return 0;
1034 diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
1035 index 5674a55b5518..865a7e28ea2d 100644
1036 --- a/arch/arm64/include/asm/kvm_emulate.h
1037 +++ b/arch/arm64/include/asm/kvm_emulate.h
1038 @@ -38,6 +38,13 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
1039 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
1040 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
1041
1042 +static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
1043 +{
1044 + vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
1045 + if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
1046 + vcpu->arch.hcr_el2 &= ~HCR_RW;
1047 +}
1048 +
1049 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
1050 {
1051 return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
1052 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
1053 index 2012c4ba8d67..dbd32127dbb6 100644
1054 --- a/arch/arm64/include/asm/kvm_host.h
1055 +++ b/arch/arm64/include/asm/kvm_host.h
1056 @@ -200,6 +200,7 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
1057 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
1058
1059 u64 kvm_call_hyp(void *hypfn, ...);
1060 +void force_vm_exit(const cpumask_t *mask);
1061
1062 int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
1063 int exception_index);
1064 diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
1065 index 0caf7a59f6a1..a205e957d5c4 100644
1066 --- a/arch/arm64/include/asm/kvm_mmu.h
1067 +++ b/arch/arm64/include/asm/kvm_mmu.h
1068 @@ -83,6 +83,7 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
1069 void free_boot_hyp_pgd(void);
1070 void free_hyp_pgds(void);
1071
1072 +void stage2_unmap_vm(struct kvm *kvm);
1073 int kvm_alloc_stage2_pgd(struct kvm *kvm);
1074 void kvm_free_stage2_pgd(struct kvm *kvm);
1075 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1076 @@ -136,6 +137,8 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
1077 #define PTRS_PER_S2_PGD (1 << PTRS_PER_S2_PGD_SHIFT)
1078 #define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
1079
1080 +#define kvm_pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
1081 +
1082 /*
1083 * If we are concatenating first level stage-2 page tables, we would have less
1084 * than or equal to 16 pointers in the fake PGD, because that's what the
1085 @@ -149,43 +152,6 @@ static inline void kvm_set_s2pmd_writable(pmd_t *pmd)
1086 #define KVM_PREALLOC_LEVEL (0)
1087 #endif
1088
1089 -/**
1090 - * kvm_prealloc_hwpgd - allocate inital table for VTTBR
1091 - * @kvm: The KVM struct pointer for the VM.
1092 - * @pgd: The kernel pseudo pgd
1093 - *
1094 - * When the kernel uses more levels of page tables than the guest, we allocate
1095 - * a fake PGD and pre-populate it to point to the next-level page table, which
1096 - * will be the real initial page table pointed to by the VTTBR.
1097 - *
1098 - * When KVM_PREALLOC_LEVEL==2, we allocate a single page for the PMD and
1099 - * the kernel will use folded pud. When KVM_PREALLOC_LEVEL==1, we
1100 - * allocate 2 consecutive PUD pages.
1101 - */
1102 -static inline int kvm_prealloc_hwpgd(struct kvm *kvm, pgd_t *pgd)
1103 -{
1104 - unsigned int i;
1105 - unsigned long hwpgd;
1106 -
1107 - if (KVM_PREALLOC_LEVEL == 0)
1108 - return 0;
1109 -
1110 - hwpgd = __get_free_pages(GFP_KERNEL | __GFP_ZERO, PTRS_PER_S2_PGD_SHIFT);
1111 - if (!hwpgd)
1112 - return -ENOMEM;
1113 -
1114 - for (i = 0; i < PTRS_PER_S2_PGD; i++) {
1115 - if (KVM_PREALLOC_LEVEL == 1)
1116 - pgd_populate(NULL, pgd + i,
1117 - (pud_t *)hwpgd + i * PTRS_PER_PUD);
1118 - else if (KVM_PREALLOC_LEVEL == 2)
1119 - pud_populate(NULL, pud_offset(pgd, 0) + i,
1120 - (pmd_t *)hwpgd + i * PTRS_PER_PMD);
1121 - }
1122 -
1123 - return 0;
1124 -}
1125 -
1126 static inline void *kvm_get_hwpgd(struct kvm *kvm)
1127 {
1128 pgd_t *pgd = kvm->arch.pgd;
1129 @@ -202,12 +168,11 @@ static inline void *kvm_get_hwpgd(struct kvm *kvm)
1130 return pmd_offset(pud, 0);
1131 }
1132
1133 -static inline void kvm_free_hwpgd(struct kvm *kvm)
1134 +static inline unsigned int kvm_get_hwpgd_size(void)
1135 {
1136 - if (KVM_PREALLOC_LEVEL > 0) {
1137 - unsigned long hwpgd = (unsigned long)kvm_get_hwpgd(kvm);
1138 - free_pages(hwpgd, PTRS_PER_S2_PGD_SHIFT);
1139 - }
1140 + if (KVM_PREALLOC_LEVEL > 0)
1141 + return PTRS_PER_S2_PGD * PAGE_SIZE;
1142 + return PTRS_PER_S2_PGD * sizeof(pgd_t);
1143 }
1144
1145 static inline bool kvm_page_empty(void *ptr)
1146 @@ -242,20 +207,42 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
1147 return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
1148 }
1149
1150 -static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
1151 - unsigned long size)
1152 +static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
1153 + unsigned long size,
1154 + bool ipa_uncached)
1155 {
1156 - if (!vcpu_has_cache_enabled(vcpu))
1157 - kvm_flush_dcache_to_poc((void *)hva, size);
1158 + void *va = page_address(pfn_to_page(pfn));
1159 +
1160 + if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
1161 + kvm_flush_dcache_to_poc(va, size);
1162
1163 if (!icache_is_aliasing()) { /* PIPT */
1164 - flush_icache_range(hva, hva + size);
1165 + flush_icache_range((unsigned long)va,
1166 + (unsigned long)va + size);
1167 } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
1168 /* any kind of VIPT cache */
1169 __flush_icache_all();
1170 }
1171 }
1172
1173 +static inline void __kvm_flush_dcache_pte(pte_t pte)
1174 +{
1175 + struct page *page = pte_page(pte);
1176 + kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
1177 +}
1178 +
1179 +static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
1180 +{
1181 + struct page *page = pmd_page(pmd);
1182 + kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
1183 +}
1184 +
1185 +static inline void __kvm_flush_dcache_pud(pud_t pud)
1186 +{
1187 + struct page *page = pud_page(pud);
1188 + kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
1189 +}
1190 +
1191 #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
1192
1193 void stage2_flush_vm(struct kvm *kvm);
1194 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
1195 index 8e38878c87c6..d9e9822efcee 100644
1196 --- a/arch/arm64/include/uapi/asm/kvm.h
1197 +++ b/arch/arm64/include/uapi/asm/kvm.h
1198 @@ -179,8 +179,14 @@ struct kvm_arch_memory_slot {
1199 #define KVM_ARM_IRQ_CPU_IRQ 0
1200 #define KVM_ARM_IRQ_CPU_FIQ 1
1201
1202 -/* Highest supported SPI, from VGIC_NR_IRQS */
1203 +/*
1204 + * This used to hold the highest supported SPI, but it is now obsolete
1205 + * and only here to provide source code level compatibility with older
1206 + * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
1207 + */
1208 +#ifndef __KERNEL__
1209 #define KVM_ARM_IRQ_GIC_MAX 127
1210 +#endif
1211
1212 /* PSCI interface */
1213 #define KVM_PSCI_FN_BASE 0x95c1ba5e
1214 diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
1215 index 76794692c20b..84d5959ff874 100644
1216 --- a/arch/arm64/kvm/guest.c
1217 +++ b/arch/arm64/kvm/guest.c
1218 @@ -38,7 +38,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
1219
1220 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1221 {
1222 - vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
1223 return 0;
1224 }
1225
1226 diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
1227 index b72aa9f9215c..a767f6a4ce54 100644
1228 --- a/arch/arm64/kvm/hyp.S
1229 +++ b/arch/arm64/kvm/hyp.S
1230 @@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
1231 * Instead, we invalidate Stage-2 for this IPA, and the
1232 * whole of Stage-1. Weep...
1233 */
1234 + lsr x1, x1, #12
1235 tlbi ipas2e1is, x1
1236 /*
1237 * We have to ensure completion of the invalidation at Stage-2,
1238 diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
1239 index 70a7816535cd..0b4326578985 100644
1240 --- a/arch/arm64/kvm/reset.c
1241 +++ b/arch/arm64/kvm/reset.c
1242 @@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
1243 if (!cpu_has_32bit_el1())
1244 return -EINVAL;
1245 cpu_reset = &default_regs_reset32;
1246 - vcpu->arch.hcr_el2 &= ~HCR_RW;
1247 } else {
1248 cpu_reset = &default_regs_reset;
1249 }
1250 diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
1251 index df34a70caca1..6efbb52cb92e 100644
1252 --- a/arch/arm64/mm/dma-mapping.c
1253 +++ b/arch/arm64/mm/dma-mapping.c
1254 @@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
1255
1256 *ret_page = phys_to_page(phys);
1257 ptr = (void *)val;
1258 - if (flags & __GFP_ZERO)
1259 - memset(ptr, 0, size);
1260 + memset(ptr, 0, size);
1261 }
1262
1263 return ptr;
1264 @@ -113,8 +112,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
1265
1266 *dma_handle = phys_to_dma(dev, page_to_phys(page));
1267 addr = page_address(page);
1268 - if (flags & __GFP_ZERO)
1269 - memset(addr, 0, size);
1270 + memset(addr, 0, size);
1271 return addr;
1272 } else {
1273 return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
1274 diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
1275 index 023747bf4dd7..2de9d2e59d96 100644
1276 --- a/arch/arm64/mm/hugetlbpage.c
1277 +++ b/arch/arm64/mm/hugetlbpage.c
1278 @@ -38,12 +38,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
1279 }
1280 #endif
1281
1282 -struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
1283 - int write)
1284 -{
1285 - return ERR_PTR(-EINVAL);
1286 -}
1287 -
1288 int pmd_huge(pmd_t pmd)
1289 {
1290 return !(pmd_val(pmd) & PMD_TABLE_BIT);
1291 diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
1292 index 356ee84cad95..04845aaf5985 100644
1293 --- a/arch/c6x/kernel/time.c
1294 +++ b/arch/c6x/kernel/time.c
1295 @@ -49,7 +49,7 @@ u64 sched_clock(void)
1296 return (tsc * sched_clock_multiplier) >> SCHED_CLOCK_SHIFT;
1297 }
1298
1299 -void time_init(void)
1300 +void __init time_init(void)
1301 {
1302 u64 tmp = (u64)NSEC_PER_SEC << SCHED_CLOCK_SHIFT;
1303
1304 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1305 index 76069c18ee42..52b7604b5215 100644
1306 --- a/arch/ia64/mm/hugetlbpage.c
1307 +++ b/arch/ia64/mm/hugetlbpage.c
1308 @@ -114,12 +114,6 @@ int pud_huge(pud_t pud)
1309 return 0;
1310 }
1311
1312 -struct page *
1313 -follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
1314 -{
1315 - return NULL;
1316 -}
1317 -
1318 void hugetlb_free_pgd_range(struct mmu_gather *tlb,
1319 unsigned long addr, unsigned long end,
1320 unsigned long floor, unsigned long ceiling)
1321 diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
1322 index 3c32075d2945..7ca80ac42ed5 100644
1323 --- a/arch/metag/mm/hugetlbpage.c
1324 +++ b/arch/metag/mm/hugetlbpage.c
1325 @@ -94,12 +94,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
1326 return 0;
1327 }
1328
1329 -struct page *follow_huge_addr(struct mm_struct *mm,
1330 - unsigned long address, int write)
1331 -{
1332 - return ERR_PTR(-EINVAL);
1333 -}
1334 -
1335 int pmd_huge(pmd_t pmd)
1336 {
1337 return pmd_page_shift(pmd) > PAGE_SHIFT;
1338 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
1339 index 9536ef912f59..155bb7f5ad28 100644
1340 --- a/arch/mips/Kconfig
1341 +++ b/arch/mips/Kconfig
1342 @@ -2048,7 +2048,7 @@ config MIPS_CMP
1343
1344 config MIPS_CPS
1345 bool "MIPS Coherent Processing System support"
1346 - depends on SYS_SUPPORTS_MIPS_CPS
1347 + depends on SYS_SUPPORTS_MIPS_CPS && !64BIT
1348 select MIPS_CM
1349 select MIPS_CPC
1350 select MIPS_CPS_PM if HOTPLUG_CPU
1351 diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
1352 index b3ae068ca4fa..3fd369d74444 100644
1353 --- a/arch/mips/bcm47xx/board.c
1354 +++ b/arch/mips/bcm47xx/board.c
1355 @@ -247,8 +247,8 @@ static __init const struct bcm47xx_board_type *bcm47xx_board_get_nvram(void)
1356 }
1357
1358 if (bcm47xx_nvram_getenv("hardware_version", buf1, sizeof(buf1)) >= 0 &&
1359 - bcm47xx_nvram_getenv("boardtype", buf2, sizeof(buf2)) >= 0) {
1360 - for (e2 = bcm47xx_board_list_boot_hw; e2->value1; e2++) {
1361 + bcm47xx_nvram_getenv("boardnum", buf2, sizeof(buf2)) >= 0) {
1362 + for (e2 = bcm47xx_board_list_hw_version_num; e2->value1; e2++) {
1363 if (!strstarts(buf1, e2->value1) &&
1364 !strcmp(buf2, e2->value2))
1365 return &e2->board;
1366 diff --git a/arch/mips/bcm63xx/prom.c b/arch/mips/bcm63xx/prom.c
1367 index e1f27d653f60..7019e2967009 100644
1368 --- a/arch/mips/bcm63xx/prom.c
1369 +++ b/arch/mips/bcm63xx/prom.c
1370 @@ -17,7 +17,6 @@
1371 #include <bcm63xx_cpu.h>
1372 #include <bcm63xx_io.h>
1373 #include <bcm63xx_regs.h>
1374 -#include <bcm63xx_gpio.h>
1375
1376 void __init prom_init(void)
1377 {
1378 @@ -53,9 +52,6 @@ void __init prom_init(void)
1379 reg &= ~mask;
1380 bcm_perf_writel(reg, PERF_CKCTL_REG);
1381
1382 - /* register gpiochip */
1383 - bcm63xx_gpio_init();
1384 -
1385 /* do low level board init */
1386 board_prom_init();
1387
1388 diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
1389 index 6660c7ddf87b..240fb4ffa55c 100644
1390 --- a/arch/mips/bcm63xx/setup.c
1391 +++ b/arch/mips/bcm63xx/setup.c
1392 @@ -20,6 +20,7 @@
1393 #include <bcm63xx_cpu.h>
1394 #include <bcm63xx_regs.h>
1395 #include <bcm63xx_io.h>
1396 +#include <bcm63xx_gpio.h>
1397
1398 void bcm63xx_machine_halt(void)
1399 {
1400 @@ -160,6 +161,9 @@ void __init plat_mem_setup(void)
1401
1402 int __init bcm63xx_register_devices(void)
1403 {
1404 + /* register gpiochip */
1405 + bcm63xx_gpio_init();
1406 +
1407 return board_register_devices();
1408 }
1409
1410 diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
1411 index 02f244475207..c76a289b95b5 100644
1412 --- a/arch/mips/cavium-octeon/dma-octeon.c
1413 +++ b/arch/mips/cavium-octeon/dma-octeon.c
1414 @@ -306,7 +306,7 @@ void __init plat_swiotlb_setup(void)
1415 swiotlbsize = 64 * (1<<20);
1416 }
1417 #endif
1418 -#ifdef CONFIG_USB_OCTEON_OHCI
1419 +#ifdef CONFIG_USB_OHCI_HCD_PLATFORM
1420 /* OCTEON II ohci is only 32-bit. */
1421 if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
1422 swiotlbsize = 64 * (1<<20);
1423 diff --git a/arch/mips/include/asm/asm-eva.h b/arch/mips/include/asm/asm-eva.h
1424 index e41c56e375b1..1e38f0e1ea3e 100644
1425 --- a/arch/mips/include/asm/asm-eva.h
1426 +++ b/arch/mips/include/asm/asm-eva.h
1427 @@ -11,6 +11,36 @@
1428 #define __ASM_ASM_EVA_H
1429
1430 #ifndef __ASSEMBLY__
1431 +
1432 +/* Kernel variants */
1433 +
1434 +#define kernel_cache(op, base) "cache " op ", " base "\n"
1435 +#define kernel_ll(reg, addr) "ll " reg ", " addr "\n"
1436 +#define kernel_sc(reg, addr) "sc " reg ", " addr "\n"
1437 +#define kernel_lw(reg, addr) "lw " reg ", " addr "\n"
1438 +#define kernel_lwl(reg, addr) "lwl " reg ", " addr "\n"
1439 +#define kernel_lwr(reg, addr) "lwr " reg ", " addr "\n"
1440 +#define kernel_lh(reg, addr) "lh " reg ", " addr "\n"
1441 +#define kernel_lb(reg, addr) "lb " reg ", " addr "\n"
1442 +#define kernel_lbu(reg, addr) "lbu " reg ", " addr "\n"
1443 +#define kernel_sw(reg, addr) "sw " reg ", " addr "\n"
1444 +#define kernel_swl(reg, addr) "swl " reg ", " addr "\n"
1445 +#define kernel_swr(reg, addr) "swr " reg ", " addr "\n"
1446 +#define kernel_sh(reg, addr) "sh " reg ", " addr "\n"
1447 +#define kernel_sb(reg, addr) "sb " reg ", " addr "\n"
1448 +
1449 +#ifdef CONFIG_32BIT
1450 +/*
1451 + * No 'sd' or 'ld' instructions in 32-bit but the code will
1452 + * do the correct thing
1453 + */
1454 +#define kernel_sd(reg, addr) user_sw(reg, addr)
1455 +#define kernel_ld(reg, addr) user_lw(reg, addr)
1456 +#else
1457 +#define kernel_sd(reg, addr) "sd " reg", " addr "\n"
1458 +#define kernel_ld(reg, addr) "ld " reg", " addr "\n"
1459 +#endif /* CONFIG_32BIT */
1460 +
1461 #ifdef CONFIG_EVA
1462
1463 #define __BUILD_EVA_INSN(insn, reg, addr) \
1464 @@ -41,37 +71,60 @@
1465
1466 #else
1467
1468 -#define user_cache(op, base) "cache " op ", " base "\n"
1469 -#define user_ll(reg, addr) "ll " reg ", " addr "\n"
1470 -#define user_sc(reg, addr) "sc " reg ", " addr "\n"
1471 -#define user_lw(reg, addr) "lw " reg ", " addr "\n"
1472 -#define user_lwl(reg, addr) "lwl " reg ", " addr "\n"
1473 -#define user_lwr(reg, addr) "lwr " reg ", " addr "\n"
1474 -#define user_lh(reg, addr) "lh " reg ", " addr "\n"
1475 -#define user_lb(reg, addr) "lb " reg ", " addr "\n"
1476 -#define user_lbu(reg, addr) "lbu " reg ", " addr "\n"
1477 -#define user_sw(reg, addr) "sw " reg ", " addr "\n"
1478 -#define user_swl(reg, addr) "swl " reg ", " addr "\n"
1479 -#define user_swr(reg, addr) "swr " reg ", " addr "\n"
1480 -#define user_sh(reg, addr) "sh " reg ", " addr "\n"
1481 -#define user_sb(reg, addr) "sb " reg ", " addr "\n"
1482 +#define user_cache(op, base) kernel_cache(op, base)
1483 +#define user_ll(reg, addr) kernel_ll(reg, addr)
1484 +#define user_sc(reg, addr) kernel_sc(reg, addr)
1485 +#define user_lw(reg, addr) kernel_lw(reg, addr)
1486 +#define user_lwl(reg, addr) kernel_lwl(reg, addr)
1487 +#define user_lwr(reg, addr) kernel_lwr(reg, addr)
1488 +#define user_lh(reg, addr) kernel_lh(reg, addr)
1489 +#define user_lb(reg, addr) kernel_lb(reg, addr)
1490 +#define user_lbu(reg, addr) kernel_lbu(reg, addr)
1491 +#define user_sw(reg, addr) kernel_sw(reg, addr)
1492 +#define user_swl(reg, addr) kernel_swl(reg, addr)
1493 +#define user_swr(reg, addr) kernel_swr(reg, addr)
1494 +#define user_sh(reg, addr) kernel_sh(reg, addr)
1495 +#define user_sb(reg, addr) kernel_sb(reg, addr)
1496
1497 #ifdef CONFIG_32BIT
1498 -/*
1499 - * No 'sd' or 'ld' instructions in 32-bit but the code will
1500 - * do the correct thing
1501 - */
1502 -#define user_sd(reg, addr) user_sw(reg, addr)
1503 -#define user_ld(reg, addr) user_lw(reg, addr)
1504 +#define user_sd(reg, addr) kernel_sw(reg, addr)
1505 +#define user_ld(reg, addr) kernel_lw(reg, addr)
1506 #else
1507 -#define user_sd(reg, addr) "sd " reg", " addr "\n"
1508 -#define user_ld(reg, addr) "ld " reg", " addr "\n"
1509 +#define user_sd(reg, addr) kernel_sd(reg, addr)
1510 +#define user_ld(reg, addr) kernel_ld(reg, addr)
1511 #endif /* CONFIG_32BIT */
1512
1513 #endif /* CONFIG_EVA */
1514
1515 #else /* __ASSEMBLY__ */
1516
1517 +#define kernel_cache(op, base) cache op, base
1518 +#define kernel_ll(reg, addr) ll reg, addr
1519 +#define kernel_sc(reg, addr) sc reg, addr
1520 +#define kernel_lw(reg, addr) lw reg, addr
1521 +#define kernel_lwl(reg, addr) lwl reg, addr
1522 +#define kernel_lwr(reg, addr) lwr reg, addr
1523 +#define kernel_lh(reg, addr) lh reg, addr
1524 +#define kernel_lb(reg, addr) lb reg, addr
1525 +#define kernel_lbu(reg, addr) lbu reg, addr
1526 +#define kernel_sw(reg, addr) sw reg, addr
1527 +#define kernel_swl(reg, addr) swl reg, addr
1528 +#define kernel_swr(reg, addr) swr reg, addr
1529 +#define kernel_sh(reg, addr) sh reg, addr
1530 +#define kernel_sb(reg, addr) sb reg, addr
1531 +
1532 +#ifdef CONFIG_32BIT
1533 +/*
1534 + * No 'sd' or 'ld' instructions in 32-bit but the code will
1535 + * do the correct thing
1536 + */
1537 +#define kernel_sd(reg, addr) user_sw(reg, addr)
1538 +#define kernel_ld(reg, addr) user_lw(reg, addr)
1539 +#else
1540 +#define kernel_sd(reg, addr) sd reg, addr
1541 +#define kernel_ld(reg, addr) ld reg, addr
1542 +#endif /* CONFIG_32BIT */
1543 +
1544 #ifdef CONFIG_EVA
1545
1546 #define __BUILD_EVA_INSN(insn, reg, addr) \
1547 @@ -101,31 +154,27 @@
1548 #define user_sd(reg, addr) user_sw(reg, addr)
1549 #else
1550
1551 -#define user_cache(op, base) cache op, base
1552 -#define user_ll(reg, addr) ll reg, addr
1553 -#define user_sc(reg, addr) sc reg, addr
1554 -#define user_lw(reg, addr) lw reg, addr
1555 -#define user_lwl(reg, addr) lwl reg, addr
1556 -#define user_lwr(reg, addr) lwr reg, addr
1557 -#define user_lh(reg, addr) lh reg, addr
1558 -#define user_lb(reg, addr) lb reg, addr
1559 -#define user_lbu(reg, addr) lbu reg, addr
1560 -#define user_sw(reg, addr) sw reg, addr
1561 -#define user_swl(reg, addr) swl reg, addr
1562 -#define user_swr(reg, addr) swr reg, addr
1563 -#define user_sh(reg, addr) sh reg, addr
1564 -#define user_sb(reg, addr) sb reg, addr
1565 +#define user_cache(op, base) kernel_cache(op, base)
1566 +#define user_ll(reg, addr) kernel_ll(reg, addr)
1567 +#define user_sc(reg, addr) kernel_sc(reg, addr)
1568 +#define user_lw(reg, addr) kernel_lw(reg, addr)
1569 +#define user_lwl(reg, addr) kernel_lwl(reg, addr)
1570 +#define user_lwr(reg, addr) kernel_lwr(reg, addr)
1571 +#define user_lh(reg, addr) kernel_lh(reg, addr)
1572 +#define user_lb(reg, addr) kernel_lb(reg, addr)
1573 +#define user_lbu(reg, addr) kernel_lbu(reg, addr)
1574 +#define user_sw(reg, addr) kernel_sw(reg, addr)
1575 +#define user_swl(reg, addr) kernel_swl(reg, addr)
1576 +#define user_swr(reg, addr) kernel_swr(reg, addr)
1577 +#define user_sh(reg, addr) kernel_sh(reg, addr)
1578 +#define user_sb(reg, addr) kernel_sb(reg, addr)
1579
1580 #ifdef CONFIG_32BIT
1581 -/*
1582 - * No 'sd' or 'ld' instructions in 32-bit but the code will
1583 - * do the correct thing
1584 - */
1585 -#define user_sd(reg, addr) user_sw(reg, addr)
1586 -#define user_ld(reg, addr) user_lw(reg, addr)
1587 +#define user_sd(reg, addr) kernel_sw(reg, addr)
1588 +#define user_ld(reg, addr) kernel_lw(reg, addr)
1589 #else
1590 -#define user_sd(reg, addr) sd reg, addr
1591 -#define user_ld(reg, addr) ld reg, addr
1592 +#define user_sd(reg, addr) kernel_sd(reg, addr)
1593 +#define user_ld(reg, addr) kernel_sd(reg, addr)
1594 #endif /* CONFIG_32BIT */
1595
1596 #endif /* CONFIG_EVA */
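
[Editor's note: the point of the kernel_*/user_* split above is that the same accessor names work both from C inline assembly (the #ifndef __ASSEMBLY__ half) and from raw .S files (the #else half), with EVA kernels swapping the e-suffixed instructions into the user variants. A small usage sketch on the C side; read_user_word is an invented name, user_lw is defined by this header.

    #include <asm/asm-eva.h>

    /* Loads a word via the user variant; an EVA kernel emits lwe here. */
    static inline int read_user_word(const int *p)
    {
    	int val;

    	asm volatile(user_lw("%0", "0(%1)") : "=r"(val) : "r"(p));
    	return val;
    }
]
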
1597 diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
1598 index dd562414cd5e..99f71e87ce31 100644
1599 --- a/arch/mips/include/asm/fpu.h
1600 +++ b/arch/mips/include/asm/fpu.h
1601 @@ -150,6 +150,7 @@ static inline void lose_fpu(int save)
1602 }
1603 disable_msa();
1604 clear_thread_flag(TIF_USEDMSA);
1605 + __disable_fpu();
1606 } else if (is_fpu_owner()) {
1607 if (save)
1608 _save_fp(current);
1609 diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
1610 index f2c249796ea8..4e3205a3bee2 100644
1611 --- a/arch/mips/include/asm/kvm_host.h
1612 +++ b/arch/mips/include/asm/kvm_host.h
1613 @@ -321,6 +321,7 @@ enum mips_mmu_types {
1614 #define T_TRAP 13 /* Trap instruction */
1615 #define T_VCEI 14 /* Virtual coherency exception */
1616 #define T_FPE 15 /* Floating point exception */
1617 +#define T_MSADIS 21 /* MSA disabled exception */
1618 #define T_WATCH 23 /* Watch address reference */
1619 #define T_VCED 31 /* Virtual coherency data */
1620
1621 @@ -577,6 +578,7 @@ struct kvm_mips_callbacks {
1622 int (*handle_syscall)(struct kvm_vcpu *vcpu);
1623 int (*handle_res_inst)(struct kvm_vcpu *vcpu);
1624 int (*handle_break)(struct kvm_vcpu *vcpu);
1625 + int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
1626 int (*vm_init)(struct kvm *kvm);
1627 int (*vcpu_init)(struct kvm_vcpu *vcpu);
1628 int (*vcpu_setup)(struct kvm_vcpu *vcpu);
1629 diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
1630 index fa1f3cfbae8d..d68e685cde60 100644
1631 --- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
1632 +++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
1633 @@ -50,7 +50,6 @@
1634 #define cpu_has_mips32r2 0
1635 #define cpu_has_mips64r1 0
1636 #define cpu_has_mips64r2 1
1637 -#define cpu_has_mips_r2_exec_hazard 0
1638 #define cpu_has_dsp 0
1639 #define cpu_has_dsp2 0
1640 #define cpu_has_mipsmt 0
1641 diff --git a/arch/mips/include/asm/octeon/pci-octeon.h b/arch/mips/include/asm/octeon/pci-octeon.h
1642 index 64ba56a02843..1884609741a8 100644
1643 --- a/arch/mips/include/asm/octeon/pci-octeon.h
1644 +++ b/arch/mips/include/asm/octeon/pci-octeon.h
1645 @@ -11,9 +11,6 @@
1646
1647 #include <linux/pci.h>
1648
1649 -/* Some PCI cards require delays when accessing config space. */
1650 -#define PCI_CONFIG_SPACE_DELAY 10000
1651 -
1652 /*
1653 * The physical memory base mapped by BAR1. 256MB at the end of the
1654 * first 4GB.
1655 diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
1656 index 4353d323f017..39d682937d52 100644
1657 --- a/arch/mips/kernel/entry.S
1658 +++ b/arch/mips/kernel/entry.S
1659 @@ -10,6 +10,7 @@
1660
1661 #include <asm/asm.h>
1662 #include <asm/asmmacro.h>
1663 +#include <asm/compiler.h>
1664 #include <asm/regdef.h>
1665 #include <asm/mipsregs.h>
1666 #include <asm/stackframe.h>
1667 @@ -166,7 +167,7 @@ syscall_exit_work:
1668 * For C code use the inline version named instruction_hazard().
1669 */
1670 LEAF(mips_ihb)
1671 - .set mips32r2
1672 + .set MIPS_ISA_LEVEL_RAW
1673 jr.hb ra
1674 nop
1675 END(mips_ihb)
1676 diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
1677 index e6e16a1d4add..0854f17829f3 100644
1678 --- a/arch/mips/kernel/smp-cps.c
1679 +++ b/arch/mips/kernel/smp-cps.c
1680 @@ -88,6 +88,12 @@ static void __init cps_smp_setup(void)
1681
1682 /* Make core 0 coherent with everything */
1683 write_gcr_cl_coherence(0xff);
1684 +
1685 +#ifdef CONFIG_MIPS_MT_FPAFF
1686 + /* If we have an FPU, enroll ourselves in the FPU-full mask */
1687 + if (cpu_has_fpu)
1688 + cpu_set(0, mt_fpu_cpumask);
1689 +#endif /* CONFIG_MIPS_MT_FPAFF */
1690 }
1691
1692 static void __init cps_prepare_cpus(unsigned int max_cpus)
1693 diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
1694 index fb3e8dfd1ff6..838d3a6a5b7d 100644
1695 --- a/arch/mips/kvm/emulate.c
1696 +++ b/arch/mips/kvm/emulate.c
1697 @@ -2176,6 +2176,7 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
1698 case T_SYSCALL:
1699 case T_BREAK:
1700 case T_RES_INST:
1701 + case T_MSADIS:
1702 break;
1703
1704 case T_COP_UNUSABLE:
1705 diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
1706 index 270bbd41769e..39074fb83bad 100644
1707 --- a/arch/mips/kvm/mips.c
1708 +++ b/arch/mips/kvm/mips.c
1709 @@ -1119,6 +1119,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
1710 ret = kvm_mips_callbacks->handle_break(vcpu);
1711 break;
1712
1713 + case T_MSADIS:
1714 + ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
1715 + break;
1716 +
1717 default:
1718 kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n",
1719 exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
1720 diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
1721 index fd7257b70e65..4372cc86650c 100644
1722 --- a/arch/mips/kvm/trap_emul.c
1723 +++ b/arch/mips/kvm/trap_emul.c
1724 @@ -330,6 +330,33 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
1725 return ret;
1726 }
1727
1728 +static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
1729 +{
1730 + struct kvm_run *run = vcpu->run;
1731 + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
1732 + unsigned long cause = vcpu->arch.host_cp0_cause;
1733 + enum emulation_result er = EMULATE_DONE;
1734 + int ret = RESUME_GUEST;
1735 +
1736 + /* No MSA supported in guest: deliver a reserved instruction exception */
1737 + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
1738 +
1739 + switch (er) {
1740 + case EMULATE_DONE:
1741 + ret = RESUME_GUEST;
1742 + break;
1743 +
1744 + case EMULATE_FAIL:
1745 + run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1746 + ret = RESUME_HOST;
1747 + break;
1748 +
1749 + default:
1750 + BUG();
1751 + }
1752 + return ret;
1753 +}
1754 +
1755 static int kvm_trap_emul_vm_init(struct kvm *kvm)
1756 {
1757 return 0;
1758 @@ -470,6 +497,7 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
1759 .handle_syscall = kvm_trap_emul_handle_syscall,
1760 .handle_res_inst = kvm_trap_emul_handle_res_inst,
1761 .handle_break = kvm_trap_emul_handle_break,
1762 + .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
1763
1764 .vm_init = kvm_trap_emul_vm_init,
1765 .vcpu_init = kvm_trap_emul_vcpu_init,
1766 diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
1767 index ca1c62af5188..8f5209aff01e 100644
1768 --- a/arch/mips/loongson/loongson-3/irq.c
1769 +++ b/arch/mips/loongson/loongson-3/irq.c
1770 @@ -44,6 +44,7 @@ void mach_irq_dispatch(unsigned int pending)
1771
1772 static struct irqaction cascade_irqaction = {
1773 .handler = no_action,
1774 + .flags = IRQF_NO_SUSPEND,
1775 .name = "cascade",
1776 };
1777
1778 diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
1779 index 4ec8ee10d371..06e0f421b41b 100644
1780 --- a/arch/mips/mm/hugetlbpage.c
1781 +++ b/arch/mips/mm/hugetlbpage.c
1782 @@ -68,12 +68,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
1783 return 0;
1784 }
1785
1786 -struct page *
1787 -follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
1788 -{
1789 - return ERR_PTR(-EINVAL);
1790 -}
1791 -
1792 int pmd_huge(pmd_t pmd)
1793 {
1794 return (pmd_val(pmd) & _PAGE_HUGE) != 0;
1795 @@ -83,15 +77,3 @@ int pud_huge(pud_t pud)
1796 {
1797 return (pud_val(pud) & _PAGE_HUGE) != 0;
1798 }
1799 -
1800 -struct page *
1801 -follow_huge_pmd(struct mm_struct *mm, unsigned long address,
1802 - pmd_t *pmd, int write)
1803 -{
1804 - struct page *page;
1805 -
1806 - page = pte_page(*(pte_t *)pmd);
1807 - if (page)
1808 - page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
1809 - return page;
1810 -}
1811 diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
1812 index 8fddd2cdbff7..efe366d618b1 100644
1813 --- a/arch/mips/mti-malta/malta-memory.c
1814 +++ b/arch/mips/mti-malta/malta-memory.c
1815 @@ -53,6 +53,12 @@ fw_memblock_t * __init fw_getmdesc(int eva)
1816 pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
1817 physical_memsize = 0x02000000;
1818 } else {
1819 + if (memsize > (256 << 20)) { /* memsize should be capped to 256M */
1820 + pr_warn("Unsupported memsize value (0x%lx) detected! "
1821 + "Using 0x10000000 (256M) instead\n",
1822 + memsize);
1823 + memsize = 256 << 20;
1824 + }
1825 /* If ememsize is set, then set physical_memsize to that */
1826 physical_memsize = ememsize ? : memsize;
1827 }
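The hunk above stops trusting an oversized YAMON memsize and clamps it to 256MB with a warning. A standalone sketch of the same warn-and-clamp pattern; MEMSIZE_CAP and clamp_memsize are illustrative names, not Malta code:

    #include <stdio.h>

    #define MEMSIZE_CAP (256UL << 20)  /* 256MB, as in the hunk */

    static unsigned long clamp_memsize(unsigned long memsize)
    {
        if (memsize > MEMSIZE_CAP) {
            fprintf(stderr, "Unsupported memsize value (0x%lx), using 0x%lx\n",
                    memsize, MEMSIZE_CAP);
            memsize = MEMSIZE_CAP;
        }
        return memsize;
    }

    int main(void)
    {
        printf("%#lx\n", clamp_memsize(512UL << 20));  /* clamped */
        printf("%#lx\n", clamp_memsize(64UL << 20));   /* unchanged */
        return 0;
    }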
1828 diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
1829 index c83dbf3689e2..7b066a44e679 100644
1830 --- a/arch/mips/netlogic/xlp/ahci-init-xlp2.c
1831 +++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
1832 @@ -203,6 +203,7 @@ static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
1833 static void config_sata_phy(u64 regbase)
1834 {
1835 u32 port, i, reg;
1836 + u8 val;
1837
1838 for (port = 0; port < 2; port++) {
1839 for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
1840 @@ -210,6 +211,18 @@ static void config_sata_phy(u64 regbase)
1841
1842 for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
1843 write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
1844 +
1845 + /* Fix for PHY link up failures at lower temperatures */
1846 + write_phy_reg(regbase, 0x800F, port, 0x1f);
1847 +
1848 + val = read_phy_reg(regbase, 0x0029, port);
1849 + write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
1850 +
1851 + val = read_phy_reg(regbase, 0x0056, port);
1852 + write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
1853 +
1854 + val = read_phy_reg(regbase, 0x0018, port);
1855 + write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
1856 }
1857 }
1858
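The PHY fix above is a sequence of read-modify-write accesses: set bits 3:1 of register 0x0029, clear bit 3 of 0x0056, clear bits 2:0 of 0x0018. A hedged sketch with the register file reduced to a plain array; the real read_phy_reg/write_phy_reg also take a register base and a port:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t regs[0x10000];

    static uint8_t read_phy_reg(uint32_t addr) { return regs[addr]; }
    static void write_phy_reg(uint32_t addr, uint8_t v) { regs[addr] = v; }

    int main(void)
    {
        uint8_t val;

        write_phy_reg(0x800F, 0x1f);              /* plain write */

        val = read_phy_reg(0x0029);               /* set bits 3:1 */
        write_phy_reg(0x0029, val | (0x7 << 1));

        val = read_phy_reg(0x0056);               /* clear bit 3 */
        write_phy_reg(0x0056, val & ~(1 << 3));

        val = read_phy_reg(0x0018);               /* clear bits 2:0 */
        write_phy_reg(0x0018, val & ~(0x7 << 0));

        printf("0x0029 = %#x\n", regs[0x0029]);
        return 0;
    }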
1859 diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
1860 index 59cccd95688b..14d3351227ef 100644
1861 --- a/arch/mips/pci/pci-octeon.c
1862 +++ b/arch/mips/pci/pci-octeon.c
1863 @@ -214,6 +214,8 @@ const char *octeon_get_pci_interrupts(void)
1864 return "AAABAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
1865 case CVMX_BOARD_TYPE_BBGW_REF:
1866 return "AABCD";
1867 + case CVMX_BOARD_TYPE_CUST_DSR1000N:
1868 + return "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC";
1869 case CVMX_BOARD_TYPE_THUNDER:
1870 case CVMX_BOARD_TYPE_EBH3000:
1871 default:
1872 @@ -271,9 +273,6 @@ static int octeon_read_config(struct pci_bus *bus, unsigned int devfn,
1873 pci_addr.s.func = devfn & 0x7;
1874 pci_addr.s.reg = reg;
1875
1876 -#if PCI_CONFIG_SPACE_DELAY
1877 - udelay(PCI_CONFIG_SPACE_DELAY);
1878 -#endif
1879 switch (size) {
1880 case 4:
1881 *val = le32_to_cpu(cvmx_read64_uint32(pci_addr.u64));
1882 @@ -308,9 +307,6 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
1883 pci_addr.s.func = devfn & 0x7;
1884 pci_addr.s.reg = reg;
1885
1886 -#if PCI_CONFIG_SPACE_DELAY
1887 - udelay(PCI_CONFIG_SPACE_DELAY);
1888 -#endif
1889 switch (size) {
1890 case 4:
1891 cvmx_write64_uint32(pci_addr.u64, cpu_to_le32(val));
1892 diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c
1893 index 5e36c33e5543..38335af0a7f3 100644
1894 --- a/arch/mips/pci/pcie-octeon.c
1895 +++ b/arch/mips/pci/pcie-octeon.c
1896 @@ -1762,14 +1762,6 @@ static int octeon_pcie_write_config(unsigned int pcie_port, struct pci_bus *bus,
1897 default:
1898 return PCIBIOS_FUNC_NOT_SUPPORTED;
1899 }
1900 -#if PCI_CONFIG_SPACE_DELAY
1901 - /*
1902 - * Delay on writes so that devices have time to come up. Some
1903 - * bridges need this to allow time for the secondary busses to
1904 - * work
1905 - */
1906 - udelay(PCI_CONFIG_SPACE_DELAY);
1907 -#endif
1908 return PCIBIOS_SUCCESSFUL;
1909 }
1910
1911 diff --git a/arch/mips/power/hibernate.S b/arch/mips/power/hibernate.S
1912 index 32a7c828f073..e7567c8a9e79 100644
1913 --- a/arch/mips/power/hibernate.S
1914 +++ b/arch/mips/power/hibernate.S
1915 @@ -30,6 +30,8 @@ LEAF(swsusp_arch_suspend)
1916 END(swsusp_arch_suspend)
1917
1918 LEAF(swsusp_arch_resume)
1919 + /* Avoid TLB mismatch during and after kernel resume */
1920 + jal local_flush_tlb_all
1921 PTR_L t0, restore_pblist
1922 0:
1923 PTR_L t1, PBE_ADDRESS(t0) /* source */
1924 @@ -43,7 +45,6 @@ LEAF(swsusp_arch_resume)
1925 bne t1, t3, 1b
1926 PTR_L t0, PBE_NEXT(t0)
1927 bnez t0, 0b
1928 - jal local_flush_tlb_all /* Avoid TLB mismatch after kernel resume */
1929 PTR_LA t0, saved_regs
1930 PTR_L ra, PT_R31(t0)
1931 PTR_L sp, PT_R29(t0)
1932 diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
1933 index 77e8a9620e18..d50914d5191e 100644
1934 --- a/arch/mips/ralink/Kconfig
1935 +++ b/arch/mips/ralink/Kconfig
1936 @@ -7,6 +7,11 @@ config CLKEVT_RT3352
1937 select CLKSRC_OF
1938 select CLKSRC_MMIO
1939
1940 +config RALINK_ILL_ACC
1941 + bool
1942 + depends on SOC_RT305X
1943 + default y
1944 +
1945 choice
1946 prompt "Ralink SoC selection"
1947 default SOC_RT305X
1948 diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
1949 index 40198d50b4c2..8005b79ecbcf 100644
1950 --- a/arch/powerpc/kernel/cacheinfo.c
1951 +++ b/arch/powerpc/kernel/cacheinfo.c
1952 @@ -61,12 +61,22 @@ struct cache_type_info {
1953 };
1954
1955 /* These are used to index the cache_type_info array. */
1956 -#define CACHE_TYPE_UNIFIED 0
1957 -#define CACHE_TYPE_INSTRUCTION 1
1958 -#define CACHE_TYPE_DATA 2
1959 +#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
1960 +#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
1961 +#define CACHE_TYPE_INSTRUCTION 2
1962 +#define CACHE_TYPE_DATA 3
1963
1964 static const struct cache_type_info cache_type_info[] = {
1965 {
1966 + /* Embedded systems that use cache-size, cache-block-size,
1967 + * etc. for the Unified (typically L2) cache. */
1968 + .name = "Unified",
1969 + .size_prop = "cache-size",
1970 + .line_size_props = { "cache-line-size",
1971 + "cache-block-size", },
1972 + .nr_sets_prop = "cache-sets",
1973 + },
1974 + {
1975 /* PowerPC Processor binding says the [di]-cache-*
1976 * must be equal on unified caches, so just use
1977 * d-cache properties. */
1978 @@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
1979 {
1980 struct cache *iter;
1981
1982 - if (cache->type == CACHE_TYPE_UNIFIED)
1983 + if (cache->type == CACHE_TYPE_UNIFIED ||
1984 + cache->type == CACHE_TYPE_UNIFIED_D)
1985 return cache;
1986
1987 list_for_each_entry(iter, &cache_list, list)
1988 @@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
1989 return of_get_property(np, "cache-unified", NULL);
1990 }
1991
1992 -static struct cache *cache_do_one_devnode_unified(struct device_node *node,
1993 - int level)
1994 +/*
1995 + * Unified caches can have two different sets of tags. Most embedded
1996 + * systems use cache-size, etc. for the unified cache size, but open
1997 + * firmware systems use d-cache-size, etc. Check on initialization
1998 + * which flavour we have and return the appropriate structure type.
1999 + * Assume it's embedded if it isn't open firmware. If a third flavour
2000 + * ever appears, /sys/devices/system/cpu/cpu0/cache/index2/ will have
2001 + * missing entries and this code will need to be extended further.
2002 + */
2003 +static int cache_is_unified_d(const struct device_node *np)
2004 {
2005 - struct cache *cache;
2006 + return of_get_property(np,
2007 + cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
2008 + CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
2009 +}
2010
2013 +static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
2014 +{
2015 pr_debug("creating L%d ucache for %s\n", level, node->full_name);
2016
2017 - cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
2018 -
2019 - return cache;
2020 + return new_cache(cache_is_unified_d(node), level, node);
2021 }
2022
2023 static struct cache *cache_do_one_devnode_split(struct device_node *node,
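The rework above picks the property flavour at runtime: if the node carries d-cache-size, the unified cache is treated as the open-firmware variant, otherwise as the embedded one. A small sketch of that check with the device tree stubbed out; has_prop and node_props are stand-ins, not the OF API:

    #include <stdio.h>
    #include <string.h>

    enum { CACHE_TYPE_UNIFIED, CACHE_TYPE_UNIFIED_D };

    static const char *node_props[] = { "d-cache-size", "d-cache-block-size", NULL };

    static int has_prop(const char *name)
    {
        for (int i = 0; node_props[i]; i++)
            if (!strcmp(node_props[i], name))
                return 1;
        return 0;
    }

    /* Embedded nodes carry cache-size; OF nodes carry d-cache-size. */
    static int cache_is_unified_d(void)
    {
        return has_prop("d-cache-size") ? CACHE_TYPE_UNIFIED_D
                                        : CACHE_TYPE_UNIFIED;
    }

    int main(void)
    {
        printf("type = %s\n", cache_is_unified_d() == CACHE_TYPE_UNIFIED_D
               ? "UNIFIED_D" : "UNIFIED");
        return 0;
    }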
2024 diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
2025 index 6a4a5fcb9730..17e83a043bba 100644
2026 --- a/arch/powerpc/mm/hugetlbpage.c
2027 +++ b/arch/powerpc/mm/hugetlbpage.c
2028 @@ -704,6 +704,14 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address,
2029 return NULL;
2030 }
2031
2032 +struct page *
2033 +follow_huge_pud(struct mm_struct *mm, unsigned long address,
2034 + pud_t *pud, int write)
2035 +{
2036 + BUG();
2037 + return NULL;
2038 +}
2039 +
2040 static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
2041 unsigned long sz)
2042 {
2043 diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
2044 index 2396dda282cd..ead55351b254 100644
2045 --- a/arch/powerpc/perf/callchain.c
2046 +++ b/arch/powerpc/perf/callchain.c
2047 @@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
2048 sp = regs->gpr[1];
2049 perf_callchain_store(entry, next_ip);
2050
2051 - for (;;) {
2052 + while (entry->nr < PERF_MAX_STACK_DEPTH) {
2053 fp = (unsigned long __user *) sp;
2054 if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
2055 return;
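The callchain hunk above replaces an unbounded for (;;) with a walk capped at PERF_MAX_STACK_DEPTH, so a corrupt or self-referencing user stack can no longer spin the sampler forever. A compact sketch of the bounded walk; frames are modelled as a linked list and the depth constant is shrunk for the demo:

    #include <stdio.h>

    #define PERF_MAX_STACK_DEPTH 8  /* kernel value is larger; small for demo */

    struct frame {
        struct frame *next_sp;
        unsigned long ip;
    };

    static void walk(const struct frame *sp)
    {
        unsigned int nr = 0;

        /* was for (;;): a looping stack would never terminate */
        while (sp && nr < PERF_MAX_STACK_DEPTH) {
            printf("ip=%#lx\n", sp->ip);
            nr++;
            sp = sp->next_sp;
        }
    }

    int main(void)
    {
        struct frame loop = { &loop, 0x1234 };  /* self-referencing frame */

        walk(&loop);  /* prints 8 entries, then stops at the cap */
        return 0;
    }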
2056 diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
2057 index 8a106b4172e0..28e558d3316d 100644
2058 --- a/arch/powerpc/platforms/cell/interrupt.c
2059 +++ b/arch/powerpc/platforms/cell/interrupt.c
2060 @@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
2061
2062 void iic_setup_cpu(void)
2063 {
2064 - out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
2065 + out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
2066 }
2067
2068 u8 iic_get_target_id(int cpu)
2069 diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
2070 index 2b90ff8a93be..59ef76c5f4f4 100644
2071 --- a/arch/powerpc/platforms/cell/iommu.c
2072 +++ b/arch/powerpc/platforms/cell/iommu.c
2073 @@ -197,7 +197,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
2074
2075 io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
2076
2077 - for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
2078 + for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
2079 io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
2080
2081 mb();
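The one-line IOMMU fix above is pure arithmetic: the loop was advancing uaddr by the page *shift* (e.g. 12) instead of the page *size* (1 << 12), so every IOPTE after the first mapped garbage. A sketch of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned long uaddr = 0x100000;
        int page_shift = 12;  /* 4KB pages */

        printf("buggy step:   %#lx\n", uaddr + page_shift);          /* +12 bytes */
        printf("correct step: %#lx\n", uaddr + (1UL << page_shift)); /* +4KB */
        return 0;
    }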
2082 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
2083 index 3f596706a5b7..86a7256d84e5 100644
2084 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
2085 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
2086 @@ -1645,7 +1645,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
2087 region.start += phb->ioda.io_segsize;
2088 index++;
2089 }
2090 - } else if (res->flags & IORESOURCE_MEM) {
2091 + } else if ((res->flags & IORESOURCE_MEM) &&
2092 + !pnv_pci_is_mem_pref_64(res->flags)) {
2093 region.start = res->start -
2094 hose->mem_offset[0] -
2095 phb->ioda.m32_pci_base;
2096 diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
2097 index 1c4c5accd220..d3236c9e226b 100644
2098 --- a/arch/s390/kernel/suspend.c
2099 +++ b/arch/s390/kernel/suspend.c
2100 @@ -138,6 +138,8 @@ int pfn_is_nosave(unsigned long pfn)
2101 {
2102 unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin));
2103 unsigned long nosave_end_pfn = PFN_DOWN(__pa(&__nosave_end));
2104 + unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1;
2105 + unsigned long stext_pfn = PFN_DOWN(__pa(&_stext));
2106
2107 /* Always save lowcore pages (LC protection might be enabled). */
2108 if (pfn <= LC_PAGES)
2109 @@ -145,6 +147,8 @@ int pfn_is_nosave(unsigned long pfn)
2110 if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn)
2111 return 1;
2112 /* Skip memory holes and read-only pages (NSS, DCSS, ...). */
2113 + if (pfn >= stext_pfn && pfn <= eshared_pfn)
2114 + return ipl_info.type == IPL_TYPE_NSS ? 1 : 0;
2115 if (tprot(PFN_PHYS(pfn)))
2116 return 1;
2117 return 0;
2118 diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
2119 index 29e2e5aa2111..cd6344d334cb 100644
2120 --- a/arch/s390/kvm/interrupt.c
2121 +++ b/arch/s390/kvm/interrupt.c
2122 @@ -16,6 +16,7 @@
2123 #include <linux/mmu_context.h>
2124 #include <linux/signal.h>
2125 #include <linux/slab.h>
2126 +#include <linux/vmalloc.h>
2127 #include <asm/asm-offsets.h>
2128 #include <asm/uaccess.h>
2129 #include "kvm-s390.h"
2130 @@ -784,7 +785,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
2131
2132 if ((!schid && !cr6) || (schid && cr6))
2133 return NULL;
2134 - mutex_lock(&kvm->lock);
2135 fi = &kvm->arch.float_int;
2136 spin_lock(&fi->lock);
2137 inti = NULL;
2138 @@ -812,7 +812,6 @@ struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
2139 if (list_empty(&fi->list))
2140 atomic_set(&fi->active, 0);
2141 spin_unlock(&fi->lock);
2142 - mutex_unlock(&kvm->lock);
2143 return inti;
2144 }
2145
2146 @@ -825,7 +824,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
2147 int sigcpu;
2148 int rc = 0;
2149
2150 - mutex_lock(&kvm->lock);
2151 fi = &kvm->arch.float_int;
2152 spin_lock(&fi->lock);
2153 if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
2154 @@ -868,7 +866,6 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
2155 kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
2156 unlock_fi:
2157 spin_unlock(&fi->lock);
2158 - mutex_unlock(&kvm->lock);
2159 return rc;
2160 }
2161
2162 @@ -930,10 +927,10 @@ int kvm_s390_inject_vm(struct kvm *kvm,
2163 return rc;
2164 }
2165
2166 -void kvm_s390_reinject_io_int(struct kvm *kvm,
2167 +int kvm_s390_reinject_io_int(struct kvm *kvm,
2168 struct kvm_s390_interrupt_info *inti)
2169 {
2170 - __inject_vm(kvm, inti);
2171 + return __inject_vm(kvm, inti);
2172 }
2173
2174 int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
2175 @@ -1029,7 +1026,6 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
2176 struct kvm_s390_float_interrupt *fi;
2177 struct kvm_s390_interrupt_info *n, *inti = NULL;
2178
2179 - mutex_lock(&kvm->lock);
2180 fi = &kvm->arch.float_int;
2181 spin_lock(&fi->lock);
2182 list_for_each_entry_safe(inti, n, &fi->list, list) {
2183 @@ -1039,66 +1035,68 @@ void kvm_s390_clear_float_irqs(struct kvm *kvm)
2184 fi->irq_count = 0;
2185 atomic_set(&fi->active, 0);
2186 spin_unlock(&fi->lock);
2187 - mutex_unlock(&kvm->lock);
2188 }
2189
2190 -static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
2191 - u8 *addr)
2192 +static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
2193 + struct kvm_s390_irq *irq)
2194 {
2195 - struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2196 - struct kvm_s390_irq irq = {0};
2197 -
2198 - irq.type = inti->type;
2199 + irq->type = inti->type;
2200 switch (inti->type) {
2201 case KVM_S390_INT_PFAULT_INIT:
2202 case KVM_S390_INT_PFAULT_DONE:
2203 case KVM_S390_INT_VIRTIO:
2204 case KVM_S390_INT_SERVICE:
2205 - irq.u.ext = inti->ext;
2206 + irq->u.ext = inti->ext;
2207 break;
2208 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2209 - irq.u.io = inti->io;
2210 + irq->u.io = inti->io;
2211 break;
2212 case KVM_S390_MCHK:
2213 - irq.u.mchk = inti->mchk;
2214 + irq->u.mchk = inti->mchk;
2215 break;
2216 - default:
2217 - return -EINVAL;
2218 }
2219 -
2220 - if (copy_to_user(uptr, &irq, sizeof(irq)))
2221 - return -EFAULT;
2222 -
2223 - return 0;
2224 }
2225
2226 -static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
2227 +static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
2228 {
2229 struct kvm_s390_interrupt_info *inti;
2230 struct kvm_s390_float_interrupt *fi;
2231 + struct kvm_s390_irq *buf;
2232 + int max_irqs;
2233 int ret = 0;
2234 int n = 0;
2235
2236 - mutex_lock(&kvm->lock);
2237 + if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2238 + return -EINVAL;
2239 +
2240 + /*
2241 + * We are already using -ENOMEM to signal
2242 + * userspace it may retry with a bigger buffer,
2243 + * so we need to use something else for this case
2244 + */
2245 + buf = vzalloc(len);
2246 + if (!buf)
2247 + return -ENOBUFS;
2248 +
2249 + max_irqs = len / sizeof(struct kvm_s390_irq);
2250 +
2251 fi = &kvm->arch.float_int;
2252 spin_lock(&fi->lock);
2253 -
2254 list_for_each_entry(inti, &fi->list, list) {
2255 - if (len < sizeof(struct kvm_s390_irq)) {
2256 + if (n == max_irqs) {
2257 /* signal userspace to try again */
2258 ret = -ENOMEM;
2259 break;
2260 }
2261 - ret = copy_irq_to_user(inti, buf);
2262 - if (ret)
2263 - break;
2264 - buf += sizeof(struct kvm_s390_irq);
2265 - len -= sizeof(struct kvm_s390_irq);
2266 + inti_to_irq(inti, &buf[n]);
2267 n++;
2268 }
2269 -
2270 spin_unlock(&fi->lock);
2271 - mutex_unlock(&kvm->lock);
2272 + if (!ret && n > 0) {
2273 + if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2274 + ret = -EFAULT;
2275 + }
2276 + vfree(buf);
2277
2278 return ret < 0 ? ret : n;
2279 }
2280 @@ -1109,7 +1107,7 @@ static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2281
2282 switch (attr->group) {
2283 case KVM_DEV_FLIC_GET_ALL_IRQS:
2284 - r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
2285 + r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
2286 attr->attr);
2287 break;
2288 default:
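The reworked get_all_floating_irqs() above snapshots the interrupt list into a kernel scratch buffer under the spinlock and only calls copy_to_user() after unlocking, with -ENOBUFS reserved for the allocation failure since -ENOMEM already means "retry with a bigger buffer". A userspace analogue of that shape; a pthread mutex and memcpy stand in for the spinlock and copy_to_user, and the error codes are illustrative:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct irq { int type; };

    static struct irq pending[4] = { {1}, {2}, {3}, {4} };
    static int n_pending = 4;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int get_all_irqs(struct irq *out, size_t max_irqs)
    {
        struct irq *buf;
        int n = 0, ret = 0;

        buf = calloc(max_irqs, sizeof(*buf));
        if (!buf)
            return -1;  /* -ENOBUFS in the hunk */

        pthread_mutex_lock(&lock);
        for (int i = 0; i < n_pending; i++) {
            if ((size_t)n == max_irqs) {
                ret = -2;  /* -ENOMEM: userspace retries bigger */
                break;
            }
            buf[n++] = pending[i];  /* snapshot under the lock */
        }
        pthread_mutex_unlock(&lock);

        if (!ret && n > 0)
            memcpy(out, buf, n * sizeof(*buf));  /* copy_to_user() here */
        free(buf);
        return ret < 0 ? ret : n;
    }

    int main(void)
    {
        struct irq out[4];

        printf("copied %d irqs\n", get_all_irqs(out, 4));
        printf("small buffer -> %d\n", get_all_irqs(out, 2));
        return 0;
    }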
2289 diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
2290 index 244d02303182..4c173474ecfe 100644
2291 --- a/arch/s390/kvm/kvm-s390.h
2292 +++ b/arch/s390/kvm/kvm-s390.h
2293 @@ -148,8 +148,8 @@ int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
2294 int __must_check kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code);
2295 struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
2296 u64 cr6, u64 schid);
2297 -void kvm_s390_reinject_io_int(struct kvm *kvm,
2298 - struct kvm_s390_interrupt_info *inti);
2299 +int kvm_s390_reinject_io_int(struct kvm *kvm,
2300 + struct kvm_s390_interrupt_info *inti);
2301 int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);
2302
2303 /* implemented in priv.c */
2304 diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
2305 index 9c565b6b4ccb..a30102c4d707 100644
2306 --- a/arch/s390/kvm/priv.c
2307 +++ b/arch/s390/kvm/priv.c
2308 @@ -228,18 +228,19 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
2309 struct kvm_s390_interrupt_info *inti;
2310 unsigned long len;
2311 u32 tpi_data[3];
2312 - int cc, rc;
2313 + int rc;
2314 u64 addr;
2315
2316 - rc = 0;
2317 addr = kvm_s390_get_base_disp_s(vcpu);
2318 if (addr & 3)
2319 return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
2320 - cc = 0;
2321 +
2322 inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
2323 - if (!inti)
2324 - goto no_interrupt;
2325 - cc = 1;
2326 + if (!inti) {
2327 + kvm_s390_set_psw_cc(vcpu, 0);
2328 + return 0;
2329 + }
2330 +
2331 tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
2332 tpi_data[1] = inti->io.io_int_parm;
2333 tpi_data[2] = inti->io.io_int_word;
2334 @@ -250,30 +251,38 @@ static int handle_tpi(struct kvm_vcpu *vcpu)
2335 */
2336 len = sizeof(tpi_data) - 4;
2337 rc = write_guest(vcpu, addr, &tpi_data, len);
2338 - if (rc)
2339 - return kvm_s390_inject_prog_cond(vcpu, rc);
2340 + if (rc) {
2341 + rc = kvm_s390_inject_prog_cond(vcpu, rc);
2342 + goto reinject_interrupt;
2343 + }
2344 } else {
2345 /*
2346 * Store the three-word I/O interruption code into
2347 * the appropriate lowcore area.
2348 */
2349 len = sizeof(tpi_data);
2350 - if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len))
2351 + if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
2352 + /* failed writes to the low core are not recoverable */
2353 rc = -EFAULT;
2354 + goto reinject_interrupt;
2355 + }
2356 }
2357 +
2358 + /* irq was successfully handed to the guest */
2359 + kfree(inti);
2360 + kvm_s390_set_psw_cc(vcpu, 1);
2361 + return 0;
2362 +reinject_interrupt:
2363 /*
2364 * If we encounter a problem storing the interruption code, the
2365 * instruction is suppressed from the guest's view: reinject the
2366 * interrupt.
2367 */
2368 - if (!rc)
2369 + if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
2370 kfree(inti);
2371 - else
2372 - kvm_s390_reinject_io_int(vcpu->kvm, inti);
2373 -no_interrupt:
2374 - /* Set condition code and we're done. */
2375 - if (!rc)
2376 - kvm_s390_set_psw_cc(vcpu, cc);
2377 + rc = -EFAULT;
2378 + }
2379 + /* don't set the cc, a pgm irq was injected or we drop to user space */
2380 return rc ? -EFAULT : 0;
2381 }
2382
2383 @@ -461,6 +470,7 @@ static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
2384 for (n = mem->count - 1; n > 0 ; n--)
2385 memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));
2386
2387 + memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
2388 mem->vm[0].cpus_total = cpus;
2389 mem->vm[0].cpus_configured = cpus;
2390 mem->vm[0].cpus_standby = 0;
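The handle_tpi() rework above makes ownership of the dequeued interrupt explicit: it is freed only once it has reached the guest, and reinjected (or, failing that, dropped with an error) when storing the interruption code fails. A loose single-slot sketch of that consume-or-reinject flow; take/reinject/deliver are invented stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    static int *queue;  /* one pending item or NULL */

    static int *take(void)      { int *p = queue; queue = NULL; return p; }
    static int reinject(int *p) { if (queue) return -1; queue = p; return 0; }

    static int deliver(int *item, int fail)
    {
        if (!fail) {
            free(item);  /* handed to the guest: consume it */
            return 0;
        }
        if (reinject(item))  /* give it back on failure... */
            free(item);      /* ...or drop it if even that fails */
        return -1;
    }

    int main(void)
    {
        queue = malloc(sizeof(*queue));

        deliver(take(), 1);  /* failed write: reinjected */
        printf("still pending: %s\n", queue ? "yes" : "no");
        deliver(take(), 0);  /* success: consumed */
        printf("still pending: %s\n", queue ? "yes" : "no");
        return 0;
    }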
2391 diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
2392 index 3c80d2e38f03..210ffede0153 100644
2393 --- a/arch/s390/mm/hugetlbpage.c
2394 +++ b/arch/s390/mm/hugetlbpage.c
2395 @@ -192,12 +192,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
2396 return 0;
2397 }
2398
2399 -struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
2400 - int write)
2401 -{
2402 - return ERR_PTR(-EINVAL);
2403 -}
2404 -
2405 int pmd_huge(pmd_t pmd)
2406 {
2407 if (!MACHINE_HAS_HPAGE)
2408 @@ -210,17 +204,3 @@ int pud_huge(pud_t pud)
2409 {
2410 return 0;
2411 }
2412 -
2413 -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
2414 - pmd_t *pmdp, int write)
2415 -{
2416 - struct page *page;
2417 -
2418 - if (!MACHINE_HAS_HPAGE)
2419 - return NULL;
2420 -
2421 - page = pmd_page(*pmdp);
2422 - if (page)
2423 - page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
2424 - return page;
2425 -}
2426 diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c
2427 index d7762349ea48..534bc978af8a 100644
2428 --- a/arch/sh/mm/hugetlbpage.c
2429 +++ b/arch/sh/mm/hugetlbpage.c
2430 @@ -67,12 +67,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
2431 return 0;
2432 }
2433
2434 -struct page *follow_huge_addr(struct mm_struct *mm,
2435 - unsigned long address, int write)
2436 -{
2437 - return ERR_PTR(-EINVAL);
2438 -}
2439 -
2440 int pmd_huge(pmd_t pmd)
2441 {
2442 return 0;
2443 @@ -82,9 +76,3 @@ int pud_huge(pud_t pud)
2444 {
2445 return 0;
2446 }
2447 -
2448 -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
2449 - pmd_t *pmd, int write)
2450 -{
2451 - return NULL;
2452 -}
2453 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
2454 index d329537739c6..4242eab12e10 100644
2455 --- a/arch/sparc/mm/hugetlbpage.c
2456 +++ b/arch/sparc/mm/hugetlbpage.c
2457 @@ -215,12 +215,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
2458 return entry;
2459 }
2460
2461 -struct page *follow_huge_addr(struct mm_struct *mm,
2462 - unsigned long address, int write)
2463 -{
2464 - return ERR_PTR(-EINVAL);
2465 -}
2466 -
2467 int pmd_huge(pmd_t pmd)
2468 {
2469 return 0;
2470 @@ -230,9 +224,3 @@ int pud_huge(pud_t pud)
2471 {
2472 return 0;
2473 }
2474 -
2475 -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
2476 - pmd_t *pmd, int write)
2477 -{
2478 - return NULL;
2479 -}
2480 diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
2481 index e514899e1100..8a00c7b7b862 100644
2482 --- a/arch/tile/mm/hugetlbpage.c
2483 +++ b/arch/tile/mm/hugetlbpage.c
2484 @@ -150,12 +150,6 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
2485 return NULL;
2486 }
2487
2488 -struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
2489 - int write)
2490 -{
2491 - return ERR_PTR(-EINVAL);
2492 -}
2493 -
2494 int pmd_huge(pmd_t pmd)
2495 {
2496 return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
2497 @@ -166,28 +160,6 @@ int pud_huge(pud_t pud)
2498 return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
2499 }
2500
2501 -struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
2502 - pmd_t *pmd, int write)
2503 -{
2504 - struct page *page;
2505 -
2506 - page = pte_page(*(pte_t *)pmd);
2507 - if (page)
2508 - page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
2509 - return page;
2510 -}
2511 -
2512 -struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
2513 - pud_t *pud, int write)
2514 -{
2515 - struct page *page;
2516 -
2517 - page = pte_page(*(pte_t *)pud);
2518 - if (page)
2519 - page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
2520 - return page;
2521 -}
2522 -
2523 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
2524 {
2525 return 0;
2526 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h
2527 index a1410db38a1a..653dfa7662e1 100644
2528 --- a/arch/x86/include/asm/mwait.h
2529 +++ b/arch/x86/include/asm/mwait.h
2530 @@ -30,6 +30,14 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
2531 :: "a" (eax), "c" (ecx));
2532 }
2533
2534 +static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
2535 +{
2536 + trace_hardirqs_on();
2537 + /* "mwait %eax, %ecx;" */
2538 + asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
2539 + :: "a" (eax), "c" (ecx));
2540 +}
2541 +
2542 /*
2543 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
2544 * which can obviate IPI to trigger checking of need_resched.
2545 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
2546 index e127ddaa2d5a..6ad8a6396b75 100644
2547 --- a/arch/x86/kernel/process.c
2548 +++ b/arch/x86/kernel/process.c
2549 @@ -24,6 +24,7 @@
2550 #include <asm/syscalls.h>
2551 #include <asm/idle.h>
2552 #include <asm/uaccess.h>
2553 +#include <asm/mwait.h>
2554 #include <asm/i387.h>
2555 #include <asm/fpu-internal.h>
2556 #include <asm/debugreg.h>
2557 @@ -398,6 +399,53 @@ static void amd_e400_idle(void)
2558 default_idle();
2559 }
2560
2561 +/*
2562 + * Intel Core2 and older machines prefer MWAIT over HALT for C1.
2563 + * We can't rely on cpuidle installing MWAIT, because it will not load
2564 + * on systems that support only C1 -- so the boot default must be MWAIT.
2565 + *
2566 + * Some AMD machines are the opposite, they depend on using HALT.
2567 + *
2568 + * So for default C1, which is used during boot until cpuidle loads,
2569 + * use MWAIT-C1 on Intel HW that has it, else use HALT.
2570 + */
2571 +static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
2572 +{
2573 + if (c->x86_vendor != X86_VENDOR_INTEL)
2574 + return 0;
2575 +
2576 + if (!cpu_has(c, X86_FEATURE_MWAIT))
2577 + return 0;
2578 +
2579 + return 1;
2580 +}
2581 +
2582 +/*
2583 + * MONITOR/MWAIT with no hints, used for the default C1 state.
2584 + * This invokes MWAIT with interrupts enabled and no flags,
2585 + * which is backwards compatible with the original MWAIT implementation.
2586 + */
2587 +
2588 +static void mwait_idle(void)
2589 +{
2590 + if (!current_set_polling_and_test()) {
2591 + if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
2592 + smp_mb(); /* quirk */
2593 + clflush((void *)&current_thread_info()->flags);
2594 + smp_mb(); /* quirk */
2595 + }
2596 +
2597 + __monitor((void *)&current_thread_info()->flags, 0, 0);
2598 + if (!need_resched())
2599 + __sti_mwait(0, 0);
2600 + else
2601 + local_irq_enable();
2602 + } else {
2603 + local_irq_enable();
2604 + }
2605 + __current_clr_polling();
2606 +}
2607 +
2608 void select_idle_routine(const struct cpuinfo_x86 *c)
2609 {
2610 #ifdef CONFIG_SMP
2611 @@ -411,6 +459,9 @@ void select_idle_routine(const struct cpuinfo_x86 *c)
2612 /* E400: APIC timer interrupt does not wake up CPU from C1e */
2613 pr_info("using AMD E400 aware idle routine\n");
2614 x86_idle = amd_e400_idle;
2615 + } else if (prefer_mwait_c1_over_halt(c)) {
2616 + pr_info("using mwait in idle threads\n");
2617 + x86_idle = mwait_idle;
2618 } else
2619 x86_idle = default_idle;
2620 }
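The idle rework above boils down to a three-way selection: E400-afflicted AMD parts get the workaround idle loop, Intel parts with MWAIT get mwait_idle(), and everything else halts. A sketch of just the selection logic with the routines reduced to printouts; struct cpuinfo and its fields are simplified stand-ins for cpuinfo_x86:

    #include <stdio.h>

    struct cpuinfo { int is_intel; int has_mwait; int amd_e400; };

    static void default_idle(void)  { puts("hlt"); }
    static void mwait_idle(void)    { puts("monitor/mwait, no hints"); }
    static void amd_e400_idle(void) { puts("E400-aware idle"); }

    static int prefer_mwait_c1_over_halt(const struct cpuinfo *c)
    {
        return c->is_intel && c->has_mwait;
    }

    int main(void)
    {
        struct cpuinfo c = { .is_intel = 1, .has_mwait = 1 };
        void (*x86_idle)(void);

        if (c.amd_e400)
            x86_idle = amd_e400_idle;
        else if (prefer_mwait_c1_over_halt(&c))
            x86_idle = mwait_idle;
        else
            x86_idle = default_idle;

        x86_idle();
        return 0;
    }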
2621 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
2622 index ed7039465f16..47843b04d60f 100644
2623 --- a/arch/x86/kvm/vmx.c
2624 +++ b/arch/x86/kvm/vmx.c
2625 @@ -2381,8 +2381,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
2626
2627 if (enable_ept) {
2628 /* nested EPT: emulate EPT also to L1 */
2629 - nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT |
2630 - SECONDARY_EXEC_UNRESTRICTED_GUEST;
2631 + nested_vmx_secondary_ctls_high |= SECONDARY_EXEC_ENABLE_EPT;
2632 nested_vmx_ept_caps = VMX_EPT_PAGE_WALK_4_BIT |
2633 VMX_EPTP_WB_BIT | VMX_EPT_2MB_PAGE_BIT |
2634 VMX_EPT_INVEPT_BIT;
2635 @@ -2396,6 +2395,10 @@ static __init void nested_vmx_setup_ctls_msrs(void)
2636 } else
2637 nested_vmx_ept_caps = 0;
2638
2639 + if (enable_unrestricted_guest)
2640 + nested_vmx_secondary_ctls_high |=
2641 + SECONDARY_EXEC_UNRESTRICTED_GUEST;
2642 +
2643 /* miscellaneous data */
2644 rdmsr(MSR_IA32_VMX_MISC, nested_vmx_misc_low, nested_vmx_misc_high);
2645 nested_vmx_misc_low &= VMX_MISC_SAVE_EFER_LMA;
2646 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
2647 index a38dd816015b..5369ec6a8094 100644
2648 --- a/arch/x86/kvm/x86.c
2649 +++ b/arch/x86/kvm/x86.c
2650 @@ -5779,7 +5779,6 @@ int kvm_arch_init(void *opaque)
2651 kvm_set_mmio_spte_mask();
2652
2653 kvm_x86_ops = ops;
2654 - kvm_init_msr_list();
2655
2656 kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
2657 PT_DIRTY_MASK, PT64_NX_MASK, 0);
2658 @@ -7210,7 +7209,14 @@ void kvm_arch_hardware_disable(void)
2659
2660 int kvm_arch_hardware_setup(void)
2661 {
2662 - return kvm_x86_ops->hardware_setup();
2663 + int r;
2664 +
2665 + r = kvm_x86_ops->hardware_setup();
2666 + if (r != 0)
2667 + return r;
2668 +
2669 + kvm_init_msr_list();
2670 + return 0;
2671 }
2672
2673 void kvm_arch_hardware_unsetup(void)
2674 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
2675 index 006cc914994b..9161f764121e 100644
2676 --- a/arch/x86/mm/hugetlbpage.c
2677 +++ b/arch/x86/mm/hugetlbpage.c
2678 @@ -52,20 +52,8 @@ int pud_huge(pud_t pud)
2679 return 0;
2680 }
2681
2682 -struct page *
2683 -follow_huge_pmd(struct mm_struct *mm, unsigned long address,
2684 - pmd_t *pmd, int write)
2685 -{
2686 - return NULL;
2687 -}
2688 #else
2689
2690 -struct page *
2691 -follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
2692 -{
2693 - return ERR_PTR(-EINVAL);
2694 -}
2695 -
2696 /*
2697 * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal
2698 * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry.
2699 diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
2700 index 81f57e8c8f1b..73d328f480e5 100644
2701 --- a/arch/xtensa/Kconfig
2702 +++ b/arch/xtensa/Kconfig
2703 @@ -406,6 +406,36 @@ source "drivers/pcmcia/Kconfig"
2704
2705 source "drivers/pci/hotplug/Kconfig"
2706
2707 +config XTFPGA_LCD
2708 + bool "Enable XTFPGA LCD driver"
2709 + depends on XTENSA_PLATFORM_XTFPGA
2710 + default n
2711 + help
2712 + There's a 2x16 LCD on most XTFPGA boards; the kernel may output
2713 + progress messages there during bootup/shutdown. It may be useful
2714 + during board bringup.
2715 +
2716 + If unsure, say N.
2717 +
2718 +config XTFPGA_LCD_BASE_ADDR
2719 + hex "XTFPGA LCD base address"
2720 + depends on XTFPGA_LCD
2721 + default "0x0d0c0000"
2722 + help
2723 + Base address of the LCD controller inside the KIO region. Different
2724 + boards from the XTFPGA family have the LCD controller at different
2725 + addresses. Please consult the prototyping user guide for your board
2726 + for the correct address. A wrong address may cause a hardware lockup.
2727 +
2728 +config XTFPGA_LCD_8BIT_ACCESS
2729 + bool "Use 8-bit access to XTFPGA LCD"
2730 + depends on XTFPGA_LCD
2731 + default n
2732 + help
2733 + The LCD may be connected with a 4- or 8-bit interface; 8-bit access
2734 + may only be used with an 8-bit interface. Please consult the
2735 + prototyping user guide for your board for the correct interface width.
2736 +
2737 endmenu
2738
2739 menu "Executable file formats"
2740 diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
2741 index db5bb72e2f4e..62d84657c60b 100644
2742 --- a/arch/xtensa/include/uapi/asm/unistd.h
2743 +++ b/arch/xtensa/include/uapi/asm/unistd.h
2744 @@ -715,7 +715,7 @@ __SYSCALL(323, sys_process_vm_writev, 6)
2745 __SYSCALL(324, sys_name_to_handle_at, 5)
2746 #define __NR_open_by_handle_at 325
2747 __SYSCALL(325, sys_open_by_handle_at, 3)
2748 -#define __NR_sync_file_range 326
2749 +#define __NR_sync_file_range2 326
2750 __SYSCALL(326, sys_sync_file_range2, 6)
2751 #define __NR_perf_event_open 327
2752 __SYSCALL(327, sys_perf_event_open, 5)
2753 diff --git a/arch/xtensa/platforms/iss/network.c b/arch/xtensa/platforms/iss/network.c
2754 index d05f8feeb8d7..17b1ef3232e4 100644
2755 --- a/arch/xtensa/platforms/iss/network.c
2756 +++ b/arch/xtensa/platforms/iss/network.c
2757 @@ -349,8 +349,8 @@ static void iss_net_timer(unsigned long priv)
2758 {
2759 struct iss_net_private *lp = (struct iss_net_private *)priv;
2760
2761 - spin_lock(&lp->lock);
2762 iss_net_poll();
2763 + spin_lock(&lp->lock);
2764 mod_timer(&lp->timer, jiffies + lp->timer_val);
2765 spin_unlock(&lp->lock);
2766 }
2767 @@ -361,7 +361,7 @@ static int iss_net_open(struct net_device *dev)
2768 struct iss_net_private *lp = netdev_priv(dev);
2769 int err;
2770
2771 - spin_lock(&lp->lock);
2772 + spin_lock_bh(&lp->lock);
2773
2774 err = lp->tp.open(lp);
2775 if (err < 0)
2776 @@ -376,9 +376,11 @@ static int iss_net_open(struct net_device *dev)
2777 while ((err = iss_net_rx(dev)) > 0)
2778 ;
2779
2780 - spin_lock(&opened_lock);
2781 + spin_unlock_bh(&lp->lock);
2782 + spin_lock_bh(&opened_lock);
2783 list_add(&lp->opened_list, &opened);
2784 - spin_unlock(&opened_lock);
2785 + spin_unlock_bh(&opened_lock);
2786 + spin_lock_bh(&lp->lock);
2787
2788 init_timer(&lp->timer);
2789 lp->timer_val = ISS_NET_TIMER_VALUE;
2790 @@ -387,7 +389,7 @@ static int iss_net_open(struct net_device *dev)
2791 mod_timer(&lp->timer, jiffies + lp->timer_val);
2792
2793 out:
2794 - spin_unlock(&lp->lock);
2795 + spin_unlock_bh(&lp->lock);
2796 return err;
2797 }
2798
2799 @@ -395,7 +397,7 @@ static int iss_net_close(struct net_device *dev)
2800 {
2801 struct iss_net_private *lp = netdev_priv(dev);
2802 netif_stop_queue(dev);
2803 - spin_lock(&lp->lock);
2804 + spin_lock_bh(&lp->lock);
2805
2806 spin_lock(&opened_lock);
2807 list_del(&opened);
2808 @@ -405,18 +407,17 @@ static int iss_net_close(struct net_device *dev)
2809
2810 lp->tp.close(lp);
2811
2812 - spin_unlock(&lp->lock);
2813 + spin_unlock_bh(&lp->lock);
2814 return 0;
2815 }
2816
2817 static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
2818 {
2819 struct iss_net_private *lp = netdev_priv(dev);
2820 - unsigned long flags;
2821 int len;
2822
2823 netif_stop_queue(dev);
2824 - spin_lock_irqsave(&lp->lock, flags);
2825 + spin_lock_bh(&lp->lock);
2826
2827 len = lp->tp.write(lp, &skb);
2828
2829 @@ -438,7 +439,7 @@ static int iss_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
2830 pr_err("%s: %s failed(%d)\n", dev->name, __func__, len);
2831 }
2832
2833 - spin_unlock_irqrestore(&lp->lock, flags);
2834 + spin_unlock_bh(&lp->lock);
2835
2836 dev_kfree_skb(skb);
2837 return NETDEV_TX_OK;
2838 @@ -466,9 +467,9 @@ static int iss_net_set_mac(struct net_device *dev, void *addr)
2839
2840 if (!is_valid_ether_addr(hwaddr->sa_data))
2841 return -EADDRNOTAVAIL;
2842 - spin_lock(&lp->lock);
2843 + spin_lock_bh(&lp->lock);
2844 memcpy(dev->dev_addr, hwaddr->sa_data, ETH_ALEN);
2845 - spin_unlock(&lp->lock);
2846 + spin_unlock_bh(&lp->lock);
2847 return 0;
2848 }
2849
2850 @@ -520,11 +521,11 @@ static int iss_net_configure(int index, char *init)
2851 *lp = (struct iss_net_private) {
2852 .device_list = LIST_HEAD_INIT(lp->device_list),
2853 .opened_list = LIST_HEAD_INIT(lp->opened_list),
2854 - .lock = __SPIN_LOCK_UNLOCKED(lp.lock),
2855 .dev = dev,
2856 .index = index,
2857 - };
2858 + };
2859
2860 + spin_lock_init(&lp->lock);
2861 /*
2862 * If this name ends up conflicting with an existing registered
2863 * netdevice, that is OK, register_netdev{,ice}() will notice this
2864 diff --git a/arch/xtensa/platforms/xtfpga/Makefile b/arch/xtensa/platforms/xtfpga/Makefile
2865 index b9ae206340cd..7839d38b2337 100644
2866 --- a/arch/xtensa/platforms/xtfpga/Makefile
2867 +++ b/arch/xtensa/platforms/xtfpga/Makefile
2868 @@ -6,4 +6,5 @@
2869 #
2870 # Note 2! The CFLAGS definitions are in the main makefile...
2871
2872 -obj-y = setup.o lcd.o
2873 +obj-y += setup.o
2874 +obj-$(CONFIG_XTFPGA_LCD) += lcd.o
2875 diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
2876 index aeb316b7ff88..e8cc86fbba09 100644
2877 --- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
2878 +++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
2879 @@ -40,9 +40,6 @@
2880
2881 /* UART */
2882 #define DUART16552_PADDR (XCHAL_KIO_PADDR + 0x0D050020)
2883 -/* LCD instruction and data addresses. */
2884 -#define LCD_INSTR_ADDR ((char *)IOADDR(0x0D040000))
2885 -#define LCD_DATA_ADDR ((char *)IOADDR(0x0D040004))
2886
2887 /* Misc. */
2888 #define XTFPGA_FPGAREGS_VADDR IOADDR(0x0D020000)
2889 diff --git a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
2890 index 0e435645af5a..4c8541ed1139 100644
2891 --- a/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
2892 +++ b/arch/xtensa/platforms/xtfpga/include/platform/lcd.h
2893 @@ -11,10 +11,25 @@
2894 #ifndef __XTENSA_XTAVNET_LCD_H
2895 #define __XTENSA_XTAVNET_LCD_H
2896
2897 +#ifdef CONFIG_XTFPGA_LCD
2898 /* Display string STR at position POS on the LCD. */
2899 void lcd_disp_at_pos(char *str, unsigned char pos);
2900
2901 /* Shift the contents of the LCD display left or right. */
2902 void lcd_shiftleft(void);
2903 void lcd_shiftright(void);
2904 +#else
2905 +static inline void lcd_disp_at_pos(char *str, unsigned char pos)
2906 +{
2907 +}
2908 +
2909 +static inline void lcd_shiftleft(void)
2910 +{
2911 +}
2912 +
2913 +static inline void lcd_shiftright(void)
2914 +{
2915 +}
2916 +#endif
2917 +
2918 #endif
2919 diff --git a/arch/xtensa/platforms/xtfpga/lcd.c b/arch/xtensa/platforms/xtfpga/lcd.c
2920 index 2872301598df..4dc0c1b43f4b 100644
2921 --- a/arch/xtensa/platforms/xtfpga/lcd.c
2922 +++ b/arch/xtensa/platforms/xtfpga/lcd.c
2923 @@ -1,50 +1,63 @@
2924 /*
2925 - * Driver for the LCD display on the Tensilica LX60 Board.
2926 + * Driver for the LCD display on the Tensilica XTFPGA board family.
2927 + * http://www.mytechcorp.com/cfdata/productFile/File1/MOC-16216B-B-A0A04.pdf
2928 *
2929 * This file is subject to the terms and conditions of the GNU General Public
2930 * License. See the file "COPYING" in the main directory of this archive
2931 * for more details.
2932 *
2933 * Copyright (C) 2001, 2006 Tensilica Inc.
2934 + * Copyright (C) 2015 Cadence Design Systems Inc.
2935 */
2936
2937 -/*
2938 - *
2939 - * FIXME: this code is from the examples from the LX60 user guide.
2940 - *
2941 - * The lcd_pause function does busy waiting, which is probably not
2942 - * great. Maybe the code could be changed to use kernel timers, or
2943 - * change the hardware to not need to wait.
2944 - */
2945 -
2946 +#include <linux/delay.h>
2947 #include <linux/init.h>
2948 #include <linux/io.h>
2949
2950 #include <platform/hardware.h>
2951 #include <platform/lcd.h>
2952 -#include <linux/delay.h>
2953
2954 -#define LCD_PAUSE_ITERATIONS 4000
2955 +/* LCD instruction and data addresses. */
2956 +#define LCD_INSTR_ADDR ((char *)IOADDR(CONFIG_XTFPGA_LCD_BASE_ADDR))
2957 +#define LCD_DATA_ADDR (LCD_INSTR_ADDR + 4)
2958 +
2959 #define LCD_CLEAR 0x1
2960 #define LCD_DISPLAY_ON 0xc
2961
2962 /* 8bit and 2 lines display */
2963 #define LCD_DISPLAY_MODE8BIT 0x38
2964 +#define LCD_DISPLAY_MODE4BIT 0x28
2965 #define LCD_DISPLAY_POS 0x80
2966 #define LCD_SHIFT_LEFT 0x18
2967 #define LCD_SHIFT_RIGHT 0x1c
2968
2969 +static void lcd_put_byte(u8 *addr, u8 data)
2970 +{
2971 +#ifdef CONFIG_XTFPGA_LCD_8BIT_ACCESS
2972 + ACCESS_ONCE(*addr) = data;
2973 +#else
2974 + ACCESS_ONCE(*addr) = data & 0xf0;
2975 + ACCESS_ONCE(*addr) = (data << 4) & 0xf0;
2976 +#endif
2977 +}
2978 +
2979 static int __init lcd_init(void)
2980 {
2981 - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
2982 + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
2983 mdelay(5);
2984 - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
2985 + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
2986 udelay(200);
2987 - *LCD_INSTR_ADDR = LCD_DISPLAY_MODE8BIT;
2988 + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE8BIT;
2989 + udelay(50);
2990 +#ifndef CONFIG_XTFPGA_LCD_8BIT_ACCESS
2991 + ACCESS_ONCE(*LCD_INSTR_ADDR) = LCD_DISPLAY_MODE4BIT;
2992 + udelay(50);
2993 + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_MODE4BIT);
2994 udelay(50);
2995 - *LCD_INSTR_ADDR = LCD_DISPLAY_ON;
2996 +#endif
2997 + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_ON);
2998 udelay(50);
2999 - *LCD_INSTR_ADDR = LCD_CLEAR;
3000 + lcd_put_byte(LCD_INSTR_ADDR, LCD_CLEAR);
3001 mdelay(10);
3002 lcd_disp_at_pos("XTENSA LINUX", 0);
3003 return 0;
3004 @@ -52,10 +65,10 @@ static int __init lcd_init(void)
3005
3006 void lcd_disp_at_pos(char *str, unsigned char pos)
3007 {
3008 - *LCD_INSTR_ADDR = LCD_DISPLAY_POS | pos;
3009 + lcd_put_byte(LCD_INSTR_ADDR, LCD_DISPLAY_POS | pos);
3010 udelay(100);
3011 while (*str != 0) {
3012 - *LCD_DATA_ADDR = *str;
3013 + lcd_put_byte(LCD_DATA_ADDR, *str);
3014 udelay(200);
3015 str++;
3016 }
3017 @@ -63,13 +76,13 @@ void lcd_disp_at_pos(char *str, unsigned char pos)
3018
3019 void lcd_shiftleft(void)
3020 {
3021 - *LCD_INSTR_ADDR = LCD_SHIFT_LEFT;
3022 + lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_LEFT);
3023 udelay(50);
3024 }
3025
3026 void lcd_shiftright(void)
3027 {
3028 - *LCD_INSTR_ADDR = LCD_SHIFT_RIGHT;
3029 + lcd_put_byte(LCD_INSTR_ADDR, LCD_SHIFT_RIGHT);
3030 udelay(50);
3031 }
3032
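lcd_put_byte() above hides the bus width: with an 8-bit interface the byte goes out in one write, with a 4-bit interface it goes out as two writes carrying the high then the low nibble on the upper data lines. A sketch with the MMIO write replaced by an array append:

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t bus[2];
    static int bus_n;

    static void lcd_put_byte(uint8_t data, int eight_bit)
    {
        if (eight_bit) {
            bus[bus_n++] = data;                /* one full-width write */
        } else {
            bus[bus_n++] = data & 0xf0;         /* high nibble first */
            bus[bus_n++] = (data << 4) & 0xf0;  /* then the low nibble */
        }
    }

    int main(void)
    {
        lcd_put_byte(0x38, 0);  /* 4-bit access */
        printf("writes: %#x %#x\n", bus[0], bus[1]);
        return 0;
    }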
3033 diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
3034 index 755b90c40ddf..c0b39f304ea3 100644
3035 --- a/drivers/acpi/acpica/tbinstal.c
3036 +++ b/drivers/acpi/acpica/tbinstal.c
3037 @@ -346,7 +346,6 @@ acpi_tb_install_standard_table(acpi_physical_address address,
3038 */
3039 acpi_tb_uninstall_table(&new_table_desc);
3040 *table_index = i;
3041 - (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
3042 return_ACPI_STATUS(AE_OK);
3043 }
3044 }
3045 diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
3046 index a7a3edd28beb..f23179e84128 100644
3047 --- a/drivers/acpi/sbs.c
3048 +++ b/drivers/acpi/sbs.c
3049 @@ -670,7 +670,7 @@ static int acpi_sbs_add(struct acpi_device *device)
3050 if (!sbs_manager_broken) {
3051 result = acpi_manager_get_info(sbs);
3052 if (!result) {
3053 - sbs->manager_present = 0;
3054 + sbs->manager_present = 1;
3055 for (id = 0; id < MAX_SBS_BAT; ++id)
3056 if ((sbs->batteries_supported & (1 << id)))
3057 acpi_battery_add(sbs, id);
3058 diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
3059 index c9ea3dfb4974..0446d0d5efa5 100644
3060 --- a/drivers/acpi/scan.c
3061 +++ b/drivers/acpi/scan.c
3062 @@ -251,7 +251,11 @@ bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
3063 struct acpi_device_physical_node *pn;
3064 bool offline = true;
3065
3066 - mutex_lock(&adev->physical_node_lock);
3067 + /*
3068 + * acpi_container_offline() calls this for all of the container's
3069 + * children under the container's physical_node_lock lock.
3070 + */
3071 + mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
3072
3073 list_for_each_entry(pn, &adev->physical_node_list, node)
3074 if (device_supports_offline(pn->dev) && !pn->dev->offline) {
3075 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
3076 index 876bae5ade33..79bc203f51ef 100644
3077 --- a/drivers/base/bus.c
3078 +++ b/drivers/base/bus.c
3079 @@ -515,11 +515,11 @@ int bus_add_device(struct device *dev)
3080 goto out_put;
3081 error = device_add_groups(dev, bus->dev_groups);
3082 if (error)
3083 - goto out_groups;
3084 + goto out_id;
3085 error = sysfs_create_link(&bus->p->devices_kset->kobj,
3086 &dev->kobj, dev_name(dev));
3087 if (error)
3088 - goto out_id;
3089 + goto out_groups;
3090 error = sysfs_create_link(&dev->kobj,
3091 &dev->bus->p->subsys.kobj, "subsystem");
3092 if (error)
3093 diff --git a/drivers/base/platform.c b/drivers/base/platform.c
3094 index b2afc29403f9..360272cd4549 100644
3095 --- a/drivers/base/platform.c
3096 +++ b/drivers/base/platform.c
3097 @@ -101,6 +101,15 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
3098 }
3099
3100 r = platform_get_resource(dev, IORESOURCE_IRQ, num);
3101 + /*
3102 + * The resources may pass trigger flags to the irqs that need
3103 + * to be set up. It so happens that the trigger flags for
3104 + * IORESOURCE_BITS correspond 1-to-1 to the IRQF_TRIGGER*
3105 + * settings.
3106 + */
3107 + if (r && r->flags & IORESOURCE_BITS)
3108 + irqd_set_trigger_type(irq_get_irq_data(r->start),
3109 + r->flags & IORESOURCE_BITS);
3110
3111 return r ? r->start : -ENXIO;
3112 #endif
3113 diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
3114 index 76b5be937de6..5d684d97dc6d 100644
3115 --- a/drivers/block/rbd.c
3116 +++ b/drivers/block/rbd.c
3117 @@ -2261,6 +2261,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
3118 result, xferred);
3119 if (!img_request->result)
3120 img_request->result = result;
3121 + /*
3122 + * Need to end I/O on the entire obj_request worth of
3123 + * bytes in case of error.
3124 + */
3125 + xferred = obj_request->length;
3126 }
3127
3128 /* Image object requests don't own their page array */
3129 diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
3130 index de4c8499cbac..288547a3c566 100644
3131 --- a/drivers/bluetooth/ath3k.c
3132 +++ b/drivers/bluetooth/ath3k.c
3133 @@ -65,6 +65,7 @@ static const struct usb_device_id ath3k_table[] = {
3134 /* Atheros AR3011 with sflash firmware*/
3135 { USB_DEVICE(0x0489, 0xE027) },
3136 { USB_DEVICE(0x0489, 0xE03D) },
3137 + { USB_DEVICE(0x04F2, 0xAFF1) },
3138 { USB_DEVICE(0x0930, 0x0215) },
3139 { USB_DEVICE(0x0CF3, 0x3002) },
3140 { USB_DEVICE(0x0CF3, 0xE019) },
3141 diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
3142 index 9a7d24f95c5e..813bb06a8889 100644
3143 --- a/drivers/bluetooth/btusb.c
3144 +++ b/drivers/bluetooth/btusb.c
3145 @@ -153,6 +153,7 @@ static const struct usb_device_id blacklist_table[] = {
3146 /* Atheros 3011 with sflash firmware */
3147 { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
3148 { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
3149 + { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE },
3150 { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE },
3151 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
3152 { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE },
3153 diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
3154 index a23ac0c724f0..0283a5713d6c 100644
3155 --- a/drivers/clk/at91/clk-usb.c
3156 +++ b/drivers/clk/at91/clk-usb.c
3157 @@ -56,22 +56,53 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
3158 return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
3159 }
3160
3161 -static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
3162 - unsigned long *parent_rate)
3163 +static long at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw,
3164 + unsigned long rate,
3165 + unsigned long *best_parent_rate,
3166 + struct clk_hw **best_parent_hw)
3167 {
3168 - unsigned long div;
3169 + struct clk *parent = NULL;
3170 + long best_rate = -EINVAL;
3171 + unsigned long tmp_rate;
3172 + int best_diff = -1;
3173 + int tmp_diff;
3174 + int i;
3175
3176 - if (!rate)
3177 - return -EINVAL;
3178 + for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
3179 + int div;
3180
3181 - if (rate >= *parent_rate)
3182 - return *parent_rate;
3183 + parent = clk_get_parent_by_index(hw->clk, i);
3184 + if (!parent)
3185 + continue;
3186 +
3187 + for (div = 1; div < SAM9X5_USB_MAX_DIV + 2; div++) {
3188 + unsigned long tmp_parent_rate;
3189 +
3190 + tmp_parent_rate = rate * div;
3191 + tmp_parent_rate = __clk_round_rate(parent,
3192 + tmp_parent_rate);
3193 + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div);
3194 + if (tmp_rate < rate)
3195 + tmp_diff = rate - tmp_rate;
3196 + else
3197 + tmp_diff = tmp_rate - rate;
3198 +
3199 + if (best_diff < 0 || best_diff > tmp_diff) {
3200 + best_rate = tmp_rate;
3201 + best_diff = tmp_diff;
3202 + *best_parent_rate = tmp_parent_rate;
3203 + *best_parent_hw = __clk_get_hw(parent);
3204 + }
3205 +
3206 + if (!best_diff || tmp_rate < rate)
3207 + break;
3208 + }
3209
3210 - div = DIV_ROUND_CLOSEST(*parent_rate, rate);
3211 - if (div > SAM9X5_USB_MAX_DIV + 1)
3212 - div = SAM9X5_USB_MAX_DIV + 1;
3213 + if (!best_diff)
3214 + break;
3215 + }
3216
3217 - return DIV_ROUND_CLOSEST(*parent_rate, div);
3218 + return best_rate;
3219 }
3220
3221 static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
3222 @@ -121,7 +152,7 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
3223
3224 static const struct clk_ops at91sam9x5_usb_ops = {
3225 .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
3226 - .round_rate = at91sam9x5_clk_usb_round_rate,
3227 + .determine_rate = at91sam9x5_clk_usb_determine_rate,
3228 .get_parent = at91sam9x5_clk_usb_get_parent,
3229 .set_parent = at91sam9x5_clk_usb_set_parent,
3230 .set_rate = at91sam9x5_clk_usb_set_rate,
3231 @@ -159,7 +190,7 @@ static const struct clk_ops at91sam9n12_usb_ops = {
3232 .disable = at91sam9n12_clk_usb_disable,
3233 .is_enabled = at91sam9n12_clk_usb_is_enabled,
3234 .recalc_rate = at91sam9x5_clk_usb_recalc_rate,
3235 - .round_rate = at91sam9x5_clk_usb_round_rate,
3236 + .determine_rate = at91sam9x5_clk_usb_determine_rate,
3237 .set_rate = at91sam9x5_clk_usb_set_rate,
3238 };
3239
3240 @@ -179,7 +210,8 @@ at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name,
3241 init.ops = &at91sam9x5_usb_ops;
3242 init.parent_names = parent_names;
3243 init.num_parents = num_parents;
3244 - init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE;
3245 + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE |
3246 + CLK_SET_RATE_PARENT;
3247
3248 usb->hw.init = &init;
3249 usb->pmc = pmc;
3250 @@ -207,7 +239,7 @@ at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name,
3251 init.ops = &at91sam9n12_usb_ops;
3252 init.parent_names = &parent_name;
3253 init.num_parents = 1;
3254 - init.flags = CLK_SET_RATE_GATE;
3255 + init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
3256
3257 usb->hw.init = &init;
3258 usb->pmc = pmc;
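
The two hunks above move the at91 USB clock from .round_rate to .determine_rate and add CLK_SET_RATE_PARENT, so a rate request can now pick both a parent and a divider instead of being clamped to the current parent. The search itself is brute force: for each parent, try every divider, keep the closest result, and stop early on an exact hit or once the candidate rate drops below the target. A minimal standalone sketch of that closest-rate search, with illustrative fixed-rate parents and divider limit (not the driver's real values):

#include <stdio.h>
#include <stdlib.h>

#define MAX_DIV 15	/* stands in for SAM9X5_USB_MAX_DIV; illustrative */

int main(void)
{
	/* hypothetical fixed-rate candidate parents, target 48 MHz */
	const long parents[] = { 480000000, 12000000 };
	const long target = 48000000;
	long best_rate = -1, best_diff = -1;

	for (unsigned int p = 0; p < sizeof(parents) / sizeof(parents[0]); p++) {
		for (long div = 1; div <= MAX_DIV + 1; div++) {
			long rate = (parents[p] + div / 2) / div; /* DIV_ROUND_CLOSEST */
			long diff = labs(rate - target);

			if (best_diff < 0 || diff < best_diff) {
				best_diff = diff;
				best_rate = rate;
			}
			/* exact hit, or rates only get lower from here */
			if (best_diff == 0 || rate < target)
				break;
		}
		if (best_diff == 0)
			break;
	}
	printf("best %ld Hz (off by %ld)\n", best_rate, best_diff);
	return 0;
}
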
3259 diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
3260 index cfa9eb4fe9ca..f76b6853b61c 100644
3261 --- a/drivers/clk/qcom/clk-rcg2.c
3262 +++ b/drivers/clk/qcom/clk-rcg2.c
3263 @@ -240,7 +240,7 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
3264 mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
3265 cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
3266 cfg |= rcg->parent_map[f->src] << CFG_SRC_SEL_SHIFT;
3267 - if (rcg->mnd_width && f->n)
3268 + if (rcg->mnd_width && f->n && (f->m != f->n))
3269 cfg |= CFG_MODE_DUAL_EDGE;
3270 ret = regmap_update_bits(rcg->clkr.regmap,
3271 rcg->cmd_rcgr + CFG_REG, mask, cfg);
3272 diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
3273 index 5cd62a709ac7..8c37d9c374eb 100644
3274 --- a/drivers/clk/qcom/gcc-ipq806x.c
3275 +++ b/drivers/clk/qcom/gcc-ipq806x.c
3276 @@ -514,8 +514,8 @@ static struct freq_tbl clk_tbl_gsbi_qup[] = {
3277 { 10800000, P_PXO, 1, 2, 5 },
3278 { 15060000, P_PLL8, 1, 2, 51 },
3279 { 24000000, P_PLL8, 4, 1, 4 },
3280 + { 25000000, P_PXO, 1, 0, 0 },
3281 { 25600000, P_PLL8, 1, 1, 15 },
3282 - { 27000000, P_PXO, 1, 0, 0 },
3283 { 48000000, P_PLL8, 4, 1, 2 },
3284 { 51200000, P_PLL8, 1, 2, 15 },
3285 { }
3286 diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
3287 index 940f02837b82..6197317ea2d7 100644
3288 --- a/drivers/clk/samsung/clk-exynos4.c
3289 +++ b/drivers/clk/samsung/clk-exynos4.c
3290 @@ -1354,7 +1354,7 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
3291 VPLL_LOCK, VPLL_CON0, NULL),
3292 };
3293
3294 -static void __init exynos4_core_down_clock(enum exynos4_soc soc)
3295 +static void __init exynos4x12_core_down_clock(void)
3296 {
3297 unsigned int tmp;
3298
3299 @@ -1373,11 +1373,9 @@ static void __init exynos4_core_down_clock(enum exynos4_soc soc)
3300 __raw_writel(tmp, reg_base + PWR_CTRL1);
3301
3302 /*
3303 - * Disable the clock up feature on Exynos4x12, in case it was
3304 - * enabled by bootloader.
3305 + * Disable the clock up feature in case it was enabled by bootloader.
3306 */
3307 - if (exynos4_soc == EXYNOS4X12)
3308 - __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
3309 + __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
3310 }
3311
3312 /* register exynos4 clocks */
3313 @@ -1474,7 +1472,8 @@ static void __init exynos4_clk_init(struct device_node *np,
3314 samsung_clk_register_alias(ctx, exynos4_aliases,
3315 ARRAY_SIZE(exynos4_aliases));
3316
3317 - exynos4_core_down_clock(soc);
3318 + if (soc == EXYNOS4X12)
3319 + exynos4x12_core_down_clock();
3320 exynos4_clk_sleep_init();
3321
3322 samsung_clk_of_add_provider(np, ctx);
3323 diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c
3324 index 97dc8595c3cd..c51f7c84c163 100644
3325 --- a/drivers/clk/tegra/clk.c
3326 +++ b/drivers/clk/tegra/clk.c
3327 @@ -272,7 +272,7 @@ void __init tegra_add_of_provider(struct device_node *np)
3328 of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
3329
3330 rst_ctlr.of_node = np;
3331 - rst_ctlr.nr_resets = clk_num * 32;
3332 + rst_ctlr.nr_resets = periph_banks * 32;
3333 reset_controller_register(&rst_ctlr);
3334 }
3335
3336 diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
3337 index cb98fa54573d..f7826ee3754b 100644
3338 --- a/drivers/crypto/omap-aes.c
3339 +++ b/drivers/crypto/omap-aes.c
3340 @@ -554,15 +554,23 @@ static int omap_aes_crypt_dma_stop(struct omap_aes_dev *dd)
3341 return err;
3342 }
3343
3344 -static int omap_aes_check_aligned(struct scatterlist *sg)
3345 +static int omap_aes_check_aligned(struct scatterlist *sg, int total)
3346 {
3347 + int len = 0;
3348 +
3349 while (sg) {
3350 if (!IS_ALIGNED(sg->offset, 4))
3351 return -1;
3352 if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
3353 return -1;
3354 +
3355 + len += sg->length;
3356 sg = sg_next(sg);
3357 }
3358 +
3359 + if (len != total)
3360 + return -1;
3361 +
3362 return 0;
3363 }
3364
3365 @@ -633,8 +641,8 @@ static int omap_aes_handle_queue(struct omap_aes_dev *dd,
3366 dd->in_sg = req->src;
3367 dd->out_sg = req->dst;
3368
3369 - if (omap_aes_check_aligned(dd->in_sg) ||
3370 - omap_aes_check_aligned(dd->out_sg)) {
3371 + if (omap_aes_check_aligned(dd->in_sg, dd->total) ||
3372 + omap_aes_check_aligned(dd->out_sg, dd->total)) {
3373 if (omap_aes_copy_sgs(dd))
3374 pr_err("Failed to copy SGs for unaligned cases\n");
3375 dd->sgs_copied = 1;
3376 diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
3377 index 418e38650363..a93ddbc1948e 100644
3378 --- a/drivers/gpio/gpio-mvebu.c
3379 +++ b/drivers/gpio/gpio-mvebu.c
3380 @@ -305,11 +305,13 @@ static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
3381 {
3382 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3383 struct mvebu_gpio_chip *mvchip = gc->private;
3384 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
3385 u32 mask = 1 << (d->irq - gc->irq_base);
3386
3387 irq_gc_lock(gc);
3388 - gc->mask_cache &= ~mask;
3389 - writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
3390 + ct->mask_cache_priv &= ~mask;
3391 +
3392 + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
3393 irq_gc_unlock(gc);
3394 }
3395
3396 @@ -317,11 +319,13 @@ static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
3397 {
3398 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3399 struct mvebu_gpio_chip *mvchip = gc->private;
3400 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
3401 +
3402 u32 mask = 1 << (d->irq - gc->irq_base);
3403
3404 irq_gc_lock(gc);
3405 - gc->mask_cache |= mask;
3406 - writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
3407 + ct->mask_cache_priv |= mask;
3408 + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_edge_mask(mvchip));
3409 irq_gc_unlock(gc);
3410 }
3411
3412 @@ -329,11 +333,13 @@ static void mvebu_gpio_level_irq_mask(struct irq_data *d)
3413 {
3414 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3415 struct mvebu_gpio_chip *mvchip = gc->private;
3416 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
3417 +
3418 u32 mask = 1 << (d->irq - gc->irq_base);
3419
3420 irq_gc_lock(gc);
3421 - gc->mask_cache &= ~mask;
3422 - writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
3423 + ct->mask_cache_priv &= ~mask;
3424 + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
3425 irq_gc_unlock(gc);
3426 }
3427
3428 @@ -341,11 +347,13 @@ static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
3429 {
3430 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
3431 struct mvebu_gpio_chip *mvchip = gc->private;
3432 + struct irq_chip_type *ct = irq_data_get_chip_type(d);
3433 +
3434 u32 mask = 1 << (d->irq - gc->irq_base);
3435
3436 irq_gc_lock(gc);
3437 - gc->mask_cache |= mask;
3438 - writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
3439 + ct->mask_cache_priv |= mask;
3440 + writel_relaxed(ct->mask_cache_priv, mvebu_gpioreg_level_mask(mvchip));
3441 irq_gc_unlock(gc);
3442 }
3443
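
The four mvebu hunks above all make the same change: the edge and level interrupt flavours each get their own mask cache (ct->mask_cache_priv) instead of sharing gc->mask_cache. With a shared cache, bits belonging to the edge mask register could be written back into the level mask register and vice versa. A grossly simplified userspace analogue of the clobbering, with the register writes modelled as printfs:

#include <stdio.h>

int main(void)
{
	/* old scheme: one cache shared by the edge and level registers */
	unsigned int shared = 0;

	shared |= 1u << 0;	/* unmask edge IRQ0: edge reg <- 0x1 */
	shared |= 1u << 1;	/* unmask level IRQ1: level reg <- 0x3 (!) */
	printf("shared cache writes %#x into the level register\n", shared);

	/* new scheme: one cache per irq_chip_type, as in the fix above */
	unsigned int edge = 0, level = 0;

	edge |= 1u << 0;	/* edge reg <- 0x1 */
	level |= 1u << 1;	/* level reg <- 0x2; edge bits cannot leak */
	printf("per-type cache writes %#x into the level register\n", level);
	return 0;
}
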
3444 diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
3445 index 59f23fca0596..b51f02758836 100644
3446 --- a/drivers/gpu/drm/i915/i915_drv.c
3447 +++ b/drivers/gpu/drm/i915/i915_drv.c
3448 @@ -1088,7 +1088,7 @@ static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
3449 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
3450
3451 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
3452 - s->gfx_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
3453 + s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
3454
3455 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
3456 s->ecochk = I915_READ(GAM_ECOCHK);
3457 @@ -1170,7 +1170,7 @@ static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
3458 I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
3459
3460 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
3461 - I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->gfx_max_req_count);
3462 + I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
3463
3464 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
3465 I915_WRITE(GAM_ECOCHK, s->ecochk);
3466 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
3467 index 0ab77f319cef..23329b48766f 100644
3468 --- a/drivers/gpu/drm/i915/i915_irq.c
3469 +++ b/drivers/gpu/drm/i915/i915_irq.c
3470 @@ -3998,14 +3998,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
3471 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3472 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3473 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3474 - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3475 - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3476 + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3477 I915_WRITE16(IMR, dev_priv->irq_mask);
3478
3479 I915_WRITE16(IER,
3480 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3481 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3482 - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3483 I915_USER_INTERRUPT);
3484 POSTING_READ16(IER);
3485
3486 @@ -4173,14 +4171,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
3487 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3488 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3489 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3490 - I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3491 - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3492 + I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3493
3494 enable_mask =
3495 I915_ASLE_INTERRUPT |
3496 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3497 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3498 - I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3499 I915_USER_INTERRUPT;
3500
3501 if (I915_HAS_HOTPLUG(dev)) {
3502 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
3503 index 3f1e54bfcddb..e08a4729596b 100644
3504 --- a/drivers/gpu/drm/i915/i915_reg.h
3505 +++ b/drivers/gpu/drm/i915/i915_reg.h
3506 @@ -1657,6 +1657,7 @@ enum punit_power_well {
3507 #define GMBUS_CYCLE_INDEX (2<<25)
3508 #define GMBUS_CYCLE_STOP (4<<25)
3509 #define GMBUS_BYTE_COUNT_SHIFT 16
3510 +#define GMBUS_BYTE_COUNT_MAX 256U
3511 #define GMBUS_SLAVE_INDEX_SHIFT 8
3512 #define GMBUS_SLAVE_ADDR_SHIFT 1
3513 #define GMBUS_SLAVE_READ (1<<0)
3514 diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
3515 index b31088a551f2..56e437e31580 100644
3516 --- a/drivers/gpu/drm/i915/intel_i2c.c
3517 +++ b/drivers/gpu/drm/i915/intel_i2c.c
3518 @@ -270,18 +270,17 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
3519 }
3520
3521 static int
3522 -gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
3523 - u32 gmbus1_index)
3524 +gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
3525 + unsigned short addr, u8 *buf, unsigned int len,
3526 + u32 gmbus1_index)
3527 {
3528 int reg_offset = dev_priv->gpio_mmio_base;
3529 - u16 len = msg->len;
3530 - u8 *buf = msg->buf;
3531
3532 I915_WRITE(GMBUS1 + reg_offset,
3533 gmbus1_index |
3534 GMBUS_CYCLE_WAIT |
3535 (len << GMBUS_BYTE_COUNT_SHIFT) |
3536 - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
3537 + (addr << GMBUS_SLAVE_ADDR_SHIFT) |
3538 GMBUS_SLAVE_READ | GMBUS_SW_RDY);
3539 while (len) {
3540 int ret;
3541 @@ -303,11 +302,35 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
3542 }
3543
3544 static int
3545 -gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
3546 +gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
3547 + u32 gmbus1_index)
3548 {
3549 - int reg_offset = dev_priv->gpio_mmio_base;
3550 - u16 len = msg->len;
3551 u8 *buf = msg->buf;
3552 + unsigned int rx_size = msg->len;
3553 + unsigned int len;
3554 + int ret;
3555 +
3556 + do {
3557 + len = min(rx_size, GMBUS_BYTE_COUNT_MAX);
3558 +
3559 + ret = gmbus_xfer_read_chunk(dev_priv, msg->addr,
3560 + buf, len, gmbus1_index);
3561 + if (ret)
3562 + return ret;
3563 +
3564 + rx_size -= len;
3565 + buf += len;
3566 + } while (rx_size != 0);
3567 +
3568 + return 0;
3569 +}
3570 +
3571 +static int
3572 +gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
3573 + unsigned short addr, u8 *buf, unsigned int len)
3574 +{
3575 + int reg_offset = dev_priv->gpio_mmio_base;
3576 + unsigned int chunk_size = len;
3577 u32 val, loop;
3578
3579 val = loop = 0;
3580 @@ -319,8 +342,8 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
3581 I915_WRITE(GMBUS3 + reg_offset, val);
3582 I915_WRITE(GMBUS1 + reg_offset,
3583 GMBUS_CYCLE_WAIT |
3584 - (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
3585 - (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
3586 + (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
3587 + (addr << GMBUS_SLAVE_ADDR_SHIFT) |
3588 GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
3589 while (len) {
3590 int ret;
3591 @@ -337,6 +360,29 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
3592 if (ret)
3593 return ret;
3594 }
3595 +
3596 + return 0;
3597 +}
3598 +
3599 +static int
3600 +gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
3601 +{
3602 + u8 *buf = msg->buf;
3603 + unsigned int tx_size = msg->len;
3604 + unsigned int len;
3605 + int ret;
3606 +
3607 + do {
3608 + len = min(tx_size, GMBUS_BYTE_COUNT_MAX);
3609 +
3610 + ret = gmbus_xfer_write_chunk(dev_priv, msg->addr, buf, len);
3611 + if (ret)
3612 + return ret;
3613 +
3614 + buf += len;
3615 + tx_size -= len;
3616 + } while (tx_size != 0);
3617 +
3618 return 0;
3619 }
3620
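
The intel_i2c rework above splits GMBUS reads and writes into chunks of at most GMBUS_BYTE_COUNT_MAX (256) bytes, since the hardware byte-count field cannot describe a longer transfer. The loop shape is generic; here is a self-contained sketch of the same pattern, where transfer_chunk() is a hypothetical stand-in for the per-chunk hardware transfer:

#include <stddef.h>
#include <stdio.h>

#define CHUNK_MAX 256u	/* mirrors GMBUS_BYTE_COUNT_MAX */

/* hypothetical stand-in for the per-chunk hardware transfer */
static int transfer_chunk(unsigned short addr, unsigned char *buf, size_t len)
{
	(void)buf;
	printf("addr %#x: chunk of %zu bytes\n", (unsigned int)addr, len);
	return 0;
}

static int transfer(unsigned short addr, unsigned char *buf, size_t total)
{
	do {
		size_t len = total < CHUNK_MAX ? total : CHUNK_MAX;
		int ret = transfer_chunk(addr, buf, len);

		if (ret)
			return ret;	/* abort the message on error */
		buf += len;
		total -= len;
	} while (total != 0);

	return 0;
}

int main(void)
{
	unsigned char edid[400];

	return transfer(0x50, edid, sizeof(edid));	/* 256 + 144 */
}
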
3621 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
3622 index 9f0e62529c46..5d73e5f3845d 100644
3623 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
3624 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
3625 @@ -330,8 +330,10 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
3626 misc |= ATOM_COMPOSITESYNC;
3627 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
3628 misc |= ATOM_INTERLACE;
3629 - if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
3630 + if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3631 misc |= ATOM_DOUBLE_CLOCK_MODE;
3632 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
3633 + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
3634
3635 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
3636 args.ucCRTC = radeon_crtc->crtc_id;
3637 @@ -374,8 +376,10 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
3638 misc |= ATOM_COMPOSITESYNC;
3639 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
3640 misc |= ATOM_INTERLACE;
3641 - if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
3642 + if (mode->flags & DRM_MODE_FLAG_DBLCLK)
3643 misc |= ATOM_DOUBLE_CLOCK_MODE;
3644 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
3645 + misc |= ATOM_H_REPLICATIONBY2 | ATOM_V_REPLICATIONBY2;
3646
3647 args.susModeMiscInfo.usAccess = cpu_to_le16(misc);
3648 args.ucCRTC = radeon_crtc->crtc_id;
3649 @@ -576,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
3650 else
3651 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
3652
3653 + /* if there is no audio, set MINM_OVER_MAXP */
3654 + if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
3655 + radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
3656 if (rdev->family < CHIP_RV770)
3657 radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
3658 /* use frac fb div on APUs */
3659 diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
3660 index afbdf9e685d1..2ab80a5331a4 100644
3661 --- a/drivers/gpu/drm/radeon/radeon_vm.c
3662 +++ b/drivers/gpu/drm/radeon/radeon_vm.c
3663 @@ -481,6 +481,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
3664 }
3665
3666 mutex_lock(&vm->mutex);
3667 + soffset /= RADEON_GPU_PAGE_SIZE;
3668 + eoffset /= RADEON_GPU_PAGE_SIZE;
3669 + if (soffset || eoffset) {
3670 + struct interval_tree_node *it;
3671 + it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
3672 + if (it && it != &bo_va->it) {
3673 + struct radeon_bo_va *tmp;
3674 + tmp = container_of(it, struct radeon_bo_va, it);
3675 + /* bo and tmp overlap, invalid offset */
3676 + dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
3677 + "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
3678 + soffset, tmp->bo, tmp->it.start, tmp->it.last);
3679 + mutex_unlock(&vm->mutex);
3680 + return -EINVAL;
3681 + }
3682 + }
3683 +
3684 if (bo_va->it.start || bo_va->it.last) {
3685 if (bo_va->addr) {
3686 /* add a clone of the bo_va to clear the old address */
3687 @@ -503,21 +520,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
3688 bo_va->it.last = 0;
3689 }
3690
3691 - soffset /= RADEON_GPU_PAGE_SIZE;
3692 - eoffset /= RADEON_GPU_PAGE_SIZE;
3693 if (soffset || eoffset) {
3694 - struct interval_tree_node *it;
3695 - it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
3696 - if (it) {
3697 - struct radeon_bo_va *tmp;
3698 - tmp = container_of(it, struct radeon_bo_va, it);
3699 - /* bo and tmp overlap, invalid offset */
3700 - dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
3701 - "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
3702 - soffset, tmp->bo, tmp->it.start, tmp->it.last);
3703 - mutex_unlock(&vm->mutex);
3704 - return -EINVAL;
3705 - }
3706 bo_va->it.start = soffset;
3707 bo_va->it.last = eoffset - 1;
3708 interval_tree_insert(&bo_va->it, &vm->va);
3709 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
3710 index 2b70d3eca8fd..534edaa77940 100644
3711 --- a/drivers/gpu/drm/radeon/si_dpm.c
3712 +++ b/drivers/gpu/drm/radeon/si_dpm.c
3713 @@ -2921,6 +2921,7 @@ struct si_dpm_quirk {
3714 static struct si_dpm_quirk si_dpm_quirk_list[] = {
3715 /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
3716 { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
3717 + { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
3718 { 0, 0, 0, 0 },
3719 };
3720
3721 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
3722 index 433f72a1c006..995e2a0cf096 100644
3723 --- a/drivers/hv/channel.c
3724 +++ b/drivers/hv/channel.c
3725 @@ -135,7 +135,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
3726 GFP_KERNEL);
3727 if (!open_info) {
3728 err = -ENOMEM;
3729 - goto error0;
3730 + goto error_gpadl;
3731 }
3732
3733 init_completion(&open_info->waitevent);
3734 @@ -151,7 +151,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
3735
3736 if (userdatalen > MAX_USER_DEFINED_BYTES) {
3737 err = -EINVAL;
3738 - goto error0;
3739 + goto error_gpadl;
3740 }
3741
3742 if (userdatalen)
3743 @@ -195,6 +195,9 @@ error1:
3744 list_del(&open_info->msglistentry);
3745 spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
3746
3747 +error_gpadl:
3748 + vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
3749 +
3750 error0:
3751 free_pages((unsigned long)out,
3752 get_order(send_ringbuffer_size + recv_ringbuffer_size));
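
The vmbus_open() fix above is a standard layered-unwind correction: once the ring-buffer GPADL has been established, any later failure must tear it down before freeing the ring pages, so a new error_gpadl label is inserted between the existing ones. The general shape, sketched with hypothetical helpers:

#include <stdio.h>

/* hypothetical stand-ins for the setup steps */
static int alloc_ring(void)	 { return 0; }
static int establish_gpadl(void) { return 0; }
static int send_open(void)	 { return -1; }	/* pretend this fails */
static void teardown_gpadl(void) { puts("teardown gpadl"); }
static void free_ring(void)	 { puts("free ring pages"); }

static int open_channel(void)
{
	int err;

	err = alloc_ring();
	if (err)
		return err;
	err = establish_gpadl();
	if (err)
		goto error_free;
	err = send_open();
	if (err)
		goto error_gpadl;	/* undo the GPADL before the pages */
	return 0;

error_gpadl:
	teardown_gpadl();
error_free:
	free_ring();
	return err;
}

int main(void)
{
	return -open_channel();
}
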
3753 diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
3754 index d36ce6835fb7..08b58defdb95 100644
3755 --- a/drivers/hv/channel_mgmt.c
3756 +++ b/drivers/hv/channel_mgmt.c
3757 @@ -767,7 +767,7 @@ int vmbus_request_offers(void)
3758 {
3759 struct vmbus_channel_message_header *msg;
3760 struct vmbus_channel_msginfo *msginfo;
3761 - int ret, t;
3762 + int ret;
3763
3764 msginfo = kmalloc(sizeof(*msginfo) +
3765 sizeof(struct vmbus_channel_message_header),
3766 @@ -775,8 +775,6 @@ int vmbus_request_offers(void)
3767 if (!msginfo)
3768 return -ENOMEM;
3769
3770 - init_completion(&msginfo->waitevent);
3771 -
3772 msg = (struct vmbus_channel_message_header *)msginfo->msg;
3773
3774 msg->msgtype = CHANNELMSG_REQUESTOFFERS;
3775 @@ -790,14 +788,6 @@ int vmbus_request_offers(void)
3776 goto cleanup;
3777 }
3778
3779 - t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
3780 - if (t == 0) {
3781 - ret = -ETIMEDOUT;
3782 - goto cleanup;
3783 - }
3784 -
3785 -
3786 -
3787 cleanup:
3788 kfree(msginfo);
3789
3790 diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
3791 index f486d0eac4d0..3ba933396c25 100644
3792 --- a/drivers/i2c/busses/i2c-rk3x.c
3793 +++ b/drivers/i2c/busses/i2c-rk3x.c
3794 @@ -588,7 +588,7 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap,
3795 clk_disable(i2c->clk);
3796 spin_unlock_irqrestore(&i2c->lock, flags);
3797
3798 - return ret;
3799 + return ret < 0 ? ret : num;
3800 }
3801
3802 static u32 rk3x_i2c_func(struct i2c_adapter *adap)
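
The one-line rk3x change above restores the i2c .master_xfer contract: return a negative errno on failure, or the number of messages completed on success, which i2c_transfer() then passes back to callers. A minimal mock of the convention (hw_xfer is a made-up helper):

#include <errno.h>
#include <stdio.h>

struct msg { int len; };

/* made-up hardware helper: <0 errno on failure, 0 on success */
static int hw_xfer(struct msg *msgs, int num)
{
	(void)msgs;
	return num > 0 ? 0 : -EINVAL;
}

/* the .master_xfer convention the fix restores: negative errno on
 * failure, number of messages completed on success */
static int xfer(struct msg *msgs, int num)
{
	int ret = hw_xfer(msgs, num);

	return ret < 0 ? ret : num;
}

int main(void)
{
	struct msg m[2] = { { 1 }, { 2 } };

	printf("%d\n", xfer(m, 2));	/* prints 2 */
	return 0;
}
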
3803 diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
3804 index 17a1853c6c2f..7bd1b5cfb8d1 100644
3805 --- a/drivers/i2c/i2c-core.c
3806 +++ b/drivers/i2c/i2c-core.c
3807 @@ -593,6 +593,7 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
3808 adap->bus_recovery_info->set_scl(adap, 1);
3809 return i2c_generic_recovery(adap);
3810 }
3811 +EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
3812
3813 int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
3814 {
3815 @@ -607,6 +608,7 @@ int i2c_generic_gpio_recovery(struct i2c_adapter *adap)
3816
3817 return ret;
3818 }
3819 +EXPORT_SYMBOL_GPL(i2c_generic_gpio_recovery);
3820
3821 int i2c_recover_bus(struct i2c_adapter *adap)
3822 {
3823 @@ -616,6 +618,7 @@ int i2c_recover_bus(struct i2c_adapter *adap)
3824 dev_dbg(&adap->dev, "Trying i2c bus recovery\n");
3825 return adap->bus_recovery_info->recover_bus(adap);
3826 }
3827 +EXPORT_SYMBOL_GPL(i2c_recover_bus);
3828
3829 static int i2c_device_probe(struct device *dev)
3830 {
3831 diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
3832 index dfa4286f98a4..49df6af0f03f 100644
3833 --- a/drivers/infiniband/core/umem.c
3834 +++ b/drivers/infiniband/core/umem.c
3835 @@ -94,12 +94,15 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
3836 if (dmasync)
3837 dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);
3838
3839 + if (!size)
3840 + return ERR_PTR(-EINVAL);
3841 +
3842 /*
3843 * If the combination of the addr and size requested for this memory
3844 * region causes an integer overflow, return error.
3845 */
3846 - if ((PAGE_ALIGN(addr + size) <= size) ||
3847 - (PAGE_ALIGN(addr + size) <= addr))
3848 + if (((addr + size) < addr) ||
3849 + PAGE_ALIGN(addr + size) < (addr + size))
3850 return ERR_PTR(-EINVAL);
3851
3852 if (!can_do_mlock())
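
The ib_umem_get() change above replaces a leaky overflow test with two direct ones: reject the range if addr + size wraps, or if rounding the end of the range up to a page boundary wraps. A standalone check with the same logic (PAGE_ALIGN re-derived locally) and the interesting boundary cases:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ull
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* mirrors the fixed test: reject if the sum wraps, or if rounding the
 * end of the range up to a page boundary wraps */
static int range_invalid(uint64_t addr, uint64_t size)
{
	return (addr + size) < addr ||
	       PAGE_ALIGN(addr + size) < (addr + size);
}

int main(void)
{
	/* addr + size wraps past 2^64: rejected */
	printf("%d\n", range_invalid(UINT64_MAX - 100, 200));	/* 1 */
	/* sum fits, but PAGE_ALIGN of it wraps to 0: rejected */
	printf("%d\n", range_invalid(UINT64_MAX - 100, 50));	/* 1 */
	/* ordinary range: accepted */
	printf("%d\n", range_invalid(0x1000, 0x2000));		/* 0 */
	return 0;
}
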
3853 diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
3854 index 03045dd9e5de..33cbb7611d66 100644
3855 --- a/drivers/infiniband/hw/mlx4/qp.c
3856 +++ b/drivers/infiniband/hw/mlx4/qp.c
3857 @@ -2559,8 +2559,7 @@ static int build_lso_seg(struct mlx4_wqe_lso_seg *wqe, struct ib_send_wr *wr,
3858
3859 memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
3860
3861 - *lso_hdr_sz = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
3862 - wr->wr.ud.hlen);
3863 + *lso_hdr_sz = cpu_to_be32(wr->wr.ud.mss << 16 | wr->wr.ud.hlen);
3864 *lso_seg_len = halign;
3865 return 0;
3866 }
3867 diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
3868 index 20e859a6f1a6..76eb57b31a59 100644
3869 --- a/drivers/infiniband/ulp/iser/iser_initiator.c
3870 +++ b/drivers/infiniband/ulp/iser/iser_initiator.c
3871 @@ -409,8 +409,8 @@ int iser_send_command(struct iscsi_conn *conn,
3872 if (scsi_prot_sg_count(sc)) {
3873 prot_buf->buf = scsi_prot_sglist(sc);
3874 prot_buf->size = scsi_prot_sg_count(sc);
3875 - prot_buf->data_len = data_buf->data_len >>
3876 - ilog2(sc->device->sector_size) * 8;
3877 + prot_buf->data_len = (data_buf->data_len >>
3878 + ilog2(sc->device->sector_size)) * 8;
3879 }
3880
3881 if (hdr->flags & ISCSI_FLAG_CMD_READ) {
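
The iser fix above is pure operator precedence: in C, * binds tighter than >>, so the old expression shifted data_len right by ilog2(sector_size) * 8 bits rather than multiplying the sector count by 8 (the per-sector DIF protection tuple is 8 bytes). Demonstrated standalone, without actually performing the out-of-range shift (which would be undefined behaviour):

#include <stdio.h>

static unsigned int ilog2_(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned long long data_len = 4096;	/* eight 512-byte sectors */
	unsigned int sector_size = 512;		/* ilog2_(512) == 9 */

	/* old parse: data_len >> (ilog2(sector_size) * 8), i.e. a 72-bit
	 * shift, not even defined for a 64-bit operand */
	unsigned int old_shift = ilog2_(sector_size) * 8;

	/* fixed parse: sector count first, then 8 bytes of PI per sector */
	unsigned long long pi_len = (data_len >> ilog2_(sector_size)) * 8;

	printf("old shift amount: %u bits\n", old_shift);	/* 72 */
	printf("fixed PI length: %llu bytes\n", pi_len);	/* 64 */
	return 0;
}
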
3882 diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
3883 index a6daabc70425..0618e407197d 100644
3884 --- a/drivers/infiniband/ulp/isert/ib_isert.c
3885 +++ b/drivers/infiniband/ulp/isert/ib_isert.c
3886 @@ -1900,11 +1900,13 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
3887 cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
3888 spin_unlock_bh(&cmd->istate_lock);
3889
3890 - if (ret)
3891 + if (ret) {
3892 + target_put_sess_cmd(se_cmd->se_sess, se_cmd);
3893 transport_send_check_condition_and_sense(se_cmd,
3894 se_cmd->pi_err, 0);
3895 - else
3896 + } else {
3897 target_execute_cmd(se_cmd);
3898 + }
3899 }
3900
3901 static void
3902 diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
3903 index 6e22682c8255..991dc6b20a58 100644
3904 --- a/drivers/input/mouse/elantech.c
3905 +++ b/drivers/input/mouse/elantech.c
3906 @@ -893,6 +893,21 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
3907 }
3908
3909 /*
3910 + * This writes the reg_07 value again to the hardware at the end of every
3911 + * set_rate call because the register loses its value. reg_07 allows setting
3912 + * absolute mode on v4 hardware
3913 + */
3914 +static void elantech_set_rate_restore_reg_07(struct psmouse *psmouse,
3915 + unsigned int rate)
3916 +{
3917 + struct elantech_data *etd = psmouse->private;
3918 +
3919 + etd->original_set_rate(psmouse, rate);
3920 + if (elantech_write_reg(psmouse, 0x07, etd->reg_07))
3921 + psmouse_err(psmouse, "restoring reg_07 failed\n");
3922 +}
3923 +
3924 +/*
3925 * Put the touchpad into absolute mode
3926 */
3927 static int elantech_set_absolute_mode(struct psmouse *psmouse)
3928 @@ -1094,6 +1109,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
3929 * Asus K53SV 0x450f01 78, 15, 0c 2 hw buttons
3930 * Asus G46VW 0x460f02 00, 18, 0c 2 hw buttons
3931 * Asus G750JX 0x360f00 00, 16, 0c 2 hw buttons
3932 + * Asus TP500LN 0x381f17 10, 14, 0e clickpad
3933 + * Asus X750JN 0x381f17 10, 14, 0e clickpad
3934 * Asus UX31 0x361f00 20, 15, 0e clickpad
3935 * Asus UX32VD 0x361f02 00, 15, 0e clickpad
3936 * Avatar AVIU-145A2 0x361f00 ? clickpad
3937 @@ -1635,6 +1652,11 @@ int elantech_init(struct psmouse *psmouse)
3938 goto init_fail;
3939 }
3940
3941 + if (etd->fw_version == 0x381f17) {
3942 + etd->original_set_rate = psmouse->set_rate;
3943 + psmouse->set_rate = elantech_set_rate_restore_reg_07;
3944 + }
3945 +
3946 if (elantech_set_input_params(psmouse)) {
3947 psmouse_err(psmouse, "failed to query touchpad range.\n");
3948 goto init_fail;
3949 diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
3950 index 6f3afec02f03..f965d1569cc3 100644
3951 --- a/drivers/input/mouse/elantech.h
3952 +++ b/drivers/input/mouse/elantech.h
3953 @@ -142,6 +142,7 @@ struct elantech_data {
3954 struct finger_pos mt[ETP_MAX_FINGERS];
3955 unsigned char parity[256];
3956 int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
3957 + void (*original_set_rate)(struct psmouse *psmouse, unsigned int rate);
3958 };
3959
3960 #ifdef CONFIG_MOUSE_PS2_ELANTECH
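
The elantech hunks above install a classic function-pointer wrap: save psmouse->set_rate into original_set_rate, point set_rate at a wrapper, and have the wrapper call the original before rewriting the register that the rate change clobbers. The pattern in miniature, with stand-in names:

#include <stdio.h>

struct dev {
	void (*set_rate)(struct dev *d, unsigned int rate);
	void (*original_set_rate)(struct dev *d, unsigned int rate);
};

static void hw_set_rate(struct dev *d, unsigned int rate)
{
	(void)d;
	printf("hardware rate set to %u\n", rate);
}

static void set_rate_restore(struct dev *d, unsigned int rate)
{
	d->original_set_rate(d, rate);	/* do the real work first */
	printf("rewriting the clobbered register\n");
}

int main(void)
{
	struct dev d = { .set_rate = hw_set_rate };

	d.original_set_rate = d.set_rate;	/* save */
	d.set_rate = set_rate_restore;		/* wrap */
	d.set_rate(&d, 80);
	return 0;
}
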
3961 diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
3962 index 08981be7baa1..a9f2266049af 100644
3963 --- a/drivers/md/dm-crypt.c
3964 +++ b/drivers/md/dm-crypt.c
3965 @@ -920,11 +920,10 @@ static int crypt_convert(struct crypt_config *cc,
3966
3967 switch (r) {
3968 /* async */
3969 + case -EINPROGRESS:
3970 case -EBUSY:
3971 wait_for_completion(&ctx->restart);
3972 reinit_completion(&ctx->restart);
3973 - /* fall through*/
3974 - case -EINPROGRESS:
3975 ctx->req = NULL;
3976 ctx->cc_sector++;
3977 continue;
3978 @@ -1315,10 +1314,8 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
3979 struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
3980 struct crypt_config *cc = io->cc;
3981
3982 - if (error == -EINPROGRESS) {
3983 - complete(&ctx->restart);
3984 + if (error == -EINPROGRESS)
3985 return;
3986 - }
3987
3988 if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
3989 error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
3990 @@ -1329,12 +1326,15 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
3991 crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
3992
3993 if (!atomic_dec_and_test(&ctx->cc_pending))
3994 - return;
3995 + goto done;
3996
3997 if (bio_data_dir(io->base_bio) == READ)
3998 kcryptd_crypt_read_done(io);
3999 else
4000 kcryptd_crypt_write_io_submit(io, 1);
4001 +done:
4002 + if (!completion_done(&ctx->restart))
4003 + complete(&ctx->restart);
4004 }
4005
4006 static void kcryptd_crypt(struct work_struct *work)
4007 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
4008 index ba6b85de96d2..d5c12e5b6125 100644
4009 --- a/drivers/md/raid0.c
4010 +++ b/drivers/md/raid0.c
4011 @@ -319,7 +319,7 @@ static struct strip_zone *find_zone(struct r0conf *conf,
4012
4013 /*
4014 * remaps the bio to the target device. we separate two flows.
4015 - * power 2 flow and a general flow for the sake of perfromance
4016 + * power 2 flow and a general flow for the sake of performance
4017 */
4018 static struct md_rdev *map_sector(struct mddev *mddev, struct strip_zone *zone,
4019 sector_t sector, sector_t *sector_offset)
4020 @@ -537,6 +537,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
4021 split = bio;
4022 }
4023
4024 + sector = bio->bi_iter.bi_sector;
4025 zone = find_zone(mddev->private, &sector);
4026 tmp_dev = map_sector(mddev, zone, sector, &sector);
4027 split->bi_bdev = tmp_dev->bdev;
4028 diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
4029 index a0cac2f09109..9971dda621f5 100644
4030 --- a/drivers/media/rc/img-ir/img-ir-core.c
4031 +++ b/drivers/media/rc/img-ir/img-ir-core.c
4032 @@ -146,7 +146,7 @@ static int img_ir_remove(struct platform_device *pdev)
4033 {
4034 struct img_ir_priv *priv = platform_get_drvdata(pdev);
4035
4036 - free_irq(priv->irq, img_ir_isr);
4037 + free_irq(priv->irq, priv);
4038 img_ir_remove_hw(priv);
4039 img_ir_remove_raw(priv);
4040
4041 diff --git a/drivers/media/usb/stk1160/stk1160-v4l.c b/drivers/media/usb/stk1160/stk1160-v4l.c
4042 index 233054311a62..f6d0334f246e 100644
4043 --- a/drivers/media/usb/stk1160/stk1160-v4l.c
4044 +++ b/drivers/media/usb/stk1160/stk1160-v4l.c
4045 @@ -244,6 +244,11 @@ static int stk1160_stop_streaming(struct stk1160 *dev)
4046 if (mutex_lock_interruptible(&dev->v4l_lock))
4047 return -ERESTARTSYS;
4048
4049 + /*
4050 + * Once URBs are cancelled, the URB complete handler
4051 + * won't be running. This is required to safely release the
4052 + * current buffer (dev->isoc_ctl.buf).
4053 + */
4054 stk1160_cancel_isoc(dev);
4055
4056 /*
4057 @@ -624,8 +629,16 @@ void stk1160_clear_queue(struct stk1160 *dev)
4058 stk1160_info("buffer [%p/%d] aborted\n",
4059 buf, buf->vb.v4l2_buf.index);
4060 }
4061 - /* It's important to clear current buffer */
4062 - dev->isoc_ctl.buf = NULL;
4063 +
4064 + /* It's important to release the current buffer */
4065 + if (dev->isoc_ctl.buf) {
4066 + buf = dev->isoc_ctl.buf;
4067 + dev->isoc_ctl.buf = NULL;
4068 +
4069 + vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
4070 + stk1160_info("buffer [%p/%d] aborted\n",
4071 + buf, buf->vb.v4l2_buf.index);
4072 + }
4073 spin_unlock_irqrestore(&dev->buf_lock, flags);
4074 }
4075
4076 diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
4077 index fc145d202c46..922a750640e8 100644
4078 --- a/drivers/memstick/core/mspro_block.c
4079 +++ b/drivers/memstick/core/mspro_block.c
4080 @@ -758,7 +758,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
4081
4082 if (error || (card->current_mrq.tpc == MSPRO_CMD_STOP)) {
4083 if (msb->data_dir == READ) {
4084 - for (cnt = 0; cnt < msb->current_seg; cnt++)
4085 + for (cnt = 0; cnt < msb->current_seg; cnt++) {
4086 t_len += msb->req_sg[cnt].length
4087 / msb->page_size;
4088
4089 @@ -766,6 +766,7 @@ static int mspro_block_complete_req(struct memstick_dev *card, int error)
4090 t_len += msb->current_page - 1;
4091
4092 t_len *= msb->page_size;
4093 + }
4094 }
4095 } else
4096 t_len = blk_rq_bytes(msb->block_req);
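
The mspro_block fix above adds braces to a for loop whose body was meant to span three statements; without them only the first statement iterated, and the page adjustment and the multiply by page_size ran once, after the loop. A minimal illustration of how much a dangling statement changes the arithmetic:

#include <stdio.h>

int main(void)
{
	int total = 0;
	int i;

	for (i = 0; i < 3; i++)
		total += 1;
	total *= 2;		/* dangling: runs once, total == 6 */
	printf("without braces: %d\n", total);

	total = 0;
	for (i = 0; i < 3; i++) {
		total += 1;
		total *= 2;	/* runs every pass, total == 14 */
	}
	printf("with braces:    %d\n", total);
	return 0;
}
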
4097 diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
4098 index d1663b3c4143..aac659bdd08a 100644
4099 --- a/drivers/mmc/host/sunxi-mmc.c
4100 +++ b/drivers/mmc/host/sunxi-mmc.c
4101 @@ -909,7 +909,9 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
4102 return PTR_ERR(host->clk_mmc);
4103 }
4104
4105 - host->reset = devm_reset_control_get(&pdev->dev, "ahb");
4106 + host->reset = devm_reset_control_get_optional(&pdev->dev, "ahb");
4107 + if (PTR_ERR(host->reset) == -EPROBE_DEFER)
4108 + return PTR_ERR(host->reset);
4109
4110 ret = clk_prepare_enable(host->clk_ahb);
4111 if (ret) {
4112 diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
4113 index 6f27d9a1be3b..21841fe25ad3 100644
4114 --- a/drivers/mtd/ubi/attach.c
4115 +++ b/drivers/mtd/ubi/attach.c
4116 @@ -408,7 +408,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
4117 second_is_newer = !second_is_newer;
4118 } else {
4119 dbg_bld("PEB %d CRC is OK", pnum);
4120 - bitflips = !!err;
4121 + bitflips |= !!err;
4122 }
4123 mutex_unlock(&ubi->buf_mutex);
4124
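
The one-character UBI change above turns an assignment into an accumulation: bitflips may already be set from an earlier read in the same comparison, and plain = would silently drop that flag whenever the data CRC check comes back clean, losing the hint that the PEB should be scrubbed. Side by side:

#include <stdio.h>

int main(void)
{
	int err = 0;		/* this CRC check came back clean */
	int assigned = 1;	/* flag set by an earlier bitflip */
	int accumulated = 1;

	assigned = !!err;	/* old: earlier flag lost -> 0 */
	accumulated |= !!err;	/* new: earlier flag kept -> 1 */

	printf("assigned=%d accumulated=%d\n", assigned, accumulated);
	return 0;
}
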
4125 diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
4126 index 59de69a24e40..3946d78e6a81 100644
4127 --- a/drivers/mtd/ubi/cdev.c
4128 +++ b/drivers/mtd/ubi/cdev.c
4129 @@ -453,7 +453,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
4130 /* Validate the request */
4131 err = -EINVAL;
4132 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs ||
4133 - req.bytes < 0 || req.lnum >= vol->usable_leb_size)
4134 + req.bytes < 0 || req.bytes > vol->usable_leb_size)
4135 break;
4136
4137 err = get_exclusive(desc);
4138 diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
4139 index 2402d3b50171..493f7b3dbc33 100644
4140 --- a/drivers/mtd/ubi/eba.c
4141 +++ b/drivers/mtd/ubi/eba.c
4142 @@ -1361,7 +1361,8 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
4143 * during re-size.
4144 */
4145 ubi_move_aeb_to_list(av, aeb, &ai->erase);
4146 - vol->eba_tbl[aeb->lnum] = aeb->pnum;
4147 + else
4148 + vol->eba_tbl[aeb->lnum] = aeb->pnum;
4149 }
4150 }
4151
4152 diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
4153 index b9686c1472d2..ef670560971e 100644
4154 --- a/drivers/mtd/ubi/wl.c
4155 +++ b/drivers/mtd/ubi/wl.c
4156 @@ -1001,7 +1001,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
4157 int shutdown)
4158 {
4159 int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
4160 - int vol_id = -1, uninitialized_var(lnum);
4161 + int vol_id = -1, lnum = -1;
4162 #ifdef CONFIG_MTD_UBI_FASTMAP
4163 int anchor = wrk->anchor;
4164 #endif
4165 diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
4166 index 24f3986cfae2..3a06cae4ff68 100644
4167 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c
4168 +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
4169 @@ -144,6 +144,11 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4170 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4171 struct e1000_rx_ring *rx_ring,
4172 int *work_done, int work_to_do);
4173 +static void e1000_alloc_dummy_rx_buffers(struct e1000_adapter *adapter,
4174 + struct e1000_rx_ring *rx_ring,
4175 + int cleaned_count)
4176 +{
4177 +}
4178 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4179 struct e1000_rx_ring *rx_ring,
4180 int cleaned_count);
4181 @@ -3552,8 +3557,11 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
4182 msleep(1);
4183 /* e1000_down has a dependency on max_frame_size */
4184 hw->max_frame_size = max_frame;
4185 - if (netif_running(netdev))
4186 + if (netif_running(netdev)) {
4187 + /* prevent buffers from being reallocated */
4188 + adapter->alloc_rx_buf = e1000_alloc_dummy_rx_buffers;
4189 e1000_down(adapter);
4190 + }
4191
4192 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
4193 * means we reserve 2 more, this pushes us to allocate from the next
4194 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4195 index 0207044f6d57..68fef1151dde 100644
4196 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4197 +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
4198 @@ -1466,6 +1466,7 @@ static void mlx4_en_service_task(struct work_struct *work)
4199 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
4200 mlx4_en_ptp_overflow_check(mdev);
4201
4202 + mlx4_en_recover_from_oom(priv);
4203 queue_delayed_work(mdev->workqueue, &priv->service_task,
4204 SERVICE_TASK_DELAY);
4205 }
4206 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
4207 index 01660c595f5c..5bbb59dce4d5 100644
4208 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
4209 +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
4210 @@ -237,6 +237,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
4211 return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
4212 }
4213
4214 +static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
4215 +{
4216 + BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
4217 + return ring->prod == ring->cons;
4218 +}
4219 +
4220 static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
4221 {
4222 *ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
4223 @@ -308,8 +314,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
4224 ring->cons, ring->prod);
4225
4226 /* Unmap and free Rx buffers */
4227 - BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
4228 - while (ring->cons != ring->prod) {
4229 + while (!mlx4_en_is_ring_empty(ring)) {
4230 index = ring->cons & ring->size_mask;
4231 en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
4232 mlx4_en_free_rx_desc(priv, ring, index);
4233 @@ -484,6 +489,23 @@ err_allocator:
4234 return err;
4235 }
4236
4237 +/* We recover from out of memory by scheduling our napi poll
4238 + * function (mlx4_en_process_cq), which tries to allocate
4239 + * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
4240 + */
4241 +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
4242 +{
4243 + int ring;
4244 +
4245 + if (!priv->port_up)
4246 + return;
4247 +
4248 + for (ring = 0; ring < priv->rx_ring_num; ring++) {
4249 + if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
4250 + napi_reschedule(&priv->rx_cq[ring]->napi);
4251 + }
4252 +}
4253 +
4254 void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
4255 struct mlx4_en_rx_ring **pring,
4256 u32 size, u16 stride)
4257 diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4258 index 11ff28b5fca3..142ddd595e2e 100644
4259 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4260 +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
4261 @@ -137,8 +137,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
4262 ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
4263 ring->queue_index = queue_index;
4264
4265 - if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
4266 - cpumask_set_cpu(queue_index, &ring->affinity_mask);
4267 + if (queue_index < priv->num_tx_rings_p_up)
4268 + cpumask_set_cpu_local_first(queue_index,
4269 + priv->mdev->dev->numa_node,
4270 + &ring->affinity_mask);
4271
4272 *pring = ring;
4273 return 0;
4274 @@ -205,7 +207,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
4275
4276 err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
4277 &ring->qp, &ring->qp_state);
4278 - if (!user_prio && cpu_online(ring->queue_index))
4279 + if (!cpumask_empty(&ring->affinity_mask))
4280 netif_set_xps_queue(priv->dev, &ring->affinity_mask,
4281 ring->queue_index);
4282
4283 diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
4284 index 8fef65840b3b..692bd4e8b21f 100644
4285 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
4286 +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
4287 @@ -779,6 +779,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
4288 void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
4289 struct mlx4_en_tx_ring *ring);
4290 void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
4291 +void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
4292 int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
4293 struct mlx4_en_rx_ring **pring,
4294 u32 size, u16 stride, int node);
4295 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
4296 index 794a47329368..6d6c20c3ef7e 100644
4297 --- a/drivers/net/ppp/ppp_generic.c
4298 +++ b/drivers/net/ppp/ppp_generic.c
4299 @@ -1714,6 +1714,7 @@ ppp_receive_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
4300 {
4301 /* note: a 0-length skb is used as an error indication */
4302 if (skb->len > 0) {
4303 + skb_checksum_complete_unset(skb);
4304 #ifdef CONFIG_PPP_MULTILINK
4305 /* XXX do channel-level decompression here */
4306 if (PPP_PROTO(skb) == PPP_MP)
4307 diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
4308 index a5186bb7c63e..8c45cf44ce24 100644
4309 --- a/drivers/net/wireless/rtlwifi/pci.c
4310 +++ b/drivers/net/wireless/rtlwifi/pci.c
4311 @@ -578,6 +578,13 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
4312 else
4313 entry = (u8 *)(&ring->desc[ring->idx]);
4314
4315 + if (rtlpriv->cfg->ops->get_available_desc &&
4316 + rtlpriv->cfg->ops->get_available_desc(hw, prio) <= 1) {
4317 + RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_DMESG,
4318 + "no available desc!\n");
4319 + return;
4320 + }
4321 +
4322 if (!rtlpriv->cfg->ops->is_tx_desc_closed(hw, prio, ring->idx))
4323 return;
4324 ring->idx = (ring->idx + 1) % ring->entries;
4325 @@ -641,10 +648,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
4326
4327 ieee80211_tx_status_irqsafe(hw, skb);
4328
4329 - if ((ring->entries - skb_queue_len(&ring->queue))
4330 - == 2) {
4331 + if ((ring->entries - skb_queue_len(&ring->queue)) <= 4) {
4332
4333 - RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
4334 + RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
4335 "more desc left, wake skb_queue@%d, ring->idx = %d, skb_queue_len = 0x%x\n",
4336 prio, ring->idx,
4337 skb_queue_len(&ring->queue));
4338 @@ -793,7 +799,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4339 rx_remained_cnt =
4340 rtlpriv->cfg->ops->rx_desc_buff_remained_cnt(hw,
4341 hw_queue);
4342 - if (rx_remained_cnt < 1)
4343 + if (rx_remained_cnt == 0)
4344 return;
4345
4346 } else { /* rx descriptor */
4347 @@ -845,18 +851,18 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4348 else
4349 skb_reserve(skb, stats.rx_drvinfo_size +
4350 stats.rx_bufshift);
4351 -
4352 } else {
4353 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
4354 "skb->end - skb->tail = %d, len is %d\n",
4355 skb->end - skb->tail, len);
4356 - break;
4357 + dev_kfree_skb_any(skb);
4358 + goto new_trx_end;
4359 }
4360 /* handle command packet here */
4361 if (rtlpriv->cfg->ops->rx_command_packet &&
4362 rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
4363 dev_kfree_skb_any(skb);
4364 - goto end;
4365 + goto new_trx_end;
4366 }
4367
4368 /*
4369 @@ -906,6 +912,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4370 } else {
4371 dev_kfree_skb_any(skb);
4372 }
4373 +new_trx_end:
4374 if (rtlpriv->use_new_trx_flow) {
4375 rtlpci->rx_ring[hw_queue].next_rx_rp += 1;
4376 rtlpci->rx_ring[hw_queue].next_rx_rp %=
4377 @@ -921,7 +928,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
4378 rtlpriv->enter_ps = false;
4379 schedule_work(&rtlpriv->works.lps_change_work);
4380 }
4381 -end:
4382 skb = new_skb;
4383 no_new:
4384 if (rtlpriv->use_new_trx_flow) {
4385 @@ -1695,6 +1701,15 @@ static int rtl_pci_tx(struct ieee80211_hw *hw,
4386 }
4387 }
4388
4389 + if (rtlpriv->cfg->ops->get_available_desc &&
4390 + rtlpriv->cfg->ops->get_available_desc(hw, hw_queue) == 0) {
4391 + RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
4392 + "get_available_desc fail\n");
4393 + spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock,
4394 + flags);
4395 + return skb->len;
4396 + }
4397 +
4398 if (ieee80211_is_data_qos(fc)) {
4399 tid = rtl_get_tid(skb);
4400 if (sta) {
4401 diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
4402 index e06bafee37f9..5034660bf411 100644
4403 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
4404 +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
4405 @@ -321,6 +321,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
4406 {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
4407 {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
4408 {RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
4409 + {RTL_USB_DEVICE(0x0b05, 0x17ba, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
4410 {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/
4411 {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
4412 {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
4413 @@ -377,6 +378,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
4414 {RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
4415 {RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
4416 {RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
4417 + {RTL_USB_DEVICE(0x2001, 0x330d, rtl92cu_hal_cfg)}, /*D-Link DWA-131 */
4418 {RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
4419 {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
4420 {RTL_USB_DEVICE(0x2357, 0x0100, rtl92cu_hal_cfg)}, /*TP-Link WN8200ND*/
4421 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
4422 index 9b5a7d5be121..c31c6bfb536d 100644
4423 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
4424 +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/sw.c
4425 @@ -113,8 +113,6 @@ int rtl92ee_init_sw_vars(struct ieee80211_hw *hw)
4426 RCR_HTC_LOC_CTRL |
4427 RCR_AMF |
4428 RCR_ACF |
4429 - RCR_ADF |
4430 - RCR_AICV |
4431 RCR_ACRC32 |
4432 RCR_AB |
4433 RCR_AM |
4434 @@ -241,6 +239,7 @@ static struct rtl_hal_ops rtl8192ee_hal_ops = {
4435 .set_desc = rtl92ee_set_desc,
4436 .get_desc = rtl92ee_get_desc,
4437 .is_tx_desc_closed = rtl92ee_is_tx_desc_closed,
4438 + .get_available_desc = rtl92ee_get_available_desc,
4439 .tx_polling = rtl92ee_tx_polling,
4440 .enable_hw_sec = rtl92ee_enable_hw_security_config,
4441 .set_key = rtl92ee_set_key,
4442 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
4443 index 00690040be37..1f6d160877e1 100644
4444 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
4445 +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.c
4446 @@ -707,7 +707,7 @@ static u16 get_desc_addr_fr_q_idx(u16 queue_index)
4447 return desc_address;
4448 }
4449
4450 -void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
4451 +u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
4452 {
4453 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
4454 struct rtl_priv *rtlpriv = rtl_priv(hw);
4455 @@ -721,11 +721,12 @@ void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 q_idx)
4456 current_tx_write_point = (u16)((tmp_4byte) & 0x0fff);
4457
4458 point_diff = ((current_tx_read_point > current_tx_write_point) ?
4459 - (current_tx_read_point - current_tx_write_point) :
4460 - (TX_DESC_NUM_92E - current_tx_write_point +
4461 + (current_tx_read_point - current_tx_write_point - 1) :
4462 + (TX_DESC_NUM_92E - 1 - current_tx_write_point +
4463 current_tx_read_point));
4464
4465 rtlpci->tx_ring[q_idx].avl_desc = point_diff;
4466 + return point_diff;
4467 }
4468
4469 void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
4470 diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
4471 index 8effef9b13dd..b489dd9c8401 100644
4472 --- a/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
4473 +++ b/drivers/net/wireless/rtlwifi/rtl8192ee/trx.h
4474 @@ -831,7 +831,7 @@ void rtl92ee_rx_check_dma_ok(struct ieee80211_hw *hw, u8 *header_desc,
4475 u8 queue_index);
4476 u16 rtl92ee_rx_desc_buff_remained_cnt(struct ieee80211_hw *hw,
4477 u8 queue_index);
4478 -void rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
4479 +u16 rtl92ee_get_available_desc(struct ieee80211_hw *hw, u8 queue_index);
4480 void rtl92ee_pre_fill_tx_bd_desc(struct ieee80211_hw *hw,
4481 u8 *tx_bd_desc, u8 *desc, u8 queue_index,
4482 struct sk_buff *skb, dma_addr_t addr);
4483 diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
4484 index 6866dcf24340..27822fe34d9a 100644
4485 --- a/drivers/net/wireless/rtlwifi/wifi.h
4486 +++ b/drivers/net/wireless/rtlwifi/wifi.h
4487 @@ -2161,6 +2161,7 @@ struct rtl_hal_ops {
4488 void (*add_wowlan_pattern)(struct ieee80211_hw *hw,
4489 struct rtl_wow_pattern *rtl_pattern,
4490 u8 index);
4491 + u16 (*get_available_desc)(struct ieee80211_hw *hw, u8 q_idx);
4492 };
4493
4494 struct rtl_intf_ops {
4495 diff --git a/drivers/net/wireless/ti/wl18xx/debugfs.c b/drivers/net/wireless/ti/wl18xx/debugfs.c
4496 index 7f1669cdea09..779dc2b2ca75 100644
4497 --- a/drivers/net/wireless/ti/wl18xx/debugfs.c
4498 +++ b/drivers/net/wireless/ti/wl18xx/debugfs.c
4499 @@ -136,7 +136,7 @@ WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
4500 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
4501 WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
4502
4503 -WL18XX_DEBUGFS_FWSTATS_FILE(rx_rate, rx_frames_per_rates, "%u");
4504 +WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
4505
4506 WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_vs_rate,
4507 AGGR_STATS_TX_AGG*AGGR_STATS_TX_RATE);
4508 diff --git a/drivers/net/wireless/ti/wlcore/debugfs.h b/drivers/net/wireless/ti/wlcore/debugfs.h
4509 index 0f2cfb0d2a9e..bf14676e6515 100644
4510 --- a/drivers/net/wireless/ti/wlcore/debugfs.h
4511 +++ b/drivers/net/wireless/ti/wlcore/debugfs.h
4512 @@ -26,8 +26,8 @@
4513
4514 #include "wlcore.h"
4515
4516 -int wl1271_format_buffer(char __user *userbuf, size_t count,
4517 - loff_t *ppos, char *fmt, ...);
4518 +__printf(4, 5) int wl1271_format_buffer(char __user *userbuf, size_t count,
4519 + loff_t *ppos, char *fmt, ...);
4520
4521 int wl1271_debugfs_init(struct wl1271 *wl);
4522 void wl1271_debugfs_exit(struct wl1271 *wl);
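
The wlcore header change above adds the kernel's __printf(4, 5) annotation, i.e. __attribute__((format(printf, 4, 5))): argument 4 is the format string and the variadic arguments start at argument 5, so the compiler can type-check every caller's format arguments. The same idea standalone, on a smaller function:

#include <stdarg.h>
#include <stdio.h>

static int log_to(FILE *f, const char *fmt, ...)
	__attribute__((format(printf, 2, 3)));

static int log_to(FILE *f, const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vfprintf(f, fmt, ap);
	va_end(ap);
	return n;
}

int main(void)
{
	log_to(stdout, "%d frames\n", 42);
	/* log_to(stdout, "%s\n", 42); would now trigger -Wformat */
	return 0;
}
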
4523 diff --git a/drivers/nfc/st21nfcb/i2c.c b/drivers/nfc/st21nfcb/i2c.c
4524 index c5d2427a3db2..2cf867b9796d 100644
4525 --- a/drivers/nfc/st21nfcb/i2c.c
4526 +++ b/drivers/nfc/st21nfcb/i2c.c
4527 @@ -112,7 +112,7 @@ static int st21nfcb_nci_i2c_write(void *phy_id, struct sk_buff *skb)
4528 return phy->ndlc->hard_fault;
4529
4530 r = i2c_master_send(client, skb->data, skb->len);
4531 - if (r == -EREMOTEIO) { /* Retry, chip was in standby */
4532 + if (r < 0) { /* Retry, chip was in standby */
4533 usleep_range(1000, 4000);
4534 r = i2c_master_send(client, skb->data, skb->len);
4535 }
4536 @@ -151,7 +151,7 @@ static int st21nfcb_nci_i2c_read(struct st21nfcb_i2c_phy *phy,
4537 struct i2c_client *client = phy->i2c_dev;
4538
4539 r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
4540 - if (r == -EREMOTEIO) { /* Retry, chip was in standby */
4541 + if (r < 0) { /* Retry, chip was in standby */
4542 usleep_range(1000, 4000);
4543 r = i2c_master_recv(client, buf, ST21NFCB_NCI_I2C_MIN_SIZE);
4544 }
4545 diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
4546 index 26bfd7bb5c13..034aa844a1b5 100644
4547 --- a/drivers/platform/x86/compal-laptop.c
4548 +++ b/drivers/platform/x86/compal-laptop.c
4549 @@ -1027,9 +1027,9 @@ static int compal_probe(struct platform_device *pdev)
4550 if (err)
4551 return err;
4552
4553 - hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
4554 - "compal", data,
4555 - compal_hwmon_groups);
4556 + hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
4557 + "compal", data,
4558 + compal_hwmon_groups);
4559 if (IS_ERR(hwmon_dev)) {
4560 err = PTR_ERR(hwmon_dev);
4561 goto remove;
4562 @@ -1037,7 +1037,9 @@ static int compal_probe(struct platform_device *pdev)
4563
4564 /* Power supply */
4565 initialize_power_supply_data(data);
4566 - power_supply_register(&compal_device->dev, &data->psy);
4567 + err = power_supply_register(&compal_device->dev, &data->psy);
4568 + if (err < 0)
4569 + goto remove;
4570
4571 platform_set_drvdata(pdev, data);
4572
4573 diff --git a/drivers/power/ipaq_micro_battery.c b/drivers/power/ipaq_micro_battery.c
4574 index 9d694605cdb7..96b15e003f3f 100644
4575 --- a/drivers/power/ipaq_micro_battery.c
4576 +++ b/drivers/power/ipaq_micro_battery.c
4577 @@ -226,6 +226,7 @@ static struct power_supply micro_ac_power = {
4578 static int micro_batt_probe(struct platform_device *pdev)
4579 {
4580 struct micro_battery *mb;
4581 + int ret;
4582
4583 mb = devm_kzalloc(&pdev->dev, sizeof(*mb), GFP_KERNEL);
4584 if (!mb)
4585 @@ -233,14 +234,30 @@ static int micro_batt_probe(struct platform_device *pdev)
4586
4587 mb->micro = dev_get_drvdata(pdev->dev.parent);
4588 mb->wq = create_singlethread_workqueue("ipaq-battery-wq");
4589 + if (!mb->wq)
4590 + return -ENOMEM;
4591 +
4592 INIT_DELAYED_WORK(&mb->update, micro_battery_work);
4593 platform_set_drvdata(pdev, mb);
4594 queue_delayed_work(mb->wq, &mb->update, 1);
4595 - power_supply_register(&pdev->dev, &micro_batt_power);
4596 - power_supply_register(&pdev->dev, &micro_ac_power);
4597 +
4598 + ret = power_supply_register(&pdev->dev, &micro_batt_power);
4599 + if (ret < 0)
4600 + goto batt_err;
4601 +
4602 + ret = power_supply_register(&pdev->dev, &micro_ac_power);
4603 + if (ret < 0)
4604 + goto ac_err;
4605
4606 dev_info(&pdev->dev, "iPAQ micro battery driver\n");
4607 return 0;
4608 +
4609 +ac_err:
4610 + power_supply_unregister(&micro_ac_power);
4611 +batt_err:
4612 + cancel_delayed_work_sync(&mb->update);
4613 + destroy_workqueue(mb->wq);
4614 + return ret;
4615 }
4616
4617 static int micro_batt_remove(struct platform_device *pdev)
4618 @@ -251,6 +268,7 @@ static int micro_batt_remove(struct platform_device *pdev)
4619 power_supply_unregister(&micro_ac_power);
4620 power_supply_unregister(&micro_batt_power);
4621 cancel_delayed_work_sync(&mb->update);
4622 + destroy_workqueue(mb->wq);
4623
4624 return 0;
4625 }
4626 diff --git a/drivers/power/lp8788-charger.c b/drivers/power/lp8788-charger.c
4627 index ed49b50b220b..72da2a6c22db 100644
4628 --- a/drivers/power/lp8788-charger.c
4629 +++ b/drivers/power/lp8788-charger.c
4630 @@ -417,8 +417,10 @@ static int lp8788_psy_register(struct platform_device *pdev,
4631 pchg->battery.num_properties = ARRAY_SIZE(lp8788_battery_prop);
4632 pchg->battery.get_property = lp8788_battery_get_property;
4633
4634 - if (power_supply_register(&pdev->dev, &pchg->battery))
4635 + if (power_supply_register(&pdev->dev, &pchg->battery)) {
4636 + power_supply_unregister(&pchg->charger);
4637 return -EPERM;
4638 + }
4639
4640 return 0;
4641 }
4642 diff --git a/drivers/power/twl4030_madc_battery.c b/drivers/power/twl4030_madc_battery.c
4643 index 7ef445a6cfa6..cf907609ec49 100644
4644 --- a/drivers/power/twl4030_madc_battery.c
4645 +++ b/drivers/power/twl4030_madc_battery.c
4646 @@ -192,6 +192,7 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
4647 {
4648 struct twl4030_madc_battery *twl4030_madc_bat;
4649 struct twl4030_madc_bat_platform_data *pdata = pdev->dev.platform_data;
4650 + int ret = 0;
4651
4652 twl4030_madc_bat = kzalloc(sizeof(*twl4030_madc_bat), GFP_KERNEL);
4653 if (!twl4030_madc_bat)
4654 @@ -216,9 +217,11 @@ static int twl4030_madc_battery_probe(struct platform_device *pdev)
4655
4656 twl4030_madc_bat->pdata = pdata;
4657 platform_set_drvdata(pdev, twl4030_madc_bat);
4658 - power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
4659 + ret = power_supply_register(&pdev->dev, &twl4030_madc_bat->psy);
4660 + if (ret < 0)
4661 + kfree(twl4030_madc_bat);
4662
4663 - return 0;
4664 + return ret;
4665 }
4666
4667 static int twl4030_madc_battery_remove(struct platform_device *pdev)
4668 diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
4669 index 0a7325361d29..5f57e3d35e26 100644
4670 --- a/drivers/scsi/3w-9xxx.c
4671 +++ b/drivers/scsi/3w-9xxx.c
4672 @@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
4673 static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
4674 static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
4675 static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
4676 -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
4677
4678 /* Functions */
4679
4680 @@ -1352,11 +1351,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
4681 }
4682
4683 /* Now complete the io */
4684 + scsi_dma_unmap(cmd);
4685 + cmd->scsi_done(cmd);
4686 tw_dev->state[request_id] = TW_S_COMPLETED;
4687 twa_free_request_id(tw_dev, request_id);
4688 tw_dev->posted_request_count--;
4689 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
4690 - twa_unmap_scsi_data(tw_dev, request_id);
4691 }
4692
4693 /* Check for valid status after each drain */
4694 @@ -1414,26 +1413,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
4695 }
4696 } /* End twa_load_sgl() */
4697
4698 -/* This function will perform a pci-dma mapping for a scatter gather list */
4699 -static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
4700 -{
4701 - int use_sg;
4702 - struct scsi_cmnd *cmd = tw_dev->srb[request_id];
4703 -
4704 - use_sg = scsi_dma_map(cmd);
4705 - if (!use_sg)
4706 - return 0;
4707 - else if (use_sg < 0) {
4708 - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
4709 - return 0;
4710 - }
4711 -
4712 - cmd->SCp.phase = TW_PHASE_SGLIST;
4713 - cmd->SCp.have_data_in = use_sg;
4714 -
4715 - return use_sg;
4716 -} /* End twa_map_scsi_sg_data() */
4717 -
4718 /* This function will poll for a response interrupt of a request */
4719 static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
4720 {
4721 @@ -1612,9 +1591,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
4722 (tw_dev->state[i] != TW_S_INITIAL) &&
4723 (tw_dev->state[i] != TW_S_COMPLETED)) {
4724 if (tw_dev->srb[i]) {
4725 - tw_dev->srb[i]->result = (DID_RESET << 16);
4726 - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
4727 - twa_unmap_scsi_data(tw_dev, i);
4728 + struct scsi_cmnd *cmd = tw_dev->srb[i];
4729 +
4730 + cmd->result = (DID_RESET << 16);
4731 + scsi_dma_unmap(cmd);
4732 + cmd->scsi_done(cmd);
4733 }
4734 }
4735 }
4736 @@ -1793,21 +1774,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
4737 /* Save the scsi command for use by the ISR */
4738 tw_dev->srb[request_id] = SCpnt;
4739
4740 - /* Initialize phase to zero */
4741 - SCpnt->SCp.phase = TW_PHASE_INITIAL;
4742 -
4743 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
4744 switch (retval) {
4745 case SCSI_MLQUEUE_HOST_BUSY:
4746 + scsi_dma_unmap(SCpnt);
4747 twa_free_request_id(tw_dev, request_id);
4748 - twa_unmap_scsi_data(tw_dev, request_id);
4749 break;
4750 case 1:
4751 - tw_dev->state[request_id] = TW_S_COMPLETED;
4752 - twa_free_request_id(tw_dev, request_id);
4753 - twa_unmap_scsi_data(tw_dev, request_id);
4754 SCpnt->result = (DID_ERROR << 16);
4755 + scsi_dma_unmap(SCpnt);
4756 done(SCpnt);
4757 + tw_dev->state[request_id] = TW_S_COMPLETED;
4758 + twa_free_request_id(tw_dev, request_id);
4759 retval = 0;
4760 }
4761 out:
4762 @@ -1875,8 +1853,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
4763 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
4764 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
4765 } else {
4766 - sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
4767 - if (sg_count == 0)
4768 + sg_count = scsi_dma_map(srb);
4769 + if (sg_count < 0)
4770 goto out;
4771
4772 scsi_for_each_sg(srb, sg, sg_count, i) {
4773 @@ -1991,15 +1969,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
4774 return(table[index].text);
4775 } /* End twa_string_lookup() */
4776
4777 -/* This function will perform a pci-dma unmap */
4778 -static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
4779 -{
4780 - struct scsi_cmnd *cmd = tw_dev->srb[request_id];
4781 -
4782 - if (cmd->SCp.phase == TW_PHASE_SGLIST)
4783 - scsi_dma_unmap(cmd);
4784 -} /* End twa_unmap_scsi_data() */
4785 -
4786 /* This function gets called when a disk is coming on-line */
4787 static int twa_slave_configure(struct scsi_device *sdev)
4788 {
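
[Annotation] The 3w-9xxx hunks above delete the driver's private SCp.phase bookkeeping and call the midlayer helpers directly: scsi_dma_map() returns 0 when a command carries no data and a negative value on mapping failure, and scsi_dma_unmap() is a no-op for a command without a scatterlist, so the completion paths can unmap unconditionally. The other change is ordering: the command is unmapped before scsi_done() hands it back, since it may be reused immediately afterwards. A sketch of the resulting pattern (illustrative function names):

#include <linux/errno.h>
#include <scsi/scsi_cmnd.h>

static void example_complete(struct scsi_cmnd *cmd, int result)
{
	cmd->result = result;
	scsi_dma_unmap(cmd);	/* no-op if nothing was mapped */
	cmd->scsi_done(cmd);	/* cmd must not be touched after this */
}

static int example_map(struct scsi_cmnd *cmd)
{
	int sg_count = scsi_dma_map(cmd);	/* 0 = no data phase */

	if (sg_count < 0)
		return -EIO;			/* mapping failed */
	return sg_count;			/* mapped SG entries */
}
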
4789 diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
4790 index 040f7214e5b7..0fdc83cfa0e1 100644
4791 --- a/drivers/scsi/3w-9xxx.h
4792 +++ b/drivers/scsi/3w-9xxx.h
4793 @@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
4794 #define TW_CURRENT_DRIVER_BUILD 0
4795 #define TW_CURRENT_DRIVER_BRANCH 0
4796
4797 -/* Phase defines */
4798 -#define TW_PHASE_INITIAL 0
4799 -#define TW_PHASE_SINGLE 1
4800 -#define TW_PHASE_SGLIST 2
4801 -
4802 /* Misc defines */
4803 #define TW_9550SX_DRAIN_COMPLETED 0xFFFF
4804 #define TW_SECTOR_SIZE 512
4805 diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
4806 index 6da6cec9a651..2ee2e543ab73 100644
4807 --- a/drivers/scsi/3w-sas.c
4808 +++ b/drivers/scsi/3w-sas.c
4809 @@ -303,26 +303,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
4810 return 0;
4811 } /* End twl_post_command_packet() */
4812
4813 -/* This function will perform a pci-dma mapping for a scatter gather list */
4814 -static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
4815 -{
4816 - int use_sg;
4817 - struct scsi_cmnd *cmd = tw_dev->srb[request_id];
4818 -
4819 - use_sg = scsi_dma_map(cmd);
4820 - if (!use_sg)
4821 - return 0;
4822 - else if (use_sg < 0) {
4823 - TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
4824 - return 0;
4825 - }
4826 -
4827 - cmd->SCp.phase = TW_PHASE_SGLIST;
4828 - cmd->SCp.have_data_in = use_sg;
4829 -
4830 - return use_sg;
4831 -} /* End twl_map_scsi_sg_data() */
4832 -
4833 /* This function hands scsi cdb's to the firmware */
4834 static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
4835 {
4836 @@ -370,8 +350,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
4837 if (!sglistarg) {
4838 /* Map sglist from scsi layer to cmd packet */
4839 if (scsi_sg_count(srb)) {
4840 - sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
4841 - if (sg_count == 0)
4842 + sg_count = scsi_dma_map(srb);
4843 + if (sg_count <= 0)
4844 goto out;
4845
4846 scsi_for_each_sg(srb, sg, sg_count, i) {
4847 @@ -1115,15 +1095,6 @@ out:
4848 return retval;
4849 } /* End twl_initialize_device_extension() */
4850
4851 -/* This function will perform a pci-dma unmap */
4852 -static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
4853 -{
4854 - struct scsi_cmnd *cmd = tw_dev->srb[request_id];
4855 -
4856 - if (cmd->SCp.phase == TW_PHASE_SGLIST)
4857 - scsi_dma_unmap(cmd);
4858 -} /* End twl_unmap_scsi_data() */
4859 -
4860 /* This function will handle attention interrupts */
4861 static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
4862 {
4863 @@ -1264,11 +1235,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
4864 }
4865
4866 /* Now complete the io */
4867 + scsi_dma_unmap(cmd);
4868 + cmd->scsi_done(cmd);
4869 tw_dev->state[request_id] = TW_S_COMPLETED;
4870 twl_free_request_id(tw_dev, request_id);
4871 tw_dev->posted_request_count--;
4872 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
4873 - twl_unmap_scsi_data(tw_dev, request_id);
4874 }
4875
4876 /* Check for another response interrupt */
4877 @@ -1413,10 +1384,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
4878 if ((tw_dev->state[i] != TW_S_FINISHED) &&
4879 (tw_dev->state[i] != TW_S_INITIAL) &&
4880 (tw_dev->state[i] != TW_S_COMPLETED)) {
4881 - if (tw_dev->srb[i]) {
4882 - tw_dev->srb[i]->result = (DID_RESET << 16);
4883 - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
4884 - twl_unmap_scsi_data(tw_dev, i);
4885 + struct scsi_cmnd *cmd = tw_dev->srb[i];
4886 +
4887 + if (cmd) {
4888 + cmd->result = (DID_RESET << 16);
4889 + scsi_dma_unmap(cmd);
4890 + cmd->scsi_done(cmd);
4891 }
4892 }
4893 }
4894 @@ -1520,9 +1493,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
4895 /* Save the scsi command for use by the ISR */
4896 tw_dev->srb[request_id] = SCpnt;
4897
4898 - /* Initialize phase to zero */
4899 - SCpnt->SCp.phase = TW_PHASE_INITIAL;
4900 -
4901 retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
4902 if (retval) {
4903 tw_dev->state[request_id] = TW_S_COMPLETED;
4904 diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
4905 index d474892701d4..fec6449c7595 100644
4906 --- a/drivers/scsi/3w-sas.h
4907 +++ b/drivers/scsi/3w-sas.h
4908 @@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
4909 #define TW_CURRENT_DRIVER_BUILD 0
4910 #define TW_CURRENT_DRIVER_BRANCH 0
4911
4912 -/* Phase defines */
4913 -#define TW_PHASE_INITIAL 0
4914 -#define TW_PHASE_SGLIST 2
4915 -
4916 /* Misc defines */
4917 #define TW_SECTOR_SIZE 512
4918 #define TW_MAX_UNITS 32
4919 diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
4920 index 752624e6bc00..b327742b95ef 100644
4921 --- a/drivers/scsi/3w-xxxx.c
4922 +++ b/drivers/scsi/3w-xxxx.c
4923 @@ -1284,32 +1284,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
4924 return 0;
4925 } /* End tw_initialize_device_extension() */
4926
4927 -static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
4928 -{
4929 - int use_sg;
4930 -
4931 - dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
4932 -
4933 - use_sg = scsi_dma_map(cmd);
4934 - if (use_sg < 0) {
4935 - printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
4936 - return 0;
4937 - }
4938 -
4939 - cmd->SCp.phase = TW_PHASE_SGLIST;
4940 - cmd->SCp.have_data_in = use_sg;
4941 -
4942 - return use_sg;
4943 -} /* End tw_map_scsi_sg_data() */
4944 -
4945 -static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
4946 -{
4947 - dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
4948 -
4949 - if (cmd->SCp.phase == TW_PHASE_SGLIST)
4950 - scsi_dma_unmap(cmd);
4951 -} /* End tw_unmap_scsi_data() */
4952 -
4953 /* This function will reset a device extension */
4954 static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
4955 {
4956 @@ -1332,8 +1306,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
4957 srb = tw_dev->srb[i];
4958 if (srb != NULL) {
4959 srb->result = (DID_RESET << 16);
4960 - tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
4961 - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
4962 + scsi_dma_unmap(srb);
4963 + srb->scsi_done(srb);
4964 }
4965 }
4966 }
4967 @@ -1780,8 +1754,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
4968 command_packet->byte8.io.lba = lba;
4969 command_packet->byte6.block_count = num_sectors;
4970
4971 - use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
4972 - if (!use_sg)
4973 + use_sg = scsi_dma_map(srb);
4974 + if (use_sg <= 0)
4975 return 1;
4976
4977 scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
4978 @@ -1968,9 +1942,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
4979 /* Save the scsi command for use by the ISR */
4980 tw_dev->srb[request_id] = SCpnt;
4981
4982 - /* Initialize phase to zero */
4983 - SCpnt->SCp.phase = TW_PHASE_INITIAL;
4984 -
4985 switch (*command) {
4986 case READ_10:
4987 case READ_6:
4988 @@ -2198,12 +2169,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
4989
4990 /* Now complete the io */
4991 if ((error != TW_ISR_DONT_COMPLETE)) {
4992 + scsi_dma_unmap(tw_dev->srb[request_id]);
4993 + tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
4994 tw_dev->state[request_id] = TW_S_COMPLETED;
4995 tw_state_request_finish(tw_dev, request_id);
4996 tw_dev->posted_request_count--;
4997 - tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
4998 -
4999 - tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
5000 }
5001 }
5002
5003 diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
5004 index 29b0b84ed69e..6f65e663d393 100644
5005 --- a/drivers/scsi/3w-xxxx.h
5006 +++ b/drivers/scsi/3w-xxxx.h
5007 @@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
5008 #define TW_AEN_SMART_FAIL 0x000F
5009 #define TW_AEN_SBUF_FAIL 0x0024
5010
5011 -/* Phase defines */
5012 -#define TW_PHASE_INITIAL 0
5013 -#define TW_PHASE_SINGLE 1
5014 -#define TW_PHASE_SGLIST 2
5015 -
5016 /* Misc defines */
5017 #define TW_ALIGNMENT_6000 64 /* 64 bytes */
5018 #define TW_ALIGNMENT_7000 4 /* 4 bytes */
5019 diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
5020 index ac52f7c99513..0eb2da8a696f 100644
5021 --- a/drivers/scsi/mvsas/mv_sas.c
5022 +++ b/drivers/scsi/mvsas/mv_sas.c
5023 @@ -441,14 +441,11 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
5024 static int mvs_task_prep_ata(struct mvs_info *mvi,
5025 struct mvs_task_exec_info *tei)
5026 {
5027 - struct sas_ha_struct *sha = mvi->sas;
5028 struct sas_task *task = tei->task;
5029 struct domain_device *dev = task->dev;
5030 struct mvs_device *mvi_dev = dev->lldd_dev;
5031 struct mvs_cmd_hdr *hdr = tei->hdr;
5032 struct asd_sas_port *sas_port = dev->port;
5033 - struct sas_phy *sphy = dev->phy;
5034 - struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
5035 struct mvs_slot_info *slot;
5036 void *buf_prd;
5037 u32 tag = tei->tag, hdr_tag;
5038 @@ -468,7 +465,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
5039 slot->tx = mvi->tx_prod;
5040 del_q = TXQ_MODE_I | tag |
5041 (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
5042 - (MVS_PHY_ID << TXQ_PHY_SHIFT) |
5043 + ((sas_port->phy_mask & TXQ_PHY_MASK) << TXQ_PHY_SHIFT) |
5044 (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
5045 mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
5046
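
[Annotation] The mvsas hunk above stops hard-coding a single phy id into the delivery-queue entry and instead programs the port's whole phy_mask, so a wide port (several phys bound to one SAS port) addresses all of its phys. A sketch of the field packing with illustrative shift/mask values (the real macros live in the driver headers):

#include <linux/types.h>

#define EX_TXQ_PHY_SHIFT	12	/* illustrative, not the driver's */
#define EX_TXQ_PHY_MASK		0xffU

static u32 example_build_del_q(u32 mode, u32 tag, u32 port_phy_mask)
{
	/* one bit per phy: a wide port sets several bits at once */
	return mode | tag |
	       ((port_phy_mask & EX_TXQ_PHY_MASK) << EX_TXQ_PHY_SHIFT);
}
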
5047 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
5048 index dd8c8d690763..7a4d88c91069 100644
5049 --- a/drivers/scsi/sd.c
5050 +++ b/drivers/scsi/sd.c
5051 @@ -3118,6 +3118,7 @@ static void scsi_disk_release(struct device *dev)
5052 ida_remove(&sd_index_ida, sdkp->index);
5053 spin_unlock(&sd_index_lock);
5054
5055 + blk_integrity_unregister(disk);
5056 disk->private_data = NULL;
5057 put_disk(disk);
5058 put_device(&sdkp->device->sdev_gendev);
5059 diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
5060 index 14c7d42a11c2..5c06d292b94c 100644
5061 --- a/drivers/scsi/sd_dif.c
5062 +++ b/drivers/scsi/sd_dif.c
5063 @@ -77,7 +77,7 @@ void sd_dif_config_host(struct scsi_disk *sdkp)
5064
5065 disk->integrity->flags |= BLK_INTEGRITY_DEVICE_CAPABLE;
5066
5067 - if (!sdkp)
5068 + if (!sdkp->ATO)
5069 return;
5070
5071 if (type == SD_DIF_TYPE3_PROTECTION)
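
[Annotation] In the sd_dif hunk above, `!sdkp` could never be true: the pointer was already dereferenced a few lines earlier, so the guard was dead code and the early return never fired. The intended condition is the device's ATO (application-tag owned) capability bit. The general antipattern, for reference:

/* A NULL check placed after a dereference can never fire. */
struct example { int ato; };

static int example_setup(struct example *e)
{
	int cap = e->ato;	/* e dereferenced here ... */

	if (!e)			/* ... so this branch is dead */
		return 0;
	if (!cap)		/* the check that was actually meant */
		return 0;
	return 1;
}
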
5072 diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
5073 index 90af465359d6..4534d9debcdc 100644
5074 --- a/drivers/scsi/storvsc_drv.c
5075 +++ b/drivers/scsi/storvsc_drv.c
5076 @@ -741,21 +741,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
5077 if (bounce_sgl[j].length == PAGE_SIZE) {
5078 /* full..move to next entry */
5079 sg_kunmap_atomic(bounce_addr);
5080 + bounce_addr = 0;
5081 j++;
5082 + }
5083
5084 - /* if we need to use another bounce buffer */
5085 - if (srclen || i != orig_sgl_count - 1)
5086 - bounce_addr = sg_kmap_atomic(bounce_sgl,j);
5087 + /* if we need to use another bounce buffer */
5088 + if (srclen && bounce_addr == 0)
5089 + bounce_addr = sg_kmap_atomic(bounce_sgl, j);
5090
5091 - } else if (srclen == 0 && i == orig_sgl_count - 1) {
5092 - /* unmap the last bounce that is < PAGE_SIZE */
5093 - sg_kunmap_atomic(bounce_addr);
5094 - }
5095 }
5096
5097 sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
5098 }
5099
5100 + if (bounce_addr)
5101 + sg_kunmap_atomic(bounce_addr);
5102 +
5103 local_irq_restore(flags);
5104
5105 return total_copied;
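
[Annotation] The storvsc rewrite above maintains one invariant: `bounce_addr` is non-zero exactly while a bounce page is mapped. A page is unmapped as soon as it fills, the next one is mapped lazily only if data remains, and a single unmap after the loop catches a final partially-filled page, which the old code could leave mapped or unmap at the wrong time depending on loop position. A condensed sketch of that bookkeeping, assuming a generic kmap_atomic()-based copy (the driver uses its own sg_kmap_atomic() helper):

#include <linux/highmem.h>
#include <linux/scatterlist.h>

static void example_copy(struct scatterlist *sgl, size_t len)
{
	unsigned long addr = 0;
	int j = 0;

	while (len) {
		if (!addr)	/* lazy map: only when data remains */
			addr = (unsigned long)kmap_atomic(sg_page(&sgl[j]));

		/* ... copy a chunk, decrement len ... */
		len = 0;	/* placeholder for the real copy step */

		if (sgl[j].length == PAGE_SIZE) {
			kunmap_atomic((void *)addr);	/* page is full */
			addr = 0;
			j++;
		}
	}

	if (addr)		/* sweep up a final, partial page */
		kunmap_atomic((void *)addr);
}
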
5106 diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
5107 index 82269a86fd7a..bf0effb86137 100644
5108 --- a/drivers/spi/spi-imx.c
5109 +++ b/drivers/spi/spi-imx.c
5110 @@ -371,8 +371,6 @@ static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
5111 if (spi_imx->dma_is_inited) {
5112 dma = readl(spi_imx->base + MX51_ECSPI_DMA);
5113
5114 - spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5115 - spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5116 spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
5117 rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
5118 tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
5119 @@ -869,6 +867,8 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
5120 master->max_dma_len = MAX_SDMA_BD_BYTES;
5121 spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
5122 SPI_MASTER_MUST_TX;
5123 + spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5124 + spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
5125 spi_imx->dma_is_inited = 1;
5126
5127 return 0;
5128 diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
5129 index e50039fb1474..409a8c576c7a 100644
5130 --- a/drivers/spi/spidev.c
5131 +++ b/drivers/spi/spidev.c
5132 @@ -246,7 +246,10 @@ static int spidev_message(struct spidev_data *spidev,
5133 k_tmp->len = u_tmp->len;
5134
5135 total += k_tmp->len;
5136 - if (total > bufsiz) {
5137 + /* Check total length of transfers. Also check each
5138 + * transfer length to avoid arithmetic overflow.
5139 + */
5140 + if (total > bufsiz || k_tmp->len > bufsiz) {
5141 status = -EMSGSIZE;
5142 goto done;
5143 }
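
[Annotation] In the spidev hunk above, `total` is an accumulator of user-controlled 32-bit lengths, so checking only `total > bufsiz` is insufficient: one sufficiently large `len` can wrap the sum back under the limit. Bounding each individual transfer first makes the subsequent addition safe. A self-contained sketch (assuming bufsiz < 2^31, so two in-range values cannot wrap a u32):

#include <linux/errno.h>
#include <linux/types.h>

static int example_check(const u32 *len, unsigned int n, u32 bufsiz)
{
	u32 total = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (len[i] > bufsiz)	/* per-transfer bound first ... */
			return -EMSGSIZE;
		total += len[i];	/* ... so this add cannot wrap */
		if (total > bufsiz)	/* aggregate bound */
			return -EMSGSIZE;
	}
	return 0;
}
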
5144 diff --git a/drivers/ssb/Kconfig b/drivers/ssb/Kconfig
5145 index 75b3603906c1..f0d22cdb51cd 100644
5146 --- a/drivers/ssb/Kconfig
5147 +++ b/drivers/ssb/Kconfig
5148 @@ -130,6 +130,7 @@ config SSB_DRIVER_MIPS
5149 bool "SSB Broadcom MIPS core driver"
5150 depends on SSB && MIPS
5151 select SSB_SERIAL
5152 + select SSB_SFLASH
5153 help
5154 Driver for the Sonics Silicon Backplane attached
5155 Broadcom MIPS core.
5156 diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
5157 index 7bdb62bf6b40..f83e00c78051 100644
5158 --- a/drivers/staging/android/sync.c
5159 +++ b/drivers/staging/android/sync.c
5160 @@ -114,7 +114,7 @@ void sync_timeline_signal(struct sync_timeline *obj)
5161 list_for_each_entry_safe(pt, next, &obj->active_list_head,
5162 active_list) {
5163 if (fence_is_signaled_locked(&pt->base))
5164 - list_del(&pt->active_list);
5165 + list_del_init(&pt->active_list);
5166 }
5167
5168 spin_unlock_irqrestore(&obj->child_list_lock, flags);
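
[Annotation] The one-character sync fix above matters because list_del() poisons the entry's next/prev pointers; if teardown later walks or deletes the same entry, it dereferences LIST_POISON and crashes. list_del_init() leaves the entry as a valid empty list instead, so a second removal guarded by list_empty() is a harmless no-op:

#include <linux/list.h>

struct example_item {
	struct list_head node;
};

static void example_remove(struct example_item *it)
{
	/* idempotent: safe to call again after a prior removal */
	if (!list_empty(&it->node))
		list_del_init(&it->node);
}
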
5169 diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
5170 index 5539bd294862..a65fa9f68f5e 100644
5171 --- a/drivers/staging/comedi/drivers/adv_pci1710.c
5172 +++ b/drivers/staging/comedi/drivers/adv_pci1710.c
5173 @@ -456,7 +456,6 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
5174 struct comedi_insn *insn, unsigned int *data)
5175 {
5176 struct pci1710_private *devpriv = dev->private;
5177 - unsigned int chan = CR_CHAN(insn->chanspec);
5178 int ret = 0;
5179 int i;
5180
5181 @@ -478,7 +477,7 @@ static int pci171x_insn_read_ai(struct comedi_device *dev,
5182 break;
5183
5184 val = inw(dev->iobase + PCI171x_AD_DATA);
5185 - ret = pci171x_ai_dropout(dev, s, chan, val);
5186 + ret = pci171x_ai_dropout(dev, s, 0, val);
5187 if (ret)
5188 break;
5189
5190 diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
5191 index 6d1a32097d3c..2dee1757db79 100644
5192 --- a/drivers/staging/panel/panel.c
5193 +++ b/drivers/staging/panel/panel.c
5194 @@ -275,11 +275,11 @@ static unsigned char lcd_bits[LCD_PORTS][LCD_BITS][BIT_STATES];
5195 * LCD types
5196 */
5197 #define LCD_TYPE_NONE 0
5198 -#define LCD_TYPE_OLD 1
5199 -#define LCD_TYPE_KS0074 2
5200 -#define LCD_TYPE_HANTRONIX 3
5201 -#define LCD_TYPE_NEXCOM 4
5202 -#define LCD_TYPE_CUSTOM 5
5203 +#define LCD_TYPE_CUSTOM 1
5204 +#define LCD_TYPE_OLD 2
5205 +#define LCD_TYPE_KS0074 3
5206 +#define LCD_TYPE_HANTRONIX 4
5207 +#define LCD_TYPE_NEXCOM 5
5208
5209 /*
5210 * keypad types
5211 diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
5212 index f018b6a3ffbf..88c9179ab20f 100644
5213 --- a/drivers/target/target_core_file.c
5214 +++ b/drivers/target/target_core_file.c
5215 @@ -263,40 +263,32 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
5216 struct se_device *se_dev = cmd->se_dev;
5217 struct fd_dev *dev = FD_DEV(se_dev);
5218 struct file *prot_fd = dev->fd_prot_file;
5219 - struct scatterlist *sg;
5220 loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
5221 unsigned char *buf;
5222 - u32 prot_size, len, size;
5223 - int rc, ret = 1, i;
5224 + u32 prot_size;
5225 + int rc, ret = 1;
5226
5227 prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
5228 se_dev->prot_length;
5229
5230 if (!is_write) {
5231 - fd_prot->prot_buf = vzalloc(prot_size);
5232 + fd_prot->prot_buf = kzalloc(prot_size, GFP_KERNEL);
5233 if (!fd_prot->prot_buf) {
5234 pr_err("Unable to allocate fd_prot->prot_buf\n");
5235 return -ENOMEM;
5236 }
5237 buf = fd_prot->prot_buf;
5238
5239 - fd_prot->prot_sg_nents = cmd->t_prot_nents;
5240 - fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
5241 - fd_prot->prot_sg_nents, GFP_KERNEL);
5242 + fd_prot->prot_sg_nents = 1;
5243 + fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist),
5244 + GFP_KERNEL);
5245 if (!fd_prot->prot_sg) {
5246 pr_err("Unable to allocate fd_prot->prot_sg\n");
5247 - vfree(fd_prot->prot_buf);
5248 + kfree(fd_prot->prot_buf);
5249 return -ENOMEM;
5250 }
5251 - size = prot_size;
5252 -
5253 - for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
5254 -
5255 - len = min_t(u32, PAGE_SIZE, size);
5256 - sg_set_buf(sg, buf, len);
5257 - size -= len;
5258 - buf += len;
5259 - }
5260 + sg_init_table(fd_prot->prot_sg, fd_prot->prot_sg_nents);
5261 + sg_set_buf(fd_prot->prot_sg, buf, prot_size);
5262 }
5263
5264 if (is_write) {
5265 @@ -317,7 +309,7 @@ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
5266
5267 if (is_write || ret < 0) {
5268 kfree(fd_prot->prot_sg);
5269 - vfree(fd_prot->prot_buf);
5270 + kfree(fd_prot->prot_buf);
5271 }
5272
5273 return ret;
5274 @@ -543,6 +535,56 @@ fd_execute_write_same(struct se_cmd *cmd)
5275 return 0;
5276 }
5277
5278 +static int
5279 +fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
5280 + void *buf, size_t bufsize)
5281 +{
5282 + struct fd_dev *fd_dev = FD_DEV(se_dev);
5283 + struct file *prot_fd = fd_dev->fd_prot_file;
5284 + sector_t prot_length, prot;
5285 + loff_t pos = lba * se_dev->prot_length;
5286 +
5287 + if (!prot_fd) {
5288 + pr_err("Unable to locate fd_dev->fd_prot_file\n");
5289 + return -ENODEV;
5290 + }
5291 +
5292 + prot_length = nolb * se_dev->prot_length;
5293 +
5294 + for (prot = 0; prot < prot_length;) {
5295 + sector_t len = min_t(sector_t, bufsize, prot_length - prot);
5296 + ssize_t ret = kernel_write(prot_fd, buf, len, pos + prot);
5297 +
5298 + if (ret != len) {
5299 + pr_err("vfs_write to prot file failed: %zd\n", ret);
5300 + return ret < 0 ? ret : -ENODEV;
5301 + }
5302 + prot += ret;
5303 + }
5304 +
5305 + return 0;
5306 +}
5307 +
5308 +static int
5309 +fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
5310 +{
5311 + void *buf;
5312 + int rc;
5313 +
5314 + buf = (void *)__get_free_page(GFP_KERNEL);
5315 + if (!buf) {
5316 + pr_err("Unable to allocate FILEIO prot buf\n");
5317 + return -ENOMEM;
5318 + }
5319 + memset(buf, 0xff, PAGE_SIZE);
5320 +
5321 + rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);
5322 +
5323 + free_page((unsigned long)buf);
5324 +
5325 + return rc;
5326 +}
5327 +
5328 static sense_reason_t
5329 fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
5330 {
5331 @@ -550,6 +592,12 @@ fd_do_unmap(struct se_cmd *cmd, void *priv, sector_t lba, sector_t nolb)
5332 struct inode *inode = file->f_mapping->host;
5333 int ret;
5334
5335 + if (cmd->se_dev->dev_attrib.pi_prot_type) {
5336 + ret = fd_do_prot_unmap(cmd, lba, nolb);
5337 + if (ret)
5338 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5339 + }
5340 +
5341 if (S_ISBLK(inode->i_mode)) {
5342 /* The backend is block device, use discard */
5343 struct block_device *bdev = inode->i_bdev;
5344 @@ -652,11 +700,11 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
5345 0, fd_prot.prot_sg, 0);
5346 if (rc) {
5347 kfree(fd_prot.prot_sg);
5348 - vfree(fd_prot.prot_buf);
5349 + kfree(fd_prot.prot_buf);
5350 return rc;
5351 }
5352 kfree(fd_prot.prot_sg);
5353 - vfree(fd_prot.prot_buf);
5354 + kfree(fd_prot.prot_buf);
5355 }
5356 } else {
5357 memset(&fd_prot, 0, sizeof(struct fd_prot));
5358 @@ -672,7 +720,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
5359 0, fd_prot.prot_sg, 0);
5360 if (rc) {
5361 kfree(fd_prot.prot_sg);
5362 - vfree(fd_prot.prot_buf);
5363 + kfree(fd_prot.prot_buf);
5364 return rc;
5365 }
5366 }
5367 @@ -708,7 +756,7 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
5368
5369 if (ret < 0) {
5370 kfree(fd_prot.prot_sg);
5371 - vfree(fd_prot.prot_buf);
5372 + kfree(fd_prot.prot_buf);
5373 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5374 }
5375
5376 @@ -872,48 +920,28 @@ static int fd_init_prot(struct se_device *dev)
5377
5378 static int fd_format_prot(struct se_device *dev)
5379 {
5380 - struct fd_dev *fd_dev = FD_DEV(dev);
5381 - struct file *prot_fd = fd_dev->fd_prot_file;
5382 - sector_t prot_length, prot;
5383 unsigned char *buf;
5384 - loff_t pos = 0;
5385 int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
5386 - int rc, ret = 0, size, len;
5387 + int ret;
5388
5389 if (!dev->dev_attrib.pi_prot_type) {
5390 pr_err("Unable to format_prot while pi_prot_type == 0\n");
5391 return -ENODEV;
5392 }
5393 - if (!prot_fd) {
5394 - pr_err("Unable to locate fd_dev->fd_prot_file\n");
5395 - return -ENODEV;
5396 - }
5397
5398 buf = vzalloc(unit_size);
5399 if (!buf) {
5400 pr_err("Unable to allocate FILEIO prot buf\n");
5401 return -ENOMEM;
5402 }
5403 - prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
5404 - size = prot_length;
5405
5406 pr_debug("Using FILEIO prot_length: %llu\n",
5407 - (unsigned long long)prot_length);
5408 + (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
5409 + dev->prot_length);
5410
5411 memset(buf, 0xff, unit_size);
5412 - for (prot = 0; prot < prot_length; prot += unit_size) {
5413 - len = min(unit_size, size);
5414 - rc = kernel_write(prot_fd, buf, len, pos);
5415 - if (rc != len) {
5416 - pr_err("vfs_write to prot file failed: %d\n", rc);
5417 - ret = -ENODEV;
5418 - goto out;
5419 - }
5420 - pos += len;
5421 - size -= len;
5422 - }
5423 -
5424 -out:
5425 + ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
5426 + buf, unit_size);
5427 vfree(buf);
5428 return ret;
5429 }
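
[Annotation] The target_core_file changes above fold the PROT-area writes for both formatting and UNMAP into one fd_do_prot_fill() helper that streams a pattern buffer in fixed-size chunks, advancing the file offset as it goes. A sketch of that chunked-write loop against the 3.18-era kernel_write() (offset passed by value, bytes written returned):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>

static int example_fill(struct file *f, loff_t pos, u64 length,
			const void *buf, size_t bufsize)
{
	u64 done = 0;

	while (done < length) {
		size_t len = min_t(u64, bufsize, length - done);
		ssize_t ret = kernel_write(f, buf, len, pos + done);

		if (ret != (ssize_t)len)	/* short or failed write */
			return ret < 0 ? (int)ret : -ENODEV;
		done += ret;
	}
	return 0;
}
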
5430 diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
5431 index 565c0da9d99d..fa89c2f105e6 100644
5432 --- a/drivers/target/target_core_sbc.c
5433 +++ b/drivers/target/target_core_sbc.c
5434 @@ -299,7 +299,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
5435 return 0;
5436 }
5437
5438 -static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd)
5439 +static sense_reason_t xdreadwrite_callback(struct se_cmd *cmd, bool success)
5440 {
5441 unsigned char *buf, *addr;
5442 struct scatterlist *sg;
5443 @@ -363,7 +363,7 @@ sbc_execute_rw(struct se_cmd *cmd)
5444 cmd->data_direction);
5445 }
5446
5447 -static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
5448 +static sense_reason_t compare_and_write_post(struct se_cmd *cmd, bool success)
5449 {
5450 struct se_device *dev = cmd->se_dev;
5451
5452 @@ -386,7 +386,7 @@ static sense_reason_t compare_and_write_post(struct se_cmd *cmd)
5453 return TCM_NO_SENSE;
5454 }
5455
5456 -static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
5457 +static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool success)
5458 {
5459 struct se_device *dev = cmd->se_dev;
5460 struct scatterlist *write_sg = NULL, *sg;
5461 @@ -401,11 +401,16 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
5462
5463 /*
5464 * Handle early failure in transport_generic_request_failure(),
5465 - * which will not have taken ->caw_mutex yet..
5466 + * which will not have taken ->caw_sem yet..
5467 */
5468 - if (!cmd->t_data_sg || !cmd->t_bidi_data_sg)
5469 + if (!success && (!cmd->t_data_sg || !cmd->t_bidi_data_sg))
5470 return TCM_NO_SENSE;
5471 /*
5472 + * Handle special case for zero-length COMPARE_AND_WRITE
5473 + */
5474 + if (!cmd->data_length)
5475 + goto out;
5476 + /*
5477 * Immediately exit + release dev->caw_sem if command has already
5478 * been failed with a non-zero SCSI status.
5479 */
5480 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
5481 index 2e0998420254..e786e9104c41 100644
5482 --- a/drivers/target/target_core_transport.c
5483 +++ b/drivers/target/target_core_transport.c
5484 @@ -1615,11 +1615,11 @@ void transport_generic_request_failure(struct se_cmd *cmd,
5485 transport_complete_task_attr(cmd);
5486 /*
5487 * Handle special case for COMPARE_AND_WRITE failure, where the
5488 - * callback is expected to drop the per device ->caw_mutex.
5489 + * callback is expected to drop the per device ->caw_sem.
5490 */
5491 if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
5492 cmd->transport_complete_callback)
5493 - cmd->transport_complete_callback(cmd);
5494 + cmd->transport_complete_callback(cmd, false);
5495
5496 switch (sense_reason) {
5497 case TCM_NON_EXISTENT_LUN:
5498 @@ -1975,8 +1975,12 @@ static void target_complete_ok_work(struct work_struct *work)
5499 if (cmd->transport_complete_callback) {
5500 sense_reason_t rc;
5501
5502 - rc = cmd->transport_complete_callback(cmd);
5503 + rc = cmd->transport_complete_callback(cmd, true);
5504 if (!rc && !(cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE_POST)) {
5505 + if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
5506 + !cmd->data_length)
5507 + goto queue_rsp;
5508 +
5509 return;
5510 } else if (rc) {
5511 ret = transport_send_check_condition_and_sense(cmd,
5512 @@ -1990,6 +1994,7 @@ static void target_complete_ok_work(struct work_struct *work)
5513 }
5514 }
5515
5516 +queue_rsp:
5517 switch (cmd->data_direction) {
5518 case DMA_FROM_DEVICE:
5519 spin_lock(&cmd->se_lun->lun_sep_lock);
5520 @@ -2094,6 +2099,16 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
5521 static inline void transport_free_pages(struct se_cmd *cmd)
5522 {
5523 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
5524 + /*
5525 + * Release special case READ buffer payload required for
5526 + * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
5527 + */
5528 + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
5529 + transport_free_sgl(cmd->t_bidi_data_sg,
5530 + cmd->t_bidi_data_nents);
5531 + cmd->t_bidi_data_sg = NULL;
5532 + cmd->t_bidi_data_nents = 0;
5533 + }
5534 transport_reset_sgl_orig(cmd);
5535 return;
5536 }
5537 @@ -2246,6 +2261,7 @@ sense_reason_t
5538 transport_generic_new_cmd(struct se_cmd *cmd)
5539 {
5540 int ret = 0;
5541 + bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
5542
5543 /*
5544 * Determine is the TCM fabric module has already allocated physical
5545 @@ -2254,7 +2270,6 @@ transport_generic_new_cmd(struct se_cmd *cmd)
5546 */
5547 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
5548 cmd->data_length) {
5549 - bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
5550
5551 if ((cmd->se_cmd_flags & SCF_BIDI) ||
5552 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
5553 @@ -2285,6 +2300,20 @@ transport_generic_new_cmd(struct se_cmd *cmd)
5554 cmd->data_length, zero_flag);
5555 if (ret < 0)
5556 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5557 + } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
5558 + cmd->data_length) {
5559 + /*
5560 + * Special case for COMPARE_AND_WRITE with fabrics
5561 + * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
5562 + */
5563 + u32 caw_length = cmd->t_task_nolb *
5564 + cmd->se_dev->dev_attrib.block_size;
5565 +
5566 + ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
5567 + &cmd->t_bidi_data_nents,
5568 + caw_length, zero_flag);
5569 + if (ret < 0)
5570 + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
5571 }
5572 /*
5573 * If this command is not a write we can execute it right here,
5574 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
5575 index 6ee5c6cefac0..11300f7b49cb 100644
5576 --- a/drivers/tty/serial/atmel_serial.c
5577 +++ b/drivers/tty/serial/atmel_serial.c
5578 @@ -861,6 +861,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
5579 config.direction = DMA_MEM_TO_DEV;
5580 config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
5581 config.dst_addr = port->mapbase + ATMEL_US_THR;
5582 + config.dst_maxburst = 1;
5583
5584 ret = dmaengine_device_control(atmel_port->chan_tx,
5585 DMA_SLAVE_CONFIG,
5586 @@ -1025,6 +1026,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
5587 config.direction = DMA_DEV_TO_MEM;
5588 config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
5589 config.src_addr = port->mapbase + ATMEL_US_RHR;
5590 + config.src_maxburst = 1;
5591
5592 ret = dmaengine_device_control(atmel_port->chan_rx,
5593 DMA_SLAVE_CONFIG,
5594 diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
5595 index bf355050eab6..2e4b545f1823 100644
5596 --- a/drivers/tty/serial/of_serial.c
5597 +++ b/drivers/tty/serial/of_serial.c
5598 @@ -262,7 +262,6 @@ static struct of_device_id of_platform_serial_table[] = {
5599 { .compatible = "ibm,qpace-nwp-serial",
5600 .data = (void *)PORT_NWPSERIAL, },
5601 #endif
5602 - { .type = "serial", .data = (void *)PORT_UNKNOWN, },
5603 { /* end of list */ },
5604 };
5605
5606 diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
5607 index 189f52e3111f..a0099a7f60d4 100644
5608 --- a/drivers/tty/serial/uartlite.c
5609 +++ b/drivers/tty/serial/uartlite.c
5610 @@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
5611
5612 static int ulite_probe(struct platform_device *pdev)
5613 {
5614 - struct resource *res, *res2;
5615 + struct resource *res;
5616 + int irq;
5617 int id = pdev->id;
5618 #ifdef CONFIG_OF
5619 const __be32 *prop;
5620 @@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
5621 if (!res)
5622 return -ENODEV;
5623
5624 - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
5625 - if (!res2)
5626 - return -ENODEV;
5627 + irq = platform_get_irq(pdev, 0);
5628 + if (irq <= 0)
5629 + return -ENXIO;
5630
5631 - return ulite_assign(&pdev->dev, id, res->start, res2->start);
5632 + return ulite_assign(&pdev->dev, id, res->start, irq);
5633 }
5634
5635 static int ulite_remove(struct platform_device *pdev)
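
[Annotation] The uartlite conversion above (and the xilinx_uartps one below) from platform_get_resource(IORESOURCE_IRQ, ...) to platform_get_irq() is the preferred pattern: the helper performs any interrupt-domain translation needed to yield a usable virq number, whereas the raw resource's start value need not be one. The sketch:

#include <linux/errno.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq <= 0)		/* not wired up, or translation failed */
		return -ENXIO;

	/* ... pass irq to request_irq()/the uart core as usual ... */
	return 0;
}
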
5636 diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
5637 index 200c1af2141b..fabde0e878e0 100644
5638 --- a/drivers/tty/serial/xilinx_uartps.c
5639 +++ b/drivers/tty/serial/xilinx_uartps.c
5640 @@ -1303,9 +1303,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
5641 */
5642 static int cdns_uart_probe(struct platform_device *pdev)
5643 {
5644 - int rc, id;
5645 + int rc, id, irq;
5646 struct uart_port *port;
5647 - struct resource *res, *res2;
5648 + struct resource *res;
5649 struct cdns_uart *cdns_uart_data;
5650
5651 cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
5652 @@ -1352,9 +1352,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
5653 goto err_out_clk_disable;
5654 }
5655
5656 - res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
5657 - if (!res2) {
5658 - rc = -ENODEV;
5659 + irq = platform_get_irq(pdev, 0);
5660 + if (irq <= 0) {
5661 + rc = -ENXIO;
5662 goto err_out_clk_disable;
5663 }
5664
5665 @@ -1383,7 +1383,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
5666 * and triggers invocation of the config_port() entry point.
5667 */
5668 port->mapbase = res->start;
5669 - port->irq = res2->start;
5670 + port->irq = irq;
5671 port->dev = &pdev->dev;
5672 port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
5673 port->private_data = cdns_uart_data;
5674 diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
5675 index caaabc58021e..34a52cd7bfb7 100644
5676 --- a/drivers/usb/chipidea/otg_fsm.c
5677 +++ b/drivers/usb/chipidea/otg_fsm.c
5678 @@ -537,7 +537,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
5679 {
5680 struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
5681
5682 - mutex_unlock(&fsm->lock);
5683 if (on) {
5684 ci_role_stop(ci);
5685 ci_role_start(ci, CI_ROLE_HOST);
5686 @@ -546,7 +545,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
5687 hw_device_reset(ci, USBMODE_CM_DC);
5688 ci_role_start(ci, CI_ROLE_GADGET);
5689 }
5690 - mutex_lock(&fsm->lock);
5691 return 0;
5692 }
5693
5694 @@ -554,12 +552,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
5695 {
5696 struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
5697
5698 - mutex_unlock(&fsm->lock);
5699 if (on)
5700 usb_gadget_vbus_connect(&ci->gadget);
5701 else
5702 usb_gadget_vbus_disconnect(&ci->gadget);
5703 - mutex_lock(&fsm->lock);
5704
5705 return 0;
5706 }
5707 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
5708 index 64d9c3daa856..12d37aedc66f 100644
5709 --- a/drivers/usb/class/cdc-acm.c
5710 +++ b/drivers/usb/class/cdc-acm.c
5711 @@ -1091,6 +1091,7 @@ static int acm_probe(struct usb_interface *intf,
5712 unsigned long quirks;
5713 int num_rx_buf;
5714 int i;
5715 + unsigned int elength = 0;
5716 int combined_interfaces = 0;
5717 struct device *tty_dev;
5718 int rv = -ENOMEM;
5719 @@ -1132,13 +1133,22 @@ static int acm_probe(struct usb_interface *intf,
5720 }
5721
5722 while (buflen > 0) {
5723 + elength = buffer[0];
5724 + if (!elength) {
5725 + dev_err(&intf->dev, "skipping garbage byte\n");
5726 + elength = 1;
5727 + goto next_desc;
5728 + }
5729 if (buffer[1] != USB_DT_CS_INTERFACE) {
5730 dev_err(&intf->dev, "skipping garbage\n");
5731 goto next_desc;
5732 }
5733 + elength = buffer[0];
5734
5735 switch (buffer[2]) {
5736 case USB_CDC_UNION_TYPE: /* we've found it */
5737 + if (elength < sizeof(struct usb_cdc_union_desc))
5738 + goto next_desc;
5739 if (union_header) {
5740 dev_err(&intf->dev, "More than one "
5741 "union descriptor, skipping ...\n");
5742 @@ -1147,31 +1157,38 @@ static int acm_probe(struct usb_interface *intf,
5743 union_header = (struct usb_cdc_union_desc *)buffer;
5744 break;
5745 case USB_CDC_COUNTRY_TYPE: /* export through sysfs*/
5746 + if (elength < sizeof(struct usb_cdc_country_functional_desc))
5747 + goto next_desc;
5748 cfd = (struct usb_cdc_country_functional_desc *)buffer;
5749 break;
5750 case USB_CDC_HEADER_TYPE: /* maybe check version */
5751 break; /* for now we ignore it */
5752 case USB_CDC_ACM_TYPE:
5753 + if (elength < 4)
5754 + goto next_desc;
5755 ac_management_function = buffer[3];
5756 break;
5757 case USB_CDC_CALL_MANAGEMENT_TYPE:
5758 + if (elength < 5)
5759 + goto next_desc;
5760 call_management_function = buffer[3];
5761 call_interface_num = buffer[4];
5762 if ((quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
5763 dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
5764 break;
5765 default:
5766 - /* there are LOTS more CDC descriptors that
5767 + /*
5768 + * there are LOTS more CDC descriptors that
5769 * could legitimately be found here.
5770 */
5771 dev_dbg(&intf->dev, "Ignoring descriptor: "
5772 - "type %02x, length %d\n",
5773 - buffer[2], buffer[0]);
5774 + "type %02x, length %u\n",
5775 + buffer[2], elength);
5776 break;
5777 }
5778 next_desc:
5779 - buflen -= buffer[0];
5780 - buffer += buffer[0];
5781 + buflen -= elength;
5782 + buffer += elength;
5783 }
5784
5785 if (!union_header) {
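
[Annotation] The cdc-acm hunks above harden the walk over class-specific descriptors: the length byte is read once into `elength`, a zero length is treated as a one-byte skip (otherwise `buflen -= buffer[0]` never advances and the loop spins forever), and each functional descriptor is length-checked before the buffer is cast to its struct. A generic sketch of such a walk; the truncation bound in the middle is an extra defensive check, not part of the hunk above:

#include <linux/types.h>

static void example_walk(const u8 *buf, int buflen)
{
	while (buflen > 0) {
		u8 elength = buf[0];

		if (elength == 0) {
			elength = 1;	/* skip the garbage byte, advance */
		} else if (elength > buflen) {
			break;		/* descriptor claims more than we have */
		} else {
			/* elength >= 1: buf[0] is valid; verify elength
			 * before reading buf[1]/buf[2] or casting to a
			 * functional-descriptor struct */
		}

		buflen -= elength;
		buf += elength;
	}
}
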
5786 diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
5787 index a051a7a2b1bd..a81f9dd7ee97 100644
5788 --- a/drivers/usb/class/cdc-wdm.c
5789 +++ b/drivers/usb/class/cdc-wdm.c
5790 @@ -245,7 +245,7 @@ static void wdm_int_callback(struct urb *urb)
5791 case USB_CDC_NOTIFY_RESPONSE_AVAILABLE:
5792 dev_dbg(&desc->intf->dev,
5793 "NOTIFY_RESPONSE_AVAILABLE received: index %d len %d",
5794 - dr->wIndex, dr->wLength);
5795 + le16_to_cpu(dr->wIndex), le16_to_cpu(dr->wLength));
5796 break;
5797
5798 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
5799 @@ -262,7 +262,9 @@ static void wdm_int_callback(struct urb *urb)
5800 clear_bit(WDM_POLL_RUNNING, &desc->flags);
5801 dev_err(&desc->intf->dev,
5802 "unknown notification %d received: index %d len %d\n",
5803 - dr->bNotificationType, dr->wIndex, dr->wLength);
5804 + dr->bNotificationType,
5805 + le16_to_cpu(dr->wIndex),
5806 + le16_to_cpu(dr->wLength));
5807 goto exit;
5808 }
5809
5810 @@ -408,7 +410,7 @@ static ssize_t wdm_write
5811 USB_RECIP_INTERFACE);
5812 req->bRequest = USB_CDC_SEND_ENCAPSULATED_COMMAND;
5813 req->wValue = 0;
5814 - req->wIndex = desc->inum;
5815 + req->wIndex = desc->inum; /* already converted */
5816 req->wLength = cpu_to_le16(count);
5817 set_bit(WDM_IN_USE, &desc->flags);
5818 desc->outbuf = buf;
5819 @@ -422,7 +424,7 @@ static ssize_t wdm_write
5820 rv = usb_translate_errors(rv);
5821 } else {
5822 dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d",
5823 - req->wIndex);
5824 + le16_to_cpu(req->wIndex));
5825 }
5826 out:
5827 usb_autopm_put_interface(desc->intf);
5828 @@ -820,7 +822,7 @@ static int wdm_create(struct usb_interface *intf, struct usb_endpoint_descriptor
5829 desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE);
5830 desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE;
5831 desc->irq->wValue = 0;
5832 - desc->irq->wIndex = desc->inum;
5833 + desc->irq->wIndex = desc->inum; /* already converted */
5834 desc->irq->wLength = cpu_to_le16(desc->wMaxCommand);
5835
5836 usb_fill_control_urb(
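
[Annotation] The cdc-wdm hunks above fix endianness handling: wIndex/wValue/wLength in struct usb_ctrlrequest are little-endian wire-format fields (typed __le16), so they must pass through le16_to_cpu() before being printed or compared, while a value stored already-converted (desc->inum here, per the added comments) must not be converted twice. On little-endian hosts the bug is invisible; on big-endian ones the raw field reads byte-swapped:

#include <asm/byteorder.h>
#include <linux/usb/ch9.h>

static u16 example_wIndex(const struct usb_ctrlrequest *dr)
{
	/* convert exactly once, at the CPU/wire boundary */
	return le16_to_cpu(dr->wIndex);
}
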
5837 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
5838 index 2246954d7df3..85e03cb0c418 100644
5839 --- a/drivers/usb/core/hub.c
5840 +++ b/drivers/usb/core/hub.c
5841 @@ -3394,10 +3394,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
5842 if (status) {
5843 dev_dbg(&port_dev->dev, "can't resume, status %d\n", status);
5844 } else {
5845 - /* drive resume for at least 20 msec */
5846 + /* drive resume for USB_RESUME_TIMEOUT msec */
5847 dev_dbg(&udev->dev, "usb %sresume\n",
5848 (PMSG_IS_AUTO(msg) ? "auto-" : ""));
5849 - msleep(25);
5850 + msleep(USB_RESUME_TIMEOUT);
5851
5852 /* Virtual root hubs can trigger on GET_PORT_STATUS to
5853 * stop resume signaling. Then finish the resume
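
[Annotation] This hub.c hunk is the first of a long series in this patch (EHCI, FOTG210, FUSBH200, ISP116x, OXU210HP, R8A66597, SL811, UHCI, xHCI and MUSB below) replacing scattered 20/25/50 ms resume waits with a single USB_RESUME_TIMEOUT constant, assumed to be defined in include/linux/usb.h elsewhere in this patch. The USB 2.0 spec requires resume signalling to be driven for at least 20 ms (TDRSMDN), and some devices need more margin than the old hard-coded values gave; every host controller then arms its resume deadline the same way:

#include <linux/jiffies.h>
#include <linux/usb.h>

static unsigned long example_resume_deadline(void)
{
	/* one spec-derived timeout instead of per-driver magic numbers */
	return jiffies + msecs_to_jiffies(USB_RESUME_TIMEOUT);
}
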
5854 diff --git a/drivers/usb/gadget/legacy/printer.c b/drivers/usb/gadget/legacy/printer.c
5855 index 6474081dcbaf..c9c6dae75c13 100644
5856 --- a/drivers/usb/gadget/legacy/printer.c
5857 +++ b/drivers/usb/gadget/legacy/printer.c
5858 @@ -980,6 +980,15 @@ unknown:
5859 break;
5860 }
5861 /* host either stalls (value < 0) or reports success */
5862 + if (value >= 0) {
5863 + req->length = value;
5864 + req->zero = value < wLength;
5865 + value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
5866 + if (value < 0) {
5867 + ERROR(dev, "%s:%d Error!\n", __func__, __LINE__);
5868 + req->status = 0;
5869 + }
5870 + }
5871 return value;
5872 }
5873
5874 diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
5875 index 15feaf924b71..3df32fa8c8ae 100644
5876 --- a/drivers/usb/host/ehci-hcd.c
5877 +++ b/drivers/usb/host/ehci-hcd.c
5878 @@ -787,12 +787,12 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
5879 ehci->reset_done[i] == 0))
5880 continue;
5881
5882 - /* start 20 msec resume signaling from this port,
5883 - * and make hub_wq collect PORT_STAT_C_SUSPEND to
5884 - * stop that signaling. Use 5 ms extra for safety,
5885 - * like usb_port_resume() does.
5886 + /* start USB_RESUME_TIMEOUT msec resume signaling from
5887 + * this port, and make hub_wq collect
5888 + * PORT_STAT_C_SUSPEND to stop that signaling.
5889 */
5890 - ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
5891 + ehci->reset_done[i] = jiffies +
5892 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
5893 set_bit(i, &ehci->resuming_ports);
5894 ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
5895 usb_hcd_start_port_resume(&hcd->self, i);
5896 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
5897 index 5728829cf6ef..ecd5d6a55ca3 100644
5898 --- a/drivers/usb/host/ehci-hub.c
5899 +++ b/drivers/usb/host/ehci-hub.c
5900 @@ -473,10 +473,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
5901 ehci_writel(ehci, temp, &ehci->regs->port_status [i]);
5902 }
5903
5904 - /* msleep for 20ms only if code is trying to resume port */
5905 + /*
5906 + * msleep for USB_RESUME_TIMEOUT ms only if code is trying to resume
5907 + * port
5908 + */
5909 if (resume_needed) {
5910 spin_unlock_irq(&ehci->lock);
5911 - msleep(20);
5912 + msleep(USB_RESUME_TIMEOUT);
5913 spin_lock_irq(&ehci->lock);
5914 if (ehci->shutdown)
5915 goto shutdown;
5916 @@ -944,7 +947,7 @@ int ehci_hub_control(
5917 temp &= ~PORT_WAKE_BITS;
5918 ehci_writel(ehci, temp | PORT_RESUME, status_reg);
5919 ehci->reset_done[wIndex] = jiffies
5920 - + msecs_to_jiffies(20);
5921 + + msecs_to_jiffies(USB_RESUME_TIMEOUT);
5922 set_bit(wIndex, &ehci->resuming_ports);
5923 usb_hcd_start_port_resume(&hcd->self, wIndex);
5924 break;
5925 diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
5926 index 3de1278677d0..a2fbb0b3db31 100644
5927 --- a/drivers/usb/host/fotg210-hcd.c
5928 +++ b/drivers/usb/host/fotg210-hcd.c
5929 @@ -1595,7 +1595,7 @@ static int fotg210_hub_control(
5930 /* resume signaling for 20 msec */
5931 fotg210_writel(fotg210, temp | PORT_RESUME, status_reg);
5932 fotg210->reset_done[wIndex] = jiffies
5933 - + msecs_to_jiffies(20);
5934 + + msecs_to_jiffies(USB_RESUME_TIMEOUT);
5935 break;
5936 case USB_PORT_FEAT_C_SUSPEND:
5937 clear_bit(wIndex, &fotg210->port_c_suspend);
5938 diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
5939 index abe42f31559f..50fa8f2eabe3 100644
5940 --- a/drivers/usb/host/fusbh200-hcd.c
5941 +++ b/drivers/usb/host/fusbh200-hcd.c
5942 @@ -1550,10 +1550,9 @@ static int fusbh200_hub_control (
5943 if ((temp & PORT_PE) == 0)
5944 goto error;
5945
5946 - /* resume signaling for 20 msec */
5947 fusbh200_writel(fusbh200, temp | PORT_RESUME, status_reg);
5948 fusbh200->reset_done[wIndex] = jiffies
5949 - + msecs_to_jiffies(20);
5950 + + msecs_to_jiffies(USB_RESUME_TIMEOUT);
5951 break;
5952 case USB_PORT_FEAT_C_SUSPEND:
5953 clear_bit(wIndex, &fusbh200->port_c_suspend);
5954 diff --git a/drivers/usb/host/isp116x-hcd.c b/drivers/usb/host/isp116x-hcd.c
5955 index 240e792c81a7..b62298fe0be8 100644
5956 --- a/drivers/usb/host/isp116x-hcd.c
5957 +++ b/drivers/usb/host/isp116x-hcd.c
5958 @@ -1487,7 +1487,7 @@ static int isp116x_bus_resume(struct usb_hcd *hcd)
5959 spin_unlock_irq(&isp116x->lock);
5960
5961 hcd->state = HC_STATE_RESUMING;
5962 - msleep(20);
5963 + msleep(USB_RESUME_TIMEOUT);
5964
5965 /* Go operational */
5966 spin_lock_irq(&isp116x->lock);
5967 diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
5968 index 4fe79a2d71a9..c3d4074309f8 100644
5969 --- a/drivers/usb/host/oxu210hp-hcd.c
5970 +++ b/drivers/usb/host/oxu210hp-hcd.c
5971 @@ -2500,11 +2500,12 @@ static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
5972 || oxu->reset_done[i] != 0)
5973 continue;
5974
5975 - /* start 20 msec resume signaling from this port,
5976 - * and make hub_wq collect PORT_STAT_C_SUSPEND to
5977 + /* start USB_RESUME_TIMEOUT resume signaling from this
5978 + * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
5979 * stop that signaling.
5980 */
5981 - oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
5982 + oxu->reset_done[i] = jiffies +
5983 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
5984 oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
5985 mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
5986 }
5987 diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
5988 index 110b4b9ebeaa..f130bb2f7bbe 100644
5989 --- a/drivers/usb/host/r8a66597-hcd.c
5990 +++ b/drivers/usb/host/r8a66597-hcd.c
5991 @@ -2300,7 +2300,7 @@ static int r8a66597_bus_resume(struct usb_hcd *hcd)
5992 rh->port &= ~USB_PORT_STAT_SUSPEND;
5993 rh->port |= USB_PORT_STAT_C_SUSPEND << 16;
5994 r8a66597_mdfy(r8a66597, RESUME, RESUME | UACT, dvstctr_reg);
5995 - msleep(50);
5996 + msleep(USB_RESUME_TIMEOUT);
5997 r8a66597_mdfy(r8a66597, UACT, RESUME | UACT, dvstctr_reg);
5998 }
5999
6000 diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
6001 index ad0c348e68e9..17f97f999c38 100644
6002 --- a/drivers/usb/host/sl811-hcd.c
6003 +++ b/drivers/usb/host/sl811-hcd.c
6004 @@ -1259,7 +1259,7 @@ sl811h_hub_control(
6005 sl811_write(sl811, SL11H_CTLREG1, sl811->ctrl1);
6006
6007 mod_timer(&sl811->timer, jiffies
6008 - + msecs_to_jiffies(20));
6009 + + msecs_to_jiffies(USB_RESUME_TIMEOUT));
6010 break;
6011 case USB_PORT_FEAT_POWER:
6012 port_power(sl811, 0);
6013 diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
6014 index 93e17b12fb33..98c66d88ebde 100644
6015 --- a/drivers/usb/host/uhci-hub.c
6016 +++ b/drivers/usb/host/uhci-hub.c
6017 @@ -165,7 +165,7 @@ static void uhci_check_ports(struct uhci_hcd *uhci)
6018 /* Port received a wakeup request */
6019 set_bit(port, &uhci->resuming_ports);
6020 uhci->ports_timeout = jiffies +
6021 - msecs_to_jiffies(25);
6022 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
6023 usb_hcd_start_port_resume(
6024 &uhci_to_hcd(uhci)->self, port);
6025
6026 @@ -337,7 +337,8 @@ static int uhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
6027 uhci_finish_suspend(uhci, port, port_addr);
6028
6029 /* USB v2.0 7.1.7.5 */
6030 - uhci->ports_timeout = jiffies + msecs_to_jiffies(50);
6031 + uhci->ports_timeout = jiffies +
6032 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
6033 break;
6034 case USB_PORT_FEAT_POWER:
6035 /* UHCI has no power switching */
6036 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
6037 index 338f19cc0973..3307e161c100 100644
6038 --- a/drivers/usb/host/xhci-ring.c
6039 +++ b/drivers/usb/host/xhci-ring.c
6040 @@ -1576,7 +1576,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
6041 } else {
6042 xhci_dbg(xhci, "resume HS port %d\n", port_id);
6043 bus_state->resume_done[faked_port_index] = jiffies +
6044 - msecs_to_jiffies(20);
6045 + msecs_to_jiffies(USB_RESUME_TIMEOUT);
6046 set_bit(faked_port_index, &bus_state->resuming_ports);
6047 mod_timer(&hcd->rh_timer,
6048 bus_state->resume_done[faked_port_index]);
6049 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
6050 index b841ee0bff06..6d63efce3706 100644
6051 --- a/drivers/usb/musb/musb_core.c
6052 +++ b/drivers/usb/musb/musb_core.c
6053 @@ -99,6 +99,7 @@
6054 #include <linux/platform_device.h>
6055 #include <linux/io.h>
6056 #include <linux/dma-mapping.h>
6057 +#include <linux/usb.h>
6058
6059 #include "musb_core.h"
6060
6061 @@ -480,7 +481,7 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
6062 + msecs_to_jiffies(20);
6063 schedule_delayed_work(
6064 &musb->finish_resume_work,
6065 - msecs_to_jiffies(20));
6066 + msecs_to_jiffies(USB_RESUME_TIMEOUT));
6067
6068 musb->xceiv->state = OTG_STATE_A_HOST;
6069 musb->is_active = 1;
6070 @@ -1521,16 +1522,30 @@ irqreturn_t musb_interrupt(struct musb *musb)
6071 is_host_active(musb) ? "host" : "peripheral",
6072 musb->int_usb, musb->int_tx, musb->int_rx);
6073
6074 - /* the core can interrupt us for multiple reasons; docs have
6075 - * a generic interrupt flowchart to follow
6076 + /**
6077 + * According to Mentor Graphics' documentation, flowchart on page 98,
6078 + * IRQ should be handled as follows:
6079 + *
6080 + * . Resume IRQ
6081 + * . Session Request IRQ
6082 + * . VBUS Error IRQ
6083 + * . Suspend IRQ
6084 + * . Connect IRQ
6085 + * . Disconnect IRQ
6086 + * . Reset/Babble IRQ
6087 + * . SOF IRQ (we're not using this one)
6088 + * . Endpoint 0 IRQ
6089 + * . TX Endpoints
6090 + * . RX Endpoints
6091 + *
6092 + * We will be following that flowchart in order to avoid any problems
6093 + * that might arise with internal Finite State Machine.
6094 */
6095 +
6096 if (musb->int_usb)
6097 retval |= musb_stage0_irq(musb, musb->int_usb,
6098 devctl);
6099
6100 - /* "stage 1" is handling endpoint irqs */
6101 -
6102 - /* handle endpoint 0 first */
6103 if (musb->int_tx & 1) {
6104 if (is_host_active(musb))
6105 retval |= musb_h_ep0_irq(musb);
6106 @@ -1538,37 +1553,31 @@ irqreturn_t musb_interrupt(struct musb *musb)
6107 retval |= musb_g_ep0_irq(musb);
6108 }
6109
6110 - /* RX on endpoints 1-15 */
6111 - reg = musb->int_rx >> 1;
6112 + reg = musb->int_tx >> 1;
6113 ep_num = 1;
6114 while (reg) {
6115 if (reg & 1) {
6116 - /* musb_ep_select(musb->mregs, ep_num); */
6117 - /* REVISIT just retval = ep->rx_irq(...) */
6118 retval = IRQ_HANDLED;
6119 if (is_host_active(musb))
6120 - musb_host_rx(musb, ep_num);
6121 + musb_host_tx(musb, ep_num);
6122 else
6123 - musb_g_rx(musb, ep_num);
6124 + musb_g_tx(musb, ep_num);
6125 }
6126 -
6127 reg >>= 1;
6128 ep_num++;
6129 }
6130
6131 - /* TX on endpoints 1-15 */
6132 - reg = musb->int_tx >> 1;
6133 + reg = musb->int_rx >> 1;
6134 ep_num = 1;
6135 while (reg) {
6136 if (reg & 1) {
6137 - /* musb_ep_select(musb->mregs, ep_num); */
6138 - /* REVISIT just retval |= ep->tx_irq(...) */
6139 retval = IRQ_HANDLED;
6140 if (is_host_active(musb))
6141 - musb_host_tx(musb, ep_num);
6142 + musb_host_rx(musb, ep_num);
6143 else
6144 - musb_g_tx(musb, ep_num);
6145 + musb_g_rx(musb, ep_num);
6146 }
6147 +
6148 reg >>= 1;
6149 ep_num++;
6150 }
6151 diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
6152 index e2d2d8c9891b..0241a3a0d63e 100644
6153 --- a/drivers/usb/musb/musb_virthub.c
6154 +++ b/drivers/usb/musb/musb_virthub.c
6155 @@ -136,7 +136,7 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
6156 /* later, GetPortStatus will stop RESUME signaling */
6157 musb->port1_status |= MUSB_PORT_STAT_RESUME;
6158 schedule_delayed_work(&musb->finish_resume_work,
6159 - msecs_to_jiffies(20));
6160 + msecs_to_jiffies(USB_RESUME_TIMEOUT));
6161 }
6162 }
6163
6164 diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
6165 index 045cd309367a..7e8898d61dcd 100644
6166 --- a/drivers/usb/phy/phy.c
6167 +++ b/drivers/usb/phy/phy.c
6168 @@ -78,7 +78,9 @@ static void devm_usb_phy_release(struct device *dev, void *res)
6169
6170 static int devm_usb_phy_match(struct device *dev, void *res, void *match_data)
6171 {
6172 - return res == match_data;
6173 + struct usb_phy **phy = res;
6174 +
6175 + return *phy == match_data;
6176 }
6177
6178 /**
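
[Annotation] The phy.c fix above is the classic devres-match gotcha: the match callback's `res` argument points at the resource payload that devres_alloc() returned. devm_usb_phy_get() stores a `struct usb_phy *` in that payload, so `res` is effectively a `struct usb_phy **` and must be dereferenced before comparing; comparing `res` itself against the phy pointer never matches, so the managed resource could never be found and released. The general shape:

#include <linux/device.h>

/* dr_match_t: int (*)(struct device *dev, void *res, void *match_data) */
static int example_match(struct device *dev, void *res, void *match_data)
{
	void **ptr = res;		/* res points at the stored pointer */

	return *ptr == match_data;	/* not: res == match_data */
}
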
6179 diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
6180 index 8a6f371ed6e7..a451903a6857 100644
6181 --- a/drivers/usb/storage/uas-detect.h
6182 +++ b/drivers/usb/storage/uas-detect.h
6183 @@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
6184 }
6185
6186 static int uas_use_uas_driver(struct usb_interface *intf,
6187 - const struct usb_device_id *id)
6188 + const struct usb_device_id *id,
6189 + unsigned long *flags_ret)
6190 {
6191 struct usb_host_endpoint *eps[4] = { };
6192 struct usb_device *udev = interface_to_usbdev(intf);
6193 @@ -80,6 +81,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
6194 flags |= US_FL_IGNORE_UAS;
6195 } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
6196 flags |= US_FL_IGNORE_UAS;
6197 + } else {
6198 + /* ASM1053, these have issues with large transfers */
6199 + flags |= US_FL_MAX_SECTORS_240;
6200 }
6201 }
6202
6203 @@ -109,5 +113,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
6204 return 0;
6205 }
6206
6207 + if (flags_ret)
6208 + *flags_ret = flags;
6209 +
6210 return 1;
6211 }
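
uas_use_uas_driver() now computes the quirk flags once at detection time and returns them through an optional out-pointer, so uas_probe() no longer re-derives them. A small sketch of that out-parameter pattern, with illustrative names and flag values (not the real US_FL_* constants):

    #include <stdio.h>

    #define FL_MAX_SECTORS_240 0x1

    static int use_driver(int streams, unsigned long *flags_ret)
    {
        unsigned long flags = 0;

        if (streams == 32)          /* ASM1053-style device: cap transfers */
            flags |= FL_MAX_SECTORS_240;

        if (flags_ret)              /* caller may pass NULL when it only */
            *flags_ret = flags;     /* wants the yes/no answer           */
        return 1;
    }

    int main(void)
    {
        unsigned long flags;

        if (use_driver(32, &flags))
            printf("flags=%#lx\n", flags);
        use_driver(32, NULL);       /* storage_probe()-style call */
        return 0;
    }
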
6212 diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
6213 index 89b24349269e..2ef0f0abe246 100644
6214 --- a/drivers/usb/storage/uas.c
6215 +++ b/drivers/usb/storage/uas.c
6216 @@ -770,7 +770,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
6217
6218 static int uas_slave_alloc(struct scsi_device *sdev)
6219 {
6220 - sdev->hostdata = (void *)sdev->host->hostdata;
6221 + struct uas_dev_info *devinfo =
6222 + (struct uas_dev_info *)sdev->host->hostdata;
6223 +
6224 + sdev->hostdata = devinfo;
6225
6226 /* USB has unusual DMA-alignment requirements: Although the
6227 * starting address of each scatter-gather element doesn't matter,
6228 @@ -789,6 +792,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
6229 */
6230 blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
6231
6232 + if (devinfo->flags & US_FL_MAX_SECTORS_64)
6233 + blk_queue_max_hw_sectors(sdev->request_queue, 64);
6234 + else if (devinfo->flags & US_FL_MAX_SECTORS_240)
6235 + blk_queue_max_hw_sectors(sdev->request_queue, 240);
6236 +
6237 return 0;
6238 }
6239
6240 @@ -906,8 +914,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
6241 struct Scsi_Host *shost = NULL;
6242 struct uas_dev_info *devinfo;
6243 struct usb_device *udev = interface_to_usbdev(intf);
6244 + unsigned long dev_flags;
6245
6246 - if (!uas_use_uas_driver(intf, id))
6247 + if (!uas_use_uas_driver(intf, id, &dev_flags))
6248 return -ENODEV;
6249
6250 if (uas_switch_interface(udev, intf))
6251 @@ -929,8 +938,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
6252 devinfo->udev = udev;
6253 devinfo->resetting = 0;
6254 devinfo->shutdown = 0;
6255 - devinfo->flags = id->driver_info;
6256 - usb_stor_adjust_quirks(udev, &devinfo->flags);
6257 + devinfo->flags = dev_flags;
6258 init_usb_anchor(&devinfo->cmd_urbs);
6259 init_usb_anchor(&devinfo->sense_urbs);
6260 init_usb_anchor(&devinfo->data_urbs);
6261 diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
6262 index 9d66ce62542e..cda42cf779a5 100644
6263 --- a/drivers/usb/storage/usb.c
6264 +++ b/drivers/usb/storage/usb.c
6265 @@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
6266 US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
6267 US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
6268 US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
6269 - US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
6270 + US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
6271 + US_FL_MAX_SECTORS_240);
6272
6273 p = quirks;
6274 while (*p) {
6275 @@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
6276 case 'f':
6277 f |= US_FL_NO_REPORT_OPCODES;
6278 break;
6279 + case 'g':
6280 + f |= US_FL_MAX_SECTORS_240;
6281 + break;
6282 case 'h':
6283 f |= US_FL_CAPACITY_HEURISTICS;
6284 break;
6285 @@ -1062,7 +1066,7 @@ static int storage_probe(struct usb_interface *intf,
6286
6287 /* If uas is enabled and this device can do uas then ignore it. */
6288 #if IS_ENABLED(CONFIG_USB_UAS)
6289 - if (uas_use_uas_driver(intf, id))
6290 + if (uas_use_uas_driver(intf, id, NULL))
6291 return -ENXIO;
6292 #endif
6293
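
The 'g' case extends the per-letter quirks parser: each character of the module-parameter string maps to one US_FL_* bit. A self-contained sketch of the same switch-on-letters loop, with made-up flag values:

    #include <stdio.h>

    #define FL_NO_REPORT_OPCODES 0x1
    #define FL_MAX_SECTORS_240   0x2
    #define FL_CAP_HEURISTICS    0x4

    static unsigned long parse_quirks(const char *p)
    {
        unsigned long f = 0;

        for (; *p; p++) {
            switch (*p) {
            case 'f': f |= FL_NO_REPORT_OPCODES; break;
            case 'g': f |= FL_MAX_SECTORS_240;   break; /* new in 3.18.14 */
            case 'h': f |= FL_CAP_HEURISTICS;    break;
            }
        }
        return f;
    }

    int main(void)
    {
        printf("%#lx\n", parse_quirks("fg"));
        return 0;
    }
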
6294 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
6295 index e1efcaa1b245..3dd249787b11 100644
6296 --- a/fs/binfmt_elf.c
6297 +++ b/fs/binfmt_elf.c
6298 @@ -750,6 +750,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
6299 i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
6300 int elf_prot = 0, elf_flags;
6301 unsigned long k, vaddr;
6302 + unsigned long total_size = 0;
6303
6304 if (elf_ppnt->p_type != PT_LOAD)
6305 continue;
6306 @@ -812,10 +813,16 @@ static int load_elf_binary(struct linux_binprm *bprm)
6307 #else
6308 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
6309 #endif
6310 + total_size = total_mapping_size(elf_phdata,
6311 + loc->elf_ex.e_phnum);
6312 + if (!total_size) {
6313 + error = -EINVAL;
6314 + goto out_free_dentry;
6315 + }
6316 }
6317
6318 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
6319 - elf_prot, elf_flags, 0);
6320 + elf_prot, elf_flags, total_size);
6321 if (BAD_ADDR(error)) {
6322 retval = IS_ERR((void *)error) ?
6323 PTR_ERR((void*)error) : -EINVAL;
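
For ET_DYN binaries the loader now asks total_mapping_size() for the span of all PT_LOAD segments so the first elf_map() call can reserve the whole image at once, and rejects a phdr table with no PT_LOAD entries. A rough userspace sketch of that span computation, using a simplified header struct rather than the real ElfW(Phdr):

    #include <stdint.h>
    #include <stdio.h>

    struct phdr { int is_load; uint64_t vaddr, memsz; };

    static uint64_t total_mapping_size(const struct phdr *p, int n)
    {
        uint64_t lo = UINT64_MAX, hi = 0;

        for (int i = 0; i < n; i++) {
            if (!p[i].is_load)
                continue;
            if (p[i].vaddr < lo)
                lo = p[i].vaddr;
            if (p[i].vaddr + p[i].memsz > hi)
                hi = p[i].vaddr + p[i].memsz;
        }
        return hi > lo ? hi - lo : 0;   /* 0 means no PT_LOAD: reject */
    }

    int main(void)
    {
        struct phdr p[] = {
            { 1, 0x0000, 0x1000 },
            { 0, 0x2000, 0x0100 },      /* non-LOAD segment is ignored */
            { 1, 0x4000, 0x0800 },
        };
        printf("%#llx\n", (unsigned long long)total_mapping_size(p, 3));
        return 0;
    }
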
6324 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
6325 index 4bd5e06fa5ab..950479f2d337 100644
6326 --- a/fs/btrfs/extent-tree.c
6327 +++ b/fs/btrfs/extent-tree.c
6328 @@ -6955,12 +6955,11 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
6329 return -ENOSPC;
6330 }
6331
6332 - if (btrfs_test_opt(root, DISCARD))
6333 - ret = btrfs_discard_extent(root, start, len, NULL);
6334 -
6335 if (pin)
6336 pin_down_extent(root, cache, start, len, 1);
6337 else {
6338 + if (btrfs_test_opt(root, DISCARD))
6339 + ret = btrfs_discard_extent(root, start, len, NULL);
6340 btrfs_add_free_space(cache, start, len);
6341 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
6342 }
6343 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
6344 index 4399f0c3a4ce..fce3b5b9a2bb 100644
6345 --- a/fs/btrfs/ioctl.c
6346 +++ b/fs/btrfs/ioctl.c
6347 @@ -2431,7 +2431,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
6348 "Attempt to delete subvolume %llu during send",
6349 dest->root_key.objectid);
6350 err = -EPERM;
6351 - goto out_dput;
6352 + goto out_unlock_inode;
6353 }
6354
6355 d_invalidate(dentry);
6356 @@ -2526,6 +2526,7 @@ out_up_write:
6357 root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
6358 spin_unlock(&dest->root_item_lock);
6359 }
6360 +out_unlock_inode:
6361 mutex_unlock(&inode->i_mutex);
6362 if (!err) {
6363 shrink_dcache_sb(root->fs_info->sb);
6364 @@ -2925,6 +2926,9 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 len,
6365 if (src == dst)
6366 return -EINVAL;
6367
6368 + if (len == 0)
6369 + return 0;
6370 +
6371 btrfs_double_lock(src, loff, dst, dst_loff, len);
6372
6373 ret = extent_same_check_offsets(src, loff, len);
6374 @@ -3654,6 +3658,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
6375 if (off + len == src->i_size)
6376 len = ALIGN(src->i_size, bs) - off;
6377
6378 + if (len == 0) {
6379 + ret = 0;
6380 + goto out_unlock;
6381 + }
6382 +
6383 /* verify the end result is block aligned */
6384 if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
6385 !IS_ALIGNED(destoff, bs))
6386 diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
6387 index dcf20131fbe4..00eacd83ce3d 100644
6388 --- a/fs/btrfs/xattr.c
6389 +++ b/fs/btrfs/xattr.c
6390 @@ -324,22 +324,42 @@ const struct xattr_handler *btrfs_xattr_handlers[] = {
6391 /*
6392 * Check if the attribute is in a supported namespace.
6393 *
6394 - * This applied after the check for the synthetic attributes in the system
6395 + * This is applied after the check for the synthetic attributes in the system
6396 * namespace.
6397 */
6398 -static bool btrfs_is_valid_xattr(const char *name)
6399 +static int btrfs_is_valid_xattr(const char *name)
6400 {
6401 - return !strncmp(name, XATTR_SECURITY_PREFIX,
6402 - XATTR_SECURITY_PREFIX_LEN) ||
6403 - !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) ||
6404 - !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
6405 - !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) ||
6406 - !strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN);
6407 + int len = strlen(name);
6408 + int prefixlen = 0;
6409 +
6410 + if (!strncmp(name, XATTR_SECURITY_PREFIX,
6411 + XATTR_SECURITY_PREFIX_LEN))
6412 + prefixlen = XATTR_SECURITY_PREFIX_LEN;
6413 + else if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
6414 + prefixlen = XATTR_SYSTEM_PREFIX_LEN;
6415 + else if (!strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
6416 + prefixlen = XATTR_TRUSTED_PREFIX_LEN;
6417 + else if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
6418 + prefixlen = XATTR_USER_PREFIX_LEN;
6419 + else if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
6420 + prefixlen = XATTR_BTRFS_PREFIX_LEN;
6421 + else
6422 + return -EOPNOTSUPP;
6423 +
6424 + /*
6425 + * The name cannot consist of just the prefix
6426 + */
6427 + if (len <= prefixlen)
6428 + return -EINVAL;
6429 +
6430 + return 0;
6431 }
6432
6433 ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
6434 void *buffer, size_t size)
6435 {
6436 + int ret;
6437 +
6438 /*
6439 * If this is a request for a synthetic attribute in the system.*
6440 * namespace use the generic infrastructure to resolve a handler
6441 @@ -348,8 +368,9 @@ ssize_t btrfs_getxattr(struct dentry *dentry, const char *name,
6442 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
6443 return generic_getxattr(dentry, name, buffer, size);
6444
6445 - if (!btrfs_is_valid_xattr(name))
6446 - return -EOPNOTSUPP;
6447 + ret = btrfs_is_valid_xattr(name);
6448 + if (ret)
6449 + return ret;
6450 return __btrfs_getxattr(dentry->d_inode, name, buffer, size);
6451 }
6452
6453 @@ -357,6 +378,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
6454 size_t size, int flags)
6455 {
6456 struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
6457 + int ret;
6458
6459 /*
6460 * The permission on security.* and system.* is not checked
6461 @@ -373,8 +395,9 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
6462 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
6463 return generic_setxattr(dentry, name, value, size, flags);
6464
6465 - if (!btrfs_is_valid_xattr(name))
6466 - return -EOPNOTSUPP;
6467 + ret = btrfs_is_valid_xattr(name);
6468 + if (ret)
6469 + return ret;
6470
6471 if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
6472 return btrfs_set_prop(dentry->d_inode, name,
6473 @@ -390,6 +413,7 @@ int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value,
6474 int btrfs_removexattr(struct dentry *dentry, const char *name)
6475 {
6476 struct btrfs_root *root = BTRFS_I(dentry->d_inode)->root;
6477 + int ret;
6478
6479 /*
6480 * The permission on security.* and system.* is not checked
6481 @@ -406,8 +430,9 @@ int btrfs_removexattr(struct dentry *dentry, const char *name)
6482 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
6483 return generic_removexattr(dentry, name);
6484
6485 - if (!btrfs_is_valid_xattr(name))
6486 - return -EOPNOTSUPP;
6487 + ret = btrfs_is_valid_xattr(name);
6488 + if (ret)
6489 + return ret;
6490
6491 if (!strncmp(name, XATTR_BTRFS_PREFIX, XATTR_BTRFS_PREFIX_LEN))
6492 return btrfs_set_prop(dentry->d_inode, name,
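
btrfs_is_valid_xattr() now returns a distinct error per failure mode: -EOPNOTSUPP for an unknown namespace and -EINVAL when the name is nothing but the prefix. A self-contained sketch of the same validation rule (prefix list abbreviated, helper name illustrative):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int is_valid_xattr(const char *name)
    {
        static const char *prefixes[] = {
            "security.", "system.", "trusted.", "user.", "btrfs."
        };
        size_t len = strlen(name);

        for (size_t i = 0; i < sizeof(prefixes) / sizeof(prefixes[0]); i++) {
            size_t plen = strlen(prefixes[i]);

            if (!strncmp(name, prefixes[i], plen))
                return len > plen ? 0 : -EINVAL; /* bare "user." is invalid */
        }
        return -EOPNOTSUPP;                      /* unknown namespace */
    }

    int main(void)
    {
        printf("%d %d %d\n", is_valid_xattr("user.foo"),
               is_valid_xattr("user."), is_valid_xattr("weird.foo"));
        return 0;
    }
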
6493 diff --git a/fs/exec.c b/fs/exec.c
6494 index 7302b75a9820..2e83209016ec 100644
6495 --- a/fs/exec.c
6496 +++ b/fs/exec.c
6497 @@ -1250,6 +1250,53 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
6498 spin_unlock(&p->fs->lock);
6499 }
6500
6501 +static void bprm_fill_uid(struct linux_binprm *bprm)
6502 +{
6503 + struct inode *inode;
6504 + unsigned int mode;
6505 + kuid_t uid;
6506 + kgid_t gid;
6507 +
6508 + /* clear any previous set[ug]id data from a previous binary */
6509 + bprm->cred->euid = current_euid();
6510 + bprm->cred->egid = current_egid();
6511 +
6512 + if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
6513 + return;
6514 +
6515 + if (task_no_new_privs(current))
6516 + return;
6517 +
6518 + inode = file_inode(bprm->file);
6519 + mode = READ_ONCE(inode->i_mode);
6520 + if (!(mode & (S_ISUID|S_ISGID)))
6521 + return;
6522 +
6523 + /* Be careful if suid/sgid is set */
6524 + mutex_lock(&inode->i_mutex);
6525 +
6526 + /* reload atomically mode/uid/gid now that lock held */
6527 + mode = inode->i_mode;
6528 + uid = inode->i_uid;
6529 + gid = inode->i_gid;
6530 + mutex_unlock(&inode->i_mutex);
6531 +
6532 + /* We ignore suid/sgid if there are no mappings for them in the ns */
6533 + if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
6534 + !kgid_has_mapping(bprm->cred->user_ns, gid))
6535 + return;
6536 +
6537 + if (mode & S_ISUID) {
6538 + bprm->per_clear |= PER_CLEAR_ON_SETID;
6539 + bprm->cred->euid = uid;
6540 + }
6541 +
6542 + if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
6543 + bprm->per_clear |= PER_CLEAR_ON_SETID;
6544 + bprm->cred->egid = gid;
6545 + }
6546 +}
6547 +
6548 /*
6549 * Fill the binprm structure from the inode.
6550 * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
6551 @@ -1258,36 +1305,9 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
6552 */
6553 int prepare_binprm(struct linux_binprm *bprm)
6554 {
6555 - struct inode *inode = file_inode(bprm->file);
6556 - umode_t mode = inode->i_mode;
6557 int retval;
6558
6559 -
6560 - /* clear any previous set[ug]id data from a previous binary */
6561 - bprm->cred->euid = current_euid();
6562 - bprm->cred->egid = current_egid();
6563 -
6564 - if (!(bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) &&
6565 - !task_no_new_privs(current) &&
6566 - kuid_has_mapping(bprm->cred->user_ns, inode->i_uid) &&
6567 - kgid_has_mapping(bprm->cred->user_ns, inode->i_gid)) {
6568 - /* Set-uid? */
6569 - if (mode & S_ISUID) {
6570 - bprm->per_clear |= PER_CLEAR_ON_SETID;
6571 - bprm->cred->euid = inode->i_uid;
6572 - }
6573 -
6574 - /* Set-gid? */
6575 - /*
6576 - * If setgid is set but no group execute bit then this
6577 - * is a candidate for mandatory locking, not a setgid
6578 - * executable.
6579 - */
6580 - if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
6581 - bprm->per_clear |= PER_CLEAR_ON_SETID;
6582 - bprm->cred->egid = inode->i_gid;
6583 - }
6584 - }
6585 + bprm_fill_uid(bprm);
6586
6587 /* fill in binprm security blob */
6588 retval = security_bprm_set_creds(bprm);
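
bprm_fill_uid() peeks at i_mode without the lock, then re-reads mode/uid/gid as one snapshot under i_mutex so a concurrent chmod/chown cannot mix stale and fresh values. A userspace sketch of that check-then-recheck pattern, with a pthread mutex and a fake inode standing in for the kernel types:

    #include <pthread.h>
    #include <stdio.h>

    struct fake_inode {
        pthread_mutex_t lock;
        unsigned int mode;
        unsigned int uid;
    };

    #define S_ISUID 04000

    static void fill_uid(struct fake_inode *inode, unsigned int *euid)
    {
        unsigned int mode, uid;

        if (!(inode->mode & S_ISUID))     /* unlocked fast path */
            return;

        pthread_mutex_lock(&inode->lock); /* re-read atomically: the file  */
        mode = inode->mode;               /* could have been chmod'ed or   */
        uid = inode->uid;                 /* chown'ed since the peek above */
        pthread_mutex_unlock(&inode->lock);

        if (mode & S_ISUID)
            *euid = uid;
    }

    int main(void)
    {
        struct fake_inode i = {
            .lock = PTHREAD_MUTEX_INITIALIZER, .mode = 04755, .uid = 0
        };
        unsigned int euid = 1000;

        fill_uid(&i, &euid);
        printf("euid=%u\n", euid);
        return 0;
    }
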
6589 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
6590 index 0b16fb4c06d3..6cfacbb0f928 100644
6591 --- a/fs/ext4/extents.c
6592 +++ b/fs/ext4/extents.c
6593 @@ -4923,13 +4923,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
6594 if (ret)
6595 return ret;
6596
6597 - /*
6598 - * currently supporting (pre)allocate mode for extent-based
6599 - * files _only_
6600 - */
6601 - if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
6602 - return -EOPNOTSUPP;
6603 -
6604 if (mode & FALLOC_FL_COLLAPSE_RANGE)
6605 return ext4_collapse_range(inode, offset, len);
6606
6607 @@ -4951,6 +4944,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
6608
6609 mutex_lock(&inode->i_mutex);
6610
6611 + /*
6612 + * We only support preallocation for extent-based files
6613 + */
6614 + if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
6615 + ret = -EOPNOTSUPP;
6616 + goto out;
6617 + }
6618 +
6619 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
6620 offset + len > i_size_read(inode)) {
6621 new_size = offset + len;
6622 diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
6623 index 94e7855ae71b..b860603dac33 100644
6624 --- a/fs/ext4/extents_status.c
6625 +++ b/fs/ext4/extents_status.c
6626 @@ -670,6 +670,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
6627
6628 BUG_ON(end < lblk);
6629
6630 + if ((status & EXTENT_STATUS_DELAYED) &&
6631 + (status & EXTENT_STATUS_WRITTEN)) {
6632 + ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
6633 + " delayed and written which can potentially "
6634 + " cause data loss.\n", lblk, len);
6635 + WARN_ON(1);
6636 + }
6637 +
6638 newes.es_lblk = lblk;
6639 newes.es_len = len;
6640 ext4_es_store_pblock_status(&newes, pblk, status);
6641 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
6642 index 3356ab5395f4..842cdd153c20 100644
6643 --- a/fs/ext4/inode.c
6644 +++ b/fs/ext4/inode.c
6645 @@ -540,6 +540,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
6646 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
6647 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
6648 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
6649 + !(status & EXTENT_STATUS_WRITTEN) &&
6650 ext4_find_delalloc_range(inode, map->m_lblk,
6651 map->m_lblk + map->m_len - 1))
6652 status |= EXTENT_STATUS_DELAYED;
6653 @@ -644,6 +645,7 @@ found:
6654 status = map->m_flags & EXT4_MAP_UNWRITTEN ?
6655 EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
6656 if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
6657 + !(status & EXTENT_STATUS_WRITTEN) &&
6658 ext4_find_delalloc_range(inode, map->m_lblk,
6659 map->m_lblk + map->m_len - 1))
6660 status |= EXTENT_STATUS_DELAYED;
6661 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
6662 index 426211882f72..bada5a1fb695 100644
6663 --- a/fs/ext4/namei.c
6664 +++ b/fs/ext4/namei.c
6665 @@ -1865,7 +1865,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
6666 struct inode *inode)
6667 {
6668 struct inode *dir = dentry->d_parent->d_inode;
6669 - struct buffer_head *bh;
6670 + struct buffer_head *bh = NULL;
6671 struct ext4_dir_entry_2 *de;
6672 struct ext4_dir_entry_tail *t;
6673 struct super_block *sb;
6674 @@ -1889,14 +1889,14 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
6675 return retval;
6676 if (retval == 1) {
6677 retval = 0;
6678 - return retval;
6679 + goto out;
6680 }
6681 }
6682
6683 if (is_dx(dir)) {
6684 retval = ext4_dx_add_entry(handle, dentry, inode);
6685 if (!retval || (retval != ERR_BAD_DX_DIR))
6686 - return retval;
6687 + goto out;
6688 ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
6689 dx_fallback++;
6690 ext4_mark_inode_dirty(handle, dir);
6691 @@ -1908,14 +1908,15 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
6692 return PTR_ERR(bh);
6693
6694 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
6695 - if (retval != -ENOSPC) {
6696 - brelse(bh);
6697 - return retval;
6698 - }
6699 + if (retval != -ENOSPC)
6700 + goto out;
6701
6702 if (blocks == 1 && !dx_fallback &&
6703 - EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX))
6704 - return make_indexed_dir(handle, dentry, inode, bh);
6705 + EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
6706 + retval = make_indexed_dir(handle, dentry, inode, bh);
6707 + bh = NULL; /* make_indexed_dir releases bh */
6708 + goto out;
6709 + }
6710 brelse(bh);
6711 }
6712 bh = ext4_append(handle, dir, &block);
6713 @@ -1931,6 +1932,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
6714 }
6715
6716 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
6717 +out:
6718 brelse(bh);
6719 if (retval == 0)
6720 ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
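
The ext4_add_entry() fix funnels every exit through one label that releases the buffer head, and marks ownership transfer (make_indexed_dir() frees bh itself) by NULLing the pointer first. A tiny sketch of that single-exit cleanup idiom, with malloc/free standing in for ext4_append()/brelse():

    #include <stdio.h>
    #include <stdlib.h>

    static int consume(char *buf)   /* takes ownership, like make_indexed_dir */
    {
        free(buf);
        return 0;
    }

    static int add_entry(int take_ownership_path)
    {
        char *bh;
        int ret;

        bh = malloc(64);
        if (!bh)
            return -12;             /* -ENOMEM */

        if (take_ownership_path) {
            ret = consume(bh);
            bh = NULL;              /* callee freed it; don't free twice */
            goto out;
        }
        ret = 1;
    out:
        free(bh);                   /* free(NULL) is a no-op */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", add_entry(0), add_entry(1));
        return 0;
    }
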
6721 diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c
6722 index d98094a9f476..ff10f3decbc9 100644
6723 --- a/fs/hfsplus/xattr.c
6724 +++ b/fs/hfsplus/xattr.c
6725 @@ -806,9 +806,6 @@ end_removexattr:
6726 static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
6727 void *buffer, size_t size, int type)
6728 {
6729 - char *xattr_name;
6730 - int res;
6731 -
6732 if (!strcmp(name, ""))
6733 return -EINVAL;
6734
6735 @@ -818,24 +815,19 @@ static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
6736 */
6737 if (is_known_namespace(name))
6738 return -EOPNOTSUPP;
6739 - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
6740 - + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
6741 - if (!xattr_name)
6742 - return -ENOMEM;
6743 - strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
6744 - strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
6745
6746 - res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
6747 - kfree(xattr_name);
6748 - return res;
6749 + /*
6750 + * osx is the namespace we use to indicate an unprefixed
6751 + * attribute on the filesystem (like the ones that OS X
6752 + * creates), so we pass the name through unmodified (after
6753 + * ensuring it doesn't conflict with another namespace).
6754 + */
6755 + return hfsplus_getxattr(dentry, name, buffer, size);
6756 }
6757
6758 static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
6759 const void *buffer, size_t size, int flags, int type)
6760 {
6761 - char *xattr_name;
6762 - int res;
6763 -
6764 if (!strcmp(name, ""))
6765 return -EINVAL;
6766
6767 @@ -845,16 +837,14 @@ static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
6768 */
6769 if (is_known_namespace(name))
6770 return -EOPNOTSUPP;
6771 - xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
6772 - + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
6773 - if (!xattr_name)
6774 - return -ENOMEM;
6775 - strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
6776 - strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
6777
6778 - res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
6779 - kfree(xattr_name);
6780 - return res;
6781 + /*
6782 + * osx is the namespace we use to indicate an unprefixed
6783 + * attribute on the filesystem (like the ones that OS X
6784 + * creates), so we pass the name through unmodified (after
6785 + * ensuring it doesn't conflict with another namespace).
6786 + */
6787 + return hfsplus_setxattr(dentry, name, buffer, size, flags);
6788 }
6789
6790 static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
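
After this change the hfsplus 'osx' handler rejects names that collide with a known Linux namespace and otherwise passes the name to the filesystem unmodified instead of gluing on an "osx." prefix. A sketch of that rule, with an abbreviated namespace list and illustrative helper names:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int is_known_namespace(const char *name)
    {
        static const char *known[] = {
            "security.", "system.", "trusted.", "user."
        };

        for (size_t i = 0; i < sizeof(known) / sizeof(known[0]); i++)
            if (!strncmp(name, known[i], strlen(known[i])))
                return 1;
        return 0;
    }

    static int osx_getxattr(const char *name)
    {
        if (!strcmp(name, ""))
            return -EINVAL;
        if (is_known_namespace(name))  /* "osx.user.foo" must not alias */
            return -EOPNOTSUPP;        /* the real "user.foo"           */
        printf("lookup on-disk attr \"%s\" unmodified\n", name);
        return 0;
    }

    int main(void)
    {
        osx_getxattr("com.apple.FinderInfo"); /* passed through as-is */
        return 0;
    }
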
6791 diff --git a/fs/namei.c b/fs/namei.c
6792 index db5fe86319e6..890d3580bf0e 100644
6793 --- a/fs/namei.c
6794 +++ b/fs/namei.c
6795 @@ -1560,7 +1560,8 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
6796
6797 if (should_follow_link(path->dentry, follow)) {
6798 if (nd->flags & LOOKUP_RCU) {
6799 - if (unlikely(unlazy_walk(nd, path->dentry))) {
6800 + if (unlikely(nd->path.mnt != path->mnt ||
6801 + unlazy_walk(nd, path->dentry))) {
6802 err = -ECHILD;
6803 goto out_err;
6804 }
6805 @@ -3015,7 +3016,8 @@ finish_lookup:
6806
6807 if (should_follow_link(path->dentry, !symlink_ok)) {
6808 if (nd->flags & LOOKUP_RCU) {
6809 - if (unlikely(unlazy_walk(nd, path->dentry))) {
6810 + if (unlikely(nd->path.mnt != path->mnt ||
6811 + unlazy_walk(nd, path->dentry))) {
6812 error = -ECHILD;
6813 goto out;
6814 }
6815 diff --git a/fs/namespace.c b/fs/namespace.c
6816 index bbde14719655..07ba424181a5 100644
6817 --- a/fs/namespace.c
6818 +++ b/fs/namespace.c
6819 @@ -1322,14 +1322,15 @@ static inline void namespace_lock(void)
6820 down_write(&namespace_sem);
6821 }
6822
6823 +enum umount_tree_flags {
6824 + UMOUNT_SYNC = 1,
6825 + UMOUNT_PROPAGATE = 2,
6826 +};
6827 /*
6828 * mount_lock must be held
6829 * namespace_sem must be held for write
6830 - * how = 0 => just this tree, don't propagate
6831 - * how = 1 => propagate; we know that nobody else has reference to any victims
6832 - * how = 2 => lazy umount
6833 */
6834 -void umount_tree(struct mount *mnt, int how)
6835 +static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
6836 {
6837 HLIST_HEAD(tmp_list);
6838 struct mount *p;
6839 @@ -1343,7 +1344,7 @@ void umount_tree(struct mount *mnt, int how)
6840 hlist_for_each_entry(p, &tmp_list, mnt_hash)
6841 list_del_init(&p->mnt_child);
6842
6843 - if (how)
6844 + if (how & UMOUNT_PROPAGATE)
6845 propagate_umount(&tmp_list);
6846
6847 hlist_for_each_entry(p, &tmp_list, mnt_hash) {
6848 @@ -1351,7 +1352,7 @@ void umount_tree(struct mount *mnt, int how)
6849 list_del_init(&p->mnt_list);
6850 __touch_mnt_namespace(p->mnt_ns);
6851 p->mnt_ns = NULL;
6852 - if (how < 2)
6853 + if (how & UMOUNT_SYNC)
6854 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
6855 if (mnt_has_parent(p)) {
6856 hlist_del_init(&p->mnt_mp_list);
6857 @@ -1456,14 +1457,14 @@ static int do_umount(struct mount *mnt, int flags)
6858
6859 if (flags & MNT_DETACH) {
6860 if (!list_empty(&mnt->mnt_list))
6861 - umount_tree(mnt, 2);
6862 + umount_tree(mnt, UMOUNT_PROPAGATE);
6863 retval = 0;
6864 } else {
6865 shrink_submounts(mnt);
6866 retval = -EBUSY;
6867 if (!propagate_mount_busy(mnt, 2)) {
6868 if (!list_empty(&mnt->mnt_list))
6869 - umount_tree(mnt, 1);
6870 + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
6871 retval = 0;
6872 }
6873 }
6874 @@ -1495,7 +1496,7 @@ void __detach_mounts(struct dentry *dentry)
6875 lock_mount_hash();
6876 while (!hlist_empty(&mp->m_list)) {
6877 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
6878 - umount_tree(mnt, 2);
6879 + umount_tree(mnt, 0);
6880 }
6881 unlock_mount_hash();
6882 put_mountpoint(mp);
6883 @@ -1662,7 +1663,7 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
6884 out:
6885 if (res) {
6886 lock_mount_hash();
6887 - umount_tree(res, 0);
6888 + umount_tree(res, UMOUNT_SYNC);
6889 unlock_mount_hash();
6890 }
6891 return q;
6892 @@ -1686,7 +1687,7 @@ void drop_collected_mounts(struct vfsmount *mnt)
6893 {
6894 namespace_lock();
6895 lock_mount_hash();
6896 - umount_tree(real_mount(mnt), 0);
6897 + umount_tree(real_mount(mnt), UMOUNT_SYNC);
6898 unlock_mount_hash();
6899 namespace_unlock();
6900 }
6901 @@ -1869,7 +1870,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
6902 out_cleanup_ids:
6903 while (!hlist_empty(&tree_list)) {
6904 child = hlist_entry(tree_list.first, struct mount, mnt_hash);
6905 - umount_tree(child, 0);
6906 + umount_tree(child, UMOUNT_SYNC);
6907 }
6908 unlock_mount_hash();
6909 cleanup_group_ids(source_mnt, NULL);
6910 @@ -2046,7 +2047,7 @@ static int do_loopback(struct path *path, const char *old_name,
6911 err = graft_tree(mnt, parent, mp);
6912 if (err) {
6913 lock_mount_hash();
6914 - umount_tree(mnt, 0);
6915 + umount_tree(mnt, UMOUNT_SYNC);
6916 unlock_mount_hash();
6917 }
6918 out2:
6919 @@ -2417,7 +2418,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
6920 while (!list_empty(&graveyard)) {
6921 mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
6922 touch_mnt_namespace(mnt->mnt_ns);
6923 - umount_tree(mnt, 1);
6924 + umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
6925 }
6926 unlock_mount_hash();
6927 namespace_unlock();
6928 @@ -2488,7 +2489,7 @@ static void shrink_submounts(struct mount *mnt)
6929 m = list_first_entry(&graveyard, struct mount,
6930 mnt_expire);
6931 touch_mnt_namespace(m->mnt_ns);
6932 - umount_tree(m, 1);
6933 + umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
6934 }
6935 }
6936 }
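
umount_tree() trades the old how=0/1/2 magic integer for two independent flag bits, so each call site states whether it propagates to peer mounts and whether victims get MNT_SYNC_UMOUNT. A sketch of the same enum-flags refactor, with printfs in place of the real teardown:

    #include <stdio.h>

    enum umount_tree_flags {
        UMOUNT_SYNC      = 1,
        UMOUNT_PROPAGATE = 2,
    };

    static void umount_tree(const char *who, enum umount_tree_flags how)
    {
        if (how & UMOUNT_PROPAGATE)
            printf("%s: propagate to peers\n", who);
        if (how & UMOUNT_SYNC)
            printf("%s: mark MNT_SYNC_UMOUNT\n", who);
    }

    int main(void)
    {
        umount_tree("detach", UMOUNT_PROPAGATE);               /* old how=2 */
        umount_tree("umount", UMOUNT_PROPAGATE | UMOUNT_SYNC); /* old how=1 */
        umount_tree("copy_tree error", UMOUNT_SYNC);           /* old how=0 */
        return 0;
    }
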
6937 diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
6938 index 206c08a60c7f..22bd1d612703 100644
6939 --- a/fs/nfs/nfs4xdr.c
6940 +++ b/fs/nfs/nfs4xdr.c
6941 @@ -7336,6 +7336,11 @@ nfs4_stat_to_errno(int stat)
6942 .p_name = #proc, \
6943 }
6944
6945 +#define STUB(proc) \
6946 +[NFSPROC4_CLNT_##proc] = { \
6947 + .p_name = #proc, \
6948 +}
6949 +
6950 struct rpc_procinfo nfs4_procedures[] = {
6951 PROC(READ, enc_read, dec_read),
6952 PROC(WRITE, enc_write, dec_write),
6953 @@ -7388,6 +7393,7 @@ struct rpc_procinfo nfs4_procedures[] = {
6954 PROC(SECINFO_NO_NAME, enc_secinfo_no_name, dec_secinfo_no_name),
6955 PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
6956 PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
6957 + STUB(GETDEVICELIST),
6958 PROC(BIND_CONN_TO_SESSION,
6959 enc_bind_conn_to_session, dec_bind_conn_to_session),
6960 PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
6961 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
6962 index 0beb023f25ac..6ed585935d5e 100644
6963 --- a/fs/nfsd/nfs4proc.c
6964 +++ b/fs/nfsd/nfs4proc.c
6965 @@ -1028,6 +1028,8 @@ nfsd4_seek(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
6966 dprintk("NFSD: nfsd4_seek: couldn't process stateid!\n");
6967 return status;
6968 }
6969 + if (!file)
6970 + return nfserr_bad_stateid;
6971
6972 switch (seek->seek_whence) {
6973 case NFS4_CONTENT_DATA:
6974 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
6975 index 2a77603d7cfd..6abe96593225 100644
6976 --- a/fs/nfsd/nfs4xdr.c
6977 +++ b/fs/nfsd/nfs4xdr.c
6978 @@ -3229,6 +3229,7 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
6979 unsigned long maxcount;
6980 struct xdr_stream *xdr = &resp->xdr;
6981 struct file *file = read->rd_filp;
6982 + struct svc_fh *fhp = read->rd_fhp;
6983 int starting_len = xdr->buf->len;
6984 struct raparms *ra;
6985 __be32 *p;
6986 @@ -3252,12 +3253,15 @@ nfsd4_encode_read(struct nfsd4_compoundres *resp, __be32 nfserr,
6987 maxcount = min_t(unsigned long, maxcount, (xdr->buf->buflen - xdr->buf->len));
6988 maxcount = min_t(unsigned long, maxcount, read->rd_length);
6989
6990 - if (!read->rd_filp) {
6991 + if (read->rd_filp)
6992 + err = nfsd_permission(resp->rqstp, fhp->fh_export,
6993 + fhp->fh_dentry,
6994 + NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
6995 + else
6996 err = nfsd_get_tmp_read_open(resp->rqstp, read->rd_fhp,
6997 &file, &ra);
6998 - if (err)
6999 - goto err_truncate;
7000 - }
7001 + if (err)
7002 + goto err_truncate;
7003
7004 if (file->f_op->splice_read && resp->rqstp->rq_splice_ok)
7005 err = nfsd4_encode_splice_read(resp, read, file, maxcount);
7006 diff --git a/fs/open.c b/fs/open.c
7007 index de92c13b58be..4a8a355ffab8 100644
7008 --- a/fs/open.c
7009 +++ b/fs/open.c
7010 @@ -558,6 +558,7 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
7011 uid = make_kuid(current_user_ns(), user);
7012 gid = make_kgid(current_user_ns(), group);
7013
7014 +retry_deleg:
7015 newattrs.ia_valid = ATTR_CTIME;
7016 if (user != (uid_t) -1) {
7017 if (!uid_valid(uid))
7018 @@ -574,7 +575,6 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
7019 if (!S_ISDIR(inode->i_mode))
7020 newattrs.ia_valid |=
7021 ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
7022 -retry_deleg:
7023 mutex_lock(&inode->i_mutex);
7024 error = security_path_chown(path, uid, gid);
7025 if (!error)
7026 diff --git a/fs/pnode.h b/fs/pnode.h
7027 index 4a246358b031..16afc3d6d2f2 100644
7028 --- a/fs/pnode.h
7029 +++ b/fs/pnode.h
7030 @@ -47,7 +47,6 @@ int get_dominating_id(struct mount *mnt, const struct path *root);
7031 unsigned int mnt_get_count(struct mount *mnt);
7032 void mnt_set_mountpoint(struct mount *, struct mountpoint *,
7033 struct mount *);
7034 -void umount_tree(struct mount *, int);
7035 struct mount *copy_tree(struct mount *, struct dentry *, int);
7036 bool is_path_reachable(struct mount *, struct dentry *,
7037 const struct path *root);
7038 diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
7039 index 7000e66f768e..93ef42245647 100644
7040 --- a/include/acpi/actypes.h
7041 +++ b/include/acpi/actypes.h
7042 @@ -199,9 +199,29 @@ typedef int s32;
7043 typedef s32 acpi_native_int;
7044
7045 typedef u32 acpi_size;
7046 +
7047 +#ifdef ACPI_32BIT_PHYSICAL_ADDRESS
7048 +
7049 +/*
7050 + * OSPMs can define this to shrink the size of the structures for 32-bit
7051 + * non-PAE environments. The ASL compiler may always define this to generate
7052 + * 32-bit OSPM-compliant tables.
7053 + */
7054 typedef u32 acpi_io_address;
7055 typedef u32 acpi_physical_address;
7056
7057 +#else /* ACPI_32BIT_PHYSICAL_ADDRESS */
7058 +
7059 +/*
7060 + * It is reported that, after some calculations, the physical addresses can
7061 + * wrap over the 32-bit boundary in a 32-bit PAE environment.
7062 + * https://bugzilla.kernel.org/show_bug.cgi?id=87971
7063 + */
7064 +typedef u64 acpi_io_address;
7065 +typedef u64 acpi_physical_address;
7066 +
7067 +#endif /* ACPI_32BIT_PHYSICAL_ADDRESS */
7068 +
7069 #define ACPI_MAX_PTR ACPI_UINT32_MAX
7070 #define ACPI_SIZE_MAX ACPI_UINT32_MAX
7071
7072 diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
7073 index 5f8cc1fa3278..9e1ed2e380b6 100644
7074 --- a/include/acpi/platform/acenv.h
7075 +++ b/include/acpi/platform/acenv.h
7076 @@ -76,6 +76,7 @@
7077 #define ACPI_LARGE_NAMESPACE_NODE
7078 #define ACPI_DATA_TABLE_DISASSEMBLY
7079 #define ACPI_SINGLE_THREADED
7080 +#define ACPI_32BIT_PHYSICAL_ADDRESS
7081 #endif
7082
7083 /* acpi_exec configuration. Multithreaded with full AML debugger */
7084 diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
7085 index ad9db6045b2f..b3f45a578344 100644
7086 --- a/include/kvm/arm_arch_timer.h
7087 +++ b/include/kvm/arm_arch_timer.h
7088 @@ -60,7 +60,8 @@ struct arch_timer_cpu {
7089
7090 #ifdef CONFIG_KVM_ARM_TIMER
7091 int kvm_timer_hyp_init(void);
7092 -int kvm_timer_init(struct kvm *kvm);
7093 +void kvm_timer_enable(struct kvm *kvm);
7094 +void kvm_timer_init(struct kvm *kvm);
7095 void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
7096 const struct kvm_irq_level *irq);
7097 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
7098 @@ -77,11 +78,8 @@ static inline int kvm_timer_hyp_init(void)
7099 return 0;
7100 };
7101
7102 -static inline int kvm_timer_init(struct kvm *kvm)
7103 -{
7104 - return 0;
7105 -}
7106 -
7107 +static inline void kvm_timer_enable(struct kvm *kvm) {}
7108 +static inline void kvm_timer_init(struct kvm *kvm) {}
7109 static inline void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
7110 const struct kvm_irq_level *irq) {}
7111 static inline void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) {}
7112 diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
7113 index 206dcc3b3f7a..3f73f6c48adb 100644
7114 --- a/include/kvm/arm_vgic.h
7115 +++ b/include/kvm/arm_vgic.h
7116 @@ -113,6 +113,7 @@ struct vgic_ops {
7117 void (*sync_lr_elrsr)(struct kvm_vcpu *, int, struct vgic_lr);
7118 u64 (*get_elrsr)(const struct kvm_vcpu *vcpu);
7119 u64 (*get_eisr)(const struct kvm_vcpu *vcpu);
7120 + void (*clear_eisr)(struct kvm_vcpu *vcpu);
7121 u32 (*get_interrupt_status)(const struct kvm_vcpu *vcpu);
7122 void (*enable_underflow)(struct kvm_vcpu *vcpu);
7123 void (*disable_underflow)(struct kvm_vcpu *vcpu);
7124 @@ -274,7 +275,7 @@ struct kvm_exit_mmio;
7125 #ifdef CONFIG_KVM_ARM_VGIC
7126 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
7127 int kvm_vgic_hyp_init(void);
7128 -int kvm_vgic_init(struct kvm *kvm);
7129 +int kvm_vgic_map_resources(struct kvm *kvm);
7130 int kvm_vgic_create(struct kvm *kvm);
7131 void kvm_vgic_destroy(struct kvm *kvm);
7132 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
7133 @@ -321,7 +322,7 @@ static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr,
7134 return -ENXIO;
7135 }
7136
7137 -static inline int kvm_vgic_init(struct kvm *kvm)
7138 +static inline int kvm_vgic_map_resources(struct kvm *kvm)
7139 {
7140 return 0;
7141 }
7142 diff --git a/include/linux/bpf.h b/include/linux/bpf.h
7143 index 3cf91754a957..cb4156a2c6bc 100644
7144 --- a/include/linux/bpf.h
7145 +++ b/include/linux/bpf.h
7146 @@ -48,7 +48,7 @@ struct bpf_map *bpf_map_get(struct fd f);
7147
7148 /* function argument constraints */
7149 enum bpf_arg_type {
7150 - ARG_ANYTHING = 0, /* any argument is ok */
7151 + ARG_DONTCARE = 0, /* unused argument in helper function */
7152
7153 /* the following constraints used to prototype
7154 * bpf_map_lookup/update/delete_elem() functions
7155 @@ -62,6 +62,8 @@ enum bpf_arg_type {
7156 */
7157 ARG_PTR_TO_STACK, /* any pointer to eBPF program stack */
7158 ARG_CONST_STACK_SIZE, /* number of bytes accessed from stack */
7159 +
7160 + ARG_ANYTHING, /* any (initialized) argument is ok */
7161 };
7162
7163 /* type of values returned from helper functions */
7164 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
7165 index 6e6d338641fe..14020c7796af 100644
7166 --- a/include/linux/hugetlb.h
7167 +++ b/include/linux/hugetlb.h
7168 @@ -99,9 +99,9 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
7169 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
7170 int write);
7171 struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
7172 - pmd_t *pmd, int write);
7173 + pmd_t *pmd, int flags);
7174 struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
7175 - pud_t *pud, int write);
7176 + pud_t *pud, int flags);
7177 int pmd_huge(pmd_t pmd);
7178 int pud_huge(pud_t pmd);
7179 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
7180 @@ -133,8 +133,8 @@ static inline void hugetlb_report_meminfo(struct seq_file *m)
7181 static inline void hugetlb_show_meminfo(void)
7182 {
7183 }
7184 -#define follow_huge_pmd(mm, addr, pmd, write) NULL
7185 -#define follow_huge_pud(mm, addr, pud, write) NULL
7186 +#define follow_huge_pmd(mm, addr, pmd, flags) NULL
7187 +#define follow_huge_pud(mm, addr, pud, flags) NULL
7188 #define prepare_hugepage_range(file, addr, len) (-EINVAL)
7189 #define pmd_huge(x) 0
7190 #define pud_huge(x) 0
7191 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
7192 index a6059bdf7b03..e4d8f705fecd 100644
7193 --- a/include/linux/kvm_host.h
7194 +++ b/include/linux/kvm_host.h
7195 @@ -43,6 +43,7 @@
7196 * include/linux/kvm_h.
7197 */
7198 #define KVM_MEMSLOT_INVALID (1UL << 16)
7199 +#define KVM_MEMSLOT_INCOHERENT (1UL << 17)
7200
7201 /* Two fragments for cross MMIO pages. */
7202 #define KVM_MAX_MMIO_FRAGMENTS 2
7203 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
7204 index 6c8b6f604e76..522d83731709 100644
7205 --- a/include/linux/skbuff.h
7206 +++ b/include/linux/skbuff.h
7207 @@ -772,6 +772,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
7208
7209 struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
7210 int node);
7211 +struct sk_buff *__build_skb(void *data, unsigned int frag_size);
7212 struct sk_buff *build_skb(void *data, unsigned int frag_size);
7213 static inline struct sk_buff *alloc_skb(unsigned int size,
7214 gfp_t priority)
7215 @@ -2958,6 +2959,18 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
7216 */
7217 #define CHECKSUM_BREAK 76
7218
7219 +/* Unset checksum-complete
7220 + *
7221 + * Unsetting checksum-complete can be done when a packet is being
7222 + * modified (decompressed, for instance) and the checksum-complete
7223 + * value is invalidated.
7224 + */
7225 +static inline void skb_checksum_complete_unset(struct sk_buff *skb)
7226 +{
7227 + if (skb->ip_summed == CHECKSUM_COMPLETE)
7228 + skb->ip_summed = CHECKSUM_NONE;
7229 +}
7230 +
7231 /* Validate (init) checksum based on checksum complete.
7232 *
7233 * Return values:
7234 diff --git a/include/linux/swapops.h b/include/linux/swapops.h
7235 index 6adfb7bfbf44..e288d5c016a7 100644
7236 --- a/include/linux/swapops.h
7237 +++ b/include/linux/swapops.h
7238 @@ -137,6 +137,8 @@ static inline void make_migration_entry_read(swp_entry_t *entry)
7239 *entry = swp_entry(SWP_MIGRATION_READ, swp_offset(*entry));
7240 }
7241
7242 +extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
7243 + spinlock_t *ptl);
7244 extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
7245 unsigned long address);
7246 extern void migration_entry_wait_huge(struct vm_area_struct *vma,
7247 @@ -150,6 +152,8 @@ static inline int is_migration_entry(swp_entry_t swp)
7248 }
7249 #define migration_entry_to_page(swp) NULL
7250 static inline void make_migration_entry_read(swp_entry_t *entryp) { }
7251 +static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
7252 + spinlock_t *ptl) { }
7253 static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
7254 unsigned long address) { }
7255 static inline void migration_entry_wait_huge(struct vm_area_struct *vma,
7256 diff --git a/include/linux/usb.h b/include/linux/usb.h
7257 index 3827bffc11a7..bdbd19fb1ff8 100644
7258 --- a/include/linux/usb.h
7259 +++ b/include/linux/usb.h
7260 @@ -205,6 +205,32 @@ void usb_put_intf(struct usb_interface *intf);
7261 #define USB_MAXINTERFACES 32
7262 #define USB_MAXIADS (USB_MAXINTERFACES/2)
7263
7264 +/*
7265 + * USB Resume Timer: Every Host controller driver should drive the resume
7266 + * signalling on the bus for the amount of time defined by this macro.
7267 + *
7268 + * That way we will have 'stable' behavior among all HCDs supported by Linux.
7269 + *
7270 + * Note that the USB Specification states we should drive resume for *at least*
7271 + * 20 ms, but it doesn't give an upper bound. This creates two possible
7272 + * situations which we want to avoid:
7273 + *
7274 + * (a) sometimes an msleep(20) might expire slightly before 20 ms, which causes
7275 + * us to fail USB Electrical Tests, thus failing Certification
7276 + *
7277 + * (b) Some (many) devices actually need more than 20 ms of resume signalling,
7278 + * and while we can argue that's against the USB Specification, we don't have
7279 + * control over which devices a certification laboratory will be using for
7280 + * certification. If CertLab uses a device which was tested against Windows and
7281 + * that happens to have relaxed resume signalling rules, we might fall into
7282 + * situations where we fail interoperability and electrical tests.
7283 + *
7284 + * In order to avoid both conditions, we're using a 40 ms resume timeout, which
7285 + * should cope with both LPJ calibration errors and devices not following every
7286 + * detail of the USB Specification.
7287 + */
7288 +#define USB_RESUME_TIMEOUT 40 /* ms */
7289 +
7290 /**
7291 * struct usb_interface_cache - long-term representation of a device interface
7292 * @num_altsetting: number of altsettings defined.
7293 diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
7294 index a7f2604c5f25..7f5f78bd15ad 100644
7295 --- a/include/linux/usb_usual.h
7296 +++ b/include/linux/usb_usual.h
7297 @@ -77,6 +77,8 @@
7298 /* Cannot handle ATA_12 or ATA_16 CDBs */ \
7299 US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
7300 /* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
7301 + US_FLAG(MAX_SECTORS_240, 0x08000000) \
7302 + /* Sets max_sectors to 240 */ \
7303
7304 #define US_FLAG(name, value) US_FL_##name = value ,
7305 enum { US_DO_ALL_FLAGS };
7306 diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
7307 index c46908c1bb3f..8af2fff0eb08 100644
7308 --- a/include/sound/emu10k1.h
7309 +++ b/include/sound/emu10k1.h
7310 @@ -41,7 +41,8 @@
7311
7312 #define EMUPAGESIZE 4096
7313 #define MAXREQVOICES 8
7314 -#define MAXPAGES 8192
7315 +#define MAXPAGES0 4096 /* 32 bit mode */
7316 +#define MAXPAGES1 8192 /* 31 bit mode */
7317 #define RESERVED 0
7318 #define NUM_MIDI 16
7319 #define NUM_G 64 /* use all channels */
7320 @@ -50,8 +51,7 @@
7321
7322 /* FIXME? - according to the OSS driver the EMU10K1 needs a 29 bit DMA mask */
7323 #define EMU10K1_DMA_MASK 0x7fffffffUL /* 31bit */
7324 -#define AUDIGY_DMA_MASK 0x7fffffffUL /* 31bit FIXME - 32 should work? */
7325 - /* See ALSA bug #1276 - rlrevell */
7326 +#define AUDIGY_DMA_MASK 0xffffffffUL /* 32bit mode */
7327
7328 #define TMEMSIZE 256*1024
7329 #define TMEMSIZEREG 4
7330 @@ -466,8 +466,11 @@
7331
7332 #define MAPB 0x0d /* Cache map B */
7333
7334 -#define MAP_PTE_MASK 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
7335 -#define MAP_PTI_MASK 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
7336 +#define MAP_PTE_MASK0 0xfffff000 /* The 20 MSBs of the PTE indexed by the PTI */
7337 +#define MAP_PTI_MASK0 0x00000fff /* The 12 bit index to one of the 4096 PTE dwords */
7338 +
7339 +#define MAP_PTE_MASK1 0xffffe000 /* The 19 MSBs of the PTE indexed by the PTI */
7340 +#define MAP_PTI_MASK1 0x00001fff /* The 13 bit index to one of the 8192 PTE dwords */
7341
7342 /* 0x0e, 0x0f: Not used */
7343
7344 @@ -1704,6 +1707,7 @@ struct snd_emu10k1 {
7345 unsigned short model; /* subsystem id */
7346 unsigned int card_type; /* EMU10K1_CARD_* */
7347 unsigned int ecard_ctrl; /* ecard control bits */
7348 + unsigned int address_mode; /* address mode */
7349 unsigned long dma_mask; /* PCI DMA mask */
7350 unsigned int delay_pcm_irq; /* in samples */
7351 int max_cache_pages; /* max memory size / PAGE_SIZE */
7352 diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
7353 index 3a4d7da67b8d..525f313f9f41 100644
7354 --- a/include/sound/soc-dapm.h
7355 +++ b/include/sound/soc-dapm.h
7356 @@ -287,7 +287,7 @@ struct device;
7357 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | SNDRV_CTL_ELEM_ACCESS_READWRITE,\
7358 .tlv.p = (tlv_array), \
7359 .get = snd_soc_dapm_get_volsw, .put = snd_soc_dapm_put_volsw, \
7360 - .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 0) }
7361 + .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert, 1) }
7362 #define SOC_DAPM_SINGLE_TLV_VIRT(xname, max, tlv_array) \
7363 SOC_DAPM_SINGLE(xname, SND_SOC_NOPM, 0, max, 0, tlv_array)
7364 #define SOC_DAPM_ENUM(xname, xenum) \
7365 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
7366 index 1fbd69cfd0b7..c4b85a5889ba 100644
7367 --- a/include/target/target_core_base.h
7368 +++ b/include/target/target_core_base.h
7369 @@ -520,7 +520,7 @@ struct se_cmd {
7370 sense_reason_t (*execute_cmd)(struct se_cmd *);
7371 sense_reason_t (*execute_rw)(struct se_cmd *, struct scatterlist *,
7372 u32, enum dma_data_direction);
7373 - sense_reason_t (*transport_complete_callback)(struct se_cmd *);
7374 + sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool);
7375
7376 unsigned char *t_task_cdb;
7377 unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
7378 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
7379 index d6594e457a25..71a9e5be6592 100644
7380 --- a/kernel/bpf/core.c
7381 +++ b/kernel/bpf/core.c
7382 @@ -357,8 +357,8 @@ select_insn:
7383 ALU64_MOD_X:
7384 if (unlikely(SRC == 0))
7385 return 0;
7386 - tmp = DST;
7387 - DST = do_div(tmp, SRC);
7388 + div64_u64_rem(DST, SRC, &tmp);
7389 + DST = tmp;
7390 CONT;
7391 ALU_MOD_X:
7392 if (unlikely(SRC == 0))
7393 @@ -367,8 +367,8 @@ select_insn:
7394 DST = do_div(tmp, (u32) SRC);
7395 CONT;
7396 ALU64_MOD_K:
7397 - tmp = DST;
7398 - DST = do_div(tmp, IMM);
7399 + div64_u64_rem(DST, IMM, &tmp);
7400 + DST = tmp;
7401 CONT;
7402 ALU_MOD_K:
7403 tmp = (u32) DST;
7404 @@ -377,7 +377,7 @@ select_insn:
7405 ALU64_DIV_X:
7406 if (unlikely(SRC == 0))
7407 return 0;
7408 - do_div(DST, SRC);
7409 + DST = div64_u64(DST, SRC);
7410 CONT;
7411 ALU_DIV_X:
7412 if (unlikely(SRC == 0))
7413 @@ -387,7 +387,7 @@ select_insn:
7414 DST = (u32) tmp;
7415 CONT;
7416 ALU64_DIV_K:
7417 - do_div(DST, IMM);
7418 + DST = div64_u64(DST, IMM);
7419 CONT;
7420 ALU_DIV_K:
7421 tmp = (u32) DST;
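
do_div() takes a 32-bit divisor, so the old BPF_DIV/BPF_MOD paths silently truncated a 64-bit SRC; div64_u64()/div64_u64_rem() keep all 64 bits. In portable userspace C the difference is just a cast, as this small demonstration shows:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dst = 0x200000000ULL;   /* 2^33 */
        uint64_t src = 0x100000001ULL;   /* 2^32 + 1 */

        /* truncating the divisor to 32 bits (what do_div did): src becomes 1 */
        printf("wrong: %llu\n", (unsigned long long)(dst / (uint32_t)src));
        /* full 64-bit divide (what div64_u64 does) */
        printf("right: %llu\n", (unsigned long long)(dst / src));
        /* full 64-bit remainder (what div64_u64_rem does) */
        printf("mod:   %llu\n", (unsigned long long)(dst % src));
        return 0;
    }
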
7422 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
7423 index d8dcc80472c1..055ae6ac0280 100644
7424 --- a/kernel/bpf/verifier.c
7425 +++ b/kernel/bpf/verifier.c
7426 @@ -763,7 +763,7 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
7427 enum bpf_reg_type expected_type;
7428 int err = 0;
7429
7430 - if (arg_type == ARG_ANYTHING)
7431 + if (arg_type == ARG_DONTCARE)
7432 return 0;
7433
7434 if (reg->type == NOT_INIT) {
7435 @@ -771,6 +771,9 @@ static int check_func_arg(struct verifier_env *env, u32 regno,
7436 return -EACCES;
7437 }
7438
7439 + if (arg_type == ARG_ANYTHING)
7440 + return 0;
7441 +
7442 if (arg_type == ARG_PTR_TO_STACK || arg_type == ARG_PTR_TO_MAP_KEY ||
7443 arg_type == ARG_PTR_TO_MAP_VALUE) {
7444 expected_type = PTR_TO_STACK;
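
The verifier fix reorders the checks: ARG_DONTCARE still short-circuits, but an uninitialized register is rejected before ARG_ANYTHING can accept it. A compact sketch of that ordering (enums and errno values illustrative):

    #include <stdio.h>

    enum arg_type { ARG_DONTCARE, ARG_ANYTHING, ARG_PTR_TO_STACK };
    enum reg_type { NOT_INIT, INITIALIZED };

    static int check_func_arg(enum reg_type reg, enum arg_type arg)
    {
        if (arg == ARG_DONTCARE)
            return 0;              /* unused slot: nothing to verify */
        if (reg == NOT_INIT)
            return -13;            /* -EACCES: reject before any shortcut */
        if (arg == ARG_ANYTHING)
            return 0;              /* any *initialized* value is fine */
        return 0;                  /* further type checks would follow */
    }

    int main(void)
    {
        printf("%d\n", check_func_arg(NOT_INIT, ARG_ANYTHING));    /* -13 */
        printf("%d\n", check_func_arg(INITIALIZED, ARG_ANYTHING)); /* 0 */
        return 0;
    }
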
7445 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
7446 index 54e75226c2c4..dcd968232d42 100644
7447 --- a/kernel/ptrace.c
7448 +++ b/kernel/ptrace.c
7449 @@ -714,6 +714,8 @@ static int ptrace_peek_siginfo(struct task_struct *child,
7450 static int ptrace_resume(struct task_struct *child, long request,
7451 unsigned long data)
7452 {
7453 + bool need_siglock;
7454 +
7455 if (!valid_signal(data))
7456 return -EIO;
7457
7458 @@ -741,8 +743,26 @@ static int ptrace_resume(struct task_struct *child, long request,
7459 user_disable_single_step(child);
7460 }
7461
7462 + /*
7463 + * Change ->exit_code and ->state under siglock to avoid the race
7464 + * with wait_task_stopped() in between; a non-zero ->exit_code will
7465 + * wrongly look like another report from tracee.
7466 + *
7467 + * Note that we need siglock even if ->exit_code == data and/or this
7468 + * status was not reported yet, the new status must not be cleared by
7469 + * wait_task_stopped() after resume.
7470 + *
7471 + * If data == 0 we do not care if wait_task_stopped() reports the old
7472 + * status and clears the code too; this can't race with the tracee, it
7473 + * takes siglock after resume.
7474 + */
7475 + need_siglock = data && !thread_group_empty(current);
7476 + if (need_siglock)
7477 + spin_lock_irq(&child->sighand->siglock);
7478 child->exit_code = data;
7479 wake_up_state(child, __TASK_TRACED);
7480 + if (need_siglock)
7481 + spin_unlock_irq(&child->sighand->siglock);
7482
7483 return 0;
7484 }
7485 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
7486 index f4fbbfcdf399..0fc5cfedcc8c 100644
7487 --- a/kernel/trace/ring_buffer.c
7488 +++ b/kernel/trace/ring_buffer.c
7489 @@ -2684,7 +2684,7 @@ static DEFINE_PER_CPU(unsigned int, current_context);
7490
7491 static __always_inline int trace_recursive_lock(void)
7492 {
7493 - unsigned int val = this_cpu_read(current_context);
7494 + unsigned int val = __this_cpu_read(current_context);
7495 int bit;
7496
7497 if (in_interrupt()) {
7498 @@ -2701,18 +2701,17 @@ static __always_inline int trace_recursive_lock(void)
7499 return 1;
7500
7501 val |= (1 << bit);
7502 - this_cpu_write(current_context, val);
7503 + __this_cpu_write(current_context, val);
7504
7505 return 0;
7506 }
7507
7508 static __always_inline void trace_recursive_unlock(void)
7509 {
7510 - unsigned int val = this_cpu_read(current_context);
7511 + unsigned int val = __this_cpu_read(current_context);
7512
7513 - val--;
7514 - val &= this_cpu_read(current_context);
7515 - this_cpu_write(current_context, val);
7516 + val &= val & (val - 1);
7517 + __this_cpu_write(current_context, val);
7518 }
7519
7520 #else
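
The unlock path now clears its nesting bit with the classic val & (val - 1) trick; since deeper contexts (NMI, then IRQ, then softirq) take lower bit numbers, the lowest set bit is always the most recently entered level. A two-line demonstration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int val = 0x6;  /* irq (bit 1) nested over softirq (bit 2) */

        val &= val - 1;          /* clears the lowest set bit: leave irq */
        printf("%#x\n", val);    /* 0x4 -- softirq level still held */
        val &= val - 1;
        printf("%#x\n", val);    /* 0 -- fully unlocked */
        return 0;
    }
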
7521 diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
7522 index f0a0c982cde3..2964333687dc 100644
7523 --- a/kernel/trace/trace_functions_graph.c
7524 +++ b/kernel/trace/trace_functions_graph.c
7525 @@ -1462,15 +1462,19 @@ void graph_trace_open(struct trace_iterator *iter)
7526 {
7527 /* pid and depth on the last trace processed */
7528 struct fgraph_data *data;
7529 + gfp_t gfpflags;
7530 int cpu;
7531
7532 iter->private = NULL;
7533
7534 - data = kzalloc(sizeof(*data), GFP_KERNEL);
7535 + /* We can be called in atomic context via ftrace_dump() */
7536 + gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
7537 +
7538 + data = kzalloc(sizeof(*data), gfpflags);
7539 if (!data)
7540 goto out_err;
7541
7542 - data->cpu_data = alloc_percpu(struct fgraph_cpu_data);
7543 + data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
7544 if (!data->cpu_data)
7545 goto out_err_free;
7546
7547 diff --git a/lib/string.c b/lib/string.c
7548 index 10063300b830..643b0a90802c 100644
7549 --- a/lib/string.c
7550 +++ b/lib/string.c
7551 @@ -610,7 +610,7 @@ EXPORT_SYMBOL(memset);
7552 void memzero_explicit(void *s, size_t count)
7553 {
7554 memset(s, 0, count);
7555 - OPTIMIZER_HIDE_VAR(s);
7556 + barrier();
7557 }
7558 EXPORT_SYMBOL(memzero_explicit);
7559
7560 diff --git a/mm/gup.c b/mm/gup.c
7561 index a0d57ec05510..377a5a796242 100644
7562 --- a/mm/gup.c
7563 +++ b/mm/gup.c
7564 @@ -167,10 +167,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
7565 if (pud_none(*pud))
7566 return no_page_table(vma, flags);
7567 if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
7568 - if (flags & FOLL_GET)
7569 - return NULL;
7570 - page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
7571 - return page;
7572 + page = follow_huge_pud(mm, address, pud, flags);
7573 + if (page)
7574 + return page;
7575 + return no_page_table(vma, flags);
7576 }
7577 if (unlikely(pud_bad(*pud)))
7578 return no_page_table(vma, flags);
7579 @@ -179,19 +179,10 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
7580 if (pmd_none(*pmd))
7581 return no_page_table(vma, flags);
7582 if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
7583 - page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
7584 - if (flags & FOLL_GET) {
7585 - /*
7586 - * Refcount on tail pages are not well-defined and
7587 - * shouldn't be taken. The caller should handle a NULL
7588 - * return when trying to follow tail pages.
7589 - */
7590 - if (PageHead(page))
7591 - get_page(page);
7592 - else
7593 - page = NULL;
7594 - }
7595 - return page;
7596 + page = follow_huge_pmd(mm, address, pmd, flags);
7597 + if (page)
7598 + return page;
7599 + return no_page_table(vma, flags);
7600 }
7601 if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
7602 return no_page_table(vma, flags);
7603 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
7604 index 4cacc6a8a6c1..da8fa4e4237c 100644
7605 --- a/mm/hugetlb.c
7606 +++ b/mm/hugetlb.c
7607 @@ -3693,44 +3693,64 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
7608 return (pte_t *) pmd;
7609 }
7610
7611 -struct page *
7612 -follow_huge_pmd(struct mm_struct *mm, unsigned long address,
7613 - pmd_t *pmd, int write)
7614 -{
7615 - struct page *page;
7616 +#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7617
7618 - if (!pmd_present(*pmd))
7619 - return NULL;
7620 - page = pte_page(*(pte_t *)pmd);
7621 - if (page)
7622 - page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
7623 - return page;
7624 +/*
7625 + * These functions are overridable if your architecture needs its own
7626 + * behavior.
7627 + */
7628 +struct page * __weak
7629 +follow_huge_addr(struct mm_struct *mm, unsigned long address,
7630 + int write)
7631 +{
7632 + return ERR_PTR(-EINVAL);
7633 }
7634
7635 -struct page *
7636 -follow_huge_pud(struct mm_struct *mm, unsigned long address,
7637 - pud_t *pud, int write)
7638 +struct page * __weak
7639 +follow_huge_pmd(struct mm_struct *mm, unsigned long address,
7640 + pmd_t *pmd, int flags)
7641 {
7642 - struct page *page;
7643 -
7644 - page = pte_page(*(pte_t *)pud);
7645 - if (page)
7646 - page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
7647 + struct page *page = NULL;
7648 + spinlock_t *ptl;
7649 +retry:
7650 + ptl = pmd_lockptr(mm, pmd);
7651 + spin_lock(ptl);
7652 + /*
7653 + * make sure that the address range covered by this pmd is not
7654 + * unmapped from other threads.
7655 + */
7656 + if (!pmd_huge(*pmd))
7657 + goto out;
7658 + if (pmd_present(*pmd)) {
7659 + page = pte_page(*(pte_t *)pmd) +
7660 + ((address & ~PMD_MASK) >> PAGE_SHIFT);
7661 + if (flags & FOLL_GET)
7662 + get_page(page);
7663 + } else {
7664 + if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
7665 + spin_unlock(ptl);
7666 + __migration_entry_wait(mm, (pte_t *)pmd, ptl);
7667 + goto retry;
7668 + }
7669 + /*
7670 + * hwpoisoned entry is treated as no_page_table in
7671 + * follow_page_mask().
7672 + */
7673 + }
7674 +out:
7675 + spin_unlock(ptl);
7676 return page;
7677 }
7678
7679 -#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7680 -
7681 -/* Can be overriden by architectures */
7682 struct page * __weak
7683 follow_huge_pud(struct mm_struct *mm, unsigned long address,
7684 - pud_t *pud, int write)
7685 + pud_t *pud, int flags)
7686 {
7687 - BUG();
7688 - return NULL;
7689 -}
7690 + if (flags & FOLL_GET)
7691 + return NULL;
7692
7693 -#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7694 + return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
7695 +}
7696
7697 #ifdef CONFIG_MEMORY_FAILURE
7698
7699 diff --git a/mm/migrate.c b/mm/migrate.c
7700 index 01439953abf5..cd4fd10c4ec3 100644
7701 --- a/mm/migrate.c
7702 +++ b/mm/migrate.c
7703 @@ -229,7 +229,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
7704 * get to the page and wait until migration is finished.
7705 * When we return from this function the fault will be retried.
7706 */
7707 -static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
7708 +void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
7709 spinlock_t *ptl)
7710 {
7711 pte_t pte;
7712 @@ -1260,7 +1260,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
7713 goto put_and_set;
7714
7715 if (PageHuge(page)) {
7716 - isolate_huge_page(page, &pagelist);
7717 + if (PageHead(page))
7718 + isolate_huge_page(page, &pagelist);
7719 goto put_and_set;
7720 }
7721
7722 diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
7723 index 1a4f32c09ad5..f076a8ede00b 100644
7724 --- a/net/bridge/br_netfilter.c
7725 +++ b/net/bridge/br_netfilter.c
7726 @@ -650,6 +650,13 @@ static int br_nf_forward_finish(struct sk_buff *skb)
7727 struct net_device *in;
7728
7729 if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
7730 + int frag_max_size;
7731 +
7732 + if (skb->protocol == htons(ETH_P_IP)) {
7733 + frag_max_size = IPCB(skb)->frag_max_size;
7734 + BR_INPUT_SKB_CB(skb)->frag_max_size = frag_max_size;
7735 + }
7736 +
7737 in = nf_bridge->physindev;
7738 if (nf_bridge->mask & BRNF_PKT_TYPE) {
7739 skb->pkt_type = PACKET_OTHERHOST;
7740 @@ -709,8 +716,14 @@ static unsigned int br_nf_forward_ip(const struct nf_hook_ops *ops,
7741 nf_bridge->mask |= BRNF_PKT_TYPE;
7742 }
7743
7744 - if (pf == NFPROTO_IPV4 && br_parse_ip_options(skb))
7745 - return NF_DROP;
7746 + if (pf == NFPROTO_IPV4) {
7747 + int frag_max = BR_INPUT_SKB_CB(skb)->frag_max_size;
7748 +
7749 + if (br_parse_ip_options(skb))
7750 + return NF_DROP;
7751 +
7752 + IPCB(skb)->frag_max_size = frag_max;
7753 + }
7754
7755 /* The physdev module checks on this */
7756 nf_bridge->mask |= BRNF_BRIDGED;
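
The br_netfilter hunks above exist because br_parse_ip_options() re-initializes the IP control block, wiping frag_max_size; the fix saves the value first and writes it back afterwards. A reduced sketch of that save-call-restore shape, with hypothetical stand-in types:

struct cb { int frag_max_size; };

static int parse_options(struct cb *cb)
{
	*cb = (struct cb){ 0 };		/* the helper scrubs the control block */
	return 0;			/* 0 = ok, nonzero = drop */
}

static int forward_ip(struct cb *cb)
{
	int saved = cb->frag_max_size;	/* save before the clobbering helper */

	if (parse_options(cb))
		return -1;		/* NF_DROP in the real code */

	cb->frag_max_size = saved;	/* restore what the helper erased */
	return 0;
}
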
7757 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
7758 index 17fd8dca921e..02ebb7133312 100644
7759 --- a/net/core/skbuff.c
7760 +++ b/net/core/skbuff.c
7761 @@ -278,13 +278,14 @@ nodata:
7762 EXPORT_SYMBOL(__alloc_skb);
7763
7764 /**
7765 - * build_skb - build a network buffer
7766 + * __build_skb - build a network buffer
7767 * @data: data buffer provided by caller
7768 - * @frag_size: size of fragment, or 0 if head was kmalloced
7769 + * @frag_size: size of data, or 0 if head was kmalloced
7770 *
7771 * Allocate a new &sk_buff. Caller provides space holding head and
7772 * skb_shared_info. @data must have been allocated by kmalloc() only if
7773 - * @frag_size is 0, otherwise data should come from the page allocator.
7774 + * @frag_size is 0, otherwise data should come from the page allocator
7775 + * or vmalloc()
7776 * The return is the new skb buffer.
7777 * On a failure the return is %NULL, and @data is not freed.
7778 * Notes :
7779 @@ -295,7 +296,7 @@ EXPORT_SYMBOL(__alloc_skb);
7780 * before giving packet to stack.
7781 * RX rings only contains data buffers, not full skbs.
7782 */
7783 -struct sk_buff *build_skb(void *data, unsigned int frag_size)
7784 +struct sk_buff *__build_skb(void *data, unsigned int frag_size)
7785 {
7786 struct skb_shared_info *shinfo;
7787 struct sk_buff *skb;
7788 @@ -309,7 +310,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
7789
7790 memset(skb, 0, offsetof(struct sk_buff, tail));
7791 skb->truesize = SKB_TRUESIZE(size);
7792 - skb->head_frag = frag_size != 0;
7793 atomic_set(&skb->users, 1);
7794 skb->head = data;
7795 skb->data = data;
7796 @@ -326,6 +326,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
7797
7798 return skb;
7799 }
7800 +
7801 +/* build_skb() is wrapper over __build_skb(), that specifically
7802 + * takes care of skb->head and skb->pfmemalloc
7803 + * This means that if @frag_size is not zero, then @data must be backed
7804 + * by a page fragment, not kmalloc() or vmalloc()
7805 + */
7806 +struct sk_buff *build_skb(void *data, unsigned int frag_size)
7807 +{
7808 + struct sk_buff *skb = __build_skb(data, frag_size);
7809 +
7810 + if (skb && frag_size) {
7811 + skb->head_frag = 1;
7812 + if (virt_to_head_page(data)->pfmemalloc)
7813 + skb->pfmemalloc = 1;
7814 + }
7815 + return skb;
7816 +}
7817 EXPORT_SYMBOL(build_skb);
7818
7819 struct netdev_alloc_cache {
7820 @@ -352,7 +369,8 @@ refill:
7821 gfp_t gfp = gfp_mask;
7822
7823 if (order)
7824 - gfp |= __GFP_COMP | __GFP_NOWARN;
7825 + gfp |= __GFP_COMP | __GFP_NOWARN |
7826 + __GFP_NOMEMALLOC;
7827 nc->frag.page = alloc_pages(gfp, order);
7828 if (likely(nc->frag.page))
7829 break;
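
The skbuff change splits construction in two: __build_skb() does the common work and makes no assumption about where @data came from, while build_skb() stays the page-fragment-only entry point that additionally sets head_frag and propagates the page's pfmemalloc flag. A compilable sketch of that wrapper split, with simplified stand-in types:

#include <stdbool.h>
#include <stdlib.h>

struct buf {
	void *head;
	bool head_frag;
	bool pfmemalloc;
};

static struct buf *core_build(void *data)	/* ~ __build_skb() */
{
	struct buf *b = calloc(1, sizeof(*b));

	if (b)
		b->head = data;		/* may be kmalloc, page frag or vmalloc */
	return b;
}

static struct buf *frag_build(void *data, bool page_is_pfmemalloc)
{					/* ~ build_skb(): page fragments only */
	struct buf *b = core_build(data);

	if (b) {
		b->head_frag = true;
		b->pfmemalloc = page_is_pfmemalloc;	/* ~ virt_to_head_page(data)->pfmemalloc */
	}
	return b;
}
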
7830 diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
7831 index 787b3c294ce6..d5410b57da19 100644
7832 --- a/net/ipv4/ip_forward.c
7833 +++ b/net/ipv4/ip_forward.c
7834 @@ -81,6 +81,9 @@ int ip_forward(struct sk_buff *skb)
7835 if (skb->pkt_type != PACKET_HOST)
7836 goto drop;
7837
7838 + if (unlikely(skb->sk))
7839 + goto drop;
7840 +
7841 if (skb_warn_if_lro(skb))
7842 goto drop;
7843
7844 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
7845 index a5c49d657ab1..64f4edb2dbf9 100644
7846 --- a/net/ipv4/ping.c
7847 +++ b/net/ipv4/ping.c
7848 @@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
7849 if (sk_hashed(sk)) {
7850 write_lock_bh(&ping_table.lock);
7851 hlist_nulls_del(&sk->sk_nulls_node);
7852 + sk_nulls_node_init(&sk->sk_nulls_node);
7853 sock_put(sk);
7854 isk->inet_num = 0;
7855 isk->inet_sport = 0;
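
The one-line ping_unhash() fix matters because hlist_nulls_del() poisons the removed node's back-pointer, so a second unhash of the same socket would dereference poison; sk_nulls_node_init() resets the node so the repeat becomes a harmless no-op. The idea in miniature, on a hand-rolled list node:

#include <stddef.h>

struct node { struct node *next, **pprev; };

static void unlink_once(struct node *n)
{
	if (!n->pprev)		/* already unhashed: nothing to do */
		return;
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
	n->next = NULL;		/* ~ sk_nulls_node_init(): mark unhashed */
	n->pprev = NULL;	/* instead of leaving a poisoned pointer */
}
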
7856 diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
7857 index 32dcb4e05b6b..dc9f925b0cd5 100644
7858 --- a/net/ipv4/tcp_output.c
7859 +++ b/net/ipv4/tcp_output.c
7860 @@ -2717,39 +2717,65 @@ begin_fwd:
7861 }
7862 }
7863
7864 -/* Send a fin. The caller locks the socket for us. This cannot be
7865 - * allowed to fail queueing a FIN frame under any circumstances.
7866 +/* We allow to exceed memory limits for FIN packets to expedite
7867 + * connection tear down and (memory) recovery.
7868 + * Otherwise tcp_send_fin() could be tempted to either delay FIN
7869 + * or even be forced to close flow without any FIN.
7870 + */
7871 +static void sk_forced_wmem_schedule(struct sock *sk, int size)
7872 +{
7873 + int amt, status;
7874 +
7875 + if (size <= sk->sk_forward_alloc)
7876 + return;
7877 + amt = sk_mem_pages(size);
7878 + sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
7879 + sk_memory_allocated_add(sk, amt, &status);
7880 +}
7881 +
7882 +/* Send a FIN. The caller locks the socket for us.
7883 + * We should try to send a FIN packet really hard, but eventually give up.
7884 */
7885 void tcp_send_fin(struct sock *sk)
7886 {
7887 + struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
7888 struct tcp_sock *tp = tcp_sk(sk);
7889 - struct sk_buff *skb = tcp_write_queue_tail(sk);
7890 - int mss_now;
7891
7892 - /* Optimization, tack on the FIN if we have a queue of
7893 - * unsent frames. But be careful about outgoing SACKS
7894 - * and IP options.
7895 + /* Optimization, tack on the FIN if we have one skb in write queue and
7896 + * this skb was not yet sent, or we are under memory pressure.
7897 + * Note: in the latter case, FIN packet will be sent after a timeout,
7898 + * as TCP stack thinks it has already been transmitted.
7899 */
7900 - mss_now = tcp_current_mss(sk);
7901 -
7902 - if (tcp_send_head(sk) != NULL) {
7903 - TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
7904 - TCP_SKB_CB(skb)->end_seq++;
7905 + if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
7906 +coalesce:
7907 + TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
7908 + TCP_SKB_CB(tskb)->end_seq++;
7909 tp->write_seq++;
7910 + if (!tcp_send_head(sk)) {
7911 + /* This means tskb was already sent.
7912 + * Pretend we included the FIN on previous transmit.
7913 + * We need to set tp->snd_nxt to the value it would have
7914 + * if FIN had been sent. This is because retransmit path
7915 + * does not change tp->snd_nxt.
7916 + */
7917 + tp->snd_nxt++;
7918 + return;
7919 + }
7920 } else {
7921 - /* Socket is locked, keep trying until memory is available. */
7922 - for (;;) {
7923 - skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
7924 - if (skb)
7925 - break;
7926 - yield();
7927 + skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
7928 + if (unlikely(!skb)) {
7929 + if (tskb)
7930 + goto coalesce;
7931 + return;
7932 }
7933 + skb_reserve(skb, MAX_TCP_HEADER);
7934 + sk_forced_wmem_schedule(sk, skb->truesize);
7935 /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
7936 tcp_init_nondata_skb(skb, tp->write_seq,
7937 TCPHDR_ACK | TCPHDR_FIN);
7938 tcp_queue_skb(sk, skb);
7939 }
7940 - __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
7941 + __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
7942 }
7943
7944 /* We get here when a process closes a file descriptor (either due to
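
sk_forced_wmem_schedule() above is deliberately limit-blind: a FIN must not be starved by memory accounting, so the charge is rounded up to whole accounting quanta and booked unconditionally. A standalone sketch of that accounting step, with QUANTUM standing in for SK_MEM_QUANTUM:

#define QUANTUM 4096			/* ~ SK_MEM_QUANTUM, one page */

struct acct {
	long forward_alloc;		/* bytes already reserved for this socket */
	long allocated;			/* quanta booked against the protocol */
};

static void forced_schedule(struct acct *sk, int size)
{
	long quanta;

	if (size <= sk->forward_alloc)	/* existing reservation covers it */
		return;
	quanta = (size + QUANTUM - 1) / QUANTUM;	/* ~ sk_mem_pages() */
	sk->forward_alloc += quanta * QUANTUM;		/* charge with no limit check */
	sk->allocated += quanta;	/* ~ sk_memory_allocated_add() */
}
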
7945 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
7946 index 79c965a51ab2..c0a418766f9c 100644
7947 --- a/net/netlink/af_netlink.c
7948 +++ b/net/netlink/af_netlink.c
7949 @@ -1599,13 +1599,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
7950 if (data == NULL)
7951 return NULL;
7952
7953 - skb = build_skb(data, size);
7954 + skb = __build_skb(data, size);
7955 if (skb == NULL)
7956 vfree(data);
7957 - else {
7958 - skb->head_frag = 0;
7959 + else
7960 skb->destructor = netlink_skb_destructor;
7961 - }
7962
7963 return skb;
7964 }
7965 diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
7966 index 4c171636efcd..a432b4c7869f 100644
7967 --- a/sound/pci/emu10k1/emu10k1.c
7968 +++ b/sound/pci/emu10k1/emu10k1.c
7969 @@ -183,8 +183,10 @@ static int snd_card_emu10k1_probe(struct pci_dev *pci,
7970 }
7971 #endif
7972
7973 - strcpy(card->driver, emu->card_capabilities->driver);
7974 - strcpy(card->shortname, emu->card_capabilities->name);
7975 + strlcpy(card->driver, emu->card_capabilities->driver,
7976 + sizeof(card->driver));
7977 + strlcpy(card->shortname, emu->card_capabilities->name,
7978 + sizeof(card->shortname));
7979 snprintf(card->longname, sizeof(card->longname),
7980 "%s (rev.%d, serial:0x%x) at 0x%lx, irq %i",
7981 card->shortname, emu->revision, emu->serial, emu->port, emu->irq);
7982 diff --git a/sound/pci/emu10k1/emu10k1_callback.c b/sound/pci/emu10k1/emu10k1_callback.c
7983 index 874cd76c7b7f..d2c7ea3a7610 100644
7984 --- a/sound/pci/emu10k1/emu10k1_callback.c
7985 +++ b/sound/pci/emu10k1/emu10k1_callback.c
7986 @@ -415,7 +415,7 @@ start_voice(struct snd_emux_voice *vp)
7987 snd_emu10k1_ptr_write(hw, Z2, ch, 0);
7988
7989 /* invalidate maps */
7990 - temp = (hw->silent_page.addr << 1) | MAP_PTI_MASK;
7991 + temp = (hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
7992 snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
7993 snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
7994 #if 0
7995 @@ -436,7 +436,7 @@ start_voice(struct snd_emux_voice *vp)
7996 snd_emu10k1_ptr_write(hw, CDF, ch, sample);
7997
7998 /* invalidate maps */
7999 - temp = ((unsigned int)hw->silent_page.addr << 1) | MAP_PTI_MASK;
8000 + temp = ((unsigned int)hw->silent_page.addr << hw->address_mode) | (hw->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
8001 snd_emu10k1_ptr_write(hw, MAPA, ch, temp);
8002 snd_emu10k1_ptr_write(hw, MAPB, ch, temp);
8003
8004 diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
8005 index 229269788023..92f2371791a3 100644
8006 --- a/sound/pci/emu10k1/emu10k1_main.c
8007 +++ b/sound/pci/emu10k1/emu10k1_main.c
8008 @@ -282,7 +282,7 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
8009 snd_emu10k1_ptr_write(emu, TCB, 0, 0); /* taken from original driver */
8010 snd_emu10k1_ptr_write(emu, TCBS, 0, 4); /* taken from original driver */
8011
8012 - silent_page = (emu->silent_page.addr << 1) | MAP_PTI_MASK;
8013 + silent_page = (emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
8014 for (ch = 0; ch < NUM_G; ch++) {
8015 snd_emu10k1_ptr_write(emu, MAPA, ch, silent_page);
8016 snd_emu10k1_ptr_write(emu, MAPB, ch, silent_page);
8017 @@ -348,6 +348,11 @@ static int snd_emu10k1_init(struct snd_emu10k1 *emu, int enable_ir, int resume)
8018 outl(reg | A_IOCFG_GPOUT0, emu->port + A_IOCFG);
8019 }
8020
8021 + if (emu->address_mode == 0) {
8022 + /* use 16M in 4G */
8023 + outl(inl(emu->port + HCFG) | HCFG_EXPANDED_MEM, emu->port + HCFG);
8024 + }
8025 +
8026 return 0;
8027 }
8028
8029 @@ -1424,7 +1429,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
8030 *
8031 */
8032 {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x20011102,
8033 - .driver = "Audigy2", .name = "SB Audigy 2 ZS Notebook [SB0530]",
8034 + .driver = "Audigy2", .name = "Audigy 2 ZS Notebook [SB0530]",
8035 .id = "Audigy2",
8036 .emu10k2_chip = 1,
8037 .ca0108_chip = 1,
8038 @@ -1574,7 +1579,7 @@ static struct snd_emu_chip_details emu_chip_details[] = {
8039 .adc_1361t = 1, /* 24 bit capture instead of 16bit */
8040 .ac97_chip = 1} ,
8041 {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x10051102,
8042 - .driver = "Audigy2", .name = "SB Audigy 2 Platinum EX [SB0280]",
8043 + .driver = "Audigy2", .name = "Audigy 2 Platinum EX [SB0280]",
8044 .id = "Audigy2",
8045 .emu10k2_chip = 1,
8046 .ca0102_chip = 1,
8047 @@ -1880,8 +1885,10 @@ int snd_emu10k1_create(struct snd_card *card,
8048
8049 is_audigy = emu->audigy = c->emu10k2_chip;
8050
8051 + /* set addressing mode */
8052 + emu->address_mode = is_audigy ? 0 : 1;
8053 /* set the DMA transfer mask */
8054 - emu->dma_mask = is_audigy ? AUDIGY_DMA_MASK : EMU10K1_DMA_MASK;
8055 + emu->dma_mask = emu->address_mode ? EMU10K1_DMA_MASK : AUDIGY_DMA_MASK;
8056 if (pci_set_dma_mask(pci, emu->dma_mask) < 0 ||
8057 pci_set_consistent_dma_mask(pci, emu->dma_mask) < 0) {
8058 dev_err(card->dev,
8059 @@ -1906,7 +1913,7 @@ int snd_emu10k1_create(struct snd_card *card,
8060
8061 emu->max_cache_pages = max_cache_bytes >> PAGE_SHIFT;
8062 if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
8063 - 32 * 1024, &emu->ptb_pages) < 0) {
8064 + (emu->address_mode ? 32 : 16) * 1024, &emu->ptb_pages) < 0) {
8065 err = -ENOMEM;
8066 goto error;
8067 }
8068 @@ -2005,8 +2012,8 @@ int snd_emu10k1_create(struct snd_card *card,
8069
8070 /* Clear silent pages and set up pointers */
8071 memset(emu->silent_page.area, 0, PAGE_SIZE);
8072 - silent_page = emu->silent_page.addr << 1;
8073 - for (idx = 0; idx < MAXPAGES; idx++)
8074 + silent_page = emu->silent_page.addr << emu->address_mode;
8075 + for (idx = 0; idx < (emu->address_mode ? MAXPAGES1 : MAXPAGES0); idx++)
8076 ((u32 *)emu->ptb_pages.area)[idx] = cpu_to_le32(silent_page | idx);
8077
8078 /* set up voice indices */
8079 diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
8080 index f82481bd2542..36f0b8646417 100644
8081 --- a/sound/pci/emu10k1/emupcm.c
8082 +++ b/sound/pci/emu10k1/emupcm.c
8083 @@ -380,7 +380,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu,
8084 snd_emu10k1_ptr_write(emu, Z1, voice, 0);
8085 snd_emu10k1_ptr_write(emu, Z2, voice, 0);
8086 /* invalidate maps */
8087 - silent_page = ((unsigned int)emu->silent_page.addr << 1) | MAP_PTI_MASK;
8088 + silent_page = ((unsigned int)emu->silent_page.addr << emu->address_mode) | (emu->address_mode ? MAP_PTI_MASK1 : MAP_PTI_MASK0);
8089 snd_emu10k1_ptr_write(emu, MAPA, voice, silent_page);
8090 snd_emu10k1_ptr_write(emu, MAPB, voice, silent_page);
8091 /* modulation envelope */
8092 diff --git a/sound/pci/emu10k1/emuproc.c b/sound/pci/emu10k1/emuproc.c
8093 index 2ca9f2e93139..53745f4c2bf5 100644
8094 --- a/sound/pci/emu10k1/emuproc.c
8095 +++ b/sound/pci/emu10k1/emuproc.c
8096 @@ -241,31 +241,22 @@ static void snd_emu10k1_proc_spdif_read(struct snd_info_entry *entry,
8097 struct snd_emu10k1 *emu = entry->private_data;
8098 u32 value;
8099 u32 value2;
8100 - unsigned long flags;
8101 u32 rate;
8102
8103 if (emu->card_capabilities->emu_model) {
8104 - spin_lock_irqsave(&emu->emu_lock, flags);
8105 snd_emu1010_fpga_read(emu, 0x38, &value);
8106 - spin_unlock_irqrestore(&emu->emu_lock, flags);
8107 if ((value & 0x1) == 0) {
8108 - spin_lock_irqsave(&emu->emu_lock, flags);
8109 snd_emu1010_fpga_read(emu, 0x2a, &value);
8110 snd_emu1010_fpga_read(emu, 0x2b, &value2);
8111 - spin_unlock_irqrestore(&emu->emu_lock, flags);
8112 rate = 0x1770000 / (((value << 5) | value2)+1);
8113 snd_iprintf(buffer, "ADAT Locked : %u\n", rate);
8114 } else {
8115 snd_iprintf(buffer, "ADAT Unlocked\n");
8116 }
8117 - spin_lock_irqsave(&emu->emu_lock, flags);
8118 snd_emu1010_fpga_read(emu, 0x20, &value);
8119 - spin_unlock_irqrestore(&emu->emu_lock, flags);
8120 if ((value & 0x4) == 0) {
8121 - spin_lock_irqsave(&emu->emu_lock, flags);
8122 snd_emu1010_fpga_read(emu, 0x28, &value);
8123 snd_emu1010_fpga_read(emu, 0x29, &value2);
8124 - spin_unlock_irqrestore(&emu->emu_lock, flags);
8125 rate = 0x1770000 / (((value << 5) | value2)+1);
8126 snd_iprintf(buffer, "SPDIF Locked : %d\n", rate);
8127 } else {
8128 @@ -410,14 +401,11 @@ static void snd_emu_proc_emu1010_reg_read(struct snd_info_entry *entry,
8129 {
8130 struct snd_emu10k1 *emu = entry->private_data;
8131 u32 value;
8132 - unsigned long flags;
8133 int i;
8134 snd_iprintf(buffer, "EMU1010 Registers:\n\n");
8135
8136 for(i = 0; i < 0x40; i+=1) {
8137 - spin_lock_irqsave(&emu->emu_lock, flags);
8138 snd_emu1010_fpga_read(emu, i, &value);
8139 - spin_unlock_irqrestore(&emu->emu_lock, flags);
8140 snd_iprintf(buffer, "%02X: %08X, %02X\n", i, value, (value >> 8) & 0x7f);
8141 }
8142 }
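
The emuproc.c hunks only delete locking, and the reason is a self-deadlock: snd_emu1010_fpga_read() takes emu_lock internally, so wrapping the call in another emu_lock acquisition deadlocks a non-recursive lock. The hazard, reduced to a pthread sketch with placeholder names:

#include <pthread.h>

static pthread_mutex_t emu_lock = PTHREAD_MUTEX_INITIALIZER;

static void fpga_read(int reg, unsigned int *val)
{
	pthread_mutex_lock(&emu_lock);	/* the callee owns the locking */
	*val = (unsigned int)reg;	/* placeholder for the hardware access */
	pthread_mutex_unlock(&emu_lock);
}

static unsigned int proc_read(int reg)
{
	unsigned int val;

	/* No lock here: taking emu_lock again around fpga_read() would
	 * deadlock, which is exactly what the removed lines did. */
	fpga_read(reg, &val);
	return val;
}
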
8143 diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c
8144 index c68e6dd2fa67..4f1f69be1865 100644
8145 --- a/sound/pci/emu10k1/memory.c
8146 +++ b/sound/pci/emu10k1/memory.c
8147 @@ -34,10 +34,11 @@
8148 * aligned pages in others
8149 */
8150 #define __set_ptb_entry(emu,page,addr) \
8151 - (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << 1) | (page)))
8152 + (((u32 *)(emu)->ptb_pages.area)[page] = cpu_to_le32(((addr) << (emu->address_mode)) | (page)))
8153
8154 #define UNIT_PAGES (PAGE_SIZE / EMUPAGESIZE)
8155 -#define MAX_ALIGN_PAGES (MAXPAGES / UNIT_PAGES)
8156 +#define MAX_ALIGN_PAGES0 (MAXPAGES0 / UNIT_PAGES)
8157 +#define MAX_ALIGN_PAGES1 (MAXPAGES1 / UNIT_PAGES)
8158 /* get aligned page from offset address */
8159 #define get_aligned_page(offset) ((offset) >> PAGE_SHIFT)
8160 /* get offset address from aligned page */
8161 @@ -124,7 +125,7 @@ static int search_empty_map_area(struct snd_emu10k1 *emu, int npages, struct lis
8162 }
8163 page = blk->mapped_page + blk->pages;
8164 }
8165 - size = MAX_ALIGN_PAGES - page;
8166 + size = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0) - page;
8167 if (size >= max_size) {
8168 *nextp = pos;
8169 return page;
8170 @@ -181,7 +182,7 @@ static int unmap_memblk(struct snd_emu10k1 *emu, struct snd_emu10k1_memblk *blk)
8171 q = get_emu10k1_memblk(p, mapped_link);
8172 end_page = q->mapped_page;
8173 } else
8174 - end_page = MAX_ALIGN_PAGES;
8175 + end_page = (emu->address_mode ? MAX_ALIGN_PAGES1 : MAX_ALIGN_PAGES0);
8176
8177 /* remove links */
8178 list_del(&blk->mapped_link);
8179 @@ -307,7 +308,7 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst
8180 if (snd_BUG_ON(!emu))
8181 return NULL;
8182 if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
8183 - runtime->dma_bytes >= MAXPAGES * EMUPAGESIZE))
8184 + runtime->dma_bytes >= (emu->address_mode ? MAXPAGES1 : MAXPAGES0) * EMUPAGESIZE))
8185 return NULL;
8186 hdr = emu->memhdr;
8187 if (snd_BUG_ON(!hdr))
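
Across the emu10k1 files the change is one encoding decision applied everywhere: a page-table entry is the DMA address shifted by address_mode (1 for the legacy 31-bit mode, 0 for the new Audigy 32-bit mode) OR'ed with the page index, and the table shrinks from 8192 to 4096 entries in 32-bit mode. A standalone sketch of that encoding; the MAXPAGES0/MAXPAGES1 values are assumed from the matching emu10k1.h change, which is not shown in this excerpt:

#include <stdint.h>
#include <stdio.h>

#define MAXPAGES0 4096		/* assumed: 32-bit mode, 16 KiB page table */
#define MAXPAGES1 8192		/* assumed: legacy 31-bit mode, 32 KiB table */

static uint32_t ptb_entry(uint32_t dma_addr, unsigned int page, int address_mode)
{
	return (dma_addr << address_mode) | page;	/* same shape as __set_ptb_entry() */
}

int main(void)
{
	int address_mode = 0;	/* Audigy: 32-bit DMA, HCFG_EXPANDED_MEM set */
	unsigned int maxpages = address_mode ? MAXPAGES1 : MAXPAGES0;

	printf("%u pages, first entry %#x\n",
	       maxpages, ptb_entry(0x1000, 0, address_mode));
	return 0;
}
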
8188 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
8189 index 1783a3332984..e3ad4a4d8d14 100644
8190 --- a/sound/pci/hda/patch_realtek.c
8191 +++ b/sound/pci/hda/patch_realtek.c
8192 @@ -4906,12 +4906,14 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
8193 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
8194 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
8195 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8196 + SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK),
8197 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
8198 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
8199 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8200 SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
8201 SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
8202 SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8203 + SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
8204 SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
8205 SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
8206 SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
8207 @@ -5412,6 +5414,8 @@ static int patch_alc269(struct hda_codec *codec)
8208 break;
8209 case 0x10ec0256:
8210 spec->codec_variant = ALC269_TYPE_ALC256;
8211 + spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */
8212 + alc_update_coef_idx(codec, 0x36, 1 << 13, 1 << 5); /* Switch pcbeep path to Line in path*/
8213 break;
8214 }
8215
8216 @@ -5425,8 +5429,8 @@ static int patch_alc269(struct hda_codec *codec)
8217 if (err < 0)
8218 goto error;
8219
8220 - if (!spec->gen.no_analog && spec->gen.beep_nid)
8221 - set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
8222 + if (!spec->gen.no_analog && spec->gen.beep_nid && spec->gen.mixer_nid)
8223 + set_beep_amp(spec, spec->gen.mixer_nid, 0x04, HDA_INPUT);
8224
8225 codec->patch_ops = alc_patch_ops;
8226 #ifdef CONFIG_PM
8227 diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
8228 index 6ba0b5517c40..2341fc334163 100644
8229 --- a/sound/pci/hda/thinkpad_helper.c
8230 +++ b/sound/pci/hda/thinkpad_helper.c
8231 @@ -72,6 +72,7 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
8232 if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
8233 old_vmaster_hook = spec->vmaster_mute.hook;
8234 spec->vmaster_mute.hook = update_tpacpi_mute_led;
8235 + spec->vmaster_mute_enum = 1;
8236 removefunc = false;
8237 }
8238 if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
8239 diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
8240 index 6ec074fec068..38c2adc99770 100644
8241 --- a/sound/soc/codecs/cs4271.c
8242 +++ b/sound/soc/codecs/cs4271.c
8243 @@ -561,10 +561,10 @@ static int cs4271_probe(struct snd_soc_codec *codec)
8244 if (gpio_is_valid(cs4271->gpio_nreset)) {
8245 /* Reset codec */
8246 gpio_direction_output(cs4271->gpio_nreset, 0);
8247 - udelay(1);
8248 + mdelay(1);
8249 gpio_set_value(cs4271->gpio_nreset, 1);
8250 /* Give the codec time to wake up */
8251 - udelay(1);
8252 + mdelay(1);
8253 }
8254
8255 ret = regmap_update_bits(cs4271->regmap, CS4271_MODE2,
8256 diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
8257 index 640c99198cda..28b77214d821 100644
8258 --- a/sound/soc/codecs/pcm512x.c
8259 +++ b/sound/soc/codecs/pcm512x.c
8260 @@ -261,9 +261,9 @@ static const struct soc_enum pcm512x_veds =
8261 static const struct snd_kcontrol_new pcm512x_controls[] = {
8262 SOC_DOUBLE_R_TLV("Digital Playback Volume", PCM512x_DIGITAL_VOLUME_2,
8263 PCM512x_DIGITAL_VOLUME_3, 0, 255, 1, digital_tlv),
8264 -SOC_DOUBLE_TLV("Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
8265 +SOC_DOUBLE_TLV("Analogue Playback Volume", PCM512x_ANALOG_GAIN_CTRL,
8266 PCM512x_LAGN_SHIFT, PCM512x_RAGN_SHIFT, 1, 1, analog_tlv),
8267 -SOC_DOUBLE_TLV("Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
8268 +SOC_DOUBLE_TLV("Analogue Playback Boost Volume", PCM512x_ANALOG_GAIN_BOOST,
8269 PCM512x_AGBL_SHIFT, PCM512x_AGBR_SHIFT, 1, 0, boost_tlv),
8270 SOC_DOUBLE("Digital Playback Switch", PCM512x_MUTE, PCM512x_RQML_SHIFT,
8271 PCM512x_RQMR_SHIFT, 1, 1),
8272 diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
8273 index 16aa4d99a713..691237a3ab52 100644
8274 --- a/sound/soc/codecs/rt5677.c
8275 +++ b/sound/soc/codecs/rt5677.c
8276 @@ -644,7 +644,7 @@ static int set_dmic_clk(struct snd_soc_dapm_widget *w,
8277 {
8278 struct snd_soc_codec *codec = w->codec;
8279 struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
8280 - int idx = rl6231_calc_dmic_clk(rt5677->sysclk);
8281 + int idx = rl6231_calc_dmic_clk(rt5677->lrck[RT5677_AIF1] << 8);
8282
8283 if (idx < 0)
8284 dev_err(codec->dev, "Failed to set DMIC clock\n");
8285 diff --git a/sound/soc/codecs/wm8741.c b/sound/soc/codecs/wm8741.c
8286 index 31bb4801a005..9e71c768966f 100644
8287 --- a/sound/soc/codecs/wm8741.c
8288 +++ b/sound/soc/codecs/wm8741.c
8289 @@ -123,7 +123,7 @@ static struct {
8290 };
8291
8292 static const unsigned int rates_11289[] = {
8293 - 44100, 88235,
8294 + 44100, 88200,
8295 };
8296
8297 static const struct snd_pcm_hw_constraint_list constraints_11289 = {
8298 @@ -150,7 +150,7 @@ static const struct snd_pcm_hw_constraint_list constraints_16384 = {
8299 };
8300
8301 static const unsigned int rates_16934[] = {
8302 - 44100, 88235,
8303 + 44100, 88200,
8304 };
8305
8306 static const struct snd_pcm_hw_constraint_list constraints_16934 = {
8307 @@ -168,7 +168,7 @@ static const struct snd_pcm_hw_constraint_list constraints_18432 = {
8308 };
8309
8310 static const unsigned int rates_22579[] = {
8311 - 44100, 88235, 1764000
8312 + 44100, 88200, 176400
8313 };
8314
8315 static const struct snd_pcm_hw_constraint_list constraints_22579 = {
8316 @@ -186,7 +186,7 @@ static const struct snd_pcm_hw_constraint_list constraints_24576 = {
8317 };
8318
8319 static const unsigned int rates_36864[] = {
8320 - 48000, 96000, 19200
8321 + 48000, 96000, 192000
8322 };
8323
8324 static const struct snd_pcm_hw_constraint_list constraints_36864 = {
8325 diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
8326 index a50010e2891f..82837e5e96ab 100644
8327 --- a/sound/soc/davinci/davinci-evm.c
8328 +++ b/sound/soc/davinci/davinci-evm.c
8329 @@ -431,18 +431,8 @@ static int davinci_evm_probe(struct platform_device *pdev)
8330 return ret;
8331 }
8332
8333 -static int davinci_evm_remove(struct platform_device *pdev)
8334 -{
8335 - struct snd_soc_card *card = platform_get_drvdata(pdev);
8336 -
8337 - snd_soc_unregister_card(card);
8338 -
8339 - return 0;
8340 -}
8341 -
8342 static struct platform_driver davinci_evm_driver = {
8343 .probe = davinci_evm_probe,
8344 - .remove = davinci_evm_remove,
8345 .driver = {
8346 .name = "davinci_evm",
8347 .owner = THIS_MODULE,
8348 diff --git a/sound/soc/samsung/s3c24xx-i2s.c b/sound/soc/samsung/s3c24xx-i2s.c
8349 index e87d9a2053b8..fb1d39324a65 100644
8350 --- a/sound/soc/samsung/s3c24xx-i2s.c
8351 +++ b/sound/soc/samsung/s3c24xx-i2s.c
8352 @@ -461,8 +461,8 @@ static int s3c24xx_iis_dev_probe(struct platform_device *pdev)
8353 return -ENOENT;
8354 }
8355 s3c24xx_i2s.regs = devm_ioremap_resource(&pdev->dev, res);
8356 - if (s3c24xx_i2s.regs == NULL)
8357 - return -ENXIO;
8358 + if (IS_ERR(s3c24xx_i2s.regs))
8359 + return PTR_ERR(s3c24xx_i2s.regs);
8360
8361 s3c24xx_i2s_pcm_stereo_out.dma_addr = res->start + S3C2410_IISFIFO;
8362 s3c24xx_i2s_pcm_stereo_in.dma_addr = res->start + S3C2410_IISFIFO;
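
The s3c24xx fix is the standard devm_ioremap_resource() idiom: the helper reports failure as an ERR_PTR-encoded pointer, never NULL, so the old NULL test let error pointers through. A userspace sketch of the ERR_PTR convention behind the IS_ERR/PTR_ERR pair:

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *map_resource(int fail)	/* ~ devm_ioremap_resource() */
{
	return fail ? ERR_PTR(-12) : (void *)0x1000;	/* -12 = -ENOMEM */
}

int probe(void)
{
	void *regs = map_resource(1);

	if (IS_ERR(regs))		/* correct: catches ERR_PTR values */
		return (int)PTR_ERR(regs);
	/* "regs == NULL" would miss the error and use a garbage pointer */
	return 0;
}
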
8363 diff --git a/sound/synth/emux/emux_oss.c b/sound/synth/emux/emux_oss.c
8364 index 319754cf6208..daf61abc3670 100644
8365 --- a/sound/synth/emux/emux_oss.c
8366 +++ b/sound/synth/emux/emux_oss.c
8367 @@ -118,12 +118,8 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
8368 if (snd_BUG_ON(!arg || !emu))
8369 return -ENXIO;
8370
8371 - mutex_lock(&emu->register_mutex);
8372 -
8373 - if (!snd_emux_inc_count(emu)) {
8374 - mutex_unlock(&emu->register_mutex);
8375 + if (!snd_emux_inc_count(emu))
8376 return -EFAULT;
8377 - }
8378
8379 memset(&callback, 0, sizeof(callback));
8380 callback.owner = THIS_MODULE;
8381 @@ -135,7 +131,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
8382 if (p == NULL) {
8383 snd_printk(KERN_ERR "can't create port\n");
8384 snd_emux_dec_count(emu);
8385 - mutex_unlock(&emu->register_mutex);
8386 return -ENOMEM;
8387 }
8388
8389 @@ -148,8 +143,6 @@ snd_emux_open_seq_oss(struct snd_seq_oss_arg *arg, void *closure)
8390 reset_port_mode(p, arg->seq_mode);
8391
8392 snd_emux_reset_port(p);
8393 -
8394 - mutex_unlock(&emu->register_mutex);
8395 return 0;
8396 }
8397
8398 @@ -195,13 +188,11 @@ snd_emux_close_seq_oss(struct snd_seq_oss_arg *arg)
8399 if (snd_BUG_ON(!emu))
8400 return -ENXIO;
8401
8402 - mutex_lock(&emu->register_mutex);
8403 snd_emux_sounds_off_all(p);
8404 snd_soundfont_close_check(emu->sflist, SF_CLIENT_NO(p->chset.port));
8405 snd_seq_event_port_detach(p->chset.client, p->chset.port);
8406 snd_emux_dec_count(emu);
8407
8408 - mutex_unlock(&emu->register_mutex);
8409 return 0;
8410 }
8411
8412 diff --git a/sound/synth/emux/emux_seq.c b/sound/synth/emux/emux_seq.c
8413 index 7778b8e19782..a0209204ae48 100644
8414 --- a/sound/synth/emux/emux_seq.c
8415 +++ b/sound/synth/emux/emux_seq.c
8416 @@ -124,12 +124,10 @@ snd_emux_detach_seq(struct snd_emux *emu)
8417 if (emu->voices)
8418 snd_emux_terminate_all(emu);
8419
8420 - mutex_lock(&emu->register_mutex);
8421 if (emu->client >= 0) {
8422 snd_seq_delete_kernel_client(emu->client);
8423 emu->client = -1;
8424 }
8425 - mutex_unlock(&emu->register_mutex);
8426 }
8427
8428
8429 @@ -269,8 +267,8 @@ snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data,
8430 /*
8431 * increment usage count
8432 */
8433 -int
8434 -snd_emux_inc_count(struct snd_emux *emu)
8435 +static int
8436 +__snd_emux_inc_count(struct snd_emux *emu)
8437 {
8438 emu->used++;
8439 if (!try_module_get(emu->ops.owner))
8440 @@ -284,12 +282,21 @@ snd_emux_inc_count(struct snd_emux *emu)
8441 return 1;
8442 }
8443
8444 +int snd_emux_inc_count(struct snd_emux *emu)
8445 +{
8446 + int ret;
8447 +
8448 + mutex_lock(&emu->register_mutex);
8449 + ret = __snd_emux_inc_count(emu);
8450 + mutex_unlock(&emu->register_mutex);
8451 + return ret;
8452 +}
8453
8454 /*
8455 * decrease usage count
8456 */
8457 -void
8458 -snd_emux_dec_count(struct snd_emux *emu)
8459 +static void
8460 +__snd_emux_dec_count(struct snd_emux *emu)
8461 {
8462 module_put(emu->card->module);
8463 emu->used--;
8464 @@ -298,6 +305,12 @@ snd_emux_dec_count(struct snd_emux *emu)
8465 module_put(emu->ops.owner);
8466 }
8467
8468 +void snd_emux_dec_count(struct snd_emux *emu)
8469 +{
8470 + mutex_lock(&emu->register_mutex);
8471 + __snd_emux_dec_count(emu);
8472 + mutex_unlock(&emu->register_mutex);
8473 +}
8474
8475 /*
8476 * Routine that is called upon a first use of a particular port
8477 @@ -317,7 +330,7 @@ snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info)
8478
8479 mutex_lock(&emu->register_mutex);
8480 snd_emux_init_port(p);
8481 - snd_emux_inc_count(emu);
8482 + __snd_emux_inc_count(emu);
8483 mutex_unlock(&emu->register_mutex);
8484 return 0;
8485 }
8486 @@ -340,7 +353,7 @@ snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info)
8487
8488 mutex_lock(&emu->register_mutex);
8489 snd_emux_sounds_off_all(p);
8490 - snd_emux_dec_count(emu);
8491 + __snd_emux_dec_count(emu);
8492 mutex_unlock(&emu->register_mutex);
8493 return 0;
8494 }
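
The emux changes are the classic "locked wrapper plus __unlocked helper" refactor: the public snd_emux_inc_count()/snd_emux_dec_count() now take register_mutex themselves, and paths that already hold the mutex (snd_emux_use/unuse) call the double-underscore variants, removing the nested acquisition. The shape of the pattern, as a minimal pthread sketch:

#include <pthread.h>

static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER;
static int used;

static int __inc_count(void)	/* caller must hold register_mutex */
{
	return ++used;
}

static int inc_count(void)	/* public, self-locking entry point */
{
	int ret;

	pthread_mutex_lock(&register_mutex);
	ret = __inc_count();
	pthread_mutex_unlock(&register_mutex);
	return ret;
}

static void subscribe(void)	/* ~ snd_emux_use(): lock already held */
{
	pthread_mutex_lock(&register_mutex);
	__inc_count();		/* must not retake the mutex */
	pthread_mutex_unlock(&register_mutex);
}
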
8495 diff --git a/tools/lib/traceevent/kbuffer-parse.c b/tools/lib/traceevent/kbuffer-parse.c
8496 index dcc665228c71..deb3569ab004 100644
8497 --- a/tools/lib/traceevent/kbuffer-parse.c
8498 +++ b/tools/lib/traceevent/kbuffer-parse.c
8499 @@ -372,7 +372,6 @@ translate_data(struct kbuffer *kbuf, void *data, void **rptr,
8500 switch (type_len) {
8501 case KBUFFER_TYPE_PADDING:
8502 *length = read_4(kbuf, data);
8503 - data += *length;
8504 break;
8505
8506 case KBUFFER_TYPE_TIME_EXTEND:
8507 diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
8508 index 6da965bdbc2c..85b523885f9d 100644
8509 --- a/tools/perf/util/cloexec.c
8510 +++ b/tools/perf/util/cloexec.c
8511 @@ -7,6 +7,12 @@
8512
8513 static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
8514
8515 +int __weak sched_getcpu(void)
8516 +{
8517 + errno = ENOSYS;
8518 + return -1;
8519 +}
8520 +
8521 static int perf_flag_probe(void)
8522 {
8523 /* use 'safest' configuration as used in perf_evsel__fallback() */
8524 diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
8525 index 94a5a7d829d5..68888c29b04a 100644
8526 --- a/tools/perf/util/cloexec.h
8527 +++ b/tools/perf/util/cloexec.h
8528 @@ -3,4 +3,10 @@
8529
8530 unsigned long perf_event_open_cloexec_flag(void);
8531
8532 +#ifdef __GLIBC_PREREQ
8533 +#if !__GLIBC_PREREQ(2, 6)
8534 +extern int sched_getcpu(void) __THROW;
8535 +#endif
8536 +#endif
8537 +
8538 #endif /* __PERF_CLOEXEC_H */
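
The perf fix relies on weak linkage: if the C library is too old to provide sched_getcpu() (glibc before 2.6, per the header guard), the __weak stub is linked in and fails with ENOSYS; any strong libc definition silently wins over it. A standalone illustration of the weak-symbol fallback, with a hypothetical function name:

#include <errno.h>
#include <stdio.h>

int __attribute__((weak)) get_cpu_id(void)	/* stand-in for sched_getcpu() */
{
	errno = ENOSYS;
	return -1;
}

int main(void)
{
	int cpu = get_cpu_id();	/* the weak stub runs unless a strong
				 * definition exists elsewhere */
	if (cpu < 0)
		perror("get_cpu_id");
	else
		printf("running on cpu %d\n", cpu);
	return 0;
}
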
8539 diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
8540 index 1e23a5bfb044..fcaf06b40558 100644
8541 --- a/tools/perf/util/symbol-elf.c
8542 +++ b/tools/perf/util/symbol-elf.c
8543 @@ -48,6 +48,10 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
8544 return GELF_ST_TYPE(sym->st_info);
8545 }
8546
8547 +#ifndef STT_GNU_IFUNC
8548 +#define STT_GNU_IFUNC 10
8549 +#endif
8550 +
8551 static inline int elf_sym__is_function(const GElf_Sym *sym)
8552 {
8553 return (elf_sym__type(sym) == STT_FUNC ||
8554 diff --git a/tools/power/x86/turbostat/Makefile b/tools/power/x86/turbostat/Makefile
8555 index d1b3a361e526..4039854560d0 100644
8556 --- a/tools/power/x86/turbostat/Makefile
8557 +++ b/tools/power/x86/turbostat/Makefile
8558 @@ -1,8 +1,12 @@
8559 CC = $(CROSS_COMPILE)gcc
8560 -BUILD_OUTPUT := $(PWD)
8561 +BUILD_OUTPUT := $(CURDIR)
8562 PREFIX := /usr
8563 DESTDIR :=
8564
8565 +ifeq ("$(origin O)", "command line")
8566 + BUILD_OUTPUT := $(O)
8567 +endif
8568 +
8569 turbostat : turbostat.c
8570 CFLAGS += -Wall
8571 CFLAGS += -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
8572 diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
8573 index 22fa819a9b6a..1c0772b340d8 100644
8574 --- a/virt/kvm/arm/arch_timer.c
8575 +++ b/virt/kvm/arm/arch_timer.c
8576 @@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
8577
8578 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
8579 {
8580 + int ret;
8581 struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
8582
8583 timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
8584 - kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
8585 - timer->irq->irq,
8586 - timer->irq->level);
8587 + ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
8588 + timer->irq->irq,
8589 + timer->irq->level);
8590 + WARN_ON(ret);
8591 }
8592
8593 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
8594 @@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
8595 timer_disarm(timer);
8596 }
8597
8598 -int kvm_timer_init(struct kvm *kvm)
8599 +void kvm_timer_enable(struct kvm *kvm)
8600 {
8601 - if (timecounter && wqueue) {
8602 - kvm->arch.timer.cntvoff = kvm_phys_timer_read();
8603 + if (kvm->arch.timer.enabled)
8604 + return;
8605 +
8606 + /*
8607 + * There is a potential race here between VCPUs starting for the first
8608 + * time, which may be enabling the timer multiple times. That doesn't
8609 + * hurt though, because we're just setting a variable to the same
8610 + * variable that it already was. The important thing is that all
8611 + * VCPUs have the enabled variable set, before entering the guest, if
8612 + * the arch timers are enabled.
8613 + */
8614 + if (timecounter && wqueue)
8615 kvm->arch.timer.enabled = 1;
8616 - }
8617 +}
8618
8619 - return 0;
8620 +void kvm_timer_init(struct kvm *kvm)
8621 +{
8622 + kvm->arch.timer.cntvoff = kvm_phys_timer_read();
8623 }
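
The race described in the kvm_timer_enable() comment is benign because the only racing write is idempotent: every VCPU may store the same value, and correctness only requires the flag to be set before guest entry. The same shape in a few lines, using a C11 atomic where the kernel uses a plain int:

#include <stdatomic.h>

static atomic_int timer_enabled;	/* ~ kvm->arch.timer.enabled */

static void timer_enable(void)		/* may run concurrently on many VCPUs */
{
	if (atomic_load(&timer_enabled))
		return;			/* some VCPU already enabled it */
	atomic_store(&timer_enabled, 1);/* every racer stores the same value */
}
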
8624 diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
8625 index 2935405ad22f..b9d48e8e1eb4 100644
8626 --- a/virt/kvm/arm/vgic-v2.c
8627 +++ b/virt/kvm/arm/vgic-v2.c
8628 @@ -72,6 +72,8 @@ static void vgic_v2_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
8629 {
8630 if (!(lr_desc.state & LR_STATE_MASK))
8631 vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr |= (1ULL << lr);
8632 + else
8633 + vcpu->arch.vgic_cpu.vgic_v2.vgic_elrsr &= ~(1ULL << lr);
8634 }
8635
8636 static u64 vgic_v2_get_elrsr(const struct kvm_vcpu *vcpu)
8637 @@ -84,6 +86,11 @@ static u64 vgic_v2_get_eisr(const struct kvm_vcpu *vcpu)
8638 return vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr;
8639 }
8640
8641 +static void vgic_v2_clear_eisr(struct kvm_vcpu *vcpu)
8642 +{
8643 + vcpu->arch.vgic_cpu.vgic_v2.vgic_eisr = 0;
8644 +}
8645 +
8646 static u32 vgic_v2_get_interrupt_status(const struct kvm_vcpu *vcpu)
8647 {
8648 u32 misr = vcpu->arch.vgic_cpu.vgic_v2.vgic_misr;
8649 @@ -148,6 +155,7 @@ static const struct vgic_ops vgic_v2_ops = {
8650 .sync_lr_elrsr = vgic_v2_sync_lr_elrsr,
8651 .get_elrsr = vgic_v2_get_elrsr,
8652 .get_eisr = vgic_v2_get_eisr,
8653 + .clear_eisr = vgic_v2_clear_eisr,
8654 .get_interrupt_status = vgic_v2_get_interrupt_status,
8655 .enable_underflow = vgic_v2_enable_underflow,
8656 .disable_underflow = vgic_v2_disable_underflow,
8657 diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
8658 index 1c2c8eef0599..58b8af00ee4c 100644
8659 --- a/virt/kvm/arm/vgic-v3.c
8660 +++ b/virt/kvm/arm/vgic-v3.c
8661 @@ -86,6 +86,8 @@ static void vgic_v3_sync_lr_elrsr(struct kvm_vcpu *vcpu, int lr,
8662 {
8663 if (!(lr_desc.state & LR_STATE_MASK))
8664 vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
8665 + else
8666 + vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr &= ~(1U << lr);
8667 }
8668
8669 static u64 vgic_v3_get_elrsr(const struct kvm_vcpu *vcpu)
8670 @@ -98,6 +100,11 @@ static u64 vgic_v3_get_eisr(const struct kvm_vcpu *vcpu)
8671 return vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr;
8672 }
8673
8674 +static void vgic_v3_clear_eisr(struct kvm_vcpu *vcpu)
8675 +{
8676 + vcpu->arch.vgic_cpu.vgic_v3.vgic_eisr = 0;
8677 +}
8678 +
8679 static u32 vgic_v3_get_interrupt_status(const struct kvm_vcpu *vcpu)
8680 {
8681 u32 misr = vcpu->arch.vgic_cpu.vgic_v3.vgic_misr;
8682 @@ -162,6 +169,7 @@ static const struct vgic_ops vgic_v3_ops = {
8683 .sync_lr_elrsr = vgic_v3_sync_lr_elrsr,
8684 .get_elrsr = vgic_v3_get_elrsr,
8685 .get_eisr = vgic_v3_get_eisr,
8686 + .clear_eisr = vgic_v3_clear_eisr,
8687 .get_interrupt_status = vgic_v3_get_interrupt_status,
8688 .enable_underflow = vgic_v3_enable_underflow,
8689 .disable_underflow = vgic_v3_disable_underflow,
8690 diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
8691 index aacdb59f30de..5f67fadfca65 100644
8692 --- a/virt/kvm/arm/vgic.c
8693 +++ b/virt/kvm/arm/vgic.c
8694 @@ -91,6 +91,7 @@
8695 #define ACCESS_WRITE_VALUE (3 << 1)
8696 #define ACCESS_WRITE_MASK(x) ((x) & (3 << 1))
8697
8698 +static int vgic_init(struct kvm *kvm);
8699 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
8700 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
8701 static void vgic_update_state(struct kvm *kvm);
8702 @@ -1218,6 +1219,11 @@ static inline u64 vgic_get_eisr(struct kvm_vcpu *vcpu)
8703 return vgic_ops->get_eisr(vcpu);
8704 }
8705
8706 +static inline void vgic_clear_eisr(struct kvm_vcpu *vcpu)
8707 +{
8708 + vgic_ops->clear_eisr(vcpu);
8709 +}
8710 +
8711 static inline u32 vgic_get_interrupt_status(struct kvm_vcpu *vcpu)
8712 {
8713 return vgic_ops->get_interrupt_status(vcpu);
8714 @@ -1257,6 +1263,7 @@ static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu)
8715 vgic_set_lr(vcpu, lr_nr, vlr);
8716 clear_bit(lr_nr, vgic_cpu->lr_used);
8717 vgic_cpu->vgic_irq_lr_map[irq] = LR_EMPTY;
8718 + vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
8719 }
8720
8721 /*
8722 @@ -1312,6 +1319,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
8723 BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
8724 vlr.state |= LR_STATE_PENDING;
8725 vgic_set_lr(vcpu, lr, vlr);
8726 + vgic_sync_lr_elrsr(vcpu, lr, vlr);
8727 return true;
8728 }
8729 }
8730 @@ -1333,6 +1341,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
8731 vlr.state |= LR_EOI_INT;
8732
8733 vgic_set_lr(vcpu, lr, vlr);
8734 + vgic_sync_lr_elrsr(vcpu, lr, vlr);
8735
8736 return true;
8737 }
8738 @@ -1501,6 +1510,14 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
8739 if (status & INT_STATUS_UNDERFLOW)
8740 vgic_disable_underflow(vcpu);
8741
8742 + /*
8743 + * In the next iterations of the vcpu loop, if we sync the vgic state
8744 + * after flushing it, but before entering the guest (this happens for
8745 + * pending signals and vmid rollovers), then make sure we don't pick
8746 + * up any old maintenance interrupts here.
8747 + */
8748 + vgic_clear_eisr(vcpu);
8749 +
8750 return level_pending;
8751 }
8752
8753 @@ -1607,7 +1624,7 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
8754 }
8755 }
8756
8757 -static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
8758 +static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
8759 unsigned int irq_num, bool level)
8760 {
8761 struct vgic_dist *dist = &kvm->arch.vgic;
8762 @@ -1672,7 +1689,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
8763 out:
8764 spin_unlock(&dist->lock);
8765
8766 - return ret;
8767 + return ret ? cpuid : -EINVAL;
8768 }
8769
8770 /**
8771 @@ -1692,11 +1709,29 @@ out:
8772 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
8773 bool level)
8774 {
8775 - if (likely(vgic_initialized(kvm)) &&
8776 - vgic_update_irq_pending(kvm, cpuid, irq_num, level))
8777 - vgic_kick_vcpus(kvm);
8778 + int ret = 0;
8779 + int vcpu_id;
8780
8781 - return 0;
8782 + if (unlikely(!vgic_initialized(kvm))) {
8783 + mutex_lock(&kvm->lock);
8784 + ret = vgic_init(kvm);
8785 + mutex_unlock(&kvm->lock);
8786 +
8787 + if (ret)
8788 + goto out;
8789 + }
8790 +
8791 + if (irq_num >= kvm->arch.vgic.nr_irqs)
8792 + return -EINVAL;
8793 +
8794 + vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
8795 + if (vcpu_id >= 0) {
8796 + /* kick the specified vcpu */
8797 + kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
8798 + }
8799 +
8800 +out:
8801 + return ret;
8802 }
8803
8804 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
8805 @@ -1726,39 +1761,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
8806
8807 int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
8808 vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
8809 - vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
8810 + vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
8811
8812 if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
8813 kvm_vgic_vcpu_destroy(vcpu);
8814 return -ENOMEM;
8815 }
8816
8817 - return 0;
8818 -}
8819 -
8820 -/**
8821 - * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
8822 - * @vcpu: pointer to the vcpu struct
8823 - *
8824 - * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
8825 - * this vcpu and enable the VGIC for this VCPU
8826 - */
8827 -static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
8828 -{
8829 - struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
8830 - struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
8831 - int i;
8832 -
8833 - for (i = 0; i < dist->nr_irqs; i++) {
8834 - if (i < VGIC_NR_PPIS)
8835 - vgic_bitmap_set_irq_val(&dist->irq_enabled,
8836 - vcpu->vcpu_id, i, 1);
8837 - if (i < VGIC_NR_PRIVATE_IRQS)
8838 - vgic_bitmap_set_irq_val(&dist->irq_cfg,
8839 - vcpu->vcpu_id, i, VGIC_CFG_EDGE);
8840 -
8841 - vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
8842 - }
8843 + memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
8844
8845 /*
8846 * Store the number of LRs per vcpu, so we don't have to go
8847 @@ -1767,7 +1777,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
8848 */
8849 vgic_cpu->nr_lr = vgic->nr_lr;
8850
8851 - vgic_enable(vcpu);
8852 + return 0;
8853 }
8854
8855 void kvm_vgic_destroy(struct kvm *kvm)
8856 @@ -1804,19 +1814,19 @@ void kvm_vgic_destroy(struct kvm *kvm)
8857 * Allocate and initialize the various data structures. Must be called
8858 * with kvm->lock held!
8859 */
8860 -static int vgic_init_maps(struct kvm *kvm)
8861 +static int vgic_init(struct kvm *kvm)
8862 {
8863 struct vgic_dist *dist = &kvm->arch.vgic;
8864 struct kvm_vcpu *vcpu;
8865 int nr_cpus, nr_irqs;
8866 - int ret, i;
8867 + int ret, i, vcpu_id;
8868
8869 if (dist->nr_cpus) /* Already allocated */
8870 return 0;
8871
8872 nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
8873 if (!nr_cpus) /* No vcpus? Can't be good... */
8874 - return -EINVAL;
8875 + return -ENODEV;
8876
8877 /*
8878 * If nobody configured the number of interrupts, use the
8879 @@ -1859,16 +1869,28 @@ static int vgic_init_maps(struct kvm *kvm)
8880 if (ret)
8881 goto out;
8882
8883 - kvm_for_each_vcpu(i, vcpu, kvm) {
8884 + for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
8885 + vgic_set_target_reg(kvm, 0, i);
8886 +
8887 + kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
8888 ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
8889 if (ret) {
8890 kvm_err("VGIC: Failed to allocate vcpu memory\n");
8891 break;
8892 }
8893 - }
8894
8895 - for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
8896 - vgic_set_target_reg(kvm, 0, i);
8897 + for (i = 0; i < dist->nr_irqs; i++) {
8898 + if (i < VGIC_NR_PPIS)
8899 + vgic_bitmap_set_irq_val(&dist->irq_enabled,
8900 + vcpu->vcpu_id, i, 1);
8901 + if (i < VGIC_NR_PRIVATE_IRQS)
8902 + vgic_bitmap_set_irq_val(&dist->irq_cfg,
8903 + vcpu->vcpu_id, i,
8904 + VGIC_CFG_EDGE);
8905 + }
8906 +
8907 + vgic_enable(vcpu);
8908 + }
8909
8910 out:
8911 if (ret)
8912 @@ -1878,18 +1900,16 @@ out:
8913 }
8914
8915 /**
8916 - * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
8917 + * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
8918 * @kvm: pointer to the kvm struct
8919 *
8920 * Map the virtual CPU interface into the VM before running any VCPUs. We
8921 * can't do this at creation time, because user space must first set the
8922 - * virtual CPU interface address in the guest physical address space. Also
8923 - * initialize the ITARGETSRn regs to 0 on the emulated distributor.
8924 + * virtual CPU interface address in the guest physical address space.
8925 */
8926 -int kvm_vgic_init(struct kvm *kvm)
8927 +int kvm_vgic_map_resources(struct kvm *kvm)
8928 {
8929 - struct kvm_vcpu *vcpu;
8930 - int ret = 0, i;
8931 + int ret = 0;
8932
8933 if (!irqchip_in_kernel(kvm))
8934 return 0;
8935 @@ -1906,7 +1926,11 @@ int kvm_vgic_init(struct kvm *kvm)
8936 goto out;
8937 }
8938
8939 - ret = vgic_init_maps(kvm);
8940 + /*
8941 + * Initialize the vgic if this hasn't already been done on demand by
8942 + * accessing the vgic state from userspace.
8943 + */
8944 + ret = vgic_init(kvm);
8945 if (ret) {
8946 kvm_err("Unable to allocate maps\n");
8947 goto out;
8948 @@ -1920,9 +1944,6 @@ int kvm_vgic_init(struct kvm *kvm)
8949 goto out;
8950 }
8951
8952 - kvm_for_each_vcpu(i, vcpu, kvm)
8953 - kvm_vgic_vcpu_init(vcpu);
8954 -
8955 kvm->arch.vgic.ready = true;
8956 out:
8957 if (ret)
8958 @@ -2167,7 +2188,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
8959
8960 mutex_lock(&dev->kvm->lock);
8961
8962 - ret = vgic_init_maps(dev->kvm);
8963 + ret = vgic_init(dev->kvm);
8964 if (ret)
8965 goto out;
8966
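
The net effect of the vgic rework above: kvm_vgic_inject_irq() initializes the distributor on demand under kvm->lock, rejects out-of-range IRQ numbers only after initialization (once nr_irqs is known), and kicks just the VCPU the interrupt was routed to instead of all of them. The control flow reduced to a sketch, with simplified stand-in types and helpers:

#include <pthread.h>

struct vm {
	pthread_mutex_t lock;
	int vgic_ready;		/* ~ vgic_initialized() */
	unsigned int nr_irqs;
};

static int vgic_init_vm(struct vm *vm) { vm->vgic_ready = 1; return 0; }
static int route_irq(struct vm *vm, unsigned int irq)
{				/* ~ vgic_update_irq_pending(): target vcpu or <0 */
	(void)vm;
	return (int)(irq % 2);
}
static void kick_vcpu(int id) { (void)id; }

static int inject_irq(struct vm *vm, unsigned int irq)
{
	int ret = 0, vcpu_id;

	if (!vm->vgic_ready) {		/* first injection: init lazily */
		pthread_mutex_lock(&vm->lock);
		ret = vgic_init_vm(vm);
		pthread_mutex_unlock(&vm->lock);
		if (ret)
			return ret;
	}

	if (irq >= vm->nr_irqs)		/* validate only once nr_irqs exists */
		return -22;		/* -EINVAL */

	vcpu_id = route_irq(vm, irq);
	if (vcpu_id >= 0)
		kick_vcpu(vcpu_id);	/* kick only the target VCPU */
	return ret;
}
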
8967 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
8968 index 272fee82f89e..4e52bb926374 100644
8969 --- a/virt/kvm/kvm_main.c
8970 +++ b/virt/kvm/kvm_main.c
8971 @@ -1615,8 +1615,8 @@ int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
8972 ghc->generation = slots->generation;
8973 ghc->len = len;
8974 ghc->memslot = gfn_to_memslot(kvm, start_gfn);
8975 - ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail);
8976 - if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) {
8977 + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, NULL);
8978 + if (!kvm_is_error_hva(ghc->hva) && nr_pages_needed <= 1) {
8979 ghc->hva += offset;
8980 } else {
8981 /*