Contents of /trunk/kernel-alx/patches-4.4/0118-4.4.19-all-fixes.patch
Parent Directory | Revision Log
Revision 2827 -
(show annotations)
(download)
Tue Sep 13 07:18:26 2016 UTC (8 years ago) by niro
File size: 187821 byte(s)
-linux-4.4.19
1 | diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt |
2 | index a78bf1ffa68c..39b7f612c418 100644 |
3 | --- a/Documentation/module-signing.txt |
4 | +++ b/Documentation/module-signing.txt |
5 | @@ -271,3 +271,9 @@ Since the private key is used to sign modules, viruses and malware could use |
6 | the private key to sign modules and compromise the operating system. The |
7 | private key must be either destroyed or moved to a secure location and not kept |
8 | in the root node of the kernel source tree. |
9 | + |
10 | +If you use the same private key to sign modules for multiple kernel |
11 | +configurations, you must ensure that the module version information is |
12 | +sufficient to prevent loading a module into a different kernel. Either |
13 | +set CONFIG_MODVERSIONS=y or ensure that each configuration has a different |
14 | +kernel release string by changing EXTRAVERSION or CONFIG_LOCALVERSION. |
15 | diff --git a/Makefile b/Makefile |
16 | index eaedea88a8a7..695c64ec160c 100644 |
17 | --- a/Makefile |
18 | +++ b/Makefile |
19 | @@ -1,6 +1,6 @@ |
20 | VERSION = 4 |
21 | PATCHLEVEL = 4 |
22 | -SUBLEVEL = 18 |
23 | +SUBLEVEL = 19 |
24 | EXTRAVERSION = |
25 | NAME = Blurry Fish Butt |
26 | |
27 | diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h |
28 | index 57af2f05ae84..3cab04255ae0 100644 |
29 | --- a/arch/arc/include/asm/pgtable.h |
30 | +++ b/arch/arc/include/asm/pgtable.h |
31 | @@ -110,7 +110,7 @@ |
32 | #define ___DEF (_PAGE_PRESENT | _PAGE_CACHEABLE) |
33 | |
34 | /* Set of bits not changed in pte_modify */ |
35 | -#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY) |
36 | +#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_SPECIAL) |
37 | |
38 | /* More Abbrevaited helpers */ |
39 | #define PAGE_U_NONE __pgprot(___DEF) |
40 | diff --git a/arch/arm/boot/dts/sun4i-a10-a1000.dts b/arch/arm/boot/dts/sun4i-a10-a1000.dts |
41 | index 97570cb7f2fc..1d23527d4ecf 100644 |
42 | --- a/arch/arm/boot/dts/sun4i-a10-a1000.dts |
43 | +++ b/arch/arm/boot/dts/sun4i-a10-a1000.dts |
44 | @@ -84,6 +84,7 @@ |
45 | regulator-name = "emac-3v3"; |
46 | regulator-min-microvolt = <3300000>; |
47 | regulator-max-microvolt = <3300000>; |
48 | + startup-delay-us = <20000>; |
49 | enable-active-high; |
50 | gpio = <&pio 7 15 GPIO_ACTIVE_HIGH>; |
51 | }; |
52 | diff --git a/arch/arm/boot/dts/sun4i-a10-hackberry.dts b/arch/arm/boot/dts/sun4i-a10-hackberry.dts |
53 | index 2b17c5199151..6de83a6187d0 100644 |
54 | --- a/arch/arm/boot/dts/sun4i-a10-hackberry.dts |
55 | +++ b/arch/arm/boot/dts/sun4i-a10-hackberry.dts |
56 | @@ -66,6 +66,7 @@ |
57 | regulator-name = "emac-3v3"; |
58 | regulator-min-microvolt = <3300000>; |
59 | regulator-max-microvolt = <3300000>; |
60 | + startup-delay-us = <20000>; |
61 | enable-active-high; |
62 | gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; |
63 | }; |
64 | diff --git a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts |
65 | index 7afc7a64eef1..e28f080b1fd5 100644 |
66 | --- a/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts |
67 | +++ b/arch/arm/boot/dts/sun4i-a10-jesurun-q5.dts |
68 | @@ -80,6 +80,7 @@ |
69 | regulator-name = "emac-3v3"; |
70 | regulator-min-microvolt = <3300000>; |
71 | regulator-max-microvolt = <3300000>; |
72 | + startup-delay-us = <20000>; |
73 | enable-active-high; |
74 | gpio = <&pio 7 19 GPIO_ACTIVE_HIGH>; /* PH19 */ |
75 | }; |
76 | diff --git a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts |
77 | index 9fea918f949e..39731a78f087 100644 |
78 | --- a/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts |
79 | +++ b/arch/arm/boot/dts/sun5i-a10s-wobo-i5.dts |
80 | @@ -79,6 +79,7 @@ |
81 | regulator-name = "emac-3v3"; |
82 | regulator-min-microvolt = <3300000>; |
83 | regulator-max-microvolt = <3300000>; |
84 | + startup-delay-us = <20000>; |
85 | enable-active-high; |
86 | gpio = <&pio 0 2 GPIO_ACTIVE_HIGH>; |
87 | }; |
88 | diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi |
89 | index cc093a482aa4..8fe39e1b680e 100644 |
90 | --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi |
91 | +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi |
92 | @@ -517,7 +517,7 @@ |
93 | #address-cells = <0>; |
94 | |
95 | reg = <0x0 0xffb71000 0x0 0x1000>, |
96 | - <0x0 0xffb72000 0x0 0x1000>, |
97 | + <0x0 0xffb72000 0x0 0x2000>, |
98 | <0x0 0xffb74000 0x0 0x2000>, |
99 | <0x0 0xffb76000 0x0 0x2000>; |
100 | interrupts = <GIC_PPI 9 |
101 | diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h |
102 | index a307eb6e7fa8..7f94755089e2 100644 |
103 | --- a/arch/arm64/include/asm/ptrace.h |
104 | +++ b/arch/arm64/include/asm/ptrace.h |
105 | @@ -117,6 +117,8 @@ struct pt_regs { |
106 | }; |
107 | u64 orig_x0; |
108 | u64 syscallno; |
109 | + u64 orig_addr_limit; |
110 | + u64 unused; // maintain 16 byte alignment |
111 | }; |
112 | |
113 | #define arch_has_single_step() (1) |
114 | diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c |
115 | index 25de8b244961..087cf9a65359 100644 |
116 | --- a/arch/arm64/kernel/asm-offsets.c |
117 | +++ b/arch/arm64/kernel/asm-offsets.c |
118 | @@ -58,6 +58,7 @@ int main(void) |
119 | DEFINE(S_PC, offsetof(struct pt_regs, pc)); |
120 | DEFINE(S_ORIG_X0, offsetof(struct pt_regs, orig_x0)); |
121 | DEFINE(S_SYSCALLNO, offsetof(struct pt_regs, syscallno)); |
122 | + DEFINE(S_ORIG_ADDR_LIMIT, offsetof(struct pt_regs, orig_addr_limit)); |
123 | DEFINE(S_FRAME_SIZE, sizeof(struct pt_regs)); |
124 | BLANK(); |
125 | DEFINE(MM_CONTEXT_ID, offsetof(struct mm_struct, context.id.counter)); |
126 | diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c |
127 | index c1492ba1f6d1..e51f27ac13fd 100644 |
128 | --- a/arch/arm64/kernel/debug-monitors.c |
129 | +++ b/arch/arm64/kernel/debug-monitors.c |
130 | @@ -152,7 +152,6 @@ static int debug_monitors_init(void) |
131 | /* Clear the OS lock. */ |
132 | on_each_cpu(clear_os_lock, NULL, 1); |
133 | isb(); |
134 | - local_dbg_enable(); |
135 | |
136 | /* Register hotplug handler. */ |
137 | __register_cpu_notifier(&os_lock_nb); |
138 | diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S |
139 | index 7ed3d75f6304..5a3753d09e20 100644 |
140 | --- a/arch/arm64/kernel/entry.S |
141 | +++ b/arch/arm64/kernel/entry.S |
142 | @@ -27,6 +27,7 @@ |
143 | #include <asm/cpufeature.h> |
144 | #include <asm/errno.h> |
145 | #include <asm/esr.h> |
146 | +#include <asm/memory.h> |
147 | #include <asm/thread_info.h> |
148 | #include <asm/unistd.h> |
149 | |
150 | @@ -93,7 +94,13 @@ |
151 | disable_step_tsk x19, x20 // exceptions when scheduling. |
152 | .else |
153 | add x21, sp, #S_FRAME_SIZE |
154 | - .endif |
155 | + get_thread_info tsk |
156 | + /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */ |
157 | + ldr x20, [tsk, #TI_ADDR_LIMIT] |
158 | + str x20, [sp, #S_ORIG_ADDR_LIMIT] |
159 | + mov x20, #TASK_SIZE_64 |
160 | + str x20, [tsk, #TI_ADDR_LIMIT] |
161 | + .endif /* \el == 0 */ |
162 | mrs x22, elr_el1 |
163 | mrs x23, spsr_el1 |
164 | stp lr, x21, [sp, #S_LR] |
165 | @@ -117,6 +124,12 @@ |
166 | .endm |
167 | |
168 | .macro kernel_exit, el |
169 | + .if \el != 0 |
170 | + /* Restore the task's original addr_limit. */ |
171 | + ldr x20, [sp, #S_ORIG_ADDR_LIMIT] |
172 | + str x20, [tsk, #TI_ADDR_LIMIT] |
173 | + .endif |
174 | + |
175 | ldp x21, x22, [sp, #S_PC] // load ELR, SPSR |
176 | .if \el == 0 |
177 | ct_user_enter |
178 | diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c |
179 | index b1adc51b2c2e..f3c3d8fee5ba 100644 |
180 | --- a/arch/arm64/kernel/smp.c |
181 | +++ b/arch/arm64/kernel/smp.c |
182 | @@ -188,7 +188,6 @@ asmlinkage void secondary_start_kernel(void) |
183 | set_cpu_online(cpu, true); |
184 | complete(&cpu_running); |
185 | |
186 | - local_dbg_enable(); |
187 | local_irq_enable(); |
188 | local_async_enable(); |
189 | |
190 | @@ -334,8 +333,8 @@ void __init smp_cpus_done(unsigned int max_cpus) |
191 | |
192 | void __init smp_prepare_boot_cpu(void) |
193 | { |
194 | - cpuinfo_store_boot_cpu(); |
195 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); |
196 | + cpuinfo_store_boot_cpu(); |
197 | } |
198 | |
199 | static u64 __init of_get_cpu_mpidr(struct device_node *dn) |
200 | diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c |
201 | index 116ad654dd59..653735a8c58a 100644 |
202 | --- a/arch/arm64/mm/mmu.c |
203 | +++ b/arch/arm64/mm/mmu.c |
204 | @@ -652,9 +652,9 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) |
205 | /* |
206 | * Check whether the physical FDT address is set and meets the minimum |
207 | * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be |
208 | - * at least 8 bytes so that we can always access the size field of the |
209 | - * FDT header after mapping the first chunk, double check here if that |
210 | - * is indeed the case. |
211 | + * at least 8 bytes so that we can always access the magic and size |
212 | + * fields of the FDT header after mapping the first chunk, double check |
213 | + * here if that is indeed the case. |
214 | */ |
215 | BUILD_BUG_ON(MIN_FDT_ALIGN < 8); |
216 | if (!dt_phys || dt_phys % MIN_FDT_ALIGN) |
217 | @@ -682,7 +682,7 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys) |
218 | create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, |
219 | SWAPPER_BLOCK_SIZE, prot); |
220 | |
221 | - if (fdt_check_header(dt_virt) != 0) |
222 | + if (fdt_magic(dt_virt) != FDT_MAGIC) |
223 | return NULL; |
224 | |
225 | size = fdt_totalsize(dt_virt); |
226 | diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S |
227 | index b8f04b3f2786..1f6bb29ca53b 100644 |
228 | --- a/arch/arm64/mm/proc.S |
229 | +++ b/arch/arm64/mm/proc.S |
230 | @@ -156,6 +156,8 @@ ENTRY(__cpu_setup) |
231 | msr cpacr_el1, x0 // Enable FP/ASIMD |
232 | mov x0, #1 << 12 // Reset mdscr_el1 and disable |
233 | msr mdscr_el1, x0 // access to the DCC from EL0 |
234 | + isb // Unmask debug exceptions now, |
235 | + enable_dbg // since this is per-cpu |
236 | reset_pmuserenr_el0 x0 // Disable PMU access from EL0 |
237 | /* |
238 | * Memory region attributes for LPAE: |
239 | diff --git a/arch/metag/include/asm/cmpxchg_lnkget.h b/arch/metag/include/asm/cmpxchg_lnkget.h |
240 | index 0154e2807ebb..2369ad394876 100644 |
241 | --- a/arch/metag/include/asm/cmpxchg_lnkget.h |
242 | +++ b/arch/metag/include/asm/cmpxchg_lnkget.h |
243 | @@ -73,7 +73,7 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old, |
244 | " DCACHE [%2], %0\n" |
245 | #endif |
246 | "2:\n" |
247 | - : "=&d" (temp), "=&da" (retval) |
248 | + : "=&d" (temp), "=&d" (retval) |
249 | : "da" (m), "bd" (old), "da" (new) |
250 | : "cc" |
251 | ); |
252 | diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c |
253 | index 1f910563fdf6..d76275da54cb 100644 |
254 | --- a/arch/mips/kernel/csrc-r4k.c |
255 | +++ b/arch/mips/kernel/csrc-r4k.c |
256 | @@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = { |
257 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
258 | }; |
259 | |
260 | -static u64 notrace r4k_read_sched_clock(void) |
261 | +static u64 __maybe_unused notrace r4k_read_sched_clock(void) |
262 | { |
263 | return read_c0_count(); |
264 | } |
265 | @@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void) |
266 | |
267 | clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); |
268 | |
269 | +#ifndef CONFIG_CPU_FREQ |
270 | sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); |
271 | +#endif |
272 | |
273 | return 0; |
274 | } |
275 | diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c |
276 | index dc10c77b7500..d6476d11212e 100644 |
277 | --- a/arch/mips/kvm/emulate.c |
278 | +++ b/arch/mips/kvm/emulate.c |
279 | @@ -1629,8 +1629,14 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, |
280 | |
281 | preempt_disable(); |
282 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { |
283 | - if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) |
284 | - kvm_mips_handle_kseg0_tlb_fault(va, vcpu); |
285 | + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 && |
286 | + kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) { |
287 | + kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n", |
288 | + __func__, va, vcpu, read_c0_entryhi()); |
289 | + er = EMULATE_FAIL; |
290 | + preempt_enable(); |
291 | + goto done; |
292 | + } |
293 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || |
294 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { |
295 | int index; |
296 | @@ -1665,14 +1671,19 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, |
297 | run, vcpu); |
298 | preempt_enable(); |
299 | goto dont_update_pc; |
300 | - } else { |
301 | - /* |
302 | - * We fault an entry from the guest tlb to the |
303 | - * shadow host TLB |
304 | - */ |
305 | - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, |
306 | - NULL, |
307 | - NULL); |
308 | + } |
309 | + /* |
310 | + * We fault an entry from the guest tlb to the |
311 | + * shadow host TLB |
312 | + */ |
313 | + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, |
314 | + NULL, NULL)) { |
315 | + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", |
316 | + __func__, va, index, vcpu, |
317 | + read_c0_entryhi()); |
318 | + er = EMULATE_FAIL; |
319 | + preempt_enable(); |
320 | + goto done; |
321 | } |
322 | } |
323 | } else { |
324 | @@ -2633,8 +2644,13 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, |
325 | * OK we have a Guest TLB entry, now inject it into the |
326 | * shadow host TLB |
327 | */ |
328 | - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, |
329 | - NULL); |
330 | + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, |
331 | + NULL, NULL)) { |
332 | + kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", |
333 | + __func__, va, index, vcpu, |
334 | + read_c0_entryhi()); |
335 | + er = EMULATE_FAIL; |
336 | + } |
337 | } |
338 | } |
339 | |
340 | diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c |
341 | index aed0ac2a4972..7a7ed9ca01bb 100644 |
342 | --- a/arch/mips/kvm/tlb.c |
343 | +++ b/arch/mips/kvm/tlb.c |
344 | @@ -276,7 +276,7 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, |
345 | } |
346 | |
347 | gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); |
348 | - if (gfn >= kvm->arch.guest_pmap_npages) { |
349 | + if ((gfn | 1) >= kvm->arch.guest_pmap_npages) { |
350 | kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, |
351 | gfn, badvaddr); |
352 | kvm_mips_dump_host_tlbs(); |
353 | @@ -361,25 +361,39 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, |
354 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; |
355 | struct kvm *kvm = vcpu->kvm; |
356 | pfn_t pfn0, pfn1; |
357 | - |
358 | - if ((tlb->tlb_hi & VPN2_MASK) == 0) { |
359 | - pfn0 = 0; |
360 | - pfn1 = 0; |
361 | - } else { |
362 | - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) |
363 | - >> PAGE_SHIFT) < 0) |
364 | - return -1; |
365 | - |
366 | - if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) |
367 | - >> PAGE_SHIFT) < 0) |
368 | - return -1; |
369 | - |
370 | - pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) |
371 | - >> PAGE_SHIFT]; |
372 | - pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) |
373 | - >> PAGE_SHIFT]; |
374 | + gfn_t gfn0, gfn1; |
375 | + long tlb_lo[2]; |
376 | + |
377 | + tlb_lo[0] = tlb->tlb_lo0; |
378 | + tlb_lo[1] = tlb->tlb_lo1; |
379 | + |
380 | + /* |
381 | + * The commpage address must not be mapped to anything else if the guest |
382 | + * TLB contains entries nearby, or commpage accesses will break. |
383 | + */ |
384 | + if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) & |
385 | + VPN2_MASK & (PAGE_MASK << 1))) |
386 | + tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0; |
387 | + |
388 | + gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT; |
389 | + gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT; |
390 | + if (gfn0 >= kvm->arch.guest_pmap_npages || |
391 | + gfn1 >= kvm->arch.guest_pmap_npages) { |
392 | + kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n", |
393 | + __func__, gfn0, gfn1, tlb->tlb_hi); |
394 | + kvm_mips_dump_guest_tlbs(vcpu); |
395 | + return -1; |
396 | } |
397 | |
398 | + if (kvm_mips_map_page(kvm, gfn0) < 0) |
399 | + return -1; |
400 | + |
401 | + if (kvm_mips_map_page(kvm, gfn1) < 0) |
402 | + return -1; |
403 | + |
404 | + pfn0 = kvm->arch.guest_pmap[gfn0]; |
405 | + pfn1 = kvm->arch.guest_pmap[gfn1]; |
406 | + |
407 | if (hpa0) |
408 | *hpa0 = pfn0 << PAGE_SHIFT; |
409 | |
410 | @@ -391,9 +405,9 @@ int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, |
411 | kvm_mips_get_kernel_asid(vcpu) : |
412 | kvm_mips_get_user_asid(vcpu)); |
413 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | |
414 | - (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); |
415 | + (tlb_lo[0] & MIPS3_PG_D) | (tlb_lo[0] & MIPS3_PG_V); |
416 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | |
417 | - (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); |
418 | + (tlb_lo[1] & MIPS3_PG_D) | (tlb_lo[1] & MIPS3_PG_V); |
419 | |
420 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, |
421 | tlb->tlb_lo0, tlb->tlb_lo1); |
422 | @@ -794,10 +808,16 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) |
423 | local_irq_restore(flags); |
424 | return KVM_INVALID_INST; |
425 | } |
426 | - kvm_mips_handle_mapped_seg_tlb_fault(vcpu, |
427 | - &vcpu->arch. |
428 | - guest_tlb[index], |
429 | - NULL, NULL); |
430 | + if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, |
431 | + &vcpu->arch.guest_tlb[index], |
432 | + NULL, NULL)) { |
433 | + kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n", |
434 | + __func__, opc, index, vcpu, |
435 | + read_c0_entryhi()); |
436 | + kvm_mips_dump_guest_tlbs(vcpu); |
437 | + local_irq_restore(flags); |
438 | + return KVM_INVALID_INST; |
439 | + } |
440 | inst = *(opc); |
441 | } |
442 | local_irq_restore(flags); |
443 | diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c |
444 | index a2631a52ca99..444802e78554 100644 |
445 | --- a/arch/mips/loongson64/loongson-3/hpet.c |
446 | +++ b/arch/mips/loongson64/loongson-3/hpet.c |
447 | @@ -13,8 +13,8 @@ |
448 | #define SMBUS_PCI_REG64 0x64 |
449 | #define SMBUS_PCI_REGB4 0xb4 |
450 | |
451 | -#define HPET_MIN_CYCLES 64 |
452 | -#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1)) |
453 | +#define HPET_MIN_CYCLES 16 |
454 | +#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12) |
455 | |
456 | static DEFINE_SPINLOCK(hpet_lock); |
457 | DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device); |
458 | @@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt) |
459 | static int hpet_next_event(unsigned long delta, |
460 | struct clock_event_device *evt) |
461 | { |
462 | - unsigned int cnt; |
463 | - int res; |
464 | + u32 cnt; |
465 | + s32 res; |
466 | |
467 | cnt = hpet_read(HPET_COUNTER); |
468 | - cnt += delta; |
469 | + cnt += (u32) delta; |
470 | hpet_write(HPET_T0_CMP, cnt); |
471 | |
472 | - res = (int)(cnt - hpet_read(HPET_COUNTER)); |
473 | + res = (s32)(cnt - hpet_read(HPET_COUNTER)); |
474 | |
475 | return res < HPET_MIN_CYCLES ? -ETIME : 0; |
476 | } |
477 | @@ -230,7 +230,7 @@ void __init setup_hpet_timer(void) |
478 | |
479 | cd = &per_cpu(hpet_clockevent_device, cpu); |
480 | cd->name = "hpet"; |
481 | - cd->rating = 320; |
482 | + cd->rating = 100; |
483 | cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; |
484 | cd->set_state_shutdown = hpet_set_state_shutdown; |
485 | cd->set_state_periodic = hpet_set_state_periodic; |
486 | diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c |
487 | index b4a837893562..5abe51cad899 100644 |
488 | --- a/arch/mips/mm/uasm-mips.c |
489 | +++ b/arch/mips/mm/uasm-mips.c |
490 | @@ -65,7 +65,7 @@ static struct insn insn_table[] = { |
491 | #ifndef CONFIG_CPU_MIPSR6 |
492 | { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
493 | #else |
494 | - { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, |
495 | + { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, |
496 | #endif |
497 | { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
498 | { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, |
499 | diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
500 | index 463af88c95a2..974f73df00bb 100644 |
501 | --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
502 | +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S |
503 | @@ -655,112 +655,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
504 | |
505 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
506 | BEGIN_FTR_SECTION |
507 | - b skip_tm |
508 | -END_FTR_SECTION_IFCLR(CPU_FTR_TM) |
509 | - |
510 | - /* Turn on TM/FP/VSX/VMX so we can restore them. */ |
511 | - mfmsr r5 |
512 | - li r6, MSR_TM >> 32 |
513 | - sldi r6, r6, 32 |
514 | - or r5, r5, r6 |
515 | - ori r5, r5, MSR_FP |
516 | - oris r5, r5, (MSR_VEC | MSR_VSX)@h |
517 | - mtmsrd r5 |
518 | - |
519 | - /* |
520 | - * The user may change these outside of a transaction, so they must |
521 | - * always be context switched. |
522 | - */ |
523 | - ld r5, VCPU_TFHAR(r4) |
524 | - ld r6, VCPU_TFIAR(r4) |
525 | - ld r7, VCPU_TEXASR(r4) |
526 | - mtspr SPRN_TFHAR, r5 |
527 | - mtspr SPRN_TFIAR, r6 |
528 | - mtspr SPRN_TEXASR, r7 |
529 | - |
530 | - ld r5, VCPU_MSR(r4) |
531 | - rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 |
532 | - beq skip_tm /* TM not active in guest */ |
533 | - |
534 | - /* Make sure the failure summary is set, otherwise we'll program check |
535 | - * when we trechkpt. It's possible that this might have been not set |
536 | - * on a kvmppc_set_one_reg() call but we shouldn't let this crash the |
537 | - * host. |
538 | - */ |
539 | - oris r7, r7, (TEXASR_FS)@h |
540 | - mtspr SPRN_TEXASR, r7 |
541 | - |
542 | - /* |
543 | - * We need to load up the checkpointed state for the guest. |
544 | - * We need to do this early as it will blow away any GPRs, VSRs and |
545 | - * some SPRs. |
546 | - */ |
547 | - |
548 | - mr r31, r4 |
549 | - addi r3, r31, VCPU_FPRS_TM |
550 | - bl load_fp_state |
551 | - addi r3, r31, VCPU_VRS_TM |
552 | - bl load_vr_state |
553 | - mr r4, r31 |
554 | - lwz r7, VCPU_VRSAVE_TM(r4) |
555 | - mtspr SPRN_VRSAVE, r7 |
556 | - |
557 | - ld r5, VCPU_LR_TM(r4) |
558 | - lwz r6, VCPU_CR_TM(r4) |
559 | - ld r7, VCPU_CTR_TM(r4) |
560 | - ld r8, VCPU_AMR_TM(r4) |
561 | - ld r9, VCPU_TAR_TM(r4) |
562 | - mtlr r5 |
563 | - mtcr r6 |
564 | - mtctr r7 |
565 | - mtspr SPRN_AMR, r8 |
566 | - mtspr SPRN_TAR, r9 |
567 | - |
568 | - /* |
569 | - * Load up PPR and DSCR values but don't put them in the actual SPRs |
570 | - * till the last moment to avoid running with userspace PPR and DSCR for |
571 | - * too long. |
572 | - */ |
573 | - ld r29, VCPU_DSCR_TM(r4) |
574 | - ld r30, VCPU_PPR_TM(r4) |
575 | - |
576 | - std r2, PACATMSCRATCH(r13) /* Save TOC */ |
577 | - |
578 | - /* Clear the MSR RI since r1, r13 are all going to be foobar. */ |
579 | - li r5, 0 |
580 | - mtmsrd r5, 1 |
581 | - |
582 | - /* Load GPRs r0-r28 */ |
583 | - reg = 0 |
584 | - .rept 29 |
585 | - ld reg, VCPU_GPRS_TM(reg)(r31) |
586 | - reg = reg + 1 |
587 | - .endr |
588 | - |
589 | - mtspr SPRN_DSCR, r29 |
590 | - mtspr SPRN_PPR, r30 |
591 | - |
592 | - /* Load final GPRs */ |
593 | - ld 29, VCPU_GPRS_TM(29)(r31) |
594 | - ld 30, VCPU_GPRS_TM(30)(r31) |
595 | - ld 31, VCPU_GPRS_TM(31)(r31) |
596 | - |
597 | - /* TM checkpointed state is now setup. All GPRs are now volatile. */ |
598 | - TRECHKPT |
599 | - |
600 | - /* Now let's get back the state we need. */ |
601 | - HMT_MEDIUM |
602 | - GET_PACA(r13) |
603 | - ld r29, HSTATE_DSCR(r13) |
604 | - mtspr SPRN_DSCR, r29 |
605 | - ld r4, HSTATE_KVM_VCPU(r13) |
606 | - ld r1, HSTATE_HOST_R1(r13) |
607 | - ld r2, PACATMSCRATCH(r13) |
608 | - |
609 | - /* Set the MSR RI since we have our registers back. */ |
610 | - li r5, MSR_RI |
611 | - mtmsrd r5, 1 |
612 | -skip_tm: |
613 | + bl kvmppc_restore_tm |
614 | +END_FTR_SECTION_IFSET(CPU_FTR_TM) |
615 | #endif |
616 | |
617 | /* Load guest PMU registers */ |
618 | @@ -841,12 +737,6 @@ BEGIN_FTR_SECTION |
619 | /* Skip next section on POWER7 */ |
620 | b 8f |
621 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
622 | - /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */ |
623 | - mfmsr r8 |
624 | - li r0, 1 |
625 | - rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG |
626 | - mtmsrd r8 |
627 | - |
628 | /* Load up POWER8-specific registers */ |
629 | ld r5, VCPU_IAMR(r4) |
630 | lwz r6, VCPU_PSPB(r4) |
631 | @@ -1436,106 +1326,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) |
632 | |
633 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
634 | BEGIN_FTR_SECTION |
635 | - b 2f |
636 | -END_FTR_SECTION_IFCLR(CPU_FTR_TM) |
637 | - /* Turn on TM. */ |
638 | - mfmsr r8 |
639 | - li r0, 1 |
640 | - rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG |
641 | - mtmsrd r8 |
642 | - |
643 | - ld r5, VCPU_MSR(r9) |
644 | - rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 |
645 | - beq 1f /* TM not active in guest. */ |
646 | - |
647 | - li r3, TM_CAUSE_KVM_RESCHED |
648 | - |
649 | - /* Clear the MSR RI since r1, r13 are all going to be foobar. */ |
650 | - li r5, 0 |
651 | - mtmsrd r5, 1 |
652 | - |
653 | - /* All GPRs are volatile at this point. */ |
654 | - TRECLAIM(R3) |
655 | - |
656 | - /* Temporarily store r13 and r9 so we have some regs to play with */ |
657 | - SET_SCRATCH0(r13) |
658 | - GET_PACA(r13) |
659 | - std r9, PACATMSCRATCH(r13) |
660 | - ld r9, HSTATE_KVM_VCPU(r13) |
661 | - |
662 | - /* Get a few more GPRs free. */ |
663 | - std r29, VCPU_GPRS_TM(29)(r9) |
664 | - std r30, VCPU_GPRS_TM(30)(r9) |
665 | - std r31, VCPU_GPRS_TM(31)(r9) |
666 | - |
667 | - /* Save away PPR and DSCR soon so don't run with user values. */ |
668 | - mfspr r31, SPRN_PPR |
669 | - HMT_MEDIUM |
670 | - mfspr r30, SPRN_DSCR |
671 | - ld r29, HSTATE_DSCR(r13) |
672 | - mtspr SPRN_DSCR, r29 |
673 | - |
674 | - /* Save all but r9, r13 & r29-r31 */ |
675 | - reg = 0 |
676 | - .rept 29 |
677 | - .if (reg != 9) && (reg != 13) |
678 | - std reg, VCPU_GPRS_TM(reg)(r9) |
679 | - .endif |
680 | - reg = reg + 1 |
681 | - .endr |
682 | - /* ... now save r13 */ |
683 | - GET_SCRATCH0(r4) |
684 | - std r4, VCPU_GPRS_TM(13)(r9) |
685 | - /* ... and save r9 */ |
686 | - ld r4, PACATMSCRATCH(r13) |
687 | - std r4, VCPU_GPRS_TM(9)(r9) |
688 | - |
689 | - /* Reload stack pointer and TOC. */ |
690 | - ld r1, HSTATE_HOST_R1(r13) |
691 | - ld r2, PACATOC(r13) |
692 | - |
693 | - /* Set MSR RI now we have r1 and r13 back. */ |
694 | - li r5, MSR_RI |
695 | - mtmsrd r5, 1 |
696 | - |
697 | - /* Save away checkpinted SPRs. */ |
698 | - std r31, VCPU_PPR_TM(r9) |
699 | - std r30, VCPU_DSCR_TM(r9) |
700 | - mflr r5 |
701 | - mfcr r6 |
702 | - mfctr r7 |
703 | - mfspr r8, SPRN_AMR |
704 | - mfspr r10, SPRN_TAR |
705 | - std r5, VCPU_LR_TM(r9) |
706 | - stw r6, VCPU_CR_TM(r9) |
707 | - std r7, VCPU_CTR_TM(r9) |
708 | - std r8, VCPU_AMR_TM(r9) |
709 | - std r10, VCPU_TAR_TM(r9) |
710 | - |
711 | - /* Restore r12 as trap number. */ |
712 | - lwz r12, VCPU_TRAP(r9) |
713 | - |
714 | - /* Save FP/VSX. */ |
715 | - addi r3, r9, VCPU_FPRS_TM |
716 | - bl store_fp_state |
717 | - addi r3, r9, VCPU_VRS_TM |
718 | - bl store_vr_state |
719 | - mfspr r6, SPRN_VRSAVE |
720 | - stw r6, VCPU_VRSAVE_TM(r9) |
721 | -1: |
722 | - /* |
723 | - * We need to save these SPRs after the treclaim so that the software |
724 | - * error code is recorded correctly in the TEXASR. Also the user may |
725 | - * change these outside of a transaction, so they must always be |
726 | - * context switched. |
727 | - */ |
728 | - mfspr r5, SPRN_TFHAR |
729 | - mfspr r6, SPRN_TFIAR |
730 | - mfspr r7, SPRN_TEXASR |
731 | - std r5, VCPU_TFHAR(r9) |
732 | - std r6, VCPU_TFIAR(r9) |
733 | - std r7, VCPU_TEXASR(r9) |
734 | -2: |
735 | + bl kvmppc_save_tm |
736 | +END_FTR_SECTION_IFSET(CPU_FTR_TM) |
737 | #endif |
738 | |
739 | /* Increment yield count if they have a VPA */ |
740 | @@ -2245,6 +2037,13 @@ _GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */ |
741 | /* save FP state */ |
742 | bl kvmppc_save_fp |
743 | |
744 | +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
745 | +BEGIN_FTR_SECTION |
746 | + ld r9, HSTATE_KVM_VCPU(r13) |
747 | + bl kvmppc_save_tm |
748 | +END_FTR_SECTION_IFSET(CPU_FTR_TM) |
749 | +#endif |
750 | + |
751 | /* |
752 | * Set DEC to the smaller of DEC and HDEC, so that we wake |
753 | * no later than the end of our timeslice (HDEC interrupts |
754 | @@ -2321,6 +2120,12 @@ kvm_end_cede: |
755 | bl kvmhv_accumulate_time |
756 | #endif |
757 | |
758 | +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
759 | +BEGIN_FTR_SECTION |
760 | + bl kvmppc_restore_tm |
761 | +END_FTR_SECTION_IFSET(CPU_FTR_TM) |
762 | +#endif |
763 | + |
764 | /* load up FP state */ |
765 | bl kvmppc_load_fp |
766 | |
767 | @@ -2629,6 +2434,239 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
768 | mr r4,r31 |
769 | blr |
770 | |
771 | +#ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
772 | +/* |
773 | + * Save transactional state and TM-related registers. |
774 | + * Called with r9 pointing to the vcpu struct. |
775 | + * This can modify all checkpointed registers, but |
776 | + * restores r1, r2 and r9 (vcpu pointer) before exit. |
777 | + */ |
778 | +kvmppc_save_tm: |
779 | + mflr r0 |
780 | + std r0, PPC_LR_STKOFF(r1) |
781 | + |
782 | + /* Turn on TM. */ |
783 | + mfmsr r8 |
784 | + li r0, 1 |
785 | + rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG |
786 | + mtmsrd r8 |
787 | + |
788 | + ld r5, VCPU_MSR(r9) |
789 | + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 |
790 | + beq 1f /* TM not active in guest. */ |
791 | + |
792 | + std r1, HSTATE_HOST_R1(r13) |
793 | + li r3, TM_CAUSE_KVM_RESCHED |
794 | + |
795 | + /* Clear the MSR RI since r1, r13 are all going to be foobar. */ |
796 | + li r5, 0 |
797 | + mtmsrd r5, 1 |
798 | + |
799 | + /* All GPRs are volatile at this point. */ |
800 | + TRECLAIM(R3) |
801 | + |
802 | + /* Temporarily store r13 and r9 so we have some regs to play with */ |
803 | + SET_SCRATCH0(r13) |
804 | + GET_PACA(r13) |
805 | + std r9, PACATMSCRATCH(r13) |
806 | + ld r9, HSTATE_KVM_VCPU(r13) |
807 | + |
808 | + /* Get a few more GPRs free. */ |
809 | + std r29, VCPU_GPRS_TM(29)(r9) |
810 | + std r30, VCPU_GPRS_TM(30)(r9) |
811 | + std r31, VCPU_GPRS_TM(31)(r9) |
812 | + |
813 | + /* Save away PPR and DSCR soon so don't run with user values. */ |
814 | + mfspr r31, SPRN_PPR |
815 | + HMT_MEDIUM |
816 | + mfspr r30, SPRN_DSCR |
817 | + ld r29, HSTATE_DSCR(r13) |
818 | + mtspr SPRN_DSCR, r29 |
819 | + |
820 | + /* Save all but r9, r13 & r29-r31 */ |
821 | + reg = 0 |
822 | + .rept 29 |
823 | + .if (reg != 9) && (reg != 13) |
824 | + std reg, VCPU_GPRS_TM(reg)(r9) |
825 | + .endif |
826 | + reg = reg + 1 |
827 | + .endr |
828 | + /* ... now save r13 */ |
829 | + GET_SCRATCH0(r4) |
830 | + std r4, VCPU_GPRS_TM(13)(r9) |
831 | + /* ... and save r9 */ |
832 | + ld r4, PACATMSCRATCH(r13) |
833 | + std r4, VCPU_GPRS_TM(9)(r9) |
834 | + |
835 | + /* Reload stack pointer and TOC. */ |
836 | + ld r1, HSTATE_HOST_R1(r13) |
837 | + ld r2, PACATOC(r13) |
838 | + |
839 | + /* Set MSR RI now we have r1 and r13 back. */ |
840 | + li r5, MSR_RI |
841 | + mtmsrd r5, 1 |
842 | + |
843 | + /* Save away checkpinted SPRs. */ |
844 | + std r31, VCPU_PPR_TM(r9) |
845 | + std r30, VCPU_DSCR_TM(r9) |
846 | + mflr r5 |
847 | + mfcr r6 |
848 | + mfctr r7 |
849 | + mfspr r8, SPRN_AMR |
850 | + mfspr r10, SPRN_TAR |
851 | + std r5, VCPU_LR_TM(r9) |
852 | + stw r6, VCPU_CR_TM(r9) |
853 | + std r7, VCPU_CTR_TM(r9) |
854 | + std r8, VCPU_AMR_TM(r9) |
855 | + std r10, VCPU_TAR_TM(r9) |
856 | + |
857 | + /* Restore r12 as trap number. */ |
858 | + lwz r12, VCPU_TRAP(r9) |
859 | + |
860 | + /* Save FP/VSX. */ |
861 | + addi r3, r9, VCPU_FPRS_TM |
862 | + bl store_fp_state |
863 | + addi r3, r9, VCPU_VRS_TM |
864 | + bl store_vr_state |
865 | + mfspr r6, SPRN_VRSAVE |
866 | + stw r6, VCPU_VRSAVE_TM(r9) |
867 | +1: |
868 | + /* |
869 | + * We need to save these SPRs after the treclaim so that the software |
870 | + * error code is recorded correctly in the TEXASR. Also the user may |
871 | + * change these outside of a transaction, so they must always be |
872 | + * context switched. |
873 | + */ |
874 | + mfspr r5, SPRN_TFHAR |
875 | + mfspr r6, SPRN_TFIAR |
876 | + mfspr r7, SPRN_TEXASR |
877 | + std r5, VCPU_TFHAR(r9) |
878 | + std r6, VCPU_TFIAR(r9) |
879 | + std r7, VCPU_TEXASR(r9) |
880 | + |
881 | + ld r0, PPC_LR_STKOFF(r1) |
882 | + mtlr r0 |
883 | + blr |
884 | + |
885 | +/* |
886 | + * Restore transactional state and TM-related registers. |
887 | + * Called with r4 pointing to the vcpu struct. |
888 | + * This potentially modifies all checkpointed registers. |
889 | + * It restores r1, r2, r4 from the PACA. |
890 | + */ |
891 | +kvmppc_restore_tm: |
892 | + mflr r0 |
893 | + std r0, PPC_LR_STKOFF(r1) |
894 | + |
895 | + /* Turn on TM/FP/VSX/VMX so we can restore them. */ |
896 | + mfmsr r5 |
897 | + li r6, MSR_TM >> 32 |
898 | + sldi r6, r6, 32 |
899 | + or r5, r5, r6 |
900 | + ori r5, r5, MSR_FP |
901 | + oris r5, r5, (MSR_VEC | MSR_VSX)@h |
902 | + mtmsrd r5 |
903 | + |
904 | + /* |
905 | + * The user may change these outside of a transaction, so they must |
906 | + * always be context switched. |
907 | + */ |
908 | + ld r5, VCPU_TFHAR(r4) |
909 | + ld r6, VCPU_TFIAR(r4) |
910 | + ld r7, VCPU_TEXASR(r4) |
911 | + mtspr SPRN_TFHAR, r5 |
912 | + mtspr SPRN_TFIAR, r6 |
913 | + mtspr SPRN_TEXASR, r7 |
914 | + |
915 | + ld r5, VCPU_MSR(r4) |
916 | + rldicl. r5, r5, 64 - MSR_TS_S_LG, 62 |
917 | + beqlr /* TM not active in guest */ |
918 | + std r1, HSTATE_HOST_R1(r13) |
919 | + |
920 | + /* Make sure the failure summary is set, otherwise we'll program check |
921 | + * when we trechkpt. It's possible that this might have been not set |
922 | + * on a kvmppc_set_one_reg() call but we shouldn't let this crash the |
923 | + * host. |
924 | + */ |
925 | + oris r7, r7, (TEXASR_FS)@h |
926 | + mtspr SPRN_TEXASR, r7 |
927 | + |
928 | + /* |
929 | + * We need to load up the checkpointed state for the guest. |
930 | + * We need to do this early as it will blow away any GPRs, VSRs and |
931 | + * some SPRs. |
932 | + */ |
933 | + |
934 | + mr r31, r4 |
935 | + addi r3, r31, VCPU_FPRS_TM |
936 | + bl load_fp_state |
937 | + addi r3, r31, VCPU_VRS_TM |
938 | + bl load_vr_state |
939 | + mr r4, r31 |
940 | + lwz r7, VCPU_VRSAVE_TM(r4) |
941 | + mtspr SPRN_VRSAVE, r7 |
942 | + |
943 | + ld r5, VCPU_LR_TM(r4) |
944 | + lwz r6, VCPU_CR_TM(r4) |
945 | + ld r7, VCPU_CTR_TM(r4) |
946 | + ld r8, VCPU_AMR_TM(r4) |
947 | + ld r9, VCPU_TAR_TM(r4) |
948 | + mtlr r5 |
949 | + mtcr r6 |
950 | + mtctr r7 |
951 | + mtspr SPRN_AMR, r8 |
952 | + mtspr SPRN_TAR, r9 |
953 | + |
954 | + /* |
955 | + * Load up PPR and DSCR values but don't put them in the actual SPRs |
956 | + * till the last moment to avoid running with userspace PPR and DSCR for |
957 | + * too long. |
958 | + */ |
959 | + ld r29, VCPU_DSCR_TM(r4) |
960 | + ld r30, VCPU_PPR_TM(r4) |
961 | + |
962 | + std r2, PACATMSCRATCH(r13) /* Save TOC */ |
963 | + |
964 | + /* Clear the MSR RI since r1, r13 are all going to be foobar. */ |
965 | + li r5, 0 |
966 | + mtmsrd r5, 1 |
967 | + |
968 | + /* Load GPRs r0-r28 */ |
969 | + reg = 0 |
970 | + .rept 29 |
971 | + ld reg, VCPU_GPRS_TM(reg)(r31) |
972 | + reg = reg + 1 |
973 | + .endr |
974 | + |
975 | + mtspr SPRN_DSCR, r29 |
976 | + mtspr SPRN_PPR, r30 |
977 | + |
978 | + /* Load final GPRs */ |
979 | + ld 29, VCPU_GPRS_TM(29)(r31) |
980 | + ld 30, VCPU_GPRS_TM(30)(r31) |
981 | + ld 31, VCPU_GPRS_TM(31)(r31) |
982 | + |
983 | + /* TM checkpointed state is now setup. All GPRs are now volatile. */ |
984 | + TRECHKPT |
985 | + |
986 | + /* Now let's get back the state we need. */ |
987 | + HMT_MEDIUM |
988 | + GET_PACA(r13) |
989 | + ld r29, HSTATE_DSCR(r13) |
990 | + mtspr SPRN_DSCR, r29 |
991 | + ld r4, HSTATE_KVM_VCPU(r13) |
992 | + ld r1, HSTATE_HOST_R1(r13) |
993 | + ld r2, PACATMSCRATCH(r13) |
994 | + |
995 | + /* Set the MSR RI since we have our registers back. */ |
996 | + li r5, MSR_RI |
997 | + mtmsrd r5, 1 |
998 | + |
999 | + ld r0, PPC_LR_STKOFF(r1) |
1000 | + mtlr r0 |
1001 | + blr |
1002 | +#endif |
1003 | + |
1004 | /* |
1005 | * We come here if we get any exception or interrupt while we are |
1006 | * executing host real mode code while in guest MMU context. |
1007 | diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c |
1008 | index c146f3c262c3..0149ac59c273 100644 |
1009 | --- a/arch/x86/kvm/mtrr.c |
1010 | +++ b/arch/x86/kvm/mtrr.c |
1011 | @@ -539,6 +539,7 @@ static void mtrr_lookup_var_start(struct mtrr_iter *iter) |
1012 | |
1013 | iter->fixed = false; |
1014 | iter->start_max = iter->start; |
1015 | + iter->range = NULL; |
1016 | iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node); |
1017 | |
1018 | __mtrr_lookup_var_next(iter); |
1019 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
1020 | index 41e7943004fe..4589b6feeb7b 100644 |
1021 | --- a/arch/x86/kvm/vmx.c |
1022 | +++ b/arch/x86/kvm/vmx.c |
1023 | @@ -8124,6 +8124,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) |
1024 | if ((vectoring_info & VECTORING_INFO_VALID_MASK) && |
1025 | (exit_reason != EXIT_REASON_EXCEPTION_NMI && |
1026 | exit_reason != EXIT_REASON_EPT_VIOLATION && |
1027 | + exit_reason != EXIT_REASON_PML_FULL && |
1028 | exit_reason != EXIT_REASON_TASK_SWITCH)) { |
1029 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
1030 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_DELIVERY_EV; |
1031 | @@ -8736,6 +8737,22 @@ static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) |
1032 | put_cpu(); |
1033 | } |
1034 | |
1035 | +/* |
1036 | + * Ensure that the current vmcs of the logical processor is the |
1037 | + * vmcs01 of the vcpu before calling free_nested(). |
1038 | + */ |
1039 | +static void vmx_free_vcpu_nested(struct kvm_vcpu *vcpu) |
1040 | +{ |
1041 | + struct vcpu_vmx *vmx = to_vmx(vcpu); |
1042 | + int r; |
1043 | + |
1044 | + r = vcpu_load(vcpu); |
1045 | + BUG_ON(r); |
1046 | + vmx_load_vmcs01(vcpu); |
1047 | + free_nested(vmx); |
1048 | + vcpu_put(vcpu); |
1049 | +} |
1050 | + |
1051 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) |
1052 | { |
1053 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
1054 | @@ -8744,8 +8761,7 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu) |
1055 | vmx_destroy_pml_buffer(vmx); |
1056 | free_vpid(vmx->vpid); |
1057 | leave_guest_mode(vcpu); |
1058 | - vmx_load_vmcs01(vcpu); |
1059 | - free_nested(vmx); |
1060 | + vmx_free_vcpu_nested(vcpu); |
1061 | free_loaded_vmcs(vmx->loaded_vmcs); |
1062 | kfree(vmx->guest_msrs); |
1063 | kvm_vcpu_uninit(vcpu); |
1064 | diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c |
1065 | index 8b93e634af84..ae97f24a4371 100644 |
1066 | --- a/arch/x86/pci/intel_mid_pci.c |
1067 | +++ b/arch/x86/pci/intel_mid_pci.c |
1068 | @@ -37,6 +37,7 @@ |
1069 | |
1070 | /* Quirks for the listed devices */ |
1071 | #define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 |
1072 | +#define PCI_DEVICE_ID_INTEL_MRFL_HSU 0x1191 |
1073 | |
1074 | /* Fixed BAR fields */ |
1075 | #define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00 /* Fixed BAR (TBD) */ |
1076 | @@ -225,13 +226,20 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) |
1077 | /* Special treatment for IRQ0 */ |
1078 | if (dev->irq == 0) { |
1079 | /* |
1080 | + * Skip HS UART common registers device since it has |
1081 | + * IRQ0 assigned and not used by the kernel. |
1082 | + */ |
1083 | + if (dev->device == PCI_DEVICE_ID_INTEL_MRFL_HSU) |
1084 | + return -EBUSY; |
1085 | + /* |
1086 | * TNG has IRQ0 assigned to eMMC controller. But there |
1087 | * are also other devices with bogus PCI configuration |
1088 | * that have IRQ0 assigned. This check ensures that |
1089 | - * eMMC gets it. |
1090 | + * eMMC gets it. The rest of devices still could be |
1091 | + * enabled without interrupt line being allocated. |
1092 | */ |
1093 | if (dev->device != PCI_DEVICE_ID_INTEL_MRFL_MMC) |
1094 | - return -EBUSY; |
1095 | + return 0; |
1096 | } |
1097 | break; |
1098 | default: |
1099 | diff --git a/block/bio.c b/block/bio.c |
1100 | index d4d144363250..46e2cc1d4016 100644 |
1101 | --- a/block/bio.c |
1102 | +++ b/block/bio.c |
1103 | @@ -584,6 +584,8 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src) |
1104 | bio->bi_rw = bio_src->bi_rw; |
1105 | bio->bi_iter = bio_src->bi_iter; |
1106 | bio->bi_io_vec = bio_src->bi_io_vec; |
1107 | + |
1108 | + bio_clone_blkcg_association(bio, bio_src); |
1109 | } |
1110 | EXPORT_SYMBOL(__bio_clone_fast); |
1111 | |
1112 | @@ -689,6 +691,8 @@ integrity_clone: |
1113 | } |
1114 | } |
1115 | |
1116 | + bio_clone_blkcg_association(bio, bio_src); |
1117 | + |
1118 | return bio; |
1119 | } |
1120 | EXPORT_SYMBOL(bio_clone_bioset); |
1121 | @@ -2014,6 +2018,17 @@ void bio_disassociate_task(struct bio *bio) |
1122 | } |
1123 | } |
1124 | |
1125 | +/** |
1126 | + * bio_clone_blkcg_association - clone blkcg association from src to dst bio |
1127 | + * @dst: destination bio |
1128 | + * @src: source bio |
1129 | + */ |
1130 | +void bio_clone_blkcg_association(struct bio *dst, struct bio *src) |
1131 | +{ |
1132 | + if (src->bi_css) |
1133 | + WARN_ON(bio_associate_blkcg(dst, src->bi_css)); |
1134 | +} |
1135 | + |
1136 | #endif /* CONFIG_BLK_CGROUP */ |
1137 | |
1138 | static void __init biovec_init_slabs(void) |
1139 | diff --git a/block/genhd.c b/block/genhd.c |
1140 | index d2a1d43bf9fa..a5bed6bc869d 100644 |
1141 | --- a/block/genhd.c |
1142 | +++ b/block/genhd.c |
1143 | @@ -612,7 +612,7 @@ void add_disk(struct gendisk *disk) |
1144 | |
1145 | /* Register BDI before referencing it from bdev */ |
1146 | bdi = &disk->queue->backing_dev_info; |
1147 | - bdi_register_dev(bdi, disk_devt(disk)); |
1148 | + bdi_register_owner(bdi, disk_to_dev(disk)); |
1149 | |
1150 | blk_register_region(disk_devt(disk), disk->minors, NULL, |
1151 | exact_match, exact_lock, disk); |
1152 | diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c |
1153 | index b420fb46669d..43f20328f830 100644 |
1154 | --- a/drivers/acpi/ec.c |
1155 | +++ b/drivers/acpi/ec.c |
1156 | @@ -101,6 +101,7 @@ enum ec_command { |
1157 | #define ACPI_EC_UDELAY_POLL 550 /* Wait 1ms for EC transaction polling */ |
1158 | #define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query |
1159 | * when trying to clear the EC */ |
1160 | +#define ACPI_EC_MAX_QUERIES 16 /* Maximum number of parallel queries */ |
1161 | |
1162 | enum { |
1163 | EC_FLAGS_QUERY_PENDING, /* Query is pending */ |
1164 | @@ -121,6 +122,10 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; |
1165 | module_param(ec_delay, uint, 0644); |
1166 | MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); |
1167 | |
1168 | +static unsigned int ec_max_queries __read_mostly = ACPI_EC_MAX_QUERIES; |
1169 | +module_param(ec_max_queries, uint, 0644); |
1170 | +MODULE_PARM_DESC(ec_max_queries, "Maximum parallel _Qxx evaluations"); |
1171 | + |
1172 | static bool ec_busy_polling __read_mostly; |
1173 | module_param(ec_busy_polling, bool, 0644); |
1174 | MODULE_PARM_DESC(ec_busy_polling, "Use busy polling to advance EC transaction"); |
1175 | @@ -174,6 +179,7 @@ static void acpi_ec_event_processor(struct work_struct *work); |
1176 | |
1177 | struct acpi_ec *boot_ec, *first_ec; |
1178 | EXPORT_SYMBOL(first_ec); |
1179 | +static struct workqueue_struct *ec_query_wq; |
1180 | |
1181 | static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ |
1182 | static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ |
1183 | @@ -1097,7 +1103,7 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 *data) |
1184 | * work queue execution. |
1185 | */ |
1186 | ec_dbg_evt("Query(0x%02x) scheduled", value); |
1187 | - if (!schedule_work(&q->work)) { |
1188 | + if (!queue_work(ec_query_wq, &q->work)) { |
1189 | ec_dbg_evt("Query(0x%02x) overlapped", value); |
1190 | result = -EBUSY; |
1191 | } |
1192 | @@ -1657,15 +1663,41 @@ static struct acpi_driver acpi_ec_driver = { |
1193 | }, |
1194 | }; |
1195 | |
1196 | +static inline int acpi_ec_query_init(void) |
1197 | +{ |
1198 | + if (!ec_query_wq) { |
1199 | + ec_query_wq = alloc_workqueue("kec_query", 0, |
1200 | + ec_max_queries); |
1201 | + if (!ec_query_wq) |
1202 | + return -ENODEV; |
1203 | + } |
1204 | + return 0; |
1205 | +} |
1206 | + |
1207 | +static inline void acpi_ec_query_exit(void) |
1208 | +{ |
1209 | + if (ec_query_wq) { |
1210 | + destroy_workqueue(ec_query_wq); |
1211 | + ec_query_wq = NULL; |
1212 | + } |
1213 | +} |
1214 | + |
1215 | int __init acpi_ec_init(void) |
1216 | { |
1217 | - int result = 0; |
1218 | + int result; |
1219 | |
1220 | + /* register workqueue for _Qxx evaluations */ |
1221 | + result = acpi_ec_query_init(); |
1222 | + if (result) |
1223 | + goto err_exit; |
1224 | /* Now register the driver for the EC */ |
1225 | result = acpi_bus_register_driver(&acpi_ec_driver); |
1226 | - if (result < 0) |
1227 | - return -ENODEV; |
1228 | + if (result) |
1229 | + goto err_exit; |
1230 | |
1231 | +err_exit: |
1232 | + if (result) |
1233 | + acpi_ec_query_exit(); |
1234 | return result; |
1235 | } |
1236 | |
1237 | @@ -1675,5 +1707,6 @@ static void __exit acpi_ec_exit(void) |
1238 | { |
1239 | |
1240 | acpi_bus_unregister_driver(&acpi_ec_driver); |
1241 | + acpi_ec_query_exit(); |
1242 | } |
1243 | #endif /* 0 */ |
1244 | diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c |
1245 | index 4a414a5a3165..b9065506a847 100644 |
1246 | --- a/drivers/bluetooth/hci_intel.c |
1247 | +++ b/drivers/bluetooth/hci_intel.c |
1248 | @@ -1234,8 +1234,7 @@ static int intel_probe(struct platform_device *pdev) |
1249 | |
1250 | idev->pdev = pdev; |
1251 | |
1252 | - idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset", |
1253 | - GPIOD_OUT_LOW); |
1254 | + idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); |
1255 | if (IS_ERR(idev->reset)) { |
1256 | dev_err(&pdev->dev, "Unable to retrieve gpio\n"); |
1257 | return PTR_ERR(idev->reset); |
1258 | @@ -1247,8 +1246,7 @@ static int intel_probe(struct platform_device *pdev) |
1259 | |
1260 | dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n"); |
1261 | |
1262 | - host_wake = devm_gpiod_get_optional(&pdev->dev, "host-wake", |
1263 | - GPIOD_IN); |
1264 | + host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN); |
1265 | if (IS_ERR(host_wake)) { |
1266 | dev_err(&pdev->dev, "Unable to retrieve IRQ\n"); |
1267 | goto no_irq; |
1268 | diff --git a/drivers/char/random.c b/drivers/char/random.c |
1269 | index 0227b0465b40..491a4dce13fe 100644 |
1270 | --- a/drivers/char/random.c |
1271 | +++ b/drivers/char/random.c |
1272 | @@ -948,6 +948,7 @@ void add_interrupt_randomness(int irq, int irq_flags) |
1273 | /* award one bit for the contents of the fast pool */ |
1274 | credit_entropy_bits(r, credit + 1); |
1275 | } |
1276 | +EXPORT_SYMBOL_GPL(add_interrupt_randomness); |
1277 | |
1278 | #ifdef CONFIG_BLOCK |
1279 | void add_disk_randomness(struct gendisk *disk) |
1280 | @@ -1460,12 +1461,16 @@ random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
1281 | static ssize_t |
1282 | urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
1283 | { |
1284 | + static int maxwarn = 10; |
1285 | int ret; |
1286 | |
1287 | - if (unlikely(nonblocking_pool.initialized == 0)) |
1288 | - printk_once(KERN_NOTICE "random: %s urandom read " |
1289 | - "with %d bits of entropy available\n", |
1290 | - current->comm, nonblocking_pool.entropy_total); |
1291 | + if (unlikely(nonblocking_pool.initialized == 0) && |
1292 | + maxwarn > 0) { |
1293 | + maxwarn--; |
1294 | + printk(KERN_NOTICE "random: %s: uninitialized urandom read " |
1295 | + "(%zd bytes read, %d bits of entropy available)\n", |
1296 | + current->comm, nbytes, nonblocking_pool.entropy_total); |
1297 | + } |
1298 | |
1299 | nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3)); |
1300 | ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); |
1301 | @@ -1847,12 +1852,18 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, |
1302 | { |
1303 | struct entropy_store *poolp = &input_pool; |
1304 | |
1305 | - /* Suspend writing if we're above the trickle threshold. |
1306 | - * We'll be woken up again once below random_write_wakeup_thresh, |
1307 | - * or when the calling thread is about to terminate. |
1308 | - */ |
1309 | - wait_event_interruptible(random_write_wait, kthread_should_stop() || |
1310 | + if (unlikely(nonblocking_pool.initialized == 0)) |
1311 | + poolp = &nonblocking_pool; |
1312 | + else { |
1313 | + /* Suspend writing if we're above the trickle |
1314 | + * threshold. We'll be woken up again once below |
1315 | + * random_write_wakeup_thresh, or when the calling |
1316 | + * thread is about to terminate. |
1317 | + */ |
1318 | + wait_event_interruptible(random_write_wait, |
1319 | + kthread_should_stop() || |
1320 | ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits); |
1321 | + } |
1322 | mix_pool_bytes(poolp, buffer, count); |
1323 | credit_entropy_bits(poolp, entropy); |
1324 | } |
1325 | diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c |
1326 | index f53b02a6bc05..6e80e4298274 100644 |
1327 | --- a/drivers/cpufreq/intel_pstate.c |
1328 | +++ b/drivers/cpufreq/intel_pstate.c |
1329 | @@ -662,7 +662,7 @@ static int core_get_max_pstate(void) |
1330 | if (err) |
1331 | goto skip_tar; |
1332 | |
1333 | - tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl; |
1334 | + tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3); |
1335 | err = rdmsrl_safe(tdp_msr, &tdp_ratio); |
1336 | if (err) |
1337 | goto skip_tar; |
1338 | diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c |
1339 | index 58aed67b7eba..3c8f19f5ac81 100644 |
1340 | --- a/drivers/edac/edac_mc_sysfs.c |
1341 | +++ b/drivers/edac/edac_mc_sysfs.c |
1342 | @@ -313,7 +313,6 @@ static struct device_type csrow_attr_type = { |
1343 | * possible dynamic channel DIMM Label attribute files |
1344 | * |
1345 | */ |
1346 | - |
1347 | DEVICE_CHANNEL(ch0_dimm_label, S_IRUGO | S_IWUSR, |
1348 | channel_dimm_label_show, channel_dimm_label_store, 0); |
1349 | DEVICE_CHANNEL(ch1_dimm_label, S_IRUGO | S_IWUSR, |
1350 | @@ -326,6 +325,10 @@ DEVICE_CHANNEL(ch4_dimm_label, S_IRUGO | S_IWUSR, |
1351 | channel_dimm_label_show, channel_dimm_label_store, 4); |
1352 | DEVICE_CHANNEL(ch5_dimm_label, S_IRUGO | S_IWUSR, |
1353 | channel_dimm_label_show, channel_dimm_label_store, 5); |
1354 | +DEVICE_CHANNEL(ch6_dimm_label, S_IRUGO | S_IWUSR, |
1355 | + channel_dimm_label_show, channel_dimm_label_store, 6); |
1356 | +DEVICE_CHANNEL(ch7_dimm_label, S_IRUGO | S_IWUSR, |
1357 | + channel_dimm_label_show, channel_dimm_label_store, 7); |
1358 | |
1359 | /* Total possible dynamic DIMM Label attribute file table */ |
1360 | static struct attribute *dynamic_csrow_dimm_attr[] = { |
1361 | @@ -335,6 +338,8 @@ static struct attribute *dynamic_csrow_dimm_attr[] = { |
1362 | &dev_attr_legacy_ch3_dimm_label.attr.attr, |
1363 | &dev_attr_legacy_ch4_dimm_label.attr.attr, |
1364 | &dev_attr_legacy_ch5_dimm_label.attr.attr, |
1365 | + &dev_attr_legacy_ch6_dimm_label.attr.attr, |
1366 | + &dev_attr_legacy_ch7_dimm_label.attr.attr, |
1367 | NULL |
1368 | }; |
1369 | |
1370 | @@ -351,6 +356,10 @@ DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, |
1371 | channel_ce_count_show, NULL, 4); |
1372 | DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, |
1373 | channel_ce_count_show, NULL, 5); |
1374 | +DEVICE_CHANNEL(ch6_ce_count, S_IRUGO, |
1375 | + channel_ce_count_show, NULL, 6); |
1376 | +DEVICE_CHANNEL(ch7_ce_count, S_IRUGO, |
1377 | + channel_ce_count_show, NULL, 7); |
1378 | |
1379 | /* Total possible dynamic ce_count attribute file table */ |
1380 | static struct attribute *dynamic_csrow_ce_count_attr[] = { |
1381 | @@ -360,6 +369,8 @@ static struct attribute *dynamic_csrow_ce_count_attr[] = { |
1382 | &dev_attr_legacy_ch3_ce_count.attr.attr, |
1383 | &dev_attr_legacy_ch4_ce_count.attr.attr, |
1384 | &dev_attr_legacy_ch5_ce_count.attr.attr, |
1385 | + &dev_attr_legacy_ch6_ce_count.attr.attr, |
1386 | + &dev_attr_legacy_ch7_ce_count.attr.attr, |
1387 | NULL |
1388 | }; |
1389 | |
1390 | @@ -371,9 +382,16 @@ static umode_t csrow_dev_is_visible(struct kobject *kobj, |
1391 | |
1392 | if (idx >= csrow->nr_channels) |
1393 | return 0; |
1394 | + |
1395 | + if (idx >= ARRAY_SIZE(dynamic_csrow_ce_count_attr) - 1) { |
1396 | + WARN_ONCE(1, "idx: %d\n", idx); |
1397 | + return 0; |
1398 | + } |
1399 | + |
1400 | /* Only expose populated DIMMs */ |
1401 | if (!csrow->channels[idx]->dimm->nr_pages) |
1402 | return 0; |
1403 | + |
1404 | return attr->mode; |
1405 | } |
1406 | |
1407 | diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c |
1408 | index 70097472b02c..c50e930d97d3 100644 |
1409 | --- a/drivers/gpio/gpio-intel-mid.c |
1410 | +++ b/drivers/gpio/gpio-intel-mid.c |
1411 | @@ -17,7 +17,6 @@ |
1412 | * Moorestown platform Langwell chip. |
1413 | * Medfield platform Penwell chip. |
1414 | * Clovertrail platform Cloverview chip. |
1415 | - * Merrifield platform Tangier chip. |
1416 | */ |
1417 | |
1418 | #include <linux/module.h> |
1419 | @@ -64,10 +63,6 @@ enum GPIO_REG { |
1420 | /* intel_mid gpio driver data */ |
1421 | struct intel_mid_gpio_ddata { |
1422 | u16 ngpio; /* number of gpio pins */ |
1423 | - u32 gplr_offset; /* offset of first GPLR register from base */ |
1424 | - u32 flis_base; /* base address of FLIS registers */ |
1425 | - u32 flis_len; /* length of FLIS registers */ |
1426 | - u32 (*get_flis_offset)(int gpio); |
1427 | u32 chip_irq_type; /* chip interrupt type */ |
1428 | }; |
1429 | |
1430 | @@ -257,15 +252,6 @@ static const struct intel_mid_gpio_ddata gpio_cloverview_core = { |
1431 | .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, |
1432 | }; |
1433 | |
1434 | -static const struct intel_mid_gpio_ddata gpio_tangier = { |
1435 | - .ngpio = 192, |
1436 | - .gplr_offset = 4, |
1437 | - .flis_base = 0xff0c0000, |
1438 | - .flis_len = 0x8000, |
1439 | - .get_flis_offset = NULL, |
1440 | - .chip_irq_type = INTEL_MID_IRQ_TYPE_EDGE, |
1441 | -}; |
1442 | - |
1443 | static const struct pci_device_id intel_gpio_ids[] = { |
1444 | { |
1445 | /* Lincroft */ |
1446 | @@ -292,11 +278,6 @@ static const struct pci_device_id intel_gpio_ids[] = { |
1447 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x08f7), |
1448 | .driver_data = (kernel_ulong_t)&gpio_cloverview_core, |
1449 | }, |
1450 | - { |
1451 | - /* Tangier */ |
1452 | - PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1199), |
1453 | - .driver_data = (kernel_ulong_t)&gpio_tangier, |
1454 | - }, |
1455 | { 0 } |
1456 | }; |
1457 | MODULE_DEVICE_TABLE(pci, intel_gpio_ids); |
1458 | diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c |
1459 | index 2d4892cc70fb..c844d7eccb6c 100644 |
1460 | --- a/drivers/gpio/gpio-pca953x.c |
1461 | +++ b/drivers/gpio/gpio-pca953x.c |
1462 | @@ -86,7 +86,7 @@ MODULE_DEVICE_TABLE(acpi, pca953x_acpi_ids); |
1463 | #define MAX_BANK 5 |
1464 | #define BANK_SZ 8 |
1465 | |
1466 | -#define NBANK(chip) (chip->gpio_chip.ngpio / BANK_SZ) |
1467 | +#define NBANK(chip) DIV_ROUND_UP(chip->gpio_chip.ngpio, BANK_SZ) |
1468 | |
1469 | struct pca953x_chip { |
1470 | unsigned gpio_start; |
1471 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c |
1472 | index 9416e0f5c1db..0aaa457a1710 100644 |
1473 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c |
1474 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c |
1475 | @@ -566,28 +566,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev) |
1476 | le16_to_cpu(firmware_info->info.usReferenceClock); |
1477 | ppll->reference_div = 0; |
1478 | |
1479 | - if (crev < 2) |
1480 | - ppll->pll_out_min = |
1481 | - le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); |
1482 | - else |
1483 | - ppll->pll_out_min = |
1484 | - le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); |
1485 | + ppll->pll_out_min = |
1486 | + le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output); |
1487 | ppll->pll_out_max = |
1488 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); |
1489 | |
1490 | - if (crev >= 4) { |
1491 | - ppll->lcd_pll_out_min = |
1492 | - le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; |
1493 | - if (ppll->lcd_pll_out_min == 0) |
1494 | - ppll->lcd_pll_out_min = ppll->pll_out_min; |
1495 | - ppll->lcd_pll_out_max = |
1496 | - le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; |
1497 | - if (ppll->lcd_pll_out_max == 0) |
1498 | - ppll->lcd_pll_out_max = ppll->pll_out_max; |
1499 | - } else { |
1500 | + ppll->lcd_pll_out_min = |
1501 | + le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; |
1502 | + if (ppll->lcd_pll_out_min == 0) |
1503 | ppll->lcd_pll_out_min = ppll->pll_out_min; |
1504 | + ppll->lcd_pll_out_max = |
1505 | + le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100; |
1506 | + if (ppll->lcd_pll_out_max == 0) |
1507 | ppll->lcd_pll_out_max = ppll->pll_out_max; |
1508 | - } |
1509 | |
1510 | if (ppll->pll_out_min == 0) |
1511 | ppll->pll_out_min = 64800; |
1512 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |
1513 | index 5a8fbadbd27b..29adbbe225c4 100644 |
1514 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |
1515 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c |
1516 | @@ -10,6 +10,7 @@ |
1517 | #include <linux/slab.h> |
1518 | #include <linux/acpi.h> |
1519 | #include <linux/pci.h> |
1520 | +#include <linux/delay.h> |
1521 | |
1522 | #include "amdgpu_acpi.h" |
1523 | |
1524 | @@ -256,6 +257,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state) |
1525 | if (!info) |
1526 | return -EIO; |
1527 | kfree(info); |
1528 | + |
1529 | + /* 200ms delay is required after off */ |
1530 | + if (state == 0) |
1531 | + msleep(200); |
1532 | } |
1533 | return 0; |
1534 | } |
1535 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c |
1536 | index 7ef2c13921b4..930083336968 100644 |
1537 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c |
1538 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c |
1539 | @@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev, |
1540 | DRM_MODE_SCALE_NONE); |
1541 | /* no HPD on analog connectors */ |
1542 | amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; |
1543 | - connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1544 | connector->interlace_allowed = true; |
1545 | connector->doublescan_allowed = true; |
1546 | break; |
1547 | @@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev, |
1548 | } |
1549 | |
1550 | if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { |
1551 | - if (i2c_bus->valid) |
1552 | - connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1553 | + if (i2c_bus->valid) { |
1554 | + connector->polled = DRM_CONNECTOR_POLL_CONNECT | |
1555 | + DRM_CONNECTOR_POLL_DISCONNECT; |
1556 | + } |
1557 | } else |
1558 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1559 | |
1560 | diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
1561 | index c961fe093e12..16302f7d59f6 100644 |
1562 | --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
1563 | +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c |
1564 | @@ -1793,7 +1793,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) |
1565 | } |
1566 | |
1567 | drm_kms_helper_poll_enable(dev); |
1568 | + |
1569 | + /* |
1570 | + * Most of the connector probing functions try to acquire runtime pm |
1571 | + * refs to ensure that the GPU is powered on when connector polling is |
1572 | + * performed. Since we're calling this from a runtime PM callback, |
1573 | + * trying to acquire rpm refs will cause us to deadlock. |
1574 | + * |
1575 | + * Since we're guaranteed to be holding the rpm lock, it's safe to |
1576 | + * temporarily disable the rpm helpers so this doesn't deadlock us. |
1577 | + */ |
1578 | +#ifdef CONFIG_PM |
1579 | + dev->dev->power.disable_depth++; |
1580 | +#endif |
1581 | drm_helper_hpd_irq_event(dev); |
1582 | +#ifdef CONFIG_PM |
1583 | + dev->dev->power.disable_depth--; |
1584 | +#endif |
1585 | |
1586 | if (fbcon) { |
1587 | amdgpu_fbdev_set_suspend(adev, 0); |
1588 | diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c |
1589 | index 1cd6de575305..542517d4e584 100644 |
1590 | --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c |
1591 | +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c |
1592 | @@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode |
1593 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
1594 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
1595 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
1596 | + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
1597 | if (dig->backlight_level == 0) |
1598 | amdgpu_atombios_encoder_setup_dig_transmitter(encoder, |
1599 | ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); |
1600 | diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c |
1601 | index ea87033bfaf6..df17fababbd6 100644 |
1602 | --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c |
1603 | +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c |
1604 | @@ -167,6 +167,7 @@ static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) |
1605 | break; |
1606 | case CHIP_KAVERI: |
1607 | case CHIP_KABINI: |
1608 | + case CHIP_MULLINS: |
1609 | return 0; |
1610 | default: BUG(); |
1611 | } |
1612 | diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c |
1613 | index 6743ff7dccfa..7f4a6c550319 100644 |
1614 | --- a/drivers/gpu/drm/drm_cache.c |
1615 | +++ b/drivers/gpu/drm/drm_cache.c |
1616 | @@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length) |
1617 | mb(); |
1618 | for (; addr < end; addr += size) |
1619 | clflushopt(addr); |
1620 | + clflushopt(end - 1); /* force serialisation */ |
1621 | mb(); |
1622 | return; |
1623 | } |
1624 | diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c |
1625 | index d5d2c03fd136..8c9ac021608f 100644 |
1626 | --- a/drivers/gpu/drm/drm_edid.c |
1627 | +++ b/drivers/gpu/drm/drm_edid.c |
1628 | @@ -73,6 +73,8 @@ |
1629 | #define EDID_QUIRK_FORCE_8BPC (1 << 8) |
1630 | /* Force 12bpc */ |
1631 | #define EDID_QUIRK_FORCE_12BPC (1 << 9) |
1632 | +/* Force 6bpc */ |
1633 | +#define EDID_QUIRK_FORCE_6BPC (1 << 10) |
1634 | |
1635 | struct detailed_mode_closure { |
1636 | struct drm_connector *connector; |
1637 | @@ -99,6 +101,9 @@ static struct edid_quirk { |
1638 | /* Unknown Acer */ |
1639 | { "ACR", 2423, EDID_QUIRK_FIRST_DETAILED_PREFERRED }, |
1640 | |
1641 | + /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ |
1642 | + { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, |
1643 | + |
1644 | /* Belinea 10 15 55 */ |
1645 | { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 }, |
1646 | { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 }, |
1647 | @@ -3820,6 +3825,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) |
1648 | |
1649 | drm_add_display_info(edid, &connector->display_info, connector); |
1650 | |
1651 | + if (quirks & EDID_QUIRK_FORCE_6BPC) |
1652 | + connector->display_info.bpc = 6; |
1653 | + |
1654 | if (quirks & EDID_QUIRK_FORCE_8BPC) |
1655 | connector->display_info.bpc = 8; |
1656 | |
1657 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
1658 | index c41bc42b6fa7..3292495ee10f 100644 |
1659 | --- a/drivers/gpu/drm/i915/intel_display.c |
1660 | +++ b/drivers/gpu/drm/i915/intel_display.c |
1661 | @@ -11952,21 +11952,11 @@ connected_sink_compute_bpp(struct intel_connector *connector, |
1662 | pipe_config->pipe_bpp = connector->base.display_info.bpc*3; |
1663 | } |
1664 | |
1665 | - /* Clamp bpp to default limit on screens without EDID 1.4 */ |
1666 | - if (connector->base.display_info.bpc == 0) { |
1667 | - int type = connector->base.connector_type; |
1668 | - int clamp_bpp = 24; |
1669 | - |
1670 | - /* Fall back to 18 bpp when DP sink capability is unknown. */ |
1671 | - if (type == DRM_MODE_CONNECTOR_DisplayPort || |
1672 | - type == DRM_MODE_CONNECTOR_eDP) |
1673 | - clamp_bpp = 18; |
1674 | - |
1675 | - if (bpp > clamp_bpp) { |
1676 | - DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n", |
1677 | - bpp, clamp_bpp); |
1678 | - pipe_config->pipe_bpp = clamp_bpp; |
1679 | - } |
1680 | + /* Clamp bpp to 8 on screens without EDID 1.4 */ |
1681 | + if (connector->base.display_info.bpc == 0 && bpp > 24) { |
1682 | + DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of 24\n", |
1683 | + bpp); |
1684 | + pipe_config->pipe_bpp = 24; |
1685 | } |
1686 | } |
1687 | |
1688 | diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c |
1689 | index 6dc13c02c28e..e362a30776fa 100644 |
1690 | --- a/drivers/gpu/drm/i915/intel_opregion.c |
1691 | +++ b/drivers/gpu/drm/i915/intel_opregion.c |
1692 | @@ -682,7 +682,7 @@ static void intel_didl_outputs(struct drm_device *dev) |
1693 | } |
1694 | |
1695 | if (!acpi_video_bus) { |
1696 | - DRM_ERROR("No ACPI video bus found\n"); |
1697 | + DRM_DEBUG_KMS("No ACPI video bus found\n"); |
1698 | return; |
1699 | } |
1700 | |
1701 | diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c |
1702 | index eb434881ddbc..1e851e037c29 100644 |
1703 | --- a/drivers/gpu/drm/i915/intel_pm.c |
1704 | +++ b/drivers/gpu/drm/i915/intel_pm.c |
1705 | @@ -4526,7 +4526,8 @@ void gen6_rps_idle(struct drm_i915_private *dev_priv) |
1706 | else |
1707 | gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); |
1708 | dev_priv->rps.last_adj = 0; |
1709 | - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); |
1710 | + I915_WRITE(GEN6_PMINTRMSK, |
1711 | + gen6_sanitize_rps_pm_mask(dev_priv, ~0)); |
1712 | } |
1713 | mutex_unlock(&dev_priv->rps.hw_lock); |
1714 | |
1715 | diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c |
1716 | index 1d3ee5179ab8..d236fc7c425b 100644 |
1717 | --- a/drivers/gpu/drm/nouveau/nouveau_drm.c |
1718 | +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c |
1719 | @@ -308,7 +308,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev, |
1720 | bool boot = false; |
1721 | int ret; |
1722 | |
1723 | - /* remove conflicting drivers (vesafb, efifb etc) */ |
1724 | + /* We need to check that the chipset is supported before booting |
1725 | + * fbdev off the hardware, as there's no way to put it back. |
1726 | + */ |
1727 | + ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device); |
1728 | + if (ret) |
1729 | + return ret; |
1730 | + |
1731 | + nvkm_device_del(&device); |
1732 | + |
1733 | + /* Remove conflicting drivers (vesafb, efifb etc). */ |
1734 | aper = alloc_apertures(3); |
1735 | if (!aper) |
1736 | return -ENOMEM; |
1737 | diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c |
1738 | index 8f715feadf56..f90568327468 100644 |
1739 | --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c |
1740 | +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c |
1741 | @@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
1742 | ((image->dx + image->width) & 0xffff)); |
1743 | OUT_RING(chan, bg); |
1744 | OUT_RING(chan, fg); |
1745 | - OUT_RING(chan, (image->height << 16) | image->width); |
1746 | + OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8)); |
1747 | OUT_RING(chan, (image->height << 16) | image->width); |
1748 | OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); |
1749 | |
1750 | - dsize = ALIGN(image->width * image->height, 32) >> 5; |
1751 | + dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; |
1752 | while (dsize) { |
1753 | int iter_len = dsize > 128 ? 128 : dsize; |
1754 | |
1755 | diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c |
1756 | index a4e259a00430..c8e096533f60 100644 |
1757 | --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c |
1758 | +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c |
1759 | @@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
1760 | OUT_RING(chan, 0); |
1761 | OUT_RING(chan, image->dy); |
1762 | |
1763 | - dwords = ALIGN(image->width * image->height, 32) >> 5; |
1764 | + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; |
1765 | while (dwords) { |
1766 | int push = dwords > 2047 ? 2047 : dwords; |
1767 | |
1768 | diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c |
1769 | index f28315e865a5..22d32578dafd 100644 |
1770 | --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c |
1771 | +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c |
1772 | @@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) |
1773 | OUT_RING (chan, 0); |
1774 | OUT_RING (chan, image->dy); |
1775 | |
1776 | - dwords = ALIGN(image->width * image->height, 32) >> 5; |
1777 | + dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5; |
1778 | while (dwords) { |
1779 | int push = dwords > 2047 ? 2047 : dwords; |
1780 | |
1781 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c |
1782 | index 69de8c6259fe..f1e15a4d4f64 100644 |
1783 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c |
1784 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c |
1785 | @@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, |
1786 | nvkm_wo32(chan->inst, i, 0x00040004); |
1787 | for (i = 0x1f18; i <= 0x3088 ; i += 16) { |
1788 | nvkm_wo32(chan->inst, i + 0, 0x10700ff9); |
1789 | - nvkm_wo32(chan->inst, i + 1, 0x0436086c); |
1790 | - nvkm_wo32(chan->inst, i + 2, 0x000c001b); |
1791 | + nvkm_wo32(chan->inst, i + 4, 0x0436086c); |
1792 | + nvkm_wo32(chan->inst, i + 8, 0x000c001b); |
1793 | } |
1794 | for (i = 0x30b8; i < 0x30c8; i += 4) |
1795 | nvkm_wo32(chan->inst, i, 0x0000ffff); |
1796 | diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c |
1797 | index 2207dac23981..300f5ed5de0b 100644 |
1798 | --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c |
1799 | +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c |
1800 | @@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch, |
1801 | nvkm_wo32(chan->inst, i, 0x00040004); |
1802 | for (i = 0x15ac; i <= 0x271c ; i += 16) { |
1803 | nvkm_wo32(chan->inst, i + 0, 0x10700ff9); |
1804 | - nvkm_wo32(chan->inst, i + 1, 0x0436086c); |
1805 | - nvkm_wo32(chan->inst, i + 2, 0x000c001b); |
1806 | + nvkm_wo32(chan->inst, i + 4, 0x0436086c); |
1807 | + nvkm_wo32(chan->inst, i + 8, 0x000c001b); |
1808 | } |
1809 | for (i = 0x274c; i < 0x275c; i += 4) |
1810 | nvkm_wo32(chan->inst, i, 0x0000ffff); |
1811 | diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c |
1812 | index 0b04b9282f56..d4ac8c837314 100644 |
1813 | --- a/drivers/gpu/drm/radeon/atombios_encoders.c |
1814 | +++ b/drivers/gpu/drm/radeon/atombios_encoders.c |
1815 | @@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level) |
1816 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
1817 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
1818 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
1819 | + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: |
1820 | if (dig->backlight_level == 0) |
1821 | atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); |
1822 | else { |
1823 | diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c |
1824 | index de9a2ffcf5f7..0c5b3eeff82d 100644 |
1825 | --- a/drivers/gpu/drm/radeon/radeon_atombios.c |
1826 | +++ b/drivers/gpu/drm/radeon/radeon_atombios.c |
1827 | @@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) |
1828 | le16_to_cpu(firmware_info->info.usReferenceClock); |
1829 | p1pll->reference_div = 0; |
1830 | |
1831 | - if (crev < 2) |
1832 | + if ((frev < 2) && (crev < 2)) |
1833 | p1pll->pll_out_min = |
1834 | le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); |
1835 | else |
1836 | @@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) |
1837 | p1pll->pll_out_max = |
1838 | le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); |
1839 | |
1840 | - if (crev >= 4) { |
1841 | + if (((frev < 2) && (crev >= 4)) || (frev >= 2)) { |
1842 | p1pll->lcd_pll_out_min = |
1843 | le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; |
1844 | if (p1pll->lcd_pll_out_min == 0) |
1845 | diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c |
1846 | index c4b4f298a283..69ce95571136 100644 |
1847 | --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c |
1848 | +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c |
1849 | @@ -10,6 +10,7 @@ |
1850 | #include <linux/slab.h> |
1851 | #include <linux/acpi.h> |
1852 | #include <linux/pci.h> |
1853 | +#include <linux/delay.h> |
1854 | |
1855 | #include "radeon_acpi.h" |
1856 | |
1857 | @@ -255,6 +256,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) |
1858 | if (!info) |
1859 | return -EIO; |
1860 | kfree(info); |
1861 | + |
1862 | + /* 200ms delay is required after off */ |
1863 | + if (state == 0) |
1864 | + msleep(200); |
1865 | } |
1866 | return 0; |
1867 | } |
1868 | diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c |
1869 | index 9cfc1c3e1965..30f00748ed37 100644 |
1870 | --- a/drivers/gpu/drm/radeon/radeon_connectors.c |
1871 | +++ b/drivers/gpu/drm/radeon/radeon_connectors.c |
1872 | @@ -2058,7 +2058,6 @@ radeon_add_atom_connector(struct drm_device *dev, |
1873 | RADEON_OUTPUT_CSC_BYPASS); |
1874 | /* no HPD on analog connectors */ |
1875 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1876 | - connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1877 | connector->interlace_allowed = true; |
1878 | connector->doublescan_allowed = true; |
1879 | break; |
1880 | @@ -2308,8 +2307,10 @@ radeon_add_atom_connector(struct drm_device *dev, |
1881 | } |
1882 | |
1883 | if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { |
1884 | - if (i2c_bus->valid) |
1885 | - connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1886 | + if (i2c_bus->valid) { |
1887 | + connector->polled = DRM_CONNECTOR_POLL_CONNECT | |
1888 | + DRM_CONNECTOR_POLL_DISCONNECT; |
1889 | + } |
1890 | } else |
1891 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1892 | |
1893 | @@ -2385,7 +2386,6 @@ radeon_add_legacy_connector(struct drm_device *dev, |
1894 | 1); |
1895 | /* no HPD on analog connectors */ |
1896 | radeon_connector->hpd.hpd = RADEON_HPD_NONE; |
1897 | - connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1898 | connector->interlace_allowed = true; |
1899 | connector->doublescan_allowed = true; |
1900 | break; |
1901 | @@ -2470,10 +2470,13 @@ radeon_add_legacy_connector(struct drm_device *dev, |
1902 | } |
1903 | |
1904 | if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { |
1905 | - if (i2c_bus->valid) |
1906 | - connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
1907 | + if (i2c_bus->valid) { |
1908 | + connector->polled = DRM_CONNECTOR_POLL_CONNECT | |
1909 | + DRM_CONNECTOR_POLL_DISCONNECT; |
1910 | + } |
1911 | } else |
1912 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
1913 | + |
1914 | connector->display_info.subpixel_order = subpixel_order; |
1915 | drm_connector_register(connector); |
1916 | } |
1917 | diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c |
1918 | index e094c572b86e..1a2032c2c1fb 100644 |
1919 | --- a/drivers/hid/uhid.c |
1920 | +++ b/drivers/hid/uhid.c |
1921 | @@ -51,10 +51,26 @@ struct uhid_device { |
1922 | u32 report_id; |
1923 | u32 report_type; |
1924 | struct uhid_event report_buf; |
1925 | + struct work_struct worker; |
1926 | }; |
1927 | |
1928 | static struct miscdevice uhid_misc; |
1929 | |
1930 | +static void uhid_device_add_worker(struct work_struct *work) |
1931 | +{ |
1932 | + struct uhid_device *uhid = container_of(work, struct uhid_device, worker); |
1933 | + int ret; |
1934 | + |
1935 | + ret = hid_add_device(uhid->hid); |
1936 | + if (ret) { |
1937 | + hid_err(uhid->hid, "Cannot register HID device: error %d\n", ret); |
1938 | + |
1939 | + hid_destroy_device(uhid->hid); |
1940 | + uhid->hid = NULL; |
1941 | + uhid->running = false; |
1942 | + } |
1943 | +} |
1944 | + |
1945 | static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev) |
1946 | { |
1947 | __u8 newhead; |
1948 | @@ -498,18 +514,14 @@ static int uhid_dev_create2(struct uhid_device *uhid, |
1949 | uhid->hid = hid; |
1950 | uhid->running = true; |
1951 | |
1952 | - ret = hid_add_device(hid); |
1953 | - if (ret) { |
1954 | - hid_err(hid, "Cannot register HID device\n"); |
1955 | - goto err_hid; |
1956 | - } |
1957 | + /* Adding of a HID device is done through a worker, to allow HID drivers |
1958 | + * which use feature requests during .probe to work, without they would |
1959 | + * be blocked on devlock, which is held by uhid_char_write. |
1960 | + */ |
1961 | + schedule_work(&uhid->worker); |
1962 | |
1963 | return 0; |
1964 | |
1965 | -err_hid: |
1966 | - hid_destroy_device(hid); |
1967 | - uhid->hid = NULL; |
1968 | - uhid->running = false; |
1969 | err_free: |
1970 | kfree(uhid->rd_data); |
1971 | uhid->rd_data = NULL; |
1972 | @@ -550,6 +562,8 @@ static int uhid_dev_destroy(struct uhid_device *uhid) |
1973 | uhid->running = false; |
1974 | wake_up_interruptible(&uhid->report_wait); |
1975 | |
1976 | + cancel_work_sync(&uhid->worker); |
1977 | + |
1978 | hid_destroy_device(uhid->hid); |
1979 | kfree(uhid->rd_data); |
1980 | |
1981 | @@ -612,6 +626,7 @@ static int uhid_char_open(struct inode *inode, struct file *file) |
1982 | init_waitqueue_head(&uhid->waitq); |
1983 | init_waitqueue_head(&uhid->report_wait); |
1984 | uhid->running = false; |
1985 | + INIT_WORK(&uhid->worker, uhid_device_add_worker); |
1986 | |
1987 | file->private_data = uhid; |
1988 | nonseekable_open(inode, file); |
1989 | diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c |
1990 | index f19b6f7a467a..9b5440f6b3b4 100644 |
1991 | --- a/drivers/hv/vmbus_drv.c |
1992 | +++ b/drivers/hv/vmbus_drv.c |
1993 | @@ -41,6 +41,7 @@ |
1994 | #include <linux/ptrace.h> |
1995 | #include <linux/screen_info.h> |
1996 | #include <linux/kdebug.h> |
1997 | +#include <linux/random.h> |
1998 | #include "hyperv_vmbus.h" |
1999 | |
2000 | static struct acpi_device *hv_acpi_dev; |
2001 | @@ -826,6 +827,8 @@ static void vmbus_isr(void) |
2002 | else |
2003 | tasklet_schedule(&msg_dpc); |
2004 | } |
2005 | + |
2006 | + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); |
2007 | } |
2008 | |
2009 | |
2010 | diff --git a/drivers/i2c/busses/i2c-efm32.c b/drivers/i2c/busses/i2c-efm32.c |
2011 | index 8eff62738877..e253598d764c 100644 |
2012 | --- a/drivers/i2c/busses/i2c-efm32.c |
2013 | +++ b/drivers/i2c/busses/i2c-efm32.c |
2014 | @@ -433,7 +433,7 @@ static int efm32_i2c_probe(struct platform_device *pdev) |
2015 | ret = request_irq(ddata->irq, efm32_i2c_irq, 0, DRIVER_NAME, ddata); |
2016 | if (ret < 0) { |
2017 | dev_err(&pdev->dev, "failed to request irq (%d)\n", ret); |
2018 | - return ret; |
2019 | + goto err_disable_clk; |
2020 | } |
2021 | |
2022 | ret = i2c_add_adapter(&ddata->adapter); |
2023 | diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c |
2024 | index 5fb089e91353..fb43a242847b 100644 |
2025 | --- a/drivers/infiniband/core/iwpm_util.c |
2026 | +++ b/drivers/infiniband/core/iwpm_util.c |
2027 | @@ -634,6 +634,7 @@ static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid) |
2028 | if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client, |
2029 | RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) { |
2030 | pr_warn("%s Unable to put NLMSG_DONE\n", __func__); |
2031 | + dev_kfree_skb(skb); |
2032 | return -ENOMEM; |
2033 | } |
2034 | nlh->nlmsg_type = NLMSG_DONE; |
2035 | diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c |
2036 | index a95a32ba596e..d3b7ecd106f7 100644 |
2037 | --- a/drivers/infiniband/core/sa_query.c |
2038 | +++ b/drivers/infiniband/core/sa_query.c |
2039 | @@ -534,7 +534,7 @@ static int ib_nl_send_msg(struct ib_sa_query *query, gfp_t gfp_mask) |
2040 | data = ibnl_put_msg(skb, &nlh, query->seq, 0, RDMA_NL_LS, |
2041 | RDMA_NL_LS_OP_RESOLVE, NLM_F_REQUEST); |
2042 | if (!data) { |
2043 | - kfree_skb(skb); |
2044 | + nlmsg_free(skb); |
2045 | return -EMSGSIZE; |
2046 | } |
2047 | |
2048 | diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c |
2049 | index 870e56b6b25f..05179f47bbde 100644 |
2050 | --- a/drivers/infiniband/hw/mlx4/mad.c |
2051 | +++ b/drivers/infiniband/hw/mlx4/mad.c |
2052 | @@ -526,7 +526,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, |
2053 | tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1); |
2054 | spin_unlock(&tun_qp->tx_lock); |
2055 | if (ret) |
2056 | - goto out; |
2057 | + goto end; |
2058 | |
2059 | tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr); |
2060 | if (tun_qp->tx_ring[tun_tx_ix].ah) |
2061 | @@ -595,9 +595,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port, |
2062 | wr.wr.send_flags = IB_SEND_SIGNALED; |
2063 | |
2064 | ret = ib_post_send(src_qp, &wr.wr, &bad_wr); |
2065 | -out: |
2066 | - if (ret) |
2067 | - ib_destroy_ah(ah); |
2068 | + if (!ret) |
2069 | + return 0; |
2070 | + out: |
2071 | + spin_lock(&tun_qp->tx_lock); |
2072 | + tun_qp->tx_ix_tail++; |
2073 | + spin_unlock(&tun_qp->tx_lock); |
2074 | + tun_qp->tx_ring[tun_tx_ix].ah = NULL; |
2075 | +end: |
2076 | + ib_destroy_ah(ah); |
2077 | return ret; |
2078 | } |
2079 | |
2080 | @@ -1278,9 +1284,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, |
2081 | |
2082 | |
2083 | ret = ib_post_send(send_qp, &wr.wr, &bad_wr); |
2084 | + if (!ret) |
2085 | + return 0; |
2086 | + |
2087 | + spin_lock(&sqp->tx_lock); |
2088 | + sqp->tx_ix_tail++; |
2089 | + spin_unlock(&sqp->tx_lock); |
2090 | + sqp->tx_ring[wire_tx_ix].ah = NULL; |
2091 | out: |
2092 | - if (ret) |
2093 | - ib_destroy_ah(ah); |
2094 | + ib_destroy_ah(ah); |
2095 | return ret; |
2096 | } |
2097 | |
2098 | diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c |
2099 | index 13eaaf45288f..ea1e2ddaddf5 100644 |
2100 | --- a/drivers/infiniband/hw/mlx4/qp.c |
2101 | +++ b/drivers/infiniband/hw/mlx4/qp.c |
2102 | @@ -357,7 +357,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags) |
2103 | sizeof (struct mlx4_wqe_raddr_seg); |
2104 | case MLX4_IB_QPT_RC: |
2105 | return sizeof (struct mlx4_wqe_ctrl_seg) + |
2106 | - sizeof (struct mlx4_wqe_atomic_seg) + |
2107 | + sizeof (struct mlx4_wqe_masked_atomic_seg) + |
2108 | sizeof (struct mlx4_wqe_raddr_seg); |
2109 | case MLX4_IB_QPT_SMI: |
2110 | case MLX4_IB_QPT_GSI: |
2111 | @@ -1162,8 +1162,10 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd, |
2112 | { |
2113 | err = create_qp_common(to_mdev(pd->device), pd, init_attr, |
2114 | udata, 0, &qp, gfp); |
2115 | - if (err) |
2116 | + if (err) { |
2117 | + kfree(qp); |
2118 | return ERR_PTR(err); |
2119 | + } |
2120 | |
2121 | qp->ibqp.qp_num = qp->mqp.qpn; |
2122 | qp->xrcdn = xrcdn; |
2123 | diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c |
2124 | index 92ddae101ecc..8184267c7901 100644 |
2125 | --- a/drivers/infiniband/hw/mlx5/cq.c |
2126 | +++ b/drivers/infiniband/hw/mlx5/cq.c |
2127 | @@ -763,7 +763,8 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, |
2128 | if (attr->flags) |
2129 | return ERR_PTR(-EINVAL); |
2130 | |
2131 | - if (entries < 0) |
2132 | + if (entries < 0 || |
2133 | + (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)))) |
2134 | return ERR_PTR(-EINVAL); |
2135 | |
2136 | entries = roundup_pow_of_two(entries + 1); |
2137 | @@ -1094,11 +1095,16 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) |
2138 | return -ENOSYS; |
2139 | } |
2140 | |
2141 | - if (entries < 1) |
2142 | + if (entries < 1 || |
2143 | + entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz))) { |
2144 | + mlx5_ib_warn(dev, "wrong entries number %d, max %d\n", |
2145 | + entries, |
2146 | + 1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)); |
2147 | return -EINVAL; |
2148 | + } |
2149 | |
2150 | entries = roundup_pow_of_two(entries + 1); |
2151 | - if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) |
2152 | + if (entries > (1 << MLX5_CAP_GEN(dev->mdev, log_max_cq_sz)) + 1) |
2153 | return -EINVAL; |
2154 | |
2155 | if (entries == ibcq->cqe + 1) |
2156 | diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c |
2157 | index fd17443aeacd..bfc940ff9c8a 100644 |
2158 | --- a/drivers/infiniband/hw/mlx5/main.c |
2159 | +++ b/drivers/infiniband/hw/mlx5/main.c |
2160 | @@ -962,14 +962,11 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, |
2161 | break; |
2162 | |
2163 | case MLX5_DEV_EVENT_PORT_DOWN: |
2164 | + case MLX5_DEV_EVENT_PORT_INITIALIZED: |
2165 | ibev.event = IB_EVENT_PORT_ERR; |
2166 | port = (u8)param; |
2167 | break; |
2168 | |
2169 | - case MLX5_DEV_EVENT_PORT_INITIALIZED: |
2170 | - /* not used by ULPs */ |
2171 | - return; |
2172 | - |
2173 | case MLX5_DEV_EVENT_LID_CHANGE: |
2174 | ibev.event = IB_EVENT_LID_CHANGE; |
2175 | port = (u8)param; |
2176 | diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c |
2177 | index 307bdbca8938..cfcfbb6b84d7 100644 |
2178 | --- a/drivers/infiniband/hw/mlx5/qp.c |
2179 | +++ b/drivers/infiniband/hw/mlx5/qp.c |
2180 | @@ -226,6 +226,8 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap, |
2181 | qp->rq.max_gs = 0; |
2182 | qp->rq.wqe_cnt = 0; |
2183 | qp->rq.wqe_shift = 0; |
2184 | + cap->max_recv_wr = 0; |
2185 | + cap->max_recv_sge = 0; |
2186 | } else { |
2187 | if (ucmd) { |
2188 | qp->rq.wqe_cnt = ucmd->rq_wqe_count; |
2189 | @@ -2525,10 +2527,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr) |
2190 | return MLX5_FENCE_MODE_SMALL_AND_FENCE; |
2191 | else |
2192 | return fence; |
2193 | - |
2194 | - } else { |
2195 | - return 0; |
2196 | + } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { |
2197 | + return MLX5_FENCE_MODE_FENCE; |
2198 | } |
2199 | + |
2200 | + return 0; |
2201 | } |
2202 | |
2203 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, |
2204 | @@ -3092,17 +3095,19 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr |
2205 | qp_attr->cap.max_recv_sge = qp->rq.max_gs; |
2206 | |
2207 | if (!ibqp->uobject) { |
2208 | - qp_attr->cap.max_send_wr = qp->sq.wqe_cnt; |
2209 | + qp_attr->cap.max_send_wr = qp->sq.max_post; |
2210 | qp_attr->cap.max_send_sge = qp->sq.max_gs; |
2211 | + qp_init_attr->qp_context = ibqp->qp_context; |
2212 | } else { |
2213 | qp_attr->cap.max_send_wr = 0; |
2214 | qp_attr->cap.max_send_sge = 0; |
2215 | } |
2216 | |
2217 | - /* We don't support inline sends for kernel QPs (yet), and we |
2218 | - * don't know what userspace's value should be. |
2219 | - */ |
2220 | - qp_attr->cap.max_inline_data = 0; |
2221 | + qp_init_attr->qp_type = ibqp->qp_type; |
2222 | + qp_init_attr->recv_cq = ibqp->recv_cq; |
2223 | + qp_init_attr->send_cq = ibqp->send_cq; |
2224 | + qp_init_attr->srq = ibqp->srq; |
2225 | + qp_attr->cap.max_inline_data = qp->max_inline_data; |
2226 | |
2227 | qp_init_attr->cap = qp_attr->cap; |
2228 | |
2229 | diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c |
2230 | index 7d3281866ffc..942dffca6a9d 100644 |
2231 | --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c |
2232 | +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c |
2233 | @@ -1131,7 +1131,9 @@ struct ipoib_neigh *ipoib_neigh_get(struct net_device *dev, u8 *daddr) |
2234 | neigh = NULL; |
2235 | goto out_unlock; |
2236 | } |
2237 | - neigh->alive = jiffies; |
2238 | + |
2239 | + if (likely(skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE)) |
2240 | + neigh->alive = jiffies; |
2241 | goto out_unlock; |
2242 | } |
2243 | } |
2244 | diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c |
2245 | index 2f589857a039..d15b33813021 100644 |
2246 | --- a/drivers/input/mouse/elan_i2c_core.c |
2247 | +++ b/drivers/input/mouse/elan_i2c_core.c |
2248 | @@ -4,7 +4,8 @@ |
2249 | * Copyright (c) 2013 ELAN Microelectronics Corp. |
2250 | * |
2251 | * Author: æž—æ”¿ç¶ (Duson Lin) <dusonlin@emc.com.tw> |
2252 | - * Version: 1.6.0 |
2253 | + * Author: KT Liao <kt.liao@emc.com.tw> |
2254 | + * Version: 1.6.2 |
2255 | * |
2256 | * Based on cyapa driver: |
2257 | * copyright (c) 2011-2012 Cypress Semiconductor, Inc. |
2258 | @@ -40,7 +41,7 @@ |
2259 | #include "elan_i2c.h" |
2260 | |
2261 | #define DRIVER_NAME "elan_i2c" |
2262 | -#define ELAN_DRIVER_VERSION "1.6.1" |
2263 | +#define ELAN_DRIVER_VERSION "1.6.2" |
2264 | #define ELAN_VENDOR_ID 0x04f3 |
2265 | #define ETP_MAX_PRESSURE 255 |
2266 | #define ETP_FWIDTH_REDUCE 90 |
2267 | @@ -199,9 +200,41 @@ static int elan_sleep(struct elan_tp_data *data) |
2268 | return error; |
2269 | } |
2270 | |
2271 | +static int elan_query_product(struct elan_tp_data *data) |
2272 | +{ |
2273 | + int error; |
2274 | + |
2275 | + error = data->ops->get_product_id(data->client, &data->product_id); |
2276 | + if (error) |
2277 | + return error; |
2278 | + |
2279 | + error = data->ops->get_sm_version(data->client, &data->ic_type, |
2280 | + &data->sm_version); |
2281 | + if (error) |
2282 | + return error; |
2283 | + |
2284 | + return 0; |
2285 | +} |
2286 | + |
2287 | +static int elan_check_ASUS_special_fw(struct elan_tp_data *data) |
2288 | +{ |
2289 | + if (data->ic_type != 0x0E) |
2290 | + return false; |
2291 | + |
2292 | + switch (data->product_id) { |
2293 | + case 0x05 ... 0x07: |
2294 | + case 0x09: |
2295 | + case 0x13: |
2296 | + return true; |
2297 | + default: |
2298 | + return false; |
2299 | + } |
2300 | +} |
2301 | + |
2302 | static int __elan_initialize(struct elan_tp_data *data) |
2303 | { |
2304 | struct i2c_client *client = data->client; |
2305 | + bool woken_up = false; |
2306 | int error; |
2307 | |
2308 | error = data->ops->initialize(client); |
2309 | @@ -210,6 +243,27 @@ static int __elan_initialize(struct elan_tp_data *data) |
2310 | return error; |
2311 | } |
2312 | |
2313 | + error = elan_query_product(data); |
2314 | + if (error) |
2315 | + return error; |
2316 | + |
2317 | + /* |
2318 | + * Some ASUS devices were shipped with firmware that requires |
2319 | + * touchpads to be woken up first, before attempting to switch |
2320 | + * them into absolute reporting mode. |
2321 | + */ |
2322 | + if (elan_check_ASUS_special_fw(data)) { |
2323 | + error = data->ops->sleep_control(client, false); |
2324 | + if (error) { |
2325 | + dev_err(&client->dev, |
2326 | + "failed to wake device up: %d\n", error); |
2327 | + return error; |
2328 | + } |
2329 | + |
2330 | + msleep(200); |
2331 | + woken_up = true; |
2332 | + } |
2333 | + |
2334 | data->mode |= ETP_ENABLE_ABS; |
2335 | error = data->ops->set_mode(client, data->mode); |
2336 | if (error) { |
2337 | @@ -218,11 +272,13 @@ static int __elan_initialize(struct elan_tp_data *data) |
2338 | return error; |
2339 | } |
2340 | |
2341 | - error = data->ops->sleep_control(client, false); |
2342 | - if (error) { |
2343 | - dev_err(&client->dev, |
2344 | - "failed to wake device up: %d\n", error); |
2345 | - return error; |
2346 | + if (!woken_up) { |
2347 | + error = data->ops->sleep_control(client, false); |
2348 | + if (error) { |
2349 | + dev_err(&client->dev, |
2350 | + "failed to wake device up: %d\n", error); |
2351 | + return error; |
2352 | + } |
2353 | } |
2354 | |
2355 | return 0; |
2356 | @@ -248,10 +304,6 @@ static int elan_query_device_info(struct elan_tp_data *data) |
2357 | { |
2358 | int error; |
2359 | |
2360 | - error = data->ops->get_product_id(data->client, &data->product_id); |
2361 | - if (error) |
2362 | - return error; |
2363 | - |
2364 | error = data->ops->get_version(data->client, false, &data->fw_version); |
2365 | if (error) |
2366 | return error; |
2367 | @@ -261,11 +313,6 @@ static int elan_query_device_info(struct elan_tp_data *data) |
2368 | if (error) |
2369 | return error; |
2370 | |
2371 | - error = data->ops->get_sm_version(data->client, &data->ic_type, |
2372 | - &data->sm_version); |
2373 | - if (error) |
2374 | - return error; |
2375 | - |
2376 | error = data->ops->get_version(data->client, true, &data->iap_version); |
2377 | if (error) |
2378 | return error; |
2379 | diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c |
2380 | index d214f22ed305..45b466e3bbe8 100644 |
2381 | --- a/drivers/input/touchscreen/sur40.c |
2382 | +++ b/drivers/input/touchscreen/sur40.c |
2383 | @@ -126,7 +126,7 @@ struct sur40_image_header { |
2384 | #define VIDEO_PACKET_SIZE 16384 |
2385 | |
2386 | /* polling interval (ms) */ |
2387 | -#define POLL_INTERVAL 4 |
2388 | +#define POLL_INTERVAL 1 |
2389 | |
2390 | /* maximum number of contacts FIXME: this is a guess? */ |
2391 | #define MAX_CONTACTS 64 |
2392 | @@ -441,7 +441,7 @@ static void sur40_process_video(struct sur40_state *sur40) |
2393 | |
2394 | /* return error if streaming was stopped in the meantime */ |
2395 | if (sur40->sequence == -1) |
2396 | - goto err_poll; |
2397 | + return; |
2398 | |
2399 | /* mark as finished */ |
2400 | v4l2_get_timestamp(&new_buf->vb.timestamp); |
2401 | @@ -730,6 +730,7 @@ static int sur40_start_streaming(struct vb2_queue *vq, unsigned int count) |
2402 | static void sur40_stop_streaming(struct vb2_queue *vq) |
2403 | { |
2404 | struct sur40_state *sur40 = vb2_get_drv_priv(vq); |
2405 | + vb2_wait_for_all_buffers(vq); |
2406 | sur40->sequence = -1; |
2407 | |
2408 | /* Release all active buffers */ |
2409 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
2410 | index b9319b76a8a1..0397985a2601 100644 |
2411 | --- a/drivers/iommu/amd_iommu.c |
2412 | +++ b/drivers/iommu/amd_iommu.c |
2413 | @@ -352,9 +352,11 @@ static void init_iommu_group(struct device *dev) |
2414 | if (!domain) |
2415 | goto out; |
2416 | |
2417 | - dma_domain = to_pdomain(domain)->priv; |
2418 | + if (to_pdomain(domain)->flags == PD_DMA_OPS_MASK) { |
2419 | + dma_domain = to_pdomain(domain)->priv; |
2420 | + init_unity_mappings_for_device(dev, dma_domain); |
2421 | + } |
2422 | |
2423 | - init_unity_mappings_for_device(dev, dma_domain); |
2424 | out: |
2425 | iommu_group_put(group); |
2426 | } |
2427 | @@ -2322,8 +2324,15 @@ static void update_device_table(struct protection_domain *domain) |
2428 | { |
2429 | struct iommu_dev_data *dev_data; |
2430 | |
2431 | - list_for_each_entry(dev_data, &domain->dev_list, list) |
2432 | + list_for_each_entry(dev_data, &domain->dev_list, list) { |
2433 | set_dte_entry(dev_data->devid, domain, dev_data->ats.enabled); |
2434 | + |
2435 | + if (dev_data->devid == dev_data->alias) |
2436 | + continue; |
2437 | + |
2438 | + /* There is an alias, update device table entry for it */ |
2439 | + set_dte_entry(dev_data->alias, domain, dev_data->ats.enabled); |
2440 | + } |
2441 | } |
2442 | |
2443 | static void update_domain(struct protection_domain *domain) |
2444 | @@ -2970,9 +2979,7 @@ static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) |
2445 | static void amd_iommu_domain_free(struct iommu_domain *dom) |
2446 | { |
2447 | struct protection_domain *domain; |
2448 | - |
2449 | - if (!dom) |
2450 | - return; |
2451 | + struct dma_ops_domain *dma_dom; |
2452 | |
2453 | domain = to_pdomain(dom); |
2454 | |
2455 | @@ -2981,13 +2988,24 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) |
2456 | |
2457 | BUG_ON(domain->dev_cnt != 0); |
2458 | |
2459 | - if (domain->mode != PAGE_MODE_NONE) |
2460 | - free_pagetable(domain); |
2461 | + if (!dom) |
2462 | + return; |
2463 | + |
2464 | + switch (dom->type) { |
2465 | + case IOMMU_DOMAIN_DMA: |
2466 | + dma_dom = domain->priv; |
2467 | + dma_ops_domain_free(dma_dom); |
2468 | + break; |
2469 | + default: |
2470 | + if (domain->mode != PAGE_MODE_NONE) |
2471 | + free_pagetable(domain); |
2472 | |
2473 | - if (domain->flags & PD_IOMMUV2_MASK) |
2474 | - free_gcr3_table(domain); |
2475 | + if (domain->flags & PD_IOMMUV2_MASK) |
2476 | + free_gcr3_table(domain); |
2477 | |
2478 | - protection_domain_free(domain); |
2479 | + protection_domain_free(domain); |
2480 | + break; |
2481 | + } |
2482 | } |
2483 | |
2484 | static void amd_iommu_detach_device(struct iommu_domain *dom, |
2485 | diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c |
2486 | index 97c41b8ab5d9..29a31eb9ace3 100644 |
2487 | --- a/drivers/iommu/exynos-iommu.c |
2488 | +++ b/drivers/iommu/exynos-iommu.c |
2489 | @@ -647,6 +647,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = { |
2490 | .name = "exynos-sysmmu", |
2491 | .of_match_table = sysmmu_of_match, |
2492 | .pm = &sysmmu_pm_ops, |
2493 | + .suppress_bind_attrs = true, |
2494 | } |
2495 | }; |
2496 | |
2497 | diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c |
2498 | index 6763a4dfed94..24d81308a1a6 100644 |
2499 | --- a/drivers/iommu/intel-iommu.c |
2500 | +++ b/drivers/iommu/intel-iommu.c |
2501 | @@ -2032,7 +2032,7 @@ out_unlock: |
2502 | spin_unlock(&iommu->lock); |
2503 | spin_unlock_irqrestore(&device_domain_lock, flags); |
2504 | |
2505 | - return 0; |
2506 | + return ret; |
2507 | } |
2508 | |
2509 | struct domain_context_mapping_data { |
2510 | diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c |
2511 | index 09e2afcafd2d..cd0a93df4cb7 100644 |
2512 | --- a/drivers/md/dm-flakey.c |
2513 | +++ b/drivers/md/dm-flakey.c |
2514 | @@ -289,10 +289,16 @@ static int flakey_map(struct dm_target *ti, struct bio *bio) |
2515 | pb->bio_submitted = true; |
2516 | |
2517 | /* |
2518 | - * Map reads as normal. |
2519 | + * Map reads as normal only if corrupt_bio_byte set. |
2520 | */ |
2521 | - if (bio_data_dir(bio) == READ) |
2522 | - goto map_bio; |
2523 | + if (bio_data_dir(bio) == READ) { |
2524 | + /* If flags were specified, only corrupt those that match. */ |
2525 | + if (fc->corrupt_bio_byte && (fc->corrupt_bio_rw == READ) && |
2526 | + all_corrupt_bio_flags_match(bio, fc)) |
2527 | + goto map_bio; |
2528 | + else |
2529 | + return -EIO; |
2530 | + } |
2531 | |
2532 | /* |
2533 | * Drop writes? |
2534 | @@ -330,12 +336,13 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio, int error) |
2535 | |
2536 | /* |
2537 | * Corrupt successful READs while in down state. |
2538 | - * If flags were specified, only corrupt those that match. |
2539 | */ |
2540 | - if (fc->corrupt_bio_byte && !error && pb->bio_submitted && |
2541 | - (bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) && |
2542 | - all_corrupt_bio_flags_match(bio, fc)) |
2543 | - corrupt_bio_data(bio, fc); |
2544 | + if (!error && pb->bio_submitted && (bio_data_dir(bio) == READ)) { |
2545 | + if (fc->corrupt_bio_byte) |
2546 | + corrupt_bio_data(bio, fc); |
2547 | + else |
2548 | + return -EIO; |
2549 | + } |
2550 | |
2551 | return error; |
2552 | } |
2553 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
2554 | index c338aebb4ccd..a42729ebf272 100644 |
2555 | --- a/drivers/md/dm.c |
2556 | +++ b/drivers/md/dm.c |
2557 | @@ -3078,7 +3078,8 @@ static void unlock_fs(struct mapped_device *md) |
2558 | * Caller must hold md->suspend_lock |
2559 | */ |
2560 | static int __dm_suspend(struct mapped_device *md, struct dm_table *map, |
2561 | - unsigned suspend_flags, int interruptible) |
2562 | + unsigned suspend_flags, int interruptible, |
2563 | + int dmf_suspended_flag) |
2564 | { |
2565 | bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG; |
2566 | bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG; |
2567 | @@ -3145,6 +3146,8 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, |
2568 | * to finish. |
2569 | */ |
2570 | r = dm_wait_for_completion(md, interruptible); |
2571 | + if (!r) |
2572 | + set_bit(dmf_suspended_flag, &md->flags); |
2573 | |
2574 | if (noflush) |
2575 | clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); |
2576 | @@ -3206,12 +3209,10 @@ retry: |
2577 | |
2578 | map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); |
2579 | |
2580 | - r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE); |
2581 | + r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); |
2582 | if (r) |
2583 | goto out_unlock; |
2584 | |
2585 | - set_bit(DMF_SUSPENDED, &md->flags); |
2586 | - |
2587 | dm_table_postsuspend_targets(map); |
2588 | |
2589 | out_unlock: |
2590 | @@ -3305,9 +3306,8 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla |
2591 | * would require changing .presuspend to return an error -- avoid this |
2592 | * until there is a need for more elaborate variants of internal suspend. |
2593 | */ |
2594 | - (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE); |
2595 | - |
2596 | - set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); |
2597 | + (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, |
2598 | + DMF_SUSPENDED_INTERNALLY); |
2599 | |
2600 | dm_table_postsuspend_targets(map); |
2601 | } |
2602 | diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c |
2603 | index 1100e98a7b1d..7df7fb3738a0 100644 |
2604 | --- a/drivers/media/dvb-core/dvb_ringbuffer.c |
2605 | +++ b/drivers/media/dvb-core/dvb_ringbuffer.c |
2606 | @@ -55,7 +55,13 @@ void dvb_ringbuffer_init(struct dvb_ringbuffer *rbuf, void *data, size_t len) |
2607 | |
2608 | int dvb_ringbuffer_empty(struct dvb_ringbuffer *rbuf) |
2609 | { |
2610 | - return (rbuf->pread==rbuf->pwrite); |
2611 | + /* smp_load_acquire() to load write pointer on reader side |
2612 | + * this pairs with smp_store_release() in dvb_ringbuffer_write(), |
2613 | + * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() |
2614 | + * |
2615 | + * for memory barriers also see Documentation/circular-buffers.txt |
2616 | + */ |
2617 | + return (rbuf->pread == smp_load_acquire(&rbuf->pwrite)); |
2618 | } |
2619 | |
2620 | |
2621 | @@ -64,7 +70,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf) |
2622 | { |
2623 | ssize_t free; |
2624 | |
2625 | - free = rbuf->pread - rbuf->pwrite; |
2626 | + /* ACCESS_ONCE() to load read pointer on writer side |
2627 | + * this pairs with smp_store_release() in dvb_ringbuffer_read(), |
2628 | + * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), |
2629 | + * or dvb_ringbuffer_reset() |
2630 | + */ |
2631 | + free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite; |
2632 | if (free <= 0) |
2633 | free += rbuf->size; |
2634 | return free-1; |
2635 | @@ -76,7 +87,11 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf) |
2636 | { |
2637 | ssize_t avail; |
2638 | |
2639 | - avail = rbuf->pwrite - rbuf->pread; |
2640 | + /* smp_load_acquire() to load write pointer on reader side |
2641 | + * this pairs with smp_store_release() in dvb_ringbuffer_write(), |
2642 | + * dvb_ringbuffer_write_user(), or dvb_ringbuffer_reset() |
2643 | + */ |
2644 | + avail = smp_load_acquire(&rbuf->pwrite) - rbuf->pread; |
2645 | if (avail < 0) |
2646 | avail += rbuf->size; |
2647 | return avail; |
2648 | @@ -86,14 +101,25 @@ ssize_t dvb_ringbuffer_avail(struct dvb_ringbuffer *rbuf) |
2649 | |
2650 | void dvb_ringbuffer_flush(struct dvb_ringbuffer *rbuf) |
2651 | { |
2652 | - rbuf->pread = rbuf->pwrite; |
2653 | + /* dvb_ringbuffer_flush() counts as read operation |
2654 | + * smp_load_acquire() to load write pointer |
2655 | + * smp_store_release() to update read pointer, this ensures that the |
2656 | + * correct pointer is visible for subsequent dvb_ringbuffer_free() |
2657 | + * calls on other cpu cores |
2658 | + */ |
2659 | + smp_store_release(&rbuf->pread, smp_load_acquire(&rbuf->pwrite)); |
2660 | rbuf->error = 0; |
2661 | } |
2662 | EXPORT_SYMBOL(dvb_ringbuffer_flush); |
2663 | |
2664 | void dvb_ringbuffer_reset(struct dvb_ringbuffer *rbuf) |
2665 | { |
2666 | - rbuf->pread = rbuf->pwrite = 0; |
2667 | + /* dvb_ringbuffer_reset() counts as read and write operation |
2668 | + * smp_store_release() to update read pointer |
2669 | + */ |
2670 | + smp_store_release(&rbuf->pread, 0); |
2671 | + /* smp_store_release() to update write pointer */ |
2672 | + smp_store_release(&rbuf->pwrite, 0); |
2673 | rbuf->error = 0; |
2674 | } |
2675 | |
2676 | @@ -119,12 +145,17 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si |
2677 | return -EFAULT; |
2678 | buf += split; |
2679 | todo -= split; |
2680 | - rbuf->pread = 0; |
2681 | + /* smp_store_release() for read pointer update to ensure |
2682 | + * that buf is not overwritten until read is complete, |
2683 | + * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() |
2684 | + */ |
2685 | + smp_store_release(&rbuf->pread, 0); |
2686 | } |
2687 | if (copy_to_user(buf, rbuf->data+rbuf->pread, todo)) |
2688 | return -EFAULT; |
2689 | |
2690 | - rbuf->pread = (rbuf->pread + todo) % rbuf->size; |
2691 | + /* smp_store_release() to update read pointer, see above */ |
2692 | + smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); |
2693 | |
2694 | return len; |
2695 | } |
2696 | @@ -139,11 +170,16 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len) |
2697 | memcpy(buf, rbuf->data+rbuf->pread, split); |
2698 | buf += split; |
2699 | todo -= split; |
2700 | - rbuf->pread = 0; |
2701 | + /* smp_store_release() for read pointer update to ensure |
2702 | + * that buf is not overwritten until read is complete, |
2703 | + * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() |
2704 | + */ |
2705 | + smp_store_release(&rbuf->pread, 0); |
2706 | } |
2707 | memcpy(buf, rbuf->data+rbuf->pread, todo); |
2708 | |
2709 | - rbuf->pread = (rbuf->pread + todo) % rbuf->size; |
2710 | + /* smp_store_release() to update read pointer, see above */ |
2711 | + smp_store_release(&rbuf->pread, (rbuf->pread + todo) % rbuf->size); |
2712 | } |
2713 | |
2714 | |
2715 | @@ -158,10 +194,16 @@ ssize_t dvb_ringbuffer_write(struct dvb_ringbuffer *rbuf, const u8 *buf, size_t |
2716 | memcpy(rbuf->data+rbuf->pwrite, buf, split); |
2717 | buf += split; |
2718 | todo -= split; |
2719 | - rbuf->pwrite = 0; |
2720 | + /* smp_store_release() for write pointer update to ensure that |
2721 | + * written data is visible on other cpu cores before the pointer |
2722 | + * update, this pairs with smp_load_acquire() in |
2723 | + * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() |
2724 | + */ |
2725 | + smp_store_release(&rbuf->pwrite, 0); |
2726 | } |
2727 | memcpy(rbuf->data+rbuf->pwrite, buf, todo); |
2728 | - rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size; |
2729 | + /* smp_store_release() for write pointer update, see above */ |
2730 | + smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); |
2731 | |
2732 | return len; |
2733 | } |
2734 | @@ -181,12 +223,18 @@ ssize_t dvb_ringbuffer_write_user(struct dvb_ringbuffer *rbuf, |
2735 | return len - todo; |
2736 | buf += split; |
2737 | todo -= split; |
2738 | - rbuf->pwrite = 0; |
2739 | + /* smp_store_release() for write pointer update to ensure that |
2740 | + * written data is visible on other cpu cores before the pointer |
2741 | + * update, this pairs with smp_load_acquire() in |
2742 | + * dvb_ringbuffer_empty() or dvb_ringbuffer_avail() |
2743 | + */ |
2744 | + smp_store_release(&rbuf->pwrite, 0); |
2745 | } |
2746 | status = copy_from_user(rbuf->data+rbuf->pwrite, buf, todo); |
2747 | if (status) |
2748 | return len - todo; |
2749 | - rbuf->pwrite = (rbuf->pwrite + todo) % rbuf->size; |
2750 | + /* smp_store_release() for write pointer update, see above */ |
2751 | + smp_store_release(&rbuf->pwrite, (rbuf->pwrite + todo) % rbuf->size); |
2752 | |
2753 | return len; |
2754 | } |
2755 | diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
2756 | index 3ffe2ecfd5ef..c8946f98ced4 100644 |
2757 | --- a/drivers/media/platform/s5p-mfc/s5p_mfc.c |
2758 | +++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c |
2759 | @@ -1029,6 +1029,11 @@ static int match_child(struct device *dev, void *data) |
2760 | return !strcmp(dev_name(dev), (char *)data); |
2761 | } |
2762 | |
2763 | +static void s5p_mfc_memdev_release(struct device *dev) |
2764 | +{ |
2765 | + dma_release_declared_memory(dev); |
2766 | +} |
2767 | + |
2768 | static void *mfc_get_drv_data(struct platform_device *pdev); |
2769 | |
2770 | static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) |
2771 | @@ -1041,6 +1046,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) |
2772 | mfc_err("Not enough memory\n"); |
2773 | return -ENOMEM; |
2774 | } |
2775 | + |
2776 | + dev_set_name(dev->mem_dev_l, "%s", "s5p-mfc-l"); |
2777 | + dev->mem_dev_l->release = s5p_mfc_memdev_release; |
2778 | device_initialize(dev->mem_dev_l); |
2779 | of_property_read_u32_array(dev->plat_dev->dev.of_node, |
2780 | "samsung,mfc-l", mem_info, 2); |
2781 | @@ -1058,6 +1066,9 @@ static int s5p_mfc_alloc_memdevs(struct s5p_mfc_dev *dev) |
2782 | mfc_err("Not enough memory\n"); |
2783 | return -ENOMEM; |
2784 | } |
2785 | + |
2786 | + dev_set_name(dev->mem_dev_r, "%s", "s5p-mfc-r"); |
2787 | + dev->mem_dev_r->release = s5p_mfc_memdev_release; |
2788 | device_initialize(dev->mem_dev_r); |
2789 | of_property_read_u32_array(dev->plat_dev->dev.of_node, |
2790 | "samsung,mfc-r", mem_info, 2); |
2791 | diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c |
2792 | index 84fa6e9b59a1..67314c034cdb 100644 |
2793 | --- a/drivers/media/rc/ir-rc5-decoder.c |
2794 | +++ b/drivers/media/rc/ir-rc5-decoder.c |
2795 | @@ -29,7 +29,7 @@ |
2796 | #define RC5_BIT_START (1 * RC5_UNIT) |
2797 | #define RC5_BIT_END (1 * RC5_UNIT) |
2798 | #define RC5X_SPACE (4 * RC5_UNIT) |
2799 | -#define RC5_TRAILER (10 * RC5_UNIT) /* In reality, approx 100 */ |
2800 | +#define RC5_TRAILER (6 * RC5_UNIT) /* In reality, approx 100 */ |
2801 | |
2802 | enum rc5_state { |
2803 | STATE_INACTIVE, |
2804 | diff --git a/drivers/media/usb/usbtv/usbtv-audio.c b/drivers/media/usb/usbtv/usbtv-audio.c |
2805 | index 78c12d22dfbb..5dab02432e82 100644 |
2806 | --- a/drivers/media/usb/usbtv/usbtv-audio.c |
2807 | +++ b/drivers/media/usb/usbtv/usbtv-audio.c |
2808 | @@ -278,6 +278,9 @@ static void snd_usbtv_trigger(struct work_struct *work) |
2809 | { |
2810 | struct usbtv *chip = container_of(work, struct usbtv, snd_trigger); |
2811 | |
2812 | + if (!chip->snd) |
2813 | + return; |
2814 | + |
2815 | if (atomic_read(&chip->snd_stream)) |
2816 | usbtv_audio_start(chip); |
2817 | else |
2818 | @@ -378,6 +381,8 @@ err: |
2819 | |
2820 | void usbtv_audio_free(struct usbtv *usbtv) |
2821 | { |
2822 | + cancel_work_sync(&usbtv->snd_trigger); |
2823 | + |
2824 | if (usbtv->snd && usbtv->udev) { |
2825 | snd_card_free(usbtv->snd); |
2826 | usbtv->snd = NULL; |
2827 | diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c |
2828 | index 11f39791ec33..47f37683893a 100644 |
2829 | --- a/drivers/media/v4l2-core/videobuf2-core.c |
2830 | +++ b/drivers/media/v4l2-core/videobuf2-core.c |
2831 | @@ -1505,7 +1505,7 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, |
2832 | void *pb, int nonblocking) |
2833 | { |
2834 | unsigned long flags; |
2835 | - int ret; |
2836 | + int ret = 0; |
2837 | |
2838 | /* |
2839 | * Wait for at least one buffer to become available on the done_list. |
2840 | @@ -1521,10 +1521,12 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb, |
2841 | spin_lock_irqsave(&q->done_lock, flags); |
2842 | *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); |
2843 | /* |
2844 | - * Only remove the buffer from done_list if v4l2_buffer can handle all |
2845 | - * the planes. |
2846 | + * Only remove the buffer from done_list if all planes can be |
2847 | + * handled. Some cases such as V4L2 file I/O and DVB have pb |
2848 | + * == NULL; skip the check then as there's nothing to verify. |
2849 | */ |
2850 | - ret = call_bufop(q, verify_planes_array, *vb, pb); |
2851 | + if (pb) |
2852 | + ret = call_bufop(q, verify_planes_array, *vb, pb); |
2853 | if (!ret) |
2854 | list_del(&(*vb)->done_entry); |
2855 | spin_unlock_irqrestore(&q->done_lock, flags); |
2856 | diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c |
2857 | index 502984c724ff..6c441be8f893 100644 |
2858 | --- a/drivers/media/v4l2-core/videobuf2-v4l2.c |
2859 | +++ b/drivers/media/v4l2-core/videobuf2-v4l2.c |
2860 | @@ -67,6 +67,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer |
2861 | return 0; |
2862 | } |
2863 | |
2864 | +static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb) |
2865 | +{ |
2866 | + return __verify_planes_array(vb, pb); |
2867 | +} |
2868 | + |
2869 | /** |
2870 | * __verify_length() - Verify that the bytesused value for each plane fits in |
2871 | * the plane length and that the data offset doesn't exceed the bytesused value. |
2872 | @@ -432,6 +437,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb, |
2873 | } |
2874 | |
2875 | static const struct vb2_buf_ops v4l2_buf_ops = { |
2876 | + .verify_planes_array = __verify_planes_array_core, |
2877 | .fill_user_buffer = __fill_v4l2_buffer, |
2878 | .fill_vb2_buffer = __fill_vb2_buffer, |
2879 | .set_timestamp = __set_timestamp, |
2880 | diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c |
2881 | index 207a3bd68559..a867cc91657e 100644 |
2882 | --- a/drivers/mfd/qcom_rpm.c |
2883 | +++ b/drivers/mfd/qcom_rpm.c |
2884 | @@ -34,7 +34,13 @@ struct qcom_rpm_resource { |
2885 | struct qcom_rpm_data { |
2886 | u32 version; |
2887 | const struct qcom_rpm_resource *resource_table; |
2888 | - unsigned n_resources; |
2889 | + unsigned int n_resources; |
2890 | + unsigned int req_ctx_off; |
2891 | + unsigned int req_sel_off; |
2892 | + unsigned int ack_ctx_off; |
2893 | + unsigned int ack_sel_off; |
2894 | + unsigned int req_sel_size; |
2895 | + unsigned int ack_sel_size; |
2896 | }; |
2897 | |
2898 | struct qcom_rpm { |
2899 | @@ -61,11 +67,7 @@ struct qcom_rpm { |
2900 | |
2901 | #define RPM_REQUEST_TIMEOUT (5 * HZ) |
2902 | |
2903 | -#define RPM_REQUEST_CONTEXT 3 |
2904 | -#define RPM_REQ_SELECT 11 |
2905 | -#define RPM_ACK_CONTEXT 15 |
2906 | -#define RPM_ACK_SELECTOR 23 |
2907 | -#define RPM_SELECT_SIZE 7 |
2908 | +#define RPM_MAX_SEL_SIZE 7 |
2909 | |
2910 | #define RPM_NOTIFICATION BIT(30) |
2911 | #define RPM_REJECTED BIT(31) |
2912 | @@ -157,6 +159,12 @@ static const struct qcom_rpm_data apq8064_template = { |
2913 | .version = 3, |
2914 | .resource_table = apq8064_rpm_resource_table, |
2915 | .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table), |
2916 | + .req_ctx_off = 3, |
2917 | + .req_sel_off = 11, |
2918 | + .ack_ctx_off = 15, |
2919 | + .ack_sel_off = 23, |
2920 | + .req_sel_size = 4, |
2921 | + .ack_sel_size = 7, |
2922 | }; |
2923 | |
2924 | static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = { |
2925 | @@ -240,6 +248,12 @@ static const struct qcom_rpm_data msm8660_template = { |
2926 | .version = 2, |
2927 | .resource_table = msm8660_rpm_resource_table, |
2928 | .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table), |
2929 | + .req_ctx_off = 3, |
2930 | + .req_sel_off = 11, |
2931 | + .ack_ctx_off = 19, |
2932 | + .ack_sel_off = 27, |
2933 | + .req_sel_size = 7, |
2934 | + .ack_sel_size = 7, |
2935 | }; |
2936 | |
2937 | static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = { |
2938 | @@ -322,6 +336,12 @@ static const struct qcom_rpm_data msm8960_template = { |
2939 | .version = 3, |
2940 | .resource_table = msm8960_rpm_resource_table, |
2941 | .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table), |
2942 | + .req_ctx_off = 3, |
2943 | + .req_sel_off = 11, |
2944 | + .ack_ctx_off = 15, |
2945 | + .ack_sel_off = 23, |
2946 | + .req_sel_size = 4, |
2947 | + .ack_sel_size = 7, |
2948 | }; |
2949 | |
2950 | static const struct qcom_rpm_resource ipq806x_rpm_resource_table[] = { |
2951 | @@ -362,6 +382,12 @@ static const struct qcom_rpm_data ipq806x_template = { |
2952 | .version = 3, |
2953 | .resource_table = ipq806x_rpm_resource_table, |
2954 | .n_resources = ARRAY_SIZE(ipq806x_rpm_resource_table), |
2955 | + .req_ctx_off = 3, |
2956 | + .req_sel_off = 11, |
2957 | + .ack_ctx_off = 15, |
2958 | + .ack_sel_off = 23, |
2959 | + .req_sel_size = 4, |
2960 | + .ack_sel_size = 7, |
2961 | }; |
2962 | |
2963 | static const struct of_device_id qcom_rpm_of_match[] = { |
2964 | @@ -380,7 +406,7 @@ int qcom_rpm_write(struct qcom_rpm *rpm, |
2965 | { |
2966 | const struct qcom_rpm_resource *res; |
2967 | const struct qcom_rpm_data *data = rpm->data; |
2968 | - u32 sel_mask[RPM_SELECT_SIZE] = { 0 }; |
2969 | + u32 sel_mask[RPM_MAX_SEL_SIZE] = { 0 }; |
2970 | int left; |
2971 | int ret = 0; |
2972 | int i; |
2973 | @@ -398,12 +424,12 @@ int qcom_rpm_write(struct qcom_rpm *rpm, |
2974 | writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i)); |
2975 | |
2976 | bitmap_set((unsigned long *)sel_mask, res->select_id, 1); |
2977 | - for (i = 0; i < ARRAY_SIZE(sel_mask); i++) { |
2978 | + for (i = 0; i < rpm->data->req_sel_size; i++) { |
2979 | writel_relaxed(sel_mask[i], |
2980 | - RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i)); |
2981 | + RPM_CTRL_REG(rpm, rpm->data->req_sel_off + i)); |
2982 | } |
2983 | |
2984 | - writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT)); |
2985 | + writel_relaxed(BIT(state), RPM_CTRL_REG(rpm, rpm->data->req_ctx_off)); |
2986 | |
2987 | reinit_completion(&rpm->ack); |
2988 | regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit)); |
2989 | @@ -426,10 +452,11 @@ static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev) |
2990 | u32 ack; |
2991 | int i; |
2992 | |
2993 | - ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); |
2994 | - for (i = 0; i < RPM_SELECT_SIZE; i++) |
2995 | - writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i)); |
2996 | - writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); |
2997 | + ack = readl_relaxed(RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off)); |
2998 | + for (i = 0; i < rpm->data->ack_sel_size; i++) |
2999 | + writel_relaxed(0, |
3000 | + RPM_CTRL_REG(rpm, rpm->data->ack_sel_off + i)); |
3001 | + writel(0, RPM_CTRL_REG(rpm, rpm->data->ack_ctx_off)); |
3002 | |
3003 | if (ack & RPM_NOTIFICATION) { |
3004 | dev_warn(rpm->dev, "ignoring notification!\n"); |
3005 | diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c |
3006 | index ce7b2cab5762..54ab48827258 100644 |
3007 | --- a/drivers/mtd/nand/nand_base.c |
3008 | +++ b/drivers/mtd/nand/nand_base.c |
3009 | @@ -2586,7 +2586,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, |
3010 | int cached = writelen > bytes && page != blockmask; |
3011 | uint8_t *wbuf = buf; |
3012 | int use_bufpoi; |
3013 | - int part_pagewr = (column || writelen < (mtd->writesize - 1)); |
3014 | + int part_pagewr = (column || writelen < mtd->writesize); |
3015 | |
3016 | if (part_pagewr) |
3017 | use_bufpoi = 1; |
3018 | diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c |
3019 | index 22fd19c0c5d3..27de0463226e 100644 |
3020 | --- a/drivers/mtd/ubi/build.c |
3021 | +++ b/drivers/mtd/ubi/build.c |
3022 | @@ -869,7 +869,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
3023 | for (i = 0; i < UBI_MAX_DEVICES; i++) { |
3024 | ubi = ubi_devices[i]; |
3025 | if (ubi && mtd->index == ubi->mtd->index) { |
3026 | - ubi_err(ubi, "mtd%d is already attached to ubi%d", |
3027 | + pr_err("ubi: mtd%d is already attached to ubi%d", |
3028 | mtd->index, i); |
3029 | return -EEXIST; |
3030 | } |
3031 | @@ -884,7 +884,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
3032 | * no sense to attach emulated MTD devices, so we prohibit this. |
3033 | */ |
3034 | if (mtd->type == MTD_UBIVOLUME) { |
3035 | - ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI", |
3036 | + pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI", |
3037 | mtd->index); |
3038 | return -EINVAL; |
3039 | } |
3040 | @@ -895,7 +895,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
3041 | if (!ubi_devices[ubi_num]) |
3042 | break; |
3043 | if (ubi_num == UBI_MAX_DEVICES) { |
3044 | - ubi_err(ubi, "only %d UBI devices may be created", |
3045 | + pr_err("ubi: only %d UBI devices may be created", |
3046 | UBI_MAX_DEVICES); |
3047 | return -ENFILE; |
3048 | } |
3049 | @@ -905,7 +905,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
3050 | |
3051 | /* Make sure ubi_num is not busy */ |
3052 | if (ubi_devices[ubi_num]) { |
3053 | - ubi_err(ubi, "already exists"); |
3054 | + pr_err("ubi: ubi%i already exists", ubi_num); |
3055 | return -EEXIST; |
3056 | } |
3057 | } |
3058 | @@ -987,6 +987,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
3059 | goto out_detach; |
3060 | } |
3061 | |
3062 | + /* Make device "available" before it becomes accessible via sysfs */ |
3063 | + ubi_devices[ubi_num] = ubi; |
3064 | + |
3065 | err = uif_init(ubi, &ref); |
3066 | if (err) |
3067 | goto out_detach; |
3068 | @@ -1031,7 +1034,6 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, |
3069 | wake_up_process(ubi->bgt_thread); |
3070 | spin_unlock(&ubi->wl_lock); |
3071 | |
3072 | - ubi_devices[ubi_num] = ubi; |
3073 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); |
3074 | return ubi_num; |
3075 | |
3076 | @@ -1042,6 +1044,7 @@ out_uif: |
3077 | ubi_assert(ref); |
3078 | uif_close(ubi); |
3079 | out_detach: |
3080 | + ubi_devices[ubi_num] = NULL; |
3081 | ubi_wl_close(ubi); |
3082 | ubi_free_internal_volumes(ubi); |
3083 | vfree(ubi->vtbl); |
3084 | diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c |
3085 | index 1ae17bb9b889..3ea4c022cbb9 100644 |
3086 | --- a/drivers/mtd/ubi/vmt.c |
3087 | +++ b/drivers/mtd/ubi/vmt.c |
3088 | @@ -488,13 +488,6 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) |
3089 | spin_unlock(&ubi->volumes_lock); |
3090 | } |
3091 | |
3092 | - /* Change volume table record */ |
3093 | - vtbl_rec = ubi->vtbl[vol_id]; |
3094 | - vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); |
3095 | - err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); |
3096 | - if (err) |
3097 | - goto out_acc; |
3098 | - |
3099 | if (pebs < 0) { |
3100 | for (i = 0; i < -pebs; i++) { |
3101 | err = ubi_eba_unmap_leb(ubi, vol, reserved_pebs + i); |
3102 | @@ -512,6 +505,24 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) |
3103 | spin_unlock(&ubi->volumes_lock); |
3104 | } |
3105 | |
3106 | + /* |
3107 | + * When we shrink a volume we have to flush all pending (erase) work. |
3108 | + * Otherwise it can happen that upon next attach UBI finds a LEB with |
3109 | + * lnum > highest_lnum and refuses to attach. |
3110 | + */ |
3111 | + if (pebs < 0) { |
3112 | + err = ubi_wl_flush(ubi, vol_id, UBI_ALL); |
3113 | + if (err) |
3114 | + goto out_acc; |
3115 | + } |
3116 | + |
3117 | + /* Change volume table record */ |
3118 | + vtbl_rec = ubi->vtbl[vol_id]; |
3119 | + vtbl_rec.reserved_pebs = cpu_to_be32(reserved_pebs); |
3120 | + err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); |
3121 | + if (err) |
3122 | + goto out_acc; |
3123 | + |
3124 | vol->reserved_pebs = reserved_pebs; |
3125 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) { |
3126 | vol->used_ebs = reserved_pebs; |
3127 | diff --git a/drivers/of/base.c b/drivers/of/base.c |
3128 | index 017dd94f16ea..942461f36616 100644 |
3129 | --- a/drivers/of/base.c |
3130 | +++ b/drivers/of/base.c |
3131 | @@ -112,6 +112,7 @@ static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj, |
3132 | return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length); |
3133 | } |
3134 | |
3135 | +/* always return newly allocated name, caller must free after use */ |
3136 | static const char *safe_name(struct kobject *kobj, const char *orig_name) |
3137 | { |
3138 | const char *name = orig_name; |
3139 | @@ -126,9 +127,12 @@ static const char *safe_name(struct kobject *kobj, const char *orig_name) |
3140 | name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i); |
3141 | } |
3142 | |
3143 | - if (name != orig_name) |
3144 | + if (name == orig_name) { |
3145 | + name = kstrdup(orig_name, GFP_KERNEL); |
3146 | + } else { |
3147 | pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n", |
3148 | kobject_name(kobj), name); |
3149 | + } |
3150 | return name; |
3151 | } |
3152 | |
3153 | @@ -159,6 +163,7 @@ int __of_add_property_sysfs(struct device_node *np, struct property *pp) |
3154 | int __of_attach_node_sysfs(struct device_node *np) |
3155 | { |
3156 | const char *name; |
3157 | + struct kobject *parent; |
3158 | struct property *pp; |
3159 | int rc; |
3160 | |
3161 | @@ -171,15 +176,16 @@ int __of_attach_node_sysfs(struct device_node *np) |
3162 | np->kobj.kset = of_kset; |
3163 | if (!np->parent) { |
3164 | /* Nodes without parents are new top level trees */ |
3165 | - rc = kobject_add(&np->kobj, NULL, "%s", |
3166 | - safe_name(&of_kset->kobj, "base")); |
3167 | + name = safe_name(&of_kset->kobj, "base"); |
3168 | + parent = NULL; |
3169 | } else { |
3170 | name = safe_name(&np->parent->kobj, kbasename(np->full_name)); |
3171 | - if (!name || !name[0]) |
3172 | - return -EINVAL; |
3173 | - |
3174 | - rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name); |
3175 | + parent = &np->parent->kobj; |
3176 | } |
3177 | + if (!name) |
3178 | + return -ENOMEM; |
3179 | + rc = kobject_add(&np->kobj, parent, "%s", name); |
3180 | + kfree(name); |
3181 | if (rc) |
3182 | return rc; |
3183 | |
3184 | @@ -1753,6 +1759,12 @@ int __of_remove_property(struct device_node *np, struct property *prop) |
3185 | return 0; |
3186 | } |
3187 | |
3188 | +void __of_sysfs_remove_bin_file(struct device_node *np, struct property *prop) |
3189 | +{ |
3190 | + sysfs_remove_bin_file(&np->kobj, &prop->attr); |
3191 | + kfree(prop->attr.attr.name); |
3192 | +} |
3193 | + |
3194 | void __of_remove_property_sysfs(struct device_node *np, struct property *prop) |
3195 | { |
3196 | if (!IS_ENABLED(CONFIG_SYSFS)) |
3197 | @@ -1760,7 +1772,7 @@ void __of_remove_property_sysfs(struct device_node *np, struct property *prop) |
3198 | |
3199 | /* at early boot, bail here and defer setup to of_init() */ |
3200 | if (of_kset && of_node_is_attached(np)) |
3201 | - sysfs_remove_bin_file(&np->kobj, &prop->attr); |
3202 | + __of_sysfs_remove_bin_file(np, prop); |
3203 | } |
3204 | |
3205 | /** |
3206 | @@ -1830,7 +1842,7 @@ void __of_update_property_sysfs(struct device_node *np, struct property *newprop |
3207 | return; |
3208 | |
3209 | if (oldprop) |
3210 | - sysfs_remove_bin_file(&np->kobj, &oldprop->attr); |
3211 | + __of_sysfs_remove_bin_file(np, oldprop); |
3212 | __of_add_property_sysfs(np, newprop); |
3213 | } |
3214 | |
3215 | diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c |
3216 | index 53826b84e0ec..2d72ddcf534f 100644 |
3217 | --- a/drivers/of/dynamic.c |
3218 | +++ b/drivers/of/dynamic.c |
3219 | @@ -55,7 +55,7 @@ void __of_detach_node_sysfs(struct device_node *np) |
3220 | /* only remove properties if on sysfs */ |
3221 | if (of_node_is_attached(np)) { |
3222 | for_each_property_of_node(np, pp) |
3223 | - sysfs_remove_bin_file(&np->kobj, &pp->attr); |
3224 | + __of_sysfs_remove_bin_file(np, pp); |
3225 | kobject_del(&np->kobj); |
3226 | } |
3227 | |
3228 | diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h |
3229 | index 8e882e706cd8..46ddbee22ce3 100644 |
3230 | --- a/drivers/of/of_private.h |
3231 | +++ b/drivers/of/of_private.h |
3232 | @@ -81,6 +81,9 @@ extern int __of_attach_node_sysfs(struct device_node *np); |
3233 | extern void __of_detach_node(struct device_node *np); |
3234 | extern void __of_detach_node_sysfs(struct device_node *np); |
3235 | |
3236 | +extern void __of_sysfs_remove_bin_file(struct device_node *np, |
3237 | + struct property *prop); |
3238 | + |
3239 | /* iterators for transactions, used for overlays */ |
3240 | /* forward iterator */ |
3241 | #define for_each_transaction_entry(_oft, _te) \ |
3242 | diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c |
3243 | index 7e327309cf69..3c4752a288e2 100644 |
3244 | --- a/drivers/pci/quirks.c |
3245 | +++ b/drivers/pci/quirks.c |
3246 | @@ -3115,13 +3115,15 @@ static void quirk_no_bus_reset(struct pci_dev *dev) |
3247 | } |
3248 | |
3249 | /* |
3250 | - * Atheros AR93xx chips do not behave after a bus reset. The device will |
3251 | - * throw a Link Down error on AER-capable systems and regardless of AER, |
3252 | - * config space of the device is never accessible again and typically |
3253 | - * causes the system to hang or reset when access is attempted. |
3254 | + * Some Atheros AR9xxx and QCA988x chips do not behave after a bus reset. |
3255 | + * The device will throw a Link Down error on AER-capable systems and |
3256 | + * regardless of AER, config space of the device is never accessible again |
3257 | + * and typically causes the system to hang or reset when access is attempted. |
3258 | * http://www.spinics.net/lists/linux-pci/msg34797.html |
3259 | */ |
3260 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); |
3261 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); |
3262 | +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x003c, quirk_no_bus_reset); |
3263 | |
3264 | static void quirk_no_pm_reset(struct pci_dev *dev) |
3265 | { |
3266 | diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c |
3267 | index 84936bae6e5e..4e377599d266 100644 |
3268 | --- a/drivers/pinctrl/intel/pinctrl-cherryview.c |
3269 | +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c |
3270 | @@ -160,7 +160,6 @@ struct chv_pin_context { |
3271 | * @pctldev: Pointer to the pin controller device |
3272 | * @chip: GPIO chip in this pin controller |
3273 | * @regs: MMIO registers |
3274 | - * @lock: Lock to serialize register accesses |
3275 | * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO |
3276 | * offset (in GPIO number space) |
3277 | * @community: Community this pinctrl instance represents |
3278 | @@ -174,7 +173,6 @@ struct chv_pinctrl { |
3279 | struct pinctrl_dev *pctldev; |
3280 | struct gpio_chip chip; |
3281 | void __iomem *regs; |
3282 | - raw_spinlock_t lock; |
3283 | unsigned intr_lines[16]; |
3284 | const struct chv_community *community; |
3285 | u32 saved_intmask; |
3286 | @@ -659,6 +657,17 @@ static const struct chv_community *chv_communities[] = { |
3287 | &southeast_community, |
3288 | }; |
3289 | |
3290 | +/* |
3291 | + * Lock to serialize register accesses |
3292 | + * |
3293 | + * Due to a silicon issue, a shared lock must be used to prevent |
3294 | + * concurrent accesses across the 4 GPIO controllers. |
3295 | + * |
3296 | + * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005), |
3297 | + * errata #CHT34, for further information. |
3298 | + */ |
3299 | +static DEFINE_RAW_SPINLOCK(chv_lock); |
3300 | + |
3301 | static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned offset, |
3302 | unsigned reg) |
3303 | { |
3304 | @@ -720,13 +729,13 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, |
3305 | u32 ctrl0, ctrl1; |
3306 | bool locked; |
3307 | |
3308 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3309 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3310 | |
3311 | ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); |
3312 | ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1)); |
3313 | locked = chv_pad_locked(pctrl, offset); |
3314 | |
3315 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3316 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3317 | |
3318 | if (ctrl0 & CHV_PADCTRL0_GPIOEN) { |
3319 | seq_puts(s, "GPIO "); |
3320 | @@ -789,14 +798,14 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, |
3321 | |
3322 | grp = &pctrl->community->groups[group]; |
3323 | |
3324 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3325 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3326 | |
3327 | /* Check first that the pad is not locked */ |
3328 | for (i = 0; i < grp->npins; i++) { |
3329 | if (chv_pad_locked(pctrl, grp->pins[i])) { |
3330 | dev_warn(pctrl->dev, "unable to set mode for locked pin %u\n", |
3331 | grp->pins[i]); |
3332 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3333 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3334 | return -EBUSY; |
3335 | } |
3336 | } |
3337 | @@ -839,7 +848,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function, |
3338 | pin, altfunc->mode, altfunc->invert_oe ? "" : "not "); |
3339 | } |
3340 | |
3341 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3342 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3343 | |
3344 | return 0; |
3345 | } |
3346 | @@ -853,13 +862,13 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, |
3347 | void __iomem *reg; |
3348 | u32 value; |
3349 | |
3350 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3351 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3352 | |
3353 | if (chv_pad_locked(pctrl, offset)) { |
3354 | value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); |
3355 | if (!(value & CHV_PADCTRL0_GPIOEN)) { |
3356 | /* Locked so cannot enable */ |
3357 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3358 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3359 | return -EBUSY; |
3360 | } |
3361 | } else { |
3362 | @@ -899,7 +908,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, |
3363 | chv_writel(value, reg); |
3364 | } |
3365 | |
3366 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3367 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3368 | |
3369 | return 0; |
3370 | } |
3371 | @@ -913,13 +922,13 @@ static void chv_gpio_disable_free(struct pinctrl_dev *pctldev, |
3372 | void __iomem *reg; |
3373 | u32 value; |
3374 | |
3375 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3376 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3377 | |
3378 | reg = chv_padreg(pctrl, offset, CHV_PADCTRL0); |
3379 | value = readl(reg) & ~CHV_PADCTRL0_GPIOEN; |
3380 | chv_writel(value, reg); |
3381 | |
3382 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3383 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3384 | } |
3385 | |
3386 | static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, |
3387 | @@ -931,7 +940,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, |
3388 | unsigned long flags; |
3389 | u32 ctrl0; |
3390 | |
3391 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3392 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3393 | |
3394 | ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK; |
3395 | if (input) |
3396 | @@ -940,7 +949,7 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, |
3397 | ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT; |
3398 | chv_writel(ctrl0, reg); |
3399 | |
3400 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3401 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3402 | |
3403 | return 0; |
3404 | } |
3405 | @@ -965,10 +974,10 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned pin, |
3406 | u16 arg = 0; |
3407 | u32 term; |
3408 | |
3409 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3410 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3411 | ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); |
3412 | ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); |
3413 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3414 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3415 | |
3416 | term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT; |
3417 | |
3418 | @@ -1042,7 +1051,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, |
3419 | unsigned long flags; |
3420 | u32 ctrl0, pull; |
3421 | |
3422 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3423 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3424 | ctrl0 = readl(reg); |
3425 | |
3426 | switch (param) { |
3427 | @@ -1065,7 +1074,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, |
3428 | pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT; |
3429 | break; |
3430 | default: |
3431 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3432 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3433 | return -EINVAL; |
3434 | } |
3435 | |
3436 | @@ -1083,7 +1092,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, |
3437 | pull = CHV_PADCTRL0_TERM_20K << CHV_PADCTRL0_TERM_SHIFT; |
3438 | break; |
3439 | default: |
3440 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3441 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3442 | return -EINVAL; |
3443 | } |
3444 | |
3445 | @@ -1091,12 +1100,12 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned pin, |
3446 | break; |
3447 | |
3448 | default: |
3449 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3450 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3451 | return -EINVAL; |
3452 | } |
3453 | |
3454 | chv_writel(ctrl0, reg); |
3455 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3456 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3457 | |
3458 | return 0; |
3459 | } |
3460 | @@ -1162,9 +1171,9 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned offset) |
3461 | unsigned long flags; |
3462 | u32 ctrl0, cfg; |
3463 | |
3464 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3465 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3466 | ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); |
3467 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3468 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3469 | |
3470 | cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; |
3471 | cfg >>= CHV_PADCTRL0_GPIOCFG_SHIFT; |
3472 | @@ -1182,7 +1191,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value) |
3473 | void __iomem *reg; |
3474 | u32 ctrl0; |
3475 | |
3476 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3477 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3478 | |
3479 | reg = chv_padreg(pctrl, pin, CHV_PADCTRL0); |
3480 | ctrl0 = readl(reg); |
3481 | @@ -1194,7 +1203,7 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned offset, int value) |
3482 | |
3483 | chv_writel(ctrl0, reg); |
3484 | |
3485 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3486 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3487 | } |
3488 | |
3489 | static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset) |
3490 | @@ -1204,9 +1213,9 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned offset) |
3491 | u32 ctrl0, direction; |
3492 | unsigned long flags; |
3493 | |
3494 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3495 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3496 | ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); |
3497 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3498 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3499 | |
3500 | direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; |
3501 | direction >>= CHV_PADCTRL0_GPIOCFG_SHIFT; |
3502 | @@ -1244,14 +1253,14 @@ static void chv_gpio_irq_ack(struct irq_data *d) |
3503 | int pin = chv_gpio_offset_to_pin(pctrl, irqd_to_hwirq(d)); |
3504 | u32 intr_line; |
3505 | |
3506 | - raw_spin_lock(&pctrl->lock); |
3507 | + raw_spin_lock(&chv_lock); |
3508 | |
3509 | intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); |
3510 | intr_line &= CHV_PADCTRL0_INTSEL_MASK; |
3511 | intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT; |
3512 | chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT); |
3513 | |
3514 | - raw_spin_unlock(&pctrl->lock); |
3515 | + raw_spin_unlock(&chv_lock); |
3516 | } |
3517 | |
3518 | static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) |
3519 | @@ -1262,7 +1271,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) |
3520 | u32 value, intr_line; |
3521 | unsigned long flags; |
3522 | |
3523 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3524 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3525 | |
3526 | intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); |
3527 | intr_line &= CHV_PADCTRL0_INTSEL_MASK; |
3528 | @@ -1275,7 +1284,7 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) |
3529 | value |= BIT(intr_line); |
3530 | chv_writel(value, pctrl->regs + CHV_INTMASK); |
3531 | |
3532 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3533 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3534 | } |
3535 | |
3536 | static void chv_gpio_irq_mask(struct irq_data *d) |
3537 | @@ -1309,7 +1318,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) |
3538 | unsigned long flags; |
3539 | u32 intsel, value; |
3540 | |
3541 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3542 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3543 | intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); |
3544 | intsel &= CHV_PADCTRL0_INTSEL_MASK; |
3545 | intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; |
3546 | @@ -1324,7 +1333,7 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) |
3547 | irq_set_handler_locked(d, handler); |
3548 | pctrl->intr_lines[intsel] = offset; |
3549 | } |
3550 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3551 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3552 | } |
3553 | |
3554 | chv_gpio_irq_unmask(d); |
3555 | @@ -1340,7 +1349,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) |
3556 | unsigned long flags; |
3557 | u32 value; |
3558 | |
3559 | - raw_spin_lock_irqsave(&pctrl->lock, flags); |
3560 | + raw_spin_lock_irqsave(&chv_lock, flags); |
3561 | |
3562 | /* |
3563 | * Pins which can be used as shared interrupt are configured in |
3564 | @@ -1389,7 +1398,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type) |
3565 | else if (type & IRQ_TYPE_LEVEL_MASK) |
3566 | irq_set_handler_locked(d, handle_level_irq); |
3567 | |
3568 | - raw_spin_unlock_irqrestore(&pctrl->lock, flags); |
3569 | + raw_spin_unlock_irqrestore(&chv_lock, flags); |
3570 | |
3571 | return 0; |
3572 | } |
3573 | @@ -1501,7 +1510,6 @@ static int chv_pinctrl_probe(struct platform_device *pdev) |
3574 | if (i == ARRAY_SIZE(chv_communities)) |
3575 | return -ENODEV; |
3576 | |
3577 | - raw_spin_lock_init(&pctrl->lock); |
3578 | pctrl->dev = &pdev->dev; |
3579 | |
3580 | #ifdef CONFIG_PM_SLEEP |
3581 | diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c |
3582 | index fb4dd7b3ee71..af2046c87806 100644 |
3583 | --- a/drivers/platform/x86/hp-wmi.c |
3584 | +++ b/drivers/platform/x86/hp-wmi.c |
3585 | @@ -723,6 +723,11 @@ static int __init hp_wmi_rfkill_setup(struct platform_device *device) |
3586 | if (err) |
3587 | return err; |
3588 | |
3589 | + err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, 1, &wireless, |
3590 | + sizeof(wireless), 0); |
3591 | + if (err) |
3592 | + return err; |
3593 | + |
3594 | if (wireless & 0x1) { |
3595 | wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, |
3596 | RFKILL_TYPE_WLAN, |
3597 | @@ -910,7 +915,7 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) |
3598 | gps_rfkill = NULL; |
3599 | rfkill2_count = 0; |
3600 | |
3601 | - if (hp_wmi_bios_2009_later() || hp_wmi_rfkill_setup(device)) |
3602 | + if (hp_wmi_rfkill_setup(device)) |
3603 | hp_wmi_rfkill2_setup(device); |
3604 | |
3605 | err = device_create_file(&device->dev, &dev_attr_display); |
3606 | diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c |
3607 | index 9e03d158f411..4f7ce0097191 100644 |
3608 | --- a/drivers/remoteproc/remoteproc_core.c |
3609 | +++ b/drivers/remoteproc/remoteproc_core.c |
3610 | @@ -1239,11 +1239,6 @@ int rproc_add(struct rproc *rproc) |
3611 | if (ret < 0) |
3612 | return ret; |
3613 | |
3614 | - /* expose to rproc_get_by_phandle users */ |
3615 | - mutex_lock(&rproc_list_mutex); |
3616 | - list_add(&rproc->node, &rproc_list); |
3617 | - mutex_unlock(&rproc_list_mutex); |
3618 | - |
3619 | dev_info(dev, "%s is available\n", rproc->name); |
3620 | |
3621 | dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); |
3622 | @@ -1251,8 +1246,16 @@ int rproc_add(struct rproc *rproc) |
3623 | |
3624 | /* create debugfs entries */ |
3625 | rproc_create_debug_dir(rproc); |
3626 | + ret = rproc_add_virtio_devices(rproc); |
3627 | + if (ret < 0) |
3628 | + return ret; |
3629 | |
3630 | - return rproc_add_virtio_devices(rproc); |
3631 | + /* expose to rproc_get_by_phandle users */ |
3632 | + mutex_lock(&rproc_list_mutex); |
3633 | + list_add(&rproc->node, &rproc_list); |
3634 | + mutex_unlock(&rproc_list_mutex); |
3635 | + |
3636 | + return 0; |
3637 | } |
3638 | EXPORT_SYMBOL(rproc_add); |
3639 | |
3640 | diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c |
3641 | index ffb860d18701..f92528822f06 100644 |
3642 | --- a/drivers/rtc/rtc-s3c.c |
3643 | +++ b/drivers/rtc/rtc-s3c.c |
3644 | @@ -149,12 +149,14 @@ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq) |
3645 | if (!is_power_of_2(freq)) |
3646 | return -EINVAL; |
3647 | |
3648 | + s3c_rtc_enable_clk(info); |
3649 | spin_lock_irq(&info->pie_lock); |
3650 | |
3651 | if (info->data->set_freq) |
3652 | info->data->set_freq(info, freq); |
3653 | |
3654 | spin_unlock_irq(&info->pie_lock); |
3655 | + s3c_rtc_disable_clk(info); |
3656 | |
3657 | return 0; |
3658 | } |
3659 | diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c |
3660 | index b2afad5a5682..2a34eb5f6161 100644 |
3661 | --- a/drivers/s390/cio/cmf.c |
3662 | +++ b/drivers/s390/cio/cmf.c |
3663 | @@ -753,6 +753,17 @@ static void reset_cmb(struct ccw_device *cdev) |
3664 | cmf_generic_reset(cdev); |
3665 | } |
3666 | |
3667 | +static int cmf_enabled(struct ccw_device *cdev) |
3668 | +{ |
3669 | + int enabled; |
3670 | + |
3671 | + spin_lock_irq(cdev->ccwlock); |
3672 | + enabled = !!cdev->private->cmb; |
3673 | + spin_unlock_irq(cdev->ccwlock); |
3674 | + |
3675 | + return enabled; |
3676 | +} |
3677 | + |
3678 | static struct attribute_group cmf_attr_group; |
3679 | |
3680 | static struct cmb_operations cmbops_basic = { |
3681 | @@ -1153,13 +1164,8 @@ static ssize_t cmb_enable_show(struct device *dev, |
3682 | char *buf) |
3683 | { |
3684 | struct ccw_device *cdev = to_ccwdev(dev); |
3685 | - int enabled; |
3686 | |
3687 | - spin_lock_irq(cdev->ccwlock); |
3688 | - enabled = !!cdev->private->cmb; |
3689 | - spin_unlock_irq(cdev->ccwlock); |
3690 | - |
3691 | - return sprintf(buf, "%d\n", enabled); |
3692 | + return sprintf(buf, "%d\n", cmf_enabled(cdev)); |
3693 | } |
3694 | |
3695 | static ssize_t cmb_enable_store(struct device *dev, |
3696 | @@ -1199,15 +1205,20 @@ int ccw_set_cmf(struct ccw_device *cdev, int enable) |
3697 | * @cdev: The ccw device to be enabled |
3698 | * |
3699 | * Returns %0 for success or a negative error value. |
3700 | - * |
3701 | + * Note: If this is called on a device for which channel measurement is already |
3702 | + * enabled a reset of the measurement data is triggered. |
3703 | * Context: |
3704 | * non-atomic |
3705 | */ |
3706 | int enable_cmf(struct ccw_device *cdev) |
3707 | { |
3708 | - int ret; |
3709 | + int ret = 0; |
3710 | |
3711 | device_lock(&cdev->dev); |
3712 | + if (cmf_enabled(cdev)) { |
3713 | + cmbops->reset(cdev); |
3714 | + goto out_unlock; |
3715 | + } |
3716 | get_device(&cdev->dev); |
3717 | ret = cmbops->alloc(cdev); |
3718 | if (ret) |
3719 | @@ -1226,7 +1237,7 @@ int enable_cmf(struct ccw_device *cdev) |
3720 | out: |
3721 | if (ret) |
3722 | put_device(&cdev->dev); |
3723 | - |
3724 | +out_unlock: |
3725 | device_unlock(&cdev->dev); |
3726 | return ret; |
3727 | } |
3728 | diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c |
3729 | index 4679ed4444a7..9e165bc05ee1 100644 |
3730 | --- a/drivers/scsi/lpfc/lpfc_scsi.c |
3731 | +++ b/drivers/scsi/lpfc/lpfc_scsi.c |
3732 | @@ -3859,7 +3859,7 @@ int lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, |
3733 | uint32_t tag; |
3734 | uint16_t hwq; |
3735 | |
3736 | - if (shost_use_blk_mq(cmnd->device->host)) { |
3737 | + if (cmnd && shost_use_blk_mq(cmnd->device->host)) { |
3738 | tag = blk_mq_unique_tag(cmnd->request); |
3739 | hwq = blk_mq_unique_tag_to_hwq(tag); |
3740 | |
3741 | diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c |
3742 | index 73c8ea0b1360..3cac73e4c3e4 100644 |
3743 | --- a/drivers/spi/spi-pxa2xx.c |
3744 | +++ b/drivers/spi/spi-pxa2xx.c |
3745 | @@ -548,7 +548,14 @@ static void reset_sccr1(struct driver_data *drv_data) |
3746 | u32 sccr1_reg; |
3747 | |
3748 | sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1; |
3749 | - sccr1_reg &= ~SSCR1_RFT; |
3750 | + switch (drv_data->ssp_type) { |
3751 | + case QUARK_X1000_SSP: |
3752 | + sccr1_reg &= ~QUARK_X1000_SSCR1_RFT; |
3753 | + break; |
3754 | + default: |
3755 | + sccr1_reg &= ~SSCR1_RFT; |
3756 | + break; |
3757 | + } |
3758 | sccr1_reg |= chip->threshold; |
3759 | pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg); |
3760 | } |
3761 | diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c |
3762 | index 72204fbf2bb1..bd810c109277 100644 |
3763 | --- a/drivers/target/iscsi/iscsi_target.c |
3764 | +++ b/drivers/target/iscsi/iscsi_target.c |
3765 | @@ -492,7 +492,8 @@ static void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd) |
3766 | bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD); |
3767 | |
3768 | spin_lock_bh(&conn->cmd_lock); |
3769 | - if (!list_empty(&cmd->i_conn_node)) |
3770 | + if (!list_empty(&cmd->i_conn_node) && |
3771 | + !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP)) |
3772 | list_del_init(&cmd->i_conn_node); |
3773 | spin_unlock_bh(&conn->cmd_lock); |
3774 | |
3775 | @@ -4194,6 +4195,7 @@ transport_err: |
3776 | |
3777 | static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) |
3778 | { |
3779 | + LIST_HEAD(tmp_list); |
3780 | struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL; |
3781 | struct iscsi_session *sess = conn->sess; |
3782 | /* |
3783 | @@ -4202,18 +4204,26 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) |
3784 | * has been reset -> returned sleeping pre-handler state. |
3785 | */ |
3786 | spin_lock_bh(&conn->cmd_lock); |
3787 | - list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { |
3788 | + list_splice_init(&conn->conn_cmd_list, &tmp_list); |
3789 | |
3790 | + list_for_each_entry(cmd, &tmp_list, i_conn_node) { |
3791 | + struct se_cmd *se_cmd = &cmd->se_cmd; |
3792 | + |
3793 | + if (se_cmd->se_tfo != NULL) { |
3794 | + spin_lock(&se_cmd->t_state_lock); |
3795 | + se_cmd->transport_state |= CMD_T_FABRIC_STOP; |
3796 | + spin_unlock(&se_cmd->t_state_lock); |
3797 | + } |
3798 | + } |
3799 | + spin_unlock_bh(&conn->cmd_lock); |
3800 | + |
3801 | + list_for_each_entry_safe(cmd, cmd_tmp, &tmp_list, i_conn_node) { |
3802 | list_del_init(&cmd->i_conn_node); |
3803 | - spin_unlock_bh(&conn->cmd_lock); |
3804 | |
3805 | iscsit_increment_maxcmdsn(cmd, sess); |
3806 | - |
3807 | iscsit_free_cmd(cmd, true); |
3808 | |
3809 | - spin_lock_bh(&conn->cmd_lock); |
3810 | } |
3811 | - spin_unlock_bh(&conn->cmd_lock); |
3812 | } |
3813 | |
3814 | static void iscsit_stop_timers_for_cmds( |
3815 | diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c |
3816 | index 96e78c823d13..316f66172335 100644 |
3817 | --- a/drivers/target/iscsi/iscsi_target_login.c |
3818 | +++ b/drivers/target/iscsi/iscsi_target_login.c |
3819 | @@ -1357,8 +1357,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np) |
3820 | } |
3821 | login->zero_tsih = zero_tsih; |
3822 | |
3823 | - conn->sess->se_sess->sup_prot_ops = |
3824 | - conn->conn_transport->iscsit_get_sup_prot_ops(conn); |
3825 | + if (conn->sess) |
3826 | + conn->sess->se_sess->sup_prot_ops = |
3827 | + conn->conn_transport->iscsit_get_sup_prot_ops(conn); |
3828 | |
3829 | tpg = conn->tpg; |
3830 | if (!tpg) { |
3831 | diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c |
3832 | index 3436a83568ea..dcd5ed26eb18 100644 |
3833 | --- a/drivers/target/target_core_device.c |
3834 | +++ b/drivers/target/target_core_device.c |
3835 | @@ -832,13 +832,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) |
3836 | * in ATA and we need to set TPE=1 |
3837 | */ |
3838 | bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, |
3839 | - struct request_queue *q, int block_size) |
3840 | + struct request_queue *q) |
3841 | { |
3842 | + int block_size = queue_logical_block_size(q); |
3843 | + |
3844 | if (!blk_queue_discard(q)) |
3845 | return false; |
3846 | |
3847 | - attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) / |
3848 | - block_size; |
3849 | + attrib->max_unmap_lba_count = |
3850 | + q->limits.max_discard_sectors >> (ilog2(block_size) - 9); |
3851 | /* |
3852 | * Currently hardcoded to 1 in Linux/SCSI code.. |
3853 | */ |
3854 | diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c |
3855 | index 75f0f08b2a34..79291869bce6 100644 |
3856 | --- a/drivers/target/target_core_file.c |
3857 | +++ b/drivers/target/target_core_file.c |
3858 | @@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev) |
3859 | dev_size, div_u64(dev_size, fd_dev->fd_block_size), |
3860 | fd_dev->fd_block_size); |
3861 | |
3862 | - if (target_configure_unmap_from_queue(&dev->dev_attrib, q, |
3863 | - fd_dev->fd_block_size)) |
3864 | + if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) |
3865 | pr_debug("IFILE: BLOCK Discard support available," |
3866 | " disabled by default\n"); |
3867 | /* |
3868 | diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c |
3869 | index 2c53dcefff3e..4620c1dcdbc7 100644 |
3870 | --- a/drivers/target/target_core_iblock.c |
3871 | +++ b/drivers/target/target_core_iblock.c |
3872 | @@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev) |
3873 | dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); |
3874 | dev->dev_attrib.hw_queue_depth = q->nr_requests; |
3875 | |
3876 | - if (target_configure_unmap_from_queue(&dev->dev_attrib, q, |
3877 | - dev->dev_attrib.hw_block_size)) |
3878 | + if (target_configure_unmap_from_queue(&dev->dev_attrib, q)) |
3879 | pr_debug("IBLOCK: BLOCK Discard support available," |
3880 | " disabled by default\n"); |
3881 | |
3882 | diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h |
3883 | index dae0750c2032..253a91bff943 100644 |
3884 | --- a/drivers/target/target_core_internal.h |
3885 | +++ b/drivers/target/target_core_internal.h |
3886 | @@ -148,6 +148,7 @@ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size); |
3887 | void target_qf_do_work(struct work_struct *work); |
3888 | bool target_check_wce(struct se_device *dev); |
3889 | bool target_check_fua(struct se_device *dev); |
3890 | +void __target_execute_cmd(struct se_cmd *, bool); |
3891 | |
3892 | /* target_core_stat.c */ |
3893 | void target_stat_setup_dev_default_groups(struct se_device *); |
3894 | diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c |
3895 | index 98698d875742..c220bb8dfa9d 100644 |
3896 | --- a/drivers/target/target_core_sbc.c |
3897 | +++ b/drivers/target/target_core_sbc.c |
3898 | @@ -594,7 +594,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd, bool succes |
3899 | cmd->transport_state |= CMD_T_ACTIVE|CMD_T_BUSY|CMD_T_SENT; |
3900 | spin_unlock_irq(&cmd->t_state_lock); |
3901 | |
3902 | - __target_execute_cmd(cmd); |
3903 | + __target_execute_cmd(cmd, false); |
3904 | |
3905 | kfree(buf); |
3906 | return ret; |
3907 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c |
3908 | index d151bc3d6971..7bc3778a1ac9 100644 |
3909 | --- a/drivers/target/target_core_transport.c |
3910 | +++ b/drivers/target/target_core_transport.c |
3911 | @@ -1270,23 +1270,6 @@ target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb) |
3912 | |
3913 | trace_target_sequencer_start(cmd); |
3914 | |
3915 | - /* |
3916 | - * Check for an existing UNIT ATTENTION condition |
3917 | - */ |
3918 | - ret = target_scsi3_ua_check(cmd); |
3919 | - if (ret) |
3920 | - return ret; |
3921 | - |
3922 | - ret = target_alua_state_check(cmd); |
3923 | - if (ret) |
3924 | - return ret; |
3925 | - |
3926 | - ret = target_check_reservation(cmd); |
3927 | - if (ret) { |
3928 | - cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; |
3929 | - return ret; |
3930 | - } |
3931 | - |
3932 | ret = dev->transport->parse_cdb(cmd); |
3933 | if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) |
3934 | pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", |
3935 | @@ -1749,20 +1732,45 @@ queue_full: |
3936 | } |
3937 | EXPORT_SYMBOL(transport_generic_request_failure); |
3938 | |
3939 | -void __target_execute_cmd(struct se_cmd *cmd) |
3940 | +void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) |
3941 | { |
3942 | sense_reason_t ret; |
3943 | |
3944 | - if (cmd->execute_cmd) { |
3945 | - ret = cmd->execute_cmd(cmd); |
3946 | - if (ret) { |
3947 | - spin_lock_irq(&cmd->t_state_lock); |
3948 | - cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); |
3949 | - spin_unlock_irq(&cmd->t_state_lock); |
3950 | + if (!cmd->execute_cmd) { |
3951 | + ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; |
3952 | + goto err; |
3953 | + } |
3954 | + if (do_checks) { |
3955 | + /* |
3956 | + * Check for an existing UNIT ATTENTION condition after |
3957 | + * target_handle_task_attr() has done SAM task attr |
3958 | + * checking, and possibly have already defered execution |
3959 | + * out to target_restart_delayed_cmds() context. |
3960 | + */ |
3961 | + ret = target_scsi3_ua_check(cmd); |
3962 | + if (ret) |
3963 | + goto err; |
3964 | + |
3965 | + ret = target_alua_state_check(cmd); |
3966 | + if (ret) |
3967 | + goto err; |
3968 | |
3969 | - transport_generic_request_failure(cmd, ret); |
3970 | + ret = target_check_reservation(cmd); |
3971 | + if (ret) { |
3972 | + cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; |
3973 | + goto err; |
3974 | } |
3975 | } |
3976 | + |
3977 | + ret = cmd->execute_cmd(cmd); |
3978 | + if (!ret) |
3979 | + return; |
3980 | +err: |
3981 | + spin_lock_irq(&cmd->t_state_lock); |
3982 | + cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT); |
3983 | + spin_unlock_irq(&cmd->t_state_lock); |
3984 | + |
3985 | + transport_generic_request_failure(cmd, ret); |
3986 | } |
3987 | |
3988 | static int target_write_prot_action(struct se_cmd *cmd) |
3989 | @@ -1807,6 +1815,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd) |
3990 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
3991 | return false; |
3992 | |
3993 | + cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; |
3994 | + |
3995 | /* |
3996 | * Check for the existence of HEAD_OF_QUEUE, and if true return 1 |
3997 | * to allow the passed struct se_cmd list of tasks to the front of the list. |
3998 | @@ -1887,7 +1897,7 @@ void target_execute_cmd(struct se_cmd *cmd) |
3999 | return; |
4000 | } |
4001 | |
4002 | - __target_execute_cmd(cmd); |
4003 | + __target_execute_cmd(cmd, true); |
4004 | } |
4005 | EXPORT_SYMBOL(target_execute_cmd); |
4006 | |
4007 | @@ -1911,7 +1921,7 @@ static void target_restart_delayed_cmds(struct se_device *dev) |
4008 | list_del(&cmd->se_delayed_node); |
4009 | spin_unlock(&dev->delayed_cmd_lock); |
4010 | |
4011 | - __target_execute_cmd(cmd); |
4012 | + __target_execute_cmd(cmd, true); |
4013 | |
4014 | if (cmd->sam_task_attr == TCM_ORDERED_TAG) |
4015 | break; |
4016 | @@ -1929,6 +1939,9 @@ static void transport_complete_task_attr(struct se_cmd *cmd) |
4017 | if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) |
4018 | return; |
4019 | |
4020 | + if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) |
4021 | + goto restart; |
4022 | + |
4023 | if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { |
4024 | atomic_dec_mb(&dev->simple_cmds); |
4025 | dev->dev_cur_ordered_id++; |
4026 | @@ -1945,7 +1958,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd) |
4027 | pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", |
4028 | dev->dev_cur_ordered_id); |
4029 | } |
4030 | - |
4031 | +restart: |
4032 | target_restart_delayed_cmds(dev); |
4033 | } |
4034 | |
4035 | @@ -2533,15 +2546,10 @@ static void target_release_cmd_kref(struct kref *kref) |
4036 | bool fabric_stop; |
4037 | |
4038 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
4039 | - if (list_empty(&se_cmd->se_cmd_list)) { |
4040 | - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
4041 | - target_free_cmd_mem(se_cmd); |
4042 | - se_cmd->se_tfo->release_cmd(se_cmd); |
4043 | - return; |
4044 | - } |
4045 | |
4046 | spin_lock(&se_cmd->t_state_lock); |
4047 | - fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP); |
4048 | + fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP) && |
4049 | + (se_cmd->transport_state & CMD_T_ABORTED); |
4050 | spin_unlock(&se_cmd->t_state_lock); |
4051 | |
4052 | if (se_cmd->cmd_wait_set || fabric_stop) { |
4053 | diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c |
4054 | index 7bbadd176c74..7b5462eb8388 100644 |
4055 | --- a/drivers/tty/serial/atmel_serial.c |
4056 | +++ b/drivers/tty/serial/atmel_serial.c |
4057 | @@ -485,19 +485,21 @@ static void atmel_start_tx(struct uart_port *port) |
4058 | { |
4059 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
4060 | |
4061 | - if (atmel_use_pdc_tx(port)) { |
4062 | - if (atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN) |
4063 | - /* The transmitter is already running. Yes, we |
4064 | - really need this.*/ |
4065 | - return; |
4066 | + if (atmel_use_pdc_tx(port) && (atmel_uart_readl(port, ATMEL_PDC_PTSR) |
4067 | + & ATMEL_PDC_TXTEN)) |
4068 | + /* The transmitter is already running. Yes, we |
4069 | + really need this.*/ |
4070 | + return; |
4071 | |
4072 | + if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port)) |
4073 | if ((port->rs485.flags & SER_RS485_ENABLED) && |
4074 | !(port->rs485.flags & SER_RS485_RX_DURING_TX)) |
4075 | atmel_stop_rx(port); |
4076 | |
4077 | + if (atmel_use_pdc_tx(port)) |
4078 | /* re-enable PDC transmit */ |
4079 | atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); |
4080 | - } |
4081 | + |
4082 | /* Enable interrupts */ |
4083 | atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask); |
4084 | } |
4085 | diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c |
4086 | index dcde955475dc..e1de4944e0ce 100644 |
4087 | --- a/drivers/tty/serial/msm_serial.c |
4088 | +++ b/drivers/tty/serial/msm_serial.c |
4089 | @@ -726,7 +726,7 @@ static void msm_handle_tx(struct uart_port *port) |
4090 | return; |
4091 | } |
4092 | |
4093 | - pio_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); |
4094 | + pio_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); |
4095 | dma_count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); |
4096 | |
4097 | dma_min = 1; /* Always DMA */ |
4098 | diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c |
4099 | index 8320173af846..237ef5573c18 100644 |
4100 | --- a/drivers/tty/serial/samsung.c |
4101 | +++ b/drivers/tty/serial/samsung.c |
4102 | @@ -1676,7 +1676,7 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, |
4103 | return -ENODEV; |
4104 | |
4105 | if (port->mapbase != 0) |
4106 | - return 0; |
4107 | + return -EINVAL; |
4108 | |
4109 | /* setup info for port */ |
4110 | port->dev = &platdev->dev; |
4111 | @@ -1730,22 +1730,25 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, |
4112 | ourport->dma = devm_kzalloc(port->dev, |
4113 | sizeof(*ourport->dma), |
4114 | GFP_KERNEL); |
4115 | - if (!ourport->dma) |
4116 | - return -ENOMEM; |
4117 | + if (!ourport->dma) { |
4118 | + ret = -ENOMEM; |
4119 | + goto err; |
4120 | + } |
4121 | } |
4122 | |
4123 | ourport->clk = clk_get(&platdev->dev, "uart"); |
4124 | if (IS_ERR(ourport->clk)) { |
4125 | pr_err("%s: Controller clock not found\n", |
4126 | dev_name(&platdev->dev)); |
4127 | - return PTR_ERR(ourport->clk); |
4128 | + ret = PTR_ERR(ourport->clk); |
4129 | + goto err; |
4130 | } |
4131 | |
4132 | ret = clk_prepare_enable(ourport->clk); |
4133 | if (ret) { |
4134 | pr_err("uart: clock failed to prepare+enable: %d\n", ret); |
4135 | clk_put(ourport->clk); |
4136 | - return ret; |
4137 | + goto err; |
4138 | } |
4139 | |
4140 | /* Keep all interrupts masked and cleared */ |
4141 | @@ -1761,7 +1764,12 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport, |
4142 | |
4143 | /* reset the fifos (and setup the uart) */ |
4144 | s3c24xx_serial_resetport(port, cfg); |
4145 | + |
4146 | return 0; |
4147 | + |
4148 | +err: |
4149 | + port->mapbase = 0; |
4150 | + return ret; |
4151 | } |
4152 | |
4153 | /* Device driver serial port probe */ |
4154 | diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c |
4155 | index 38ae877c46e3..3ffb01ff6549 100644 |
4156 | --- a/drivers/usb/core/devio.c |
4157 | +++ b/drivers/usb/core/devio.c |
4158 | @@ -1203,10 +1203,11 @@ static int proc_getdriver(struct usb_dev_state *ps, void __user *arg) |
4159 | |
4160 | static int proc_connectinfo(struct usb_dev_state *ps, void __user *arg) |
4161 | { |
4162 | - struct usbdevfs_connectinfo ci = { |
4163 | - .devnum = ps->dev->devnum, |
4164 | - .slow = ps->dev->speed == USB_SPEED_LOW |
4165 | - }; |
4166 | + struct usbdevfs_connectinfo ci; |
4167 | + |
4168 | + memset(&ci, 0, sizeof(ci)); |
4169 | + ci.devnum = ps->dev->devnum; |
4170 | + ci.slow = ps->dev->speed == USB_SPEED_LOW; |
4171 | |
4172 | if (copy_to_user(arg, &ci, sizeof(ci))) |
4173 | return -EFAULT; |
4174 | diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c |
4175 | index 944a6dca0fcb..d2e50a27140c 100644 |
4176 | --- a/drivers/usb/core/quirks.c |
4177 | +++ b/drivers/usb/core/quirks.c |
4178 | @@ -128,6 +128,9 @@ static const struct usb_device_id usb_quirk_list[] = { |
4179 | { USB_DEVICE(0x04f3, 0x016f), .driver_info = |
4180 | USB_QUIRK_DEVICE_QUALIFIER }, |
4181 | |
4182 | + { USB_DEVICE(0x04f3, 0x0381), .driver_info = |
4183 | + USB_QUIRK_NO_LPM }, |
4184 | + |
4185 | { USB_DEVICE(0x04f3, 0x21b8), .driver_info = |
4186 | USB_QUIRK_DEVICE_QUALIFIER }, |
4187 | |
4188 | diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
4189 | index 69ffe6e8d77f..70900e6ca9bc 100644 |
4190 | --- a/drivers/usb/dwc3/gadget.c |
4191 | +++ b/drivers/usb/dwc3/gadget.c |
4192 | @@ -1965,6 +1965,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep, |
4193 | return 1; |
4194 | } |
4195 | |
4196 | + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) |
4197 | + if ((event->status & DEPEVT_STATUS_IOC) && |
4198 | + (trb->ctrl & DWC3_TRB_CTRL_IOC)) |
4199 | + return 0; |
4200 | return 1; |
4201 | } |
4202 | |
4203 | diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c |
4204 | index 97ef75af9632..803c503a2e3d 100644 |
4205 | --- a/drivers/usb/gadget/function/f_fs.c |
4206 | +++ b/drivers/usb/gadget/function/f_fs.c |
4207 | @@ -2740,6 +2740,7 @@ static int _ffs_func_bind(struct usb_configuration *c, |
4208 | func->ffs->ss_descs_count; |
4209 | |
4210 | int fs_len, hs_len, ss_len, ret, i; |
4211 | + struct ffs_ep *eps_ptr; |
4212 | |
4213 | /* Make it a single chunk, less management later on */ |
4214 | vla_group(d); |
4215 | @@ -2788,12 +2789,9 @@ static int _ffs_func_bind(struct usb_configuration *c, |
4216 | ffs->raw_descs_length); |
4217 | |
4218 | memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz); |
4219 | - for (ret = ffs->eps_count; ret; --ret) { |
4220 | - struct ffs_ep *ptr; |
4221 | - |
4222 | - ptr = vla_ptr(vlabuf, d, eps); |
4223 | - ptr[ret].num = -1; |
4224 | - } |
4225 | + eps_ptr = vla_ptr(vlabuf, d, eps); |
4226 | + for (i = 0; i < ffs->eps_count; i++) |
4227 | + eps_ptr[i].num = -1; |
4228 | |
4229 | /* Save pointers |
4230 | * d_eps == vlabuf, func->eps used to kfree vlabuf later |
4231 | diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c |
4232 | index 044ca79d3cb5..12628dd36e55 100644 |
4233 | --- a/drivers/usb/gadget/function/f_uac2.c |
4234 | +++ b/drivers/usb/gadget/function/f_uac2.c |
4235 | @@ -1291,6 +1291,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr) |
4236 | |
4237 | if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) { |
4238 | struct cntrl_cur_lay3 c; |
4239 | + memset(&c, 0, sizeof(struct cntrl_cur_lay3)); |
4240 | |
4241 | if (entity_id == USB_IN_CLK_ID) |
4242 | c.dCUR = p_srate; |
4243 | diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c |
4244 | index f1893e08e51a..db565f620f82 100644 |
4245 | --- a/drivers/usb/renesas_usbhs/fifo.c |
4246 | +++ b/drivers/usb/renesas_usbhs/fifo.c |
4247 | @@ -808,20 +808,27 @@ static void xfer_work(struct work_struct *work) |
4248 | { |
4249 | struct usbhs_pkt *pkt = container_of(work, struct usbhs_pkt, work); |
4250 | struct usbhs_pipe *pipe = pkt->pipe; |
4251 | - struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); |
4252 | + struct usbhs_fifo *fifo; |
4253 | struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); |
4254 | struct dma_async_tx_descriptor *desc; |
4255 | - struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt); |
4256 | + struct dma_chan *chan; |
4257 | struct device *dev = usbhs_priv_to_dev(priv); |
4258 | enum dma_transfer_direction dir; |
4259 | + unsigned long flags; |
4260 | |
4261 | + usbhs_lock(priv, flags); |
4262 | + fifo = usbhs_pipe_to_fifo(pipe); |
4263 | + if (!fifo) |
4264 | + goto xfer_work_end; |
4265 | + |
4266 | + chan = usbhsf_dma_chan_get(fifo, pkt); |
4267 | dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; |
4268 | |
4269 | desc = dmaengine_prep_slave_single(chan, pkt->dma + pkt->actual, |
4270 | pkt->trans, dir, |
4271 | DMA_PREP_INTERRUPT | DMA_CTRL_ACK); |
4272 | if (!desc) |
4273 | - return; |
4274 | + goto xfer_work_end; |
4275 | |
4276 | desc->callback = usbhsf_dma_complete; |
4277 | desc->callback_param = pipe; |
4278 | @@ -829,7 +836,7 @@ static void xfer_work(struct work_struct *work) |
4279 | pkt->cookie = dmaengine_submit(desc); |
4280 | if (pkt->cookie < 0) { |
4281 | dev_err(dev, "Failed to submit dma descriptor\n"); |
4282 | - return; |
4283 | + goto xfer_work_end; |
4284 | } |
4285 | |
4286 | dev_dbg(dev, " %s %d (%d/ %d)\n", |
4287 | @@ -840,6 +847,9 @@ static void xfer_work(struct work_struct *work) |
4288 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); |
4289 | dma_async_issue_pending(chan); |
4290 | usbhs_pipe_enable(pipe); |
4291 | + |
4292 | +xfer_work_end: |
4293 | + usbhs_unlock(priv, flags); |
4294 | } |
4295 | |
4296 | /* |
4297 | diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c |
4298 | index fa14198daf77..5a3abf56d56b 100644 |
4299 | --- a/drivers/usb/renesas_usbhs/mod_gadget.c |
4300 | +++ b/drivers/usb/renesas_usbhs/mod_gadget.c |
4301 | @@ -586,6 +586,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep, |
4302 | struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv); |
4303 | struct usbhs_pipe *pipe; |
4304 | int ret = -EIO; |
4305 | + unsigned long flags; |
4306 | + |
4307 | + usbhs_lock(priv, flags); |
4308 | |
4309 | /* |
4310 | * if it already have pipe, |
4311 | @@ -594,7 +597,8 @@ static int usbhsg_ep_enable(struct usb_ep *ep, |
4312 | if (uep->pipe) { |
4313 | usbhs_pipe_clear(uep->pipe); |
4314 | usbhs_pipe_sequence_data0(uep->pipe); |
4315 | - return 0; |
4316 | + ret = 0; |
4317 | + goto usbhsg_ep_enable_end; |
4318 | } |
4319 | |
4320 | pipe = usbhs_pipe_malloc(priv, |
4321 | @@ -622,6 +626,9 @@ static int usbhsg_ep_enable(struct usb_ep *ep, |
4322 | ret = 0; |
4323 | } |
4324 | |
4325 | +usbhsg_ep_enable_end: |
4326 | + usbhs_unlock(priv, flags); |
4327 | + |
4328 | return ret; |
4329 | } |
4330 | |
4331 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
4332 | index d96d423d00e6..8e07536c233a 100644 |
4333 | --- a/drivers/usb/serial/option.c |
4334 | +++ b/drivers/usb/serial/option.c |
4335 | @@ -273,6 +273,7 @@ static void option_instat_callback(struct urb *urb); |
4336 | #define TELIT_PRODUCT_LE922_USBCFG5 0x1045 |
4337 | #define TELIT_PRODUCT_LE920 0x1200 |
4338 | #define TELIT_PRODUCT_LE910 0x1201 |
4339 | +#define TELIT_PRODUCT_LE910_USBCFG4 0x1206 |
4340 | |
4341 | /* ZTE PRODUCTS */ |
4342 | #define ZTE_VENDOR_ID 0x19d2 |
4343 | @@ -1198,6 +1199,8 @@ static const struct usb_device_id option_ids[] = { |
4344 | .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 }, |
4345 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910), |
4346 | .driver_info = (kernel_ulong_t)&telit_le910_blacklist }, |
4347 | + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4), |
4348 | + .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 }, |
4349 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), |
4350 | .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, |
4351 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
4352 | diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c |
4353 | index 8ab6238c9299..56f7e2521202 100644 |
4354 | --- a/drivers/virtio/virtio_balloon.c |
4355 | +++ b/drivers/virtio/virtio_balloon.c |
4356 | @@ -196,6 +196,8 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num) |
4357 | num = min(num, ARRAY_SIZE(vb->pfns)); |
4358 | |
4359 | mutex_lock(&vb->balloon_lock); |
4360 | + /* We can't release more pages than taken */ |
4361 | + num = min(num, (size_t)vb->num_pages); |
4362 | for (vb->num_pfns = 0; vb->num_pfns < num; |
4363 | vb->num_pfns += VIRTIO_BALLOON_PAGES_PER_PAGE) { |
4364 | page = balloon_page_dequeue(vb_dev_info); |
4365 | diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c |
4366 | index 0e2f43bccf1f..0c427d6a12d1 100644 |
4367 | --- a/drivers/w1/masters/omap_hdq.c |
4368 | +++ b/drivers/w1/masters/omap_hdq.c |
4369 | @@ -390,8 +390,6 @@ static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) |
4370 | goto out; |
4371 | } |
4372 | |
4373 | - hdq_data->hdq_irqstatus = 0; |
4374 | - |
4375 | if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { |
4376 | hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, |
4377 | OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, |
4378 | diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c |
4379 | index 9abe18763a7f..257bbdcb5df6 100644 |
4380 | --- a/fs/btrfs/extent_io.c |
4381 | +++ b/fs/btrfs/extent_io.c |
4382 | @@ -2786,12 +2786,6 @@ struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask) |
4383 | btrfs_bio->csum = NULL; |
4384 | btrfs_bio->csum_allocated = NULL; |
4385 | btrfs_bio->end_io = NULL; |
4386 | - |
4387 | -#ifdef CONFIG_BLK_CGROUP |
4388 | - /* FIXME, put this into bio_clone_bioset */ |
4389 | - if (bio->bi_css) |
4390 | - bio_associate_blkcg(new, bio->bi_css); |
4391 | -#endif |
4392 | } |
4393 | return new; |
4394 | } |
4395 | diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h |
4396 | index 3182273a3407..1418daa03d95 100644 |
4397 | --- a/fs/cifs/cifs_fs_sb.h |
4398 | +++ b/fs/cifs/cifs_fs_sb.h |
4399 | @@ -46,6 +46,9 @@ |
4400 | #define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */ |
4401 | #define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */ |
4402 | #define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */ |
4403 | +#define CIFS_MOUNT_USE_PREFIX_PATH 0x1000000 /* make subpath with unaccessible |
4404 | + * root mountable |
4405 | + */ |
4406 | |
4407 | struct cifs_sb_info { |
4408 | struct rb_root tlink_tree; |
4409 | @@ -67,5 +70,6 @@ struct cifs_sb_info { |
4410 | struct backing_dev_info bdi; |
4411 | struct delayed_work prune_tlinks; |
4412 | struct rcu_head rcu; |
4413 | + char *prepath; |
4414 | }; |
4415 | #endif /* _CIFS_FS_SB_H */ |
4416 | diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c |
4417 | index e682b36a210f..4acbc390a7d6 100644 |
4418 | --- a/fs/cifs/cifsencrypt.c |
4419 | +++ b/fs/cifs/cifsencrypt.c |
4420 | @@ -731,24 +731,26 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) |
4421 | |
4422 | memcpy(ses->auth_key.response + baselen, tiblob, tilen); |
4423 | |
4424 | + mutex_lock(&ses->server->srv_mutex); |
4425 | + |
4426 | rc = crypto_hmacmd5_alloc(ses->server); |
4427 | if (rc) { |
4428 | cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc); |
4429 | - goto setup_ntlmv2_rsp_ret; |
4430 | + goto unlock; |
4431 | } |
4432 | |
4433 | /* calculate ntlmv2_hash */ |
4434 | rc = calc_ntlmv2_hash(ses, ntlmv2_hash, nls_cp); |
4435 | if (rc) { |
4436 | cifs_dbg(VFS, "could not get v2 hash rc %d\n", rc); |
4437 | - goto setup_ntlmv2_rsp_ret; |
4438 | + goto unlock; |
4439 | } |
4440 | |
4441 | /* calculate first part of the client response (CR1) */ |
4442 | rc = CalcNTLMv2_response(ses, ntlmv2_hash); |
4443 | if (rc) { |
4444 | cifs_dbg(VFS, "Could not calculate CR1 rc: %d\n", rc); |
4445 | - goto setup_ntlmv2_rsp_ret; |
4446 | + goto unlock; |
4447 | } |
4448 | |
4449 | /* now calculate the session key for NTLMv2 */ |
4450 | @@ -757,13 +759,13 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) |
4451 | if (rc) { |
4452 | cifs_dbg(VFS, "%s: Could not set NTLMV2 Hash as a key\n", |
4453 | __func__); |
4454 | - goto setup_ntlmv2_rsp_ret; |
4455 | + goto unlock; |
4456 | } |
4457 | |
4458 | rc = crypto_shash_init(&ses->server->secmech.sdeschmacmd5->shash); |
4459 | if (rc) { |
4460 | cifs_dbg(VFS, "%s: Could not init hmacmd5\n", __func__); |
4461 | - goto setup_ntlmv2_rsp_ret; |
4462 | + goto unlock; |
4463 | } |
4464 | |
4465 | rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash, |
4466 | @@ -771,7 +773,7 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) |
4467 | CIFS_HMAC_MD5_HASH_SIZE); |
4468 | if (rc) { |
4469 | cifs_dbg(VFS, "%s: Could not update with response\n", __func__); |
4470 | - goto setup_ntlmv2_rsp_ret; |
4471 | + goto unlock; |
4472 | } |
4473 | |
4474 | rc = crypto_shash_final(&ses->server->secmech.sdeschmacmd5->shash, |
4475 | @@ -779,6 +781,8 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) |
4476 | if (rc) |
4477 | cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); |
4478 | |
4479 | +unlock: |
4480 | + mutex_unlock(&ses->server->srv_mutex); |
4481 | setup_ntlmv2_rsp_ret: |
4482 | kfree(tiblob); |
4483 | |
4484 | diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c |
4485 | index cbc0f4bca0c0..450578097fb7 100644 |
4486 | --- a/fs/cifs/cifsfs.c |
4487 | +++ b/fs/cifs/cifsfs.c |
4488 | @@ -686,6 +686,14 @@ cifs_do_mount(struct file_system_type *fs_type, |
4489 | goto out_cifs_sb; |
4490 | } |
4491 | |
4492 | + if (volume_info->prepath) { |
4493 | + cifs_sb->prepath = kstrdup(volume_info->prepath, GFP_KERNEL); |
4494 | + if (cifs_sb->prepath == NULL) { |
4495 | + root = ERR_PTR(-ENOMEM); |
4496 | + goto out_cifs_sb; |
4497 | + } |
4498 | + } |
4499 | + |
4500 | cifs_setup_cifs_sb(volume_info, cifs_sb); |
4501 | |
4502 | rc = cifs_mount(cifs_sb, volume_info); |
4503 | @@ -724,7 +732,11 @@ cifs_do_mount(struct file_system_type *fs_type, |
4504 | sb->s_flags |= MS_ACTIVE; |
4505 | } |
4506 | |
4507 | - root = cifs_get_root(volume_info, sb); |
4508 | + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) |
4509 | + root = dget(sb->s_root); |
4510 | + else |
4511 | + root = cifs_get_root(volume_info, sb); |
4512 | + |
4513 | if (IS_ERR(root)) |
4514 | goto out_super; |
4515 | |
4516 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c |
4517 | index 5481a6eb9a95..61c3a5ab8637 100644 |
4518 | --- a/fs/cifs/connect.c |
4519 | +++ b/fs/cifs/connect.c |
4520 | @@ -3517,6 +3517,44 @@ cifs_get_volume_info(char *mount_data, const char *devname) |
4521 | return volume_info; |
4522 | } |
4523 | |
4524 | +static int |
4525 | +cifs_are_all_path_components_accessible(struct TCP_Server_Info *server, |
4526 | + unsigned int xid, |
4527 | + struct cifs_tcon *tcon, |
4528 | + struct cifs_sb_info *cifs_sb, |
4529 | + char *full_path) |
4530 | +{ |
4531 | + int rc; |
4532 | + char *s; |
4533 | + char sep, tmp; |
4534 | + |
4535 | + sep = CIFS_DIR_SEP(cifs_sb); |
4536 | + s = full_path; |
4537 | + |
4538 | + rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, ""); |
4539 | + while (rc == 0) { |
4540 | + /* skip separators */ |
4541 | + while (*s == sep) |
4542 | + s++; |
4543 | + if (!*s) |
4544 | + break; |
4545 | + /* next separator */ |
4546 | + while (*s && *s != sep) |
4547 | + s++; |
4548 | + |
4549 | + /* |
4550 | + * temporarily null-terminate the path at the end of |
4551 | + * the current component |
4552 | + */ |
4553 | + tmp = *s; |
4554 | + *s = 0; |
4555 | + rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, |
4556 | + full_path); |
4557 | + *s = tmp; |
4558 | + } |
4559 | + return rc; |
4560 | +} |
4561 | + |
4562 | int |
4563 | cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) |
4564 | { |
4565 | @@ -3654,6 +3692,16 @@ remote_path_check: |
4566 | kfree(full_path); |
4567 | goto mount_fail_check; |
4568 | } |
4569 | + |
4570 | + rc = cifs_are_all_path_components_accessible(server, |
4571 | + xid, tcon, cifs_sb, |
4572 | + full_path); |
4573 | + if (rc != 0) { |
4574 | + cifs_dbg(VFS, "cannot query dirs between root and final path, " |
4575 | + "enabling CIFS_MOUNT_USE_PREFIX_PATH\n"); |
4576 | + cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH; |
4577 | + rc = 0; |
4578 | + } |
4579 | kfree(full_path); |
4580 | } |
4581 | |
4582 | @@ -3923,6 +3971,7 @@ cifs_umount(struct cifs_sb_info *cifs_sb) |
4583 | |
4584 | bdi_destroy(&cifs_sb->bdi); |
4585 | kfree(cifs_sb->mountdata); |
4586 | + kfree(cifs_sb->prepath); |
4587 | call_rcu(&cifs_sb->rcu, delayed_free); |
4588 | } |
4589 | |
4590 | diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c |
4591 | index c3eb998a99bd..26a3b389a265 100644 |
4592 | --- a/fs/cifs/dir.c |
4593 | +++ b/fs/cifs/dir.c |
4594 | @@ -84,6 +84,7 @@ build_path_from_dentry(struct dentry *direntry) |
4595 | struct dentry *temp; |
4596 | int namelen; |
4597 | int dfsplen; |
4598 | + int pplen = 0; |
4599 | char *full_path; |
4600 | char dirsep; |
4601 | struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); |
4602 | @@ -95,8 +96,12 @@ build_path_from_dentry(struct dentry *direntry) |
4603 | dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); |
4604 | else |
4605 | dfsplen = 0; |
4606 | + |
4607 | + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) |
4608 | + pplen = cifs_sb->prepath ? strlen(cifs_sb->prepath) + 1 : 0; |
4609 | + |
4610 | cifs_bp_rename_retry: |
4611 | - namelen = dfsplen; |
4612 | + namelen = dfsplen + pplen; |
4613 | seq = read_seqbegin(&rename_lock); |
4614 | rcu_read_lock(); |
4615 | for (temp = direntry; !IS_ROOT(temp);) { |
4616 | @@ -137,7 +142,7 @@ cifs_bp_rename_retry: |
4617 | } |
4618 | } |
4619 | rcu_read_unlock(); |
4620 | - if (namelen != dfsplen || read_seqretry(&rename_lock, seq)) { |
4621 | + if (namelen != dfsplen + pplen || read_seqretry(&rename_lock, seq)) { |
4622 | cifs_dbg(FYI, "did not end path lookup where expected. namelen=%ddfsplen=%d\n", |
4623 | namelen, dfsplen); |
4624 | /* presumably this is only possible if racing with a rename |
4625 | @@ -153,6 +158,17 @@ cifs_bp_rename_retry: |
4626 | those safely to '/' if any are found in the middle of the prepath */ |
4627 | /* BB test paths to Windows with '/' in the midst of prepath */ |
4628 | |
4629 | + if (pplen) { |
4630 | + int i; |
4631 | + |
4632 | + cifs_dbg(FYI, "using cifs_sb prepath <%s>\n", cifs_sb->prepath); |
4633 | + memcpy(full_path+dfsplen+1, cifs_sb->prepath, pplen-1); |
4634 | + full_path[dfsplen] = '\\'; |
4635 | + for (i = 0; i < pplen-1; i++) |
4636 | + if (full_path[dfsplen+1+i] == '/') |
4637 | + full_path[dfsplen+1+i] = CIFS_DIR_SEP(cifs_sb); |
4638 | + } |
4639 | + |
4640 | if (dfsplen) { |
4641 | strncpy(full_path, tcon->treeName, dfsplen); |
4642 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { |
4643 | @@ -229,6 +245,13 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, |
4644 | goto cifs_create_get_file_info; |
4645 | } |
4646 | |
4647 | + if (S_ISDIR(newinode->i_mode)) { |
4648 | + CIFSSMBClose(xid, tcon, fid->netfid); |
4649 | + iput(newinode); |
4650 | + rc = -EISDIR; |
4651 | + goto out; |
4652 | + } |
4653 | + |
4654 | if (!S_ISREG(newinode->i_mode)) { |
4655 | /* |
4656 | * The server may allow us to open things like |
4657 | @@ -399,10 +422,14 @@ cifs_create_set_dentry: |
4658 | if (rc != 0) { |
4659 | cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n", |
4660 | rc); |
4661 | - if (server->ops->close) |
4662 | - server->ops->close(xid, tcon, fid); |
4663 | - goto out; |
4664 | + goto out_err; |
4665 | } |
4666 | + |
4667 | + if (S_ISDIR(newinode->i_mode)) { |
4668 | + rc = -EISDIR; |
4669 | + goto out_err; |
4670 | + } |
4671 | + |
4672 | d_drop(direntry); |
4673 | d_add(direntry, newinode); |
4674 | |
4675 | @@ -410,6 +437,13 @@ out: |
4676 | kfree(buf); |
4677 | kfree(full_path); |
4678 | return rc; |
4679 | + |
4680 | +out_err: |
4681 | + if (server->ops->close) |
4682 | + server->ops->close(xid, tcon, fid); |
4683 | + if (newinode) |
4684 | + iput(newinode); |
4685 | + goto out; |
4686 | } |
4687 | |
4688 | int |
4689 | diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c |
4690 | index a329f5ba35aa..9cdeb0293267 100644 |
4691 | --- a/fs/cifs/inode.c |
4692 | +++ b/fs/cifs/inode.c |
4693 | @@ -982,10 +982,26 @@ struct inode *cifs_root_iget(struct super_block *sb) |
4694 | struct inode *inode = NULL; |
4695 | long rc; |
4696 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
4697 | + char *path = NULL; |
4698 | + int len; |
4699 | + |
4700 | + if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) |
4701 | + && cifs_sb->prepath) { |
4702 | + len = strlen(cifs_sb->prepath); |
4703 | + path = kzalloc(len + 2 /* leading sep + null */, GFP_KERNEL); |
4704 | + if (path == NULL) |
4705 | + return ERR_PTR(-ENOMEM); |
4706 | + path[0] = '/'; |
4707 | + memcpy(path+1, cifs_sb->prepath, len); |
4708 | + } else { |
4709 | + path = kstrdup("", GFP_KERNEL); |
4710 | + if (path == NULL) |
4711 | + return ERR_PTR(-ENOMEM); |
4712 | + } |
4713 | |
4714 | xid = get_xid(); |
4715 | if (tcon->unix_ext) { |
4716 | - rc = cifs_get_inode_info_unix(&inode, "", sb, xid); |
4717 | + rc = cifs_get_inode_info_unix(&inode, path, sb, xid); |
4718 | /* some servers mistakenly claim POSIX support */ |
4719 | if (rc != -EOPNOTSUPP) |
4720 | goto iget_no_retry; |
4721 | @@ -993,7 +1009,8 @@ struct inode *cifs_root_iget(struct super_block *sb) |
4722 | tcon->unix_ext = false; |
4723 | } |
4724 | |
4725 | - rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL); |
4726 | + convert_delimiter(path, CIFS_DIR_SEP(cifs_sb)); |
4727 | + rc = cifs_get_inode_info(&inode, path, NULL, sb, xid, NULL); |
4728 | |
4729 | iget_no_retry: |
4730 | if (!inode) { |
4731 | @@ -1022,6 +1039,7 @@ iget_no_retry: |
4732 | } |
4733 | |
4734 | out: |
4735 | + kfree(path); |
4736 | /* can not call macro free_xid here since in a void func |
4737 | * TODO: This is no longer true |
4738 | */ |
4739 | diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c |
4740 | index 53ccdde6ff18..dd8543caa56e 100644 |
4741 | --- a/fs/cifs/smb2ops.c |
4742 | +++ b/fs/cifs/smb2ops.c |
4743 | @@ -1039,6 +1039,9 @@ smb2_new_lease_key(struct cifs_fid *fid) |
4744 | get_random_bytes(fid->lease_key, SMB2_LEASE_KEY_SIZE); |
4745 | } |
4746 | |
4747 | +#define SMB2_SYMLINK_STRUCT_SIZE \ |
4748 | + (sizeof(struct smb2_err_rsp) - 1 + sizeof(struct smb2_symlink_err_rsp)) |
4749 | + |
4750 | static int |
4751 | smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, |
4752 | const char *full_path, char **target_path, |
4753 | @@ -1051,7 +1054,10 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, |
4754 | struct cifs_fid fid; |
4755 | struct smb2_err_rsp *err_buf = NULL; |
4756 | struct smb2_symlink_err_rsp *symlink; |
4757 | - unsigned int sub_len, sub_offset; |
4758 | + unsigned int sub_len; |
4759 | + unsigned int sub_offset; |
4760 | + unsigned int print_len; |
4761 | + unsigned int print_offset; |
4762 | |
4763 | cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path); |
4764 | |
4765 | @@ -1072,11 +1078,33 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, |
4766 | kfree(utf16_path); |
4767 | return -ENOENT; |
4768 | } |
4769 | + |
4770 | + if (le32_to_cpu(err_buf->ByteCount) < sizeof(struct smb2_symlink_err_rsp) || |
4771 | + get_rfc1002_length(err_buf) + 4 < SMB2_SYMLINK_STRUCT_SIZE) { |
4772 | + kfree(utf16_path); |
4773 | + return -ENOENT; |
4774 | + } |
4775 | + |
4776 | /* open must fail on symlink - reset rc */ |
4777 | rc = 0; |
4778 | symlink = (struct smb2_symlink_err_rsp *)err_buf->ErrorData; |
4779 | sub_len = le16_to_cpu(symlink->SubstituteNameLength); |
4780 | sub_offset = le16_to_cpu(symlink->SubstituteNameOffset); |
4781 | + print_len = le16_to_cpu(symlink->PrintNameLength); |
4782 | + print_offset = le16_to_cpu(symlink->PrintNameOffset); |
4783 | + |
4784 | + if (get_rfc1002_length(err_buf) + 4 < |
4785 | + SMB2_SYMLINK_STRUCT_SIZE + sub_offset + sub_len) { |
4786 | + kfree(utf16_path); |
4787 | + return -ENOENT; |
4788 | + } |
4789 | + |
4790 | + if (get_rfc1002_length(err_buf) + 4 < |
4791 | + SMB2_SYMLINK_STRUCT_SIZE + print_offset + print_len) { |
4792 | + kfree(utf16_path); |
4793 | + return -ENOENT; |
4794 | + } |
4795 | + |
4796 | *target_path = cifs_strndup_from_utf16( |
4797 | (char *)symlink->PathBuffer + sub_offset, |
4798 | sub_len, true, cifs_sb->local_nls); |
4799 | diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c |
4800 | index 36345fefa3ff..2d964ce45606 100644 |
4801 | --- a/fs/jbd2/commit.c |
4802 | +++ b/fs/jbd2/commit.c |
4803 | @@ -124,7 +124,7 @@ static int journal_submit_commit_record(journal_t *journal, |
4804 | struct commit_header *tmp; |
4805 | struct buffer_head *bh; |
4806 | int ret; |
4807 | - struct timespec now = current_kernel_time(); |
4808 | + struct timespec64 now = current_kernel_time64(); |
4809 | |
4810 | *cbh = NULL; |
4811 | |
4812 | diff --git a/fs/nfs/write.c b/fs/nfs/write.c |
4813 | index 7b9316406930..7a9b6e347249 100644 |
4814 | --- a/fs/nfs/write.c |
4815 | +++ b/fs/nfs/write.c |
4816 | @@ -1261,6 +1261,9 @@ int nfs_updatepage(struct file *file, struct page *page, |
4817 | dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n", |
4818 | file, count, (long long)(page_file_offset(page) + offset)); |
4819 | |
4820 | + if (!count) |
4821 | + goto out; |
4822 | + |
4823 | if (nfs_can_extend_write(file, page, inode)) { |
4824 | count = max(count + offset, nfs_page_length(page)); |
4825 | offset = 0; |
4826 | @@ -1271,7 +1274,7 @@ int nfs_updatepage(struct file *file, struct page *page, |
4827 | nfs_set_pageerror(page); |
4828 | else |
4829 | __set_page_dirty_nobuffers(page); |
4830 | - |
4831 | +out: |
4832 | dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", |
4833 | status, (long long)i_size_read(inode)); |
4834 | return status; |
4835 | diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c |
4836 | index ed2f64ca49de..f7ea624780a7 100644 |
4837 | --- a/fs/nfsd/nfs4state.c |
4838 | +++ b/fs/nfsd/nfs4state.c |
4839 | @@ -4882,6 +4882,32 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4840 | return nfs_ok; |
4841 | } |
4842 | |
4843 | +static __be32 |
4844 | +nfsd4_free_lock_stateid(stateid_t *stateid, struct nfs4_stid *s) |
4845 | +{ |
4846 | + struct nfs4_ol_stateid *stp = openlockstateid(s); |
4847 | + __be32 ret; |
4848 | + |
4849 | + mutex_lock(&stp->st_mutex); |
4850 | + |
4851 | + ret = check_stateid_generation(stateid, &s->sc_stateid, 1); |
4852 | + if (ret) |
4853 | + goto out; |
4854 | + |
4855 | + ret = nfserr_locks_held; |
4856 | + if (check_for_locks(stp->st_stid.sc_file, |
4857 | + lockowner(stp->st_stateowner))) |
4858 | + goto out; |
4859 | + |
4860 | + release_lock_stateid(stp); |
4861 | + ret = nfs_ok; |
4862 | + |
4863 | +out: |
4864 | + mutex_unlock(&stp->st_mutex); |
4865 | + nfs4_put_stid(s); |
4866 | + return ret; |
4867 | +} |
4868 | + |
4869 | __be32 |
4870 | nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4871 | struct nfsd4_free_stateid *free_stateid) |
4872 | @@ -4889,7 +4915,6 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4873 | stateid_t *stateid = &free_stateid->fr_stateid; |
4874 | struct nfs4_stid *s; |
4875 | struct nfs4_delegation *dp; |
4876 | - struct nfs4_ol_stateid *stp; |
4877 | struct nfs4_client *cl = cstate->session->se_client; |
4878 | __be32 ret = nfserr_bad_stateid; |
4879 | |
4880 | @@ -4908,18 +4933,9 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4881 | ret = nfserr_locks_held; |
4882 | break; |
4883 | case NFS4_LOCK_STID: |
4884 | - ret = check_stateid_generation(stateid, &s->sc_stateid, 1); |
4885 | - if (ret) |
4886 | - break; |
4887 | - stp = openlockstateid(s); |
4888 | - ret = nfserr_locks_held; |
4889 | - if (check_for_locks(stp->st_stid.sc_file, |
4890 | - lockowner(stp->st_stateowner))) |
4891 | - break; |
4892 | - WARN_ON(!unhash_lock_stateid(stp)); |
4893 | + atomic_inc(&s->sc_count); |
4894 | spin_unlock(&cl->cl_lock); |
4895 | - nfs4_put_stid(s); |
4896 | - ret = nfs_ok; |
4897 | + ret = nfsd4_free_lock_stateid(stateid, s); |
4898 | goto out; |
4899 | case NFS4_REVOKED_DELEG_STID: |
4900 | dp = delegstateid(s); |
4901 | @@ -5486,7 +5502,7 @@ static __be32 |
4902 | lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, |
4903 | struct nfs4_ol_stateid *ost, |
4904 | struct nfsd4_lock *lock, |
4905 | - struct nfs4_ol_stateid **lst, bool *new) |
4906 | + struct nfs4_ol_stateid **plst, bool *new) |
4907 | { |
4908 | __be32 status; |
4909 | struct nfs4_file *fi = ost->st_stid.sc_file; |
4910 | @@ -5494,7 +5510,9 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, |
4911 | struct nfs4_client *cl = oo->oo_owner.so_client; |
4912 | struct inode *inode = d_inode(cstate->current_fh.fh_dentry); |
4913 | struct nfs4_lockowner *lo; |
4914 | + struct nfs4_ol_stateid *lst; |
4915 | unsigned int strhashval; |
4916 | + bool hashed; |
4917 | |
4918 | lo = find_lockowner_str(cl, &lock->lk_new_owner); |
4919 | if (!lo) { |
4920 | @@ -5510,12 +5528,27 @@ lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, |
4921 | goto out; |
4922 | } |
4923 | |
4924 | - *lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); |
4925 | - if (*lst == NULL) { |
4926 | +retry: |
4927 | + lst = find_or_create_lock_stateid(lo, fi, inode, ost, new); |
4928 | + if (lst == NULL) { |
4929 | status = nfserr_jukebox; |
4930 | goto out; |
4931 | } |
4932 | + |
4933 | + mutex_lock(&lst->st_mutex); |
4934 | + |
4935 | + /* See if it's still hashed to avoid race with FREE_STATEID */ |
4936 | + spin_lock(&cl->cl_lock); |
4937 | + hashed = !list_empty(&lst->st_perfile); |
4938 | + spin_unlock(&cl->cl_lock); |
4939 | + |
4940 | + if (!hashed) { |
4941 | + mutex_unlock(&lst->st_mutex); |
4942 | + nfs4_put_stid(&lst->st_stid); |
4943 | + goto retry; |
4944 | + } |
4945 | status = nfs_ok; |
4946 | + *plst = lst; |
4947 | out: |
4948 | nfs4_put_stateowner(&lo->lo_owner); |
4949 | return status; |
4950 | @@ -5582,8 +5615,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
4951 | goto out; |
4952 | status = lookup_or_create_lock_state(cstate, open_stp, lock, |
4953 | &lock_stp, &new); |
4954 | - if (status == nfs_ok) |
4955 | - mutex_lock(&lock_stp->st_mutex); |
4956 | } else { |
4957 | status = nfs4_preprocess_seqid_op(cstate, |
4958 | lock->lk_old_lock_seqid, |
4959 | diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c |
4960 | index a1acc6004a91..70a7bbe199d0 100644 |
4961 | --- a/fs/overlayfs/super.c |
4962 | +++ b/fs/overlayfs/super.c |
4963 | @@ -376,7 +376,8 @@ static struct ovl_entry *ovl_alloc_entry(unsigned int numlower) |
4964 | static bool ovl_dentry_remote(struct dentry *dentry) |
4965 | { |
4966 | return dentry->d_flags & |
4967 | - (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE); |
4968 | + (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE | |
4969 | + DCACHE_OP_REAL); |
4970 | } |
4971 | |
4972 | static bool ovl_dentry_weird(struct dentry *dentry) |
4973 | diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h |
4974 | index 1b4d69f68c33..140c29635069 100644 |
4975 | --- a/include/linux/backing-dev-defs.h |
4976 | +++ b/include/linux/backing-dev-defs.h |
4977 | @@ -163,6 +163,7 @@ struct backing_dev_info { |
4978 | wait_queue_head_t wb_waitq; |
4979 | |
4980 | struct device *dev; |
4981 | + struct device *owner; |
4982 | |
4983 | struct timer_list laptop_mode_wb_timer; |
4984 | |
4985 | diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h |
4986 | index c82794f20110..89d3de3e096b 100644 |
4987 | --- a/include/linux/backing-dev.h |
4988 | +++ b/include/linux/backing-dev.h |
4989 | @@ -24,6 +24,7 @@ __printf(3, 4) |
4990 | int bdi_register(struct backing_dev_info *bdi, struct device *parent, |
4991 | const char *fmt, ...); |
4992 | int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); |
4993 | +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner); |
4994 | void bdi_unregister(struct backing_dev_info *bdi); |
4995 | |
4996 | int __must_check bdi_setup_and_register(struct backing_dev_info *, char *); |
4997 | diff --git a/include/linux/bio.h b/include/linux/bio.h |
4998 | index fbe47bc700bd..42e4e3cbb001 100644 |
4999 | --- a/include/linux/bio.h |
5000 | +++ b/include/linux/bio.h |
5001 | @@ -527,11 +527,14 @@ extern unsigned int bvec_nr_vecs(unsigned short idx); |
5002 | int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); |
5003 | int bio_associate_current(struct bio *bio); |
5004 | void bio_disassociate_task(struct bio *bio); |
5005 | +void bio_clone_blkcg_association(struct bio *dst, struct bio *src); |
5006 | #else /* CONFIG_BLK_CGROUP */ |
5007 | static inline int bio_associate_blkcg(struct bio *bio, |
5008 | struct cgroup_subsys_state *blkcg_css) { return 0; } |
5009 | static inline int bio_associate_current(struct bio *bio) { return -ENOENT; } |
5010 | static inline void bio_disassociate_task(struct bio *bio) { } |
5011 | +static inline void bio_clone_blkcg_association(struct bio *dst, |
5012 | + struct bio *src) { } |
5013 | #endif /* CONFIG_BLK_CGROUP */ |
5014 | |
5015 | #ifdef CONFIG_HIGHMEM |
5016 | diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h |
5017 | index f079fb1a31f7..a8786d27ab81 100644 |
5018 | --- a/include/linux/mlx5/qp.h |
5019 | +++ b/include/linux/mlx5/qp.h |
5020 | @@ -160,6 +160,7 @@ enum { |
5021 | enum { |
5022 | MLX5_FENCE_MODE_NONE = 0 << 5, |
5023 | MLX5_FENCE_MODE_INITIATOR_SMALL = 1 << 5, |
5024 | + MLX5_FENCE_MODE_FENCE = 2 << 5, |
5025 | MLX5_FENCE_MODE_STRONG_ORDERING = 3 << 5, |
5026 | MLX5_FENCE_MODE_SMALL_AND_FENCE = 4 << 5, |
5027 | }; |
5028 | @@ -534,9 +535,9 @@ struct mlx5_destroy_qp_mbox_out { |
5029 | struct mlx5_modify_qp_mbox_in { |
5030 | struct mlx5_inbox_hdr hdr; |
5031 | __be32 qpn; |
5032 | - u8 rsvd1[4]; |
5033 | - __be32 optparam; |
5034 | u8 rsvd0[4]; |
5035 | + __be32 optparam; |
5036 | + u8 rsvd1[4]; |
5037 | struct mlx5_qp_context ctx; |
5038 | }; |
5039 | |
5040 | diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h |
5041 | index 28ee5c2e6bcd..711322a8ee35 100644 |
5042 | --- a/include/target/target_core_backend.h |
5043 | +++ b/include/target/target_core_backend.h |
5044 | @@ -96,6 +96,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, |
5045 | bool target_sense_desc_format(struct se_device *dev); |
5046 | sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); |
5047 | bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, |
5048 | - struct request_queue *q, int block_size); |
5049 | + struct request_queue *q); |
5050 | |
5051 | #endif /* TARGET_CORE_BACKEND_H */ |
5052 | diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h |
5053 | index 689f4d207122..59081c73b296 100644 |
5054 | --- a/include/target/target_core_base.h |
5055 | +++ b/include/target/target_core_base.h |
5056 | @@ -139,6 +139,7 @@ enum se_cmd_flags_table { |
5057 | SCF_COMPARE_AND_WRITE_POST = 0x00100000, |
5058 | SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000, |
5059 | SCF_ACK_KREF = 0x00400000, |
5060 | + SCF_TASK_ATTR_SET = 0x01000000, |
5061 | }; |
5062 | |
5063 | /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */ |
5064 | diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h |
5065 | index 7fb2557a760e..ce9ea736f1d7 100644 |
5066 | --- a/include/target/target_core_fabric.h |
5067 | +++ b/include/target/target_core_fabric.h |
5068 | @@ -163,7 +163,6 @@ int core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t); |
5069 | void core_tmr_release_req(struct se_tmr_req *); |
5070 | int transport_generic_handle_tmr(struct se_cmd *); |
5071 | void transport_generic_request_failure(struct se_cmd *, sense_reason_t); |
5072 | -void __target_execute_cmd(struct se_cmd *); |
5073 | int transport_lookup_tmr_lun(struct se_cmd *, u64); |
5074 | void core_allocate_nexus_loss_ua(struct se_node_acl *acl); |
5075 | |
5076 | diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h |
5077 | index 003dca933803..5664ca07c9c7 100644 |
5078 | --- a/include/trace/events/sunrpc.h |
5079 | +++ b/include/trace/events/sunrpc.h |
5080 | @@ -529,20 +529,27 @@ TRACE_EVENT(svc_xprt_do_enqueue, |
5081 | |
5082 | TP_STRUCT__entry( |
5083 | __field(struct svc_xprt *, xprt) |
5084 | - __field_struct(struct sockaddr_storage, ss) |
5085 | __field(int, pid) |
5086 | __field(unsigned long, flags) |
5087 | + __dynamic_array(unsigned char, addr, xprt != NULL ? |
5088 | + xprt->xpt_remotelen : 0) |
5089 | ), |
5090 | |
5091 | TP_fast_assign( |
5092 | __entry->xprt = xprt; |
5093 | - xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); |
5094 | __entry->pid = rqst? rqst->rq_task->pid : 0; |
5095 | - __entry->flags = xprt ? xprt->xpt_flags : 0; |
5096 | + if (xprt) { |
5097 | + memcpy(__get_dynamic_array(addr), |
5098 | + &xprt->xpt_remote, |
5099 | + xprt->xpt_remotelen); |
5100 | + __entry->flags = xprt->xpt_flags; |
5101 | + } else |
5102 | + __entry->flags = 0; |
5103 | ), |
5104 | |
5105 | TP_printk("xprt=0x%p addr=%pIScp pid=%d flags=%s", __entry->xprt, |
5106 | - (struct sockaddr *)&__entry->ss, |
5107 | + __get_dynamic_array_len(addr) != 0 ? |
5108 | + (struct sockaddr *)__get_dynamic_array(addr) : NULL, |
5109 | __entry->pid, show_svc_xprt_flags(__entry->flags)) |
5110 | ); |
5111 | |
5112 | @@ -553,18 +560,25 @@ TRACE_EVENT(svc_xprt_dequeue, |
5113 | |
5114 | TP_STRUCT__entry( |
5115 | __field(struct svc_xprt *, xprt) |
5116 | - __field_struct(struct sockaddr_storage, ss) |
5117 | __field(unsigned long, flags) |
5118 | + __dynamic_array(unsigned char, addr, xprt != NULL ? |
5119 | + xprt->xpt_remotelen : 0) |
5120 | ), |
5121 | |
5122 | TP_fast_assign( |
5123 | - __entry->xprt = xprt, |
5124 | - xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); |
5125 | - __entry->flags = xprt ? xprt->xpt_flags : 0; |
5126 | + __entry->xprt = xprt; |
5127 | + if (xprt) { |
5128 | + memcpy(__get_dynamic_array(addr), |
5129 | + &xprt->xpt_remote, |
5130 | + xprt->xpt_remotelen); |
5131 | + __entry->flags = xprt->xpt_flags; |
5132 | + } else |
5133 | + __entry->flags = 0; |
5134 | ), |
5135 | |
5136 | TP_printk("xprt=0x%p addr=%pIScp flags=%s", __entry->xprt, |
5137 | - (struct sockaddr *)&__entry->ss, |
5138 | + __get_dynamic_array_len(addr) != 0 ? |
5139 | + (struct sockaddr *)__get_dynamic_array(addr) : NULL, |
5140 | show_svc_xprt_flags(__entry->flags)) |
5141 | ); |
5142 | |
5143 | @@ -592,19 +606,26 @@ TRACE_EVENT(svc_handle_xprt, |
5144 | TP_STRUCT__entry( |
5145 | __field(struct svc_xprt *, xprt) |
5146 | __field(int, len) |
5147 | - __field_struct(struct sockaddr_storage, ss) |
5148 | __field(unsigned long, flags) |
5149 | + __dynamic_array(unsigned char, addr, xprt != NULL ? |
5150 | + xprt->xpt_remotelen : 0) |
5151 | ), |
5152 | |
5153 | TP_fast_assign( |
5154 | __entry->xprt = xprt; |
5155 | - xprt ? memcpy(&__entry->ss, &xprt->xpt_remote, sizeof(__entry->ss)) : memset(&__entry->ss, 0, sizeof(__entry->ss)); |
5156 | __entry->len = len; |
5157 | - __entry->flags = xprt ? xprt->xpt_flags : 0; |
5158 | + if (xprt) { |
5159 | + memcpy(__get_dynamic_array(addr), |
5160 | + &xprt->xpt_remote, |
5161 | + xprt->xpt_remotelen); |
5162 | + __entry->flags = xprt->xpt_flags; |
5163 | + } else |
5164 | + __entry->flags = 0; |
5165 | ), |
5166 | |
5167 | TP_printk("xprt=0x%p addr=%pIScp len=%d flags=%s", __entry->xprt, |
5168 | - (struct sockaddr *)&__entry->ss, |
5169 | + __get_dynamic_array_len(addr) != 0 ? |
5170 | + (struct sockaddr *)__get_dynamic_array(addr) : NULL, |
5171 | __entry->len, show_svc_xprt_flags(__entry->flags)) |
5172 | ); |
5173 | #endif /* _TRACE_SUNRPC_H */ |
5174 | diff --git a/kernel/auditsc.c b/kernel/auditsc.c |
5175 | index b86cc04959de..48f45987dc6c 100644 |
5176 | --- a/kernel/auditsc.c |
5177 | +++ b/kernel/auditsc.c |
5178 | @@ -73,6 +73,7 @@ |
5179 | #include <linux/compat.h> |
5180 | #include <linux/ctype.h> |
5181 | #include <linux/string.h> |
5182 | +#include <linux/uaccess.h> |
5183 | #include <uapi/linux/limits.h> |
5184 | |
5185 | #include "audit.h" |
5186 | @@ -82,7 +83,8 @@ |
5187 | #define AUDITSC_SUCCESS 1 |
5188 | #define AUDITSC_FAILURE 2 |
5189 | |
5190 | -/* no execve audit message should be longer than this (userspace limits) */ |
5191 | +/* no execve audit message should be longer than this (userspace limits), |
5192 | + * see the note near the top of audit_log_execve_info() about this value */ |
5193 | #define MAX_EXECVE_AUDIT_LEN 7500 |
5194 | |
5195 | /* max length to print of cmdline/proctitle value during audit */ |
5196 | @@ -988,184 +990,178 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid, |
5197 | return rc; |
5198 | } |
5199 | |
5200 | -/* |
5201 | - * to_send and len_sent accounting are very loose estimates. We aren't |
5202 | - * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being |
5203 | - * within about 500 bytes (next page boundary) |
5204 | - * |
5205 | - * why snprintf? an int is up to 12 digits long. if we just assumed when |
5206 | - * logging that a[%d]= was going to be 16 characters long we would be wasting |
5207 | - * space in every audit message. In one 7500 byte message we can log up to |
5208 | - * about 1000 min size arguments. That comes down to about 50% waste of space |
5209 | - * if we didn't do the snprintf to find out how long arg_num_len was. |
5210 | - */ |
5211 | -static int audit_log_single_execve_arg(struct audit_context *context, |
5212 | - struct audit_buffer **ab, |
5213 | - int arg_num, |
5214 | - size_t *len_sent, |
5215 | - const char __user *p, |
5216 | - char *buf) |
5217 | +static void audit_log_execve_info(struct audit_context *context, |
5218 | + struct audit_buffer **ab) |
5219 | { |
5220 | - char arg_num_len_buf[12]; |
5221 | - const char __user *tmp_p = p; |
5222 | - /* how many digits are in arg_num? 5 is the length of ' a=""' */ |
5223 | - size_t arg_num_len = snprintf(arg_num_len_buf, 12, "%d", arg_num) + 5; |
5224 | - size_t len, len_left, to_send; |
5225 | - size_t max_execve_audit_len = MAX_EXECVE_AUDIT_LEN; |
5226 | - unsigned int i, has_cntl = 0, too_long = 0; |
5227 | - int ret; |
5228 | - |
5229 | - /* strnlen_user includes the null we don't want to send */ |
5230 | - len_left = len = strnlen_user(p, MAX_ARG_STRLEN) - 1; |
5231 | - |
5232 | - /* |
5233 | - * We just created this mm, if we can't find the strings |
5234 | - * we just copied into it something is _very_ wrong. Similar |
5235 | - * for strings that are too long, we should not have created |
5236 | - * any. |
5237 | - */ |
5238 | - if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) { |
5239 | - send_sig(SIGKILL, current, 0); |
5240 | - return -1; |
5241 | + long len_max; |
5242 | + long len_rem; |
5243 | + long len_full; |
5244 | + long len_buf; |
5245 | + long len_abuf; |
5246 | + long len_tmp; |
5247 | + bool require_data; |
5248 | + bool encode; |
5249 | + unsigned int iter; |
5250 | + unsigned int arg; |
5251 | + char *buf_head; |
5252 | + char *buf; |
5253 | + const char __user *p = (const char __user *)current->mm->arg_start; |
5254 | + |
5255 | + /* NOTE: this buffer needs to be large enough to hold all the non-arg |
5256 | + * data we put in the audit record for this argument (see the |
5257 | + * code below) ... at this point in time 96 is plenty */ |
5258 | + char abuf[96]; |
5259 | + |
5260 | + /* NOTE: we set MAX_EXECVE_AUDIT_LEN to a rather arbitrary limit, the |
5261 | + * current value of 7500 is not as important as the fact that it |
5262 | + * is less than 8k, a setting of 7500 gives us plenty of wiggle |
5263 | + * room if we go over a little bit in the logging below */ |
5264 | + WARN_ON_ONCE(MAX_EXECVE_AUDIT_LEN > 7500); |
5265 | + len_max = MAX_EXECVE_AUDIT_LEN; |
5266 | + |
5267 | + /* scratch buffer to hold the userspace args */ |
5268 | + buf_head = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); |
5269 | + if (!buf_head) { |
5270 | + audit_panic("out of memory for argv string"); |
5271 | + return; |
5272 | } |
5273 | + buf = buf_head; |
5274 | |
5275 | - /* walk the whole argument looking for non-ascii chars */ |
5276 | + audit_log_format(*ab, "argc=%d", context->execve.argc); |
5277 | + |
5278 | + len_rem = len_max; |
5279 | + len_buf = 0; |
5280 | + len_full = 0; |
5281 | + require_data = true; |
5282 | + encode = false; |
5283 | + iter = 0; |
5284 | + arg = 0; |
5285 | do { |
5286 | - if (len_left > MAX_EXECVE_AUDIT_LEN) |
5287 | - to_send = MAX_EXECVE_AUDIT_LEN; |
5288 | - else |
5289 | - to_send = len_left; |
5290 | - ret = copy_from_user(buf, tmp_p, to_send); |
5291 | - /* |
5292 | - * There is no reason for this copy to be short. We just |
5293 | - * copied them here, and the mm hasn't been exposed to user- |
5294 | - * space yet. |
5295 | - */ |
5296 | - if (ret) { |
5297 | - WARN_ON(1); |
5298 | - send_sig(SIGKILL, current, 0); |
5299 | - return -1; |
5300 | - } |
5301 | - buf[to_send] = '\0'; |
5302 | - has_cntl = audit_string_contains_control(buf, to_send); |
5303 | - if (has_cntl) { |
5304 | - /* |
5305 | - * hex messages get logged as 2 bytes, so we can only |
5306 | - * send half as much in each message |
5307 | - */ |
5308 | - max_execve_audit_len = MAX_EXECVE_AUDIT_LEN / 2; |
5309 | - break; |
5310 | - } |
5311 | - len_left -= to_send; |
5312 | - tmp_p += to_send; |
5313 | - } while (len_left > 0); |
5314 | - |
5315 | - len_left = len; |
5316 | - |
5317 | - if (len > max_execve_audit_len) |
5318 | - too_long = 1; |
5319 | - |
5320 | - /* rewalk the argument actually logging the message */ |
5321 | - for (i = 0; len_left > 0; i++) { |
5322 | - int room_left; |
5323 | - |
5324 | - if (len_left > max_execve_audit_len) |
5325 | - to_send = max_execve_audit_len; |
5326 | - else |
5327 | - to_send = len_left; |
5328 | - |
5329 | - /* do we have space left to send this argument in this ab? */ |
5330 | - room_left = MAX_EXECVE_AUDIT_LEN - arg_num_len - *len_sent; |
5331 | - if (has_cntl) |
5332 | - room_left -= (to_send * 2); |
5333 | - else |
5334 | - room_left -= to_send; |
5335 | - if (room_left < 0) { |
5336 | - *len_sent = 0; |
5337 | - audit_log_end(*ab); |
5338 | - *ab = audit_log_start(context, GFP_KERNEL, AUDIT_EXECVE); |
5339 | - if (!*ab) |
5340 | - return 0; |
5341 | - } |
5342 | + /* NOTE: we don't ever want to trust this value for anything |
5343 | + * serious, but the audit record format insists we |
5344 | + * provide an argument length for really long arguments, |
5345 | + * e.g. > MAX_EXECVE_AUDIT_LEN, so we have no choice but |
5346 | + * to use strncpy_from_user() to obtain this value for |
5347 | + * recording in the log, although we don't use it |
5348 | + * anywhere here to avoid a double-fetch problem */ |
5349 | + if (len_full == 0) |
5350 | + len_full = strnlen_user(p, MAX_ARG_STRLEN) - 1; |
5351 | + |
5352 | + /* read more data from userspace */ |
5353 | + if (require_data) { |
5354 | + /* can we make more room in the buffer? */ |
5355 | + if (buf != buf_head) { |
5356 | + memmove(buf_head, buf, len_buf); |
5357 | + buf = buf_head; |
5358 | + } |
5359 | + |
5360 | + /* fetch as much as we can of the argument */ |
5361 | + len_tmp = strncpy_from_user(&buf_head[len_buf], p, |
5362 | + len_max - len_buf); |
5363 | + if (len_tmp == -EFAULT) { |
5364 | + /* unable to copy from userspace */ |
5365 | + send_sig(SIGKILL, current, 0); |
5366 | + goto out; |
5367 | + } else if (len_tmp == (len_max - len_buf)) { |
5368 | + /* buffer is not large enough */ |
5369 | + require_data = true; |
5370 | + /* NOTE: if we are going to span multiple |
5371 | + * buffers force the encoding so we stand |
5372 | + * a chance at a sane len_full value and |
5373 | + * consistent record encoding */ |
5374 | + encode = true; |
5375 | + len_full = len_full * 2; |
5376 | + p += len_tmp; |
5377 | + } else { |
5378 | + require_data = false; |
5379 | + if (!encode) |
5380 | + encode = audit_string_contains_control( |
5381 | + buf, len_tmp); |
5382 | + /* try to use a trusted value for len_full */ |
5383 | + if (len_full < len_max) |
5384 | + len_full = (encode ? |
5385 | + len_tmp * 2 : len_tmp); |
5386 | + p += len_tmp + 1; |
5387 | + } |
5388 | + len_buf += len_tmp; |
5389 | + buf_head[len_buf] = '\0'; |
5390 | |
5391 | - /* |
5392 | - * first record needs to say how long the original string was |
5393 | - * so we can be sure nothing was lost. |
5394 | - */ |
5395 | - if ((i == 0) && (too_long)) |
5396 | - audit_log_format(*ab, " a%d_len=%zu", arg_num, |
5397 | - has_cntl ? 2*len : len); |
5398 | - |
5399 | - /* |
5400 | - * normally arguments are small enough to fit and we already |
5401 | - * filled buf above when we checked for control characters |
5402 | - * so don't bother with another copy_from_user |
5403 | - */ |
5404 | - if (len >= max_execve_audit_len) |
5405 | - ret = copy_from_user(buf, p, to_send); |
5406 | - else |
5407 | - ret = 0; |
5408 | - if (ret) { |
5409 | - WARN_ON(1); |
5410 | - send_sig(SIGKILL, current, 0); |
5411 | - return -1; |
5412 | + /* length of the buffer in the audit record? */ |
5413 | + len_abuf = (encode ? len_buf * 2 : len_buf + 2); |
5414 | } |
5415 | - buf[to_send] = '\0'; |
5416 | - |
5417 | - /* actually log it */ |
5418 | - audit_log_format(*ab, " a%d", arg_num); |
5419 | - if (too_long) |
5420 | - audit_log_format(*ab, "[%d]", i); |
5421 | - audit_log_format(*ab, "="); |
5422 | - if (has_cntl) |
5423 | - audit_log_n_hex(*ab, buf, to_send); |
5424 | - else |
5425 | - audit_log_string(*ab, buf); |
5426 | - |
5427 | - p += to_send; |
5428 | - len_left -= to_send; |
5429 | - *len_sent += arg_num_len; |
5430 | - if (has_cntl) |
5431 | - *len_sent += to_send * 2; |
5432 | - else |
5433 | - *len_sent += to_send; |
5434 | - } |
5435 | - /* include the null we didn't log */ |
5436 | - return len + 1; |
5437 | -} |
5438 | |
5439 | -static void audit_log_execve_info(struct audit_context *context, |
5440 | - struct audit_buffer **ab) |
5441 | -{ |
5442 | - int i, len; |
5443 | - size_t len_sent = 0; |
5444 | - const char __user *p; |
5445 | - char *buf; |
5446 | + /* write as much as we can to the audit log */ |
5447 | + if (len_buf > 0) { |
5448 | + /* NOTE: some magic numbers here - basically if we |
5449 | + * can't fit a reasonable amount of data into the |
5450 | + * existing audit buffer, flush it and start with |
5451 | + * a new buffer */ |
5452 | + if ((sizeof(abuf) + 8) > len_rem) { |
5453 | + len_rem = len_max; |
5454 | + audit_log_end(*ab); |
5455 | + *ab = audit_log_start(context, |
5456 | + GFP_KERNEL, AUDIT_EXECVE); |
5457 | + if (!*ab) |
5458 | + goto out; |
5459 | + } |
5460 | |
5461 | - p = (const char __user *)current->mm->arg_start; |
5462 | + /* create the non-arg portion of the arg record */ |
5463 | + len_tmp = 0; |
5464 | + if (require_data || (iter > 0) || |
5465 | + ((len_abuf + sizeof(abuf)) > len_rem)) { |
5466 | + if (iter == 0) { |
5467 | + len_tmp += snprintf(&abuf[len_tmp], |
5468 | + sizeof(abuf) - len_tmp, |
5469 | + " a%d_len=%lu", |
5470 | + arg, len_full); |
5471 | + } |
5472 | + len_tmp += snprintf(&abuf[len_tmp], |
5473 | + sizeof(abuf) - len_tmp, |
5474 | + " a%d[%d]=", arg, iter++); |
5475 | + } else |
5476 | + len_tmp += snprintf(&abuf[len_tmp], |
5477 | + sizeof(abuf) - len_tmp, |
5478 | + " a%d=", arg); |
5479 | + WARN_ON(len_tmp >= sizeof(abuf)); |
5480 | + abuf[sizeof(abuf) - 1] = '\0'; |
5481 | + |
5482 | + /* log the arg in the audit record */ |
5483 | + audit_log_format(*ab, "%s", abuf); |
5484 | + len_rem -= len_tmp; |
5485 | + len_tmp = len_buf; |
5486 | + if (encode) { |
5487 | + if (len_abuf > len_rem) |
5488 | + len_tmp = len_rem / 2; /* encoding */ |
5489 | + audit_log_n_hex(*ab, buf, len_tmp); |
5490 | + len_rem -= len_tmp * 2; |
5491 | + len_abuf -= len_tmp * 2; |
5492 | + } else { |
5493 | + if (len_abuf > len_rem) |
5494 | + len_tmp = len_rem - 2; /* quotes */ |
5495 | + audit_log_n_string(*ab, buf, len_tmp); |
5496 | + len_rem -= len_tmp + 2; |
5497 | + /* don't subtract the "2" because we still need |
5498 | + * to add quotes to the remaining string */ |
5499 | + len_abuf -= len_tmp; |
5500 | + } |
5501 | + len_buf -= len_tmp; |
5502 | + buf += len_tmp; |
5503 | + } |
5504 | |
5505 | - audit_log_format(*ab, "argc=%d", context->execve.argc); |
5506 | + /* ready to move to the next argument? */ |
5507 | + if ((len_buf == 0) && !require_data) { |
5508 | + arg++; |
5509 | + iter = 0; |
5510 | + len_full = 0; |
5511 | + require_data = true; |
5512 | + encode = false; |
5513 | + } |
5514 | + } while (arg < context->execve.argc); |
5515 | |
5516 | - /* |
5517 | - * we need some kernel buffer to hold the userspace args. Just |
5518 | - * allocate one big one rather than allocating one of the right size |
5519 | - * for every single argument inside audit_log_single_execve_arg() |
5520 | - * should be <8k allocation so should be pretty safe. |
5521 | - */ |
5522 | - buf = kmalloc(MAX_EXECVE_AUDIT_LEN + 1, GFP_KERNEL); |
5523 | - if (!buf) { |
5524 | - audit_panic("out of memory for argv string"); |
5525 | - return; |
5526 | - } |
5527 | + /* NOTE: the caller handles the final audit_log_end() call */ |
5528 | |
5529 | - for (i = 0; i < context->execve.argc; i++) { |
5530 | - len = audit_log_single_execve_arg(context, ab, i, |
5531 | - &len_sent, p, buf); |
5532 | - if (len <= 0) |
5533 | - break; |
5534 | - p += len; |
5535 | - } |
5536 | - kfree(buf); |
5537 | +out: |
5538 | + kfree(buf_head); |
5539 | } |
5540 | |
5541 | static void show_special(struct audit_context *context, int *call_panic) |
5542 | diff --git a/kernel/module.c b/kernel/module.c |
5543 | index 0e5c71195f18..b14a4f31221f 100644 |
5544 | --- a/kernel/module.c |
5545 | +++ b/kernel/module.c |
5546 | @@ -2606,13 +2606,18 @@ static inline void kmemleak_load_module(const struct module *mod, |
5547 | #endif |
5548 | |
5549 | #ifdef CONFIG_MODULE_SIG |
5550 | -static int module_sig_check(struct load_info *info) |
5551 | +static int module_sig_check(struct load_info *info, int flags) |
5552 | { |
5553 | int err = -ENOKEY; |
5554 | const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; |
5555 | const void *mod = info->hdr; |
5556 | |
5557 | - if (info->len > markerlen && |
5558 | + /* |
5559 | + * Require flags == 0, as a module with version information |
5560 | + * removed is no longer the module that was signed |
5561 | + */ |
5562 | + if (flags == 0 && |
5563 | + info->len > markerlen && |
5564 | memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { |
5565 | /* We truncate the module to discard the signature */ |
5566 | info->len -= markerlen; |
5567 | @@ -2631,7 +2636,7 @@ static int module_sig_check(struct load_info *info) |
5568 | return err; |
5569 | } |
5570 | #else /* !CONFIG_MODULE_SIG */ |
5571 | -static int module_sig_check(struct load_info *info) |
5572 | +static int module_sig_check(struct load_info *info, int flags) |
5573 | { |
5574 | return 0; |
5575 | } |
5576 | @@ -3444,7 +3449,7 @@ static int load_module(struct load_info *info, const char __user *uargs, |
5577 | long err; |
5578 | char *after_dashes; |
5579 | |
5580 | - err = module_sig_check(info); |
5581 | + err = module_sig_check(info, flags); |
5582 | if (err) |
5583 | goto free_copy; |
5584 | |
5585 | diff --git a/mm/backing-dev.c b/mm/backing-dev.c |
5586 | index cbe6f0b96f29..9ef80bf441b3 100644 |
5587 | --- a/mm/backing-dev.c |
5588 | +++ b/mm/backing-dev.c |
5589 | @@ -825,6 +825,20 @@ int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev) |
5590 | } |
5591 | EXPORT_SYMBOL(bdi_register_dev); |
5592 | |
5593 | +int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner) |
5594 | +{ |
5595 | + int rc; |
5596 | + |
5597 | + rc = bdi_register(bdi, NULL, "%u:%u", MAJOR(owner->devt), |
5598 | + MINOR(owner->devt)); |
5599 | + if (rc) |
5600 | + return rc; |
5601 | + bdi->owner = owner; |
5602 | + get_device(owner); |
5603 | + return 0; |
5604 | +} |
5605 | +EXPORT_SYMBOL(bdi_register_owner); |
5606 | + |
5607 | /* |
5608 | * Remove bdi from bdi_list, and ensure that it is no longer visible |
5609 | */ |
5610 | @@ -849,6 +863,11 @@ void bdi_unregister(struct backing_dev_info *bdi) |
5611 | device_unregister(bdi->dev); |
5612 | bdi->dev = NULL; |
5613 | } |
5614 | + |
5615 | + if (bdi->owner) { |
5616 | + put_device(bdi->owner); |
5617 | + bdi->owner = NULL; |
5618 | + } |
5619 | } |
5620 | |
5621 | void bdi_exit(struct backing_dev_info *bdi) |
5622 | diff --git a/mm/hugetlb.c b/mm/hugetlb.c |
5623 | index ef6963b577fd..0c31f184daf8 100644 |
5624 | --- a/mm/hugetlb.c |
5625 | +++ b/mm/hugetlb.c |
5626 | @@ -2170,6 +2170,10 @@ static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count, |
5627 | * and reducing the surplus. |
5628 | */ |
5629 | spin_unlock(&hugetlb_lock); |
5630 | + |
5631 | + /* yield cpu to avoid soft lockup */ |
5632 | + cond_resched(); |
5633 | + |
5634 | if (hstate_is_gigantic(h)) |
5635 | ret = alloc_fresh_gigantic_page(h, nodes_allowed); |
5636 | else |
5637 | diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c |
5638 | index 1bb551527044..d9bbbded49ef 100644 |
5639 | --- a/net/bluetooth/l2cap_sock.c |
5640 | +++ b/net/bluetooth/l2cap_sock.c |
5641 | @@ -927,7 +927,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, |
5642 | break; |
5643 | } |
5644 | |
5645 | - if (get_user(opt, (u32 __user *) optval)) { |
5646 | + if (get_user(opt, (u16 __user *) optval)) { |
5647 | err = -EFAULT; |
5648 | break; |
5649 | } |
5650 | diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c |
5651 | index 28cddc85b700..bfa2b6d5b5cf 100644 |
5652 | --- a/net/netlabel/netlabel_kapi.c |
5653 | +++ b/net/netlabel/netlabel_kapi.c |
5654 | @@ -824,7 +824,11 @@ socket_setattr_return: |
5655 | */ |
5656 | void netlbl_sock_delattr(struct sock *sk) |
5657 | { |
5658 | - cipso_v4_sock_delattr(sk); |
5659 | + switch (sk->sk_family) { |
5660 | + case AF_INET: |
5661 | + cipso_v4_sock_delattr(sk); |
5662 | + break; |
5663 | + } |
5664 | } |
5665 | |
5666 | /** |
5667 | @@ -987,7 +991,11 @@ req_setattr_return: |
5668 | */ |
5669 | void netlbl_req_delattr(struct request_sock *req) |
5670 | { |
5671 | - cipso_v4_req_delattr(req); |
5672 | + switch (req->rsk_ops->family) { |
5673 | + case AF_INET: |
5674 | + cipso_v4_req_delattr(req); |
5675 | + break; |
5676 | + } |
5677 | } |
5678 | |
5679 | /** |
5680 | diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c |
5681 | index e167592793a7..42396a74405d 100644 |
5682 | --- a/scripts/recordmcount.c |
5683 | +++ b/scripts/recordmcount.c |
5684 | @@ -33,10 +33,17 @@ |
5685 | #include <string.h> |
5686 | #include <unistd.h> |
5687 | |
5688 | +/* |
5689 | + * glibc synced up and added the metag number but didn't add the relocations. |
5690 | + * Work around this in a crude manner for now. |
5691 | + */ |
5692 | #ifndef EM_METAG |
5693 | -/* Remove this when these make it to the standard system elf.h. */ |
5694 | #define EM_METAG 174 |
5695 | +#endif |
5696 | +#ifndef R_METAG_ADDR32 |
5697 | #define R_METAG_ADDR32 2 |
5698 | +#endif |
5699 | +#ifndef R_METAG_NONE |
5700 | #define R_METAG_NONE 3 |
5701 | #endif |
5702 | |
5703 | diff --git a/sound/hda/array.c b/sound/hda/array.c |
5704 | index 516795baa7db..5dfa610e4471 100644 |
5705 | --- a/sound/hda/array.c |
5706 | +++ b/sound/hda/array.c |
5707 | @@ -21,13 +21,15 @@ void *snd_array_new(struct snd_array *array) |
5708 | return NULL; |
5709 | if (array->used >= array->alloced) { |
5710 | int num = array->alloced + array->alloc_align; |
5711 | + int oldsize = array->alloced * array->elem_size; |
5712 | int size = (num + 1) * array->elem_size; |
5713 | void *nlist; |
5714 | if (snd_BUG_ON(num >= 4096)) |
5715 | return NULL; |
5716 | - nlist = krealloc(array->list, size, GFP_KERNEL | __GFP_ZERO); |
5717 | + nlist = krealloc(array->list, size, GFP_KERNEL); |
5718 | if (!nlist) |
5719 | return NULL; |
5720 | + memset(nlist + oldsize, 0, size - oldsize); |
5721 | array->list = nlist; |
5722 | array->alloced = num; |
5723 | } |
5724 | diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c |
5725 | index 8218cace8fea..e769e5764cba 100644 |
5726 | --- a/sound/pci/hda/hda_intel.c |
5727 | +++ b/sound/pci/hda/hda_intel.c |
5728 | @@ -2288,6 +2288,8 @@ static const struct pci_device_id azx_ids[] = { |
5729 | { PCI_DEVICE(0x1022, 0x780d), |
5730 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, |
5731 | /* ATI HDMI */ |
5732 | + { PCI_DEVICE(0x1002, 0x0002), |
5733 | + .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
5734 | { PCI_DEVICE(0x1002, 0x1308), |
5735 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
5736 | { PCI_DEVICE(0x1002, 0x157a), |
5737 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
5738 | index abcb5a6a1cd9..f25479ba3981 100644 |
5739 | --- a/sound/pci/hda/patch_realtek.c |
5740 | +++ b/sound/pci/hda/patch_realtek.c |
5741 | @@ -4674,6 +4674,22 @@ static void alc290_fixup_mono_speakers(struct hda_codec *codec, |
5742 | } |
5743 | } |
5744 | |
5745 | +static void alc298_fixup_speaker_volume(struct hda_codec *codec, |
5746 | + const struct hda_fixup *fix, int action) |
5747 | +{ |
5748 | + if (action == HDA_FIXUP_ACT_PRE_PROBE) { |
5749 | + /* The speaker is routed to the Node 0x06 by a mistake, as a result |
5750 | + we can't adjust the speaker's volume since this node does not has |
5751 | + Amp-out capability. we change the speaker's route to: |
5752 | + Node 0x02 (Audio Output) -> Node 0x0c (Audio Mixer) -> Node 0x17 ( |
5753 | + Pin Complex), since Node 0x02 has Amp-out caps, we can adjust |
5754 | + speaker's volume now. */ |
5755 | + |
5756 | + hda_nid_t conn1[1] = { 0x0c }; |
5757 | + snd_hda_override_conn_list(codec, 0x17, 1, conn1); |
5758 | + } |
5759 | +} |
5760 | + |
5761 | /* Hook to update amp GPIO4 for automute */ |
5762 | static void alc280_hp_gpio4_automute_hook(struct hda_codec *codec, |
5763 | struct hda_jack_callback *jack) |
5764 | @@ -4823,6 +4839,7 @@ enum { |
5765 | ALC280_FIXUP_HP_HEADSET_MIC, |
5766 | ALC221_FIXUP_HP_FRONT_MIC, |
5767 | ALC292_FIXUP_TPT460, |
5768 | + ALC298_FIXUP_SPK_VOLUME, |
5769 | }; |
5770 | |
5771 | static const struct hda_fixup alc269_fixups[] = { |
5772 | @@ -5478,6 +5495,12 @@ static const struct hda_fixup alc269_fixups[] = { |
5773 | .chained = true, |
5774 | .chain_id = ALC293_FIXUP_LENOVO_SPK_NOISE, |
5775 | }, |
5776 | + [ALC298_FIXUP_SPK_VOLUME] = { |
5777 | + .type = HDA_FIXUP_FUNC, |
5778 | + .v.func = alc298_fixup_speaker_volume, |
5779 | + .chained = true, |
5780 | + .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, |
5781 | + }, |
5782 | }; |
5783 | |
5784 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
5785 | @@ -5524,6 +5547,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
5786 | SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), |
5787 | SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE), |
5788 | SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE), |
5789 | + SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME), |
5790 | SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5791 | SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), |
5792 | SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), |
5793 | @@ -5799,6 +5823,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { |
5794 | {0x1b, 0x01014020}, |
5795 | {0x21, 0x0221103f}), |
5796 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
5797 | + {0x14, 0x90170130}, |
5798 | + {0x1b, 0x02011020}, |
5799 | + {0x21, 0x0221103f}), |
5800 | + SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
5801 | {0x14, 0x90170150}, |
5802 | {0x1b, 0x02011020}, |
5803 | {0x21, 0x0221105f}), |
5804 | diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c |
5805 | index 510df220d1b5..336ed267c407 100644 |
5806 | --- a/virt/kvm/kvm_main.c |
5807 | +++ b/virt/kvm/kvm_main.c |
5808 | @@ -142,6 +142,7 @@ int vcpu_load(struct kvm_vcpu *vcpu) |
5809 | put_cpu(); |
5810 | return 0; |
5811 | } |
5812 | +EXPORT_SYMBOL_GPL(vcpu_load); |
5813 | |
5814 | void vcpu_put(struct kvm_vcpu *vcpu) |
5815 | { |
5816 | @@ -151,6 +152,7 @@ void vcpu_put(struct kvm_vcpu *vcpu) |
5817 | preempt_enable(); |
5818 | mutex_unlock(&vcpu->mutex); |
5819 | } |
5820 | +EXPORT_SYMBOL_GPL(vcpu_put); |
5821 | |
5822 | static void ack_flush(void *_completed) |
5823 | { |