Contents of /trunk/kernel-magellan/patches-3.16/0101-3.16.2-all-fixes.patch
Parent Directory | Revision Log
Revision 2498 -
(show annotations)
(download)
Tue Sep 9 06:58:23 2014 UTC (10 years ago) by niro
File size: 204338 byte(s)
-linux-3.16.2
1 | diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt |
2 | index 7ccf933bfbe0..48148d6d9307 100644 |
3 | --- a/Documentation/sound/alsa/ALSA-Configuration.txt |
4 | +++ b/Documentation/sound/alsa/ALSA-Configuration.txt |
5 | @@ -2026,8 +2026,8 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed. |
6 | ------------------- |
7 | |
8 | Module for sound cards based on the Asus AV66/AV100/AV200 chips, |
9 | - i.e., Xonar D1, DX, D2, D2X, DS, Essence ST (Deluxe), Essence STX, |
10 | - HDAV1.3 (Deluxe), and HDAV1.3 Slim. |
11 | + i.e., Xonar D1, DX, D2, D2X, DS, DSX, Essence ST (Deluxe), |
12 | + Essence STX (II), HDAV1.3 (Deluxe), and HDAV1.3 Slim. |
13 | |
14 | This module supports autoprobe and multiple cards. |
15 | |
16 | diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt |
17 | index cbc2f03056bd..aee73e78c7d4 100644 |
18 | --- a/Documentation/stable_kernel_rules.txt |
19 | +++ b/Documentation/stable_kernel_rules.txt |
20 | @@ -29,6 +29,9 @@ Rules on what kind of patches are accepted, and which ones are not, into the |
21 | |
22 | Procedure for submitting patches to the -stable tree: |
23 | |
24 | + - If the patch covers files in net/ or drivers/net please follow netdev stable |
25 | + submission guidelines as described in |
26 | + Documentation/networking/netdev-FAQ.txt |
27 | - Send the patch, after verifying that it follows the above rules, to |
28 | stable@vger.kernel.org. You must note the upstream commit ID in the |
29 | changelog of your submission, as well as the kernel version you wish |
30 | diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt |
31 | index 0fe36497642c..612e6e99d1e5 100644 |
32 | --- a/Documentation/virtual/kvm/api.txt |
33 | +++ b/Documentation/virtual/kvm/api.txt |
34 | @@ -1869,7 +1869,8 @@ registers, find a list below: |
35 | PPC | KVM_REG_PPC_PID | 64 |
36 | PPC | KVM_REG_PPC_ACOP | 64 |
37 | PPC | KVM_REG_PPC_VRSAVE | 32 |
38 | - PPC | KVM_REG_PPC_LPCR | 64 |
39 | + PPC | KVM_REG_PPC_LPCR | 32 |
40 | + PPC | KVM_REG_PPC_LPCR_64 | 64 |
41 | PPC | KVM_REG_PPC_PPR | 64 |
42 | PPC | KVM_REG_PPC_ARCH_COMPAT 32 |
43 | PPC | KVM_REG_PPC_DABRX | 32 |
44 | diff --git a/Makefile b/Makefile |
45 | index 87663a2d1d10..c2617526e605 100644 |
46 | --- a/Makefile |
47 | +++ b/Makefile |
48 | @@ -1,6 +1,6 @@ |
49 | VERSION = 3 |
50 | PATCHLEVEL = 16 |
51 | -SUBLEVEL = 1 |
52 | +SUBLEVEL = 2 |
53 | EXTRAVERSION = |
54 | NAME = Museum of Fishiegoodies |
55 | |
56 | diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi |
57 | index 49fa59622254..c9aee0e799bb 100644 |
58 | --- a/arch/arm/boot/dts/am4372.dtsi |
59 | +++ b/arch/arm/boot/dts/am4372.dtsi |
60 | @@ -168,9 +168,6 @@ |
61 | ti,hwmods = "mailbox"; |
62 | ti,mbox-num-users = <4>; |
63 | ti,mbox-num-fifos = <8>; |
64 | - ti,mbox-names = "wkup_m3"; |
65 | - ti,mbox-data = <0 0 0 0>; |
66 | - status = "disabled"; |
67 | }; |
68 | |
69 | timer1: timer@44e31000 { |
70 | diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h |
71 | index 43876245fc57..21ca0cebcab0 100644 |
72 | --- a/arch/arm/include/asm/unistd.h |
73 | +++ b/arch/arm/include/asm/unistd.h |
74 | @@ -15,7 +15,17 @@ |
75 | |
76 | #include <uapi/asm/unistd.h> |
77 | |
78 | +/* |
79 | + * This may need to be greater than __NR_last_syscall+1 in order to |
80 | + * account for the padding in the syscall table |
81 | + */ |
82 | #define __NR_syscalls (384) |
83 | + |
84 | +/* |
85 | + * *NOTE*: This is a ghost syscall private to the kernel. Only the |
86 | + * __kuser_cmpxchg code in entry-armv.S should be aware of its |
87 | + * existence. Don't ever use this from user code. |
88 | + */ |
89 | #define __ARM_NR_cmpxchg (__ARM_NR_BASE+0x00fff0) |
90 | |
91 | #define __ARCH_WANT_STAT64 |
92 | diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h |
93 | index ba94446c72d9..acd5b66ea3aa 100644 |
94 | --- a/arch/arm/include/uapi/asm/unistd.h |
95 | +++ b/arch/arm/include/uapi/asm/unistd.h |
96 | @@ -411,11 +411,6 @@ |
97 | #define __NR_renameat2 (__NR_SYSCALL_BASE+382) |
98 | |
99 | /* |
100 | - * This may need to be greater than __NR_last_syscall+1 in order to |
101 | - * account for the padding in the syscall table |
102 | - */ |
103 | - |
104 | -/* |
105 | * The following SWIs are ARM private. |
106 | */ |
107 | #define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000) |
108 | @@ -426,12 +421,6 @@ |
109 | #define __ARM_NR_set_tls (__ARM_NR_BASE+5) |
110 | |
111 | /* |
112 | - * *NOTE*: This is a ghost syscall private to the kernel. Only the |
113 | - * __kuser_cmpxchg code in entry-armv.S should be aware of its |
114 | - * existence. Don't ever use this from user code. |
115 | - */ |
116 | - |
117 | -/* |
118 | * The following syscalls are obsolete and no longer available for EABI. |
119 | */ |
120 | #if !defined(__KERNEL__) |
121 | diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c |
122 | index 751f3549bf6f..acadac0992b6 100644 |
123 | --- a/arch/arm/mach-omap2/control.c |
124 | +++ b/arch/arm/mach-omap2/control.c |
125 | @@ -314,7 +314,8 @@ void omap3_save_scratchpad_contents(void) |
126 | scratchpad_contents.public_restore_ptr = |
127 | virt_to_phys(omap3_restore_3630); |
128 | else if (omap_rev() != OMAP3430_REV_ES3_0 && |
129 | - omap_rev() != OMAP3430_REV_ES3_1) |
130 | + omap_rev() != OMAP3430_REV_ES3_1 && |
131 | + omap_rev() != OMAP3430_REV_ES3_1_2) |
132 | scratchpad_contents.public_restore_ptr = |
133 | virt_to_phys(omap3_restore); |
134 | else |
135 | diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c |
136 | index 6c074f37cdd2..da1b256caccc 100644 |
137 | --- a/arch/arm/mach-omap2/omap_hwmod.c |
138 | +++ b/arch/arm/mach-omap2/omap_hwmod.c |
139 | @@ -2185,6 +2185,8 @@ static int _enable(struct omap_hwmod *oh) |
140 | oh->mux->pads_dynamic))) { |
141 | omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED); |
142 | _reconfigure_io_chain(); |
143 | + } else if (oh->flags & HWMOD_FORCE_MSTANDBY) { |
144 | + _reconfigure_io_chain(); |
145 | } |
146 | |
147 | _add_initiator_dep(oh, mpu_oh); |
148 | @@ -2291,6 +2293,8 @@ static int _idle(struct omap_hwmod *oh) |
149 | if (oh->mux && oh->mux->pads_dynamic) { |
150 | omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE); |
151 | _reconfigure_io_chain(); |
152 | + } else if (oh->flags & HWMOD_FORCE_MSTANDBY) { |
153 | + _reconfigure_io_chain(); |
154 | } |
155 | |
156 | oh->_state = _HWMOD_STATE_IDLE; |
157 | diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h |
158 | index a5176cf32dad..f2defe1c380c 100644 |
159 | --- a/arch/arm64/include/asm/cacheflush.h |
160 | +++ b/arch/arm64/include/asm/cacheflush.h |
161 | @@ -138,19 +138,10 @@ static inline void __flush_icache_all(void) |
162 | #define flush_icache_page(vma,page) do { } while (0) |
163 | |
164 | /* |
165 | - * flush_cache_vmap() is used when creating mappings (eg, via vmap, |
166 | - * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT |
167 | - * caches, since the direct-mappings of these pages may contain cached |
168 | - * data, we need to do a full cache flush to ensure that writebacks |
169 | - * don't corrupt data placed into these pages via the new mappings. |
170 | + * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache). |
171 | */ |
172 | static inline void flush_cache_vmap(unsigned long start, unsigned long end) |
173 | { |
174 | - /* |
175 | - * set_pte_at() called from vmap_pte_range() does not |
176 | - * have a DSB after cleaning the cache line. |
177 | - */ |
178 | - dsb(ish); |
179 | } |
180 | |
181 | static inline void flush_cache_vunmap(unsigned long start, unsigned long end) |
182 | diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h |
183 | index e0ccceb317d9..2a1508cdead0 100644 |
184 | --- a/arch/arm64/include/asm/pgtable.h |
185 | +++ b/arch/arm64/include/asm/pgtable.h |
186 | @@ -138,6 +138,8 @@ extern struct page *empty_zero_page; |
187 | |
188 | #define pte_valid_user(pte) \ |
189 | ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) |
190 | +#define pte_valid_not_user(pte) \ |
191 | + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) |
192 | |
193 | static inline pte_t pte_wrprotect(pte_t pte) |
194 | { |
195 | @@ -184,6 +186,15 @@ static inline pte_t pte_mkspecial(pte_t pte) |
196 | static inline void set_pte(pte_t *ptep, pte_t pte) |
197 | { |
198 | *ptep = pte; |
199 | + |
200 | + /* |
201 | + * Only if the new pte is valid and kernel, otherwise TLB maintenance |
202 | + * or update_mmu_cache() have the necessary barriers. |
203 | + */ |
204 | + if (pte_valid_not_user(pte)) { |
205 | + dsb(ishst); |
206 | + isb(); |
207 | + } |
208 | } |
209 | |
210 | extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); |
211 | @@ -303,6 +314,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) |
212 | { |
213 | *pmdp = pmd; |
214 | dsb(ishst); |
215 | + isb(); |
216 | } |
217 | |
218 | static inline void pmd_clear(pmd_t *pmdp) |
219 | @@ -333,6 +345,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud) |
220 | { |
221 | *pudp = pud; |
222 | dsb(ishst); |
223 | + isb(); |
224 | } |
225 | |
226 | static inline void pud_clear(pud_t *pudp) |
227 | diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h |
228 | index b9349c4513ea..3796ea6bb734 100644 |
229 | --- a/arch/arm64/include/asm/tlbflush.h |
230 | +++ b/arch/arm64/include/asm/tlbflush.h |
231 | @@ -122,6 +122,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end |
232 | for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) |
233 | asm("tlbi vaae1is, %0" : : "r"(addr)); |
234 | dsb(ish); |
235 | + isb(); |
236 | } |
237 | |
238 | /* |
239 | @@ -131,8 +132,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, |
240 | unsigned long addr, pte_t *ptep) |
241 | { |
242 | /* |
243 | - * set_pte() does not have a DSB, so make sure that the page table |
244 | - * write is visible. |
245 | + * set_pte() does not have a DSB for user mappings, so make sure that |
246 | + * the page table write is visible. |
247 | */ |
248 | dsb(ishst); |
249 | } |
250 | diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c |
251 | index a7fb874b595e..fe5b94078d82 100644 |
252 | --- a/arch/arm64/kernel/debug-monitors.c |
253 | +++ b/arch/arm64/kernel/debug-monitors.c |
254 | @@ -315,20 +315,20 @@ static int brk_handler(unsigned long addr, unsigned int esr, |
255 | { |
256 | siginfo_t info; |
257 | |
258 | - if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) |
259 | - return 0; |
260 | + if (user_mode(regs)) { |
261 | + info = (siginfo_t) { |
262 | + .si_signo = SIGTRAP, |
263 | + .si_errno = 0, |
264 | + .si_code = TRAP_BRKPT, |
265 | + .si_addr = (void __user *)instruction_pointer(regs), |
266 | + }; |
267 | |
268 | - if (!user_mode(regs)) |
269 | + force_sig_info(SIGTRAP, &info, current); |
270 | + } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { |
271 | + pr_warning("Unexpected kernel BRK exception at EL1\n"); |
272 | return -EFAULT; |
273 | + } |
274 | |
275 | - info = (siginfo_t) { |
276 | - .si_signo = SIGTRAP, |
277 | - .si_errno = 0, |
278 | - .si_code = TRAP_BRKPT, |
279 | - .si_addr = (void __user *)instruction_pointer(regs), |
280 | - }; |
281 | - |
282 | - force_sig_info(SIGTRAP, &info, current); |
283 | return 0; |
284 | } |
285 | |
286 | diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c |
287 | index 14db1f6e8d7f..c0aead7d1a72 100644 |
288 | --- a/arch/arm64/kernel/efi.c |
289 | +++ b/arch/arm64/kernel/efi.c |
290 | @@ -464,6 +464,8 @@ static int __init arm64_enter_virtual_mode(void) |
291 | |
292 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); |
293 | |
294 | + efi.runtime_version = efi.systab->hdr.revision; |
295 | + |
296 | return 0; |
297 | } |
298 | early_initcall(arm64_enter_virtual_mode); |
299 | diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c |
300 | index 736c17a226e9..bf0fc6b16ad9 100644 |
301 | --- a/arch/mips/math-emu/cp1emu.c |
302 | +++ b/arch/mips/math-emu/cp1emu.c |
303 | @@ -1827,7 +1827,7 @@ dcopuop: |
304 | case -1: |
305 | |
306 | if (cpu_has_mips_4_5_r) |
307 | - cbit = fpucondbit[MIPSInst_RT(ir) >> 2]; |
308 | + cbit = fpucondbit[MIPSInst_FD(ir) >> 2]; |
309 | else |
310 | cbit = FPU_CSR_COND; |
311 | if (rv.w) |
312 | diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h |
313 | index 2bc4a9409a93..de7d426a9b0c 100644 |
314 | --- a/arch/powerpc/include/uapi/asm/kvm.h |
315 | +++ b/arch/powerpc/include/uapi/asm/kvm.h |
316 | @@ -548,6 +548,7 @@ struct kvm_get_htab_header { |
317 | |
318 | #define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) |
319 | #define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) |
320 | +#define KVM_REG_PPC_LPCR_64 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5) |
321 | #define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6) |
322 | |
323 | /* Architecture compatibility level */ |
324 | diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c |
325 | index fbd01eba4473..94802d267022 100644 |
326 | --- a/arch/powerpc/kernel/eeh_pe.c |
327 | +++ b/arch/powerpc/kernel/eeh_pe.c |
328 | @@ -802,53 +802,33 @@ void eeh_pe_restore_bars(struct eeh_pe *pe) |
329 | */ |
330 | const char *eeh_pe_loc_get(struct eeh_pe *pe) |
331 | { |
332 | - struct pci_controller *hose; |
333 | struct pci_bus *bus = eeh_pe_bus_get(pe); |
334 | - struct pci_dev *pdev; |
335 | - struct device_node *dn; |
336 | - const char *loc; |
337 | + struct device_node *dn = pci_bus_to_OF_node(bus); |
338 | + const char *loc = NULL; |
339 | |
340 | - if (!bus) |
341 | - return "N/A"; |
342 | + if (!dn) |
343 | + goto out; |
344 | |
345 | /* PHB PE or root PE ? */ |
346 | if (pci_is_root_bus(bus)) { |
347 | - hose = pci_bus_to_host(bus); |
348 | - loc = of_get_property(hose->dn, |
349 | - "ibm,loc-code", NULL); |
350 | - if (loc) |
351 | - return loc; |
352 | - loc = of_get_property(hose->dn, |
353 | - "ibm,io-base-loc-code", NULL); |
354 | + loc = of_get_property(dn, "ibm,loc-code", NULL); |
355 | + if (!loc) |
356 | + loc = of_get_property(dn, "ibm,io-base-loc-code", NULL); |
357 | if (loc) |
358 | - return loc; |
359 | - |
360 | - pdev = pci_get_slot(bus, 0x0); |
361 | - } else { |
362 | - pdev = bus->self; |
363 | - } |
364 | - |
365 | - if (!pdev) { |
366 | - loc = "N/A"; |
367 | - goto out; |
368 | - } |
369 | + goto out; |
370 | |
371 | - dn = pci_device_to_OF_node(pdev); |
372 | - if (!dn) { |
373 | - loc = "N/A"; |
374 | - goto out; |
375 | + /* Check the root port */ |
376 | + dn = dn->child; |
377 | + if (!dn) |
378 | + goto out; |
379 | } |
380 | |
381 | loc = of_get_property(dn, "ibm,loc-code", NULL); |
382 | if (!loc) |
383 | loc = of_get_property(dn, "ibm,slot-location-code", NULL); |
384 | - if (!loc) |
385 | - loc = "N/A"; |
386 | |
387 | out: |
388 | - if (pci_is_root_bus(bus) && pdev) |
389 | - pci_dev_put(pdev); |
390 | - return loc; |
391 | + return loc ? loc : "N/A"; |
392 | } |
393 | |
394 | /** |
395 | diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c |
396 | index 7a12edbb61e7..0f3a19237444 100644 |
397 | --- a/arch/powerpc/kvm/book3s_hv.c |
398 | +++ b/arch/powerpc/kvm/book3s_hv.c |
399 | @@ -785,7 +785,8 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, |
400 | return 0; |
401 | } |
402 | |
403 | -static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
404 | +static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, |
405 | + bool preserve_top32) |
406 | { |
407 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
408 | u64 mask; |
409 | @@ -820,6 +821,10 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr) |
410 | mask = LPCR_DPFD | LPCR_ILE | LPCR_TC; |
411 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
412 | mask |= LPCR_AIL; |
413 | + |
414 | + /* Broken 32-bit version of LPCR must not clear top bits */ |
415 | + if (preserve_top32) |
416 | + mask &= 0xFFFFFFFF; |
417 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); |
418 | spin_unlock(&vc->lock); |
419 | } |
420 | @@ -939,6 +944,7 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
421 | *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); |
422 | break; |
423 | case KVM_REG_PPC_LPCR: |
424 | + case KVM_REG_PPC_LPCR_64: |
425 | *val = get_reg_val(id, vcpu->arch.vcore->lpcr); |
426 | break; |
427 | case KVM_REG_PPC_PPR: |
428 | @@ -1150,7 +1156,10 @@ static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
429 | ALIGN(set_reg_val(id, *val), 1UL << 24); |
430 | break; |
431 | case KVM_REG_PPC_LPCR: |
432 | - kvmppc_set_lpcr(vcpu, set_reg_val(id, *val)); |
433 | + kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); |
434 | + break; |
435 | + case KVM_REG_PPC_LPCR_64: |
436 | + kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); |
437 | break; |
438 | case KVM_REG_PPC_PPR: |
439 | vcpu->arch.ppr = set_reg_val(id, *val); |
440 | diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c |
441 | index 8eef1e519077..66b7afec250f 100644 |
442 | --- a/arch/powerpc/kvm/book3s_pr.c |
443 | +++ b/arch/powerpc/kvm/book3s_pr.c |
444 | @@ -1233,6 +1233,7 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
445 | *val = get_reg_val(id, to_book3s(vcpu)->hior); |
446 | break; |
447 | case KVM_REG_PPC_LPCR: |
448 | + case KVM_REG_PPC_LPCR_64: |
449 | /* |
450 | * We are only interested in the LPCR_ILE bit |
451 | */ |
452 | @@ -1268,6 +1269,7 @@ static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id, |
453 | to_book3s(vcpu)->hior_explicit = true; |
454 | break; |
455 | case KVM_REG_PPC_LPCR: |
456 | + case KVM_REG_PPC_LPCR_64: |
457 | kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val)); |
458 | break; |
459 | default: |
460 | diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c |
461 | index de19edeaa7a7..3136ae2f75af 100644 |
462 | --- a/arch/powerpc/platforms/powernv/pci-ioda.c |
463 | +++ b/arch/powerpc/platforms/powernv/pci-ioda.c |
464 | @@ -491,6 +491,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb, |
465 | set_dma_ops(&pdev->dev, &dma_iommu_ops); |
466 | set_iommu_table_base(&pdev->dev, &pe->tce32_table); |
467 | } |
468 | + *pdev->dev.dma_mask = dma_mask; |
469 | return 0; |
470 | } |
471 | |
472 | diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c |
473 | index 203cbf0dc101..89e23811199c 100644 |
474 | --- a/arch/powerpc/platforms/pseries/pci_dlpar.c |
475 | +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c |
476 | @@ -118,10 +118,10 @@ int remove_phb_dynamic(struct pci_controller *phb) |
477 | } |
478 | } |
479 | |
480 | - /* Unregister the bridge device from sysfs and remove the PCI bus */ |
481 | - device_unregister(b->bridge); |
482 | + /* Remove the PCI bus and unregister the bridge device from sysfs */ |
483 | phb->bus = NULL; |
484 | pci_remove_bus(b); |
485 | + device_unregister(b->bridge); |
486 | |
487 | /* Now release the IO resource */ |
488 | if (res->flags & IORESOURCE_IO) |
489 | diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c |
490 | index 37b8241ec784..f90ad8592b36 100644 |
491 | --- a/arch/s390/mm/pgtable.c |
492 | +++ b/arch/s390/mm/pgtable.c |
493 | @@ -1279,6 +1279,7 @@ static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb, |
494 | { |
495 | unsigned long next, *table, *new; |
496 | struct page *page; |
497 | + spinlock_t *ptl; |
498 | pmd_t *pmd; |
499 | |
500 | pmd = pmd_offset(pud, addr); |
501 | @@ -1296,7 +1297,7 @@ again: |
502 | if (!new) |
503 | return -ENOMEM; |
504 | |
505 | - spin_lock(&mm->page_table_lock); |
506 | + ptl = pmd_lock(mm, pmd); |
507 | if (likely((unsigned long *) pmd_deref(*pmd) == table)) { |
508 | /* Nuke pmd entry pointing to the "short" page table */ |
509 | pmdp_flush_lazy(mm, addr, pmd); |
510 | @@ -1310,7 +1311,7 @@ again: |
511 | page_table_free_rcu(tlb, table); |
512 | new = NULL; |
513 | } |
514 | - spin_unlock(&mm->page_table_lock); |
515 | + spin_unlock(ptl); |
516 | if (new) { |
517 | page_table_free_pgste(new); |
518 | goto again; |
519 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
520 | index d24887b645dc..27adfd902c6f 100644 |
521 | --- a/arch/x86/Kconfig |
522 | +++ b/arch/x86/Kconfig |
523 | @@ -1537,6 +1537,7 @@ config EFI |
524 | config EFI_STUB |
525 | bool "EFI stub support" |
526 | depends on EFI |
527 | + select RELOCATABLE |
528 | ---help--- |
529 | This kernel feature allows a bzImage to be loaded directly |
530 | by EFI firmware without the use of a bootloader. |
531 | diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h |
532 | index 49205d01b9ad..9f83c171ac18 100644 |
533 | --- a/arch/x86/include/asm/kvm_host.h |
534 | +++ b/arch/x86/include/asm/kvm_host.h |
535 | @@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) |
536 | #define KVM_REFILL_PAGES 25 |
537 | #define KVM_MAX_CPUID_ENTRIES 80 |
538 | #define KVM_NR_FIXED_MTRR_REGION 88 |
539 | -#define KVM_NR_VAR_MTRR 10 |
540 | +#define KVM_NR_VAR_MTRR 8 |
541 | |
542 | #define ASYNC_PF_PER_VCPU 64 |
543 | |
544 | diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h |
545 | index 0ec056012618..aa97a070f09f 100644 |
546 | --- a/arch/x86/include/asm/pgtable.h |
547 | +++ b/arch/x86/include/asm/pgtable.h |
548 | @@ -131,8 +131,13 @@ static inline int pte_exec(pte_t pte) |
549 | |
550 | static inline int pte_special(pte_t pte) |
551 | { |
552 | - return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) == |
553 | - (_PAGE_PRESENT|_PAGE_SPECIAL); |
554 | + /* |
555 | + * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h. |
556 | + * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 == |
557 | + * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL. |
558 | + */ |
559 | + return (pte_flags(pte) & _PAGE_SPECIAL) && |
560 | + (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE)); |
561 | } |
562 | |
563 | static inline unsigned long pte_pfn(pte_t pte) |
564 | diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel.c b/arch/x86/kernel/cpu/mcheck/mce_intel.c |
565 | index 9a316b21df8b..3bdb95ae8c43 100644 |
566 | --- a/arch/x86/kernel/cpu/mcheck/mce_intel.c |
567 | +++ b/arch/x86/kernel/cpu/mcheck/mce_intel.c |
568 | @@ -42,7 +42,7 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); |
569 | * cmci_discover_lock protects against parallel discovery attempts |
570 | * which could race against each other. |
571 | */ |
572 | -static DEFINE_SPINLOCK(cmci_discover_lock); |
573 | +static DEFINE_RAW_SPINLOCK(cmci_discover_lock); |
574 | |
575 | #define CMCI_THRESHOLD 1 |
576 | #define CMCI_POLL_INTERVAL (30 * HZ) |
577 | @@ -144,14 +144,14 @@ static void cmci_storm_disable_banks(void) |
578 | int bank; |
579 | u64 val; |
580 | |
581 | - spin_lock_irqsave(&cmci_discover_lock, flags); |
582 | + raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
583 | owned = __get_cpu_var(mce_banks_owned); |
584 | for_each_set_bit(bank, owned, MAX_NR_BANKS) { |
585 | rdmsrl(MSR_IA32_MCx_CTL2(bank), val); |
586 | val &= ~MCI_CTL2_CMCI_EN; |
587 | wrmsrl(MSR_IA32_MCx_CTL2(bank), val); |
588 | } |
589 | - spin_unlock_irqrestore(&cmci_discover_lock, flags); |
590 | + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
591 | } |
592 | |
593 | static bool cmci_storm_detect(void) |
594 | @@ -211,7 +211,7 @@ static void cmci_discover(int banks) |
595 | int i; |
596 | int bios_wrong_thresh = 0; |
597 | |
598 | - spin_lock_irqsave(&cmci_discover_lock, flags); |
599 | + raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
600 | for (i = 0; i < banks; i++) { |
601 | u64 val; |
602 | int bios_zero_thresh = 0; |
603 | @@ -266,7 +266,7 @@ static void cmci_discover(int banks) |
604 | WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); |
605 | } |
606 | } |
607 | - spin_unlock_irqrestore(&cmci_discover_lock, flags); |
608 | + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
609 | if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) { |
610 | pr_info_once( |
611 | "bios_cmci_threshold: Some banks do not have valid thresholds set\n"); |
612 | @@ -316,10 +316,10 @@ void cmci_clear(void) |
613 | |
614 | if (!cmci_supported(&banks)) |
615 | return; |
616 | - spin_lock_irqsave(&cmci_discover_lock, flags); |
617 | + raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
618 | for (i = 0; i < banks; i++) |
619 | __cmci_disable_bank(i); |
620 | - spin_unlock_irqrestore(&cmci_discover_lock, flags); |
621 | + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
622 | } |
623 | |
624 | static void cmci_rediscover_work_func(void *arg) |
625 | @@ -360,9 +360,9 @@ void cmci_disable_bank(int bank) |
626 | if (!cmci_supported(&banks)) |
627 | return; |
628 | |
629 | - spin_lock_irqsave(&cmci_discover_lock, flags); |
630 | + raw_spin_lock_irqsave(&cmci_discover_lock, flags); |
631 | __cmci_disable_bank(bank); |
632 | - spin_unlock_irqrestore(&cmci_discover_lock, flags); |
633 | + raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); |
634 | } |
635 | |
636 | static void intel_init_cmci(void) |
637 | diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c |
638 | index 2a26819bb6a8..80eab01c1a68 100644 |
639 | --- a/arch/x86/kernel/resource.c |
640 | +++ b/arch/x86/kernel/resource.c |
641 | @@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail) |
642 | |
643 | void arch_remove_reservations(struct resource *avail) |
644 | { |
645 | - /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */ |
646 | + /* |
647 | + * Trim out BIOS area (high 2MB) and E820 regions. We do not remove |
648 | + * the low 1MB unconditionally, as this area is needed for some ISA |
649 | + * cards requiring a memory range, e.g. the i82365 PCMCIA controller. |
650 | + */ |
651 | if (avail->flags & IORESOURCE_MEM) { |
652 | - if (avail->start < BIOS_END) |
653 | - avail->start = BIOS_END; |
654 | resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END); |
655 | |
656 | remove_e820_regions(avail); |
657 | diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c |
658 | index ea5b5709aa76..e1e1e80fc6a6 100644 |
659 | --- a/arch/x86/kernel/vsyscall_64.c |
660 | +++ b/arch/x86/kernel/vsyscall_64.c |
661 | @@ -81,10 +81,10 @@ static void warn_bad_vsyscall(const char *level, struct pt_regs *regs, |
662 | if (!show_unhandled_signals) |
663 | return; |
664 | |
665 | - pr_notice_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", |
666 | - level, current->comm, task_pid_nr(current), |
667 | - message, regs->ip, regs->cs, |
668 | - regs->sp, regs->ax, regs->si, regs->di); |
669 | + printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n", |
670 | + level, current->comm, task_pid_nr(current), |
671 | + message, regs->ip, regs->cs, |
672 | + regs->sp, regs->ax, regs->si, regs->di); |
673 | } |
674 | |
675 | static int addr_to_vsyscall_nr(unsigned long addr) |
676 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
677 | index e4e833d3d7d7..2d3b8d0efa0f 100644 |
678 | --- a/arch/x86/kvm/emulate.c |
679 | +++ b/arch/x86/kvm/emulate.c |
680 | @@ -2017,6 +2017,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) |
681 | { |
682 | int rc; |
683 | unsigned long cs; |
684 | + int cpl = ctxt->ops->cpl(ctxt); |
685 | |
686 | rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); |
687 | if (rc != X86EMUL_CONTINUE) |
688 | @@ -2026,6 +2027,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) |
689 | rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); |
690 | if (rc != X86EMUL_CONTINUE) |
691 | return rc; |
692 | + /* Outer-privilege level return is not implemented */ |
693 | + if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) |
694 | + return X86EMUL_UNHANDLEABLE; |
695 | rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); |
696 | return rc; |
697 | } |
698 | diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c |
699 | index bd0da433e6d7..a1ec6a50a05a 100644 |
700 | --- a/arch/x86/kvm/irq.c |
701 | +++ b/arch/x86/kvm/irq.c |
702 | @@ -108,7 +108,7 @@ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) |
703 | |
704 | vector = kvm_cpu_get_extint(v); |
705 | |
706 | - if (kvm_apic_vid_enabled(v->kvm) || vector != -1) |
707 | + if (vector != -1) |
708 | return vector; /* PIC */ |
709 | |
710 | return kvm_get_apic_interrupt(v); /* APIC */ |
711 | diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c |
712 | index 006911858174..453e5fbbb7ae 100644 |
713 | --- a/arch/x86/kvm/lapic.c |
714 | +++ b/arch/x86/kvm/lapic.c |
715 | @@ -352,25 +352,46 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic) |
716 | |
717 | static inline void apic_clear_irr(int vec, struct kvm_lapic *apic) |
718 | { |
719 | - apic->irr_pending = false; |
720 | + struct kvm_vcpu *vcpu; |
721 | + |
722 | + vcpu = apic->vcpu; |
723 | + |
724 | apic_clear_vector(vec, apic->regs + APIC_IRR); |
725 | - if (apic_search_irr(apic) != -1) |
726 | - apic->irr_pending = true; |
727 | + if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) |
728 | + /* try to update RVI */ |
729 | + kvm_make_request(KVM_REQ_EVENT, vcpu); |
730 | + else { |
731 | + vec = apic_search_irr(apic); |
732 | + apic->irr_pending = (vec != -1); |
733 | + } |
734 | } |
735 | |
736 | static inline void apic_set_isr(int vec, struct kvm_lapic *apic) |
737 | { |
738 | - /* Note that we never get here with APIC virtualization enabled. */ |
739 | + struct kvm_vcpu *vcpu; |
740 | + |
741 | + if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) |
742 | + return; |
743 | + |
744 | + vcpu = apic->vcpu; |
745 | |
746 | - if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR)) |
747 | - ++apic->isr_count; |
748 | - BUG_ON(apic->isr_count > MAX_APIC_VECTOR); |
749 | /* |
750 | - * ISR (in service register) bit is set when injecting an interrupt. |
751 | - * The highest vector is injected. Thus the latest bit set matches |
752 | - * the highest bit in ISR. |
753 | + * With APIC virtualization enabled, all caching is disabled |
754 | + * because the processor can modify ISR under the hood. Instead |
755 | + * just set SVI. |
756 | */ |
757 | - apic->highest_isr_cache = vec; |
758 | + if (unlikely(kvm_apic_vid_enabled(vcpu->kvm))) |
759 | + kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec); |
760 | + else { |
761 | + ++apic->isr_count; |
762 | + BUG_ON(apic->isr_count > MAX_APIC_VECTOR); |
763 | + /* |
764 | + * ISR (in service register) bit is set when injecting an interrupt. |
765 | + * The highest vector is injected. Thus the latest bit set matches |
766 | + * the highest bit in ISR. |
767 | + */ |
768 | + apic->highest_isr_cache = vec; |
769 | + } |
770 | } |
771 | |
772 | static inline int apic_find_highest_isr(struct kvm_lapic *apic) |
773 | @@ -1627,11 +1648,16 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu) |
774 | int vector = kvm_apic_has_interrupt(vcpu); |
775 | struct kvm_lapic *apic = vcpu->arch.apic; |
776 | |
777 | - /* Note that we never get here with APIC virtualization enabled. */ |
778 | - |
779 | if (vector == -1) |
780 | return -1; |
781 | |
782 | + /* |
783 | + * We get here even with APIC virtualization enabled, if doing |
784 | + * nested virtualization and L1 runs with the "acknowledge interrupt |
785 | + * on exit" mode. Then we cannot inject the interrupt via RVI, |
786 | + * because the process would deliver it through the IDT. |
787 | + */ |
788 | + |
789 | apic_set_isr(vector, apic); |
790 | apic_update_ppr(apic); |
791 | apic_clear_irr(vector, apic); |
792 | diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c |
793 | index a19ed92e74e4..2ae525e0d8ba 100644 |
794 | --- a/arch/x86/pci/i386.c |
795 | +++ b/arch/x86/pci/i386.c |
796 | @@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res, |
797 | return start; |
798 | if (start & 0x300) |
799 | start = (start + 0x3ff) & ~0x3ff; |
800 | + } else if (res->flags & IORESOURCE_MEM) { |
801 | + /* The low 1MB range is reserved for ISA cards */ |
802 | + if (start < BIOS_END) |
803 | + start = BIOS_END; |
804 | } |
805 | return start; |
806 | } |
807 | diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c |
808 | index ebfa9b2c871d..767c9cbb869f 100644 |
809 | --- a/arch/x86/xen/grant-table.c |
810 | +++ b/arch/x86/xen/grant-table.c |
811 | @@ -168,6 +168,7 @@ static int __init xlated_setup_gnttab_pages(void) |
812 | { |
813 | struct page **pages; |
814 | xen_pfn_t *pfns; |
815 | + void *vaddr; |
816 | int rc; |
817 | unsigned int i; |
818 | unsigned long nr_grant_frames = gnttab_max_grant_frames(); |
819 | @@ -193,21 +194,20 @@ static int __init xlated_setup_gnttab_pages(void) |
820 | for (i = 0; i < nr_grant_frames; i++) |
821 | pfns[i] = page_to_pfn(pages[i]); |
822 | |
823 | - rc = arch_gnttab_map_shared(pfns, nr_grant_frames, nr_grant_frames, |
824 | - &xen_auto_xlat_grant_frames.vaddr); |
825 | - |
826 | - if (rc) { |
827 | + vaddr = vmap(pages, nr_grant_frames, 0, PAGE_KERNEL); |
828 | + if (!vaddr) { |
829 | pr_warn("%s Couldn't map %ld pfns rc:%d\n", __func__, |
830 | nr_grant_frames, rc); |
831 | free_xenballooned_pages(nr_grant_frames, pages); |
832 | kfree(pages); |
833 | kfree(pfns); |
834 | - return rc; |
835 | + return -ENOMEM; |
836 | } |
837 | kfree(pages); |
838 | |
839 | xen_auto_xlat_grant_frames.pfn = pfns; |
840 | xen_auto_xlat_grant_frames.count = nr_grant_frames; |
841 | + xen_auto_xlat_grant_frames.vaddr = vaddr; |
842 | |
843 | return 0; |
844 | } |
845 | diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c |
846 | index 7b78f88c1707..5718b0b58b60 100644 |
847 | --- a/arch/x86/xen/time.c |
848 | +++ b/arch/x86/xen/time.c |
849 | @@ -444,7 +444,7 @@ void xen_setup_timer(int cpu) |
850 | |
851 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, |
852 | IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| |
853 | - IRQF_FORCE_RESUME, |
854 | + IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, |
855 | name, NULL); |
856 | (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); |
857 | |
858 | diff --git a/drivers/char/tpm/tpm_i2c_stm_st33.c b/drivers/char/tpm/tpm_i2c_stm_st33.c |
859 | index 3b7bf2162898..4669e3713428 100644 |
860 | --- a/drivers/char/tpm/tpm_i2c_stm_st33.c |
861 | +++ b/drivers/char/tpm/tpm_i2c_stm_st33.c |
862 | @@ -714,6 +714,7 @@ tpm_st33_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) |
863 | } |
864 | |
865 | tpm_get_timeouts(chip); |
866 | + tpm_do_selftest(chip); |
867 | |
868 | dev_info(chip->dev, "TPM I2C Initialized\n"); |
869 | return 0; |
870 | diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c |
871 | index a999f537228f..92105f3dc8e0 100644 |
872 | --- a/drivers/crypto/ux500/cryp/cryp_core.c |
873 | +++ b/drivers/crypto/ux500/cryp/cryp_core.c |
874 | @@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx) |
875 | static irqreturn_t cryp_interrupt_handler(int irq, void *param) |
876 | { |
877 | struct cryp_ctx *ctx; |
878 | - int i; |
879 | + int count; |
880 | struct cryp_device_data *device_data; |
881 | |
882 | if (param == NULL) { |
883 | @@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) |
884 | if (cryp_pending_irq_src(device_data, |
885 | CRYP_IRQ_SRC_OUTPUT_FIFO)) { |
886 | if (ctx->outlen / ctx->blocksize > 0) { |
887 | - for (i = 0; i < ctx->blocksize / 4; i++) { |
888 | - *(ctx->outdata) = readl_relaxed( |
889 | - &device_data->base->dout); |
890 | - ctx->outdata += 4; |
891 | - ctx->outlen -= 4; |
892 | - } |
893 | + count = ctx->blocksize / 4; |
894 | + |
895 | + readsl(&device_data->base->dout, ctx->outdata, count); |
896 | + ctx->outdata += count; |
897 | + ctx->outlen -= count; |
898 | |
899 | if (ctx->outlen == 0) { |
900 | cryp_disable_irq_src(device_data, |
901 | @@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param) |
902 | } else if (cryp_pending_irq_src(device_data, |
903 | CRYP_IRQ_SRC_INPUT_FIFO)) { |
904 | if (ctx->datalen / ctx->blocksize > 0) { |
905 | - for (i = 0 ; i < ctx->blocksize / 4; i++) { |
906 | - writel_relaxed(ctx->indata, |
907 | - &device_data->base->din); |
908 | - ctx->indata += 4; |
909 | - ctx->datalen -= 4; |
910 | - } |
911 | + count = ctx->blocksize / 4; |
912 | + |
913 | + writesl(&device_data->base->din, ctx->indata, count); |
914 | + |
915 | + ctx->indata += count; |
916 | + ctx->datalen -= count; |
917 | |
918 | if (ctx->datalen == 0) |
919 | cryp_disable_irq_src(device_data, |
920 | diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
921 | index f926b4caf449..56c60552abba 100644 |
922 | --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
923 | +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c |
924 | @@ -199,7 +199,7 @@ static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm) |
925 | static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, |
926 | struct page **pages, uint32_t npages, uint32_t roll) |
927 | { |
928 | - dma_addr_t pat_pa = 0; |
929 | + dma_addr_t pat_pa = 0, data_pa = 0; |
930 | uint32_t *data; |
931 | struct pat *pat; |
932 | struct refill_engine *engine = txn->engine_handle; |
933 | @@ -223,7 +223,9 @@ static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area, |
934 | .lut_id = engine->tcm->lut_id, |
935 | }; |
936 | |
937 | - data = alloc_dma(txn, 4*i, &pat->data_pa); |
938 | + data = alloc_dma(txn, 4*i, &data_pa); |
939 | + /* FIXME: what if data_pa is more than 32-bit ? */ |
940 | + pat->data_pa = data_pa; |
941 | |
942 | while (i--) { |
943 | int n = i + roll; |
944 | diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c |
945 | index 95dbce286a41..d9f5e5241af4 100644 |
946 | --- a/drivers/gpu/drm/omapdrm/omap_gem.c |
947 | +++ b/drivers/gpu/drm/omapdrm/omap_gem.c |
948 | @@ -791,7 +791,7 @@ int omap_gem_get_paddr(struct drm_gem_object *obj, |
949 | omap_obj->paddr = tiler_ssptr(block); |
950 | omap_obj->block = block; |
951 | |
952 | - DBG("got paddr: %08x", omap_obj->paddr); |
953 | + DBG("got paddr: %pad", &omap_obj->paddr); |
954 | } |
955 | |
956 | omap_obj->paddr_cnt++; |
957 | @@ -985,9 +985,9 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) |
958 | |
959 | off = drm_vma_node_start(&obj->vma_node); |
960 | |
961 | - seq_printf(m, "%08x: %2d (%2d) %08llx %08Zx (%2d) %p %4d", |
962 | + seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", |
963 | omap_obj->flags, obj->name, obj->refcount.refcount.counter, |
964 | - off, omap_obj->paddr, omap_obj->paddr_cnt, |
965 | + off, &omap_obj->paddr, omap_obj->paddr_cnt, |
966 | omap_obj->vaddr, omap_obj->roll); |
967 | |
968 | if (omap_obj->flags & OMAP_BO_TILED) { |
969 | @@ -1467,8 +1467,8 @@ void omap_gem_init(struct drm_device *dev) |
970 | entry->paddr = tiler_ssptr(block); |
971 | entry->block = block; |
972 | |
973 | - DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h, |
974 | - entry->paddr, |
975 | + DBG("%d:%d: %dx%d: paddr=%pad stride=%d", i, j, w, h, |
976 | + &entry->paddr, |
977 | usergart[i].stride_pfn << PAGE_SHIFT); |
978 | } |
979 | } |
980 | diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c |
981 | index 3cf31ee59aac..6af3398b5278 100644 |
982 | --- a/drivers/gpu/drm/omapdrm/omap_plane.c |
983 | +++ b/drivers/gpu/drm/omapdrm/omap_plane.c |
984 | @@ -142,8 +142,8 @@ static void omap_plane_pre_apply(struct omap_drm_apply *apply) |
985 | DBG("%dx%d -> %dx%d (%d)", info->width, info->height, |
986 | info->out_width, info->out_height, |
987 | info->screen_width); |
988 | - DBG("%d,%d %08x %08x", info->pos_x, info->pos_y, |
989 | - info->paddr, info->p_uv_addr); |
990 | + DBG("%d,%d %pad %pad", info->pos_x, info->pos_y, |
991 | + &info->paddr, &info->p_uv_addr); |
992 | |
993 | /* TODO: */ |
994 | ilace = false; |
995 | diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c |
996 | index c0ea66192fe0..767f2cc44bd8 100644 |
997 | --- a/drivers/gpu/drm/radeon/cik.c |
998 | +++ b/drivers/gpu/drm/radeon/cik.c |
999 | @@ -3320,6 +3320,7 @@ static void cik_gpu_init(struct radeon_device *rdev) |
1000 | (rdev->pdev->device == 0x130B) || |
1001 | (rdev->pdev->device == 0x130E) || |
1002 | (rdev->pdev->device == 0x1315) || |
1003 | + (rdev->pdev->device == 0x1318) || |
1004 | (rdev->pdev->device == 0x131B)) { |
1005 | rdev->config.cik.max_cu_per_sh = 4; |
1006 | rdev->config.cik.max_backends_per_se = 1; |
1007 | diff --git a/drivers/hid/hid-cherry.c b/drivers/hid/hid-cherry.c |
1008 | index 1bdcccc54a1d..f745d2c1325e 100644 |
1009 | --- a/drivers/hid/hid-cherry.c |
1010 | +++ b/drivers/hid/hid-cherry.c |
1011 | @@ -28,7 +28,7 @@ |
1012 | static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1013 | unsigned int *rsize) |
1014 | { |
1015 | - if (*rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { |
1016 | + if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { |
1017 | hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); |
1018 | rdesc[11] = rdesc[16] = 0xff; |
1019 | rdesc[12] = rdesc[17] = 0x03; |
1020 | diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c |
1021 | index e77696367591..b92bf01a1ae8 100644 |
1022 | --- a/drivers/hid/hid-kye.c |
1023 | +++ b/drivers/hid/hid-kye.c |
1024 | @@ -300,7 +300,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1025 | * - change the button usage range to 4-7 for the extra |
1026 | * buttons |
1027 | */ |
1028 | - if (*rsize >= 74 && |
1029 | + if (*rsize >= 75 && |
1030 | rdesc[61] == 0x05 && rdesc[62] == 0x08 && |
1031 | rdesc[63] == 0x19 && rdesc[64] == 0x08 && |
1032 | rdesc[65] == 0x29 && rdesc[66] == 0x0f && |
1033 | diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c |
1034 | index a976f48263f6..f91ff145db9a 100644 |
1035 | --- a/drivers/hid/hid-lg.c |
1036 | +++ b/drivers/hid/hid-lg.c |
1037 | @@ -345,14 +345,14 @@ static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1038 | struct usb_device_descriptor *udesc; |
1039 | __u16 bcdDevice, rev_maj, rev_min; |
1040 | |
1041 | - if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 && |
1042 | + if ((drv_data->quirks & LG_RDESC) && *rsize >= 91 && rdesc[83] == 0x26 && |
1043 | rdesc[84] == 0x8c && rdesc[85] == 0x02) { |
1044 | hid_info(hdev, |
1045 | "fixing up Logitech keyboard report descriptor\n"); |
1046 | rdesc[84] = rdesc[89] = 0x4d; |
1047 | rdesc[85] = rdesc[90] = 0x10; |
1048 | } |
1049 | - if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 50 && |
1050 | + if ((drv_data->quirks & LG_RDESC_REL_ABS) && *rsize >= 51 && |
1051 | rdesc[32] == 0x81 && rdesc[33] == 0x06 && |
1052 | rdesc[49] == 0x81 && rdesc[50] == 0x06) { |
1053 | hid_info(hdev, |
1054 | diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c |
1055 | index 486dbde2ba2d..b7ba82960c79 100644 |
1056 | --- a/drivers/hid/hid-logitech-dj.c |
1057 | +++ b/drivers/hid/hid-logitech-dj.c |
1058 | @@ -238,13 +238,6 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, |
1059 | return; |
1060 | } |
1061 | |
1062 | - if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || |
1063 | - (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { |
1064 | - dev_err(&djrcv_hdev->dev, "%s: invalid device index:%d\n", |
1065 | - __func__, dj_report->device_index); |
1066 | - return; |
1067 | - } |
1068 | - |
1069 | if (djrcv_dev->paired_dj_devices[dj_report->device_index]) { |
1070 | /* The device is already known. No need to reallocate it. */ |
1071 | dbg_hid("%s: device is already known\n", __func__); |
1072 | @@ -557,7 +550,7 @@ static int logi_dj_ll_raw_request(struct hid_device *hid, |
1073 | if (!out_buf) |
1074 | return -ENOMEM; |
1075 | |
1076 | - if (count < DJREPORT_SHORT_LENGTH - 2) |
1077 | + if (count > DJREPORT_SHORT_LENGTH - 2) |
1078 | count = DJREPORT_SHORT_LENGTH - 2; |
1079 | |
1080 | out_buf[0] = REPORT_ID_DJ_SHORT; |
1081 | @@ -690,6 +683,12 @@ static int logi_dj_raw_event(struct hid_device *hdev, |
1082 | * device (via hid_input_report() ) and return 1 so hid-core does not do |
1083 | * anything else with it. |
1084 | */ |
1085 | + if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || |
1086 | + (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { |
1087 | + dev_err(&hdev->dev, "%s: invalid device index:%d\n", |
1088 | + __func__, dj_report->device_index); |
1089 | + return false; |
1090 | + } |
1091 | |
1092 | spin_lock_irqsave(&djrcv_dev->lock, flags); |
1093 | if (dj_report->report_id == REPORT_ID_DJ_SHORT) { |
1094 | diff --git a/drivers/hid/hid-monterey.c b/drivers/hid/hid-monterey.c |
1095 | index 9e14c00eb1b6..25daf28b26bd 100644 |
1096 | --- a/drivers/hid/hid-monterey.c |
1097 | +++ b/drivers/hid/hid-monterey.c |
1098 | @@ -24,7 +24,7 @@ |
1099 | static __u8 *mr_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1100 | unsigned int *rsize) |
1101 | { |
1102 | - if (*rsize >= 30 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { |
1103 | + if (*rsize >= 31 && rdesc[29] == 0x05 && rdesc[30] == 0x09) { |
1104 | hid_info(hdev, "fixing up button/consumer in HID report descriptor\n"); |
1105 | rdesc[30] = 0x0c; |
1106 | } |
1107 | diff --git a/drivers/hid/hid-petalynx.c b/drivers/hid/hid-petalynx.c |
1108 | index 736b2502df4f..6aca4f2554bf 100644 |
1109 | --- a/drivers/hid/hid-petalynx.c |
1110 | +++ b/drivers/hid/hid-petalynx.c |
1111 | @@ -25,7 +25,7 @@ |
1112 | static __u8 *pl_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1113 | unsigned int *rsize) |
1114 | { |
1115 | - if (*rsize >= 60 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && |
1116 | + if (*rsize >= 62 && rdesc[39] == 0x2a && rdesc[40] == 0xf5 && |
1117 | rdesc[41] == 0x00 && rdesc[59] == 0x26 && |
1118 | rdesc[60] == 0xf9 && rdesc[61] == 0x00) { |
1119 | hid_info(hdev, "fixing up Petalynx Maxter Remote report descriptor\n"); |
1120 | diff --git a/drivers/hid/hid-sunplus.c b/drivers/hid/hid-sunplus.c |
1121 | index 87fc91e1c8de..91072fa54663 100644 |
1122 | --- a/drivers/hid/hid-sunplus.c |
1123 | +++ b/drivers/hid/hid-sunplus.c |
1124 | @@ -24,7 +24,7 @@ |
1125 | static __u8 *sp_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
1126 | unsigned int *rsize) |
1127 | { |
1128 | - if (*rsize >= 107 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && |
1129 | + if (*rsize >= 112 && rdesc[104] == 0x26 && rdesc[105] == 0x80 && |
1130 | rdesc[106] == 0x03) { |
1131 | hid_info(hdev, "fixing up Sunplus Wireless Desktop report descriptor\n"); |
1132 | rdesc[105] = rdesc[110] = 0x03; |
1133 | diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c |
1134 | index 7f9dc2f86b63..126516414c11 100644 |
1135 | --- a/drivers/hwmon/ads1015.c |
1136 | +++ b/drivers/hwmon/ads1015.c |
1137 | @@ -198,7 +198,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client) |
1138 | } |
1139 | |
1140 | channel = be32_to_cpup(property); |
1141 | - if (channel > ADS1015_CHANNELS) { |
1142 | + if (channel >= ADS1015_CHANNELS) { |
1143 | dev_err(&client->dev, |
1144 | "invalid channel index %d on %s\n", |
1145 | channel, node->full_name); |
1146 | @@ -212,6 +212,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client) |
1147 | dev_err(&client->dev, |
1148 | "invalid gain on %s\n", |
1149 | node->full_name); |
1150 | + return -EINVAL; |
1151 | } |
1152 | } |
1153 | |
1154 | @@ -222,6 +223,7 @@ static int ads1015_get_channels_config_of(struct i2c_client *client) |
1155 | dev_err(&client->dev, |
1156 | "invalid data_rate on %s\n", |
1157 | node->full_name); |
1158 | + return -EINVAL; |
1159 | } |
1160 | } |
1161 | |
1162 | diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c |
1163 | index 9f2be3dd28f3..8a67ec6279a4 100644 |
1164 | --- a/drivers/hwmon/amc6821.c |
1165 | +++ b/drivers/hwmon/amc6821.c |
1166 | @@ -360,11 +360,13 @@ static ssize_t set_pwm1_enable( |
1167 | if (config) |
1168 | return config; |
1169 | |
1170 | + mutex_lock(&data->update_lock); |
1171 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF1); |
1172 | if (config < 0) { |
1173 | dev_err(&client->dev, |
1174 | "Error reading configuration register, aborting.\n"); |
1175 | - return config; |
1176 | + count = config; |
1177 | + goto unlock; |
1178 | } |
1179 | |
1180 | switch (val) { |
1181 | @@ -381,14 +383,15 @@ static ssize_t set_pwm1_enable( |
1182 | config |= AMC6821_CONF1_FDRC1; |
1183 | break; |
1184 | default: |
1185 | - return -EINVAL; |
1186 | + count = -EINVAL; |
1187 | + goto unlock; |
1188 | } |
1189 | - mutex_lock(&data->update_lock); |
1190 | if (i2c_smbus_write_byte_data(client, AMC6821_REG_CONF1, config)) { |
1191 | dev_err(&client->dev, |
1192 | "Configuration register write error, aborting.\n"); |
1193 | count = -EIO; |
1194 | } |
1195 | +unlock: |
1196 | mutex_unlock(&data->update_lock); |
1197 | return count; |
1198 | } |
1199 | @@ -493,8 +496,9 @@ static ssize_t set_temp_auto_point_temp( |
1200 | return -EINVAL; |
1201 | } |
1202 | |
1203 | - data->valid = 0; |
1204 | mutex_lock(&data->update_lock); |
1205 | + data->valid = 0; |
1206 | + |
1207 | switch (ix) { |
1208 | case 0: |
1209 | ptemp[0] = clamp_val(val / 1000, 0, |
1210 | @@ -658,13 +662,14 @@ static ssize_t set_fan1_div( |
1211 | if (config) |
1212 | return config; |
1213 | |
1214 | + mutex_lock(&data->update_lock); |
1215 | config = i2c_smbus_read_byte_data(client, AMC6821_REG_CONF4); |
1216 | if (config < 0) { |
1217 | dev_err(&client->dev, |
1218 | "Error reading configuration register, aborting.\n"); |
1219 | - return config; |
1220 | + count = config; |
1221 | + goto EXIT; |
1222 | } |
1223 | - mutex_lock(&data->update_lock); |
1224 | switch (val) { |
1225 | case 2: |
1226 | config &= ~AMC6821_CONF4_PSPR; |
1227 | diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c |
1228 | index 4ae3fff13f44..bea0a344fab5 100644 |
1229 | --- a/drivers/hwmon/dme1737.c |
1230 | +++ b/drivers/hwmon/dme1737.c |
1231 | @@ -247,8 +247,8 @@ struct dme1737_data { |
1232 | u8 pwm_acz[3]; |
1233 | u8 pwm_freq[6]; |
1234 | u8 pwm_rr[2]; |
1235 | - u8 zone_low[3]; |
1236 | - u8 zone_abs[3]; |
1237 | + s8 zone_low[3]; |
1238 | + s8 zone_abs[3]; |
1239 | u8 zone_hyst[2]; |
1240 | u32 alarms; |
1241 | }; |
1242 | @@ -277,7 +277,7 @@ static inline int IN_FROM_REG(int reg, int nominal, int res) |
1243 | return (reg * nominal + (3 << (res - 3))) / (3 << (res - 2)); |
1244 | } |
1245 | |
1246 | -static inline int IN_TO_REG(int val, int nominal) |
1247 | +static inline int IN_TO_REG(long val, int nominal) |
1248 | { |
1249 | return clamp_val((val * 192 + nominal / 2) / nominal, 0, 255); |
1250 | } |
1251 | @@ -293,7 +293,7 @@ static inline int TEMP_FROM_REG(int reg, int res) |
1252 | return (reg * 1000) >> (res - 8); |
1253 | } |
1254 | |
1255 | -static inline int TEMP_TO_REG(int val) |
1256 | +static inline int TEMP_TO_REG(long val) |
1257 | { |
1258 | return clamp_val((val < 0 ? val - 500 : val + 500) / 1000, -128, 127); |
1259 | } |
1260 | @@ -308,7 +308,7 @@ static inline int TEMP_RANGE_FROM_REG(int reg) |
1261 | return TEMP_RANGE[(reg >> 4) & 0x0f]; |
1262 | } |
1263 | |
1264 | -static int TEMP_RANGE_TO_REG(int val, int reg) |
1265 | +static int TEMP_RANGE_TO_REG(long val, int reg) |
1266 | { |
1267 | int i; |
1268 | |
1269 | @@ -331,7 +331,7 @@ static inline int TEMP_HYST_FROM_REG(int reg, int ix) |
1270 | return (((ix == 1) ? reg : reg >> 4) & 0x0f) * 1000; |
1271 | } |
1272 | |
1273 | -static inline int TEMP_HYST_TO_REG(int val, int ix, int reg) |
1274 | +static inline int TEMP_HYST_TO_REG(long val, int ix, int reg) |
1275 | { |
1276 | int hyst = clamp_val((val + 500) / 1000, 0, 15); |
1277 | |
1278 | @@ -347,7 +347,7 @@ static inline int FAN_FROM_REG(int reg, int tpc) |
1279 | return (reg == 0 || reg == 0xffff) ? 0 : 90000 * 60 / reg; |
1280 | } |
1281 | |
1282 | -static inline int FAN_TO_REG(int val, int tpc) |
1283 | +static inline int FAN_TO_REG(long val, int tpc) |
1284 | { |
1285 | if (tpc) { |
1286 | return clamp_val(val / tpc, 0, 0xffff); |
1287 | @@ -379,7 +379,7 @@ static inline int FAN_TYPE_FROM_REG(int reg) |
1288 | return (edge > 0) ? 1 << (edge - 1) : 0; |
1289 | } |
1290 | |
1291 | -static inline int FAN_TYPE_TO_REG(int val, int reg) |
1292 | +static inline int FAN_TYPE_TO_REG(long val, int reg) |
1293 | { |
1294 | int edge = (val == 4) ? 3 : val; |
1295 | |
1296 | @@ -402,7 +402,7 @@ static int FAN_MAX_FROM_REG(int reg) |
1297 | return 1000 + i * 500; |
1298 | } |
1299 | |
1300 | -static int FAN_MAX_TO_REG(int val) |
1301 | +static int FAN_MAX_TO_REG(long val) |
1302 | { |
1303 | int i; |
1304 | |
1305 | @@ -460,7 +460,7 @@ static inline int PWM_ACZ_FROM_REG(int reg) |
1306 | return acz[(reg >> 5) & 0x07]; |
1307 | } |
1308 | |
1309 | -static inline int PWM_ACZ_TO_REG(int val, int reg) |
1310 | +static inline int PWM_ACZ_TO_REG(long val, int reg) |
1311 | { |
1312 | int acz = (val == 4) ? 2 : val - 1; |
1313 | |
1314 | @@ -476,7 +476,7 @@ static inline int PWM_FREQ_FROM_REG(int reg) |
1315 | return PWM_FREQ[reg & 0x0f]; |
1316 | } |
1317 | |
1318 | -static int PWM_FREQ_TO_REG(int val, int reg) |
1319 | +static int PWM_FREQ_TO_REG(long val, int reg) |
1320 | { |
1321 | int i; |
1322 | |
1323 | @@ -510,7 +510,7 @@ static inline int PWM_RR_FROM_REG(int reg, int ix) |
1324 | return (rr & 0x08) ? PWM_RR[rr & 0x07] : 0; |
1325 | } |
1326 | |
1327 | -static int PWM_RR_TO_REG(int val, int ix, int reg) |
1328 | +static int PWM_RR_TO_REG(long val, int ix, int reg) |
1329 | { |
1330 | int i; |
1331 | |
1332 | @@ -528,7 +528,7 @@ static inline int PWM_RR_EN_FROM_REG(int reg, int ix) |
1333 | return PWM_RR_FROM_REG(reg, ix) ? 1 : 0; |
1334 | } |
1335 | |
1336 | -static inline int PWM_RR_EN_TO_REG(int val, int ix, int reg) |
1337 | +static inline int PWM_RR_EN_TO_REG(long val, int ix, int reg) |
1338 | { |
1339 | int en = (ix == 1) ? 0x80 : 0x08; |
1340 | |
1341 | @@ -1481,13 +1481,16 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr, |
1342 | const char *buf, size_t count) |
1343 | { |
1344 | struct dme1737_data *data = dev_get_drvdata(dev); |
1345 | - long val; |
1346 | + unsigned long val; |
1347 | int err; |
1348 | |
1349 | - err = kstrtol(buf, 10, &val); |
1350 | + err = kstrtoul(buf, 10, &val); |
1351 | if (err) |
1352 | return err; |
1353 | |
1354 | + if (val > 255) |
1355 | + return -EINVAL; |
1356 | + |
1357 | data->vrm = val; |
1358 | return count; |
1359 | } |
1360 | diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c |
1361 | index 2566c43dd1e9..d10aa7b46cca 100644 |
1362 | --- a/drivers/hwmon/gpio-fan.c |
1363 | +++ b/drivers/hwmon/gpio-fan.c |
1364 | @@ -173,7 +173,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data) |
1365 | return -ENODEV; |
1366 | } |
1367 | |
1368 | -static int rpm_to_speed_index(struct gpio_fan_data *fan_data, int rpm) |
1369 | +static int rpm_to_speed_index(struct gpio_fan_data *fan_data, unsigned long rpm) |
1370 | { |
1371 | struct gpio_fan_speed *speed = fan_data->speed; |
1372 | int i; |
1373 | diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c |
1374 | index 9efadfc851bc..c1eb464f0fd0 100644 |
1375 | --- a/drivers/hwmon/lm78.c |
1376 | +++ b/drivers/hwmon/lm78.c |
1377 | @@ -108,7 +108,7 @@ static inline int FAN_FROM_REG(u8 val, int div) |
1378 | * TEMP: mC (-128C to +127C) |
1379 | * REG: 1C/bit, two's complement |
1380 | */ |
1381 | -static inline s8 TEMP_TO_REG(int val) |
1382 | +static inline s8 TEMP_TO_REG(long val) |
1383 | { |
1384 | int nval = clamp_val(val, -128000, 127000) ; |
1385 | return nval < 0 ? (nval - 500) / 1000 : (nval + 500) / 1000; |
1386 | diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c |
1387 | index b0129a54e1a6..ef627ea71cc8 100644 |
1388 | --- a/drivers/hwmon/lm85.c |
1389 | +++ b/drivers/hwmon/lm85.c |
1390 | @@ -155,7 +155,7 @@ static inline u16 FAN_TO_REG(unsigned long val) |
1391 | |
1392 | /* Temperature is reported in .001 degC increments */ |
1393 | #define TEMP_TO_REG(val) \ |
1394 | - clamp_val(SCALE(val, 1000, 1), -127, 127) |
1395 | + DIV_ROUND_CLOSEST(clamp_val((val), -127000, 127000), 1000) |
1396 | #define TEMPEXT_FROM_REG(val, ext) \ |
1397 | SCALE(((val) << 4) + (ext), 16, 1000) |
1398 | #define TEMP_FROM_REG(val) ((val) * 1000) |
1399 | @@ -189,7 +189,7 @@ static const int lm85_range_map[] = { |
1400 | 13300, 16000, 20000, 26600, 32000, 40000, 53300, 80000 |
1401 | }; |
1402 | |
1403 | -static int RANGE_TO_REG(int range) |
1404 | +static int RANGE_TO_REG(long range) |
1405 | { |
1406 | int i; |
1407 | |
1408 | @@ -211,7 +211,7 @@ static const int adm1027_freq_map[8] = { /* 1 Hz */ |
1409 | 11, 15, 22, 29, 35, 44, 59, 88 |
1410 | }; |
1411 | |
1412 | -static int FREQ_TO_REG(const int *map, int freq) |
1413 | +static int FREQ_TO_REG(const int *map, unsigned long freq) |
1414 | { |
1415 | int i; |
1416 | |
1417 | @@ -460,6 +460,9 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr, |
1418 | if (err) |
1419 | return err; |
1420 | |
1421 | + if (val > 255) |
1422 | + return -EINVAL; |
1423 | + |
1424 | data->vrm = val; |
1425 | return count; |
1426 | } |
1427 | diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c |
1428 | index d2060e245ff5..cfaf70b9cba7 100644 |
1429 | --- a/drivers/hwmon/lm92.c |
1430 | +++ b/drivers/hwmon/lm92.c |
1431 | @@ -74,12 +74,9 @@ static inline int TEMP_FROM_REG(s16 reg) |
1432 | return reg / 8 * 625 / 10; |
1433 | } |
1434 | |
1435 | -static inline s16 TEMP_TO_REG(int val) |
1436 | +static inline s16 TEMP_TO_REG(long val) |
1437 | { |
1438 | - if (val <= -60000) |
1439 | - return -60000 * 10 / 625 * 8; |
1440 | - if (val >= 160000) |
1441 | - return 160000 * 10 / 625 * 8; |
1442 | + val = clamp_val(val, -60000, 160000); |
1443 | return val * 10 / 625 * 8; |
1444 | } |
1445 | |
1446 | @@ -206,10 +203,12 @@ static ssize_t set_temp_hyst(struct device *dev, |
1447 | if (err) |
1448 | return err; |
1449 | |
1450 | + val = clamp_val(val, -120000, 220000); |
1451 | mutex_lock(&data->update_lock); |
1452 | - data->temp[t_hyst] = TEMP_FROM_REG(data->temp[attr->index]) - val; |
1453 | + data->temp[t_hyst] = |
1454 | + TEMP_TO_REG(TEMP_FROM_REG(data->temp[attr->index]) - val); |
1455 | i2c_smbus_write_word_swapped(client, LM92_REG_TEMP_HYST, |
1456 | - TEMP_TO_REG(data->temp[t_hyst])); |
1457 | + data->temp[t_hyst]); |
1458 | mutex_unlock(&data->update_lock); |
1459 | return count; |
1460 | } |
1461 | diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c |
1462 | index 3532026e25da..bf1d7893d51c 100644 |
1463 | --- a/drivers/hwmon/sis5595.c |
1464 | +++ b/drivers/hwmon/sis5595.c |
1465 | @@ -159,7 +159,7 @@ static inline int TEMP_FROM_REG(s8 val) |
1466 | { |
1467 | return val * 830 + 52120; |
1468 | } |
1469 | -static inline s8 TEMP_TO_REG(int val) |
1470 | +static inline s8 TEMP_TO_REG(long val) |
1471 | { |
1472 | int nval = clamp_val(val, -54120, 157530) ; |
1473 | return nval < 0 ? (nval - 5212 - 415) / 830 : (nval - 5212 + 415) / 830; |
1474 | diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c |
1475 | index e95f9ba96790..83c989382be9 100644 |
1476 | --- a/drivers/i2c/busses/i2c-at91.c |
1477 | +++ b/drivers/i2c/busses/i2c-at91.c |
1478 | @@ -210,7 +210,7 @@ static void at91_twi_write_data_dma_callback(void *data) |
1479 | struct at91_twi_dev *dev = (struct at91_twi_dev *)data; |
1480 | |
1481 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), |
1482 | - dev->buf_len, DMA_MEM_TO_DEV); |
1483 | + dev->buf_len, DMA_TO_DEVICE); |
1484 | |
1485 | at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); |
1486 | } |
1487 | @@ -289,7 +289,7 @@ static void at91_twi_read_data_dma_callback(void *data) |
1488 | struct at91_twi_dev *dev = (struct at91_twi_dev *)data; |
1489 | |
1490 | dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg), |
1491 | - dev->buf_len, DMA_DEV_TO_MEM); |
1492 | + dev->buf_len, DMA_FROM_DEVICE); |
1493 | |
1494 | /* The last two bytes have to be read without using dma */ |
1495 | dev->buf += dev->buf_len - 2; |
1496 | diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c |
1497 | index a9791509966a..69e11853e8bf 100644 |
1498 | --- a/drivers/i2c/busses/i2c-rk3x.c |
1499 | +++ b/drivers/i2c/busses/i2c-rk3x.c |
1500 | @@ -399,7 +399,7 @@ static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id) |
1501 | } |
1502 | |
1503 | /* is there anything left to handle? */ |
1504 | - if (unlikely(ipd == 0)) |
1505 | + if (unlikely((ipd & REG_INT_ALL) == 0)) |
1506 | goto out; |
1507 | |
1508 | switch (i2c->state) { |
1509 | diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c |
1510 | index 59d20c599b16..2da05c0e113d 100644 |
1511 | --- a/drivers/misc/mei/client.c |
1512 | +++ b/drivers/misc/mei/client.c |
1513 | @@ -459,7 +459,7 @@ int mei_cl_disconnect(struct mei_cl *cl) |
1514 | { |
1515 | struct mei_device *dev; |
1516 | struct mei_cl_cb *cb; |
1517 | - int rets, err; |
1518 | + int rets; |
1519 | |
1520 | if (WARN_ON(!cl || !cl->dev)) |
1521 | return -ENODEV; |
1522 | @@ -491,6 +491,7 @@ int mei_cl_disconnect(struct mei_cl *cl) |
1523 | cl_err(dev, cl, "failed to disconnect.\n"); |
1524 | goto free; |
1525 | } |
1526 | + cl->timer_count = MEI_CONNECT_TIMEOUT; |
1527 | mdelay(10); /* Wait for hardware disconnection ready */ |
1528 | list_add_tail(&cb->list, &dev->ctrl_rd_list.list); |
1529 | } else { |
1530 | @@ -500,23 +501,18 @@ int mei_cl_disconnect(struct mei_cl *cl) |
1531 | } |
1532 | mutex_unlock(&dev->device_lock); |
1533 | |
1534 | - err = wait_event_timeout(dev->wait_recvd_msg, |
1535 | + wait_event_timeout(dev->wait_recvd_msg, |
1536 | MEI_FILE_DISCONNECTED == cl->state, |
1537 | mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT)); |
1538 | |
1539 | mutex_lock(&dev->device_lock); |
1540 | + |
1541 | if (MEI_FILE_DISCONNECTED == cl->state) { |
1542 | rets = 0; |
1543 | cl_dbg(dev, cl, "successfully disconnected from FW client.\n"); |
1544 | } else { |
1545 | - rets = -ENODEV; |
1546 | - if (MEI_FILE_DISCONNECTED != cl->state) |
1547 | - cl_err(dev, cl, "wrong status client disconnect.\n"); |
1548 | - |
1549 | - if (err) |
1550 | - cl_dbg(dev, cl, "wait failed disconnect err=%d\n", err); |
1551 | - |
1552 | - cl_err(dev, cl, "failed to disconnect from FW client.\n"); |
1553 | + cl_dbg(dev, cl, "timeout on disconnect from FW client.\n"); |
1554 | + rets = -ETIME; |
1555 | } |
1556 | |
1557 | mei_io_list_flush(&dev->ctrl_rd_list, cl); |
1558 | @@ -605,6 +601,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) |
1559 | cl->timer_count = MEI_CONNECT_TIMEOUT; |
1560 | list_add_tail(&cb->list, &dev->ctrl_rd_list.list); |
1561 | } else { |
1562 | + cl->state = MEI_FILE_INITIALIZING; |
1563 | list_add_tail(&cb->list, &dev->ctrl_wr_list.list); |
1564 | } |
1565 | |
1566 | @@ -616,6 +613,7 @@ int mei_cl_connect(struct mei_cl *cl, struct file *file) |
1567 | mutex_lock(&dev->device_lock); |
1568 | |
1569 | if (cl->state != MEI_FILE_CONNECTED) { |
1570 | + cl->state = MEI_FILE_DISCONNECTED; |
1571 | /* something went really wrong */ |
1572 | if (!cl->status) |
1573 | cl->status = -EFAULT; |
1574 | diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c |
1575 | index 3095fc514a65..5ccc23bc7690 100644 |
1576 | --- a/drivers/misc/mei/nfc.c |
1577 | +++ b/drivers/misc/mei/nfc.c |
1578 | @@ -342,9 +342,10 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) |
1579 | ndev = (struct mei_nfc_dev *) cldev->priv_data; |
1580 | dev = ndev->cl->dev; |
1581 | |
1582 | + err = -ENOMEM; |
1583 | mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL); |
1584 | if (!mei_buf) |
1585 | - return -ENOMEM; |
1586 | + goto out; |
1587 | |
1588 | hdr = (struct mei_nfc_hci_hdr *) mei_buf; |
1589 | hdr->cmd = MEI_NFC_CMD_HCI_SEND; |
1590 | @@ -354,12 +355,9 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) |
1591 | hdr->data_size = length; |
1592 | |
1593 | memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length); |
1594 | - |
1595 | err = __mei_cl_send(ndev->cl, mei_buf, length + MEI_NFC_HEADER_SIZE); |
1596 | if (err < 0) |
1597 | - return err; |
1598 | - |
1599 | - kfree(mei_buf); |
1600 | + goto out; |
1601 | |
1602 | if (!wait_event_interruptible_timeout(ndev->send_wq, |
1603 | ndev->recv_req_id == ndev->req_id, HZ)) { |
1604 | @@ -368,7 +366,8 @@ static int mei_nfc_send(struct mei_cl_device *cldev, u8 *buf, size_t length) |
1605 | } else { |
1606 | ndev->req_id++; |
1607 | } |
1608 | - |
1609 | +out: |
1610 | + kfree(mei_buf); |
1611 | return err; |
1612 | } |
1613 | |
1614 | diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c |
1615 | index 1b46c64a649f..4b821b4360e1 100644 |
1616 | --- a/drivers/misc/mei/pci-me.c |
1617 | +++ b/drivers/misc/mei/pci-me.c |
1618 | @@ -369,7 +369,7 @@ static int mei_me_pm_runtime_idle(struct device *device) |
1619 | if (!dev) |
1620 | return -ENODEV; |
1621 | if (mei_write_is_idle(dev)) |
1622 | - pm_schedule_suspend(device, MEI_ME_RPM_TIMEOUT * 2); |
1623 | + pm_runtime_autosuspend(device); |
1624 | |
1625 | return -EBUSY; |
1626 | } |
1627 | diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c |
1628 | index 2343c6236df9..32fef4d5b0b6 100644 |
1629 | --- a/drivers/misc/mei/pci-txe.c |
1630 | +++ b/drivers/misc/mei/pci-txe.c |
1631 | @@ -306,7 +306,7 @@ static int mei_txe_pm_runtime_idle(struct device *device) |
1632 | if (!dev) |
1633 | return -ENODEV; |
1634 | if (mei_write_is_idle(dev)) |
1635 | - pm_schedule_suspend(device, MEI_TXI_RPM_TIMEOUT * 2); |
1636 | + pm_runtime_autosuspend(device); |
1637 | |
1638 | return -EBUSY; |
1639 | } |
1640 | diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c |
1641 | index 7ad463e9741c..249ab80cbb45 100644 |
1642 | --- a/drivers/mmc/host/mmci.c |
1643 | +++ b/drivers/mmc/host/mmci.c |
1644 | @@ -834,6 +834,10 @@ static void |
1645 | mmci_data_irq(struct mmci_host *host, struct mmc_data *data, |
1646 | unsigned int status) |
1647 | { |
1648 | + /* Make sure we have data to handle */ |
1649 | + if (!data) |
1650 | + return; |
1651 | + |
1652 | /* First check for errors */ |
1653 | if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| |
1654 | MCI_TXUNDERRUN|MCI_RXOVERRUN)) { |
1655 | @@ -902,9 +906,17 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, |
1656 | unsigned int status) |
1657 | { |
1658 | void __iomem *base = host->base; |
1659 | - bool sbc = (cmd == host->mrq->sbc); |
1660 | - bool busy_resp = host->variant->busy_detect && |
1661 | - (cmd->flags & MMC_RSP_BUSY); |
1662 | + bool sbc, busy_resp; |
1663 | + |
1664 | + if (!cmd) |
1665 | + return; |
1666 | + |
1667 | + sbc = (cmd == host->mrq->sbc); |
1668 | + busy_resp = host->variant->busy_detect && (cmd->flags & MMC_RSP_BUSY); |
1669 | + |
1670 | + if (!((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| |
1671 | + MCI_CMDSENT|MCI_CMDRESPEND))) |
1672 | + return; |
1673 | |
1674 | /* Check if we need to wait for busy completion. */ |
1675 | if (host->busy_status && (status & MCI_ST_CARDBUSY)) |
1676 | @@ -1132,9 +1144,6 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) |
1677 | spin_lock(&host->lock); |
1678 | |
1679 | do { |
1680 | - struct mmc_command *cmd; |
1681 | - struct mmc_data *data; |
1682 | - |
1683 | status = readl(host->base + MMCISTATUS); |
1684 | |
1685 | if (host->singleirq) { |
1686 | @@ -1154,16 +1163,8 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) |
1687 | |
1688 | dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); |
1689 | |
1690 | - cmd = host->cmd; |
1691 | - if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| |
1692 | - MCI_CMDSENT|MCI_CMDRESPEND) && cmd) |
1693 | - mmci_cmd_irq(host, cmd, status); |
1694 | - |
1695 | - data = host->data; |
1696 | - if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| |
1697 | - MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| |
1698 | - MCI_DATABLOCKEND) && data) |
1699 | - mmci_data_irq(host, data, status); |
1700 | + mmci_cmd_irq(host, host->cmd, status); |
1701 | + mmci_data_irq(host, host->data, status); |
1702 | |
1703 | /* Don't poll for busy completion in irq context. */ |
1704 | if (host->busy_status) |
1705 | diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c |
1706 | index 42914e04d110..056841651a80 100644 |
1707 | --- a/drivers/pci/hotplug/pciehp_hpc.c |
1708 | +++ b/drivers/pci/hotplug/pciehp_hpc.c |
1709 | @@ -794,7 +794,7 @@ struct controller *pcie_init(struct pcie_device *dev) |
1710 | pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, |
1711 | PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD | |
1712 | PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC | |
1713 | - PCI_EXP_SLTSTA_CC); |
1714 | + PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC); |
1715 | |
1716 | /* Disable software notification */ |
1717 | pcie_disable_notification(ctrl); |
1718 | diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c |
1719 | index a3fbe2012ea3..2ab1b47c7651 100644 |
1720 | --- a/drivers/pci/pci-label.c |
1721 | +++ b/drivers/pci/pci-label.c |
1722 | @@ -161,8 +161,8 @@ enum acpi_attr_enum { |
1723 | static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf) |
1724 | { |
1725 | int len; |
1726 | - len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer, |
1727 | - obj->string.length, |
1728 | + len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer, |
1729 | + obj->buffer.length, |
1730 | UTF16_LITTLE_ENDIAN, |
1731 | buf, PAGE_SIZE); |
1732 | buf[len] = '\n'; |
1733 | @@ -187,16 +187,22 @@ static int dsm_get_label(struct device *dev, char *buf, |
1734 | tmp = obj->package.elements; |
1735 | if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 && |
1736 | tmp[0].type == ACPI_TYPE_INTEGER && |
1737 | - tmp[1].type == ACPI_TYPE_STRING) { |
1738 | + (tmp[1].type == ACPI_TYPE_STRING || |
1739 | + tmp[1].type == ACPI_TYPE_BUFFER)) { |
1740 | /* |
1741 | * The second string element is optional even when |
1742 | * this _DSM is implemented; when not implemented, |
1743 | * this entry must return a null string. |
1744 | */ |
1745 | - if (attr == ACPI_ATTR_INDEX_SHOW) |
1746 | + if (attr == ACPI_ATTR_INDEX_SHOW) { |
1747 | scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value); |
1748 | - else if (attr == ACPI_ATTR_LABEL_SHOW) |
1749 | - dsm_label_utf16s_to_utf8s(tmp + 1, buf); |
1750 | + } else if (attr == ACPI_ATTR_LABEL_SHOW) { |
1751 | + if (tmp[1].type == ACPI_TYPE_STRING) |
1752 | + scnprintf(buf, PAGE_SIZE, "%s\n", |
1753 | + tmp[1].string.pointer); |
1754 | + else if (tmp[1].type == ACPI_TYPE_BUFFER) |
1755 | + dsm_label_utf16s_to_utf8s(tmp + 1, buf); |
1756 | + } |
1757 | len = strlen(buf) > 0 ? strlen(buf) : -1; |
1758 | } |
1759 | |
1760 | diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c |
1761 | index 1c8592b0e146..81d49d3ab221 100644 |
1762 | --- a/drivers/pci/pci.c |
1763 | +++ b/drivers/pci/pci.c |
1764 | @@ -839,12 +839,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) |
1765 | |
1766 | if (!__pci_complete_power_transition(dev, state)) |
1767 | error = 0; |
1768 | - /* |
1769 | - * When aspm_policy is "powersave" this call ensures |
1770 | - * that ASPM is configured. |
1771 | - */ |
1772 | - if (!error && dev->bus->self) |
1773 | - pcie_aspm_powersave_config_link(dev->bus->self); |
1774 | |
1775 | return error; |
1776 | } |
1777 | @@ -1195,12 +1189,18 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars) |
1778 | static int do_pci_enable_device(struct pci_dev *dev, int bars) |
1779 | { |
1780 | int err; |
1781 | + struct pci_dev *bridge; |
1782 | u16 cmd; |
1783 | u8 pin; |
1784 | |
1785 | err = pci_set_power_state(dev, PCI_D0); |
1786 | if (err < 0 && err != -EIO) |
1787 | return err; |
1788 | + |
1789 | + bridge = pci_upstream_bridge(dev); |
1790 | + if (bridge) |
1791 | + pcie_aspm_powersave_config_link(bridge); |
1792 | + |
1793 | err = pcibios_enable_device(dev, bars); |
1794 | if (err < 0) |
1795 | return err; |
1796 | diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c |
1797 | index caed1ce6facd..481c4e18693a 100644 |
1798 | --- a/drivers/pci/setup-res.c |
1799 | +++ b/drivers/pci/setup-res.c |
1800 | @@ -320,9 +320,11 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz |
1801 | resource_size_t min_align) |
1802 | { |
1803 | struct resource *res = dev->resource + resno; |
1804 | + unsigned long flags; |
1805 | resource_size_t new_size; |
1806 | int ret; |
1807 | |
1808 | + flags = res->flags; |
1809 | res->flags |= IORESOURCE_UNSET; |
1810 | if (!res->parent) { |
1811 | dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n", |
1812 | @@ -339,7 +341,12 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz |
1813 | dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res); |
1814 | if (resno < PCI_BRIDGE_RESOURCES) |
1815 | pci_update_resource(dev, resno); |
1816 | + } else { |
1817 | + res->flags = flags; |
1818 | + dev_info(&dev->dev, "BAR %d: %pR (failed to expand by %#llx)\n", |
1819 | + resno, res, (unsigned long long) addsize); |
1820 | } |
1821 | + |
1822 | return ret; |
1823 | } |
1824 | |
1825 | diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c |
1826 | index 31184b35370f..489e83b6b5e1 100644 |
1827 | --- a/drivers/scsi/hpsa.c |
1828 | +++ b/drivers/scsi/hpsa.c |
1829 | @@ -5092,7 +5092,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) |
1830 | } |
1831 | if (ioc->Request.Type.Direction & XFER_WRITE) { |
1832 | if (copy_from_user(buff[sg_used], data_ptr, sz)) { |
1833 | - status = -ENOMEM; |
1834 | + status = -EFAULT; |
1835 | goto cleanup1; |
1836 | } |
1837 | } else |
1838 | @@ -6365,9 +6365,9 @@ static inline void hpsa_set_driver_support_bits(struct ctlr_info *h) |
1839 | { |
1840 | u32 driver_support; |
1841 | |
1842 | -#ifdef CONFIG_X86 |
1843 | - /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
1844 | driver_support = readl(&(h->cfgtable->driver_support)); |
1845 | + /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
1846 | +#ifdef CONFIG_X86 |
1847 | driver_support |= ENABLE_SCSI_PREFETCH; |
1848 | #endif |
1849 | driver_support |= ENABLE_UNIT_ATTN; |
1850 | diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c |
1851 | index 08356b6955a4..2d36eac6889c 100644 |
1852 | --- a/drivers/staging/et131x/et131x.c |
1853 | +++ b/drivers/staging/et131x/et131x.c |
1854 | @@ -1423,22 +1423,16 @@ static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) |
1855 | * @reg: the register to read |
1856 | * @value: 16-bit value to write |
1857 | */ |
1858 | -static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) |
1859 | +static int et131x_mii_write(struct et131x_adapter *adapter, u8 addr, u8 reg, |
1860 | + u16 value) |
1861 | { |
1862 | struct mac_regs __iomem *mac = &adapter->regs->mac; |
1863 | - struct phy_device *phydev = adapter->phydev; |
1864 | int status = 0; |
1865 | - u8 addr; |
1866 | u32 delay = 0; |
1867 | u32 mii_addr; |
1868 | u32 mii_cmd; |
1869 | u32 mii_indicator; |
1870 | |
1871 | - if (!phydev) |
1872 | - return -EIO; |
1873 | - |
1874 | - addr = phydev->addr; |
1875 | - |
1876 | /* Save a local copy of the registers we are dealing with so we can |
1877 | * set them back |
1878 | */ |
1879 | @@ -1633,17 +1627,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, |
1880 | struct net_device *netdev = bus->priv; |
1881 | struct et131x_adapter *adapter = netdev_priv(netdev); |
1882 | |
1883 | - return et131x_mii_write(adapter, reg, value); |
1884 | -} |
1885 | - |
1886 | -static int et131x_mdio_reset(struct mii_bus *bus) |
1887 | -{ |
1888 | - struct net_device *netdev = bus->priv; |
1889 | - struct et131x_adapter *adapter = netdev_priv(netdev); |
1890 | - |
1891 | - et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); |
1892 | - |
1893 | - return 0; |
1894 | + return et131x_mii_write(adapter, phy_addr, reg, value); |
1895 | } |
1896 | |
1897 | /* et1310_phy_power_switch - PHY power control |
1898 | @@ -1658,18 +1642,20 @@ static int et131x_mdio_reset(struct mii_bus *bus) |
1899 | static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down) |
1900 | { |
1901 | u16 data; |
1902 | + struct phy_device *phydev = adapter->phydev; |
1903 | |
1904 | et131x_mii_read(adapter, MII_BMCR, &data); |
1905 | data &= ~BMCR_PDOWN; |
1906 | if (down) |
1907 | data |= BMCR_PDOWN; |
1908 | - et131x_mii_write(adapter, MII_BMCR, data); |
1909 | + et131x_mii_write(adapter, phydev->addr, MII_BMCR, data); |
1910 | } |
1911 | |
1912 | /* et131x_xcvr_init - Init the phy if we are setting it into force mode */ |
1913 | static void et131x_xcvr_init(struct et131x_adapter *adapter) |
1914 | { |
1915 | u16 lcr2; |
1916 | + struct phy_device *phydev = adapter->phydev; |
1917 | |
1918 | /* Set the LED behavior such that LED 1 indicates speed (off = |
1919 | * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates |
1920 | @@ -1690,7 +1676,7 @@ static void et131x_xcvr_init(struct et131x_adapter *adapter) |
1921 | else |
1922 | lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); |
1923 | |
1924 | - et131x_mii_write(adapter, PHY_LED_2, lcr2); |
1925 | + et131x_mii_write(adapter, phydev->addr, PHY_LED_2, lcr2); |
1926 | } |
1927 | } |
1928 | |
1929 | @@ -3645,14 +3631,14 @@ static void et131x_adjust_link(struct net_device *netdev) |
1930 | |
1931 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, |
1932 | ®ister18); |
1933 | - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
1934 | - register18 | 0x4); |
1935 | - et131x_mii_write(adapter, PHY_INDEX_REG, |
1936 | + et131x_mii_write(adapter, phydev->addr, |
1937 | + PHY_MPHY_CONTROL_REG, register18 | 0x4); |
1938 | + et131x_mii_write(adapter, phydev->addr, PHY_INDEX_REG, |
1939 | register18 | 0x8402); |
1940 | - et131x_mii_write(adapter, PHY_DATA_REG, |
1941 | + et131x_mii_write(adapter, phydev->addr, PHY_DATA_REG, |
1942 | register18 | 511); |
1943 | - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
1944 | - register18); |
1945 | + et131x_mii_write(adapter, phydev->addr, |
1946 | + PHY_MPHY_CONTROL_REG, register18); |
1947 | } |
1948 | |
1949 | et1310_config_flow_control(adapter); |
1950 | @@ -3664,7 +3650,8 @@ static void et131x_adjust_link(struct net_device *netdev) |
1951 | et131x_mii_read(adapter, PHY_CONFIG, ®); |
1952 | reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; |
1953 | reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; |
1954 | - et131x_mii_write(adapter, PHY_CONFIG, reg); |
1955 | + et131x_mii_write(adapter, phydev->addr, PHY_CONFIG, |
1956 | + reg); |
1957 | } |
1958 | |
1959 | et131x_set_rx_dma_timer(adapter); |
1960 | @@ -3677,14 +3664,14 @@ static void et131x_adjust_link(struct net_device *netdev) |
1961 | |
1962 | et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, |
1963 | ®ister18); |
1964 | - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
1965 | - register18 | 0x4); |
1966 | - et131x_mii_write(adapter, PHY_INDEX_REG, |
1967 | - register18 | 0x8402); |
1968 | - et131x_mii_write(adapter, PHY_DATA_REG, |
1969 | - register18 | 511); |
1970 | - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, |
1971 | - register18); |
1972 | + et131x_mii_write(adapter, phydev->addr, |
1973 | + PHY_MPHY_CONTROL_REG, register18 | 0x4); |
1974 | + et131x_mii_write(adapter, phydev->addr, |
1975 | + PHY_INDEX_REG, register18 | 0x8402); |
1976 | + et131x_mii_write(adapter, phydev->addr, |
1977 | + PHY_DATA_REG, register18 | 511); |
1978 | + et131x_mii_write(adapter, phydev->addr, |
1979 | + PHY_MPHY_CONTROL_REG, register18); |
1980 | } |
1981 | |
1982 | /* Free the packets being actively sent & stopped */ |
1983 | @@ -4646,10 +4633,6 @@ static int et131x_pci_setup(struct pci_dev *pdev, |
1984 | /* Copy address into the net_device struct */ |
1985 | memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); |
1986 | |
1987 | - /* Init variable for counting how long we do not have link status */ |
1988 | - adapter->boot_coma = 0; |
1989 | - et1310_disable_phy_coma(adapter); |
1990 | - |
1991 | rc = -ENOMEM; |
1992 | |
1993 | /* Setup the mii_bus struct */ |
1994 | @@ -4665,7 +4648,6 @@ static int et131x_pci_setup(struct pci_dev *pdev, |
1995 | adapter->mii_bus->priv = netdev; |
1996 | adapter->mii_bus->read = et131x_mdio_read; |
1997 | adapter->mii_bus->write = et131x_mdio_write; |
1998 | - adapter->mii_bus->reset = et131x_mdio_reset; |
1999 | adapter->mii_bus->irq = kmalloc_array(PHY_MAX_ADDR, sizeof(int), |
2000 | GFP_KERNEL); |
2001 | if (!adapter->mii_bus->irq) |
2002 | @@ -4689,6 +4671,10 @@ static int et131x_pci_setup(struct pci_dev *pdev, |
2003 | /* Setup et1310 as per the documentation */ |
2004 | et131x_adapter_setup(adapter); |
2005 | |
2006 | + /* Init variable for counting how long we do not have link status */ |
2007 | + adapter->boot_coma = 0; |
2008 | + et1310_disable_phy_coma(adapter); |
2009 | + |
2010 | /* We can enable interrupts now |
2011 | * |
2012 | * NOTE - Because registration of interrupt handler is done in the |
2013 | diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c |
2014 | index dde04b767a6d..b16687625c44 100644 |
2015 | --- a/drivers/staging/lustre/lustre/obdclass/class_obd.c |
2016 | +++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c |
2017 | @@ -35,7 +35,7 @@ |
2018 | */ |
2019 | |
2020 | #define DEBUG_SUBSYSTEM S_CLASS |
2021 | -# include <asm/atomic.h> |
2022 | +# include <linux/atomic.h> |
2023 | |
2024 | #include <obd_support.h> |
2025 | #include <obd_class.h> |
2026 | diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c |
2027 | index 7526b989dcbf..c4273cd5f7ed 100644 |
2028 | --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c |
2029 | +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c |
2030 | @@ -54,9 +54,11 @@ static struct usb_device_id rtw_usb_id_tbl[] = { |
2031 | {USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */ |
2032 | /*=== Customer ID ===*/ |
2033 | /****** 8188EUS ********/ |
2034 | + {USB_DEVICE(0x056e, 0x4008)}, /* Elecom WDC-150SU2M */ |
2035 | {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ |
2036 | {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ |
2037 | {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ |
2038 | + {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ |
2039 | {} /* Terminating entry */ |
2040 | }; |
2041 | |
2042 | diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c |
2043 | index fbf6c5ad222f..ef2fb367d179 100644 |
2044 | --- a/drivers/tty/serial/serial_core.c |
2045 | +++ b/drivers/tty/serial/serial_core.c |
2046 | @@ -243,6 +243,9 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state) |
2047 | /* |
2048 | * Turn off DTR and RTS early. |
2049 | */ |
2050 | + if (uart_console(uport) && tty) |
2051 | + uport->cons->cflag = tty->termios.c_cflag; |
2052 | + |
2053 | if (!tty || (tty->termios.c_cflag & HUPCL)) |
2054 | uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS); |
2055 | |
2056 | diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c |
2057 | index 257876ea03a1..0b59731c3021 100644 |
2058 | --- a/drivers/usb/core/devio.c |
2059 | +++ b/drivers/usb/core/devio.c |
2060 | @@ -1509,7 +1509,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb |
2061 | u = (is_in ? URB_DIR_IN : URB_DIR_OUT); |
2062 | if (uurb->flags & USBDEVFS_URB_ISO_ASAP) |
2063 | u |= URB_ISO_ASAP; |
2064 | - if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK) |
2065 | + if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in) |
2066 | u |= URB_SHORT_NOT_OK; |
2067 | if (uurb->flags & USBDEVFS_URB_NO_FSBR) |
2068 | u |= URB_NO_FSBR; |
2069 | diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c |
2070 | index 0e950ad8cb25..27f217107ef1 100644 |
2071 | --- a/drivers/usb/core/hub.c |
2072 | +++ b/drivers/usb/core/hub.c |
2073 | @@ -1728,8 +1728,14 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) |
2074 | * - Change autosuspend delay of hub can avoid unnecessary auto |
2075 | * suspend timer for hub, also may decrease power consumption |
2076 | * of USB bus. |
2077 | + * |
2078 | + * - If user has indicated to prevent autosuspend by passing |
2079 | + * usbcore.autosuspend = -1 then keep autosuspend disabled. |
2080 | */ |
2081 | - pm_runtime_set_autosuspend_delay(&hdev->dev, 0); |
2082 | +#ifdef CONFIG_PM_RUNTIME |
2083 | + if (hdev->dev.power.autosuspend_delay >= 0) |
2084 | + pm_runtime_set_autosuspend_delay(&hdev->dev, 0); |
2085 | +#endif |
2086 | |
2087 | /* |
2088 | * Hubs have proper suspend/resume support, except for root hubs |
2089 | @@ -3264,6 +3270,43 @@ static int finish_port_resume(struct usb_device *udev) |
2090 | } |
2091 | |
2092 | /* |
2093 | + * There are some SS USB devices which take longer time for link training. |
2094 | + * XHCI specs 4.19.4 says that when Link training is successful, port |
2095 | + * sets CSC bit to 1. So if SW reads port status before successful link |
2096 | + * training, then it will not find device to be present. |
2097 | + * USB Analyzer log with such buggy devices show that in some cases |
2098 | + * device switch on the RX termination after long delay of host enabling |
2099 | + * the VBUS. In few other cases it has been seen that device fails to |
2100 | + * negotiate link training in first attempt. It has been |
2101 | + * reported till now that few devices take as long as 2000 ms to train |
2102 | + * the link after host enabling its VBUS and termination. Following |
2103 | + * routine implements a 2000 ms timeout for link training. If in a case |
2104 | + * link trains before timeout, loop will exit earlier. |
2105 | + * |
2106 | + * FIXME: If a device was connected before suspend, but was removed |
2107 | + * while system was asleep, then the loop in the following routine will |
2108 | + * only exit at timeout. |
2109 | + * |
2110 | + * This routine should only be called when persist is enabled for a SS |
2111 | + * device. |
2112 | + */ |
2113 | +static int wait_for_ss_port_enable(struct usb_device *udev, |
2114 | + struct usb_hub *hub, int *port1, |
2115 | + u16 *portchange, u16 *portstatus) |
2116 | +{ |
2117 | + int status = 0, delay_ms = 0; |
2118 | + |
2119 | + while (delay_ms < 2000) { |
2120 | + if (status || *portstatus & USB_PORT_STAT_CONNECTION) |
2121 | + break; |
2122 | + msleep(20); |
2123 | + delay_ms += 20; |
2124 | + status = hub_port_status(hub, *port1, portstatus, portchange); |
2125 | + } |
2126 | + return status; |
2127 | +} |
2128 | + |
2129 | +/* |
2130 | * usb_port_resume - re-activate a suspended usb device's upstream port |
2131 | * @udev: device to re-activate, not a root hub |
2132 | * Context: must be able to sleep; device not locked; pm locks held |
2133 | @@ -3359,6 +3402,10 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg) |
2134 | } |
2135 | } |
2136 | |
2137 | + if (udev->persist_enabled && hub_is_superspeed(hub->hdev)) |
2138 | + status = wait_for_ss_port_enable(udev, hub, &port1, &portchange, |
2139 | + &portstatus); |
2140 | + |
2141 | status = check_port_resume_type(udev, |
2142 | hub, port1, status, portchange, portstatus); |
2143 | if (status == 0) |
2144 | @@ -4550,6 +4597,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, |
2145 | struct usb_hcd *hcd = bus_to_hcd(hdev->bus); |
2146 | struct usb_port *port_dev = hub->ports[port1 - 1]; |
2147 | struct usb_device *udev = port_dev->child; |
2148 | + static int unreliable_port = -1; |
2149 | |
2150 | /* Disconnect any existing devices under this port */ |
2151 | if (udev) { |
2152 | @@ -4570,10 +4618,12 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, |
2153 | USB_PORT_STAT_C_ENABLE)) { |
2154 | status = hub_port_debounce_be_stable(hub, port1); |
2155 | if (status < 0) { |
2156 | - if (status != -ENODEV && printk_ratelimit()) |
2157 | - dev_err(&port_dev->dev, |
2158 | - "connect-debounce failed\n"); |
2159 | + if (status != -ENODEV && |
2160 | + port1 != unreliable_port && |
2161 | + printk_ratelimit()) |
2162 | + dev_err(&port_dev->dev, "connect-debounce failed\n"); |
2163 | portstatus &= ~USB_PORT_STAT_CONNECTION; |
2164 | + unreliable_port = port1; |
2165 | } else { |
2166 | portstatus = status; |
2167 | } |
2168 | diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c |
2169 | index cc305c71ac3d..6130b7574908 100644 |
2170 | --- a/drivers/usb/host/ehci-hub.c |
2171 | +++ b/drivers/usb/host/ehci-hub.c |
2172 | @@ -1230,7 +1230,7 @@ int ehci_hub_control( |
2173 | if (selector == EHSET_TEST_SINGLE_STEP_SET_FEATURE) { |
2174 | spin_unlock_irqrestore(&ehci->lock, flags); |
2175 | retval = ehset_single_step_set_feature(hcd, |
2176 | - wIndex); |
2177 | + wIndex + 1); |
2178 | spin_lock_irqsave(&ehci->lock, flags); |
2179 | break; |
2180 | } |
2181 | diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c |
2182 | index 3e86bf4371b3..ca7b964124af 100644 |
2183 | --- a/drivers/usb/host/ehci-pci.c |
2184 | +++ b/drivers/usb/host/ehci-pci.c |
2185 | @@ -35,6 +35,21 @@ static const char hcd_name[] = "ehci-pci"; |
2186 | #define PCI_DEVICE_ID_INTEL_CE4100_USB 0x2e70 |
2187 | |
2188 | /*-------------------------------------------------------------------------*/ |
2189 | +#define PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC 0x0939 |
2190 | +static inline bool is_intel_quark_x1000(struct pci_dev *pdev) |
2191 | +{ |
2192 | + return pdev->vendor == PCI_VENDOR_ID_INTEL && |
2193 | + pdev->device == PCI_DEVICE_ID_INTEL_QUARK_X1000_SOC; |
2194 | +} |
2195 | + |
2196 | +/* |
2197 | + * 0x84 is the offset of in/out threshold register, |
2198 | + * and it is the same offset as the register of 'hostpc'. |
2199 | + */ |
2200 | +#define intel_quark_x1000_insnreg01 hostpc |
2201 | + |
2202 | +/* Maximum usable threshold value is 0x7f dwords for both IN and OUT */ |
2203 | +#define INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD 0x007f007f |
2204 | |
2205 | /* called after powerup, by probe or system-pm "wakeup" */ |
2206 | static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) |
2207 | @@ -50,6 +65,16 @@ static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev) |
2208 | if (!retval) |
2209 | ehci_dbg(ehci, "MWI active\n"); |
2210 | |
2211 | + /* Reset the threshold limit */ |
2212 | + if (is_intel_quark_x1000(pdev)) { |
2213 | + /* |
2214 | + * For the Intel QUARK X1000, raise the I/O threshold to the |
2215 | + * maximum usable value in order to improve performance. |
2216 | + */ |
2217 | + ehci_writel(ehci, INTEL_QUARK_X1000_EHCI_MAX_THRESHOLD, |
2218 | + ehci->regs->intel_quark_x1000_insnreg01); |
2219 | + } |
2220 | + |
2221 | return 0; |
2222 | } |
2223 | |
2224 | diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c |
2225 | index 45032e933e18..04f2186939d2 100644 |
2226 | --- a/drivers/usb/host/ohci-dbg.c |
2227 | +++ b/drivers/usb/host/ohci-dbg.c |
2228 | @@ -236,7 +236,7 @@ ohci_dump_roothub ( |
2229 | } |
2230 | } |
2231 | |
2232 | -static void ohci_dump (struct ohci_hcd *controller, int verbose) |
2233 | +static void ohci_dump(struct ohci_hcd *controller) |
2234 | { |
2235 | ohci_dbg (controller, "OHCI controller state\n"); |
2236 | |
2237 | @@ -464,15 +464,16 @@ show_list (struct ohci_hcd *ohci, char *buf, size_t count, struct ed *ed) |
2238 | static ssize_t fill_async_buffer(struct debug_buffer *buf) |
2239 | { |
2240 | struct ohci_hcd *ohci; |
2241 | - size_t temp; |
2242 | + size_t temp, size; |
2243 | unsigned long flags; |
2244 | |
2245 | ohci = buf->ohci; |
2246 | + size = PAGE_SIZE; |
2247 | |
2248 | /* display control and bulk lists together, for simplicity */ |
2249 | spin_lock_irqsave (&ohci->lock, flags); |
2250 | - temp = show_list(ohci, buf->page, buf->count, ohci->ed_controltail); |
2251 | - temp += show_list(ohci, buf->page + temp, buf->count - temp, |
2252 | + temp = show_list(ohci, buf->page, size, ohci->ed_controltail); |
2253 | + temp += show_list(ohci, buf->page + temp, size - temp, |
2254 | ohci->ed_bulktail); |
2255 | spin_unlock_irqrestore (&ohci->lock, flags); |
2256 | |
2257 | diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c |
2258 | index f98d03f3144c..a21a36500fd7 100644 |
2259 | --- a/drivers/usb/host/ohci-hcd.c |
2260 | +++ b/drivers/usb/host/ohci-hcd.c |
2261 | @@ -76,8 +76,8 @@ static const char hcd_name [] = "ohci_hcd"; |
2262 | #include "ohci.h" |
2263 | #include "pci-quirks.h" |
2264 | |
2265 | -static void ohci_dump (struct ohci_hcd *ohci, int verbose); |
2266 | -static void ohci_stop (struct usb_hcd *hcd); |
2267 | +static void ohci_dump(struct ohci_hcd *ohci); |
2268 | +static void ohci_stop(struct usb_hcd *hcd); |
2269 | |
2270 | #include "ohci-hub.c" |
2271 | #include "ohci-dbg.c" |
2272 | @@ -744,7 +744,7 @@ retry: |
2273 | ohci->ed_to_check = NULL; |
2274 | } |
2275 | |
2276 | - ohci_dump (ohci, 1); |
2277 | + ohci_dump(ohci); |
2278 | |
2279 | return 0; |
2280 | } |
2281 | @@ -825,7 +825,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd) |
2282 | usb_hc_died(hcd); |
2283 | } |
2284 | |
2285 | - ohci_dump (ohci, 1); |
2286 | + ohci_dump(ohci); |
2287 | ohci_usb_reset (ohci); |
2288 | } |
2289 | |
2290 | @@ -925,7 +925,7 @@ static void ohci_stop (struct usb_hcd *hcd) |
2291 | { |
2292 | struct ohci_hcd *ohci = hcd_to_ohci (hcd); |
2293 | |
2294 | - ohci_dump (ohci, 1); |
2295 | + ohci_dump(ohci); |
2296 | |
2297 | if (quirk_nec(ohci)) |
2298 | flush_work(&ohci->nec_work); |
2299 | diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c |
2300 | index d4253e319428..a8bde5b8cbdd 100644 |
2301 | --- a/drivers/usb/host/ohci-q.c |
2302 | +++ b/drivers/usb/host/ohci-q.c |
2303 | @@ -311,8 +311,7 @@ static void periodic_unlink (struct ohci_hcd *ohci, struct ed *ed) |
2304 | * - ED_OPER: when there's any request queued, the ED gets rescheduled |
2305 | * immediately. HC should be working on them. |
2306 | * |
2307 | - * - ED_IDLE: when there's no TD queue. there's no reason for the HC |
2308 | - * to care about this ED; safe to disable the endpoint. |
2309 | + * - ED_IDLE: when there's no TD queue or the HC isn't running. |
2310 | * |
2311 | * When finish_unlinks() runs later, after SOF interrupt, it will often |
2312 | * complete one or more URB unlinks before making that state change. |
2313 | @@ -926,6 +925,10 @@ rescan_all: |
2314 | int completed, modified; |
2315 | __hc32 *prev; |
2316 | |
2317 | + /* Is this ED already invisible to the hardware? */ |
2318 | + if (ed->state == ED_IDLE) |
2319 | + goto ed_idle; |
2320 | + |
2321 | /* only take off EDs that the HC isn't using, accounting for |
2322 | * frame counter wraps and EDs with partially retired TDs |
2323 | */ |
2324 | @@ -955,12 +958,20 @@ skip_ed: |
2325 | } |
2326 | } |
2327 | |
2328 | + /* ED's now officially unlinked, hc doesn't see */ |
2329 | + ed->state = ED_IDLE; |
2330 | + if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT) |
2331 | + ohci->eds_scheduled--; |
2332 | + ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); |
2333 | + ed->hwNextED = 0; |
2334 | + wmb(); |
2335 | + ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); |
2336 | +ed_idle: |
2337 | + |
2338 | /* reentrancy: if we drop the schedule lock, someone might |
2339 | * have modified this list. normally it's just prepending |
2340 | * entries (which we'd ignore), but paranoia won't hurt. |
2341 | */ |
2342 | - *last = ed->ed_next; |
2343 | - ed->ed_next = NULL; |
2344 | modified = 0; |
2345 | |
2346 | /* unlink urbs as requested, but rescan the list after |
2347 | @@ -1018,19 +1029,20 @@ rescan_this: |
2348 | if (completed && !list_empty (&ed->td_list)) |
2349 | goto rescan_this; |
2350 | |
2351 | - /* ED's now officially unlinked, hc doesn't see */ |
2352 | - ed->state = ED_IDLE; |
2353 | - if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT) |
2354 | - ohci->eds_scheduled--; |
2355 | - ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); |
2356 | - ed->hwNextED = 0; |
2357 | - wmb (); |
2358 | - ed->hwINFO &= ~cpu_to_hc32 (ohci, ED_SKIP | ED_DEQUEUE); |
2359 | - |
2360 | - /* but if there's work queued, reschedule */ |
2361 | - if (!list_empty (&ed->td_list)) { |
2362 | - if (ohci->rh_state == OHCI_RH_RUNNING) |
2363 | - ed_schedule (ohci, ed); |
2364 | + /* |
2365 | + * If no TDs are queued, take ED off the ed_rm_list. |
2366 | + * Otherwise, if the HC is running, reschedule. |
2367 | + * If not, leave it on the list for further dequeues. |
2368 | + */ |
2369 | + if (list_empty(&ed->td_list)) { |
2370 | + *last = ed->ed_next; |
2371 | + ed->ed_next = NULL; |
2372 | + } else if (ohci->rh_state == OHCI_RH_RUNNING) { |
2373 | + *last = ed->ed_next; |
2374 | + ed->ed_next = NULL; |
2375 | + ed_schedule(ohci, ed); |
2376 | + } else { |
2377 | + last = &ed->ed_next; |
2378 | } |
2379 | |
2380 | if (modified) |
2381 | diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c |
2382 | index e20520f42753..994a36e582ca 100644 |
2383 | --- a/drivers/usb/host/xhci-pci.c |
2384 | +++ b/drivers/usb/host/xhci-pci.c |
2385 | @@ -101,6 +101,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
2386 | /* AMD PLL quirk */ |
2387 | if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info()) |
2388 | xhci->quirks |= XHCI_AMD_PLL_FIX; |
2389 | + |
2390 | + if (pdev->vendor == PCI_VENDOR_ID_AMD) |
2391 | + xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
2392 | + |
2393 | if (pdev->vendor == PCI_VENDOR_ID_INTEL) { |
2394 | xhci->quirks |= XHCI_LPM_SUPPORT; |
2395 | xhci->quirks |= XHCI_INTEL_HOST; |
2396 | @@ -143,6 +147,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
2397 | pdev->device == PCI_DEVICE_ID_ASROCK_P67) { |
2398 | xhci->quirks |= XHCI_RESET_ON_RESUME; |
2399 | xhci->quirks |= XHCI_TRUST_TX_LENGTH; |
2400 | + xhci->quirks |= XHCI_BROKEN_STREAMS; |
2401 | } |
2402 | if (pdev->vendor == PCI_VENDOR_ID_RENESAS && |
2403 | pdev->device == 0x0015) |
2404 | @@ -150,6 +155,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) |
2405 | if (pdev->vendor == PCI_VENDOR_ID_VIA) |
2406 | xhci->quirks |= XHCI_RESET_ON_RESUME; |
2407 | |
2408 | + /* See https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ |
2409 | + if (pdev->vendor == PCI_VENDOR_ID_VIA && |
2410 | + pdev->device == 0x3432) |
2411 | + xhci->quirks |= XHCI_BROKEN_STREAMS; |
2412 | + |
2413 | if (xhci->quirks & XHCI_RESET_ON_RESUME) |
2414 | xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, |
2415 | "QUIRK: Resetting on resume"); |
2416 | @@ -230,7 +240,8 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) |
2417 | goto put_usb3_hcd; |
2418 | /* Roothub already marked as USB 3.0 speed */ |
2419 | |
2420 | - if (HCC_MAX_PSA(xhci->hcc_params) >= 4) |
2421 | + if (!(xhci->quirks & XHCI_BROKEN_STREAMS) && |
2422 | + HCC_MAX_PSA(xhci->hcc_params) >= 4) |
2423 | xhci->shared_hcd->can_do_streams = 1; |
2424 | |
2425 | /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ |
2426 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
2427 | index 749fc68eb5c1..28a929d45cfe 100644 |
2428 | --- a/drivers/usb/host/xhci-ring.c |
2429 | +++ b/drivers/usb/host/xhci-ring.c |
2430 | @@ -364,32 +364,6 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, |
2431 | } |
2432 | } |
2433 | |
2434 | -/* |
2435 | - * Find the segment that trb is in. Start searching in start_seg. |
2436 | - * If we must move past a segment that has a link TRB with a toggle cycle state |
2437 | - * bit set, then we will toggle the value pointed at by cycle_state. |
2438 | - */ |
2439 | -static struct xhci_segment *find_trb_seg( |
2440 | - struct xhci_segment *start_seg, |
2441 | - union xhci_trb *trb, int *cycle_state) |
2442 | -{ |
2443 | - struct xhci_segment *cur_seg = start_seg; |
2444 | - struct xhci_generic_trb *generic_trb; |
2445 | - |
2446 | - while (cur_seg->trbs > trb || |
2447 | - &cur_seg->trbs[TRBS_PER_SEGMENT - 1] < trb) { |
2448 | - generic_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1].generic; |
2449 | - if (generic_trb->field[3] & cpu_to_le32(LINK_TOGGLE)) |
2450 | - *cycle_state ^= 0x1; |
2451 | - cur_seg = cur_seg->next; |
2452 | - if (cur_seg == start_seg) |
2453 | - /* Looped over the entire list. Oops! */ |
2454 | - return NULL; |
2455 | - } |
2456 | - return cur_seg; |
2457 | -} |
2458 | - |
2459 | - |
2460 | static struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci, |
2461 | unsigned int slot_id, unsigned int ep_index, |
2462 | unsigned int stream_id) |
2463 | @@ -459,9 +433,12 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
2464 | struct xhci_virt_device *dev = xhci->devs[slot_id]; |
2465 | struct xhci_virt_ep *ep = &dev->eps[ep_index]; |
2466 | struct xhci_ring *ep_ring; |
2467 | - struct xhci_generic_trb *trb; |
2468 | + struct xhci_segment *new_seg; |
2469 | + union xhci_trb *new_deq; |
2470 | dma_addr_t addr; |
2471 | u64 hw_dequeue; |
2472 | + bool cycle_found = false; |
2473 | + bool td_last_trb_found = false; |
2474 | |
2475 | ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id, |
2476 | ep_index, stream_id); |
2477 | @@ -486,45 +463,45 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, |
2478 | hw_dequeue = le64_to_cpu(ep_ctx->deq); |
2479 | } |
2480 | |
2481 | - /* Find virtual address and segment of hardware dequeue pointer */ |
2482 | - state->new_deq_seg = ep_ring->deq_seg; |
2483 | - state->new_deq_ptr = ep_ring->dequeue; |
2484 | - while (xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr) |
2485 | - != (dma_addr_t)(hw_dequeue & ~0xf)) { |
2486 | - next_trb(xhci, ep_ring, &state->new_deq_seg, |
2487 | - &state->new_deq_ptr); |
2488 | - if (state->new_deq_ptr == ep_ring->dequeue) { |
2489 | - WARN_ON(1); |
2490 | - return; |
2491 | - } |
2492 | - } |
2493 | + new_seg = ep_ring->deq_seg; |
2494 | + new_deq = ep_ring->dequeue; |
2495 | + state->new_cycle_state = hw_dequeue & 0x1; |
2496 | + |
2497 | /* |
2498 | - * Find cycle state for last_trb, starting at old cycle state of |
2499 | - * hw_dequeue. If there is only one segment ring, find_trb_seg() will |
2500 | - * return immediately and cannot toggle the cycle state if this search |
2501 | - * wraps around, so add one more toggle manually in that case. |
2502 | + * We want to find the pointer, segment and cycle state of the new trb |
2503 | + * (the one after current TD's last_trb). We know the cycle state at |
2504 | + * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are |
2505 | + * found. |
2506 | */ |
2507 | - state->new_cycle_state = hw_dequeue & 0x1; |
2508 | - if (ep_ring->first_seg == ep_ring->first_seg->next && |
2509 | - cur_td->last_trb < state->new_deq_ptr) |
2510 | - state->new_cycle_state ^= 0x1; |
2511 | + do { |
2512 | + if (!cycle_found && xhci_trb_virt_to_dma(new_seg, new_deq) |
2513 | + == (dma_addr_t)(hw_dequeue & ~0xf)) { |
2514 | + cycle_found = true; |
2515 | + if (td_last_trb_found) |
2516 | + break; |
2517 | + } |
2518 | + if (new_deq == cur_td->last_trb) |
2519 | + td_last_trb_found = true; |
2520 | |
2521 | - state->new_deq_ptr = cur_td->last_trb; |
2522 | - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
2523 | - "Finding segment containing last TRB in TD."); |
2524 | - state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
2525 | - state->new_deq_ptr, &state->new_cycle_state); |
2526 | - if (!state->new_deq_seg) { |
2527 | - WARN_ON(1); |
2528 | - return; |
2529 | - } |
2530 | + if (cycle_found && |
2531 | + TRB_TYPE_LINK_LE32(new_deq->generic.field[3]) && |
2532 | + new_deq->generic.field[3] & cpu_to_le32(LINK_TOGGLE)) |
2533 | + state->new_cycle_state ^= 0x1; |
2534 | + |
2535 | + next_trb(xhci, ep_ring, &new_seg, &new_deq); |
2536 | + |
2537 | + /* Search wrapped around, bail out */ |
2538 | + if (new_deq == ep->ring->dequeue) { |
2539 | + xhci_err(xhci, "Error: Failed finding new dequeue state\n"); |
2540 | + state->new_deq_seg = NULL; |
2541 | + state->new_deq_ptr = NULL; |
2542 | + return; |
2543 | + } |
2544 | + |
2545 | + } while (!cycle_found || !td_last_trb_found); |
2546 | |
2547 | - /* Increment to find next TRB after last_trb. Cycle if appropriate. */ |
2548 | - trb = &state->new_deq_ptr->generic; |
2549 | - if (TRB_TYPE_LINK_LE32(trb->field[3]) && |
2550 | - (trb->field[3] & cpu_to_le32(LINK_TOGGLE))) |
2551 | - state->new_cycle_state ^= 0x1; |
2552 | - next_trb(xhci, ep_ring, &state->new_deq_seg, &state->new_deq_ptr); |
2553 | + state->new_deq_seg = new_seg; |
2554 | + state->new_deq_ptr = new_deq; |
2555 | |
2556 | /* Don't update the ring cycle state for the producer (us). */ |
2557 | xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, |
2558 | @@ -2483,7 +2460,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, |
2559 | * last TRB of the previous TD. The command completion handle |
2560 | * will take care the rest. |
2561 | */ |
2562 | - if (!event_seg && trb_comp_code == COMP_STOP_INVAL) { |
2563 | + if (!event_seg && (trb_comp_code == COMP_STOP || |
2564 | + trb_comp_code == COMP_STOP_INVAL)) { |
2565 | ret = 0; |
2566 | goto cleanup; |
2567 | } |
2568 | diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c |
2569 | index 7436d5f5e67a..e32cc6cf86dc 100644 |
2570 | --- a/drivers/usb/host/xhci.c |
2571 | +++ b/drivers/usb/host/xhci.c |
2572 | @@ -2891,6 +2891,9 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, |
2573 | ep_index, ep->stopped_stream, ep->stopped_td, |
2574 | &deq_state); |
2575 | |
2576 | + if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg) |
2577 | + return; |
2578 | + |
2579 | /* HW with the reset endpoint quirk will use the saved dequeue state to |
2580 | * issue a configure endpoint command later. |
2581 | */ |
2582 | @@ -3163,7 +3166,8 @@ int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, |
2583 | num_streams); |
2584 | |
2585 | /* MaxPSASize value 0 (2 streams) means streams are not supported */ |
2586 | - if (HCC_MAX_PSA(xhci->hcc_params) < 4) { |
2587 | + if ((xhci->quirks & XHCI_BROKEN_STREAMS) || |
2588 | + HCC_MAX_PSA(xhci->hcc_params) < 4) { |
2589 | xhci_dbg(xhci, "xHCI controller does not support streams.\n"); |
2590 | return -ENOSYS; |
2591 | } |
2592 | diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h |
2593 | index 9ffecd56600d..dace5152e179 100644 |
2594 | --- a/drivers/usb/host/xhci.h |
2595 | +++ b/drivers/usb/host/xhci.h |
2596 | @@ -1558,6 +1558,8 @@ struct xhci_hcd { |
2597 | #define XHCI_PLAT (1 << 16) |
2598 | #define XHCI_SLOW_SUSPEND (1 << 17) |
2599 | #define XHCI_SPURIOUS_WAKEUP (1 << 18) |
2600 | +/* For controllers with a broken beyond repair streams implementation */ |
2601 | +#define XHCI_BROKEN_STREAMS (1 << 19) |
2602 | unsigned int num_active_eps; |
2603 | unsigned int limit_active_eps; |
2604 | /* There are two roothubs to keep track of bus suspend info for */ |
2605 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
2606 | index 8a3813be1b28..8b0f517abb6b 100644 |
2607 | --- a/drivers/usb/serial/ftdi_sio.c |
2608 | +++ b/drivers/usb/serial/ftdi_sio.c |
2609 | @@ -151,6 +151,7 @@ static const struct usb_device_id id_table_combined[] = { |
2610 | { USB_DEVICE(FTDI_VID, FTDI_AMC232_PID) }, |
2611 | { USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) }, |
2612 | { USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) }, |
2613 | + { USB_DEVICE(FTDI_VID, FTDI_BM_ATOM_NANO_PID) }, |
2614 | { USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) }, |
2615 | { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) }, |
2616 | { USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) }, |
2617 | @@ -673,6 +674,8 @@ static const struct usb_device_id id_table_combined[] = { |
2618 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, |
2619 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, |
2620 | { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, |
2621 | + { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, |
2622 | + { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, |
2623 | { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, |
2624 | { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) }, |
2625 | { USB_DEVICE(FTDI_VID, FTDI_ACTIVE_ROBOTS_PID) }, |
2626 | @@ -945,6 +948,8 @@ static const struct usb_device_id id_table_combined[] = { |
2627 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, |
2628 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, |
2629 | { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, |
2630 | + /* ekey Devices */ |
2631 | + { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, |
2632 | /* Infineon Devices */ |
2633 | { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, |
2634 | { } /* Terminating entry */ |
2635 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
2636 | index c4777bc6aee0..70b0b1d88ae9 100644 |
2637 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
2638 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
2639 | @@ -42,6 +42,8 @@ |
2640 | /* www.candapter.com Ewert Energy Systems CANdapter device */ |
2641 | #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */ |
2642 | |
2643 | +#define FTDI_BM_ATOM_NANO_PID 0xa559 /* Basic Micro ATOM Nano USB2Serial */ |
2644 | + |
2645 | /* |
2646 | * Texas Instruments XDS100v2 JTAG / BeagleBone A3 |
2647 | * http://processors.wiki.ti.com/index.php/XDS100 |
2648 | @@ -140,12 +142,15 @@ |
2649 | /* |
2650 | * Xsens Technologies BV products (http://www.xsens.com). |
2651 | */ |
2652 | -#define XSENS_CONVERTER_0_PID 0xD388 |
2653 | -#define XSENS_CONVERTER_1_PID 0xD389 |
2654 | +#define XSENS_VID 0x2639 |
2655 | +#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ |
2656 | +#define XSENS_MTW_PID 0x0200 /* Xsens MTw */ |
2657 | +#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */ |
2658 | +#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */ |
2659 | #define XSENS_CONVERTER_2_PID 0xD38A |
2660 | -#define XSENS_CONVERTER_3_PID 0xD38B |
2661 | -#define XSENS_CONVERTER_4_PID 0xD38C |
2662 | -#define XSENS_CONVERTER_5_PID 0xD38D |
2663 | +#define XSENS_CONVERTER_3_PID 0xD38B /* Xsens USB-serial converter */ |
2664 | +#define XSENS_CONVERTER_4_PID 0xD38C /* Xsens Wireless Receiver */ |
2665 | +#define XSENS_CONVERTER_5_PID 0xD38D /* Xsens Awinda Station */ |
2666 | #define XSENS_CONVERTER_6_PID 0xD38E |
2667 | #define XSENS_CONVERTER_7_PID 0xD38F |
2668 | |
2669 | @@ -1375,3 +1380,8 @@ |
2670 | #define BRAINBOXES_US_160_6_PID 0x9006 /* US-160 16xRS232 1Mbaud Port 11 and 12 */ |
2671 | #define BRAINBOXES_US_160_7_PID 0x9007 /* US-160 16xRS232 1Mbaud Port 13 and 14 */ |
2672 | #define BRAINBOXES_US_160_8_PID 0x9008 /* US-160 16xRS232 1Mbaud Port 15 and 16 */ |
2673 | + |
2674 | +/* |
2675 | + * ekey biometric systems GmbH (http://ekey.net/) |
2676 | + */ |
2677 | +#define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */ |
2678 | diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c |
2679 | index e62f2dff8b7d..6c3734d2b45a 100644 |
2680 | --- a/drivers/usb/serial/whiteheat.c |
2681 | +++ b/drivers/usb/serial/whiteheat.c |
2682 | @@ -514,6 +514,10 @@ static void command_port_read_callback(struct urb *urb) |
2683 | dev_dbg(&urb->dev->dev, "%s - command_info is NULL, exiting.\n", __func__); |
2684 | return; |
2685 | } |
2686 | + if (!urb->actual_length) { |
2687 | + dev_dbg(&urb->dev->dev, "%s - empty response, exiting.\n", __func__); |
2688 | + return; |
2689 | + } |
2690 | if (status) { |
2691 | dev_dbg(&urb->dev->dev, "%s - nonzero urb status: %d\n", __func__, status); |
2692 | if (status != -ENOENT) |
2693 | @@ -534,7 +538,8 @@ static void command_port_read_callback(struct urb *urb) |
2694 | /* These are unsolicited reports from the firmware, hence no |
2695 | waiting command to wakeup */ |
2696 | dev_dbg(&urb->dev->dev, "%s - event received\n", __func__); |
2697 | - } else if (data[0] == WHITEHEAT_GET_DTR_RTS) { |
2698 | + } else if ((data[0] == WHITEHEAT_GET_DTR_RTS) && |
2699 | + (urb->actual_length - 1 <= sizeof(command_info->result_buffer))) { |
2700 | memcpy(command_info->result_buffer, &data[1], |
2701 | urb->actual_length - 1); |
2702 | command_info->command_finished = WHITEHEAT_CMD_COMPLETE; |
2703 | diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c |
2704 | index 511b22953167..3f42785f653c 100644 |
2705 | --- a/drivers/usb/storage/uas.c |
2706 | +++ b/drivers/usb/storage/uas.c |
2707 | @@ -1026,7 +1026,7 @@ static int uas_configure_endpoints(struct uas_dev_info *devinfo) |
2708 | usb_endpoint_num(&eps[3]->desc)); |
2709 | |
2710 | if (udev->speed != USB_SPEED_SUPER) { |
2711 | - devinfo->qdepth = 256; |
2712 | + devinfo->qdepth = 32; |
2713 | devinfo->use_streams = 0; |
2714 | } else { |
2715 | devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1, |
2716 | diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c |
2717 | index 84b4bfb84344..500713882ad5 100644 |
2718 | --- a/drivers/xen/events/events_fifo.c |
2719 | +++ b/drivers/xen/events/events_fifo.c |
2720 | @@ -67,10 +67,9 @@ static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly; |
2721 | static unsigned event_array_pages __read_mostly; |
2722 | |
2723 | /* |
2724 | - * sync_set_bit() and friends must be unsigned long aligned on non-x86 |
2725 | - * platforms. |
2726 | + * sync_set_bit() and friends must be unsigned long aligned. |
2727 | */ |
2728 | -#if !defined(CONFIG_X86) && BITS_PER_LONG > 32 |
2729 | +#if BITS_PER_LONG > 32 |
2730 | |
2731 | #define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL) |
2732 | #define EVTCHN_FIFO_BIT(b, w) \ |
2733 | diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c |
2734 | index 5a201d81049c..fbd76ded9a34 100644 |
2735 | --- a/fs/btrfs/async-thread.c |
2736 | +++ b/fs/btrfs/async-thread.c |
2737 | @@ -22,7 +22,6 @@ |
2738 | #include <linux/list.h> |
2739 | #include <linux/spinlock.h> |
2740 | #include <linux/freezer.h> |
2741 | -#include <linux/workqueue.h> |
2742 | #include "async-thread.h" |
2743 | #include "ctree.h" |
2744 | |
2745 | @@ -55,8 +54,39 @@ struct btrfs_workqueue { |
2746 | struct __btrfs_workqueue *high; |
2747 | }; |
2748 | |
2749 | -static inline struct __btrfs_workqueue |
2750 | -*__btrfs_alloc_workqueue(const char *name, int flags, int max_active, |
2751 | +static void normal_work_helper(struct btrfs_work *work); |
2752 | + |
2753 | +#define BTRFS_WORK_HELPER(name) \ |
2754 | +void btrfs_##name(struct work_struct *arg) \ |
2755 | +{ \ |
2756 | + struct btrfs_work *work = container_of(arg, struct btrfs_work, \ |
2757 | + normal_work); \ |
2758 | + normal_work_helper(work); \ |
2759 | +} |
2760 | + |
2761 | +BTRFS_WORK_HELPER(worker_helper); |
2762 | +BTRFS_WORK_HELPER(delalloc_helper); |
2763 | +BTRFS_WORK_HELPER(flush_delalloc_helper); |
2764 | +BTRFS_WORK_HELPER(cache_helper); |
2765 | +BTRFS_WORK_HELPER(submit_helper); |
2766 | +BTRFS_WORK_HELPER(fixup_helper); |
2767 | +BTRFS_WORK_HELPER(endio_helper); |
2768 | +BTRFS_WORK_HELPER(endio_meta_helper); |
2769 | +BTRFS_WORK_HELPER(endio_meta_write_helper); |
2770 | +BTRFS_WORK_HELPER(endio_raid56_helper); |
2771 | +BTRFS_WORK_HELPER(rmw_helper); |
2772 | +BTRFS_WORK_HELPER(endio_write_helper); |
2773 | +BTRFS_WORK_HELPER(freespace_write_helper); |
2774 | +BTRFS_WORK_HELPER(delayed_meta_helper); |
2775 | +BTRFS_WORK_HELPER(readahead_helper); |
2776 | +BTRFS_WORK_HELPER(qgroup_rescan_helper); |
2777 | +BTRFS_WORK_HELPER(extent_refs_helper); |
2778 | +BTRFS_WORK_HELPER(scrub_helper); |
2779 | +BTRFS_WORK_HELPER(scrubwrc_helper); |
2780 | +BTRFS_WORK_HELPER(scrubnc_helper); |
2781 | + |
2782 | +static struct __btrfs_workqueue * |
2783 | +__btrfs_alloc_workqueue(const char *name, int flags, int max_active, |
2784 | int thresh) |
2785 | { |
2786 | struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS); |
2787 | @@ -232,13 +262,11 @@ static void run_ordered_work(struct __btrfs_workqueue *wq) |
2788 | spin_unlock_irqrestore(lock, flags); |
2789 | } |
2790 | |
2791 | -static void normal_work_helper(struct work_struct *arg) |
2792 | +static void normal_work_helper(struct btrfs_work *work) |
2793 | { |
2794 | - struct btrfs_work *work; |
2795 | struct __btrfs_workqueue *wq; |
2796 | int need_order = 0; |
2797 | |
2798 | - work = container_of(arg, struct btrfs_work, normal_work); |
2799 | /* |
2800 | * We should not touch things inside work in the following cases: |
2801 | * 1) after work->func() if it has no ordered_free |
2802 | @@ -262,7 +290,7 @@ static void normal_work_helper(struct work_struct *arg) |
2803 | trace_btrfs_all_work_done(work); |
2804 | } |
2805 | |
2806 | -void btrfs_init_work(struct btrfs_work *work, |
2807 | +void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func, |
2808 | btrfs_func_t func, |
2809 | btrfs_func_t ordered_func, |
2810 | btrfs_func_t ordered_free) |
2811 | @@ -270,7 +298,7 @@ void btrfs_init_work(struct btrfs_work *work, |
2812 | work->func = func; |
2813 | work->ordered_func = ordered_func; |
2814 | work->ordered_free = ordered_free; |
2815 | - INIT_WORK(&work->normal_work, normal_work_helper); |
2816 | + INIT_WORK(&work->normal_work, uniq_func); |
2817 | INIT_LIST_HEAD(&work->ordered_list); |
2818 | work->flags = 0; |
2819 | } |
2820 | diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h |
2821 | index 9c6b66d15fb0..e9e31c94758f 100644 |
2822 | --- a/fs/btrfs/async-thread.h |
2823 | +++ b/fs/btrfs/async-thread.h |
2824 | @@ -19,12 +19,14 @@ |
2825 | |
2826 | #ifndef __BTRFS_ASYNC_THREAD_ |
2827 | #define __BTRFS_ASYNC_THREAD_ |
2828 | +#include <linux/workqueue.h> |
2829 | |
2830 | struct btrfs_workqueue; |
2831 | /* Internal use only */ |
2832 | struct __btrfs_workqueue; |
2833 | struct btrfs_work; |
2834 | typedef void (*btrfs_func_t)(struct btrfs_work *arg); |
2835 | +typedef void (*btrfs_work_func_t)(struct work_struct *arg); |
2836 | |
2837 | struct btrfs_work { |
2838 | btrfs_func_t func; |
2839 | @@ -38,11 +40,35 @@ struct btrfs_work { |
2840 | unsigned long flags; |
2841 | }; |
2842 | |
2843 | +#define BTRFS_WORK_HELPER_PROTO(name) \ |
2844 | +void btrfs_##name(struct work_struct *arg) |
2845 | + |
2846 | +BTRFS_WORK_HELPER_PROTO(worker_helper); |
2847 | +BTRFS_WORK_HELPER_PROTO(delalloc_helper); |
2848 | +BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper); |
2849 | +BTRFS_WORK_HELPER_PROTO(cache_helper); |
2850 | +BTRFS_WORK_HELPER_PROTO(submit_helper); |
2851 | +BTRFS_WORK_HELPER_PROTO(fixup_helper); |
2852 | +BTRFS_WORK_HELPER_PROTO(endio_helper); |
2853 | +BTRFS_WORK_HELPER_PROTO(endio_meta_helper); |
2854 | +BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper); |
2855 | +BTRFS_WORK_HELPER_PROTO(endio_raid56_helper); |
2856 | +BTRFS_WORK_HELPER_PROTO(rmw_helper); |
2857 | +BTRFS_WORK_HELPER_PROTO(endio_write_helper); |
2858 | +BTRFS_WORK_HELPER_PROTO(freespace_write_helper); |
2859 | +BTRFS_WORK_HELPER_PROTO(delayed_meta_helper); |
2860 | +BTRFS_WORK_HELPER_PROTO(readahead_helper); |
2861 | +BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper); |
2862 | +BTRFS_WORK_HELPER_PROTO(extent_refs_helper); |
2863 | +BTRFS_WORK_HELPER_PROTO(scrub_helper); |
2864 | +BTRFS_WORK_HELPER_PROTO(scrubwrc_helper); |
2865 | +BTRFS_WORK_HELPER_PROTO(scrubnc_helper); |
2866 | + |
2867 | struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name, |
2868 | int flags, |
2869 | int max_active, |
2870 | int thresh); |
2871 | -void btrfs_init_work(struct btrfs_work *work, |
2872 | +void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper, |
2873 | btrfs_func_t func, |
2874 | btrfs_func_t ordered_func, |
2875 | btrfs_func_t ordered_free); |
2876 | diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c |
2877 | index e25564bfcb46..54a201dac7f9 100644 |
2878 | --- a/fs/btrfs/backref.c |
2879 | +++ b/fs/btrfs/backref.c |
2880 | @@ -276,9 +276,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, |
2881 | } |
2882 | if (ret > 0) |
2883 | goto next; |
2884 | - ret = ulist_add_merge(parents, eb->start, |
2885 | - (uintptr_t)eie, |
2886 | - (u64 *)&old, GFP_NOFS); |
2887 | + ret = ulist_add_merge_ptr(parents, eb->start, |
2888 | + eie, (void **)&old, GFP_NOFS); |
2889 | if (ret < 0) |
2890 | break; |
2891 | if (!ret && extent_item_pos) { |
2892 | @@ -1001,16 +1000,19 @@ again: |
2893 | ret = -EIO; |
2894 | goto out; |
2895 | } |
2896 | + btrfs_tree_read_lock(eb); |
2897 | + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK); |
2898 | ret = find_extent_in_eb(eb, bytenr, |
2899 | *extent_item_pos, &eie); |
2900 | + btrfs_tree_read_unlock_blocking(eb); |
2901 | free_extent_buffer(eb); |
2902 | if (ret < 0) |
2903 | goto out; |
2904 | ref->inode_list = eie; |
2905 | } |
2906 | - ret = ulist_add_merge(refs, ref->parent, |
2907 | - (uintptr_t)ref->inode_list, |
2908 | - (u64 *)&eie, GFP_NOFS); |
2909 | + ret = ulist_add_merge_ptr(refs, ref->parent, |
2910 | + ref->inode_list, |
2911 | + (void **)&eie, GFP_NOFS); |
2912 | if (ret < 0) |
2913 | goto out; |
2914 | if (!ret && extent_item_pos) { |
2915 | diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h |
2916 | index 4794923c410c..43527fd78825 100644 |
2917 | --- a/fs/btrfs/btrfs_inode.h |
2918 | +++ b/fs/btrfs/btrfs_inode.h |
2919 | @@ -84,12 +84,6 @@ struct btrfs_inode { |
2920 | */ |
2921 | struct list_head delalloc_inodes; |
2922 | |
2923 | - /* |
2924 | - * list for tracking inodes that must be sent to disk before a |
2925 | - * rename or truncate commit |
2926 | - */ |
2927 | - struct list_head ordered_operations; |
2928 | - |
2929 | /* node for the red-black tree that links inodes in subvolume root */ |
2930 | struct rb_node rb_node; |
2931 | |
2932 | diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c |
2933 | index da775bfdebc9..a2e90f855d7d 100644 |
2934 | --- a/fs/btrfs/delayed-inode.c |
2935 | +++ b/fs/btrfs/delayed-inode.c |
2936 | @@ -1395,8 +1395,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, |
2937 | return -ENOMEM; |
2938 | |
2939 | async_work->delayed_root = delayed_root; |
2940 | - btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, |
2941 | - NULL, NULL); |
2942 | + btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper, |
2943 | + btrfs_async_run_delayed_root, NULL, NULL); |
2944 | async_work->nr = nr; |
2945 | |
2946 | btrfs_queue_work(root->fs_info->delayed_workers, &async_work->work); |
2947 | diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c |
2948 | index 08e65e9cf2aa..0229c3720b30 100644 |
2949 | --- a/fs/btrfs/disk-io.c |
2950 | +++ b/fs/btrfs/disk-io.c |
2951 | @@ -39,7 +39,6 @@ |
2952 | #include "btrfs_inode.h" |
2953 | #include "volumes.h" |
2954 | #include "print-tree.h" |
2955 | -#include "async-thread.h" |
2956 | #include "locking.h" |
2957 | #include "tree-log.h" |
2958 | #include "free-space-cache.h" |
2959 | @@ -60,8 +59,6 @@ static void end_workqueue_fn(struct btrfs_work *work); |
2960 | static void free_fs_root(struct btrfs_root *root); |
2961 | static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, |
2962 | int read_only); |
2963 | -static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, |
2964 | - struct btrfs_root *root); |
2965 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root); |
2966 | static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans, |
2967 | struct btrfs_root *root); |
2968 | @@ -695,35 +692,41 @@ static void end_workqueue_bio(struct bio *bio, int err) |
2969 | { |
2970 | struct end_io_wq *end_io_wq = bio->bi_private; |
2971 | struct btrfs_fs_info *fs_info; |
2972 | + struct btrfs_workqueue *wq; |
2973 | + btrfs_work_func_t func; |
2974 | |
2975 | fs_info = end_io_wq->info; |
2976 | end_io_wq->error = err; |
2977 | - btrfs_init_work(&end_io_wq->work, end_workqueue_fn, NULL, NULL); |
2978 | |
2979 | if (bio->bi_rw & REQ_WRITE) { |
2980 | - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) |
2981 | - btrfs_queue_work(fs_info->endio_meta_write_workers, |
2982 | - &end_io_wq->work); |
2983 | - else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) |
2984 | - btrfs_queue_work(fs_info->endio_freespace_worker, |
2985 | - &end_io_wq->work); |
2986 | - else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) |
2987 | - btrfs_queue_work(fs_info->endio_raid56_workers, |
2988 | - &end_io_wq->work); |
2989 | - else |
2990 | - btrfs_queue_work(fs_info->endio_write_workers, |
2991 | - &end_io_wq->work); |
2992 | + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { |
2993 | + wq = fs_info->endio_meta_write_workers; |
2994 | + func = btrfs_endio_meta_write_helper; |
2995 | + } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_FREE_SPACE) { |
2996 | + wq = fs_info->endio_freespace_worker; |
2997 | + func = btrfs_freespace_write_helper; |
2998 | + } else if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { |
2999 | + wq = fs_info->endio_raid56_workers; |
3000 | + func = btrfs_endio_raid56_helper; |
3001 | + } else { |
3002 | + wq = fs_info->endio_write_workers; |
3003 | + func = btrfs_endio_write_helper; |
3004 | + } |
3005 | } else { |
3006 | - if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) |
3007 | - btrfs_queue_work(fs_info->endio_raid56_workers, |
3008 | - &end_io_wq->work); |
3009 | - else if (end_io_wq->metadata) |
3010 | - btrfs_queue_work(fs_info->endio_meta_workers, |
3011 | - &end_io_wq->work); |
3012 | - else |
3013 | - btrfs_queue_work(fs_info->endio_workers, |
3014 | - &end_io_wq->work); |
3015 | + if (end_io_wq->metadata == BTRFS_WQ_ENDIO_RAID56) { |
3016 | + wq = fs_info->endio_raid56_workers; |
3017 | + func = btrfs_endio_raid56_helper; |
3018 | + } else if (end_io_wq->metadata) { |
3019 | + wq = fs_info->endio_meta_workers; |
3020 | + func = btrfs_endio_meta_helper; |
3021 | + } else { |
3022 | + wq = fs_info->endio_workers; |
3023 | + func = btrfs_endio_helper; |
3024 | + } |
3025 | } |
3026 | + |
3027 | + btrfs_init_work(&end_io_wq->work, func, end_workqueue_fn, NULL, NULL); |
3028 | + btrfs_queue_work(wq, &end_io_wq->work); |
3029 | } |
3030 | |
3031 | /* |
3032 | @@ -830,7 +833,7 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, |
3033 | async->submit_bio_start = submit_bio_start; |
3034 | async->submit_bio_done = submit_bio_done; |
3035 | |
3036 | - btrfs_init_work(&async->work, run_one_async_start, |
3037 | + btrfs_init_work(&async->work, btrfs_worker_helper, run_one_async_start, |
3038 | run_one_async_done, run_one_async_free); |
3039 | |
3040 | async->bio_flags = bio_flags; |
3041 | @@ -3829,34 +3832,6 @@ static void btrfs_error_commit_super(struct btrfs_root *root) |
3042 | btrfs_cleanup_transaction(root); |
3043 | } |
3044 | |
3045 | -static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t, |
3046 | - struct btrfs_root *root) |
3047 | -{ |
3048 | - struct btrfs_inode *btrfs_inode; |
3049 | - struct list_head splice; |
3050 | - |
3051 | - INIT_LIST_HEAD(&splice); |
3052 | - |
3053 | - mutex_lock(&root->fs_info->ordered_operations_mutex); |
3054 | - spin_lock(&root->fs_info->ordered_root_lock); |
3055 | - |
3056 | - list_splice_init(&t->ordered_operations, &splice); |
3057 | - while (!list_empty(&splice)) { |
3058 | - btrfs_inode = list_entry(splice.next, struct btrfs_inode, |
3059 | - ordered_operations); |
3060 | - |
3061 | - list_del_init(&btrfs_inode->ordered_operations); |
3062 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3063 | - |
3064 | - btrfs_invalidate_inodes(btrfs_inode->root); |
3065 | - |
3066 | - spin_lock(&root->fs_info->ordered_root_lock); |
3067 | - } |
3068 | - |
3069 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3070 | - mutex_unlock(&root->fs_info->ordered_operations_mutex); |
3071 | -} |
3072 | - |
3073 | static void btrfs_destroy_ordered_extents(struct btrfs_root *root) |
3074 | { |
3075 | struct btrfs_ordered_extent *ordered; |
3076 | @@ -4093,8 +4068,6 @@ again: |
3077 | void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, |
3078 | struct btrfs_root *root) |
3079 | { |
3080 | - btrfs_destroy_ordered_operations(cur_trans, root); |
3081 | - |
3082 | btrfs_destroy_delayed_refs(cur_trans, root); |
3083 | |
3084 | cur_trans->state = TRANS_STATE_COMMIT_START; |
3085 | diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c |
3086 | index 813537f362f9..8edb9fcc38d5 100644 |
3087 | --- a/fs/btrfs/extent-tree.c |
3088 | +++ b/fs/btrfs/extent-tree.c |
3089 | @@ -552,7 +552,8 @@ static int cache_block_group(struct btrfs_block_group_cache *cache, |
3090 | caching_ctl->block_group = cache; |
3091 | caching_ctl->progress = cache->key.objectid; |
3092 | atomic_set(&caching_ctl->count, 1); |
3093 | - btrfs_init_work(&caching_ctl->work, caching_thread, NULL, NULL); |
3094 | + btrfs_init_work(&caching_ctl->work, btrfs_cache_helper, |
3095 | + caching_thread, NULL, NULL); |
3096 | |
3097 | spin_lock(&cache->lock); |
3098 | /* |
3099 | @@ -2749,8 +2750,8 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root, |
3100 | async->sync = 0; |
3101 | init_completion(&async->wait); |
3102 | |
3103 | - btrfs_init_work(&async->work, delayed_ref_async_start, |
3104 | - NULL, NULL); |
3105 | + btrfs_init_work(&async->work, btrfs_extent_refs_helper, |
3106 | + delayed_ref_async_start, NULL, NULL); |
3107 | |
3108 | btrfs_queue_work(root->fs_info->extent_workers, &async->work); |
3109 | |
3110 | diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c |
3111 | index a389820d158b..09b4e3165e2c 100644 |
3112 | --- a/fs/btrfs/extent_io.c |
3113 | +++ b/fs/btrfs/extent_io.c |
3114 | @@ -2532,6 +2532,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) |
3115 | test_bit(BIO_UPTODATE, &bio->bi_flags); |
3116 | if (err) |
3117 | uptodate = 0; |
3118 | + offset += len; |
3119 | continue; |
3120 | } |
3121 | } |
3122 | diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c |
3123 | index f46cfe45d686..54c84daec9b5 100644 |
3124 | --- a/fs/btrfs/file-item.c |
3125 | +++ b/fs/btrfs/file-item.c |
3126 | @@ -756,7 +756,7 @@ again: |
3127 | found_next = 1; |
3128 | if (ret != 0) |
3129 | goto insert; |
3130 | - slot = 0; |
3131 | + slot = path->slots[0]; |
3132 | } |
3133 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot); |
3134 | if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID || |
3135 | diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c |
3136 | index 1f2b99cb55ea..ab1fd668020d 100644 |
3137 | --- a/fs/btrfs/file.c |
3138 | +++ b/fs/btrfs/file.c |
3139 | @@ -1838,6 +1838,8 @@ out: |
3140 | |
3141 | int btrfs_release_file(struct inode *inode, struct file *filp) |
3142 | { |
3143 | + if (filp->private_data) |
3144 | + btrfs_ioctl_trans_end(filp); |
3145 | /* |
3146 | * ordered_data_close is set by settattr when we are about to truncate |
3147 | * a file from a non-zero size to a zero size. This tries to |
3148 | @@ -1845,26 +1847,8 @@ int btrfs_release_file(struct inode *inode, struct file *filp) |
3149 | * application were using truncate to replace a file in place. |
3150 | */ |
3151 | if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, |
3152 | - &BTRFS_I(inode)->runtime_flags)) { |
3153 | - struct btrfs_trans_handle *trans; |
3154 | - struct btrfs_root *root = BTRFS_I(inode)->root; |
3155 | - |
3156 | - /* |
3157 | - * We need to block on a committing transaction to keep us from |
3158 | - * throwing a ordered operation on to the list and causing |
3159 | - * something like sync to deadlock trying to flush out this |
3160 | - * inode. |
3161 | - */ |
3162 | - trans = btrfs_start_transaction(root, 0); |
3163 | - if (IS_ERR(trans)) |
3164 | - return PTR_ERR(trans); |
3165 | - btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode); |
3166 | - btrfs_end_transaction(trans, root); |
3167 | - if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) |
3168 | + &BTRFS_I(inode)->runtime_flags)) |
3169 | filemap_flush(inode->i_mapping); |
3170 | - } |
3171 | - if (filp->private_data) |
3172 | - btrfs_ioctl_trans_end(filp); |
3173 | return 0; |
3174 | } |
3175 | |
3176 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
3177 | index 3668048e16f8..c6cd34e699d0 100644 |
3178 | --- a/fs/btrfs/inode.c |
3179 | +++ b/fs/btrfs/inode.c |
3180 | @@ -709,6 +709,18 @@ retry: |
3181 | unlock_extent(io_tree, async_extent->start, |
3182 | async_extent->start + |
3183 | async_extent->ram_size - 1); |
3184 | + |
3185 | + /* |
3186 | + * we need to redirty the pages if we decide to |
3187 | + * fallback to uncompressed IO, otherwise we |
3188 | + * will not submit these pages down to lower |
3189 | + * layers. |
3190 | + */ |
3191 | + extent_range_redirty_for_io(inode, |
3192 | + async_extent->start, |
3193 | + async_extent->start + |
3194 | + async_extent->ram_size - 1); |
3195 | + |
3196 | goto retry; |
3197 | } |
3198 | goto out_free; |
3199 | @@ -1084,8 +1096,10 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page, |
3200 | async_cow->end = cur_end; |
3201 | INIT_LIST_HEAD(&async_cow->extents); |
3202 | |
3203 | - btrfs_init_work(&async_cow->work, async_cow_start, |
3204 | - async_cow_submit, async_cow_free); |
3205 | + btrfs_init_work(&async_cow->work, |
3206 | + btrfs_delalloc_helper, |
3207 | + async_cow_start, async_cow_submit, |
3208 | + async_cow_free); |
3209 | |
3210 | nr_pages = (cur_end - start + PAGE_CACHE_SIZE) >> |
3211 | PAGE_CACHE_SHIFT; |
3212 | @@ -1869,7 +1883,8 @@ static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end) |
3213 | |
3214 | SetPageChecked(page); |
3215 | page_cache_get(page); |
3216 | - btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL, NULL); |
3217 | + btrfs_init_work(&fixup->work, btrfs_fixup_helper, |
3218 | + btrfs_writepage_fixup_worker, NULL, NULL); |
3219 | fixup->page = page; |
3220 | btrfs_queue_work(root->fs_info->fixup_workers, &fixup->work); |
3221 | return -EBUSY; |
3222 | @@ -2810,7 +2825,8 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
3223 | struct inode *inode = page->mapping->host; |
3224 | struct btrfs_root *root = BTRFS_I(inode)->root; |
3225 | struct btrfs_ordered_extent *ordered_extent = NULL; |
3226 | - struct btrfs_workqueue *workers; |
3227 | + struct btrfs_workqueue *wq; |
3228 | + btrfs_work_func_t func; |
3229 | |
3230 | trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); |
3231 | |
3232 | @@ -2819,13 +2835,17 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, |
3233 | end - start + 1, uptodate)) |
3234 | return 0; |
3235 | |
3236 | - btrfs_init_work(&ordered_extent->work, finish_ordered_fn, NULL, NULL); |
3237 | + if (btrfs_is_free_space_inode(inode)) { |
3238 | + wq = root->fs_info->endio_freespace_worker; |
3239 | + func = btrfs_freespace_write_helper; |
3240 | + } else { |
3241 | + wq = root->fs_info->endio_write_workers; |
3242 | + func = btrfs_endio_write_helper; |
3243 | + } |
3244 | |
3245 | - if (btrfs_is_free_space_inode(inode)) |
3246 | - workers = root->fs_info->endio_freespace_worker; |
3247 | - else |
3248 | - workers = root->fs_info->endio_write_workers; |
3249 | - btrfs_queue_work(workers, &ordered_extent->work); |
3250 | + btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, |
3251 | + NULL); |
3252 | + btrfs_queue_work(wq, &ordered_extent->work); |
3253 | |
3254 | return 0; |
3255 | } |
3256 | @@ -7146,7 +7166,8 @@ again: |
3257 | if (!ret) |
3258 | goto out_test; |
3259 | |
3260 | - btrfs_init_work(&ordered->work, finish_ordered_fn, NULL, NULL); |
3261 | + btrfs_init_work(&ordered->work, btrfs_endio_write_helper, |
3262 | + finish_ordered_fn, NULL, NULL); |
3263 | btrfs_queue_work(root->fs_info->endio_write_workers, |
3264 | &ordered->work); |
3265 | out_test: |
3266 | @@ -7939,27 +7960,6 @@ static int btrfs_truncate(struct inode *inode) |
3267 | BUG_ON(ret); |
3268 | |
3269 | /* |
3270 | - * setattr is responsible for setting the ordered_data_close flag, |
3271 | - * but that is only tested during the last file release. That |
3272 | - * could happen well after the next commit, leaving a great big |
3273 | - * window where new writes may get lost if someone chooses to write |
3274 | - * to this file after truncating to zero |
3275 | - * |
3276 | - * The inode doesn't have any dirty data here, and so if we commit |
3277 | - * this is a noop. If someone immediately starts writing to the inode |
3278 | - * it is very likely we'll catch some of their writes in this |
3279 | - * transaction, and the commit will find this file on the ordered |
3280 | - * data list with good things to send down. |
3281 | - * |
3282 | - * This is a best effort solution, there is still a window where |
3283 | - * using truncate to replace the contents of the file will |
3284 | - * end up with a zero length file after a crash. |
3285 | - */ |
3286 | - if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE, |
3287 | - &BTRFS_I(inode)->runtime_flags)) |
3288 | - btrfs_add_ordered_operation(trans, root, inode); |
3289 | - |
3290 | - /* |
3291 | * So if we truncate and then write and fsync we normally would just |
3292 | * write the extents that changed, which is a problem if we need to |
3293 | * first truncate that entire inode. So set this flag so we write out |
3294 | @@ -8106,7 +8106,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) |
3295 | mutex_init(&ei->delalloc_mutex); |
3296 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); |
3297 | INIT_LIST_HEAD(&ei->delalloc_inodes); |
3298 | - INIT_LIST_HEAD(&ei->ordered_operations); |
3299 | RB_CLEAR_NODE(&ei->rb_node); |
3300 | |
3301 | return inode; |
3302 | @@ -8146,17 +8145,6 @@ void btrfs_destroy_inode(struct inode *inode) |
3303 | if (!root) |
3304 | goto free; |
3305 | |
3306 | - /* |
3307 | - * Make sure we're properly removed from the ordered operation |
3308 | - * lists. |
3309 | - */ |
3310 | - smp_mb(); |
3311 | - if (!list_empty(&BTRFS_I(inode)->ordered_operations)) { |
3312 | - spin_lock(&root->fs_info->ordered_root_lock); |
3313 | - list_del_init(&BTRFS_I(inode)->ordered_operations); |
3314 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3315 | - } |
3316 | - |
3317 | if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, |
3318 | &BTRFS_I(inode)->runtime_flags)) { |
3319 | btrfs_info(root->fs_info, "inode %llu still on the orphan list", |
3320 | @@ -8338,12 +8326,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
3321 | ret = 0; |
3322 | |
3323 | /* |
3324 | - * we're using rename to replace one file with another. |
3325 | - * and the replacement file is large. Start IO on it now so |
3326 | - * we don't add too much work to the end of the transaction |
3327 | + * we're using rename to replace one file with another. Start IO on it |
3328 | + * now so we don't add too much work to the end of the transaction |
3329 | */ |
3330 | - if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size && |
3331 | - old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) |
3332 | + if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size) |
3333 | filemap_flush(old_inode->i_mapping); |
3334 | |
3335 | /* close the racy window with snapshot create/destroy ioctl */ |
3336 | @@ -8391,12 +8377,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, |
3337 | */ |
3338 | btrfs_pin_log_trans(root); |
3339 | } |
3340 | - /* |
3341 | - * make sure the inode gets flushed if it is replacing |
3342 | - * something. |
3343 | - */ |
3344 | - if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) |
3345 | - btrfs_add_ordered_operation(trans, root, old_inode); |
3346 | |
3347 | inode_inc_iversion(old_dir); |
3348 | inode_inc_iversion(new_dir); |
3349 | @@ -8514,7 +8494,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, |
3350 | work->inode = inode; |
3351 | work->wait = wait; |
3352 | work->delay_iput = delay_iput; |
3353 | - btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL, NULL); |
3354 | + WARN_ON_ONCE(!inode); |
3355 | + btrfs_init_work(&work->work, btrfs_flush_delalloc_helper, |
3356 | + btrfs_run_delalloc_work, NULL, NULL); |
3357 | |
3358 | return work; |
3359 | } |
3360 | diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c |
3361 | index 7187b14faa6c..ac734ec4cc20 100644 |
3362 | --- a/fs/btrfs/ordered-data.c |
3363 | +++ b/fs/btrfs/ordered-data.c |
3364 | @@ -571,18 +571,6 @@ void btrfs_remove_ordered_extent(struct inode *inode, |
3365 | |
3366 | trace_btrfs_ordered_extent_remove(inode, entry); |
3367 | |
3368 | - /* |
3369 | - * we have no more ordered extents for this inode and |
3370 | - * no dirty pages. We can safely remove it from the |
3371 | - * list of ordered extents |
3372 | - */ |
3373 | - if (RB_EMPTY_ROOT(&tree->tree) && |
3374 | - !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) { |
3375 | - spin_lock(&root->fs_info->ordered_root_lock); |
3376 | - list_del_init(&BTRFS_I(inode)->ordered_operations); |
3377 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3378 | - } |
3379 | - |
3380 | if (!root->nr_ordered_extents) { |
3381 | spin_lock(&root->fs_info->ordered_root_lock); |
3382 | BUG_ON(list_empty(&root->ordered_root)); |
3383 | @@ -627,6 +615,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr) |
3384 | spin_unlock(&root->ordered_extent_lock); |
3385 | |
3386 | btrfs_init_work(&ordered->flush_work, |
3387 | + btrfs_flush_delalloc_helper, |
3388 | btrfs_run_ordered_extent_work, NULL, NULL); |
3389 | list_add_tail(&ordered->work_list, &works); |
3390 | btrfs_queue_work(root->fs_info->flush_workers, |
3391 | @@ -687,81 +676,6 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr) |
3392 | } |
3393 | |
3394 | /* |
3395 | - * this is used during transaction commit to write all the inodes |
3396 | - * added to the ordered operation list. These files must be fully on |
3397 | - * disk before the transaction commits. |
3398 | - * |
3399 | - * we have two modes here, one is to just start the IO via filemap_flush |
3400 | - * and the other is to wait for all the io. When we wait, we have an |
3401 | - * extra check to make sure the ordered operation list really is empty |
3402 | - * before we return |
3403 | - */ |
3404 | -int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans, |
3405 | - struct btrfs_root *root, int wait) |
3406 | -{ |
3407 | - struct btrfs_inode *btrfs_inode; |
3408 | - struct inode *inode; |
3409 | - struct btrfs_transaction *cur_trans = trans->transaction; |
3410 | - struct list_head splice; |
3411 | - struct list_head works; |
3412 | - struct btrfs_delalloc_work *work, *next; |
3413 | - int ret = 0; |
3414 | - |
3415 | - INIT_LIST_HEAD(&splice); |
3416 | - INIT_LIST_HEAD(&works); |
3417 | - |
3418 | - mutex_lock(&root->fs_info->ordered_extent_flush_mutex); |
3419 | - spin_lock(&root->fs_info->ordered_root_lock); |
3420 | - list_splice_init(&cur_trans->ordered_operations, &splice); |
3421 | - while (!list_empty(&splice)) { |
3422 | - btrfs_inode = list_entry(splice.next, struct btrfs_inode, |
3423 | - ordered_operations); |
3424 | - inode = &btrfs_inode->vfs_inode; |
3425 | - |
3426 | - list_del_init(&btrfs_inode->ordered_operations); |
3427 | - |
3428 | - /* |
3429 | - * the inode may be getting freed (in sys_unlink path). |
3430 | - */ |
3431 | - inode = igrab(inode); |
3432 | - if (!inode) |
3433 | - continue; |
3434 | - |
3435 | - if (!wait) |
3436 | - list_add_tail(&BTRFS_I(inode)->ordered_operations, |
3437 | - &cur_trans->ordered_operations); |
3438 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3439 | - |
3440 | - work = btrfs_alloc_delalloc_work(inode, wait, 1); |
3441 | - if (!work) { |
3442 | - spin_lock(&root->fs_info->ordered_root_lock); |
3443 | - if (list_empty(&BTRFS_I(inode)->ordered_operations)) |
3444 | - list_add_tail(&btrfs_inode->ordered_operations, |
3445 | - &splice); |
3446 | - list_splice_tail(&splice, |
3447 | - &cur_trans->ordered_operations); |
3448 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3449 | - ret = -ENOMEM; |
3450 | - goto out; |
3451 | - } |
3452 | - list_add_tail(&work->list, &works); |
3453 | - btrfs_queue_work(root->fs_info->flush_workers, |
3454 | - &work->work); |
3455 | - |
3456 | - cond_resched(); |
3457 | - spin_lock(&root->fs_info->ordered_root_lock); |
3458 | - } |
3459 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3460 | -out: |
3461 | - list_for_each_entry_safe(work, next, &works, list) { |
3462 | - list_del_init(&work->list); |
3463 | - btrfs_wait_and_free_delalloc_work(work); |
3464 | - } |
3465 | - mutex_unlock(&root->fs_info->ordered_extent_flush_mutex); |
3466 | - return ret; |
3467 | -} |
3468 | - |
3469 | -/* |
3470 | * Used to start IO or wait for a given ordered extent to finish. |
3471 | * |
3472 | * If wait is one, this effectively waits on page writeback for all the pages |
3473 | @@ -1120,42 +1034,6 @@ out: |
3474 | return index; |
3475 | } |
3476 | |
3477 | - |
3478 | -/* |
3479 | - * add a given inode to the list of inodes that must be fully on |
3480 | - * disk before a transaction commit finishes. |
3481 | - * |
3482 | - * This basically gives us the ext3 style data=ordered mode, and it is mostly |
3483 | - * used to make sure renamed files are fully on disk. |
3484 | - * |
3485 | - * It is a noop if the inode is already fully on disk. |
3486 | - * |
3487 | - * If trans is not null, we'll do a friendly check for a transaction that |
3488 | - * is already flushing things and force the IO down ourselves. |
3489 | - */ |
3490 | -void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, |
3491 | - struct btrfs_root *root, struct inode *inode) |
3492 | -{ |
3493 | - struct btrfs_transaction *cur_trans = trans->transaction; |
3494 | - u64 last_mod; |
3495 | - |
3496 | - last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans); |
3497 | - |
3498 | - /* |
3499 | - * if this file hasn't been changed since the last transaction |
3500 | - * commit, we can safely return without doing anything |
3501 | - */ |
3502 | - if (last_mod <= root->fs_info->last_trans_committed) |
3503 | - return; |
3504 | - |
3505 | - spin_lock(&root->fs_info->ordered_root_lock); |
3506 | - if (list_empty(&BTRFS_I(inode)->ordered_operations)) { |
3507 | - list_add_tail(&BTRFS_I(inode)->ordered_operations, |
3508 | - &cur_trans->ordered_operations); |
3509 | - } |
3510 | - spin_unlock(&root->fs_info->ordered_root_lock); |
3511 | -} |
3512 | - |
3513 | int __init ordered_data_init(void) |
3514 | { |
3515 | btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent", |
3516 | diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h |
3517 | index 246897058efb..d81a274d621e 100644 |
3518 | --- a/fs/btrfs/ordered-data.h |
3519 | +++ b/fs/btrfs/ordered-data.h |
3520 | @@ -190,11 +190,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, |
3521 | struct btrfs_ordered_extent *ordered); |
3522 | int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, |
3523 | u32 *sum, int len); |
3524 | -int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans, |
3525 | - struct btrfs_root *root, int wait); |
3526 | -void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans, |
3527 | - struct btrfs_root *root, |
3528 | - struct inode *inode); |
3529 | int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr); |
3530 | void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr); |
3531 | void btrfs_get_logged_extents(struct inode *inode, |
3532 | diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c |
3533 | index 98cb6b2630f9..3eec914710b2 100644 |
3534 | --- a/fs/btrfs/qgroup.c |
3535 | +++ b/fs/btrfs/qgroup.c |
3536 | @@ -2551,6 +2551,7 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, |
3537 | memset(&fs_info->qgroup_rescan_work, 0, |
3538 | sizeof(fs_info->qgroup_rescan_work)); |
3539 | btrfs_init_work(&fs_info->qgroup_rescan_work, |
3540 | + btrfs_qgroup_rescan_helper, |
3541 | btrfs_qgroup_rescan_worker, NULL, NULL); |
3542 | |
3543 | if (ret) { |
3544 | diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c |
3545 | index 4a88f073fdd7..0a6b6e4bcbb9 100644 |
3546 | --- a/fs/btrfs/raid56.c |
3547 | +++ b/fs/btrfs/raid56.c |
3548 | @@ -1416,7 +1416,8 @@ cleanup: |
3549 | |
3550 | static void async_rmw_stripe(struct btrfs_raid_bio *rbio) |
3551 | { |
3552 | - btrfs_init_work(&rbio->work, rmw_work, NULL, NULL); |
3553 | + btrfs_init_work(&rbio->work, btrfs_rmw_helper, |
3554 | + rmw_work, NULL, NULL); |
3555 | |
3556 | btrfs_queue_work(rbio->fs_info->rmw_workers, |
3557 | &rbio->work); |
3558 | @@ -1424,7 +1425,8 @@ static void async_rmw_stripe(struct btrfs_raid_bio *rbio) |
3559 | |
3560 | static void async_read_rebuild(struct btrfs_raid_bio *rbio) |
3561 | { |
3562 | - btrfs_init_work(&rbio->work, read_rebuild_work, NULL, NULL); |
3563 | + btrfs_init_work(&rbio->work, btrfs_rmw_helper, |
3564 | + read_rebuild_work, NULL, NULL); |
3565 | |
3566 | btrfs_queue_work(rbio->fs_info->rmw_workers, |
3567 | &rbio->work); |
3568 | @@ -1665,7 +1667,8 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) |
3569 | plug = container_of(cb, struct btrfs_plug_cb, cb); |
3570 | |
3571 | if (from_schedule) { |
3572 | - btrfs_init_work(&plug->work, unplug_work, NULL, NULL); |
3573 | + btrfs_init_work(&plug->work, btrfs_rmw_helper, |
3574 | + unplug_work, NULL, NULL); |
3575 | btrfs_queue_work(plug->info->rmw_workers, |
3576 | &plug->work); |
3577 | return; |
3578 | diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c |
3579 | index 09230cf3a244..20408c6b665a 100644 |
3580 | --- a/fs/btrfs/reada.c |
3581 | +++ b/fs/btrfs/reada.c |
3582 | @@ -798,7 +798,8 @@ static void reada_start_machine(struct btrfs_fs_info *fs_info) |
3583 | /* FIXME we cannot handle this properly right now */ |
3584 | BUG(); |
3585 | } |
3586 | - btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL); |
3587 | + btrfs_init_work(&rmw->work, btrfs_readahead_helper, |
3588 | + reada_start_machine_worker, NULL, NULL); |
3589 | rmw->fs_info = fs_info; |
3590 | |
3591 | btrfs_queue_work(fs_info->readahead_workers, &rmw->work); |
3592 | diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c |
3593 | index b6d198f5181e..8dddedcfa961 100644 |
3594 | --- a/fs/btrfs/scrub.c |
3595 | +++ b/fs/btrfs/scrub.c |
3596 | @@ -428,8 +428,8 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) |
3597 | sbio->index = i; |
3598 | sbio->sctx = sctx; |
3599 | sbio->page_count = 0; |
3600 | - btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, |
3601 | - NULL, NULL); |
3602 | + btrfs_init_work(&sbio->work, btrfs_scrub_helper, |
3603 | + scrub_bio_end_io_worker, NULL, NULL); |
3604 | |
3605 | if (i != SCRUB_BIOS_PER_SCTX - 1) |
3606 | sctx->bios[i]->next_free = i + 1; |
3607 | @@ -999,8 +999,8 @@ nodatasum_case: |
3608 | fixup_nodatasum->root = fs_info->extent_root; |
3609 | fixup_nodatasum->mirror_num = failed_mirror_index + 1; |
3610 | scrub_pending_trans_workers_inc(sctx); |
3611 | - btrfs_init_work(&fixup_nodatasum->work, scrub_fixup_nodatasum, |
3612 | - NULL, NULL); |
3613 | + btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper, |
3614 | + scrub_fixup_nodatasum, NULL, NULL); |
3615 | btrfs_queue_work(fs_info->scrub_workers, |
3616 | &fixup_nodatasum->work); |
3617 | goto out; |
3618 | @@ -1616,7 +1616,8 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err) |
3619 | sbio->err = err; |
3620 | sbio->bio = bio; |
3621 | |
3622 | - btrfs_init_work(&sbio->work, scrub_wr_bio_end_io_worker, NULL, NULL); |
3623 | + btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, |
3624 | + scrub_wr_bio_end_io_worker, NULL, NULL); |
3625 | btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); |
3626 | } |
3627 | |
3628 | @@ -3203,7 +3204,8 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, |
3629 | nocow_ctx->len = len; |
3630 | nocow_ctx->mirror_num = mirror_num; |
3631 | nocow_ctx->physical_for_dev_replace = physical_for_dev_replace; |
3632 | - btrfs_init_work(&nocow_ctx->work, copy_nocow_pages_worker, NULL, NULL); |
3633 | + btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper, |
3634 | + copy_nocow_pages_worker, NULL, NULL); |
3635 | INIT_LIST_HEAD(&nocow_ctx->inodes); |
3636 | btrfs_queue_work(fs_info->scrub_nocow_workers, |
3637 | &nocow_ctx->work); |
3638 | diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c |
3639 | index 5f379affdf23..d89c6d3542ca 100644 |
3640 | --- a/fs/btrfs/transaction.c |
3641 | +++ b/fs/btrfs/transaction.c |
3642 | @@ -218,7 +218,6 @@ loop: |
3643 | spin_lock_init(&cur_trans->delayed_refs.lock); |
3644 | |
3645 | INIT_LIST_HEAD(&cur_trans->pending_snapshots); |
3646 | - INIT_LIST_HEAD(&cur_trans->ordered_operations); |
3647 | INIT_LIST_HEAD(&cur_trans->pending_chunks); |
3648 | INIT_LIST_HEAD(&cur_trans->switch_commits); |
3649 | list_add_tail(&cur_trans->list, &fs_info->trans_list); |
3650 | @@ -1612,27 +1611,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, |
3651 | kmem_cache_free(btrfs_trans_handle_cachep, trans); |
3652 | } |
3653 | |
3654 | -static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans, |
3655 | - struct btrfs_root *root) |
3656 | -{ |
3657 | - int ret; |
3658 | - |
3659 | - ret = btrfs_run_delayed_items(trans, root); |
3660 | - if (ret) |
3661 | - return ret; |
3662 | - |
3663 | - /* |
3664 | - * rename don't use btrfs_join_transaction, so, once we |
3665 | - * set the transaction to blocked above, we aren't going |
3666 | - * to get any new ordered operations. We can safely run |
3667 | - * it here and no for sure that nothing new will be added |
3668 | - * to the list |
3669 | - */ |
3670 | - ret = btrfs_run_ordered_operations(trans, root, 1); |
3671 | - |
3672 | - return ret; |
3673 | -} |
3674 | - |
3675 | static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) |
3676 | { |
3677 | if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT)) |
3678 | @@ -1653,13 +1631,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, |
3679 | struct btrfs_transaction *prev_trans = NULL; |
3680 | int ret; |
3681 | |
3682 | - ret = btrfs_run_ordered_operations(trans, root, 0); |
3683 | - if (ret) { |
3684 | - btrfs_abort_transaction(trans, root, ret); |
3685 | - btrfs_end_transaction(trans, root); |
3686 | - return ret; |
3687 | - } |
3688 | - |
3689 | /* Stop the commit early if ->aborted is set */ |
3690 | if (unlikely(ACCESS_ONCE(cur_trans->aborted))) { |
3691 | ret = cur_trans->aborted; |
3692 | @@ -1740,7 +1711,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, |
3693 | if (ret) |
3694 | goto cleanup_transaction; |
3695 | |
3696 | - ret = btrfs_flush_all_pending_stuffs(trans, root); |
3697 | + ret = btrfs_run_delayed_items(trans, root); |
3698 | if (ret) |
3699 | goto cleanup_transaction; |
3700 | |
3701 | @@ -1748,7 +1719,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, |
3702 | extwriter_counter_read(cur_trans) == 0); |
3703 | |
3704 | /* some pending stuffs might be added after the previous flush. */ |
3705 | - ret = btrfs_flush_all_pending_stuffs(trans, root); |
3706 | + ret = btrfs_run_delayed_items(trans, root); |
3707 | if (ret) |
3708 | goto cleanup_transaction; |
3709 | |
3710 | diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h |
3711 | index 7dd558ed0716..579be51b27e5 100644 |
3712 | --- a/fs/btrfs/transaction.h |
3713 | +++ b/fs/btrfs/transaction.h |
3714 | @@ -55,7 +55,6 @@ struct btrfs_transaction { |
3715 | wait_queue_head_t writer_wait; |
3716 | wait_queue_head_t commit_wait; |
3717 | struct list_head pending_snapshots; |
3718 | - struct list_head ordered_operations; |
3719 | struct list_head pending_chunks; |
3720 | struct list_head switch_commits; |
3721 | struct btrfs_delayed_ref_root delayed_refs; |
3722 | diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h |
3723 | index 7f78cbf5cf41..4c29db604bbe 100644 |
3724 | --- a/fs/btrfs/ulist.h |
3725 | +++ b/fs/btrfs/ulist.h |
3726 | @@ -57,6 +57,21 @@ void ulist_free(struct ulist *ulist); |
3727 | int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask); |
3728 | int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux, |
3729 | u64 *old_aux, gfp_t gfp_mask); |
3730 | + |
3731 | +/* just like ulist_add_merge() but take a pointer for the aux data */ |
3732 | +static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux, |
3733 | + void **old_aux, gfp_t gfp_mask) |
3734 | +{ |
3735 | +#if BITS_PER_LONG == 32 |
3736 | + u64 old64 = (uintptr_t)*old_aux; |
3737 | + int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); |
3738 | + *old_aux = (void *)((uintptr_t)old64); |
3739 | + return ret; |
3740 | +#else |
3741 | + return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); |
3742 | +#endif |
3743 | +} |
3744 | + |
3745 | struct ulist_node *ulist_next(struct ulist *ulist, |
3746 | struct ulist_iterator *uiter); |
3747 | |
3748 | diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c |
3749 | index 6cb82f62cb7c..81bec9fd8f19 100644 |
3750 | --- a/fs/btrfs/volumes.c |
3751 | +++ b/fs/btrfs/volumes.c |
3752 | @@ -5800,7 +5800,8 @@ struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, |
3753 | else |
3754 | generate_random_uuid(dev->uuid); |
3755 | |
3756 | - btrfs_init_work(&dev->work, pending_bios_fn, NULL, NULL); |
3757 | + btrfs_init_work(&dev->work, btrfs_submit_helper, |
3758 | + pending_bios_fn, NULL, NULL); |
3759 | |
3760 | return dev; |
3761 | } |
3762 | diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c |
3763 | index 8c41b52da358..16a46b6a6fee 100644 |
3764 | --- a/fs/debugfs/inode.c |
3765 | +++ b/fs/debugfs/inode.c |
3766 | @@ -534,7 +534,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove); |
3767 | */ |
3768 | void debugfs_remove_recursive(struct dentry *dentry) |
3769 | { |
3770 | - struct dentry *child, *next, *parent; |
3771 | + struct dentry *child, *parent; |
3772 | |
3773 | if (IS_ERR_OR_NULL(dentry)) |
3774 | return; |
3775 | @@ -546,30 +546,49 @@ void debugfs_remove_recursive(struct dentry *dentry) |
3776 | parent = dentry; |
3777 | down: |
3778 | mutex_lock(&parent->d_inode->i_mutex); |
3779 | - list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { |
3780 | + loop: |
3781 | + /* |
3782 | + * The parent->d_subdirs is protected by the d_lock. Outside that |
3783 | + * lock, the child can be unlinked and set to be freed which can |
3784 | + * use the d_u.d_child as the rcu head and corrupt this list. |
3785 | + */ |
3786 | + spin_lock(&parent->d_lock); |
3787 | + list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) { |
3788 | if (!debugfs_positive(child)) |
3789 | continue; |
3790 | |
3791 | /* perhaps simple_empty(child) makes more sense */ |
3792 | if (!list_empty(&child->d_subdirs)) { |
3793 | + spin_unlock(&parent->d_lock); |
3794 | mutex_unlock(&parent->d_inode->i_mutex); |
3795 | parent = child; |
3796 | goto down; |
3797 | } |
3798 | - up: |
3799 | + |
3800 | + spin_unlock(&parent->d_lock); |
3801 | + |
3802 | if (!__debugfs_remove(child, parent)) |
3803 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
3804 | + |
3805 | + /* |
3806 | + * The parent->d_lock protects agaist child from unlinking |
3807 | + * from d_subdirs. When releasing the parent->d_lock we can |
3808 | + * no longer trust that the next pointer is valid. |
3809 | + * Restart the loop. We'll skip this one with the |
3810 | + * debugfs_positive() check. |
3811 | + */ |
3812 | + goto loop; |
3813 | } |
3814 | + spin_unlock(&parent->d_lock); |
3815 | |
3816 | mutex_unlock(&parent->d_inode->i_mutex); |
3817 | child = parent; |
3818 | parent = parent->d_parent; |
3819 | mutex_lock(&parent->d_inode->i_mutex); |
3820 | |
3821 | - if (child != dentry) { |
3822 | - next = list_next_entry(child, d_u.d_child); |
3823 | - goto up; |
3824 | - } |
3825 | + if (child != dentry) |
3826 | + /* go up */ |
3827 | + goto loop; |
3828 | |
3829 | if (!__debugfs_remove(child, parent)) |
3830 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
3831 | diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h |
3832 | index 7cc5a0e23688..1bbe7c315138 100644 |
3833 | --- a/fs/ext4/ext4.h |
3834 | +++ b/fs/ext4/ext4.h |
3835 | @@ -2144,8 +2144,8 @@ extern ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb, |
3836 | extern int ext4_ind_calc_metadata_amount(struct inode *inode, sector_t lblock); |
3837 | extern int ext4_ind_trans_blocks(struct inode *inode, int nrblocks); |
3838 | extern void ext4_ind_truncate(handle_t *, struct inode *inode); |
3839 | -extern int ext4_free_hole_blocks(handle_t *handle, struct inode *inode, |
3840 | - ext4_lblk_t first, ext4_lblk_t stop); |
3841 | +extern int ext4_ind_remove_space(handle_t *handle, struct inode *inode, |
3842 | + ext4_lblk_t start, ext4_lblk_t end); |
3843 | |
3844 | /* ioctl.c */ |
3845 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); |
3846 | @@ -2453,6 +2453,22 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize) |
3847 | up_write(&EXT4_I(inode)->i_data_sem); |
3848 | } |
3849 | |
3850 | +/* Update i_size, i_disksize. Requires i_mutex to avoid races with truncate */ |
3851 | +static inline int ext4_update_inode_size(struct inode *inode, loff_t newsize) |
3852 | +{ |
3853 | + int changed = 0; |
3854 | + |
3855 | + if (newsize > inode->i_size) { |
3856 | + i_size_write(inode, newsize); |
3857 | + changed = 1; |
3858 | + } |
3859 | + if (newsize > EXT4_I(inode)->i_disksize) { |
3860 | + ext4_update_i_disksize(inode, newsize); |
3861 | + changed |= 2; |
3862 | + } |
3863 | + return changed; |
3864 | +} |
3865 | + |
3866 | struct ext4_group_info { |
3867 | unsigned long bb_state; |
3868 | struct rb_root bb_free_root; |
3869 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
3870 | index 4da228a0e6d0..7dfd6300e1c2 100644 |
3871 | --- a/fs/ext4/extents.c |
3872 | +++ b/fs/ext4/extents.c |
3873 | @@ -4664,7 +4664,8 @@ retry: |
3874 | } |
3875 | |
3876 | static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, |
3877 | - ext4_lblk_t len, int flags, int mode) |
3878 | + ext4_lblk_t len, loff_t new_size, |
3879 | + int flags, int mode) |
3880 | { |
3881 | struct inode *inode = file_inode(file); |
3882 | handle_t *handle; |
3883 | @@ -4673,8 +4674,10 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, |
3884 | int retries = 0; |
3885 | struct ext4_map_blocks map; |
3886 | unsigned int credits; |
3887 | + loff_t epos; |
3888 | |
3889 | map.m_lblk = offset; |
3890 | + map.m_len = len; |
3891 | /* |
3892 | * Don't normalize the request if it can fit in one extent so |
3893 | * that it doesn't get unnecessarily split into multiple |
3894 | @@ -4689,9 +4692,7 @@ static int ext4_alloc_file_blocks(struct file *file, ext4_lblk_t offset, |
3895 | credits = ext4_chunk_trans_blocks(inode, len); |
3896 | |
3897 | retry: |
3898 | - while (ret >= 0 && ret < len) { |
3899 | - map.m_lblk = map.m_lblk + ret; |
3900 | - map.m_len = len = len - ret; |
3901 | + while (ret >= 0 && len) { |
3902 | handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS, |
3903 | credits); |
3904 | if (IS_ERR(handle)) { |
3905 | @@ -4708,6 +4709,21 @@ retry: |
3906 | ret2 = ext4_journal_stop(handle); |
3907 | break; |
3908 | } |
3909 | + map.m_lblk += ret; |
3910 | + map.m_len = len = len - ret; |
3911 | + epos = (loff_t)map.m_lblk << inode->i_blkbits; |
3912 | + inode->i_ctime = ext4_current_time(inode); |
3913 | + if (new_size) { |
3914 | + if (epos > new_size) |
3915 | + epos = new_size; |
3916 | + if (ext4_update_inode_size(inode, epos) & 0x1) |
3917 | + inode->i_mtime = inode->i_ctime; |
3918 | + } else { |
3919 | + if (epos > inode->i_size) |
3920 | + ext4_set_inode_flag(inode, |
3921 | + EXT4_INODE_EOFBLOCKS); |
3922 | + } |
3923 | + ext4_mark_inode_dirty(handle, inode); |
3924 | ret2 = ext4_journal_stop(handle); |
3925 | if (ret2) |
3926 | break; |
3927 | @@ -4730,7 +4746,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
3928 | loff_t new_size = 0; |
3929 | int ret = 0; |
3930 | int flags; |
3931 | - int partial; |
3932 | + int credits; |
3933 | + int partial_begin, partial_end; |
3934 | loff_t start, end; |
3935 | ext4_lblk_t lblk; |
3936 | struct address_space *mapping = inode->i_mapping; |
3937 | @@ -4770,7 +4787,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
3938 | |
3939 | if (start < offset || end > offset + len) |
3940 | return -EINVAL; |
3941 | - partial = (offset + len) & ((1 << blkbits) - 1); |
3942 | + partial_begin = offset & ((1 << blkbits) - 1); |
3943 | + partial_end = (offset + len) & ((1 << blkbits) - 1); |
3944 | |
3945 | lblk = start >> blkbits; |
3946 | max_blocks = (end >> blkbits); |
3947 | @@ -4804,7 +4822,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
3948 | * If we have a partial block after EOF we have to allocate |
3949 | * the entire block. |
3950 | */ |
3951 | - if (partial) |
3952 | + if (partial_end) |
3953 | max_blocks += 1; |
3954 | } |
3955 | |
3956 | @@ -4812,6 +4830,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
3957 | |
3958 | /* Now release the pages and zero block aligned part of pages*/ |
3959 | truncate_pagecache_range(inode, start, end - 1); |
3960 | + inode->i_mtime = inode->i_ctime = ext4_current_time(inode); |
3961 | |
3962 | /* Wait all existing dio workers, newcomers will block on i_mutex */ |
3963 | ext4_inode_block_unlocked_dio(inode); |
3964 | @@ -4824,13 +4843,22 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
3965 | if (ret) |
3966 | goto out_dio; |
3967 | |
3968 | - ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, |
3969 | - mode); |
3970 | + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, |
3971 | + flags, mode); |
3972 | if (ret) |
3973 | goto out_dio; |
3974 | } |
3975 | + if (!partial_begin && !partial_end) |
3976 | + goto out_dio; |
3977 | |
3978 | - handle = ext4_journal_start(inode, EXT4_HT_MISC, 4); |
3979 | + /* |
3980 | + * In worst case we have to writeout two nonadjacent unwritten |
3981 | + * blocks and update the inode |
3982 | + */ |
3983 | + credits = (2 * ext4_ext_index_trans_blocks(inode, 2)) + 1; |
3984 | + if (ext4_should_journal_data(inode)) |
3985 | + credits += 2; |
3986 | + handle = ext4_journal_start(inode, EXT4_HT_MISC, credits); |
3987 | if (IS_ERR(handle)) { |
3988 | ret = PTR_ERR(handle); |
3989 | ext4_std_error(inode->i_sb, ret); |
3990 | @@ -4838,12 +4866,8 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
3991 | } |
3992 | |
3993 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); |
3994 | - |
3995 | if (new_size) { |
3996 | - if (new_size > i_size_read(inode)) |
3997 | - i_size_write(inode, new_size); |
3998 | - if (new_size > EXT4_I(inode)->i_disksize) |
3999 | - ext4_update_i_disksize(inode, new_size); |
4000 | + ext4_update_inode_size(inode, new_size); |
4001 | } else { |
4002 | /* |
4003 | * Mark that we allocate beyond EOF so the subsequent truncate |
4004 | @@ -4852,7 +4876,6 @@ static long ext4_zero_range(struct file *file, loff_t offset, |
4005 | if ((offset + len) > i_size_read(inode)) |
4006 | ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); |
4007 | } |
4008 | - |
4009 | ext4_mark_inode_dirty(handle, inode); |
4010 | |
4011 | /* Zero out partial block at the edges of the range */ |
4012 | @@ -4879,13 +4902,11 @@ out_mutex: |
4013 | long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) |
4014 | { |
4015 | struct inode *inode = file_inode(file); |
4016 | - handle_t *handle; |
4017 | loff_t new_size = 0; |
4018 | unsigned int max_blocks; |
4019 | int ret = 0; |
4020 | int flags; |
4021 | ext4_lblk_t lblk; |
4022 | - struct timespec tv; |
4023 | unsigned int blkbits = inode->i_blkbits; |
4024 | |
4025 | /* Return error if mode is not supported */ |
4026 | @@ -4936,36 +4957,15 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) |
4027 | goto out; |
4028 | } |
4029 | |
4030 | - ret = ext4_alloc_file_blocks(file, lblk, max_blocks, flags, mode); |
4031 | + ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size, |
4032 | + flags, mode); |
4033 | if (ret) |
4034 | goto out; |
4035 | |
4036 | - handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); |
4037 | - if (IS_ERR(handle)) |
4038 | - goto out; |
4039 | - |
4040 | - tv = inode->i_ctime = ext4_current_time(inode); |
4041 | - |
4042 | - if (new_size) { |
4043 | - if (new_size > i_size_read(inode)) { |
4044 | - i_size_write(inode, new_size); |
4045 | - inode->i_mtime = tv; |
4046 | - } |
4047 | - if (new_size > EXT4_I(inode)->i_disksize) |
4048 | - ext4_update_i_disksize(inode, new_size); |
4049 | - } else { |
4050 | - /* |
4051 | - * Mark that we allocate beyond EOF so the subsequent truncate |
4052 | - * can proceed even if the new size is the same as i_size. |
4053 | - */ |
4054 | - if ((offset + len) > i_size_read(inode)) |
4055 | - ext4_set_inode_flag(inode, EXT4_INODE_EOFBLOCKS); |
4056 | + if (file->f_flags & O_SYNC && EXT4_SB(inode->i_sb)->s_journal) { |
4057 | + ret = jbd2_complete_transaction(EXT4_SB(inode->i_sb)->s_journal, |
4058 | + EXT4_I(inode)->i_sync_tid); |
4059 | } |
4060 | - ext4_mark_inode_dirty(handle, inode); |
4061 | - if (file->f_flags & O_SYNC) |
4062 | - ext4_handle_sync(handle); |
4063 | - |
4064 | - ext4_journal_stop(handle); |
4065 | out: |
4066 | mutex_unlock(&inode->i_mutex); |
4067 | trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); |
4068 | diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c |
4069 | index fd69da194826..e75f840000a0 100644 |
4070 | --- a/fs/ext4/indirect.c |
4071 | +++ b/fs/ext4/indirect.c |
4072 | @@ -1295,97 +1295,220 @@ do_indirects: |
4073 | } |
4074 | } |
4075 | |
4076 | -static int free_hole_blocks(handle_t *handle, struct inode *inode, |
4077 | - struct buffer_head *parent_bh, __le32 *i_data, |
4078 | - int level, ext4_lblk_t first, |
4079 | - ext4_lblk_t count, int max) |
4080 | +/** |
4081 | + * ext4_ind_remove_space - remove space from the range |
4082 | + * @handle: JBD handle for this transaction |
4083 | + * @inode: inode we are dealing with |
4084 | + * @start: First block to remove |
4085 | + * @end: One block after the last block to remove (exclusive) |
4086 | + * |
4087 | + * Free the blocks in the defined range (end is exclusive endpoint of |
4088 | + * range). This is used by ext4_punch_hole(). |
4089 | + */ |
4090 | +int ext4_ind_remove_space(handle_t *handle, struct inode *inode, |
4091 | + ext4_lblk_t start, ext4_lblk_t end) |
4092 | { |
4093 | - struct buffer_head *bh = NULL; |
4094 | + struct ext4_inode_info *ei = EXT4_I(inode); |
4095 | + __le32 *i_data = ei->i_data; |
4096 | int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); |
4097 | - int ret = 0; |
4098 | - int i, inc; |
4099 | - ext4_lblk_t offset; |
4100 | - __le32 blk; |
4101 | - |
4102 | - inc = 1 << ((EXT4_BLOCK_SIZE_BITS(inode->i_sb) - 2) * level); |
4103 | - for (i = 0, offset = 0; i < max; i++, i_data++, offset += inc) { |
4104 | - if (offset >= count + first) |
4105 | - break; |
4106 | - if (*i_data == 0 || (offset + inc) <= first) |
4107 | - continue; |
4108 | - blk = *i_data; |
4109 | - if (level > 0) { |
4110 | - ext4_lblk_t first2; |
4111 | - ext4_lblk_t count2; |
4112 | + ext4_lblk_t offsets[4], offsets2[4]; |
4113 | + Indirect chain[4], chain2[4]; |
4114 | + Indirect *partial, *partial2; |
4115 | + ext4_lblk_t max_block; |
4116 | + __le32 nr = 0, nr2 = 0; |
4117 | + int n = 0, n2 = 0; |
4118 | + unsigned blocksize = inode->i_sb->s_blocksize; |
4119 | |
4120 | - bh = sb_bread(inode->i_sb, le32_to_cpu(blk)); |
4121 | - if (!bh) { |
4122 | - EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk), |
4123 | - "Read failure"); |
4124 | - return -EIO; |
4125 | - } |
4126 | - if (first > offset) { |
4127 | - first2 = first - offset; |
4128 | - count2 = count; |
4129 | + max_block = (EXT4_SB(inode->i_sb)->s_bitmap_maxbytes + blocksize-1) |
4130 | + >> EXT4_BLOCK_SIZE_BITS(inode->i_sb); |
4131 | + if (end >= max_block) |
4132 | + end = max_block; |
4133 | + if ((start >= end) || (start > max_block)) |
4134 | + return 0; |
4135 | + |
4136 | + n = ext4_block_to_path(inode, start, offsets, NULL); |
4137 | + n2 = ext4_block_to_path(inode, end, offsets2, NULL); |
4138 | + |
4139 | + BUG_ON(n > n2); |
4140 | + |
4141 | + if ((n == 1) && (n == n2)) { |
4142 | + /* We're punching only within direct block range */ |
4143 | + ext4_free_data(handle, inode, NULL, i_data + offsets[0], |
4144 | + i_data + offsets2[0]); |
4145 | + return 0; |
4146 | + } else if (n2 > n) { |
4147 | + /* |
4148 | + * Start and end are on a different levels so we're going to |
4149 | + * free partial block at start, and partial block at end of |
4150 | + * the range. If there are some levels in between then |
4151 | + * do_indirects label will take care of that. |
4152 | + */ |
4153 | + |
4154 | + if (n == 1) { |
4155 | + /* |
4156 | + * Start is at the direct block level, free |
4157 | + * everything to the end of the level. |
4158 | + */ |
4159 | + ext4_free_data(handle, inode, NULL, i_data + offsets[0], |
4160 | + i_data + EXT4_NDIR_BLOCKS); |
4161 | + goto end_range; |
4162 | + } |
4163 | + |
4164 | + |
4165 | + partial = ext4_find_shared(inode, n, offsets, chain, &nr); |
4166 | + if (nr) { |
4167 | + if (partial == chain) { |
4168 | + /* Shared branch grows from the inode */ |
4169 | + ext4_free_branches(handle, inode, NULL, |
4170 | + &nr, &nr+1, (chain+n-1) - partial); |
4171 | + *partial->p = 0; |
4172 | } else { |
4173 | - first2 = 0; |
4174 | - count2 = count - (offset - first); |
4175 | + /* Shared branch grows from an indirect block */ |
4176 | + BUFFER_TRACE(partial->bh, "get_write_access"); |
4177 | + ext4_free_branches(handle, inode, partial->bh, |
4178 | + partial->p, |
4179 | + partial->p+1, (chain+n-1) - partial); |
4180 | } |
4181 | - ret = free_hole_blocks(handle, inode, bh, |
4182 | - (__le32 *)bh->b_data, level - 1, |
4183 | - first2, count2, |
4184 | - inode->i_sb->s_blocksize >> 2); |
4185 | - if (ret) { |
4186 | - brelse(bh); |
4187 | - goto err; |
4188 | + } |
4189 | + |
4190 | + /* |
4191 | + * Clear the ends of indirect blocks on the shared branch |
4192 | + * at the start of the range |
4193 | + */ |
4194 | + while (partial > chain) { |
4195 | + ext4_free_branches(handle, inode, partial->bh, |
4196 | + partial->p + 1, |
4197 | + (__le32 *)partial->bh->b_data+addr_per_block, |
4198 | + (chain+n-1) - partial); |
4199 | + BUFFER_TRACE(partial->bh, "call brelse"); |
4200 | + brelse(partial->bh); |
4201 | + partial--; |
4202 | + } |
4203 | + |
4204 | +end_range: |
4205 | + partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); |
4206 | + if (nr2) { |
4207 | + if (partial2 == chain2) { |
4208 | + /* |
4209 | + * Remember, end is exclusive so here we're at |
4210 | + * the start of the next level we're not going |
4211 | + * to free. Everything was covered by the start |
4212 | + * of the range. |
4213 | + */ |
4214 | + return 0; |
4215 | + } else { |
4216 | + /* Shared branch grows from an indirect block */ |
4217 | + partial2--; |
4218 | } |
4219 | + } else { |
4220 | + /* |
4221 | + * ext4_find_shared returns Indirect structure which |
4222 | + * points to the last element which should not be |
4223 | + * removed by truncate. But this is end of the range |
4224 | + * in punch_hole so we need to point to the next element |
4225 | + */ |
4226 | + partial2->p++; |
4227 | } |
4228 | - if (level == 0 || |
4229 | - (bh && all_zeroes((__le32 *)bh->b_data, |
4230 | - (__le32 *)bh->b_data + addr_per_block))) { |
4231 | - ext4_free_data(handle, inode, parent_bh, |
4232 | - i_data, i_data + 1); |
4233 | + |
4234 | + /* |
4235 | + * Clear the ends of indirect blocks on the shared branch |
4236 | + * at the end of the range |
4237 | + */ |
4238 | + while (partial2 > chain2) { |
4239 | + ext4_free_branches(handle, inode, partial2->bh, |
4240 | + (__le32 *)partial2->bh->b_data, |
4241 | + partial2->p, |
4242 | + (chain2+n2-1) - partial2); |
4243 | + BUFFER_TRACE(partial2->bh, "call brelse"); |
4244 | + brelse(partial2->bh); |
4245 | + partial2--; |
4246 | } |
4247 | - brelse(bh); |
4248 | - bh = NULL; |
4249 | + goto do_indirects; |
4250 | } |
4251 | |
4252 | -err: |
4253 | - return ret; |
4254 | -} |
4255 | - |
4256 | -int ext4_free_hole_blocks(handle_t *handle, struct inode *inode, |
4257 | - ext4_lblk_t first, ext4_lblk_t stop) |
4258 | -{ |
4259 | - int addr_per_block = EXT4_ADDR_PER_BLOCK(inode->i_sb); |
4260 | - int level, ret = 0; |
4261 | - int num = EXT4_NDIR_BLOCKS; |
4262 | - ext4_lblk_t count, max = EXT4_NDIR_BLOCKS; |
4263 | - __le32 *i_data = EXT4_I(inode)->i_data; |
4264 | - |
4265 | - count = stop - first; |
4266 | - for (level = 0; level < 4; level++, max *= addr_per_block) { |
4267 | - if (first < max) { |
4268 | - ret = free_hole_blocks(handle, inode, NULL, i_data, |
4269 | - level, first, count, num); |
4270 | - if (ret) |
4271 | - goto err; |
4272 | - if (count > max - first) |
4273 | - count -= max - first; |
4274 | - else |
4275 | - break; |
4276 | - first = 0; |
4277 | - } else { |
4278 | - first -= max; |
4279 | + /* Punch happened within the same level (n == n2) */ |
4280 | + partial = ext4_find_shared(inode, n, offsets, chain, &nr); |
4281 | + partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); |
4282 | + /* |
4283 | + * ext4_find_shared returns Indirect structure which |
4284 | + * points to the last element which should not be |
4285 | + * removed by truncate. But this is end of the range |
4286 | + * in punch_hole so we need to point to the next element |
4287 | + */ |
4288 | + partial2->p++; |
4289 | + while ((partial > chain) || (partial2 > chain2)) { |
4290 | + /* We're at the same block, so we're almost finished */ |
4291 | + if ((partial->bh && partial2->bh) && |
4292 | + (partial->bh->b_blocknr == partial2->bh->b_blocknr)) { |
4293 | + if ((partial > chain) && (partial2 > chain2)) { |
4294 | + ext4_free_branches(handle, inode, partial->bh, |
4295 | + partial->p + 1, |
4296 | + partial2->p, |
4297 | + (chain+n-1) - partial); |
4298 | + BUFFER_TRACE(partial->bh, "call brelse"); |
4299 | + brelse(partial->bh); |
4300 | + BUFFER_TRACE(partial2->bh, "call brelse"); |
4301 | + brelse(partial2->bh); |
4302 | + } |
4303 | + return 0; |
4304 | } |
4305 | - i_data += num; |
4306 | - if (level == 0) { |
4307 | - num = 1; |
4308 | - max = 1; |
4309 | + /* |
4310 | + * Clear the ends of indirect blocks on the shared branch |
4311 | + * at the start of the range |
4312 | + */ |
4313 | + if (partial > chain) { |
4314 | + ext4_free_branches(handle, inode, partial->bh, |
4315 | + partial->p + 1, |
4316 | + (__le32 *)partial->bh->b_data+addr_per_block, |
4317 | + (chain+n-1) - partial); |
4318 | + BUFFER_TRACE(partial->bh, "call brelse"); |
4319 | + brelse(partial->bh); |
4320 | + partial--; |
4321 | + } |
4322 | + /* |
4323 | + * Clear the ends of indirect blocks on the shared branch |
4324 | + * at the end of the range |
4325 | + */ |
4326 | + if (partial2 > chain2) { |
4327 | + ext4_free_branches(handle, inode, partial2->bh, |
4328 | + (__le32 *)partial2->bh->b_data, |
4329 | + partial2->p, |
4330 | + (chain2+n-1) - partial2); |
4331 | + BUFFER_TRACE(partial2->bh, "call brelse"); |
4332 | + brelse(partial2->bh); |
4333 | + partial2--; |
4334 | } |
4335 | } |
4336 | |
4337 | -err: |
4338 | - return ret; |
4339 | +do_indirects: |
4340 | + /* Kill the remaining (whole) subtrees */ |
4341 | + switch (offsets[0]) { |
4342 | + default: |
4343 | + if (++n >= n2) |
4344 | + return 0; |
4345 | + nr = i_data[EXT4_IND_BLOCK]; |
4346 | + if (nr) { |
4347 | + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); |
4348 | + i_data[EXT4_IND_BLOCK] = 0; |
4349 | + } |
4350 | + case EXT4_IND_BLOCK: |
4351 | + if (++n >= n2) |
4352 | + return 0; |
4353 | + nr = i_data[EXT4_DIND_BLOCK]; |
4354 | + if (nr) { |
4355 | + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); |
4356 | + i_data[EXT4_DIND_BLOCK] = 0; |
4357 | + } |
4358 | + case EXT4_DIND_BLOCK: |
4359 | + if (++n >= n2) |
4360 | + return 0; |
4361 | + nr = i_data[EXT4_TIND_BLOCK]; |
4362 | + if (nr) { |
4363 | + ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); |
4364 | + i_data[EXT4_TIND_BLOCK] = 0; |
4365 | + } |
4366 | + case EXT4_TIND_BLOCK: |
4367 | + ; |
4368 | + } |
4369 | + return 0; |
4370 | } |
4371 | - |
4372 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
4373 | index 8a064734e6eb..e9c9b5bd906a 100644 |
4374 | --- a/fs/ext4/inode.c |
4375 | +++ b/fs/ext4/inode.c |
4376 | @@ -1092,27 +1092,11 @@ static int ext4_write_end(struct file *file, |
4377 | } else |
4378 | copied = block_write_end(file, mapping, pos, |
4379 | len, copied, page, fsdata); |
4380 | - |
4381 | /* |
4382 | - * No need to use i_size_read() here, the i_size |
4383 | - * cannot change under us because we hole i_mutex. |
4384 | - * |
4385 | - * But it's important to update i_size while still holding page lock: |
4386 | + * it's important to update i_size while still holding page lock: |
4387 | * page writeout could otherwise come in and zero beyond i_size. |
4388 | */ |
4389 | - if (pos + copied > inode->i_size) { |
4390 | - i_size_write(inode, pos + copied); |
4391 | - i_size_changed = 1; |
4392 | - } |
4393 | - |
4394 | - if (pos + copied > EXT4_I(inode)->i_disksize) { |
4395 | - /* We need to mark inode dirty even if |
4396 | - * new_i_size is less that inode->i_size |
4397 | - * but greater than i_disksize. (hint delalloc) |
4398 | - */ |
4399 | - ext4_update_i_disksize(inode, (pos + copied)); |
4400 | - i_size_changed = 1; |
4401 | - } |
4402 | + i_size_changed = ext4_update_inode_size(inode, pos + copied); |
4403 | unlock_page(page); |
4404 | page_cache_release(page); |
4405 | |
4406 | @@ -1160,7 +1144,7 @@ static int ext4_journalled_write_end(struct file *file, |
4407 | int ret = 0, ret2; |
4408 | int partial = 0; |
4409 | unsigned from, to; |
4410 | - loff_t new_i_size; |
4411 | + int size_changed = 0; |
4412 | |
4413 | trace_ext4_journalled_write_end(inode, pos, len, copied); |
4414 | from = pos & (PAGE_CACHE_SIZE - 1); |
4415 | @@ -1183,20 +1167,18 @@ static int ext4_journalled_write_end(struct file *file, |
4416 | if (!partial) |
4417 | SetPageUptodate(page); |
4418 | } |
4419 | - new_i_size = pos + copied; |
4420 | - if (new_i_size > inode->i_size) |
4421 | - i_size_write(inode, pos+copied); |
4422 | + size_changed = ext4_update_inode_size(inode, pos + copied); |
4423 | ext4_set_inode_state(inode, EXT4_STATE_JDATA); |
4424 | EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid; |
4425 | - if (new_i_size > EXT4_I(inode)->i_disksize) { |
4426 | - ext4_update_i_disksize(inode, new_i_size); |
4427 | + unlock_page(page); |
4428 | + page_cache_release(page); |
4429 | + |
4430 | + if (size_changed) { |
4431 | ret2 = ext4_mark_inode_dirty(handle, inode); |
4432 | if (!ret) |
4433 | ret = ret2; |
4434 | } |
4435 | |
4436 | - unlock_page(page); |
4437 | - page_cache_release(page); |
4438 | if (pos + len > inode->i_size && ext4_can_truncate(inode)) |
4439 | /* if we have allocated more blocks and copied |
4440 | * less. We will have blocks allocated outside |
4441 | @@ -2212,6 +2194,7 @@ static int mpage_map_and_submit_extent(handle_t *handle, |
4442 | struct ext4_map_blocks *map = &mpd->map; |
4443 | int err; |
4444 | loff_t disksize; |
4445 | + int progress = 0; |
4446 | |
4447 | mpd->io_submit.io_end->offset = |
4448 | ((loff_t)map->m_lblk) << inode->i_blkbits; |
4449 | @@ -2228,8 +2211,11 @@ static int mpage_map_and_submit_extent(handle_t *handle, |
4450 | * is non-zero, a commit should free up blocks. |
4451 | */ |
4452 | if ((err == -ENOMEM) || |
4453 | - (err == -ENOSPC && ext4_count_free_clusters(sb))) |
4454 | + (err == -ENOSPC && ext4_count_free_clusters(sb))) { |
4455 | + if (progress) |
4456 | + goto update_disksize; |
4457 | return err; |
4458 | + } |
4459 | ext4_msg(sb, KERN_CRIT, |
4460 | "Delayed block allocation failed for " |
4461 | "inode %lu at logical offset %llu with" |
4462 | @@ -2246,15 +2232,17 @@ static int mpage_map_and_submit_extent(handle_t *handle, |
4463 | *give_up_on_write = true; |
4464 | return err; |
4465 | } |
4466 | + progress = 1; |
4467 | /* |
4468 | * Update buffer state, submit mapped pages, and get us new |
4469 | * extent to map |
4470 | */ |
4471 | err = mpage_map_and_submit_buffers(mpd); |
4472 | if (err < 0) |
4473 | - return err; |
4474 | + goto update_disksize; |
4475 | } while (map->m_len); |
4476 | |
4477 | +update_disksize: |
4478 | /* |
4479 | * Update on-disk size after IO is submitted. Races with |
4480 | * truncate are avoided by checking i_size under i_data_sem. |
4481 | @@ -3624,7 +3612,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) |
4482 | ret = ext4_ext_remove_space(inode, first_block, |
4483 | stop_block - 1); |
4484 | else |
4485 | - ret = ext4_free_hole_blocks(handle, inode, first_block, |
4486 | + ret = ext4_ind_remove_space(handle, inode, first_block, |
4487 | stop_block); |
4488 | |
4489 | up_write(&EXT4_I(inode)->i_data_sem); |
4490 | diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c |
4491 | index 2dcb936be90e..c3e7418a6811 100644 |
4492 | --- a/fs/ext4/mballoc.c |
4493 | +++ b/fs/ext4/mballoc.c |
4494 | @@ -1412,6 +1412,8 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, |
4495 | int last = first + count - 1; |
4496 | struct super_block *sb = e4b->bd_sb; |
4497 | |
4498 | + if (WARN_ON(count == 0)) |
4499 | + return; |
4500 | BUG_ON(last >= (sb->s_blocksize << 3)); |
4501 | assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); |
4502 | /* Don't bother if the block group is corrupt. */ |
4503 | @@ -3216,8 +3218,30 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) |
4504 | static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) |
4505 | { |
4506 | struct ext4_prealloc_space *pa = ac->ac_pa; |
4507 | + struct ext4_buddy e4b; |
4508 | + int err; |
4509 | |
4510 | - if (pa && pa->pa_type == MB_INODE_PA) |
4511 | + if (pa == NULL) { |
4512 | + if (ac->ac_f_ex.fe_len == 0) |
4513 | + return; |
4514 | + err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); |
4515 | + if (err) { |
4516 | + /* |
4517 | + * This should never happen since we pin the |
4518 | + * pages in the ext4_allocation_context so |
4519 | + * ext4_mb_load_buddy() should never fail. |
4520 | + */ |
4521 | + WARN(1, "mb_load_buddy failed (%d)", err); |
4522 | + return; |
4523 | + } |
4524 | + ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); |
4525 | + mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, |
4526 | + ac->ac_f_ex.fe_len); |
4527 | + ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); |
4528 | + ext4_mb_unload_buddy(&e4b); |
4529 | + return; |
4530 | + } |
4531 | + if (pa->pa_type == MB_INODE_PA) |
4532 | pa->pa_free += ac->ac_b_ex.fe_len; |
4533 | } |
4534 | |
4535 | diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c |
4536 | index 3520ab8a6639..9e6eced1605b 100644 |
4537 | --- a/fs/ext4/namei.c |
4538 | +++ b/fs/ext4/namei.c |
4539 | @@ -3128,7 +3128,8 @@ static int ext4_find_delete_entry(handle_t *handle, struct inode *dir, |
4540 | return retval; |
4541 | } |
4542 | |
4543 | -static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent) |
4544 | +static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent, |
4545 | + int force_reread) |
4546 | { |
4547 | int retval; |
4548 | /* |
4549 | @@ -3140,7 +3141,8 @@ static void ext4_rename_delete(handle_t *handle, struct ext4_renament *ent) |
4550 | if (le32_to_cpu(ent->de->inode) != ent->inode->i_ino || |
4551 | ent->de->name_len != ent->dentry->d_name.len || |
4552 | strncmp(ent->de->name, ent->dentry->d_name.name, |
4553 | - ent->de->name_len)) { |
4554 | + ent->de->name_len) || |
4555 | + force_reread) { |
4556 | retval = ext4_find_delete_entry(handle, ent->dir, |
4557 | &ent->dentry->d_name); |
4558 | } else { |
4559 | @@ -3191,6 +3193,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, |
4560 | .dentry = new_dentry, |
4561 | .inode = new_dentry->d_inode, |
4562 | }; |
4563 | + int force_reread; |
4564 | int retval; |
4565 | |
4566 | dquot_initialize(old.dir); |
4567 | @@ -3246,6 +3249,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, |
4568 | if (retval) |
4569 | goto end_rename; |
4570 | } |
4571 | + /* |
4572 | + * If we're renaming a file within an inline_data dir and adding or |
4573 | + * setting the new dirent causes a conversion from inline_data to |
4574 | + * extents/blockmap, we need to force the dirent delete code to |
4575 | + * re-read the directory, or else we end up trying to delete a dirent |
4576 | + * from what is now the extent tree root (or a block map). |
4577 | + */ |
4578 | + force_reread = (new.dir->i_ino == old.dir->i_ino && |
4579 | + ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); |
4580 | if (!new.bh) { |
4581 | retval = ext4_add_entry(handle, new.dentry, old.inode); |
4582 | if (retval) |
4583 | @@ -3256,6 +3268,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, |
4584 | if (retval) |
4585 | goto end_rename; |
4586 | } |
4587 | + if (force_reread) |
4588 | + force_reread = !ext4_test_inode_flag(new.dir, |
4589 | + EXT4_INODE_INLINE_DATA); |
4590 | |
4591 | /* |
4592 | * Like most other Unix systems, set the ctime for inodes on a |
4593 | @@ -3267,7 +3282,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, |
4594 | /* |
4595 | * ok, that's it |
4596 | */ |
4597 | - ext4_rename_delete(handle, &old); |
4598 | + ext4_rename_delete(handle, &old, force_reread); |
4599 | |
4600 | if (new.inode) { |
4601 | ext4_dec_count(handle, new.inode); |
4602 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
4603 | index 6df7bc611dbd..beeb5c4e1f9d 100644 |
4604 | --- a/fs/ext4/super.c |
4605 | +++ b/fs/ext4/super.c |
4606 | @@ -3185,9 +3185,9 @@ static int set_journal_csum_feature_set(struct super_block *sb) |
4607 | |
4608 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, |
4609 | EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) { |
4610 | - /* journal checksum v2 */ |
4611 | + /* journal checksum v3 */ |
4612 | compat = 0; |
4613 | - incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2; |
4614 | + incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; |
4615 | } else { |
4616 | /* journal checksum v1 */ |
4617 | compat = JBD2_FEATURE_COMPAT_CHECKSUM; |
4618 | @@ -3209,6 +3209,7 @@ static int set_journal_csum_feature_set(struct super_block *sb) |
4619 | jbd2_journal_clear_features(sbi->s_journal, |
4620 | JBD2_FEATURE_COMPAT_CHECKSUM, 0, |
4621 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | |
4622 | + JBD2_FEATURE_INCOMPAT_CSUM_V3 | |
4623 | JBD2_FEATURE_INCOMPAT_CSUM_V2); |
4624 | } |
4625 | |
4626 | diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c |
4627 | index 4556ce1af5b0..5ddaf8625d3b 100644 |
4628 | --- a/fs/isofs/inode.c |
4629 | +++ b/fs/isofs/inode.c |
4630 | @@ -61,7 +61,7 @@ static void isofs_put_super(struct super_block *sb) |
4631 | return; |
4632 | } |
4633 | |
4634 | -static int isofs_read_inode(struct inode *); |
4635 | +static int isofs_read_inode(struct inode *, int relocated); |
4636 | static int isofs_statfs (struct dentry *, struct kstatfs *); |
4637 | |
4638 | static struct kmem_cache *isofs_inode_cachep; |
4639 | @@ -1259,7 +1259,7 @@ out_toomany: |
4640 | goto out; |
4641 | } |
4642 | |
4643 | -static int isofs_read_inode(struct inode *inode) |
4644 | +static int isofs_read_inode(struct inode *inode, int relocated) |
4645 | { |
4646 | struct super_block *sb = inode->i_sb; |
4647 | struct isofs_sb_info *sbi = ISOFS_SB(sb); |
4648 | @@ -1404,7 +1404,7 @@ static int isofs_read_inode(struct inode *inode) |
4649 | */ |
4650 | |
4651 | if (!high_sierra) { |
4652 | - parse_rock_ridge_inode(de, inode); |
4653 | + parse_rock_ridge_inode(de, inode, relocated); |
4654 | /* if we want uid/gid set, override the rock ridge setting */ |
4655 | if (sbi->s_uid_set) |
4656 | inode->i_uid = sbi->s_uid; |
4657 | @@ -1483,9 +1483,10 @@ static int isofs_iget5_set(struct inode *ino, void *data) |
4658 | * offset that point to the underlying meta-data for the inode. The |
4659 | * code below is otherwise similar to the iget() code in |
4660 | * include/linux/fs.h */ |
4661 | -struct inode *isofs_iget(struct super_block *sb, |
4662 | - unsigned long block, |
4663 | - unsigned long offset) |
4664 | +struct inode *__isofs_iget(struct super_block *sb, |
4665 | + unsigned long block, |
4666 | + unsigned long offset, |
4667 | + int relocated) |
4668 | { |
4669 | unsigned long hashval; |
4670 | struct inode *inode; |
4671 | @@ -1507,7 +1508,7 @@ struct inode *isofs_iget(struct super_block *sb, |
4672 | return ERR_PTR(-ENOMEM); |
4673 | |
4674 | if (inode->i_state & I_NEW) { |
4675 | - ret = isofs_read_inode(inode); |
4676 | + ret = isofs_read_inode(inode, relocated); |
4677 | if (ret < 0) { |
4678 | iget_failed(inode); |
4679 | inode = ERR_PTR(ret); |
4680 | diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h |
4681 | index 99167238518d..0ac4c1f73fbd 100644 |
4682 | --- a/fs/isofs/isofs.h |
4683 | +++ b/fs/isofs/isofs.h |
4684 | @@ -107,7 +107,7 @@ extern int iso_date(char *, int); |
4685 | |
4686 | struct inode; /* To make gcc happy */ |
4687 | |
4688 | -extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *); |
4689 | +extern int parse_rock_ridge_inode(struct iso_directory_record *, struct inode *, int relocated); |
4690 | extern int get_rock_ridge_filename(struct iso_directory_record *, char *, struct inode *); |
4691 | extern int isofs_name_translate(struct iso_directory_record *, char *, struct inode *); |
4692 | |
4693 | @@ -118,9 +118,24 @@ extern struct dentry *isofs_lookup(struct inode *, struct dentry *, unsigned int |
4694 | extern struct buffer_head *isofs_bread(struct inode *, sector_t); |
4695 | extern int isofs_get_blocks(struct inode *, sector_t, struct buffer_head **, unsigned long); |
4696 | |
4697 | -extern struct inode *isofs_iget(struct super_block *sb, |
4698 | - unsigned long block, |
4699 | - unsigned long offset); |
4700 | +struct inode *__isofs_iget(struct super_block *sb, |
4701 | + unsigned long block, |
4702 | + unsigned long offset, |
4703 | + int relocated); |
4704 | + |
4705 | +static inline struct inode *isofs_iget(struct super_block *sb, |
4706 | + unsigned long block, |
4707 | + unsigned long offset) |
4708 | +{ |
4709 | + return __isofs_iget(sb, block, offset, 0); |
4710 | +} |
4711 | + |
4712 | +static inline struct inode *isofs_iget_reloc(struct super_block *sb, |
4713 | + unsigned long block, |
4714 | + unsigned long offset) |
4715 | +{ |
4716 | + return __isofs_iget(sb, block, offset, 1); |
4717 | +} |
4718 | |
4719 | /* Because the inode number is no longer relevant to finding the |
4720 | * underlying meta-data for an inode, we are free to choose a more |
4721 | diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c |
4722 | index c0bf42472e40..f488bbae541a 100644 |
4723 | --- a/fs/isofs/rock.c |
4724 | +++ b/fs/isofs/rock.c |
4725 | @@ -288,12 +288,16 @@ eio: |
4726 | goto out; |
4727 | } |
4728 | |
4729 | +#define RR_REGARD_XA 1 |
4730 | +#define RR_RELOC_DE 2 |
4731 | + |
4732 | static int |
4733 | parse_rock_ridge_inode_internal(struct iso_directory_record *de, |
4734 | - struct inode *inode, int regard_xa) |
4735 | + struct inode *inode, int flags) |
4736 | { |
4737 | int symlink_len = 0; |
4738 | int cnt, sig; |
4739 | + unsigned int reloc_block; |
4740 | struct inode *reloc; |
4741 | struct rock_ridge *rr; |
4742 | int rootflag; |
4743 | @@ -305,7 +309,7 @@ parse_rock_ridge_inode_internal(struct iso_directory_record *de, |
4744 | |
4745 | init_rock_state(&rs, inode); |
4746 | setup_rock_ridge(de, inode, &rs); |
4747 | - if (regard_xa) { |
4748 | + if (flags & RR_REGARD_XA) { |
4749 | rs.chr += 14; |
4750 | rs.len -= 14; |
4751 | if (rs.len < 0) |
4752 | @@ -485,12 +489,22 @@ repeat: |
4753 | "relocated directory\n"); |
4754 | goto out; |
4755 | case SIG('C', 'L'): |
4756 | - ISOFS_I(inode)->i_first_extent = |
4757 | - isonum_733(rr->u.CL.location); |
4758 | - reloc = |
4759 | - isofs_iget(inode->i_sb, |
4760 | - ISOFS_I(inode)->i_first_extent, |
4761 | - 0); |
4762 | + if (flags & RR_RELOC_DE) { |
4763 | + printk(KERN_ERR |
4764 | + "ISOFS: Recursive directory relocation " |
4765 | + "is not supported\n"); |
4766 | + goto eio; |
4767 | + } |
4768 | + reloc_block = isonum_733(rr->u.CL.location); |
4769 | + if (reloc_block == ISOFS_I(inode)->i_iget5_block && |
4770 | + ISOFS_I(inode)->i_iget5_offset == 0) { |
4771 | + printk(KERN_ERR |
4772 | + "ISOFS: Directory relocation points to " |
4773 | + "itself\n"); |
4774 | + goto eio; |
4775 | + } |
4776 | + ISOFS_I(inode)->i_first_extent = reloc_block; |
4777 | + reloc = isofs_iget_reloc(inode->i_sb, reloc_block, 0); |
4778 | if (IS_ERR(reloc)) { |
4779 | ret = PTR_ERR(reloc); |
4780 | goto out; |
4781 | @@ -637,9 +651,11 @@ static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit) |
4782 | return rpnt; |
4783 | } |
4784 | |
4785 | -int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode) |
4786 | +int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode, |
4787 | + int relocated) |
4788 | { |
4789 | - int result = parse_rock_ridge_inode_internal(de, inode, 0); |
4790 | + int flags = relocated ? RR_RELOC_DE : 0; |
4791 | + int result = parse_rock_ridge_inode_internal(de, inode, flags); |
4792 | |
4793 | /* |
4794 | * if rockridge flag was reset and we didn't look for attributes |
4795 | @@ -647,7 +663,8 @@ int parse_rock_ridge_inode(struct iso_directory_record *de, struct inode *inode) |
4796 | */ |
4797 | if ((ISOFS_SB(inode->i_sb)->s_rock_offset == -1) |
4798 | && (ISOFS_SB(inode->i_sb)->s_rock == 2)) { |
4799 | - result = parse_rock_ridge_inode_internal(de, inode, 14); |
4800 | + result = parse_rock_ridge_inode_internal(de, inode, |
4801 | + flags | RR_REGARD_XA); |
4802 | } |
4803 | return result; |
4804 | } |
4805 | diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c |
4806 | index 6fac74349856..b73e0215baa7 100644 |
4807 | --- a/fs/jbd2/commit.c |
4808 | +++ b/fs/jbd2/commit.c |
4809 | @@ -97,7 +97,7 @@ static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh) |
4810 | struct commit_header *h; |
4811 | __u32 csum; |
4812 | |
4813 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4814 | + if (!jbd2_journal_has_csum_v2or3(j)) |
4815 | return; |
4816 | |
4817 | h = (struct commit_header *)(bh->b_data); |
4818 | @@ -313,11 +313,11 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh) |
4819 | return checksum; |
4820 | } |
4821 | |
4822 | -static void write_tag_block(int tag_bytes, journal_block_tag_t *tag, |
4823 | +static void write_tag_block(journal_t *j, journal_block_tag_t *tag, |
4824 | unsigned long long block) |
4825 | { |
4826 | tag->t_blocknr = cpu_to_be32(block & (u32)~0); |
4827 | - if (tag_bytes > JBD2_TAG_SIZE32) |
4828 | + if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_64BIT)) |
4829 | tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1); |
4830 | } |
4831 | |
4832 | @@ -327,7 +327,7 @@ static void jbd2_descr_block_csum_set(journal_t *j, |
4833 | struct jbd2_journal_block_tail *tail; |
4834 | __u32 csum; |
4835 | |
4836 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4837 | + if (!jbd2_journal_has_csum_v2or3(j)) |
4838 | return; |
4839 | |
4840 | tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize - |
4841 | @@ -340,12 +340,13 @@ static void jbd2_descr_block_csum_set(journal_t *j, |
4842 | static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, |
4843 | struct buffer_head *bh, __u32 sequence) |
4844 | { |
4845 | + journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag; |
4846 | struct page *page = bh->b_page; |
4847 | __u8 *addr; |
4848 | __u32 csum32; |
4849 | __be32 seq; |
4850 | |
4851 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4852 | + if (!jbd2_journal_has_csum_v2or3(j)) |
4853 | return; |
4854 | |
4855 | seq = cpu_to_be32(sequence); |
4856 | @@ -355,8 +356,10 @@ static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag, |
4857 | bh->b_size); |
4858 | kunmap_atomic(addr); |
4859 | |
4860 | - /* We only have space to store the lower 16 bits of the crc32c. */ |
4861 | - tag->t_checksum = cpu_to_be16(csum32); |
4862 | + if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3)) |
4863 | + tag3->t_checksum = cpu_to_be32(csum32); |
4864 | + else |
4865 | + tag->t_checksum = cpu_to_be16(csum32); |
4866 | } |
4867 | /* |
4868 | * jbd2_journal_commit_transaction |
4869 | @@ -396,7 +399,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) |
4870 | LIST_HEAD(io_bufs); |
4871 | LIST_HEAD(log_bufs); |
4872 | |
4873 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4874 | + if (jbd2_journal_has_csum_v2or3(journal)) |
4875 | csum_size = sizeof(struct jbd2_journal_block_tail); |
4876 | |
4877 | /* |
4878 | @@ -690,7 +693,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) |
4879 | tag_flag |= JBD2_FLAG_SAME_UUID; |
4880 | |
4881 | tag = (journal_block_tag_t *) tagp; |
4882 | - write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr); |
4883 | + write_tag_block(journal, tag, jh2bh(jh)->b_blocknr); |
4884 | tag->t_flags = cpu_to_be16(tag_flag); |
4885 | jbd2_block_tag_csum_set(journal, tag, wbuf[bufs], |
4886 | commit_transaction->t_tid); |
4887 | diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c |
4888 | index 67b8e303946c..19d74d86d99c 100644 |
4889 | --- a/fs/jbd2/journal.c |
4890 | +++ b/fs/jbd2/journal.c |
4891 | @@ -124,7 +124,7 @@ EXPORT_SYMBOL(__jbd2_debug); |
4892 | /* Checksumming functions */ |
4893 | static int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb) |
4894 | { |
4895 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4896 | + if (!jbd2_journal_has_csum_v2or3(j)) |
4897 | return 1; |
4898 | |
4899 | return sb->s_checksum_type == JBD2_CRC32C_CHKSUM; |
4900 | @@ -145,7 +145,7 @@ static __be32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb) |
4901 | |
4902 | static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb) |
4903 | { |
4904 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4905 | + if (!jbd2_journal_has_csum_v2or3(j)) |
4906 | return 1; |
4907 | |
4908 | return sb->s_checksum == jbd2_superblock_csum(j, sb); |
4909 | @@ -153,7 +153,7 @@ static int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb) |
4910 | |
4911 | static void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb) |
4912 | { |
4913 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4914 | + if (!jbd2_journal_has_csum_v2or3(j)) |
4915 | return; |
4916 | |
4917 | sb->s_checksum = jbd2_superblock_csum(j, sb); |
4918 | @@ -1522,21 +1522,29 @@ static int journal_get_superblock(journal_t *journal) |
4919 | goto out; |
4920 | } |
4921 | |
4922 | - if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) && |
4923 | - JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
4924 | + if (jbd2_journal_has_csum_v2or3(journal) && |
4925 | + JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) { |
4926 | /* Can't have checksum v1 and v2 on at the same time! */ |
4927 | printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 " |
4928 | "at the same time!\n"); |
4929 | goto out; |
4930 | } |
4931 | |
4932 | + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) && |
4933 | + JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) { |
4934 | + /* Can't have checksum v2 and v3 at the same time! */ |
4935 | + printk(KERN_ERR "JBD2: Can't enable checksumming v2 and v3 " |
4936 | + "at the same time!\n"); |
4937 | + goto out; |
4938 | + } |
4939 | + |
4940 | if (!jbd2_verify_csum_type(journal, sb)) { |
4941 | printk(KERN_ERR "JBD2: Unknown checksum type\n"); |
4942 | goto out; |
4943 | } |
4944 | |
4945 | /* Load the checksum driver */ |
4946 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
4947 | + if (jbd2_journal_has_csum_v2or3(journal)) { |
4948 | journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0); |
4949 | if (IS_ERR(journal->j_chksum_driver)) { |
4950 | printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n"); |
4951 | @@ -1553,7 +1561,7 @@ static int journal_get_superblock(journal_t *journal) |
4952 | } |
4953 | |
4954 | /* Precompute checksum seed for all metadata */ |
4955 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4956 | + if (jbd2_journal_has_csum_v2or3(journal)) |
4957 | journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid, |
4958 | sizeof(sb->s_uuid)); |
4959 | |
4960 | @@ -1813,8 +1821,14 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, |
4961 | if (!jbd2_journal_check_available_features(journal, compat, ro, incompat)) |
4962 | return 0; |
4963 | |
4964 | - /* Asking for checksumming v2 and v1? Only give them v2. */ |
4965 | - if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 && |
4966 | + /* If enabling v2 checksums, turn on v3 instead */ |
4967 | + if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2) { |
4968 | + incompat &= ~JBD2_FEATURE_INCOMPAT_CSUM_V2; |
4969 | + incompat |= JBD2_FEATURE_INCOMPAT_CSUM_V3; |
4970 | + } |
4971 | + |
4972 | + /* Asking for checksumming v3 and v1? Only give them v3. */ |
4973 | + if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V3 && |
4974 | compat & JBD2_FEATURE_COMPAT_CHECKSUM) |
4975 | compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM; |
4976 | |
4977 | @@ -1823,8 +1837,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, |
4978 | |
4979 | sb = journal->j_superblock; |
4980 | |
4981 | - /* If enabling v2 checksums, update superblock */ |
4982 | - if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) { |
4983 | + /* If enabling v3 checksums, update superblock */ |
4984 | + if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) { |
4985 | sb->s_checksum_type = JBD2_CRC32C_CHKSUM; |
4986 | sb->s_feature_compat &= |
4987 | ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM); |
4988 | @@ -1842,8 +1856,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, |
4989 | } |
4990 | |
4991 | /* Precompute checksum seed for all metadata */ |
4992 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, |
4993 | - JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
4994 | + if (jbd2_journal_has_csum_v2or3(journal)) |
4995 | journal->j_csum_seed = jbd2_chksum(journal, ~0, |
4996 | sb->s_uuid, |
4997 | sizeof(sb->s_uuid)); |
4998 | @@ -1852,7 +1865,8 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat, |
4999 | /* If enabling v1 checksums, downgrade superblock */ |
5000 | if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM)) |
5001 | sb->s_feature_incompat &= |
5002 | - ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2); |
5003 | + ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2 | |
5004 | + JBD2_FEATURE_INCOMPAT_CSUM_V3); |
5005 | |
5006 | sb->s_feature_compat |= cpu_to_be32(compat); |
5007 | sb->s_feature_ro_compat |= cpu_to_be32(ro); |
5008 | @@ -2165,16 +2179,20 @@ int jbd2_journal_blocks_per_page(struct inode *inode) |
5009 | */ |
5010 | size_t journal_tag_bytes(journal_t *journal) |
5011 | { |
5012 | - journal_block_tag_t tag; |
5013 | - size_t x = 0; |
5014 | + size_t sz; |
5015 | + |
5016 | + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) |
5017 | + return sizeof(journal_block_tag3_t); |
5018 | + |
5019 | + sz = sizeof(journal_block_tag_t); |
5020 | |
5021 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5022 | - x += sizeof(tag.t_checksum); |
5023 | + sz += sizeof(__u16); |
5024 | |
5025 | if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) |
5026 | - return x + JBD2_TAG_SIZE64; |
5027 | + return sz; |
5028 | else |
5029 | - return x + JBD2_TAG_SIZE32; |
5030 | + return sz - sizeof(__u32); |
5031 | } |
5032 | |
5033 | /* |
5034 | diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c |
5035 | index 3b6bb19d60b1..9b329b55ffe3 100644 |
5036 | --- a/fs/jbd2/recovery.c |
5037 | +++ b/fs/jbd2/recovery.c |
5038 | @@ -181,7 +181,7 @@ static int jbd2_descr_block_csum_verify(journal_t *j, |
5039 | __be32 provided; |
5040 | __u32 calculated; |
5041 | |
5042 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5043 | + if (!jbd2_journal_has_csum_v2or3(j)) |
5044 | return 1; |
5045 | |
5046 | tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize - |
5047 | @@ -205,7 +205,7 @@ static int count_tags(journal_t *journal, struct buffer_head *bh) |
5048 | int nr = 0, size = journal->j_blocksize; |
5049 | int tag_bytes = journal_tag_bytes(journal); |
5050 | |
5051 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5052 | + if (jbd2_journal_has_csum_v2or3(journal)) |
5053 | size -= sizeof(struct jbd2_journal_block_tail); |
5054 | |
5055 | tagp = &bh->b_data[sizeof(journal_header_t)]; |
5056 | @@ -338,10 +338,11 @@ int jbd2_journal_skip_recovery(journal_t *journal) |
5057 | return err; |
5058 | } |
5059 | |
5060 | -static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag) |
5061 | +static inline unsigned long long read_tag_block(journal_t *journal, |
5062 | + journal_block_tag_t *tag) |
5063 | { |
5064 | unsigned long long block = be32_to_cpu(tag->t_blocknr); |
5065 | - if (tag_bytes > JBD2_TAG_SIZE32) |
5066 | + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) |
5067 | block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32; |
5068 | return block; |
5069 | } |
5070 | @@ -384,7 +385,7 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) |
5071 | __be32 provided; |
5072 | __u32 calculated; |
5073 | |
5074 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5075 | + if (!jbd2_journal_has_csum_v2or3(j)) |
5076 | return 1; |
5077 | |
5078 | h = buf; |
5079 | @@ -399,17 +400,21 @@ static int jbd2_commit_block_csum_verify(journal_t *j, void *buf) |
5080 | static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag, |
5081 | void *buf, __u32 sequence) |
5082 | { |
5083 | + journal_block_tag3_t *tag3 = (journal_block_tag3_t *)tag; |
5084 | __u32 csum32; |
5085 | __be32 seq; |
5086 | |
5087 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5088 | + if (!jbd2_journal_has_csum_v2or3(j)) |
5089 | return 1; |
5090 | |
5091 | seq = cpu_to_be32(sequence); |
5092 | csum32 = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&seq, sizeof(seq)); |
5093 | csum32 = jbd2_chksum(j, csum32, buf, j->j_blocksize); |
5094 | |
5095 | - return tag->t_checksum == cpu_to_be16(csum32); |
5096 | + if (JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V3)) |
5097 | + return tag3->t_checksum == cpu_to_be32(csum32); |
5098 | + else |
5099 | + return tag->t_checksum == cpu_to_be16(csum32); |
5100 | } |
5101 | |
5102 | static int do_one_pass(journal_t *journal, |
5103 | @@ -426,6 +431,7 @@ static int do_one_pass(journal_t *journal, |
5104 | int tag_bytes = journal_tag_bytes(journal); |
5105 | __u32 crc32_sum = ~0; /* Transactional Checksums */ |
5106 | int descr_csum_size = 0; |
5107 | + int block_error = 0; |
5108 | |
5109 | /* |
5110 | * First thing is to establish what we expect to find in the log |
5111 | @@ -512,8 +518,7 @@ static int do_one_pass(journal_t *journal, |
5112 | switch(blocktype) { |
5113 | case JBD2_DESCRIPTOR_BLOCK: |
5114 | /* Verify checksum first */ |
5115 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, |
5116 | - JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5117 | + if (jbd2_journal_has_csum_v2or3(journal)) |
5118 | descr_csum_size = |
5119 | sizeof(struct jbd2_journal_block_tail); |
5120 | if (descr_csum_size > 0 && |
5121 | @@ -574,7 +579,7 @@ static int do_one_pass(journal_t *journal, |
5122 | unsigned long long blocknr; |
5123 | |
5124 | J_ASSERT(obh != NULL); |
5125 | - blocknr = read_tag_block(tag_bytes, |
5126 | + blocknr = read_tag_block(journal, |
5127 | tag); |
5128 | |
5129 | /* If the block has been |
5130 | @@ -598,7 +603,8 @@ static int do_one_pass(journal_t *journal, |
5131 | "checksum recovering " |
5132 | "block %llu in log\n", |
5133 | blocknr); |
5134 | - continue; |
5135 | + block_error = 1; |
5136 | + goto skip_write; |
5137 | } |
5138 | |
5139 | /* Find a buffer for the new |
5140 | @@ -797,7 +803,8 @@ static int do_one_pass(journal_t *journal, |
5141 | success = -EIO; |
5142 | } |
5143 | } |
5144 | - |
5145 | + if (block_error && success == 0) |
5146 | + success = -EIO; |
5147 | return success; |
5148 | |
5149 | failed: |
5150 | @@ -811,7 +818,7 @@ static int jbd2_revoke_block_csum_verify(journal_t *j, |
5151 | __be32 provided; |
5152 | __u32 calculated; |
5153 | |
5154 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5155 | + if (!jbd2_journal_has_csum_v2or3(j)) |
5156 | return 1; |
5157 | |
5158 | tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize - |
5159 | diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c |
5160 | index 198c9c10276d..d5e95a175c92 100644 |
5161 | --- a/fs/jbd2/revoke.c |
5162 | +++ b/fs/jbd2/revoke.c |
5163 | @@ -91,8 +91,8 @@ |
5164 | #include <linux/list.h> |
5165 | #include <linux/init.h> |
5166 | #include <linux/bio.h> |
5167 | -#endif |
5168 | #include <linux/log2.h> |
5169 | +#endif |
5170 | |
5171 | static struct kmem_cache *jbd2_revoke_record_cache; |
5172 | static struct kmem_cache *jbd2_revoke_table_cache; |
5173 | @@ -597,7 +597,7 @@ static void write_one_revoke_record(journal_t *journal, |
5174 | offset = *offsetp; |
5175 | |
5176 | /* Do we need to leave space at the end for a checksum? */ |
5177 | - if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5178 | + if (jbd2_journal_has_csum_v2or3(journal)) |
5179 | csum_size = sizeof(struct jbd2_journal_revoke_tail); |
5180 | |
5181 | /* Make sure we have a descriptor with space left for the record */ |
5182 | @@ -644,7 +644,7 @@ static void jbd2_revoke_csum_set(journal_t *j, struct buffer_head *bh) |
5183 | struct jbd2_journal_revoke_tail *tail; |
5184 | __u32 csum; |
5185 | |
5186 | - if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2)) |
5187 | + if (!jbd2_journal_has_csum_v2or3(j)) |
5188 | return; |
5189 | |
5190 | tail = (struct jbd2_journal_revoke_tail *)(bh->b_data + j->j_blocksize - |
5191 | diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c |
5192 | index 8f854dde4150..24c6898159cc 100644 |
5193 | --- a/fs/nfs/nfs3acl.c |
5194 | +++ b/fs/nfs/nfs3acl.c |
5195 | @@ -129,7 +129,10 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, |
5196 | .rpc_argp = &args, |
5197 | .rpc_resp = &fattr, |
5198 | }; |
5199 | - int status; |
5200 | + int status = 0; |
5201 | + |
5202 | + if (acl == NULL && (!S_ISDIR(inode->i_mode) || dfacl == NULL)) |
5203 | + goto out; |
5204 | |
5205 | status = -EOPNOTSUPP; |
5206 | if (!nfs_server_capable(inode, NFS_CAP_ACLS)) |
5207 | @@ -256,7 +259,7 @@ nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data, |
5208 | char *p = data + *result; |
5209 | |
5210 | acl = get_acl(inode, type); |
5211 | - if (!acl) |
5212 | + if (IS_ERR_OR_NULL(acl)) |
5213 | return 0; |
5214 | |
5215 | posix_acl_release(acl); |
5216 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
5217 | index 4bf3d97cc5a0..dac979866f83 100644 |
5218 | --- a/fs/nfs/nfs4proc.c |
5219 | +++ b/fs/nfs/nfs4proc.c |
5220 | @@ -2545,6 +2545,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) |
5221 | struct nfs4_closedata *calldata = data; |
5222 | struct nfs4_state *state = calldata->state; |
5223 | struct nfs_server *server = NFS_SERVER(calldata->inode); |
5224 | + nfs4_stateid *res_stateid = NULL; |
5225 | |
5226 | dprintk("%s: begin!\n", __func__); |
5227 | if (!nfs4_sequence_done(task, &calldata->res.seq_res)) |
5228 | @@ -2555,12 +2556,12 @@ static void nfs4_close_done(struct rpc_task *task, void *data) |
5229 | */ |
5230 | switch (task->tk_status) { |
5231 | case 0: |
5232 | - if (calldata->roc) |
5233 | + res_stateid = &calldata->res.stateid; |
5234 | + if (calldata->arg.fmode == 0 && calldata->roc) |
5235 | pnfs_roc_set_barrier(state->inode, |
5236 | calldata->roc_barrier); |
5237 | - nfs_clear_open_stateid(state, &calldata->res.stateid, 0); |
5238 | renew_lease(server, calldata->timestamp); |
5239 | - goto out_release; |
5240 | + break; |
5241 | case -NFS4ERR_ADMIN_REVOKED: |
5242 | case -NFS4ERR_STALE_STATEID: |
5243 | case -NFS4ERR_OLD_STATEID: |
5244 | @@ -2574,7 +2575,7 @@ static void nfs4_close_done(struct rpc_task *task, void *data) |
5245 | goto out_release; |
5246 | } |
5247 | } |
5248 | - nfs_clear_open_stateid(state, NULL, calldata->arg.fmode); |
5249 | + nfs_clear_open_stateid(state, res_stateid, calldata->arg.fmode); |
5250 | out_release: |
5251 | nfs_release_seqid(calldata->arg.seqid); |
5252 | nfs_refresh_inode(calldata->inode, calldata->res.fattr); |
5253 | @@ -2586,6 +2587,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) |
5254 | struct nfs4_closedata *calldata = data; |
5255 | struct nfs4_state *state = calldata->state; |
5256 | struct inode *inode = calldata->inode; |
5257 | + bool is_rdonly, is_wronly, is_rdwr; |
5258 | int call_close = 0; |
5259 | |
5260 | dprintk("%s: begin!\n", __func__); |
5261 | @@ -2593,18 +2595,24 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) |
5262 | goto out_wait; |
5263 | |
5264 | task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_DOWNGRADE]; |
5265 | - calldata->arg.fmode = FMODE_READ|FMODE_WRITE; |
5266 | spin_lock(&state->owner->so_lock); |
5267 | + is_rdwr = test_bit(NFS_O_RDWR_STATE, &state->flags); |
5268 | + is_rdonly = test_bit(NFS_O_RDONLY_STATE, &state->flags); |
5269 | + is_wronly = test_bit(NFS_O_WRONLY_STATE, &state->flags); |
5270 | + /* Calculate the current open share mode */ |
5271 | + calldata->arg.fmode = 0; |
5272 | + if (is_rdonly || is_rdwr) |
5273 | + calldata->arg.fmode |= FMODE_READ; |
5274 | + if (is_wronly || is_rdwr) |
5275 | + calldata->arg.fmode |= FMODE_WRITE; |
5276 | /* Calculate the change in open mode */ |
5277 | if (state->n_rdwr == 0) { |
5278 | if (state->n_rdonly == 0) { |
5279 | - call_close |= test_bit(NFS_O_RDONLY_STATE, &state->flags); |
5280 | - call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); |
5281 | + call_close |= is_rdonly || is_rdwr; |
5282 | calldata->arg.fmode &= ~FMODE_READ; |
5283 | } |
5284 | if (state->n_wronly == 0) { |
5285 | - call_close |= test_bit(NFS_O_WRONLY_STATE, &state->flags); |
5286 | - call_close |= test_bit(NFS_O_RDWR_STATE, &state->flags); |
5287 | + call_close |= is_wronly || is_rdwr; |
5288 | calldata->arg.fmode &= ~FMODE_WRITE; |
5289 | } |
5290 | } |
5291 | diff --git a/fs/nfs/super.c b/fs/nfs/super.c |
5292 | index 084af1060d79..3fd83327bbad 100644 |
5293 | --- a/fs/nfs/super.c |
5294 | +++ b/fs/nfs/super.c |
5295 | @@ -2180,7 +2180,7 @@ out_no_address: |
5296 | return -EINVAL; |
5297 | } |
5298 | |
5299 | -#define NFS_MOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \ |
5300 | +#define NFS_REMOUNT_CMP_FLAGMASK ~(NFS_MOUNT_INTR \ |
5301 | | NFS_MOUNT_SECURE \ |
5302 | | NFS_MOUNT_TCP \ |
5303 | | NFS_MOUNT_VER3 \ |
5304 | @@ -2188,15 +2188,16 @@ out_no_address: |
5305 | | NFS_MOUNT_NONLM \ |
5306 | | NFS_MOUNT_BROKEN_SUID \ |
5307 | | NFS_MOUNT_STRICTLOCK \ |
5308 | - | NFS_MOUNT_UNSHARED \ |
5309 | - | NFS_MOUNT_NORESVPORT \ |
5310 | | NFS_MOUNT_LEGACY_INTERFACE) |
5311 | |
5312 | +#define NFS_MOUNT_CMP_FLAGMASK (NFS_REMOUNT_CMP_FLAGMASK & \ |
5313 | + ~(NFS_MOUNT_UNSHARED | NFS_MOUNT_NORESVPORT)) |
5314 | + |
5315 | static int |
5316 | nfs_compare_remount_data(struct nfs_server *nfss, |
5317 | struct nfs_parsed_mount_data *data) |
5318 | { |
5319 | - if ((data->flags ^ nfss->flags) & NFS_MOUNT_CMP_FLAGMASK || |
5320 | + if ((data->flags ^ nfss->flags) & NFS_REMOUNT_CMP_FLAGMASK || |
5321 | data->rsize != nfss->rsize || |
5322 | data->wsize != nfss->wsize || |
5323 | data->version != nfss->nfs_client->rpc_ops->version || |
5324 | diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c |
5325 | index 2c73cae9899d..0f23ad005826 100644 |
5326 | --- a/fs/nfsd/nfs4callback.c |
5327 | +++ b/fs/nfsd/nfs4callback.c |
5328 | @@ -689,7 +689,8 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c |
5329 | clp->cl_cb_session = ses; |
5330 | args.bc_xprt = conn->cb_xprt; |
5331 | args.prognumber = clp->cl_cb_session->se_cb_prog; |
5332 | - args.protocol = XPRT_TRANSPORT_BC_TCP; |
5333 | + args.protocol = conn->cb_xprt->xpt_class->xcl_ident | |
5334 | + XPRT_TRANSPORT_BC; |
5335 | args.authflavor = ses->se_cb_sec.flavor; |
5336 | } |
5337 | /* Create RPC client */ |
5338 | diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c |
5339 | index 1879e43f2868..2f2edbb2a4a3 100644 |
5340 | --- a/fs/nfsd/nfssvc.c |
5341 | +++ b/fs/nfsd/nfssvc.c |
5342 | @@ -221,7 +221,8 @@ static int nfsd_startup_generic(int nrservs) |
5343 | */ |
5344 | ret = nfsd_racache_init(2*nrservs); |
5345 | if (ret) |
5346 | - return ret; |
5347 | + goto dec_users; |
5348 | + |
5349 | ret = nfs4_state_start(); |
5350 | if (ret) |
5351 | goto out_racache; |
5352 | @@ -229,6 +230,8 @@ static int nfsd_startup_generic(int nrservs) |
5353 | |
5354 | out_racache: |
5355 | nfsd_racache_shutdown(); |
5356 | +dec_users: |
5357 | + nfsd_users--; |
5358 | return ret; |
5359 | } |
5360 | |
5361 | diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h |
5362 | index 6dfd64b3a604..e973540cd15b 100644 |
5363 | --- a/include/drm/drm_pciids.h |
5364 | +++ b/include/drm/drm_pciids.h |
5365 | @@ -17,6 +17,7 @@ |
5366 | {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
5367 | {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
5368 | {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
5369 | + {0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
5370 | {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
5371 | {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
5372 | {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
5373 | @@ -164,8 +165,11 @@ |
5374 | {0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5375 | {0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5376 | {0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5377 | + {0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5378 | + {0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5379 | {0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5380 | {0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5381 | + {0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
5382 | {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
5383 | {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
5384 | {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
5385 | @@ -175,6 +179,8 @@ |
5386 | {0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ |
5387 | {0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5388 | {0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5389 | + {0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5390 | + {0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5391 | {0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
5392 | {0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
5393 | {0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \ |
5394 | @@ -297,6 +303,7 @@ |
5395 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
5396 | {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5397 | {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5398 | + {0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
5399 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5400 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5401 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
5402 | diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h |
5403 | index d5b50a19463c..0dae71e9971c 100644 |
5404 | --- a/include/linux/jbd2.h |
5405 | +++ b/include/linux/jbd2.h |
5406 | @@ -159,7 +159,11 @@ typedef struct journal_header_s |
5407 | * journal_block_tag (in the descriptor). The other h_chksum* fields are |
5408 | * not used. |
5409 | * |
5410 | - * Checksum v1 and v2 are mutually exclusive features. |
5411 | + * If FEATURE_INCOMPAT_CSUM_V3 is set, the descriptor block uses |
5412 | + * journal_block_tag3_t to store a full 32-bit checksum. Everything else |
5413 | + * is the same as v2. |
5414 | + * |
5415 | + * Checksum v1, v2, and v3 are mutually exclusive features. |
5416 | */ |
5417 | struct commit_header { |
5418 | __be32 h_magic; |
5419 | @@ -179,6 +183,14 @@ struct commit_header { |
5420 | * raw struct shouldn't be used for pointer math or sizeof() - use |
5421 | * journal_tag_bytes(journal) instead to compute this. |
5422 | */ |
5423 | +typedef struct journal_block_tag3_s |
5424 | +{ |
5425 | + __be32 t_blocknr; /* The on-disk block number */ |
5426 | + __be32 t_flags; /* See below */ |
5427 | + __be32 t_blocknr_high; /* most-significant high 32bits. */ |
5428 | + __be32 t_checksum; /* crc32c(uuid+seq+block) */ |
5429 | +} journal_block_tag3_t; |
5430 | + |
5431 | typedef struct journal_block_tag_s |
5432 | { |
5433 | __be32 t_blocknr; /* The on-disk block number */ |
5434 | @@ -187,9 +199,6 @@ typedef struct journal_block_tag_s |
5435 | __be32 t_blocknr_high; /* most-significant high 32bits. */ |
5436 | } journal_block_tag_t; |
5437 | |
5438 | -#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high)) |
5439 | -#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t)) |
5440 | - |
5441 | /* Tail of descriptor block, for checksumming */ |
5442 | struct jbd2_journal_block_tail { |
5443 | __be32 t_checksum; /* crc32c(uuid+descr_block) */ |
5444 | @@ -284,6 +293,7 @@ typedef struct journal_superblock_s |
5445 | #define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 |
5446 | #define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 |
5447 | #define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008 |
5448 | +#define JBD2_FEATURE_INCOMPAT_CSUM_V3 0x00000010 |
5449 | |
5450 | /* Features known to this kernel version: */ |
5451 | #define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM |
5452 | @@ -291,7 +301,8 @@ typedef struct journal_superblock_s |
5453 | #define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ |
5454 | JBD2_FEATURE_INCOMPAT_64BIT | \ |
5455 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \ |
5456 | - JBD2_FEATURE_INCOMPAT_CSUM_V2) |
5457 | + JBD2_FEATURE_INCOMPAT_CSUM_V2 | \ |
5458 | + JBD2_FEATURE_INCOMPAT_CSUM_V3) |
5459 | |
5460 | #ifdef __KERNEL__ |
5461 | |
5462 | @@ -1296,6 +1307,15 @@ static inline int tid_geq(tid_t x, tid_t y) |
5463 | extern int jbd2_journal_blocks_per_page(struct inode *inode); |
5464 | extern size_t journal_tag_bytes(journal_t *journal); |
5465 | |
5466 | +static inline int jbd2_journal_has_csum_v2or3(journal_t *journal) |
5467 | +{ |
5468 | + if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) || |
5469 | + JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) |
5470 | + return 1; |
5471 | + |
5472 | + return 0; |
5473 | +} |
5474 | + |
5475 | /* |
5476 | * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for |
5477 | * transaction control blocks. |
5478 | diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h |
5479 | index 7235040a19b2..5d9d6f84b382 100644 |
5480 | --- a/include/linux/sunrpc/svc_xprt.h |
5481 | +++ b/include/linux/sunrpc/svc_xprt.h |
5482 | @@ -33,6 +33,7 @@ struct svc_xprt_class { |
5483 | struct svc_xprt_ops *xcl_ops; |
5484 | struct list_head xcl_list; |
5485 | u32 xcl_max_payload; |
5486 | + int xcl_ident; |
5487 | }; |
5488 | |
5489 | /* |
5490 | diff --git a/kernel/sched/core.c b/kernel/sched/core.c |
5491 | index bc1638b33449..0acf96b790c5 100644 |
5492 | --- a/kernel/sched/core.c |
5493 | +++ b/kernel/sched/core.c |
5494 | @@ -3558,9 +3558,10 @@ static int _sched_setscheduler(struct task_struct *p, int policy, |
5495 | }; |
5496 | |
5497 | /* |
5498 | - * Fixup the legacy SCHED_RESET_ON_FORK hack |
5499 | + * Fixup the legacy SCHED_RESET_ON_FORK hack, except if |
5500 | + * the policy=-1 was passed by sched_setparam(). |
5501 | */ |
5502 | - if (policy & SCHED_RESET_ON_FORK) { |
5503 | + if ((policy != -1) && (policy & SCHED_RESET_ON_FORK)) { |
5504 | attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; |
5505 | policy &= ~SCHED_RESET_ON_FORK; |
5506 | attr.sched_policy = policy; |
5507 | diff --git a/mm/memory.c b/mm/memory.c |
5508 | index 8b44f765b645..0a21f3d162ae 100644 |
5509 | --- a/mm/memory.c |
5510 | +++ b/mm/memory.c |
5511 | @@ -751,7 +751,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, |
5512 | unsigned long pfn = pte_pfn(pte); |
5513 | |
5514 | if (HAVE_PTE_SPECIAL) { |
5515 | - if (likely(!pte_special(pte) || pte_numa(pte))) |
5516 | + if (likely(!pte_special(pte))) |
5517 | goto check_pfn; |
5518 | if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) |
5519 | return NULL; |
5520 | @@ -777,15 +777,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, |
5521 | } |
5522 | } |
5523 | |
5524 | + if (is_zero_pfn(pfn)) |
5525 | + return NULL; |
5526 | check_pfn: |
5527 | if (unlikely(pfn > highest_memmap_pfn)) { |
5528 | print_bad_pte(vma, addr, pte, NULL); |
5529 | return NULL; |
5530 | } |
5531 | |
5532 | - if (is_zero_pfn(pfn)) |
5533 | - return NULL; |
5534 | - |
5535 | /* |
5536 | * NOTE! We still have PageReserved() pages in the page tables. |
5537 | * eg. VDSO mappings can cause them to exist. |
5538 | diff --git a/mm/util.c b/mm/util.c |
5539 | index d5ea733c5082..33e9f4455800 100644 |
5540 | --- a/mm/util.c |
5541 | +++ b/mm/util.c |
5542 | @@ -277,17 +277,14 @@ pid_t vm_is_stack(struct task_struct *task, |
5543 | |
5544 | if (in_group) { |
5545 | struct task_struct *t; |
5546 | - rcu_read_lock(); |
5547 | - if (!pid_alive(task)) |
5548 | - goto done; |
5549 | |
5550 | - t = task; |
5551 | - do { |
5552 | + rcu_read_lock(); |
5553 | + for_each_thread(task, t) { |
5554 | if (vm_is_stack_for_task(t, vma)) { |
5555 | ret = t->pid; |
5556 | goto done; |
5557 | } |
5558 | - } while_each_thread(task, t); |
5559 | + } |
5560 | done: |
5561 | rcu_read_unlock(); |
5562 | } |
5563 | diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c |
5564 | index b507cd327d9b..b2437ee93657 100644 |
5565 | --- a/net/sunrpc/svcsock.c |
5566 | +++ b/net/sunrpc/svcsock.c |
5567 | @@ -692,6 +692,7 @@ static struct svc_xprt_class svc_udp_class = { |
5568 | .xcl_owner = THIS_MODULE, |
5569 | .xcl_ops = &svc_udp_ops, |
5570 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP, |
5571 | + .xcl_ident = XPRT_TRANSPORT_UDP, |
5572 | }; |
5573 | |
5574 | static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) |
5575 | @@ -1292,6 +1293,7 @@ static struct svc_xprt_class svc_tcp_class = { |
5576 | .xcl_owner = THIS_MODULE, |
5577 | .xcl_ops = &svc_tcp_ops, |
5578 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, |
5579 | + .xcl_ident = XPRT_TRANSPORT_TCP, |
5580 | }; |
5581 | |
5582 | void svc_init_xprt_sock(void) |
5583 | diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c |
5584 | index c3b2b3369e52..51c63165073c 100644 |
5585 | --- a/net/sunrpc/xprt.c |
5586 | +++ b/net/sunrpc/xprt.c |
5587 | @@ -1306,7 +1306,7 @@ struct rpc_xprt *xprt_create_transport(struct xprt_create *args) |
5588 | } |
5589 | } |
5590 | spin_unlock(&xprt_list_lock); |
5591 | - printk(KERN_ERR "RPC: transport (%d) not supported\n", args->ident); |
5592 | + dprintk("RPC: transport (%d) not supported\n", args->ident); |
5593 | return ERR_PTR(-EIO); |
5594 | |
5595 | found: |
5596 | diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c |
5597 | index e7323fbbd348..06a5d9235107 100644 |
5598 | --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c |
5599 | +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c |
5600 | @@ -92,6 +92,7 @@ struct svc_xprt_class svc_rdma_class = { |
5601 | .xcl_owner = THIS_MODULE, |
5602 | .xcl_ops = &svc_rdma_ops, |
5603 | .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP, |
5604 | + .xcl_ident = XPRT_TRANSPORT_RDMA, |
5605 | }; |
5606 | |
5607 | struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) |
5608 | diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig |
5609 | index 3a3a3a71088b..50dd0086cfb1 100644 |
5610 | --- a/sound/pci/Kconfig |
5611 | +++ b/sound/pci/Kconfig |
5612 | @@ -858,8 +858,8 @@ config SND_VIRTUOSO |
5613 | select SND_JACK if INPUT=y || INPUT=SND |
5614 | help |
5615 | Say Y here to include support for sound cards based on the |
5616 | - Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, |
5617 | - Essence ST (Deluxe), and Essence STX. |
5618 | + Asus AV66/AV100/AV200 chips, i.e., Xonar D1, DX, D2, D2X, DS, DSX, |
5619 | + Essence ST (Deluxe), and Essence STX (II). |
5620 | Support for the HDAV1.3 (Deluxe) and HDAV1.3 Slim is experimental; |
5621 | for the Xense, missing. |
5622 | |
5623 | diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c |
5624 | index 092f2bd030bd..b686aca7f000 100644 |
5625 | --- a/sound/pci/hda/patch_ca0132.c |
5626 | +++ b/sound/pci/hda/patch_ca0132.c |
5627 | @@ -4376,6 +4376,9 @@ static void ca0132_download_dsp(struct hda_codec *codec) |
5628 | return; /* NOP */ |
5629 | #endif |
5630 | |
5631 | + if (spec->dsp_state == DSP_DOWNLOAD_FAILED) |
5632 | + return; /* don't retry failures */ |
5633 | + |
5634 | chipio_enable_clocks(codec); |
5635 | spec->dsp_state = DSP_DOWNLOADING; |
5636 | if (!ca0132_download_dsp_images(codec)) |
5637 | @@ -4552,7 +4555,8 @@ static int ca0132_init(struct hda_codec *codec) |
5638 | struct auto_pin_cfg *cfg = &spec->autocfg; |
5639 | int i; |
5640 | |
5641 | - spec->dsp_state = DSP_DOWNLOAD_INIT; |
5642 | + if (spec->dsp_state != DSP_DOWNLOAD_FAILED) |
5643 | + spec->dsp_state = DSP_DOWNLOAD_INIT; |
5644 | spec->curr_chip_addx = INVALID_CHIP_ADDRESS; |
5645 | |
5646 | snd_hda_power_up(codec); |
5647 | @@ -4663,6 +4667,7 @@ static int patch_ca0132(struct hda_codec *codec) |
5648 | codec->spec = spec; |
5649 | spec->codec = codec; |
5650 | |
5651 | + spec->dsp_state = DSP_DOWNLOAD_INIT; |
5652 | spec->num_mixers = 1; |
5653 | spec->mixers[0] = ca0132_mixer; |
5654 | |
5655 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
5656 | index b60824e90408..25728aaacc26 100644 |
5657 | --- a/sound/pci/hda/patch_realtek.c |
5658 | +++ b/sound/pci/hda/patch_realtek.c |
5659 | @@ -180,6 +180,8 @@ static void alc_fix_pll(struct hda_codec *codec) |
5660 | spec->pll_coef_idx); |
5661 | val = snd_hda_codec_read(codec, spec->pll_nid, 0, |
5662 | AC_VERB_GET_PROC_COEF, 0); |
5663 | + if (val == -1) |
5664 | + return; |
5665 | snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_COEF_INDEX, |
5666 | spec->pll_coef_idx); |
5667 | snd_hda_codec_write(codec, spec->pll_nid, 0, AC_VERB_SET_PROC_COEF, |
5668 | @@ -2784,6 +2786,8 @@ static int alc269_parse_auto_config(struct hda_codec *codec) |
5669 | static void alc269vb_toggle_power_output(struct hda_codec *codec, int power_up) |
5670 | { |
5671 | int val = alc_read_coef_idx(codec, 0x04); |
5672 | + if (val == -1) |
5673 | + return; |
5674 | if (power_up) |
5675 | val |= 1 << 11; |
5676 | else |
5677 | @@ -3242,6 +3246,15 @@ static int alc269_resume(struct hda_codec *codec) |
5678 | snd_hda_codec_resume_cache(codec); |
5679 | alc_inv_dmic_sync(codec, true); |
5680 | hda_call_check_power_status(codec, 0x01); |
5681 | + |
5682 | + /* on some machine, the BIOS will clear the codec gpio data when enter |
5683 | + * suspend, and won't restore the data after resume, so we restore it |
5684 | + * in the driver. |
5685 | + */ |
5686 | + if (spec->gpio_led) |
5687 | + snd_hda_codec_write(codec, codec->afg, 0, AC_VERB_SET_GPIO_DATA, |
5688 | + spec->gpio_led); |
5689 | + |
5690 | if (spec->has_alc5505_dsp) |
5691 | alc5505_dsp_resume(codec); |
5692 | |
5693 | @@ -4782,6 +4795,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
5694 | SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5695 | SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), |
5696 | /* ALC282 */ |
5697 | + SND_PCI_QUIRK(0x103c, 0x2191, "HP Touchsmart 14", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5698 | + SND_PCI_QUIRK(0x103c, 0x2192, "HP Touchsmart 15", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5699 | SND_PCI_QUIRK(0x103c, 0x220d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5700 | SND_PCI_QUIRK(0x103c, 0x220e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5701 | SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), |
5702 | @@ -5122,27 +5137,30 @@ static void alc269_fill_coef(struct hda_codec *codec) |
5703 | if ((alc_get_coef0(codec) & 0x00ff) == 0x017) { |
5704 | val = alc_read_coef_idx(codec, 0x04); |
5705 | /* Power up output pin */ |
5706 | - alc_write_coef_idx(codec, 0x04, val | (1<<11)); |
5707 | + if (val != -1) |
5708 | + alc_write_coef_idx(codec, 0x04, val | (1<<11)); |
5709 | } |
5710 | |
5711 | if ((alc_get_coef0(codec) & 0x00ff) == 0x018) { |
5712 | val = alc_read_coef_idx(codec, 0xd); |
5713 | - if ((val & 0x0c00) >> 10 != 0x1) { |
5714 | + if (val != -1 && (val & 0x0c00) >> 10 != 0x1) { |
5715 | /* Capless ramp up clock control */ |
5716 | alc_write_coef_idx(codec, 0xd, val | (1<<10)); |
5717 | } |
5718 | val = alc_read_coef_idx(codec, 0x17); |
5719 | - if ((val & 0x01c0) >> 6 != 0x4) { |
5720 | + if (val != -1 && (val & 0x01c0) >> 6 != 0x4) { |
5721 | /* Class D power on reset */ |
5722 | alc_write_coef_idx(codec, 0x17, val | (1<<7)); |
5723 | } |
5724 | } |
5725 | |
5726 | val = alc_read_coef_idx(codec, 0xd); /* Class D */ |
5727 | - alc_write_coef_idx(codec, 0xd, val | (1<<14)); |
5728 | + if (val != -1) |
5729 | + alc_write_coef_idx(codec, 0xd, val | (1<<14)); |
5730 | |
5731 | val = alc_read_coef_idx(codec, 0x4); /* HP */ |
5732 | - alc_write_coef_idx(codec, 0x4, val | (1<<11)); |
5733 | + if (val != -1) |
5734 | + alc_write_coef_idx(codec, 0x4, val | (1<<11)); |
5735 | } |
5736 | |
5737 | /* |
5738 | diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c |
5739 | index 3744ea4e843d..4d3a3b932690 100644 |
5740 | --- a/sound/pci/hda/patch_sigmatel.c |
5741 | +++ b/sound/pci/hda/patch_sigmatel.c |
5742 | @@ -84,6 +84,7 @@ enum { |
5743 | STAC_DELL_EQ, |
5744 | STAC_ALIENWARE_M17X, |
5745 | STAC_92HD89XX_HP_FRONT_JACK, |
5746 | + STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, |
5747 | STAC_92HD73XX_MODELS |
5748 | }; |
5749 | |
5750 | @@ -1809,6 +1810,11 @@ static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = { |
5751 | {} |
5752 | }; |
5753 | |
5754 | +static const struct hda_pintbl stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs[] = { |
5755 | + { 0x0e, 0x400000f0 }, |
5756 | + {} |
5757 | +}; |
5758 | + |
5759 | static void stac92hd73xx_fixup_ref(struct hda_codec *codec, |
5760 | const struct hda_fixup *fix, int action) |
5761 | { |
5762 | @@ -1931,6 +1937,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = { |
5763 | [STAC_92HD89XX_HP_FRONT_JACK] = { |
5764 | .type = HDA_FIXUP_PINS, |
5765 | .v.pins = stac92hd89xx_hp_front_jack_pin_configs, |
5766 | + }, |
5767 | + [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = { |
5768 | + .type = HDA_FIXUP_PINS, |
5769 | + .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs, |
5770 | } |
5771 | }; |
5772 | |
5773 | @@ -1991,6 +2001,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = { |
5774 | "Alienware M17x", STAC_ALIENWARE_M17X), |
5775 | SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490, |
5776 | "Alienware M17x R3", STAC_DELL_EQ), |
5777 | + SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1927, |
5778 | + "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), |
5779 | SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, |
5780 | "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), |
5781 | {} /* terminator */ |
5782 | diff --git a/sound/pci/oxygen/virtuoso.c b/sound/pci/oxygen/virtuoso.c |
5783 | index 64b9fda5f04a..dbbbacfd535e 100644 |
5784 | --- a/sound/pci/oxygen/virtuoso.c |
5785 | +++ b/sound/pci/oxygen/virtuoso.c |
5786 | @@ -53,6 +53,7 @@ static DEFINE_PCI_DEVICE_TABLE(xonar_ids) = { |
5787 | { OXYGEN_PCI_SUBID(0x1043, 0x835e) }, |
5788 | { OXYGEN_PCI_SUBID(0x1043, 0x838e) }, |
5789 | { OXYGEN_PCI_SUBID(0x1043, 0x8522) }, |
5790 | + { OXYGEN_PCI_SUBID(0x1043, 0x85f4) }, |
5791 | { OXYGEN_PCI_SUBID_BROKEN_EEPROM }, |
5792 | { } |
5793 | }; |
5794 | diff --git a/sound/pci/oxygen/xonar_pcm179x.c b/sound/pci/oxygen/xonar_pcm179x.c |
5795 | index c8c7f2c9b355..e02605931669 100644 |
5796 | --- a/sound/pci/oxygen/xonar_pcm179x.c |
5797 | +++ b/sound/pci/oxygen/xonar_pcm179x.c |
5798 | @@ -100,8 +100,8 @@ |
5799 | */ |
5800 | |
5801 | /* |
5802 | - * Xonar Essence ST (Deluxe)/STX |
5803 | - * ----------------------------- |
5804 | + * Xonar Essence ST (Deluxe)/STX (II) |
5805 | + * ---------------------------------- |
5806 | * |
5807 | * CMI8788: |
5808 | * |
5809 | @@ -1138,6 +1138,14 @@ int get_xonar_pcm179x_model(struct oxygen *chip, |
5810 | chip->model.resume = xonar_stx_resume; |
5811 | chip->model.set_dac_params = set_pcm1796_params; |
5812 | break; |
5813 | + case 0x85f4: |
5814 | + chip->model = model_xonar_st; |
5815 | + /* TODO: daughterboard support */ |
5816 | + chip->model.shortname = "Xonar STX II"; |
5817 | + chip->model.init = xonar_stx_init; |
5818 | + chip->model.resume = xonar_stx_resume; |
5819 | + chip->model.set_dac_params = set_pcm1796_params; |
5820 | + break; |
5821 | default: |
5822 | return -EINVAL; |
5823 | } |
5824 | diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h |
5825 | index f652b10ce905..223c47b33ba3 100644 |
5826 | --- a/sound/usb/quirks-table.h |
5827 | +++ b/sound/usb/quirks-table.h |
5828 | @@ -1581,6 +1581,35 @@ YAMAHA_DEVICE(0x7010, "UB99"), |
5829 | } |
5830 | }, |
5831 | { |
5832 | + /* BOSS ME-25 */ |
5833 | + USB_DEVICE(0x0582, 0x0113), |
5834 | + .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { |
5835 | + .ifnum = QUIRK_ANY_INTERFACE, |
5836 | + .type = QUIRK_COMPOSITE, |
5837 | + .data = (const struct snd_usb_audio_quirk[]) { |
5838 | + { |
5839 | + .ifnum = 0, |
5840 | + .type = QUIRK_AUDIO_STANDARD_INTERFACE |
5841 | + }, |
5842 | + { |
5843 | + .ifnum = 1, |
5844 | + .type = QUIRK_AUDIO_STANDARD_INTERFACE |
5845 | + }, |
5846 | + { |
5847 | + .ifnum = 2, |
5848 | + .type = QUIRK_MIDI_FIXED_ENDPOINT, |
5849 | + .data = & (const struct snd_usb_midi_endpoint_info) { |
5850 | + .out_cables = 0x0001, |
5851 | + .in_cables = 0x0001 |
5852 | + } |
5853 | + }, |
5854 | + { |
5855 | + .ifnum = -1 |
5856 | + } |
5857 | + } |
5858 | + } |
5859 | +}, |
5860 | +{ |
5861 | /* only 44.1 kHz works at the moment */ |
5862 | USB_DEVICE(0x0582, 0x0120), |
5863 | .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { |
5864 | diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c |
5865 | index 7c57f2268dd7..19a921eb75f1 100644 |
5866 | --- a/sound/usb/quirks.c |
5867 | +++ b/sound/usb/quirks.c |
5868 | @@ -670,7 +670,7 @@ static int snd_usb_gamecon780_boot_quirk(struct usb_device *dev) |
5869 | /* set the initial volume and don't change; other values are either |
5870 | * too loud or silent due to firmware bug (bko#65251) |
5871 | */ |
5872 | - u8 buf[2] = { 0x74, 0xdc }; |
5873 | + u8 buf[2] = { 0x74, 0xe3 }; |
5874 | return snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR, |
5875 | USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT, |
5876 | UAC_FU_VOLUME << 8, 9 << 8, buf, 2); |
5877 | diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c |
5878 | index 2458a1dc2ba9..e8ce34c9db32 100644 |
5879 | --- a/virt/kvm/ioapic.c |
5880 | +++ b/virt/kvm/ioapic.c |
5881 | @@ -254,10 +254,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap, |
5882 | spin_lock(&ioapic->lock); |
5883 | for (index = 0; index < IOAPIC_NUM_PINS; index++) { |
5884 | e = &ioapic->redirtbl[index]; |
5885 | - if (!e->fields.mask && |
5886 | - (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || |
5887 | - kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, |
5888 | - index) || index == RTC_GSI)) { |
5889 | + if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG || |
5890 | + kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) || |
5891 | + index == RTC_GSI) { |
5892 | if (kvm_apic_match_dest(vcpu, NULL, 0, |
5893 | e->fields.dest_id, e->fields.dest_mode)) { |
5894 | __set_bit(e->fields.vector, |
5895 | diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c |
5896 | index 0df7d4b34dfe..714b94932312 100644 |
5897 | --- a/virt/kvm/iommu.c |
5898 | +++ b/virt/kvm/iommu.c |
5899 | @@ -61,6 +61,14 @@ static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn, |
5900 | return pfn; |
5901 | } |
5902 | |
5903 | +static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) |
5904 | +{ |
5905 | + unsigned long i; |
5906 | + |
5907 | + for (i = 0; i < npages; ++i) |
5908 | + kvm_release_pfn_clean(pfn + i); |
5909 | +} |
5910 | + |
5911 | int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) |
5912 | { |
5913 | gfn_t gfn, end_gfn; |
5914 | @@ -123,6 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) |
5915 | if (r) { |
5916 | printk(KERN_ERR "kvm_iommu_map_address:" |
5917 | "iommu failed to map pfn=%llx\n", pfn); |
5918 | + kvm_unpin_pages(kvm, pfn, page_size); |
5919 | goto unmap_pages; |
5920 | } |
5921 | |
5922 | @@ -134,7 +143,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot) |
5923 | return 0; |
5924 | |
5925 | unmap_pages: |
5926 | - kvm_iommu_put_pages(kvm, slot->base_gfn, gfn); |
5927 | + kvm_iommu_put_pages(kvm, slot->base_gfn, gfn - slot->base_gfn); |
5928 | return r; |
5929 | } |
5930 | |
5931 | @@ -266,14 +275,6 @@ out_unlock: |
5932 | return r; |
5933 | } |
5934 | |
5935 | -static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages) |
5936 | -{ |
5937 | - unsigned long i; |
5938 | - |
5939 | - for (i = 0; i < npages; ++i) |
5940 | - kvm_release_pfn_clean(pfn + i); |
5941 | -} |
5942 | - |
5943 | static void kvm_iommu_put_pages(struct kvm *kvm, |
5944 | gfn_t base_gfn, unsigned long npages) |
5945 | { |