Contents of /trunk/kernel-magellan/patches-3.2/0113-3.2.14-all-fixes.patch
Parent Directory | Revision Log
Revision 1739 -
(show annotations)
(download)
Mon Apr 16 16:34:31 2012 UTC (12 years, 5 months ago) by niro
File size: 206747 byte(s)
-incr patch for linux-3.2.14
1 | diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig |
2 | index 91aff7c..dbc59fa 100644 |
3 | --- a/arch/arm/mach-tegra/Kconfig |
4 | +++ b/arch/arm/mach-tegra/Kconfig |
5 | @@ -13,6 +13,13 @@ config ARCH_TEGRA_2x_SOC |
6 | select USB_ARCH_HAS_EHCI if USB_SUPPORT |
7 | select USB_ULPI if USB_SUPPORT |
8 | select USB_ULPI_VIEWPORT if USB_SUPPORT |
9 | + select ARM_ERRATA_720789 |
10 | + select ARM_ERRATA_742230 |
11 | + select ARM_ERRATA_751472 |
12 | + select ARM_ERRATA_754327 |
13 | + select ARM_ERRATA_764369 |
14 | + select PL310_ERRATA_727915 if CACHE_L2X0 |
15 | + select PL310_ERRATA_769419 if CACHE_L2X0 |
16 | help |
17 | Support for NVIDIA Tegra AP20 and T20 processors, based on the |
18 | ARM CortexA9MP CPU and the ARM PL310 L2 cache controller |
19 | @@ -54,6 +61,11 @@ config MACH_SEABOARD |
20 | config MACH_TEGRA_DT |
21 | bool "Generic Tegra board (FDT support)" |
22 | select USE_OF |
23 | + select ARM_ERRATA_743622 |
24 | + select ARM_ERRATA_751472 |
25 | + select ARM_ERRATA_754322 |
26 | + select ARM_ERRATA_764369 |
27 | + select PL310_ERRATA_769419 if CACHE_L2X0 |
28 | help |
29 | Support for generic nVidia Tegra boards using Flattened Device Tree |
30 | |
31 | diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h |
32 | index a026507..70ea6fd 100644 |
33 | --- a/arch/x86/include/asm/kvm_emulate.h |
34 | +++ b/arch/x86/include/asm/kvm_emulate.h |
35 | @@ -189,6 +189,9 @@ struct x86_emulate_ops { |
36 | int (*intercept)(struct x86_emulate_ctxt *ctxt, |
37 | struct x86_instruction_info *info, |
38 | enum x86_intercept_stage stage); |
39 | + |
40 | + bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt, |
41 | + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx); |
42 | }; |
43 | |
44 | typedef u32 __attribute__((vector_size(16))) sse128_t; |
45 | @@ -297,6 +300,19 @@ struct x86_emulate_ctxt { |
46 | #define X86EMUL_MODE_PROT (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \ |
47 | X86EMUL_MODE_PROT64) |
48 | |
49 | +/* CPUID vendors */ |
50 | +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541 |
51 | +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163 |
52 | +#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65 |
53 | + |
54 | +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41 |
55 | +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574 |
56 | +#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273 |
57 | + |
58 | +#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547 |
59 | +#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e |
60 | +#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69 |
61 | + |
62 | enum x86_intercept_stage { |
63 | X86_ICTP_NONE = 0, /* Allow zero-init to not match anything */ |
64 | X86_ICPT_PRE_EXCEPT, |
65 | diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c |
66 | index 6d939d7..a25e276 100644 |
67 | --- a/arch/x86/kernel/apic/io_apic.c |
68 | +++ b/arch/x86/kernel/apic/io_apic.c |
69 | @@ -3963,18 +3963,36 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi) |
70 | static __init int bad_ioapic(unsigned long address) |
71 | { |
72 | if (nr_ioapics >= MAX_IO_APICS) { |
73 | - printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded " |
74 | - "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics); |
75 | + pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n", |
76 | + MAX_IO_APICS, nr_ioapics); |
77 | return 1; |
78 | } |
79 | if (!address) { |
80 | - printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address" |
81 | - " found in table, skipping!\n"); |
82 | + pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n"); |
83 | return 1; |
84 | } |
85 | return 0; |
86 | } |
87 | |
88 | +static __init int bad_ioapic_register(int idx) |
89 | +{ |
90 | + union IO_APIC_reg_00 reg_00; |
91 | + union IO_APIC_reg_01 reg_01; |
92 | + union IO_APIC_reg_02 reg_02; |
93 | + |
94 | + reg_00.raw = io_apic_read(idx, 0); |
95 | + reg_01.raw = io_apic_read(idx, 1); |
96 | + reg_02.raw = io_apic_read(idx, 2); |
97 | + |
98 | + if (reg_00.raw == -1 && reg_01.raw == -1 && reg_02.raw == -1) { |
99 | + pr_warn("I/O APIC 0x%x registers return all ones, skipping!\n", |
100 | + mpc_ioapic_addr(idx)); |
101 | + return 1; |
102 | + } |
103 | + |
104 | + return 0; |
105 | +} |
106 | + |
107 | void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) |
108 | { |
109 | int idx = 0; |
110 | @@ -3991,6 +4009,12 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) |
111 | ioapics[idx].mp_config.apicaddr = address; |
112 | |
113 | set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address); |
114 | + |
115 | + if (bad_ioapic_register(idx)) { |
116 | + clear_fixmap(FIX_IO_APIC_BASE_0 + idx); |
117 | + return; |
118 | + } |
119 | + |
120 | ioapics[idx].mp_config.apicid = io_apic_unique_id(id); |
121 | ioapics[idx].mp_config.apicver = io_apic_get_version(idx); |
122 | |
123 | @@ -4011,10 +4035,10 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base) |
124 | if (gsi_cfg->gsi_end >= gsi_top) |
125 | gsi_top = gsi_cfg->gsi_end + 1; |
126 | |
127 | - printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, " |
128 | - "GSI %d-%d\n", idx, mpc_ioapic_id(idx), |
129 | - mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), |
130 | - gsi_cfg->gsi_base, gsi_cfg->gsi_end); |
131 | + pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n", |
132 | + idx, mpc_ioapic_id(idx), |
133 | + mpc_ioapic_ver(idx), mpc_ioapic_addr(idx), |
134 | + gsi_cfg->gsi_base, gsi_cfg->gsi_end); |
135 | |
136 | nr_ioapics++; |
137 | } |
138 | diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S |
139 | index f3f6f53..bcda816 100644 |
140 | --- a/arch/x86/kernel/entry_32.S |
141 | +++ b/arch/x86/kernel/entry_32.S |
142 | @@ -99,12 +99,6 @@ |
143 | #endif |
144 | .endm |
145 | |
146 | -#ifdef CONFIG_VM86 |
147 | -#define resume_userspace_sig check_userspace |
148 | -#else |
149 | -#define resume_userspace_sig resume_userspace |
150 | -#endif |
151 | - |
152 | /* |
153 | * User gs save/restore |
154 | * |
155 | @@ -328,10 +322,19 @@ ret_from_exception: |
156 | preempt_stop(CLBR_ANY) |
157 | ret_from_intr: |
158 | GET_THREAD_INFO(%ebp) |
159 | -check_userspace: |
160 | +resume_userspace_sig: |
161 | +#ifdef CONFIG_VM86 |
162 | movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS |
163 | movb PT_CS(%esp), %al |
164 | andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax |
165 | +#else |
166 | + /* |
167 | + * We can be coming here from a syscall done in the kernel space, |
168 | + * e.g. a failed kernel_execve(). |
169 | + */ |
170 | + movl PT_CS(%esp), %eax |
171 | + andl $SEGMENT_RPL_MASK, %eax |
172 | +#endif |
173 | cmpl $USER_RPL, %eax |
174 | jb resume_kernel # not returning to v8086 or userspace |
175 | |
176 | diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c |
177 | index 6bb7b85..bcfec2d 100644 |
178 | --- a/arch/x86/kernel/tls.c |
179 | +++ b/arch/x86/kernel/tls.c |
180 | @@ -163,7 +163,7 @@ int regset_tls_get(struct task_struct *target, const struct user_regset *regset, |
181 | { |
182 | const struct desc_struct *tls; |
183 | |
184 | - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || |
185 | + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || |
186 | (pos % sizeof(struct user_desc)) != 0 || |
187 | (count % sizeof(struct user_desc)) != 0) |
188 | return -EINVAL; |
189 | @@ -198,7 +198,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset, |
190 | struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES]; |
191 | const struct user_desc *info; |
192 | |
193 | - if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || |
194 | + if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) || |
195 | (pos % sizeof(struct user_desc)) != 0 || |
196 | (count % sizeof(struct user_desc)) != 0) |
197 | return -EINVAL; |
198 | diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c |
199 | index db48336..3fe298a 100644 |
200 | --- a/arch/x86/kernel/tsc.c |
201 | +++ b/arch/x86/kernel/tsc.c |
202 | @@ -934,6 +934,16 @@ static int __init init_tsc_clocksource(void) |
203 | clocksource_tsc.rating = 0; |
204 | clocksource_tsc.flags &= ~CLOCK_SOURCE_IS_CONTINUOUS; |
205 | } |
206 | + |
207 | + /* |
208 | + * Trust the results of the earlier calibration on systems |
209 | + * exporting a reliable TSC. |
210 | + */ |
211 | + if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE)) { |
212 | + clocksource_register_khz(&clocksource_tsc, tsc_khz); |
213 | + return 0; |
214 | + } |
215 | + |
216 | schedule_delayed_work(&tsc_irqwork, 0); |
217 | return 0; |
218 | } |
219 | diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c |
220 | index 863f875..04b8726 100644 |
221 | --- a/arch/x86/kernel/vm86_32.c |
222 | +++ b/arch/x86/kernel/vm86_32.c |
223 | @@ -172,6 +172,7 @@ static void mark_screen_rdonly(struct mm_struct *mm) |
224 | spinlock_t *ptl; |
225 | int i; |
226 | |
227 | + down_write(&mm->mmap_sem); |
228 | pgd = pgd_offset(mm, 0xA0000); |
229 | if (pgd_none_or_clear_bad(pgd)) |
230 | goto out; |
231 | @@ -190,6 +191,7 @@ static void mark_screen_rdonly(struct mm_struct *mm) |
232 | } |
233 | pte_unmap_unlock(pte, ptl); |
234 | out: |
235 | + up_write(&mm->mmap_sem); |
236 | flush_tlb(); |
237 | } |
238 | |
239 | diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c |
240 | index f1e3be18..f5302da 100644 |
241 | --- a/arch/x86/kvm/emulate.c |
242 | +++ b/arch/x86/kvm/emulate.c |
243 | @@ -1877,6 +1877,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, |
244 | ss->p = 1; |
245 | } |
246 | |
247 | +static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt) |
248 | +{ |
249 | + struct x86_emulate_ops *ops = ctxt->ops; |
250 | + u32 eax, ebx, ecx, edx; |
251 | + |
252 | + /* |
253 | + * syscall should always be enabled in longmode - so only become |
254 | + * vendor specific (cpuid) if other modes are active... |
255 | + */ |
256 | + if (ctxt->mode == X86EMUL_MODE_PROT64) |
257 | + return true; |
258 | + |
259 | + eax = 0x00000000; |
260 | + ecx = 0x00000000; |
261 | + if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) { |
262 | + /* |
263 | + * Intel ("GenuineIntel") |
264 | + * remark: Intel CPUs only support "syscall" in 64bit |
265 | + * longmode. Also an 64bit guest with a |
266 | + * 32bit compat-app running will #UD !! While this |
267 | + * behaviour can be fixed (by emulating) into AMD |
268 | + * response - CPUs of AMD can't behave like Intel. |
269 | + */ |
270 | + if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx && |
271 | + ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx && |
272 | + edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx) |
273 | + return false; |
274 | + |
275 | + /* AMD ("AuthenticAMD") */ |
276 | + if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx && |
277 | + ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx && |
278 | + edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx) |
279 | + return true; |
280 | + |
281 | + /* AMD ("AMDisbetter!") */ |
282 | + if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx && |
283 | + ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx && |
284 | + edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx) |
285 | + return true; |
286 | + } |
287 | + |
288 | + /* default: (not Intel, not AMD), apply Intel's stricter rules... */ |
289 | + return false; |
290 | +} |
291 | + |
292 | static int em_syscall(struct x86_emulate_ctxt *ctxt) |
293 | { |
294 | struct x86_emulate_ops *ops = ctxt->ops; |
295 | @@ -1890,9 +1935,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) |
296 | ctxt->mode == X86EMUL_MODE_VM86) |
297 | return emulate_ud(ctxt); |
298 | |
299 | + if (!(em_syscall_is_enabled(ctxt))) |
300 | + return emulate_ud(ctxt); |
301 | + |
302 | ops->get_msr(ctxt, MSR_EFER, &efer); |
303 | setup_syscalls_segments(ctxt, &cs, &ss); |
304 | |
305 | + if (!(efer & EFER_SCE)) |
306 | + return emulate_ud(ctxt); |
307 | + |
308 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
309 | msr_data >>= 32; |
310 | cs_sel = (u16)(msr_data & 0xfffc); |
311 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
312 | index 4c938da..e04cae1 100644 |
313 | --- a/arch/x86/kvm/x86.c |
314 | +++ b/arch/x86/kvm/x86.c |
315 | @@ -4655,6 +4655,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt, |
316 | return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage); |
317 | } |
318 | |
319 | +static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt, |
320 | + u32 *eax, u32 *ebx, u32 *ecx, u32 *edx) |
321 | +{ |
322 | + struct kvm_cpuid_entry2 *cpuid = NULL; |
323 | + |
324 | + if (eax && ecx) |
325 | + cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt), |
326 | + *eax, *ecx); |
327 | + |
328 | + if (cpuid) { |
329 | + *eax = cpuid->eax; |
330 | + *ecx = cpuid->ecx; |
331 | + if (ebx) |
332 | + *ebx = cpuid->ebx; |
333 | + if (edx) |
334 | + *edx = cpuid->edx; |
335 | + return true; |
336 | + } |
337 | + |
338 | + return false; |
339 | +} |
340 | + |
341 | static struct x86_emulate_ops emulate_ops = { |
342 | .read_std = kvm_read_guest_virt_system, |
343 | .write_std = kvm_write_guest_virt_system, |
344 | @@ -4685,6 +4707,7 @@ static struct x86_emulate_ops emulate_ops = { |
345 | .get_fpu = emulator_get_fpu, |
346 | .put_fpu = emulator_put_fpu, |
347 | .intercept = emulator_intercept, |
348 | + .get_cpuid = emulator_get_cpuid, |
349 | }; |
350 | |
351 | static void cache_all_regs(struct kvm_vcpu *vcpu) |
352 | diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c |
353 | index 7c1b765..5671752 100644 |
354 | --- a/arch/x86/net/bpf_jit_comp.c |
355 | +++ b/arch/x86/net/bpf_jit_comp.c |
356 | @@ -475,8 +475,10 @@ void bpf_jit_compile(struct sk_filter *fp) |
357 | case BPF_S_LD_W_ABS: |
358 | func = sk_load_word; |
359 | common_load: seen |= SEEN_DATAREF; |
360 | - if ((int)K < 0) |
361 | + if ((int)K < 0) { |
362 | + /* Abort the JIT because __load_pointer() is needed. */ |
363 | goto out; |
364 | + } |
365 | t_offset = func - (image + addrs[i]); |
366 | EMIT1_off32(0xbe, K); /* mov imm32,%esi */ |
367 | EMIT1_off32(0xe8, t_offset); /* call */ |
368 | @@ -489,14 +491,8 @@ common_load: seen |= SEEN_DATAREF; |
369 | goto common_load; |
370 | case BPF_S_LDX_B_MSH: |
371 | if ((int)K < 0) { |
372 | - if (pc_ret0 > 0) { |
373 | - /* addrs[pc_ret0 - 1] is the start address */ |
374 | - EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]); |
375 | - break; |
376 | - } |
377 | - CLEAR_A(); |
378 | - EMIT_JMP(cleanup_addr - addrs[i]); |
379 | - break; |
380 | + /* Abort the JIT because __load_pointer() is needed. */ |
381 | + goto out; |
382 | } |
383 | seen |= SEEN_DATAREF | SEEN_XREG; |
384 | t_offset = sk_load_byte_msh - (image + addrs[i]); |
385 | diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c |
386 | index 35aca7d..4fe9d21 100644 |
387 | --- a/drivers/ata/pata_legacy.c |
388 | +++ b/drivers/ata/pata_legacy.c |
389 | @@ -401,8 +401,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev) |
390 | ata_timing_compute(adev, adev->pio_mode, &t, 20000, 1000); |
391 | |
392 | active = clamp_val(t.active, 2, 15); |
393 | - recover = clamp_val(t.recover, 2, 16); |
394 | - recover &= 0x15; |
395 | + recover = clamp_val(t.recover, 2, 16) & 0x0F; |
396 | |
397 | inb(0x3E6); |
398 | inb(0x3E6); |
399 | diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c |
400 | index 6790cf7..79038e5 100644 |
401 | --- a/drivers/base/power/domain.c |
402 | +++ b/drivers/base/power/domain.c |
403 | @@ -751,7 +751,8 @@ static int pm_genpd_resume_noirq(struct device *dev) |
404 | if (IS_ERR(genpd)) |
405 | return -EINVAL; |
406 | |
407 | - if (genpd->suspend_power_off) |
408 | + if (genpd->suspend_power_off |
409 | + || (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))) |
410 | return 0; |
411 | |
412 | /* |
413 | diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c |
414 | index 106beb1..db811d2 100644 |
415 | --- a/drivers/bluetooth/ath3k.c |
416 | +++ b/drivers/bluetooth/ath3k.c |
417 | @@ -64,6 +64,7 @@ static struct usb_device_id ath3k_table[] = { |
418 | { USB_DEVICE(0x0CF3, 0x3002) }, |
419 | { USB_DEVICE(0x13d3, 0x3304) }, |
420 | { USB_DEVICE(0x0930, 0x0215) }, |
421 | + { USB_DEVICE(0x0489, 0xE03D) }, |
422 | |
423 | /* Atheros AR9285 Malbec with sflash firmware */ |
424 | { USB_DEVICE(0x03F0, 0x311D) }, |
425 | diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c |
426 | index eabc437..c16c750 100644 |
427 | --- a/drivers/bluetooth/btusb.c |
428 | +++ b/drivers/bluetooth/btusb.c |
429 | @@ -119,6 +119,7 @@ static struct usb_device_id blacklist_table[] = { |
430 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, |
431 | { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, |
432 | { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, |
433 | + { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, |
434 | |
435 | /* Atheros AR9285 Malbec with sflash firmware */ |
436 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, |
437 | @@ -506,15 +507,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags) |
438 | |
439 | pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress); |
440 | |
441 | - urb->dev = data->udev; |
442 | - urb->pipe = pipe; |
443 | - urb->context = hdev; |
444 | - urb->complete = btusb_isoc_complete; |
445 | - urb->interval = data->isoc_rx_ep->bInterval; |
446 | + usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete, |
447 | + hdev, data->isoc_rx_ep->bInterval); |
448 | |
449 | urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP; |
450 | - urb->transfer_buffer = buf; |
451 | - urb->transfer_buffer_length = size; |
452 | |
453 | __fill_isoc_descriptor(urb, size, |
454 | le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize)); |
455 | diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig |
456 | index fa567f1..c9e045c 100644 |
457 | --- a/drivers/char/tpm/Kconfig |
458 | +++ b/drivers/char/tpm/Kconfig |
459 | @@ -5,7 +5,6 @@ |
460 | menuconfig TCG_TPM |
461 | tristate "TPM Hardware Support" |
462 | depends on HAS_IOMEM |
463 | - depends on EXPERIMENTAL |
464 | select SECURITYFS |
465 | ---help--- |
466 | If you have a TPM security chip in your system, which |
467 | diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c |
468 | index 361a1df..b366b34 100644 |
469 | --- a/drivers/char/tpm/tpm.c |
470 | +++ b/drivers/char/tpm/tpm.c |
471 | @@ -1115,12 +1115,13 @@ ssize_t tpm_read(struct file *file, char __user *buf, |
472 | ret_size = atomic_read(&chip->data_pending); |
473 | atomic_set(&chip->data_pending, 0); |
474 | if (ret_size > 0) { /* relay data */ |
475 | + ssize_t orig_ret_size = ret_size; |
476 | if (size < ret_size) |
477 | ret_size = size; |
478 | |
479 | mutex_lock(&chip->buffer_mutex); |
480 | rc = copy_to_user(buf, chip->data_buffer, ret_size); |
481 | - memset(chip->data_buffer, 0, ret_size); |
482 | + memset(chip->data_buffer, 0, orig_ret_size); |
483 | if (rc) |
484 | ret_size = -EFAULT; |
485 | |
486 | diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c |
487 | index 7f5f0da..0a0225a 100644 |
488 | --- a/drivers/firewire/ohci.c |
489 | +++ b/drivers/firewire/ohci.c |
490 | @@ -2748,7 +2748,7 @@ static int handle_ir_buffer_fill(struct context *context, |
491 | container_of(context, struct iso_context, context); |
492 | u32 buffer_dma; |
493 | |
494 | - if (!last->transfer_status) |
495 | + if (last->res_count != 0) |
496 | /* Descriptor(s) not done yet, stop iteration */ |
497 | return 0; |
498 | |
499 | @@ -2762,8 +2762,7 @@ static int handle_ir_buffer_fill(struct context *context, |
500 | if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) |
501 | ctx->base.callback.mc(&ctx->base, |
502 | le32_to_cpu(last->data_address) + |
503 | - le16_to_cpu(last->req_count) - |
504 | - le16_to_cpu(last->res_count), |
505 | + le16_to_cpu(last->req_count), |
506 | ctx->base.callback_data); |
507 | |
508 | return 1; |
509 | diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c |
510 | index df0d595..3d00016 100644 |
511 | --- a/drivers/gpio/gpio-davinci.c |
512 | +++ b/drivers/gpio/gpio-davinci.c |
513 | @@ -313,10 +313,16 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset) |
514 | return -ENODEV; |
515 | } |
516 | |
517 | -static int gpio_irq_type_unbanked(struct irq_data *d, unsigned trigger) |
518 | +static int gpio_irq_type_unbanked(struct irq_data *data, unsigned trigger) |
519 | { |
520 | - struct davinci_gpio_regs __iomem *g = irq2regs(d->irq); |
521 | - u32 mask = (u32) irq_data_get_irq_handler_data(d); |
522 | + struct davinci_gpio_controller *d; |
523 | + struct davinci_gpio_regs __iomem *g; |
524 | + struct davinci_soc_info *soc_info = &davinci_soc_info; |
525 | + u32 mask; |
526 | + |
527 | + d = (struct davinci_gpio_controller *)data->handler_data; |
528 | + g = (struct davinci_gpio_regs __iomem *)d->regs; |
529 | + mask = __gpio_mask(data->irq - soc_info->gpio_irq); |
530 | |
531 | if (trigger & ~(IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
532 | return -EINVAL; |
533 | @@ -380,7 +386,7 @@ static int __init davinci_gpio_irq_setup(void) |
534 | * IRQ mux conflicts; gpio_irq_type_unbanked() is only for GPIOs. |
535 | */ |
536 | if (soc_info->gpio_unbanked) { |
537 | - static struct irq_chip gpio_irqchip_unbanked; |
538 | + static struct irq_chip_type gpio_unbanked; |
539 | |
540 | /* pass "bank 0" GPIO IRQs to AINTC */ |
541 | chips[0].chip.to_irq = gpio_to_irq_unbanked; |
542 | @@ -388,9 +394,10 @@ static int __init davinci_gpio_irq_setup(void) |
543 | |
544 | /* AINTC handles mask/unmask; GPIO handles triggering */ |
545 | irq = bank_irq; |
546 | - gpio_irqchip_unbanked = *irq_get_chip(irq); |
547 | - gpio_irqchip_unbanked.name = "GPIO-AINTC"; |
548 | - gpio_irqchip_unbanked.irq_set_type = gpio_irq_type_unbanked; |
549 | + gpio_unbanked = *container_of(irq_get_chip(irq), |
550 | + struct irq_chip_type, chip); |
551 | + gpio_unbanked.chip.name = "GPIO-AINTC"; |
552 | + gpio_unbanked.chip.irq_set_type = gpio_irq_type_unbanked; |
553 | |
554 | /* default trigger: both edges */ |
555 | g = gpio2regs(0); |
556 | @@ -399,9 +406,8 @@ static int __init davinci_gpio_irq_setup(void) |
557 | |
558 | /* set the direct IRQs up to use that irqchip */ |
559 | for (gpio = 0; gpio < soc_info->gpio_unbanked; gpio++, irq++) { |
560 | - irq_set_chip(irq, &gpio_irqchip_unbanked); |
561 | - irq_set_handler_data(irq, (void *)__gpio_mask(gpio)); |
562 | - irq_set_chip_data(irq, (__force void *)g); |
563 | + irq_set_chip(irq, &gpio_unbanked.chip); |
564 | + irq_set_handler_data(irq, &chips[gpio / 32]); |
565 | irq_set_status_flags(irq, IRQ_TYPE_EDGE_BOTH); |
566 | } |
567 | |
568 | diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c |
569 | index 0b05629..a6c10e8 100644 |
570 | --- a/drivers/gpio/gpio-omap.c |
571 | +++ b/drivers/gpio/gpio-omap.c |
572 | @@ -508,7 +508,10 @@ static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) |
573 | |
574 | static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable) |
575 | { |
576 | - _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); |
577 | + if (enable) |
578 | + _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); |
579 | + else |
580 | + _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); |
581 | } |
582 | |
583 | /* |
584 | diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c |
585 | index e2d85a9..d04597d 100644 |
586 | --- a/drivers/gpu/drm/i915/i915_drv.c |
587 | +++ b/drivers/gpu/drm/i915/i915_drv.c |
588 | @@ -442,6 +442,10 @@ static int i915_drm_freeze(struct drm_device *dev) |
589 | /* Modeset on resume, not lid events */ |
590 | dev_priv->modeset_on_lid = 0; |
591 | |
592 | + console_lock(); |
593 | + intel_fbdev_set_suspend(dev, 1); |
594 | + console_unlock(); |
595 | + |
596 | return 0; |
597 | } |
598 | |
599 | @@ -514,6 +518,9 @@ static int i915_drm_thaw(struct drm_device *dev) |
600 | |
601 | dev_priv->modeset_on_lid = 0; |
602 | |
603 | + console_lock(); |
604 | + intel_fbdev_set_suspend(dev, 0); |
605 | + console_unlock(); |
606 | return error; |
607 | } |
608 | |
609 | diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c |
610 | index 8359dc7..3e7c478 100644 |
611 | --- a/drivers/gpu/drm/i915/i915_gem.c |
612 | +++ b/drivers/gpu/drm/i915/i915_gem.c |
613 | @@ -3084,10 +3084,13 @@ i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj) |
614 | return ret; |
615 | } |
616 | |
617 | + ret = i915_gem_object_wait_rendering(obj); |
618 | + if (ret) |
619 | + return ret; |
620 | + |
621 | /* Ensure that we invalidate the GPU's caches and TLBs. */ |
622 | obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS; |
623 | - |
624 | - return i915_gem_object_wait_rendering(obj); |
625 | + return 0; |
626 | } |
627 | |
628 | /** |
629 | diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h |
630 | index a1b4343..83e820e 100644 |
631 | --- a/drivers/gpu/drm/i915/intel_drv.h |
632 | +++ b/drivers/gpu/drm/i915/intel_drv.h |
633 | @@ -364,7 +364,7 @@ extern int intel_framebuffer_init(struct drm_device *dev, |
634 | struct drm_i915_gem_object *obj); |
635 | extern int intel_fbdev_init(struct drm_device *dev); |
636 | extern void intel_fbdev_fini(struct drm_device *dev); |
637 | - |
638 | +extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); |
639 | extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
640 | extern void intel_finish_page_flip(struct drm_device *dev, int pipe); |
641 | extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane); |
642 | diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c |
643 | index ec49bae..d0ce34b 100644 |
644 | --- a/drivers/gpu/drm/i915/intel_fb.c |
645 | +++ b/drivers/gpu/drm/i915/intel_fb.c |
646 | @@ -257,6 +257,16 @@ void intel_fbdev_fini(struct drm_device *dev) |
647 | kfree(dev_priv->fbdev); |
648 | dev_priv->fbdev = NULL; |
649 | } |
650 | + |
651 | +void intel_fbdev_set_suspend(struct drm_device *dev, int state) |
652 | +{ |
653 | + drm_i915_private_t *dev_priv = dev->dev_private; |
654 | + if (!dev_priv->fbdev) |
655 | + return; |
656 | + |
657 | + fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); |
658 | +} |
659 | + |
660 | MODULE_LICENSE("GPL and additional rights"); |
661 | |
662 | void intel_fb_output_poll_changed(struct drm_device *dev) |
663 | diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c |
664 | index 5082d17..8e1532f 100644 |
665 | --- a/drivers/gpu/drm/radeon/radeon_atombios.c |
666 | +++ b/drivers/gpu/drm/radeon/radeon_atombios.c |
667 | @@ -442,6 +442,20 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, |
668 | struct radeon_device *rdev = dev->dev_private; |
669 | *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93); |
670 | } |
671 | + |
672 | + /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ |
673 | + if ((dev->pdev->device == 0x9802) && |
674 | + (dev->pdev->subsystem_vendor == 0x1734) && |
675 | + (dev->pdev->subsystem_device == 0x11bd)) { |
676 | + if (*connector_type == DRM_MODE_CONNECTOR_VGA) { |
677 | + *connector_type = DRM_MODE_CONNECTOR_DVII; |
678 | + *line_mux = 0x3103; |
679 | + } else if (*connector_type == DRM_MODE_CONNECTOR_DVID) { |
680 | + *connector_type = DRM_MODE_CONNECTOR_DVII; |
681 | + } |
682 | + } |
683 | + |
684 | + |
685 | return true; |
686 | } |
687 | |
688 | diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c |
689 | index e7cb3ab..f7d39ac 100644 |
690 | --- a/drivers/gpu/drm/radeon/radeon_connectors.c |
691 | +++ b/drivers/gpu/drm/radeon/radeon_connectors.c |
692 | @@ -946,6 +946,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) |
693 | |
694 | encoder = obj_to_encoder(obj); |
695 | |
696 | + if (encoder->encoder_type != DRM_MODE_ENCODER_DAC || |
697 | + encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) |
698 | + continue; |
699 | + |
700 | encoder_funcs = encoder->helper_private; |
701 | if (encoder_funcs->detect) { |
702 | if (ret != connector_status_connected) { |
703 | diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c |
704 | index fde25c0..986d608 100644 |
705 | --- a/drivers/gpu/drm/radeon/radeon_cursor.c |
706 | +++ b/drivers/gpu/drm/radeon/radeon_cursor.c |
707 | @@ -151,7 +151,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
708 | uint32_t height) |
709 | { |
710 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
711 | + struct radeon_device *rdev = crtc->dev->dev_private; |
712 | struct drm_gem_object *obj; |
713 | + struct radeon_bo *robj; |
714 | uint64_t gpu_addr; |
715 | int ret; |
716 | |
717 | @@ -173,7 +175,15 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
718 | return -ENOENT; |
719 | } |
720 | |
721 | - ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
722 | + robj = gem_to_radeon_bo(obj); |
723 | + ret = radeon_bo_reserve(robj, false); |
724 | + if (unlikely(ret != 0)) |
725 | + goto fail; |
726 | + /* Only 27 bit offset for legacy cursor */ |
727 | + ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM, |
728 | + ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, |
729 | + &gpu_addr); |
730 | + radeon_bo_unreserve(robj); |
731 | if (ret) |
732 | goto fail; |
733 | |
734 | @@ -181,7 +191,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc, |
735 | radeon_crtc->cursor_height = height; |
736 | |
737 | radeon_lock_cursor(crtc, true); |
738 | - /* XXX only 27 bit offset for legacy cursor */ |
739 | radeon_set_cursor(crtc, obj, gpu_addr); |
740 | radeon_show_cursor(crtc); |
741 | radeon_lock_cursor(crtc, false); |
742 | diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c |
743 | index 1c85152..f3ae607 100644 |
744 | --- a/drivers/gpu/drm/radeon/radeon_object.c |
745 | +++ b/drivers/gpu/drm/radeon/radeon_object.c |
746 | @@ -204,7 +204,8 @@ void radeon_bo_unref(struct radeon_bo **bo) |
747 | *bo = NULL; |
748 | } |
749 | |
750 | -int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
751 | +int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, |
752 | + u64 *gpu_addr) |
753 | { |
754 | int r, i; |
755 | |
756 | @@ -212,6 +213,7 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
757 | bo->pin_count++; |
758 | if (gpu_addr) |
759 | *gpu_addr = radeon_bo_gpu_offset(bo); |
760 | + WARN_ON_ONCE(max_offset != 0); |
761 | return 0; |
762 | } |
763 | radeon_ttm_placement_from_domain(bo, domain); |
764 | @@ -219,6 +221,15 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
765 | /* force to pin into visible video ram */ |
766 | bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; |
767 | } |
768 | + if (max_offset) { |
769 | + u64 lpfn = max_offset >> PAGE_SHIFT; |
770 | + |
771 | + if (!bo->placement.lpfn) |
772 | + bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; |
773 | + |
774 | + if (lpfn < bo->placement.lpfn) |
775 | + bo->placement.lpfn = lpfn; |
776 | + } |
777 | for (i = 0; i < bo->placement.num_placement; i++) |
778 | bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; |
779 | r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false); |
780 | @@ -232,6 +243,11 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
781 | return r; |
782 | } |
783 | |
784 | +int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) |
785 | +{ |
786 | + return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); |
787 | +} |
788 | + |
789 | int radeon_bo_unpin(struct radeon_bo *bo) |
790 | { |
791 | int r, i; |
792 | diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h |
793 | index b07f0f9..fb3f433 100644 |
794 | --- a/drivers/gpu/drm/radeon/radeon_object.h |
795 | +++ b/drivers/gpu/drm/radeon/radeon_object.h |
796 | @@ -108,6 +108,8 @@ extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr); |
797 | extern void radeon_bo_kunmap(struct radeon_bo *bo); |
798 | extern void radeon_bo_unref(struct radeon_bo **bo); |
799 | extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); |
800 | +extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, |
801 | + u64 max_offset, u64 *gpu_addr); |
802 | extern int radeon_bo_unpin(struct radeon_bo *bo); |
803 | extern int radeon_bo_evict_vram(struct radeon_device *rdev); |
804 | extern void radeon_bo_force_delete(struct radeon_device *rdev); |
805 | diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c |
806 | index 8965ad9..b99af34 100644 |
807 | --- a/drivers/hid/hid-chicony.c |
808 | +++ b/drivers/hid/hid-chicony.c |
809 | @@ -45,6 +45,12 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, |
810 | case 0xff09: ch_map_key_clear(BTN_9); break; |
811 | case 0xff0a: ch_map_key_clear(BTN_A); break; |
812 | case 0xff0b: ch_map_key_clear(BTN_B); break; |
813 | + case 0x00f1: ch_map_key_clear(KEY_WLAN); break; |
814 | + case 0x00f2: ch_map_key_clear(KEY_BRIGHTNESSDOWN); break; |
815 | + case 0x00f3: ch_map_key_clear(KEY_BRIGHTNESSUP); break; |
816 | + case 0x00f4: ch_map_key_clear(KEY_DISPLAY_OFF); break; |
817 | + case 0x00f7: ch_map_key_clear(KEY_CAMERA); break; |
818 | + case 0x00f8: ch_map_key_clear(KEY_PROG1); break; |
819 | default: |
820 | return 0; |
821 | } |
822 | @@ -53,6 +59,7 @@ static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, |
823 | |
824 | static const struct hid_device_id ch_devices[] = { |
825 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, |
826 | + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, |
827 | { } |
828 | }; |
829 | MODULE_DEVICE_TABLE(hid, ch_devices); |
830 | diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c |
831 | index bb656d8..c27b402 100644 |
832 | --- a/drivers/hid/hid-core.c |
833 | +++ b/drivers/hid/hid-core.c |
834 | @@ -1394,6 +1394,7 @@ static const struct hid_device_id hid_have_special_driver[] = { |
835 | { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, |
836 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, |
837 | { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, |
838 | + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, |
839 | { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) }, |
840 | { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, |
841 | { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, |
842 | diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h |
843 | index 3c3daec..fba3fc4 100644 |
844 | --- a/drivers/hid/hid-ids.h |
845 | +++ b/drivers/hid/hid-ids.h |
846 | @@ -192,6 +192,7 @@ |
847 | #define USB_DEVICE_ID_CHICONY_TACTICAL_PAD 0x0418 |
848 | #define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d |
849 | #define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 |
850 | +#define USB_DEVICE_ID_CHICONY_WIRELESS2 0x1123 |
851 | |
852 | #define USB_VENDOR_ID_CHUNGHWAT 0x2247 |
853 | #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 |
854 | diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c |
855 | index 523f8fb..930370d 100644 |
856 | --- a/drivers/hwmon/fam15h_power.c |
857 | +++ b/drivers/hwmon/fam15h_power.c |
858 | @@ -60,7 +60,7 @@ static ssize_t show_power(struct device *dev, |
859 | pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), |
860 | REG_TDP_RUNNING_AVERAGE, &val); |
861 | running_avg_capture = (val >> 4) & 0x3fffff; |
862 | - running_avg_capture = sign_extend32(running_avg_capture, 22); |
863 | + running_avg_capture = sign_extend32(running_avg_capture, 21); |
864 | running_avg_range = val & 0xf; |
865 | |
866 | pci_bus_read_config_dword(f4->bus, PCI_DEVFN(PCI_SLOT(f4->devfn), 5), |
867 | diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c |
868 | index 7e7373a..d5f3b69 100644 |
869 | --- a/drivers/infiniband/ulp/iser/iscsi_iser.c |
870 | +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c |
871 | @@ -364,6 +364,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, |
872 | } |
873 | ib_conn = ep->dd_data; |
874 | |
875 | + if (iser_alloc_rx_descriptors(ib_conn)) |
876 | + return -ENOMEM; |
877 | + |
878 | /* binds the iSER connection retrieved from the previously |
879 | * connected ep_handle to the iSCSI layer connection. exchanges |
880 | * connection pointers */ |
881 | @@ -398,19 +401,6 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) |
882 | iser_conn->ib_conn = NULL; |
883 | } |
884 | |
885 | -static int |
886 | -iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) |
887 | -{ |
888 | - struct iscsi_conn *conn = cls_conn->dd_data; |
889 | - int err; |
890 | - |
891 | - err = iser_conn_set_full_featured_mode(conn); |
892 | - if (err) |
893 | - return err; |
894 | - |
895 | - return iscsi_conn_start(cls_conn); |
896 | -} |
897 | - |
898 | static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) |
899 | { |
900 | struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); |
901 | @@ -724,7 +714,7 @@ static struct iscsi_transport iscsi_iser_transport = { |
902 | .get_conn_param = iscsi_conn_get_param, |
903 | .get_ep_param = iscsi_iser_get_ep_param, |
904 | .get_session_param = iscsi_session_get_param, |
905 | - .start_conn = iscsi_iser_conn_start, |
906 | + .start_conn = iscsi_conn_start, |
907 | .stop_conn = iscsi_iser_conn_stop, |
908 | /* iscsi host params */ |
909 | .get_host_param = iscsi_host_get_param, |
910 | diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h |
911 | index db7ea37..296be43 100644 |
912 | --- a/drivers/infiniband/ulp/iser/iscsi_iser.h |
913 | +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h |
914 | @@ -366,4 +366,5 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, |
915 | void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task); |
916 | int iser_initialize_task_headers(struct iscsi_task *task, |
917 | struct iser_tx_desc *tx_desc); |
918 | +int iser_alloc_rx_descriptors(struct iser_conn *ib_conn); |
919 | #endif |
920 | diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c |
921 | index a607542..738a149 100644 |
922 | --- a/drivers/infiniband/ulp/iser/iser_initiator.c |
923 | +++ b/drivers/infiniband/ulp/iser/iser_initiator.c |
924 | @@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn, |
925 | } |
926 | |
927 | |
928 | -static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) |
929 | +int iser_alloc_rx_descriptors(struct iser_conn *ib_conn) |
930 | { |
931 | int i, j; |
932 | u64 dma_addr; |
933 | @@ -242,23 +242,24 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) |
934 | kfree(ib_conn->rx_descs); |
935 | } |
936 | |
937 | -/** |
938 | - * iser_conn_set_full_featured_mode - (iSER API) |
939 | - */ |
940 | -int iser_conn_set_full_featured_mode(struct iscsi_conn *conn) |
941 | +static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) |
942 | { |
943 | struct iscsi_iser_conn *iser_conn = conn->dd_data; |
944 | |
945 | - iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); |
946 | - |
947 | - /* Check that there is no posted recv or send buffers left - */ |
948 | - /* they must be consumed during the login phase */ |
949 | - BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0); |
950 | - BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); |
951 | + iser_dbg("req op %x flags %x\n", req->opcode, req->flags); |
952 | + /* check if this is the last login - going to full feature phase */ |
953 | + if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) |
954 | + return 0; |
955 | |
956 | - if (iser_alloc_rx_descriptors(iser_conn->ib_conn)) |
957 | - return -ENOMEM; |
958 | + /* |
959 | + * Check that there is one posted recv buffer (for the last login |
960 | + * response) and no posted send buffers left - they must have been |
961 | + * consumed during previous login phases. |
962 | + */ |
963 | + WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1); |
964 | + WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0); |
965 | |
966 | + iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX); |
967 | /* Initial post receive buffers */ |
968 | if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX)) |
969 | return -ENOMEM; |
970 | @@ -438,6 +439,9 @@ int iser_send_control(struct iscsi_conn *conn, |
971 | err = iser_post_recvl(iser_conn->ib_conn); |
972 | if (err) |
973 | goto send_control_error; |
974 | + err = iser_post_rx_bufs(conn, task->hdr); |
975 | + if (err) |
976 | + goto send_control_error; |
977 | } |
978 | |
979 | err = iser_post_send(iser_conn->ib_conn, mdesc); |
980 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
981 | index e0b3e33..966a6e7 100644 |
982 | --- a/drivers/iommu/amd_iommu.c |
983 | +++ b/drivers/iommu/amd_iommu.c |
984 | @@ -2432,7 +2432,7 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask) |
985 | * we don't need to preallocate the protection domains anymore. |
986 | * For now we have to. |
987 | */ |
988 | -static void prealloc_protection_domains(void) |
989 | +static void __init prealloc_protection_domains(void) |
990 | { |
991 | struct pci_dev *dev = NULL; |
992 | struct dma_ops_domain *dma_dom; |
993 | diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c |
994 | index 6d03774..2a8722b 100644 |
995 | --- a/drivers/md/bitmap.c |
996 | +++ b/drivers/md/bitmap.c |
997 | @@ -1904,6 +1904,8 @@ location_store(struct mddev *mddev, const char *buf, size_t len) |
998 | if (mddev->pers) { |
999 | mddev->pers->quiesce(mddev, 1); |
1000 | rv = bitmap_create(mddev); |
1001 | + if (!rv) |
1002 | + rv = bitmap_load(mddev); |
1003 | if (rv) { |
1004 | bitmap_destroy(mddev); |
1005 | mddev->bitmap_info.offset = 0; |
1006 | diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c |
1007 | index 8c2a000..58d8c6d 100644 |
1008 | --- a/drivers/md/dm-crypt.c |
1009 | +++ b/drivers/md/dm-crypt.c |
1010 | @@ -176,7 +176,6 @@ struct crypt_config { |
1011 | |
1012 | #define MIN_IOS 16 |
1013 | #define MIN_POOL_PAGES 32 |
1014 | -#define MIN_BIO_PAGES 8 |
1015 | |
1016 | static struct kmem_cache *_crypt_io_pool; |
1017 | |
1018 | @@ -848,12 +847,11 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, |
1019 | } |
1020 | |
1021 | /* |
1022 | - * if additional pages cannot be allocated without waiting, |
1023 | - * return a partially allocated bio, the caller will then try |
1024 | - * to allocate additional bios while submitting this partial bio |
1025 | + * If additional pages cannot be allocated without waiting, |
1026 | + * return a partially-allocated bio. The caller will then try |
1027 | + * to allocate more bios while submitting this partial bio. |
1028 | */ |
1029 | - if (i == (MIN_BIO_PAGES - 1)) |
1030 | - gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
1031 | + gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; |
1032 | |
1033 | len = (size > PAGE_SIZE) ? PAGE_SIZE : size; |
1034 | |
1035 | @@ -1046,16 +1044,14 @@ static void kcryptd_queue_io(struct dm_crypt_io *io) |
1036 | queue_work(cc->io_queue, &io->work); |
1037 | } |
1038 | |
1039 | -static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, |
1040 | - int error, int async) |
1041 | +static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) |
1042 | { |
1043 | struct bio *clone = io->ctx.bio_out; |
1044 | struct crypt_config *cc = io->target->private; |
1045 | |
1046 | - if (unlikely(error < 0)) { |
1047 | + if (unlikely(io->error < 0)) { |
1048 | crypt_free_buffer_pages(cc, clone); |
1049 | bio_put(clone); |
1050 | - io->error = -EIO; |
1051 | crypt_dec_pending(io); |
1052 | return; |
1053 | } |
1054 | @@ -1106,12 +1102,16 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
1055 | sector += bio_sectors(clone); |
1056 | |
1057 | crypt_inc_pending(io); |
1058 | + |
1059 | r = crypt_convert(cc, &io->ctx); |
1060 | + if (r < 0) |
1061 | + io->error = -EIO; |
1062 | + |
1063 | crypt_finished = atomic_dec_and_test(&io->ctx.pending); |
1064 | |
1065 | /* Encryption was already finished, submit io now */ |
1066 | if (crypt_finished) { |
1067 | - kcryptd_crypt_write_io_submit(io, r, 0); |
1068 | + kcryptd_crypt_write_io_submit(io, 0); |
1069 | |
1070 | /* |
1071 | * If there was an error, do not try next fragments. |
1072 | @@ -1162,11 +1162,8 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) |
1073 | crypt_dec_pending(io); |
1074 | } |
1075 | |
1076 | -static void kcryptd_crypt_read_done(struct dm_crypt_io *io, int error) |
1077 | +static void kcryptd_crypt_read_done(struct dm_crypt_io *io) |
1078 | { |
1079 | - if (unlikely(error < 0)) |
1080 | - io->error = -EIO; |
1081 | - |
1082 | crypt_dec_pending(io); |
1083 | } |
1084 | |
1085 | @@ -1181,9 +1178,11 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) |
1086 | io->sector); |
1087 | |
1088 | r = crypt_convert(cc, &io->ctx); |
1089 | + if (r < 0) |
1090 | + io->error = -EIO; |
1091 | |
1092 | if (atomic_dec_and_test(&io->ctx.pending)) |
1093 | - kcryptd_crypt_read_done(io, r); |
1094 | + kcryptd_crypt_read_done(io); |
1095 | |
1096 | crypt_dec_pending(io); |
1097 | } |
1098 | @@ -1204,15 +1203,18 @@ static void kcryptd_async_done(struct crypto_async_request *async_req, |
1099 | if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post) |
1100 | error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq); |
1101 | |
1102 | + if (error < 0) |
1103 | + io->error = -EIO; |
1104 | + |
1105 | mempool_free(req_of_dmreq(cc, dmreq), cc->req_pool); |
1106 | |
1107 | if (!atomic_dec_and_test(&ctx->pending)) |
1108 | return; |
1109 | |
1110 | if (bio_data_dir(io->base_bio) == READ) |
1111 | - kcryptd_crypt_read_done(io, error); |
1112 | + kcryptd_crypt_read_done(io); |
1113 | else |
1114 | - kcryptd_crypt_write_io_submit(io, error, 1); |
1115 | + kcryptd_crypt_write_io_submit(io, 1); |
1116 | } |
1117 | |
1118 | static void kcryptd_crypt(struct work_struct *work) |
1119 | diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c |
1120 | index 042e719..aa70f7d 100644 |
1121 | --- a/drivers/md/dm-exception-store.c |
1122 | +++ b/drivers/md/dm-exception-store.c |
1123 | @@ -283,7 +283,7 @@ int dm_exception_store_init(void) |
1124 | return 0; |
1125 | |
1126 | persistent_fail: |
1127 | - dm_persistent_snapshot_exit(); |
1128 | + dm_transient_snapshot_exit(); |
1129 | transient_fail: |
1130 | return r; |
1131 | } |
1132 | diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c |
1133 | index c308757..da2f021 100644 |
1134 | --- a/drivers/md/dm-thin.c |
1135 | +++ b/drivers/md/dm-thin.c |
1136 | @@ -124,7 +124,7 @@ struct cell { |
1137 | struct hlist_node list; |
1138 | struct bio_prison *prison; |
1139 | struct cell_key key; |
1140 | - unsigned count; |
1141 | + struct bio *holder; |
1142 | struct bio_list bios; |
1143 | }; |
1144 | |
1145 | @@ -220,55 +220,60 @@ static struct cell *__search_bucket(struct hlist_head *bucket, |
1146 | * This may block if a new cell needs allocating. You must ensure that |
1147 | * cells will be unlocked even if the calling thread is blocked. |
1148 | * |
1149 | - * Returns the number of entries in the cell prior to the new addition |
1150 | - * or < 0 on failure. |
1151 | + * Returns 1 if the cell was already held, 0 if @inmate is the new holder. |
1152 | */ |
1153 | static int bio_detain(struct bio_prison *prison, struct cell_key *key, |
1154 | struct bio *inmate, struct cell **ref) |
1155 | { |
1156 | - int r; |
1157 | + int r = 1; |
1158 | unsigned long flags; |
1159 | uint32_t hash = hash_key(prison, key); |
1160 | - struct cell *uninitialized_var(cell), *cell2 = NULL; |
1161 | + struct cell *cell, *cell2; |
1162 | |
1163 | BUG_ON(hash > prison->nr_buckets); |
1164 | |
1165 | spin_lock_irqsave(&prison->lock, flags); |
1166 | + |
1167 | cell = __search_bucket(prison->cells + hash, key); |
1168 | + if (cell) { |
1169 | + bio_list_add(&cell->bios, inmate); |
1170 | + goto out; |
1171 | + } |
1172 | |
1173 | - if (!cell) { |
1174 | - /* |
1175 | - * Allocate a new cell |
1176 | - */ |
1177 | - spin_unlock_irqrestore(&prison->lock, flags); |
1178 | - cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO); |
1179 | - spin_lock_irqsave(&prison->lock, flags); |
1180 | + /* |
1181 | + * Allocate a new cell |
1182 | + */ |
1183 | + spin_unlock_irqrestore(&prison->lock, flags); |
1184 | + cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO); |
1185 | + spin_lock_irqsave(&prison->lock, flags); |
1186 | |
1187 | - /* |
1188 | - * We've been unlocked, so we have to double check that |
1189 | - * nobody else has inserted this cell in the meantime. |
1190 | - */ |
1191 | - cell = __search_bucket(prison->cells + hash, key); |
1192 | + /* |
1193 | + * We've been unlocked, so we have to double check that |
1194 | + * nobody else has inserted this cell in the meantime. |
1195 | + */ |
1196 | + cell = __search_bucket(prison->cells + hash, key); |
1197 | + if (cell) { |
1198 | + mempool_free(cell2, prison->cell_pool); |
1199 | + bio_list_add(&cell->bios, inmate); |
1200 | + goto out; |
1201 | + } |
1202 | + |
1203 | + /* |
1204 | + * Use new cell. |
1205 | + */ |
1206 | + cell = cell2; |
1207 | |
1208 | - if (!cell) { |
1209 | - cell = cell2; |
1210 | - cell2 = NULL; |
1211 | + cell->prison = prison; |
1212 | + memcpy(&cell->key, key, sizeof(cell->key)); |
1213 | + cell->holder = inmate; |
1214 | + bio_list_init(&cell->bios); |
1215 | + hlist_add_head(&cell->list, prison->cells + hash); |
1216 | |
1217 | - cell->prison = prison; |
1218 | - memcpy(&cell->key, key, sizeof(cell->key)); |
1219 | - cell->count = 0; |
1220 | - bio_list_init(&cell->bios); |
1221 | - hlist_add_head(&cell->list, prison->cells + hash); |
1222 | - } |
1223 | - } |
1224 | + r = 0; |
1225 | |
1226 | - r = cell->count++; |
1227 | - bio_list_add(&cell->bios, inmate); |
1228 | +out: |
1229 | spin_unlock_irqrestore(&prison->lock, flags); |
1230 | |
1231 | - if (cell2) |
1232 | - mempool_free(cell2, prison->cell_pool); |
1233 | - |
1234 | *ref = cell; |
1235 | |
1236 | return r; |
1237 | @@ -283,8 +288,8 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates) |
1238 | |
1239 | hlist_del(&cell->list); |
1240 | |
1241 | - if (inmates) |
1242 | - bio_list_merge(inmates, &cell->bios); |
1243 | + bio_list_add(inmates, cell->holder); |
1244 | + bio_list_merge(inmates, &cell->bios); |
1245 | |
1246 | mempool_free(cell, prison->cell_pool); |
1247 | } |
1248 | @@ -305,22 +310,44 @@ static void cell_release(struct cell *cell, struct bio_list *bios) |
1249 | * bio may be in the cell. This function releases the cell, and also does |
1250 | * a sanity check. |
1251 | */ |
1252 | +static void __cell_release_singleton(struct cell *cell, struct bio *bio) |
1253 | +{ |
1254 | + hlist_del(&cell->list); |
1255 | + BUG_ON(cell->holder != bio); |
1256 | + BUG_ON(!bio_list_empty(&cell->bios)); |
1257 | +} |
1258 | + |
1259 | static void cell_release_singleton(struct cell *cell, struct bio *bio) |
1260 | { |
1261 | - struct bio_prison *prison = cell->prison; |
1262 | - struct bio_list bios; |
1263 | - struct bio *b; |
1264 | unsigned long flags; |
1265 | - |
1266 | - bio_list_init(&bios); |
1267 | + struct bio_prison *prison = cell->prison; |
1268 | |
1269 | spin_lock_irqsave(&prison->lock, flags); |
1270 | - __cell_release(cell, &bios); |
1271 | + __cell_release_singleton(cell, bio); |
1272 | spin_unlock_irqrestore(&prison->lock, flags); |
1273 | +} |
1274 | + |
1275 | +/* |
1276 | + * Sometimes we don't want the holder, just the additional bios. |
1277 | + */ |
1278 | +static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates) |
1279 | +{ |
1280 | + struct bio_prison *prison = cell->prison; |
1281 | |
1282 | - b = bio_list_pop(&bios); |
1283 | - BUG_ON(b != bio); |
1284 | - BUG_ON(!bio_list_empty(&bios)); |
1285 | + hlist_del(&cell->list); |
1286 | + bio_list_merge(inmates, &cell->bios); |
1287 | + |
1288 | + mempool_free(cell, prison->cell_pool); |
1289 | +} |
1290 | + |
1291 | +static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates) |
1292 | +{ |
1293 | + unsigned long flags; |
1294 | + struct bio_prison *prison = cell->prison; |
1295 | + |
1296 | + spin_lock_irqsave(&prison->lock, flags); |
1297 | + __cell_release_no_holder(cell, inmates); |
1298 | + spin_unlock_irqrestore(&prison->lock, flags); |
1299 | } |
1300 | |
1301 | static void cell_error(struct cell *cell) |
1302 | @@ -800,21 +827,16 @@ static void cell_defer(struct thin_c *tc, struct cell *cell, |
1303 | * Same as cell_defer above, except it omits one particular detainee, |
1304 | * a write bio that covers the block and has already been processed. |
1305 | */ |
1306 | -static void cell_defer_except(struct thin_c *tc, struct cell *cell, |
1307 | - struct bio *exception) |
1308 | +static void cell_defer_except(struct thin_c *tc, struct cell *cell) |
1309 | { |
1310 | struct bio_list bios; |
1311 | - struct bio *bio; |
1312 | struct pool *pool = tc->pool; |
1313 | unsigned long flags; |
1314 | |
1315 | bio_list_init(&bios); |
1316 | - cell_release(cell, &bios); |
1317 | |
1318 | spin_lock_irqsave(&pool->lock, flags); |
1319 | - while ((bio = bio_list_pop(&bios))) |
1320 | - if (bio != exception) |
1321 | - bio_list_add(&pool->deferred_bios, bio); |
1322 | + cell_release_no_holder(cell, &pool->deferred_bios); |
1323 | spin_unlock_irqrestore(&pool->lock, flags); |
1324 | |
1325 | wake_worker(pool); |
1326 | @@ -854,7 +876,7 @@ static void process_prepared_mapping(struct new_mapping *m) |
1327 | * the bios in the cell. |
1328 | */ |
1329 | if (bio) { |
1330 | - cell_defer_except(tc, m->cell, bio); |
1331 | + cell_defer_except(tc, m->cell); |
1332 | bio_endio(bio, 0); |
1333 | } else |
1334 | cell_defer(tc, m->cell, m->data_block); |
1335 | diff --git a/drivers/md/md.c b/drivers/md/md.c |
1336 | index f47f1f8..6f37aa4 100644 |
1337 | --- a/drivers/md/md.c |
1338 | +++ b/drivers/md/md.c |
1339 | @@ -1801,13 +1801,13 @@ retry: |
1340 | | BB_LEN(internal_bb)); |
1341 | *bbp++ = cpu_to_le64(store_bb); |
1342 | } |
1343 | + bb->changed = 0; |
1344 | if (read_seqretry(&bb->lock, seq)) |
1345 | goto retry; |
1346 | |
1347 | bb->sector = (rdev->sb_start + |
1348 | (int)le32_to_cpu(sb->bblog_offset)); |
1349 | bb->size = le16_to_cpu(sb->bblog_size); |
1350 | - bb->changed = 0; |
1351 | } |
1352 | } |
1353 | |
1354 | @@ -2362,6 +2362,7 @@ repeat: |
1355 | clear_bit(MD_CHANGE_PENDING, &mddev->flags); |
1356 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
1357 | if (rdev->badblocks.changed) { |
1358 | + rdev->badblocks.changed = 0; |
1359 | md_ack_all_badblocks(&rdev->badblocks); |
1360 | md_error(mddev, rdev); |
1361 | } |
1362 | @@ -8097,30 +8098,23 @@ static int md_notify_reboot(struct notifier_block *this, |
1363 | struct mddev *mddev; |
1364 | int need_delay = 0; |
1365 | |
1366 | - if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { |
1367 | - |
1368 | - printk(KERN_INFO "md: stopping all md devices.\n"); |
1369 | - |
1370 | - for_each_mddev(mddev, tmp) { |
1371 | - if (mddev_trylock(mddev)) { |
1372 | - /* Force a switch to readonly even array |
1373 | - * appears to still be in use. Hence |
1374 | - * the '100'. |
1375 | - */ |
1376 | - md_set_readonly(mddev, 100); |
1377 | - mddev_unlock(mddev); |
1378 | - } |
1379 | - need_delay = 1; |
1380 | + for_each_mddev(mddev, tmp) { |
1381 | + if (mddev_trylock(mddev)) { |
1382 | + __md_stop_writes(mddev); |
1383 | + mddev->safemode = 2; |
1384 | + mddev_unlock(mddev); |
1385 | } |
1386 | - /* |
1387 | - * certain more exotic SCSI devices are known to be |
1388 | - * volatile wrt too early system reboots. While the |
1389 | - * right place to handle this issue is the given |
1390 | - * driver, we do want to have a safe RAID driver ... |
1391 | - */ |
1392 | - if (need_delay) |
1393 | - mdelay(1000*1); |
1394 | + need_delay = 1; |
1395 | } |
1396 | + /* |
1397 | + * certain more exotic SCSI devices are known to be |
1398 | + * volatile wrt too early system reboots. While the |
1399 | + * right place to handle this issue is the given |
1400 | + * driver, we do want to have a safe RAID driver ... |
1401 | + */ |
1402 | + if (need_delay) |
1403 | + mdelay(1000*1); |
1404 | + |
1405 | return NOTIFY_DONE; |
1406 | } |
1407 | |
1408 | diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c |
1409 | index 023fbc2..1a35caf 100644 |
1410 | --- a/drivers/md/persistent-data/dm-btree-remove.c |
1411 | +++ b/drivers/md/persistent-data/dm-btree-remove.c |
1412 | @@ -128,18 +128,9 @@ static void delete_at(struct node *n, unsigned index) |
1413 | n->header.nr_entries = cpu_to_le32(nr_entries - 1); |
1414 | } |
1415 | |
1416 | -static unsigned del_threshold(struct node *n) |
1417 | -{ |
1418 | - return le32_to_cpu(n->header.max_entries) / 3; |
1419 | -} |
1420 | - |
1421 | static unsigned merge_threshold(struct node *n) |
1422 | { |
1423 | - /* |
1424 | - * The extra one is because we know we're potentially going to |
1425 | - * delete an entry. |
1426 | - */ |
1427 | - return 2 * (le32_to_cpu(n->header.max_entries) / 3) + 1; |
1428 | + return le32_to_cpu(n->header.max_entries) / 3; |
1429 | } |
1430 | |
1431 | struct child { |
1432 | @@ -188,6 +179,15 @@ static int exit_child(struct dm_btree_info *info, struct child *c) |
1433 | |
1434 | static void shift(struct node *left, struct node *right, int count) |
1435 | { |
1436 | + uint32_t nr_left = le32_to_cpu(left->header.nr_entries); |
1437 | + uint32_t nr_right = le32_to_cpu(right->header.nr_entries); |
1438 | + uint32_t max_entries = le32_to_cpu(left->header.max_entries); |
1439 | + uint32_t r_max_entries = le32_to_cpu(right->header.max_entries); |
1440 | + |
1441 | + BUG_ON(max_entries != r_max_entries); |
1442 | + BUG_ON(nr_left - count > max_entries); |
1443 | + BUG_ON(nr_right + count > max_entries); |
1444 | + |
1445 | if (!count) |
1446 | return; |
1447 | |
1448 | @@ -199,13 +199,8 @@ static void shift(struct node *left, struct node *right, int count) |
1449 | node_shift(right, count); |
1450 | } |
1451 | |
1452 | - left->header.nr_entries = |
1453 | - cpu_to_le32(le32_to_cpu(left->header.nr_entries) - count); |
1454 | - BUG_ON(le32_to_cpu(left->header.nr_entries) > le32_to_cpu(left->header.max_entries)); |
1455 | - |
1456 | - right->header.nr_entries = |
1457 | - cpu_to_le32(le32_to_cpu(right->header.nr_entries) + count); |
1458 | - BUG_ON(le32_to_cpu(right->header.nr_entries) > le32_to_cpu(right->header.max_entries)); |
1459 | + left->header.nr_entries = cpu_to_le32(nr_left - count); |
1460 | + right->header.nr_entries = cpu_to_le32(nr_right + count); |
1461 | } |
1462 | |
1463 | static void __rebalance2(struct dm_btree_info *info, struct node *parent, |
1464 | @@ -215,8 +210,9 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent, |
1465 | struct node *right = r->n; |
1466 | uint32_t nr_left = le32_to_cpu(left->header.nr_entries); |
1467 | uint32_t nr_right = le32_to_cpu(right->header.nr_entries); |
1468 | + unsigned threshold = 2 * merge_threshold(left) + 1; |
1469 | |
1470 | - if (nr_left + nr_right <= merge_threshold(left)) { |
1471 | + if (nr_left + nr_right < threshold) { |
1472 | /* |
1473 | * Merge |
1474 | */ |
1475 | @@ -234,9 +230,6 @@ static void __rebalance2(struct dm_btree_info *info, struct node *parent, |
1476 | * Rebalance. |
1477 | */ |
1478 | unsigned target_left = (nr_left + nr_right) / 2; |
1479 | - unsigned shift_ = nr_left - target_left; |
1480 | - BUG_ON(le32_to_cpu(left->header.max_entries) <= nr_left - shift_); |
1481 | - BUG_ON(le32_to_cpu(right->header.max_entries) <= nr_right + shift_); |
1482 | shift(left, right, nr_left - target_left); |
1483 | *key_ptr(parent, r->index) = right->keys[0]; |
1484 | } |
1485 | @@ -272,6 +265,84 @@ static int rebalance2(struct shadow_spine *s, struct dm_btree_info *info, |
1486 | return exit_child(info, &right); |
1487 | } |
1488 | |
1489 | +/* |
1490 | + * We dump as many entries from center as possible into left, then the rest |
1491 | + * in right, then rebalance2. This wastes some cpu, but I want something |
1492 | + * simple atm. |
1493 | + */ |
1494 | +static void delete_center_node(struct dm_btree_info *info, struct node *parent, |
1495 | + struct child *l, struct child *c, struct child *r, |
1496 | + struct node *left, struct node *center, struct node *right, |
1497 | + uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) |
1498 | +{ |
1499 | + uint32_t max_entries = le32_to_cpu(left->header.max_entries); |
1500 | + unsigned shift = min(max_entries - nr_left, nr_center); |
1501 | + |
1502 | + BUG_ON(nr_left + shift > max_entries); |
1503 | + node_copy(left, center, -shift); |
1504 | + left->header.nr_entries = cpu_to_le32(nr_left + shift); |
1505 | + |
1506 | + if (shift != nr_center) { |
1507 | + shift = nr_center - shift; |
1508 | + BUG_ON((nr_right + shift) > max_entries); |
1509 | + node_shift(right, shift); |
1510 | + node_copy(center, right, shift); |
1511 | + right->header.nr_entries = cpu_to_le32(nr_right + shift); |
1512 | + } |
1513 | + *key_ptr(parent, r->index) = right->keys[0]; |
1514 | + |
1515 | + delete_at(parent, c->index); |
1516 | + r->index--; |
1517 | + |
1518 | + dm_tm_dec(info->tm, dm_block_location(c->block)); |
1519 | + __rebalance2(info, parent, l, r); |
1520 | +} |
1521 | + |
1522 | +/* |
1523 | + * Redistributes entries among 3 sibling nodes. |
1524 | + */ |
1525 | +static void redistribute3(struct dm_btree_info *info, struct node *parent, |
1526 | + struct child *l, struct child *c, struct child *r, |
1527 | + struct node *left, struct node *center, struct node *right, |
1528 | + uint32_t nr_left, uint32_t nr_center, uint32_t nr_right) |
1529 | +{ |
1530 | + int s; |
1531 | + uint32_t max_entries = le32_to_cpu(left->header.max_entries); |
1532 | + unsigned target = (nr_left + nr_center + nr_right) / 3; |
1533 | + BUG_ON(target > max_entries); |
1534 | + |
1535 | + if (nr_left < nr_right) { |
1536 | + s = nr_left - target; |
1537 | + |
1538 | + if (s < 0 && nr_center < -s) { |
1539 | + /* not enough in central node */ |
1540 | + shift(left, center, nr_center); |
1541 | + s = nr_center - target; |
1542 | + shift(left, right, s); |
1543 | + nr_right += s; |
1544 | + } else |
1545 | + shift(left, center, s); |
1546 | + |
1547 | + shift(center, right, target - nr_right); |
1548 | + |
1549 | + } else { |
1550 | + s = target - nr_right; |
1551 | + if (s > 0 && nr_center < s) { |
1552 | + /* not enough in central node */ |
1553 | + shift(center, right, nr_center); |
1554 | + s = target - nr_center; |
1555 | + shift(left, right, s); |
1556 | + nr_left -= s; |
1557 | + } else |
1558 | + shift(center, right, s); |
1559 | + |
1560 | + shift(left, center, nr_left - target); |
1561 | + } |
1562 | + |
1563 | + *key_ptr(parent, c->index) = center->keys[0]; |
1564 | + *key_ptr(parent, r->index) = right->keys[0]; |
1565 | +} |
1566 | + |
1567 | static void __rebalance3(struct dm_btree_info *info, struct node *parent, |
1568 | struct child *l, struct child *c, struct child *r) |
1569 | { |
1570 | @@ -282,62 +353,18 @@ static void __rebalance3(struct dm_btree_info *info, struct node *parent, |
1571 | uint32_t nr_left = le32_to_cpu(left->header.nr_entries); |
1572 | uint32_t nr_center = le32_to_cpu(center->header.nr_entries); |
1573 | uint32_t nr_right = le32_to_cpu(right->header.nr_entries); |
1574 | - uint32_t max_entries = le32_to_cpu(left->header.max_entries); |
1575 | |
1576 | - unsigned target; |
1577 | + unsigned threshold = merge_threshold(left) * 4 + 1; |
1578 | |
1579 | BUG_ON(left->header.max_entries != center->header.max_entries); |
1580 | BUG_ON(center->header.max_entries != right->header.max_entries); |
1581 | |
1582 | - if (((nr_left + nr_center + nr_right) / 2) < merge_threshold(center)) { |
1583 | - /* |
1584 | - * Delete center node: |
1585 | - * |
1586 | - * We dump as many entries from center as possible into |
1587 | - * left, then the rest in right, then rebalance2. This |
1588 | - * wastes some cpu, but I want something simple atm. |
1589 | - */ |
1590 | - unsigned shift = min(max_entries - nr_left, nr_center); |
1591 | - |
1592 | - BUG_ON(nr_left + shift > max_entries); |
1593 | - node_copy(left, center, -shift); |
1594 | - left->header.nr_entries = cpu_to_le32(nr_left + shift); |
1595 | - |
1596 | - if (shift != nr_center) { |
1597 | - shift = nr_center - shift; |
1598 | - BUG_ON((nr_right + shift) >= max_entries); |
1599 | - node_shift(right, shift); |
1600 | - node_copy(center, right, shift); |
1601 | - right->header.nr_entries = cpu_to_le32(nr_right + shift); |
1602 | - } |
1603 | - *key_ptr(parent, r->index) = right->keys[0]; |
1604 | - |
1605 | - delete_at(parent, c->index); |
1606 | - r->index--; |
1607 | - |
1608 | - dm_tm_dec(info->tm, dm_block_location(c->block)); |
1609 | - __rebalance2(info, parent, l, r); |
1610 | - |
1611 | - return; |
1612 | - } |
1613 | - |
1614 | - /* |
1615 | - * Rebalance |
1616 | - */ |
1617 | - target = (nr_left + nr_center + nr_right) / 3; |
1618 | - BUG_ON(target > max_entries); |
1619 | - |
1620 | - /* |
1621 | - * Adjust the left node |
1622 | - */ |
1623 | - shift(left, center, nr_left - target); |
1624 | - |
1625 | - /* |
1626 | - * Adjust the right node |
1627 | - */ |
1628 | - shift(center, right, target - nr_right); |
1629 | - *key_ptr(parent, c->index) = center->keys[0]; |
1630 | - *key_ptr(parent, r->index) = right->keys[0]; |
1631 | + if ((nr_left + nr_center + nr_right) < threshold) |
1632 | + delete_center_node(info, parent, l, c, r, left, center, right, |
1633 | + nr_left, nr_center, nr_right); |
1634 | + else |
1635 | + redistribute3(info, parent, l, c, r, left, center, right, |
1636 | + nr_left, nr_center, nr_right); |
1637 | } |
1638 | |
1639 | static int rebalance3(struct shadow_spine *s, struct dm_btree_info *info, |
1640 | @@ -441,9 +468,6 @@ static int rebalance_children(struct shadow_spine *s, |
1641 | if (r) |
1642 | return r; |
1643 | |
1644 | - if (child_entries > del_threshold(n)) |
1645 | - return 0; |
1646 | - |
1647 | has_left_sibling = i > 0; |
1648 | has_right_sibling = i < (le32_to_cpu(n->header.nr_entries) - 1); |
1649 | |
1650 | diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c |
1651 | index 7d9e071..7af60ec 100644 |
1652 | --- a/drivers/md/raid1.c |
1653 | +++ b/drivers/md/raid1.c |
1654 | @@ -731,9 +731,22 @@ static void wait_barrier(struct r1conf *conf) |
1655 | spin_lock_irq(&conf->resync_lock); |
1656 | if (conf->barrier) { |
1657 | conf->nr_waiting++; |
1658 | - wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
1659 | + /* Wait for the barrier to drop. |
1660 | + * However if there are already pending |
1661 | + * requests (preventing the barrier from |
1662 | + * rising completely), and the |
1663 | + * pre-process bio queue isn't empty, |
1664 | + * then don't wait, as we need to empty |
1665 | + * that queue to get the nr_pending |
1666 | + * count down. |
1667 | + */ |
1668 | + wait_event_lock_irq(conf->wait_barrier, |
1669 | + !conf->barrier || |
1670 | + (conf->nr_pending && |
1671 | + current->bio_list && |
1672 | + !bio_list_empty(current->bio_list)), |
1673 | conf->resync_lock, |
1674 | - ); |
1675 | + ); |
1676 | conf->nr_waiting--; |
1677 | } |
1678 | conf->nr_pending++; |
1679 | diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c |
1680 | index 685ddf3..b219449 100644 |
1681 | --- a/drivers/md/raid10.c |
1682 | +++ b/drivers/md/raid10.c |
1683 | @@ -790,9 +790,22 @@ static void wait_barrier(struct r10conf *conf) |
1684 | spin_lock_irq(&conf->resync_lock); |
1685 | if (conf->barrier) { |
1686 | conf->nr_waiting++; |
1687 | - wait_event_lock_irq(conf->wait_barrier, !conf->barrier, |
1688 | + /* Wait for the barrier to drop. |
1689 | + * However if there are already pending |
1690 | + * requests (preventing the barrier from |
1691 | + * rising completely), and the |
1692 | + * pre-process bio queue isn't empty, |
1693 | + * then don't wait, as we need to empty |
1694 | + * that queue to get the nr_pending |
1695 | + * count down. |
1696 | + */ |
1697 | + wait_event_lock_irq(conf->wait_barrier, |
1698 | + !conf->barrier || |
1699 | + (conf->nr_pending && |
1700 | + current->bio_list && |
1701 | + !bio_list_empty(current->bio_list)), |
1702 | conf->resync_lock, |
1703 | - ); |
1704 | + ); |
1705 | conf->nr_waiting--; |
1706 | } |
1707 | conf->nr_pending++; |
1708 | diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c |
1709 | index b5c98da..bc6ea9f 100644 |
1710 | --- a/drivers/media/dvb/dvb-usb/mxl111sf.c |
1711 | +++ b/drivers/media/dvb/dvb-usb/mxl111sf.c |
1712 | @@ -351,15 +351,13 @@ static int mxl111sf_ep6_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) |
1713 | adap_state->ep6_clockphase, |
1714 | 0, 0); |
1715 | mxl_fail(ret); |
1716 | +#if 0 |
1717 | } else { |
1718 | ret = mxl111sf_disable_656_port(state); |
1719 | mxl_fail(ret); |
1720 | +#endif |
1721 | } |
1722 | |
1723 | - mxl111sf_read_reg(state, 0x12, &tmp); |
1724 | - tmp &= ~0x04; |
1725 | - mxl111sf_write_reg(state, 0x12, tmp); |
1726 | - |
1727 | return ret; |
1728 | } |
1729 | |
1730 | diff --git a/drivers/media/dvb/frontends/lgdt330x.c b/drivers/media/dvb/frontends/lgdt330x.c |
1731 | index 43971e6..aa63d68 100644 |
1732 | --- a/drivers/media/dvb/frontends/lgdt330x.c |
1733 | +++ b/drivers/media/dvb/frontends/lgdt330x.c |
1734 | @@ -104,8 +104,8 @@ static int i2c_write_demod_bytes (struct lgdt330x_state* state, |
1735 | * then reads the data returned for (len) bytes. |
1736 | */ |
1737 | |
1738 | -static u8 i2c_read_demod_bytes (struct lgdt330x_state* state, |
1739 | - enum I2C_REG reg, u8* buf, int len) |
1740 | +static int i2c_read_demod_bytes(struct lgdt330x_state *state, |
1741 | + enum I2C_REG reg, u8 *buf, int len) |
1742 | { |
1743 | u8 wr [] = { reg }; |
1744 | struct i2c_msg msg [] = { |
1745 | @@ -118,6 +118,8 @@ static u8 i2c_read_demod_bytes (struct lgdt330x_state* state, |
1746 | ret = i2c_transfer(state->i2c, msg, 2); |
1747 | if (ret != 2) { |
1748 | printk(KERN_WARNING "lgdt330x: %s: addr 0x%02x select 0x%02x error (ret == %i)\n", __func__, state->config->demod_address, reg, ret); |
1749 | + if (ret >= 0) |
1750 | + ret = -EIO; |
1751 | } else { |
1752 | ret = 0; |
1753 | } |
1754 | diff --git a/drivers/media/video/pvrusb2/pvrusb2-devattr.c b/drivers/media/video/pvrusb2/pvrusb2-devattr.c |
1755 | index c6da8f7..d8c8982 100644 |
1756 | --- a/drivers/media/video/pvrusb2/pvrusb2-devattr.c |
1757 | +++ b/drivers/media/video/pvrusb2/pvrusb2-devattr.c |
1758 | @@ -320,7 +320,17 @@ static struct tda829x_config tda829x_no_probe = { |
1759 | .probe_tuner = TDA829X_DONT_PROBE, |
1760 | }; |
1761 | |
1762 | +static struct tda18271_std_map hauppauge_tda18271_dvbt_std_map = { |
1763 | + .dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4, |
1764 | + .if_lvl = 1, .rfagc_top = 0x37, }, |
1765 | + .dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5, |
1766 | + .if_lvl = 1, .rfagc_top = 0x37, }, |
1767 | + .dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6, |
1768 | + .if_lvl = 1, .rfagc_top = 0x37, }, |
1769 | +}; |
1770 | + |
1771 | static struct tda18271_config hauppauge_tda18271_dvb_config = { |
1772 | + .std_map = &hauppauge_tda18271_dvbt_std_map, |
1773 | .gate = TDA18271_GATE_ANALOG, |
1774 | .output_opt = TDA18271_OUTPUT_LT_OFF, |
1775 | }; |
1776 | diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c |
1777 | index 0cb17d9..b99318e 100644 |
1778 | --- a/drivers/mtd/ubi/scan.c |
1779 | +++ b/drivers/mtd/ubi/scan.c |
1780 | @@ -1174,7 +1174,7 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi) |
1781 | |
1782 | ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); |
1783 | if (!ech) |
1784 | - goto out_slab; |
1785 | + goto out_si; |
1786 | |
1787 | vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL); |
1788 | if (!vidh) |
1789 | @@ -1235,8 +1235,6 @@ out_vidh: |
1790 | ubi_free_vid_hdr(ubi, vidh); |
1791 | out_ech: |
1792 | kfree(ech); |
1793 | -out_slab: |
1794 | - kmem_cache_destroy(si->scan_leb_slab); |
1795 | out_si: |
1796 | ubi_scan_destroy_si(si); |
1797 | return ERR_PTR(err); |
1798 | @@ -1325,7 +1323,9 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si) |
1799 | } |
1800 | } |
1801 | |
1802 | - kmem_cache_destroy(si->scan_leb_slab); |
1803 | + if (si->scan_leb_slab) |
1804 | + kmem_cache_destroy(si->scan_leb_slab); |
1805 | + |
1806 | kfree(si); |
1807 | } |
1808 | |
1809 | diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c |
1810 | index 0696e36..cf42971 100644 |
1811 | --- a/drivers/mtd/ubi/wl.c |
1812 | +++ b/drivers/mtd/ubi/wl.c |
1813 | @@ -389,7 +389,7 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max) |
1814 | */ |
1815 | int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) |
1816 | { |
1817 | - int err, medium_ec; |
1818 | + int err; |
1819 | struct ubi_wl_entry *e, *first, *last; |
1820 | |
1821 | ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || |
1822 | @@ -427,7 +427,7 @@ retry: |
1823 | * For unknown data we pick a physical eraseblock with medium |
1824 | * erase counter. But we by no means can pick a physical |
1825 | * eraseblock with erase counter greater or equivalent than the |
1826 | - * lowest erase counter plus %WL_FREE_MAX_DIFF. |
1827 | + * lowest erase counter plus %WL_FREE_MAX_DIFF/2. |
1828 | */ |
1829 | first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, |
1830 | u.rb); |
1831 | @@ -436,10 +436,8 @@ retry: |
1832 | if (last->ec - first->ec < WL_FREE_MAX_DIFF) |
1833 | e = rb_entry(ubi->free.rb_node, |
1834 | struct ubi_wl_entry, u.rb); |
1835 | - else { |
1836 | - medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; |
1837 | - e = find_wl_entry(&ubi->free, medium_ec); |
1838 | - } |
1839 | + else |
1840 | + e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF/2); |
1841 | break; |
1842 | case UBI_SHORTTERM: |
1843 | /* |
1844 | diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h |
1845 | index 9fe18d1..f478a22 100644 |
1846 | --- a/drivers/net/ethernet/intel/e1000e/e1000.h |
1847 | +++ b/drivers/net/ethernet/intel/e1000e/e1000.h |
1848 | @@ -309,6 +309,7 @@ struct e1000_adapter { |
1849 | u32 txd_cmd; |
1850 | |
1851 | bool detect_tx_hung; |
1852 | + bool tx_hang_recheck; |
1853 | u8 tx_timeout_factor; |
1854 | |
1855 | u32 tx_int_delay; |
1856 | diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c |
1857 | index a855db1..4e933d1 100644 |
1858 | --- a/drivers/net/ethernet/intel/e1000e/netdev.c |
1859 | +++ b/drivers/net/ethernet/intel/e1000e/netdev.c |
1860 | @@ -1030,6 +1030,7 @@ static void e1000_print_hw_hang(struct work_struct *work) |
1861 | struct e1000_adapter *adapter = container_of(work, |
1862 | struct e1000_adapter, |
1863 | print_hang_task); |
1864 | + struct net_device *netdev = adapter->netdev; |
1865 | struct e1000_ring *tx_ring = adapter->tx_ring; |
1866 | unsigned int i = tx_ring->next_to_clean; |
1867 | unsigned int eop = tx_ring->buffer_info[i].next_to_watch; |
1868 | @@ -1041,6 +1042,21 @@ static void e1000_print_hw_hang(struct work_struct *work) |
1869 | if (test_bit(__E1000_DOWN, &adapter->state)) |
1870 | return; |
1871 | |
1872 | + if (!adapter->tx_hang_recheck && |
1873 | + (adapter->flags2 & FLAG2_DMA_BURST)) { |
1874 | + /* May be block on write-back, flush and detect again |
1875 | + * flush pending descriptor writebacks to memory |
1876 | + */ |
1877 | + ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); |
1878 | + /* execute the writes immediately */ |
1879 | + e1e_flush(); |
1880 | + adapter->tx_hang_recheck = true; |
1881 | + return; |
1882 | + } |
1883 | + /* Real hang detected */ |
1884 | + adapter->tx_hang_recheck = false; |
1885 | + netif_stop_queue(netdev); |
1886 | + |
1887 | e1e_rphy(hw, PHY_STATUS, &phy_status); |
1888 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); |
1889 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); |
1890 | @@ -1154,10 +1170,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter) |
1891 | if (tx_ring->buffer_info[i].time_stamp && |
1892 | time_after(jiffies, tx_ring->buffer_info[i].time_stamp |
1893 | + (adapter->tx_timeout_factor * HZ)) && |
1894 | - !(er32(STATUS) & E1000_STATUS_TXOFF)) { |
1895 | + !(er32(STATUS) & E1000_STATUS_TXOFF)) |
1896 | schedule_work(&adapter->print_hang_task); |
1897 | - netif_stop_queue(netdev); |
1898 | - } |
1899 | + else |
1900 | + adapter->tx_hang_recheck = false; |
1901 | } |
1902 | adapter->total_tx_bytes += total_tx_bytes; |
1903 | adapter->total_tx_packets += total_tx_packets; |
1904 | @@ -3782,6 +3798,7 @@ static int e1000_open(struct net_device *netdev) |
1905 | |
1906 | e1000_irq_enable(adapter); |
1907 | |
1908 | + adapter->tx_hang_recheck = false; |
1909 | netif_start_queue(netdev); |
1910 | |
1911 | adapter->idle_check = true; |
1912 | diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c |
1913 | index 7803efa..f612b35 100644 |
1914 | --- a/drivers/net/ethernet/marvell/sky2.c |
1915 | +++ b/drivers/net/ethernet/marvell/sky2.c |
1916 | @@ -95,6 +95,10 @@ static int disable_msi = 0; |
1917 | module_param(disable_msi, int, 0); |
1918 | MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)"); |
1919 | |
1920 | +static int legacy_pme = 0; |
1921 | +module_param(legacy_pme, int, 0); |
1922 | +MODULE_PARM_DESC(legacy_pme, "Legacy power management"); |
1923 | + |
1924 | static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = { |
1925 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ |
1926 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ |
1927 | @@ -867,6 +871,13 @@ static void sky2_wol_init(struct sky2_port *sky2) |
1928 | /* Disable PiG firmware */ |
1929 | sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF); |
1930 | |
1931 | + /* Needed by some broken BIOSes, use PCI rather than PCI-e for WOL */ |
1932 | + if (legacy_pme) { |
1933 | + u32 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); |
1934 | + reg1 |= PCI_Y2_PME_LEGACY; |
1935 | + sky2_pci_write32(hw, PCI_DEV_REG1, reg1); |
1936 | + } |
1937 | + |
1938 | /* block receiver */ |
1939 | sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); |
1940 | sky2_read32(hw, B0_CTST); |
1941 | diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c |
1942 | index 81b96e3..750e330 100644 |
1943 | --- a/drivers/net/usb/usbnet.c |
1944 | +++ b/drivers/net/usb/usbnet.c |
1945 | @@ -589,6 +589,14 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) |
1946 | entry = (struct skb_data *) skb->cb; |
1947 | urb = entry->urb; |
1948 | |
1949 | + /* |
1950 | + * Get reference count of the URB to avoid it to be |
1951 | + * freed during usb_unlink_urb, which may trigger |
1952 | + * use-after-free problem inside usb_unlink_urb since |
1953 | + * usb_unlink_urb is always racing with .complete |
1954 | + * handler(include defer_bh). |
1955 | + */ |
1956 | + usb_get_urb(urb); |
1957 | spin_unlock_irqrestore(&q->lock, flags); |
1958 | // during some PM-driven resume scenarios, |
1959 | // these (async) unlinks complete immediately |
1960 | @@ -597,6 +605,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q) |
1961 | netdev_dbg(dev->net, "unlink urb err, %d\n", retval); |
1962 | else |
1963 | count++; |
1964 | + usb_put_urb(urb); |
1965 | spin_lock_irqsave(&q->lock, flags); |
1966 | } |
1967 | spin_unlock_irqrestore (&q->lock, flags); |
1968 | @@ -1028,7 +1037,6 @@ static void tx_complete (struct urb *urb) |
1969 | } |
1970 | |
1971 | usb_autopm_put_interface_async(dev->intf); |
1972 | - urb->dev = NULL; |
1973 | entry->state = tx_done; |
1974 | defer_bh(dev, skb, &dev->txq); |
1975 | } |
1976 | diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c |
1977 | index fcf5416..3d75d4c 100644 |
1978 | --- a/drivers/net/wireless/iwlwifi/iwl-core.c |
1979 | +++ b/drivers/net/wireless/iwlwifi/iwl-core.c |
1980 | @@ -1787,20 +1787,10 @@ void iwl_bg_watchdog(unsigned long data) |
1981 | if (timeout == 0) |
1982 | return; |
1983 | |
1984 | - /* monitor and check for stuck cmd queue */ |
1985 | - if (iwl_check_stuck_queue(priv, priv->shrd->cmd_queue)) |
1986 | - return; |
1987 | - |
1988 | - /* monitor and check for other stuck queues */ |
1989 | - if (iwl_is_any_associated(priv)) { |
1990 | - for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) { |
1991 | - /* skip as we already checked the command queue */ |
1992 | - if (cnt == priv->shrd->cmd_queue) |
1993 | - continue; |
1994 | - if (iwl_check_stuck_queue(priv, cnt)) |
1995 | - return; |
1996 | - } |
1997 | - } |
1998 | + /* monitor and check for stuck queues */ |
1999 | + for (cnt = 0; cnt < hw_params(priv).max_txq_num; cnt++) |
2000 | + if (iwl_check_stuck_queue(priv, cnt)) |
2001 | + return; |
2002 | |
2003 | mod_timer(&priv->watchdog, jiffies + |
2004 | msecs_to_jiffies(IWL_WD_TICK(timeout))); |
2005 | diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c |
2006 | index 78d0d69..428401b 100644 |
2007 | --- a/drivers/net/wireless/p54/p54spi.c |
2008 | +++ b/drivers/net/wireless/p54/p54spi.c |
2009 | @@ -622,19 +622,19 @@ static int __devinit p54spi_probe(struct spi_device *spi) |
2010 | ret = spi_setup(spi); |
2011 | if (ret < 0) { |
2012 | dev_err(&priv->spi->dev, "spi_setup failed"); |
2013 | - goto err_free_common; |
2014 | + goto err_free; |
2015 | } |
2016 | |
2017 | ret = gpio_request(p54spi_gpio_power, "p54spi power"); |
2018 | if (ret < 0) { |
2019 | dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret); |
2020 | - goto err_free_common; |
2021 | + goto err_free; |
2022 | } |
2023 | |
2024 | ret = gpio_request(p54spi_gpio_irq, "p54spi irq"); |
2025 | if (ret < 0) { |
2026 | dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret); |
2027 | - goto err_free_common; |
2028 | + goto err_free_gpio_power; |
2029 | } |
2030 | |
2031 | gpio_direction_output(p54spi_gpio_power, 0); |
2032 | @@ -645,7 +645,7 @@ static int __devinit p54spi_probe(struct spi_device *spi) |
2033 | priv->spi); |
2034 | if (ret < 0) { |
2035 | dev_err(&priv->spi->dev, "request_irq() failed"); |
2036 | - goto err_free_common; |
2037 | + goto err_free_gpio_irq; |
2038 | } |
2039 | |
2040 | irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); |
2041 | @@ -677,6 +677,12 @@ static int __devinit p54spi_probe(struct spi_device *spi) |
2042 | return 0; |
2043 | |
2044 | err_free_common: |
2045 | + free_irq(gpio_to_irq(p54spi_gpio_irq), spi); |
2046 | +err_free_gpio_irq: |
2047 | + gpio_free(p54spi_gpio_irq); |
2048 | +err_free_gpio_power: |
2049 | + gpio_free(p54spi_gpio_power); |
2050 | +err_free: |
2051 | p54_free_common(priv->hw); |
2052 | return ret; |
2053 | } |
2054 | diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c |
2055 | index 3265b34..cb71e88 100644 |
2056 | --- a/drivers/net/wireless/rt2x00/rt2800usb.c |
2057 | +++ b/drivers/net/wireless/rt2x00/rt2800usb.c |
2058 | @@ -935,6 +935,7 @@ static struct usb_device_id rt2800usb_device_table[] = { |
2059 | { USB_DEVICE(0x07d1, 0x3c0f) }, |
2060 | { USB_DEVICE(0x07d1, 0x3c11) }, |
2061 | { USB_DEVICE(0x07d1, 0x3c16) }, |
2062 | + { USB_DEVICE(0x2001, 0x3c1b) }, |
2063 | /* Draytek */ |
2064 | { USB_DEVICE(0x07fa, 0x7712) }, |
2065 | /* DVICO */ |
2066 | diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c |
2067 | index eb61061..9afcad3 100644 |
2068 | --- a/drivers/net/wireless/rtlwifi/pci.c |
2069 | +++ b/drivers/net/wireless/rtlwifi/pci.c |
2070 | @@ -657,6 +657,8 @@ static void _rtl_receive_one(struct ieee80211_hw *hw, struct sk_buff *skb, |
2071 | return; |
2072 | |
2073 | uskb = dev_alloc_skb(skb->len + 128); |
2074 | + if (!uskb) |
2075 | + return; /* exit if allocation failed */ |
2076 | memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, sizeof(rx_status)); |
2077 | pdata = (u8 *)skb_put(uskb, skb->len); |
2078 | memcpy(pdata, skb->data, skb->len); |
2079 | @@ -1153,10 +1155,12 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw, |
2080 | ring->idx = (ring->idx + 1) % ring->entries; |
2081 | } |
2082 | |
2083 | - pci_free_consistent(rtlpci->pdev, |
2084 | - sizeof(*ring->desc) * ring->entries, |
2085 | - ring->desc, ring->dma); |
2086 | - ring->desc = NULL; |
2087 | + if (ring->desc) { |
2088 | + pci_free_consistent(rtlpci->pdev, |
2089 | + sizeof(*ring->desc) * ring->entries, |
2090 | + ring->desc, ring->dma); |
2091 | + ring->desc = NULL; |
2092 | + } |
2093 | } |
2094 | |
2095 | static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci) |
2096 | @@ -1180,12 +1184,14 @@ static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci) |
2097 | kfree_skb(skb); |
2098 | } |
2099 | |
2100 | - pci_free_consistent(rtlpci->pdev, |
2101 | + if (rtlpci->rx_ring[rx_queue_idx].desc) { |
2102 | + pci_free_consistent(rtlpci->pdev, |
2103 | sizeof(*rtlpci->rx_ring[rx_queue_idx]. |
2104 | desc) * rtlpci->rxringcount, |
2105 | rtlpci->rx_ring[rx_queue_idx].desc, |
2106 | rtlpci->rx_ring[rx_queue_idx].dma); |
2107 | - rtlpci->rx_ring[rx_queue_idx].desc = NULL; |
2108 | + rtlpci->rx_ring[rx_queue_idx].desc = NULL; |
2109 | + } |
2110 | } |
2111 | } |
2112 | |
2113 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c |
2114 | index 72a98ca..a004ad7 100644 |
2115 | --- a/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c |
2116 | +++ b/drivers/net/wireless/rtlwifi/rtl8192c/dm_common.c |
2117 | @@ -524,6 +524,10 @@ void rtl92c_dm_write_dig(struct ieee80211_hw *hw) |
2118 | dm_digtable.cur_igvalue, dm_digtable.pre_igvalue, |
2119 | dm_digtable.backoff_val)); |
2120 | |
2121 | + dm_digtable.cur_igvalue += 2; |
2122 | + if (dm_digtable.cur_igvalue > 0x3f) |
2123 | + dm_digtable.cur_igvalue = 0x3f; |
2124 | + |
2125 | if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) { |
2126 | rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f, |
2127 | dm_digtable.cur_igvalue); |
2128 | @@ -1219,13 +1223,18 @@ static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw) |
2129 | ("PreState = %d, CurState = %d\n", |
2130 | p_ra->pre_ratr_state, p_ra->ratr_state)); |
2131 | |
2132 | - rcu_read_lock(); |
2133 | - sta = ieee80211_find_sta(mac->vif, mac->bssid); |
2134 | + /* Only the PCI card uses sta in the update rate table |
2135 | + * callback routine */ |
2136 | + if (rtlhal->interface == INTF_PCI) { |
2137 | + rcu_read_lock(); |
2138 | + sta = ieee80211_find_sta(mac->vif, mac->bssid); |
2139 | + } |
2140 | rtlpriv->cfg->ops->update_rate_tbl(hw, sta, |
2141 | p_ra->ratr_state); |
2142 | |
2143 | p_ra->pre_ratr_state = p_ra->ratr_state; |
2144 | - rcu_read_unlock(); |
2145 | + if (rtlhal->interface == INTF_PCI) |
2146 | + rcu_read_unlock(); |
2147 | } |
2148 | } |
2149 | } |
2150 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c |
2151 | index 950c65a..13fc0f9 100644 |
2152 | --- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c |
2153 | +++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c |
2154 | @@ -752,6 +752,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) |
2155 | |
2156 | |
2157 | skb = dev_alloc_skb(totalpacketlen); |
2158 | + if (!skb) |
2159 | + return; |
2160 | memcpy((u8 *) skb_put(skb, totalpacketlen), |
2161 | &reserved_page_packet, totalpacketlen); |
2162 | |
2163 | diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c |
2164 | index 82f060b..c44757f 100644 |
2165 | --- a/drivers/net/wireless/rtlwifi/rtl8192de/fw.c |
2166 | +++ b/drivers/net/wireless/rtlwifi/rtl8192de/fw.c |
2167 | @@ -763,12 +763,16 @@ void rtl92d_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished) |
2168 | "rtl92d_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n", |
2169 | u1RsvdPageLoc, 3); |
2170 | skb = dev_alloc_skb(totalpacketlen); |
2171 | - memcpy((u8 *) skb_put(skb, totalpacketlen), &reserved_page_packet, |
2172 | - totalpacketlen); |
2173 | - rtstatus = _rtl92d_cmd_send_packet(hw, skb); |
2174 | + if (!skb) { |
2175 | + dlok = false; |
2176 | + } else { |
2177 | + memcpy((u8 *) skb_put(skb, totalpacketlen), |
2178 | + &reserved_page_packet, totalpacketlen); |
2179 | + rtstatus = _rtl92d_cmd_send_packet(hw, skb); |
2180 | |
2181 | - if (rtstatus) |
2182 | - dlok = true; |
2183 | + if (rtstatus) |
2184 | + dlok = true; |
2185 | + } |
2186 | if (dlok) { |
2187 | RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, |
2188 | ("Set RSVD page location to Fw.\n")); |
2189 | diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c |
2190 | index 54cb8a6..2b7bcc8 100644 |
2191 | --- a/drivers/net/wireless/rtlwifi/usb.c |
2192 | +++ b/drivers/net/wireless/rtlwifi/usb.c |
2193 | @@ -481,12 +481,14 @@ static void _rtl_usb_rx_process_noagg(struct ieee80211_hw *hw, |
2194 | u8 *pdata; |
2195 | |
2196 | uskb = dev_alloc_skb(skb->len + 128); |
2197 | - memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, |
2198 | - sizeof(rx_status)); |
2199 | - pdata = (u8 *)skb_put(uskb, skb->len); |
2200 | - memcpy(pdata, skb->data, skb->len); |
2201 | + if (uskb) { /* drop packet on allocation failure */ |
2202 | + memcpy(IEEE80211_SKB_RXCB(uskb), &rx_status, |
2203 | + sizeof(rx_status)); |
2204 | + pdata = (u8 *)skb_put(uskb, skb->len); |
2205 | + memcpy(pdata, skb->data, skb->len); |
2206 | + ieee80211_rx_irqsafe(hw, uskb); |
2207 | + } |
2208 | dev_kfree_skb_any(skb); |
2209 | - ieee80211_rx_irqsafe(hw, uskb); |
2210 | } else { |
2211 | dev_kfree_skb_any(skb); |
2212 | } |
2213 | diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c |
2214 | index 24f049e..2275162 100644 |
2215 | --- a/drivers/pci/pcie/aspm.c |
2216 | +++ b/drivers/pci/pcie/aspm.c |
2217 | @@ -500,9 +500,6 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) |
2218 | int pos; |
2219 | u32 reg32; |
2220 | |
2221 | - if (aspm_disabled) |
2222 | - return 0; |
2223 | - |
2224 | /* |
2225 | * Some functions in a slot might not all be PCIe functions, |
2226 | * very strange. Disable ASPM for the whole slot |
2227 | @@ -511,6 +508,16 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) |
2228 | pos = pci_pcie_cap(child); |
2229 | if (!pos) |
2230 | return -EINVAL; |
2231 | + |
2232 | + /* |
2233 | + * If ASPM is disabled then we're not going to change |
2234 | + * the BIOS state. It's safe to continue even if it's a |
2235 | + * pre-1.1 device |
2236 | + */ |
2237 | + |
2238 | + if (aspm_disabled) |
2239 | + continue; |
2240 | + |
2241 | /* |
2242 | * Disable ASPM for pre-1.1 PCIe device, we follow MS to use |
2243 | * RBER bit to determine if a function is 1.1 version device |
2244 | diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c |
2245 | index 8a1c031..565742b 100644 |
2246 | --- a/drivers/rtc/interface.c |
2247 | +++ b/drivers/rtc/interface.c |
2248 | @@ -445,6 +445,11 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) |
2249 | if (rtc->uie_rtctimer.enabled == enabled) |
2250 | goto out; |
2251 | |
2252 | + if (rtc->uie_unsupported) { |
2253 | + err = -EINVAL; |
2254 | + goto out; |
2255 | + } |
2256 | + |
2257 | if (enabled) { |
2258 | struct rtc_time tm; |
2259 | ktime_t now, onesec; |
2260 | @@ -763,6 +768,14 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer) |
2261 | return 0; |
2262 | } |
2263 | |
2264 | +static void rtc_alarm_disable(struct rtc_device *rtc) |
2265 | +{ |
2266 | + if (!rtc->ops || !rtc->ops->alarm_irq_enable) |
2267 | + return; |
2268 | + |
2269 | + rtc->ops->alarm_irq_enable(rtc->dev.parent, false); |
2270 | +} |
2271 | + |
2272 | /** |
2273 | * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue |
2274 | * @rtc rtc device |
2275 | @@ -784,8 +797,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer) |
2276 | struct rtc_wkalrm alarm; |
2277 | int err; |
2278 | next = timerqueue_getnext(&rtc->timerqueue); |
2279 | - if (!next) |
2280 | + if (!next) { |
2281 | + rtc_alarm_disable(rtc); |
2282 | return; |
2283 | + } |
2284 | alarm.time = rtc_ktime_to_tm(next->expires); |
2285 | alarm.enabled = 1; |
2286 | err = __rtc_set_alarm(rtc, &alarm); |
2287 | @@ -847,7 +862,8 @@ again: |
2288 | err = __rtc_set_alarm(rtc, &alarm); |
2289 | if (err == -ETIME) |
2290 | goto again; |
2291 | - } |
2292 | + } else |
2293 | + rtc_alarm_disable(rtc); |
2294 | |
2295 | mutex_unlock(&rtc->ops_lock); |
2296 | } |
2297 | diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c |
2298 | index da60915..0fc2d22 100644 |
2299 | --- a/drivers/rtc/rtc-mpc5121.c |
2300 | +++ b/drivers/rtc/rtc-mpc5121.c |
2301 | @@ -360,6 +360,8 @@ static int __devinit mpc5121_rtc_probe(struct platform_device *op) |
2302 | &mpc5200_rtc_ops, THIS_MODULE); |
2303 | } |
2304 | |
2305 | + rtc->rtc->uie_unsupported = 1; |
2306 | + |
2307 | if (IS_ERR(rtc->rtc)) { |
2308 | err = PTR_ERR(rtc->rtc); |
2309 | goto out_free_irq; |
2310 | diff --git a/drivers/staging/rtl8712/Kconfig b/drivers/staging/rtl8712/Kconfig |
2311 | index ea37473..6a43312 100644 |
2312 | --- a/drivers/staging/rtl8712/Kconfig |
2313 | +++ b/drivers/staging/rtl8712/Kconfig |
2314 | @@ -9,13 +9,6 @@ config R8712U |
2315 | This option adds the Realtek RTL8712 USB device such as the D-Link DWA-130. |
2316 | If built as a module, it will be called r8712u. |
2317 | |
2318 | -config R8712_AP |
2319 | - bool "Realtek RTL8712U AP code" |
2320 | - depends on R8712U |
2321 | - default N |
2322 | - ---help--- |
2323 | - This option allows the Realtek RTL8712 USB device to be an Access Point. |
2324 | - |
2325 | config R8712_TX_AGGR |
2326 | bool "Realtek RTL8712U Transmit Aggregation code" |
2327 | depends on R8712U && BROKEN |
2328 | diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c |
2329 | index 98a3d68..fb11743 100644 |
2330 | --- a/drivers/staging/rtl8712/os_intfs.c |
2331 | +++ b/drivers/staging/rtl8712/os_intfs.c |
2332 | @@ -476,8 +476,6 @@ static int netdev_close(struct net_device *pnetdev) |
2333 | r8712_free_assoc_resources(padapter); |
2334 | /*s2-4.*/ |
2335 | r8712_free_network_queue(padapter); |
2336 | - /* The interface is no longer Up: */ |
2337 | - padapter->bup = false; |
2338 | release_firmware(padapter->fw); |
2339 | /* never exit with a firmware callback pending */ |
2340 | wait_for_completion(&padapter->rtl8712_fw_ready); |
2341 | diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c |
2342 | index 507584b8..ef35bc2 100644 |
2343 | --- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c |
2344 | +++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c |
2345 | @@ -2380,13 +2380,7 @@ static struct iw_statistics *r871x_get_wireless_stats(struct net_device *dev) |
2346 | tmp_qual = padapter->recvpriv.signal; |
2347 | tmp_noise = padapter->recvpriv.noise; |
2348 | piwstats->qual.level = tmp_level; |
2349 | - /*piwstats->qual.qual = tmp_qual; |
2350 | - * The NetworkManager of Fedora 10, 13 will use the link |
2351 | - * quality for its display. |
2352 | - * So, use the fw_rssi on link quality variable because |
2353 | - * fw_rssi will be updated per 2 seconds. |
2354 | - */ |
2355 | - piwstats->qual.qual = tmp_level; |
2356 | + piwstats->qual.qual = tmp_qual; |
2357 | piwstats->qual.noise = tmp_noise; |
2358 | } |
2359 | piwstats->qual.updated = IW_QUAL_ALL_UPDATED; |
2360 | diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c |
2361 | index 64f5696..1247b3d 100644 |
2362 | --- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c |
2363 | +++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c |
2364 | @@ -42,9 +42,8 @@ static void _init_stainfo(struct sta_info *psta) |
2365 | _init_listhead(&psta->hash_list); |
2366 | _r8712_init_sta_xmit_priv(&psta->sta_xmitpriv); |
2367 | _r8712_init_sta_recv_priv(&psta->sta_recvpriv); |
2368 | -#ifdef CONFIG_R8712_AP |
2369 | + _init_listhead(&psta->asoc_list); |
2370 | _init_listhead(&psta->auth_list); |
2371 | -#endif |
2372 | } |
2373 | |
2374 | u32 _r8712_init_sta_priv(struct sta_priv *pstapriv) |
2375 | @@ -71,10 +70,8 @@ u32 _r8712_init_sta_priv(struct sta_priv *pstapriv) |
2376 | get_list_head(&pstapriv->free_sta_queue)); |
2377 | psta++; |
2378 | } |
2379 | -#ifdef CONFIG_R8712_AP |
2380 | _init_listhead(&pstapriv->asoc_list); |
2381 | _init_listhead(&pstapriv->auth_list); |
2382 | -#endif |
2383 | return _SUCCESS; |
2384 | } |
2385 | |
2386 | diff --git a/drivers/staging/rtl8712/sta_info.h b/drivers/staging/rtl8712/sta_info.h |
2387 | index 48d6a14..f8016e9 100644 |
2388 | --- a/drivers/staging/rtl8712/sta_info.h |
2389 | +++ b/drivers/staging/rtl8712/sta_info.h |
2390 | @@ -90,7 +90,6 @@ struct sta_info { |
2391 | * curr_network(mlme_priv/security_priv/qos/ht) : AP CAP/INFO |
2392 | * sta_info: (AP & STA) CAP/INFO |
2393 | */ |
2394 | -#ifdef CONFIG_R8712_AP |
2395 | struct list_head asoc_list; |
2396 | struct list_head auth_list; |
2397 | unsigned int expire_to; |
2398 | @@ -98,7 +97,6 @@ struct sta_info { |
2399 | unsigned int authalg; |
2400 | unsigned char chg_txt[128]; |
2401 | unsigned int tx_ra_bitmap; |
2402 | -#endif |
2403 | }; |
2404 | |
2405 | struct sta_priv { |
2406 | @@ -111,13 +109,11 @@ struct sta_priv { |
2407 | struct __queue sleep_q; |
2408 | struct __queue wakeup_q; |
2409 | struct _adapter *padapter; |
2410 | -#ifdef CONFIG_R8712_AP |
2411 | struct list_head asoc_list; |
2412 | struct list_head auth_list; |
2413 | unsigned int auth_to; /* sec, time to expire in authenticating. */ |
2414 | unsigned int assoc_to; /* sec, time to expire before associating. */ |
2415 | unsigned int expire_to; /* sec , time to expire after associated. */ |
2416 | -#endif |
2417 | }; |
2418 | |
2419 | static inline u32 wifi_mac_hash(u8 *mac) |
2420 | diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c |
2421 | index f5e469d..16ad9fe 100644 |
2422 | --- a/drivers/staging/zcache/zcache-main.c |
2423 | +++ b/drivers/staging/zcache/zcache-main.c |
2424 | @@ -299,10 +299,12 @@ static void zbud_free_and_delist(struct zbud_hdr *zh) |
2425 | struct zbud_page *zbpg = |
2426 | container_of(zh, struct zbud_page, buddy[budnum]); |
2427 | |
2428 | + spin_lock(&zbud_budlists_spinlock); |
2429 | spin_lock(&zbpg->lock); |
2430 | if (list_empty(&zbpg->bud_list)) { |
2431 | /* ignore zombie page... see zbud_evict_pages() */ |
2432 | spin_unlock(&zbpg->lock); |
2433 | + spin_unlock(&zbud_budlists_spinlock); |
2434 | return; |
2435 | } |
2436 | size = zbud_free(zh); |
2437 | @@ -310,7 +312,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh) |
2438 | zh_other = &zbpg->buddy[(budnum == 0) ? 1 : 0]; |
2439 | if (zh_other->size == 0) { /* was unbuddied: unlist and free */ |
2440 | chunks = zbud_size_to_chunks(size) ; |
2441 | - spin_lock(&zbud_budlists_spinlock); |
2442 | BUG_ON(list_empty(&zbud_unbuddied[chunks].list)); |
2443 | list_del_init(&zbpg->bud_list); |
2444 | zbud_unbuddied[chunks].count--; |
2445 | @@ -318,7 +319,6 @@ static void zbud_free_and_delist(struct zbud_hdr *zh) |
2446 | zbud_free_raw_page(zbpg); |
2447 | } else { /* was buddied: move remaining buddy to unbuddied list */ |
2448 | chunks = zbud_size_to_chunks(zh_other->size) ; |
2449 | - spin_lock(&zbud_budlists_spinlock); |
2450 | list_del_init(&zbpg->bud_list); |
2451 | zcache_zbud_buddied_count--; |
2452 | list_add_tail(&zbpg->bud_list, &zbud_unbuddied[chunks].list); |
2453 | diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c |
2454 | index 03d3528..0842cc7 100644 |
2455 | --- a/drivers/target/iscsi/iscsi_target.c |
2456 | +++ b/drivers/target/iscsi/iscsi_target.c |
2457 | @@ -781,7 +781,7 @@ static int iscsit_alloc_buffs(struct iscsi_cmd *cmd) |
2458 | struct scatterlist *sgl; |
2459 | u32 length = cmd->se_cmd.data_length; |
2460 | int nents = DIV_ROUND_UP(length, PAGE_SIZE); |
2461 | - int i = 0, ret; |
2462 | + int i = 0, j = 0, ret; |
2463 | /* |
2464 | * If no SCSI payload is present, allocate the default iovecs used for |
2465 | * iSCSI PDU Header |
2466 | @@ -822,17 +822,15 @@ static int iscsit_alloc_buffs(struct iscsi_cmd *cmd) |
2467 | */ |
2468 | ret = iscsit_allocate_iovecs(cmd); |
2469 | if (ret < 0) |
2470 | - goto page_alloc_failed; |
2471 | + return -ENOMEM; |
2472 | |
2473 | return 0; |
2474 | |
2475 | page_alloc_failed: |
2476 | - while (i >= 0) { |
2477 | - __free_page(sg_page(&sgl[i])); |
2478 | - i--; |
2479 | - } |
2480 | - kfree(cmd->t_mem_sg); |
2481 | - cmd->t_mem_sg = NULL; |
2482 | + while (j < i) |
2483 | + __free_page(sg_page(&sgl[j++])); |
2484 | + |
2485 | + kfree(sgl); |
2486 | return -ENOMEM; |
2487 | } |
2488 | |
2489 | diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c |
2490 | index db32784..83dcf49 100644 |
2491 | --- a/drivers/target/iscsi/iscsi_target_configfs.c |
2492 | +++ b/drivers/target/iscsi/iscsi_target_configfs.c |
2493 | @@ -816,9 +816,6 @@ static struct se_node_acl *lio_target_make_nodeacl( |
2494 | if (!se_nacl_new) |
2495 | return ERR_PTR(-ENOMEM); |
2496 | |
2497 | - acl = container_of(se_nacl_new, struct iscsi_node_acl, |
2498 | - se_node_acl); |
2499 | - |
2500 | cmdsn_depth = ISCSI_TPG_ATTRIB(tpg)->default_cmdsn_depth; |
2501 | /* |
2502 | * se_nacl_new may be released by core_tpg_add_initiator_node_acl() |
2503 | @@ -829,7 +826,8 @@ static struct se_node_acl *lio_target_make_nodeacl( |
2504 | if (IS_ERR(se_nacl)) |
2505 | return se_nacl; |
2506 | |
2507 | - stats_cg = &acl->se_node_acl.acl_fabric_stat_group; |
2508 | + acl = container_of(se_nacl, struct iscsi_node_acl, se_node_acl); |
2509 | + stats_cg = &se_nacl->acl_fabric_stat_group; |
2510 | |
2511 | stats_cg->default_groups = kzalloc(sizeof(struct config_group) * 2, |
2512 | GFP_KERNEL); |
2513 | diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c |
2514 | index 81d5832..5d1d4f2 100644 |
2515 | --- a/drivers/target/loopback/tcm_loop.c |
2516 | +++ b/drivers/target/loopback/tcm_loop.c |
2517 | @@ -866,6 +866,9 @@ static int tcm_loop_queue_data_in(struct se_cmd *se_cmd) |
2518 | |
2519 | sc->result = SAM_STAT_GOOD; |
2520 | set_host_byte(sc, DID_OK); |
2521 | + if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || |
2522 | + (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) |
2523 | + scsi_set_resid(sc, se_cmd->residual_count); |
2524 | sc->scsi_done(sc); |
2525 | return 0; |
2526 | } |
2527 | @@ -891,6 +894,9 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) |
2528 | sc->result = se_cmd->scsi_status; |
2529 | |
2530 | set_host_byte(sc, DID_OK); |
2531 | + if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) || |
2532 | + (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT)) |
2533 | + scsi_set_resid(sc, se_cmd->residual_count); |
2534 | sc->scsi_done(sc); |
2535 | return 0; |
2536 | } |
2537 | diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c |
2538 | index 1d24512..5b05744 100644 |
2539 | --- a/drivers/target/target_core_alua.c |
2540 | +++ b/drivers/target/target_core_alua.c |
2541 | @@ -30,6 +30,7 @@ |
2542 | #include <linux/export.h> |
2543 | #include <scsi/scsi.h> |
2544 | #include <scsi/scsi_cmnd.h> |
2545 | +#include <asm/unaligned.h> |
2546 | |
2547 | #include <target/target_core_base.h> |
2548 | #include <target/target_core_device.h> |
2549 | @@ -268,8 +269,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) |
2550 | * changed. |
2551 | */ |
2552 | if (primary) { |
2553 | - tg_pt_id = ((ptr[2] << 8) & 0xff); |
2554 | - tg_pt_id |= (ptr[3] & 0xff); |
2555 | + tg_pt_id = get_unaligned_be16(ptr + 2); |
2556 | /* |
2557 | * Locate the matching target port group ID from |
2558 | * the global tg_pt_gp list |
2559 | @@ -313,8 +313,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) |
2560 | * the Target Port in question for the the incoming |
2561 | * SET_TARGET_PORT_GROUPS op. |
2562 | */ |
2563 | - rtpi = ((ptr[2] << 8) & 0xff); |
2564 | - rtpi |= (ptr[3] & 0xff); |
2565 | + rtpi = get_unaligned_be16(ptr + 2); |
2566 | /* |
2567 | * Locate the matching relative target port identifer |
2568 | * for the struct se_device storage object. |
2569 | diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c |
2570 | index 8facd33..65ea65a 100644 |
2571 | --- a/drivers/target/target_core_cdb.c |
2572 | +++ b/drivers/target/target_core_cdb.c |
2573 | @@ -116,7 +116,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) |
2574 | goto out; |
2575 | } |
2576 | |
2577 | - buf[7] = 0x32; /* Sync=1 and CmdQue=1 */ |
2578 | + buf[7] = 0x2; /* CmdQue=1 */ |
2579 | |
2580 | /* |
2581 | * Do not include vendor, product, reversion info in INQUIRY |
2582 | diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c |
2583 | index 19f8aca..f8773ae 100644 |
2584 | --- a/drivers/target/target_core_device.c |
2585 | +++ b/drivers/target/target_core_device.c |
2586 | @@ -658,7 +658,9 @@ int target_report_luns(struct se_task *se_task) |
2587 | unsigned char *buf; |
2588 | u32 cdb_offset = 0, lun_count = 0, offset = 8, i; |
2589 | |
2590 | - buf = (unsigned char *) transport_kmap_data_sg(se_cmd); |
2591 | + buf = transport_kmap_data_sg(se_cmd); |
2592 | + if (!buf) |
2593 | + return -ENOMEM; |
2594 | |
2595 | /* |
2596 | * If no struct se_session pointer is present, this struct se_cmd is |
2597 | @@ -696,12 +698,12 @@ int target_report_luns(struct se_task *se_task) |
2598 | * See SPC3 r07, page 159. |
2599 | */ |
2600 | done: |
2601 | - transport_kunmap_data_sg(se_cmd); |
2602 | lun_count *= 8; |
2603 | buf[0] = ((lun_count >> 24) & 0xff); |
2604 | buf[1] = ((lun_count >> 16) & 0xff); |
2605 | buf[2] = ((lun_count >> 8) & 0xff); |
2606 | buf[3] = (lun_count & 0xff); |
2607 | + transport_kunmap_data_sg(se_cmd); |
2608 | |
2609 | se_task->task_scsi_status = GOOD; |
2610 | transport_complete_task(se_task, 1); |
2611 | diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c |
2612 | index 71fc9ce..754b669 100644 |
2613 | --- a/drivers/target/tcm_fc/tfc_cmd.c |
2614 | +++ b/drivers/target/tcm_fc/tfc_cmd.c |
2615 | @@ -329,10 +329,12 @@ static void ft_send_resp_status(struct fc_lport *lport, |
2616 | |
2617 | fc_fill_reply_hdr(fp, rx_fp, FC_RCTL_DD_CMD_STATUS, 0); |
2618 | sp = fr_seq(fp); |
2619 | - if (sp) |
2620 | + if (sp) { |
2621 | lport->tt.seq_send(lport, sp, fp); |
2622 | - else |
2623 | + lport->tt.exch_done(sp); |
2624 | + } else { |
2625 | lport->tt.frame_send(lport, fp); |
2626 | + } |
2627 | } |
2628 | |
2629 | /* |
2630 | diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c |
2631 | index d15a071..0174d2d 100644 |
2632 | --- a/drivers/tty/moxa.c |
2633 | +++ b/drivers/tty/moxa.c |
2634 | @@ -1331,7 +1331,7 @@ static void moxa_start(struct tty_struct *tty) |
2635 | if (ch == NULL) |
2636 | return; |
2637 | |
2638 | - if (!(ch->statusflags & TXSTOPPED)) |
2639 | + if (!test_bit(TXSTOPPED, &ch->statusflags)) |
2640 | return; |
2641 | |
2642 | MoxaPortTxEnable(ch); |
2643 | diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c |
2644 | index aff9d61..829e51a 100644 |
2645 | --- a/drivers/tty/serial/sh-sci.c |
2646 | +++ b/drivers/tty/serial/sh-sci.c |
2647 | @@ -1123,17 +1123,20 @@ static void sci_dma_tx_complete(void *arg) |
2648 | port->icount.tx += sg_dma_len(&s->sg_tx); |
2649 | |
2650 | async_tx_ack(s->desc_tx); |
2651 | - s->cookie_tx = -EINVAL; |
2652 | s->desc_tx = NULL; |
2653 | |
2654 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
2655 | uart_write_wakeup(port); |
2656 | |
2657 | if (!uart_circ_empty(xmit)) { |
2658 | + s->cookie_tx = 0; |
2659 | schedule_work(&s->work_tx); |
2660 | - } else if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { |
2661 | - u16 ctrl = sci_in(port, SCSCR); |
2662 | - sci_out(port, SCSCR, ctrl & ~SCSCR_TIE); |
2663 | + } else { |
2664 | + s->cookie_tx = -EINVAL; |
2665 | + if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) { |
2666 | + u16 ctrl = sci_in(port, SCSCR); |
2667 | + sci_out(port, SCSCR, ctrl & ~SCSCR_TIE); |
2668 | + } |
2669 | } |
2670 | |
2671 | spin_unlock_irqrestore(&port->lock, flags); |
2672 | @@ -1395,8 +1398,10 @@ static void sci_start_tx(struct uart_port *port) |
2673 | } |
2674 | |
2675 | if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) && |
2676 | - s->cookie_tx < 0) |
2677 | + s->cookie_tx < 0) { |
2678 | + s->cookie_tx = 0; |
2679 | schedule_work(&s->work_tx); |
2680 | + } |
2681 | #endif |
2682 | |
2683 | if (!s->chan_tx || port->type == PORT_SCIFA || port->type == PORT_SCIFB) { |
2684 | diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c |
2685 | index 45d3e80..f343808 100644 |
2686 | --- a/drivers/tty/vt/consolemap.c |
2687 | +++ b/drivers/tty/vt/consolemap.c |
2688 | @@ -516,6 +516,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) |
2689 | int err = 0, err1, i; |
2690 | struct uni_pagedir *p, *q; |
2691 | |
2692 | + /* Save original vc_unipagdir_loc in case we allocate a new one */ |
2693 | p = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; |
2694 | if (p->readonly) return -EIO; |
2695 | |
2696 | @@ -528,26 +529,57 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) |
2697 | err1 = con_clear_unimap(vc, NULL); |
2698 | if (err1) return err1; |
2699 | |
2700 | + /* |
2701 | + * Since refcount was > 1, con_clear_unimap() allocated a |
2702 | + * a new uni_pagedir for this vc. Re: p != q |
2703 | + */ |
2704 | q = (struct uni_pagedir *)*vc->vc_uni_pagedir_loc; |
2705 | - for (i = 0, l = 0; i < 32; i++) |
2706 | + |
2707 | + /* |
2708 | + * uni_pgdir is a 32*32*64 table with rows allocated |
2709 | + * when its first entry is added. The unicode value must |
2710 | + * still be incremented for empty rows. We are copying |
2711 | + * entries from "p" (old) to "q" (new). |
2712 | + */ |
2713 | + l = 0; /* unicode value */ |
2714 | + for (i = 0; i < 32; i++) |
2715 | if ((p1 = p->uni_pgdir[i])) |
2716 | for (j = 0; j < 32; j++) |
2717 | - if ((p2 = p1[j])) |
2718 | + if ((p2 = p1[j])) { |
2719 | for (k = 0; k < 64; k++, l++) |
2720 | if (p2[k] != 0xffff) { |
2721 | + /* |
2722 | + * Found one, copy entry for unicode |
2723 | + * l with fontpos value p2[k]. |
2724 | + */ |
2725 | err1 = con_insert_unipair(q, l, p2[k]); |
2726 | if (err1) { |
2727 | p->refcount++; |
2728 | *vc->vc_uni_pagedir_loc = (unsigned long)p; |
2729 | con_release_unimap(q); |
2730 | kfree(q); |
2731 | - return err1; |
2732 | + return err1; |
2733 | } |
2734 | - } |
2735 | - p = q; |
2736 | - } else if (p == dflt) |
2737 | + } |
2738 | + } else { |
2739 | + /* Account for row of 64 empty entries */ |
2740 | + l += 64; |
2741 | + } |
2742 | + else |
2743 | + /* Account for empty table */ |
2744 | + l += 32 * 64; |
2745 | + |
2746 | + /* |
2747 | + * Finished copying font table, set vc_uni_pagedir to new table |
2748 | + */ |
2749 | + p = q; |
2750 | + } else if (p == dflt) { |
2751 | dflt = NULL; |
2752 | - |
2753 | + } |
2754 | + |
2755 | + /* |
2756 | + * Insert user specified unicode pairs into new table. |
2757 | + */ |
2758 | while (ct--) { |
2759 | unsigned short unicode, fontpos; |
2760 | __get_user(unicode, &list->unicode); |
2761 | @@ -557,11 +589,14 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list) |
2762 | list++; |
2763 | } |
2764 | |
2765 | + /* |
2766 | + * Merge with fontmaps of any other virtual consoles. |
2767 | + */ |
2768 | if (con_unify_unimap(vc, p)) |
2769 | return err; |
2770 | |
2771 | for (i = 0; i <= 3; i++) |
2772 | - set_inverse_transl(vc, p, i); /* Update all inverse translations */ |
2773 | + set_inverse_transl(vc, p, i); /* Update inverse translations */ |
2774 | set_inverse_trans_unicode(vc, p); |
2775 | |
2776 | return err; |
2777 | diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c |
2778 | index fd4aee1..9eb71d8 100644 |
2779 | --- a/drivers/usb/class/cdc-wdm.c |
2780 | +++ b/drivers/usb/class/cdc-wdm.c |
2781 | @@ -397,7 +397,7 @@ outnl: |
2782 | static ssize_t wdm_read |
2783 | (struct file *file, char __user *buffer, size_t count, loff_t *ppos) |
2784 | { |
2785 | - int rv, cntr = 0; |
2786 | + int rv, cntr; |
2787 | int i = 0; |
2788 | struct wdm_device *desc = file->private_data; |
2789 | |
2790 | @@ -406,7 +406,8 @@ static ssize_t wdm_read |
2791 | if (rv < 0) |
2792 | return -ERESTARTSYS; |
2793 | |
2794 | - if (desc->length == 0) { |
2795 | + cntr = ACCESS_ONCE(desc->length); |
2796 | + if (cntr == 0) { |
2797 | desc->read = 0; |
2798 | retry: |
2799 | if (test_bit(WDM_DISCONNECTING, &desc->flags)) { |
2800 | @@ -456,26 +457,30 @@ retry: |
2801 | spin_unlock_irq(&desc->iuspin); |
2802 | goto retry; |
2803 | } |
2804 | - clear_bit(WDM_READ, &desc->flags); |
2805 | + cntr = desc->length; |
2806 | spin_unlock_irq(&desc->iuspin); |
2807 | } |
2808 | |
2809 | - cntr = count > desc->length ? desc->length : count; |
2810 | + if (cntr > count) |
2811 | + cntr = count; |
2812 | rv = copy_to_user(buffer, desc->ubuf, cntr); |
2813 | if (rv > 0) { |
2814 | rv = -EFAULT; |
2815 | goto err; |
2816 | } |
2817 | |
2818 | + spin_lock_irq(&desc->iuspin); |
2819 | + |
2820 | for (i = 0; i < desc->length - cntr; i++) |
2821 | desc->ubuf[i] = desc->ubuf[i + cntr]; |
2822 | |
2823 | - spin_lock_irq(&desc->iuspin); |
2824 | desc->length -= cntr; |
2825 | - spin_unlock_irq(&desc->iuspin); |
2826 | /* in case we had outstanding data */ |
2827 | if (!desc->length) |
2828 | clear_bit(WDM_READ, &desc->flags); |
2829 | + |
2830 | + spin_unlock_irq(&desc->iuspin); |
2831 | + |
2832 | rv = cntr; |
2833 | |
2834 | err: |
2835 | diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c |
2836 | index 25dbd86..3700aa6 100644 |
2837 | --- a/drivers/usb/dwc3/gadget.c |
2838 | +++ b/drivers/usb/dwc3/gadget.c |
2839 | @@ -449,16 +449,16 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep, |
2840 | |
2841 | switch (usb_endpoint_type(desc)) { |
2842 | case USB_ENDPOINT_XFER_CONTROL: |
2843 | - strncat(dep->name, "-control", sizeof(dep->name)); |
2844 | + strlcat(dep->name, "-control", sizeof(dep->name)); |
2845 | break; |
2846 | case USB_ENDPOINT_XFER_ISOC: |
2847 | - strncat(dep->name, "-isoc", sizeof(dep->name)); |
2848 | + strlcat(dep->name, "-isoc", sizeof(dep->name)); |
2849 | break; |
2850 | case USB_ENDPOINT_XFER_BULK: |
2851 | - strncat(dep->name, "-bulk", sizeof(dep->name)); |
2852 | + strlcat(dep->name, "-bulk", sizeof(dep->name)); |
2853 | break; |
2854 | case USB_ENDPOINT_XFER_INT: |
2855 | - strncat(dep->name, "-int", sizeof(dep->name)); |
2856 | + strlcat(dep->name, "-int", sizeof(dep->name)); |
2857 | break; |
2858 | default: |
2859 | dev_err(dwc->dev, "invalid endpoint transfer type\n"); |
2860 | @@ -1405,7 +1405,7 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc, |
2861 | static void dwc3_gadget_start_isoc(struct dwc3 *dwc, |
2862 | struct dwc3_ep *dep, const struct dwc3_event_depevt *event) |
2863 | { |
2864 | - u32 uf; |
2865 | + u32 uf, mask; |
2866 | |
2867 | if (list_empty(&dep->request_list)) { |
2868 | dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", |
2869 | @@ -1413,16 +1413,10 @@ static void dwc3_gadget_start_isoc(struct dwc3 *dwc, |
2870 | return; |
2871 | } |
2872 | |
2873 | - if (event->parameters) { |
2874 | - u32 mask; |
2875 | - |
2876 | - mask = ~(dep->interval - 1); |
2877 | - uf = event->parameters & mask; |
2878 | - /* 4 micro frames in the future */ |
2879 | - uf += dep->interval * 4; |
2880 | - } else { |
2881 | - uf = 0; |
2882 | - } |
2883 | + mask = ~(dep->interval - 1); |
2884 | + uf = event->parameters & mask; |
2885 | + /* 4 micro frames in the future */ |
2886 | + uf += dep->interval * 4; |
2887 | |
2888 | __dwc3_gadget_kick_transfer(dep, uf, 1); |
2889 | } |
2890 | diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c |
2891 | index c154064..21ab474 100644 |
2892 | --- a/drivers/usb/gadget/f_subset.c |
2893 | +++ b/drivers/usb/gadget/f_subset.c |
2894 | @@ -74,7 +74,7 @@ static inline struct f_gether *func_to_geth(struct usb_function *f) |
2895 | |
2896 | /* interface descriptor: */ |
2897 | |
2898 | -static struct usb_interface_descriptor subset_data_intf __initdata = { |
2899 | +static struct usb_interface_descriptor subset_data_intf = { |
2900 | .bLength = sizeof subset_data_intf, |
2901 | .bDescriptorType = USB_DT_INTERFACE, |
2902 | |
2903 | @@ -87,7 +87,7 @@ static struct usb_interface_descriptor subset_data_intf __initdata = { |
2904 | /* .iInterface = DYNAMIC */ |
2905 | }; |
2906 | |
2907 | -static struct usb_cdc_header_desc mdlm_header_desc __initdata = { |
2908 | +static struct usb_cdc_header_desc mdlm_header_desc = { |
2909 | .bLength = sizeof mdlm_header_desc, |
2910 | .bDescriptorType = USB_DT_CS_INTERFACE, |
2911 | .bDescriptorSubType = USB_CDC_HEADER_TYPE, |
2912 | @@ -95,7 +95,7 @@ static struct usb_cdc_header_desc mdlm_header_desc __initdata = { |
2913 | .bcdCDC = cpu_to_le16(0x0110), |
2914 | }; |
2915 | |
2916 | -static struct usb_cdc_mdlm_desc mdlm_desc __initdata = { |
2917 | +static struct usb_cdc_mdlm_desc mdlm_desc = { |
2918 | .bLength = sizeof mdlm_desc, |
2919 | .bDescriptorType = USB_DT_CS_INTERFACE, |
2920 | .bDescriptorSubType = USB_CDC_MDLM_TYPE, |
2921 | @@ -111,7 +111,7 @@ static struct usb_cdc_mdlm_desc mdlm_desc __initdata = { |
2922 | * can't really use its struct. All we do here is say that we're using |
2923 | * the submode of "SAFE" which directly matches the CDC Subset. |
2924 | */ |
2925 | -static u8 mdlm_detail_desc[] __initdata = { |
2926 | +static u8 mdlm_detail_desc[] = { |
2927 | 6, |
2928 | USB_DT_CS_INTERFACE, |
2929 | USB_CDC_MDLM_DETAIL_TYPE, |
2930 | @@ -121,7 +121,7 @@ static u8 mdlm_detail_desc[] __initdata = { |
2931 | 0, /* network data capabilities ("raw" encapsulation) */ |
2932 | }; |
2933 | |
2934 | -static struct usb_cdc_ether_desc ether_desc __initdata = { |
2935 | +static struct usb_cdc_ether_desc ether_desc = { |
2936 | .bLength = sizeof ether_desc, |
2937 | .bDescriptorType = USB_DT_CS_INTERFACE, |
2938 | .bDescriptorSubType = USB_CDC_ETHERNET_TYPE, |
2939 | @@ -136,7 +136,7 @@ static struct usb_cdc_ether_desc ether_desc __initdata = { |
2940 | |
2941 | /* full speed support: */ |
2942 | |
2943 | -static struct usb_endpoint_descriptor fs_subset_in_desc __initdata = { |
2944 | +static struct usb_endpoint_descriptor fs_subset_in_desc = { |
2945 | .bLength = USB_DT_ENDPOINT_SIZE, |
2946 | .bDescriptorType = USB_DT_ENDPOINT, |
2947 | |
2948 | @@ -144,7 +144,7 @@ static struct usb_endpoint_descriptor fs_subset_in_desc __initdata = { |
2949 | .bmAttributes = USB_ENDPOINT_XFER_BULK, |
2950 | }; |
2951 | |
2952 | -static struct usb_endpoint_descriptor fs_subset_out_desc __initdata = { |
2953 | +static struct usb_endpoint_descriptor fs_subset_out_desc = { |
2954 | .bLength = USB_DT_ENDPOINT_SIZE, |
2955 | .bDescriptorType = USB_DT_ENDPOINT, |
2956 | |
2957 | @@ -152,7 +152,7 @@ static struct usb_endpoint_descriptor fs_subset_out_desc __initdata = { |
2958 | .bmAttributes = USB_ENDPOINT_XFER_BULK, |
2959 | }; |
2960 | |
2961 | -static struct usb_descriptor_header *fs_eth_function[] __initdata = { |
2962 | +static struct usb_descriptor_header *fs_eth_function[] = { |
2963 | (struct usb_descriptor_header *) &subset_data_intf, |
2964 | (struct usb_descriptor_header *) &mdlm_header_desc, |
2965 | (struct usb_descriptor_header *) &mdlm_desc, |
2966 | @@ -165,7 +165,7 @@ static struct usb_descriptor_header *fs_eth_function[] __initdata = { |
2967 | |
2968 | /* high speed support: */ |
2969 | |
2970 | -static struct usb_endpoint_descriptor hs_subset_in_desc __initdata = { |
2971 | +static struct usb_endpoint_descriptor hs_subset_in_desc = { |
2972 | .bLength = USB_DT_ENDPOINT_SIZE, |
2973 | .bDescriptorType = USB_DT_ENDPOINT, |
2974 | |
2975 | @@ -173,7 +173,7 @@ static struct usb_endpoint_descriptor hs_subset_in_desc __initdata = { |
2976 | .wMaxPacketSize = cpu_to_le16(512), |
2977 | }; |
2978 | |
2979 | -static struct usb_endpoint_descriptor hs_subset_out_desc __initdata = { |
2980 | +static struct usb_endpoint_descriptor hs_subset_out_desc = { |
2981 | .bLength = USB_DT_ENDPOINT_SIZE, |
2982 | .bDescriptorType = USB_DT_ENDPOINT, |
2983 | |
2984 | @@ -181,7 +181,7 @@ static struct usb_endpoint_descriptor hs_subset_out_desc __initdata = { |
2985 | .wMaxPacketSize = cpu_to_le16(512), |
2986 | }; |
2987 | |
2988 | -static struct usb_descriptor_header *hs_eth_function[] __initdata = { |
2989 | +static struct usb_descriptor_header *hs_eth_function[] = { |
2990 | (struct usb_descriptor_header *) &subset_data_intf, |
2991 | (struct usb_descriptor_header *) &mdlm_header_desc, |
2992 | (struct usb_descriptor_header *) &mdlm_desc, |
2993 | @@ -194,7 +194,7 @@ static struct usb_descriptor_header *hs_eth_function[] __initdata = { |
2994 | |
2995 | /* super speed support: */ |
2996 | |
2997 | -static struct usb_endpoint_descriptor ss_subset_in_desc __initdata = { |
2998 | +static struct usb_endpoint_descriptor ss_subset_in_desc = { |
2999 | .bLength = USB_DT_ENDPOINT_SIZE, |
3000 | .bDescriptorType = USB_DT_ENDPOINT, |
3001 | |
3002 | @@ -202,7 +202,7 @@ static struct usb_endpoint_descriptor ss_subset_in_desc __initdata = { |
3003 | .wMaxPacketSize = cpu_to_le16(1024), |
3004 | }; |
3005 | |
3006 | -static struct usb_endpoint_descriptor ss_subset_out_desc __initdata = { |
3007 | +static struct usb_endpoint_descriptor ss_subset_out_desc = { |
3008 | .bLength = USB_DT_ENDPOINT_SIZE, |
3009 | .bDescriptorType = USB_DT_ENDPOINT, |
3010 | |
3011 | @@ -210,7 +210,7 @@ static struct usb_endpoint_descriptor ss_subset_out_desc __initdata = { |
3012 | .wMaxPacketSize = cpu_to_le16(1024), |
3013 | }; |
3014 | |
3015 | -static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc __initdata = { |
3016 | +static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc = { |
3017 | .bLength = sizeof ss_subset_bulk_comp_desc, |
3018 | .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, |
3019 | |
3020 | @@ -219,7 +219,7 @@ static struct usb_ss_ep_comp_descriptor ss_subset_bulk_comp_desc __initdata = { |
3021 | /* .bmAttributes = 0, */ |
3022 | }; |
3023 | |
3024 | -static struct usb_descriptor_header *ss_eth_function[] __initdata = { |
3025 | +static struct usb_descriptor_header *ss_eth_function[] = { |
3026 | (struct usb_descriptor_header *) &subset_data_intf, |
3027 | (struct usb_descriptor_header *) &mdlm_header_desc, |
3028 | (struct usb_descriptor_header *) &mdlm_desc, |
3029 | @@ -290,7 +290,7 @@ static void geth_disable(struct usb_function *f) |
3030 | |
3031 | /* serial function driver setup/binding */ |
3032 | |
3033 | -static int __init |
3034 | +static int |
3035 | geth_bind(struct usb_configuration *c, struct usb_function *f) |
3036 | { |
3037 | struct usb_composite_dev *cdev = c->cdev; |
3038 | @@ -404,7 +404,7 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f) |
3039 | * Caller must have called @gether_setup(). Caller is also responsible |
3040 | * for calling @gether_cleanup() before module unload. |
3041 | */ |
3042 | -int __init geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) |
3043 | +int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) |
3044 | { |
3045 | struct f_gether *geth; |
3046 | int status; |
3047 | diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c |
3048 | index dd28ef3..8e3e509 100644 |
3049 | --- a/drivers/usb/gadget/fsl_udc_core.c |
3050 | +++ b/drivers/usb/gadget/fsl_udc_core.c |
3051 | @@ -768,7 +768,7 @@ static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req) |
3052 | * @is_last: return flag if it is the last dTD of the request |
3053 | * return: pointer to the built dTD */ |
3054 | static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length, |
3055 | - dma_addr_t *dma, int *is_last) |
3056 | + dma_addr_t *dma, int *is_last, gfp_t gfp_flags) |
3057 | { |
3058 | u32 swap_temp; |
3059 | struct ep_td_struct *dtd; |
3060 | @@ -777,7 +777,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length, |
3061 | *length = min(req->req.length - req->req.actual, |
3062 | (unsigned)EP_MAX_LENGTH_TRANSFER); |
3063 | |
3064 | - dtd = dma_pool_alloc(udc_controller->td_pool, GFP_KERNEL, dma); |
3065 | + dtd = dma_pool_alloc(udc_controller->td_pool, gfp_flags, dma); |
3066 | if (dtd == NULL) |
3067 | return dtd; |
3068 | |
3069 | @@ -827,7 +827,7 @@ static struct ep_td_struct *fsl_build_dtd(struct fsl_req *req, unsigned *length, |
3070 | } |
3071 | |
3072 | /* Generate dtd chain for a request */ |
3073 | -static int fsl_req_to_dtd(struct fsl_req *req) |
3074 | +static int fsl_req_to_dtd(struct fsl_req *req, gfp_t gfp_flags) |
3075 | { |
3076 | unsigned count; |
3077 | int is_last; |
3078 | @@ -836,7 +836,7 @@ static int fsl_req_to_dtd(struct fsl_req *req) |
3079 | dma_addr_t dma; |
3080 | |
3081 | do { |
3082 | - dtd = fsl_build_dtd(req, &count, &dma, &is_last); |
3083 | + dtd = fsl_build_dtd(req, &count, &dma, &is_last, gfp_flags); |
3084 | if (dtd == NULL) |
3085 | return -ENOMEM; |
3086 | |
3087 | @@ -910,13 +910,11 @@ fsl_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) |
3088 | req->req.actual = 0; |
3089 | req->dtd_count = 0; |
3090 | |
3091 | - spin_lock_irqsave(&udc->lock, flags); |
3092 | - |
3093 | /* build dtds and push them to device queue */ |
3094 | - if (!fsl_req_to_dtd(req)) { |
3095 | + if (!fsl_req_to_dtd(req, gfp_flags)) { |
3096 | + spin_lock_irqsave(&udc->lock, flags); |
3097 | fsl_queue_td(ep, req); |
3098 | } else { |
3099 | - spin_unlock_irqrestore(&udc->lock, flags); |
3100 | return -ENOMEM; |
3101 | } |
3102 | |
3103 | @@ -1295,7 +1293,7 @@ static int ep0_prime_status(struct fsl_udc *udc, int direction) |
3104 | ep_is_in(ep) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
3105 | req->mapped = 1; |
3106 | |
3107 | - if (fsl_req_to_dtd(req) == 0) |
3108 | + if (fsl_req_to_dtd(req, GFP_ATOMIC) == 0) |
3109 | fsl_queue_td(ep, req); |
3110 | else |
3111 | return -ENOMEM; |
3112 | @@ -1379,7 +1377,7 @@ static void ch9getstatus(struct fsl_udc *udc, u8 request_type, u16 value, |
3113 | req->mapped = 1; |
3114 | |
3115 | /* prime the data phase */ |
3116 | - if ((fsl_req_to_dtd(req) == 0)) |
3117 | + if ((fsl_req_to_dtd(req, GFP_ATOMIC) == 0)) |
3118 | fsl_queue_td(ep, req); |
3119 | else /* no mem */ |
3120 | goto stall; |
3121 | diff --git a/drivers/usb/gadget/hid.c b/drivers/usb/gadget/hid.c |
3122 | index f888c3e..3493adf 100644 |
3123 | --- a/drivers/usb/gadget/hid.c |
3124 | +++ b/drivers/usb/gadget/hid.c |
3125 | @@ -60,9 +60,9 @@ static struct usb_device_descriptor device_desc = { |
3126 | /* .bDeviceClass = USB_CLASS_COMM, */ |
3127 | /* .bDeviceSubClass = 0, */ |
3128 | /* .bDeviceProtocol = 0, */ |
3129 | - .bDeviceClass = 0xEF, |
3130 | - .bDeviceSubClass = 2, |
3131 | - .bDeviceProtocol = 1, |
3132 | + .bDeviceClass = USB_CLASS_PER_INTERFACE, |
3133 | + .bDeviceSubClass = 0, |
3134 | + .bDeviceProtocol = 0, |
3135 | /* .bMaxPacketSize0 = f(hardware) */ |
3136 | |
3137 | /* Vendor and product id can be overridden by module parameters. */ |
3138 | diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c |
3139 | index 6ccae27..7138540 100644 |
3140 | --- a/drivers/usb/gadget/inode.c |
3141 | +++ b/drivers/usb/gadget/inode.c |
3142 | @@ -1043,6 +1043,8 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) |
3143 | // FIXME don't call this with the spinlock held ... |
3144 | if (copy_to_user (buf, dev->req->buf, len)) |
3145 | retval = -EFAULT; |
3146 | + else |
3147 | + retval = len; |
3148 | clean_req (dev->gadget->ep0, dev->req); |
3149 | /* NOTE userspace can't yet choose to stall */ |
3150 | } |
3151 | diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c |
3152 | index b556a72..da487fd 100644 |
3153 | --- a/drivers/usb/host/ehci-fsl.c |
3154 | +++ b/drivers/usb/host/ehci-fsl.c |
3155 | @@ -216,6 +216,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci, |
3156 | unsigned int port_offset) |
3157 | { |
3158 | u32 portsc; |
3159 | + struct usb_hcd *hcd = ehci_to_hcd(ehci); |
3160 | + void __iomem *non_ehci = hcd->regs; |
3161 | |
3162 | portsc = ehci_readl(ehci, &ehci->regs->port_status[port_offset]); |
3163 | portsc &= ~(PORT_PTS_MSK | PORT_PTS_PTW); |
3164 | @@ -231,6 +233,8 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci, |
3165 | portsc |= PORT_PTS_PTW; |
3166 | /* fall through */ |
3167 | case FSL_USB2_PHY_UTMI: |
3168 | + /* enable UTMI PHY */ |
3169 | + setbits32(non_ehci + FSL_SOC_USB_CTRL, CTRL_UTMI_PHY_EN); |
3170 | portsc |= PORT_PTS_UTMI; |
3171 | break; |
3172 | case FSL_USB2_PHY_NONE: |
3173 | diff --git a/drivers/usb/host/ehci-fsl.h b/drivers/usb/host/ehci-fsl.h |
3174 | index 4918062..bea5013 100644 |
3175 | --- a/drivers/usb/host/ehci-fsl.h |
3176 | +++ b/drivers/usb/host/ehci-fsl.h |
3177 | @@ -45,5 +45,6 @@ |
3178 | #define FSL_SOC_USB_PRICTRL 0x40c /* NOTE: big-endian */ |
3179 | #define FSL_SOC_USB_SICTRL 0x410 /* NOTE: big-endian */ |
3180 | #define FSL_SOC_USB_CTRL 0x500 /* NOTE: big-endian */ |
3181 | +#define CTRL_UTMI_PHY_EN (1<<9) |
3182 | #define SNOOP_SIZE_2GB 0x1e |
3183 | #endif /* _EHCI_FSL_H */ |
3184 | diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c |
3185 | index 9037035..5a42cf0 100644 |
3186 | --- a/drivers/usb/host/fsl-mph-dr-of.c |
3187 | +++ b/drivers/usb/host/fsl-mph-dr-of.c |
3188 | @@ -94,7 +94,6 @@ struct platform_device * __devinit fsl_usb2_device_register( |
3189 | pdev->dev.parent = &ofdev->dev; |
3190 | |
3191 | pdev->dev.coherent_dma_mask = ofdev->dev.coherent_dma_mask; |
3192 | - pdev->dev.dma_mask = &pdev->archdata.dma_mask; |
3193 | *pdev->dev.dma_mask = *ofdev->dev.dma_mask; |
3194 | |
3195 | retval = platform_device_add_data(pdev, pdata, sizeof(*pdata)); |
3196 | diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c |
3197 | index 922148f..c860597 100644 |
3198 | --- a/drivers/usb/musb/musb_gadget.c |
3199 | +++ b/drivers/usb/musb/musb_gadget.c |
3200 | @@ -576,6 +576,15 @@ void musb_g_tx(struct musb *musb, u8 epnum) |
3201 | |
3202 | if (request->actual == request->length) { |
3203 | musb_g_giveback(musb_ep, request, 0); |
3204 | + /* |
3205 | + * In the giveback function the MUSB lock is |
3206 | + * released and acquired after sometime. During |
3207 | + * this time period the INDEX register could get |
3208 | + * changed by the gadget_queue function especially |
3209 | + * on SMP systems. Reselect the INDEX to be sure |
3210 | + * we are reading/modifying the right registers |
3211 | + */ |
3212 | + musb_ep_select(mbase, epnum); |
3213 | req = musb_ep->desc ? next_request(musb_ep) : NULL; |
3214 | if (!req) { |
3215 | dev_dbg(musb->controller, "%s idle now\n", |
3216 | @@ -985,6 +994,15 @@ void musb_g_rx(struct musb *musb, u8 epnum) |
3217 | } |
3218 | #endif |
3219 | musb_g_giveback(musb_ep, request, 0); |
3220 | + /* |
3221 | + * In the giveback function the MUSB lock is |
3222 | + * released and acquired after sometime. During |
3223 | + * this time period the INDEX register could get |
3224 | + * changed by the gadget_queue function especially |
3225 | + * on SMP systems. Reselect the INDEX to be sure |
3226 | + * we are reading/modifying the right registers |
3227 | + */ |
3228 | + musb_ep_select(mbase, epnum); |
3229 | |
3230 | req = next_request(musb_ep); |
3231 | if (!req) |
3232 | diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c |
3233 | index 7f4e803..aa0d183 100644 |
3234 | --- a/drivers/usb/renesas_usbhs/mod_gadget.c |
3235 | +++ b/drivers/usb/renesas_usbhs/mod_gadget.c |
3236 | @@ -816,6 +816,11 @@ static int usbhsg_stop(struct usbhs_priv *priv) |
3237 | return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED); |
3238 | } |
3239 | |
3240 | +static void usbhs_mod_gadget_release(struct device *pdev) |
3241 | +{ |
3242 | + /* do nothing */ |
3243 | +} |
3244 | + |
3245 | int usbhs_mod_gadget_probe(struct usbhs_priv *priv) |
3246 | { |
3247 | struct usbhsg_gpriv *gpriv; |
3248 | @@ -864,6 +869,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) |
3249 | */ |
3250 | dev_set_name(&gpriv->gadget.dev, "gadget"); |
3251 | gpriv->gadget.dev.parent = dev; |
3252 | + gpriv->gadget.dev.release = usbhs_mod_gadget_release; |
3253 | gpriv->gadget.name = "renesas_usbhs_udc"; |
3254 | gpriv->gadget.ops = &usbhsg_gadget_ops; |
3255 | gpriv->gadget.is_dualspeed = 1; |
3256 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
3257 | index 33d25d4..4c12404 100644 |
3258 | --- a/drivers/usb/serial/cp210x.c |
3259 | +++ b/drivers/usb/serial/cp210x.c |
3260 | @@ -49,6 +49,7 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, |
3261 | unsigned int, unsigned int); |
3262 | static void cp210x_break_ctl(struct tty_struct *, int); |
3263 | static int cp210x_startup(struct usb_serial *); |
3264 | +static void cp210x_release(struct usb_serial *); |
3265 | static void cp210x_dtr_rts(struct usb_serial_port *p, int on); |
3266 | |
3267 | static int debug; |
3268 | @@ -121,6 +122,8 @@ static const struct usb_device_id id_table[] = { |
3269 | { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ |
3270 | { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ |
3271 | { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ |
3272 | + { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ |
3273 | + { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */ |
3274 | { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ |
3275 | { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ |
3276 | { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ |
3277 | @@ -149,6 +152,10 @@ static const struct usb_device_id id_table[] = { |
3278 | |
3279 | MODULE_DEVICE_TABLE(usb, id_table); |
3280 | |
3281 | +struct cp210x_port_private { |
3282 | + __u8 bInterfaceNumber; |
3283 | +}; |
3284 | + |
3285 | static struct usb_driver cp210x_driver = { |
3286 | .name = "cp210x", |
3287 | .probe = usb_serial_probe, |
3288 | @@ -174,6 +181,7 @@ static struct usb_serial_driver cp210x_device = { |
3289 | .tiocmget = cp210x_tiocmget, |
3290 | .tiocmset = cp210x_tiocmset, |
3291 | .attach = cp210x_startup, |
3292 | + .release = cp210x_release, |
3293 | .dtr_rts = cp210x_dtr_rts |
3294 | }; |
3295 | |
3296 | @@ -261,6 +269,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request, |
3297 | unsigned int *data, int size) |
3298 | { |
3299 | struct usb_serial *serial = port->serial; |
3300 | + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); |
3301 | __le32 *buf; |
3302 | int result, i, length; |
3303 | |
3304 | @@ -276,7 +285,7 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request, |
3305 | /* Issue the request, attempting to read 'size' bytes */ |
3306 | result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), |
3307 | request, REQTYPE_DEVICE_TO_HOST, 0x0000, |
3308 | - 0, buf, size, 300); |
3309 | + port_priv->bInterfaceNumber, buf, size, 300); |
3310 | |
3311 | /* Convert data into an array of integers */ |
3312 | for (i = 0; i < length; i++) |
3313 | @@ -304,6 +313,7 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, |
3314 | unsigned int *data, int size) |
3315 | { |
3316 | struct usb_serial *serial = port->serial; |
3317 | + struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); |
3318 | __le32 *buf; |
3319 | int result, i, length; |
3320 | |
3321 | @@ -325,12 +335,12 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request, |
3322 | result = usb_control_msg(serial->dev, |
3323 | usb_sndctrlpipe(serial->dev, 0), |
3324 | request, REQTYPE_HOST_TO_DEVICE, 0x0000, |
3325 | - 0, buf, size, 300); |
3326 | + port_priv->bInterfaceNumber, buf, size, 300); |
3327 | } else { |
3328 | result = usb_control_msg(serial->dev, |
3329 | usb_sndctrlpipe(serial->dev, 0), |
3330 | request, REQTYPE_HOST_TO_DEVICE, data[0], |
3331 | - 0, NULL, 0, 300); |
3332 | + port_priv->bInterfaceNumber, NULL, 0, 300); |
3333 | } |
3334 | |
3335 | kfree(buf); |
3336 | @@ -830,11 +840,39 @@ static void cp210x_break_ctl (struct tty_struct *tty, int break_state) |
3337 | |
3338 | static int cp210x_startup(struct usb_serial *serial) |
3339 | { |
3340 | + struct cp210x_port_private *port_priv; |
3341 | + int i; |
3342 | + |
3343 | /* cp210x buffers behave strangely unless device is reset */ |
3344 | usb_reset_device(serial->dev); |
3345 | + |
3346 | + for (i = 0; i < serial->num_ports; i++) { |
3347 | + port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); |
3348 | + if (!port_priv) |
3349 | + return -ENOMEM; |
3350 | + |
3351 | + memset(port_priv, 0x00, sizeof(*port_priv)); |
3352 | + port_priv->bInterfaceNumber = |
3353 | + serial->interface->cur_altsetting->desc.bInterfaceNumber; |
3354 | + |
3355 | + usb_set_serial_port_data(serial->port[i], port_priv); |
3356 | + } |
3357 | + |
3358 | return 0; |
3359 | } |
3360 | |
3361 | +static void cp210x_release(struct usb_serial *serial) |
3362 | +{ |
3363 | + struct cp210x_port_private *port_priv; |
3364 | + int i; |
3365 | + |
3366 | + for (i = 0; i < serial->num_ports; i++) { |
3367 | + port_priv = usb_get_serial_port_data(serial->port[i]); |
3368 | + kfree(port_priv); |
3369 | + usb_set_serial_port_data(serial->port[i], NULL); |
3370 | + } |
3371 | +} |
3372 | + |
3373 | static int __init cp210x_init(void) |
3374 | { |
3375 | int retval; |
3376 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
3377 | index f030471..f2c9ef7 100644 |
3378 | --- a/drivers/usb/serial/ftdi_sio.c |
3379 | +++ b/drivers/usb/serial/ftdi_sio.c |
3380 | @@ -188,6 +188,7 @@ static struct usb_device_id id_table_combined [] = { |
3381 | .driver_info = (kernel_ulong_t)&ftdi_8u2232c_quirk }, |
3382 | { USB_DEVICE(FTDI_VID, FTDI_4232H_PID) }, |
3383 | { USB_DEVICE(FTDI_VID, FTDI_232H_PID) }, |
3384 | + { USB_DEVICE(FTDI_VID, FTDI_FTX_PID) }, |
3385 | { USB_DEVICE(FTDI_VID, FTDI_MICRO_CHAMELEON_PID) }, |
3386 | { USB_DEVICE(FTDI_VID, FTDI_RELAIS_PID) }, |
3387 | { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_PID) }, |
3388 | @@ -536,6 +537,10 @@ static struct usb_device_id id_table_combined [] = { |
3389 | { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_6_PID) }, |
3390 | { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_7_PID) }, |
3391 | { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803_8_PID) }, |
3392 | + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_1_PID) }, |
3393 | + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_2_PID) }, |
3394 | + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_3_PID) }, |
3395 | + { USB_DEVICE(SEALEVEL_VID, SEALEVEL_2803R_4_PID) }, |
3396 | { USB_DEVICE(IDTECH_VID, IDTECH_IDT1221U_PID) }, |
3397 | { USB_DEVICE(OCT_VID, OCT_US101_PID) }, |
3398 | { USB_DEVICE(OCT_VID, OCT_DK201_PID) }, |
3399 | @@ -797,7 +802,7 @@ static struct usb_device_id id_table_combined [] = { |
3400 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
3401 | { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID), |
3402 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
3403 | - { USB_DEVICE(HORNBY_VID, HORNBY_ELITE_PID) }, |
3404 | + { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) }, |
3405 | { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) }, |
3406 | { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), |
3407 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
3408 | @@ -846,6 +851,9 @@ static struct usb_device_id id_table_combined [] = { |
3409 | { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), |
3410 | .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, |
3411 | { USB_DEVICE(FTDI_VID, FTDI_RF_R106) }, |
3412 | + { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID), |
3413 | + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
3414 | + { USB_DEVICE(FTDI_VID, FTDI_LUMEL_PD12_PID) }, |
3415 | { }, /* Optional parameter entry */ |
3416 | { } /* Terminating entry */ |
3417 | }; |
3418 | @@ -868,7 +876,8 @@ static const char *ftdi_chip_name[] = { |
3419 | [FT232RL] = "FT232RL", |
3420 | [FT2232H] = "FT2232H", |
3421 | [FT4232H] = "FT4232H", |
3422 | - [FT232H] = "FT232H" |
3423 | + [FT232H] = "FT232H", |
3424 | + [FTX] = "FT-X" |
3425 | }; |
3426 | |
3427 | |
3428 | @@ -1169,7 +1178,8 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty, |
3429 | break; |
3430 | case FT232BM: /* FT232BM chip */ |
3431 | case FT2232C: /* FT2232C chip */ |
3432 | - case FT232RL: |
3433 | + case FT232RL: /* FT232RL chip */ |
3434 | + case FTX: /* FT-X series */ |
3435 | if (baud <= 3000000) { |
3436 | __u16 product_id = le16_to_cpu( |
3437 | port->serial->dev->descriptor.idProduct); |
3438 | @@ -1458,10 +1468,14 @@ static void ftdi_determine_type(struct usb_serial_port *port) |
3439 | } else if (version < 0x900) { |
3440 | /* Assume it's an FT232RL */ |
3441 | priv->chip_type = FT232RL; |
3442 | - } else { |
3443 | + } else if (version < 0x1000) { |
3444 | /* Assume it's an FT232H */ |
3445 | priv->chip_type = FT232H; |
3446 | + } else { |
3447 | + /* Assume it's an FT-X series device */ |
3448 | + priv->chip_type = FTX; |
3449 | } |
3450 | + |
3451 | dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]); |
3452 | } |
3453 | |
3454 | @@ -1589,7 +1603,8 @@ static int create_sysfs_attrs(struct usb_serial_port *port) |
3455 | priv->chip_type == FT232RL || |
3456 | priv->chip_type == FT2232H || |
3457 | priv->chip_type == FT4232H || |
3458 | - priv->chip_type == FT232H)) { |
3459 | + priv->chip_type == FT232H || |
3460 | + priv->chip_type == FTX)) { |
3461 | retval = device_create_file(&port->dev, |
3462 | &dev_attr_latency_timer); |
3463 | } |
3464 | @@ -1611,7 +1626,8 @@ static void remove_sysfs_attrs(struct usb_serial_port *port) |
3465 | priv->chip_type == FT232RL || |
3466 | priv->chip_type == FT2232H || |
3467 | priv->chip_type == FT4232H || |
3468 | - priv->chip_type == FT232H) { |
3469 | + priv->chip_type == FT232H || |
3470 | + priv->chip_type == FTX) { |
3471 | device_remove_file(&port->dev, &dev_attr_latency_timer); |
3472 | } |
3473 | } |
3474 | @@ -1763,7 +1779,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial) |
3475 | |
3476 | dbg("%s", __func__); |
3477 | |
3478 | - if (strcmp(udev->manufacturer, "CALAO Systems") == 0) |
3479 | + if ((udev->manufacturer && !strcmp(udev->manufacturer, "CALAO Systems")) || |
3480 | + (udev->product && !strcmp(udev->product, "BeagleBone/XDS100"))) |
3481 | return ftdi_jtag_probe(serial); |
3482 | |
3483 | return 0; |
3484 | @@ -2284,6 +2301,7 @@ static int ftdi_tiocmget(struct tty_struct *tty) |
3485 | case FT2232H: |
3486 | case FT4232H: |
3487 | case FT232H: |
3488 | + case FTX: |
3489 | len = 2; |
3490 | break; |
3491 | default: |
3492 | diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h |
3493 | index 19584fa..ed58c6f 100644 |
3494 | --- a/drivers/usb/serial/ftdi_sio.h |
3495 | +++ b/drivers/usb/serial/ftdi_sio.h |
3496 | @@ -157,7 +157,8 @@ enum ftdi_chip_type { |
3497 | FT232RL = 5, |
3498 | FT2232H = 6, |
3499 | FT4232H = 7, |
3500 | - FT232H = 8 |
3501 | + FT232H = 8, |
3502 | + FTX = 9, |
3503 | }; |
3504 | |
3505 | enum ftdi_sio_baudrate { |
3506 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
3507 | index 4eb7715..c6dd18e 100644 |
3508 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
3509 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
3510 | @@ -23,12 +23,15 @@ |
3511 | #define FTDI_8U2232C_PID 0x6010 /* Dual channel device */ |
3512 | #define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */ |
3513 | #define FTDI_232H_PID 0x6014 /* Single channel hi-speed device */ |
3514 | +#define FTDI_FTX_PID 0x6015 /* FT-X series (FT201X, FT230X, FT231X, etc) */ |
3515 | #define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */ |
3516 | #define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */ |
3517 | |
3518 | |
3519 | /*** third-party PIDs (using FTDI_VID) ***/ |
3520 | |
3521 | +#define FTDI_LUMEL_PD12_PID 0x6002 |
3522 | + |
3523 | /* |
3524 | * Marvell OpenRD Base, Client |
3525 | * http://www.open-rd.org |
3526 | @@ -97,6 +100,8 @@ |
3527 | #define FTDI_TACTRIX_OPENPORT_13S_PID 0xCC49 /* OpenPort 1.3 Subaru */ |
3528 | #define FTDI_TACTRIX_OPENPORT_13U_PID 0xCC4A /* OpenPort 1.3 Universal */ |
3529 | |
3530 | +#define FTDI_DISTORTEC_JTAG_LOCK_PICK_PID 0xCFF8 |
3531 | + |
3532 | /* SCS HF Radio Modems PID's (http://www.scs-ptc.com) */ |
3533 | /* the VID is the standard ftdi vid (FTDI_VID) */ |
3534 | #define FTDI_SCS_DEVICE_0_PID 0xD010 /* SCS PTC-IIusb */ |
3535 | @@ -532,10 +537,14 @@ |
3536 | #define ADI_GNICEPLUS_PID 0xF001 |
3537 | |
3538 | /* |
3539 | - * Hornby Elite |
3540 | + * Microchip Technology, Inc. |
3541 | + * |
3542 | + * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by: |
3543 | + * Hornby Elite - Digital Command Control Console |
3544 | + * http://www.hornby.com/hornby-dcc/controllers/ |
3545 | */ |
3546 | -#define HORNBY_VID 0x04D8 |
3547 | -#define HORNBY_ELITE_PID 0x000A |
3548 | +#define MICROCHIP_VID 0x04D8 |
3549 | +#define MICROCHIP_USB_BOARD_PID 0x000A /* CDC RS-232 Emulation Demo */ |
3550 | |
3551 | /* |
3552 | * RATOC REX-USB60F |
3553 | @@ -680,6 +689,10 @@ |
3554 | #define SEALEVEL_2803_6_PID 0X2863 /* SeaLINK+8 (2803) Port 6 */ |
3555 | #define SEALEVEL_2803_7_PID 0X2873 /* SeaLINK+8 (2803) Port 7 */ |
3556 | #define SEALEVEL_2803_8_PID 0X2883 /* SeaLINK+8 (2803) Port 8 */ |
3557 | +#define SEALEVEL_2803R_1_PID 0Xa02a /* SeaLINK+8 (2803-ROHS) Port 1+2 */ |
3558 | +#define SEALEVEL_2803R_2_PID 0Xa02b /* SeaLINK+8 (2803-ROHS) Port 3+4 */ |
3559 | +#define SEALEVEL_2803R_3_PID 0Xa02c /* SeaLINK+8 (2803-ROHS) Port 5+6 */ |
3560 | +#define SEALEVEL_2803R_4_PID 0Xa02d /* SeaLINK+8 (2803-ROHS) Port 7+8 */ |
3561 | |
3562 | /* |
3563 | * JETI SPECTROMETER SPECBOS 1201 |
3564 | diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c |
3565 | index e4db5ad..9f0b2bf 100644 |
3566 | --- a/drivers/usb/serial/generic.c |
3567 | +++ b/drivers/usb/serial/generic.c |
3568 | @@ -215,8 +215,10 @@ retry: |
3569 | clear_bit(i, &port->write_urbs_free); |
3570 | result = usb_submit_urb(urb, GFP_ATOMIC); |
3571 | if (result) { |
3572 | - dev_err(&port->dev, "%s - error submitting urb: %d\n", |
3573 | + if (!port->port.console) { |
3574 | + dev_err(&port->dev, "%s - error submitting urb: %d\n", |
3575 | __func__, result); |
3576 | + } |
3577 | set_bit(i, &port->write_urbs_free); |
3578 | spin_lock_irqsave(&port->lock, flags); |
3579 | port->tx_bytes -= count; |
3580 | diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c |
3581 | index c72abd5..5c7d654 100644 |
3582 | --- a/drivers/usb/serial/mos7840.c |
3583 | +++ b/drivers/usb/serial/mos7840.c |
3584 | @@ -174,6 +174,7 @@ |
3585 | |
3586 | #define CLK_MULTI_REGISTER ((__u16)(0x02)) |
3587 | #define CLK_START_VALUE_REGISTER ((__u16)(0x03)) |
3588 | +#define GPIO_REGISTER ((__u16)(0x07)) |
3589 | |
3590 | #define SERIAL_LCR_DLAB ((__u16)(0x0080)) |
3591 | |
3592 | @@ -1103,14 +1104,25 @@ static int mos7840_open(struct tty_struct *tty, struct usb_serial_port *port) |
3593 | mos7840_port->read_urb = port->read_urb; |
3594 | |
3595 | /* set up our bulk in urb */ |
3596 | - |
3597 | - usb_fill_bulk_urb(mos7840_port->read_urb, |
3598 | - serial->dev, |
3599 | - usb_rcvbulkpipe(serial->dev, |
3600 | - port->bulk_in_endpointAddress), |
3601 | - port->bulk_in_buffer, |
3602 | - mos7840_port->read_urb->transfer_buffer_length, |
3603 | - mos7840_bulk_in_callback, mos7840_port); |
3604 | + if ((serial->num_ports == 2) |
3605 | + && ((((__u16)port->number - |
3606 | + (__u16)(port->serial->minor)) % 2) != 0)) { |
3607 | + usb_fill_bulk_urb(mos7840_port->read_urb, |
3608 | + serial->dev, |
3609 | + usb_rcvbulkpipe(serial->dev, |
3610 | + (port->bulk_in_endpointAddress) + 2), |
3611 | + port->bulk_in_buffer, |
3612 | + mos7840_port->read_urb->transfer_buffer_length, |
3613 | + mos7840_bulk_in_callback, mos7840_port); |
3614 | + } else { |
3615 | + usb_fill_bulk_urb(mos7840_port->read_urb, |
3616 | + serial->dev, |
3617 | + usb_rcvbulkpipe(serial->dev, |
3618 | + port->bulk_in_endpointAddress), |
3619 | + port->bulk_in_buffer, |
3620 | + mos7840_port->read_urb->transfer_buffer_length, |
3621 | + mos7840_bulk_in_callback, mos7840_port); |
3622 | + } |
3623 | |
3624 | dbg("mos7840_open: bulkin endpoint is %d", |
3625 | port->bulk_in_endpointAddress); |
3626 | @@ -1521,13 +1533,25 @@ static int mos7840_write(struct tty_struct *tty, struct usb_serial_port *port, |
3627 | memcpy(urb->transfer_buffer, current_position, transfer_size); |
3628 | |
3629 | /* fill urb with data and submit */ |
3630 | - usb_fill_bulk_urb(urb, |
3631 | - serial->dev, |
3632 | - usb_sndbulkpipe(serial->dev, |
3633 | - port->bulk_out_endpointAddress), |
3634 | - urb->transfer_buffer, |
3635 | - transfer_size, |
3636 | - mos7840_bulk_out_data_callback, mos7840_port); |
3637 | + if ((serial->num_ports == 2) |
3638 | + && ((((__u16)port->number - |
3639 | + (__u16)(port->serial->minor)) % 2) != 0)) { |
3640 | + usb_fill_bulk_urb(urb, |
3641 | + serial->dev, |
3642 | + usb_sndbulkpipe(serial->dev, |
3643 | + (port->bulk_out_endpointAddress) + 2), |
3644 | + urb->transfer_buffer, |
3645 | + transfer_size, |
3646 | + mos7840_bulk_out_data_callback, mos7840_port); |
3647 | + } else { |
3648 | + usb_fill_bulk_urb(urb, |
3649 | + serial->dev, |
3650 | + usb_sndbulkpipe(serial->dev, |
3651 | + port->bulk_out_endpointAddress), |
3652 | + urb->transfer_buffer, |
3653 | + transfer_size, |
3654 | + mos7840_bulk_out_data_callback, mos7840_port); |
3655 | + } |
3656 | |
3657 | data1 = urb->transfer_buffer; |
3658 | dbg("bulkout endpoint is %d", port->bulk_out_endpointAddress); |
3659 | @@ -1840,7 +1864,7 @@ static int mos7840_send_cmd_write_baud_rate(struct moschip_port *mos7840_port, |
3660 | |
3661 | } else { |
3662 | #ifdef HW_flow_control |
3663 | - / *setting h/w flow control bit to 0 */ |
3664 | + /* setting h/w flow control bit to 0 */ |
3665 | Data = 0xb; |
3666 | mos7840_port->shadowMCR = Data; |
3667 | status = mos7840_set_uart_reg(port, MODEM_CONTROL_REGISTER, |
3668 | @@ -2309,19 +2333,26 @@ static int mos7840_ioctl(struct tty_struct *tty, |
3669 | |
3670 | static int mos7840_calc_num_ports(struct usb_serial *serial) |
3671 | { |
3672 | - int mos7840_num_ports = 0; |
3673 | - |
3674 | - dbg("numberofendpoints: cur %d, alt %d", |
3675 | - (int)serial->interface->cur_altsetting->desc.bNumEndpoints, |
3676 | - (int)serial->interface->altsetting->desc.bNumEndpoints); |
3677 | - if (serial->interface->cur_altsetting->desc.bNumEndpoints == 5) { |
3678 | - mos7840_num_ports = serial->num_ports = 2; |
3679 | - } else if (serial->interface->cur_altsetting->desc.bNumEndpoints == 9) { |
3680 | + __u16 Data = 0x00; |
3681 | + int ret = 0; |
3682 | + int mos7840_num_ports; |
3683 | + |
3684 | + ret = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), |
3685 | + MCS_RDREQ, MCS_RD_RTYPE, 0, GPIO_REGISTER, &Data, |
3686 | + VENDOR_READ_LENGTH, MOS_WDR_TIMEOUT); |
3687 | + |
3688 | + if ((Data & 0x01) == 0) { |
3689 | + mos7840_num_ports = 2; |
3690 | + serial->num_bulk_in = 2; |
3691 | + serial->num_bulk_out = 2; |
3692 | + serial->num_ports = 2; |
3693 | + } else { |
3694 | + mos7840_num_ports = 4; |
3695 | serial->num_bulk_in = 4; |
3696 | serial->num_bulk_out = 4; |
3697 | - mos7840_num_ports = serial->num_ports = 4; |
3698 | + serial->num_ports = 4; |
3699 | } |
3700 | - dbg ("mos7840_num_ports = %d", mos7840_num_ports); |
3701 | + |
3702 | return mos7840_num_ports; |
3703 | } |
3704 | |
3705 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
3706 | index 68fa8c7..54898c9 100644 |
3707 | --- a/drivers/usb/serial/option.c |
3708 | +++ b/drivers/usb/serial/option.c |
3709 | @@ -307,6 +307,9 @@ static void option_instat_callback(struct urb *urb); |
3710 | #define TELIT_VENDOR_ID 0x1bc7 |
3711 | #define TELIT_PRODUCT_UC864E 0x1003 |
3712 | #define TELIT_PRODUCT_UC864G 0x1004 |
3713 | +#define TELIT_PRODUCT_CC864_DUAL 0x1005 |
3714 | +#define TELIT_PRODUCT_CC864_SINGLE 0x1006 |
3715 | +#define TELIT_PRODUCT_DE910_DUAL 0x1010 |
3716 | |
3717 | /* ZTE PRODUCTS */ |
3718 | #define ZTE_VENDOR_ID 0x19d2 |
3719 | @@ -484,6 +487,9 @@ static void option_instat_callback(struct urb *urb); |
3720 | #define LG_VENDOR_ID 0x1004 |
3721 | #define LG_PRODUCT_L02C 0x618f |
3722 | |
3723 | +/* MediaTek products */ |
3724 | +#define MEDIATEK_VENDOR_ID 0x0e8d |
3725 | + |
3726 | /* some devices interfaces need special handling due to a number of reasons */ |
3727 | enum option_blacklist_reason { |
3728 | OPTION_BLACKLIST_NONE = 0, |
3729 | @@ -768,6 +774,9 @@ static const struct usb_device_id option_ids[] = { |
3730 | { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, |
3731 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) }, |
3732 | { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) }, |
3733 | + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) }, |
3734 | + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, |
3735 | + { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, |
3736 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ |
3737 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff), |
3738 | .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, |
3739 | @@ -892,6 +901,8 @@ static const struct usb_device_id option_ids[] = { |
3740 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, |
3741 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) }, |
3742 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, |
3743 | + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), |
3744 | + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
3745 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, |
3746 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, |
3747 | { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, |
3748 | @@ -1198,6 +1209,10 @@ static const struct usb_device_id option_ids[] = { |
3749 | { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) }, |
3750 | { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) }, |
3751 | { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */ |
3752 | + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x00, 0x00) }, |
3753 | + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a1, 0xff, 0x02, 0x01) }, |
3754 | + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x00, 0x00) }, |
3755 | + { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, 0x00a2, 0xff, 0x02, 0x01) }, /* MediaTek MT6276M modem & app port */ |
3756 | { } /* Terminating entry */ |
3757 | }; |
3758 | MODULE_DEVICE_TABLE(usb, option_ids); |
3759 | @@ -1360,6 +1375,7 @@ static int option_probe(struct usb_serial *serial, |
3760 | serial->interface->cur_altsetting->desc.bInterfaceNumber, |
3761 | OPTION_BLACKLIST_RESERVED_IF, |
3762 | (const struct option_blacklist_info *) id->driver_info)) |
3763 | + return -ENODEV; |
3764 | |
3765 | /* Don't bind network interface on Samsung GT-B3730, it is handled by a separate module */ |
3766 | if (serial->dev->descriptor.idVendor == SAMSUNG_VENDOR_ID && |
3767 | diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c |
3768 | index aa9367f..3187d8b 100644 |
3769 | --- a/drivers/usb/serial/qcserial.c |
3770 | +++ b/drivers/usb/serial/qcserial.c |
3771 | @@ -24,34 +24,44 @@ |
3772 | |
3773 | static int debug; |
3774 | |
3775 | +#define DEVICE_G1K(v, p) \ |
3776 | + USB_DEVICE(v, p), .driver_info = 1 |
3777 | + |
3778 | static const struct usb_device_id id_table[] = { |
3779 | - {USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ |
3780 | - {USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
3781 | - {USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ |
3782 | - {USB_DEVICE(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */ |
3783 | - {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ |
3784 | - {USB_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ |
3785 | - {USB_DEVICE(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ |
3786 | - {USB_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ |
3787 | - {USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */ |
3788 | - {USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ |
3789 | - {USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ |
3790 | - {USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ |
3791 | - {USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ |
3792 | - {USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ |
3793 | - {USB_DEVICE(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */ |
3794 | - {USB_DEVICE(0x1557, 0x0a80)}, /* OQO Gobi QDL device */ |
3795 | - {USB_DEVICE(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ |
3796 | - {USB_DEVICE(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ |
3797 | - {USB_DEVICE(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ |
3798 | - {USB_DEVICE(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ |
3799 | - {USB_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ |
3800 | - {USB_DEVICE(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ |
3801 | - {USB_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ |
3802 | - {USB_DEVICE(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ |
3803 | - {USB_DEVICE(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ |
3804 | - {USB_DEVICE(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ |
3805 | - {USB_DEVICE(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ |
3806 | + /* Gobi 1000 devices */ |
3807 | + {DEVICE_G1K(0x05c6, 0x9211)}, /* Acer Gobi QDL device */ |
3808 | + {DEVICE_G1K(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
3809 | + {DEVICE_G1K(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ |
3810 | + {DEVICE_G1K(0x03f0, 0x201d)}, /* HP un2400 Gobi QDL Device */ |
3811 | + {DEVICE_G1K(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ |
3812 | + {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ |
3813 | + {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */ |
3814 | + {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */ |
3815 | + {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ |
3816 | + {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ |
3817 | + {DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ |
3818 | + {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ |
3819 | + {DEVICE_G1K(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */ |
3820 | + {DEVICE_G1K(0x19d2, 0xfff2)}, /* ONDA Gobi QDL device */ |
3821 | + {DEVICE_G1K(0x1557, 0x0a80)}, /* OQO Gobi QDL device */ |
3822 | + {DEVICE_G1K(0x05c6, 0x9001)}, /* Generic Gobi Modem device */ |
3823 | + {DEVICE_G1K(0x05c6, 0x9002)}, /* Generic Gobi Modem device */ |
3824 | + {DEVICE_G1K(0x05c6, 0x9202)}, /* Generic Gobi Modem device */ |
3825 | + {DEVICE_G1K(0x05c6, 0x9203)}, /* Generic Gobi Modem device */ |
3826 | + {DEVICE_G1K(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ |
3827 | + {DEVICE_G1K(0x05c6, 0x9008)}, /* Generic Gobi QDL device */ |
3828 | + {DEVICE_G1K(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ |
3829 | + {DEVICE_G1K(0x05c6, 0x9201)}, /* Generic Gobi QDL device */ |
3830 | + {DEVICE_G1K(0x05c6, 0x9221)}, /* Generic Gobi QDL device */ |
3831 | + {DEVICE_G1K(0x05c6, 0x9231)}, /* Generic Gobi QDL device */ |
3832 | + {DEVICE_G1K(0x1f45, 0x0001)}, /* Unknown Gobi QDL device */ |
3833 | + |
3834 | + /* Gobi 2000 devices */ |
3835 | + {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi 2000 QDL device */ |
3836 | + {USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi 2000 QDL device */ |
3837 | + {USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi 2000 QDL device */ |
3838 | + {USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi 2000 QDL device */ |
3839 | + {USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi 2000 QDL device */ |
3840 | {USB_DEVICE(0x413c, 0x8185)}, /* Dell Gobi 2000 QDL device (N0218, VU936) */ |
3841 | {USB_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ |
3842 | {USB_DEVICE(0x05c6, 0x9208)}, /* Generic Gobi 2000 QDL device */ |
3843 | @@ -86,7 +96,18 @@ static const struct usb_device_id id_table[] = { |
3844 | {USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ |
3845 | {USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */ |
3846 | {USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ |
3847 | + |
3848 | + /* Gobi 3000 devices */ |
3849 | + {USB_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Gobi 3000 QDL */ |
3850 | + {USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */ |
3851 | + {USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ |
3852 | + {USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */ |
3853 | + {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */ |
3854 | + {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */ |
3855 | + {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ |
3856 | {USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ |
3857 | + {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */ |
3858 | + {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */ |
3859 | { } /* Terminating entry */ |
3860 | }; |
3861 | MODULE_DEVICE_TABLE(usb, id_table); |
3862 | @@ -108,8 +129,10 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) |
3863 | int retval = -ENODEV; |
3864 | __u8 nintf; |
3865 | __u8 ifnum; |
3866 | + bool is_gobi1k = id->driver_info ? true : false; |
3867 | |
3868 | dbg("%s", __func__); |
3869 | + dbg("Is Gobi 1000 = %d", is_gobi1k); |
3870 | |
3871 | nintf = serial->dev->actconfig->desc.bNumInterfaces; |
3872 | dbg("Num Interfaces = %d", nintf); |
3873 | @@ -157,15 +180,25 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) |
3874 | |
3875 | case 3: |
3876 | case 4: |
3877 | - /* Composite mode */ |
3878 | - /* ifnum == 0 is a broadband network adapter */ |
3879 | - if (ifnum == 1) { |
3880 | - /* |
3881 | - * Diagnostics Monitor (serial line 9600 8N1) |
3882 | - * Qualcomm DM protocol |
3883 | - * use "libqcdm" (ModemManager) for communication |
3884 | - */ |
3885 | - dbg("Diagnostics Monitor found"); |
3886 | + /* Composite mode; don't bind to the QMI/net interface as that |
3887 | + * gets handled by other drivers. |
3888 | + */ |
3889 | + |
3890 | + /* Gobi 1K USB layout: |
3891 | + * 0: serial port (doesn't respond) |
3892 | + * 1: serial port (doesn't respond) |
3893 | + * 2: AT-capable modem port |
3894 | + * 3: QMI/net |
3895 | + * |
3896 | + * Gobi 2K+ USB layout: |
3897 | + * 0: QMI/net |
3898 | + * 1: DM/DIAG (use libqcdm from ModemManager for communication) |
3899 | + * 2: AT-capable modem port |
3900 | + * 3: NMEA |
3901 | + */ |
3902 | + |
3903 | + if (ifnum == 1 && !is_gobi1k) { |
3904 | + dbg("Gobi 2K+ DM/DIAG interface found"); |
3905 | retval = usb_set_interface(serial->dev, ifnum, 0); |
3906 | if (retval < 0) { |
3907 | dev_err(&serial->dev->dev, |
3908 | @@ -184,13 +217,13 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) |
3909 | retval = -ENODEV; |
3910 | kfree(data); |
3911 | } |
3912 | - } else if (ifnum==3) { |
3913 | + } else if (ifnum==3 && !is_gobi1k) { |
3914 | /* |
3915 | * NMEA (serial line 9600 8N1) |
3916 | * # echo "\$GPS_START" > /dev/ttyUSBx |
3917 | * # echo "\$GPS_STOP" > /dev/ttyUSBx |
3918 | */ |
3919 | - dbg("NMEA GPS interface found"); |
3920 | + dbg("Gobi 2K+ NMEA GPS interface found"); |
3921 | retval = usb_set_interface(serial->dev, ifnum, 0); |
3922 | if (retval < 0) { |
3923 | dev_err(&serial->dev->dev, |
3924 | diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c |
3925 | index 32c93d7..e39b188 100644 |
3926 | --- a/drivers/usb/storage/realtek_cr.c |
3927 | +++ b/drivers/usb/storage/realtek_cr.c |
3928 | @@ -509,9 +509,14 @@ static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len) |
3929 | int retval; |
3930 | u16 addr = 0xFE47; |
3931 | u8 cmnd[12] = {0}; |
3932 | + u8 *buf; |
3933 | |
3934 | US_DEBUGP("%s, addr = 0x%x, len = %d\n", __FUNCTION__, addr, len); |
3935 | |
3936 | + buf = kmemdup(data, len, GFP_NOIO); |
3937 | + if (!buf) |
3938 | + return USB_STOR_TRANSPORT_ERROR; |
3939 | + |
3940 | cmnd[0] = 0xF0; |
3941 | cmnd[1] = 0x0E; |
3942 | cmnd[2] = (u8)(addr >> 8); |
3943 | @@ -519,7 +524,8 @@ static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len) |
3944 | cmnd[4] = (u8)(len >> 8); |
3945 | cmnd[5] = (u8)len; |
3946 | |
3947 | - retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, data, len, DMA_TO_DEVICE, NULL); |
3948 | + retval = rts51x_bulk_transport_special(us, 0, cmnd, 12, buf, len, DMA_TO_DEVICE, NULL); |
3949 | + kfree(buf); |
3950 | if (retval != USB_STOR_TRANSPORT_GOOD) { |
3951 | return -EIO; |
3952 | } |
3953 | diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c |
3954 | index 772f601..6f54f74 100644 |
3955 | --- a/drivers/video/backlight/tosa_lcd.c |
3956 | +++ b/drivers/video/backlight/tosa_lcd.c |
3957 | @@ -271,7 +271,7 @@ static int tosa_lcd_resume(struct spi_device *spi) |
3958 | } |
3959 | #else |
3960 | #define tosa_lcd_suspend NULL |
3961 | -#define tosa_lcd_reume NULL |
3962 | +#define tosa_lcd_resume NULL |
3963 | #endif |
3964 | |
3965 | static struct spi_driver tosa_lcd_driver = { |
3966 | diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c |
3967 | index ad93629..7a41220 100644 |
3968 | --- a/drivers/video/fbmem.c |
3969 | +++ b/drivers/video/fbmem.c |
3970 | @@ -1651,6 +1651,7 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) |
3971 | if (ret) |
3972 | return -EINVAL; |
3973 | |
3974 | + unlink_framebuffer(fb_info); |
3975 | if (fb_info->pixmap.addr && |
3976 | (fb_info->pixmap.flags & FB_PIXMAP_DEFAULT)) |
3977 | kfree(fb_info->pixmap.addr); |
3978 | @@ -1658,7 +1659,6 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) |
3979 | registered_fb[i] = NULL; |
3980 | num_registered_fb--; |
3981 | fb_cleanup_device(fb_info); |
3982 | - device_destroy(fb_class, MKDEV(FB_MAJOR, i)); |
3983 | event.info = fb_info; |
3984 | fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); |
3985 | |
3986 | @@ -1667,6 +1667,22 @@ static int do_unregister_framebuffer(struct fb_info *fb_info) |
3987 | return 0; |
3988 | } |
3989 | |
3990 | +int unlink_framebuffer(struct fb_info *fb_info) |
3991 | +{ |
3992 | + int i; |
3993 | + |
3994 | + i = fb_info->node; |
3995 | + if (i < 0 || i >= FB_MAX || registered_fb[i] != fb_info) |
3996 | + return -EINVAL; |
3997 | + |
3998 | + if (fb_info->dev) { |
3999 | + device_destroy(fb_class, MKDEV(FB_MAJOR, i)); |
4000 | + fb_info->dev = NULL; |
4001 | + } |
4002 | + return 0; |
4003 | +} |
4004 | +EXPORT_SYMBOL(unlink_framebuffer); |
4005 | + |
4006 | void remove_conflicting_framebuffers(struct apertures_struct *a, |
4007 | const char *name, bool primary) |
4008 | { |
4009 | diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c |
4010 | index 3473e75..41746bb 100644 |
4011 | --- a/drivers/video/udlfb.c |
4012 | +++ b/drivers/video/udlfb.c |
4013 | @@ -1739,7 +1739,7 @@ static void dlfb_usb_disconnect(struct usb_interface *interface) |
4014 | for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++) |
4015 | device_remove_file(info->dev, &fb_device_attrs[i]); |
4016 | device_remove_bin_file(info->dev, &edid_attr); |
4017 | - |
4018 | + unlink_framebuffer(info); |
4019 | usb_set_intfdata(interface, NULL); |
4020 | |
4021 | /* if clients still have us open, will be freed on last close */ |
4022 | diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c |
4023 | index 8f1fe32..b4c2c99 100644 |
4024 | --- a/fs/cifs/cifsfs.c |
4025 | +++ b/fs/cifs/cifsfs.c |
4026 | @@ -76,7 +76,7 @@ MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 " |
4027 | unsigned int cifs_max_pending = CIFS_MAX_REQ; |
4028 | module_param(cifs_max_pending, int, 0444); |
4029 | MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. " |
4030 | - "Default: 50 Range: 2 to 256"); |
4031 | + "Default: 32767 Range: 2 to 32767."); |
4032 | unsigned short echo_retries = 5; |
4033 | module_param(echo_retries, ushort, 0644); |
4034 | MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and " |
4035 | @@ -1116,9 +1116,9 @@ init_cifs(void) |
4036 | if (cifs_max_pending < 2) { |
4037 | cifs_max_pending = 2; |
4038 | cFYI(1, "cifs_max_pending set to min of 2"); |
4039 | - } else if (cifs_max_pending > 256) { |
4040 | - cifs_max_pending = 256; |
4041 | - cFYI(1, "cifs_max_pending set to max of 256"); |
4042 | + } else if (cifs_max_pending > CIFS_MAX_REQ) { |
4043 | + cifs_max_pending = CIFS_MAX_REQ; |
4044 | + cFYI(1, "cifs_max_pending set to max of %u", CIFS_MAX_REQ); |
4045 | } |
4046 | |
4047 | rc = cifs_fscache_register(); |
4048 | diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h |
4049 | index 8238aa1..c467ac8 100644 |
4050 | --- a/fs/cifs/cifsglob.h |
4051 | +++ b/fs/cifs/cifsglob.h |
4052 | @@ -55,14 +55,9 @@ |
4053 | |
4054 | /* |
4055 | * MAX_REQ is the maximum number of requests that WE will send |
4056 | - * on one socket concurrently. It also matches the most common |
4057 | - * value of max multiplex returned by servers. We may |
4058 | - * eventually want to use the negotiated value (in case |
4059 | - * future servers can handle more) when we are more confident that |
4060 | - * we will not have problems oveloading the socket with pending |
4061 | - * write data. |
4062 | + * on one socket concurrently. |
4063 | */ |
4064 | -#define CIFS_MAX_REQ 50 |
4065 | +#define CIFS_MAX_REQ 32767 |
4066 | |
4067 | #define RFC1001_NAME_LEN 15 |
4068 | #define RFC1001_NAME_LEN_WITH_NULL (RFC1001_NAME_LEN + 1) |
4069 | @@ -263,6 +258,7 @@ struct TCP_Server_Info { |
4070 | bool session_estab; /* mark when very first sess is established */ |
4071 | u16 dialect; /* dialect index that server chose */ |
4072 | enum securityEnum secType; |
4073 | + bool oplocks:1; /* enable oplocks */ |
4074 | unsigned int maxReq; /* Clients should submit no more */ |
4075 | /* than maxReq distinct unanswered SMBs to the server when using */ |
4076 | /* multiplexed reads or writes */ |
4077 | diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c |
4078 | index 6600aa2..0e6adac 100644 |
4079 | --- a/fs/cifs/cifssmb.c |
4080 | +++ b/fs/cifs/cifssmb.c |
4081 | @@ -458,7 +458,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) |
4082 | goto neg_err_exit; |
4083 | } |
4084 | server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode); |
4085 | - server->maxReq = le16_to_cpu(rsp->MaxMpxCount); |
4086 | + server->maxReq = min_t(unsigned int, |
4087 | + le16_to_cpu(rsp->MaxMpxCount), |
4088 | + cifs_max_pending); |
4089 | + server->oplocks = server->maxReq > 1 ? enable_oplocks : false; |
4090 | server->maxBuf = le16_to_cpu(rsp->MaxBufSize); |
4091 | server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); |
4092 | /* even though we do not use raw we might as well set this |
4093 | @@ -564,7 +567,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) |
4094 | |
4095 | /* one byte, so no need to convert this or EncryptionKeyLen from |
4096 | little endian */ |
4097 | - server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); |
4098 | + server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount), |
4099 | + cifs_max_pending); |
4100 | + server->oplocks = server->maxReq > 1 ? enable_oplocks : false; |
4101 | /* probably no need to store and check maxvcs */ |
4102 | server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); |
4103 | server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); |
4104 | diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c |
4105 | index 720edf5..9e0675a 100644 |
4106 | --- a/fs/cifs/connect.c |
4107 | +++ b/fs/cifs/connect.c |
4108 | @@ -625,14 +625,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) |
4109 | spin_unlock(&GlobalMid_Lock); |
4110 | wake_up_all(&server->response_q); |
4111 | |
4112 | - /* |
4113 | - * Check if we have blocked requests that need to free. Note that |
4114 | - * cifs_max_pending is normally 50, but can be set at module install |
4115 | - * time to as little as two. |
4116 | - */ |
4117 | + /* Check if we have blocked requests that need to free. */ |
4118 | spin_lock(&GlobalMid_Lock); |
4119 | - if (atomic_read(&server->inFlight) >= cifs_max_pending) |
4120 | - atomic_set(&server->inFlight, cifs_max_pending - 1); |
4121 | + if (atomic_read(&server->inFlight) >= server->maxReq) |
4122 | + atomic_set(&server->inFlight, server->maxReq - 1); |
4123 | /* |
4124 | * We do not want to set the max_pending too low or we could end up |
4125 | * with the counter going negative. |
4126 | @@ -1890,6 +1886,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info) |
4127 | tcp_ses->noautotune = volume_info->noautotune; |
4128 | tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay; |
4129 | atomic_set(&tcp_ses->inFlight, 0); |
4130 | + tcp_ses->maxReq = 1; /* enough to send negotiate request */ |
4131 | init_waitqueue_head(&tcp_ses->response_q); |
4132 | init_waitqueue_head(&tcp_ses->request_q); |
4133 | INIT_LIST_HEAD(&tcp_ses->pending_mid_q); |
4134 | @@ -3220,7 +3217,7 @@ cifs_ra_pages(struct cifs_sb_info *cifs_sb) |
4135 | int |
4136 | cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) |
4137 | { |
4138 | - int rc = 0; |
4139 | + int rc; |
4140 | int xid; |
4141 | struct cifs_ses *pSesInfo; |
4142 | struct cifs_tcon *tcon; |
4143 | @@ -3247,6 +3244,7 @@ try_mount_again: |
4144 | FreeXid(xid); |
4145 | } |
4146 | #endif |
4147 | + rc = 0; |
4148 | tcon = NULL; |
4149 | pSesInfo = NULL; |
4150 | srvTcp = NULL; |
4151 | diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c |
4152 | index bf68b4f..6937e7c 100644 |
4153 | --- a/fs/cifs/dir.c |
4154 | +++ b/fs/cifs/dir.c |
4155 | @@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, |
4156 | } |
4157 | tcon = tlink_tcon(tlink); |
4158 | |
4159 | - if (enable_oplocks) |
4160 | + if (tcon->ses->server->oplocks) |
4161 | oplock = REQ_OPLOCK; |
4162 | |
4163 | if (nd) |
4164 | @@ -492,7 +492,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, |
4165 | { |
4166 | int xid; |
4167 | int rc = 0; /* to get around spurious gcc warning, set to zero here */ |
4168 | - __u32 oplock = enable_oplocks ? REQ_OPLOCK : 0; |
4169 | + __u32 oplock; |
4170 | __u16 fileHandle = 0; |
4171 | bool posix_open = false; |
4172 | struct cifs_sb_info *cifs_sb; |
4173 | @@ -518,6 +518,8 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, |
4174 | } |
4175 | pTcon = tlink_tcon(tlink); |
4176 | |
4177 | + oplock = pTcon->ses->server->oplocks ? REQ_OPLOCK : 0; |
4178 | + |
4179 | /* |
4180 | * Don't allow the separator character in a path component. |
4181 | * The VFS will not allow "/", but "\" is allowed by posix. |
4182 | diff --git a/fs/cifs/file.c b/fs/cifs/file.c |
4183 | index 5e64748..159fcc5 100644 |
4184 | --- a/fs/cifs/file.c |
4185 | +++ b/fs/cifs/file.c |
4186 | @@ -380,7 +380,7 @@ int cifs_open(struct inode *inode, struct file *file) |
4187 | cFYI(1, "inode = 0x%p file flags are 0x%x for %s", |
4188 | inode, file->f_flags, full_path); |
4189 | |
4190 | - if (enable_oplocks) |
4191 | + if (tcon->ses->server->oplocks) |
4192 | oplock = REQ_OPLOCK; |
4193 | else |
4194 | oplock = 0; |
4195 | @@ -505,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) |
4196 | cFYI(1, "inode = 0x%p file flags 0x%x for %s", |
4197 | inode, pCifsFile->f_flags, full_path); |
4198 | |
4199 | - if (enable_oplocks) |
4200 | + if (tcon->ses->server->oplocks) |
4201 | oplock = REQ_OPLOCK; |
4202 | else |
4203 | oplock = 0; |
4204 | @@ -960,9 +960,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) |
4205 | INIT_LIST_HEAD(&locks_to_send); |
4206 | |
4207 | /* |
4208 | - * Allocating count locks is enough because no locks can be added to |
4209 | - * the list while we are holding cinode->lock_mutex that protects |
4210 | - * locking operations of this inode. |
4211 | + * Allocating count locks is enough because no FL_POSIX locks can be |
4212 | + * added to the list while we are holding cinode->lock_mutex that |
4213 | + * protects locking operations of this inode. |
4214 | */ |
4215 | for (; i < count; i++) { |
4216 | lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); |
4217 | @@ -973,18 +973,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) |
4218 | list_add_tail(&lck->llist, &locks_to_send); |
4219 | } |
4220 | |
4221 | - i = 0; |
4222 | el = locks_to_send.next; |
4223 | lock_flocks(); |
4224 | cifs_for_each_lock(cfile->dentry->d_inode, before) { |
4225 | + flock = *before; |
4226 | + if ((flock->fl_flags & FL_POSIX) == 0) |
4227 | + continue; |
4228 | if (el == &locks_to_send) { |
4229 | - /* something is really wrong */ |
4230 | + /* |
4231 | + * The list ended. We don't have enough allocated |
4232 | + * structures - something is really wrong. |
4233 | + */ |
4234 | cERROR(1, "Can't push all brlocks!"); |
4235 | break; |
4236 | } |
4237 | - flock = *before; |
4238 | - if ((flock->fl_flags & FL_POSIX) == 0) |
4239 | - continue; |
4240 | length = 1 + flock->fl_end - flock->fl_start; |
4241 | if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK) |
4242 | type = CIFS_RDLCK; |
4243 | @@ -996,7 +998,6 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) |
4244 | lck->length = length; |
4245 | lck->type = type; |
4246 | lck->offset = flock->fl_start; |
4247 | - i++; |
4248 | el = el->next; |
4249 | } |
4250 | unlock_flocks(); |
4251 | diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c |
4252 | index 0cc9584..99a27cf 100644 |
4253 | --- a/fs/cifs/transport.c |
4254 | +++ b/fs/cifs/transport.c |
4255 | @@ -265,12 +265,12 @@ static int wait_for_free_request(struct TCP_Server_Info *server, |
4256 | |
4257 | spin_lock(&GlobalMid_Lock); |
4258 | while (1) { |
4259 | - if (atomic_read(&server->inFlight) >= cifs_max_pending) { |
4260 | + if (atomic_read(&server->inFlight) >= server->maxReq) { |
4261 | spin_unlock(&GlobalMid_Lock); |
4262 | cifs_num_waiters_inc(server); |
4263 | wait_event(server->request_q, |
4264 | atomic_read(&server->inFlight) |
4265 | - < cifs_max_pending); |
4266 | + < server->maxReq); |
4267 | cifs_num_waiters_dec(server); |
4268 | spin_lock(&GlobalMid_Lock); |
4269 | } else { |
4270 | diff --git a/fs/dcache.c b/fs/dcache.c |
4271 | index f7908ae..eb723d3 100644 |
4272 | --- a/fs/dcache.c |
4273 | +++ b/fs/dcache.c |
4274 | @@ -2357,6 +2357,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) |
4275 | if (d_ancestor(alias, dentry)) { |
4276 | /* Check for loops */ |
4277 | actual = ERR_PTR(-ELOOP); |
4278 | + spin_unlock(&inode->i_lock); |
4279 | } else if (IS_ROOT(alias)) { |
4280 | /* Is this an anonymous mountpoint that we |
4281 | * could splice into our tree? */ |
4282 | @@ -2366,7 +2367,7 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) |
4283 | goto found; |
4284 | } else { |
4285 | /* Nope, but we must(!) avoid directory |
4286 | - * aliasing */ |
4287 | + * aliasing. This drops inode->i_lock */ |
4288 | actual = __d_unalias(inode, dentry, alias); |
4289 | } |
4290 | write_sequnlock(&rename_lock); |
4291 | diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h |
4292 | index 5b0e26a..dbae4d9 100644 |
4293 | --- a/fs/ext4/ext4.h |
4294 | +++ b/fs/ext4/ext4.h |
4295 | @@ -184,6 +184,8 @@ struct mpage_da_data { |
4296 | #define EXT4_IO_END_UNWRITTEN 0x0001 |
4297 | #define EXT4_IO_END_ERROR 0x0002 |
4298 | #define EXT4_IO_END_QUEUED 0x0004 |
4299 | +#define EXT4_IO_END_DIRECT 0x0008 |
4300 | +#define EXT4_IO_END_IN_FSYNC 0x0010 |
4301 | |
4302 | struct ext4_io_page { |
4303 | struct page *p_page; |
4304 | diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h |
4305 | index 5802fa1..95af6f8 100644 |
4306 | --- a/fs/ext4/ext4_jbd2.h |
4307 | +++ b/fs/ext4/ext4_jbd2.h |
4308 | @@ -261,43 +261,45 @@ static inline void ext4_update_inode_fsync_trans(handle_t *handle, |
4309 | /* super.c */ |
4310 | int ext4_force_commit(struct super_block *sb); |
4311 | |
4312 | -static inline int ext4_should_journal_data(struct inode *inode) |
4313 | +/* |
4314 | + * Ext4 inode journal modes |
4315 | + */ |
4316 | +#define EXT4_INODE_JOURNAL_DATA_MODE 0x01 /* journal data mode */ |
4317 | +#define EXT4_INODE_ORDERED_DATA_MODE 0x02 /* ordered data mode */ |
4318 | +#define EXT4_INODE_WRITEBACK_DATA_MODE 0x04 /* writeback data mode */ |
4319 | + |
4320 | +static inline int ext4_inode_journal_mode(struct inode *inode) |
4321 | { |
4322 | if (EXT4_JOURNAL(inode) == NULL) |
4323 | - return 0; |
4324 | - if (!S_ISREG(inode->i_mode)) |
4325 | - return 1; |
4326 | - if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) |
4327 | - return 1; |
4328 | - if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) |
4329 | - return 1; |
4330 | - return 0; |
4331 | + return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */ |
4332 | + /* We do not support data journalling with delayed allocation */ |
4333 | + if (!S_ISREG(inode->i_mode) || |
4334 | + test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) |
4335 | + return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */ |
4336 | + if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA) && |
4337 | + !test_opt(inode->i_sb, DELALLOC)) |
4338 | + return EXT4_INODE_JOURNAL_DATA_MODE; /* journal data */ |
4339 | + if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) |
4340 | + return EXT4_INODE_ORDERED_DATA_MODE; /* ordered */ |
4341 | + if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) |
4342 | + return EXT4_INODE_WRITEBACK_DATA_MODE; /* writeback */ |
4343 | + else |
4344 | + BUG(); |
4345 | +} |
4346 | + |
4347 | +static inline int ext4_should_journal_data(struct inode *inode) |
4348 | +{ |
4349 | + return ext4_inode_journal_mode(inode) & EXT4_INODE_JOURNAL_DATA_MODE; |
4350 | } |
4351 | |
4352 | static inline int ext4_should_order_data(struct inode *inode) |
4353 | { |
4354 | - if (EXT4_JOURNAL(inode) == NULL) |
4355 | - return 0; |
4356 | - if (!S_ISREG(inode->i_mode)) |
4357 | - return 0; |
4358 | - if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) |
4359 | - return 0; |
4360 | - if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA) |
4361 | - return 1; |
4362 | - return 0; |
4363 | + return ext4_inode_journal_mode(inode) & EXT4_INODE_ORDERED_DATA_MODE; |
4364 | } |
4365 | |
4366 | static inline int ext4_should_writeback_data(struct inode *inode) |
4367 | { |
4368 | - if (EXT4_JOURNAL(inode) == NULL) |
4369 | - return 1; |
4370 | - if (!S_ISREG(inode->i_mode)) |
4371 | - return 0; |
4372 | - if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA)) |
4373 | - return 0; |
4374 | - if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA) |
4375 | - return 1; |
4376 | - return 0; |
4377 | + return ext4_inode_journal_mode(inode) & EXT4_INODE_WRITEBACK_DATA_MODE; |
4378 | } |
4379 | |
4380 | /* |
4381 | diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c |
4382 | index 607b155..7507036 100644 |
4383 | --- a/fs/ext4/extents.c |
4384 | +++ b/fs/ext4/extents.c |
4385 | @@ -301,6 +301,8 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) |
4386 | ext4_fsblk_t block = ext4_ext_pblock(ext); |
4387 | int len = ext4_ext_get_actual_len(ext); |
4388 | |
4389 | + if (len == 0) |
4390 | + return 0; |
4391 | return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); |
4392 | } |
4393 | |
4394 | diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c |
4395 | index 00a2cb7..bb6c7d8 100644 |
4396 | --- a/fs/ext4/fsync.c |
4397 | +++ b/fs/ext4/fsync.c |
4398 | @@ -89,6 +89,7 @@ int ext4_flush_completed_IO(struct inode *inode) |
4399 | io = list_entry(ei->i_completed_io_list.next, |
4400 | ext4_io_end_t, list); |
4401 | list_del_init(&io->list); |
4402 | + io->flag |= EXT4_IO_END_IN_FSYNC; |
4403 | /* |
4404 | * Calling ext4_end_io_nolock() to convert completed |
4405 | * IO to written. |
4406 | @@ -108,6 +109,7 @@ int ext4_flush_completed_IO(struct inode *inode) |
4407 | if (ret < 0) |
4408 | ret2 = ret; |
4409 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
4410 | + io->flag &= ~EXT4_IO_END_IN_FSYNC; |
4411 | } |
4412 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
4413 | return (ret2 < 0) ? ret2 : 0; |
4414 | diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c |
4415 | index 92655fd..3ce7613 100644 |
4416 | --- a/fs/ext4/inode.c |
4417 | +++ b/fs/ext4/inode.c |
4418 | @@ -2480,13 +2480,14 @@ static int ext4_da_write_end(struct file *file, |
4419 | int write_mode = (int)(unsigned long)fsdata; |
4420 | |
4421 | if (write_mode == FALL_BACK_TO_NONDELALLOC) { |
4422 | - if (ext4_should_order_data(inode)) { |
4423 | + switch (ext4_inode_journal_mode(inode)) { |
4424 | + case EXT4_INODE_ORDERED_DATA_MODE: |
4425 | return ext4_ordered_write_end(file, mapping, pos, |
4426 | len, copied, page, fsdata); |
4427 | - } else if (ext4_should_writeback_data(inode)) { |
4428 | + case EXT4_INODE_WRITEBACK_DATA_MODE: |
4429 | return ext4_writeback_write_end(file, mapping, pos, |
4430 | len, copied, page, fsdata); |
4431 | - } else { |
4432 | + default: |
4433 | BUG(); |
4434 | } |
4435 | } |
4436 | @@ -2793,9 +2794,6 @@ out: |
4437 | |
4438 | /* queue the work to convert unwritten extents to written */ |
4439 | queue_work(wq, &io_end->work); |
4440 | - |
4441 | - /* XXX: probably should move into the real I/O completion handler */ |
4442 | - inode_dio_done(inode); |
4443 | } |
4444 | |
4445 | static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate) |
4446 | @@ -2919,9 +2917,12 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb, |
4447 | iocb->private = NULL; |
4448 | EXT4_I(inode)->cur_aio_dio = NULL; |
4449 | if (!is_sync_kiocb(iocb)) { |
4450 | - iocb->private = ext4_init_io_end(inode, GFP_NOFS); |
4451 | - if (!iocb->private) |
4452 | + ext4_io_end_t *io_end = |
4453 | + ext4_init_io_end(inode, GFP_NOFS); |
4454 | + if (!io_end) |
4455 | return -ENOMEM; |
4456 | + io_end->flag |= EXT4_IO_END_DIRECT; |
4457 | + iocb->private = io_end; |
4458 | /* |
4459 | * we save the io structure for current async |
4460 | * direct IO, so that later ext4_map_blocks() |
4461 | @@ -3084,18 +3085,25 @@ static const struct address_space_operations ext4_da_aops = { |
4462 | |
4463 | void ext4_set_aops(struct inode *inode) |
4464 | { |
4465 | - if (ext4_should_order_data(inode) && |
4466 | - test_opt(inode->i_sb, DELALLOC)) |
4467 | - inode->i_mapping->a_ops = &ext4_da_aops; |
4468 | - else if (ext4_should_order_data(inode)) |
4469 | - inode->i_mapping->a_ops = &ext4_ordered_aops; |
4470 | - else if (ext4_should_writeback_data(inode) && |
4471 | - test_opt(inode->i_sb, DELALLOC)) |
4472 | - inode->i_mapping->a_ops = &ext4_da_aops; |
4473 | - else if (ext4_should_writeback_data(inode)) |
4474 | - inode->i_mapping->a_ops = &ext4_writeback_aops; |
4475 | - else |
4476 | + switch (ext4_inode_journal_mode(inode)) { |
4477 | + case EXT4_INODE_ORDERED_DATA_MODE: |
4478 | + if (test_opt(inode->i_sb, DELALLOC)) |
4479 | + inode->i_mapping->a_ops = &ext4_da_aops; |
4480 | + else |
4481 | + inode->i_mapping->a_ops = &ext4_ordered_aops; |
4482 | + break; |
4483 | + case EXT4_INODE_WRITEBACK_DATA_MODE: |
4484 | + if (test_opt(inode->i_sb, DELALLOC)) |
4485 | + inode->i_mapping->a_ops = &ext4_da_aops; |
4486 | + else |
4487 | + inode->i_mapping->a_ops = &ext4_writeback_aops; |
4488 | + break; |
4489 | + case EXT4_INODE_JOURNAL_DATA_MODE: |
4490 | inode->i_mapping->a_ops = &ext4_journalled_aops; |
4491 | + break; |
4492 | + default: |
4493 | + BUG(); |
4494 | + } |
4495 | } |
4496 | |
4497 | |
4498 | diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c |
4499 | index 7e106c8..24feb1c 100644 |
4500 | --- a/fs/ext4/page-io.c |
4501 | +++ b/fs/ext4/page-io.c |
4502 | @@ -111,6 +111,8 @@ int ext4_end_io_nolock(ext4_io_end_t *io) |
4503 | if (io->iocb) |
4504 | aio_complete(io->iocb, io->result, 0); |
4505 | |
4506 | + if (io->flag & EXT4_IO_END_DIRECT) |
4507 | + inode_dio_done(inode); |
4508 | /* Wake up anyone waiting on unwritten extent conversion */ |
4509 | if (atomic_dec_and_test(&EXT4_I(inode)->i_aiodio_unwritten)) |
4510 | wake_up_all(ext4_ioend_wq(io->inode)); |
4511 | @@ -128,12 +130,18 @@ static void ext4_end_io_work(struct work_struct *work) |
4512 | unsigned long flags; |
4513 | |
4514 | spin_lock_irqsave(&ei->i_completed_io_lock, flags); |
4515 | + if (io->flag & EXT4_IO_END_IN_FSYNC) |
4516 | + goto requeue; |
4517 | if (list_empty(&io->list)) { |
4518 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
4519 | goto free; |
4520 | } |
4521 | |
4522 | if (!mutex_trylock(&inode->i_mutex)) { |
4523 | + bool was_queued; |
4524 | +requeue: |
4525 | + was_queued = !!(io->flag & EXT4_IO_END_QUEUED); |
4526 | + io->flag |= EXT4_IO_END_QUEUED; |
4527 | spin_unlock_irqrestore(&ei->i_completed_io_lock, flags); |
4528 | /* |
4529 | * Requeue the work instead of waiting so that the work |
4530 | @@ -146,9 +154,8 @@ static void ext4_end_io_work(struct work_struct *work) |
4531 | * yield the cpu if it sees an end_io request that has already |
4532 | * been requeued. |
4533 | */ |
4534 | - if (io->flag & EXT4_IO_END_QUEUED) |
4535 | + if (was_queued) |
4536 | yield(); |
4537 | - io->flag |= EXT4_IO_END_QUEUED; |
4538 | return; |
4539 | } |
4540 | list_del_init(&io->list); |
4541 | diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
4542 | index 0be5a78..2d0ca24 100644 |
4543 | --- a/fs/hugetlbfs/inode.c |
4544 | +++ b/fs/hugetlbfs/inode.c |
4545 | @@ -238,17 +238,10 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, |
4546 | loff_t isize; |
4547 | ssize_t retval = 0; |
4548 | |
4549 | - mutex_lock(&inode->i_mutex); |
4550 | - |
4551 | /* validate length */ |
4552 | if (len == 0) |
4553 | goto out; |
4554 | |
4555 | - isize = i_size_read(inode); |
4556 | - if (!isize) |
4557 | - goto out; |
4558 | - |
4559 | - end_index = (isize - 1) >> huge_page_shift(h); |
4560 | for (;;) { |
4561 | struct page *page; |
4562 | unsigned long nr, ret; |
4563 | @@ -256,18 +249,21 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, |
4564 | |
4565 | /* nr is the maximum number of bytes to copy from this page */ |
4566 | nr = huge_page_size(h); |
4567 | + isize = i_size_read(inode); |
4568 | + if (!isize) |
4569 | + goto out; |
4570 | + end_index = (isize - 1) >> huge_page_shift(h); |
4571 | if (index >= end_index) { |
4572 | if (index > end_index) |
4573 | goto out; |
4574 | nr = ((isize - 1) & ~huge_page_mask(h)) + 1; |
4575 | - if (nr <= offset) { |
4576 | + if (nr <= offset) |
4577 | goto out; |
4578 | - } |
4579 | } |
4580 | nr = nr - offset; |
4581 | |
4582 | /* Find the page */ |
4583 | - page = find_get_page(mapping, index); |
4584 | + page = find_lock_page(mapping, index); |
4585 | if (unlikely(page == NULL)) { |
4586 | /* |
4587 | * We have a HOLE, zero out the user-buffer for the |
4588 | @@ -279,17 +275,18 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, |
4589 | else |
4590 | ra = 0; |
4591 | } else { |
4592 | + unlock_page(page); |
4593 | + |
4594 | /* |
4595 | * We have the page, copy it to user space buffer. |
4596 | */ |
4597 | ra = hugetlbfs_read_actor(page, offset, buf, len, nr); |
4598 | ret = ra; |
4599 | + page_cache_release(page); |
4600 | } |
4601 | if (ra < 0) { |
4602 | if (retval == 0) |
4603 | retval = ra; |
4604 | - if (page) |
4605 | - page_cache_release(page); |
4606 | goto out; |
4607 | } |
4608 | |
4609 | @@ -299,16 +296,12 @@ static ssize_t hugetlbfs_read(struct file *filp, char __user *buf, |
4610 | index += offset >> huge_page_shift(h); |
4611 | offset &= ~huge_page_mask(h); |
4612 | |
4613 | - if (page) |
4614 | - page_cache_release(page); |
4615 | - |
4616 | /* short read or no more work */ |
4617 | if ((ret != nr) || (len == 0)) |
4618 | break; |
4619 | } |
4620 | out: |
4621 | *ppos = ((loff_t)index << huge_page_shift(h)) + offset; |
4622 | - mutex_unlock(&inode->i_mutex); |
4623 | return retval; |
4624 | } |
4625 | |
4626 | diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c |
4627 | index a0e41a4..8267de5 100644 |
4628 | --- a/fs/jbd2/transaction.c |
4629 | +++ b/fs/jbd2/transaction.c |
4630 | @@ -1948,6 +1948,8 @@ zap_buffer_unlocked: |
4631 | clear_buffer_mapped(bh); |
4632 | clear_buffer_req(bh); |
4633 | clear_buffer_new(bh); |
4634 | + clear_buffer_delay(bh); |
4635 | + clear_buffer_unwritten(bh); |
4636 | bh->b_bdev = NULL; |
4637 | return may_free; |
4638 | } |
4639 | diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c |
4640 | index c061b9a..2444780 100644 |
4641 | --- a/fs/lockd/svc.c |
4642 | +++ b/fs/lockd/svc.c |
4643 | @@ -440,7 +440,7 @@ static int param_set_##name(const char *val, struct kernel_param *kp) \ |
4644 | __typeof__(type) num = which_strtol(val, &endp, 0); \ |
4645 | if (endp == val || *endp || num < (min) || num > (max)) \ |
4646 | return -EINVAL; \ |
4647 | - *((int *) kp->arg) = num; \ |
4648 | + *((type *) kp->arg) = num; \ |
4649 | return 0; \ |
4650 | } |
4651 | |
4652 | diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c |
4653 | index 726e59a..168cb93 100644 |
4654 | --- a/fs/nfs/callback_xdr.c |
4655 | +++ b/fs/nfs/callback_xdr.c |
4656 | @@ -9,6 +9,8 @@ |
4657 | #include <linux/sunrpc/svc.h> |
4658 | #include <linux/nfs4.h> |
4659 | #include <linux/nfs_fs.h> |
4660 | +#include <linux/ratelimit.h> |
4661 | +#include <linux/printk.h> |
4662 | #include <linux/slab.h> |
4663 | #include <linux/sunrpc/bc_xprt.h> |
4664 | #include "nfs4_fs.h" |
4665 | @@ -167,7 +169,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound |
4666 | if (hdr->minorversion <= 1) { |
4667 | hdr->cb_ident = ntohl(*p++); /* ignored by v4.1 */ |
4668 | } else { |
4669 | - printk(KERN_WARNING "%s: NFSv4 server callback with " |
4670 | + pr_warn_ratelimited("NFS: %s: NFSv4 server callback with " |
4671 | "illegal minor version %u!\n", |
4672 | __func__, hdr->minorversion); |
4673 | return htonl(NFS4ERR_MINOR_VERS_MISMATCH); |
4674 | diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c |
4675 | index 7f26540..ac889af 100644 |
4676 | --- a/fs/nfs/delegation.c |
4677 | +++ b/fs/nfs/delegation.c |
4678 | @@ -466,6 +466,17 @@ static void nfs_delegation_run_state_manager(struct nfs_client *clp) |
4679 | nfs4_schedule_state_manager(clp); |
4680 | } |
4681 | |
4682 | +void nfs_remove_bad_delegation(struct inode *inode) |
4683 | +{ |
4684 | + struct nfs_delegation *delegation; |
4685 | + |
4686 | + delegation = nfs_detach_delegation(NFS_I(inode), NFS_SERVER(inode)); |
4687 | + if (delegation) { |
4688 | + nfs_inode_find_state_and_recover(inode, &delegation->stateid); |
4689 | + nfs_free_delegation(delegation); |
4690 | + } |
4691 | +} |
4692 | + |
4693 | /** |
4694 | * nfs_expire_all_delegation_types |
4695 | * @clp: client to process |
4696 | diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h |
4697 | index d9322e4..691a796 100644 |
4698 | --- a/fs/nfs/delegation.h |
4699 | +++ b/fs/nfs/delegation.h |
4700 | @@ -45,6 +45,7 @@ void nfs_expire_unreferenced_delegations(struct nfs_client *clp); |
4701 | void nfs_handle_cb_pathdown(struct nfs_client *clp); |
4702 | int nfs_client_return_marked_delegations(struct nfs_client *clp); |
4703 | int nfs_delegations_present(struct nfs_client *clp); |
4704 | +void nfs_remove_bad_delegation(struct inode *inode); |
4705 | |
4706 | void nfs_delegation_mark_reclaim(struct nfs_client *clp); |
4707 | void nfs_delegation_reap_unclaimed(struct nfs_client *clp); |
4708 | diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h |
4709 | index 693ae22..0983b25 100644 |
4710 | --- a/fs/nfs/nfs4_fs.h |
4711 | +++ b/fs/nfs/nfs4_fs.h |
4712 | @@ -191,6 +191,7 @@ struct nfs4_exception { |
4713 | long timeout; |
4714 | int retry; |
4715 | struct nfs4_state *state; |
4716 | + struct inode *inode; |
4717 | }; |
4718 | |
4719 | struct nfs4_state_recovery_ops { |
4720 | @@ -324,6 +325,8 @@ extern void nfs4_put_open_state(struct nfs4_state *); |
4721 | extern void nfs4_close_state(struct nfs4_state *, fmode_t); |
4722 | extern void nfs4_close_sync(struct nfs4_state *, fmode_t); |
4723 | extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); |
4724 | +extern void nfs_inode_find_state_and_recover(struct inode *inode, |
4725 | + const nfs4_stateid *stateid); |
4726 | extern void nfs4_schedule_lease_recovery(struct nfs_client *); |
4727 | extern void nfs4_schedule_state_manager(struct nfs_client *); |
4728 | extern void nfs4_schedule_path_down_recovery(struct nfs_client *clp); |
4729 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
4730 | index e527030..d945700 100644 |
4731 | --- a/fs/nfs/nfs4proc.c |
4732 | +++ b/fs/nfs/nfs4proc.c |
4733 | @@ -257,15 +257,28 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc |
4734 | { |
4735 | struct nfs_client *clp = server->nfs_client; |
4736 | struct nfs4_state *state = exception->state; |
4737 | + struct inode *inode = exception->inode; |
4738 | int ret = errorcode; |
4739 | |
4740 | exception->retry = 0; |
4741 | switch(errorcode) { |
4742 | case 0: |
4743 | return 0; |
4744 | + case -NFS4ERR_OPENMODE: |
4745 | + if (nfs_have_delegation(inode, FMODE_READ)) { |
4746 | + nfs_inode_return_delegation(inode); |
4747 | + exception->retry = 1; |
4748 | + return 0; |
4749 | + } |
4750 | + if (state == NULL) |
4751 | + break; |
4752 | + nfs4_schedule_stateid_recovery(server, state); |
4753 | + goto wait_on_recovery; |
4754 | + case -NFS4ERR_DELEG_REVOKED: |
4755 | case -NFS4ERR_ADMIN_REVOKED: |
4756 | case -NFS4ERR_BAD_STATEID: |
4757 | - case -NFS4ERR_OPENMODE: |
4758 | + if (state != NULL) |
4759 | + nfs_remove_bad_delegation(state->inode); |
4760 | if (state == NULL) |
4761 | break; |
4762 | nfs4_schedule_stateid_recovery(server, state); |
4763 | @@ -1316,8 +1329,11 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state |
4764 | * The show must go on: exit, but mark the |
4765 | * stateid as needing recovery. |
4766 | */ |
4767 | + case -NFS4ERR_DELEG_REVOKED: |
4768 | case -NFS4ERR_ADMIN_REVOKED: |
4769 | case -NFS4ERR_BAD_STATEID: |
4770 | + nfs_inode_find_state_and_recover(state->inode, |
4771 | + stateid); |
4772 | nfs4_schedule_stateid_recovery(server, state); |
4773 | case -EKEYEXPIRED: |
4774 | /* |
4775 | @@ -1822,7 +1838,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, |
4776 | * the user though... |
4777 | */ |
4778 | if (status == -NFS4ERR_BAD_SEQID) { |
4779 | - printk(KERN_WARNING "NFS: v4 server %s " |
4780 | + pr_warn_ratelimited("NFS: v4 server %s " |
4781 | " returned a bad sequence-id error!\n", |
4782 | NFS_SERVER(dir)->nfs_client->cl_hostname); |
4783 | exception.retry = 1; |
4784 | @@ -1893,7 +1909,10 @@ static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, |
4785 | struct nfs4_state *state) |
4786 | { |
4787 | struct nfs_server *server = NFS_SERVER(inode); |
4788 | - struct nfs4_exception exception = { }; |
4789 | + struct nfs4_exception exception = { |
4790 | + .state = state, |
4791 | + .inode = inode, |
4792 | + }; |
4793 | int err; |
4794 | do { |
4795 | err = nfs4_handle_exception(server, |
4796 | @@ -2223,11 +2242,12 @@ static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, |
4797 | switch (err) { |
4798 | case 0: |
4799 | case -NFS4ERR_WRONGSEC: |
4800 | - break; |
4801 | + goto out; |
4802 | default: |
4803 | err = nfs4_handle_exception(server, err, &exception); |
4804 | } |
4805 | } while (exception.retry); |
4806 | +out: |
4807 | return err; |
4808 | } |
4809 | |
4810 | @@ -3707,8 +3727,11 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, |
4811 | if (task->tk_status >= 0) |
4812 | return 0; |
4813 | switch(task->tk_status) { |
4814 | + case -NFS4ERR_DELEG_REVOKED: |
4815 | case -NFS4ERR_ADMIN_REVOKED: |
4816 | case -NFS4ERR_BAD_STATEID: |
4817 | + if (state != NULL) |
4818 | + nfs_remove_bad_delegation(state->inode); |
4819 | case -NFS4ERR_OPENMODE: |
4820 | if (state == NULL) |
4821 | break; |
4822 | @@ -4526,7 +4549,9 @@ out: |
4823 | |
4824 | static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) |
4825 | { |
4826 | - struct nfs4_exception exception = { }; |
4827 | + struct nfs4_exception exception = { |
4828 | + .state = state, |
4829 | + }; |
4830 | int err; |
4831 | |
4832 | do { |
4833 | @@ -4619,6 +4644,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) |
4834 | * The show must go on: exit, but mark the |
4835 | * stateid as needing recovery. |
4836 | */ |
4837 | + case -NFS4ERR_DELEG_REVOKED: |
4838 | case -NFS4ERR_ADMIN_REVOKED: |
4839 | case -NFS4ERR_BAD_STATEID: |
4840 | case -NFS4ERR_OPENMODE: |
4841 | @@ -5957,21 +5983,22 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) |
4842 | return; |
4843 | |
4844 | switch (task->tk_status) { /* Just ignore these failures */ |
4845 | - case NFS4ERR_DELEG_REVOKED: /* layout was recalled */ |
4846 | - case NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ |
4847 | - case NFS4ERR_BADLAYOUT: /* no layout */ |
4848 | - case NFS4ERR_GRACE: /* loca_recalim always false */ |
4849 | + case -NFS4ERR_DELEG_REVOKED: /* layout was recalled */ |
4850 | + case -NFS4ERR_BADIOMODE: /* no IOMODE_RW layout for range */ |
4851 | + case -NFS4ERR_BADLAYOUT: /* no layout */ |
4852 | + case -NFS4ERR_GRACE: /* loca_recalim always false */ |
4853 | task->tk_status = 0; |
4854 | - } |
4855 | - |
4856 | - if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { |
4857 | - rpc_restart_call_prepare(task); |
4858 | - return; |
4859 | - } |
4860 | - |
4861 | - if (task->tk_status == 0) |
4862 | + break; |
4863 | + case 0: |
4864 | nfs_post_op_update_inode_force_wcc(data->args.inode, |
4865 | data->res.fattr); |
4866 | + break; |
4867 | + default: |
4868 | + if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { |
4869 | + rpc_restart_call_prepare(task); |
4870 | + return; |
4871 | + } |
4872 | + } |
4873 | } |
4874 | |
4875 | static void nfs4_layoutcommit_release(void *calldata) |
4876 | @@ -6074,11 +6101,12 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, |
4877 | case 0: |
4878 | case -NFS4ERR_WRONGSEC: |
4879 | case -NFS4ERR_NOTSUPP: |
4880 | - break; |
4881 | + goto out; |
4882 | default: |
4883 | err = nfs4_handle_exception(server, err, &exception); |
4884 | } |
4885 | } while (exception.retry); |
4886 | +out: |
4887 | return err; |
4888 | } |
4889 | |
4890 | diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c |
4891 | index a58eed7..66020ac 100644 |
4892 | --- a/fs/nfs/nfs4state.c |
4893 | +++ b/fs/nfs/nfs4state.c |
4894 | @@ -935,7 +935,7 @@ static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) |
4895 | case -NFS4ERR_BAD_SEQID: |
4896 | if (seqid->sequence->flags & NFS_SEQID_CONFIRMED) |
4897 | return; |
4898 | - printk(KERN_WARNING "NFS: v4 server returned a bad" |
4899 | + pr_warn_ratelimited("NFS: v4 server returned a bad" |
4900 | " sequence-id error on an" |
4901 | " unconfirmed sequence %p!\n", |
4902 | seqid->sequence); |
4903 | @@ -1071,12 +1071,37 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4 |
4904 | { |
4905 | struct nfs_client *clp = server->nfs_client; |
4906 | |
4907 | - if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags)) |
4908 | - nfs_async_inode_return_delegation(state->inode, &state->stateid); |
4909 | nfs4_state_mark_reclaim_nograce(clp, state); |
4910 | nfs4_schedule_state_manager(clp); |
4911 | } |
4912 | |
4913 | +void nfs_inode_find_state_and_recover(struct inode *inode, |
4914 | + const nfs4_stateid *stateid) |
4915 | +{ |
4916 | + struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; |
4917 | + struct nfs_inode *nfsi = NFS_I(inode); |
4918 | + struct nfs_open_context *ctx; |
4919 | + struct nfs4_state *state; |
4920 | + bool found = false; |
4921 | + |
4922 | + spin_lock(&inode->i_lock); |
4923 | + list_for_each_entry(ctx, &nfsi->open_files, list) { |
4924 | + state = ctx->state; |
4925 | + if (state == NULL) |
4926 | + continue; |
4927 | + if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) |
4928 | + continue; |
4929 | + if (memcmp(state->stateid.data, stateid->data, sizeof(state->stateid.data)) != 0) |
4930 | + continue; |
4931 | + nfs4_state_mark_reclaim_nograce(clp, state); |
4932 | + found = true; |
4933 | + } |
4934 | + spin_unlock(&inode->i_lock); |
4935 | + if (found) |
4936 | + nfs4_schedule_state_manager(clp); |
4937 | +} |
4938 | + |
4939 | + |
4940 | static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) |
4941 | { |
4942 | struct inode *inode = state->inode; |
4943 | @@ -1739,7 +1764,7 @@ static void nfs4_state_manager(struct nfs_client *clp) |
4944 | } while (atomic_read(&clp->cl_count) > 1); |
4945 | return; |
4946 | out_error: |
4947 | - printk(KERN_WARNING "Error: state manager failed on NFSv4 server %s" |
4948 | + pr_warn_ratelimited("NFS: state manager failed on NFSv4 server %s" |
4949 | " with error %d\n", clp->cl_hostname, -status); |
4950 | nfs4_end_drain_session(clp); |
4951 | nfs4_clear_state_manager_bit(clp); |
4952 | diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c |
4953 | index be177f7..d6c078e 100644 |
4954 | --- a/fs/proc/namespaces.c |
4955 | +++ b/fs/proc/namespaces.c |
4956 | @@ -54,7 +54,7 @@ static struct dentry *proc_ns_instantiate(struct inode *dir, |
4957 | ei->ns_ops = ns_ops; |
4958 | ei->ns = ns; |
4959 | |
4960 | - dentry->d_op = &pid_dentry_operations; |
4961 | + d_set_d_op(dentry, &pid_dentry_operations); |
4962 | d_add(dentry, inode); |
4963 | /* Close the race of the process dying before we return the dentry */ |
4964 | if (pid_revalidate(dentry, NULL)) |
4965 | diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c |
4966 | index a6b6217..53c3bce 100644 |
4967 | --- a/fs/proc/proc_sysctl.c |
4968 | +++ b/fs/proc/proc_sysctl.c |
4969 | @@ -188,20 +188,32 @@ static ssize_t proc_sys_write(struct file *filp, const char __user *buf, |
4970 | |
4971 | static int proc_sys_open(struct inode *inode, struct file *filp) |
4972 | { |
4973 | + struct ctl_table_header *head = grab_header(inode); |
4974 | struct ctl_table *table = PROC_I(inode)->sysctl_entry; |
4975 | |
4976 | + /* sysctl was unregistered */ |
4977 | + if (IS_ERR(head)) |
4978 | + return PTR_ERR(head); |
4979 | + |
4980 | if (table->poll) |
4981 | filp->private_data = proc_sys_poll_event(table->poll); |
4982 | |
4983 | + sysctl_head_finish(head); |
4984 | + |
4985 | return 0; |
4986 | } |
4987 | |
4988 | static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) |
4989 | { |
4990 | struct inode *inode = filp->f_path.dentry->d_inode; |
4991 | + struct ctl_table_header *head = grab_header(inode); |
4992 | struct ctl_table *table = PROC_I(inode)->sysctl_entry; |
4993 | - unsigned long event = (unsigned long)filp->private_data; |
4994 | unsigned int ret = DEFAULT_POLLMASK; |
4995 | + unsigned long event; |
4996 | + |
4997 | + /* sysctl was unregistered */ |
4998 | + if (IS_ERR(head)) |
4999 | + return POLLERR | POLLHUP; |
5000 | |
5001 | if (!table->proc_handler) |
5002 | goto out; |
5003 | @@ -209,6 +221,7 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) |
5004 | if (!table->poll) |
5005 | goto out; |
5006 | |
5007 | + event = (unsigned long)filp->private_data; |
5008 | poll_wait(filp, &table->poll->wait, wait); |
5009 | |
5010 | if (event != atomic_read(&table->poll->event)) { |
5011 | @@ -217,6 +230,8 @@ static unsigned int proc_sys_poll(struct file *filp, poll_table *wait) |
5012 | } |
5013 | |
5014 | out: |
5015 | + sysctl_head_finish(head); |
5016 | + |
5017 | return ret; |
5018 | } |
5019 | |
5020 | diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c |
5021 | index 7dcd2a2..3efa725 100644 |
5022 | --- a/fs/proc/task_mmu.c |
5023 | +++ b/fs/proc/task_mmu.c |
5024 | @@ -409,6 +409,9 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
5025 | } else { |
5026 | spin_unlock(&walk->mm->page_table_lock); |
5027 | } |
5028 | + |
5029 | + if (pmd_trans_unstable(pmd)) |
5030 | + return 0; |
5031 | /* |
5032 | * The mmap_sem held all the way back in m_start() is what |
5033 | * keeps khugepaged out of here and from collapsing things |
5034 | @@ -507,6 +510,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, |
5035 | struct page *page; |
5036 | |
5037 | split_huge_page_pmd(walk->mm, pmd); |
5038 | + if (pmd_trans_unstable(pmd)) |
5039 | + return 0; |
5040 | |
5041 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
5042 | for (; addr != end; pte++, addr += PAGE_SIZE) { |
5043 | @@ -670,6 +675,8 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, |
5044 | int err = 0; |
5045 | |
5046 | split_huge_page_pmd(walk->mm, pmd); |
5047 | + if (pmd_trans_unstable(pmd)) |
5048 | + return 0; |
5049 | |
5050 | /* find the first VMA at or above 'addr' */ |
5051 | vma = find_vma(walk->mm, addr); |
5052 | @@ -961,6 +968,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, |
5053 | spin_unlock(&walk->mm->page_table_lock); |
5054 | } |
5055 | |
5056 | + if (pmd_trans_unstable(pmd)) |
5057 | + return 0; |
5058 | orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); |
5059 | do { |
5060 | struct page *page = can_gather_numa_stats(*pte, md->vma, addr); |
5061 | diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c |
5062 | index deb804b..9db61a4 100644 |
5063 | --- a/fs/sysfs/inode.c |
5064 | +++ b/fs/sysfs/inode.c |
5065 | @@ -136,12 +136,13 @@ static int sysfs_sd_setsecdata(struct sysfs_dirent *sd, void **secdata, u32 *sec |
5066 | void *old_secdata; |
5067 | size_t old_secdata_len; |
5068 | |
5069 | - iattrs = sd->s_iattr; |
5070 | - if (!iattrs) |
5071 | - iattrs = sysfs_init_inode_attrs(sd); |
5072 | - if (!iattrs) |
5073 | - return -ENOMEM; |
5074 | + if (!sd->s_iattr) { |
5075 | + sd->s_iattr = sysfs_init_inode_attrs(sd); |
5076 | + if (!sd->s_iattr) |
5077 | + return -ENOMEM; |
5078 | + } |
5079 | |
5080 | + iattrs = sd->s_iattr; |
5081 | old_secdata = iattrs->ia_secdata; |
5082 | old_secdata_len = iattrs->ia_secdata_len; |
5083 | |
5084 | diff --git a/fs/udf/file.c b/fs/udf/file.c |
5085 | index dca0c38..d567b84 100644 |
5086 | --- a/fs/udf/file.c |
5087 | +++ b/fs/udf/file.c |
5088 | @@ -201,12 +201,10 @@ out: |
5089 | static int udf_release_file(struct inode *inode, struct file *filp) |
5090 | { |
5091 | if (filp->f_mode & FMODE_WRITE) { |
5092 | - mutex_lock(&inode->i_mutex); |
5093 | down_write(&UDF_I(inode)->i_data_sem); |
5094 | udf_discard_prealloc(inode); |
5095 | udf_truncate_tail_extent(inode); |
5096 | up_write(&UDF_I(inode)->i_data_sem); |
5097 | - mutex_unlock(&inode->i_mutex); |
5098 | } |
5099 | return 0; |
5100 | } |
5101 | diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c |
5102 | index 0fa98b1..cfc4277 100644 |
5103 | --- a/fs/xfs/xfs_iget.c |
5104 | +++ b/fs/xfs/xfs_iget.c |
5105 | @@ -353,9 +353,20 @@ xfs_iget_cache_miss( |
5106 | BUG(); |
5107 | } |
5108 | |
5109 | - spin_lock(&pag->pag_ici_lock); |
5110 | + /* |
5111 | + * These values must be set before inserting the inode into the radix |
5112 | + * tree as the moment it is inserted a concurrent lookup (allowed by the |
5113 | + * RCU locking mechanism) can find it and that lookup must see that this |
5114 | + * is an inode currently under construction (i.e. that XFS_INEW is set). |
5115 | + * The ip->i_flags_lock that protects the XFS_INEW flag forms the |
5116 | + * memory barrier that ensures this detection works correctly at lookup |
5117 | + * time. |
5118 | + */ |
5119 | + ip->i_udquot = ip->i_gdquot = NULL; |
5120 | + xfs_iflags_set(ip, XFS_INEW); |
5121 | |
5122 | /* insert the new inode */ |
5123 | + spin_lock(&pag->pag_ici_lock); |
5124 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); |
5125 | if (unlikely(error)) { |
5126 | WARN_ON(error != -EEXIST); |
5127 | @@ -363,11 +374,6 @@ xfs_iget_cache_miss( |
5128 | error = EAGAIN; |
5129 | goto out_preload_end; |
5130 | } |
5131 | - |
5132 | - /* These values _must_ be set before releasing the radix tree lock! */ |
5133 | - ip->i_udquot = ip->i_gdquot = NULL; |
5134 | - xfs_iflags_set(ip, XFS_INEW); |
5135 | - |
5136 | spin_unlock(&pag->pag_ici_lock); |
5137 | radix_tree_preload_end(); |
5138 | |
5139 | diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c |
5140 | index 541a508..4f5d0ce 100644 |
5141 | --- a/fs/xfs/xfs_log_recover.c |
5142 | +++ b/fs/xfs/xfs_log_recover.c |
5143 | @@ -3161,37 +3161,26 @@ xlog_recover_process_iunlinks( |
5144 | */ |
5145 | continue; |
5146 | } |
5147 | + /* |
5148 | + * Unlock the buffer so that it can be acquired in the normal |
5149 | + * course of the transaction to truncate and free each inode. |
5150 | + * Because we are not racing with anyone else here for the AGI |
5151 | + * buffer, we don't even need to hold it locked to read the |
5152 | + * initial unlinked bucket entries out of the buffer. We keep |
5153 | + * buffer reference though, so that it stays pinned in memory |
5154 | + * while we need the buffer. |
5155 | + */ |
5156 | agi = XFS_BUF_TO_AGI(agibp); |
5157 | + xfs_buf_unlock(agibp); |
5158 | |
5159 | for (bucket = 0; bucket < XFS_AGI_UNLINKED_BUCKETS; bucket++) { |
5160 | agino = be32_to_cpu(agi->agi_unlinked[bucket]); |
5161 | while (agino != NULLAGINO) { |
5162 | - /* |
5163 | - * Release the agi buffer so that it can |
5164 | - * be acquired in the normal course of the |
5165 | - * transaction to truncate and free the inode. |
5166 | - */ |
5167 | - xfs_buf_relse(agibp); |
5168 | - |
5169 | agino = xlog_recover_process_one_iunlink(mp, |
5170 | agno, agino, bucket); |
5171 | - |
5172 | - /* |
5173 | - * Reacquire the agibuffer and continue around |
5174 | - * the loop. This should never fail as we know |
5175 | - * the buffer was good earlier on. |
5176 | - */ |
5177 | - error = xfs_read_agi(mp, NULL, agno, &agibp); |
5178 | - ASSERT(error == 0); |
5179 | - agi = XFS_BUF_TO_AGI(agibp); |
5180 | } |
5181 | } |
5182 | - |
5183 | - /* |
5184 | - * Release the buffer for the current agi so we can |
5185 | - * go on to the next one. |
5186 | - */ |
5187 | - xfs_buf_relse(agibp); |
5188 | + xfs_buf_rele(agibp); |
5189 | } |
5190 | |
5191 | mp->m_dmevmask = mp_dmevmask; |
5192 | diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h |
5193 | index 76bff2b..a03c098 100644 |
5194 | --- a/include/asm-generic/pgtable.h |
5195 | +++ b/include/asm-generic/pgtable.h |
5196 | @@ -425,6 +425,8 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn, |
5197 | unsigned long size); |
5198 | #endif |
5199 | |
5200 | +#ifdef CONFIG_MMU |
5201 | + |
5202 | #ifndef CONFIG_TRANSPARENT_HUGEPAGE |
5203 | static inline int pmd_trans_huge(pmd_t pmd) |
5204 | { |
5205 | @@ -441,7 +443,66 @@ static inline int pmd_write(pmd_t pmd) |
5206 | return 0; |
5207 | } |
5208 | #endif /* __HAVE_ARCH_PMD_WRITE */ |
5209 | +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
5210 | + |
5211 | +/* |
5212 | + * This function is meant to be used by sites walking pagetables with |
5213 | + * the mmap_sem hold in read mode to protect against MADV_DONTNEED and |
5214 | + * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd |
5215 | + * into a null pmd and the transhuge page fault can convert a null pmd |
5216 | + * into an hugepmd or into a regular pmd (if the hugepage allocation |
5217 | + * fails). While holding the mmap_sem in read mode the pmd becomes |
5218 | + * stable and stops changing under us only if it's not null and not a |
5219 | + * transhuge pmd. When those races occurs and this function makes a |
5220 | + * difference vs the standard pmd_none_or_clear_bad, the result is |
5221 | + * undefined so behaving like if the pmd was none is safe (because it |
5222 | + * can return none anyway). The compiler level barrier() is critically |
5223 | + * important to compute the two checks atomically on the same pmdval. |
5224 | + */ |
5225 | +static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) |
5226 | +{ |
5227 | + /* depend on compiler for an atomic pmd read */ |
5228 | + pmd_t pmdval = *pmd; |
5229 | + /* |
5230 | + * The barrier will stabilize the pmdval in a register or on |
5231 | + * the stack so that it will stop changing under the code. |
5232 | + */ |
5233 | +#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
5234 | + barrier(); |
5235 | +#endif |
5236 | + if (pmd_none(pmdval)) |
5237 | + return 1; |
5238 | + if (unlikely(pmd_bad(pmdval))) { |
5239 | + if (!pmd_trans_huge(pmdval)) |
5240 | + pmd_clear_bad(pmd); |
5241 | + return 1; |
5242 | + } |
5243 | + return 0; |
5244 | +} |
5245 | + |
5246 | +/* |
5247 | + * This is a noop if Transparent Hugepage Support is not built into |
5248 | + * the kernel. Otherwise it is equivalent to |
5249 | + * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in |
5250 | + * places that already verified the pmd is not none and they want to |
5251 | + * walk ptes while holding the mmap sem in read mode (write mode don't |
5252 | + * need this). If THP is not enabled, the pmd can't go away under the |
5253 | + * code even if MADV_DONTNEED runs, but if THP is enabled we need to |
5254 | + * run a pmd_trans_unstable before walking the ptes after |
5255 | + * split_huge_page_pmd returns (because it may have run when the pmd |
5256 | + * become null, but then a page fault can map in a THP and not a |
5257 | + * regular page). |
5258 | + */ |
5259 | +static inline int pmd_trans_unstable(pmd_t *pmd) |
5260 | +{ |
5261 | +#ifdef CONFIG_TRANSPARENT_HUGEPAGE |
5262 | + return pmd_none_or_trans_huge_or_clear_bad(pmd); |
5263 | +#else |
5264 | + return 0; |
5265 | #endif |
5266 | +} |
5267 | + |
5268 | +#endif /* CONFIG_MMU */ |
5269 | |
5270 | #endif /* !__ASSEMBLY__ */ |
5271 | |
5272 | diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h |
5273 | index 2292d1a..991ef01 100644 |
5274 | --- a/include/asm-generic/unistd.h |
5275 | +++ b/include/asm-generic/unistd.h |
5276 | @@ -218,7 +218,7 @@ __SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev) |
5277 | |
5278 | /* fs/sendfile.c */ |
5279 | #define __NR3264_sendfile 71 |
5280 | -__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile) |
5281 | +__SYSCALL(__NR3264_sendfile, sys_sendfile64) |
5282 | |
5283 | /* fs/select.c */ |
5284 | #define __NR_pselect6 72 |
5285 | diff --git a/include/linux/fb.h b/include/linux/fb.h |
5286 | index 1d6836c..73845ce 100644 |
5287 | --- a/include/linux/fb.h |
5288 | +++ b/include/linux/fb.h |
5289 | @@ -997,6 +997,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, |
5290 | /* drivers/video/fbmem.c */ |
5291 | extern int register_framebuffer(struct fb_info *fb_info); |
5292 | extern int unregister_framebuffer(struct fb_info *fb_info); |
5293 | +extern int unlink_framebuffer(struct fb_info *fb_info); |
5294 | extern void remove_conflicting_framebuffers(struct apertures_struct *a, |
5295 | const char *name, bool primary); |
5296 | extern int fb_prepare_logo(struct fb_info *fb_info, int rotate); |
5297 | diff --git a/include/linux/math64.h b/include/linux/math64.h |
5298 | index 23fcdfc..b8ba855 100644 |
5299 | --- a/include/linux/math64.h |
5300 | +++ b/include/linux/math64.h |
5301 | @@ -6,6 +6,8 @@ |
5302 | |
5303 | #if BITS_PER_LONG == 64 |
5304 | |
5305 | +#define div64_long(x,y) div64_s64((x),(y)) |
5306 | + |
5307 | /** |
5308 | * div_u64_rem - unsigned 64bit divide with 32bit divisor with remainder |
5309 | * |
5310 | @@ -45,6 +47,8 @@ static inline s64 div64_s64(s64 dividend, s64 divisor) |
5311 | |
5312 | #elif BITS_PER_LONG == 32 |
5313 | |
5314 | +#define div64_long(x,y) div_s64((x),(y)) |
5315 | + |
5316 | #ifndef div_u64_rem |
5317 | static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder) |
5318 | { |
5319 | diff --git a/include/linux/rtc.h b/include/linux/rtc.h |
5320 | index 93f4d03..fcabfb4 100644 |
5321 | --- a/include/linux/rtc.h |
5322 | +++ b/include/linux/rtc.h |
5323 | @@ -202,7 +202,8 @@ struct rtc_device |
5324 | struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ |
5325 | int pie_enabled; |
5326 | struct work_struct irqwork; |
5327 | - |
5328 | + /* Some hardware can't support UIE mode */ |
5329 | + int uie_unsupported; |
5330 | |
5331 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL |
5332 | struct work_struct uie_task; |
5333 | diff --git a/kernel/futex.c b/kernel/futex.c |
5334 | index 1614be2..0677023 100644 |
5335 | --- a/kernel/futex.c |
5336 | +++ b/kernel/futex.c |
5337 | @@ -2641,6 +2641,16 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
5338 | } |
5339 | |
5340 | switch (cmd) { |
5341 | + case FUTEX_LOCK_PI: |
5342 | + case FUTEX_UNLOCK_PI: |
5343 | + case FUTEX_TRYLOCK_PI: |
5344 | + case FUTEX_WAIT_REQUEUE_PI: |
5345 | + case FUTEX_CMP_REQUEUE_PI: |
5346 | + if (!futex_cmpxchg_enabled) |
5347 | + return -ENOSYS; |
5348 | + } |
5349 | + |
5350 | + switch (cmd) { |
5351 | case FUTEX_WAIT: |
5352 | val3 = FUTEX_BITSET_MATCH_ANY; |
5353 | case FUTEX_WAIT_BITSET: |
5354 | @@ -2661,16 +2671,13 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout, |
5355 | ret = futex_wake_op(uaddr, flags, uaddr2, val, val2, val3); |
5356 | break; |
5357 | case FUTEX_LOCK_PI: |
5358 | - if (futex_cmpxchg_enabled) |
5359 | - ret = futex_lock_pi(uaddr, flags, val, timeout, 0); |
5360 | + ret = futex_lock_pi(uaddr, flags, val, timeout, 0); |
5361 | break; |
5362 | case FUTEX_UNLOCK_PI: |
5363 | - if (futex_cmpxchg_enabled) |
5364 | - ret = futex_unlock_pi(uaddr, flags); |
5365 | + ret = futex_unlock_pi(uaddr, flags); |
5366 | break; |
5367 | case FUTEX_TRYLOCK_PI: |
5368 | - if (futex_cmpxchg_enabled) |
5369 | - ret = futex_lock_pi(uaddr, flags, 0, timeout, 1); |
5370 | + ret = futex_lock_pi(uaddr, flags, 0, timeout, 1); |
5371 | break; |
5372 | case FUTEX_WAIT_REQUEUE_PI: |
5373 | val3 = FUTEX_BITSET_MATCH_ANY; |
5374 | diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c |
5375 | index fb7db75..25784d6 100644 |
5376 | --- a/kernel/irq/chip.c |
5377 | +++ b/kernel/irq/chip.c |
5378 | @@ -61,8 +61,7 @@ int irq_set_irq_type(unsigned int irq, unsigned int type) |
5379 | return -EINVAL; |
5380 | |
5381 | type &= IRQ_TYPE_SENSE_MASK; |
5382 | - if (type != IRQ_TYPE_NONE) |
5383 | - ret = __irq_set_trigger(desc, irq, type); |
5384 | + ret = __irq_set_trigger(desc, irq, type); |
5385 | irq_put_desc_busunlock(desc, flags); |
5386 | return ret; |
5387 | } |
5388 | diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c |
5389 | index ae95cd2..7600092 100644 |
5390 | --- a/kernel/irq/manage.c |
5391 | +++ b/kernel/irq/manage.c |
5392 | @@ -773,7 +773,7 @@ static int irq_thread(void *data) |
5393 | struct irqaction *action); |
5394 | int wake; |
5395 | |
5396 | - if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD, |
5397 | + if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD, |
5398 | &action->thread_flags)) |
5399 | handler_fn = irq_forced_thread_fn; |
5400 | else |
5401 | diff --git a/kernel/module.c b/kernel/module.c |
5402 | index 178333c..6969ef0 100644 |
5403 | --- a/kernel/module.c |
5404 | +++ b/kernel/module.c |
5405 | @@ -2341,8 +2341,7 @@ static int copy_and_check(struct load_info *info, |
5406 | return -ENOEXEC; |
5407 | |
5408 | /* Suck in entire file: we'll want most of it. */ |
5409 | - /* vmalloc barfs on "unusual" numbers. Check here */ |
5410 | - if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL) |
5411 | + if ((hdr = vmalloc(len)) == NULL) |
5412 | return -ENOMEM; |
5413 | |
5414 | if (copy_from_user(hdr, umod, len) != 0) { |
5415 | diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c |
5416 | index 624538a..7c0d578 100644 |
5417 | --- a/kernel/power/hibernate.c |
5418 | +++ b/kernel/power/hibernate.c |
5419 | @@ -648,7 +648,7 @@ int hibernate(void) |
5420 | /* Allocate memory management structures */ |
5421 | error = create_basic_memory_bitmaps(); |
5422 | if (error) |
5423 | - goto Exit; |
5424 | + goto Enable_umh; |
5425 | |
5426 | printk(KERN_INFO "PM: Syncing filesystems ... "); |
5427 | sys_sync(); |
5428 | @@ -656,7 +656,7 @@ int hibernate(void) |
5429 | |
5430 | error = prepare_processes(); |
5431 | if (error) |
5432 | - goto Finish; |
5433 | + goto Free_bitmaps; |
5434 | |
5435 | error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM); |
5436 | if (error) |
5437 | @@ -689,8 +689,9 @@ int hibernate(void) |
5438 | |
5439 | Thaw: |
5440 | thaw_processes(); |
5441 | - Finish: |
5442 | + Free_bitmaps: |
5443 | free_basic_memory_bitmaps(); |
5444 | + Enable_umh: |
5445 | usermodehelper_enable(); |
5446 | Exit: |
5447 | pm_notifier_call_chain(PM_POST_HIBERNATION); |
5448 | diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c |
5449 | index f6117a4..4b85a7a 100644 |
5450 | --- a/kernel/time/ntp.c |
5451 | +++ b/kernel/time/ntp.c |
5452 | @@ -275,7 +275,7 @@ static inline s64 ntp_update_offset_fll(s64 offset64, long secs) |
5453 | |
5454 | time_status |= STA_MODE; |
5455 | |
5456 | - return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs); |
5457 | + return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs); |
5458 | } |
5459 | |
5460 | static void ntp_update_offset(long offset) |
5461 | diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c |
5462 | index ad72a03..6d40244 100644 |
5463 | --- a/lib/kobject_uevent.c |
5464 | +++ b/lib/kobject_uevent.c |
5465 | @@ -29,16 +29,17 @@ |
5466 | |
5467 | u64 uevent_seqnum; |
5468 | char uevent_helper[UEVENT_HELPER_PATH_LEN] = CONFIG_UEVENT_HELPER_PATH; |
5469 | -static DEFINE_SPINLOCK(sequence_lock); |
5470 | #ifdef CONFIG_NET |
5471 | struct uevent_sock { |
5472 | struct list_head list; |
5473 | struct sock *sk; |
5474 | }; |
5475 | static LIST_HEAD(uevent_sock_list); |
5476 | -static DEFINE_MUTEX(uevent_sock_mutex); |
5477 | #endif |
5478 | |
5479 | +/* This lock protects uevent_seqnum and uevent_sock_list */ |
5480 | +static DEFINE_MUTEX(uevent_sock_mutex); |
5481 | + |
5482 | /* the strings here must match the enum in include/linux/kobject.h */ |
5483 | static const char *kobject_actions[] = { |
5484 | [KOBJ_ADD] = "add", |
5485 | @@ -136,7 +137,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, |
5486 | struct kobject *top_kobj; |
5487 | struct kset *kset; |
5488 | const struct kset_uevent_ops *uevent_ops; |
5489 | - u64 seq; |
5490 | int i = 0; |
5491 | int retval = 0; |
5492 | #ifdef CONFIG_NET |
5493 | @@ -243,17 +243,16 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, |
5494 | else if (action == KOBJ_REMOVE) |
5495 | kobj->state_remove_uevent_sent = 1; |
5496 | |
5497 | + mutex_lock(&uevent_sock_mutex); |
5498 | /* we will send an event, so request a new sequence number */ |
5499 | - spin_lock(&sequence_lock); |
5500 | - seq = ++uevent_seqnum; |
5501 | - spin_unlock(&sequence_lock); |
5502 | - retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)seq); |
5503 | - if (retval) |
5504 | + retval = add_uevent_var(env, "SEQNUM=%llu", (unsigned long long)++uevent_seqnum); |
5505 | + if (retval) { |
5506 | + mutex_unlock(&uevent_sock_mutex); |
5507 | goto exit; |
5508 | + } |
5509 | |
5510 | #if defined(CONFIG_NET) |
5511 | /* send netlink message */ |
5512 | - mutex_lock(&uevent_sock_mutex); |
5513 | list_for_each_entry(ue_sk, &uevent_sock_list, list) { |
5514 | struct sock *uevent_sock = ue_sk->sk; |
5515 | struct sk_buff *skb; |
5516 | @@ -287,8 +286,8 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, |
5517 | } else |
5518 | retval = -ENOMEM; |
5519 | } |
5520 | - mutex_unlock(&uevent_sock_mutex); |
5521 | #endif |
5522 | + mutex_unlock(&uevent_sock_mutex); |
5523 | |
5524 | /* call uevent_helper, usually only enabled during early boot */ |
5525 | if (uevent_helper[0] && !kobj_usermode_filter(kobj)) { |
5526 | diff --git a/mm/bootmem.c b/mm/bootmem.c |
5527 | index 1a77012..b863822 100644 |
5528 | --- a/mm/bootmem.c |
5529 | +++ b/mm/bootmem.c |
5530 | @@ -768,14 +768,13 @@ void * __init alloc_bootmem_section(unsigned long size, |
5531 | unsigned long section_nr) |
5532 | { |
5533 | bootmem_data_t *bdata; |
5534 | - unsigned long pfn, goal, limit; |
5535 | + unsigned long pfn, goal; |
5536 | |
5537 | pfn = section_nr_to_pfn(section_nr); |
5538 | goal = pfn << PAGE_SHIFT; |
5539 | - limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT; |
5540 | bdata = &bootmem_node_data[early_pfn_to_nid(pfn)]; |
5541 | |
5542 | - return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, limit); |
5543 | + return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0); |
5544 | } |
5545 | #endif |
5546 | |
5547 | diff --git a/mm/memcontrol.c b/mm/memcontrol.c |
5548 | index de67e91..778554f 100644 |
5549 | --- a/mm/memcontrol.c |
5550 | +++ b/mm/memcontrol.c |
5551 | @@ -5237,6 +5237,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, |
5552 | spinlock_t *ptl; |
5553 | |
5554 | split_huge_page_pmd(walk->mm, pmd); |
5555 | + if (pmd_trans_unstable(pmd)) |
5556 | + return 0; |
5557 | |
5558 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
5559 | for (; addr != end; pte++, addr += PAGE_SIZE) |
5560 | @@ -5398,6 +5400,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, |
5561 | spinlock_t *ptl; |
5562 | |
5563 | split_huge_page_pmd(walk->mm, pmd); |
5564 | + if (pmd_trans_unstable(pmd)) |
5565 | + return 0; |
5566 | retry: |
5567 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
5568 | for (; addr != end; addr += PAGE_SIZE) { |
5569 | diff --git a/mm/memory.c b/mm/memory.c |
5570 | index 829d437..1b1ca17 100644 |
5571 | --- a/mm/memory.c |
5572 | +++ b/mm/memory.c |
5573 | @@ -1228,16 +1228,24 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, |
5574 | do { |
5575 | next = pmd_addr_end(addr, end); |
5576 | if (pmd_trans_huge(*pmd)) { |
5577 | - if (next-addr != HPAGE_PMD_SIZE) { |
5578 | + if (next - addr != HPAGE_PMD_SIZE) { |
5579 | VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); |
5580 | split_huge_page_pmd(vma->vm_mm, pmd); |
5581 | } else if (zap_huge_pmd(tlb, vma, pmd)) |
5582 | - continue; |
5583 | + goto next; |
5584 | /* fall through */ |
5585 | } |
5586 | - if (pmd_none_or_clear_bad(pmd)) |
5587 | - continue; |
5588 | + /* |
5589 | + * Here there can be other concurrent MADV_DONTNEED or |
5590 | + * trans huge page faults running, and if the pmd is |
5591 | + * none or trans huge it can change under us. This is |
5592 | + * because MADV_DONTNEED holds the mmap_sem in read |
5593 | + * mode. |
5594 | + */ |
5595 | + if (pmd_none_or_trans_huge_or_clear_bad(pmd)) |
5596 | + goto next; |
5597 | next = zap_pte_range(tlb, vma, pmd, addr, next, details); |
5598 | +next: |
5599 | cond_resched(); |
5600 | } while (pmd++, addr = next, addr != end); |
5601 | |
5602 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
5603 | index c3fdbcb..b26aae2 100644 |
5604 | --- a/mm/mempolicy.c |
5605 | +++ b/mm/mempolicy.c |
5606 | @@ -512,7 +512,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud, |
5607 | do { |
5608 | next = pmd_addr_end(addr, end); |
5609 | split_huge_page_pmd(vma->vm_mm, pmd); |
5610 | - if (pmd_none_or_clear_bad(pmd)) |
5611 | + if (pmd_none_or_trans_huge_or_clear_bad(pmd)) |
5612 | continue; |
5613 | if (check_pte_range(vma, pmd, addr, next, nodes, |
5614 | flags, private)) |
5615 | diff --git a/mm/mincore.c b/mm/mincore.c |
5616 | index 636a868..936b4ce 100644 |
5617 | --- a/mm/mincore.c |
5618 | +++ b/mm/mincore.c |
5619 | @@ -164,7 +164,7 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud, |
5620 | } |
5621 | /* fall through */ |
5622 | } |
5623 | - if (pmd_none_or_clear_bad(pmd)) |
5624 | + if (pmd_none_or_trans_huge_or_clear_bad(pmd)) |
5625 | mincore_unmapped_range(vma, addr, next, vec); |
5626 | else |
5627 | mincore_pte_range(vma, pmd, addr, next, vec); |
5628 | diff --git a/mm/pagewalk.c b/mm/pagewalk.c |
5629 | index 2f5cf10..aa9701e 100644 |
5630 | --- a/mm/pagewalk.c |
5631 | +++ b/mm/pagewalk.c |
5632 | @@ -59,7 +59,7 @@ again: |
5633 | continue; |
5634 | |
5635 | split_huge_page_pmd(walk->mm, pmd); |
5636 | - if (pmd_none_or_clear_bad(pmd)) |
5637 | + if (pmd_none_or_trans_huge_or_clear_bad(pmd)) |
5638 | goto again; |
5639 | err = walk_pte_range(pmd, addr, next, walk); |
5640 | if (err) |
5641 | diff --git a/mm/slub.c b/mm/slub.c |
5642 | index 1a919f0..a99c785 100644 |
5643 | --- a/mm/slub.c |
5644 | +++ b/mm/slub.c |
5645 | @@ -3911,13 +3911,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, |
5646 | if (kmem_cache_open(s, n, |
5647 | size, align, flags, ctor)) { |
5648 | list_add(&s->list, &slab_caches); |
5649 | + up_write(&slub_lock); |
5650 | if (sysfs_slab_add(s)) { |
5651 | + down_write(&slub_lock); |
5652 | list_del(&s->list); |
5653 | kfree(n); |
5654 | kfree(s); |
5655 | goto err; |
5656 | } |
5657 | - up_write(&slub_lock); |
5658 | return s; |
5659 | } |
5660 | kfree(n); |
5661 | diff --git a/mm/sparse.c b/mm/sparse.c |
5662 | index 61d7cde..a8bc7d3 100644 |
5663 | --- a/mm/sparse.c |
5664 | +++ b/mm/sparse.c |
5665 | @@ -353,29 +353,21 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map, |
5666 | |
5667 | usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid), |
5668 | usemap_count); |
5669 | - if (usemap) { |
5670 | - for (pnum = pnum_begin; pnum < pnum_end; pnum++) { |
5671 | - if (!present_section_nr(pnum)) |
5672 | - continue; |
5673 | - usemap_map[pnum] = usemap; |
5674 | - usemap += size; |
5675 | + if (!usemap) { |
5676 | + usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count); |
5677 | + if (!usemap) { |
5678 | + printk(KERN_WARNING "%s: allocation failed\n", __func__); |
5679 | + return; |
5680 | } |
5681 | - return; |
5682 | } |
5683 | |
5684 | - usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count); |
5685 | - if (usemap) { |
5686 | - for (pnum = pnum_begin; pnum < pnum_end; pnum++) { |
5687 | - if (!present_section_nr(pnum)) |
5688 | - continue; |
5689 | - usemap_map[pnum] = usemap; |
5690 | - usemap += size; |
5691 | - check_usemap_section_nr(nodeid, usemap_map[pnum]); |
5692 | - } |
5693 | - return; |
5694 | + for (pnum = pnum_begin; pnum < pnum_end; pnum++) { |
5695 | + if (!present_section_nr(pnum)) |
5696 | + continue; |
5697 | + usemap_map[pnum] = usemap; |
5698 | + usemap += size; |
5699 | + check_usemap_section_nr(nodeid, usemap_map[pnum]); |
5700 | } |
5701 | - |
5702 | - printk(KERN_WARNING "%s: allocation failed\n", __func__); |
5703 | } |
5704 | |
5705 | #ifndef CONFIG_SPARSEMEM_VMEMMAP |
5706 | diff --git a/mm/swapfile.c b/mm/swapfile.c |
5707 | index b1cd120..2015a1e 100644 |
5708 | --- a/mm/swapfile.c |
5709 | +++ b/mm/swapfile.c |
5710 | @@ -931,9 +931,7 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, |
5711 | pmd = pmd_offset(pud, addr); |
5712 | do { |
5713 | next = pmd_addr_end(addr, end); |
5714 | - if (unlikely(pmd_trans_huge(*pmd))) |
5715 | - continue; |
5716 | - if (pmd_none_or_clear_bad(pmd)) |
5717 | + if (pmd_none_or_trans_huge_or_clear_bad(pmd)) |
5718 | continue; |
5719 | ret = unuse_pte_range(vma, pmd, addr, next, entry, page); |
5720 | if (ret) |
5721 | diff --git a/net/core/dev.c b/net/core/dev.c |
5722 | index c56cacf..55cd370 100644 |
5723 | --- a/net/core/dev.c |
5724 | +++ b/net/core/dev.c |
5725 | @@ -3634,7 +3634,8 @@ EXPORT_SYMBOL(napi_gro_receive); |
5726 | static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) |
5727 | { |
5728 | __skb_pull(skb, skb_headlen(skb)); |
5729 | - skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb)); |
5730 | + /* restore the reserve we had after netdev_alloc_skb_ip_align() */ |
5731 | + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); |
5732 | skb->vlan_tci = 0; |
5733 | skb->dev = napi->dev; |
5734 | skb->skb_iif = 0; |
5735 | diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c |
5736 | index 9083e82..2ef859a 100644 |
5737 | --- a/net/core/rtnetlink.c |
5738 | +++ b/net/core/rtnetlink.c |
5739 | @@ -1116,6 +1116,8 @@ static const struct nla_policy ifla_vf_policy[IFLA_VF_MAX+1] = { |
5740 | .len = sizeof(struct ifla_vf_vlan) }, |
5741 | [IFLA_VF_TX_RATE] = { .type = NLA_BINARY, |
5742 | .len = sizeof(struct ifla_vf_tx_rate) }, |
5743 | + [IFLA_VF_SPOOFCHK] = { .type = NLA_BINARY, |
5744 | + .len = sizeof(struct ifla_vf_spoofchk) }, |
5745 | }; |
5746 | |
5747 | static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = { |
5748 | diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c |
5749 | index ec56271..f7f07e2 100644 |
5750 | --- a/net/ipv6/ip6_output.c |
5751 | +++ b/net/ipv6/ip6_output.c |
5752 | @@ -1411,8 +1411,9 @@ alloc_new_skb: |
5753 | */ |
5754 | skb->ip_summed = csummode; |
5755 | skb->csum = 0; |
5756 | - /* reserve for fragmentation */ |
5757 | - skb_reserve(skb, hh_len+sizeof(struct frag_hdr)); |
5758 | + /* reserve for fragmentation and ipsec header */ |
5759 | + skb_reserve(skb, hh_len + sizeof(struct frag_hdr) + |
5760 | + dst_exthdrlen); |
5761 | |
5762 | if (sk->sk_type == SOCK_DGRAM) |
5763 | skb_shinfo(skb)->tx_flags = tx_flags; |
5764 | @@ -1420,9 +1421,9 @@ alloc_new_skb: |
5765 | /* |
5766 | * Find where to start putting bytes |
5767 | */ |
5768 | - data = skb_put(skb, fraglen + dst_exthdrlen); |
5769 | - skb_set_network_header(skb, exthdrlen + dst_exthdrlen); |
5770 | - data += fragheaderlen + dst_exthdrlen; |
5771 | + data = skb_put(skb, fraglen); |
5772 | + skb_set_network_header(skb, exthdrlen); |
5773 | + data += fragheaderlen; |
5774 | skb->transport_header = (skb->network_header + |
5775 | fragheaderlen); |
5776 | if (fraggap) { |
5777 | diff --git a/net/ipv6/route.c b/net/ipv6/route.c |
5778 | index b582a0a..059b9d9 100644 |
5779 | --- a/net/ipv6/route.c |
5780 | +++ b/net/ipv6/route.c |
5781 | @@ -2446,8 +2446,12 @@ static int rt6_fill_node(struct net *net, |
5782 | |
5783 | rcu_read_lock(); |
5784 | n = dst_get_neighbour(&rt->dst); |
5785 | - if (n) |
5786 | - NLA_PUT(skb, RTA_GATEWAY, 16, &n->primary_key); |
5787 | + if (n) { |
5788 | + if (nla_put(skb, RTA_GATEWAY, 16, &n->primary_key) < 0) { |
5789 | + rcu_read_unlock(); |
5790 | + goto nla_put_failure; |
5791 | + } |
5792 | + } |
5793 | rcu_read_unlock(); |
5794 | |
5795 | if (rt->dst.dev) |
5796 | diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c |
5797 | index 8a90d75..b1bd16f 100644 |
5798 | --- a/net/l2tp/l2tp_ppp.c |
5799 | +++ b/net/l2tp/l2tp_ppp.c |
5800 | @@ -915,7 +915,7 @@ static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr, |
5801 | goto end_put_sess; |
5802 | } |
5803 | |
5804 | - inet = inet_sk(sk); |
5805 | + inet = inet_sk(tunnel->sock); |
5806 | if (tunnel->version == 2) { |
5807 | struct sockaddr_pppol2tp sp; |
5808 | len = sizeof(sp); |
5809 | diff --git a/net/rds/send.c b/net/rds/send.c |
5810 | index e2d63c5..96531d4 100644 |
5811 | --- a/net/rds/send.c |
5812 | +++ b/net/rds/send.c |
5813 | @@ -935,7 +935,6 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, |
5814 | /* Mirror Linux UDP mirror of BSD error message compatibility */ |
5815 | /* XXX: Perhaps MSG_MORE someday */ |
5816 | if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) { |
5817 | - printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags); |
5818 | ret = -EOPNOTSUPP; |
5819 | goto out; |
5820 | } |
5821 | diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c |
5822 | index 72ad836..4530a91 100644 |
5823 | --- a/net/sunrpc/cache.c |
5824 | +++ b/net/sunrpc/cache.c |
5825 | @@ -828,6 +828,8 @@ static ssize_t cache_do_downcall(char *kaddr, const char __user *buf, |
5826 | { |
5827 | ssize_t ret; |
5828 | |
5829 | + if (count == 0) |
5830 | + return -EINVAL; |
5831 | if (copy_from_user(kaddr, buf, count)) |
5832 | return -EFAULT; |
5833 | kaddr[count] = '\0'; |
5834 | diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c |
5835 | index 00a1a2a..4e2b3b4 100644 |
5836 | --- a/net/sunrpc/sched.c |
5837 | +++ b/net/sunrpc/sched.c |
5838 | @@ -500,14 +500,18 @@ EXPORT_SYMBOL_GPL(rpc_wake_up_next); |
5839 | */ |
5840 | void rpc_wake_up(struct rpc_wait_queue *queue) |
5841 | { |
5842 | - struct rpc_task *task, *next; |
5843 | struct list_head *head; |
5844 | |
5845 | spin_lock_bh(&queue->lock); |
5846 | head = &queue->tasks[queue->maxpriority]; |
5847 | for (;;) { |
5848 | - list_for_each_entry_safe(task, next, head, u.tk_wait.list) |
5849 | + while (!list_empty(head)) { |
5850 | + struct rpc_task *task; |
5851 | + task = list_first_entry(head, |
5852 | + struct rpc_task, |
5853 | + u.tk_wait.list); |
5854 | rpc_wake_up_task_queue_locked(queue, task); |
5855 | + } |
5856 | if (head == &queue->tasks[0]) |
5857 | break; |
5858 | head--; |
5859 | @@ -525,13 +529,16 @@ EXPORT_SYMBOL_GPL(rpc_wake_up); |
5860 | */ |
5861 | void rpc_wake_up_status(struct rpc_wait_queue *queue, int status) |
5862 | { |
5863 | - struct rpc_task *task, *next; |
5864 | struct list_head *head; |
5865 | |
5866 | spin_lock_bh(&queue->lock); |
5867 | head = &queue->tasks[queue->maxpriority]; |
5868 | for (;;) { |
5869 | - list_for_each_entry_safe(task, next, head, u.tk_wait.list) { |
5870 | + while (!list_empty(head)) { |
5871 | + struct rpc_task *task; |
5872 | + task = list_first_entry(head, |
5873 | + struct rpc_task, |
5874 | + u.tk_wait.list); |
5875 | task->tk_status = status; |
5876 | rpc_wake_up_task_queue_locked(queue, task); |
5877 | } |
5878 | diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c |
5879 | index 39e02c5..2f6d11d 100644 |
5880 | --- a/net/xfrm/xfrm_replay.c |
5881 | +++ b/net/xfrm/xfrm_replay.c |
5882 | @@ -167,7 +167,7 @@ static void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq) |
5883 | } |
5884 | |
5885 | if (xfrm_aevent_is_on(xs_net(x))) |
5886 | - xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); |
5887 | + x->repl->notify(x, XFRM_REPLAY_UPDATE); |
5888 | } |
5889 | |
5890 | static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb) |
5891 | @@ -279,7 +279,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq) |
5892 | replay_esn->bmp[nr] |= (1U << bitnr); |
5893 | |
5894 | if (xfrm_aevent_is_on(xs_net(x))) |
5895 | - xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); |
5896 | + x->repl->notify(x, XFRM_REPLAY_UPDATE); |
5897 | } |
5898 | |
5899 | static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event) |
5900 | @@ -473,7 +473,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) |
5901 | replay_esn->bmp[nr] |= (1U << bitnr); |
5902 | |
5903 | if (xfrm_aevent_is_on(xs_net(x))) |
5904 | - xfrm_replay_notify(x, XFRM_REPLAY_UPDATE); |
5905 | + x->repl->notify(x, XFRM_REPLAY_UPDATE); |
5906 | } |
5907 | |
5908 | static struct xfrm_replay xfrm_replay_legacy = { |
5909 | diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig |
5910 | index 4f554f2..063298a 100644 |
5911 | --- a/security/integrity/ima/Kconfig |
5912 | +++ b/security/integrity/ima/Kconfig |
5913 | @@ -9,7 +9,7 @@ config IMA |
5914 | select CRYPTO_HMAC |
5915 | select CRYPTO_MD5 |
5916 | select CRYPTO_SHA1 |
5917 | - select TCG_TPM if !S390 && !UML |
5918 | + select TCG_TPM if HAS_IOMEM && !UML |
5919 | select TCG_TIS if TCG_TPM |
5920 | help |
5921 | The Trusted Computing Group(TCG) runtime Integrity |
5922 | diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c |
5923 | index c1da422..b58b4b1 100644 |
5924 | --- a/sound/pci/hda/hda_eld.c |
5925 | +++ b/sound/pci/hda/hda_eld.c |
5926 | @@ -385,8 +385,8 @@ error: |
5927 | static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen) |
5928 | { |
5929 | static unsigned int alsa_rates[] = { |
5930 | - 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 88200, |
5931 | - 96000, 176400, 192000, 384000 |
5932 | + 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, |
5933 | + 88200, 96000, 176400, 192000, 384000 |
5934 | }; |
5935 | int i, j; |
5936 | |
5937 | diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c |
5938 | index 2c064a9..075677c 100644 |
5939 | --- a/sound/soc/fsl/p1022_ds.c |
5940 | +++ b/sound/soc/fsl/p1022_ds.c |
5941 | @@ -392,7 +392,8 @@ static int p1022_ds_probe(struct platform_device *pdev) |
5942 | } |
5943 | |
5944 | if (strcasecmp(sprop, "i2s-slave") == 0) { |
5945 | - mdata->dai_format = SND_SOC_DAIFMT_I2S; |
5946 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5947 | + SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM; |
5948 | mdata->codec_clk_direction = SND_SOC_CLOCK_OUT; |
5949 | mdata->cpu_clk_direction = SND_SOC_CLOCK_IN; |
5950 | |
5951 | @@ -409,31 +410,38 @@ static int p1022_ds_probe(struct platform_device *pdev) |
5952 | } |
5953 | mdata->clk_frequency = be32_to_cpup(iprop); |
5954 | } else if (strcasecmp(sprop, "i2s-master") == 0) { |
5955 | - mdata->dai_format = SND_SOC_DAIFMT_I2S; |
5956 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5957 | + SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS; |
5958 | mdata->codec_clk_direction = SND_SOC_CLOCK_IN; |
5959 | mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT; |
5960 | } else if (strcasecmp(sprop, "lj-slave") == 0) { |
5961 | - mdata->dai_format = SND_SOC_DAIFMT_LEFT_J; |
5962 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5963 | + SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM; |
5964 | mdata->codec_clk_direction = SND_SOC_CLOCK_OUT; |
5965 | mdata->cpu_clk_direction = SND_SOC_CLOCK_IN; |
5966 | } else if (strcasecmp(sprop, "lj-master") == 0) { |
5967 | - mdata->dai_format = SND_SOC_DAIFMT_LEFT_J; |
5968 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5969 | + SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS; |
5970 | mdata->codec_clk_direction = SND_SOC_CLOCK_IN; |
5971 | mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT; |
5972 | } else if (strcasecmp(sprop, "rj-slave") == 0) { |
5973 | - mdata->dai_format = SND_SOC_DAIFMT_RIGHT_J; |
5974 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5975 | + SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM; |
5976 | mdata->codec_clk_direction = SND_SOC_CLOCK_OUT; |
5977 | mdata->cpu_clk_direction = SND_SOC_CLOCK_IN; |
5978 | } else if (strcasecmp(sprop, "rj-master") == 0) { |
5979 | - mdata->dai_format = SND_SOC_DAIFMT_RIGHT_J; |
5980 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5981 | + SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS; |
5982 | mdata->codec_clk_direction = SND_SOC_CLOCK_IN; |
5983 | mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT; |
5984 | } else if (strcasecmp(sprop, "ac97-slave") == 0) { |
5985 | - mdata->dai_format = SND_SOC_DAIFMT_AC97; |
5986 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5987 | + SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM; |
5988 | mdata->codec_clk_direction = SND_SOC_CLOCK_OUT; |
5989 | mdata->cpu_clk_direction = SND_SOC_CLOCK_IN; |
5990 | } else if (strcasecmp(sprop, "ac97-master") == 0) { |
5991 | - mdata->dai_format = SND_SOC_DAIFMT_AC97; |
5992 | + mdata->dai_format = SND_SOC_DAIFMT_NB_NF | |
5993 | + SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS; |
5994 | mdata->codec_clk_direction = SND_SOC_CLOCK_IN; |
5995 | mdata->cpu_clk_direction = SND_SOC_CLOCK_OUT; |
5996 | } else { |
5997 | diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c |
5998 | index 8ad93ee..b583e60 100644 |
5999 | --- a/sound/soc/pxa/pxa-ssp.c |
6000 | +++ b/sound/soc/pxa/pxa-ssp.c |
6001 | @@ -668,6 +668,38 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream, |
6002 | return 0; |
6003 | } |
6004 | |
6005 | +static void pxa_ssp_set_running_bit(struct snd_pcm_substream *substream, |
6006 | + struct ssp_device *ssp, int value) |
6007 | +{ |
6008 | + uint32_t sscr0 = pxa_ssp_read_reg(ssp, SSCR0); |
6009 | + uint32_t sscr1 = pxa_ssp_read_reg(ssp, SSCR1); |
6010 | + uint32_t sspsp = pxa_ssp_read_reg(ssp, SSPSP); |
6011 | + uint32_t sssr = pxa_ssp_read_reg(ssp, SSSR); |
6012 | + |
6013 | + if (value && (sscr0 & SSCR0_SSE)) |
6014 | + pxa_ssp_write_reg(ssp, SSCR0, sscr0 & ~SSCR0_SSE); |
6015 | + |
6016 | + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { |
6017 | + if (value) |
6018 | + sscr1 |= SSCR1_TSRE; |
6019 | + else |
6020 | + sscr1 &= ~SSCR1_TSRE; |
6021 | + } else { |
6022 | + if (value) |
6023 | + sscr1 |= SSCR1_RSRE; |
6024 | + else |
6025 | + sscr1 &= ~SSCR1_RSRE; |
6026 | + } |
6027 | + |
6028 | + pxa_ssp_write_reg(ssp, SSCR1, sscr1); |
6029 | + |
6030 | + if (value) { |
6031 | + pxa_ssp_write_reg(ssp, SSSR, sssr); |
6032 | + pxa_ssp_write_reg(ssp, SSPSP, sspsp); |
6033 | + pxa_ssp_write_reg(ssp, SSCR0, sscr0 | SSCR0_SSE); |
6034 | + } |
6035 | +} |
6036 | + |
6037 | static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd, |
6038 | struct snd_soc_dai *cpu_dai) |
6039 | { |
6040 | @@ -681,42 +713,21 @@ static int pxa_ssp_trigger(struct snd_pcm_substream *substream, int cmd, |
6041 | pxa_ssp_enable(ssp); |
6042 | break; |
6043 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
6044 | - val = pxa_ssp_read_reg(ssp, SSCR1); |
6045 | - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
6046 | - val |= SSCR1_TSRE; |
6047 | - else |
6048 | - val |= SSCR1_RSRE; |
6049 | - pxa_ssp_write_reg(ssp, SSCR1, val); |
6050 | + pxa_ssp_set_running_bit(substream, ssp, 1); |
6051 | val = pxa_ssp_read_reg(ssp, SSSR); |
6052 | pxa_ssp_write_reg(ssp, SSSR, val); |
6053 | break; |
6054 | case SNDRV_PCM_TRIGGER_START: |
6055 | - val = pxa_ssp_read_reg(ssp, SSCR1); |
6056 | - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
6057 | - val |= SSCR1_TSRE; |
6058 | - else |
6059 | - val |= SSCR1_RSRE; |
6060 | - pxa_ssp_write_reg(ssp, SSCR1, val); |
6061 | - pxa_ssp_enable(ssp); |
6062 | + pxa_ssp_set_running_bit(substream, ssp, 1); |
6063 | break; |
6064 | case SNDRV_PCM_TRIGGER_STOP: |
6065 | - val = pxa_ssp_read_reg(ssp, SSCR1); |
6066 | - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
6067 | - val &= ~SSCR1_TSRE; |
6068 | - else |
6069 | - val &= ~SSCR1_RSRE; |
6070 | - pxa_ssp_write_reg(ssp, SSCR1, val); |
6071 | + pxa_ssp_set_running_bit(substream, ssp, 0); |
6072 | break; |
6073 | case SNDRV_PCM_TRIGGER_SUSPEND: |
6074 | pxa_ssp_disable(ssp); |
6075 | break; |
6076 | case SNDRV_PCM_TRIGGER_PAUSE_PUSH: |
6077 | - val = pxa_ssp_read_reg(ssp, SSCR1); |
6078 | - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) |
6079 | - val &= ~SSCR1_TSRE; |
6080 | - else |
6081 | - val &= ~SSCR1_RSRE; |
6082 | - pxa_ssp_write_reg(ssp, SSCR1, val); |
6083 | + pxa_ssp_set_running_bit(substream, ssp, 0); |
6084 | break; |
6085 | |
6086 | default: |