Contents of /trunk/kernel26-magellan/patches-2.6.35-r4/0103-2.6.35.4-all-fixes.patch
Parent Directory | Revision Log
Revision 1138 -
(show annotations)
(download)
Tue Sep 21 13:53:34 2010 UTC (14 years ago) by niro
File size: 150801 byte(s)
Tue Sep 21 13:53:34 2010 UTC (14 years ago) by niro
File size: 150801 byte(s)
-2.6.35-magellan-r4: updated to linux-2.6.35.5, removed CVE-2010-3301 hotfix, as it is provided by 2.6.35.5
1 | diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h |
2 | index 9dcb11e..bf62c44 100644 |
3 | --- a/arch/arm/include/asm/ptrace.h |
4 | +++ b/arch/arm/include/asm/ptrace.h |
5 | @@ -158,15 +158,24 @@ struct pt_regs { |
6 | */ |
7 | static inline int valid_user_regs(struct pt_regs *regs) |
8 | { |
9 | - if (user_mode(regs) && (regs->ARM_cpsr & PSR_I_BIT) == 0) { |
10 | - regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); |
11 | - return 1; |
12 | + unsigned long mode = regs->ARM_cpsr & MODE_MASK; |
13 | + |
14 | + /* |
15 | + * Always clear the F (FIQ) and A (delayed abort) bits |
16 | + */ |
17 | + regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT); |
18 | + |
19 | + if ((regs->ARM_cpsr & PSR_I_BIT) == 0) { |
20 | + if (mode == USR_MODE) |
21 | + return 1; |
22 | + if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE) |
23 | + return 1; |
24 | } |
25 | |
26 | /* |
27 | * Force CPSR to something logical... |
28 | */ |
29 | - regs->ARM_cpsr &= PSR_f | PSR_s | (PSR_x & ~PSR_A_BIT) | PSR_T_BIT | MODE32_BIT; |
30 | + regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT; |
31 | if (!(elf_hwcap & HWCAP_26BIT)) |
32 | regs->ARM_cpsr |= USR_MODE; |
33 | |
34 | diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c |
35 | index 827cbc4..ea9ee4e 100644 |
36 | --- a/arch/arm/mach-ixp4xx/ixdp425-setup.c |
37 | +++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c |
38 | @@ -100,6 +100,7 @@ ixdp425_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) |
39 | |
40 | static struct platform_nand_data ixdp425_flash_nand_data = { |
41 | .chip = { |
42 | + .nr_chips = 1, |
43 | .chip_delay = 30, |
44 | .options = NAND_NO_AUTOINCR, |
45 | #ifdef CONFIG_MTD_PARTITIONS |
46 | diff --git a/arch/arm/mach-mx3/mach-qong.c b/arch/arm/mach-mx3/mach-qong.c |
47 | index e5b5b83..1f9363f 100644 |
48 | --- a/arch/arm/mach-mx3/mach-qong.c |
49 | +++ b/arch/arm/mach-mx3/mach-qong.c |
50 | @@ -169,6 +169,7 @@ static void qong_nand_select_chip(struct mtd_info *mtd, int chip) |
51 | |
52 | static struct platform_nand_data qong_nand_data = { |
53 | .chip = { |
54 | + .nr_chips = 1, |
55 | .chip_delay = 20, |
56 | .options = 0, |
57 | }, |
58 | diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c |
59 | index 5041d1b..696b1a9 100644 |
60 | --- a/arch/arm/mach-orion5x/ts78xx-setup.c |
61 | +++ b/arch/arm/mach-orion5x/ts78xx-setup.c |
62 | @@ -216,6 +216,7 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = { |
63 | |
64 | static struct platform_nand_data ts78xx_ts_nand_data = { |
65 | .chip = { |
66 | + .nr_chips = 1, |
67 | .part_probe_types = ts_nand_part_probes, |
68 | .partitions = ts78xx_ts_nand_parts, |
69 | .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts), |
70 | diff --git a/arch/blackfin/mach-bf537/boards/stamp.c b/arch/blackfin/mach-bf537/boards/stamp.c |
71 | index 9eaf5b0..68a27bc 100644 |
72 | --- a/arch/blackfin/mach-bf537/boards/stamp.c |
73 | +++ b/arch/blackfin/mach-bf537/boards/stamp.c |
74 | @@ -400,6 +400,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd) |
75 | |
76 | static struct platform_nand_data bfin_plat_nand_data = { |
77 | .chip = { |
78 | + .nr_chips = 1, |
79 | .chip_delay = 30, |
80 | #ifdef CONFIG_MTD_PARTITIONS |
81 | .part_probe_types = part_probes, |
82 | diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c |
83 | index bfcfa86..35b6d12 100644 |
84 | --- a/arch/blackfin/mach-bf561/boards/acvilon.c |
85 | +++ b/arch/blackfin/mach-bf561/boards/acvilon.c |
86 | @@ -284,6 +284,7 @@ static int bfin_plat_nand_dev_ready(struct mtd_info *mtd) |
87 | |
88 | static struct platform_nand_data bfin_plat_nand_data = { |
89 | .chip = { |
90 | + .nr_chips = 1, |
91 | .chip_delay = 30, |
92 | #ifdef CONFIG_MTD_PARTITIONS |
93 | .part_probe_types = part_probes, |
94 | diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile |
95 | index 5d2f17d..b2e3635 100644 |
96 | --- a/arch/powerpc/Makefile |
97 | +++ b/arch/powerpc/Makefile |
98 | @@ -164,7 +164,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ |
99 | all: zImage |
100 | |
101 | # With make 3.82 we cannot mix normal and wildcard targets |
102 | -BOOT_TARGETS1 := zImage zImage.initrd uImaged |
103 | +BOOT_TARGETS1 := zImage zImage.initrd uImage |
104 | BOOT_TARGETS2 := zImage% dtbImage% treeImage.% cuImage.% simpleImage.% |
105 | |
106 | PHONY += $(BOOT_TARGETS1) $(BOOT_TARGETS2) |
107 | diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h |
108 | index 2050ca0..bdb2ff8 100644 |
109 | --- a/arch/sparc/include/asm/atomic_64.h |
110 | +++ b/arch/sparc/include/asm/atomic_64.h |
111 | @@ -20,14 +20,14 @@ |
112 | #define atomic64_set(v, i) (((v)->counter) = i) |
113 | |
114 | extern void atomic_add(int, atomic_t *); |
115 | -extern void atomic64_add(int, atomic64_t *); |
116 | +extern void atomic64_add(long, atomic64_t *); |
117 | extern void atomic_sub(int, atomic_t *); |
118 | -extern void atomic64_sub(int, atomic64_t *); |
119 | +extern void atomic64_sub(long, atomic64_t *); |
120 | |
121 | extern int atomic_add_ret(int, atomic_t *); |
122 | -extern int atomic64_add_ret(int, atomic64_t *); |
123 | +extern long atomic64_add_ret(long, atomic64_t *); |
124 | extern int atomic_sub_ret(int, atomic_t *); |
125 | -extern int atomic64_sub_ret(int, atomic64_t *); |
126 | +extern long atomic64_sub_ret(long, atomic64_t *); |
127 | |
128 | #define atomic_dec_return(v) atomic_sub_ret(1, v) |
129 | #define atomic64_dec_return(v) atomic64_sub_ret(1, v) |
130 | @@ -91,7 +91,7 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) |
131 | ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) |
132 | #define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) |
133 | |
134 | -static inline int atomic64_add_unless(atomic64_t *v, long a, long u) |
135 | +static inline long atomic64_add_unless(atomic64_t *v, long a, long u) |
136 | { |
137 | long c, old; |
138 | c = atomic64_read(v); |
139 | diff --git a/arch/sparc/include/asm/fb.h b/arch/sparc/include/asm/fb.h |
140 | index e834880..2173432 100644 |
141 | --- a/arch/sparc/include/asm/fb.h |
142 | +++ b/arch/sparc/include/asm/fb.h |
143 | @@ -1,5 +1,6 @@ |
144 | #ifndef _SPARC_FB_H_ |
145 | #define _SPARC_FB_H_ |
146 | +#include <linux/console.h> |
147 | #include <linux/fb.h> |
148 | #include <linux/fs.h> |
149 | #include <asm/page.h> |
150 | @@ -18,6 +19,9 @@ static inline int fb_is_primary_device(struct fb_info *info) |
151 | struct device *dev = info->device; |
152 | struct device_node *node; |
153 | |
154 | + if (console_set_on_cmdline) |
155 | + return 0; |
156 | + |
157 | node = dev->of_node; |
158 | if (node && |
159 | node == of_console_device) |
160 | diff --git a/arch/sparc/include/asm/parport.h b/arch/sparc/include/asm/parport.h |
161 | index c333b8d..d21ad50 100644 |
162 | --- a/arch/sparc/include/asm/parport.h |
163 | +++ b/arch/sparc/include/asm/parport.h |
164 | @@ -228,6 +228,10 @@ static const struct of_device_id ecpp_match[] = { |
165 | .name = "parallel", |
166 | .compatible = "ns87317-ecpp", |
167 | }, |
168 | + { |
169 | + .name = "parallel", |
170 | + .compatible = "pnpALI,1533,3", |
171 | + }, |
172 | {}, |
173 | }; |
174 | |
175 | diff --git a/arch/sparc/include/asm/rwsem-const.h b/arch/sparc/include/asm/rwsem-const.h |
176 | index a303c9d..e4c61a1 100644 |
177 | --- a/arch/sparc/include/asm/rwsem-const.h |
178 | +++ b/arch/sparc/include/asm/rwsem-const.h |
179 | @@ -5,7 +5,7 @@ |
180 | #define RWSEM_UNLOCKED_VALUE 0x00000000 |
181 | #define RWSEM_ACTIVE_BIAS 0x00000001 |
182 | #define RWSEM_ACTIVE_MASK 0x0000ffff |
183 | -#define RWSEM_WAITING_BIAS 0xffff0000 |
184 | +#define RWSEM_WAITING_BIAS (-0x00010000) |
185 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
186 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
187 | |
188 | diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig |
189 | index dcb0593..f942bb7 100644 |
190 | --- a/arch/x86/Kconfig |
191 | +++ b/arch/x86/Kconfig |
192 | @@ -247,6 +247,11 @@ config ARCH_HWEIGHT_CFLAGS |
193 | |
194 | config KTIME_SCALAR |
195 | def_bool X86_32 |
196 | + |
197 | +config ARCH_CPU_PROBE_RELEASE |
198 | + def_bool y |
199 | + depends on HOTPLUG_CPU |
200 | + |
201 | source "init/Kconfig" |
202 | source "kernel/Kconfig.freezer" |
203 | |
204 | diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h |
205 | index c1cf59d..20955ea 100644 |
206 | --- a/arch/x86/include/asm/cmpxchg_32.h |
207 | +++ b/arch/x86/include/asm/cmpxchg_32.h |
208 | @@ -53,60 +53,33 @@ struct __xchg_dummy { |
209 | __xchg((v), (ptr), sizeof(*ptr)) |
210 | |
211 | /* |
212 | - * The semantics of XCHGCMP8B are a bit strange, this is why |
213 | - * there is a loop and the loading of %%eax and %%edx has to |
214 | - * be inside. This inlines well in most cases, the cached |
215 | - * cost is around ~38 cycles. (in the future we might want |
216 | - * to do an SIMD/3DNOW!/MMX/FPU 64-bit store here, but that |
217 | - * might have an implicit FPU-save as a cost, so it's not |
218 | - * clear which path to go.) |
219 | + * CMPXCHG8B only writes to the target if we had the previous |
220 | + * value in registers, otherwise it acts as a read and gives us the |
221 | + * "new previous" value. That is why there is a loop. Preloading |
222 | + * EDX:EAX is a performance optimization: in the common case it means |
223 | + * we need only one locked operation. |
224 | * |
225 | - * cmpxchg8b must be used with the lock prefix here to allow |
226 | - * the instruction to be executed atomically, see page 3-102 |
227 | - * of the instruction set reference 24319102.pdf. We need |
228 | - * the reader side to see the coherent 64bit value. |
229 | + * A SIMD/3DNOW!/MMX/FPU 64-bit store here would require at the very |
230 | + * least an FPU save and/or %cr0.ts manipulation. |
231 | + * |
232 | + * cmpxchg8b must be used with the lock prefix here to allow the |
233 | + * instruction to be executed atomically. We need to have the reader |
234 | + * side to see the coherent 64bit value. |
235 | */ |
236 | -static inline void __set_64bit(unsigned long long *ptr, |
237 | - unsigned int low, unsigned int high) |
238 | +static inline void set_64bit(volatile u64 *ptr, u64 value) |
239 | { |
240 | + u32 low = value; |
241 | + u32 high = value >> 32; |
242 | + u64 prev = *ptr; |
243 | + |
244 | asm volatile("\n1:\t" |
245 | - "movl (%1), %%eax\n\t" |
246 | - "movl 4(%1), %%edx\n\t" |
247 | - LOCK_PREFIX "cmpxchg8b (%1)\n\t" |
248 | + LOCK_PREFIX "cmpxchg8b %0\n\t" |
249 | "jnz 1b" |
250 | - : "=m" (*ptr) |
251 | - : "D" (ptr), |
252 | - "b" (low), |
253 | - "c" (high) |
254 | - : "ax", "dx", "memory"); |
255 | -} |
256 | - |
257 | -static inline void __set_64bit_constant(unsigned long long *ptr, |
258 | - unsigned long long value) |
259 | -{ |
260 | - __set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32)); |
261 | -} |
262 | - |
263 | -#define ll_low(x) *(((unsigned int *)&(x)) + 0) |
264 | -#define ll_high(x) *(((unsigned int *)&(x)) + 1) |
265 | - |
266 | -static inline void __set_64bit_var(unsigned long long *ptr, |
267 | - unsigned long long value) |
268 | -{ |
269 | - __set_64bit(ptr, ll_low(value), ll_high(value)); |
270 | + : "=m" (*ptr), "+A" (prev) |
271 | + : "b" (low), "c" (high) |
272 | + : "memory"); |
273 | } |
274 | |
275 | -#define set_64bit(ptr, value) \ |
276 | - (__builtin_constant_p((value)) \ |
277 | - ? __set_64bit_constant((ptr), (value)) \ |
278 | - : __set_64bit_var((ptr), (value))) |
279 | - |
280 | -#define _set_64bit(ptr, value) \ |
281 | - (__builtin_constant_p(value) \ |
282 | - ? __set_64bit(ptr, (unsigned int)(value), \ |
283 | - (unsigned int)((value) >> 32)) \ |
284 | - : __set_64bit(ptr, ll_low((value)), ll_high((value)))) |
285 | - |
286 | extern void __cmpxchg_wrong_size(void); |
287 | |
288 | /* |
289 | diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h |
290 | index b92f147..9596e7c 100644 |
291 | --- a/arch/x86/include/asm/cmpxchg_64.h |
292 | +++ b/arch/x86/include/asm/cmpxchg_64.h |
293 | @@ -5,13 +5,11 @@ |
294 | |
295 | #define __xg(x) ((volatile long *)(x)) |
296 | |
297 | -static inline void set_64bit(volatile unsigned long *ptr, unsigned long val) |
298 | +static inline void set_64bit(volatile u64 *ptr, u64 val) |
299 | { |
300 | *ptr = val; |
301 | } |
302 | |
303 | -#define _set_64bit set_64bit |
304 | - |
305 | extern void __xchg_wrong_size(void); |
306 | extern void __cmpxchg_wrong_size(void); |
307 | |
308 | diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c |
309 | index a96489e..c07e513 100644 |
310 | --- a/arch/x86/kernel/apic/apic.c |
311 | +++ b/arch/x86/kernel/apic/apic.c |
312 | @@ -1606,7 +1606,7 @@ void __init init_apic_mappings(void) |
313 | * acpi lapic path already maps that address in |
314 | * acpi_register_lapic_address() |
315 | */ |
316 | - if (!acpi_lapic) |
317 | + if (!acpi_lapic && !smp_found_config) |
318 | set_fixmap_nocache(FIX_APIC_BASE, apic_phys); |
319 | |
320 | apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n", |
321 | diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c |
322 | index e41ed24..2b18af1 100644 |
323 | --- a/arch/x86/kernel/apic/io_apic.c |
324 | +++ b/arch/x86/kernel/apic/io_apic.c |
325 | @@ -1728,6 +1728,8 @@ __apicdebuginit(void) print_IO_APIC(void) |
326 | struct irq_pin_list *entry; |
327 | |
328 | cfg = desc->chip_data; |
329 | + if (!cfg) |
330 | + continue; |
331 | entry = cfg->irq_2_pin; |
332 | if (!entry) |
333 | continue; |
334 | diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c |
335 | index 214ac86..d8d86d0 100644 |
336 | --- a/arch/x86/kernel/cpu/perf_event_intel.c |
337 | +++ b/arch/x86/kernel/cpu/perf_event_intel.c |
338 | @@ -491,33 +491,78 @@ static void intel_pmu_enable_all(int added) |
339 | * Intel Errata AAP53 (model 30) |
340 | * Intel Errata BD53 (model 44) |
341 | * |
342 | - * These chips need to be 'reset' when adding counters by programming |
343 | - * the magic three (non counting) events 0x4300D2, 0x4300B1 and 0x4300B5 |
344 | - * either in sequence on the same PMC or on different PMCs. |
345 | + * The official story: |
346 | + * These chips need to be 'reset' when adding counters by programming the |
347 | + * magic three (non-counting) events 0x4300B5, 0x4300D2, and 0x4300B1 either |
348 | + * in sequence on the same PMC or on different PMCs. |
349 | + * |
350 | + * In practice it appears some of these events do in fact count, and |
351 | + * we need to program all 4 events. |
352 | */ |
353 | -static void intel_pmu_nhm_enable_all(int added) |
354 | +static void intel_pmu_nhm_workaround(void) |
355 | { |
356 | - if (added) { |
357 | - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
358 | - int i; |
359 | + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
360 | + static const unsigned long nhm_magic[4] = { |
361 | + 0x4300B5, |
362 | + 0x4300D2, |
363 | + 0x4300B1, |
364 | + 0x4300B1 |
365 | + }; |
366 | + struct perf_event *event; |
367 | + int i; |
368 | + |
369 | + /* |
370 | + * The Errata requires below steps: |
371 | + * 1) Clear MSR_IA32_PEBS_ENABLE and MSR_CORE_PERF_GLOBAL_CTRL; |
372 | + * 2) Configure 4 PERFEVTSELx with the magic events and clear |
373 | + * the corresponding PMCx; |
374 | + * 3) set bit0~bit3 of MSR_CORE_PERF_GLOBAL_CTRL; |
375 | + * 4) Clear MSR_CORE_PERF_GLOBAL_CTRL; |
376 | + * 5) Clear 4 pairs of PERFEVTSELx and PMCx; |
377 | + */ |
378 | + |
379 | + /* |
380 | + * The real steps we choose are a little different from above. |
381 | + * A) To reduce MSR operations, we don't run step 1) as they |
382 | + * are already cleared before this function is called; |
383 | + * B) Call x86_perf_event_update to save PMCx before configuring |
384 | + * PERFEVTSELx with magic number; |
385 | + * C) With step 5), we do clear only when the PERFEVTSELx is |
386 | + * not used currently. |
387 | + * D) Call x86_perf_event_set_period to restore PMCx; |
388 | + */ |
389 | |
390 | - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2); |
391 | - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1); |
392 | - wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5); |
393 | + /* We always operate 4 pairs of PERF Counters */ |
394 | + for (i = 0; i < 4; i++) { |
395 | + event = cpuc->events[i]; |
396 | + if (event) |
397 | + x86_perf_event_update(event); |
398 | + } |
399 | |
400 | - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3); |
401 | - wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); |
402 | + for (i = 0; i < 4; i++) { |
403 | + wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, nhm_magic[i]); |
404 | + wrmsrl(MSR_ARCH_PERFMON_PERFCTR0 + i, 0x0); |
405 | + } |
406 | |
407 | - for (i = 0; i < 3; i++) { |
408 | - struct perf_event *event = cpuc->events[i]; |
409 | + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0xf); |
410 | + wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0); |
411 | |
412 | - if (!event) |
413 | - continue; |
414 | + for (i = 0; i < 4; i++) { |
415 | + event = cpuc->events[i]; |
416 | |
417 | + if (event) { |
418 | + x86_perf_event_set_period(event); |
419 | __x86_pmu_enable_event(&event->hw, |
420 | - ARCH_PERFMON_EVENTSEL_ENABLE); |
421 | - } |
422 | + ARCH_PERFMON_EVENTSEL_ENABLE); |
423 | + } else |
424 | + wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + i, 0x0); |
425 | } |
426 | +} |
427 | + |
428 | +static void intel_pmu_nhm_enable_all(int added) |
429 | +{ |
430 | + if (added) |
431 | + intel_pmu_nhm_workaround(); |
432 | intel_pmu_enable_all(added); |
433 | } |
434 | |
435 | diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c |
436 | index ae85d69..0ffe19e 100644 |
437 | --- a/arch/x86/kernel/cpu/perf_event_p4.c |
438 | +++ b/arch/x86/kernel/cpu/perf_event_p4.c |
439 | @@ -581,6 +581,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) |
440 | cpuc = &__get_cpu_var(cpu_hw_events); |
441 | |
442 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { |
443 | + int overflow; |
444 | |
445 | if (!test_bit(idx, cpuc->active_mask)) |
446 | continue; |
447 | @@ -591,12 +592,14 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) |
448 | WARN_ON_ONCE(hwc->idx != idx); |
449 | |
450 | /* it might be unflagged overflow */ |
451 | - handled = p4_pmu_clear_cccr_ovf(hwc); |
452 | + overflow = p4_pmu_clear_cccr_ovf(hwc); |
453 | |
454 | val = x86_perf_event_update(event); |
455 | - if (!handled && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) |
456 | + if (!overflow && (val & (1ULL << (x86_pmu.cntval_bits - 1)))) |
457 | continue; |
458 | |
459 | + handled += overflow; |
460 | + |
461 | /* event overflow for sure */ |
462 | data.period = event->hw.last_period; |
463 | |
464 | @@ -612,7 +615,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs) |
465 | inc_irq_stat(apic_perf_irqs); |
466 | } |
467 | |
468 | - return handled; |
469 | + return handled > 0; |
470 | } |
471 | |
472 | /* |
473 | diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c |
474 | index d86dbf7..d7b6f7f 100644 |
475 | --- a/arch/x86/kernel/mpparse.c |
476 | +++ b/arch/x86/kernel/mpparse.c |
477 | @@ -274,6 +274,18 @@ static void __init smp_dump_mptable(struct mpc_table *mpc, unsigned char *mpt) |
478 | |
479 | void __init default_smp_read_mpc_oem(struct mpc_table *mpc) { } |
480 | |
481 | +static void __init smp_register_lapic_address(unsigned long address) |
482 | +{ |
483 | + mp_lapic_addr = address; |
484 | + |
485 | + set_fixmap_nocache(FIX_APIC_BASE, address); |
486 | + if (boot_cpu_physical_apicid == -1U) { |
487 | + boot_cpu_physical_apicid = read_apic_id(); |
488 | + apic_version[boot_cpu_physical_apicid] = |
489 | + GET_APIC_VERSION(apic_read(APIC_LVR)); |
490 | + } |
491 | +} |
492 | + |
493 | static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) |
494 | { |
495 | char str[16]; |
496 | @@ -295,6 +307,10 @@ static int __init smp_read_mpc(struct mpc_table *mpc, unsigned early) |
497 | if (early) |
498 | return 1; |
499 | |
500 | + /* Initialize the lapic mapping */ |
501 | + if (!acpi_lapic) |
502 | + smp_register_lapic_address(mpc->lapic); |
503 | + |
504 | if (mpc->oemptr) |
505 | x86_init.mpparse.smp_read_mpc_oem(mpc); |
506 | |
507 | diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c |
508 | index 11015fd..0bf2ece 100644 |
509 | --- a/arch/x86/kernel/smpboot.c |
510 | +++ b/arch/x86/kernel/smpboot.c |
511 | @@ -91,6 +91,25 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
512 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); |
513 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) |
514 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) |
515 | + |
516 | +/* |
517 | + * We need this for trampoline_base protection from concurrent accesses when |
518 | + * off- and onlining cores wildly. |
519 | + */ |
520 | +static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex); |
521 | + |
522 | +void cpu_hotplug_driver_lock() |
523 | +{ |
524 | + mutex_lock(&x86_cpu_hotplug_driver_mutex); |
525 | +} |
526 | + |
527 | +void cpu_hotplug_driver_unlock() |
528 | +{ |
529 | + mutex_unlock(&x86_cpu_hotplug_driver_mutex); |
530 | +} |
531 | + |
532 | +ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; } |
533 | +ssize_t arch_cpu_release(const char *buf, size_t count) { return -1; } |
534 | #else |
535 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; |
536 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) |
537 | diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S |
538 | index 4a5979a..78ee8e0 100644 |
539 | --- a/arch/x86/lib/atomic64_386_32.S |
540 | +++ b/arch/x86/lib/atomic64_386_32.S |
541 | @@ -25,150 +25,170 @@ |
542 | CFI_ADJUST_CFA_OFFSET -4 |
543 | .endm |
544 | |
545 | -.macro BEGIN func reg |
546 | -$v = \reg |
547 | - |
548 | -ENTRY(atomic64_\func\()_386) |
549 | - CFI_STARTPROC |
550 | - LOCK $v |
551 | - |
552 | -.macro RETURN |
553 | - UNLOCK $v |
554 | +#define BEGIN(op) \ |
555 | +.macro END; \ |
556 | + CFI_ENDPROC; \ |
557 | +ENDPROC(atomic64_##op##_386); \ |
558 | +.purgem END; \ |
559 | +.endm; \ |
560 | +ENTRY(atomic64_##op##_386); \ |
561 | + CFI_STARTPROC; \ |
562 | + LOCK v; |
563 | + |
564 | +#define RET \ |
565 | + UNLOCK v; \ |
566 | ret |
567 | -.endm |
568 | - |
569 | -.macro END_ |
570 | - CFI_ENDPROC |
571 | -ENDPROC(atomic64_\func\()_386) |
572 | -.purgem RETURN |
573 | -.purgem END_ |
574 | -.purgem END |
575 | -.endm |
576 | - |
577 | -.macro END |
578 | -RETURN |
579 | -END_ |
580 | -.endm |
581 | -.endm |
582 | - |
583 | -BEGIN read %ecx |
584 | - movl ($v), %eax |
585 | - movl 4($v), %edx |
586 | -END |
587 | - |
588 | -BEGIN set %esi |
589 | - movl %ebx, ($v) |
590 | - movl %ecx, 4($v) |
591 | -END |
592 | - |
593 | -BEGIN xchg %esi |
594 | - movl ($v), %eax |
595 | - movl 4($v), %edx |
596 | - movl %ebx, ($v) |
597 | - movl %ecx, 4($v) |
598 | -END |
599 | - |
600 | -BEGIN add %ecx |
601 | - addl %eax, ($v) |
602 | - adcl %edx, 4($v) |
603 | -END |
604 | |
605 | -BEGIN add_return %ecx |
606 | - addl ($v), %eax |
607 | - adcl 4($v), %edx |
608 | - movl %eax, ($v) |
609 | - movl %edx, 4($v) |
610 | -END |
611 | - |
612 | -BEGIN sub %ecx |
613 | - subl %eax, ($v) |
614 | - sbbl %edx, 4($v) |
615 | -END |
616 | - |
617 | -BEGIN sub_return %ecx |
618 | +#define RET_END \ |
619 | + RET; \ |
620 | + END |
621 | + |
622 | +#define v %ecx |
623 | +BEGIN(read) |
624 | + movl (v), %eax |
625 | + movl 4(v), %edx |
626 | +RET_END |
627 | +#undef v |
628 | + |
629 | +#define v %esi |
630 | +BEGIN(set) |
631 | + movl %ebx, (v) |
632 | + movl %ecx, 4(v) |
633 | +RET_END |
634 | +#undef v |
635 | + |
636 | +#define v %esi |
637 | +BEGIN(xchg) |
638 | + movl (v), %eax |
639 | + movl 4(v), %edx |
640 | + movl %ebx, (v) |
641 | + movl %ecx, 4(v) |
642 | +RET_END |
643 | +#undef v |
644 | + |
645 | +#define v %ecx |
646 | +BEGIN(add) |
647 | + addl %eax, (v) |
648 | + adcl %edx, 4(v) |
649 | +RET_END |
650 | +#undef v |
651 | + |
652 | +#define v %ecx |
653 | +BEGIN(add_return) |
654 | + addl (v), %eax |
655 | + adcl 4(v), %edx |
656 | + movl %eax, (v) |
657 | + movl %edx, 4(v) |
658 | +RET_END |
659 | +#undef v |
660 | + |
661 | +#define v %ecx |
662 | +BEGIN(sub) |
663 | + subl %eax, (v) |
664 | + sbbl %edx, 4(v) |
665 | +RET_END |
666 | +#undef v |
667 | + |
668 | +#define v %ecx |
669 | +BEGIN(sub_return) |
670 | negl %edx |
671 | negl %eax |
672 | sbbl $0, %edx |
673 | - addl ($v), %eax |
674 | - adcl 4($v), %edx |
675 | - movl %eax, ($v) |
676 | - movl %edx, 4($v) |
677 | -END |
678 | - |
679 | -BEGIN inc %esi |
680 | - addl $1, ($v) |
681 | - adcl $0, 4($v) |
682 | -END |
683 | - |
684 | -BEGIN inc_return %esi |
685 | - movl ($v), %eax |
686 | - movl 4($v), %edx |
687 | + addl (v), %eax |
688 | + adcl 4(v), %edx |
689 | + movl %eax, (v) |
690 | + movl %edx, 4(v) |
691 | +RET_END |
692 | +#undef v |
693 | + |
694 | +#define v %esi |
695 | +BEGIN(inc) |
696 | + addl $1, (v) |
697 | + adcl $0, 4(v) |
698 | +RET_END |
699 | +#undef v |
700 | + |
701 | +#define v %esi |
702 | +BEGIN(inc_return) |
703 | + movl (v), %eax |
704 | + movl 4(v), %edx |
705 | addl $1, %eax |
706 | adcl $0, %edx |
707 | - movl %eax, ($v) |
708 | - movl %edx, 4($v) |
709 | -END |
710 | - |
711 | -BEGIN dec %esi |
712 | - subl $1, ($v) |
713 | - sbbl $0, 4($v) |
714 | -END |
715 | - |
716 | -BEGIN dec_return %esi |
717 | - movl ($v), %eax |
718 | - movl 4($v), %edx |
719 | + movl %eax, (v) |
720 | + movl %edx, 4(v) |
721 | +RET_END |
722 | +#undef v |
723 | + |
724 | +#define v %esi |
725 | +BEGIN(dec) |
726 | + subl $1, (v) |
727 | + sbbl $0, 4(v) |
728 | +RET_END |
729 | +#undef v |
730 | + |
731 | +#define v %esi |
732 | +BEGIN(dec_return) |
733 | + movl (v), %eax |
734 | + movl 4(v), %edx |
735 | subl $1, %eax |
736 | sbbl $0, %edx |
737 | - movl %eax, ($v) |
738 | - movl %edx, 4($v) |
739 | -END |
740 | + movl %eax, (v) |
741 | + movl %edx, 4(v) |
742 | +RET_END |
743 | +#undef v |
744 | |
745 | -BEGIN add_unless %ecx |
746 | +#define v %ecx |
747 | +BEGIN(add_unless) |
748 | addl %eax, %esi |
749 | adcl %edx, %edi |
750 | - addl ($v), %eax |
751 | - adcl 4($v), %edx |
752 | + addl (v), %eax |
753 | + adcl 4(v), %edx |
754 | cmpl %eax, %esi |
755 | je 3f |
756 | 1: |
757 | - movl %eax, ($v) |
758 | - movl %edx, 4($v) |
759 | + movl %eax, (v) |
760 | + movl %edx, 4(v) |
761 | movl $1, %eax |
762 | 2: |
763 | -RETURN |
764 | + RET |
765 | 3: |
766 | cmpl %edx, %edi |
767 | jne 1b |
768 | xorl %eax, %eax |
769 | jmp 2b |
770 | -END_ |
771 | +END |
772 | +#undef v |
773 | |
774 | -BEGIN inc_not_zero %esi |
775 | - movl ($v), %eax |
776 | - movl 4($v), %edx |
777 | +#define v %esi |
778 | +BEGIN(inc_not_zero) |
779 | + movl (v), %eax |
780 | + movl 4(v), %edx |
781 | testl %eax, %eax |
782 | je 3f |
783 | 1: |
784 | addl $1, %eax |
785 | adcl $0, %edx |
786 | - movl %eax, ($v) |
787 | - movl %edx, 4($v) |
788 | + movl %eax, (v) |
789 | + movl %edx, 4(v) |
790 | movl $1, %eax |
791 | 2: |
792 | -RETURN |
793 | + RET |
794 | 3: |
795 | testl %edx, %edx |
796 | jne 1b |
797 | jmp 2b |
798 | -END_ |
799 | +END |
800 | +#undef v |
801 | |
802 | -BEGIN dec_if_positive %esi |
803 | - movl ($v), %eax |
804 | - movl 4($v), %edx |
805 | +#define v %esi |
806 | +BEGIN(dec_if_positive) |
807 | + movl (v), %eax |
808 | + movl 4(v), %edx |
809 | subl $1, %eax |
810 | sbbl $0, %edx |
811 | js 1f |
812 | - movl %eax, ($v) |
813 | - movl %edx, 4($v) |
814 | + movl %eax, (v) |
815 | + movl %edx, 4(v) |
816 | 1: |
817 | -END |
818 | +RET_END |
819 | +#undef v |
820 | diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c |
821 | index b28d2f1..f6b48f6 100644 |
822 | --- a/arch/x86/oprofile/nmi_int.c |
823 | +++ b/arch/x86/oprofile/nmi_int.c |
824 | @@ -634,6 +634,18 @@ static int __init ppro_init(char **cpu_type) |
825 | if (force_arch_perfmon && cpu_has_arch_perfmon) |
826 | return 0; |
827 | |
828 | + /* |
829 | + * Documentation on identifying Intel processors by CPU family |
830 | + * and model can be found in the Intel Software Developer's |
831 | + * Manuals (SDM): |
832 | + * |
833 | + * http://www.intel.com/products/processor/manuals/ |
834 | + * |
835 | + * As of May 2010 the documentation for this was in the: |
836 | + * "Intel 64 and IA-32 Architectures Software Developer's |
837 | + * Manual Volume 3B: System Programming Guide", "Table B-1 |
838 | + * CPUID Signature Values of DisplayFamily_DisplayModel". |
839 | + */ |
840 | switch (cpu_model) { |
841 | case 0 ... 2: |
842 | *cpu_type = "i386/ppro"; |
843 | @@ -655,12 +667,13 @@ static int __init ppro_init(char **cpu_type) |
844 | case 15: case 23: |
845 | *cpu_type = "i386/core_2"; |
846 | break; |
847 | + case 0x1a: |
848 | + case 0x1e: |
849 | case 0x2e: |
850 | - case 26: |
851 | spec = &op_arch_perfmon_spec; |
852 | *cpu_type = "i386/core_i7"; |
853 | break; |
854 | - case 28: |
855 | + case 0x1c: |
856 | *cpu_type = "i386/atom"; |
857 | break; |
858 | default: |
859 | diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c |
860 | index 864dd46..18645f4 100644 |
861 | --- a/drivers/acpi/apei/erst.c |
862 | +++ b/drivers/acpi/apei/erst.c |
863 | @@ -33,6 +33,7 @@ |
864 | #include <linux/uaccess.h> |
865 | #include <linux/cper.h> |
866 | #include <linux/nmi.h> |
867 | +#include <linux/hardirq.h> |
868 | #include <acpi/apei.h> |
869 | |
870 | #include "apei-internal.h" |
871 | diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c |
872 | index a754715..d84af6c 100644 |
873 | --- a/drivers/char/agp/intel-gtt.c |
874 | +++ b/drivers/char/agp/intel-gtt.c |
875 | @@ -25,6 +25,10 @@ |
876 | #define USE_PCI_DMA_API 1 |
877 | #endif |
878 | |
879 | +/* Max amount of stolen space, anything above will be returned to Linux */ |
880 | +int intel_max_stolen = 32 * 1024 * 1024; |
881 | +EXPORT_SYMBOL(intel_max_stolen); |
882 | + |
883 | static const struct aper_size_info_fixed intel_i810_sizes[] = |
884 | { |
885 | {64, 16384, 4}, |
886 | @@ -710,7 +714,12 @@ static void intel_i830_init_gtt_entries(void) |
887 | break; |
888 | } |
889 | } |
890 | - if (gtt_entries > 0) { |
891 | + if (!local && gtt_entries > intel_max_stolen) { |
892 | + dev_info(&agp_bridge->dev->dev, |
893 | + "detected %dK stolen memory, trimming to %dK\n", |
894 | + gtt_entries / KB(1), intel_max_stolen / KB(1)); |
895 | + gtt_entries = intel_max_stolen / KB(4); |
896 | + } else if (gtt_entries > 0) { |
897 | dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n", |
898 | gtt_entries / KB(1), local ? "local" : "stolen"); |
899 | gtt_entries /= KB(4); |
900 | diff --git a/drivers/char/mem.c b/drivers/char/mem.c |
901 | index f54dab8..a398ecd 100644 |
902 | --- a/drivers/char/mem.c |
903 | +++ b/drivers/char/mem.c |
904 | @@ -916,7 +916,7 @@ static int __init chr_dev_init(void) |
905 | NULL, devlist[minor].name); |
906 | } |
907 | |
908 | - return 0; |
909 | + return tty_init(); |
910 | } |
911 | |
912 | fs_initcall(chr_dev_init); |
913 | diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c |
914 | index d71f0fc..507441a 100644 |
915 | --- a/drivers/char/tty_io.c |
916 | +++ b/drivers/char/tty_io.c |
917 | @@ -3128,7 +3128,7 @@ static struct cdev tty_cdev, console_cdev; |
918 | * Ok, now we can initialize the rest of the tty devices and can count |
919 | * on memory allocations, interrupts etc.. |
920 | */ |
921 | -static int __init tty_init(void) |
922 | +int __init tty_init(void) |
923 | { |
924 | cdev_init(&tty_cdev, &tty_fops); |
925 | if (cdev_add(&tty_cdev, MKDEV(TTYAUX_MAJOR, 0), 1) || |
926 | @@ -3149,4 +3149,4 @@ static int __init tty_init(void) |
927 | #endif |
928 | return 0; |
929 | } |
930 | -module_init(tty_init); |
931 | + |
932 | diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c |
933 | index 4a66201..c9736ed 100644 |
934 | --- a/drivers/gpu/drm/drm_drv.c |
935 | +++ b/drivers/gpu/drm/drm_drv.c |
936 | @@ -502,7 +502,9 @@ long drm_ioctl(struct file *filp, |
937 | retcode = -EFAULT; |
938 | goto err_i1; |
939 | } |
940 | - } |
941 | + } else |
942 | + memset(kdata, 0, _IOC_SIZE(cmd)); |
943 | + |
944 | if (ioctl->flags & DRM_UNLOCKED) |
945 | retcode = func(dev, kdata, file_priv); |
946 | else { |
947 | diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c |
948 | index 2305a12..013a0ae 100644 |
949 | --- a/drivers/gpu/drm/i915/i915_dma.c |
950 | +++ b/drivers/gpu/drm/i915/i915_dma.c |
951 | @@ -40,6 +40,8 @@ |
952 | #include <linux/vga_switcheroo.h> |
953 | #include <linux/slab.h> |
954 | |
955 | +extern int intel_max_stolen; /* from AGP driver */ |
956 | + |
957 | /** |
958 | * Sets up the hardware status page for devices that need a physical address |
959 | * in the register. |
960 | @@ -2104,6 +2106,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) |
961 | if (ret) |
962 | goto out_iomapfree; |
963 | |
964 | + if (prealloc_size > intel_max_stolen) { |
965 | + DRM_INFO("detected %dM stolen memory, trimming to %dM\n", |
966 | + prealloc_size >> 20, intel_max_stolen >> 20); |
967 | + prealloc_size = intel_max_stolen; |
968 | + } |
969 | + |
970 | dev_priv->wq = create_singlethread_workqueue("i915"); |
971 | if (dev_priv->wq == NULL) { |
972 | DRM_ERROR("Failed to create our workqueue.\n"); |
973 | diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c |
974 | index 8a84306..e9a4b12 100644 |
975 | --- a/drivers/gpu/drm/i915/intel_display.c |
976 | +++ b/drivers/gpu/drm/i915/intel_display.c |
977 | @@ -1502,6 +1502,7 @@ static void ironlake_enable_pll_edp (struct drm_crtc *crtc) |
978 | dpa_ctl = I915_READ(DP_A); |
979 | dpa_ctl |= DP_PLL_ENABLE; |
980 | I915_WRITE(DP_A, dpa_ctl); |
981 | + POSTING_READ(DP_A); |
982 | udelay(200); |
983 | } |
984 | |
985 | @@ -4816,14 +4817,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, |
986 | work->pending_flip_obj = obj; |
987 | |
988 | if (intel_crtc->plane) |
989 | - flip_mask = I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
990 | + flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; |
991 | else |
992 | - flip_mask = I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; |
993 | + flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; |
994 | |
995 | - /* Wait for any previous flip to finish */ |
996 | - if (IS_GEN3(dev)) |
997 | - while (I915_READ(ISR) & flip_mask) |
998 | - ; |
999 | + if (IS_GEN3(dev) || IS_GEN2(dev)) { |
1000 | + BEGIN_LP_RING(2); |
1001 | + OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); |
1002 | + OUT_RING(0); |
1003 | + ADVANCE_LP_RING(); |
1004 | + } |
1005 | |
1006 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ |
1007 | offset = obj_priv->gtt_offset; |
1008 | @@ -4837,12 +4840,18 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, |
1009 | OUT_RING(offset | obj_priv->tiling_mode); |
1010 | pipesrc = I915_READ(pipesrc_reg); |
1011 | OUT_RING(pipesrc & 0x0fff0fff); |
1012 | - } else { |
1013 | + } else if (IS_GEN3(dev)) { |
1014 | OUT_RING(MI_DISPLAY_FLIP_I915 | |
1015 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
1016 | OUT_RING(fb->pitch); |
1017 | OUT_RING(offset); |
1018 | OUT_RING(MI_NOOP); |
1019 | + } else { |
1020 | + OUT_RING(MI_DISPLAY_FLIP | |
1021 | + MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); |
1022 | + OUT_RING(fb->pitch); |
1023 | + OUT_RING(offset); |
1024 | + OUT_RING(MI_NOOP); |
1025 | } |
1026 | ADVANCE_LP_RING(); |
1027 | |
1028 | diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c |
1029 | index 10673ae..6bfef51 100644 |
1030 | --- a/drivers/gpu/drm/radeon/radeon_atombios.c |
1031 | +++ b/drivers/gpu/drm/radeon/radeon_atombios.c |
1032 | @@ -206,6 +206,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, |
1033 | uint16_t *line_mux, |
1034 | struct radeon_hpd *hpd) |
1035 | { |
1036 | + struct radeon_device *rdev = dev->dev_private; |
1037 | |
1038 | /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
1039 | if ((dev->pdev->device == 0x791e) && |
1040 | @@ -308,13 +309,22 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, |
1041 | } |
1042 | } |
1043 | |
1044 | - /* Acer laptop reports DVI-D as DVI-I */ |
1045 | + /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */ |
1046 | if ((dev->pdev->device == 0x95c4) && |
1047 | (dev->pdev->subsystem_vendor == 0x1025) && |
1048 | (dev->pdev->subsystem_device == 0x013c)) { |
1049 | + struct radeon_gpio_rec gpio; |
1050 | + |
1051 | if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && |
1052 | - (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) |
1053 | + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { |
1054 | + gpio = radeon_lookup_gpio(rdev, 6); |
1055 | + *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); |
1056 | *connector_type = DRM_MODE_CONNECTOR_DVID; |
1057 | + } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && |
1058 | + (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { |
1059 | + gpio = radeon_lookup_gpio(rdev, 7); |
1060 | + *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); |
1061 | + } |
1062 | } |
1063 | |
1064 | /* XFX Pine Group device rv730 reports no VGA DDC lines |
1065 | @@ -1049,7 +1059,7 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev) |
1066 | } |
1067 | break; |
1068 | case 2: |
1069 | - if (igp_info->info_2.ucMemoryType & 0x0f) |
1070 | + if (igp_info->info_2.ulBootUpSidePortClock) |
1071 | return true; |
1072 | break; |
1073 | default: |
1074 | diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c |
1075 | index dd279da..a718463 100644 |
1076 | --- a/drivers/gpu/drm/radeon/radeon_device.c |
1077 | +++ b/drivers/gpu/drm/radeon/radeon_device.c |
1078 | @@ -199,7 +199,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 |
1079 | mc->mc_vram_size = mc->aper_size; |
1080 | } |
1081 | mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; |
1082 | - if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) { |
1083 | + if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) { |
1084 | dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n"); |
1085 | mc->real_vram_size = mc->aper_size; |
1086 | mc->mc_vram_size = mc->aper_size; |
1087 | diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c |
1088 | index 5def6f5..0cd2704 100644 |
1089 | --- a/drivers/gpu/drm/radeon/radeon_i2c.c |
1090 | +++ b/drivers/gpu/drm/radeon/radeon_i2c.c |
1091 | @@ -95,6 +95,13 @@ static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state) |
1092 | } |
1093 | } |
1094 | |
1095 | + /* switch the pads to ddc mode */ |
1096 | + if (ASIC_IS_DCE3(rdev) && rec->hw_capable) { |
1097 | + temp = RREG32(rec->mask_clk_reg); |
1098 | + temp &= ~(1 << 16); |
1099 | + WREG32(rec->mask_clk_reg, temp); |
1100 | + } |
1101 | + |
1102 | /* clear the output pin values */ |
1103 | temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask; |
1104 | WREG32(rec->a_clk_reg, temp); |
1105 | diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1106 | index 059bfa4..a108c7e 100644 |
1107 | --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1108 | +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c |
1109 | @@ -121,11 +121,12 @@ int radeon_irq_kms_init(struct radeon_device *rdev) |
1110 | * chips. Disable MSI on them for now. |
1111 | */ |
1112 | if ((rdev->family >= CHIP_RV380) && |
1113 | - (!(rdev->flags & RADEON_IS_IGP))) { |
1114 | + (!(rdev->flags & RADEON_IS_IGP)) && |
1115 | + (!(rdev->flags & RADEON_IS_AGP))) { |
1116 | int ret = pci_enable_msi(rdev->pdev); |
1117 | if (!ret) { |
1118 | rdev->msi_enabled = 1; |
1119 | - DRM_INFO("radeon: using MSI.\n"); |
1120 | + dev_info(rdev->dev, "radeon: using MSI.\n"); |
1121 | } |
1122 | } |
1123 | rdev->irq.installed = true; |
1124 | diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c |
1125 | index ab389f8..b20379e 100644 |
1126 | --- a/drivers/gpu/drm/radeon/radeon_kms.c |
1127 | +++ b/drivers/gpu/drm/radeon/radeon_kms.c |
1128 | @@ -106,7 +106,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) |
1129 | |
1130 | info = data; |
1131 | value_ptr = (uint32_t *)((unsigned long)info->value); |
1132 | - value = *value_ptr; |
1133 | + if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) |
1134 | + return -EFAULT; |
1135 | + |
1136 | switch (info->request) { |
1137 | case RADEON_INFO_DEVICE_ID: |
1138 | value = dev->pci_device; |
1139 | diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c |
1140 | index e1e5255..cf3a51f 100644 |
1141 | --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c |
1142 | +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c |
1143 | @@ -272,7 +272,7 @@ static uint8_t radeon_compute_pll_gain(uint16_t ref_freq, uint16_t ref_div, |
1144 | if (!ref_div) |
1145 | return 1; |
1146 | |
1147 | - vcoFreq = ((unsigned)ref_freq & fb_div) / ref_div; |
1148 | + vcoFreq = ((unsigned)ref_freq * fb_div) / ref_div; |
1149 | |
1150 | /* |
1151 | * This is horribly crude: the VCO frequency range is divided into |
1152 | diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c |
1153 | index 3fa6984..c91b741 100644 |
1154 | --- a/drivers/gpu/drm/radeon/radeon_pm.c |
1155 | +++ b/drivers/gpu/drm/radeon/radeon_pm.c |
1156 | @@ -224,6 +224,11 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev) |
1157 | { |
1158 | int i; |
1159 | |
1160 | + /* no need to take locks, etc. if nothing's going to change */ |
1161 | + if ((rdev->pm.requested_clock_mode_index == rdev->pm.current_clock_mode_index) && |
1162 | + (rdev->pm.requested_power_state_index == rdev->pm.current_power_state_index)) |
1163 | + return; |
1164 | + |
1165 | mutex_lock(&rdev->ddev->struct_mutex); |
1166 | mutex_lock(&rdev->vram_mutex); |
1167 | mutex_lock(&rdev->cp.mutex); |
1168 | diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c |
1169 | index 4a64b85..68e69a4 100644 |
1170 | --- a/drivers/hwmon/pc87360.c |
1171 | +++ b/drivers/hwmon/pc87360.c |
1172 | @@ -1610,11 +1610,8 @@ static struct pc87360_data *pc87360_update_device(struct device *dev) |
1173 | |
1174 | static int __init pc87360_device_add(unsigned short address) |
1175 | { |
1176 | - struct resource res = { |
1177 | - .name = "pc87360", |
1178 | - .flags = IORESOURCE_IO, |
1179 | - }; |
1180 | - int err, i; |
1181 | + struct resource res[3]; |
1182 | + int err, i, res_count; |
1183 | |
1184 | pdev = platform_device_alloc("pc87360", address); |
1185 | if (!pdev) { |
1186 | @@ -1623,22 +1620,28 @@ static int __init pc87360_device_add(unsigned short address) |
1187 | goto exit; |
1188 | } |
1189 | |
1190 | + memset(res, 0, 3 * sizeof(struct resource)); |
1191 | + res_count = 0; |
1192 | for (i = 0; i < 3; i++) { |
1193 | if (!extra_isa[i]) |
1194 | continue; |
1195 | - res.start = extra_isa[i]; |
1196 | - res.end = extra_isa[i] + PC87360_EXTENT - 1; |
1197 | + res[res_count].start = extra_isa[i]; |
1198 | + res[res_count].end = extra_isa[i] + PC87360_EXTENT - 1; |
1199 | + res[res_count].name = "pc87360", |
1200 | + res[res_count].flags = IORESOURCE_IO, |
1201 | |
1202 | - err = acpi_check_resource_conflict(&res); |
1203 | + err = acpi_check_resource_conflict(&res[res_count]); |
1204 | if (err) |
1205 | goto exit_device_put; |
1206 | |
1207 | - err = platform_device_add_resources(pdev, &res, 1); |
1208 | - if (err) { |
1209 | - printk(KERN_ERR "pc87360: Device resource[%d] " |
1210 | - "addition failed (%d)\n", i, err); |
1211 | - goto exit_device_put; |
1212 | - } |
1213 | + res_count++; |
1214 | + } |
1215 | + |
1216 | + err = platform_device_add_resources(pdev, res, res_count); |
1217 | + if (err) { |
1218 | + printk(KERN_ERR "pc87360: Device resources addition failed " |
1219 | + "(%d)\n", err); |
1220 | + goto exit_device_put; |
1221 | } |
1222 | |
1223 | err = platform_device_add(pdev); |
1224 | diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c |
1225 | index 6fbe899..05b15ed 100644 |
1226 | --- a/drivers/isdn/gigaset/capi.c |
1227 | +++ b/drivers/isdn/gigaset/capi.c |
1228 | @@ -378,13 +378,13 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb) |
1229 | ++bcs->trans_up; |
1230 | |
1231 | if (!ap) { |
1232 | - dev_err(cs->dev, "%s: no application\n", __func__); |
1233 | + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); |
1234 | return; |
1235 | } |
1236 | |
1237 | /* don't send further B3 messages if disconnected */ |
1238 | if (bcs->apconnstate < APCONN_ACTIVE) { |
1239 | - gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack"); |
1240 | + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); |
1241 | return; |
1242 | } |
1243 | |
1244 | @@ -422,13 +422,14 @@ void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb) |
1245 | bcs->trans_down++; |
1246 | |
1247 | if (!ap) { |
1248 | - dev_err(cs->dev, "%s: no application\n", __func__); |
1249 | + gig_dbg(DEBUG_MCMD, "%s: application gone", __func__); |
1250 | + dev_kfree_skb_any(skb); |
1251 | return; |
1252 | } |
1253 | |
1254 | /* don't send further B3 messages if disconnected */ |
1255 | if (bcs->apconnstate < APCONN_ACTIVE) { |
1256 | - gig_dbg(DEBUG_LLDATA, "disconnected, discarding data"); |
1257 | + gig_dbg(DEBUG_MCMD, "%s: disconnected", __func__); |
1258 | dev_kfree_skb_any(skb); |
1259 | return; |
1260 | } |
1261 | @@ -747,7 +748,7 @@ void gigaset_isdn_connD(struct bc_state *bcs) |
1262 | ap = bcs->ap; |
1263 | if (!ap) { |
1264 | spin_unlock_irqrestore(&bcs->aplock, flags); |
1265 | - dev_err(cs->dev, "%s: no application\n", __func__); |
1266 | + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); |
1267 | return; |
1268 | } |
1269 | if (bcs->apconnstate == APCONN_NONE) { |
1270 | @@ -843,7 +844,7 @@ void gigaset_isdn_connB(struct bc_state *bcs) |
1271 | ap = bcs->ap; |
1272 | if (!ap) { |
1273 | spin_unlock_irqrestore(&bcs->aplock, flags); |
1274 | - dev_err(cs->dev, "%s: no application\n", __func__); |
1275 | + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); |
1276 | return; |
1277 | } |
1278 | if (!bcs->apconnstate) { |
1279 | @@ -901,13 +902,12 @@ void gigaset_isdn_connB(struct bc_state *bcs) |
1280 | */ |
1281 | void gigaset_isdn_hupB(struct bc_state *bcs) |
1282 | { |
1283 | - struct cardstate *cs = bcs->cs; |
1284 | struct gigaset_capi_appl *ap = bcs->ap; |
1285 | |
1286 | /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */ |
1287 | |
1288 | if (!ap) { |
1289 | - dev_err(cs->dev, "%s: no application\n", __func__); |
1290 | + gig_dbg(DEBUG_CMD, "%s: application gone", __func__); |
1291 | return; |
1292 | } |
1293 | |
1294 | @@ -1044,6 +1044,7 @@ static inline void remove_appl_from_channel(struct bc_state *bcs, |
1295 | do { |
1296 | if (bcap->bcnext == ap) { |
1297 | bcap->bcnext = bcap->bcnext->bcnext; |
1298 | + spin_unlock_irqrestore(&bcs->aplock, flags); |
1299 | return; |
1300 | } |
1301 | bcap = bcap->bcnext; |
1302 | diff --git a/drivers/isdn/sc/ioctl.c b/drivers/isdn/sc/ioctl.c |
1303 | index 1081091..2655e3a 100644 |
1304 | --- a/drivers/isdn/sc/ioctl.c |
1305 | +++ b/drivers/isdn/sc/ioctl.c |
1306 | @@ -174,7 +174,7 @@ int sc_ioctl(int card, scs_ioctl *data) |
1307 | pr_debug("%s: SCIOGETSPID: ioctl received\n", |
1308 | sc_adapter[card]->devicename); |
1309 | |
1310 | - spid = kmalloc(SCIOC_SPIDSIZE, GFP_KERNEL); |
1311 | + spid = kzalloc(SCIOC_SPIDSIZE, GFP_KERNEL); |
1312 | if (!spid) { |
1313 | kfree(rcvmsg); |
1314 | return -ENOMEM; |
1315 | @@ -194,7 +194,7 @@ int sc_ioctl(int card, scs_ioctl *data) |
1316 | kfree(rcvmsg); |
1317 | return status; |
1318 | } |
1319 | - strcpy(spid, rcvmsg->msg_data.byte_array); |
1320 | + strlcpy(spid, rcvmsg->msg_data.byte_array, SCIOC_SPIDSIZE); |
1321 | |
1322 | /* |
1323 | * Package the switch type and send to user space |
1324 | @@ -272,12 +272,12 @@ int sc_ioctl(int card, scs_ioctl *data) |
1325 | return status; |
1326 | } |
1327 | |
1328 | - dn = kmalloc(SCIOC_DNSIZE, GFP_KERNEL); |
1329 | + dn = kzalloc(SCIOC_DNSIZE, GFP_KERNEL); |
1330 | if (!dn) { |
1331 | kfree(rcvmsg); |
1332 | return -ENOMEM; |
1333 | } |
1334 | - strcpy(dn, rcvmsg->msg_data.byte_array); |
1335 | + strlcpy(dn, rcvmsg->msg_data.byte_array, SCIOC_DNSIZE); |
1336 | kfree(rcvmsg); |
1337 | |
1338 | /* |
1339 | @@ -348,7 +348,7 @@ int sc_ioctl(int card, scs_ioctl *data) |
1340 | pr_debug("%s: SCIOSTAT: ioctl received\n", |
1341 | sc_adapter[card]->devicename); |
1342 | |
1343 | - bi = kmalloc (sizeof(boardInfo), GFP_KERNEL); |
1344 | + bi = kzalloc(sizeof(boardInfo), GFP_KERNEL); |
1345 | if (!bi) { |
1346 | kfree(rcvmsg); |
1347 | return -ENOMEM; |
1348 | diff --git a/drivers/md/dm-exception-store.c b/drivers/md/dm-exception-store.c |
1349 | index 2b7907b..0bdb201 100644 |
1350 | --- a/drivers/md/dm-exception-store.c |
1351 | +++ b/drivers/md/dm-exception-store.c |
1352 | @@ -173,7 +173,9 @@ int dm_exception_store_set_chunk_size(struct dm_exception_store *store, |
1353 | |
1354 | /* Validate the chunk size against the device block size */ |
1355 | if (chunk_size % |
1356 | - (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9)) { |
1357 | + (bdev_logical_block_size(dm_snap_cow(store->snap)->bdev) >> 9) || |
1358 | + chunk_size % |
1359 | + (bdev_logical_block_size(dm_snap_origin(store->snap)->bdev) >> 9)) { |
1360 | *error = "Chunk size is not a multiple of device blocksize"; |
1361 | return -EINVAL; |
1362 | } |
1363 | diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h |
1364 | index e8dfa06..0b25362 100644 |
1365 | --- a/drivers/md/dm-exception-store.h |
1366 | +++ b/drivers/md/dm-exception-store.h |
1367 | @@ -126,8 +126,9 @@ struct dm_exception_store { |
1368 | }; |
1369 | |
1370 | /* |
1371 | - * Obtain the cow device used by a given snapshot. |
1372 | + * Obtain the origin or cow device used by a given snapshot. |
1373 | */ |
1374 | +struct dm_dev *dm_snap_origin(struct dm_snapshot *snap); |
1375 | struct dm_dev *dm_snap_cow(struct dm_snapshot *snap); |
1376 | |
1377 | /* |
1378 | diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c |
1379 | index d7500e1..bb6bdc8 100644 |
1380 | --- a/drivers/md/dm-ioctl.c |
1381 | +++ b/drivers/md/dm-ioctl.c |
1382 | @@ -249,40 +249,50 @@ static void __hash_remove(struct hash_cell *hc) |
1383 | |
1384 | static void dm_hash_remove_all(int keep_open_devices) |
1385 | { |
1386 | - int i, dev_skipped, dev_removed; |
1387 | + int i, dev_skipped; |
1388 | struct hash_cell *hc; |
1389 | - struct list_head *tmp, *n; |
1390 | + struct mapped_device *md; |
1391 | + |
1392 | +retry: |
1393 | + dev_skipped = 0; |
1394 | |
1395 | down_write(&_hash_lock); |
1396 | |
1397 | -retry: |
1398 | - dev_skipped = dev_removed = 0; |
1399 | for (i = 0; i < NUM_BUCKETS; i++) { |
1400 | - list_for_each_safe (tmp, n, _name_buckets + i) { |
1401 | - hc = list_entry(tmp, struct hash_cell, name_list); |
1402 | + list_for_each_entry(hc, _name_buckets + i, name_list) { |
1403 | + md = hc->md; |
1404 | + dm_get(md); |
1405 | |
1406 | - if (keep_open_devices && |
1407 | - dm_lock_for_deletion(hc->md)) { |
1408 | + if (keep_open_devices && dm_lock_for_deletion(md)) { |
1409 | + dm_put(md); |
1410 | dev_skipped++; |
1411 | continue; |
1412 | } |
1413 | + |
1414 | __hash_remove(hc); |
1415 | - dev_removed = 1; |
1416 | - } |
1417 | - } |
1418 | |
1419 | - /* |
1420 | - * Some mapped devices may be using other mapped devices, so if any |
1421 | - * still exist, repeat until we make no further progress. |
1422 | - */ |
1423 | - if (dev_skipped) { |
1424 | - if (dev_removed) |
1425 | - goto retry; |
1426 | + up_write(&_hash_lock); |
1427 | |
1428 | - DMWARN("remove_all left %d open device(s)", dev_skipped); |
1429 | + dm_put(md); |
1430 | + if (likely(keep_open_devices)) |
1431 | + dm_destroy(md); |
1432 | + else |
1433 | + dm_destroy_immediate(md); |
1434 | + |
1435 | + /* |
1436 | + * Some mapped devices may be using other mapped |
1437 | + * devices, so repeat until we make no further |
1438 | + * progress. If a new mapped device is created |
1439 | + * here it will also get removed. |
1440 | + */ |
1441 | + goto retry; |
1442 | + } |
1443 | } |
1444 | |
1445 | up_write(&_hash_lock); |
1446 | + |
1447 | + if (dev_skipped) |
1448 | + DMWARN("remove_all left %d open device(s)", dev_skipped); |
1449 | } |
1450 | |
1451 | static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old, |
1452 | @@ -640,6 +650,7 @@ static int dev_create(struct dm_ioctl *param, size_t param_size) |
1453 | r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md); |
1454 | if (r) { |
1455 | dm_put(md); |
1456 | + dm_destroy(md); |
1457 | return r; |
1458 | } |
1459 | |
1460 | @@ -742,6 +753,7 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size) |
1461 | param->flags |= DM_UEVENT_GENERATED_FLAG; |
1462 | |
1463 | dm_put(md); |
1464 | + dm_destroy(md); |
1465 | return 0; |
1466 | } |
1467 | |
1468 | diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c |
1469 | index 5485377..a1f2ab5 100644 |
1470 | --- a/drivers/md/dm-snap.c |
1471 | +++ b/drivers/md/dm-snap.c |
1472 | @@ -148,6 +148,12 @@ struct dm_snapshot { |
1473 | #define RUNNING_MERGE 0 |
1474 | #define SHUTDOWN_MERGE 1 |
1475 | |
1476 | +struct dm_dev *dm_snap_origin(struct dm_snapshot *s) |
1477 | +{ |
1478 | + return s->origin; |
1479 | +} |
1480 | +EXPORT_SYMBOL(dm_snap_origin); |
1481 | + |
1482 | struct dm_dev *dm_snap_cow(struct dm_snapshot *s) |
1483 | { |
1484 | return s->cow; |
1485 | @@ -1065,10 +1071,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1486 | origin_mode = FMODE_WRITE; |
1487 | } |
1488 | |
1489 | - origin_path = argv[0]; |
1490 | - argv++; |
1491 | - argc--; |
1492 | - |
1493 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
1494 | if (!s) { |
1495 | ti->error = "Cannot allocate snapshot context private " |
1496 | @@ -1077,6 +1079,16 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1497 | goto bad; |
1498 | } |
1499 | |
1500 | + origin_path = argv[0]; |
1501 | + argv++; |
1502 | + argc--; |
1503 | + |
1504 | + r = dm_get_device(ti, origin_path, origin_mode, &s->origin); |
1505 | + if (r) { |
1506 | + ti->error = "Cannot get origin device"; |
1507 | + goto bad_origin; |
1508 | + } |
1509 | + |
1510 | cow_path = argv[0]; |
1511 | argv++; |
1512 | argc--; |
1513 | @@ -1097,12 +1109,6 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) |
1514 | argv += args_used; |
1515 | argc -= args_used; |
1516 | |
1517 | - r = dm_get_device(ti, origin_path, origin_mode, &s->origin); |
1518 | - if (r) { |
1519 | - ti->error = "Cannot get origin device"; |
1520 | - goto bad_origin; |
1521 | - } |
1522 | - |
1523 | s->ti = ti; |
1524 | s->valid = 1; |
1525 | s->active = 0; |
1526 | @@ -1212,15 +1218,15 @@ bad_kcopyd: |
1527 | dm_exception_table_exit(&s->complete, exception_cache); |
1528 | |
1529 | bad_hash_tables: |
1530 | - dm_put_device(ti, s->origin); |
1531 | - |
1532 | -bad_origin: |
1533 | dm_exception_store_destroy(s->store); |
1534 | |
1535 | bad_store: |
1536 | dm_put_device(ti, s->cow); |
1537 | |
1538 | bad_cow: |
1539 | + dm_put_device(ti, s->origin); |
1540 | + |
1541 | +bad_origin: |
1542 | kfree(s); |
1543 | |
1544 | bad: |
1545 | @@ -1314,12 +1320,12 @@ static void snapshot_dtr(struct dm_target *ti) |
1546 | |
1547 | mempool_destroy(s->pending_pool); |
1548 | |
1549 | - dm_put_device(ti, s->origin); |
1550 | - |
1551 | dm_exception_store_destroy(s->store); |
1552 | |
1553 | dm_put_device(ti, s->cow); |
1554 | |
1555 | + dm_put_device(ti, s->origin); |
1556 | + |
1557 | kfree(s); |
1558 | } |
1559 | |
1560 | @@ -1899,8 +1905,14 @@ static int snapshot_iterate_devices(struct dm_target *ti, |
1561 | iterate_devices_callout_fn fn, void *data) |
1562 | { |
1563 | struct dm_snapshot *snap = ti->private; |
1564 | + int r; |
1565 | + |
1566 | + r = fn(ti, snap->origin, 0, ti->len, data); |
1567 | |
1568 | - return fn(ti, snap->origin, 0, ti->len, data); |
1569 | + if (!r) |
1570 | + r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data); |
1571 | + |
1572 | + return r; |
1573 | } |
1574 | |
1575 | |
1576 | diff --git a/drivers/md/dm.c b/drivers/md/dm.c |
1577 | index d21e128..e3a512d 100644 |
1578 | --- a/drivers/md/dm.c |
1579 | +++ b/drivers/md/dm.c |
1580 | @@ -19,6 +19,7 @@ |
1581 | #include <linux/slab.h> |
1582 | #include <linux/idr.h> |
1583 | #include <linux/hdreg.h> |
1584 | +#include <linux/delay.h> |
1585 | |
1586 | #include <trace/events/block.h> |
1587 | |
1588 | @@ -2141,6 +2142,7 @@ static struct mapped_device *dm_find_md(dev_t dev) |
1589 | md = idr_find(&_minor_idr, minor); |
1590 | if (md && (md == MINOR_ALLOCED || |
1591 | (MINOR(disk_devt(dm_disk(md))) != minor) || |
1592 | + dm_deleting_md(md) || |
1593 | test_bit(DMF_FREEING, &md->flags))) { |
1594 | md = NULL; |
1595 | goto out; |
1596 | @@ -2175,6 +2177,7 @@ void dm_set_mdptr(struct mapped_device *md, void *ptr) |
1597 | void dm_get(struct mapped_device *md) |
1598 | { |
1599 | atomic_inc(&md->holders); |
1600 | + BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
1601 | } |
1602 | |
1603 | const char *dm_device_name(struct mapped_device *md) |
1604 | @@ -2183,27 +2186,55 @@ const char *dm_device_name(struct mapped_device *md) |
1605 | } |
1606 | EXPORT_SYMBOL_GPL(dm_device_name); |
1607 | |
1608 | -void dm_put(struct mapped_device *md) |
1609 | +static void __dm_destroy(struct mapped_device *md, bool wait) |
1610 | { |
1611 | struct dm_table *map; |
1612 | |
1613 | - BUG_ON(test_bit(DMF_FREEING, &md->flags)); |
1614 | + might_sleep(); |
1615 | |
1616 | - if (atomic_dec_and_lock(&md->holders, &_minor_lock)) { |
1617 | - map = dm_get_live_table(md); |
1618 | - idr_replace(&_minor_idr, MINOR_ALLOCED, |
1619 | - MINOR(disk_devt(dm_disk(md)))); |
1620 | - set_bit(DMF_FREEING, &md->flags); |
1621 | - spin_unlock(&_minor_lock); |
1622 | - if (!dm_suspended_md(md)) { |
1623 | - dm_table_presuspend_targets(map); |
1624 | - dm_table_postsuspend_targets(map); |
1625 | - } |
1626 | - dm_sysfs_exit(md); |
1627 | - dm_table_put(map); |
1628 | - dm_table_destroy(__unbind(md)); |
1629 | - free_dev(md); |
1630 | + spin_lock(&_minor_lock); |
1631 | + map = dm_get_live_table(md); |
1632 | + idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); |
1633 | + set_bit(DMF_FREEING, &md->flags); |
1634 | + spin_unlock(&_minor_lock); |
1635 | + |
1636 | + if (!dm_suspended_md(md)) { |
1637 | + dm_table_presuspend_targets(map); |
1638 | + dm_table_postsuspend_targets(map); |
1639 | } |
1640 | + |
1641 | + /* |
1642 | + * Rare, but there may be I/O requests still going to complete, |
1643 | + * for example. Wait for all references to disappear. |
1644 | + * No one should increment the reference count of the mapped_device, |
1645 | + * after the mapped_device state becomes DMF_FREEING. |
1646 | + */ |
1647 | + if (wait) |
1648 | + while (atomic_read(&md->holders)) |
1649 | + msleep(1); |
1650 | + else if (atomic_read(&md->holders)) |
1651 | + DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)", |
1652 | + dm_device_name(md), atomic_read(&md->holders)); |
1653 | + |
1654 | + dm_sysfs_exit(md); |
1655 | + dm_table_put(map); |
1656 | + dm_table_destroy(__unbind(md)); |
1657 | + free_dev(md); |
1658 | +} |
1659 | + |
1660 | +void dm_destroy(struct mapped_device *md) |
1661 | +{ |
1662 | + __dm_destroy(md, true); |
1663 | +} |
1664 | + |
1665 | +void dm_destroy_immediate(struct mapped_device *md) |
1666 | +{ |
1667 | + __dm_destroy(md, false); |
1668 | +} |
1669 | + |
1670 | +void dm_put(struct mapped_device *md) |
1671 | +{ |
1672 | + atomic_dec(&md->holders); |
1673 | } |
1674 | EXPORT_SYMBOL_GPL(dm_put); |
1675 | |
1676 | diff --git a/drivers/md/dm.h b/drivers/md/dm.h |
1677 | index bad1724..8223671 100644 |
1678 | --- a/drivers/md/dm.h |
1679 | +++ b/drivers/md/dm.h |
1680 | @@ -122,6 +122,11 @@ void dm_linear_exit(void); |
1681 | int dm_stripe_init(void); |
1682 | void dm_stripe_exit(void); |
1683 | |
1684 | +/* |
1685 | + * mapped_device operations |
1686 | + */ |
1687 | +void dm_destroy(struct mapped_device *md); |
1688 | +void dm_destroy_immediate(struct mapped_device *md); |
1689 | int dm_open_count(struct mapped_device *md); |
1690 | int dm_lock_for_deletion(struct mapped_device *md); |
1691 | |
1692 | diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c |
1693 | index 8327e24..300ec15 100644 |
1694 | --- a/drivers/memstick/core/mspro_block.c |
1695 | +++ b/drivers/memstick/core/mspro_block.c |
1696 | @@ -1040,6 +1040,7 @@ static int mspro_block_read_attributes(struct memstick_dev *card) |
1697 | snprintf(s_attr->name, sizeof(s_attr->name), |
1698 | "attr_x%02x", attr->entries[cnt].id); |
1699 | |
1700 | + sysfs_attr_init(&s_attr->dev_attr.attr); |
1701 | s_attr->dev_attr.attr.name = s_attr->name; |
1702 | s_attr->dev_attr.attr.mode = S_IRUGO; |
1703 | s_attr->dev_attr.show = mspro_block_attr_show(s_attr->id); |
1704 | @@ -1330,13 +1331,14 @@ static void mspro_block_remove(struct memstick_dev *card) |
1705 | struct mspro_block_data *msb = memstick_get_drvdata(card); |
1706 | unsigned long flags; |
1707 | |
1708 | - del_gendisk(msb->disk); |
1709 | - dev_dbg(&card->dev, "mspro block remove\n"); |
1710 | spin_lock_irqsave(&msb->q_lock, flags); |
1711 | msb->eject = 1; |
1712 | blk_start_queue(msb->queue); |
1713 | spin_unlock_irqrestore(&msb->q_lock, flags); |
1714 | |
1715 | + del_gendisk(msb->disk); |
1716 | + dev_dbg(&card->dev, "mspro block remove\n"); |
1717 | + |
1718 | blk_cleanup_queue(msb->queue); |
1719 | msb->queue = NULL; |
1720 | |
1721 | diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c |
1722 | index 62f3ea9..3364061 100644 |
1723 | --- a/drivers/mtd/chips/cfi_cmdset_0001.c |
1724 | +++ b/drivers/mtd/chips/cfi_cmdset_0001.c |
1725 | @@ -717,7 +717,7 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, |
1726 | chip = &newcfi->chips[0]; |
1727 | for (i = 0; i < cfi->numchips; i++) { |
1728 | shared[i].writing = shared[i].erasing = NULL; |
1729 | - spin_lock_init(&shared[i].lock); |
1730 | + mutex_init(&shared[i].lock); |
1731 | for (j = 0; j < numparts; j++) { |
1732 | *chip = cfi->chips[i]; |
1733 | chip->start += j << partshift; |
1734 | @@ -886,7 +886,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr |
1735 | */ |
1736 | struct flchip_shared *shared = chip->priv; |
1737 | struct flchip *contender; |
1738 | - spin_lock(&shared->lock); |
1739 | + mutex_lock(&shared->lock); |
1740 | contender = shared->writing; |
1741 | if (contender && contender != chip) { |
1742 | /* |
1743 | @@ -899,7 +899,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr |
1744 | * get_chip returns success we're clear to go ahead. |
1745 | */ |
1746 | ret = mutex_trylock(&contender->mutex); |
1747 | - spin_unlock(&shared->lock); |
1748 | + mutex_unlock(&shared->lock); |
1749 | if (!ret) |
1750 | goto retry; |
1751 | mutex_unlock(&chip->mutex); |
1752 | @@ -914,7 +914,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr |
1753 | mutex_unlock(&contender->mutex); |
1754 | return ret; |
1755 | } |
1756 | - spin_lock(&shared->lock); |
1757 | + mutex_lock(&shared->lock); |
1758 | |
1759 | /* We should not own chip if it is already |
1760 | * in FL_SYNCING state. Put contender and retry. */ |
1761 | @@ -930,7 +930,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr |
1762 | * on this chip. Sleep. */ |
1763 | if (mode == FL_ERASING && shared->erasing |
1764 | && shared->erasing->oldstate == FL_ERASING) { |
1765 | - spin_unlock(&shared->lock); |
1766 | + mutex_unlock(&shared->lock); |
1767 | set_current_state(TASK_UNINTERRUPTIBLE); |
1768 | add_wait_queue(&chip->wq, &wait); |
1769 | mutex_unlock(&chip->mutex); |
1770 | @@ -944,7 +944,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr |
1771 | shared->writing = chip; |
1772 | if (mode == FL_ERASING) |
1773 | shared->erasing = chip; |
1774 | - spin_unlock(&shared->lock); |
1775 | + mutex_unlock(&shared->lock); |
1776 | } |
1777 | ret = chip_ready(map, chip, adr, mode); |
1778 | if (ret == -EAGAIN) |
1779 | @@ -959,7 +959,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad |
1780 | |
1781 | if (chip->priv) { |
1782 | struct flchip_shared *shared = chip->priv; |
1783 | - spin_lock(&shared->lock); |
1784 | + mutex_lock(&shared->lock); |
1785 | if (shared->writing == chip && chip->oldstate == FL_READY) { |
1786 | /* We own the ability to write, but we're done */ |
1787 | shared->writing = shared->erasing; |
1788 | @@ -967,7 +967,7 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad |
1789 | /* give back ownership to who we loaned it from */ |
1790 | struct flchip *loaner = shared->writing; |
1791 | mutex_lock(&loaner->mutex); |
1792 | - spin_unlock(&shared->lock); |
1793 | + mutex_unlock(&shared->lock); |
1794 | mutex_unlock(&chip->mutex); |
1795 | put_chip(map, loaner, loaner->start); |
1796 | mutex_lock(&chip->mutex); |
1797 | @@ -985,11 +985,11 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad |
1798 | * Don't let the switch below mess things up since |
1799 | * we don't have ownership to resume anything. |
1800 | */ |
1801 | - spin_unlock(&shared->lock); |
1802 | + mutex_unlock(&shared->lock); |
1803 | wake_up(&chip->wq); |
1804 | return; |
1805 | } |
1806 | - spin_unlock(&shared->lock); |
1807 | + mutex_unlock(&shared->lock); |
1808 | } |
1809 | |
1810 | switch(chip->oldstate) { |
1811 | diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c |
1812 | index fece5be..04fdfcc 100644 |
1813 | --- a/drivers/mtd/lpddr/lpddr_cmds.c |
1814 | +++ b/drivers/mtd/lpddr/lpddr_cmds.c |
1815 | @@ -98,7 +98,7 @@ struct mtd_info *lpddr_cmdset(struct map_info *map) |
1816 | numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum; |
1817 | for (i = 0; i < numchips; i++) { |
1818 | shared[i].writing = shared[i].erasing = NULL; |
1819 | - spin_lock_init(&shared[i].lock); |
1820 | + mutex_init(&shared[i].lock); |
1821 | for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) { |
1822 | *chip = lpddr->chips[i]; |
1823 | chip->start += j << lpddr->chipshift; |
1824 | @@ -217,7 +217,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) |
1825 | */ |
1826 | struct flchip_shared *shared = chip->priv; |
1827 | struct flchip *contender; |
1828 | - spin_lock(&shared->lock); |
1829 | + mutex_lock(&shared->lock); |
1830 | contender = shared->writing; |
1831 | if (contender && contender != chip) { |
1832 | /* |
1833 | @@ -230,7 +230,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) |
1834 | * get_chip returns success we're clear to go ahead. |
1835 | */ |
1836 | ret = mutex_trylock(&contender->mutex); |
1837 | - spin_unlock(&shared->lock); |
1838 | + mutex_unlock(&shared->lock); |
1839 | if (!ret) |
1840 | goto retry; |
1841 | mutex_unlock(&chip->mutex); |
1842 | @@ -245,7 +245,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) |
1843 | mutex_unlock(&contender->mutex); |
1844 | return ret; |
1845 | } |
1846 | - spin_lock(&shared->lock); |
1847 | + mutex_lock(&shared->lock); |
1848 | |
1849 | /* We should not own chip if it is already in FL_SYNCING |
1850 | * state. Put contender and retry. */ |
1851 | @@ -261,7 +261,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) |
1852 | Must sleep in such a case. */ |
1853 | if (mode == FL_ERASING && shared->erasing |
1854 | && shared->erasing->oldstate == FL_ERASING) { |
1855 | - spin_unlock(&shared->lock); |
1856 | + mutex_unlock(&shared->lock); |
1857 | set_current_state(TASK_UNINTERRUPTIBLE); |
1858 | add_wait_queue(&chip->wq, &wait); |
1859 | mutex_unlock(&chip->mutex); |
1860 | @@ -275,7 +275,7 @@ static int get_chip(struct map_info *map, struct flchip *chip, int mode) |
1861 | shared->writing = chip; |
1862 | if (mode == FL_ERASING) |
1863 | shared->erasing = chip; |
1864 | - spin_unlock(&shared->lock); |
1865 | + mutex_unlock(&shared->lock); |
1866 | } |
1867 | |
1868 | ret = chip_ready(map, chip, mode); |
1869 | @@ -348,7 +348,7 @@ static void put_chip(struct map_info *map, struct flchip *chip) |
1870 | { |
1871 | if (chip->priv) { |
1872 | struct flchip_shared *shared = chip->priv; |
1873 | - spin_lock(&shared->lock); |
1874 | + mutex_lock(&shared->lock); |
1875 | if (shared->writing == chip && chip->oldstate == FL_READY) { |
1876 | /* We own the ability to write, but we're done */ |
1877 | shared->writing = shared->erasing; |
1878 | @@ -356,7 +356,7 @@ static void put_chip(struct map_info *map, struct flchip *chip) |
1879 | /* give back the ownership */ |
1880 | struct flchip *loaner = shared->writing; |
1881 | mutex_lock(&loaner->mutex); |
1882 | - spin_unlock(&shared->lock); |
1883 | + mutex_unlock(&shared->lock); |
1884 | mutex_unlock(&chip->mutex); |
1885 | put_chip(map, loaner); |
1886 | mutex_lock(&chip->mutex); |
1887 | @@ -374,11 +374,11 @@ static void put_chip(struct map_info *map, struct flchip *chip) |
1888 | * Don't let the switch below mess things up since |
1889 | * we don't have ownership to resume anything. |
1890 | */ |
1891 | - spin_unlock(&shared->lock); |
1892 | + mutex_unlock(&shared->lock); |
1893 | wake_up(&chip->wq); |
1894 | return; |
1895 | } |
1896 | - spin_unlock(&shared->lock); |
1897 | + mutex_unlock(&shared->lock); |
1898 | } |
1899 | |
1900 | switch (chip->oldstate) { |
1901 | diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c |
1902 | index 4a7b864..5bcc34a 100644 |
1903 | --- a/drivers/mtd/nand/nand_base.c |
1904 | +++ b/drivers/mtd/nand/nand_base.c |
1905 | @@ -2852,6 +2852,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, |
1906 | */ |
1907 | if (id_data[0] == id_data[6] && id_data[1] == id_data[7] && |
1908 | id_data[0] == NAND_MFR_SAMSUNG && |
1909 | + (chip->cellinfo & NAND_CI_CELLTYPE_MSK) && |
1910 | id_data[5] != 0x00) { |
1911 | /* Calc pagesize */ |
1912 | mtd->writesize = 2048 << (extid & 0x03); |
1913 | diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c |
1914 | index 90e143e..317aff4 100644 |
1915 | --- a/drivers/mtd/nand/plat_nand.c |
1916 | +++ b/drivers/mtd/nand/plat_nand.c |
1917 | @@ -37,6 +37,11 @@ static int __devinit plat_nand_probe(struct platform_device *pdev) |
1918 | struct resource *res; |
1919 | int err = 0; |
1920 | |
1921 | + if (pdata->chip.nr_chips < 1) { |
1922 | + dev_err(&pdev->dev, "invalid number of chips specified\n"); |
1923 | + return -EINVAL; |
1924 | + } |
1925 | + |
1926 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1927 | if (!res) |
1928 | return -ENXIO; |
1929 | diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c |
1930 | index e02fa4f..4d89f37 100644 |
1931 | --- a/drivers/mtd/nand/pxa3xx_nand.c |
1932 | +++ b/drivers/mtd/nand/pxa3xx_nand.c |
1933 | @@ -363,7 +363,7 @@ static struct pxa3xx_nand_flash *builtin_flash_types[] = { |
1934 | #define tAR_NDTR1(r) (((r) >> 0) & 0xf) |
1935 | |
1936 | /* convert nano-seconds to nand flash controller clock cycles */ |
1937 | -#define ns2cycle(ns, clk) (int)(((ns) * (clk / 1000000) / 1000) - 1) |
1938 | +#define ns2cycle(ns, clk) (int)((ns) * (clk / 1000000) / 1000) |
1939 | |
1940 | /* convert nand flash controller clock cycles to nano-seconds */ |
1941 | #define cycle2ns(c, clk) ((((c) + 1) * 1000000 + clk / 500) / (clk / 1000)) |
1942 | diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c |
1943 | index f654db9..d206f21 100644 |
1944 | --- a/drivers/net/e1000e/82571.c |
1945 | +++ b/drivers/net/e1000e/82571.c |
1946 | @@ -936,12 +936,14 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) |
1947 | ew32(IMC, 0xffffffff); |
1948 | icr = er32(ICR); |
1949 | |
1950 | - /* Install any alternate MAC address into RAR0 */ |
1951 | - ret_val = e1000_check_alt_mac_addr_generic(hw); |
1952 | - if (ret_val) |
1953 | - return ret_val; |
1954 | + if (hw->mac.type == e1000_82571) { |
1955 | + /* Install any alternate MAC address into RAR0 */ |
1956 | + ret_val = e1000_check_alt_mac_addr_generic(hw); |
1957 | + if (ret_val) |
1958 | + return ret_val; |
1959 | |
1960 | - e1000e_set_laa_state_82571(hw, true); |
1961 | + e1000e_set_laa_state_82571(hw, true); |
1962 | + } |
1963 | |
1964 | /* Reinitialize the 82571 serdes link state machine */ |
1965 | if (hw->phy.media_type == e1000_media_type_internal_serdes) |
1966 | @@ -1618,14 +1620,16 @@ static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw) |
1967 | { |
1968 | s32 ret_val = 0; |
1969 | |
1970 | - /* |
1971 | - * If there's an alternate MAC address place it in RAR0 |
1972 | - * so that it will override the Si installed default perm |
1973 | - * address. |
1974 | - */ |
1975 | - ret_val = e1000_check_alt_mac_addr_generic(hw); |
1976 | - if (ret_val) |
1977 | - goto out; |
1978 | + if (hw->mac.type == e1000_82571) { |
1979 | + /* |
1980 | + * If there's an alternate MAC address place it in RAR0 |
1981 | + * so that it will override the Si installed default perm |
1982 | + * address. |
1983 | + */ |
1984 | + ret_val = e1000_check_alt_mac_addr_generic(hw); |
1985 | + if (ret_val) |
1986 | + goto out; |
1987 | + } |
1988 | |
1989 | ret_val = e1000_read_mac_addr_generic(hw); |
1990 | |
1991 | @@ -1833,6 +1837,7 @@ struct e1000_info e1000_82573_info = { |
1992 | | FLAG_HAS_SMART_POWER_DOWN |
1993 | | FLAG_HAS_AMT |
1994 | | FLAG_HAS_SWSM_ON_LOAD, |
1995 | + .flags2 = FLAG2_DISABLE_ASPM_L1, |
1996 | .pba = 20, |
1997 | .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, |
1998 | .get_variants = e1000_get_variants_82571, |
1999 | diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h |
2000 | index 4dc02c7..75289ca 100644 |
2001 | --- a/drivers/net/e1000e/defines.h |
2002 | +++ b/drivers/net/e1000e/defines.h |
2003 | @@ -620,6 +620,7 @@ |
2004 | #define E1000_FLASH_UPDATES 2000 |
2005 | |
2006 | /* NVM Word Offsets */ |
2007 | +#define NVM_COMPAT 0x0003 |
2008 | #define NVM_ID_LED_SETTINGS 0x0004 |
2009 | #define NVM_INIT_CONTROL2_REG 0x000F |
2010 | #define NVM_INIT_CONTROL3_PORT_B 0x0014 |
2011 | @@ -642,6 +643,9 @@ |
2012 | /* Mask bits for fields in Word 0x1a of the NVM */ |
2013 | #define NVM_WORD1A_ASPM_MASK 0x000C |
2014 | |
2015 | +/* Mask bits for fields in Word 0x03 of the EEPROM */ |
2016 | +#define NVM_COMPAT_LOM 0x0800 |
2017 | + |
2018 | /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ |
2019 | #define NVM_SUM 0xBABA |
2020 | |
2021 | diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c |
2022 | index a968e3a..768c105 100644 |
2023 | --- a/drivers/net/e1000e/lib.c |
2024 | +++ b/drivers/net/e1000e/lib.c |
2025 | @@ -183,6 +183,16 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw) |
2026 | u16 offset, nvm_alt_mac_addr_offset, nvm_data; |
2027 | u8 alt_mac_addr[ETH_ALEN]; |
2028 | |
2029 | + ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data); |
2030 | + if (ret_val) |
2031 | + goto out; |
2032 | + |
2033 | + /* Check for LOM (vs. NIC) or one of two valid mezzanine cards */ |
2034 | + if (!((nvm_data & NVM_COMPAT_LOM) || |
2035 | + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) || |
2036 | + (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD))) |
2037 | + goto out; |
2038 | + |
2039 | ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1, |
2040 | &nvm_alt_mac_addr_offset); |
2041 | if (ret_val) { |
2042 | diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c |
2043 | index 648972d..ab9fe22 100644 |
2044 | --- a/drivers/net/wireless/ath/ath5k/base.c |
2045 | +++ b/drivers/net/wireless/ath/ath5k/base.c |
2046 | @@ -48,6 +48,7 @@ |
2047 | #include <linux/netdevice.h> |
2048 | #include <linux/cache.h> |
2049 | #include <linux/pci.h> |
2050 | +#include <linux/pci-aspm.h> |
2051 | #include <linux/ethtool.h> |
2052 | #include <linux/uaccess.h> |
2053 | #include <linux/slab.h> |
2054 | @@ -472,6 +473,26 @@ ath5k_pci_probe(struct pci_dev *pdev, |
2055 | int ret; |
2056 | u8 csz; |
2057 | |
2058 | + /* |
2059 | + * L0s needs to be disabled on all ath5k cards. |
2060 | + * |
2061 | + * For distributions shipping with CONFIG_PCIEASPM (this will be enabled |
2062 | + * by default in the future in 2.6.36) this will also mean both L1 and |
2063 | + * L0s will be disabled when a pre 1.1 PCIe device is detected. We do |
2064 | + * know L1 works correctly even for all ath5k pre 1.1 PCIe devices |
2065 | + * though but cannot currently undue the effect of a blacklist, for |
2066 | + * details you can read pcie_aspm_sanity_check() and see how it adjusts |
2067 | + * the device link capability. |
2068 | + * |
2069 | + * It may be possible in the future to implement some PCI API to allow |
2070 | + * drivers to override blacklists for pre 1.1 PCIe but for now it is |
2071 | + * best to accept that both L0s and L1 will be disabled completely for |
2072 | + * distributions shipping with CONFIG_PCIEASPM rather than having this |
2073 | + * issue present. Motivation for adding this new API will be to help |
2074 | + * with power consumption for some of these devices. |
2075 | + */ |
2076 | + pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); |
2077 | + |
2078 | ret = pci_enable_device(pdev); |
2079 | if (ret) { |
2080 | dev_err(&pdev->dev, "can't enable device\n"); |
2081 | diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c |
2082 | index 2571b44..5fcbc2f 100644 |
2083 | --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c |
2084 | +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c |
2085 | @@ -68,18 +68,23 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) |
2086 | struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); |
2087 | struct ieee80211_sta *sta = tx_info->control.sta; |
2088 | struct ath9k_htc_sta *ista; |
2089 | - struct ath9k_htc_vif *avp; |
2090 | struct ath9k_htc_tx_ctl tx_ctl; |
2091 | enum htc_endpoint_id epid; |
2092 | u16 qnum, hw_qnum; |
2093 | __le16 fc; |
2094 | u8 *tx_fhdr; |
2095 | - u8 sta_idx; |
2096 | + u8 sta_idx, vif_idx; |
2097 | |
2098 | hdr = (struct ieee80211_hdr *) skb->data; |
2099 | fc = hdr->frame_control; |
2100 | |
2101 | - avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv; |
2102 | + if (tx_info->control.vif && |
2103 | + (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv) |
2104 | + vif_idx = ((struct ath9k_htc_vif *) |
2105 | + tx_info->control.vif->drv_priv)->index; |
2106 | + else |
2107 | + vif_idx = priv->nvifs; |
2108 | + |
2109 | if (sta) { |
2110 | ista = (struct ath9k_htc_sta *) sta->drv_priv; |
2111 | sta_idx = ista->index; |
2112 | @@ -96,7 +101,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) |
2113 | memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr)); |
2114 | |
2115 | tx_hdr.node_idx = sta_idx; |
2116 | - tx_hdr.vif_idx = avp->index; |
2117 | + tx_hdr.vif_idx = vif_idx; |
2118 | |
2119 | if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { |
2120 | tx_ctl.type = ATH9K_HTC_AMPDU; |
2121 | @@ -156,7 +161,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb) |
2122 | tx_ctl.type = ATH9K_HTC_NORMAL; |
2123 | |
2124 | mgmt_hdr.node_idx = sta_idx; |
2125 | - mgmt_hdr.vif_idx = avp->index; |
2126 | + mgmt_hdr.vif_idx = vif_idx; |
2127 | mgmt_hdr.tidno = 0; |
2128 | mgmt_hdr.flags = 0; |
2129 | |
2130 | diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c |
2131 | index c44a303..2a9480d 100644 |
2132 | --- a/drivers/net/wireless/iwlwifi/iwl-3945.c |
2133 | +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c |
2134 | @@ -915,22 +915,6 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv, |
2135 | rts_retry_limit = data_retry_limit; |
2136 | tx_cmd->rts_retry_limit = rts_retry_limit; |
2137 | |
2138 | - if (ieee80211_is_mgmt(fc)) { |
2139 | - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { |
2140 | - case cpu_to_le16(IEEE80211_STYPE_AUTH): |
2141 | - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): |
2142 | - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): |
2143 | - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): |
2144 | - if (tx_flags & TX_CMD_FLG_RTS_MSK) { |
2145 | - tx_flags &= ~TX_CMD_FLG_RTS_MSK; |
2146 | - tx_flags |= TX_CMD_FLG_CTS_MSK; |
2147 | - } |
2148 | - break; |
2149 | - default: |
2150 | - break; |
2151 | - } |
2152 | - } |
2153 | - |
2154 | tx_cmd->rate = rate; |
2155 | tx_cmd->tx_flags = tx_flags; |
2156 | |
2157 | diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c |
2158 | index 01658cf..2a30397 100644 |
2159 | --- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c |
2160 | +++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c |
2161 | @@ -209,10 +209,21 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv) |
2162 | } |
2163 | } |
2164 | |
2165 | -static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info, |
2166 | - __le32 *tx_flags) |
2167 | +static void iwlagn_rts_tx_cmd_flag(struct iwl_priv *priv, |
2168 | + struct ieee80211_tx_info *info, |
2169 | + __le16 fc, __le32 *tx_flags) |
2170 | { |
2171 | - *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; |
2172 | + if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS || |
2173 | + info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { |
2174 | + *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; |
2175 | + return; |
2176 | + } |
2177 | + |
2178 | + if (priv->cfg->use_rts_for_ht && |
2179 | + info->flags & IEEE80211_TX_CTL_AMPDU) { |
2180 | + *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK; |
2181 | + return; |
2182 | + } |
2183 | } |
2184 | |
2185 | /* Calc max signal level (dBm) among 3 possible receivers */ |
2186 | diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c |
2187 | index cf4a95b..ca46831 100644 |
2188 | --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c |
2189 | +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c |
2190 | @@ -325,18 +325,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid, |
2191 | struct iwl_lq_sta *lq_data, |
2192 | struct ieee80211_sta *sta) |
2193 | { |
2194 | - if ((tid < TID_MAX_LOAD_COUNT) && |
2195 | - !rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta)) { |
2196 | - if (priv->cfg->use_rts_for_ht) { |
2197 | - /* |
2198 | - * switch to RTS/CTS if it is the prefer protection |
2199 | - * method for HT traffic |
2200 | - */ |
2201 | - IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n"); |
2202 | - priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN; |
2203 | - iwlcore_commit_rxon(priv); |
2204 | - } |
2205 | - } |
2206 | + if (tid < TID_MAX_LOAD_COUNT) |
2207 | + rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta); |
2208 | + else |
2209 | + IWL_ERR(priv, "tid exceeds max load count: %d/%d\n", |
2210 | + tid, TID_MAX_LOAD_COUNT); |
2211 | } |
2212 | |
2213 | static inline int get_num_of_ant_from_rate(u32 rate_n_flags) |
2214 | diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c |
2215 | index 7d614c4..3a3d27c 100644 |
2216 | --- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c |
2217 | +++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c |
2218 | @@ -376,10 +376,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv, |
2219 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; |
2220 | } |
2221 | |
2222 | - priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); |
2223 | - |
2224 | - if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) |
2225 | - tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; |
2226 | + priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags); |
2227 | |
2228 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); |
2229 | if (ieee80211_is_mgmt(fc)) { |
2230 | @@ -453,21 +450,6 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv, |
2231 | if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) |
2232 | rate_flags |= RATE_MCS_CCK_MSK; |
2233 | |
2234 | - /* Set up RTS and CTS flags for certain packets */ |
2235 | - switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { |
2236 | - case cpu_to_le16(IEEE80211_STYPE_AUTH): |
2237 | - case cpu_to_le16(IEEE80211_STYPE_DEAUTH): |
2238 | - case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): |
2239 | - case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): |
2240 | - if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) { |
2241 | - tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK; |
2242 | - tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK; |
2243 | - } |
2244 | - break; |
2245 | - default: |
2246 | - break; |
2247 | - } |
2248 | - |
2249 | /* Set up antennas */ |
2250 | priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant); |
2251 | rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant); |
2252 | diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c |
2253 | index 24aff65..c7f56b4 100644 |
2254 | --- a/drivers/net/wireless/iwlwifi/iwl-agn.c |
2255 | +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c |
2256 | @@ -200,13 +200,6 @@ int iwl_commit_rxon(struct iwl_priv *priv) |
2257 | |
2258 | priv->start_calib = 0; |
2259 | if (new_assoc) { |
2260 | - /* |
2261 | - * allow CTS-to-self if possible for new association. |
2262 | - * this is relevant only for 5000 series and up, |
2263 | - * but will not damage 4965 |
2264 | - */ |
2265 | - priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; |
2266 | - |
2267 | /* Apply the new configuration |
2268 | * RXON assoc doesn't clear the station table in uCode, |
2269 | */ |
2270 | @@ -3336,13 +3329,40 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, |
2271 | IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n", |
2272 | priv->_agn.agg_tids_count); |
2273 | } |
2274 | + if (priv->cfg->use_rts_for_ht) { |
2275 | + struct iwl_station_priv *sta_priv = |
2276 | + (void *) sta->drv_priv; |
2277 | + /* |
2278 | + * switch off RTS/CTS if it was previously enabled |
2279 | + */ |
2280 | + |
2281 | + sta_priv->lq_sta.lq.general_params.flags &= |
2282 | + ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; |
2283 | + iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, |
2284 | + CMD_ASYNC, false); |
2285 | + } |
2286 | + break; |
2287 | if (test_bit(STATUS_EXIT_PENDING, &priv->status)) |
2288 | return 0; |
2289 | else |
2290 | return ret; |
2291 | case IEEE80211_AMPDU_TX_OPERATIONAL: |
2292 | - /* do nothing */ |
2293 | - return -EOPNOTSUPP; |
2294 | + if (priv->cfg->use_rts_for_ht) { |
2295 | + struct iwl_station_priv *sta_priv = |
2296 | + (void *) sta->drv_priv; |
2297 | + |
2298 | + /* |
2299 | + * switch to RTS/CTS if it is the prefer protection |
2300 | + * method for HT traffic |
2301 | + */ |
2302 | + |
2303 | + sta_priv->lq_sta.lq.general_params.flags |= |
2304 | + LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK; |
2305 | + iwl_send_lq_cmd(priv, &sta_priv->lq_sta.lq, |
2306 | + CMD_ASYNC, false); |
2307 | + } |
2308 | + ret = 0; |
2309 | + break; |
2310 | default: |
2311 | IWL_DEBUG_HT(priv, "unknown\n"); |
2312 | return -EINVAL; |
2313 | @@ -3423,6 +3443,49 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw, |
2314 | return 0; |
2315 | } |
2316 | |
2317 | +static void iwlagn_configure_filter(struct ieee80211_hw *hw, |
2318 | + unsigned int changed_flags, |
2319 | + unsigned int *total_flags, |
2320 | + u64 multicast) |
2321 | +{ |
2322 | + struct iwl_priv *priv = hw->priv; |
2323 | + __le32 filter_or = 0, filter_nand = 0; |
2324 | + |
2325 | +#define CHK(test, flag) do { \ |
2326 | + if (*total_flags & (test)) \ |
2327 | + filter_or |= (flag); \ |
2328 | + else \ |
2329 | + filter_nand |= (flag); \ |
2330 | + } while (0) |
2331 | + |
2332 | + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", |
2333 | + changed_flags, *total_flags); |
2334 | + |
2335 | + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); |
2336 | + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); |
2337 | + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); |
2338 | + |
2339 | +#undef CHK |
2340 | + |
2341 | + mutex_lock(&priv->mutex); |
2342 | + |
2343 | + priv->staging_rxon.filter_flags &= ~filter_nand; |
2344 | + priv->staging_rxon.filter_flags |= filter_or; |
2345 | + |
2346 | + iwlcore_commit_rxon(priv); |
2347 | + |
2348 | + mutex_unlock(&priv->mutex); |
2349 | + |
2350 | + /* |
2351 | + * Receiving all multicast frames is always enabled by the |
2352 | + * default flags setup in iwl_connection_init_rx_config() |
2353 | + * since we currently do not support programming multicast |
2354 | + * filters into the device. |
2355 | + */ |
2356 | + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | |
2357 | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; |
2358 | +} |
2359 | + |
2360 | /***************************************************************************** |
2361 | * |
2362 | * driver setup and teardown |
2363 | @@ -3583,7 +3646,7 @@ static struct ieee80211_ops iwl_hw_ops = { |
2364 | .add_interface = iwl_mac_add_interface, |
2365 | .remove_interface = iwl_mac_remove_interface, |
2366 | .config = iwl_mac_config, |
2367 | - .configure_filter = iwl_configure_filter, |
2368 | + .configure_filter = iwlagn_configure_filter, |
2369 | .set_key = iwl_mac_set_key, |
2370 | .update_tkip_key = iwl_mac_update_tkip_key, |
2371 | .conf_tx = iwl_mac_conf_tx, |
2372 | diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c |
2373 | index 5bbc529..cd5b664 100644 |
2374 | --- a/drivers/net/wireless/iwlwifi/iwl-core.c |
2375 | +++ b/drivers/net/wireless/iwlwifi/iwl-core.c |
2376 | @@ -403,19 +403,36 @@ EXPORT_SYMBOL(iwlcore_free_geos); |
2377 | * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this |
2378 | * function. |
2379 | */ |
2380 | -void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, |
2381 | - __le32 *tx_flags) |
2382 | +void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv, |
2383 | + struct ieee80211_tx_info *info, |
2384 | + __le16 fc, __le32 *tx_flags) |
2385 | { |
2386 | if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) { |
2387 | *tx_flags |= TX_CMD_FLG_RTS_MSK; |
2388 | *tx_flags &= ~TX_CMD_FLG_CTS_MSK; |
2389 | + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; |
2390 | + |
2391 | + if (!ieee80211_is_mgmt(fc)) |
2392 | + return; |
2393 | + |
2394 | + switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { |
2395 | + case cpu_to_le16(IEEE80211_STYPE_AUTH): |
2396 | + case cpu_to_le16(IEEE80211_STYPE_DEAUTH): |
2397 | + case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ): |
2398 | + case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ): |
2399 | + *tx_flags &= ~TX_CMD_FLG_RTS_MSK; |
2400 | + *tx_flags |= TX_CMD_FLG_CTS_MSK; |
2401 | + break; |
2402 | + } |
2403 | } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { |
2404 | *tx_flags &= ~TX_CMD_FLG_RTS_MSK; |
2405 | *tx_flags |= TX_CMD_FLG_CTS_MSK; |
2406 | + *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; |
2407 | } |
2408 | } |
2409 | EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag); |
2410 | |
2411 | + |
2412 | static bool is_single_rx_stream(struct iwl_priv *priv) |
2413 | { |
2414 | return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC || |
2415 | @@ -1294,51 +1311,6 @@ out: |
2416 | EXPORT_SYMBOL(iwl_apm_init); |
2417 | |
2418 | |
2419 | - |
2420 | -void iwl_configure_filter(struct ieee80211_hw *hw, |
2421 | - unsigned int changed_flags, |
2422 | - unsigned int *total_flags, |
2423 | - u64 multicast) |
2424 | -{ |
2425 | - struct iwl_priv *priv = hw->priv; |
2426 | - __le32 filter_or = 0, filter_nand = 0; |
2427 | - |
2428 | -#define CHK(test, flag) do { \ |
2429 | - if (*total_flags & (test)) \ |
2430 | - filter_or |= (flag); \ |
2431 | - else \ |
2432 | - filter_nand |= (flag); \ |
2433 | - } while (0) |
2434 | - |
2435 | - IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", |
2436 | - changed_flags, *total_flags); |
2437 | - |
2438 | - CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); |
2439 | - CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); |
2440 | - CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); |
2441 | - |
2442 | -#undef CHK |
2443 | - |
2444 | - mutex_lock(&priv->mutex); |
2445 | - |
2446 | - priv->staging_rxon.filter_flags &= ~filter_nand; |
2447 | - priv->staging_rxon.filter_flags |= filter_or; |
2448 | - |
2449 | - iwlcore_commit_rxon(priv); |
2450 | - |
2451 | - mutex_unlock(&priv->mutex); |
2452 | - |
2453 | - /* |
2454 | - * Receiving all multicast frames is always enabled by the |
2455 | - * default flags setup in iwl_connection_init_rx_config() |
2456 | - * since we currently do not support programming multicast |
2457 | - * filters into the device. |
2458 | - */ |
2459 | - *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | |
2460 | - FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; |
2461 | -} |
2462 | -EXPORT_SYMBOL(iwl_configure_filter); |
2463 | - |
2464 | int iwl_set_hw_params(struct iwl_priv *priv) |
2465 | { |
2466 | priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; |
2467 | @@ -1936,6 +1908,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw, |
2468 | priv->staging_rxon.flags |= RXON_FLG_TGG_PROTECT_MSK; |
2469 | else |
2470 | priv->staging_rxon.flags &= ~RXON_FLG_TGG_PROTECT_MSK; |
2471 | + if (bss_conf->use_cts_prot) |
2472 | + priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN; |
2473 | + else |
2474 | + priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN; |
2475 | } |
2476 | |
2477 | if (changes & BSS_CHANGED_BASIC_RATES) { |
2478 | diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h |
2479 | index 31775bd..e8ef317 100644 |
2480 | --- a/drivers/net/wireless/iwlwifi/iwl-core.h |
2481 | +++ b/drivers/net/wireless/iwlwifi/iwl-core.h |
2482 | @@ -102,8 +102,9 @@ struct iwl_hcmd_utils_ops { |
2483 | u32 min_average_noise, |
2484 | u8 default_chain); |
2485 | void (*chain_noise_reset)(struct iwl_priv *priv); |
2486 | - void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, |
2487 | - __le32 *tx_flags); |
2488 | + void (*rts_tx_cmd_flag)(struct iwl_priv *priv, |
2489 | + struct ieee80211_tx_info *info, |
2490 | + __le16 fc, __le32 *tx_flags); |
2491 | int (*calc_rssi)(struct iwl_priv *priv, |
2492 | struct iwl_rx_phy_res *rx_resp); |
2493 | void (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif); |
2494 | @@ -355,9 +356,6 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv, |
2495 | u32 decrypt_res, |
2496 | struct ieee80211_rx_status *stats); |
2497 | void iwl_irq_handle_error(struct iwl_priv *priv); |
2498 | -void iwl_configure_filter(struct ieee80211_hw *hw, |
2499 | - unsigned int changed_flags, |
2500 | - unsigned int *total_flags, u64 multicast); |
2501 | int iwl_set_hw_params(struct iwl_priv *priv); |
2502 | void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif); |
2503 | void iwl_bss_info_changed(struct ieee80211_hw *hw, |
2504 | @@ -375,8 +373,9 @@ void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif); |
2505 | void iwl_mac_reset_tsf(struct ieee80211_hw *hw); |
2506 | int iwl_alloc_txq_mem(struct iwl_priv *priv); |
2507 | void iwl_free_txq_mem(struct iwl_priv *priv); |
2508 | -void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info, |
2509 | - __le32 *tx_flags); |
2510 | +void iwlcore_rts_tx_cmd_flag(struct iwl_priv *priv, |
2511 | + struct ieee80211_tx_info *info, |
2512 | + __le16 fc, __le32 *tx_flags); |
2513 | #ifdef CONFIG_IWLWIFI_DEBUGFS |
2514 | int iwl_alloc_traffic_mem(struct iwl_priv *priv); |
2515 | void iwl_free_traffic_mem(struct iwl_priv *priv); |
2516 | diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c |
2517 | index a27872d..39c0d2d 100644 |
2518 | --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c |
2519 | +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c |
2520 | @@ -434,10 +434,7 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv, |
2521 | tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; |
2522 | } |
2523 | |
2524 | - priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags); |
2525 | - |
2526 | - if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) |
2527 | - tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; |
2528 | + priv->cfg->ops->utils->rts_tx_cmd_flag(priv, info, fc, &tx_flags); |
2529 | |
2530 | tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); |
2531 | if (ieee80211_is_mgmt(fc)) { |
2532 | @@ -3465,6 +3462,55 @@ static int iwl3945_mac_sta_add(struct ieee80211_hw *hw, |
2533 | |
2534 | return 0; |
2535 | } |
2536 | + |
2537 | +static void iwl3945_configure_filter(struct ieee80211_hw *hw, |
2538 | + unsigned int changed_flags, |
2539 | + unsigned int *total_flags, |
2540 | + u64 multicast) |
2541 | +{ |
2542 | + struct iwl_priv *priv = hw->priv; |
2543 | + __le32 filter_or = 0, filter_nand = 0; |
2544 | + |
2545 | +#define CHK(test, flag) do { \ |
2546 | + if (*total_flags & (test)) \ |
2547 | + filter_or |= (flag); \ |
2548 | + else \ |
2549 | + filter_nand |= (flag); \ |
2550 | + } while (0) |
2551 | + |
2552 | + IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n", |
2553 | + changed_flags, *total_flags); |
2554 | + |
2555 | + CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); |
2556 | + CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); |
2557 | + CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); |
2558 | + |
2559 | +#undef CHK |
2560 | + |
2561 | + mutex_lock(&priv->mutex); |
2562 | + |
2563 | + priv->staging_rxon.filter_flags &= ~filter_nand; |
2564 | + priv->staging_rxon.filter_flags |= filter_or; |
2565 | + |
2566 | + /* |
2567 | + * Committing directly here breaks for some reason, |
2568 | + * but we'll eventually commit the filter flags |
2569 | + * change anyway. |
2570 | + */ |
2571 | + |
2572 | + mutex_unlock(&priv->mutex); |
2573 | + |
2574 | + /* |
2575 | + * Receiving all multicast frames is always enabled by the |
2576 | + * default flags setup in iwl_connection_init_rx_config() |
2577 | + * since we currently do not support programming multicast |
2578 | + * filters into the device. |
2579 | + */ |
2580 | + *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS | |
2581 | + FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; |
2582 | +} |
2583 | + |
2584 | + |
2585 | /***************************************************************************** |
2586 | * |
2587 | * sysfs attributes |
2588 | @@ -3870,7 +3916,7 @@ static struct ieee80211_ops iwl3945_hw_ops = { |
2589 | .add_interface = iwl_mac_add_interface, |
2590 | .remove_interface = iwl_mac_remove_interface, |
2591 | .config = iwl_mac_config, |
2592 | - .configure_filter = iwl_configure_filter, |
2593 | + .configure_filter = iwl3945_configure_filter, |
2594 | .set_key = iwl3945_mac_set_key, |
2595 | .conf_tx = iwl_mac_conf_tx, |
2596 | .reset_tsf = iwl_mac_reset_tsf, |
2597 | diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c |
2598 | index a37b30c..ce3722f 100644 |
2599 | --- a/drivers/net/wireless/wl12xx/wl1251_cmd.c |
2600 | +++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c |
2601 | @@ -484,7 +484,7 @@ int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout) |
2602 | |
2603 | cmd->timeout = timeout; |
2604 | |
2605 | - ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd)); |
2606 | + ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd)); |
2607 | if (ret < 0) { |
2608 | wl1251_error("cmd trigger scan to failed: %d", ret); |
2609 | goto out; |
2610 | diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c |
2611 | index 71ff154..90111d7 100644 |
2612 | --- a/drivers/platform/x86/compal-laptop.c |
2613 | +++ b/drivers/platform/x86/compal-laptop.c |
2614 | @@ -259,6 +259,14 @@ static struct dmi_system_id __initdata compal_dmi_table[] = { |
2615 | .callback = dmi_check_cb |
2616 | }, |
2617 | { |
2618 | + .ident = "Dell Mini 1012", |
2619 | + .matches = { |
2620 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
2621 | + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), |
2622 | + }, |
2623 | + .callback = dmi_check_cb |
2624 | + }, |
2625 | + { |
2626 | .ident = "Dell Inspiron 11z", |
2627 | .matches = { |
2628 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
2629 | @@ -375,5 +383,6 @@ MODULE_ALIAS("dmi:*:rnIFT00:rvrIFT00:*"); |
2630 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron910:*"); |
2631 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1010:*"); |
2632 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1011:*"); |
2633 | +MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1012:*"); |
2634 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1110:*"); |
2635 | MODULE_ALIAS("dmi:*:svnDellInc.:pnInspiron1210:*"); |
2636 | diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c |
2637 | index 661e3ac..6110601 100644 |
2638 | --- a/drivers/platform/x86/dell-laptop.c |
2639 | +++ b/drivers/platform/x86/dell-laptop.c |
2640 | @@ -116,6 +116,13 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = { |
2641 | }, |
2642 | }, |
2643 | { |
2644 | + .ident = "Dell Mini 1012", |
2645 | + .matches = { |
2646 | + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
2647 | + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), |
2648 | + }, |
2649 | + }, |
2650 | + { |
2651 | .ident = "Dell Inspiron 11z", |
2652 | .matches = { |
2653 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
2654 | diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c |
2655 | index 5a1dc8a..03713bc 100644 |
2656 | --- a/drivers/regulator/wm8994-regulator.c |
2657 | +++ b/drivers/regulator/wm8994-regulator.c |
2658 | @@ -219,8 +219,6 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev) |
2659 | |
2660 | ldo->wm8994 = wm8994; |
2661 | |
2662 | - ldo->is_enabled = true; |
2663 | - |
2664 | if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) { |
2665 | ldo->enable = pdata->ldo[id].enable; |
2666 | |
2667 | @@ -237,7 +235,8 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev) |
2668 | ret); |
2669 | goto err_gpio; |
2670 | } |
2671 | - } |
2672 | + } else |
2673 | + ldo->is_enabled = true; |
2674 | |
2675 | ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev, |
2676 | pdata->ldo[id].init_data, ldo); |
2677 | diff --git a/drivers/serial/suncore.c b/drivers/serial/suncore.c |
2678 | index 544f2e2..6381a02 100644 |
2679 | --- a/drivers/serial/suncore.c |
2680 | +++ b/drivers/serial/suncore.c |
2681 | @@ -55,7 +55,12 @@ EXPORT_SYMBOL(sunserial_unregister_minors); |
2682 | int sunserial_console_match(struct console *con, struct device_node *dp, |
2683 | struct uart_driver *drv, int line, bool ignore_line) |
2684 | { |
2685 | - if (!con || of_console_device != dp) |
2686 | + if (!con) |
2687 | + return 0; |
2688 | + |
2689 | + drv->cons = con; |
2690 | + |
2691 | + if (of_console_device != dp) |
2692 | return 0; |
2693 | |
2694 | if (!ignore_line) { |
2695 | @@ -69,12 +74,10 @@ int sunserial_console_match(struct console *con, struct device_node *dp, |
2696 | return 0; |
2697 | } |
2698 | |
2699 | - con->index = line; |
2700 | - drv->cons = con; |
2701 | - |
2702 | - if (!console_set_on_cmdline) |
2703 | + if (!console_set_on_cmdline) { |
2704 | + con->index = line; |
2705 | add_preferred_console(con->name, line, NULL); |
2706 | - |
2707 | + } |
2708 | return 1; |
2709 | } |
2710 | EXPORT_SYMBOL(sunserial_console_match); |
2711 | diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c |
2712 | index 7a582e8..ce1d251 100644 |
2713 | --- a/drivers/staging/batman-adv/hard-interface.c |
2714 | +++ b/drivers/staging/batman-adv/hard-interface.c |
2715 | @@ -128,6 +128,9 @@ static bool hardif_is_iface_up(struct batman_if *batman_if) |
2716 | |
2717 | static void update_mac_addresses(struct batman_if *batman_if) |
2718 | { |
2719 | + if (!batman_if || !batman_if->packet_buff) |
2720 | + return; |
2721 | + |
2722 | addr_to_string(batman_if->addr_str, batman_if->net_dev->dev_addr); |
2723 | |
2724 | memcpy(((struct batman_packet *)(batman_if->packet_buff))->orig, |
2725 | @@ -194,8 +197,6 @@ static void hardif_activate_interface(struct bat_priv *bat_priv, |
2726 | if (batman_if->if_status != IF_INACTIVE) |
2727 | return; |
2728 | |
2729 | - dev_hold(batman_if->net_dev); |
2730 | - |
2731 | update_mac_addresses(batman_if); |
2732 | batman_if->if_status = IF_TO_BE_ACTIVATED; |
2733 | |
2734 | @@ -222,8 +223,6 @@ static void hardif_deactivate_interface(struct batman_if *batman_if) |
2735 | (batman_if->if_status != IF_TO_BE_ACTIVATED)) |
2736 | return; |
2737 | |
2738 | - dev_put(batman_if->net_dev); |
2739 | - |
2740 | batman_if->if_status = IF_INACTIVE; |
2741 | |
2742 | printk(KERN_INFO "batman-adv:Interface deactivated: %s\n", |
2743 | @@ -321,12 +320,14 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) |
2744 | if (ret != 1) |
2745 | goto out; |
2746 | |
2747 | + dev_hold(net_dev); |
2748 | + |
2749 | batman_if = kmalloc(sizeof(struct batman_if), GFP_ATOMIC); |
2750 | if (!batman_if) { |
2751 | printk(KERN_ERR "batman-adv:" |
2752 | "Can't add interface (%s): out of memory\n", |
2753 | net_dev->name); |
2754 | - goto out; |
2755 | + goto release_dev; |
2756 | } |
2757 | |
2758 | batman_if->dev = kstrdup(net_dev->name, GFP_ATOMIC); |
2759 | @@ -340,6 +341,7 @@ static struct batman_if *hardif_add_interface(struct net_device *net_dev) |
2760 | batman_if->if_num = -1; |
2761 | batman_if->net_dev = net_dev; |
2762 | batman_if->if_status = IF_NOT_IN_USE; |
2763 | + batman_if->packet_buff = NULL; |
2764 | INIT_LIST_HEAD(&batman_if->list); |
2765 | |
2766 | check_known_mac_addr(batman_if->net_dev->dev_addr); |
2767 | @@ -350,6 +352,8 @@ free_dev: |
2768 | kfree(batman_if->dev); |
2769 | free_if: |
2770 | kfree(batman_if); |
2771 | +release_dev: |
2772 | + dev_put(net_dev); |
2773 | out: |
2774 | return NULL; |
2775 | } |
2776 | @@ -378,6 +382,7 @@ static void hardif_remove_interface(struct batman_if *batman_if) |
2777 | batman_if->if_status = IF_TO_BE_REMOVED; |
2778 | list_del_rcu(&batman_if->list); |
2779 | sysfs_del_hardif(&batman_if->hardif_obj); |
2780 | + dev_put(batman_if->net_dev); |
2781 | call_rcu(&batman_if->rcu, hardif_free_interface); |
2782 | } |
2783 | |
2784 | @@ -397,15 +402,13 @@ static int hard_if_event(struct notifier_block *this, |
2785 | /* FIXME: each batman_if will be attached to a softif */ |
2786 | struct bat_priv *bat_priv = netdev_priv(soft_device); |
2787 | |
2788 | - if (!batman_if) |
2789 | - batman_if = hardif_add_interface(net_dev); |
2790 | + if (!batman_if && event == NETDEV_REGISTER) |
2791 | + batman_if = hardif_add_interface(net_dev); |
2792 | |
2793 | if (!batman_if) |
2794 | goto out; |
2795 | |
2796 | switch (event) { |
2797 | - case NETDEV_REGISTER: |
2798 | - break; |
2799 | case NETDEV_UP: |
2800 | hardif_activate_interface(bat_priv, batman_if); |
2801 | break; |
2802 | diff --git a/drivers/staging/batman-adv/originator.c b/drivers/staging/batman-adv/originator.c |
2803 | index 568aef8..2177c50 100644 |
2804 | --- a/drivers/staging/batman-adv/originator.c |
2805 | +++ b/drivers/staging/batman-adv/originator.c |
2806 | @@ -401,11 +401,12 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) |
2807 | int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) |
2808 | { |
2809 | struct orig_node *orig_node; |
2810 | + unsigned long flags; |
2811 | HASHIT(hashit); |
2812 | |
2813 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on |
2814 | * if_num */ |
2815 | - spin_lock(&orig_hash_lock); |
2816 | + spin_lock_irqsave(&orig_hash_lock, flags); |
2817 | |
2818 | while (hash_iterate(orig_hash, &hashit)) { |
2819 | orig_node = hashit.bucket->data; |
2820 | @@ -414,11 +415,11 @@ int orig_hash_add_if(struct batman_if *batman_if, int max_if_num) |
2821 | goto err; |
2822 | } |
2823 | |
2824 | - spin_unlock(&orig_hash_lock); |
2825 | + spin_unlock_irqrestore(&orig_hash_lock, flags); |
2826 | return 0; |
2827 | |
2828 | err: |
2829 | - spin_unlock(&orig_hash_lock); |
2830 | + spin_unlock_irqrestore(&orig_hash_lock, flags); |
2831 | return -ENOMEM; |
2832 | } |
2833 | |
2834 | @@ -480,12 +481,13 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) |
2835 | { |
2836 | struct batman_if *batman_if_tmp; |
2837 | struct orig_node *orig_node; |
2838 | + unsigned long flags; |
2839 | HASHIT(hashit); |
2840 | int ret; |
2841 | |
2842 | /* resize all orig nodes because orig_node->bcast_own(_sum) depend on |
2843 | * if_num */ |
2844 | - spin_lock(&orig_hash_lock); |
2845 | + spin_lock_irqsave(&orig_hash_lock, flags); |
2846 | |
2847 | while (hash_iterate(orig_hash, &hashit)) { |
2848 | orig_node = hashit.bucket->data; |
2849 | @@ -512,10 +514,10 @@ int orig_hash_del_if(struct batman_if *batman_if, int max_if_num) |
2850 | rcu_read_unlock(); |
2851 | |
2852 | batman_if->if_num = -1; |
2853 | - spin_unlock(&orig_hash_lock); |
2854 | + spin_unlock_irqrestore(&orig_hash_lock, flags); |
2855 | return 0; |
2856 | |
2857 | err: |
2858 | - spin_unlock(&orig_hash_lock); |
2859 | + spin_unlock_irqrestore(&orig_hash_lock, flags); |
2860 | return -ENOMEM; |
2861 | } |
2862 | diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c |
2863 | index bfc99a9..221f999 100644 |
2864 | --- a/drivers/usb/host/xhci-ring.c |
2865 | +++ b/drivers/usb/host/xhci-ring.c |
2866 | @@ -131,7 +131,7 @@ static void next_trb(struct xhci_hcd *xhci, |
2867 | *seg = (*seg)->next; |
2868 | *trb = ((*seg)->trbs); |
2869 | } else { |
2870 | - *trb = (*trb)++; |
2871 | + (*trb)++; |
2872 | } |
2873 | } |
2874 | |
2875 | diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c |
2876 | index 2bef441..80bf833 100644 |
2877 | --- a/drivers/usb/serial/cp210x.c |
2878 | +++ b/drivers/usb/serial/cp210x.c |
2879 | @@ -222,8 +222,8 @@ static struct usb_serial_driver cp210x_device = { |
2880 | #define BITS_STOP_2 0x0002 |
2881 | |
2882 | /* CP210X_SET_BREAK */ |
2883 | -#define BREAK_ON 0x0000 |
2884 | -#define BREAK_OFF 0x0001 |
2885 | +#define BREAK_ON 0x0001 |
2886 | +#define BREAK_OFF 0x0000 |
2887 | |
2888 | /* CP210X_(SET_MHS|GET_MDMSTS) */ |
2889 | #define CONTROL_DTR 0x0001 |
2890 | diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c |
2891 | index eb12d9b..63ddb2f 100644 |
2892 | --- a/drivers/usb/serial/ftdi_sio.c |
2893 | +++ b/drivers/usb/serial/ftdi_sio.c |
2894 | @@ -180,6 +180,7 @@ static struct usb_device_id id_table_combined [] = { |
2895 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, |
2896 | { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, |
2897 | { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, |
2898 | + { USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) }, |
2899 | { USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) }, |
2900 | { USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) }, |
2901 | { USB_DEVICE(FTDI_VID, FTDI_XF_547_PID) }, |
2902 | @@ -750,6 +751,8 @@ static struct usb_device_id id_table_combined [] = { |
2903 | { USB_DEVICE(FTDI_VID, XVERVE_SIGNALYZER_SH4_PID), |
2904 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
2905 | { USB_DEVICE(FTDI_VID, SEGWAY_RMP200_PID) }, |
2906 | + { USB_DEVICE(IONICS_VID, IONICS_PLUGCOMPUTER_PID), |
2907 | + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
2908 | { }, /* Optional parameter entry */ |
2909 | { } /* Terminating entry */ |
2910 | }; |
2911 | @@ -1376,7 +1379,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port) |
2912 | } |
2913 | |
2914 | /* set max packet size based on descriptor */ |
2915 | - priv->max_packet_size = ep_desc->wMaxPacketSize; |
2916 | + priv->max_packet_size = le16_to_cpu(ep_desc->wMaxPacketSize); |
2917 | |
2918 | dev_info(&udev->dev, "Setting MaxPacketSize %d\n", priv->max_packet_size); |
2919 | } |
2920 | diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h |
2921 | index 6e612c5..2e95857 100644 |
2922 | --- a/drivers/usb/serial/ftdi_sio_ids.h |
2923 | +++ b/drivers/usb/serial/ftdi_sio_ids.h |
2924 | @@ -110,6 +110,9 @@ |
2925 | /* Propox devices */ |
2926 | #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738 |
2927 | |
2928 | +/* Lenz LI-USB Computer Interface. */ |
2929 | +#define FTDI_LENZ_LIUSB_PID 0xD780 |
2930 | + |
2931 | /* |
2932 | * Xsens Technologies BV products (http://www.xsens.com). |
2933 | */ |
2934 | @@ -989,6 +992,12 @@ |
2935 | #define ALTI2_N3_PID 0x6001 /* Neptune 3 */ |
2936 | |
2937 | /* |
2938 | + * Ionics PlugComputer |
2939 | + */ |
2940 | +#define IONICS_VID 0x1c0c |
2941 | +#define IONICS_PLUGCOMPUTER_PID 0x0102 |
2942 | + |
2943 | +/* |
2944 | * Dresden Elektronik Sensor Terminal Board |
2945 | */ |
2946 | #define DE_VID 0x1cf1 /* Vendor ID */ |
2947 | diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c |
2948 | index 0fca265..9991063 100644 |
2949 | --- a/drivers/usb/serial/io_ti.c |
2950 | +++ b/drivers/usb/serial/io_ti.c |
2951 | @@ -1151,7 +1151,7 @@ static int download_fw(struct edgeport_serial *serial) |
2952 | |
2953 | /* Check if we have an old version in the I2C and |
2954 | update if necessary */ |
2955 | - if (download_cur_ver != download_new_ver) { |
2956 | + if (download_cur_ver < download_new_ver) { |
2957 | dbg("%s - Update I2C dld from %d.%d to %d.%d", |
2958 | __func__, |
2959 | firmware_version->Ver_Major, |
2960 | diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c |
2961 | index a6b207c..1f00f24 100644 |
2962 | --- a/drivers/usb/serial/navman.c |
2963 | +++ b/drivers/usb/serial/navman.c |
2964 | @@ -25,6 +25,7 @@ static int debug; |
2965 | |
2966 | static const struct usb_device_id id_table[] = { |
2967 | { USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */ |
2968 | + { USB_DEVICE(0x0df7, 0x0900) }, /* Mobile Action i-gotU */ |
2969 | { }, |
2970 | }; |
2971 | MODULE_DEVICE_TABLE(usb, id_table); |
2972 | diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c |
2973 | index 5c35b3a..80c74d4 100644 |
2974 | --- a/drivers/usb/serial/option.c |
2975 | +++ b/drivers/usb/serial/option.c |
2976 | @@ -368,6 +368,10 @@ static void option_instat_callback(struct urb *urb); |
2977 | #define OLIVETTI_VENDOR_ID 0x0b3c |
2978 | #define OLIVETTI_PRODUCT_OLICARD100 0xc000 |
2979 | |
2980 | +/* Celot products */ |
2981 | +#define CELOT_VENDOR_ID 0x211f |
2982 | +#define CELOT_PRODUCT_CT680M 0x6801 |
2983 | + |
2984 | /* some devices interfaces need special handling due to a number of reasons */ |
2985 | enum option_blacklist_reason { |
2986 | OPTION_BLACKLIST_NONE = 0, |
2987 | @@ -891,10 +895,9 @@ static const struct usb_device_id option_ids[] = { |
2988 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) }, |
2989 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)}, |
2990 | { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)}, |
2991 | - |
2992 | { USB_DEVICE(CINTERION_VENDOR_ID, 0x0047) }, |
2993 | - |
2994 | { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, |
2995 | + { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ |
2996 | { } /* Terminating entry */ |
2997 | }; |
2998 | MODULE_DEVICE_TABLE(usb, option_ids); |
2999 | diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c |
3000 | index 6b60018..c98f0fb 100644 |
3001 | --- a/drivers/usb/serial/pl2303.c |
3002 | +++ b/drivers/usb/serial/pl2303.c |
3003 | @@ -86,6 +86,7 @@ static const struct usb_device_id id_table[] = { |
3004 | { USB_DEVICE(SUPERIAL_VENDOR_ID, SUPERIAL_PRODUCT_ID) }, |
3005 | { USB_DEVICE(HP_VENDOR_ID, HP_LD220_PRODUCT_ID) }, |
3006 | { USB_DEVICE(CRESSI_VENDOR_ID, CRESSI_EDY_PRODUCT_ID) }, |
3007 | + { USB_DEVICE(ZEAGLE_VENDOR_ID, ZEAGLE_N2ITION3_PRODUCT_ID) }, |
3008 | { USB_DEVICE(SONY_VENDOR_ID, SONY_QN3USB_PRODUCT_ID) }, |
3009 | { USB_DEVICE(SANWA_VENDOR_ID, SANWA_PRODUCT_ID) }, |
3010 | { USB_DEVICE(ADLINK_VENDOR_ID, ADLINK_ND6530_PRODUCT_ID) }, |
3011 | diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h |
3012 | index a871645..43eb9bd 100644 |
3013 | --- a/drivers/usb/serial/pl2303.h |
3014 | +++ b/drivers/usb/serial/pl2303.h |
3015 | @@ -128,6 +128,10 @@ |
3016 | #define CRESSI_VENDOR_ID 0x04b8 |
3017 | #define CRESSI_EDY_PRODUCT_ID 0x0521 |
3018 | |
3019 | +/* Zeagle dive computer interface */ |
3020 | +#define ZEAGLE_VENDOR_ID 0x04b8 |
3021 | +#define ZEAGLE_N2ITION3_PRODUCT_ID 0x0522 |
3022 | + |
3023 | /* Sony, USB data cable for CMD-Jxx mobile phones */ |
3024 | #define SONY_VENDOR_ID 0x054c |
3025 | #define SONY_QN3USB_PRODUCT_ID 0x0437 |
3026 | diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h |
3027 | index f3a4e15..f96a471 100644 |
3028 | --- a/drivers/video/matrox/matroxfb_base.h |
3029 | +++ b/drivers/video/matrox/matroxfb_base.h |
3030 | @@ -151,13 +151,13 @@ static inline void mga_writel(vaddr_t va, unsigned int offs, u_int32_t value) { |
3031 | static inline void mga_memcpy_toio(vaddr_t va, const void* src, int len) { |
3032 | #if defined(__alpha__) || defined(__i386__) || defined(__x86_64__) |
3033 | /* |
3034 | - * memcpy_toio works for us if: |
3035 | + * iowrite32_rep works for us if: |
3036 | * (1) Copies data as 32bit quantities, not byte after byte, |
3037 | * (2) Performs LE ordered stores, and |
3038 | * (3) It copes with unaligned source (destination is guaranteed to be page |
3039 | * aligned and length is guaranteed to be multiple of 4). |
3040 | */ |
3041 | - memcpy_toio(va.vaddr, src, len); |
3042 | + iowrite32_rep(va.vaddr, src, len >> 2); |
3043 | #else |
3044 | u_int32_t __iomem* addr = va.vaddr; |
3045 | |
3046 | diff --git a/firmware/Makefile b/firmware/Makefile |
3047 | index 020e629..99955ed 100644 |
3048 | --- a/firmware/Makefile |
3049 | +++ b/firmware/Makefile |
3050 | @@ -142,7 +142,7 @@ fw-shipped-$(CONFIG_YAM) += yam/1200.bin yam/9600.bin |
3051 | fw-shipped-all := $(fw-shipped-y) $(fw-shipped-m) $(fw-shipped-) |
3052 | |
3053 | # Directories which we _might_ need to create, so we have a rule for them. |
3054 | -firmware-dirs := $(sort $(patsubst %,$(objtree)/$(obj)/%/,$(dir $(fw-external-y) $(fw-shipped-all)))) |
3055 | +firmware-dirs := $(sort $(addprefix $(objtree)/$(obj)/,$(dir $(fw-external-y) $(fw-shipped-all)))) |
3056 | |
3057 | quiet_cmd_mkdir = MKDIR $(patsubst $(objtree)/%,%,$@) |
3058 | cmd_mkdir = mkdir -p $@ |
3059 | diff --git a/fs/char_dev.c b/fs/char_dev.c |
3060 | index d6db933..f80a4f2 100644 |
3061 | --- a/fs/char_dev.c |
3062 | +++ b/fs/char_dev.c |
3063 | @@ -20,6 +20,7 @@ |
3064 | #include <linux/cdev.h> |
3065 | #include <linux/mutex.h> |
3066 | #include <linux/backing-dev.h> |
3067 | +#include <linux/tty.h> |
3068 | |
3069 | #include "internal.h" |
3070 | |
3071 | diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c |
3072 | index e60416d..d69551e 100644 |
3073 | --- a/fs/nfs/dir.c |
3074 | +++ b/fs/nfs/dir.c |
3075 | @@ -1103,7 +1103,7 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) |
3076 | if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) |
3077 | goto no_open_dput; |
3078 | /* We can't create new files, or truncate existing ones here */ |
3079 | - openflags &= ~(O_CREAT|O_TRUNC); |
3080 | + openflags &= ~(O_CREAT|O_EXCL|O_TRUNC); |
3081 | |
3082 | /* |
3083 | * Note: we're not holding inode->i_mutex and so may be racing with |
3084 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
3085 | index 70015dd..330a3c9 100644 |
3086 | --- a/fs/nfs/nfs4proc.c |
3087 | +++ b/fs/nfs/nfs4proc.c |
3088 | @@ -2023,7 +2023,8 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) |
3089 | struct rpc_cred *cred; |
3090 | struct nfs4_state *state; |
3091 | struct dentry *res; |
3092 | - fmode_t fmode = nd->intent.open.flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); |
3093 | + int open_flags = nd->intent.open.flags; |
3094 | + fmode_t fmode = open_flags & (FMODE_READ | FMODE_WRITE | FMODE_EXEC); |
3095 | |
3096 | if (nd->flags & LOOKUP_CREATE) { |
3097 | attr.ia_mode = nd->intent.open.create_mode; |
3098 | @@ -2031,8 +2032,9 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) |
3099 | if (!IS_POSIXACL(dir)) |
3100 | attr.ia_mode &= ~current_umask(); |
3101 | } else { |
3102 | + open_flags &= ~O_EXCL; |
3103 | attr.ia_valid = 0; |
3104 | - BUG_ON(nd->intent.open.flags & O_CREAT); |
3105 | + BUG_ON(open_flags & O_CREAT); |
3106 | } |
3107 | |
3108 | cred = rpc_lookup_cred(); |
3109 | @@ -2041,7 +2043,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd) |
3110 | parent = dentry->d_parent; |
3111 | /* Protect against concurrent sillydeletes */ |
3112 | nfs_block_sillyrename(parent); |
3113 | - state = nfs4_do_open(dir, &path, fmode, nd->intent.open.flags, &attr, cred); |
3114 | + state = nfs4_do_open(dir, &path, fmode, open_flags, &attr, cred); |
3115 | put_rpccred(cred); |
3116 | if (IS_ERR(state)) { |
3117 | if (PTR_ERR(state) == -ENOENT) { |
3118 | diff --git a/fs/nfs/super.c b/fs/nfs/super.c |
3119 | index f9df16d..6bf11d7 100644 |
3120 | --- a/fs/nfs/super.c |
3121 | +++ b/fs/nfs/super.c |
3122 | @@ -652,6 +652,13 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, |
3123 | |
3124 | if (nfss->options & NFS_OPTION_FSCACHE) |
3125 | seq_printf(m, ",fsc"); |
3126 | + |
3127 | + if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONEG) { |
3128 | + if (nfss->flags & NFS_MOUNT_LOOKUP_CACHE_NONE) |
3129 | + seq_printf(m, ",lookupcache=none"); |
3130 | + else |
3131 | + seq_printf(m, ",lookupcache=pos"); |
3132 | + } |
3133 | } |
3134 | |
3135 | /* |
3136 | diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c |
3137 | index 414ef68..fbb354c 100644 |
3138 | --- a/fs/nilfs2/super.c |
3139 | +++ b/fs/nilfs2/super.c |
3140 | @@ -336,9 +336,10 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno) |
3141 | list_add(&sbi->s_list, &nilfs->ns_supers); |
3142 | up_write(&nilfs->ns_super_sem); |
3143 | |
3144 | + err = -ENOMEM; |
3145 | sbi->s_ifile = nilfs_ifile_new(sbi, nilfs->ns_inode_size); |
3146 | if (!sbi->s_ifile) |
3147 | - return -ENOMEM; |
3148 | + goto delist; |
3149 | |
3150 | down_read(&nilfs->ns_segctor_sem); |
3151 | err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, cno, 0, &raw_cp, |
3152 | @@ -369,6 +370,7 @@ int nilfs_attach_checkpoint(struct nilfs_sb_info *sbi, __u64 cno) |
3153 | nilfs_mdt_destroy(sbi->s_ifile); |
3154 | sbi->s_ifile = NULL; |
3155 | |
3156 | + delist: |
3157 | down_write(&nilfs->ns_super_sem); |
3158 | list_del_init(&sbi->s_list); |
3159 | up_write(&nilfs->ns_super_sem); |
3160 | diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c |
3161 | index da70229..a76e0aa 100644 |
3162 | --- a/fs/ocfs2/acl.c |
3163 | +++ b/fs/ocfs2/acl.c |
3164 | @@ -290,12 +290,30 @@ static int ocfs2_set_acl(handle_t *handle, |
3165 | |
3166 | int ocfs2_check_acl(struct inode *inode, int mask) |
3167 | { |
3168 | - struct posix_acl *acl = ocfs2_get_acl(inode, ACL_TYPE_ACCESS); |
3169 | + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
3170 | + struct buffer_head *di_bh = NULL; |
3171 | + struct posix_acl *acl; |
3172 | + int ret = -EAGAIN; |
3173 | |
3174 | - if (IS_ERR(acl)) |
3175 | + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) |
3176 | + return ret; |
3177 | + |
3178 | + ret = ocfs2_read_inode_block(inode, &di_bh); |
3179 | + if (ret < 0) { |
3180 | + mlog_errno(ret); |
3181 | + return ret; |
3182 | + } |
3183 | + |
3184 | + acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, di_bh); |
3185 | + |
3186 | + brelse(di_bh); |
3187 | + |
3188 | + if (IS_ERR(acl)) { |
3189 | + mlog_errno(PTR_ERR(acl)); |
3190 | return PTR_ERR(acl); |
3191 | + } |
3192 | if (acl) { |
3193 | - int ret = posix_acl_permission(inode, acl, mask); |
3194 | + ret = posix_acl_permission(inode, acl, mask); |
3195 | posix_acl_release(acl); |
3196 | return ret; |
3197 | } |
3198 | @@ -344,7 +362,7 @@ int ocfs2_init_acl(handle_t *handle, |
3199 | { |
3200 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); |
3201 | struct posix_acl *acl = NULL; |
3202 | - int ret = 0; |
3203 | + int ret = 0, ret2; |
3204 | mode_t mode; |
3205 | |
3206 | if (!S_ISLNK(inode->i_mode)) { |
3207 | @@ -381,7 +399,12 @@ int ocfs2_init_acl(handle_t *handle, |
3208 | mode = inode->i_mode; |
3209 | ret = posix_acl_create_masq(clone, &mode); |
3210 | if (ret >= 0) { |
3211 | - ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); |
3212 | + ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode); |
3213 | + if (ret2) { |
3214 | + mlog_errno(ret2); |
3215 | + ret = ret2; |
3216 | + goto cleanup; |
3217 | + } |
3218 | if (ret > 0) { |
3219 | ret = ocfs2_set_acl(handle, inode, |
3220 | di_bh, ACL_TYPE_ACCESS, |
3221 | diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c |
3222 | index 94b97fc..ffb4c68 100644 |
3223 | --- a/fs/ocfs2/dlm/dlmmaster.c |
3224 | +++ b/fs/ocfs2/dlm/dlmmaster.c |
3225 | @@ -511,8 +511,6 @@ static void dlm_lockres_release(struct kref *kref) |
3226 | |
3227 | atomic_dec(&dlm->res_cur_count); |
3228 | |
3229 | - dlm_put(dlm); |
3230 | - |
3231 | if (!hlist_unhashed(&res->hash_node) || |
3232 | !list_empty(&res->granted) || |
3233 | !list_empty(&res->converting) || |
3234 | @@ -585,8 +583,6 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm, |
3235 | res->migration_pending = 0; |
3236 | res->inflight_locks = 0; |
3237 | |
3238 | - /* put in dlm_lockres_release */ |
3239 | - dlm_grab(dlm); |
3240 | res->dlm = dlm; |
3241 | |
3242 | kref_init(&res->refs); |
3243 | @@ -3050,8 +3046,6 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, |
3244 | /* check for pre-existing lock */ |
3245 | spin_lock(&dlm->spinlock); |
3246 | res = __dlm_lookup_lockres(dlm, name, namelen, hash); |
3247 | - spin_lock(&dlm->master_lock); |
3248 | - |
3249 | if (res) { |
3250 | spin_lock(&res->spinlock); |
3251 | if (res->state & DLM_LOCK_RES_RECOVERING) { |
3252 | @@ -3069,14 +3063,15 @@ int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, |
3253 | spin_unlock(&res->spinlock); |
3254 | } |
3255 | |
3256 | + spin_lock(&dlm->master_lock); |
3257 | /* ignore status. only nonzero status would BUG. */ |
3258 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, |
3259 | name, namelen, |
3260 | migrate->new_master, |
3261 | migrate->master); |
3262 | |
3263 | -unlock: |
3264 | spin_unlock(&dlm->master_lock); |
3265 | +unlock: |
3266 | spin_unlock(&dlm->spinlock); |
3267 | |
3268 | if (oldmle) { |
3269 | diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c |
3270 | index 9dfaac7..aaaffbc 100644 |
3271 | --- a/fs/ocfs2/dlm/dlmrecovery.c |
3272 | +++ b/fs/ocfs2/dlm/dlmrecovery.c |
3273 | @@ -1997,6 +1997,8 @@ void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm, |
3274 | struct list_head *queue; |
3275 | struct dlm_lock *lock, *next; |
3276 | |
3277 | + assert_spin_locked(&dlm->spinlock); |
3278 | + assert_spin_locked(&res->spinlock); |
3279 | res->state |= DLM_LOCK_RES_RECOVERING; |
3280 | if (!list_empty(&res->recovering)) { |
3281 | mlog(0, |
3282 | @@ -2326,19 +2328,15 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node) |
3283 | /* zero the lvb if necessary */ |
3284 | dlm_revalidate_lvb(dlm, res, dead_node); |
3285 | if (res->owner == dead_node) { |
3286 | - if (res->state & DLM_LOCK_RES_DROPPING_REF) |
3287 | - mlog(0, "%s:%.*s: owned by " |
3288 | - "dead node %u, this node was " |
3289 | - "dropping its ref when it died. " |
3290 | - "continue, dropping the flag.\n", |
3291 | - dlm->name, res->lockname.len, |
3292 | - res->lockname.name, dead_node); |
3293 | - |
3294 | - /* the wake_up for this will happen when the |
3295 | - * RECOVERING flag is dropped later */ |
3296 | - res->state &= ~DLM_LOCK_RES_DROPPING_REF; |
3297 | + if (res->state & DLM_LOCK_RES_DROPPING_REF) { |
3298 | + mlog(ML_NOTICE, "Ignore %.*s for " |
3299 | + "recovery as it is being freed\n", |
3300 | + res->lockname.len, |
3301 | + res->lockname.name); |
3302 | + } else |
3303 | + dlm_move_lockres_to_recovery_list(dlm, |
3304 | + res); |
3305 | |
3306 | - dlm_move_lockres_to_recovery_list(dlm, res); |
3307 | } else if (res->owner == dlm->node_num) { |
3308 | dlm_free_dead_locks(dlm, res, dead_node); |
3309 | __dlm_lockres_calc_usage(dlm, res); |
3310 | diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c |
3311 | index d4f73ca..2211acf 100644 |
3312 | --- a/fs/ocfs2/dlm/dlmthread.c |
3313 | +++ b/fs/ocfs2/dlm/dlmthread.c |
3314 | @@ -92,19 +92,27 @@ int __dlm_lockres_has_locks(struct dlm_lock_resource *res) |
3315 | * truly ready to be freed. */ |
3316 | int __dlm_lockres_unused(struct dlm_lock_resource *res) |
3317 | { |
3318 | - if (!__dlm_lockres_has_locks(res) && |
3319 | - (list_empty(&res->dirty) && !(res->state & DLM_LOCK_RES_DIRTY))) { |
3320 | - /* try not to scan the bitmap unless the first two |
3321 | - * conditions are already true */ |
3322 | - int bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); |
3323 | - if (bit >= O2NM_MAX_NODES) { |
3324 | - /* since the bit for dlm->node_num is not |
3325 | - * set, inflight_locks better be zero */ |
3326 | - BUG_ON(res->inflight_locks != 0); |
3327 | - return 1; |
3328 | - } |
3329 | - } |
3330 | - return 0; |
3331 | + int bit; |
3332 | + |
3333 | + if (__dlm_lockres_has_locks(res)) |
3334 | + return 0; |
3335 | + |
3336 | + if (!list_empty(&res->dirty) || res->state & DLM_LOCK_RES_DIRTY) |
3337 | + return 0; |
3338 | + |
3339 | + if (res->state & DLM_LOCK_RES_RECOVERING) |
3340 | + return 0; |
3341 | + |
3342 | + bit = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); |
3343 | + if (bit < O2NM_MAX_NODES) |
3344 | + return 0; |
3345 | + |
3346 | + /* |
3347 | + * since the bit for dlm->node_num is not set, inflight_locks better |
3348 | + * be zero |
3349 | + */ |
3350 | + BUG_ON(res->inflight_locks != 0); |
3351 | + return 1; |
3352 | } |
3353 | |
3354 | |
3355 | @@ -152,45 +160,25 @@ void dlm_lockres_calc_usage(struct dlm_ctxt *dlm, |
3356 | spin_unlock(&dlm->spinlock); |
3357 | } |
3358 | |
3359 | -static int dlm_purge_lockres(struct dlm_ctxt *dlm, |
3360 | +static void dlm_purge_lockres(struct dlm_ctxt *dlm, |
3361 | struct dlm_lock_resource *res) |
3362 | { |
3363 | int master; |
3364 | int ret = 0; |
3365 | |
3366 | - spin_lock(&res->spinlock); |
3367 | - if (!__dlm_lockres_unused(res)) { |
3368 | - mlog(0, "%s:%.*s: tried to purge but not unused\n", |
3369 | - dlm->name, res->lockname.len, res->lockname.name); |
3370 | - __dlm_print_one_lock_resource(res); |
3371 | - spin_unlock(&res->spinlock); |
3372 | - BUG(); |
3373 | - } |
3374 | - |
3375 | - if (res->state & DLM_LOCK_RES_MIGRATING) { |
3376 | - mlog(0, "%s:%.*s: Delay dropref as this lockres is " |
3377 | - "being remastered\n", dlm->name, res->lockname.len, |
3378 | - res->lockname.name); |
3379 | - /* Re-add the lockres to the end of the purge list */ |
3380 | - if (!list_empty(&res->purge)) { |
3381 | - list_del_init(&res->purge); |
3382 | - list_add_tail(&res->purge, &dlm->purge_list); |
3383 | - } |
3384 | - spin_unlock(&res->spinlock); |
3385 | - return 0; |
3386 | - } |
3387 | + assert_spin_locked(&dlm->spinlock); |
3388 | + assert_spin_locked(&res->spinlock); |
3389 | |
3390 | master = (res->owner == dlm->node_num); |
3391 | |
3392 | - if (!master) |
3393 | - res->state |= DLM_LOCK_RES_DROPPING_REF; |
3394 | - spin_unlock(&res->spinlock); |
3395 | |
3396 | mlog(0, "purging lockres %.*s, master = %d\n", res->lockname.len, |
3397 | res->lockname.name, master); |
3398 | |
3399 | if (!master) { |
3400 | + res->state |= DLM_LOCK_RES_DROPPING_REF; |
3401 | /* drop spinlock... retake below */ |
3402 | + spin_unlock(&res->spinlock); |
3403 | spin_unlock(&dlm->spinlock); |
3404 | |
3405 | spin_lock(&res->spinlock); |
3406 | @@ -208,31 +196,35 @@ static int dlm_purge_lockres(struct dlm_ctxt *dlm, |
3407 | mlog(0, "%s:%.*s: dlm_deref_lockres returned %d\n", |
3408 | dlm->name, res->lockname.len, res->lockname.name, ret); |
3409 | spin_lock(&dlm->spinlock); |
3410 | + spin_lock(&res->spinlock); |
3411 | } |
3412 | |
3413 | - spin_lock(&res->spinlock); |
3414 | if (!list_empty(&res->purge)) { |
3415 | mlog(0, "removing lockres %.*s:%p from purgelist, " |
3416 | "master = %d\n", res->lockname.len, res->lockname.name, |
3417 | res, master); |
3418 | list_del_init(&res->purge); |
3419 | - spin_unlock(&res->spinlock); |
3420 | dlm_lockres_put(res); |
3421 | dlm->purge_count--; |
3422 | - } else |
3423 | - spin_unlock(&res->spinlock); |
3424 | + } |
3425 | + |
3426 | + if (!__dlm_lockres_unused(res)) { |
3427 | + mlog(ML_ERROR, "found lockres %s:%.*s: in use after deref\n", |
3428 | + dlm->name, res->lockname.len, res->lockname.name); |
3429 | + __dlm_print_one_lock_resource(res); |
3430 | + BUG(); |
3431 | + } |
3432 | |
3433 | __dlm_unhash_lockres(res); |
3434 | |
3435 | /* lockres is not in the hash now. drop the flag and wake up |
3436 | * any processes waiting in dlm_get_lock_resource. */ |
3437 | if (!master) { |
3438 | - spin_lock(&res->spinlock); |
3439 | res->state &= ~DLM_LOCK_RES_DROPPING_REF; |
3440 | spin_unlock(&res->spinlock); |
3441 | wake_up(&res->wq); |
3442 | - } |
3443 | - return 0; |
3444 | + } else |
3445 | + spin_unlock(&res->spinlock); |
3446 | } |
3447 | |
3448 | static void dlm_run_purge_list(struct dlm_ctxt *dlm, |
3449 | @@ -251,17 +243,7 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm, |
3450 | lockres = list_entry(dlm->purge_list.next, |
3451 | struct dlm_lock_resource, purge); |
3452 | |
3453 | - /* Status of the lockres *might* change so double |
3454 | - * check. If the lockres is unused, holding the dlm |
3455 | - * spinlock will prevent people from getting and more |
3456 | - * refs on it -- there's no need to keep the lockres |
3457 | - * spinlock. */ |
3458 | spin_lock(&lockres->spinlock); |
3459 | - unused = __dlm_lockres_unused(lockres); |
3460 | - spin_unlock(&lockres->spinlock); |
3461 | - |
3462 | - if (!unused) |
3463 | - continue; |
3464 | |
3465 | purge_jiffies = lockres->last_used + |
3466 | msecs_to_jiffies(DLM_PURGE_INTERVAL_MS); |
3467 | @@ -273,15 +255,29 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm, |
3468 | * in tail order, we can stop at the first |
3469 | * unpurgable resource -- anyone added after |
3470 | * him will have a greater last_used value */ |
3471 | + spin_unlock(&lockres->spinlock); |
3472 | break; |
3473 | } |
3474 | |
3475 | + /* Status of the lockres *might* change so double |
3476 | + * check. If the lockres is unused, holding the dlm |
3477 | + * spinlock will prevent people from getting and more |
3478 | + * refs on it. */ |
3479 | + unused = __dlm_lockres_unused(lockres); |
3480 | + if (!unused || |
3481 | + (lockres->state & DLM_LOCK_RES_MIGRATING)) { |
3482 | + mlog(0, "lockres %s:%.*s: is in use or " |
3483 | + "being remastered, used %d, state %d\n", |
3484 | + dlm->name, lockres->lockname.len, |
3485 | + lockres->lockname.name, !unused, lockres->state); |
3486 | + list_move_tail(&dlm->purge_list, &lockres->purge); |
3487 | + spin_unlock(&lockres->spinlock); |
3488 | + continue; |
3489 | + } |
3490 | + |
3491 | dlm_lockres_get(lockres); |
3492 | |
3493 | - /* This may drop and reacquire the dlm spinlock if it |
3494 | - * has to do migration. */ |
3495 | - if (dlm_purge_lockres(dlm, lockres)) |
3496 | - BUG(); |
3497 | + dlm_purge_lockres(dlm, lockres); |
3498 | |
3499 | dlm_lockres_put(lockres); |
3500 | |
3501 | diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c |
3502 | index 3ac5aa7..73a11cc 100644 |
3503 | --- a/fs/ocfs2/refcounttree.c |
3504 | +++ b/fs/ocfs2/refcounttree.c |
3505 | @@ -2436,16 +2436,26 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, |
3506 | len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) + |
3507 | le32_to_cpu(rec.r_clusters)) - cpos; |
3508 | /* |
3509 | - * If the refcount rec already exist, cool. We just need |
3510 | - * to check whether there is a split. Otherwise we just need |
3511 | - * to increase the refcount. |
3512 | - * If we will insert one, increases recs_add. |
3513 | - * |
3514 | * We record all the records which will be inserted to the |
3515 | * same refcount block, so that we can tell exactly whether |
3516 | * we need a new refcount block or not. |
3517 | + * |
3518 | + * If we will insert a new one, this is easy and only happens |
3519 | + * during adding refcounted flag to the extent, so we don't |
3520 | + * have a chance of spliting. We just need one record. |
3521 | + * |
3522 | + * If the refcount rec already exists, that would be a little |
3523 | + * complicated. we may have to: |
3524 | + * 1) split at the beginning if the start pos isn't aligned. |
3525 | + * we need 1 more record in this case. |
3526 | + * 2) split int the end if the end pos isn't aligned. |
3527 | + * we need 1 more record in this case. |
3528 | + * 3) split in the middle because of file system fragmentation. |
3529 | + * we need 2 more records in this case(we can't detect this |
3530 | + * beforehand, so always think of the worst case). |
3531 | */ |
3532 | if (rec.r_refcount) { |
3533 | + recs_add += 2; |
3534 | /* Check whether we need a split at the beginning. */ |
3535 | if (cpos == start_cpos && |
3536 | cpos != le64_to_cpu(rec.r_cpos)) |
3537 | diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h |
3538 | index e5039a2..103f08a 100644 |
3539 | --- a/include/acpi/platform/aclinux.h |
3540 | +++ b/include/acpi/platform/aclinux.h |
3541 | @@ -148,13 +148,17 @@ static inline void *acpi_os_acquire_object(acpi_cache_t * cache) |
3542 | #define ACPI_ALLOCATE_ZEROED(a) acpi_os_allocate_zeroed(a) |
3543 | #define ACPI_FREE(a) kfree(a) |
3544 | |
3545 | -/* Used within ACPICA to show where it is safe to preempt execution */ |
3546 | -#include <linux/hardirq.h> |
3547 | +#ifndef CONFIG_PREEMPT |
3548 | +/* |
3549 | + * Used within ACPICA to show where it is safe to preempt execution |
3550 | + * when CONFIG_PREEMPT=n |
3551 | + */ |
3552 | #define ACPI_PREEMPTION_POINT() \ |
3553 | do { \ |
3554 | - if (!in_atomic_preempt_off() && !irqs_disabled()) \ |
3555 | + if (!irqs_disabled()) \ |
3556 | cond_resched(); \ |
3557 | } while (0) |
3558 | +#endif |
3559 | |
3560 | #endif /* __KERNEL__ */ |
3561 | |
3562 | diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h |
3563 | index b8bb9a6..ee7e258 100644 |
3564 | --- a/include/linux/mm_types.h |
3565 | +++ b/include/linux/mm_types.h |
3566 | @@ -134,7 +134,7 @@ struct vm_area_struct { |
3567 | within vm_mm. */ |
3568 | |
3569 | /* linked list of VM areas per task, sorted by address */ |
3570 | - struct vm_area_struct *vm_next; |
3571 | + struct vm_area_struct *vm_next, *vm_prev; |
3572 | |
3573 | pgprot_t vm_page_prot; /* Access permissions of this VMA. */ |
3574 | unsigned long vm_flags; /* Flags, see mm.h. */ |
3575 | diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h |
3576 | index f43e9b4..23cc10f 100644 |
3577 | --- a/include/linux/mtd/flashchip.h |
3578 | +++ b/include/linux/mtd/flashchip.h |
3579 | @@ -92,7 +92,7 @@ struct flchip { |
3580 | /* This is used to handle contention on write/erase operations |
3581 | between partitions of the same physical chip. */ |
3582 | struct flchip_shared { |
3583 | - spinlock_t lock; |
3584 | + struct mutex lock; |
3585 | struct flchip *writing; |
3586 | struct flchip *erasing; |
3587 | }; |
3588 | diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h |
3589 | index f89e7fd..eb674b7 100644 |
3590 | --- a/include/linux/skbuff.h |
3591 | +++ b/include/linux/skbuff.h |
3592 | @@ -169,6 +169,7 @@ struct skb_shared_hwtstamps { |
3593 | * @software: generate software time stamp |
3594 | * @in_progress: device driver is going to provide |
3595 | * hardware time stamp |
3596 | + * @prevent_sk_orphan: make sk reference available on driver level |
3597 | * @flags: all shared_tx flags |
3598 | * |
3599 | * These flags are attached to packets as part of the |
3600 | @@ -178,7 +179,8 @@ union skb_shared_tx { |
3601 | struct { |
3602 | __u8 hardware:1, |
3603 | software:1, |
3604 | - in_progress:1; |
3605 | + in_progress:1, |
3606 | + prevent_sk_orphan:1; |
3607 | }; |
3608 | __u8 flags; |
3609 | }; |
3610 | diff --git a/include/linux/tty.h b/include/linux/tty.h |
3611 | index 931078b..7802a24 100644 |
3612 | --- a/include/linux/tty.h |
3613 | +++ b/include/linux/tty.h |
3614 | @@ -552,6 +552,9 @@ static inline void tty_audit_push_task(struct task_struct *tsk, |
3615 | } |
3616 | #endif |
3617 | |
3618 | +/* tty_io.c */ |
3619 | +extern int __init tty_init(void); |
3620 | + |
3621 | /* tty_ioctl.c */ |
3622 | extern int n_tty_ioctl_helper(struct tty_struct *tty, struct file *file, |
3623 | unsigned int cmd, unsigned long arg); |
3624 | diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h |
3625 | index 6a664c3..7dc97d1 100644 |
3626 | --- a/include/sound/emu10k1.h |
3627 | +++ b/include/sound/emu10k1.h |
3628 | @@ -1707,6 +1707,7 @@ struct snd_emu10k1 { |
3629 | unsigned int card_type; /* EMU10K1_CARD_* */ |
3630 | unsigned int ecard_ctrl; /* ecard control bits */ |
3631 | unsigned long dma_mask; /* PCI DMA mask */ |
3632 | + unsigned int delay_pcm_irq; /* in samples */ |
3633 | int max_cache_pages; /* max memory size / PAGE_SIZE */ |
3634 | struct snd_dma_buffer silent_page; /* silent page */ |
3635 | struct snd_dma_buffer ptb_pages; /* page table pages */ |
3636 | diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h |
3637 | index 9496b96..fa8223a 100644 |
3638 | --- a/include/trace/events/timer.h |
3639 | +++ b/include/trace/events/timer.h |
3640 | @@ -74,14 +74,16 @@ TRACE_EVENT(timer_expire_entry, |
3641 | TP_STRUCT__entry( |
3642 | __field( void *, timer ) |
3643 | __field( unsigned long, now ) |
3644 | + __field( void *, function) |
3645 | ), |
3646 | |
3647 | TP_fast_assign( |
3648 | __entry->timer = timer; |
3649 | __entry->now = jiffies; |
3650 | + __entry->function = timer->function; |
3651 | ), |
3652 | |
3653 | - TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) |
3654 | + TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now) |
3655 | ); |
3656 | |
3657 | /** |
3658 | @@ -213,14 +215,16 @@ TRACE_EVENT(hrtimer_expire_entry, |
3659 | TP_STRUCT__entry( |
3660 | __field( void *, hrtimer ) |
3661 | __field( s64, now ) |
3662 | + __field( void *, function) |
3663 | ), |
3664 | |
3665 | TP_fast_assign( |
3666 | __entry->hrtimer = hrtimer; |
3667 | __entry->now = now->tv64; |
3668 | + __entry->function = hrtimer->function; |
3669 | ), |
3670 | |
3671 | - TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, |
3672 | + TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, |
3673 | (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) |
3674 | ); |
3675 | |
3676 | diff --git a/kernel/fork.c b/kernel/fork.c |
3677 | index b6cce14..e96c0cd 100644 |
3678 | --- a/kernel/fork.c |
3679 | +++ b/kernel/fork.c |
3680 | @@ -300,7 +300,7 @@ out: |
3681 | #ifdef CONFIG_MMU |
3682 | static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
3683 | { |
3684 | - struct vm_area_struct *mpnt, *tmp, **pprev; |
3685 | + struct vm_area_struct *mpnt, *tmp, *prev, **pprev; |
3686 | struct rb_node **rb_link, *rb_parent; |
3687 | int retval; |
3688 | unsigned long charge; |
3689 | @@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
3690 | if (retval) |
3691 | goto out; |
3692 | |
3693 | + prev = NULL; |
3694 | for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { |
3695 | struct file *file; |
3696 | |
3697 | @@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
3698 | goto fail_nomem_anon_vma_fork; |
3699 | tmp->vm_flags &= ~VM_LOCKED; |
3700 | tmp->vm_mm = mm; |
3701 | - tmp->vm_next = NULL; |
3702 | + tmp->vm_next = tmp->vm_prev = NULL; |
3703 | file = tmp->vm_file; |
3704 | if (file) { |
3705 | struct inode *inode = file->f_path.dentry->d_inode; |
3706 | @@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) |
3707 | */ |
3708 | *pprev = tmp; |
3709 | pprev = &tmp->vm_next; |
3710 | + tmp->vm_prev = prev; |
3711 | + prev = tmp; |
3712 | |
3713 | __vma_link_rb(mm, tmp, rb_link, rb_parent); |
3714 | rb_link = &tmp->vm_rb.rb_right; |
3715 | diff --git a/kernel/sched.c b/kernel/sched.c |
3716 | index 63b4a14..6d0dbeb 100644 |
3717 | --- a/kernel/sched.c |
3718 | +++ b/kernel/sched.c |
3719 | @@ -3694,8 +3694,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) |
3720 | /* |
3721 | * Owner changed, break to re-assess state. |
3722 | */ |
3723 | - if (lock->owner != owner) |
3724 | + if (lock->owner != owner) { |
3725 | + /* |
3726 | + * If the lock has switched to a different owner, |
3727 | + * we likely have heavy contention. Return 0 to quit |
3728 | + * optimistic spinning and not contend further: |
3729 | + */ |
3730 | + if (lock->owner) |
3731 | + return 0; |
3732 | break; |
3733 | + } |
3734 | |
3735 | /* |
3736 | * Is that owner really running on that cpu? |
3737 | diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c |
3738 | index caf8d4d..b87c22f 100644 |
3739 | --- a/kernel/time/timekeeping.c |
3740 | +++ b/kernel/time/timekeeping.c |
3741 | @@ -736,6 +736,7 @@ static void timekeeping_adjust(s64 offset) |
3742 | static cycle_t logarithmic_accumulation(cycle_t offset, int shift) |
3743 | { |
3744 | u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift; |
3745 | + u64 raw_nsecs; |
3746 | |
3747 | /* If the offset is smaller then a shifted interval, do nothing */ |
3748 | if (offset < timekeeper.cycle_interval<<shift) |
3749 | @@ -752,12 +753,15 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) |
3750 | second_overflow(); |
3751 | } |
3752 | |
3753 | - /* Accumulate into raw time */ |
3754 | - raw_time.tv_nsec += timekeeper.raw_interval << shift;; |
3755 | - while (raw_time.tv_nsec >= NSEC_PER_SEC) { |
3756 | - raw_time.tv_nsec -= NSEC_PER_SEC; |
3757 | - raw_time.tv_sec++; |
3758 | + /* Accumulate raw time */ |
3759 | + raw_nsecs = timekeeper.raw_interval << shift; |
3760 | + raw_nsecs += raw_time.tv_nsec; |
3761 | + if (raw_nsecs >= NSEC_PER_SEC) { |
3762 | + u64 raw_secs = raw_nsecs; |
3763 | + raw_nsecs = do_div(raw_secs, NSEC_PER_SEC); |
3764 | + raw_time.tv_sec += raw_secs; |
3765 | } |
3766 | + raw_time.tv_nsec = raw_nsecs; |
3767 | |
3768 | /* Accumulate error between NTP and clock interval */ |
3769 | timekeeper.ntp_error += tick_length << shift; |
3770 | diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c |
3771 | index 1da7b6e..5ec8f1d 100644 |
3772 | --- a/kernel/trace/ring_buffer.c |
3773 | +++ b/kernel/trace/ring_buffer.c |
3774 | @@ -3868,6 +3868,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer, |
3775 | rpos = reader->read; |
3776 | pos += size; |
3777 | |
3778 | + if (rpos >= commit) |
3779 | + break; |
3780 | + |
3781 | event = rb_reader_event(cpu_buffer); |
3782 | size = rb_event_length(event); |
3783 | } while (len > size); |
3784 | diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c |
3785 | index 79f4bac..b4c179a 100644 |
3786 | --- a/kernel/trace/trace_functions_graph.c |
3787 | +++ b/kernel/trace/trace_functions_graph.c |
3788 | @@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter, |
3789 | * if the output fails. |
3790 | */ |
3791 | data->ent = *curr; |
3792 | - data->ret = *next; |
3793 | + /* |
3794 | + * If the next event is not a return type, then |
3795 | + * we only care about what type it is. Otherwise we can |
3796 | + * safely copy the entire event. |
3797 | + */ |
3798 | + if (next->ent.type == TRACE_GRAPH_RET) |
3799 | + data->ret = *next; |
3800 | + else |
3801 | + data->ret.ent.type = next->ent.type; |
3802 | } |
3803 | } |
3804 | |
3805 | diff --git a/mm/memory.c b/mm/memory.c |
3806 | index 307bf77..53cf85d 100644 |
3807 | --- a/mm/memory.c |
3808 | +++ b/mm/memory.c |
3809 | @@ -2770,11 +2770,18 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo |
3810 | { |
3811 | address &= PAGE_MASK; |
3812 | if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { |
3813 | - address -= PAGE_SIZE; |
3814 | - if (find_vma(vma->vm_mm, address) != vma) |
3815 | - return -ENOMEM; |
3816 | + struct vm_area_struct *prev = vma->vm_prev; |
3817 | + |
3818 | + /* |
3819 | + * Is there a mapping abutting this one below? |
3820 | + * |
3821 | + * That's only ok if it's the same stack mapping |
3822 | + * that has gotten split.. |
3823 | + */ |
3824 | + if (prev && prev->vm_end == address) |
3825 | + return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; |
3826 | |
3827 | - expand_stack(vma, address); |
3828 | + expand_stack(vma, address - PAGE_SIZE); |
3829 | } |
3830 | return 0; |
3831 | } |
3832 | diff --git a/mm/mlock.c b/mm/mlock.c |
3833 | index 49e5e4c..cbae7c5 100644 |
3834 | --- a/mm/mlock.c |
3835 | +++ b/mm/mlock.c |
3836 | @@ -135,6 +135,19 @@ void munlock_vma_page(struct page *page) |
3837 | } |
3838 | } |
3839 | |
3840 | +/* Is the vma a continuation of the stack vma above it? */ |
3841 | +static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) |
3842 | +{ |
3843 | + return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); |
3844 | +} |
3845 | + |
3846 | +static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) |
3847 | +{ |
3848 | + return (vma->vm_flags & VM_GROWSDOWN) && |
3849 | + (vma->vm_start == addr) && |
3850 | + !vma_stack_continue(vma->vm_prev, addr); |
3851 | +} |
3852 | + |
3853 | /** |
3854 | * __mlock_vma_pages_range() - mlock a range of pages in the vma. |
3855 | * @vma: target vma |
3856 | @@ -168,11 +181,9 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma, |
3857 | gup_flags |= FOLL_WRITE; |
3858 | |
3859 | /* We don't try to access the guard page of a stack vma */ |
3860 | - if (vma->vm_flags & VM_GROWSDOWN) { |
3861 | - if (start == vma->vm_start) { |
3862 | - start += PAGE_SIZE; |
3863 | - nr_pages--; |
3864 | - } |
3865 | + if (stack_guard_page(vma, start)) { |
3866 | + addr += PAGE_SIZE; |
3867 | + nr_pages--; |
3868 | } |
3869 | |
3870 | while (nr_pages > 0) { |
3871 | diff --git a/mm/mmap.c b/mm/mmap.c |
3872 | index 456ec6f..3867cfc 100644 |
3873 | --- a/mm/mmap.c |
3874 | +++ b/mm/mmap.c |
3875 | @@ -388,17 +388,23 @@ static inline void |
3876 | __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, |
3877 | struct vm_area_struct *prev, struct rb_node *rb_parent) |
3878 | { |
3879 | + struct vm_area_struct *next; |
3880 | + |
3881 | + vma->vm_prev = prev; |
3882 | if (prev) { |
3883 | - vma->vm_next = prev->vm_next; |
3884 | + next = prev->vm_next; |
3885 | prev->vm_next = vma; |
3886 | } else { |
3887 | mm->mmap = vma; |
3888 | if (rb_parent) |
3889 | - vma->vm_next = rb_entry(rb_parent, |
3890 | + next = rb_entry(rb_parent, |
3891 | struct vm_area_struct, vm_rb); |
3892 | else |
3893 | - vma->vm_next = NULL; |
3894 | + next = NULL; |
3895 | } |
3896 | + vma->vm_next = next; |
3897 | + if (next) |
3898 | + next->vm_prev = vma; |
3899 | } |
3900 | |
3901 | void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma, |
3902 | @@ -485,7 +491,11 @@ static inline void |
3903 | __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma, |
3904 | struct vm_area_struct *prev) |
3905 | { |
3906 | - prev->vm_next = vma->vm_next; |
3907 | + struct vm_area_struct *next = vma->vm_next; |
3908 | + |
3909 | + prev->vm_next = next; |
3910 | + if (next) |
3911 | + next->vm_prev = prev; |
3912 | rb_erase(&vma->vm_rb, &mm->mm_rb); |
3913 | if (mm->mmap_cache == vma) |
3914 | mm->mmap_cache = prev; |
3915 | @@ -1900,6 +1910,7 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, |
3916 | unsigned long addr; |
3917 | |
3918 | insertion_point = (prev ? &prev->vm_next : &mm->mmap); |
3919 | + vma->vm_prev = NULL; |
3920 | do { |
3921 | rb_erase(&vma->vm_rb, &mm->mm_rb); |
3922 | mm->map_count--; |
3923 | @@ -1907,6 +1918,8 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma, |
3924 | vma = vma->vm_next; |
3925 | } while (vma && vma->vm_start < end); |
3926 | *insertion_point = vma; |
3927 | + if (vma) |
3928 | + vma->vm_prev = prev; |
3929 | tail_vma->vm_next = NULL; |
3930 | if (mm->unmap_area == arch_unmap_area) |
3931 | addr = prev ? prev->vm_end : mm->mmap_base; |
3932 | diff --git a/mm/nommu.c b/mm/nommu.c |
3933 | index b76f3ee..e48b38c 100644 |
3934 | --- a/mm/nommu.c |
3935 | +++ b/mm/nommu.c |
3936 | @@ -609,7 +609,7 @@ static void protect_vma(struct vm_area_struct *vma, unsigned long flags) |
3937 | */ |
3938 | static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) |
3939 | { |
3940 | - struct vm_area_struct *pvma, **pp; |
3941 | + struct vm_area_struct *pvma, **pp, *next; |
3942 | struct address_space *mapping; |
3943 | struct rb_node **p, *parent; |
3944 | |
3945 | @@ -669,8 +669,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) |
3946 | break; |
3947 | } |
3948 | |
3949 | - vma->vm_next = *pp; |
3950 | + next = *pp; |
3951 | *pp = vma; |
3952 | + vma->vm_next = next; |
3953 | + if (next) |
3954 | + next->vm_prev = vma; |
3955 | } |
3956 | |
3957 | /* |
3958 | diff --git a/mm/slab.c b/mm/slab.c |
3959 | index e49f8f4..e4f747f 100644 |
3960 | --- a/mm/slab.c |
3961 | +++ b/mm/slab.c |
3962 | @@ -2331,8 +2331,8 @@ kmem_cache_create (const char *name, size_t size, size_t align, |
3963 | } |
3964 | #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC) |
3965 | if (size >= malloc_sizes[INDEX_L3 + 1].cs_size |
3966 | - && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) { |
3967 | - cachep->obj_offset += PAGE_SIZE - size; |
3968 | + && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) { |
3969 | + cachep->obj_offset += PAGE_SIZE - ALIGN(size, align); |
3970 | size = PAGE_SIZE; |
3971 | } |
3972 | #endif |
3973 | diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c |
3974 | index 753fc42..f49bcd9 100644 |
3975 | --- a/net/bridge/br_device.c |
3976 | +++ b/net/bridge/br_device.c |
3977 | @@ -22,7 +22,7 @@ |
3978 | #include <asm/uaccess.h> |
3979 | #include "br_private.h" |
3980 | |
3981 | -/* net device transmit always called with no BH (preempt_disabled) */ |
3982 | +/* net device transmit always called with BH disabled */ |
3983 | netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
3984 | { |
3985 | struct net_bridge *br = netdev_priv(dev); |
3986 | @@ -46,9 +46,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
3987 | skb_reset_mac_header(skb); |
3988 | skb_pull(skb, ETH_HLEN); |
3989 | |
3990 | + rcu_read_lock(); |
3991 | if (is_multicast_ether_addr(dest)) { |
3992 | - if (br_multicast_rcv(br, NULL, skb)) |
3993 | + if (br_multicast_rcv(br, NULL, skb)) { |
3994 | + kfree_skb(skb); |
3995 | goto out; |
3996 | + } |
3997 | |
3998 | mdst = br_mdb_get(br, skb); |
3999 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) |
4000 | @@ -61,6 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) |
4001 | br_flood_deliver(br, skb); |
4002 | |
4003 | out: |
4004 | + rcu_read_unlock(); |
4005 | return NETDEV_TX_OK; |
4006 | } |
4007 | |
4008 | diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c |
4009 | index b01dde3..7204ad3 100644 |
4010 | --- a/net/bridge/br_fdb.c |
4011 | +++ b/net/bridge/br_fdb.c |
4012 | @@ -214,7 +214,7 @@ void br_fdb_delete_by_port(struct net_bridge *br, |
4013 | spin_unlock_bh(&br->hash_lock); |
4014 | } |
4015 | |
4016 | -/* No locking or refcounting, assumes caller has no preempt (rcu_read_lock) */ |
4017 | +/* No locking or refcounting, assumes caller has rcu_read_lock */ |
4018 | struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br, |
4019 | const unsigned char *addr) |
4020 | { |
4021 | diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c |
4022 | index d36e700..114365c 100644 |
4023 | --- a/net/bridge/br_input.c |
4024 | +++ b/net/bridge/br_input.c |
4025 | @@ -37,7 +37,7 @@ static int br_pass_frame_up(struct sk_buff *skb) |
4026 | netif_receive_skb); |
4027 | } |
4028 | |
4029 | -/* note: already called with rcu_read_lock (preempt_disabled) */ |
4030 | +/* note: already called with rcu_read_lock */ |
4031 | int br_handle_frame_finish(struct sk_buff *skb) |
4032 | { |
4033 | const unsigned char *dest = eth_hdr(skb)->h_dest; |
4034 | @@ -108,7 +108,7 @@ drop: |
4035 | goto out; |
4036 | } |
4037 | |
4038 | -/* note: already called with rcu_read_lock (preempt_disabled) */ |
4039 | +/* note: already called with rcu_read_lock */ |
4040 | static int br_handle_local_finish(struct sk_buff *skb) |
4041 | { |
4042 | struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); |
4043 | @@ -133,7 +133,7 @@ static inline int is_link_local(const unsigned char *dest) |
4044 | /* |
4045 | * Called via br_handle_frame_hook. |
4046 | * Return NULL if skb is handled |
4047 | - * note: already called with rcu_read_lock (preempt_disabled) |
4048 | + * note: already called with rcu_read_lock |
4049 | */ |
4050 | struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) |
4051 | { |
4052 | diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c |
4053 | index 217bd22..5854e82 100644 |
4054 | --- a/net/bridge/br_stp_bpdu.c |
4055 | +++ b/net/bridge/br_stp_bpdu.c |
4056 | @@ -131,7 +131,7 @@ void br_send_tcn_bpdu(struct net_bridge_port *p) |
4057 | /* |
4058 | * Called from llc. |
4059 | * |
4060 | - * NO locks, but rcu_read_lock (preempt_disabled) |
4061 | + * NO locks, but rcu_read_lock |
4062 | */ |
4063 | void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb, |
4064 | struct net_device *dev) |
4065 | diff --git a/net/can/bcm.c b/net/can/bcm.c |
4066 | index 9c65e9d..08ffe9e 100644 |
4067 | --- a/net/can/bcm.c |
4068 | +++ b/net/can/bcm.c |
4069 | @@ -60,6 +60,13 @@ |
4070 | #include <net/sock.h> |
4071 | #include <net/net_namespace.h> |
4072 | |
4073 | +/* |
4074 | + * To send multiple CAN frame content within TX_SETUP or to filter |
4075 | + * CAN messages with multiplex index within RX_SETUP, the number of |
4076 | + * different filters is limited to 256 due to the one byte index value. |
4077 | + */ |
4078 | +#define MAX_NFRAMES 256 |
4079 | + |
4080 | /* use of last_frames[index].can_dlc */ |
4081 | #define RX_RECV 0x40 /* received data for this element */ |
4082 | #define RX_THR 0x80 /* element not been sent due to throttle feature */ |
4083 | @@ -89,16 +96,16 @@ struct bcm_op { |
4084 | struct list_head list; |
4085 | int ifindex; |
4086 | canid_t can_id; |
4087 | - int flags; |
4088 | + u32 flags; |
4089 | unsigned long frames_abs, frames_filtered; |
4090 | struct timeval ival1, ival2; |
4091 | struct hrtimer timer, thrtimer; |
4092 | struct tasklet_struct tsklet, thrtsklet; |
4093 | ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; |
4094 | int rx_ifindex; |
4095 | - int count; |
4096 | - int nframes; |
4097 | - int currframe; |
4098 | + u32 count; |
4099 | + u32 nframes; |
4100 | + u32 currframe; |
4101 | struct can_frame *frames; |
4102 | struct can_frame *last_frames; |
4103 | struct can_frame sframe; |
4104 | @@ -175,7 +182,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) |
4105 | |
4106 | seq_printf(m, "rx_op: %03X %-5s ", |
4107 | op->can_id, bcm_proc_getifname(ifname, op->ifindex)); |
4108 | - seq_printf(m, "[%d]%c ", op->nframes, |
4109 | + seq_printf(m, "[%u]%c ", op->nframes, |
4110 | (op->flags & RX_CHECK_DLC)?'d':' '); |
4111 | if (op->kt_ival1.tv64) |
4112 | seq_printf(m, "timeo=%lld ", |
4113 | @@ -198,7 +205,7 @@ static int bcm_proc_show(struct seq_file *m, void *v) |
4114 | |
4115 | list_for_each_entry(op, &bo->tx_ops, list) { |
4116 | |
4117 | - seq_printf(m, "tx_op: %03X %s [%d] ", |
4118 | + seq_printf(m, "tx_op: %03X %s [%u] ", |
4119 | op->can_id, |
4120 | bcm_proc_getifname(ifname, op->ifindex), |
4121 | op->nframes); |
4122 | @@ -283,7 +290,7 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, |
4123 | struct can_frame *firstframe; |
4124 | struct sockaddr_can *addr; |
4125 | struct sock *sk = op->sk; |
4126 | - int datalen = head->nframes * CFSIZ; |
4127 | + unsigned int datalen = head->nframes * CFSIZ; |
4128 | int err; |
4129 | |
4130 | skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); |
4131 | @@ -468,7 +475,7 @@ rx_changed_settime: |
4132 | * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly |
4133 | * received data stored in op->last_frames[] |
4134 | */ |
4135 | -static void bcm_rx_cmp_to_index(struct bcm_op *op, int index, |
4136 | +static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, |
4137 | const struct can_frame *rxdata) |
4138 | { |
4139 | /* |
4140 | @@ -554,7 +561,8 @@ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) |
4141 | /* |
4142 | * bcm_rx_do_flush - helper for bcm_rx_thr_flush |
4143 | */ |
4144 | -static inline int bcm_rx_do_flush(struct bcm_op *op, int update, int index) |
4145 | +static inline int bcm_rx_do_flush(struct bcm_op *op, int update, |
4146 | + unsigned int index) |
4147 | { |
4148 | if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { |
4149 | if (update) |
4150 | @@ -575,7 +583,7 @@ static int bcm_rx_thr_flush(struct bcm_op *op, int update) |
4151 | int updated = 0; |
4152 | |
4153 | if (op->nframes > 1) { |
4154 | - int i; |
4155 | + unsigned int i; |
4156 | |
4157 | /* for MUX filter we start at index 1 */ |
4158 | for (i = 1; i < op->nframes; i++) |
4159 | @@ -624,7 +632,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) |
4160 | { |
4161 | struct bcm_op *op = (struct bcm_op *)data; |
4162 | const struct can_frame *rxframe = (struct can_frame *)skb->data; |
4163 | - int i; |
4164 | + unsigned int i; |
4165 | |
4166 | /* disable timeout */ |
4167 | hrtimer_cancel(&op->timer); |
4168 | @@ -822,14 +830,15 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, |
4169 | { |
4170 | struct bcm_sock *bo = bcm_sk(sk); |
4171 | struct bcm_op *op; |
4172 | - int i, err; |
4173 | + unsigned int i; |
4174 | + int err; |
4175 | |
4176 | /* we need a real device to send frames */ |
4177 | if (!ifindex) |
4178 | return -ENODEV; |
4179 | |
4180 | - /* we need at least one can_frame */ |
4181 | - if (msg_head->nframes < 1) |
4182 | + /* check nframes boundaries - we need at least one can_frame */ |
4183 | + if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) |
4184 | return -EINVAL; |
4185 | |
4186 | /* check the given can_id */ |
4187 | @@ -993,6 +1002,10 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, |
4188 | msg_head->nframes = 0; |
4189 | } |
4190 | |
4191 | + /* the first element contains the mux-mask => MAX_NFRAMES + 1 */ |
4192 | + if (msg_head->nframes > MAX_NFRAMES + 1) |
4193 | + return -EINVAL; |
4194 | + |
4195 | if ((msg_head->flags & RX_RTR_FRAME) && |
4196 | ((msg_head->nframes != 1) || |
4197 | (!(msg_head->can_id & CAN_RTR_FLAG)))) |
4198 | diff --git a/net/can/raw.c b/net/can/raw.c |
4199 | index da99cf1..1650599 100644 |
4200 | --- a/net/can/raw.c |
4201 | +++ b/net/can/raw.c |
4202 | @@ -655,6 +655,10 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock, |
4203 | err = sock_tx_timestamp(msg, sk, skb_tx(skb)); |
4204 | if (err < 0) |
4205 | goto free_skb; |
4206 | + |
4207 | + /* to be able to check the received tx sock reference in raw_rcv() */ |
4208 | + skb_tx(skb)->prevent_sk_orphan = 1; |
4209 | + |
4210 | skb->dev = dev; |
4211 | skb->sk = sk; |
4212 | |
4213 | diff --git a/net/core/dev.c b/net/core/dev.c |
4214 | index 1f466e8..95cc486 100644 |
4215 | --- a/net/core/dev.c |
4216 | +++ b/net/core/dev.c |
4217 | @@ -2504,6 +2504,7 @@ int netif_rx(struct sk_buff *skb) |
4218 | struct rps_dev_flow voidflow, *rflow = &voidflow; |
4219 | int cpu; |
4220 | |
4221 | + preempt_disable(); |
4222 | rcu_read_lock(); |
4223 | |
4224 | cpu = get_rps_cpu(skb->dev, skb, &rflow); |
4225 | @@ -2513,6 +2514,7 @@ int netif_rx(struct sk_buff *skb) |
4226 | ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); |
4227 | |
4228 | rcu_read_unlock(); |
4229 | + preempt_enable(); |
4230 | } |
4231 | #else |
4232 | { |
4233 | @@ -3064,7 +3066,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
4234 | int mac_len; |
4235 | enum gro_result ret; |
4236 | |
4237 | - if (!(skb->dev->features & NETIF_F_GRO)) |
4238 | + if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb)) |
4239 | goto normal; |
4240 | |
4241 | if (skb_is_gso(skb) || skb_has_frags(skb)) |
4242 | @@ -3133,7 +3135,7 @@ pull: |
4243 | put_page(skb_shinfo(skb)->frags[0].page); |
4244 | memmove(skb_shinfo(skb)->frags, |
4245 | skb_shinfo(skb)->frags + 1, |
4246 | - --skb_shinfo(skb)->nr_frags); |
4247 | + --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t)); |
4248 | } |
4249 | } |
4250 | |
4251 | @@ -3151,9 +3153,6 @@ __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) |
4252 | { |
4253 | struct sk_buff *p; |
4254 | |
4255 | - if (netpoll_rx_on(skb)) |
4256 | - return GRO_NORMAL; |
4257 | - |
4258 | for (p = napi->gro_list; p; p = p->next) { |
4259 | NAPI_GRO_CB(p)->same_flow = |
4260 | (p->dev == skb->dev) && |
4261 | diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c |
4262 | index 65afeae..c259714 100644 |
4263 | --- a/net/ipv4/tcp.c |
4264 | +++ b/net/ipv4/tcp.c |
4265 | @@ -2176,6 +2176,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, |
4266 | GFP_KERNEL); |
4267 | if (cvp == NULL) |
4268 | return -ENOMEM; |
4269 | + |
4270 | + kref_init(&cvp->kref); |
4271 | } |
4272 | lock_sock(sk); |
4273 | tp->rx_opt.cookie_in_always = |
4274 | @@ -2190,12 +2192,11 @@ static int do_tcp_setsockopt(struct sock *sk, int level, |
4275 | */ |
4276 | kref_put(&tp->cookie_values->kref, |
4277 | tcp_cookie_values_release); |
4278 | - kref_init(&cvp->kref); |
4279 | - tp->cookie_values = cvp; |
4280 | } else { |
4281 | cvp = tp->cookie_values; |
4282 | } |
4283 | } |
4284 | + |
4285 | if (cvp != NULL) { |
4286 | cvp->cookie_desired = ctd.tcpct_cookie_desired; |
4287 | |
4288 | @@ -2209,6 +2210,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, |
4289 | cvp->s_data_desired = ctd.tcpct_s_data_desired; |
4290 | cvp->s_data_constant = 0; /* false */ |
4291 | } |
4292 | + |
4293 | + tp->cookie_values = cvp; |
4294 | } |
4295 | release_sock(sk); |
4296 | return err; |
4297 | diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c |
4298 | index a2eb965..54d7308 100644 |
4299 | --- a/net/netlink/af_netlink.c |
4300 | +++ b/net/netlink/af_netlink.c |
4301 | @@ -1400,7 +1400,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, |
4302 | struct netlink_sock *nlk = nlk_sk(sk); |
4303 | int noblock = flags&MSG_DONTWAIT; |
4304 | size_t copied; |
4305 | - struct sk_buff *skb, *frag __maybe_unused = NULL; |
4306 | + struct sk_buff *skb, *data_skb; |
4307 | int err; |
4308 | |
4309 | if (flags&MSG_OOB) |
4310 | @@ -1412,45 +1412,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, |
4311 | if (skb == NULL) |
4312 | goto out; |
4313 | |
4314 | + data_skb = skb; |
4315 | + |
4316 | #ifdef CONFIG_COMPAT_NETLINK_MESSAGES |
4317 | if (unlikely(skb_shinfo(skb)->frag_list)) { |
4318 | - bool need_compat = !!(flags & MSG_CMSG_COMPAT); |
4319 | - |
4320 | /* |
4321 | - * If this skb has a frag_list, then here that means that |
4322 | - * we will have to use the frag_list skb for compat tasks |
4323 | - * and the regular skb for non-compat tasks. |
4324 | + * If this skb has a frag_list, then here that means that we |
4325 | + * will have to use the frag_list skb's data for compat tasks |
4326 | + * and the regular skb's data for normal (non-compat) tasks. |
4327 | * |
4328 | - * The skb might (and likely will) be cloned, so we can't |
4329 | - * just reset frag_list and go on with things -- we need to |
4330 | - * keep that. For the compat case that's easy -- simply get |
4331 | - * a reference to the compat skb and free the regular one |
4332 | - * including the frag. For the non-compat case, we need to |
4333 | - * avoid sending the frag to the user -- so assign NULL but |
4334 | - * restore it below before freeing the skb. |
4335 | + * If we need to send the compat skb, assign it to the |
4336 | + * 'data_skb' variable so that it will be used below for data |
4337 | + * copying. We keep 'skb' for everything else, including |
4338 | + * freeing both later. |
4339 | */ |
4340 | - if (need_compat) { |
4341 | - struct sk_buff *compskb = skb_shinfo(skb)->frag_list; |
4342 | - skb_get(compskb); |
4343 | - kfree_skb(skb); |
4344 | - skb = compskb; |
4345 | - } else { |
4346 | - frag = skb_shinfo(skb)->frag_list; |
4347 | - skb_shinfo(skb)->frag_list = NULL; |
4348 | - } |
4349 | + if (flags & MSG_CMSG_COMPAT) |
4350 | + data_skb = skb_shinfo(skb)->frag_list; |
4351 | } |
4352 | #endif |
4353 | |
4354 | msg->msg_namelen = 0; |
4355 | |
4356 | - copied = skb->len; |
4357 | + copied = data_skb->len; |
4358 | if (len < copied) { |
4359 | msg->msg_flags |= MSG_TRUNC; |
4360 | copied = len; |
4361 | } |
4362 | |
4363 | - skb_reset_transport_header(skb); |
4364 | - err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); |
4365 | + skb_reset_transport_header(data_skb); |
4366 | + err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); |
4367 | |
4368 | if (msg->msg_name) { |
4369 | struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; |
4370 | @@ -1470,11 +1460,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, |
4371 | } |
4372 | siocb->scm->creds = *NETLINK_CREDS(skb); |
4373 | if (flags & MSG_TRUNC) |
4374 | - copied = skb->len; |
4375 | - |
4376 | -#ifdef CONFIG_COMPAT_NETLINK_MESSAGES |
4377 | - skb_shinfo(skb)->frag_list = frag; |
4378 | -#endif |
4379 | + copied = data_skb->len; |
4380 | |
4381 | skb_free_datagram(sk, skb); |
4382 | |
4383 | diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c |
4384 | index 724553e..abbf4fa 100644 |
4385 | --- a/net/sched/act_nat.c |
4386 | +++ b/net/sched/act_nat.c |
4387 | @@ -218,6 +218,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, |
4388 | if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph))) |
4389 | goto drop; |
4390 | |
4391 | + icmph = (void *)(skb_network_header(skb) + ihl); |
4392 | iph = (void *)(icmph + 1); |
4393 | if (egress) |
4394 | addr = iph->daddr; |
4395 | @@ -246,7 +247,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a, |
4396 | iph->saddr = new_addr; |
4397 | |
4398 | inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr, |
4399 | - 1); |
4400 | + 0); |
4401 | break; |
4402 | } |
4403 | default: |
4404 | diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c |
4405 | index c657628..a9be0ef 100644 |
4406 | --- a/net/sched/sch_sfq.c |
4407 | +++ b/net/sched/sch_sfq.c |
4408 | @@ -497,11 +497,22 @@ nla_put_failure: |
4409 | return -1; |
4410 | } |
4411 | |
4412 | +static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg) |
4413 | +{ |
4414 | + return NULL; |
4415 | +} |
4416 | + |
4417 | static unsigned long sfq_get(struct Qdisc *sch, u32 classid) |
4418 | { |
4419 | return 0; |
4420 | } |
4421 | |
4422 | +static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent, |
4423 | + u32 classid) |
4424 | +{ |
4425 | + return 0; |
4426 | +} |
4427 | + |
4428 | static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl) |
4429 | { |
4430 | struct sfq_sched_data *q = qdisc_priv(sch); |
4431 | @@ -554,8 +565,10 @@ static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg) |
4432 | } |
4433 | |
4434 | static const struct Qdisc_class_ops sfq_class_ops = { |
4435 | + .leaf = sfq_leaf, |
4436 | .get = sfq_get, |
4437 | .tcf_chain = sfq_find_tcf, |
4438 | + .bind_tcf = sfq_bind, |
4439 | .dump = sfq_dump_class, |
4440 | .dump_stats = sfq_dump_class_stats, |
4441 | .walk = sfq_walk, |
4442 | diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c |
4443 | index ef17fcf..e4be688 100644 |
4444 | --- a/net/wireless/mlme.c |
4445 | +++ b/net/wireless/mlme.c |
4446 | @@ -842,12 +842,18 @@ int cfg80211_mlme_action(struct cfg80211_registered_device *rdev, |
4447 | return -EINVAL; |
4448 | if (mgmt->u.action.category != WLAN_CATEGORY_PUBLIC) { |
4449 | /* Verify that we are associated with the destination AP */ |
4450 | + wdev_lock(wdev); |
4451 | + |
4452 | if (!wdev->current_bss || |
4453 | memcmp(wdev->current_bss->pub.bssid, mgmt->bssid, |
4454 | ETH_ALEN) != 0 || |
4455 | memcmp(wdev->current_bss->pub.bssid, mgmt->da, |
4456 | - ETH_ALEN) != 0) |
4457 | + ETH_ALEN) != 0) { |
4458 | + wdev_unlock(wdev); |
4459 | return -ENOTCONN; |
4460 | + } |
4461 | + wdev_unlock(wdev); |
4462 | + |
4463 | } |
4464 | |
4465 | if (memcmp(mgmt->sa, dev->dev_addr, ETH_ALEN) != 0) |
4466 | diff --git a/scripts/mkmakefile b/scripts/mkmakefile |
4467 | index 67d59c7..5325423 100644 |
4468 | --- a/scripts/mkmakefile |
4469 | +++ b/scripts/mkmakefile |
4470 | @@ -44,7 +44,9 @@ all: |
4471 | |
4472 | Makefile:; |
4473 | |
4474 | -\$(all) %/: all |
4475 | +\$(all): all |
4476 | @: |
4477 | |
4478 | +%/: all |
4479 | + @: |
4480 | EOF |
4481 | diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c |
4482 | index 303ac04..1990918 100644 |
4483 | --- a/sound/core/pcm_native.c |
4484 | +++ b/sound/core/pcm_native.c |
4485 | @@ -981,6 +981,10 @@ static int snd_pcm_do_pause(struct snd_pcm_substream *substream, int push) |
4486 | { |
4487 | if (substream->runtime->trigger_master != substream) |
4488 | return 0; |
4489 | + /* some drivers might use hw_ptr to recover from the pause - |
4490 | + update the hw_ptr now */ |
4491 | + if (push) |
4492 | + snd_pcm_update_hw_ptr(substream); |
4493 | /* The jiffies check in snd_pcm_update_hw_ptr*() is done by |
4494 | * a delta betwen the current jiffies, this gives a large enough |
4495 | * delta, effectively to skip the check once. |
4496 | diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c |
4497 | index 4203782..aff8387 100644 |
4498 | --- a/sound/pci/emu10k1/emu10k1.c |
4499 | +++ b/sound/pci/emu10k1/emu10k1.c |
4500 | @@ -52,6 +52,7 @@ static int max_synth_voices[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 64}; |
4501 | static int max_buffer_size[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 128}; |
4502 | static int enable_ir[SNDRV_CARDS]; |
4503 | static uint subsystem[SNDRV_CARDS]; /* Force card subsystem model */ |
4504 | +static uint delay_pcm_irq[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2}; |
4505 | |
4506 | module_param_array(index, int, NULL, 0444); |
4507 | MODULE_PARM_DESC(index, "Index value for the EMU10K1 soundcard."); |
4508 | @@ -73,6 +74,8 @@ module_param_array(enable_ir, bool, NULL, 0444); |
4509 | MODULE_PARM_DESC(enable_ir, "Enable IR."); |
4510 | module_param_array(subsystem, uint, NULL, 0444); |
4511 | MODULE_PARM_DESC(subsystem, "Force card subsystem model."); |
4512 | +module_param_array(delay_pcm_irq, uint, NULL, 0444); |
4513 | +MODULE_PARM_DESC(delay_pcm_irq, "Delay PCM interrupt by specified number of samples (default 0)."); |
4514 | /* |
4515 | * Class 0401: 1102:0008 (rev 00) Subsystem: 1102:1001 -> Audigy2 Value Model:SB0400 |
4516 | */ |
4517 | @@ -127,6 +130,7 @@ static int __devinit snd_card_emu10k1_probe(struct pci_dev *pci, |
4518 | &emu)) < 0) |
4519 | goto error; |
4520 | card->private_data = emu; |
4521 | + emu->delay_pcm_irq = delay_pcm_irq[dev] & 0x1f; |
4522 | if ((err = snd_emu10k1_pcm(emu, 0, NULL)) < 0) |
4523 | goto error; |
4524 | if ((err = snd_emu10k1_pcm_mic(emu, 1, NULL)) < 0) |
4525 | diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c |
4526 | index 55b83ef..622bace 100644 |
4527 | --- a/sound/pci/emu10k1/emupcm.c |
4528 | +++ b/sound/pci/emu10k1/emupcm.c |
4529 | @@ -332,7 +332,7 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, |
4530 | evoice->epcm->ccca_start_addr = start_addr + ccis; |
4531 | if (extra) { |
4532 | start_addr += ccis; |
4533 | - end_addr += ccis; |
4534 | + end_addr += ccis + emu->delay_pcm_irq; |
4535 | } |
4536 | if (stereo && !extra) { |
4537 | snd_emu10k1_ptr_write(emu, CPF, voice, CPF_STEREO_MASK); |
4538 | @@ -360,7 +360,9 @@ static void snd_emu10k1_pcm_init_voice(struct snd_emu10k1 *emu, |
4539 | /* Assumption that PT is already 0 so no harm overwriting */ |
4540 | snd_emu10k1_ptr_write(emu, PTRX, voice, (send_amount[0] << 8) | send_amount[1]); |
4541 | snd_emu10k1_ptr_write(emu, DSL, voice, end_addr | (send_amount[3] << 24)); |
4542 | - snd_emu10k1_ptr_write(emu, PSST, voice, start_addr | (send_amount[2] << 24)); |
4543 | + snd_emu10k1_ptr_write(emu, PSST, voice, |
4544 | + (start_addr + (extra ? emu->delay_pcm_irq : 0)) | |
4545 | + (send_amount[2] << 24)); |
4546 | if (emu->card_capabilities->emu_model) |
4547 | pitch_target = PITCH_48000; /* Disable interpolators on emu1010 card */ |
4548 | else |
4549 | @@ -732,6 +734,23 @@ static void snd_emu10k1_playback_stop_voice(struct snd_emu10k1 *emu, struct snd_ |
4550 | snd_emu10k1_ptr_write(emu, IP, voice, 0); |
4551 | } |
4552 | |
4553 | +static inline void snd_emu10k1_playback_mangle_extra(struct snd_emu10k1 *emu, |
4554 | + struct snd_emu10k1_pcm *epcm, |
4555 | + struct snd_pcm_substream *substream, |
4556 | + struct snd_pcm_runtime *runtime) |
4557 | +{ |
4558 | + unsigned int ptr, period_pos; |
4559 | + |
4560 | + /* try to sychronize the current position for the interrupt |
4561 | + source voice */ |
4562 | + period_pos = runtime->status->hw_ptr - runtime->hw_ptr_interrupt; |
4563 | + period_pos %= runtime->period_size; |
4564 | + ptr = snd_emu10k1_ptr_read(emu, CCCA, epcm->extra->number); |
4565 | + ptr &= ~0x00ffffff; |
4566 | + ptr |= epcm->ccca_start_addr + period_pos; |
4567 | + snd_emu10k1_ptr_write(emu, CCCA, epcm->extra->number, ptr); |
4568 | +} |
4569 | + |
4570 | static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, |
4571 | int cmd) |
4572 | { |
4573 | @@ -753,6 +772,8 @@ static int snd_emu10k1_playback_trigger(struct snd_pcm_substream *substream, |
4574 | /* follow thru */ |
4575 | case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: |
4576 | case SNDRV_PCM_TRIGGER_RESUME: |
4577 | + if (cmd == SNDRV_PCM_TRIGGER_PAUSE_RELEASE) |
4578 | + snd_emu10k1_playback_mangle_extra(emu, epcm, substream, runtime); |
4579 | mix = &emu->pcm_mixer[substream->number]; |
4580 | snd_emu10k1_playback_prepare_voice(emu, epcm->voices[0], 1, 0, mix); |
4581 | snd_emu10k1_playback_prepare_voice(emu, epcm->voices[1], 0, 0, mix); |
4582 | @@ -869,8 +890,9 @@ static snd_pcm_uframes_t snd_emu10k1_playback_pointer(struct snd_pcm_substream * |
4583 | #endif |
4584 | /* |
4585 | printk(KERN_DEBUG |
4586 | - "ptr = 0x%x, buffer_size = 0x%x, period_size = 0x%x\n", |
4587 | - ptr, runtime->buffer_size, runtime->period_size); |
4588 | + "ptr = 0x%lx, buffer_size = 0x%lx, period_size = 0x%lx\n", |
4589 | + (long)ptr, (long)runtime->buffer_size, |
4590 | + (long)runtime->period_size); |
4591 | */ |
4592 | return ptr; |
4593 | } |
4594 | diff --git a/sound/pci/emu10k1/memory.c b/sound/pci/emu10k1/memory.c |
4595 | index ffb1ddb..957a311 100644 |
4596 | --- a/sound/pci/emu10k1/memory.c |
4597 | +++ b/sound/pci/emu10k1/memory.c |
4598 | @@ -310,8 +310,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst |
4599 | if (snd_BUG_ON(!hdr)) |
4600 | return NULL; |
4601 | |
4602 | + idx = runtime->period_size >= runtime->buffer_size ? |
4603 | + (emu->delay_pcm_irq * 2) : 0; |
4604 | mutex_lock(&hdr->block_mutex); |
4605 | - blk = search_empty(emu, runtime->dma_bytes); |
4606 | + blk = search_empty(emu, runtime->dma_bytes + idx); |
4607 | if (blk == NULL) { |
4608 | mutex_unlock(&hdr->block_mutex); |
4609 | return NULL; |
4610 | diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c |
4611 | index 2bf2cb5..baadda4 100644 |
4612 | --- a/sound/pci/hda/patch_conexant.c |
4613 | +++ b/sound/pci/hda/patch_conexant.c |
4614 | @@ -2970,6 +2970,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { |
4615 | SND_PCI_QUIRK(0x1028, 0x02f5, "Dell", |
4616 | CXT5066_DELL_LAPTOP), |
4617 | SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT5066_OLPC_XO_1_5), |
4618 | + SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTO), |
4619 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTO), |
4620 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
4621 | SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), |
4622 | diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c |
4623 | index aa7cc51..6d9a542 100644 |
4624 | --- a/sound/pci/hda/patch_realtek.c |
4625 | +++ b/sound/pci/hda/patch_realtek.c |
4626 | @@ -6864,6 +6864,7 @@ static int patch_alc260(struct hda_codec *codec) |
4627 | |
4628 | spec->stream_analog_playback = &alc260_pcm_analog_playback; |
4629 | spec->stream_analog_capture = &alc260_pcm_analog_capture; |
4630 | + spec->stream_analog_alt_capture = &alc260_pcm_analog_capture; |
4631 | |
4632 | spec->stream_digital_playback = &alc260_pcm_digital_playback; |
4633 | spec->stream_digital_capture = &alc260_pcm_digital_capture; |
4634 | diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c |
4635 | index 6433e65..4677492 100644 |
4636 | --- a/sound/pci/intel8x0.c |
4637 | +++ b/sound/pci/intel8x0.c |
4638 | @@ -1776,6 +1776,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = { |
4639 | }, |
4640 | { |
4641 | .subvendor = 0x1014, |
4642 | + .subdevice = 0x0534, |
4643 | + .name = "ThinkPad X31", |
4644 | + .type = AC97_TUNE_INV_EAPD |
4645 | + }, |
4646 | + { |
4647 | + .subvendor = 0x1014, |
4648 | .subdevice = 0x1f00, |
4649 | .name = "MS-9128", |
4650 | .type = AC97_TUNE_ALC_JACK |
4651 | diff --git a/sound/pci/riptide/riptide.c b/sound/pci/riptide/riptide.c |
4652 | index ad44626..c737287 100644 |
4653 | --- a/sound/pci/riptide/riptide.c |
4654 | +++ b/sound/pci/riptide/riptide.c |
4655 | @@ -1224,15 +1224,14 @@ static int try_to_load_firmware(struct cmdif *cif, struct snd_riptide *chip) |
4656 | firmware.firmware.ASIC, firmware.firmware.CODEC, |
4657 | firmware.firmware.AUXDSP, firmware.firmware.PROG); |
4658 | |
4659 | + if (!chip) |
4660 | + return 1; |
4661 | + |
4662 | for (i = 0; i < FIRMWARE_VERSIONS; i++) { |
4663 | if (!memcmp(&firmware_versions[i], &firmware, sizeof(firmware))) |
4664 | - break; |
4665 | - } |
4666 | - if (i >= FIRMWARE_VERSIONS) |
4667 | - return 0; /* no match */ |
4668 | + return 1; /* OK */ |
4669 | |
4670 | - if (!chip) |
4671 | - return 1; /* OK */ |
4672 | + } |
4673 | |
4674 | snd_printdd("Writing Firmware\n"); |
4675 | if (!chip->fw_entry) { |
4676 | diff --git a/sound/soc/codecs/wm8580.c b/sound/soc/codecs/wm8580.c |
4677 | index c3571ee..72deeab 100644 |
4678 | --- a/sound/soc/codecs/wm8580.c |
4679 | +++ b/sound/soc/codecs/wm8580.c |
4680 | @@ -269,9 +269,9 @@ SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4, 2, 3, 1, 0), |
4681 | SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4, 4, 5, 1, 0), |
4682 | |
4683 | SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0), |
4684 | -SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 0), |
4685 | -SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 0), |
4686 | -SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 0), |
4687 | +SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1), |
4688 | +SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1), |
4689 | +SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1), |
4690 | |
4691 | SOC_DOUBLE("ADC Mute Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 0), |
4692 | SOC_SINGLE("ADC High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0), |
4693 | diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c |
4694 | index 4e212ed..f8154e6 100644 |
4695 | --- a/sound/soc/codecs/wm8776.c |
4696 | +++ b/sound/soc/codecs/wm8776.c |
4697 | @@ -178,13 +178,6 @@ static int wm8776_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) |
4698 | case SND_SOC_DAIFMT_LEFT_J: |
4699 | iface |= 0x0001; |
4700 | break; |
4701 | - /* FIXME: CHECK A/B */ |
4702 | - case SND_SOC_DAIFMT_DSP_A: |
4703 | - iface |= 0x0003; |
4704 | - break; |
4705 | - case SND_SOC_DAIFMT_DSP_B: |
4706 | - iface |= 0x0007; |
4707 | - break; |
4708 | default: |
4709 | return -EINVAL; |
4710 | } |
4711 | diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c |
4712 | index 472af38..adbc68c 100644 |
4713 | --- a/sound/soc/soc-cache.c |
4714 | +++ b/sound/soc/soc-cache.c |
4715 | @@ -340,7 +340,7 @@ static unsigned int snd_soc_16_8_read_i2c(struct snd_soc_codec *codec, |
4716 | static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec, |
4717 | unsigned int reg) |
4718 | { |
4719 | - u16 *cache = codec->reg_cache; |
4720 | + u8 *cache = codec->reg_cache; |
4721 | |
4722 | reg &= 0xff; |
4723 | if (reg >= codec->reg_cache_size) |
4724 | @@ -351,7 +351,7 @@ static unsigned int snd_soc_16_8_read(struct snd_soc_codec *codec, |
4725 | static int snd_soc_16_8_write(struct snd_soc_codec *codec, unsigned int reg, |
4726 | unsigned int value) |
4727 | { |
4728 | - u16 *cache = codec->reg_cache; |
4729 | + u8 *cache = codec->reg_cache; |
4730 | u8 data[3]; |
4731 | int ret; |
4732 |