Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.1.5-r1/0104-3.1.5-all-fixes.patch



Revision 1599
Mon Dec 12 13:47:29 2011 UTC by niro
File size: 152944 bytes
3.1.5-magellan-r1, updated to linux-3.1.5
1 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
2 index 3146ed3..dfe0140 100644
3 --- a/arch/arm/Kconfig
4 +++ b/arch/arm/Kconfig
5 @@ -1297,6 +1297,18 @@ config ARM_ERRATA_764369
6 relevant cache maintenance functions and sets a specific bit
7 in the diagnostic control register of the SCU.
8
9 +config PL310_ERRATA_769419
10 + bool "PL310 errata: no automatic Store Buffer drain"
11 + depends on CACHE_L2X0
12 + help
13 + On revisions of the PL310 prior to r3p2, the Store Buffer does
14 + not automatically drain. This can cause normal, non-cacheable
15 + writes to be retained when the memory system is idle, leading
16 + to suboptimal I/O performance for drivers using coherent DMA.
17 + This option adds a write barrier to the cpu_idle loop so that,
18 + on systems with an outer cache, the store buffer is drained
19 + explicitly.
20 +
21 endmenu
22
23 source "arch/arm/common/Kconfig"
24 diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
25 index 227a477..d95763d 100644
26 --- a/arch/arm/configs/ezx_defconfig
27 +++ b/arch/arm/configs/ezx_defconfig
28 @@ -287,7 +287,7 @@ CONFIG_USB=y
29 # CONFIG_USB_DEVICE_CLASS is not set
30 CONFIG_USB_OHCI_HCD=y
31 CONFIG_USB_GADGET=y
32 -CONFIG_USB_GADGET_PXA27X=y
33 +CONFIG_USB_PXA27X=y
34 CONFIG_USB_ETH=m
35 # CONFIG_USB_ETH_RNDIS is not set
36 CONFIG_MMC=y
37 diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
38 index 176ec22..fd996bb 100644
39 --- a/arch/arm/configs/imote2_defconfig
40 +++ b/arch/arm/configs/imote2_defconfig
41 @@ -263,7 +263,7 @@ CONFIG_USB=y
42 # CONFIG_USB_DEVICE_CLASS is not set
43 CONFIG_USB_OHCI_HCD=y
44 CONFIG_USB_GADGET=y
45 -CONFIG_USB_GADGET_PXA27X=y
46 +CONFIG_USB_PXA27X=y
47 CONFIG_USB_ETH=m
48 # CONFIG_USB_ETH_RNDIS is not set
49 CONFIG_MMC=y
50 diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
51 index a88e64d..443675d 100644
52 --- a/arch/arm/configs/magician_defconfig
53 +++ b/arch/arm/configs/magician_defconfig
54 @@ -132,7 +132,7 @@ CONFIG_USB_MON=m
55 CONFIG_USB_OHCI_HCD=y
56 CONFIG_USB_GADGET=y
57 CONFIG_USB_GADGET_VBUS_DRAW=500
58 -CONFIG_USB_GADGET_PXA27X=y
59 +CONFIG_USB_PXA27X=y
60 CONFIG_USB_ETH=m
61 # CONFIG_USB_ETH_RNDIS is not set
62 CONFIG_USB_GADGETFS=m
63 diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
64 index 59577ad..547a3c1 100644
65 --- a/arch/arm/configs/zeus_defconfig
66 +++ b/arch/arm/configs/zeus_defconfig
67 @@ -140,7 +140,7 @@ CONFIG_USB_SERIAL=m
68 CONFIG_USB_SERIAL_GENERIC=y
69 CONFIG_USB_SERIAL_MCT_U232=m
70 CONFIG_USB_GADGET=m
71 -CONFIG_USB_GADGET_PXA27X=y
72 +CONFIG_USB_PXA27X=y
73 CONFIG_USB_ETH=m
74 CONFIG_USB_GADGETFS=m
75 CONFIG_USB_FILE_STORAGE=m
76 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
77 index 1a347f4..c9d11ea 100644
78 --- a/arch/arm/kernel/process.c
79 +++ b/arch/arm/kernel/process.c
80 @@ -192,6 +192,9 @@ void cpu_idle(void)
81 #endif
82
83 local_irq_disable();
84 +#ifdef CONFIG_PL310_ERRATA_769419
85 + wmb();
86 +#endif
87 if (hlt_counter) {
88 local_irq_enable();
89 cpu_relax();
90 diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
91 index 5dcc59d..b3a7124 100644
92 --- a/arch/arm/mach-mxs/clock-mx28.c
93 +++ b/arch/arm/mach-mxs/clock-mx28.c
94 @@ -404,7 +404,7 @@ static int name##_set_rate(struct clk *clk, unsigned long rate) \
95 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
96 reg &= ~BM_CLKCTRL_##dr##_DIV; \
97 reg |= div << BP_CLKCTRL_##dr##_DIV; \
98 - if (reg | (1 << clk->enable_shift)) { \
99 + if (reg & (1 << clk->enable_shift)) { \
100 pr_err("%s: clock is gated\n", __func__); \
101 return -EINVAL; \
102 } \
103 diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
104 index 57b66d5..b45c88d 100644
105 --- a/arch/arm/mach-omap2/Kconfig
106 +++ b/arch/arm/mach-omap2/Kconfig
107 @@ -329,6 +329,7 @@ config MACH_OMAP4_PANDA
108 config OMAP3_EMU
109 bool "OMAP3 debugging peripherals"
110 depends on ARCH_OMAP3
111 + select ARM_AMBA
112 select OC_ETM
113 help
114 Say Y here to enable debugging hardware of omap3
115 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
116 index 84cc0bd..f396c83 100644
117 --- a/arch/arm/mach-omap2/omap_hwmod.c
118 +++ b/arch/arm/mach-omap2/omap_hwmod.c
119 @@ -749,7 +749,7 @@ static int _count_mpu_irqs(struct omap_hwmod *oh)
120 ohii = &oh->mpu_irqs[i++];
121 } while (ohii->irq != -1);
122
123 - return i;
124 + return i-1;
125 }
126
127 /**
128 @@ -772,7 +772,7 @@ static int _count_sdma_reqs(struct omap_hwmod *oh)
129 ohdi = &oh->sdma_reqs[i++];
130 } while (ohdi->dma_req != -1);
131
132 - return i;
133 + return i-1;
134 }
135
136 /**
137 @@ -795,7 +795,7 @@ static int _count_ocp_if_addr_spaces(struct omap_hwmod_ocp_if *os)
138 mem = &os->addr[i++];
139 } while (mem->pa_start != mem->pa_end);
140
141 - return i;
142 + return i-1;
143 }
144
145 /**
146 diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
147 index 472bf22..95eb863 100644
148 --- a/arch/arm/mach-omap2/pm.c
149 +++ b/arch/arm/mach-omap2/pm.c
150 @@ -23,6 +23,7 @@
151 #include "powerdomain.h"
152 #include "clockdomain.h"
153 #include "pm.h"
154 +#include "twl-common.h"
155
156 static struct omap_device_pm_latency *pm_lats;
157
158 @@ -251,11 +252,8 @@ postcore_initcall(omap2_common_pm_init);
159
160 static int __init omap2_common_pm_late_init(void)
161 {
162 - /* Init the OMAP TWL parameters */
163 - omap3_twl_init();
164 - omap4_twl_init();
165 -
166 /* Init the voltage layer */
167 + omap_pmic_late_init();
168 omap_voltage_late_init();
169
170 /* Initialize the voltages */
171 diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
172 index 34c01a7..19ff010 100644
173 --- a/arch/arm/mach-omap2/smartreflex.c
174 +++ b/arch/arm/mach-omap2/smartreflex.c
175 @@ -137,7 +137,7 @@ static irqreturn_t sr_interrupt(int irq, void *data)
176 sr_write_reg(sr_info, ERRCONFIG_V1, status);
177 } else if (sr_info->ip_type == SR_TYPE_V2) {
178 /* Read the status bits */
179 - sr_read_reg(sr_info, IRQSTATUS);
180 + status = sr_read_reg(sr_info, IRQSTATUS);
181
182 /* Clear them by writing back */
183 sr_write_reg(sr_info, IRQSTATUS, status);
184 diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
185 index daa056e..47133fa 100644
186 --- a/arch/arm/mach-omap2/twl-common.c
187 +++ b/arch/arm/mach-omap2/twl-common.c
188 @@ -30,6 +30,7 @@
189 #include <plat/usb.h>
190
191 #include "twl-common.h"
192 +#include "pm.h"
193
194 static struct i2c_board_info __initdata pmic_i2c_board_info = {
195 .addr = 0x48,
196 @@ -48,6 +49,16 @@ void __init omap_pmic_init(int bus, u32 clkrate,
197 omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
198 }
199
200 +void __init omap_pmic_late_init(void)
201 +{
202 + /* Init the OMAP TWL parameters (if PMIC has been registerd) */
203 + if (!pmic_i2c_board_info.irq)
204 + return;
205 +
206 + omap3_twl_init();
207 + omap4_twl_init();
208 +}
209 +
210 #if defined(CONFIG_ARCH_OMAP3)
211 static struct twl4030_usb_data omap3_usb_pdata = {
212 .usb_mode = T2_USB_MODE_ULPI,
213 diff --git a/arch/arm/mach-omap2/twl-common.h b/arch/arm/mach-omap2/twl-common.h
214 index 5e83a5b..275dde8 100644
215 --- a/arch/arm/mach-omap2/twl-common.h
216 +++ b/arch/arm/mach-omap2/twl-common.h
217 @@ -1,6 +1,8 @@
218 #ifndef __OMAP_PMIC_COMMON__
219 #define __OMAP_PMIC_COMMON__
220
221 +#include <plat/irqs.h>
222 +
223 #define TWL_COMMON_PDATA_USB (1 << 0)
224 #define TWL_COMMON_PDATA_BCI (1 << 1)
225 #define TWL_COMMON_PDATA_MADC (1 << 2)
226 @@ -30,6 +32,7 @@ struct twl4030_platform_data;
227
228 void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq,
229 struct twl4030_platform_data *pmic_data);
230 +void omap_pmic_late_init(void);
231
232 static inline void omap2_pmic_init(const char *pmic_type,
233 struct twl4030_platform_data *pmic_data)
234 diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
235 index ef3e8b1..6c6f5e4 100644
236 --- a/arch/arm/mach-pxa/balloon3.c
237 +++ b/arch/arm/mach-pxa/balloon3.c
238 @@ -307,7 +307,7 @@ static inline void balloon3_mmc_init(void) {}
239 /******************************************************************************
240 * USB Gadget
241 ******************************************************************************/
242 -#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
243 +#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
244 static void balloon3_udc_command(int cmd)
245 {
246 if (cmd == PXA2XX_UDC_CMD_CONNECT)
247 diff --git a/arch/arm/mach-pxa/colibri-pxa320.c b/arch/arm/mach-pxa/colibri-pxa320.c
248 index d2c6631..db3a938 100644
249 --- a/arch/arm/mach-pxa/colibri-pxa320.c
250 +++ b/arch/arm/mach-pxa/colibri-pxa320.c
251 @@ -146,7 +146,7 @@ static void __init colibri_pxa320_init_eth(void)
252 static inline void __init colibri_pxa320_init_eth(void) {}
253 #endif /* CONFIG_AX88796 */
254
255 -#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
256 +#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
257 static struct gpio_vbus_mach_info colibri_pxa320_gpio_vbus_info = {
258 .gpio_vbus = mfp_to_gpio(MFP_PIN_GPIO96),
259 .gpio_pullup = -1,
260 diff --git a/arch/arm/mach-pxa/gumstix.c b/arch/arm/mach-pxa/gumstix.c
261 index deaa111..4aa3dba 100644
262 --- a/arch/arm/mach-pxa/gumstix.c
263 +++ b/arch/arm/mach-pxa/gumstix.c
264 @@ -106,7 +106,7 @@ static void __init gumstix_mmc_init(void)
265 }
266 #endif
267
268 -#ifdef CONFIG_USB_GADGET_PXA25X
269 +#ifdef CONFIG_USB_PXA25X
270 static struct gpio_vbus_mach_info gumstix_udc_info = {
271 .gpio_vbus = GPIO_GUMSTIX_USB_GPIOn,
272 .gpio_pullup = GPIO_GUMSTIX_USB_GPIOx,
273 diff --git a/arch/arm/mach-pxa/include/mach/palm27x.h b/arch/arm/mach-pxa/include/mach/palm27x.h
274 index 0a5e5ea..8d56043 100644
275 --- a/arch/arm/mach-pxa/include/mach/palm27x.h
276 +++ b/arch/arm/mach-pxa/include/mach/palm27x.h
277 @@ -37,8 +37,8 @@ extern void __init palm27x_lcd_init(int power,
278 static inline void palm27x_lcd_init(int power, struct pxafb_mode_info *mode) {}
279 #endif
280
281 -#if defined(CONFIG_USB_GADGET_PXA27X) || \
282 - defined(CONFIG_USB_GADGET_PXA27X_MODULE)
283 +#if defined(CONFIG_USB_PXA27X) || \
284 + defined(CONFIG_USB_PXA27X_MODULE)
285 extern void __init palm27x_udc_init(int vbus, int pullup,
286 int vbus_inverted);
287 #else
288 diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
289 index 325c245..fbc10d7 100644
290 --- a/arch/arm/mach-pxa/palm27x.c
291 +++ b/arch/arm/mach-pxa/palm27x.c
292 @@ -164,8 +164,8 @@ void __init palm27x_lcd_init(int power, struct pxafb_mode_info *mode)
293 /******************************************************************************
294 * USB Gadget
295 ******************************************************************************/
296 -#if defined(CONFIG_USB_GADGET_PXA27X) || \
297 - defined(CONFIG_USB_GADGET_PXA27X_MODULE)
298 +#if defined(CONFIG_USB_PXA27X) || \
299 + defined(CONFIG_USB_PXA27X_MODULE)
300 static struct gpio_vbus_mach_info palm27x_udc_info = {
301 .gpio_vbus_inverted = 1,
302 };
303 diff --git a/arch/arm/mach-pxa/palmtc.c b/arch/arm/mach-pxa/palmtc.c
304 index 6ad4a6c..9c27b01 100644
305 --- a/arch/arm/mach-pxa/palmtc.c
306 +++ b/arch/arm/mach-pxa/palmtc.c
307 @@ -338,7 +338,7 @@ static inline void palmtc_mkp_init(void) {}
308 /******************************************************************************
309 * UDC
310 ******************************************************************************/
311 -#if defined(CONFIG_USB_GADGET_PXA25X)||defined(CONFIG_USB_GADGET_PXA25X_MODULE)
312 +#if defined(CONFIG_USB_PXA25X)||defined(CONFIG_USB_PXA25X_MODULE)
313 static struct gpio_vbus_mach_info palmtc_udc_info = {
314 .gpio_vbus = GPIO_NR_PALMTC_USB_DETECT_N,
315 .gpio_vbus_inverted = 1,
316 diff --git a/arch/arm/mach-pxa/vpac270.c b/arch/arm/mach-pxa/vpac270.c
317 index 5f8490a..3a1fa1d 100644
318 --- a/arch/arm/mach-pxa/vpac270.c
319 +++ b/arch/arm/mach-pxa/vpac270.c
320 @@ -343,7 +343,7 @@ static inline void vpac270_uhc_init(void) {}
321 /******************************************************************************
322 * USB Gadget
323 ******************************************************************************/
324 -#if defined(CONFIG_USB_GADGET_PXA27X)||defined(CONFIG_USB_GADGET_PXA27X_MODULE)
325 +#if defined(CONFIG_USB_PXA27X)||defined(CONFIG_USB_PXA27X_MODULE)
326 static struct gpio_vbus_mach_info vpac270_gpio_vbus_info = {
327 .gpio_vbus = GPIO41_VPAC270_UDC_DETECT,
328 .gpio_pullup = -1,
329 diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
330 index ae0e14b..5804cfa 100644
331 --- a/arch/s390/kernel/ptrace.c
332 +++ b/arch/s390/kernel/ptrace.c
333 @@ -897,6 +897,14 @@ static int s390_last_break_get(struct task_struct *target,
334 return 0;
335 }
336
337 +static int s390_last_break_set(struct task_struct *target,
338 + const struct user_regset *regset,
339 + unsigned int pos, unsigned int count,
340 + const void *kbuf, const void __user *ubuf)
341 +{
342 + return 0;
343 +}
344 +
345 #endif
346
347 static const struct user_regset s390_regsets[] = {
348 @@ -923,6 +931,7 @@ static const struct user_regset s390_regsets[] = {
349 .size = sizeof(long),
350 .align = sizeof(long),
351 .get = s390_last_break_get,
352 + .set = s390_last_break_set,
353 },
354 #endif
355 };
356 @@ -1080,6 +1089,14 @@ static int s390_compat_last_break_get(struct task_struct *target,
357 return 0;
358 }
359
360 +static int s390_compat_last_break_set(struct task_struct *target,
361 + const struct user_regset *regset,
362 + unsigned int pos, unsigned int count,
363 + const void *kbuf, const void __user *ubuf)
364 +{
365 + return 0;
366 +}
367 +
368 static const struct user_regset s390_compat_regsets[] = {
369 [REGSET_GENERAL] = {
370 .core_note_type = NT_PRSTATUS,
371 @@ -1103,6 +1120,7 @@ static const struct user_regset s390_compat_regsets[] = {
372 .size = sizeof(long),
373 .align = sizeof(long),
374 .get = s390_compat_last_break_get,
375 + .set = s390_compat_last_break_set,
376 },
377 [REGSET_GENERAL_EXTENDED] = {
378 .core_note_type = NT_S390_HIGH_GPRS,
379 diff --git a/arch/x86/include/asm/timer.h b/arch/x86/include/asm/timer.h
380 index fa7b917..431793e 100644
381 --- a/arch/x86/include/asm/timer.h
382 +++ b/arch/x86/include/asm/timer.h
383 @@ -32,6 +32,22 @@ extern int no_timer_check;
384 * (mathieu.desnoyers@polymtl.ca)
385 *
386 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
387 + *
388 + * In:
389 + *
390 + * ns = cycles * cyc2ns_scale / SC
391 + *
392 + * Although we may still have enough bits to store the value of ns,
393 + * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
394 + * leading to an incorrect result.
395 + *
396 + * To avoid this, we can decompose 'cycles' into quotient and remainder
397 + * of division by SC. Then,
398 + *
399 + * ns = (quot * SC + rem) * cyc2ns_scale / SC
400 + * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
401 + *
402 + * - sqazi@google.com
403 */
404
405 DECLARE_PER_CPU(unsigned long, cyc2ns);
406 @@ -41,9 +57,14 @@ DECLARE_PER_CPU(unsigned long long, cyc2ns_offset);
407
408 static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
409 {
410 + unsigned long long quot;
411 + unsigned long long rem;
412 int cpu = smp_processor_id();
413 unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
414 - ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
415 + quot = (cyc >> CYC2NS_SCALE_FACTOR);
416 + rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
417 + ns += quot * per_cpu(cyc2ns, cpu) +
418 + ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
419 return ns;
420 }
421
422 diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
423 index 1b1ef3a..3213c52 100644
424 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
425 +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
426 @@ -508,6 +508,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
427 unsigned long from = cpuc->lbr_entries[0].from;
428 unsigned long old_to, to = cpuc->lbr_entries[0].to;
429 unsigned long ip = regs->ip;
430 + int is_64bit = 0;
431
432 /*
433 * We don't need to fixup if the PEBS assist is fault like
434 @@ -559,7 +560,10 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
435 } else
436 kaddr = (void *)to;
437
438 - kernel_insn_init(&insn, kaddr);
439 +#ifdef CONFIG_X86_64
440 + is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
441 +#endif
442 + insn_init(&insn, kaddr, is_64bit);
443 insn_get_length(&insn);
444 to += insn.length;
445 } while (to < ip);
446 diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
447 index 9103b89..0741b062 100644
448 --- a/arch/x86/kernel/mpparse.c
449 +++ b/arch/x86/kernel/mpparse.c
450 @@ -95,8 +95,8 @@ static void __init MP_bus_info(struct mpc_bus *m)
451 }
452 #endif
453
454 + set_bit(m->busid, mp_bus_not_pci);
455 if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
456 - set_bit(m->busid, mp_bus_not_pci);
457 #if defined(CONFIG_EISA) || defined(CONFIG_MCA)
458 mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
459 #endif
460 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
461 index 9242436..d4a705f 100644
462 --- a/arch/x86/kernel/reboot.c
463 +++ b/arch/x86/kernel/reboot.c
464 @@ -124,7 +124,7 @@ __setup("reboot=", reboot_setup);
465 */
466
467 /*
468 - * Some machines require the "reboot=b" commandline option,
469 + * Some machines require the "reboot=b" or "reboot=k" commandline options,
470 * this quirk makes that automatic.
471 */
472 static int __init set_bios_reboot(const struct dmi_system_id *d)
473 @@ -136,6 +136,15 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
474 return 0;
475 }
476
477 +static int __init set_kbd_reboot(const struct dmi_system_id *d)
478 +{
479 + if (reboot_type != BOOT_KBD) {
480 + reboot_type = BOOT_KBD;
481 + printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
482 + }
483 + return 0;
484 +}
485 +
486 static struct dmi_system_id __initdata reboot_dmi_table[] = {
487 { /* Handle problems with rebooting on Dell E520's */
488 .callback = set_bios_reboot,
489 @@ -295,7 +304,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
490 },
491 },
492 { /* Handle reboot issue on Acer Aspire one */
493 - .callback = set_bios_reboot,
494 + .callback = set_kbd_reboot,
495 .ident = "Acer Aspire One A110",
496 .matches = {
497 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
498 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
499 index b499626..f4f29b1 100644
500 --- a/arch/x86/mm/highmem_32.c
501 +++ b/arch/x86/mm/highmem_32.c
502 @@ -45,6 +45,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
503 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
504 BUG_ON(!pte_none(*(kmap_pte-idx)));
505 set_pte(kmap_pte-idx, mk_pte(page, prot));
506 + arch_flush_lazy_mmu_mode();
507
508 return (void *)vaddr;
509 }
510 @@ -88,6 +89,7 @@ void __kunmap_atomic(void *kvaddr)
511 */
512 kpte_clear_flush(kmap_pte-idx, vaddr);
513 kmap_atomic_idx_pop();
514 + arch_flush_lazy_mmu_mode();
515 }
516 #ifdef CONFIG_DEBUG_HIGHMEM
517 else {
518 diff --git a/arch/x86/oprofile/init.c b/arch/x86/oprofile/init.c
519 index cdfe4c5..f148cf6 100644
520 --- a/arch/x86/oprofile/init.c
521 +++ b/arch/x86/oprofile/init.c
522 @@ -21,6 +21,7 @@ extern int op_nmi_timer_init(struct oprofile_operations *ops);
523 extern void op_nmi_exit(void);
524 extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
525
526 +static int nmi_timer;
527
528 int __init oprofile_arch_init(struct oprofile_operations *ops)
529 {
530 @@ -31,8 +32,9 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
531 #ifdef CONFIG_X86_LOCAL_APIC
532 ret = op_nmi_init(ops);
533 #endif
534 + nmi_timer = (ret != 0);
535 #ifdef CONFIG_X86_IO_APIC
536 - if (ret < 0)
537 + if (nmi_timer)
538 ret = op_nmi_timer_init(ops);
539 #endif
540 ops->backtrace = x86_backtrace;
541 @@ -44,6 +46,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
542 void oprofile_arch_exit(void)
543 {
544 #ifdef CONFIG_X86_LOCAL_APIC
545 - op_nmi_exit();
546 + if (!nmi_timer)
547 + op_nmi_exit();
548 #endif
549 }
550 diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
551 index 3cf303e..38a3297 100644
552 --- a/drivers/crypto/mv_cesa.c
553 +++ b/drivers/crypto/mv_cesa.c
554 @@ -342,11 +342,13 @@ static void mv_process_hash_current(int first_block)
555 else
556 op.config |= CFG_MID_FRAG;
557
558 - writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
559 - writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
560 - writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
561 - writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
562 - writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
563 + if (first_block) {
564 + writel(req_ctx->state[0], cpg->reg + DIGEST_INITIAL_VAL_A);
565 + writel(req_ctx->state[1], cpg->reg + DIGEST_INITIAL_VAL_B);
566 + writel(req_ctx->state[2], cpg->reg + DIGEST_INITIAL_VAL_C);
567 + writel(req_ctx->state[3], cpg->reg + DIGEST_INITIAL_VAL_D);
568 + writel(req_ctx->state[4], cpg->reg + DIGEST_INITIAL_VAL_E);
569 + }
570 }
571
572 memcpy(cpg->sram + SRAM_CONFIG, &op, sizeof(struct sec_accel_config));
573 diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
574 index f10fc52..1eedb6f 100644
575 --- a/drivers/firmware/sigma.c
576 +++ b/drivers/firmware/sigma.c
577 @@ -14,13 +14,34 @@
578 #include <linux/module.h>
579 #include <linux/sigma.h>
580
581 -/* Return: 0==OK, <0==error, =1 ==no more actions */
582 +static size_t sigma_action_size(struct sigma_action *sa)
583 +{
584 + size_t payload = 0;
585 +
586 + switch (sa->instr) {
587 + case SIGMA_ACTION_WRITEXBYTES:
588 + case SIGMA_ACTION_WRITESINGLE:
589 + case SIGMA_ACTION_WRITESAFELOAD:
590 + payload = sigma_action_len(sa);
591 + break;
592 + default:
593 + break;
594 + }
595 +
596 + payload = ALIGN(payload, 2);
597 +
598 + return payload + sizeof(struct sigma_action);
599 +}
600 +
601 +/*
602 + * Returns a negative error value in case of an error, 0 if processing of
603 + * the firmware should be stopped after this action, 1 otherwise.
604 + */
605 static int
606 -process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
607 +process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
608 {
609 - struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
610 size_t len = sigma_action_len(sa);
611 - int ret = 0;
612 + int ret;
613
614 pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
615 sa->instr, sa->addr, len);
616 @@ -29,44 +50,50 @@ process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
617 case SIGMA_ACTION_WRITEXBYTES:
618 case SIGMA_ACTION_WRITESINGLE:
619 case SIGMA_ACTION_WRITESAFELOAD:
620 - if (ssfw->fw->size < ssfw->pos + len)
621 - return -EINVAL;
622 ret = i2c_master_send(client, (void *)&sa->addr, len);
623 if (ret < 0)
624 return -EINVAL;
625 break;
626 -
627 case SIGMA_ACTION_DELAY:
628 - ret = 0;
629 udelay(len);
630 len = 0;
631 break;
632 -
633 case SIGMA_ACTION_END:
634 - return 1;
635 -
636 + return 0;
637 default:
638 return -EINVAL;
639 }
640
641 - /* when arrive here ret=0 or sent data */
642 - ssfw->pos += sigma_action_size(sa, len);
643 - return ssfw->pos == ssfw->fw->size;
644 + return 1;
645 }
646
647 static int
648 process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
649 {
650 - pr_debug("%s: processing %p\n", __func__, ssfw);
651 + struct sigma_action *sa;
652 + size_t size;
653 + int ret;
654 +
655 + while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
656 + sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
657 +
658 + size = sigma_action_size(sa);
659 + ssfw->pos += size;
660 + if (ssfw->pos > ssfw->fw->size || size == 0)
661 + break;
662 +
663 + ret = process_sigma_action(client, sa);
664
665 - while (1) {
666 - int ret = process_sigma_action(client, ssfw);
667 pr_debug("%s: action returned %i\n", __func__, ret);
668 - if (ret == 1)
669 - return 0;
670 - else if (ret)
671 +
672 + if (ret <= 0)
673 return ret;
674 }
675 +
676 + if (ssfw->pos != ssfw->fw->size)
677 + return -EINVAL;
678 +
679 + return 0;
680 }
681
682 int process_sigma_firmware(struct i2c_client *client, const char *name)
683 @@ -89,16 +116,24 @@ int process_sigma_firmware(struct i2c_client *client, const char *name)
684
685 /* then verify the header */
686 ret = -EINVAL;
687 - if (fw->size < sizeof(*ssfw_head))
688 +
689 + /*
690 + * Reject too small or unreasonable large files. The upper limit has been
691 + * chosen a bit arbitrarily, but it should be enough for all practical
692 + * purposes and having the limit makes it easier to avoid integer
693 + * overflows later in the loading process.
694 + */
695 + if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
696 goto done;
697
698 ssfw_head = (void *)fw->data;
699 if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
700 goto done;
701
702 - crc = crc32(0, fw->data, fw->size);
703 + crc = crc32(0, fw->data + sizeof(*ssfw_head),
704 + fw->size - sizeof(*ssfw_head));
705 pr_debug("%s: crc=%x\n", __func__, crc);
706 - if (crc != ssfw_head->crc)
707 + if (crc != le32_to_cpu(ssfw_head->crc))
708 goto done;
709
710 ssfw.pos = sizeof(*ssfw_head);
711 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
712 index fe738f0..2410c40 100644
713 --- a/drivers/gpu/drm/drm_crtc.c
714 +++ b/drivers/gpu/drm/drm_crtc.c
715 @@ -1868,6 +1868,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
716 }
717
718 if (num_clips && clips_ptr) {
719 + if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
720 + ret = -EINVAL;
721 + goto out_err1;
722 + }
723 clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
724 if (!clips) {
725 ret = -ENOMEM;
726 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
727 index 9cbb0cd..73248d0 100644
728 --- a/drivers/gpu/drm/i915/i915_irq.c
729 +++ b/drivers/gpu/drm/i915/i915_irq.c
730 @@ -822,6 +822,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
731
732 /* Fences */
733 switch (INTEL_INFO(dev)->gen) {
734 + case 7:
735 case 6:
736 for (i = 0; i < 16; i++)
737 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
738 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
739 index 90587de..ad381a2 100644
740 --- a/drivers/gpu/drm/i915/i915_reg.h
741 +++ b/drivers/gpu/drm/i915/i915_reg.h
742 @@ -3401,6 +3401,10 @@
743 #define GT_FIFO_FREE_ENTRIES 0x120008
744 #define GT_FIFO_NUM_RESERVED_ENTRIES 20
745
746 +#define GEN6_UCGCTL2 0x9404
747 +# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
748 +# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
749 +
750 #define GEN6_RPNSWREQ 0xA008
751 #define GEN6_TURBO_DISABLE (1<<31)
752 #define GEN6_FREQUENCY(x) ((x)<<25)
753 diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
754 index f107423..9c7706a 100644
755 --- a/drivers/gpu/drm/i915/i915_suspend.c
756 +++ b/drivers/gpu/drm/i915/i915_suspend.c
757 @@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
758
759 /* Fences */
760 switch (INTEL_INFO(dev)->gen) {
761 + case 7:
762 case 6:
763 for (i = 0; i < 16; i++)
764 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
765 @@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
766
767 /* Fences */
768 switch (INTEL_INFO(dev)->gen) {
769 + case 7:
770 case 6:
771 for (i = 0; i < 16; i++)
772 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
773 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
774 index e1340a2..07e7cf3 100644
775 --- a/drivers/gpu/drm/i915/intel_display.c
776 +++ b/drivers/gpu/drm/i915/intel_display.c
777 @@ -7882,6 +7882,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
778 I915_WRITE(WM2_LP_ILK, 0);
779 I915_WRITE(WM1_LP_ILK, 0);
780
781 + /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
782 + * gating disable must be set. Failure to set it results in
783 + * flickering pixels due to Z write ordering failures after
784 + * some amount of runtime in the Mesa "fire" demo, and Unigine
785 + * Sanctuary and Tropics, and apparently anything else with
786 + * alpha test or pixel discard.
787 + *
788 + * According to the spec, bit 11 (RCCUNIT) must also be set,
789 + * but we didn't debug actual testcases to find it out.
790 + */
791 + I915_WRITE(GEN6_UCGCTL2,
792 + GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
793 + GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
794 +
795 /*
796 * According to the spec the following bits should be
797 * set in order to enable memory self-refresh and fbc:
798 diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
799 index af08ff3..007f6ca 100644
800 --- a/drivers/gpu/drm/i915/intel_panel.c
801 +++ b/drivers/gpu/drm/i915/intel_panel.c
802 @@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
803 static int intel_panel_get_brightness(struct backlight_device *bd)
804 {
805 struct drm_device *dev = bl_get_data(bd);
806 - return intel_panel_get_backlight(dev);
807 + struct drm_i915_private *dev_priv = dev->dev_private;
808 + return dev_priv->backlight_level;
809 }
810
811 static const struct backlight_ops intel_panel_bl_ops = {
812 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
813 index fb0a00a..d4ee6f0 100644
814 --- a/drivers/gpu/drm/radeon/evergreen.c
815 +++ b/drivers/gpu/drm/radeon/evergreen.c
816 @@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
817 {
818 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
819 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
820 + int i;
821
822 /* Lock the graphics update lock */
823 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
824 @@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
825 (u32)crtc_base);
826
827 /* Wait for update_pending to go high. */
828 - while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
829 + for (i = 0; i < rdev->usec_timeout; i++) {
830 + if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
831 + break;
832 + udelay(1);
833 + }
834 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
835
836 /* Unlock the lock, so double-buffering can take place inside vblank */
837 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
838 index c9a0dae..b94d871 100644
839 --- a/drivers/gpu/drm/radeon/r100.c
840 +++ b/drivers/gpu/drm/radeon/r100.c
841 @@ -84,13 +84,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
842 {
843 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
844 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
845 + int i;
846
847 /* Lock the graphics update lock */
848 /* update the scanout addresses */
849 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
850
851 /* Wait for update_pending to go high. */
852 - while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
853 + for (i = 0; i < rdev->usec_timeout; i++) {
854 + if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
855 + break;
856 + udelay(1);
857 + }
858 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
859
860 /* Unlock the lock, so double-buffering can take place inside vblank */
861 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
862 index bf2b615..285acc4 100644
863 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
864 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
865 @@ -85,6 +85,18 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
866 for (i = 0; i < num_indices; i++) {
867 gpio = &i2c_info->asGPIO_Info[i];
868
869 + /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
870 + if ((rdev->family == CHIP_R420) ||
871 + (rdev->family == CHIP_R423) ||
872 + (rdev->family == CHIP_RV410)) {
873 + if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
874 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
875 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
876 + gpio->ucClkMaskShift = 0x19;
877 + gpio->ucDataMaskShift = 0x18;
878 + }
879 + }
880 +
881 /* some evergreen boards have bad data for this entry */
882 if (ASIC_IS_DCE4(rdev)) {
883 if ((i == 7) &&
884 @@ -169,6 +181,18 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
885 gpio = &i2c_info->asGPIO_Info[i];
886 i2c.valid = false;
887
888 + /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
889 + if ((rdev->family == CHIP_R420) ||
890 + (rdev->family == CHIP_R423) ||
891 + (rdev->family == CHIP_RV410)) {
892 + if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
893 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
894 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
895 + gpio->ucClkMaskShift = 0x19;
896 + gpio->ucDataMaskShift = 0x18;
897 + }
898 + }
899 +
900 /* some evergreen boards have bad data for this entry */
901 if (ASIC_IS_DCE4(rdev)) {
902 if ((i == 7) &&
903 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
904 index 29d85cf..0e89a9b 100644
905 --- a/drivers/gpu/drm/radeon/rs600.c
906 +++ b/drivers/gpu/drm/radeon/rs600.c
907 @@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
908 {
909 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
910 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
911 + int i;
912
913 /* Lock the graphics update lock */
914 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
915 @@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
916 (u32)crtc_base);
917
918 /* Wait for update_pending to go high. */
919 - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
920 + for (i = 0; i < rdev->usec_timeout; i++) {
921 + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
922 + break;
923 + udelay(1);
924 + }
925 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
926
927 /* Unlock the lock, so double-buffering can take place inside vblank */
928 diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
929 index b13c2ee..ddc206a 100644
930 --- a/drivers/gpu/drm/radeon/rv770.c
931 +++ b/drivers/gpu/drm/radeon/rv770.c
932 @@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
933 {
934 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
935 u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
936 + int i;
937
938 /* Lock the graphics update lock */
939 tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
940 @@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
941 (u32)crtc_base);
942
943 /* Wait for update_pending to go high. */
944 - while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
945 + for (i = 0; i < rdev->usec_timeout; i++) {
946 + if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
947 + break;
948 + udelay(1);
949 + }
950 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
951
952 /* Unlock the lock, so double-buffering can take place inside vblank */
953 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
954 index 5be9f47..f26ae31 100644
955 --- a/drivers/hid/hid-core.c
956 +++ b/drivers/hid/hid-core.c
957 @@ -1728,8 +1728,8 @@ static const struct hid_device_id hid_ignore_list[] = {
958 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
959 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
960 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
961 + { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
962 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
963 - { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
964 { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
965 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
966 { HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
967 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
968 index 0d87d98..53c4634 100644
969 --- a/drivers/hid/hid-ids.h
970 +++ b/drivers/hid/hid-ids.h
971 @@ -266,7 +266,7 @@
972 #define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
973
974 #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
975 -#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
976 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
977
978 #define USB_VENDOR_ID_GLAB 0x06c2
979 #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
980 diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
981 index d6d5868..eca3bcc 100644
982 --- a/drivers/i2c/algos/i2c-algo-bit.c
983 +++ b/drivers/i2c/algos/i2c-algo-bit.c
984 @@ -486,7 +486,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
985
986 if (flags & I2C_M_TEN) {
987 /* a ten bit address */
988 - addr = 0xf0 | ((msg->addr >> 7) & 0x03);
989 + addr = 0xf0 | ((msg->addr >> 7) & 0x06);
990 bit_dbg(2, &i2c_adap->dev, "addr0: %d\n", addr);
991 /* try extended address code...*/
992 ret = try_address(i2c_adap, addr, retries);
993 @@ -496,7 +496,7 @@ static int bit_doAddress(struct i2c_adapter *i2c_adap, struct i2c_msg *msg)
994 return -EREMOTEIO;
995 }
996 /* the remaining 8 bit address */
997 - ret = i2c_outb(i2c_adap, msg->addr & 0x7f);
998 + ret = i2c_outb(i2c_adap, msg->addr & 0xff);
999 if ((ret != 1) && !nak_ok) {
1000 /* the chip did not ack / xmission error occurred */
1001 dev_err(&i2c_adap->dev, "died at 2nd address code\n");
1002 diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
1003 index 236ad9a..f2a84c6 100644
1004 --- a/drivers/infiniband/core/addr.c
1005 +++ b/drivers/infiniband/core/addr.c
1006 @@ -215,7 +215,9 @@ static int addr4_resolve(struct sockaddr_in *src_in,
1007
1008 neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
1009 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
1010 + rcu_read_lock();
1011 neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
1012 + rcu_read_unlock();
1013 ret = -ENODATA;
1014 if (neigh)
1015 goto release;
1016 @@ -273,15 +275,16 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
1017 goto put;
1018 }
1019
1020 + rcu_read_lock();
1021 neigh = dst_get_neighbour(dst);
1022 if (!neigh || !(neigh->nud_state & NUD_VALID)) {
1023 if (neigh)
1024 neigh_event_send(neigh, NULL);
1025 ret = -ENODATA;
1026 - goto put;
1027 + } else {
1028 + ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
1029 }
1030 -
1031 - ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
1032 + rcu_read_unlock();
1033 put:
1034 dst_release(dst);
1035 return ret;
1036 diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
1037 index 6cd642a..e55ce7a 100644
1038 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
1039 +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
1040 @@ -1365,8 +1365,10 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
1041 goto reject;
1042 }
1043 dst = &rt->dst;
1044 + rcu_read_lock();
1045 neigh = dst_get_neighbour(dst);
1046 l2t = t3_l2t_get(tdev, neigh, neigh->dev);
1047 + rcu_read_unlock();
1048 if (!l2t) {
1049 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1050 __func__);
1051 @@ -1936,10 +1938,12 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1052 }
1053 ep->dst = &rt->dst;
1054
1055 + rcu_read_lock();
1056 neigh = dst_get_neighbour(ep->dst);
1057
1058 /* get a l2t entry */
1059 ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
1060 + rcu_read_unlock();
1061 if (!ep->l2t) {
1062 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1063 err = -ENOMEM;
1064 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
1065 index 77f769d..daa93e9 100644
1066 --- a/drivers/infiniband/hw/cxgb4/cm.c
1067 +++ b/drivers/infiniband/hw/cxgb4/cm.c
1068 @@ -1358,6 +1358,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1069 goto reject;
1070 }
1071 dst = &rt->dst;
1072 + rcu_read_lock();
1073 neigh = dst_get_neighbour(dst);
1074 if (neigh->dev->flags & IFF_LOOPBACK) {
1075 pdev = ip_dev_find(&init_net, peer_ip);
1076 @@ -1384,6 +1385,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1077 rss_qid = dev->rdev.lldi.rxq_ids[
1078 cxgb4_port_idx(neigh->dev) * step];
1079 }
1080 + rcu_read_unlock();
1081 if (!l2t) {
1082 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1083 __func__);
1084 @@ -1909,6 +1911,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1085 }
1086 ep->dst = &rt->dst;
1087
1088 + rcu_read_lock();
1089 neigh = dst_get_neighbour(ep->dst);
1090
1091 /* get a l2t entry */
1092 @@ -1945,6 +1948,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1093 ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1094 cxgb4_port_idx(neigh->dev) * step];
1095 }
1096 + rcu_read_unlock();
1097 if (!ep->l2t) {
1098 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1099 err = -ENOMEM;
1100 diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1101 index 3a91d9d..5c22514 100644
1102 --- a/drivers/infiniband/hw/mlx4/qp.c
1103 +++ b/drivers/infiniband/hw/mlx4/qp.c
1104 @@ -1309,7 +1309,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1105 int is_eth;
1106 int is_vlan = 0;
1107 int is_grh;
1108 - u16 vlan;
1109 + u16 vlan = 0;
1110
1111 send_size = 0;
1112 for (i = 0; i < wr->num_sge; ++i)
1113 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
1114 index c118663..a237547 100644
1115 --- a/drivers/infiniband/hw/nes/nes_cm.c
1116 +++ b/drivers/infiniband/hw/nes/nes_cm.c
1117 @@ -1150,9 +1150,11 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
1118 neigh_release(neigh);
1119 }
1120
1121 - if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID)))
1122 + if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
1123 + rcu_read_lock();
1124 neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
1125 -
1126 + rcu_read_unlock();
1127 + }
1128 ip_rt_put(rt);
1129 return rc;
1130 }
1131 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1132 index fe89c46..a98c414 100644
1133 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
1134 +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
1135 @@ -555,6 +555,7 @@ static int path_rec_start(struct net_device *dev,
1136 return 0;
1137 }
1138
1139 +/* called with rcu_read_lock */
1140 static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
1141 {
1142 struct ipoib_dev_priv *priv = netdev_priv(dev);
1143 @@ -636,6 +637,7 @@ err_drop:
1144 spin_unlock_irqrestore(&priv->lock, flags);
1145 }
1146
1147 +/* called with rcu_read_lock */
1148 static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
1149 {
1150 struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
1151 @@ -720,13 +722,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1152 struct neighbour *n = NULL;
1153 unsigned long flags;
1154
1155 + rcu_read_lock();
1156 if (likely(skb_dst(skb)))
1157 n = dst_get_neighbour(skb_dst(skb));
1158
1159 if (likely(n)) {
1160 if (unlikely(!*to_ipoib_neigh(n))) {
1161 ipoib_path_lookup(skb, dev);
1162 - return NETDEV_TX_OK;
1163 + goto unlock;
1164 }
1165
1166 neigh = *to_ipoib_neigh(n);
1167 @@ -749,17 +752,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1168 ipoib_neigh_free(dev, neigh);
1169 spin_unlock_irqrestore(&priv->lock, flags);
1170 ipoib_path_lookup(skb, dev);
1171 - return NETDEV_TX_OK;
1172 + goto unlock;
1173 }
1174
1175 if (ipoib_cm_get(neigh)) {
1176 if (ipoib_cm_up(neigh)) {
1177 ipoib_cm_send(dev, skb, ipoib_cm_get(neigh));
1178 - return NETDEV_TX_OK;
1179 + goto unlock;
1180 }
1181 } else if (neigh->ah) {
1182 ipoib_send(dev, skb, neigh->ah, IPOIB_QPN(n->ha));
1183 - return NETDEV_TX_OK;
1184 + goto unlock;
1185 }
1186
1187 if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
1188 @@ -793,13 +796,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
1189 phdr->hwaddr + 4);
1190 dev_kfree_skb_any(skb);
1191 ++dev->stats.tx_dropped;
1192 - return NETDEV_TX_OK;
1193 + goto unlock;
1194 }
1195
1196 unicast_arp_send(skb, dev, phdr);
1197 }
1198 }
1199 -
1200 +unlock:
1201 + rcu_read_unlock();
1202 return NETDEV_TX_OK;
1203 }
1204
1205 @@ -837,7 +841,7 @@ static int ipoib_hard_header(struct sk_buff *skb,
1206 dst = skb_dst(skb);
1207 n = NULL;
1208 if (dst)
1209 - n = dst_get_neighbour(dst);
1210 + n = dst_get_neighbour_raw(dst);
1211 if ((!dst || !n) && daddr) {
1212 struct ipoib_pseudoheader *phdr =
1213 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
1214 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1215 index ecea4fe..a8d2a89 100644
1216 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1217 +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
1218 @@ -265,7 +265,7 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
1219
1220 skb->dev = dev;
1221 if (dst)
1222 - n = dst_get_neighbour(dst);
1223 + n = dst_get_neighbour_raw(dst);
1224 if (!dst || !n) {
1225 /* put pseudoheader back on for next time */
1226 skb_push(skb, sizeof (struct ipoib_pseudoheader));
1227 @@ -721,6 +721,8 @@ out:
1228 if (mcast && mcast->ah) {
1229 struct dst_entry *dst = skb_dst(skb);
1230 struct neighbour *n = NULL;
1231 +
1232 + rcu_read_lock();
1233 if (dst)
1234 n = dst_get_neighbour(dst);
1235 if (n && !*to_ipoib_neigh(n)) {
1236 @@ -733,7 +735,7 @@ out:
1237 list_add_tail(&neigh->list, &mcast->neigh_list);
1238 }
1239 }
1240 -
1241 + rcu_read_unlock();
1242 spin_unlock_irqrestore(&priv->lock, flags);
1243 ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
1244 return;
1245 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1246 index a7ddc98..eabbf1a 100644
1247 --- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1248 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
1249 @@ -542,6 +542,24 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1250 return 0;
1251 }
1252
1253 +void iwlagn_config_ht40(struct ieee80211_conf *conf,
1254 + struct iwl_rxon_context *ctx)
1255 +{
1256 + if (conf_is_ht40_minus(conf)) {
1257 + ctx->ht.extension_chan_offset =
1258 + IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1259 + ctx->ht.is_40mhz = true;
1260 + } else if (conf_is_ht40_plus(conf)) {
1261 + ctx->ht.extension_chan_offset =
1262 + IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1263 + ctx->ht.is_40mhz = true;
1264 + } else {
1265 + ctx->ht.extension_chan_offset =
1266 + IEEE80211_HT_PARAM_CHA_SEC_NONE;
1267 + ctx->ht.is_40mhz = false;
1268 + }
1269 +}
1270 +
1271 int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1272 {
1273 struct iwl_priv *priv = hw->priv;
1274 @@ -600,19 +618,11 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
1275 ctx->ht.enabled = conf_is_ht(conf);
1276
1277 if (ctx->ht.enabled) {
1278 - if (conf_is_ht40_minus(conf)) {
1279 - ctx->ht.extension_chan_offset =
1280 - IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1281 - ctx->ht.is_40mhz = true;
1282 - } else if (conf_is_ht40_plus(conf)) {
1283 - ctx->ht.extension_chan_offset =
1284 - IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1285 - ctx->ht.is_40mhz = true;
1286 - } else {
1287 - ctx->ht.extension_chan_offset =
1288 - IEEE80211_HT_PARAM_CHA_SEC_NONE;
1289 - ctx->ht.is_40mhz = false;
1290 - }
1291 + /* if HT40 is used, it should not change
1292 + * after associated except channel switch */
1293 + if (iwl_is_associated_ctx(ctx) &&
1294 + !ctx->ht.is_40mhz)
1295 + iwlagn_config_ht40(conf, ctx);
1296 } else
1297 ctx->ht.is_40mhz = false;
1298
1299 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
1300 index 37e6240..211a5ad 100644
1301 --- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
1302 +++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
1303 @@ -440,9 +440,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1304
1305 switch (keyconf->cipher) {
1306 case WLAN_CIPHER_SUITE_TKIP:
1307 - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1308 - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1309 -
1310 if (sta)
1311 addr = sta->addr;
1312 else /* station mode case only */
1313 @@ -455,8 +452,6 @@ int iwl_set_dynamic_key(struct iwl_priv *priv,
1314 seq.tkip.iv32, p1k, CMD_SYNC);
1315 break;
1316 case WLAN_CIPHER_SUITE_CCMP:
1317 - keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1318 - /* fall through */
1319 case WLAN_CIPHER_SUITE_WEP40:
1320 case WLAN_CIPHER_SUITE_WEP104:
1321 ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
1322 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
1323 index f9c3cd9..f473c01 100644
1324 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
1325 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
1326 @@ -2793,6 +2793,17 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1327 return -EOPNOTSUPP;
1328 }
1329
1330 + switch (key->cipher) {
1331 + case WLAN_CIPHER_SUITE_TKIP:
1332 + key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1333 + /* fall through */
1334 + case WLAN_CIPHER_SUITE_CCMP:
1335 + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1336 + break;
1337 + default:
1338 + break;
1339 + }
1340 +
1341 /*
1342 * We could program these keys into the hardware as well, but we
1343 * don't expect much multicast traffic in IBSS and having keys
1344 @@ -3075,21 +3086,9 @@ static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
1345
1346 /* Configure HT40 channels */
1347 ctx->ht.enabled = conf_is_ht(conf);
1348 - if (ctx->ht.enabled) {
1349 - if (conf_is_ht40_minus(conf)) {
1350 - ctx->ht.extension_chan_offset =
1351 - IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1352 - ctx->ht.is_40mhz = true;
1353 - } else if (conf_is_ht40_plus(conf)) {
1354 - ctx->ht.extension_chan_offset =
1355 - IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1356 - ctx->ht.is_40mhz = true;
1357 - } else {
1358 - ctx->ht.extension_chan_offset =
1359 - IEEE80211_HT_PARAM_CHA_SEC_NONE;
1360 - ctx->ht.is_40mhz = false;
1361 - }
1362 - } else
1363 + if (ctx->ht.enabled)
1364 + iwlagn_config_ht40(conf, ctx);
1365 + else
1366 ctx->ht.is_40mhz = false;
1367
1368 if ((le16_to_cpu(ctx->staging.channel) != ch))
1369 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
1370 index d941c4c..e172f6b 100644
1371 --- a/drivers/net/wireless/iwlwifi/iwl-agn.h
1372 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
1373 @@ -135,6 +135,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
1374 struct ieee80211_vif *vif,
1375 struct ieee80211_bss_conf *bss_conf,
1376 u32 changes);
1377 +void iwlagn_config_ht40(struct ieee80211_conf *conf,
1378 + struct iwl_rxon_context *ctx);
1379
1380 /* uCode */
1381 void iwlagn_rx_calib_result(struct iwl_priv *priv,
1382 diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
1383 index 2fdbffa..32d64e7 100644
1384 --- a/drivers/net/wireless/iwlwifi/iwl-pci.c
1385 +++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
1386 @@ -442,10 +442,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1387 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
1388
1389 err = pci_enable_msi(pdev);
1390 - if (err) {
1391 - dev_printk(KERN_ERR, &pdev->dev, "pci_enable_msi failed");
1392 - goto out_iounmap;
1393 - }
1394 + if (err)
1395 + dev_printk(KERN_ERR, &pdev->dev,
1396 + "pci_enable_msi failed(0X%x)", err);
1397
1398 /* TODO: Move this away, not needed if not MSI */
1399 /* enable rfkill interrupt: hw bug w/a */
1400 @@ -466,7 +465,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1401
1402 out_disable_msi:
1403 pci_disable_msi(pdev);
1404 -out_iounmap:
1405 pci_iounmap(pdev, pci_bus->hw_base);
1406 out_pci_release_regions:
1407 pci_set_drvdata(pdev, NULL);
1408 diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
1409 index 6d9204fe..b33ceb1 100644
1410 --- a/drivers/net/wireless/p54/p54spi.c
1411 +++ b/drivers/net/wireless/p54/p54spi.c
1412 @@ -589,8 +589,6 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
1413
1414 WARN_ON(priv->fw_state != FW_STATE_READY);
1415
1416 - cancel_work_sync(&priv->work);
1417 -
1418 p54spi_power_off(priv);
1419 spin_lock_irqsave(&priv->tx_lock, flags);
1420 INIT_LIST_HEAD(&priv->tx_pending);
1421 @@ -598,6 +596,8 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
1422
1423 priv->fw_state = FW_STATE_OFF;
1424 mutex_unlock(&priv->mutex);
1425 +
1426 + cancel_work_sync(&priv->work);
1427 }
1428
1429 static int __devinit p54spi_probe(struct spi_device *spi)
1430 @@ -657,6 +657,7 @@ static int __devinit p54spi_probe(struct spi_device *spi)
1431 init_completion(&priv->fw_comp);
1432 INIT_LIST_HEAD(&priv->tx_pending);
1433 mutex_init(&priv->mutex);
1434 + spin_lock_init(&priv->tx_lock);
1435 SET_IEEE80211_DEV(hw, &spi->dev);
1436 priv->common.open = p54spi_op_start;
1437 priv->common.stop = p54spi_op_stop;
1438 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
1439 index 0019dfd..c6ad97f 100644
1440 --- a/drivers/net/wireless/rt2x00/rt2800lib.c
1441 +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
1442 @@ -3699,7 +3699,7 @@ static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i)
1443 /* Apparently the data is read from end to start */
1444 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, &reg);
1445 /* The returned value is in CPU order, but eeprom is le */
1446 - rt2x00dev->eeprom[i] = cpu_to_le32(reg);
1447 + *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
1448 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, &reg);
1449 *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
1450 rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, &reg);
1451 diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
1452 index a693fef..0b04b2e 100644
1453 --- a/drivers/net/wireless/rtlwifi/ps.c
1454 +++ b/drivers/net/wireless/rtlwifi/ps.c
1455 @@ -394,7 +394,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
1456 if (mac->link_state != MAC80211_LINKED)
1457 return;
1458
1459 - spin_lock(&rtlpriv->locks.lps_lock);
1460 + spin_lock_irq(&rtlpriv->locks.lps_lock);
1461
1462 /* Idle for a while if we connect to AP a while ago. */
1463 if (mac->cnt_after_linked >= 2) {
1464 @@ -406,7 +406,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
1465 }
1466 }
1467
1468 - spin_unlock(&rtlpriv->locks.lps_lock);
1469 + spin_unlock_irq(&rtlpriv->locks.lps_lock);
1470 }
1471
1472 /*Leave the leisure power save mode.*/
1473 @@ -415,8 +415,9 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
1474 struct rtl_priv *rtlpriv = rtl_priv(hw);
1475 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1476 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1477 + unsigned long flags;
1478
1479 - spin_lock(&rtlpriv->locks.lps_lock);
1480 + spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
1481
1482 if (ppsc->fwctrl_lps) {
1483 if (ppsc->dot11_psmode != EACTIVE) {
1484 @@ -437,7 +438,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
1485 rtl_lps_set_psmode(hw, EACTIVE);
1486 }
1487 }
1488 - spin_unlock(&rtlpriv->locks.lps_lock);
1489 + spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
1490 }
1491
1492 /* For sw LPS*/
1493 @@ -538,9 +539,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
1494 RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
1495 }
1496
1497 - spin_lock(&rtlpriv->locks.lps_lock);
1498 + spin_lock_irq(&rtlpriv->locks.lps_lock);
1499 rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
1500 - spin_unlock(&rtlpriv->locks.lps_lock);
1501 + spin_unlock_irq(&rtlpriv->locks.lps_lock);
1502 }
1503
1504 void rtl_swlps_rfon_wq_callback(void *data)
1505 @@ -573,9 +574,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
1506 if (rtlpriv->link_info.busytraffic)
1507 return;
1508
1509 - spin_lock(&rtlpriv->locks.lps_lock);
1510 + spin_lock_irq(&rtlpriv->locks.lps_lock);
1511 rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
1512 - spin_unlock(&rtlpriv->locks.lps_lock);
1513 + spin_unlock_irq(&rtlpriv->locks.lps_lock);
1514
1515 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
1516 !RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
1517 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
1518 index dccd863..f8c752e 100644
1519 --- a/drivers/oprofile/oprof.c
1520 +++ b/drivers/oprofile/oprof.c
1521 @@ -239,26 +239,45 @@ int oprofile_set_ulong(unsigned long *addr, unsigned long val)
1522 return err;
1523 }
1524
1525 +static int timer_mode;
1526 +
1527 static int __init oprofile_init(void)
1528 {
1529 int err;
1530
1531 + /* always init architecture to setup backtrace support */
1532 err = oprofile_arch_init(&oprofile_ops);
1533 - if (err < 0 || timer) {
1534 - printk(KERN_INFO "oprofile: using timer interrupt.\n");
1535 +
1536 + timer_mode = err || timer; /* fall back to timer mode on errors */
1537 + if (timer_mode) {
1538 + if (!err)
1539 + oprofile_arch_exit();
1540 err = oprofile_timer_init(&oprofile_ops);
1541 if (err)
1542 return err;
1543 }
1544 - return oprofilefs_register();
1545 +
1546 + err = oprofilefs_register();
1547 + if (!err)
1548 + return 0;
1549 +
1550 + /* failed */
1551 + if (timer_mode)
1552 + oprofile_timer_exit();
1553 + else
1554 + oprofile_arch_exit();
1555 +
1556 + return err;
1557 }
1558
1559
1560 static void __exit oprofile_exit(void)
1561 {
1562 - oprofile_timer_exit();
1563 oprofilefs_unregister();
1564 - oprofile_arch_exit();
1565 + if (timer_mode)
1566 + oprofile_timer_exit();
1567 + else
1568 + oprofile_arch_exit();
1569 }
1570
1571
1572 diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
1573 index 3ef4462..878fba1 100644
1574 --- a/drivers/oprofile/timer_int.c
1575 +++ b/drivers/oprofile/timer_int.c
1576 @@ -110,6 +110,7 @@ int oprofile_timer_init(struct oprofile_operations *ops)
1577 ops->start = oprofile_hrtimer_start;
1578 ops->stop = oprofile_hrtimer_stop;
1579 ops->cpu_type = "timer";
1580 + printk(KERN_INFO "oprofile: using timer interrupt.\n");
1581 return 0;
1582 }
1583
1584 diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
1585 index aca972b..dd7e0c5 100644
1586 --- a/drivers/pci/hotplug/shpchp_core.c
1587 +++ b/drivers/pci/hotplug/shpchp_core.c
1588 @@ -278,8 +278,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value)
1589
1590 static int is_shpc_capable(struct pci_dev *dev)
1591 {
1592 - if ((dev->vendor == PCI_VENDOR_ID_AMD) || (dev->device ==
1593 - PCI_DEVICE_ID_AMD_GOLAM_7450))
1594 + if (dev->vendor == PCI_VENDOR_ID_AMD &&
1595 + dev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)
1596 return 1;
1597 if (!pci_find_capability(dev, PCI_CAP_ID_SHPC))
1598 return 0;
1599 diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
1600 index 36547f0..75ba231 100644
1601 --- a/drivers/pci/hotplug/shpchp_hpc.c
1602 +++ b/drivers/pci/hotplug/shpchp_hpc.c
1603 @@ -944,8 +944,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev)
1604 ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */
1605 ctrl_dbg(ctrl, "Hotplug Controller:\n");
1606
1607 - if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device ==
1608 - PCI_DEVICE_ID_AMD_GOLAM_7450)) {
1609 + if (pdev->vendor == PCI_VENDOR_ID_AMD &&
1610 + pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450) {
1611 /* amd shpc driver doesn't use Base Offset; assume 0 */
1612 ctrl->mmio_base = pci_resource_start(pdev, 0);
1613 ctrl->mmio_size = pci_resource_len(pdev, 0);
1614 diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
1615 index cd41045..11d1ab4 100644
1616 --- a/drivers/regulator/aat2870-regulator.c
1617 +++ b/drivers/regulator/aat2870-regulator.c
1618 @@ -159,7 +159,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id)
1619 break;
1620 }
1621
1622 - if (!ri)
1623 + if (i == ARRAY_SIZE(aat2870_regulators))
1624 return NULL;
1625
1626 ri->enable_addr = AAT2870_LDO_EN;
1627 diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
1628 index ee8747f..11cc308 100644
1629 --- a/drivers/regulator/twl-regulator.c
1630 +++ b/drivers/regulator/twl-regulator.c
1631 @@ -71,6 +71,7 @@ struct twlreg_info {
1632 #define VREG_TYPE 1
1633 #define VREG_REMAP 2
1634 #define VREG_DEDICATED 3 /* LDO control */
1635 +#define VREG_VOLTAGE_SMPS_4030 9
1636 /* TWL6030 register offsets */
1637 #define VREG_TRANS 1
1638 #define VREG_STATE 2
1639 @@ -514,6 +515,32 @@ static struct regulator_ops twl4030ldo_ops = {
1640 .get_status = twl4030reg_get_status,
1641 };
1642
1643 +static int
1644 +twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
1645 + unsigned *selector)
1646 +{
1647 + struct twlreg_info *info = rdev_get_drvdata(rdev);
1648 + int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
1649 +
1650 + twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS_4030,
1651 + vsel);
1652 + return 0;
1653 +}
1654 +
1655 +static int twl4030smps_get_voltage(struct regulator_dev *rdev)
1656 +{
1657 + struct twlreg_info *info = rdev_get_drvdata(rdev);
1658 + int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
1659 + VREG_VOLTAGE_SMPS_4030);
1660 +
1661 + return vsel * 12500 + 600000;
1662 +}
1663 +
1664 +static struct regulator_ops twl4030smps_ops = {
1665 + .set_voltage = twl4030smps_set_voltage,
1666 + .get_voltage = twl4030smps_get_voltage,
1667 +};
1668 +
1669 static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
1670 {
1671 struct twlreg_info *info = rdev_get_drvdata(rdev);
1672 @@ -856,6 +883,21 @@ static struct regulator_ops twlsmps_ops = {
1673 }, \
1674 }
1675
1676 +#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
1677 + { \
1678 + .base = offset, \
1679 + .id = num, \
1680 + .delay = turnon_delay, \
1681 + .remap = remap_conf, \
1682 + .desc = { \
1683 + .name = #label, \
1684 + .id = TWL4030_REG_##label, \
1685 + .ops = &twl4030smps_ops, \
1686 + .type = REGULATOR_VOLTAGE, \
1687 + .owner = THIS_MODULE, \
1688 + }, \
1689 + }
1690 +
1691 #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) { \
1692 .base = offset, \
1693 .min_mV = min_mVolts, \
1694 @@ -947,8 +989,8 @@ static struct twlreg_info twl_regs[] = {
1695 TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08),
1696 TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08),
1697 TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08),
1698 - TWL4030_ADJUSTABLE_LDO(VDD1, 0x55, 15, 1000, 0x08),
1699 - TWL4030_ADJUSTABLE_LDO(VDD2, 0x63, 16, 1000, 0x08),
1700 + TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08),
1701 + TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08),
1702 TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08),
1703 TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08),
1704 TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08),
1705 diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
1706 index 01a7df5..b82a155 100644
1707 --- a/drivers/rtc/class.c
1708 +++ b/drivers/rtc/class.c
1709 @@ -66,7 +66,7 @@ static int rtc_suspend(struct device *dev, pm_message_t mesg)
1710 */
1711 delta = timespec_sub(old_system, old_rtc);
1712 delta_delta = timespec_sub(delta, old_delta);
1713 - if (abs(delta_delta.tv_sec) >= 2) {
1714 + if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
1715 /*
1716 * if delta_delta is too large, assume time correction
1717 * has occured and set old_delta to the current delta.
1718 @@ -100,9 +100,8 @@ static int rtc_resume(struct device *dev)
1719 rtc_tm_to_time(&tm, &new_rtc.tv_sec);
1720 new_rtc.tv_nsec = 0;
1721
1722 - if (new_rtc.tv_sec <= old_rtc.tv_sec) {
1723 - if (new_rtc.tv_sec < old_rtc.tv_sec)
1724 - pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
1725 + if (new_rtc.tv_sec < old_rtc.tv_sec) {
1726 + pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
1727 return 0;
1728 }
1729
1730 @@ -119,7 +118,8 @@ static int rtc_resume(struct device *dev)
1731 sleep_time = timespec_sub(sleep_time,
1732 timespec_sub(new_system, old_system));
1733
1734 - timekeeping_inject_sleeptime(&sleep_time);
1735 + if (sleep_time.tv_sec >= 0)
1736 + timekeeping_inject_sleeptime(&sleep_time);
1737 return 0;
1738 }
1739
1740 diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
1741 index 44e91e5..3d9d2b9 100644
1742 --- a/drivers/rtc/interface.c
1743 +++ b/drivers/rtc/interface.c
1744 @@ -318,6 +318,20 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
1745 }
1746 EXPORT_SYMBOL_GPL(rtc_read_alarm);
1747
1748 +static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
1749 +{
1750 + int err;
1751 +
1752 + if (!rtc->ops)
1753 + err = -ENODEV;
1754 + else if (!rtc->ops->set_alarm)
1755 + err = -EINVAL;
1756 + else
1757 + err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
1758 +
1759 + return err;
1760 +}
1761 +
1762 static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
1763 {
1764 struct rtc_time tm;
1765 @@ -341,14 +355,7 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
1766 * over right here, before we set the alarm.
1767 */
1768
1769 - if (!rtc->ops)
1770 - err = -ENODEV;
1771 - else if (!rtc->ops->set_alarm)
1772 - err = -EINVAL;
1773 - else
1774 - err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
1775 -
1776 - return err;
1777 + return ___rtc_set_alarm(rtc, alarm);
1778 }
1779
1780 int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
1781 @@ -762,6 +769,20 @@ static int rtc_timer_enqueue(struct rtc_device *rtc, struct rtc_timer *timer)
1782 return 0;
1783 }
1784
1785 +static void rtc_alarm_disable(struct rtc_device *rtc)
1786 +{
1787 + struct rtc_wkalrm alarm;
1788 + struct rtc_time tm;
1789 +
1790 + __rtc_read_time(rtc, &tm);
1791 +
1792 + alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
1793 + ktime_set(300, 0)));
1794 + alarm.enabled = 0;
1795 +
1796 + ___rtc_set_alarm(rtc, &alarm);
1797 +}
1798 +
1799 /**
1800 * rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
1801 * @rtc rtc device
1802 @@ -783,8 +804,10 @@ static void rtc_timer_remove(struct rtc_device *rtc, struct rtc_timer *timer)
1803 struct rtc_wkalrm alarm;
1804 int err;
1805 next = timerqueue_getnext(&rtc->timerqueue);
1806 - if (!next)
1807 + if (!next) {
1808 + rtc_alarm_disable(rtc);
1809 return;
1810 + }
1811 alarm.time = rtc_ktime_to_tm(next->expires);
1812 alarm.enabled = 1;
1813 err = __rtc_set_alarm(rtc, &alarm);
1814 @@ -846,7 +869,8 @@ again:
1815 err = __rtc_set_alarm(rtc, &alarm);
1816 if (err == -ETIME)
1817 goto again;
1818 - }
1819 + } else
1820 + rtc_alarm_disable(rtc);
1821
1822 mutex_unlock(&rtc->ops_lock);
1823 }
1824 diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
1825 index fafb8c2..c74e867 100644
1826 --- a/drivers/s390/net/qeth_l3_main.c
1827 +++ b/drivers/s390/net/qeth_l3_main.c
1828 @@ -2740,11 +2740,13 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
1829 struct neighbour *n = NULL;
1830 struct dst_entry *dst;
1831
1832 + rcu_read_lock();
1833 dst = skb_dst(skb);
1834 if (dst)
1835 n = dst_get_neighbour(dst);
1836 if (n) {
1837 cast_type = n->type;
1838 + rcu_read_unlock();
1839 if ((cast_type == RTN_BROADCAST) ||
1840 (cast_type == RTN_MULTICAST) ||
1841 (cast_type == RTN_ANYCAST))
1842 @@ -2752,6 +2754,8 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
1843 else
1844 return RTN_UNSPEC;
1845 }
1846 + rcu_read_unlock();
1847 +
1848 /* try something else */
1849 if (skb->protocol == ETH_P_IPV6)
1850 return (skb_network_header(skb)[24] == 0xff) ?
1851 @@ -2807,6 +2811,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
1852 }
1853
1854 hdr->hdr.l3.length = skb->len - sizeof(struct qeth_hdr);
1855 +
1856 + rcu_read_lock();
1857 dst = skb_dst(skb);
1858 if (dst)
1859 n = dst_get_neighbour(dst);
1860 @@ -2853,6 +2859,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
1861 QETH_CAST_UNICAST | QETH_HDR_PASSTHRU;
1862 }
1863 }
1864 + rcu_read_unlock();
1865 }
1866
1867 static inline void qeth_l3_hdr_csum(struct qeth_card *card,
1868 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1869 index b4d43ae..6d219e4 100644
1870 --- a/drivers/scsi/scsi_lib.c
1871 +++ b/drivers/scsi/scsi_lib.c
1872 @@ -1408,6 +1408,8 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
1873
1874 blk_start_request(req);
1875
1876 + scmd_printk(KERN_INFO, cmd, "killing request\n");
1877 +
1878 sdev = cmd->device;
1879 starget = scsi_target(sdev);
1880 shost = sdev->host;
1881 @@ -1489,7 +1491,6 @@ static void scsi_request_fn(struct request_queue *q)
1882 struct request *req;
1883
1884 if (!sdev) {
1885 - printk("scsi: killing requests for dead queue\n");
1886 while ((req = blk_peek_request(q)) != NULL)
1887 scsi_kill_request(req, q);
1888 return;
1889 diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
1890 index e90e3cc..b347cda 100644
1891 --- a/drivers/staging/comedi/comedi_fops.c
1892 +++ b/drivers/staging/comedi/comedi_fops.c
1893 @@ -1432,7 +1432,21 @@ static int do_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
1894 return ret;
1895 }
1896
1897 -static void comedi_unmap(struct vm_area_struct *area)
1898 +
1899 +static void comedi_vm_open(struct vm_area_struct *area)
1900 +{
1901 + struct comedi_async *async;
1902 + struct comedi_device *dev;
1903 +
1904 + async = area->vm_private_data;
1905 + dev = async->subdevice->device;
1906 +
1907 + mutex_lock(&dev->mutex);
1908 + async->mmap_count++;
1909 + mutex_unlock(&dev->mutex);
1910 +}
1911 +
1912 +static void comedi_vm_close(struct vm_area_struct *area)
1913 {
1914 struct comedi_async *async;
1915 struct comedi_device *dev;
1916 @@ -1446,15 +1460,13 @@ static void comedi_unmap(struct vm_area_struct *area)
1917 }
1918
1919 static struct vm_operations_struct comedi_vm_ops = {
1920 - .close = comedi_unmap,
1921 + .open = comedi_vm_open,
1922 + .close = comedi_vm_close,
1923 };
1924
1925 static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
1926 {
1927 const unsigned minor = iminor(file->f_dentry->d_inode);
1928 - struct comedi_device_file_info *dev_file_info =
1929 - comedi_get_device_file_info(minor);
1930 - struct comedi_device *dev = dev_file_info->device;
1931 struct comedi_async *async = NULL;
1932 unsigned long start = vma->vm_start;
1933 unsigned long size;
1934 @@ -1462,6 +1474,15 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
1935 int i;
1936 int retval;
1937 struct comedi_subdevice *s;
1938 + struct comedi_device_file_info *dev_file_info;
1939 + struct comedi_device *dev;
1940 +
1941 + dev_file_info = comedi_get_device_file_info(minor);
1942 + if (dev_file_info == NULL)
1943 + return -ENODEV;
1944 + dev = dev_file_info->device;
1945 + if (dev == NULL)
1946 + return -ENODEV;
1947
1948 mutex_lock(&dev->mutex);
1949 if (!dev->attached) {
1950 @@ -1528,11 +1549,17 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
1951 {
1952 unsigned int mask = 0;
1953 const unsigned minor = iminor(file->f_dentry->d_inode);
1954 - struct comedi_device_file_info *dev_file_info =
1955 - comedi_get_device_file_info(minor);
1956 - struct comedi_device *dev = dev_file_info->device;
1957 struct comedi_subdevice *read_subdev;
1958 struct comedi_subdevice *write_subdev;
1959 + struct comedi_device_file_info *dev_file_info;
1960 + struct comedi_device *dev;
1961 + dev_file_info = comedi_get_device_file_info(minor);
1962 +
1963 + if (dev_file_info == NULL)
1964 + return -ENODEV;
1965 + dev = dev_file_info->device;
1966 + if (dev == NULL)
1967 + return -ENODEV;
1968
1969 mutex_lock(&dev->mutex);
1970 if (!dev->attached) {
1971 @@ -1578,9 +1605,15 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
1972 int n, m, count = 0, retval = 0;
1973 DECLARE_WAITQUEUE(wait, current);
1974 const unsigned minor = iminor(file->f_dentry->d_inode);
1975 - struct comedi_device_file_info *dev_file_info =
1976 - comedi_get_device_file_info(minor);
1977 - struct comedi_device *dev = dev_file_info->device;
1978 + struct comedi_device_file_info *dev_file_info;
1979 + struct comedi_device *dev;
1980 + dev_file_info = comedi_get_device_file_info(minor);
1981 +
1982 + if (dev_file_info == NULL)
1983 + return -ENODEV;
1984 + dev = dev_file_info->device;
1985 + if (dev == NULL)
1986 + return -ENODEV;
1987
1988 if (!dev->attached) {
1989 DPRINTK("no driver configured on comedi%i\n", dev->minor);
1990 @@ -1640,11 +1673,11 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
1991 retval = -EAGAIN;
1992 break;
1993 }
1994 + schedule();
1995 if (signal_pending(current)) {
1996 retval = -ERESTARTSYS;
1997 break;
1998 }
1999 - schedule();
2000 if (!s->busy)
2001 break;
2002 if (s->busy != file) {
2003 @@ -1683,9 +1716,15 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2004 int n, m, count = 0, retval = 0;
2005 DECLARE_WAITQUEUE(wait, current);
2006 const unsigned minor = iminor(file->f_dentry->d_inode);
2007 - struct comedi_device_file_info *dev_file_info =
2008 - comedi_get_device_file_info(minor);
2009 - struct comedi_device *dev = dev_file_info->device;
2010 + struct comedi_device_file_info *dev_file_info;
2011 + struct comedi_device *dev;
2012 + dev_file_info = comedi_get_device_file_info(minor);
2013 +
2014 + if (dev_file_info == NULL)
2015 + return -ENODEV;
2016 + dev = dev_file_info->device;
2017 + if (dev == NULL)
2018 + return -ENODEV;
2019
2020 if (!dev->attached) {
2021 DPRINTK("no driver configured on comedi%i\n", dev->minor);
2022 @@ -1741,11 +1780,11 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
2023 retval = -EAGAIN;
2024 break;
2025 }
2026 + schedule();
2027 if (signal_pending(current)) {
2028 retval = -ERESTARTSYS;
2029 break;
2030 }
2031 - schedule();
2032 if (!s->busy) {
2033 retval = 0;
2034 break;
2035 @@ -1885,11 +1924,17 @@ ok:
2036 static int comedi_close(struct inode *inode, struct file *file)
2037 {
2038 const unsigned minor = iminor(inode);
2039 - struct comedi_device_file_info *dev_file_info =
2040 - comedi_get_device_file_info(minor);
2041 - struct comedi_device *dev = dev_file_info->device;
2042 struct comedi_subdevice *s = NULL;
2043 int i;
2044 + struct comedi_device_file_info *dev_file_info;
2045 + struct comedi_device *dev;
2046 + dev_file_info = comedi_get_device_file_info(minor);
2047 +
2048 + if (dev_file_info == NULL)
2049 + return -ENODEV;
2050 + dev = dev_file_info->device;
2051 + if (dev == NULL)
2052 + return -ENODEV;
2053
2054 mutex_lock(&dev->mutex);
2055
2056 @@ -1923,10 +1968,15 @@ static int comedi_close(struct inode *inode, struct file *file)
2057 static int comedi_fasync(int fd, struct file *file, int on)
2058 {
2059 const unsigned minor = iminor(file->f_dentry->d_inode);
2060 - struct comedi_device_file_info *dev_file_info =
2061 - comedi_get_device_file_info(minor);
2062 + struct comedi_device_file_info *dev_file_info;
2063 + struct comedi_device *dev;
2064 + dev_file_info = comedi_get_device_file_info(minor);
2065
2066 - struct comedi_device *dev = dev_file_info->device;
2067 + if (dev_file_info == NULL)
2068 + return -ENODEV;
2069 + dev = dev_file_info->device;
2070 + if (dev == NULL)
2071 + return -ENODEV;
2072
2073 return fasync_helper(fd, file, on, &dev->async_queue);
2074 }
2075 diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
2076 index 16c73fb..890e6cc 100644
2077 --- a/drivers/staging/rts_pstor/rtsx.c
2078 +++ b/drivers/staging/rts_pstor/rtsx.c
2079 @@ -1015,6 +1015,7 @@ static int __devinit rtsx_probe(struct pci_dev *pci, const struct pci_device_id
2080 th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
2081 if (IS_ERR(th)) {
2082 printk(KERN_ERR "Unable to start the device-scanning thread\n");
2083 + complete(&dev->scanning_done);
2084 quiesce_and_remove_host(dev);
2085 err = PTR_ERR(th);
2086 goto errout;
2087 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
2088 index 09c44ab..3872b8c 100644
2089 --- a/drivers/staging/usbip/vhci_rx.c
2090 +++ b/drivers/staging/usbip/vhci_rx.c
2091 @@ -68,6 +68,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
2092 {
2093 struct usbip_device *ud = &vdev->ud;
2094 struct urb *urb;
2095 + unsigned long flags;
2096
2097 spin_lock(&vdev->priv_lock);
2098 urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
2099 @@ -101,9 +102,9 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
2100
2101 usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
2102
2103 - spin_lock(&the_controller->lock);
2104 + spin_lock_irqsave(&the_controller->lock, flags);
2105 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
2106 - spin_unlock(&the_controller->lock);
2107 + spin_unlock_irqrestore(&the_controller->lock, flags);
2108
2109 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
2110
2111 @@ -141,6 +142,7 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
2112 {
2113 struct vhci_unlink *unlink;
2114 struct urb *urb;
2115 + unsigned long flags;
2116
2117 usbip_dump_header(pdu);
2118
2119 @@ -170,9 +172,9 @@ static void vhci_recv_ret_unlink(struct vhci_device *vdev,
2120 urb->status = pdu->u.ret_unlink.status;
2121 pr_info("urb->status %d\n", urb->status);
2122
2123 - spin_lock(&the_controller->lock);
2124 + spin_lock_irqsave(&the_controller->lock, flags);
2125 usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
2126 - spin_unlock(&the_controller->lock);
2127 + spin_unlock_irqrestore(&the_controller->lock, flags);
2128
2129 usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
2130 urb->status);
2131 diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
2132 index 29bec34..c787af2 100644
2133 --- a/drivers/usb/host/ehci-sched.c
2134 +++ b/drivers/usb/host/ehci-sched.c
2135 @@ -1476,30 +1476,36 @@ iso_stream_schedule (
2136 * jump until after the queue is primed.
2137 */
2138 else {
2139 + int done = 0;
2140 start = SCHEDULE_SLOP + (now & ~0x07);
2141
2142 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
2143
2144 - /* find a uframe slot with enough bandwidth */
2145 - next = start + period;
2146 - for (; start < next; start++) {
2147 -
2148 + /* find a uframe slot with enough bandwidth.
2149 + * Early uframes are more precious because full-speed
2150 + * iso IN transfers can't use late uframes,
2151 + * and therefore they should be allocated last.
2152 + */
2153 + next = start;
2154 + start += period;
2155 + do {
2156 + start--;
2157 /* check schedule: enough space? */
2158 if (stream->highspeed) {
2159 if (itd_slot_ok(ehci, mod, start,
2160 stream->usecs, period))
2161 - break;
2162 + done = 1;
2163 } else {
2164 if ((start % 8) >= 6)
2165 continue;
2166 if (sitd_slot_ok(ehci, mod, stream,
2167 start, sched, period))
2168 - break;
2169 + done = 1;
2170 }
2171 - }
2172 + } while (start > next && !done);
2173
2174 /* no room in the schedule */
2175 - if (start == next) {
2176 + if (!done) {
2177 ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
2178 urb, now, now + mod);
2179 status = -ENOSPC;
2180 diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
2181 index d6e1754..a403b53 100644
2182 --- a/drivers/usb/host/whci/qset.c
2183 +++ b/drivers/usb/host/whci/qset.c
2184 @@ -124,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
2185 {
2186 qset->td_start = qset->td_end = qset->ntds = 0;
2187
2188 - qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
2189 + qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
2190 qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
2191 qset->qh.err_count = 0;
2192 qset->qh.scratch[0] = 0;
2193 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
2194 index 67900ff..10dc1bd 100644
2195 --- a/drivers/usb/host/xhci.c
2196 +++ b/drivers/usb/host/xhci.c
2197 @@ -657,7 +657,10 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
2198 ring = xhci->cmd_ring;
2199 seg = ring->deq_seg;
2200 do {
2201 - memset(seg->trbs, 0, SEGMENT_SIZE);
2202 + memset(seg->trbs, 0,
2203 + sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
2204 + seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
2205 + cpu_to_le32(~TRB_CYCLE);
2206 seg = seg->next;
2207 } while (seg != ring->deq_seg);
2208
2209 diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
2210 index 20a2873..a057a5a 100644
2211 --- a/drivers/usb/musb/musb_core.c
2212 +++ b/drivers/usb/musb/musb_core.c
2213 @@ -2302,18 +2302,12 @@ static int musb_suspend(struct device *dev)
2214 */
2215 }
2216
2217 - musb_save_context(musb);
2218 -
2219 spin_unlock_irqrestore(&musb->lock, flags);
2220 return 0;
2221 }
2222
2223 static int musb_resume_noirq(struct device *dev)
2224 {
2225 - struct musb *musb = dev_to_musb(dev);
2226 -
2227 - musb_restore_context(musb);
2228 -
2229 /* for static cmos like DaVinci, register values were preserved
2230 * unless for some reason the whole soc powered down or the USB
2231 * module got reset through the PSC (vs just being disabled).
2232 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
2233 index e16394c..bbd2cdd 100644
2234 --- a/drivers/usb/serial/ftdi_sio.c
2235 +++ b/drivers/usb/serial/ftdi_sio.c
2236 @@ -735,6 +735,7 @@ static struct usb_device_id id_table_combined [] = {
2237 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
2238 { USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
2239 { USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
2240 + { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
2241 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
2242 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
2243 { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
2244 diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
2245 index 571fa96..055b64e 100644
2246 --- a/drivers/usb/serial/ftdi_sio_ids.h
2247 +++ b/drivers/usb/serial/ftdi_sio_ids.h
2248 @@ -112,6 +112,7 @@
2249
2250 /* Propox devices */
2251 #define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
2252 +#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
2253
2254 /* Lenz LI-USB Computer Interface. */
2255 #define FTDI_LENZ_LIUSB_PID 0xD780
2256 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
2257 index 3a47cbe..e98a1e1 100644
2258 --- a/drivers/usb/serial/option.c
2259 +++ b/drivers/usb/serial/option.c
2260 @@ -657,6 +657,9 @@ static const struct usb_device_id option_ids[] = {
2261 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
2262 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
2263 { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
2264 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
2265 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
2266 + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x08) },
2267 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
2268 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
2269 { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
2270 @@ -743,6 +746,7 @@ static const struct usb_device_id option_ids[] = {
2271 { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
2272 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
2273 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
2274 + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
2275 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
2276 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
2277 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
2278 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
2279 index 3041a97..24caba7 100644
2280 --- a/drivers/usb/storage/unusual_devs.h
2281 +++ b/drivers/usb/storage/unusual_devs.h
2282 @@ -1854,6 +1854,13 @@ UNUSUAL_DEV( 0x1370, 0x6828, 0x0110, 0x0110,
2283 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2284 US_FL_IGNORE_RESIDUE ),
2285
2286 +/* Reported by Qinglin Ye <yestyle@gmail.com> */
2287 +UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
2288 + "Kingston",
2289 + "DT 101 G2",
2290 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2291 + US_FL_BULK_IGNORE_TAG ),
2292 +
2293 /* Reported by Francesco Foresti <frafore@tiscali.it> */
2294 UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
2295 "Super Top",
2296 diff --git a/drivers/video/via/share.h b/drivers/video/via/share.h
2297 index 61b0bd5..1603023 100644
2298 --- a/drivers/video/via/share.h
2299 +++ b/drivers/video/via/share.h
2300 @@ -557,8 +557,8 @@
2301 #define M1200X720_R60_VSP POSITIVE
2302
2303 /* 1200x900@60 Sync Polarity (DCON) */
2304 -#define M1200X900_R60_HSP NEGATIVE
2305 -#define M1200X900_R60_VSP NEGATIVE
2306 +#define M1200X900_R60_HSP POSITIVE
2307 +#define M1200X900_R60_VSP POSITIVE
2308
2309 /* 1280x600@60 Sync Polarity (GTF Mode) */
2310 #define M1280x600_R60_HSP NEGATIVE
2311 diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
2312 index 58609bd..2a83425 100644
2313 --- a/fs/ecryptfs/crypto.c
2314 +++ b/fs/ecryptfs/crypto.c
2315 @@ -967,7 +967,7 @@ static void ecryptfs_set_default_crypt_stat_vals(
2316
2317 /**
2318 * ecryptfs_new_file_context
2319 - * @ecryptfs_dentry: The eCryptfs dentry
2320 + * @ecryptfs_inode: The eCryptfs inode
2321 *
2322 * If the crypto context for the file has not yet been established,
2323 * this is where we do that. Establishing a new crypto context
2324 @@ -984,13 +984,13 @@ static void ecryptfs_set_default_crypt_stat_vals(
2325 *
2326 * Returns zero on success; non-zero otherwise
2327 */
2328 -int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry)
2329 +int ecryptfs_new_file_context(struct inode *ecryptfs_inode)
2330 {
2331 struct ecryptfs_crypt_stat *crypt_stat =
2332 - &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
2333 + &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
2334 struct ecryptfs_mount_crypt_stat *mount_crypt_stat =
2335 &ecryptfs_superblock_to_private(
2336 - ecryptfs_dentry->d_sb)->mount_crypt_stat;
2337 + ecryptfs_inode->i_sb)->mount_crypt_stat;
2338 int cipher_name_len;
2339 int rc = 0;
2340
2341 @@ -1299,12 +1299,12 @@ static int ecryptfs_write_headers_virt(char *page_virt, size_t max,
2342 }
2343
2344 static int
2345 -ecryptfs_write_metadata_to_contents(struct dentry *ecryptfs_dentry,
2346 +ecryptfs_write_metadata_to_contents(struct inode *ecryptfs_inode,
2347 char *virt, size_t virt_len)
2348 {
2349 int rc;
2350
2351 - rc = ecryptfs_write_lower(ecryptfs_dentry->d_inode, virt,
2352 + rc = ecryptfs_write_lower(ecryptfs_inode, virt,
2353 0, virt_len);
2354 if (rc < 0)
2355 printk(KERN_ERR "%s: Error attempting to write header "
2356 @@ -1338,7 +1338,8 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
2357
2358 /**
2359 * ecryptfs_write_metadata
2360 - * @ecryptfs_dentry: The eCryptfs dentry
2361 + * @ecryptfs_dentry: The eCryptfs dentry, which should be negative
2362 + * @ecryptfs_inode: The newly created eCryptfs inode
2363 *
2364 * Write the file headers out. This will likely involve a userspace
2365 * callout, in which the session key is encrypted with one or more
2366 @@ -1348,10 +1349,11 @@ static unsigned long ecryptfs_get_zeroed_pages(gfp_t gfp_mask,
2367 *
2368 * Returns zero on success; non-zero on error
2369 */
2370 -int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
2371 +int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
2372 + struct inode *ecryptfs_inode)
2373 {
2374 struct ecryptfs_crypt_stat *crypt_stat =
2375 - &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
2376 + &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
2377 unsigned int order;
2378 char *virt;
2379 size_t virt_len;
2380 @@ -1391,7 +1393,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry)
2381 rc = ecryptfs_write_metadata_to_xattr(ecryptfs_dentry, virt,
2382 size);
2383 else
2384 - rc = ecryptfs_write_metadata_to_contents(ecryptfs_dentry, virt,
2385 + rc = ecryptfs_write_metadata_to_contents(ecryptfs_inode, virt,
2386 virt_len);
2387 if (rc) {
2388 printk(KERN_ERR "%s: Error writing metadata out to lower file; "
2389 @@ -1943,7 +1945,7 @@ static unsigned char *portable_filename_chars = ("-.0123456789ABCD"
2390
2391 /* We could either offset on every reverse map or just pad some 0x00's
2392 * at the front here */
2393 -static const unsigned char filename_rev_map[] = {
2394 +static const unsigned char filename_rev_map[256] = {
2395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 7 */
2396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 15 */
2397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 23 */
2398 @@ -1959,7 +1961,7 @@ static const unsigned char filename_rev_map[] = {
2399 0x00, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, /* 103 */
2400 0x2D, 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0x34, /* 111 */
2401 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x3B, 0x3C, /* 119 */
2402 - 0x3D, 0x3E, 0x3F
2403 + 0x3D, 0x3E, 0x3F /* 123 - 255 initialized to 0x00 */
2404 };
2405
2406 /**
2407 diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
2408 index b36c557..9ce1e92 100644
2409 --- a/fs/ecryptfs/ecryptfs_kernel.h
2410 +++ b/fs/ecryptfs/ecryptfs_kernel.h
2411 @@ -584,9 +584,10 @@ int ecryptfs_init_crypt_ctx(struct ecryptfs_crypt_stat *crypt_stat);
2412 int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode);
2413 int ecryptfs_encrypt_page(struct page *page);
2414 int ecryptfs_decrypt_page(struct page *page);
2415 -int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry);
2416 +int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry,
2417 + struct inode *ecryptfs_inode);
2418 int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry);
2419 -int ecryptfs_new_file_context(struct dentry *ecryptfs_dentry);
2420 +int ecryptfs_new_file_context(struct inode *ecryptfs_inode);
2421 void ecryptfs_write_crypt_stat_flags(char *page_virt,
2422 struct ecryptfs_crypt_stat *crypt_stat,
2423 size_t *written);
2424 diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
2425 index c6ac98c..d3f95f9 100644
2426 --- a/fs/ecryptfs/file.c
2427 +++ b/fs/ecryptfs/file.c
2428 @@ -139,6 +139,27 @@ out:
2429 return rc;
2430 }
2431
2432 +static void ecryptfs_vma_close(struct vm_area_struct *vma)
2433 +{
2434 + filemap_write_and_wait(vma->vm_file->f_mapping);
2435 +}
2436 +
2437 +static const struct vm_operations_struct ecryptfs_file_vm_ops = {
2438 + .close = ecryptfs_vma_close,
2439 + .fault = filemap_fault,
2440 +};
2441 +
2442 +static int ecryptfs_file_mmap(struct file *file, struct vm_area_struct *vma)
2443 +{
2444 + int rc;
2445 +
2446 + rc = generic_file_mmap(file, vma);
2447 + if (!rc)
2448 + vma->vm_ops = &ecryptfs_file_vm_ops;
2449 +
2450 + return rc;
2451 +}
2452 +
2453 struct kmem_cache *ecryptfs_file_info_cache;
2454
2455 /**
2456 @@ -349,7 +370,7 @@ const struct file_operations ecryptfs_main_fops = {
2457 #ifdef CONFIG_COMPAT
2458 .compat_ioctl = ecryptfs_compat_ioctl,
2459 #endif
2460 - .mmap = generic_file_mmap,
2461 + .mmap = ecryptfs_file_mmap,
2462 .open = ecryptfs_open,
2463 .flush = ecryptfs_flush,
2464 .release = ecryptfs_release,
2465 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
2466 index 11f8582..528da01 100644
2467 --- a/fs/ecryptfs/inode.c
2468 +++ b/fs/ecryptfs/inode.c
2469 @@ -172,22 +172,23 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
2470 * it. It will also update the eCryptfs directory inode to mimic the
2471 * stat of the lower directory inode.
2472 *
2473 - * Returns zero on success; non-zero on error condition
2474 + * Returns the new eCryptfs inode on success; an ERR_PTR on error condition
2475 */
2476 -static int
2477 +static struct inode *
2478 ecryptfs_do_create(struct inode *directory_inode,
2479 struct dentry *ecryptfs_dentry, int mode)
2480 {
2481 int rc;
2482 struct dentry *lower_dentry;
2483 struct dentry *lower_dir_dentry;
2484 + struct inode *inode;
2485
2486 lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
2487 lower_dir_dentry = lock_parent(lower_dentry);
2488 if (IS_ERR(lower_dir_dentry)) {
2489 ecryptfs_printk(KERN_ERR, "Error locking directory of "
2490 "dentry\n");
2491 - rc = PTR_ERR(lower_dir_dentry);
2492 + inode = ERR_CAST(lower_dir_dentry);
2493 goto out;
2494 }
2495 rc = ecryptfs_create_underlying_file(lower_dir_dentry->d_inode,
2496 @@ -195,20 +196,19 @@ ecryptfs_do_create(struct inode *directory_inode,
2497 if (rc) {
2498 printk(KERN_ERR "%s: Failure to create dentry in lower fs; "
2499 "rc = [%d]\n", __func__, rc);
2500 + inode = ERR_PTR(rc);
2501 goto out_lock;
2502 }
2503 - rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
2504 - directory_inode->i_sb);
2505 - if (rc) {
2506 - ecryptfs_printk(KERN_ERR, "Failure in ecryptfs_interpose\n");
2507 + inode = __ecryptfs_get_inode(lower_dentry->d_inode,
2508 + directory_inode->i_sb);
2509 + if (IS_ERR(inode))
2510 goto out_lock;
2511 - }
2512 fsstack_copy_attr_times(directory_inode, lower_dir_dentry->d_inode);
2513 fsstack_copy_inode_size(directory_inode, lower_dir_dentry->d_inode);
2514 out_lock:
2515 unlock_dir(lower_dir_dentry);
2516 out:
2517 - return rc;
2518 + return inode;
2519 }
2520
2521 /**
2522 @@ -219,26 +219,26 @@ out:
2523 *
2524 * Returns zero on success
2525 */
2526 -static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
2527 +static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry,
2528 + struct inode *ecryptfs_inode)
2529 {
2530 struct ecryptfs_crypt_stat *crypt_stat =
2531 - &ecryptfs_inode_to_private(ecryptfs_dentry->d_inode)->crypt_stat;
2532 + &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
2533 int rc = 0;
2534
2535 - if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
2536 + if (S_ISDIR(ecryptfs_inode->i_mode)) {
2537 ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
2538 crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
2539 goto out;
2540 }
2541 ecryptfs_printk(KERN_DEBUG, "Initializing crypto context\n");
2542 - rc = ecryptfs_new_file_context(ecryptfs_dentry);
2543 + rc = ecryptfs_new_file_context(ecryptfs_inode);
2544 if (rc) {
2545 ecryptfs_printk(KERN_ERR, "Error creating new file "
2546 "context; rc = [%d]\n", rc);
2547 goto out;
2548 }
2549 - rc = ecryptfs_get_lower_file(ecryptfs_dentry,
2550 - ecryptfs_dentry->d_inode);
2551 + rc = ecryptfs_get_lower_file(ecryptfs_dentry, ecryptfs_inode);
2552 if (rc) {
2553 printk(KERN_ERR "%s: Error attempting to initialize "
2554 "the lower file for the dentry with name "
2555 @@ -246,10 +246,10 @@ static int ecryptfs_initialize_file(struct dentry *ecryptfs_dentry)
2556 ecryptfs_dentry->d_name.name, rc);
2557 goto out;
2558 }
2559 - rc = ecryptfs_write_metadata(ecryptfs_dentry);
2560 + rc = ecryptfs_write_metadata(ecryptfs_dentry, ecryptfs_inode);
2561 if (rc)
2562 printk(KERN_ERR "Error writing headers; rc = [%d]\n", rc);
2563 - ecryptfs_put_lower_file(ecryptfs_dentry->d_inode);
2564 + ecryptfs_put_lower_file(ecryptfs_inode);
2565 out:
2566 return rc;
2567 }
2568 @@ -269,18 +269,28 @@ static int
2569 ecryptfs_create(struct inode *directory_inode, struct dentry *ecryptfs_dentry,
2570 int mode, struct nameidata *nd)
2571 {
2572 + struct inode *ecryptfs_inode;
2573 int rc;
2574
2575 - /* ecryptfs_do_create() calls ecryptfs_interpose() */
2576 - rc = ecryptfs_do_create(directory_inode, ecryptfs_dentry, mode);
2577 - if (unlikely(rc)) {
2578 + ecryptfs_inode = ecryptfs_do_create(directory_inode, ecryptfs_dentry,
2579 + mode);
2580 + if (unlikely(IS_ERR(ecryptfs_inode))) {
2581 ecryptfs_printk(KERN_WARNING, "Failed to create file in"
2582 "lower filesystem\n");
2583 + rc = PTR_ERR(ecryptfs_inode);
2584 goto out;
2585 }
2586 /* At this point, a file exists on "disk"; we need to make sure
2587 * that this on disk file is prepared to be an ecryptfs file */
2588 - rc = ecryptfs_initialize_file(ecryptfs_dentry);
2589 + rc = ecryptfs_initialize_file(ecryptfs_dentry, ecryptfs_inode);
2590 + if (rc) {
2591 + drop_nlink(ecryptfs_inode);
2592 + unlock_new_inode(ecryptfs_inode);
2593 + iput(ecryptfs_inode);
2594 + goto out;
2595 + }
2596 + d_instantiate(ecryptfs_dentry, ecryptfs_inode);
2597 + unlock_new_inode(ecryptfs_inode);
2598 out:
2599 return rc;
2600 }
2601 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
2602 index 89c47f4..b644b9c 100644
2603 --- a/fs/ext4/inode.c
2604 +++ b/fs/ext4/inode.c
2605 @@ -2656,8 +2656,8 @@ out:
2606 spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2607
2608 /* queue the work to convert unwritten extents to written */
2609 - queue_work(wq, &io_end->work);
2610 iocb->private = NULL;
2611 + queue_work(wq, &io_end->work);
2612
2613 /* XXX: probably should move into the real I/O completion handler */
2614 inode_dio_done(inode);
2615 diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
2616 index 620972b..8e8b06b 100644
2617 --- a/fs/xfs/xfs_buf.h
2618 +++ b/fs/xfs/xfs_buf.h
2619 @@ -320,7 +320,6 @@ extern struct list_head *xfs_get_buftarg_list(void);
2620 #define xfs_getsize_buftarg(buftarg) block_size((buftarg)->bt_bdev)
2621 #define xfs_readonly_buftarg(buftarg) bdev_read_only((buftarg)->bt_bdev)
2622
2623 -#define xfs_binval(buftarg) xfs_flush_buftarg(buftarg, 1)
2624 #define XFS_bflush(buftarg) xfs_flush_buftarg(buftarg, 1)
2625
2626 #endif /* __XFS_BUF_H__ */
2627 diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
2628 index 7f7b424..b7e75c6 100644
2629 --- a/fs/xfs/xfs_file.c
2630 +++ b/fs/xfs/xfs_file.c
2631 @@ -317,7 +317,19 @@ xfs_file_aio_read(
2632 if (XFS_FORCED_SHUTDOWN(mp))
2633 return -EIO;
2634
2635 - if (unlikely(ioflags & IO_ISDIRECT)) {
2636 + /*
2637 + * Locking is a bit tricky here. If we take an exclusive lock
2638 + * for direct IO, we effectively serialise all new concurrent
2639 + * read IO to this file and block it behind IO that is currently in
2640 + * progress because IO in progress holds the IO lock shared. We only
2641 + * need to hold the lock exclusive to blow away the page cache, so
2642 + * only take lock exclusively if the page cache needs invalidation.
2643 + * This allows the normal direct IO case of no page cache pages to
2644 + * proceed concurrently without serialisation.
2645 + */
2646 + xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
2647 + if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
2648 + xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
2649 xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);
2650
2651 if (inode->i_mapping->nrpages) {
2652 @@ -330,8 +342,7 @@ xfs_file_aio_read(
2653 }
2654 }
2655 xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
2656 - } else
2657 - xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
2658 + }
2659
2660 trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);
2661
2662 @@ -666,6 +677,7 @@ xfs_file_aio_write_checks(
2663 xfs_fsize_t new_size;
2664 int error = 0;
2665
2666 + xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
2667 error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
2668 if (error) {
2669 xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
2670 @@ -757,14 +769,24 @@ xfs_file_dio_aio_write(
2671 *iolock = XFS_IOLOCK_EXCL;
2672 else
2673 *iolock = XFS_IOLOCK_SHARED;
2674 - xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
2675 + xfs_rw_ilock(ip, *iolock);
2676
2677 ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
2678 if (ret)
2679 return ret;
2680
2681 + /*
2682 + * Recheck if there are cached pages that need invalidate after we got
2683 + * the iolock to protect against other threads adding new pages while
2684 + * we were waiting for the iolock.
2685 + */
2686 + if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
2687 + xfs_rw_iunlock(ip, *iolock);
2688 + *iolock = XFS_IOLOCK_EXCL;
2689 + xfs_rw_ilock(ip, *iolock);
2690 + }
2691 +
2692 if (mapping->nrpages) {
2693 - WARN_ON(*iolock != XFS_IOLOCK_EXCL);
2694 ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
2695 FI_REMAPF_LOCKED);
2696 if (ret)
2697 @@ -809,7 +831,7 @@ xfs_file_buffered_aio_write(
2698 size_t count = ocount;
2699
2700 *iolock = XFS_IOLOCK_EXCL;
2701 - xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
2702 + xfs_rw_ilock(ip, *iolock);
2703
2704 ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
2705 if (ret)
2706 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
2707 index 673704f..474920b 100644
2708 --- a/fs/xfs/xfs_iops.c
2709 +++ b/fs/xfs/xfs_iops.c
2710 @@ -465,7 +465,7 @@ xfs_vn_getattr(
2711 trace_xfs_getattr(ip);
2712
2713 if (XFS_FORCED_SHUTDOWN(mp))
2714 - return XFS_ERROR(EIO);
2715 + return -XFS_ERROR(EIO);
2716
2717 stat->size = XFS_ISIZE(ip);
2718 stat->dev = inode->i_sb->s_dev;
2719 diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
2720 index 0081657..d4d5775 100644
2721 --- a/fs/xfs/xfs_mount.c
2722 +++ b/fs/xfs/xfs_mount.c
2723 @@ -44,9 +44,6 @@
2724 #include "xfs_trace.h"
2725
2726
2727 -STATIC void xfs_unmountfs_wait(xfs_mount_t *);
2728 -
2729 -
2730 #ifdef HAVE_PERCPU_SB
2731 STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
2732 int);
2733 @@ -1496,11 +1493,6 @@ xfs_unmountfs(
2734 */
2735 xfs_log_force(mp, XFS_LOG_SYNC);
2736
2737 - xfs_binval(mp->m_ddev_targp);
2738 - if (mp->m_rtdev_targp) {
2739 - xfs_binval(mp->m_rtdev_targp);
2740 - }
2741 -
2742 /*
2743 * Unreserve any blocks we have so that when we unmount we don't account
2744 * the reserved free space as used. This is really only necessary for
2745 @@ -1526,7 +1518,16 @@ xfs_unmountfs(
2746 xfs_warn(mp, "Unable to update superblock counters. "
2747 "Freespace may not be correct on next mount.");
2748 xfs_unmountfs_writesb(mp);
2749 - xfs_unmountfs_wait(mp); /* wait for async bufs */
2750 +
2751 + /*
2752 + * Make sure all buffers have been flushed and completed before
2753 + * unmounting the log.
2754 + */
2755 + error = xfs_flush_buftarg(mp->m_ddev_targp, 1);
2756 + if (error)
2757 + xfs_warn(mp, "%d busy buffers during unmount.", error);
2758 + xfs_wait_buftarg(mp->m_ddev_targp);
2759 +
2760 xfs_log_unmount_write(mp);
2761 xfs_log_unmount(mp);
2762 xfs_uuid_unmount(mp);
2763 @@ -1537,16 +1538,6 @@ xfs_unmountfs(
2764 xfs_free_perag(mp);
2765 }
2766
2767 -STATIC void
2768 -xfs_unmountfs_wait(xfs_mount_t *mp)
2769 -{
2770 - if (mp->m_logdev_targp != mp->m_ddev_targp)
2771 - xfs_wait_buftarg(mp->m_logdev_targp);
2772 - if (mp->m_rtdev_targp)
2773 - xfs_wait_buftarg(mp->m_rtdev_targp);
2774 - xfs_wait_buftarg(mp->m_ddev_targp);
2775 -}
2776 -
2777 int
2778 xfs_fs_writable(xfs_mount_t *mp)
2779 {
2780 diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
2781 index 9a0aa76..95ba6dc 100644
2782 --- a/fs/xfs/xfs_qm.c
2783 +++ b/fs/xfs/xfs_qm.c
2784 @@ -674,7 +674,8 @@ xfs_qm_dqattach_one(
2785 * disk and we didn't ask it to allocate;
2786 * ESRCH if quotas got turned off suddenly.
2787 */
2788 - error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
2789 + error = xfs_qm_dqget(ip->i_mount, ip, id, type,
2790 + doalloc | XFS_QMOPT_DOWARN, &dqp);
2791 if (error)
2792 return error;
2793
2794 diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
2795 index 51fc429..b9e2873 100644
2796 --- a/fs/xfs/xfs_vnodeops.c
2797 +++ b/fs/xfs/xfs_vnodeops.c
2798 @@ -113,7 +113,7 @@ xfs_readlink(
2799 char *link)
2800 {
2801 xfs_mount_t *mp = ip->i_mount;
2802 - int pathlen;
2803 + xfs_fsize_t pathlen;
2804 int error = 0;
2805
2806 trace_xfs_readlink(ip);
2807 @@ -123,13 +123,19 @@ xfs_readlink(
2808
2809 xfs_ilock(ip, XFS_ILOCK_SHARED);
2810
2811 - ASSERT(S_ISLNK(ip->i_d.di_mode));
2812 - ASSERT(ip->i_d.di_size <= MAXPATHLEN);
2813 -
2814 pathlen = ip->i_d.di_size;
2815 if (!pathlen)
2816 goto out;
2817
2818 + if (pathlen < 0 || pathlen > MAXPATHLEN) {
2819 + xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
2820 + __func__, (unsigned long long) ip->i_ino,
2821 + (long long) pathlen);
2822 + ASSERT(0);
2823 + return XFS_ERROR(EFSCORRUPTED);
2824 + }
2825 +
2826 +
2827 if (ip->i_df.if_flags & XFS_IFINLINE) {
2828 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
2829 link[pathlen] = '\0';
2830 diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
2831 index c4961ea..53dfa109 100644
2832 --- a/include/drm/drm_mode.h
2833 +++ b/include/drm/drm_mode.h
2834 @@ -233,6 +233,8 @@ struct drm_mode_fb_cmd {
2835 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
2836 #define DRM_MODE_FB_DIRTY_FLAGS 0x03
2837
2838 +#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256
2839 +
2840 /*
2841 * Mark a region of a framebuffer as dirty.
2842 *
2843 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
2844 index f81676f..4e4fbb8 100644
2845 --- a/include/drm/drm_pciids.h
2846 +++ b/include/drm/drm_pciids.h
2847 @@ -197,6 +197,14 @@
2848 {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2849 {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2850 {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2851 + {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2852 + {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2853 + {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2854 + {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2855 + {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2856 + {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2857 + {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2858 + {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2859 {0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2860 {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
2861 {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
2862 diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
2863 index 139c4db..c86c940 100644
2864 --- a/include/linux/clocksource.h
2865 +++ b/include/linux/clocksource.h
2866 @@ -156,6 +156,7 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
2867 * @mult: cycle to nanosecond multiplier
2868 * @shift: cycle to nanosecond divisor (power of two)
2869 * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
2870 + * @maxadj: maximum adjustment value to mult (~11%)
2871 * @flags: flags describing special properties
2872 * @archdata: arch-specific data
2873 * @suspend: suspend function for the clocksource, if necessary
2874 @@ -172,7 +173,7 @@ struct clocksource {
2875 u32 mult;
2876 u32 shift;
2877 u64 max_idle_ns;
2878 -
2879 + u32 maxadj;
2880 #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
2881 struct arch_clocksource_data archdata;
2882 #endif
2883 diff --git a/include/linux/sigma.h b/include/linux/sigma.h
2884 index e2accb3..d0de882 100644
2885 --- a/include/linux/sigma.h
2886 +++ b/include/linux/sigma.h
2887 @@ -24,7 +24,7 @@ struct sigma_firmware {
2888 struct sigma_firmware_header {
2889 unsigned char magic[7];
2890 u8 version;
2891 - u32 crc;
2892 + __le32 crc;
2893 };
2894
2895 enum {
2896 @@ -40,19 +40,14 @@ enum {
2897 struct sigma_action {
2898 u8 instr;
2899 u8 len_hi;
2900 - u16 len;
2901 - u16 addr;
2902 + __le16 len;
2903 + __be16 addr;
2904 unsigned char payload[];
2905 };
2906
2907 static inline u32 sigma_action_len(struct sigma_action *sa)
2908 {
2909 - return (sa->len_hi << 16) | sa->len;
2910 -}
2911 -
2912 -static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
2913 -{
2914 - return sizeof(*sa) + payload_len + (payload_len % 2);
2915 + return (sa->len_hi << 16) | le16_to_cpu(sa->len);
2916 }
2917
2918 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
2919 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
2920 index 78c83e6..e9ff3fc 100644
2921 --- a/include/net/inetpeer.h
2922 +++ b/include/net/inetpeer.h
2923 @@ -35,6 +35,7 @@ struct inet_peer {
2924
2925 u32 metrics[RTAX_MAX];
2926 u32 rate_tokens; /* rate limiting for ICMP */
2927 + int redirect_genid;
2928 unsigned long rate_last;
2929 unsigned long pmtu_expires;
2930 u32 pmtu_orig;
2931 diff --git a/include/net/red.h b/include/net/red.h
2932 index 3319f16..b72a3b8 100644
2933 --- a/include/net/red.h
2934 +++ b/include/net/red.h
2935 @@ -116,7 +116,7 @@ struct red_parms {
2936 u32 qR; /* Cached random number */
2937
2938 unsigned long qavg; /* Average queue length: A scaled */
2939 - psched_time_t qidlestart; /* Start of current idle period */
2940 + ktime_t qidlestart; /* Start of current idle period */
2941 };
2942
2943 static inline u32 red_rmask(u8 Plog)
2944 @@ -148,17 +148,17 @@ static inline void red_set_parms(struct red_parms *p,
2945
2946 static inline int red_is_idling(struct red_parms *p)
2947 {
2948 - return p->qidlestart != PSCHED_PASTPERFECT;
2949 + return p->qidlestart.tv64 != 0;
2950 }
2951
2952 static inline void red_start_of_idle_period(struct red_parms *p)
2953 {
2954 - p->qidlestart = psched_get_time();
2955 + p->qidlestart = ktime_get();
2956 }
2957
2958 static inline void red_end_of_idle_period(struct red_parms *p)
2959 {
2960 - p->qidlestart = PSCHED_PASTPERFECT;
2961 + p->qidlestart.tv64 = 0;
2962 }
2963
2964 static inline void red_restart(struct red_parms *p)
2965 @@ -170,13 +170,10 @@ static inline void red_restart(struct red_parms *p)
2966
2967 static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
2968 {
2969 - psched_time_t now;
2970 - long us_idle;
2971 + s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
2972 + long us_idle = min_t(s64, delta, p->Scell_max);
2973 int shift;
2974
2975 - now = psched_get_time();
2976 - us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
2977 -
2978 /*
2979 * The problem: ideally, average length queue recalcultion should
2980 * be done over constant clock intervals. This is too expensive, so
2981 diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
2982 index e691818..a3f638a 100644
2983 --- a/kernel/cgroup_freezer.c
2984 +++ b/kernel/cgroup_freezer.c
2985 @@ -153,6 +153,13 @@ static void freezer_destroy(struct cgroup_subsys *ss,
2986 kfree(cgroup_freezer(cgroup));
2987 }
2988
2989 +/* task is frozen or will freeze immediately when next it gets woken */
2990 +static bool is_task_frozen_enough(struct task_struct *task)
2991 +{
2992 + return frozen(task) ||
2993 + (task_is_stopped_or_traced(task) && freezing(task));
2994 +}
2995 +
2996 /*
2997 * The call to cgroup_lock() in the freezer.state write method prevents
2998 * a write to that file racing against an attach, and hence the
2999 @@ -231,7 +238,7 @@ static void update_if_frozen(struct cgroup *cgroup,
3000 cgroup_iter_start(cgroup, &it);
3001 while ((task = cgroup_iter_next(cgroup, &it))) {
3002 ntotal++;
3003 - if (frozen(task))
3004 + if (is_task_frozen_enough(task))
3005 nfrozen++;
3006 }
3007
3008 @@ -284,7 +291,7 @@ static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
3009 while ((task = cgroup_iter_next(cgroup, &it))) {
3010 if (!freeze_task(task, true))
3011 continue;
3012 - if (frozen(task))
3013 + if (is_task_frozen_enough(task))
3014 continue;
3015 if (!freezing(task) && !freezer_should_skip(task))
3016 num_cant_freeze_now++;
3017 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
3018 index a9205e3..2043c08 100644
3019 --- a/kernel/hrtimer.c
3020 +++ b/kernel/hrtimer.c
3021 @@ -885,10 +885,13 @@ static void __remove_hrtimer(struct hrtimer *timer,
3022 struct hrtimer_clock_base *base,
3023 unsigned long newstate, int reprogram)
3024 {
3025 + struct timerqueue_node *next_timer;
3026 if (!(timer->state & HRTIMER_STATE_ENQUEUED))
3027 goto out;
3028
3029 - if (&timer->node == timerqueue_getnext(&base->active)) {
3030 + next_timer = timerqueue_getnext(&base->active);
3031 + timerqueue_del(&base->active, &timer->node);
3032 + if (&timer->node == next_timer) {
3033 #ifdef CONFIG_HIGH_RES_TIMERS
3034 /* Reprogram the clock event device. if enabled */
3035 if (reprogram && hrtimer_hres_active()) {
3036 @@ -901,7 +904,6 @@ static void __remove_hrtimer(struct hrtimer *timer,
3037 }
3038 #endif
3039 }
3040 - timerqueue_del(&base->active, &timer->node);
3041 if (!timerqueue_getnext(&base->active))
3042 base->cpu_base->active_bases &= ~(1 << base->index);
3043 out:
3044 diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
3045 index 9b956fa..d6c4adc 100644
3046 --- a/kernel/irq/manage.c
3047 +++ b/kernel/irq/manage.c
3048 @@ -620,8 +620,9 @@ static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
3049
3050 static int irq_wait_for_interrupt(struct irqaction *action)
3051 {
3052 + set_current_state(TASK_INTERRUPTIBLE);
3053 +
3054 while (!kthread_should_stop()) {
3055 - set_current_state(TASK_INTERRUPTIBLE);
3056
3057 if (test_and_clear_bit(IRQTF_RUNTHREAD,
3058 &action->thread_flags)) {
3059 @@ -629,7 +630,9 @@ static int irq_wait_for_interrupt(struct irqaction *action)
3060 return 0;
3061 }
3062 schedule();
3063 + set_current_state(TASK_INTERRUPTIBLE);
3064 }
3065 + __set_current_state(TASK_RUNNING);
3066 return -1;
3067 }
3068
3069 diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
3070 index b5f4742..dc813a9 100644
3071 --- a/kernel/irq/spurious.c
3072 +++ b/kernel/irq/spurious.c
3073 @@ -84,7 +84,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
3074 */
3075 action = desc->action;
3076 if (!action || !(action->flags & IRQF_SHARED) ||
3077 - (action->flags & __IRQF_TIMER) || !action->next)
3078 + (action->flags & __IRQF_TIMER) ||
3079 + (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
3080 + !action->next)
3081 goto out;
3082
3083 /* Already running on another processor */
3084 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
3085 index a8ce450..e6f1f24 100644
3086 --- a/kernel/jump_label.c
3087 +++ b/kernel/jump_label.c
3088 @@ -66,8 +66,9 @@ void jump_label_inc(struct jump_label_key *key)
3089 return;
3090
3091 jump_label_lock();
3092 - if (atomic_add_return(1, &key->enabled) == 1)
3093 + if (atomic_read(&key->enabled) == 0)
3094 jump_label_update(key, JUMP_LABEL_ENABLE);
3095 + atomic_inc(&key->enabled);
3096 jump_label_unlock();
3097 }
3098
3099 diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
3100 index e4c699d..13dfaab 100644
3101 --- a/kernel/time/clockevents.c
3102 +++ b/kernel/time/clockevents.c
3103 @@ -286,6 +286,7 @@ void clockevents_exchange_device(struct clock_event_device *old,
3104 * released list and do a notify add later.
3105 */
3106 if (old) {
3107 + old->event_handler = clockevents_handle_noop;
3108 clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
3109 list_del(&old->list);
3110 list_add(&old->list, &clockevents_released);
3111 diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
3112 index e0980f0..8f77da1 100644
3113 --- a/kernel/time/clocksource.c
3114 +++ b/kernel/time/clocksource.c
3115 @@ -494,6 +494,22 @@ void clocksource_touch_watchdog(void)
3116 }
3117
3118 /**
3119 + * clocksource_max_adjustment - Returns max adjustment amount
3120 + * @cs: Pointer to clocksource
3121 + *
3122 + */
3123 +static u32 clocksource_max_adjustment(struct clocksource *cs)
3124 +{
3125 + u64 ret;
3126 + /*
3127 + * We won't try to correct for more than 11% adjustments (110,000 ppm).
3128 + */
3129 + ret = (u64)cs->mult * 11;
3130 + do_div(ret, 100);
3131 + return (u32)ret;
3132 +}
3133 +
3134 +/**
3135 * clocksource_max_deferment - Returns max time the clocksource can be deferred
3136 * @cs: Pointer to clocksource
3137 *
3138 @@ -505,25 +521,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
3139 /*
3140 * Calculate the maximum number of cycles that we can pass to the
3141 * cyc2ns function without overflowing a 64-bit signed result. The
3142 - * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
3143 - * is equivalent to the below.
3144 - * max_cycles < (2^63)/cs->mult
3145 - * max_cycles < 2^(log2((2^63)/cs->mult))
3146 - * max_cycles < 2^(log2(2^63) - log2(cs->mult))
3147 - * max_cycles < 2^(63 - log2(cs->mult))
3148 - * max_cycles < 1 << (63 - log2(cs->mult))
3149 + * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
3150 + * which is equivalent to the below.
3151 + * max_cycles < (2^63)/(cs->mult + cs->maxadj)
3152 + * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
3153 + * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
3154 + * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
3155 + * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
3156 * Please note that we add 1 to the result of the log2 to account for
3157 * any rounding errors, ensure the above inequality is satisfied and
3158 * no overflow will occur.
3159 */
3160 - max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
3161 + max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
3162
3163 /*
3164 * The actual maximum number of cycles we can defer the clocksource is
3165 * determined by the minimum of max_cycles and cs->mask.
3166 + * Note: Here we subtract the maxadj to make sure we don't sleep for
3167 + * too long if there's a large negative adjustment.
3168 */
3169 max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
3170 - max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
3171 + max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
3172 + cs->shift);
3173
3174 /*
3175 * To ensure that the clocksource does not wrap whilst we are idle,
3176 @@ -531,7 +550,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
3177 * note a margin of 12.5% is used because this can be computed with
3178 * a shift, versus say 10% which would require division.
3179 */
3180 - return max_nsecs - (max_nsecs >> 5);
3181 + return max_nsecs - (max_nsecs >> 3);
3182 }
3183
3184 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
3185 @@ -642,7 +661,6 @@ static void clocksource_enqueue(struct clocksource *cs)
3186 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
3187 {
3188 u64 sec;
3189 -
3190 /*
3191 * Calc the maximum number of seconds which we can run before
3192 * wrapping around. For clocksources which have a mask > 32bit
3193 @@ -653,7 +671,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
3194 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
3195 * margin as we do in clocksource_max_deferment()
3196 */
3197 - sec = (cs->mask - (cs->mask >> 5));
3198 + sec = (cs->mask - (cs->mask >> 3));
3199 do_div(sec, freq);
3200 do_div(sec, scale);
3201 if (!sec)
3202 @@ -663,6 +681,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
3203
3204 clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
3205 NSEC_PER_SEC / scale, sec * scale);
3206 +
3207 + /*
3208 + * Reduce mult for clocksources that have large mults, to avoid overflow.
3209 + * Since mult may be adjusted by ntp, add an extra safety margin
3210 + *
3211 + */
3212 + cs->maxadj = clocksource_max_adjustment(cs);
3213 + while ((cs->mult + cs->maxadj < cs->mult)
3214 + || (cs->mult - cs->maxadj > cs->mult)) {
3215 + cs->mult >>= 1;
3216 + cs->shift--;
3217 + cs->maxadj = clocksource_max_adjustment(cs);
3218 + }
3219 +
3220 cs->max_idle_ns = clocksource_max_deferment(cs);
3221 }
3222 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
3223 @@ -703,6 +735,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
3224 */
3225 int clocksource_register(struct clocksource *cs)
3226 {
3227 + /* calculate max adjustment for given mult/shift */
3228 + cs->maxadj = clocksource_max_adjustment(cs);
3229 + WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
3230 + "Clocksource %s might overflow on 11%% adjustment\n",
3231 + cs->name);
3232 +
3233 /* calculate max idle time permitted for this clocksource */
3234 cs->max_idle_ns = clocksource_max_deferment(cs);
3235
3236 diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
3237 index c7218d1..7a90d02 100644
3238 --- a/kernel/time/tick-broadcast.c
3239 +++ b/kernel/time/tick-broadcast.c
3240 @@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
3241 (dev->features & CLOCK_EVT_FEAT_C3STOP))
3242 return 0;
3243
3244 - clockevents_exchange_device(NULL, dev);
3245 + clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
3246 tick_broadcast_device.evtdev = dev;
3247 if (!cpumask_empty(tick_get_broadcast_mask()))
3248 tick_broadcast_start_periodic(dev);
3249 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
3250 index 2b021b0e..6f9798b 100644
3251 --- a/kernel/time/timekeeping.c
3252 +++ b/kernel/time/timekeeping.c
3253 @@ -249,6 +249,8 @@ ktime_t ktime_get(void)
3254 secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
3255 nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
3256 nsecs += timekeeping_get_ns();
3257 + /* If arch requires, add in gettimeoffset() */
3258 + nsecs += arch_gettimeoffset();
3259
3260 } while (read_seqretry(&xtime_lock, seq));
3261 /*
3262 @@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
3263 *ts = xtime;
3264 tomono = wall_to_monotonic;
3265 nsecs = timekeeping_get_ns();
3266 + /* If arch requires, add in gettimeoffset() */
3267 + nsecs += arch_gettimeoffset();
3268
3269 } while (read_seqretry(&xtime_lock, seq));
3270
3271 @@ -820,6 +824,13 @@ static void timekeeping_adjust(s64 offset)
3272 } else
3273 return;
3274
3275 + WARN_ONCE(timekeeper.clock->maxadj &&
3276 + (timekeeper.mult + adj > timekeeper.clock->mult +
3277 + timekeeper.clock->maxadj),
3278 + "Adjusting %s more then 11%% (%ld vs %ld)\n",
3279 + timekeeper.clock->name, (long)timekeeper.mult + adj,
3280 + (long)timekeeper.clock->mult +
3281 + timekeeper.clock->maxadj);
3282 timekeeper.mult += adj;
3283 timekeeper.xtime_interval += interval;
3284 timekeeper.xtime_nsec -= offset;
3285 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
3286 index c3e4575..48d3762 100644
3287 --- a/kernel/trace/ftrace.c
3288 +++ b/kernel/trace/ftrace.c
3289 @@ -151,7 +151,6 @@ void clear_ftrace_function(void)
3290 ftrace_pid_function = ftrace_stub;
3291 }
3292
3293 -#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
3294 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
3295 /*
3296 * For those archs that do not test ftrace_trace_stop in their
3297 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
3298 index 581876f..c212a7f 100644
3299 --- a/kernel/trace/trace_events.c
3300 +++ b/kernel/trace/trace_events.c
3301 @@ -1078,7 +1078,6 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
3302 /* First see if we did not already create this dir */
3303 list_for_each_entry(system, &event_subsystems, list) {
3304 if (strcmp(system->name, name) == 0) {
3305 - __get_system(system);
3306 system->nr_events++;
3307 return system->entry;
3308 }
3309 diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
3310 index 256764e..bd3c636 100644
3311 --- a/kernel/trace/trace_events_filter.c
3312 +++ b/kernel/trace/trace_events_filter.c
3313 @@ -1766,7 +1766,7 @@ static int replace_system_preds(struct event_subsystem *system,
3314 * replace the filter for the call.
3315 */
3316 filter = call->filter;
3317 - call->filter = filter_item->filter;
3318 + rcu_assign_pointer(call->filter, filter_item->filter);
3319 filter_item->filter = filter;
3320
3321 fail = false;
3322 @@ -1821,7 +1821,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
3323 filter = call->filter;
3324 if (!filter)
3325 goto out_unlock;
3326 - call->filter = NULL;
3327 + RCU_INIT_POINTER(call->filter, NULL);
3328 /* Make sure the filter is not being used */
3329 synchronize_sched();
3330 __free_filter(filter);
3331 @@ -1862,7 +1862,7 @@ out:
3332 * string
3333 */
3334 tmp = call->filter;
3335 - call->filter = filter;
3336 + rcu_assign_pointer(call->filter, filter);
3337 if (tmp) {
3338 /* Make sure the call is done with the filter */
3339 synchronize_sched();
3340 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
3341 index dae27ba..bb28a5f 100644
3342 --- a/mm/hugetlb.c
3343 +++ b/mm/hugetlb.c
3344 @@ -2422,6 +2422,8 @@ retry_avoidcopy:
3345 * anon_vma prepared.
3346 */
3347 if (unlikely(anon_vma_prepare(vma))) {
3348 + page_cache_release(new_page);
3349 + page_cache_release(old_page);
3350 /* Caller expects lock to be held */
3351 spin_lock(&mm->page_table_lock);
3352 return VM_FAULT_OOM;
3353 diff --git a/mm/slab.c b/mm/slab.c
3354 index 6d90a09..893c76d 100644
3355 --- a/mm/slab.c
3356 +++ b/mm/slab.c
3357 @@ -595,6 +595,7 @@ static enum {
3358 PARTIAL_AC,
3359 PARTIAL_L3,
3360 EARLY,
3361 + LATE,
3362 FULL
3363 } g_cpucache_up;
3364
3365 @@ -671,7 +672,7 @@ static void init_node_lock_keys(int q)
3366 {
3367 struct cache_sizes *s = malloc_sizes;
3368
3369 - if (g_cpucache_up != FULL)
3370 + if (g_cpucache_up < LATE)
3371 return;
3372
3373 for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
3374 @@ -1666,6 +1667,8 @@ void __init kmem_cache_init_late(void)
3375 {
3376 struct kmem_cache *cachep;
3377
3378 + g_cpucache_up = LATE;
3379 +
3380 /* Annotate slab for lockdep -- annotate the malloc caches */
3381 init_lock_keys();
3382
3383 diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
3384 index 995cbe0..e79ff75 100644
3385 --- a/net/bridge/br_multicast.c
3386 +++ b/net/bridge/br_multicast.c
3387 @@ -1501,6 +1501,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
3388
3389 __skb_pull(skb2, offset);
3390 skb_reset_transport_header(skb2);
3391 + skb_postpull_rcsum(skb2, skb_network_header(skb2),
3392 + skb_network_header_len(skb2));
3393
3394 icmp6_type = icmp6_hdr(skb2)->icmp6_type;
3395
3396 diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
3397 index c1f4154..c7056b2 100644
3398 --- a/net/ipv4/ah4.c
3399 +++ b/net/ipv4/ah4.c
3400 @@ -136,8 +136,6 @@ static void ah_output_done(struct crypto_async_request *base, int err)
3401 memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
3402 }
3403
3404 - err = ah->nexthdr;
3405 -
3406 kfree(AH_SKB_CB(skb)->tmp);
3407 xfrm_output_resume(skb, err);
3408 }
3409 @@ -264,12 +262,12 @@ static void ah_input_done(struct crypto_async_request *base, int err)
3410 if (err)
3411 goto out;
3412
3413 + err = ah->nexthdr;
3414 +
3415 skb->network_header += ah_hlen;
3416 memcpy(skb_network_header(skb), work_iph, ihl);
3417 __skb_pull(skb, ah_hlen + ihl);
3418 skb_set_transport_header(skb, -ihl);
3419 -
3420 - err = ah->nexthdr;
3421 out:
3422 kfree(AH_SKB_CB(skb)->tmp);
3423 xfrm_input_resume(skb, err);
3424 diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
3425 index ec93335..05d20cc 100644
3426 --- a/net/ipv4/ip_options.c
3427 +++ b/net/ipv4/ip_options.c
3428 @@ -640,6 +640,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
3429 }
3430 if (srrptr <= srrspace) {
3431 opt->srr_is_hit = 1;
3432 + iph->daddr = nexthop;
3433 opt->is_changed = 1;
3434 }
3435 return 0;
3436 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3437 index 075212e..05ac666c 100644
3438 --- a/net/ipv4/route.c
3439 +++ b/net/ipv4/route.c
3440 @@ -112,7 +112,7 @@
3441 #include <net/secure_seq.h>
3442
3443 #define RT_FL_TOS(oldflp4) \
3444 - ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
3445 + ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
3446
3447 #define IP_MAX_MTU 0xFFF0
3448
3449 @@ -132,6 +132,7 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
3450 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
3451 static int ip_rt_min_advmss __read_mostly = 256;
3452 static int rt_chain_length_max __read_mostly = 20;
3453 +static int redirect_genid;
3454
3455 /*
3456 * Interface to generic destination cache.
3457 @@ -417,9 +418,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
3458 else {
3459 struct rtable *r = v;
3460 struct neighbour *n;
3461 - int len;
3462 + int len, HHUptod;
3463
3464 + rcu_read_lock();
3465 n = dst_get_neighbour(&r->dst);
3466 + HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
3467 + rcu_read_unlock();
3468 +
3469 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
3470 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
3471 r->dst.dev ? r->dst.dev->name : "*",
3472 @@ -433,7 +438,7 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
3473 dst_metric(&r->dst, RTAX_RTTVAR)),
3474 r->rt_key_tos,
3475 -1,
3476 - (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
3477 + HHUptod,
3478 r->rt_spec_dst, &len);
3479
3480 seq_printf(seq, "%*s\n", 127 - len, "");
3481 @@ -838,6 +843,7 @@ static void rt_cache_invalidate(struct net *net)
3482
3483 get_random_bytes(&shuffle, sizeof(shuffle));
3484 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
3485 + redirect_genid++;
3486 }
3487
3488 /*
3489 @@ -1305,11 +1311,40 @@ static void rt_del(unsigned hash, struct rtable *rt)
3490 spin_unlock_bh(rt_hash_lock_addr(hash));
3491 }
3492
3493 +static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
3494 +{
3495 + struct rtable *rt = (struct rtable *) dst;
3496 + __be32 orig_gw = rt->rt_gateway;
3497 + struct neighbour *n, *old_n;
3498 +
3499 + dst_confirm(&rt->dst);
3500 +
3501 + rt->rt_gateway = peer->redirect_learned.a4;
3502 +
3503 + n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
3504 + if (IS_ERR(n)) {
3505 + rt->rt_gateway = orig_gw;
3506 + return;
3507 + }
3508 + old_n = xchg(&rt->dst._neighbour, n);
3509 + if (old_n)
3510 + neigh_release(old_n);
3511 + if (!(n->nud_state & NUD_VALID)) {
3512 + neigh_event_send(n, NULL);
3513 + } else {
3514 + rt->rt_flags |= RTCF_REDIRECTED;
3515 + call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
3516 + }
3517 +}
3518 +
3519 /* called in rcu_read_lock() section */
3520 void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
3521 __be32 saddr, struct net_device *dev)
3522 {
3523 + int s, i;
3524 struct in_device *in_dev = __in_dev_get_rcu(dev);
3525 + __be32 skeys[2] = { saddr, 0 };
3526 + int ikeys[2] = { dev->ifindex, 0 };
3527 struct inet_peer *peer;
3528 struct net *net;
3529
3530 @@ -1332,13 +1367,45 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
3531 goto reject_redirect;
3532 }
3533
3534 - peer = inet_getpeer_v4(daddr, 1);
3535 - if (peer) {
3536 - peer->redirect_learned.a4 = new_gw;
3537 + for (s = 0; s < 2; s++) {
3538 + for (i = 0; i < 2; i++) {
3539 + unsigned int hash;
3540 + struct rtable __rcu **rthp;
3541 + struct rtable *rt;
3542
3543 - inet_putpeer(peer);
3544 + hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
3545 +
3546 + rthp = &rt_hash_table[hash].chain;
3547
3548 - atomic_inc(&__rt_peer_genid);
3549 + while ((rt = rcu_dereference(*rthp)) != NULL) {
3550 + rthp = &rt->dst.rt_next;
3551 +
3552 + if (rt->rt_key_dst != daddr ||
3553 + rt->rt_key_src != skeys[s] ||
3554 + rt->rt_oif != ikeys[i] ||
3555 + rt_is_input_route(rt) ||
3556 + rt_is_expired(rt) ||
3557 + !net_eq(dev_net(rt->dst.dev), net) ||
3558 + rt->dst.error ||
3559 + rt->dst.dev != dev ||
3560 + rt->rt_gateway != old_gw)
3561 + continue;
3562 +
3563 + if (!rt->peer)
3564 + rt_bind_peer(rt, rt->rt_dst, 1);
3565 +
3566 + peer = rt->peer;
3567 + if (peer) {
3568 + if (peer->redirect_learned.a4 != new_gw ||
3569 + peer->redirect_genid != redirect_genid) {
3570 + peer->redirect_learned.a4 = new_gw;
3571 + peer->redirect_genid = redirect_genid;
3572 + atomic_inc(&__rt_peer_genid);
3573 + }
3574 + check_peer_redir(&rt->dst, peer);
3575 + }
3576 + }
3577 + }
3578 }
3579 return;
3580
3581 @@ -1568,11 +1635,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
3582 est_mtu = mtu;
3583 peer->pmtu_learned = mtu;
3584 peer->pmtu_expires = pmtu_expires;
3585 + atomic_inc(&__rt_peer_genid);
3586 }
3587
3588 inet_putpeer(peer);
3589 -
3590 - atomic_inc(&__rt_peer_genid);
3591 }
3592 return est_mtu ? : new_mtu;
3593 }
3594 @@ -1625,40 +1691,9 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
3595 }
3596 }
3597
3598 -static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
3599 -{
3600 - struct rtable *rt = (struct rtable *) dst;
3601 - __be32 orig_gw = rt->rt_gateway;
3602 - struct neighbour *n, *old_n;
3603 -
3604 - dst_confirm(&rt->dst);
3605 -
3606 - rt->rt_gateway = peer->redirect_learned.a4;
3607 -
3608 - n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
3609 - if (IS_ERR(n))
3610 - return PTR_ERR(n);
3611 - old_n = xchg(&rt->dst._neighbour, n);
3612 - if (old_n)
3613 - neigh_release(old_n);
3614 - if (!n || !(n->nud_state & NUD_VALID)) {
3615 - if (n)
3616 - neigh_event_send(n, NULL);
3617 - rt->rt_gateway = orig_gw;
3618 - return -EAGAIN;
3619 - } else {
3620 - rt->rt_flags |= RTCF_REDIRECTED;
3621 - call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
3622 - }
3623 - return 0;
3624 -}
3625
3626 -static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
3627 +static void ipv4_validate_peer(struct rtable *rt)
3628 {
3629 - struct rtable *rt = (struct rtable *) dst;
3630 -
3631 - if (rt_is_expired(rt))
3632 - return NULL;
3633 if (rt->rt_peer_genid != rt_peer_genid()) {
3634 struct inet_peer *peer;
3635
3636 @@ -1667,17 +1702,26 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
3637
3638 peer = rt->peer;
3639 if (peer) {
3640 - check_peer_pmtu(dst, peer);
3641 + check_peer_pmtu(&rt->dst, peer);
3642
3643 + if (peer->redirect_genid != redirect_genid)
3644 + peer->redirect_learned.a4 = 0;
3645 if (peer->redirect_learned.a4 &&
3646 - peer->redirect_learned.a4 != rt->rt_gateway) {
3647 - if (check_peer_redir(dst, peer))
3648 - return NULL;
3649 - }
3650 + peer->redirect_learned.a4 != rt->rt_gateway)
3651 + check_peer_redir(&rt->dst, peer);
3652 }
3653
3654 rt->rt_peer_genid = rt_peer_genid();
3655 }
3656 +}
3657 +
3658 +static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
3659 +{
3660 + struct rtable *rt = (struct rtable *) dst;
3661 +
3662 + if (rt_is_expired(rt))
3663 + return NULL;
3664 + ipv4_validate_peer(rt);
3665 return dst;
3666 }
3667
3668 @@ -1820,6 +1864,8 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
3669 dst_init_metrics(&rt->dst, peer->metrics, false);
3670
3671 check_peer_pmtu(&rt->dst, peer);
3672 + if (peer->redirect_genid != redirect_genid)
3673 + peer->redirect_learned.a4 = 0;
3674 if (peer->redirect_learned.a4 &&
3675 peer->redirect_learned.a4 != rt->rt_gateway) {
3676 rt->rt_gateway = peer->redirect_learned.a4;
3677 @@ -2325,6 +2371,7 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
3678 rth->rt_mark == skb->mark &&
3679 net_eq(dev_net(rth->dst.dev), net) &&
3680 !rt_is_expired(rth)) {
3681 + ipv4_validate_peer(rth);
3682 if (noref) {
3683 dst_use_noref(&rth->dst, jiffies);
3684 skb_dst_set_noref(skb, &rth->dst);
3685 @@ -2383,11 +2430,11 @@ EXPORT_SYMBOL(ip_route_input_common);
3686 static struct rtable *__mkroute_output(const struct fib_result *res,
3687 const struct flowi4 *fl4,
3688 __be32 orig_daddr, __be32 orig_saddr,
3689 - int orig_oif, struct net_device *dev_out,
3690 + int orig_oif, __u8 orig_rtos,
3691 + struct net_device *dev_out,
3692 unsigned int flags)
3693 {
3694 struct fib_info *fi = res->fi;
3695 - u32 tos = RT_FL_TOS(fl4);
3696 struct in_device *in_dev;
3697 u16 type = res->type;
3698 struct rtable *rth;
3699 @@ -2438,7 +2485,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
3700 rth->rt_genid = rt_genid(dev_net(dev_out));
3701 rth->rt_flags = flags;
3702 rth->rt_type = type;
3703 - rth->rt_key_tos = tos;
3704 + rth->rt_key_tos = orig_rtos;
3705 rth->rt_dst = fl4->daddr;
3706 rth->rt_src = fl4->saddr;
3707 rth->rt_route_iif = 0;
3708 @@ -2488,7 +2535,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
3709 static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
3710 {
3711 struct net_device *dev_out = NULL;
3712 - u32 tos = RT_FL_TOS(fl4);
3713 + __u8 tos = RT_FL_TOS(fl4);
3714 unsigned int flags = 0;
3715 struct fib_result res;
3716 struct rtable *rth;
3717 @@ -2664,7 +2711,7 @@ static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
3718
3719 make_route:
3720 rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
3721 - dev_out, flags);
3722 + tos, dev_out, flags);
3723 if (!IS_ERR(rth)) {
3724 unsigned int hash;
3725
3726 @@ -2700,6 +2747,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
3727 (IPTOS_RT_MASK | RTO_ONLINK)) &&
3728 net_eq(dev_net(rth->dst.dev), net) &&
3729 !rt_is_expired(rth)) {
3730 + ipv4_validate_peer(rth);
3731 dst_use(&rth->dst, jiffies);
3732 RT_CACHE_STAT_INC(out_hit);
3733 rcu_read_unlock_bh();
3734 diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
3735 index 2195ae6..7a33aaa 100644
3736 --- a/net/ipv6/ah6.c
3737 +++ b/net/ipv6/ah6.c
3738 @@ -324,8 +324,6 @@ static void ah6_output_done(struct crypto_async_request *base, int err)
3739 #endif
3740 }
3741
3742 - err = ah->nexthdr;
3743 -
3744 kfree(AH_SKB_CB(skb)->tmp);
3745 xfrm_output_resume(skb, err);
3746 }
3747 @@ -466,12 +464,12 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
3748 if (err)
3749 goto out;
3750
3751 + err = ah->nexthdr;
3752 +
3753 skb->network_header += ah_hlen;
3754 memcpy(skb_network_header(skb), work_iph, hdr_len);
3755 __skb_pull(skb, ah_hlen + hdr_len);
3756 skb_set_transport_header(skb, -hdr_len);
3757 -
3758 - err = ah->nexthdr;
3759 out:
3760 kfree(AH_SKB_CB(skb)->tmp);
3761 xfrm_input_resume(skb, err);
3762 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
3763 index c8be8ef..db7db43 100644
3764 --- a/net/mac80211/agg-tx.c
3765 +++ b/net/mac80211/agg-tx.c
3766 @@ -162,6 +162,12 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
3767 return -ENOENT;
3768 }
3769
3770 + /* if we're already stopping ignore any new requests to stop */
3771 + if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
3772 + spin_unlock_bh(&sta->lock);
3773 + return -EALREADY;
3774 + }
3775 +
3776 if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
3777 /* not even started yet! */
3778 ieee80211_assign_tid_tx(sta, tid, NULL);
3779 @@ -170,6 +176,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
3780 return 0;
3781 }
3782
3783 + set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
3784 +
3785 spin_unlock_bh(&sta->lock);
3786
3787 #ifdef CONFIG_MAC80211_HT_DEBUG
3788 @@ -177,8 +185,6 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
3789 sta->sta.addr, tid);
3790 #endif /* CONFIG_MAC80211_HT_DEBUG */
3791
3792 - set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
3793 -
3794 del_timer_sync(&tid_tx->addba_resp_timer);
3795
3796 /*
3797 @@ -188,6 +194,20 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
3798 */
3799 clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
3800
3801 + /*
3802 + * There might be a few packets being processed right now (on
3803 + * another CPU) that have already gotten past the aggregation
3804 + * check when it was still OPERATIONAL and consequently have
3805 + * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
3806 + * call into the driver at the same time or even before the
3807 + * TX paths calls into it, which could confuse the driver.
3808 + *
3809 + * Wait for all currently running TX paths to finish before
3810 + * telling the driver. New packets will not go through since
3811 + * the aggregation session is no longer OPERATIONAL.
3812 + */
3813 + synchronize_net();
3814 +
3815 tid_tx->stop_initiator = initiator;
3816 tid_tx->tx_stop = tx;
3817
3818 @@ -772,12 +792,27 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
3819 goto out;
3820 }
3821
3822 - del_timer(&tid_tx->addba_resp_timer);
3823 + del_timer_sync(&tid_tx->addba_resp_timer);
3824
3825 #ifdef CONFIG_MAC80211_HT_DEBUG
3826 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
3827 #endif
3828
3829 + /*
3830 + * addba_resp_timer may have fired before we got here, and
3831 + * caused WANT_STOP to be set. If the stop then was already
3832 + * processed further, STOPPING might be set.
3833 + */
3834 + if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
3835 + test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
3836 +#ifdef CONFIG_MAC80211_HT_DEBUG
3837 + printk(KERN_DEBUG
3838 + "got addBA resp for tid %d but we already gave up\n",
3839 + tid);
3840 +#endif
3841 + goto out;
3842 + }
3843 +
3844 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
3845 == WLAN_STATUS_SUCCESS) {
3846 /*
3847 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
3848 index acb4423..3d90dad 100644
3849 --- a/net/mac80211/main.c
3850 +++ b/net/mac80211/main.c
3851 @@ -742,6 +742,12 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
3852 if (!local->int_scan_req)
3853 return -ENOMEM;
3854
3855 + for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
3856 + if (!local->hw.wiphy->bands[band])
3857 + continue;
3858 + local->int_scan_req->rates[band] = (u32) -1;
3859 + }
3860 +
3861 /* if low-level driver supports AP, we also support VLAN */
3862 if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
3863 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
3864 diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
3865 index 6649463..d617161 100644
3866 --- a/net/sched/sch_red.c
3867 +++ b/net/sched/sch_red.c
3868 @@ -209,8 +209,8 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt)
3869 ctl->Plog, ctl->Scell_log,
3870 nla_data(tb[TCA_RED_STAB]));
3871
3872 - if (skb_queue_empty(&sch->q))
3873 - red_end_of_idle_period(&q->parms);
3874 + if (!q->qdisc->q.qlen)
3875 + red_start_of_idle_period(&q->parms);
3876
3877 sch_tree_unlock(sch);
3878 return 0;
3879 diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
3880 index a3b7120..4f4c52c 100644
3881 --- a/net/sched/sch_teql.c
3882 +++ b/net/sched/sch_teql.c
3883 @@ -225,11 +225,11 @@ static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
3884
3885
3886 static int
3887 -__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
3888 +__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
3889 + struct net_device *dev, struct netdev_queue *txq,
3890 + struct neighbour *mn)
3891 {
3892 - struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
3893 - struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
3894 - struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
3895 + struct teql_sched_data *q = qdisc_priv(txq->qdisc);
3896 struct neighbour *n = q->ncache;
3897
3898 if (mn->tbl == NULL)
3899 @@ -262,17 +262,26 @@ __teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *
3900 }
3901
3902 static inline int teql_resolve(struct sk_buff *skb,
3903 - struct sk_buff *skb_res, struct net_device *dev)
3904 + struct sk_buff *skb_res,
3905 + struct net_device *dev,
3906 + struct netdev_queue *txq)
3907 {
3908 - struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
3909 + struct dst_entry *dst = skb_dst(skb);
3910 + struct neighbour *mn;
3911 + int res;
3912 +
3913 if (txq->qdisc == &noop_qdisc)
3914 return -ENODEV;
3915
3916 - if (dev->header_ops == NULL ||
3917 - skb_dst(skb) == NULL ||
3918 - dst_get_neighbour(skb_dst(skb)) == NULL)
3919 + if (!dev->header_ops || !dst)
3920 return 0;
3921 - return __teql_resolve(skb, skb_res, dev);
3922 +
3923 + rcu_read_lock();
3924 + mn = dst_get_neighbour(dst);
3925 + res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
3926 + rcu_read_unlock();
3927 +
3928 + return res;
3929 }
3930
3931 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
3932 @@ -307,7 +316,7 @@ restart:
3933 continue;
3934 }
3935
3936 - switch (teql_resolve(skb, skb_res, slave)) {
3937 + switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
3938 case 0:
3939 if (__netif_tx_trylock(slave_txq)) {
3940 unsigned int length = qdisc_pkt_len(skb);
3941 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
3942 index d7f97ef..b60b750 100644
3943 --- a/net/sunrpc/xprtsock.c
3944 +++ b/net/sunrpc/xprtsock.c
3945 @@ -496,7 +496,7 @@ static int xs_nospace(struct rpc_task *task)
3946 struct rpc_rqst *req = task->tk_rqstp;
3947 struct rpc_xprt *xprt = req->rq_xprt;
3948 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
3949 - int ret = 0;
3950 + int ret = -EAGAIN;
3951
3952 dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
3953 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
3954 @@ -508,7 +508,6 @@ static int xs_nospace(struct rpc_task *task)
3955 /* Don't race with disconnect */
3956 if (xprt_connected(xprt)) {
3957 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
3958 - ret = -EAGAIN;
3959 /*
3960 * Notify TCP that we're limited by the application
3961 * window size
3962 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
3963 index 1308050..a5c1320 100644
3964 --- a/net/wireless/nl80211.c
3965 +++ b/net/wireless/nl80211.c
3966 @@ -83,8 +83,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
3967 [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
3968 [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
3969
3970 - [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
3971 - [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
3972 + [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
3973 + [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
3974
3975 [NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
3976 [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
3977 diff --git a/net/wireless/reg.c b/net/wireless/reg.c
3978 index 1658eff..1bc36d2 100644
3979 --- a/net/wireless/reg.c
3980 +++ b/net/wireless/reg.c
3981 @@ -57,8 +57,17 @@
3982 #define REG_DBG_PRINT(args...)
3983 #endif
3984
3985 +static struct regulatory_request core_request_world = {
3986 + .initiator = NL80211_REGDOM_SET_BY_CORE,
3987 + .alpha2[0] = '0',
3988 + .alpha2[1] = '0',
3989 + .intersect = false,
3990 + .processed = true,
3991 + .country_ie_env = ENVIRON_ANY,
3992 +};
3993 +
3994 /* Receipt of information from last regulatory request */
3995 -static struct regulatory_request *last_request;
3996 +static struct regulatory_request *last_request = &core_request_world;
3997
3998 /* To trigger userspace events */
3999 static struct platform_device *reg_pdev;
4000 @@ -150,7 +159,7 @@ static char user_alpha2[2];
4001 module_param(ieee80211_regdom, charp, 0444);
4002 MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
4003
4004 -static void reset_regdomains(void)
4005 +static void reset_regdomains(bool full_reset)
4006 {
4007 /* avoid freeing static information or freeing something twice */
4008 if (cfg80211_regdomain == cfg80211_world_regdom)
4009 @@ -165,6 +174,13 @@ static void reset_regdomains(void)
4010
4011 cfg80211_world_regdom = &world_regdom;
4012 cfg80211_regdomain = NULL;
4013 +
4014 + if (!full_reset)
4015 + return;
4016 +
4017 + if (last_request != &core_request_world)
4018 + kfree(last_request);
4019 + last_request = &core_request_world;
4020 }
4021
4022 /*
4023 @@ -175,7 +191,7 @@ static void update_world_regdomain(const struct ieee80211_regdomain *rd)
4024 {
4025 BUG_ON(!last_request);
4026
4027 - reset_regdomains();
4028 + reset_regdomains(false);
4029
4030 cfg80211_world_regdom = rd;
4031 cfg80211_regdomain = rd;
4032 @@ -1396,7 +1412,8 @@ static int __regulatory_hint(struct wiphy *wiphy,
4033 }
4034
4035 new_request:
4036 - kfree(last_request);
4037 + if (last_request != &core_request_world)
4038 + kfree(last_request);
4039
4040 last_request = pending_request;
4041 last_request->intersect = intersect;
4042 @@ -1566,9 +1583,6 @@ static int regulatory_hint_core(const char *alpha2)
4043 {
4044 struct regulatory_request *request;
4045
4046 - kfree(last_request);
4047 - last_request = NULL;
4048 -
4049 request = kzalloc(sizeof(struct regulatory_request),
4050 GFP_KERNEL);
4051 if (!request)
4052 @@ -1766,7 +1780,7 @@ static void restore_regulatory_settings(bool reset_user)
4053 mutex_lock(&cfg80211_mutex);
4054 mutex_lock(&reg_mutex);
4055
4056 - reset_regdomains();
4057 + reset_regdomains(true);
4058 restore_alpha2(alpha2, reset_user);
4059
4060 /*
4061 @@ -2026,12 +2040,18 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
4062 }
4063
4064 request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
4065 + if (!request_wiphy &&
4066 + (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
4067 + last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
4068 + schedule_delayed_work(&reg_timeout, 0);
4069 + return -ENODEV;
4070 + }
4071
4072 if (!last_request->intersect) {
4073 int r;
4074
4075 if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
4076 - reset_regdomains();
4077 + reset_regdomains(false);
4078 cfg80211_regdomain = rd;
4079 return 0;
4080 }
4081 @@ -2052,7 +2072,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
4082 if (r)
4083 return r;
4084
4085 - reset_regdomains();
4086 + reset_regdomains(false);
4087 cfg80211_regdomain = rd;
4088 return 0;
4089 }
4090 @@ -2077,7 +2097,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
4091
4092 rd = NULL;
4093
4094 - reset_regdomains();
4095 + reset_regdomains(false);
4096 cfg80211_regdomain = intersected_rd;
4097
4098 return 0;
4099 @@ -2097,7 +2117,7 @@ static int __set_regdom(const struct ieee80211_regdomain *rd)
4100 kfree(rd);
4101 rd = NULL;
4102
4103 - reset_regdomains();
4104 + reset_regdomains(false);
4105 cfg80211_regdomain = intersected_rd;
4106
4107 return 0;
4108 @@ -2250,11 +2270,8 @@ void /* __init_or_exit */ regulatory_exit(void)
4109 mutex_lock(&cfg80211_mutex);
4110 mutex_lock(&reg_mutex);
4111
4112 - reset_regdomains();
4113 -
4114 - kfree(last_request);
4115 + reset_regdomains(true);
4116
4117 - last_request = NULL;
4118 dev_set_uevent_suppress(&reg_pdev->dev, true);
4119
4120 platform_device_unregister(reg_pdev);
4121 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4122 index be79c9f..c687e14 100644
4123 --- a/sound/pci/hda/patch_realtek.c
4124 +++ b/sound/pci/hda/patch_realtek.c
4125 @@ -1023,8 +1023,20 @@ static bool alc_rebuild_imux_for_auto_mic(struct hda_codec *codec)
4126 spec->imux_pins[2] = spec->dock_mic_pin;
4127 for (i = 0; i < 3; i++) {
4128 strcpy(imux->items[i].label, texts[i]);
4129 - if (spec->imux_pins[i])
4130 + if (spec->imux_pins[i]) {
4131 + hda_nid_t pin = spec->imux_pins[i];
4132 + int c;
4133 + for (c = 0; c < spec->num_adc_nids; c++) {
4134 + hda_nid_t cap = spec->capsrc_nids ?
4135 + spec->capsrc_nids[c] : spec->adc_nids[c];
4136 + int idx = get_connection_index(codec, cap, pin);
4137 + if (idx >= 0) {
4138 + imux->items[i].index = idx;
4139 + break;
4140 + }
4141 + }
4142 imux->num_items = i + 1;
4143 + }
4144 }
4145 spec->num_mux_defs = 1;
4146 spec->input_mux = imux;
4147 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
4148 index 7ed9011..4538caa 100644
4149 --- a/sound/pci/hda/patch_sigmatel.c
4150 +++ b/sound/pci/hda/patch_sigmatel.c
4151 @@ -5035,20 +5035,6 @@ static int stac92xx_pre_resume(struct hda_codec *codec)
4152 return 0;
4153 }
4154
4155 -static int stac92xx_post_suspend(struct hda_codec *codec)
4156 -{
4157 - struct sigmatel_spec *spec = codec->spec;
4158 - if (spec->gpio_led > 8) {
4159 - /* with vref-out pin used for mute led control
4160 - * codec AFG is prevented from D3 state, but on
4161 - * system suspend it can (and should) be used
4162 - */
4163 - snd_hda_codec_read(codec, codec->afg, 0,
4164 - AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
4165 - }
4166 - return 0;
4167 -}
4168 -
4169 static void stac92xx_set_power_state(struct hda_codec *codec, hda_nid_t fg,
4170 unsigned int power_state)
4171 {
4172 @@ -5655,8 +5641,6 @@ again:
4173 } else {
4174 codec->patch_ops.set_power_state =
4175 stac92xx_set_power_state;
4176 - codec->patch_ops.post_suspend =
4177 - stac92xx_post_suspend;
4178 }
4179 codec->patch_ops.pre_resume = stac92xx_pre_resume;
4180 codec->patch_ops.check_power_status =
4181 @@ -5978,8 +5962,6 @@ again:
4182 } else {
4183 codec->patch_ops.set_power_state =
4184 stac92xx_set_power_state;
4185 - codec->patch_ops.post_suspend =
4186 - stac92xx_post_suspend;
4187 }
4188 codec->patch_ops.pre_resume = stac92xx_pre_resume;
4189 codec->patch_ops.check_power_status =
4190 diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
4191 index 4ebfbd8..d636d93 100644
4192 --- a/sound/pci/hda/patch_via.c
4193 +++ b/sound/pci/hda/patch_via.c
4194 @@ -207,6 +207,7 @@ struct via_spec {
4195 /* work to check hp jack state */
4196 struct hda_codec *codec;
4197 struct delayed_work vt1708_hp_work;
4198 + int hp_work_active;
4199 int vt1708_jack_detect;
4200 int vt1708_hp_present;
4201
4202 @@ -304,27 +305,35 @@ enum {
4203 static void analog_low_current_mode(struct hda_codec *codec);
4204 static bool is_aa_path_mute(struct hda_codec *codec);
4205
4206 -static void vt1708_start_hp_work(struct via_spec *spec)
4207 +#define hp_detect_with_aa(codec) \
4208 + (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1 && \
4209 + !is_aa_path_mute(codec))
4210 +
4211 +static void vt1708_stop_hp_work(struct via_spec *spec)
4212 {
4213 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
4214 return;
4215 - snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
4216 - !spec->vt1708_jack_detect);
4217 - if (!delayed_work_pending(&spec->vt1708_hp_work))
4218 - schedule_delayed_work(&spec->vt1708_hp_work,
4219 - msecs_to_jiffies(100));
4220 + if (spec->hp_work_active) {
4221 + snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 1);
4222 + cancel_delayed_work_sync(&spec->vt1708_hp_work);
4223 + spec->hp_work_active = 0;
4224 + }
4225 }
4226
4227 -static void vt1708_stop_hp_work(struct via_spec *spec)
4228 +static void vt1708_update_hp_work(struct via_spec *spec)
4229 {
4230 if (spec->codec_type != VT1708 || spec->autocfg.hp_pins[0] == 0)
4231 return;
4232 - if (snd_hda_get_bool_hint(spec->codec, "analog_loopback_hp_detect") == 1
4233 - && !is_aa_path_mute(spec->codec))
4234 - return;
4235 - snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81,
4236 - !spec->vt1708_jack_detect);
4237 - cancel_delayed_work_sync(&spec->vt1708_hp_work);
4238 + if (spec->vt1708_jack_detect &&
4239 + (spec->active_streams || hp_detect_with_aa(spec->codec))) {
4240 + if (!spec->hp_work_active) {
4241 + snd_hda_codec_write(spec->codec, 0x1, 0, 0xf81, 0);
4242 + schedule_delayed_work(&spec->vt1708_hp_work,
4243 + msecs_to_jiffies(100));
4244 + spec->hp_work_active = 1;
4245 + }
4246 + } else if (!hp_detect_with_aa(spec->codec))
4247 + vt1708_stop_hp_work(spec);
4248 }
4249
4250 static void set_widgets_power_state(struct hda_codec *codec)
4251 @@ -342,12 +351,7 @@ static int analog_input_switch_put(struct snd_kcontrol *kcontrol,
4252
4253 set_widgets_power_state(codec);
4254 analog_low_current_mode(snd_kcontrol_chip(kcontrol));
4255 - if (snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") == 1) {
4256 - if (is_aa_path_mute(codec))
4257 - vt1708_start_hp_work(codec->spec);
4258 - else
4259 - vt1708_stop_hp_work(codec->spec);
4260 - }
4261 + vt1708_update_hp_work(codec->spec);
4262 return change;
4263 }
4264
4265 @@ -1153,7 +1157,7 @@ static int via_playback_multi_pcm_prepare(struct hda_pcm_stream *hinfo,
4266 spec->cur_dac_stream_tag = stream_tag;
4267 spec->cur_dac_format = format;
4268 mutex_unlock(&spec->config_mutex);
4269 - vt1708_start_hp_work(spec);
4270 + vt1708_update_hp_work(spec);
4271 return 0;
4272 }
4273
4274 @@ -1173,7 +1177,7 @@ static int via_playback_hp_pcm_prepare(struct hda_pcm_stream *hinfo,
4275 spec->cur_hp_stream_tag = stream_tag;
4276 spec->cur_hp_format = format;
4277 mutex_unlock(&spec->config_mutex);
4278 - vt1708_start_hp_work(spec);
4279 + vt1708_update_hp_work(spec);
4280 return 0;
4281 }
4282
4283 @@ -1187,7 +1191,7 @@ static int via_playback_multi_pcm_cleanup(struct hda_pcm_stream *hinfo,
4284 snd_hda_multi_out_analog_cleanup(codec, &spec->multiout);
4285 spec->active_streams &= ~STREAM_MULTI_OUT;
4286 mutex_unlock(&spec->config_mutex);
4287 - vt1708_stop_hp_work(spec);
4288 + vt1708_update_hp_work(spec);
4289 return 0;
4290 }
4291
4292 @@ -1202,7 +1206,7 @@ static int via_playback_hp_pcm_cleanup(struct hda_pcm_stream *hinfo,
4293 snd_hda_codec_setup_stream(codec, spec->hp_dac_nid, 0, 0, 0);
4294 spec->active_streams &= ~STREAM_INDEP_HP;
4295 mutex_unlock(&spec->config_mutex);
4296 - vt1708_stop_hp_work(spec);
4297 + vt1708_update_hp_work(spec);
4298 return 0;
4299 }
4300
4301 @@ -1632,7 +1636,8 @@ static void via_hp_automute(struct hda_codec *codec)
4302 int nums;
4303 struct via_spec *spec = codec->spec;
4304
4305 - if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0])
4306 + if (!spec->hp_independent_mode && spec->autocfg.hp_pins[0] &&
4307 + (spec->codec_type != VT1708 || spec->vt1708_jack_detect))
4308 present = snd_hda_jack_detect(codec, spec->autocfg.hp_pins[0]);
4309
4310 if (spec->smart51_enabled)
4311 @@ -2599,8 +2604,6 @@ static int vt1708_jack_detect_get(struct snd_kcontrol *kcontrol,
4312
4313 if (spec->codec_type != VT1708)
4314 return 0;
4315 - spec->vt1708_jack_detect =
4316 - !((snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8) & 0x1);
4317 ucontrol->value.integer.value[0] = spec->vt1708_jack_detect;
4318 return 0;
4319 }
4320 @@ -2610,18 +2613,22 @@ static int vt1708_jack_detect_put(struct snd_kcontrol *kcontrol,
4321 {
4322 struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
4323 struct via_spec *spec = codec->spec;
4324 - int change;
4325 + int val;
4326
4327 if (spec->codec_type != VT1708)
4328 return 0;
4329 - spec->vt1708_jack_detect = ucontrol->value.integer.value[0];
4330 - change = (0x1 & (snd_hda_codec_read(codec, 0x1, 0, 0xf84, 0) >> 8))
4331 - == !spec->vt1708_jack_detect;
4332 - if (spec->vt1708_jack_detect) {
4333 + val = !!ucontrol->value.integer.value[0];
4334 + if (spec->vt1708_jack_detect == val)
4335 + return 0;
4336 + spec->vt1708_jack_detect = val;
4337 + if (spec->vt1708_jack_detect &&
4338 + snd_hda_get_bool_hint(codec, "analog_loopback_hp_detect") != 1) {
4339 mute_aa_path(codec, 1);
4340 notify_aa_path_ctls(codec);
4341 }
4342 - return change;
4343 + via_hp_automute(codec);
4344 + vt1708_update_hp_work(spec);
4345 + return 1;
4346 }
4347
4348 static const struct snd_kcontrol_new vt1708_jack_detect_ctl = {
4349 @@ -2758,6 +2765,7 @@ static int via_init(struct hda_codec *codec)
4350 via_auto_init_unsol_event(codec);
4351
4352 via_hp_automute(codec);
4353 + vt1708_update_hp_work(spec);
4354
4355 return 0;
4356 }
4357 @@ -2774,7 +2782,9 @@ static void vt1708_update_hp_jack_state(struct work_struct *work)
4358 spec->vt1708_hp_present ^= 1;
4359 via_hp_automute(spec->codec);
4360 }
4361 - vt1708_start_hp_work(spec);
4362 + if (spec->vt1708_jack_detect)
4363 + schedule_delayed_work(&spec->vt1708_hp_work,
4364 + msecs_to_jiffies(100));
4365 }
4366
4367 static int get_mux_nids(struct hda_codec *codec)
4368 diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
4369 index 5c8717e..aa73372 100644
4370 --- a/sound/pci/lx6464es/lx_core.c
4371 +++ b/sound/pci/lx6464es/lx_core.c
4372 @@ -80,8 +80,12 @@ unsigned long lx_dsp_reg_read(struct lx6464es *chip, int port)
4373
4374 void lx_dsp_reg_readbuf(struct lx6464es *chip, int port, u32 *data, u32 len)
4375 {
4376 - void __iomem *address = lx_dsp_register(chip, port);
4377 - memcpy_fromio(data, address, len*sizeof(u32));
4378 + u32 __iomem *address = lx_dsp_register(chip, port);
4379 + int i;
4380 +
4381 + /* we cannot use memcpy_fromio */
4382 + for (i = 0; i != len; ++i)
4383 + data[i] = ioread32(address + i);
4384 }
4385
4386
4387 @@ -94,8 +98,12 @@ void lx_dsp_reg_write(struct lx6464es *chip, int port, unsigned data)
4388 void lx_dsp_reg_writebuf(struct lx6464es *chip, int port, const u32 *data,
4389 u32 len)
4390 {
4391 - void __iomem *address = lx_dsp_register(chip, port);
4392 - memcpy_toio(address, data, len*sizeof(u32));
4393 + u32 __iomem *address = lx_dsp_register(chip, port);
4394 + int i;
4395 +
4396 + /* we cannot use memcpy_toio */
4397 + for (i = 0; i != len; ++i)
4398 + iowrite32(data[i], address + i);
4399 }
4400
4401
4402 diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
4403 index 444747f..dd7be0d 100644
4404 --- a/sound/soc/codecs/ad1836.h
4405 +++ b/sound/soc/codecs/ad1836.h
4406 @@ -34,7 +34,7 @@
4407
4408 #define AD1836_ADC_CTRL2 13
4409 #define AD1836_ADC_WORD_LEN_MASK 0x30
4410 -#define AD1836_ADC_WORD_OFFSET 5
4411 +#define AD1836_ADC_WORD_OFFSET 4
4412 #define AD1836_ADC_SERFMT_MASK (7 << 6)
4413 #define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
4414 #define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
4415 diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
4416 index fbd7eb9..d5630af 100644
4417 --- a/sound/soc/codecs/sta32x.c
4418 +++ b/sound/soc/codecs/sta32x.c
4419 @@ -76,6 +76,8 @@ struct sta32x_priv {
4420
4421 unsigned int mclk;
4422 unsigned int format;
4423 +
4424 + u32 coef_shadow[STA32X_COEF_COUNT];
4425 };
4426
4427 static const DECLARE_TLV_DB_SCALE(mvol_tlv, -12700, 50, 1);
4428 @@ -227,6 +229,7 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
4429 struct snd_ctl_elem_value *ucontrol)
4430 {
4431 struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
4432 + struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
4433 int numcoef = kcontrol->private_value >> 16;
4434 int index = kcontrol->private_value & 0xffff;
4435 unsigned int cfud;
4436 @@ -239,6 +242,11 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
4437 snd_soc_write(codec, STA32X_CFUD, cfud);
4438
4439 snd_soc_write(codec, STA32X_CFADDR2, index);
4440 + for (i = 0; i < numcoef && (index + i < STA32X_COEF_COUNT); i++)
4441 + sta32x->coef_shadow[index + i] =
4442 + (ucontrol->value.bytes.data[3 * i] << 16)
4443 + | (ucontrol->value.bytes.data[3 * i + 1] << 8)
4444 + | (ucontrol->value.bytes.data[3 * i + 2]);
4445 for (i = 0; i < 3 * numcoef; i++)
4446 snd_soc_write(codec, STA32X_B1CF1 + i,
4447 ucontrol->value.bytes.data[i]);
4448 @@ -252,6 +260,48 @@ static int sta32x_coefficient_put(struct snd_kcontrol *kcontrol,
4449 return 0;
4450 }
4451
4452 +int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
4453 +{
4454 + struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
4455 + unsigned int cfud;
4456 + int i;
4457 +
4458 + /* preserve reserved bits in STA32X_CFUD */
4459 + cfud = snd_soc_read(codec, STA32X_CFUD) & 0xf0;
4460 +
4461 + for (i = 0; i < STA32X_COEF_COUNT; i++) {
4462 + snd_soc_write(codec, STA32X_CFADDR2, i);
4463 + snd_soc_write(codec, STA32X_B1CF1,
4464 + (sta32x->coef_shadow[i] >> 16) & 0xff);
4465 + snd_soc_write(codec, STA32X_B1CF2,
4466 + (sta32x->coef_shadow[i] >> 8) & 0xff);
4467 + snd_soc_write(codec, STA32X_B1CF3,
4468 + (sta32x->coef_shadow[i]) & 0xff);
4469 + /* chip documentation does not say if the bits are
4470 + * self-clearing, so do it explicitly */
4471 + snd_soc_write(codec, STA32X_CFUD, cfud);
4472 + snd_soc_write(codec, STA32X_CFUD, cfud | 0x01);
4473 + }
4474 + return 0;
4475 +}
4476 +
4477 +int sta32x_cache_sync(struct snd_soc_codec *codec)
4478 +{
4479 + unsigned int mute;
4480 + int rc;
4481 +
4482 + if (!codec->cache_sync)
4483 + return 0;
4484 +
4485 + /* mute during register sync */
4486 + mute = snd_soc_read(codec, STA32X_MMUTE);
4487 + snd_soc_write(codec, STA32X_MMUTE, mute | STA32X_MMUTE_MMUTE);
4488 + sta32x_sync_coef_shadow(codec);
4489 + rc = snd_soc_cache_sync(codec);
4490 + snd_soc_write(codec, STA32X_MMUTE, mute);
4491 + return rc;
4492 +}
4493 +
4494 #define SINGLE_COEF(xname, index) \
4495 { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
4496 .info = sta32x_coefficient_info, \
4497 @@ -657,7 +707,7 @@ static int sta32x_set_bias_level(struct snd_soc_codec *codec,
4498 return ret;
4499 }
4500
4501 - snd_soc_cache_sync(codec);
4502 + sta32x_cache_sync(codec);
4503 }
4504
4505 /* Power up to mute */
4506 @@ -792,6 +842,17 @@ static int sta32x_probe(struct snd_soc_codec *codec)
4507 STA32X_CxCFG_OM_MASK,
4508 2 << STA32X_CxCFG_OM_SHIFT);
4509
4510 + /* initialize coefficient shadow RAM with reset values */
4511 + for (i = 4; i <= 49; i += 5)
4512 + sta32x->coef_shadow[i] = 0x400000;
4513 + for (i = 50; i <= 54; i++)
4514 + sta32x->coef_shadow[i] = 0x7fffff;
4515 + sta32x->coef_shadow[55] = 0x5a9df7;
4516 + sta32x->coef_shadow[56] = 0x7fffff;
4517 + sta32x->coef_shadow[59] = 0x7fffff;
4518 + sta32x->coef_shadow[60] = 0x400000;
4519 + sta32x->coef_shadow[61] = 0x400000;
4520 +
4521 sta32x_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
4522 /* Bias level configuration will have done an extra enable */
4523 regulator_bulk_disable(ARRAY_SIZE(sta32x->supplies), sta32x->supplies);
4524 diff --git a/sound/soc/codecs/sta32x.h b/sound/soc/codecs/sta32x.h
4525 index b97ee5a..d8e32a6 100644
4526 --- a/sound/soc/codecs/sta32x.h
4527 +++ b/sound/soc/codecs/sta32x.h
4528 @@ -19,6 +19,7 @@
4529 /* STA326 register addresses */
4530
4531 #define STA32X_REGISTER_COUNT 0x2d
4532 +#define STA32X_COEF_COUNT 62
4533
4534 #define STA32X_CONFA 0x00
4535 #define STA32X_CONFB 0x01
4536 diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
4537 index 76b4361..f5a0ec4 100644
4538 --- a/sound/soc/codecs/wm8731.c
4539 +++ b/sound/soc/codecs/wm8731.c
4540 @@ -463,6 +463,7 @@ static int wm8731_set_bias_level(struct snd_soc_codec *codec,
4541 snd_soc_write(codec, WM8731_PWR, 0xffff);
4542 regulator_bulk_disable(ARRAY_SIZE(wm8731->supplies),
4543 wm8731->supplies);
4544 + codec->cache_sync = 1;
4545 break;
4546 }
4547 codec->dapm.bias_level = level;
4548 diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c
4549 index aa091a0..66d18a3 100644
4550 --- a/sound/soc/codecs/wm8753.c
4551 +++ b/sound/soc/codecs/wm8753.c
4552 @@ -189,6 +189,9 @@ static int wm8753_set_dai(struct snd_kcontrol *kcontrol,
4553 struct wm8753_priv *wm8753 = snd_soc_codec_get_drvdata(codec);
4554 u16 ioctl;
4555
4556 + if (wm8753->dai_func == ucontrol->value.integer.value[0])
4557 + return 0;
4558 +
4559 if (codec->active)
4560 return -EBUSY;
4561
4562 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
4563 index d48afea..6729829 100644
4564 --- a/sound/soc/fsl/fsl_ssi.c
4565 +++ b/sound/soc/fsl/fsl_ssi.c
4566 @@ -703,6 +703,7 @@ static int __devinit fsl_ssi_probe(struct platform_device *pdev)
4567
4568 /* Initialize the device_attribute structure */
4569 dev_attr = &ssi_private->dev_attr;
4570 + sysfs_attr_init(&dev_attr->attr);
4571 dev_attr->attr.name = "statistics";
4572 dev_attr->attr.mode = S_IRUGO;
4573 dev_attr->show = fsl_sysfs_ssi_show;
4574 diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
4575 index 0a7ed5b..6c164dc 100644
4576 --- a/tools/perf/util/trace-event-parse.c
4577 +++ b/tools/perf/util/trace-event-parse.c
4578 @@ -1537,6 +1537,8 @@ process_flags(struct event *event, struct print_arg *arg, char **tok)
4579 field = malloc_or_die(sizeof(*field));
4580
4581 type = process_arg(event, field, &token);
4582 + while (type == EVENT_OP)
4583 + type = process_op(event, field, &token);
4584 if (test_type_token(type, token, EVENT_DELIM, ","))
4585 goto out_free;
4586