Magellan Linux

Annotation of /trunk/kernel-lts/patches-3.4/0101-3.4.2-all-fixes.patch

Revision 1907
Wed Oct 10 11:20:27 2012 UTC by niro
File size: 92686 bytes
-3.4.13-lts-r1
1 niro 1907 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
2     index 36586dba..7a8660a 100644
3     --- a/arch/arm/Kconfig
4     +++ b/arch/arm/Kconfig
5     @@ -556,7 +556,7 @@ config ARCH_IXP4XX
6     select ARCH_HAS_DMA_SET_COHERENT_MASK
7     select CLKSRC_MMIO
8     select CPU_XSCALE
9     - select GENERIC_GPIO
10     + select ARCH_REQUIRE_GPIOLIB
11     select GENERIC_CLOCKEVENTS
12     select MIGHT_HAVE_PCI
13     select NEED_MACH_IO_H
14     diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
15     index ebbd7fc..a9f8094 100644
16     --- a/arch/arm/mach-ixp4xx/common.c
17     +++ b/arch/arm/mach-ixp4xx/common.c
18     @@ -28,6 +28,7 @@
19     #include <linux/clockchips.h>
20     #include <linux/io.h>
21     #include <linux/export.h>
22     +#include <linux/gpio.h>
23    
24     #include <mach/udc.h>
25     #include <mach/hardware.h>
26     @@ -107,7 +108,7 @@ static signed char irq2gpio[32] = {
27     7, 8, 9, 10, 11, 12, -1, -1,
28     };
29    
30     -int gpio_to_irq(int gpio)
31     +static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
32     {
33     int irq;
34    
35     @@ -117,7 +118,6 @@ int gpio_to_irq(int gpio)
36     }
37     return -EINVAL;
38     }
39     -EXPORT_SYMBOL(gpio_to_irq);
40    
41     int irq_to_gpio(unsigned int irq)
42     {
43     @@ -383,12 +383,56 @@ static struct platform_device *ixp46x_devices[] __initdata = {
44     unsigned long ixp4xx_exp_bus_size;
45     EXPORT_SYMBOL(ixp4xx_exp_bus_size);
46    
47     +static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
48     +{
49     + gpio_line_config(gpio, IXP4XX_GPIO_IN);
50     +
51     + return 0;
52     +}
53     +
54     +static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
55     + int level)
56     +{
57     + gpio_line_set(gpio, level);
58     + gpio_line_config(gpio, IXP4XX_GPIO_OUT);
59     +
60     + return 0;
61     +}
62     +
63     +static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
64     +{
65     + int value;
66     +
67     + gpio_line_get(gpio, &value);
68     +
69     + return value;
70     +}
71     +
72     +static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
73     + int value)
74     +{
75     + gpio_line_set(gpio, value);
76     +}
77     +
78     +static struct gpio_chip ixp4xx_gpio_chip = {
79     + .label = "IXP4XX_GPIO_CHIP",
80     + .direction_input = ixp4xx_gpio_direction_input,
81     + .direction_output = ixp4xx_gpio_direction_output,
82     + .get = ixp4xx_gpio_get_value,
83     + .set = ixp4xx_gpio_set_value,
84     + .to_irq = ixp4xx_gpio_to_irq,
85     + .base = 0,
86     + .ngpio = 16,
87     +};
88     +
89     void __init ixp4xx_sys_init(void)
90     {
91     ixp4xx_exp_bus_size = SZ_16M;
92    
93     platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
94    
95     + gpiochip_add(&ixp4xx_gpio_chip);
96     +
97     if (cpu_is_ixp46x()) {
98     int region;
99    
100     diff --git a/arch/arm/mach-ixp4xx/include/mach/gpio.h b/arch/arm/mach-ixp4xx/include/mach/gpio.h
101     index 83d6b4e..ef37f26 100644
102     --- a/arch/arm/mach-ixp4xx/include/mach/gpio.h
103     +++ b/arch/arm/mach-ixp4xx/include/mach/gpio.h
104     @@ -1,79 +1,2 @@
105     -/*
106     - * arch/arm/mach-ixp4xx/include/mach/gpio.h
107     - *
108     - * IXP4XX GPIO wrappers for arch-neutral GPIO calls
109     - *
110     - * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
111     - * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
112     - *
113     - * This program is free software; you can redistribute it and/or modify
114     - * it under the terms of the GNU General Public License as published by
115     - * the Free Software Foundation; either version 2 of the License, or
116     - * (at your option) any later version.
117     - *
118     - * This program is distributed in the hope that it will be useful,
119     - * but WITHOUT ANY WARRANTY; without even the implied warranty of
120     - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
121     - * GNU General Public License for more details.
122     - *
123     - * You should have received a copy of the GNU General Public License
124     - * along with this program; if not, write to the Free Software
125     - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
126     - *
127     - */
128     -
129     -#ifndef __ASM_ARCH_IXP4XX_GPIO_H
130     -#define __ASM_ARCH_IXP4XX_GPIO_H
131     -
132     -#include <linux/kernel.h>
133     -#include <mach/hardware.h>
134     -
135     -#define __ARM_GPIOLIB_COMPLEX
136     -
137     -static inline int gpio_request(unsigned gpio, const char *label)
138     -{
139     - return 0;
140     -}
141     -
142     -static inline void gpio_free(unsigned gpio)
143     -{
144     - might_sleep();
145     -
146     - return;
147     -}
148     -
149     -static inline int gpio_direction_input(unsigned gpio)
150     -{
151     - gpio_line_config(gpio, IXP4XX_GPIO_IN);
152     - return 0;
153     -}
154     -
155     -static inline int gpio_direction_output(unsigned gpio, int level)
156     -{
157     - gpio_line_set(gpio, level);
158     - gpio_line_config(gpio, IXP4XX_GPIO_OUT);
159     - return 0;
160     -}
161     -
162     -static inline int gpio_get_value(unsigned gpio)
163     -{
164     - int value;
165     -
166     - gpio_line_get(gpio, &value);
167     -
168     - return value;
169     -}
170     -
171     -static inline void gpio_set_value(unsigned gpio, int value)
172     -{
173     - gpio_line_set(gpio, value);
174     -}
175     -
176     -#include <asm-generic/gpio.h> /* cansleep wrappers */
177     -
178     -extern int gpio_to_irq(int gpio);
179     -#define gpio_to_irq gpio_to_irq
180     -extern int irq_to_gpio(unsigned int irq);
181     -
182     -#endif
183     +/* empty */
184    
185     diff --git a/arch/ia64/include/asm/Kbuild b/arch/ia64/include/asm/Kbuild
186     index 241d1c5..d4eb938 100644
187     --- a/arch/ia64/include/asm/Kbuild
188     +++ b/arch/ia64/include/asm/Kbuild
189     @@ -1,6 +1,7 @@
190     include include/asm-generic/Kbuild.asm
191    
192     header-y += break.h
193     +header-y += cmpxchg.h
194     header-y += fpu.h
195     header-y += gcc_intrin.h
196     header-y += ia64regs.h
197     diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
198     index ac22dc7..333b85e 100644
199     --- a/arch/microblaze/Kconfig
200     +++ b/arch/microblaze/Kconfig
201     @@ -57,7 +57,7 @@ config GENERIC_CLOCKEVENTS
202     def_bool y
203    
204     config GENERIC_GPIO
205     - def_bool y
206     + bool
207    
208     config GENERIC_CSUM
209     def_bool y
210     diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
211     index 3d5de96..1d7dd96 100644
212     --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
213     +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
214     @@ -2,6 +2,7 @@
215     #define BCM63XX_GPIO_H
216    
217     #include <linux/init.h>
218     +#include <bcm63xx_cpu.h>
219    
220     int __init bcm63xx_gpio_init(void);
221    
222     diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
223     index 5350342..07ef351 100644
224     --- a/arch/parisc/kernel/entry.S
225     +++ b/arch/parisc/kernel/entry.S
226     @@ -552,7 +552,7 @@
227     * entry (identifying the physical page) and %r23 up with
228     * the from tlb entry (or nothing if only a to entry---for
229     * clear_user_page_asm) */
230     - .macro do_alias spc,tmp,tmp1,va,pte,prot,fault
231     + .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
232     cmpib,COND(<>),n 0,\spc,\fault
233     ldil L%(TMPALIAS_MAP_START),\tmp
234     #if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
235     @@ -581,11 +581,15 @@
236     */
237     cmpiclr,= 0x01,\tmp,%r0
238     ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
239     -#ifdef CONFIG_64BIT
240     +.ifc \patype,20
241     depd,z \prot,8,7,\prot
242     -#else
243     +.else
244     +.ifc \patype,11
245     depw,z \prot,8,7,\prot
246     -#endif
247     +.else
248     + .error "undefined PA type to do_alias"
249     +.endif
250     +.endif
251     /*
252     * OK, it is in the temp alias region, check whether "from" or "to".
253     * Check "subtle" note in pacache.S re: r23/r26.
254     @@ -1189,7 +1193,7 @@ dtlb_miss_20w:
255     nop
256    
257     dtlb_check_alias_20w:
258     - do_alias spc,t0,t1,va,pte,prot,dtlb_fault
259     + do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
260    
261     idtlbt pte,prot
262    
263     @@ -1213,7 +1217,7 @@ nadtlb_miss_20w:
264     nop
265    
266     nadtlb_check_alias_20w:
267     - do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
268     + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
269    
270     idtlbt pte,prot
271    
272     @@ -1245,7 +1249,7 @@ dtlb_miss_11:
273     nop
274    
275     dtlb_check_alias_11:
276     - do_alias spc,t0,t1,va,pte,prot,dtlb_fault
277     + do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
278    
279     idtlba pte,(va)
280     idtlbp prot,(va)
281     @@ -1277,7 +1281,7 @@ nadtlb_miss_11:
282     nop
283    
284     nadtlb_check_alias_11:
285     - do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
286     + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
287    
288     idtlba pte,(va)
289     idtlbp prot,(va)
290     @@ -1304,7 +1308,7 @@ dtlb_miss_20:
291     nop
292    
293     dtlb_check_alias_20:
294     - do_alias spc,t0,t1,va,pte,prot,dtlb_fault
295     + do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
296    
297     idtlbt pte,prot
298    
299     @@ -1330,7 +1334,7 @@ nadtlb_miss_20:
300     nop
301    
302     nadtlb_check_alias_20:
303     - do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate
304     + do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
305    
306     idtlbt pte,prot
307    
308     @@ -1457,7 +1461,7 @@ naitlb_miss_20w:
309     nop
310    
311     naitlb_check_alias_20w:
312     - do_alias spc,t0,t1,va,pte,prot,naitlb_fault
313     + do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
314    
315     iitlbt pte,prot
316    
317     @@ -1511,7 +1515,7 @@ naitlb_miss_11:
318     nop
319    
320     naitlb_check_alias_11:
321     - do_alias spc,t0,t1,va,pte,prot,itlb_fault
322     + do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
323    
324     iitlba pte,(%sr0, va)
325     iitlbp prot,(%sr0, va)
326     @@ -1557,7 +1561,7 @@ naitlb_miss_20:
327     nop
328    
329     naitlb_check_alias_20:
330     - do_alias spc,t0,t1,va,pte,prot,naitlb_fault
331     + do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
332    
333     iitlbt pte,prot
334    
335     diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
336     index fa6f2b8..64a9998 100644
337     --- a/arch/parisc/kernel/vmlinux.lds.S
338     +++ b/arch/parisc/kernel/vmlinux.lds.S
339     @@ -50,8 +50,10 @@ SECTIONS
340     . = KERNEL_BINARY_TEXT_START;
341    
342     _text = .; /* Text and read-only data */
343     - .text ALIGN(16) : {
344     + .head ALIGN(16) : {
345     HEAD_TEXT
346     + } = 0
347     + .text ALIGN(16) : {
348     TEXT_TEXT
349     SCHED_TEXT
350     LOCK_TEXT
351     @@ -65,7 +67,7 @@ SECTIONS
352     *(.fixup)
353     *(.lock.text) /* out-of-line lock text */
354     *(.gnu.warning)
355     - } = 0
356     + }
357     /* End of text section */
358     _etext = .;
359    
360     diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
361     index 47acaf3..32856fa 100644
362     --- a/arch/x86/kernel/nmi.c
363     +++ b/arch/x86/kernel/nmi.c
364     @@ -491,14 +491,16 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
365     */
366     if (unlikely(is_debug_stack(regs->sp))) {
367     debug_stack_set_zero();
368     - __get_cpu_var(update_debug_stack) = 1;
369     + this_cpu_write(update_debug_stack, 1);
370     }
371     }
372    
373     static inline void nmi_nesting_postprocess(void)
374     {
375     - if (unlikely(__get_cpu_var(update_debug_stack)))
376     + if (unlikely(this_cpu_read(update_debug_stack))) {
377     debug_stack_reset();
378     + this_cpu_write(update_debug_stack, 0);
379     + }
380     }
381     #endif
382    
383     diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
384     index 685845c..cf11783 100644
385     --- a/arch/x86/kernel/ptrace.c
386     +++ b/arch/x86/kernel/ptrace.c
387     @@ -1211,12 +1211,6 @@ static long x32_arch_ptrace(struct task_struct *child,
388     0, sizeof(struct user_i387_struct),
389     datap);
390    
391     - /* normal 64bit interface to access TLS data.
392     - Works just like arch_prctl, except that the arguments
393     - are reversed. */
394     - case PTRACE_ARCH_PRCTL:
395     - return do_arch_prctl(child, data, addr);
396     -
397     default:
398     return compat_ptrace_request(child, request, addr, data);
399     }
400     diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
401     index 95dccce..6c7f1e8 100644
402     --- a/arch/x86/xen/enlighten.c
403     +++ b/arch/x86/xen/enlighten.c
404     @@ -1106,7 +1106,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
405     .wbinvd = native_wbinvd,
406    
407     .read_msr = native_read_msr_safe,
408     + .rdmsr_regs = native_rdmsr_safe_regs,
409     .write_msr = xen_write_msr_safe,
410     + .wrmsr_regs = native_wrmsr_safe_regs,
411     +
412     .read_tsc = native_read_tsc,
413     .read_pmc = native_read_pmc,
414    
415     diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
416     index 86933ca..7dd3f9f 100644
417     --- a/drivers/acpi/battery.c
418     +++ b/drivers/acpi/battery.c
419     @@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
420    
421     static void acpi_battery_refresh(struct acpi_battery *battery)
422     {
423     + int power_unit;
424     +
425     if (!battery->bat.dev)
426     return;
427    
428     + power_unit = battery->power_unit;
429     +
430     acpi_battery_get_info(battery);
431     - /* The battery may have changed its reporting units. */
432     +
433     + if (power_unit == battery->power_unit)
434     + return;
435     +
436     + /* The battery has changed its reporting units. */
437     sysfs_remove_battery(battery);
438     sysfs_add_battery(battery);
439     }
440     diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
441     index e8cd652..9851093 100644
442     --- a/drivers/atm/solos-pci.c
443     +++ b/drivers/atm/solos-pci.c
444     @@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
445     } else if (skb && card->using_dma) {
446     SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
447     skb->len, PCI_DMA_TODEVICE);
448     + card->tx_skb[port] = skb;
449     iowrite32(SKB_CB(skb)->dma_addr,
450     card->config_regs + TX_DMA_ADDR(port));
451     }
452     @@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
453     db_fpga_upgrade = db_firmware_upgrade = 0;
454     }
455    
456     - if (card->fpga_version >= DMA_SUPPORTED){
457     + if (card->fpga_version >= DMA_SUPPORTED) {
458     + pci_set_master(dev);
459     card->using_dma = 1;
460     } else {
461     card->using_dma = 0;
462     diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
463     index 79a7de1..d4d162f 100644
464     --- a/drivers/gpu/drm/i915/intel_display.c
465     +++ b/drivers/gpu/drm/i915/intel_display.c
466     @@ -8368,7 +8368,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
467     I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
468     I915_WRITE(GEN6_RP_CONTROL,
469     GEN6_RP_MEDIA_TURBO |
470     - GEN6_RP_MEDIA_HW_MODE |
471     + GEN6_RP_MEDIA_HW_NORMAL_MODE |
472     GEN6_RP_MEDIA_IS_GFX |
473     GEN6_RP_ENABLE |
474     GEN6_RP_UP_BUSY_AVG |
475     diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
476     index 4b63791..1d19408 100644
477     --- a/drivers/gpu/drm/i915/intel_dp.c
478     +++ b/drivers/gpu/drm/i915/intel_dp.c
479     @@ -1148,10 +1148,10 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
480    
481     DRM_DEBUG_KMS("Turn eDP power off\n");
482    
483     - WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
484     + WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
485    
486     pp = ironlake_get_pp_control(dev_priv);
487     - pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
488     + pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
489     I915_WRITE(PCH_PP_CONTROL, pp);
490     POSTING_READ(PCH_PP_CONTROL);
491    
492     @@ -1259,18 +1259,16 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
493     {
494     struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
495    
496     +
497     + /* Make sure the panel is off before trying to change the mode. But also
498     + * ensure that we have vdd while we switch off the panel. */
499     + ironlake_edp_panel_vdd_on(intel_dp);
500     ironlake_edp_backlight_off(intel_dp);
501     ironlake_edp_panel_off(intel_dp);
502    
503     - /* Wake up the sink first */
504     - ironlake_edp_panel_vdd_on(intel_dp);
505     intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
506     intel_dp_link_down(intel_dp);
507     ironlake_edp_panel_vdd_off(intel_dp, false);
508     -
509     - /* Make sure the panel is off before trying to
510     - * change the mode
511     - */
512     }
513    
514     static void intel_dp_commit(struct drm_encoder *encoder)
515     @@ -1302,10 +1300,11 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
516     uint32_t dp_reg = I915_READ(intel_dp->output_reg);
517    
518     if (mode != DRM_MODE_DPMS_ON) {
519     + /* Switching the panel off requires vdd. */
520     + ironlake_edp_panel_vdd_on(intel_dp);
521     ironlake_edp_backlight_off(intel_dp);
522     ironlake_edp_panel_off(intel_dp);
523    
524     - ironlake_edp_panel_vdd_on(intel_dp);
525     intel_dp_sink_dpms(intel_dp, mode);
526     intel_dp_link_down(intel_dp);
527     ironlake_edp_panel_vdd_off(intel_dp, false);
528     diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
529     index 9c71183..9fadd64 100644
530     --- a/drivers/gpu/drm/i915/intel_lvds.c
531     +++ b/drivers/gpu/drm/i915/intel_lvds.c
532     @@ -747,6 +747,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
533     },
534     {
535     .callback = intel_no_lvds_dmi_callback,
536     + .ident = "Hewlett-Packard HP t5740e Thin Client",
537     + .matches = {
538     + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
539     + DMI_MATCH(DMI_PRODUCT_NAME, "HP t5740e Thin Client"),
540     + },
541     + },
542     + {
543     + .callback = intel_no_lvds_dmi_callback,
544     .ident = "Hewlett-Packard t5745",
545     .matches = {
546     DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
547     diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
548     index ae5e748..eea58c6 100644
549     --- a/drivers/gpu/drm/i915/intel_sdvo.c
550     +++ b/drivers/gpu/drm/i915/intel_sdvo.c
551     @@ -769,10 +769,12 @@ static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
552     ((v_sync_len & 0x30) >> 4);
553    
554     dtd->part2.dtd_flags = 0x18;
555     + if (mode->flags & DRM_MODE_FLAG_INTERLACE)
556     + dtd->part2.dtd_flags |= DTD_FLAG_INTERLACE;
557     if (mode->flags & DRM_MODE_FLAG_PHSYNC)
558     - dtd->part2.dtd_flags |= 0x2;
559     + dtd->part2.dtd_flags |= DTD_FLAG_HSYNC_POSITIVE;
560     if (mode->flags & DRM_MODE_FLAG_PVSYNC)
561     - dtd->part2.dtd_flags |= 0x4;
562     + dtd->part2.dtd_flags |= DTD_FLAG_VSYNC_POSITIVE;
563    
564     dtd->part2.sdvo_flags = 0;
565     dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
566     @@ -806,9 +808,11 @@ static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
567     mode->clock = dtd->part1.clock * 10;
568    
569     mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
570     - if (dtd->part2.dtd_flags & 0x2)
571     + if (dtd->part2.dtd_flags & DTD_FLAG_INTERLACE)
572     + mode->flags |= DRM_MODE_FLAG_INTERLACE;
573     + if (dtd->part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
574     mode->flags |= DRM_MODE_FLAG_PHSYNC;
575     - if (dtd->part2.dtd_flags & 0x4)
576     + if (dtd->part2.dtd_flags & DTD_FLAG_VSYNC_POSITIVE)
577     mode->flags |= DRM_MODE_FLAG_PVSYNC;
578     }
579    
580     diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
581     index 6b7b22f..9d03014 100644
582     --- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
583     +++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
584     @@ -61,6 +61,11 @@ struct intel_sdvo_caps {
585     u16 output_flags;
586     } __attribute__((packed));
587    
588     +/* Note: SDVO detailed timing flags match EDID misc flags. */
589     +#define DTD_FLAG_HSYNC_POSITIVE (1 << 1)
590     +#define DTD_FLAG_VSYNC_POSITIVE (1 << 2)
591     +#define DTD_FLAG_INTERLACE (1 << 7)
592     +
593     /** This matches the EDID DTD structure, more or less */
594     struct intel_sdvo_dtd {
595     struct {
596     diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
597     index 05f765e..c82b1d4 100644
598     --- a/drivers/gpu/drm/i915/intel_tv.c
599     +++ b/drivers/gpu/drm/i915/intel_tv.c
600     @@ -674,6 +674,54 @@ static const struct tv_mode tv_modes[] = {
601     .filter_table = filter_table,
602     },
603     {
604     + .name = "480p",
605     + .clock = 107520,
606     + .refresh = 59940,
607     + .oversample = TV_OVERSAMPLE_4X,
608     + .component_only = 1,
609     +
610     + .hsync_end = 64, .hblank_end = 122,
611     + .hblank_start = 842, .htotal = 857,
612     +
613     + .progressive = true, .trilevel_sync = false,
614     +
615     + .vsync_start_f1 = 12, .vsync_start_f2 = 12,
616     + .vsync_len = 12,
617     +
618     + .veq_ena = false,
619     +
620     + .vi_end_f1 = 44, .vi_end_f2 = 44,
621     + .nbr_end = 479,
622     +
623     + .burst_ena = false,
624     +
625     + .filter_table = filter_table,
626     + },
627     + {
628     + .name = "576p",
629     + .clock = 107520,
630     + .refresh = 50000,
631     + .oversample = TV_OVERSAMPLE_4X,
632     + .component_only = 1,
633     +
634     + .hsync_end = 64, .hblank_end = 139,
635     + .hblank_start = 859, .htotal = 863,
636     +
637     + .progressive = true, .trilevel_sync = false,
638     +
639     + .vsync_start_f1 = 10, .vsync_start_f2 = 10,
640     + .vsync_len = 10,
641     +
642     + .veq_ena = false,
643     +
644     + .vi_end_f1 = 48, .vi_end_f2 = 48,
645     + .nbr_end = 575,
646     +
647     + .burst_ena = false,
648     +
649     + .filter_table = filter_table,
650     + },
651     + {
652     .name = "720p@60Hz",
653     .clock = 148800,
654     .refresh = 60000,
655     @@ -1185,6 +1233,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
656    
657     I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
658     I915_WRITE(TV_CTL, save_tv_ctl);
659     + POSTING_READ(TV_CTL);
660     +
661     + /* For unknown reasons the hw barfs if we don't do this vblank wait. */
662     + intel_wait_for_vblank(intel_tv->base.base.dev,
663     + to_intel_crtc(intel_tv->base.base.crtc)->pipe);
664    
665     /* Restore interrupt config */
666     if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
667     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
668     index cfa372c..51e8d08 100644
669     --- a/drivers/gpu/drm/radeon/evergreen.c
670     +++ b/drivers/gpu/drm/radeon/evergreen.c
671     @@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
672     WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
673     WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
674     WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
675     + if ((rdev->family == CHIP_JUNIPER) ||
676     + (rdev->family == CHIP_CYPRESS) ||
677     + (rdev->family == CHIP_HEMLOCK) ||
678     + (rdev->family == CHIP_BARTS))
679     + WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
680     }
681     WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
682     WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
683     @@ -2136,9 +2141,12 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
684     /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
685     if (rdev->flags & RADEON_IS_IGP)
686     rdev->config.evergreen.tile_config |= 1 << 4;
687     - else
688     - rdev->config.evergreen.tile_config |=
689     - ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
690     + else {
691     + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
692     + rdev->config.evergreen.tile_config |= 1 << 4;
693     + else
694     + rdev->config.evergreen.tile_config |= 0 << 4;
695     + }
696     rdev->config.evergreen.tile_config |=
697     ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
698     rdev->config.evergreen.tile_config |=
699     @@ -2170,9 +2178,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
700     WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
701     WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
702     WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
703     - }
704     + }
705    
706     - grbm_gfx_index |= SE_BROADCAST_WRITES;
707     + grbm_gfx_index = INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES;
708     WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
709     WREG32(RLC_GFX_INDEX, grbm_gfx_index);
710    
711     diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
712     index b4eefc3..a5b88aa 100644
713     --- a/drivers/gpu/drm/radeon/evergreend.h
714     +++ b/drivers/gpu/drm/radeon/evergreend.h
715     @@ -232,6 +232,7 @@
716     #define MC_VM_MD_L1_TLB0_CNTL 0x2654
717     #define MC_VM_MD_L1_TLB1_CNTL 0x2658
718     #define MC_VM_MD_L1_TLB2_CNTL 0x265C
719     +#define MC_VM_MD_L1_TLB3_CNTL 0x2698
720    
721     #define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
722     #define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
723     diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
724     index a48ca53..ad0a380 100644
725     --- a/drivers/gpu/drm/radeon/ni.c
726     +++ b/drivers/gpu/drm/radeon/ni.c
727     @@ -657,15 +657,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
728     rdev->config.cayman.max_pipes_per_simd = 4;
729     rdev->config.cayman.max_tile_pipes = 2;
730     if ((rdev->pdev->device == 0x9900) ||
731     - (rdev->pdev->device == 0x9901)) {
732     + (rdev->pdev->device == 0x9901) ||
733     + (rdev->pdev->device == 0x9905) ||
734     + (rdev->pdev->device == 0x9906) ||
735     + (rdev->pdev->device == 0x9907) ||
736     + (rdev->pdev->device == 0x9908) ||
737     + (rdev->pdev->device == 0x9909) ||
738     + (rdev->pdev->device == 0x9910) ||
739     + (rdev->pdev->device == 0x9917)) {
740     rdev->config.cayman.max_simds_per_se = 6;
741     rdev->config.cayman.max_backends_per_se = 2;
742     } else if ((rdev->pdev->device == 0x9903) ||
743     - (rdev->pdev->device == 0x9904)) {
744     + (rdev->pdev->device == 0x9904) ||
745     + (rdev->pdev->device == 0x990A) ||
746     + (rdev->pdev->device == 0x9913) ||
747     + (rdev->pdev->device == 0x9918)) {
748     rdev->config.cayman.max_simds_per_se = 4;
749     rdev->config.cayman.max_backends_per_se = 2;
750     - } else if ((rdev->pdev->device == 0x9990) ||
751     - (rdev->pdev->device == 0x9991)) {
752     + } else if ((rdev->pdev->device == 0x9919) ||
753     + (rdev->pdev->device == 0x9990) ||
754     + (rdev->pdev->device == 0x9991) ||
755     + (rdev->pdev->device == 0x9994) ||
756     + (rdev->pdev->device == 0x99A0)) {
757     rdev->config.cayman.max_simds_per_se = 3;
758     rdev->config.cayman.max_backends_per_se = 1;
759     } else {
760     @@ -865,10 +878,13 @@ static void cayman_gpu_init(struct radeon_device *rdev)
761    
762     /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
763     if (rdev->flags & RADEON_IS_IGP)
764     - rdev->config.evergreen.tile_config |= 1 << 4;
765     - else
766     - rdev->config.cayman.tile_config |=
767     - ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
768     + rdev->config.cayman.tile_config |= 1 << 4;
769     + else {
770     + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
771     + rdev->config.cayman.tile_config |= 1 << 4;
772     + else
773     + rdev->config.cayman.tile_config |= 0 << 4;
774     + }
775     rdev->config.cayman.tile_config |=
776     ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
777     rdev->config.cayman.tile_config |=
778     diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
779     index ba66f30..24e3939 100644
780     --- a/drivers/gpu/drm/radeon/r600_audio.c
781     +++ b/drivers/gpu/drm/radeon/r600_audio.c
782     @@ -239,6 +239,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
783     struct radeon_device *rdev = dev->dev_private;
784     struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
785     struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
786     + struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
787     int base_rate = 48000;
788    
789     switch (radeon_encoder->encoder_id) {
790     @@ -264,8 +265,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
791     WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
792     WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
793    
794     - /* Some magic trigger or src sel? */
795     - WREG32_P(0x5ac, 0x01, ~0x77);
796     + /* Select DTO source */
797     + WREG32(0x5ac, radeon_crtc->crtc_id);
798     } else {
799     switch (dig->dig_encoder) {
800     case 0:
801     diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
802     index f6e69b8..b1e3820 100644
803     --- a/drivers/gpu/drm/radeon/radeon_atombios.c
804     +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
805     @@ -444,7 +444,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
806     */
807     if ((dev->pdev->device == 0x9498) &&
808     (dev->pdev->subsystem_vendor == 0x1682) &&
809     - (dev->pdev->subsystem_device == 0x2452)) {
810     + (dev->pdev->subsystem_device == 0x2452) &&
811     + (i2c_bus->valid == false) &&
812     + !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
813     struct radeon_device *rdev = dev->dev_private;
814     *i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
815     }
816     diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
817     index 5cac832..2418cf6 100644
818     --- a/drivers/gpu/drm/radeon/radeon_cs.c
819     +++ b/drivers/gpu/drm/radeon/radeon_cs.c
820     @@ -158,6 +158,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
821     return 0;
822     }
823    
824     +/* XXX: note that this is called from the legacy UMS CS ioctl as well */
825     int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
826     {
827     struct drm_radeon_cs *cs = data;
828     @@ -252,22 +253,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
829     }
830     }
831    
832     - if ((p->cs_flags & RADEON_CS_USE_VM) &&
833     - !p->rdev->vm_manager.enabled) {
834     - DRM_ERROR("VM not active on asic!\n");
835     - return -EINVAL;
836     - }
837     -
838     - /* we only support VM on SI+ */
839     - if ((p->rdev->family >= CHIP_TAHITI) &&
840     - ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
841     - DRM_ERROR("VM required on SI+!\n");
842     - return -EINVAL;
843     - }
844     + /* these are KMS only */
845     + if (p->rdev) {
846     + if ((p->cs_flags & RADEON_CS_USE_VM) &&
847     + !p->rdev->vm_manager.enabled) {
848     + DRM_ERROR("VM not active on asic!\n");
849     + return -EINVAL;
850     + }
851    
852     - if (radeon_cs_get_ring(p, ring, priority))
853     - return -EINVAL;
854     + /* we only support VM on SI+ */
855     + if ((p->rdev->family >= CHIP_TAHITI) &&
856     + ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
857     + DRM_ERROR("VM required on SI+!\n");
858     + return -EINVAL;
859     + }
860    
861     + if (radeon_cs_get_ring(p, ring, priority))
862     + return -EINVAL;
863     + }
864    
865     /* deal with non-vm */
866     if ((p->chunk_ib_idx != -1) &&
867     diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
868     index c58a036..62050f5 100644
869     --- a/drivers/gpu/drm/radeon/radeon_gart.c
870     +++ b/drivers/gpu/drm/radeon/radeon_gart.c
871     @@ -478,12 +478,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
872    
873     mutex_lock(&vm->mutex);
874     if (last_pfn > vm->last_pfn) {
875     - /* grow va space 32M by 32M */
876     - unsigned align = ((32 << 20) >> 12) - 1;
877     + /* release mutex and lock in right order */
878     + mutex_unlock(&vm->mutex);
879     radeon_mutex_lock(&rdev->cs_mutex);
880     - radeon_vm_unbind_locked(rdev, vm);
881     + mutex_lock(&vm->mutex);
882     + /* and check again */
883     + if (last_pfn > vm->last_pfn) {
884     + /* grow va space 32M by 32M */
885     + unsigned align = ((32 << 20) >> 12) - 1;
886     + radeon_vm_unbind_locked(rdev, vm);
887     + vm->last_pfn = (last_pfn + align) & ~align;
888     + }
889     radeon_mutex_unlock(&rdev->cs_mutex);
890     - vm->last_pfn = (last_pfn + align) & ~align;
891     }
892     head = &vm->va;
893     last_offset = 0;
894     @@ -597,8 +603,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
895     if (bo_va == NULL)
896     return 0;
897    
898     - mutex_lock(&vm->mutex);
899     radeon_mutex_lock(&rdev->cs_mutex);
900     + mutex_lock(&vm->mutex);
901     radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
902     radeon_mutex_unlock(&rdev->cs_mutex);
903     list_del(&bo_va->vm_list);
904     @@ -643,9 +649,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
905     struct radeon_bo_va *bo_va, *tmp;
906     int r;
907    
908     - mutex_lock(&vm->mutex);
909     -
910     radeon_mutex_lock(&rdev->cs_mutex);
911     + mutex_lock(&vm->mutex);
912     radeon_vm_unbind_locked(rdev, vm);
913     radeon_mutex_unlock(&rdev->cs_mutex);
914    
915     diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
916     index cdab1ae..80a292b 100644
917     --- a/drivers/gpu/drm/radeon/rv770.c
918     +++ b/drivers/gpu/drm/radeon/rv770.c
919     @@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
920     WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
921     WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
922     WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
923     + if (rdev->family == CHIP_RV740)
924     + WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
925     WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
926     WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
927     WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
928     @@ -689,8 +691,12 @@ static void rv770_gpu_init(struct radeon_device *rdev)
929    
930     if (rdev->family == CHIP_RV770)
931     gb_tiling_config |= BANK_TILING(1);
932     - else
933     - gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
934     + else {
935     + if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
936     + gb_tiling_config |= BANK_TILING(1);
937     + else
938     + gb_tiling_config |= BANK_TILING(0);
939     + }
940     rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
941     gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
942     if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
943     diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
944     index 79fa588..7538092 100644
945     --- a/drivers/gpu/drm/radeon/rv770d.h
946     +++ b/drivers/gpu/drm/radeon/rv770d.h
947     @@ -174,6 +174,7 @@
948     #define MC_VM_MD_L1_TLB0_CNTL 0x2654
949     #define MC_VM_MD_L1_TLB1_CNTL 0x2658
950     #define MC_VM_MD_L1_TLB2_CNTL 0x265C
951     +#define MC_VM_MD_L1_TLB3_CNTL 0x2698
952     #define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
953     #define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
954     #define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
955     diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
956     index 1f5c67c..1843418 100644
957     --- a/drivers/gpu/drm/ttm/ttm_bo.c
958     +++ b/drivers/gpu/drm/ttm/ttm_bo.c
959     @@ -1821,6 +1821,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
960     spin_unlock(&glob->lru_lock);
961     (void) ttm_bo_cleanup_refs(bo, false, false, false);
962     kref_put(&bo->list_kref, ttm_bo_release_list);
963     + spin_lock(&glob->lru_lock);
964     continue;
965     }
966    
967     diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
968     index 51c9ba5..21ee782 100644
969     --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
970     +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
971     @@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
972     cmd += sizeof(remap_cmd) / sizeof(uint32);
973    
974     for (i = 0; i < num_pages; ++i) {
975     - if (VMW_PPN_SIZE > 4)
976     + if (VMW_PPN_SIZE <= 4)
977     *cmd = page_to_pfn(*pages++);
978     else
979     *((uint64_t *)cmd) = page_to_pfn(*pages++);
980     diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
981     index a5bee8e..a2e418c 100644
982     --- a/drivers/iommu/amd_iommu.c
983     +++ b/drivers/iommu/amd_iommu.c
984     @@ -450,12 +450,27 @@ static void dump_command(unsigned long phys_addr)
985    
986     static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
987     {
988     - u32 *event = __evt;
989     - int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
990     - int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
991     - int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
992     - int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
993     - u64 address = (u64)(((u64)event[3]) << 32) | event[2];
994     + int type, devid, domid, flags;
995     + volatile u32 *event = __evt;
996     + int count = 0;
997     + u64 address;
998     +
999     +retry:
1000     + type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
1001     + devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
1002     + domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
1003     + flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
1004     + address = (u64)(((u64)event[3]) << 32) | event[2];
1005     +
1006     + if (type == 0) {
1007     + /* Did we hit the erratum? */
1008     + if (++count == LOOP_TIMEOUT) {
1009     + pr_err("AMD-Vi: No event written to event log\n");
1010     + return;
1011     + }
1012     + udelay(1);
1013     + goto retry;
1014     + }
1015    
1016     printk(KERN_ERR "AMD-Vi: Event logged [");
1017    
1018     @@ -508,6 +523,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
1019     default:
1020     printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
1021     }
1022     +
1023     + memset(__evt, 0, 4 * sizeof(u32));
1024     }
1025    
1026     static void iommu_poll_events(struct amd_iommu *iommu)
1027     @@ -530,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
1028     spin_unlock_irqrestore(&iommu->lock, flags);
1029     }
1030    
1031     -static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
1032     +static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
1033     {
1034     struct amd_iommu_fault fault;
1035     - volatile u64 *raw;
1036     - int i;
1037    
1038     INC_STATS_COUNTER(pri_requests);
1039    
1040     - raw = (u64 *)(iommu->ppr_log + head);
1041     -
1042     - /*
1043     - * Hardware bug: Interrupt may arrive before the entry is written to
1044     - * memory. If this happens we need to wait for the entry to arrive.
1045     - */
1046     - for (i = 0; i < LOOP_TIMEOUT; ++i) {
1047     - if (PPR_REQ_TYPE(raw[0]) != 0)
1048     - break;
1049     - udelay(1);
1050     - }
1051     -
1052     if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
1053     pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
1054     return;
1055     @@ -561,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
1056     fault.tag = PPR_TAG(raw[0]);
1057     fault.flags = PPR_FLAGS(raw[0]);
1058    
1059     - /*
1060     - * To detect the hardware bug we need to clear the entry
1061     - * to back to zero.
1062     - */
1063     - raw[0] = raw[1] = 0;
1064     -
1065     atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
1066     }
1067    
1068     @@ -578,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
1069     if (iommu->ppr_log == NULL)
1070     return;
1071    
1072     + /* enable ppr interrupts again */
1073     + writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
1074     +
1075     spin_lock_irqsave(&iommu->lock, flags);
1076    
1077     head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
1078     tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
1079    
1080     while (head != tail) {
1081     + volatile u64 *raw;
1082     + u64 entry[2];
1083     + int i;
1084    
1085     - /* Handle PPR entry */
1086     - iommu_handle_ppr_entry(iommu, head);
1087     + raw = (u64 *)(iommu->ppr_log + head);
1088    
1089     - /* Update and refresh ring-buffer state*/
1090     + /*
1091     + * Hardware bug: Interrupt may arrive before the entry is
1092     + * written to memory. If this happens we need to wait for the
1093     + * entry to arrive.
1094     + */
1095     + for (i = 0; i < LOOP_TIMEOUT; ++i) {
1096     + if (PPR_REQ_TYPE(raw[0]) != 0)
1097     + break;
1098     + udelay(1);
1099     + }
1100     +
1101     + /* Avoid memcpy function-call overhead */
1102     + entry[0] = raw[0];
1103     + entry[1] = raw[1];
1104     +
1105     + /*
1106     + * To detect the hardware bug we need to clear the entry
1107     + * back to zero.
1108     + */
1109     + raw[0] = raw[1] = 0UL;
1110     +
1111     + /* Update head pointer of hardware ring-buffer */
1112     head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
1113     writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
1114     +
1115     + /*
1116     + * Release iommu->lock because ppr-handling might need to
1117     + * re-aquire it
1118     + */
1119     + spin_unlock_irqrestore(&iommu->lock, flags);
1120     +
1121     + /* Handle PPR entry */
1122     + iommu_handle_ppr_entry(iommu, entry);
1123     +
1124     + spin_lock_irqsave(&iommu->lock, flags);
1125     +
1126     + /* Refresh ring-buffer information */
1127     + head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
1128     tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
1129     }
1130    
1131     - /* enable ppr interrupts again */
1132     - writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
1133     -
1134     spin_unlock_irqrestore(&iommu->lock, flags);
1135     }
1136    
1137     @@ -2035,20 +2069,20 @@ out_err:
1138     }
1139    
1140     /* FIXME: Move this to PCI code */
1141     -#define PCI_PRI_TLP_OFF (1 << 2)
1142     +#define PCI_PRI_TLP_OFF (1 << 15)
1143    
1144     bool pci_pri_tlp_required(struct pci_dev *pdev)
1145     {
1146     - u16 control;
1147     + u16 status;
1148     int pos;
1149    
1150     pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
1151     if (!pos)
1152     return false;
1153    
1154     - pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
1155     + pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
1156    
1157     - return (control & PCI_PRI_TLP_OFF) ? true : false;
1158     + return (status & PCI_PRI_TLP_OFF) ? true : false;
1159     }
1160    
1161     /*
1162     diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
1163     index c567903..542024b 100644
1164     --- a/drivers/iommu/amd_iommu_init.c
1165     +++ b/drivers/iommu/amd_iommu_init.c
1166     @@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1167     if (!iommu->dev)
1168     return 1;
1169    
1170     + iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
1171     + PCI_DEVFN(0, 0));
1172     +
1173     iommu->cap_ptr = h->cap_ptr;
1174     iommu->pci_seg = h->pci_seg;
1175     iommu->mmio_phys = h->mmio_phys;
1176     @@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1177     {
1178     int i, j;
1179     u32 ioc_feature_control;
1180     - struct pci_dev *pdev = NULL;
1181     + struct pci_dev *pdev = iommu->root_pdev;
1182    
1183     /* RD890 BIOSes may not have completely reconfigured the iommu */
1184     - if (!is_rd890_iommu(iommu->dev))
1185     + if (!is_rd890_iommu(iommu->dev) || !pdev)
1186     return;
1187    
1188     /*
1189     * First, we need to ensure that the iommu is enabled. This is
1190     * controlled by a register in the northbridge
1191     */
1192     - pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
1193     -
1194     - if (!pdev)
1195     - return;
1196    
1197     /* Select Northbridge indirect register 0x75 and enable writing */
1198     pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
1199     @@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1200     if (!(ioc_feature_control & 0x1))
1201     pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
1202    
1203     - pci_dev_put(pdev);
1204     -
1205     /* Restore the iommu BAR */
1206     pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1207     iommu->stored_addr_lo);
1208     diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
1209     index 2452f3b..2435555 100644
1210     --- a/drivers/iommu/amd_iommu_types.h
1211     +++ b/drivers/iommu/amd_iommu_types.h
1212     @@ -481,6 +481,9 @@ struct amd_iommu {
1213     /* Pointer to PCI device of this IOMMU */
1214     struct pci_dev *dev;
1215    
1216     + /* Cache pdev to root device for resume quirks */
1217     + struct pci_dev *root_pdev;
1218     +
1219     /* physical address of MMIO space */
1220     u64 mmio_phys;
1221     /* virtual address of MMIO space */
1222     diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
1223     index 15dd59b..d7e9577 100644
1224     --- a/drivers/md/raid1.c
1225     +++ b/drivers/md/raid1.c
1226     @@ -2548,6 +2548,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
1227     err = -EINVAL;
1228     spin_lock_init(&conf->device_lock);
1229     rdev_for_each(rdev, mddev) {
1230     + struct request_queue *q;
1231     int disk_idx = rdev->raid_disk;
1232     if (disk_idx >= mddev->raid_disks
1233     || disk_idx < 0)
1234     @@ -2560,6 +2561,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
1235     if (disk->rdev)
1236     goto abort;
1237     disk->rdev = rdev;
1238     + q = bdev_get_queue(rdev->bdev);
1239     + if (q->merge_bvec_fn)
1240     + mddev->merge_check_needed = 1;
1241    
1242     disk->head_position = 0;
1243     }
1244     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
1245     index 3f91c2e..d037adb 100644
1246     --- a/drivers/md/raid10.c
1247     +++ b/drivers/md/raid10.c
1248     @@ -3311,7 +3311,7 @@ static int run(struct mddev *mddev)
1249     (conf->raid_disks / conf->near_copies));
1250    
1251     rdev_for_each(rdev, mddev) {
1252     -
1253     + struct request_queue *q;
1254     disk_idx = rdev->raid_disk;
1255     if (disk_idx >= conf->raid_disks
1256     || disk_idx < 0)
1257     @@ -3327,6 +3327,9 @@ static int run(struct mddev *mddev)
1258     goto out_free_conf;
1259     disk->rdev = rdev;
1260     }
1261     + q = bdev_get_queue(rdev->bdev);
1262     + if (q->merge_bvec_fn)
1263     + mddev->merge_check_needed = 1;
1264    
1265     disk_stack_limits(mddev->gendisk, rdev->bdev,
1266     rdev->data_offset << 9);
1267     diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
1268     index 5760c1a..27143e0 100644
1269     --- a/drivers/mtd/Kconfig
1270     +++ b/drivers/mtd/Kconfig
1271     @@ -128,7 +128,7 @@ config MTD_AFS_PARTS
1272    
1273     config MTD_OF_PARTS
1274     tristate "OpenFirmware partitioning information support"
1275     - default Y
1276     + default y
1277     depends on OF
1278     help
1279     This provides a partition parsing function which derives
1280     diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
1281     index a4a80b7..7d7000d 100644
1282     --- a/drivers/mtd/devices/block2mtd.c
1283     +++ b/drivers/mtd/devices/block2mtd.c
1284     @@ -271,7 +271,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
1285     dev->mtd.flags = MTD_CAP_RAM;
1286     dev->mtd._erase = block2mtd_erase;
1287     dev->mtd._write = block2mtd_write;
1288     - dev->mtd._writev = mtd_writev;
1289     dev->mtd._sync = block2mtd_sync;
1290     dev->mtd._read = block2mtd_read;
1291     dev->mtd.priv = dev;
1292     diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
1293     index cc0678a..6f87c74 100644
1294     --- a/drivers/mtd/nand/mxc_nand.c
1295     +++ b/drivers/mtd/nand/mxc_nand.c
1296     @@ -1219,12 +1219,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1297     if (nfc_is_v21() && mtd->writesize == 4096)
1298     this->ecc.layout = &nandv2_hw_eccoob_4k;
1299    
1300     - /* second phase scan */
1301     - if (nand_scan_tail(mtd)) {
1302     - err = -ENXIO;
1303     - goto escan;
1304     - }
1305     -
1306     if (this->ecc.mode == NAND_ECC_HW) {
1307     if (nfc_is_v1())
1308     this->ecc.strength = 1;
1309     @@ -1232,6 +1226,12 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1310     this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
1311     }
1312    
1313     + /* second phase scan */
1314     + if (nand_scan_tail(mtd)) {
1315     + err = -ENXIO;
1316     + goto escan;
1317     + }
1318     +
1319     /* Register the partitions */
1320     mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts,
1321     pdata->nr_parts);
1322     diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
1323     index 20a112f..30d1319 100644
1324     --- a/drivers/mtd/nand/nand_bbt.c
1325     +++ b/drivers/mtd/nand/nand_bbt.c
1326     @@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
1327    
1328     buf += mtd->oobsize + mtd->writesize;
1329     len -= mtd->writesize;
1330     + offs += mtd->writesize;
1331     }
1332     return 0;
1333     }
1334     diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
1335     index 7b34d8c..fc87e89 100644
1336     --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
1337     +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
1338     @@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
1339     length = status & BCOM_FEC_RX_BD_LEN_MASK;
1340     skb_put(rskb, length - 4); /* length without CRC32 */
1341     rskb->protocol = eth_type_trans(rskb, dev);
1342     - if (!skb_defer_rx_timestamp(skb))
1343     + if (!skb_defer_rx_timestamp(rskb))
1344     netif_rx(rskb);
1345    
1346     spin_lock(&priv->lock);
1347     diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
1348     index 42b5151..609fcc3 100644
1349     --- a/drivers/net/usb/asix.c
1350     +++ b/drivers/net/usb/asix.c
1351     @@ -35,6 +35,7 @@
1352     #include <linux/crc32.h>
1353     #include <linux/usb/usbnet.h>
1354     #include <linux/slab.h>
1355     +#include <linux/if_vlan.h>
1356    
1357     #define DRIVER_VERSION "22-Dec-2011"
1358     #define DRIVER_NAME "asix"
1359     @@ -321,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1360     return 0;
1361     }
1362    
1363     - if ((size > dev->net->mtu + ETH_HLEN) ||
1364     + if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
1365     (size + offset > skb->len)) {
1366     netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
1367     size);
1368     diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
1369     index 23eaa1b..d59dd01 100644
1370     --- a/drivers/net/wireless/ath/ath9k/xmit.c
1371     +++ b/drivers/net/wireless/ath/ath9k/xmit.c
1372     @@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
1373     static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1374     struct ath_txq *txq,
1375     struct ath_atx_tid *tid,
1376     - struct sk_buff *skb);
1377     + struct sk_buff *skb,
1378     + bool dequeue);
1379    
1380     enum {
1381     MCS_HT20,
1382     @@ -811,7 +812,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
1383     fi = get_frame_info(skb);
1384     bf = fi->bf;
1385     if (!fi->bf)
1386     - bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1387     + bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
1388    
1389     if (!bf)
1390     continue;
1391     @@ -1726,7 +1727,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1392     return;
1393     }
1394    
1395     - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1396     + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
1397     if (!bf)
1398     return;
1399    
1400     @@ -1753,7 +1754,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1401    
1402     bf = fi->bf;
1403     if (!bf)
1404     - bf = ath_tx_setup_buffer(sc, txq, tid, skb);
1405     + bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
1406    
1407     if (!bf)
1408     return;
1409     @@ -1814,7 +1815,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1410     static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1411     struct ath_txq *txq,
1412     struct ath_atx_tid *tid,
1413     - struct sk_buff *skb)
1414     + struct sk_buff *skb,
1415     + bool dequeue)
1416     {
1417     struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1418     struct ath_frame_info *fi = get_frame_info(skb);
1419     @@ -1863,6 +1865,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1420     return bf;
1421    
1422     error:
1423     + if (dequeue)
1424     + __skb_unlink(skb, &tid->buf_q);
1425     dev_kfree_skb_any(skb);
1426     return NULL;
1427     }
1428     @@ -1893,7 +1897,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1429     */
1430     ath_tx_send_ampdu(sc, tid, skb, txctl);
1431     } else {
1432     - bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
1433     + bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
1434     if (!bf)
1435     return;
1436    
1437     diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
1438     index ea10862..4da050f 100644
1439     --- a/drivers/net/wireless/iwlwifi/iwl-2000.c
1440     +++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
1441     @@ -183,7 +183,7 @@ static const struct iwl_base_params iwl2000_base_params = {
1442     .chain_noise_scale = 1000,
1443     .wd_timeout = IWL_DEF_WD_TIMEOUT,
1444     .max_event_log_size = 512,
1445     - .shadow_reg_enable = true,
1446     + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
1447     .hd_v2 = true,
1448     };
1449    
1450     @@ -202,7 +202,7 @@ static const struct iwl_base_params iwl2030_base_params = {
1451     .chain_noise_scale = 1000,
1452     .wd_timeout = IWL_LONG_WD_TIMEOUT,
1453     .max_event_log_size = 512,
1454     - .shadow_reg_enable = true,
1455     + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
1456     .hd_v2 = true,
1457     };
1458    
1459     diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
1460     index f0c9150..9f71b85 100644
1461     --- a/drivers/net/wireless/iwlwifi/iwl-6000.c
1462     +++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
1463     @@ -282,7 +282,7 @@ static const struct iwl_base_params iwl6000_base_params = {
1464     .chain_noise_scale = 1000,
1465     .wd_timeout = IWL_DEF_WD_TIMEOUT,
1466     .max_event_log_size = 512,
1467     - .shadow_reg_enable = true,
1468     + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
1469     };
1470    
1471     static const struct iwl_base_params iwl6050_base_params = {
1472     @@ -299,7 +299,7 @@ static const struct iwl_base_params iwl6050_base_params = {
1473     .chain_noise_scale = 1500,
1474     .wd_timeout = IWL_DEF_WD_TIMEOUT,
1475     .max_event_log_size = 1024,
1476     - .shadow_reg_enable = true,
1477     + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
1478     };
1479    
1480     static const struct iwl_base_params iwl6000_g2_base_params = {
1481     @@ -316,7 +316,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
1482     .chain_noise_scale = 1000,
1483     .wd_timeout = IWL_LONG_WD_TIMEOUT,
1484     .max_event_log_size = 512,
1485     - .shadow_reg_enable = true,
1486     + .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
1487     };
1488    
1489     static const struct iwl_ht_params iwl6000_ht_params = {
1490     diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1491     index 7e590b3..da2be3e 100644
1492     --- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1493     +++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
1494     @@ -884,6 +884,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
1495     if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
1496     (priv->bt_full_concurrent != full_concurrent)) {
1497     priv->bt_full_concurrent = full_concurrent;
1498     + priv->last_bt_traffic_load = priv->bt_traffic_load;
1499    
1500     /* Update uCode's rate table. */
1501     tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
1502     diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
1503     index 1b851f6..e2750a1 100644
1504     --- a/drivers/net/wireless/wl1251/sdio.c
1505     +++ b/drivers/net/wireless/wl1251/sdio.c
1506     @@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
1507     }
1508    
1509     if (wl->irq) {
1510     + irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
1511     ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
1512     if (ret < 0) {
1513     wl1251_error("request_irq() failed: %d", ret);
1514     @@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
1515     }
1516    
1517     irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
1518     - disable_irq(wl->irq);
1519    
1520     wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
1521     wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
1522     diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
1523     index 6248c35..87f6305 100644
1524     --- a/drivers/net/wireless/wl1251/spi.c
1525     +++ b/drivers/net/wireless/wl1251/spi.c
1526     @@ -281,6 +281,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
1527    
1528     wl->use_eeprom = pdata->use_eeprom;
1529    
1530     + irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
1531     ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
1532     if (ret < 0) {
1533     wl1251_error("request_irq() failed: %d", ret);
1534     @@ -289,8 +290,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
1535    
1536     irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
1537    
1538     - disable_irq(wl->irq);
1539     -
1540     ret = wl1251_init_ieee80211(wl);
1541     if (ret)
1542     goto out_irq;
1543     diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
1544     index 5dfd749..4037fd5 100644
1545     --- a/drivers/scsi/scsi_lib.c
1546     +++ b/drivers/scsi/scsi_lib.c
1547     @@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
1548     {
1549     struct scsi_device *sdev = q->queuedata;
1550     struct Scsi_Host *shost;
1551     - struct scsi_target *starget;
1552    
1553     if (!sdev)
1554     return 0;
1555    
1556     shost = sdev->host;
1557     - starget = scsi_target(sdev);
1558    
1559     - if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
1560     - scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
1561     + /*
1562     + * Ignore host/starget busy state.
1563     + * Since the block layer does not have a concept of fairness across
1564     + * multiple queues, congestion of the host/starget needs to be handled
1565     + * in the SCSI layer.
1566     + */
1567     + if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1568     return 1;
1569    
1570     return 0;
1571     diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
1572     index 74708fc..ae78148 100644
1573     --- a/drivers/scsi/scsi_wait_scan.c
1574     +++ b/drivers/scsi/scsi_wait_scan.c
1575     @@ -12,7 +12,7 @@
1576    
1577     #include <linux/module.h>
1578     #include <linux/device.h>
1579     -#include <scsi/scsi_scan.h>
1580     +#include "scsi_priv.h"
1581    
1582     static int __init wait_scan_init(void)
1583     {
1584     diff --git a/fs/attr.c b/fs/attr.c
1585     index 73f69a6..d94d1b6 100644
1586     --- a/fs/attr.c
1587     +++ b/fs/attr.c
1588     @@ -176,6 +176,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
1589     return -EPERM;
1590     }
1591    
1592     + if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
1593     + if (attr->ia_size != inode->i_size)
1594     + inode_inc_iversion(inode);
1595     + }
1596     +
1597     if ((ia_valid & ATTR_MODE)) {
1598     umode_t amode = attr->ia_mode;
1599     /* Flag setting protected by i_mutex */
1600     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
1601     index 4ff6313..73fea28 100644
1602     --- a/fs/cifs/cifsglob.h
1603     +++ b/fs/cifs/cifsglob.h
1604     @@ -43,6 +43,7 @@
1605    
1606     #define CIFS_MIN_RCV_POOL 4
1607    
1608     +#define MAX_REOPEN_ATT 5 /* maximum number of attempts to reopen a file */
1609     /*
1610     * default attribute cache timeout (jiffies)
1611     */
1612     diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
1613     index 96192c1..97f5d03 100644
1614     --- a/fs/cifs/cifsproto.h
1615     +++ b/fs/cifs/cifsproto.h
1616     @@ -192,11 +192,13 @@ extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
1617    
1618     extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
1619     const char *searchName, const struct nls_table *nls_codepage,
1620     - __u16 *searchHandle, struct cifs_search_info *psrch_inf,
1621     + __u16 *searchHandle, __u16 search_flags,
1622     + struct cifs_search_info *psrch_inf,
1623     int map, const char dirsep);
1624    
1625     extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
1626     - __u16 searchHandle, struct cifs_search_info *psrch_inf);
1627     + __u16 searchHandle, __u16 search_flags,
1628     + struct cifs_search_info *psrch_inf);
1629    
1630     extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
1631     const __u16 search_handle);
1632     diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
1633     index da2f544..6b79efd 100644
1634     --- a/fs/cifs/cifssmb.c
1635     +++ b/fs/cifs/cifssmb.c
1636     @@ -4344,7 +4344,7 @@ int
1637     CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
1638     const char *searchName,
1639     const struct nls_table *nls_codepage,
1640     - __u16 *pnetfid,
1641     + __u16 *pnetfid, __u16 search_flags,
1642     struct cifs_search_info *psrch_inf, int remap, const char dirsep)
1643     {
1644     /* level 257 SMB_ */
1645     @@ -4416,8 +4416,7 @@ findFirstRetry:
1646     cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
1647     ATTR_DIRECTORY);
1648     pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
1649     - pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END |
1650     - CIFS_SEARCH_RETURN_RESUME);
1651     + pSMB->SearchFlags = cpu_to_le16(search_flags);
1652     pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
1653    
1654     /* BB what should we set StorageType to? Does it matter? BB */
1655     @@ -4487,8 +4486,8 @@ findFirstRetry:
1656     return rc;
1657     }
1658    
1659     -int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
1660     - __u16 searchHandle, struct cifs_search_info *psrch_inf)
1661     +int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
1662     + __u16 search_flags, struct cifs_search_info *psrch_inf)
1663     {
1664     TRANSACTION2_FNEXT_REQ *pSMB = NULL;
1665     TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
1666     @@ -4531,8 +4530,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
1667     cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
1668     pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
1669     pSMB->ResumeKey = psrch_inf->resume_key;
1670     - pSMB->SearchFlags =
1671     - cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
1672     + pSMB->SearchFlags = cpu_to_le16(search_flags);
1673    
1674     name_len = psrch_inf->resume_name_len;
1675     params += name_len;
1676     diff --git a/fs/cifs/file.c b/fs/cifs/file.c
1677     index 81725e9..e7ebb5a 100644
1678     --- a/fs/cifs/file.c
1679     +++ b/fs/cifs/file.c
1680     @@ -1539,10 +1539,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1681     struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1682     bool fsuid_only)
1683     {
1684     - struct cifsFileInfo *open_file;
1685     + struct cifsFileInfo *open_file, *inv_file = NULL;
1686     struct cifs_sb_info *cifs_sb;
1687     bool any_available = false;
1688     int rc;
1689     + unsigned int refind = 0;
1690    
1691     /* Having a null inode here (because mapping->host was set to zero by
1692     the VFS or MM) should not happen but we had reports of on oops (due to
1693     @@ -1562,40 +1563,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1694    
1695     spin_lock(&cifs_file_list_lock);
1696     refind_writable:
1697     + if (refind > MAX_REOPEN_ATT) {
1698     + spin_unlock(&cifs_file_list_lock);
1699     + return NULL;
1700     + }
1701     list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1702     if (!any_available && open_file->pid != current->tgid)
1703     continue;
1704     if (fsuid_only && open_file->uid != current_fsuid())
1705     continue;
1706     if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1707     - cifsFileInfo_get(open_file);
1708     -
1709     if (!open_file->invalidHandle) {
1710     /* found a good writable file */
1711     + cifsFileInfo_get(open_file);
1712     spin_unlock(&cifs_file_list_lock);
1713     return open_file;
1714     + } else {
1715     + if (!inv_file)
1716     + inv_file = open_file;
1717     }
1718     -
1719     - spin_unlock(&cifs_file_list_lock);
1720     -
1721     - /* Had to unlock since following call can block */
1722     - rc = cifs_reopen_file(open_file, false);
1723     - if (!rc)
1724     - return open_file;
1725     -
1726     - /* if it fails, try another handle if possible */
1727     - cFYI(1, "wp failed on reopen file");
1728     - cifsFileInfo_put(open_file);
1729     -
1730     - spin_lock(&cifs_file_list_lock);
1731     -
1732     - /* else we simply continue to the next entry. Thus
1733     - we do not loop on reopen errors. If we
1734     - can not reopen the file, for example if we
1735     - reconnected to a server with another client
1736     - racing to delete or lock the file we would not
1737     - make progress if we restarted before the beginning
1738     - of the loop here. */
1739     }
1740     }
1741     /* couldn't find useable FH with same pid, try any available */
1742     @@ -1603,7 +1589,30 @@ refind_writable:
1743     any_available = true;
1744     goto refind_writable;
1745     }
1746     +
1747     + if (inv_file) {
1748     + any_available = false;
1749     + cifsFileInfo_get(inv_file);
1750     + }
1751     +
1752     spin_unlock(&cifs_file_list_lock);
1753     +
1754     + if (inv_file) {
1755     + rc = cifs_reopen_file(inv_file, false);
1756     + if (!rc)
1757     + return inv_file;
1758     + else {
1759     + spin_lock(&cifs_file_list_lock);
1760     + list_move_tail(&inv_file->flist,
1761     + &cifs_inode->openFileList);
1762     + spin_unlock(&cifs_file_list_lock);
1763     + cifsFileInfo_put(inv_file);
1764     + spin_lock(&cifs_file_list_lock);
1765     + ++refind;
1766     + goto refind_writable;
1767     + }
1768     + }
1769     +
1770     return NULL;
1771     }
1772    
1773     diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
1774     index e2bbc68..0a8224d 100644
1775     --- a/fs/cifs/readdir.c
1776     +++ b/fs/cifs/readdir.c
1777     @@ -219,6 +219,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
1778    
1779     static int initiate_cifs_search(const int xid, struct file *file)
1780     {
1781     + __u16 search_flags;
1782     int rc = 0;
1783     char *full_path = NULL;
1784     struct cifsFileInfo *cifsFile;
1785     @@ -270,8 +271,12 @@ ffirst_retry:
1786     cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
1787     }
1788    
1789     + search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
1790     + if (backup_cred(cifs_sb))
1791     + search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
1792     +
1793     rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
1794     - &cifsFile->netfid, &cifsFile->srch_inf,
1795     + &cifsFile->netfid, search_flags, &cifsFile->srch_inf,
1796     cifs_sb->mnt_cifs_flags &
1797     CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
1798     if (rc == 0)
1799     @@ -502,11 +507,13 @@ static int cifs_save_resume_key(const char *current_entry,
1800     static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
1801     struct file *file, char **ppCurrentEntry, int *num_to_ret)
1802     {
1803     + __u16 search_flags;
1804     int rc = 0;
1805     int pos_in_buf = 0;
1806     loff_t first_entry_in_buffer;
1807     loff_t index_to_find = file->f_pos;
1808     struct cifsFileInfo *cifsFile = file->private_data;
1809     + struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1810     /* check if index in the buffer */
1811    
1812     if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
1813     @@ -560,10 +567,14 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
1814     cifsFile);
1815     }
1816    
1817     + search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
1818     + if (backup_cred(cifs_sb))
1819     + search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
1820     +
1821     while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
1822     (rc == 0) && !cifsFile->srch_inf.endOfSearch) {
1823     cFYI(1, "calling findnext2");
1824     - rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
1825     + rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags,
1826     &cifsFile->srch_inf);
1827     /* FindFirst/Next set last_entry to NULL on malformed reply */
1828     if (cifsFile->srch_inf.last_entry)
1829     diff --git a/fs/exofs/super.c b/fs/exofs/super.c
1830     index 735ca06..59e0849 100644
1831     --- a/fs/exofs/super.c
1832     +++ b/fs/exofs/super.c
1833     @@ -745,7 +745,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
1834     sbi->one_comp.obj.partition = opts->pid;
1835     sbi->one_comp.obj.id = 0;
1836     exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
1837     - sbi->oc.numdevs = 1;
1838     sbi->oc.single_comp = EC_SINGLE_COMP;
1839     sbi->oc.comps = &sbi->one_comp;
1840    
1841     @@ -804,6 +803,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
1842     goto free_sbi;
1843    
1844     ore_comp_set_dev(&sbi->oc, 0, od);
1845     + sbi->oc.numdevs = 1;
1846     }
1847    
1848     __sbi_read_stats(sbi);
1849     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
1850     index 409c2ee..8900f8b 100644
1851     --- a/fs/ext4/ialloc.c
1852     +++ b/fs/ext4/ialloc.c
1853     @@ -488,10 +488,12 @@ fallback_retry:
1854     for (i = 0; i < ngroups; i++) {
1855     grp = (parent_group + i) % ngroups;
1856     desc = ext4_get_group_desc(sb, grp, NULL);
1857     - grp_free = ext4_free_inodes_count(sb, desc);
1858     - if (desc && grp_free && grp_free >= avefreei) {
1859     - *group = grp;
1860     - return 0;
1861     + if (desc) {
1862     + grp_free = ext4_free_inodes_count(sb, desc);
1863     + if (grp_free && grp_free >= avefreei) {
1864     + *group = grp;
1865     + return 0;
1866     + }
1867     }
1868     }
1869    
1870     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
1871     index 6eee255..1365903 100644
1872     --- a/fs/ext4/ioctl.c
1873     +++ b/fs/ext4/ioctl.c
1874     @@ -38,7 +38,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1875     handle_t *handle = NULL;
1876     int err, migrate = 0;
1877     struct ext4_iloc iloc;
1878     - unsigned int oldflags;
1879     + unsigned int oldflags, mask, i;
1880     unsigned int jflag;
1881    
1882     if (!inode_owner_or_capable(inode))
1883     @@ -115,9 +115,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1884     if (err)
1885     goto flags_err;
1886    
1887     - flags = flags & EXT4_FL_USER_MODIFIABLE;
1888     - flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE;
1889     - ei->i_flags = flags;
1890     + for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
1891     + if (!(mask & EXT4_FL_USER_MODIFIABLE))
1892     + continue;
1893     + if (mask & flags)
1894     + ext4_set_inode_flag(inode, i);
1895     + else
1896     + ext4_clear_inode_flag(inode, i);
1897     + }
1898    
1899     ext4_set_inode_flags(inode);
1900     inode->i_ctime = ext4_current_time(inode);
1901     diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1902     index 99ab428..6b0a57e 100644
1903     --- a/fs/ext4/mballoc.c
1904     +++ b/fs/ext4/mballoc.c
1905     @@ -2517,6 +2517,9 @@ int ext4_mb_release(struct super_block *sb)
1906     struct ext4_sb_info *sbi = EXT4_SB(sb);
1907     struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
1908    
1909     + if (sbi->s_proc)
1910     + remove_proc_entry("mb_groups", sbi->s_proc);
1911     +
1912     if (sbi->s_group_info) {
1913     for (i = 0; i < ngroups; i++) {
1914     grinfo = ext4_get_group_info(sb, i);
1915     @@ -2564,8 +2567,6 @@ int ext4_mb_release(struct super_block *sb)
1916     }
1917    
1918     free_percpu(sbi->s_locality_groups);
1919     - if (sbi->s_proc)
1920     - remove_proc_entry("mb_groups", sbi->s_proc);
1921    
1922     return 0;
1923     }
1924     @@ -4636,6 +4637,7 @@ do_more:
1925     */
1926     new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
1927     if (!new_entry) {
1928     + ext4_mb_unload_buddy(&e4b);
1929     err = -ENOMEM;
1930     goto error_return;
1931     }
1932     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1933     index 349d7b3..0a94cbb 100644
1934     --- a/fs/ext4/namei.c
1935     +++ b/fs/ext4/namei.c
1936     @@ -1037,6 +1037,12 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1937     EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
1938     return ERR_PTR(-EIO);
1939     }
1940     + if (unlikely(ino == dir->i_ino)) {
1941     + EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
1942     + dentry->d_name.len,
1943     + dentry->d_name.name);
1944     + return ERR_PTR(-EIO);
1945     + }
1946     inode = ext4_iget(dir->i_sb, ino);
1947     if (inode == ERR_PTR(-ESTALE)) {
1948     EXT4_ERROR_INODE(dir,
1949     diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
1950     index 59fa0be..53589ff 100644
1951     --- a/fs/ext4/resize.c
1952     +++ b/fs/ext4/resize.c
1953     @@ -161,6 +161,8 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
1954     if (flex_gd == NULL)
1955     goto out3;
1956    
1957     + if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
1958     + goto out2;
1959     flex_gd->count = flexbg_size;
1960    
1961     flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
1962     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
1963     index e1fb1d5..a68703a 100644
1964     --- a/fs/ext4/super.c
1965     +++ b/fs/ext4/super.c
1966     @@ -497,6 +497,7 @@ void __ext4_error(struct super_block *sb, const char *function,
1967     printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
1968     sb->s_id, function, line, current->comm, &vaf);
1969     va_end(args);
1970     + save_error_info(sb, function, line);
1971    
1972     ext4_handle_error(sb);
1973     }
1974     @@ -3592,7 +3593,8 @@ no_journal:
1975     goto failed_mount4;
1976     }
1977    
1978     - ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
1979     + if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
1980     + sb->s_flags |= MS_RDONLY;
1981    
1982     /* determine the minimum size of new large inodes, if present */
1983     if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
1984     diff --git a/fs/namespace.c b/fs/namespace.c
1985     index e608199..4e46539 100644
1986     --- a/fs/namespace.c
1987     +++ b/fs/namespace.c
1988     @@ -1073,8 +1073,9 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
1989     list_del_init(&p->mnt_expire);
1990     list_del_init(&p->mnt_list);
1991     __touch_mnt_namespace(p->mnt_ns);
1992     + if (p->mnt_ns)
1993     + __mnt_make_shortterm(p);
1994     p->mnt_ns = NULL;
1995     - __mnt_make_shortterm(p);
1996     list_del_init(&p->mnt_child);
1997     if (mnt_has_parent(p)) {
1998     p->mnt_parent->mnt_ghosts++;
1999     diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
2000     index ba3019f..3e8edbe 100644
2001     --- a/fs/nfs/idmap.c
2002     +++ b/fs/nfs/idmap.c
2003     @@ -640,20 +640,16 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
2004     struct idmap_msg *im;
2005     struct idmap *idmap = (struct idmap *)aux;
2006     struct key *key = cons->key;
2007     - int ret;
2008     + int ret = -ENOMEM;
2009    
2010     /* msg and im are freed in idmap_pipe_destroy_msg */
2011     msg = kmalloc(sizeof(*msg), GFP_KERNEL);
2012     - if (IS_ERR(msg)) {
2013     - ret = PTR_ERR(msg);
2014     + if (!msg)
2015     goto out0;
2016     - }
2017    
2018     im = kmalloc(sizeof(*im), GFP_KERNEL);
2019     - if (IS_ERR(im)) {
2020     - ret = PTR_ERR(im);
2021     + if (!im)
2022     goto out1;
2023     - }
2024    
2025     ret = nfs_idmap_prepare_message(key->description, im, msg);
2026     if (ret < 0)
2027     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2028     index 99650aa..e9b1eb7 100644
2029     --- a/fs/nfs/nfs4proc.c
2030     +++ b/fs/nfs/nfs4proc.c
2031     @@ -101,6 +101,8 @@ static int nfs4_map_errors(int err)
2032     case -NFS4ERR_BADOWNER:
2033     case -NFS4ERR_BADNAME:
2034     return -EINVAL;
2035     + case -NFS4ERR_SHARE_DENIED:
2036     + return -EACCES;
2037     default:
2038     dprintk("%s could not handle NFSv4 error %d\n",
2039     __func__, -err);
2040     diff --git a/fs/proc/base.c b/fs/proc/base.c
2041     index 57b8159..9fc77b4 100644
2042     --- a/fs/proc/base.c
2043     +++ b/fs/proc/base.c
2044     @@ -1803,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
2045     rcu_read_lock();
2046     file = fcheck_files(files, fd);
2047     if (file) {
2048     - unsigned i_mode, f_mode = file->f_mode;
2049     + unsigned f_mode = file->f_mode;
2050    
2051     rcu_read_unlock();
2052     put_files_struct(files);
2053     @@ -1819,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
2054     inode->i_gid = 0;
2055     }
2056    
2057     - i_mode = S_IFLNK;
2058     - if (f_mode & FMODE_READ)
2059     - i_mode |= S_IRUSR | S_IXUSR;
2060     - if (f_mode & FMODE_WRITE)
2061     - i_mode |= S_IWUSR | S_IXUSR;
2062     - inode->i_mode = i_mode;
2063     + if (S_ISLNK(inode->i_mode)) {
2064     + unsigned i_mode = S_IFLNK;
2065     + if (f_mode & FMODE_READ)
2066     + i_mode |= S_IRUSR | S_IXUSR;
2067     + if (f_mode & FMODE_WRITE)
2068     + i_mode |= S_IWUSR | S_IXUSR;
2069     + inode->i_mode = i_mode;
2070     + }
2071    
2072     security_task_to_inode(task, inode);
2073     put_task_struct(task);
2074     @@ -1859,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
2075     ei = PROC_I(inode);
2076     ei->fd = fd;
2077    
2078     + inode->i_mode = S_IFLNK;
2079     inode->i_op = &proc_pid_link_inode_operations;
2080     inode->i_size = 64;
2081     ei->op.proc_get_link = proc_fd_link;
2082     diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
2083     index 1030a71..7faaf2a 100644
2084     --- a/fs/proc/task_mmu.c
2085     +++ b/fs/proc/task_mmu.c
2086     @@ -784,7 +784,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
2087    
2088     /* find the first VMA at or above 'addr' */
2089     vma = find_vma(walk->mm, addr);
2090     - if (pmd_trans_huge_lock(pmd, vma) == 1) {
2091     + if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
2092     for (; addr != end; addr += PAGE_SIZE) {
2093     unsigned long offset;
2094    
2095     diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
2096     index 58d0bda..81368ab 100644
2097     --- a/include/drm/drm_pciids.h
2098     +++ b/include/drm/drm_pciids.h
2099     @@ -181,6 +181,7 @@
2100     {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2101     {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2102     {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2103     + {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2104     {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2105     {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2106     {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
2107     @@ -198,6 +199,7 @@
2108     {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2109     {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2110     {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2111     + {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2112     {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2113     {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2114     {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
2115     @@ -229,10 +231,11 @@
2116     {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2117     {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2118     {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2119     + {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2120     {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2121     {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2122     - {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2123     - {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2124     + {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2125     + {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2126     {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2127     {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2128     {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2129     @@ -531,6 +534,7 @@
2130     {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2131     {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
2132     {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
2133     + {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
2134     {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2135     {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2136     {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2137     @@ -550,6 +554,7 @@
2138     {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2139     {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2140     {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2141     + {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2142     {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2143     {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2144     {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2145     @@ -561,11 +566,19 @@
2146     {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2147     {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2148     {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2149     + {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2150     + {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2151     + {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2152     + {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2153     + {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2154     {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2155     {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2156     {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2157     {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2158     {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2159     + {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2160     + {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2161     + {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2162     {0, 0, 0}
2163    
2164     #define r128_PCI_IDS \
2165     diff --git a/include/linux/Kbuild b/include/linux/Kbuild
2166     index 3c9b616..50f55c7 100644
2167     --- a/include/linux/Kbuild
2168     +++ b/include/linux/Kbuild
2169     @@ -227,6 +227,7 @@ header-y += kd.h
2170     header-y += kdev_t.h
2171     header-y += kernel.h
2172     header-y += kernelcapi.h
2173     +header-y += kernel-page-flags.h
2174     header-y += keyboard.h
2175     header-y += keyctl.h
2176     header-y += l2tp.h
2177     diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
2178     index 26a6571..a1bdf69 100644
2179     --- a/include/linux/kernel-page-flags.h
2180     +++ b/include/linux/kernel-page-flags.h
2181     @@ -32,6 +32,8 @@
2182     #define KPF_KSM 21
2183     #define KPF_THP 22
2184    
2185     +#ifdef __KERNEL__
2186     +
2187     /* kernel hacking assistances
2188     * WARNING: subject to change, never rely on them!
2189     */
2190     @@ -44,4 +46,6 @@
2191     #define KPF_ARCH 38
2192     #define KPF_UNCACHED 39
2193    
2194     +#endif /* __KERNEL__ */
2195     +
2196     #endif /* LINUX_KERNEL_PAGE_FLAGS_H */
2197     diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
2198     index 0d04cd6..ffc444c 100644
2199     --- a/include/linux/radix-tree.h
2200     +++ b/include/linux/radix-tree.h
2201     @@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
2202     iter->index++;
2203     if (likely(*slot))
2204     return slot;
2205     - if (flags & RADIX_TREE_ITER_CONTIG)
2206     + if (flags & RADIX_TREE_ITER_CONTIG) {
2207     + /* forbid switching to the next chunk */
2208     + iter->next_index = 0;
2209     break;
2210     + }
2211     }
2212     }
2213     return NULL;
2214     diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
2215     index 111f26b..c168907 100644
2216     --- a/include/linux/skbuff.h
2217     +++ b/include/linux/skbuff.h
2218     @@ -1881,8 +1881,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
2219     {
2220     int delta = 0;
2221    
2222     - if (headroom < NET_SKB_PAD)
2223     - headroom = NET_SKB_PAD;
2224     if (headroom > skb_headroom(skb))
2225     delta = headroom - skb_headroom(skb);
2226    
2227     diff --git a/include/net/dst.h b/include/net/dst.h
2228     index bed833d..8197ead 100644
2229     --- a/include/net/dst.h
2230     +++ b/include/net/dst.h
2231     @@ -60,6 +60,7 @@ struct dst_entry {
2232     #define DST_NOCOUNT 0x0020
2233     #define DST_NOPEER 0x0040
2234     #define DST_FAKE_RTABLE 0x0080
2235     +#define DST_XFRM_TUNNEL 0x0100
2236    
2237     short error;
2238     short obsolete;
2239     diff --git a/kernel/fork.c b/kernel/fork.c
2240     index 687a15d..8163333 100644
2241     --- a/kernel/fork.c
2242     +++ b/kernel/fork.c
2243     @@ -356,7 +356,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
2244     }
2245     charge = 0;
2246     if (mpnt->vm_flags & VM_ACCOUNT) {
2247     - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
2248     + unsigned long len;
2249     + len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
2250     if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
2251     goto fail_nomem;
2252     charge = len;
2253     diff --git a/lib/radix-tree.c b/lib/radix-tree.c
2254     index 86516f5..3ac50dc 100644
2255     --- a/lib/radix-tree.c
2256     +++ b/lib/radix-tree.c
2257     @@ -673,6 +673,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
2258     * during iterating; it can be zero only at the beginning.
2259     * And we cannot overflow iter->next_index in a single step,
2260     * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
2261     + *
2262     + * This condition is also used by radix_tree_next_slot() to stop
2263     + * contiguous iterating, and forbids switching to the next chunk.
2264     */
2265     index = iter->next_index;
2266     if (!index && iter->index)
2267     diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2268     index ae8f708..263e177 100644
2269     --- a/mm/hugetlb.c
2270     +++ b/mm/hugetlb.c
2271     @@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2272     kref_get(&reservations->refs);
2273     }
2274    
2275     +static void resv_map_put(struct vm_area_struct *vma)
2276     +{
2277     + struct resv_map *reservations = vma_resv_map(vma);
2278     +
2279     + if (!reservations)
2280     + return;
2281     + kref_put(&reservations->refs, resv_map_release);
2282     +}
2283     +
2284     static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2285     {
2286     struct hstate *h = hstate_vma(vma);
2287     @@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2288     reserve = (end - start) -
2289     region_count(&reservations->regions, start, end);
2290    
2291     - kref_put(&reservations->refs, resv_map_release);
2292     + resv_map_put(vma);
2293    
2294     if (reserve) {
2295     hugetlb_acct_memory(h, -reserve);
2296     @@ -2990,12 +2999,16 @@ int hugetlb_reserve_pages(struct inode *inode,
2297     set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2298     }
2299    
2300     - if (chg < 0)
2301     - return chg;
2302     + if (chg < 0) {
2303     + ret = chg;
2304     + goto out_err;
2305     + }
2306    
2307     /* There must be enough pages in the subpool for the mapping */
2308     - if (hugepage_subpool_get_pages(spool, chg))
2309     - return -ENOSPC;
2310     + if (hugepage_subpool_get_pages(spool, chg)) {
2311     + ret = -ENOSPC;
2312     + goto out_err;
2313     + }
2314    
2315     /*
2316     * Check enough hugepages are available for the reservation.
2317     @@ -3004,7 +3017,7 @@ int hugetlb_reserve_pages(struct inode *inode,
2318     ret = hugetlb_acct_memory(h, chg);
2319     if (ret < 0) {
2320     hugepage_subpool_put_pages(spool, chg);
2321     - return ret;
2322     + goto out_err;
2323     }
2324    
2325     /*
2326     @@ -3021,6 +3034,10 @@ int hugetlb_reserve_pages(struct inode *inode,
2327     if (!vma || vma->vm_flags & VM_MAYSHARE)
2328     region_add(&inode->i_mapping->private_list, from, to);
2329     return 0;
2330     +out_err:
2331     + if (vma)
2332     + resv_map_put(vma);
2333     + return ret;
2334     }
2335    
2336     void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
2337     diff --git a/mm/slub.c b/mm/slub.c
2338     index 80848cd..71de9b5 100644
2339     --- a/mm/slub.c
2340     +++ b/mm/slub.c
2341     @@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
2342     freelist = page->freelist;
2343     counters = page->counters;
2344     new.counters = counters;
2345     - if (mode)
2346     + if (mode) {
2347     new.inuse = page->objects;
2348     + new.freelist = NULL;
2349     + } else {
2350     + new.freelist = freelist;
2351     + }
2352    
2353     VM_BUG_ON(new.frozen);
2354     new.frozen = 1;
2355    
2356     } while (!__cmpxchg_double_slab(s, page,
2357     freelist, counters,
2358     - NULL, new.counters,
2359     + new.freelist, new.counters,
2360     "lock and freeze"));
2361    
2362     remove_partial(n, page);
2363     @@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
2364     object = t;
2365     available = page->objects - page->inuse;
2366     } else {
2367     - page->freelist = t;
2368     available = put_cpu_partial(s, page, 0);
2369     stat(s, CPU_PARTIAL_NODE);
2370     }
2371     diff --git a/mm/vmalloc.c b/mm/vmalloc.c
2372     index 94dff88..1196c77 100644
2373     --- a/mm/vmalloc.c
2374     +++ b/mm/vmalloc.c
2375     @@ -1185,9 +1185,10 @@ void __init vmalloc_init(void)
2376     /* Import existing vmlist entries. */
2377     for (tmp = vmlist; tmp; tmp = tmp->next) {
2378     va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
2379     - va->flags = tmp->flags | VM_VM_AREA;
2380     + va->flags = VM_VM_AREA;
2381     va->va_start = (unsigned long)tmp->addr;
2382     va->va_end = va->va_start + tmp->size;
2383     + va->vm = tmp;
2384     __insert_vmap_area(va);
2385     }
2386    
2387     diff --git a/mm/vmscan.c b/mm/vmscan.c
2388     index 33dc256..0932dc2 100644
2389     --- a/mm/vmscan.c
2390     +++ b/mm/vmscan.c
2391     @@ -722,7 +722,7 @@ static enum page_references page_check_references(struct page *page,
2392     return PAGEREF_RECLAIM;
2393    
2394     if (referenced_ptes) {
2395     - if (PageAnon(page))
2396     + if (PageSwapBacked(page))
2397     return PAGEREF_ACTIVATE;
2398     /*
2399     * All mapped pages start out with page table
2400     diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
2401     index 89a47b3..cb982a6 100644
2402     --- a/net/ipv4/esp4.c
2403     +++ b/net/ipv4/esp4.c
2404     @@ -459,28 +459,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
2405     struct esp_data *esp = x->data;
2406     u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
2407     u32 align = max_t(u32, blksize, esp->padlen);
2408     - u32 rem;
2409     -
2410     - mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
2411     - rem = mtu & (align - 1);
2412     - mtu &= ~(align - 1);
2413     + unsigned int net_adj;
2414    
2415     switch (x->props.mode) {
2416     - case XFRM_MODE_TUNNEL:
2417     - break;
2418     - default:
2419     case XFRM_MODE_TRANSPORT:
2420     - /* The worst case */
2421     - mtu -= blksize - 4;
2422     - mtu += min_t(u32, blksize - 4, rem);
2423     - break;
2424     case XFRM_MODE_BEET:
2425     - /* The worst case. */
2426     - mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
2427     + net_adj = sizeof(struct iphdr);
2428     break;
2429     + case XFRM_MODE_TUNNEL:
2430     + net_adj = 0;
2431     + break;
2432     + default:
2433     + BUG();
2434     }
2435    
2436     - return mtu - 2;
2437     + return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
2438     + net_adj) & ~(align - 1)) + (net_adj - 2);
2439     }
2440    
2441     static void esp4_err(struct sk_buff *skb, u32 info)
2442     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
2443     index 5063fa3..8861f91 100644
2444     --- a/net/ipv4/fib_semantics.c
2445     +++ b/net/ipv4/fib_semantics.c
2446     @@ -145,6 +145,12 @@ static void free_fib_info_rcu(struct rcu_head *head)
2447     {
2448     struct fib_info *fi = container_of(head, struct fib_info, rcu);
2449    
2450     + change_nexthops(fi) {
2451     + if (nexthop_nh->nh_dev)
2452     + dev_put(nexthop_nh->nh_dev);
2453     + } endfor_nexthops(fi);
2454     +
2455     + release_net(fi->fib_net);
2456     if (fi->fib_metrics != (u32 *) dst_default_metrics)
2457     kfree(fi->fib_metrics);
2458     kfree(fi);
2459     @@ -156,13 +162,7 @@ void free_fib_info(struct fib_info *fi)
2460     pr_warn("Freeing alive fib_info %p\n", fi);
2461     return;
2462     }
2463     - change_nexthops(fi) {
2464     - if (nexthop_nh->nh_dev)
2465     - dev_put(nexthop_nh->nh_dev);
2466     - nexthop_nh->nh_dev = NULL;
2467     - } endfor_nexthops(fi);
2468     fib_info_cnt--;
2469     - release_net(fi->fib_net);
2470     call_rcu(&fi->rcu, free_fib_info_rcu);
2471     }
2472    
2473     diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
2474     index 1ac7938..65dd543 100644
2475     --- a/net/ipv6/esp6.c
2476     +++ b/net/ipv6/esp6.c
2477     @@ -411,19 +411,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
2478     struct esp_data *esp = x->data;
2479     u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
2480     u32 align = max_t(u32, blksize, esp->padlen);
2481     - u32 rem;
2482     + unsigned int net_adj;
2483    
2484     - mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
2485     - rem = mtu & (align - 1);
2486     - mtu &= ~(align - 1);
2487     -
2488     - if (x->props.mode != XFRM_MODE_TUNNEL) {
2489     - u32 padsize = ((blksize - 1) & 7) + 1;
2490     - mtu -= blksize - padsize;
2491     - mtu += min_t(u32, blksize - padsize, rem);
2492     - }
2493     + if (x->props.mode != XFRM_MODE_TUNNEL)
2494     + net_adj = sizeof(struct ipv6hdr);
2495     + else
2496     + net_adj = 0;
2497    
2498     - return mtu - 2;
2499     + return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
2500     + net_adj) & ~(align - 1)) + (net_adj - 2);
2501     }
2502    
2503     static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
2504     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
2505     index b7ca461..13e5399 100644
2506     --- a/net/ipv6/ip6_output.c
2507     +++ b/net/ipv6/ip6_output.c
2508     @@ -1181,6 +1181,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
2509     return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
2510     }
2511    
2512     +static void ip6_append_data_mtu(int *mtu,
2513     + int *maxfraglen,
2514     + unsigned int fragheaderlen,
2515     + struct sk_buff *skb,
2516     + struct rt6_info *rt)
2517     +{
2518     + if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
2519     + if (skb == NULL) {
2520     + /* first fragment, reserve header_len */
2521     + *mtu = *mtu - rt->dst.header_len;
2522     +
2523     + } else {
2524     + /*
2525     + * this fragment is not the first; the header
2526     + * space is regarded as data space.
2527     + */
2528     + *mtu = dst_mtu(rt->dst.path);
2529     + }
2530     + *maxfraglen = ((*mtu - fragheaderlen) & ~7)
2531     + + fragheaderlen - sizeof(struct frag_hdr);
2532     + }
2533     +}
2534     +
2535     int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2536     int offset, int len, int odd, struct sk_buff *skb),
2537     void *from, int length, int transhdrlen,
2538     @@ -1190,7 +1213,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2539     struct inet_sock *inet = inet_sk(sk);
2540     struct ipv6_pinfo *np = inet6_sk(sk);
2541     struct inet_cork *cork;
2542     - struct sk_buff *skb;
2543     + struct sk_buff *skb, *skb_prev = NULL;
2544     unsigned int maxfraglen, fragheaderlen;
2545     int exthdrlen;
2546     int dst_exthdrlen;
2547     @@ -1248,8 +1271,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2548     inet->cork.fl.u.ip6 = *fl6;
2549     np->cork.hop_limit = hlimit;
2550     np->cork.tclass = tclass;
2551     - mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
2552     - rt->dst.dev->mtu : dst_mtu(&rt->dst);
2553     + if (rt->dst.flags & DST_XFRM_TUNNEL)
2554     + mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
2555     + rt->dst.dev->mtu : dst_mtu(&rt->dst);
2556     + else
2557     + mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
2558     + rt->dst.dev->mtu : dst_mtu(rt->dst.path);
2559     if (np->frag_size < mtu) {
2560     if (np->frag_size)
2561     mtu = np->frag_size;
2562     @@ -1345,25 +1372,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
2563     unsigned int fraglen;
2564     unsigned int fraggap;
2565     unsigned int alloclen;
2566     - struct sk_buff *skb_prev;
2567     alloc_new_skb:
2568     - skb_prev = skb;
2569     -
2570     /* There's no room in the current skb */
2571     - if (skb_prev)
2572     - fraggap = skb_prev->len - maxfraglen;
2573     + if (skb)
2574     + fraggap = skb->len - maxfraglen;
2575     else
2576     fraggap = 0;
2577     + /* update mtu and maxfraglen if necessary */
2578     + if (skb == NULL || skb_prev == NULL)
2579     + ip6_append_data_mtu(&mtu, &maxfraglen,
2580     + fragheaderlen, skb, rt);
2581     +
2582     + skb_prev = skb;
2583    
2584     /*
2585     * If remaining data exceeds the mtu,
2586     * we know we need more fragment(s).
2587     */
2588     datalen = length + fraggap;
2589     - if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
2590     - datalen = maxfraglen - fragheaderlen;
2591    
2592     - fraglen = datalen + fragheaderlen;
2593     + if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
2594     + datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
2595     if ((flags & MSG_MORE) &&
2596     !(rt->dst.dev->features&NETIF_F_SG))
2597     alloclen = mtu;
2598     @@ -1372,13 +1401,16 @@ alloc_new_skb:
2599    
2600     alloclen += dst_exthdrlen;
2601    
2602     - /*
2603     - * The last fragment gets additional space at tail.
2604     - * Note: we overallocate on fragments with MSG_MODE
2605     - * because we have no idea if we're the last one.
2606     - */
2607     - if (datalen == length + fraggap)
2608     - alloclen += rt->dst.trailer_len;
2609     + if (datalen != length + fraggap) {
2610     + /*
2611     + * this is not the last fragment; the trailer
2612     + * space is regarded as data space.
2613     + */
2614     + datalen += rt->dst.trailer_len;
2615     + }
2616     +
2617     + alloclen += rt->dst.trailer_len;
2618     + fraglen = datalen + fragheaderlen;
2619    
2620     /*
2621     * We just reserve space for fragment header.
2622     diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
2623     index 6274f0b..cc8ad7b 100644
2624     --- a/net/l2tp/l2tp_ip.c
2625     +++ b/net/l2tp/l2tp_ip.c
2626     @@ -251,9 +251,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2627     {
2628     struct inet_sock *inet = inet_sk(sk);
2629     struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
2630     - int ret = -EINVAL;
2631     + int ret;
2632     int chk_addr_ret;
2633    
2634     + if (!sock_flag(sk, SOCK_ZAPPED))
2635     + return -EINVAL;
2636     + if (addr_len < sizeof(struct sockaddr_l2tpip))
2637     + return -EINVAL;
2638     + if (addr->l2tp_family != AF_INET)
2639     + return -EINVAL;
2640     +
2641     ret = -EADDRINUSE;
2642     read_lock_bh(&l2tp_ip_lock);
2643     if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
2644     @@ -284,6 +291,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
2645     sk_del_node_init(sk);
2646     write_unlock_bh(&l2tp_ip_lock);
2647     ret = 0;
2648     + sock_reset_flag(sk, SOCK_ZAPPED);
2649     +
2650     out:
2651     release_sock(sk);
2652    
2653     @@ -304,13 +313,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
2654     __be32 saddr;
2655     int oif, rc;
2656    
2657     - rc = -EINVAL;
2658     + if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
2659     + return -EINVAL;
2660     +
2661     if (addr_len < sizeof(*lsa))
2662     - goto out;
2663     + return -EINVAL;
2664    
2665     - rc = -EAFNOSUPPORT;
2666     if (lsa->l2tp_family != AF_INET)
2667     - goto out;
2668     + return -EAFNOSUPPORT;
2669    
2670     lock_sock(sk);
2671    
2672     @@ -364,6 +374,14 @@ out:
2673     return rc;
2674     }
2675    
2676     +static int l2tp_ip_disconnect(struct sock *sk, int flags)
2677     +{
2678     + if (sock_flag(sk, SOCK_ZAPPED))
2679     + return 0;
2680     +
2681     + return udp_disconnect(sk, flags);
2682     +}
2683     +
2684     static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
2685     int *uaddr_len, int peer)
2686     {
2687     @@ -599,7 +617,7 @@ static struct proto l2tp_ip_prot = {
2688     .close = l2tp_ip_close,
2689     .bind = l2tp_ip_bind,
2690     .connect = l2tp_ip_connect,
2691     - .disconnect = udp_disconnect,
2692     + .disconnect = l2tp_ip_disconnect,
2693     .ioctl = udp_ioctl,
2694     .destroy = l2tp_ip_destroy_sock,
2695     .setsockopt = ip_setsockopt,
2696     diff --git a/net/mac80211/util.c b/net/mac80211/util.c
2697     index 32f7a3b..3862c96 100644
2698     --- a/net/mac80211/util.c
2699     +++ b/net/mac80211/util.c
2700     @@ -1321,6 +1321,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2701     }
2702     }
2703    
2704     + /* add back keys */
2705     + list_for_each_entry(sdata, &local->interfaces, list)
2706     + if (ieee80211_sdata_running(sdata))
2707     + ieee80211_enable_keys(sdata);
2708     +
2709     + wake_up:
2710     /*
2711     * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
2712     * sessions can be established after a resume.
2713     @@ -1342,12 +1348,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
2714     mutex_unlock(&local->sta_mtx);
2715     }
2716    
2717     - /* add back keys */
2718     - list_for_each_entry(sdata, &local->interfaces, list)
2719     - if (ieee80211_sdata_running(sdata))
2720     - ieee80211_enable_keys(sdata);
2721     -
2722     - wake_up:
2723     ieee80211_wake_queues_by_reason(hw,
2724     IEEE80211_QUEUE_STOP_REASON_SUSPEND);
2725    
2726     diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
2727     index adf2990..25302c8 100644
2728     --- a/net/sunrpc/clnt.c
2729     +++ b/net/sunrpc/clnt.c
2730     @@ -1288,6 +1288,8 @@ call_reserveresult(struct rpc_task *task)
2731     }
2732    
2733     switch (status) {
2734     + case -ENOMEM:
2735     + rpc_delay(task, HZ >> 2);
2736     case -EAGAIN: /* woken up; retry */
2737     task->tk_action = call_reserve;
2738     return;
2739     diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
2740     index 0cbcd1a..da72492 100644
2741     --- a/net/sunrpc/xprt.c
2742     +++ b/net/sunrpc/xprt.c
2743     @@ -984,15 +984,16 @@ static void xprt_alloc_slot(struct rpc_task *task)
2744     goto out_init_req;
2745     switch (PTR_ERR(req)) {
2746     case -ENOMEM:
2747     - rpc_delay(task, HZ >> 2);
2748     dprintk("RPC: dynamic allocation of request slot "
2749     "failed! Retrying\n");
2750     + task->tk_status = -ENOMEM;
2751     break;
2752     case -EAGAIN:
2753     rpc_sleep_on(&xprt->backlog, task, NULL);
2754     dprintk("RPC: waiting for request slot\n");
2755     + default:
2756     + task->tk_status = -EAGAIN;
2757     }
2758     - task->tk_status = -EAGAIN;
2759     return;
2760     out_init_req:
2761     task->tk_status = 0;
2762     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
2763     index 7661576..a15d2a0 100644
2764     --- a/net/xfrm/xfrm_policy.c
2765     +++ b/net/xfrm/xfrm_policy.c
2766     @@ -1919,6 +1919,9 @@ no_transform:
2767     }
2768     ok:
2769     xfrm_pols_put(pols, drop_pols);
2770     + if (dst && dst->xfrm &&
2771     + dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
2772     + dst->flags |= DST_XFRM_TUNNEL;
2773     return dst;
2774    
2775     nopol:
2776     diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
2777     index 0eed611..67a4d6d 100644
2778     --- a/sound/usb/pcm.c
2779     +++ b/sound/usb/pcm.c
2780     @@ -699,6 +699,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
2781     int count = 0, needs_knot = 0;
2782     int err;
2783    
2784     + kfree(subs->rate_list.list);
2785     + subs->rate_list.list = NULL;
2786     +
2787     list_for_each_entry(fp, &subs->fmt_list, list) {
2788     if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
2789     return 0;
2790     diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
2791     index 7dab7b25..f77c96b 100644
2792     --- a/tools/vm/page-types.c
2793     +++ b/tools/vm/page-types.c
2794     @@ -35,6 +35,7 @@
2795     #include <sys/mount.h>
2796     #include <sys/statfs.h>
2797     #include "../../include/linux/magic.h"
2798     +#include "../../include/linux/kernel-page-flags.h"
2799    
2800    
2801     #ifndef MAX_PATH
2802     @@ -73,33 +74,6 @@
2803     #define KPF_BYTES 8
2804     #define PROC_KPAGEFLAGS "/proc/kpageflags"
2805    
2806     -/* copied from kpageflags_read() */
2807     -#define KPF_LOCKED 0
2808     -#define KPF_ERROR 1
2809     -#define KPF_REFERENCED 2
2810     -#define KPF_UPTODATE 3
2811     -#define KPF_DIRTY 4
2812     -#define KPF_LRU 5
2813     -#define KPF_ACTIVE 6
2814     -#define KPF_SLAB 7
2815     -#define KPF_WRITEBACK 8
2816     -#define KPF_RECLAIM 9
2817     -#define KPF_BUDDY 10
2818     -
2819     -/* [11-20] new additions in 2.6.31 */
2820     -#define KPF_MMAP 11
2821     -#define KPF_ANON 12
2822     -#define KPF_SWAPCACHE 13
2823     -#define KPF_SWAPBACKED 14
2824     -#define KPF_COMPOUND_HEAD 15
2825     -#define KPF_COMPOUND_TAIL 16
2826     -#define KPF_HUGE 17
2827     -#define KPF_UNEVICTABLE 18
2828     -#define KPF_HWPOISON 19
2829     -#define KPF_NOPAGE 20
2830     -#define KPF_KSM 21
2831     -#define KPF_THP 22
2832     -
2833     /* [32-] kernel hacking assistances */
2834     #define KPF_RESERVED 32
2835     #define KPF_MLOCKED 33