Magellan Linux

Annotation of /trunk/kernel-lts/patches-3.4/0122-3.4.23-all-fixes.patch

Revision 2018
Tue Jan 8 15:43:26 2013 UTC by niro
File size: 36924 bytes
-linux-3.4.23
1 niro 2018 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
2     index e14ae11..7fe19a3 100644
3     --- a/arch/arm/Kconfig
4     +++ b/arch/arm/Kconfig
5     @@ -579,6 +579,7 @@ config ARCH_KIRKWOOD
6     bool "Marvell Kirkwood"
7     select CPU_FEROCEON
8     select PCI
9     + select PCI_QUIRKS
10     select ARCH_REQUIRE_GPIOLIB
11     select GENERIC_CLOCKEVENTS
12     select NEED_MACH_IO_H
13     diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h
14     index 3ad9f94..11799c3 100644
15     --- a/arch/arm/mach-dove/include/mach/pm.h
16     +++ b/arch/arm/mach-dove/include/mach/pm.h
17     @@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin)
18    
19     static inline int irq_to_pmu(int irq)
20     {
21     - if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)
22     + if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS)
23     return irq - IRQ_DOVE_PMU_START;
24    
25     return -EINVAL;
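
The one-character change above fixes an off-by-one: with the strict `<`, the first PMU interrupt (irq == IRQ_DOVE_PMU_START) was rejected with -EINVAL even though it maps to pin 0. A minimal userspace sketch of the before/after range checks, using stand-in values for IRQ_DOVE_PMU_START and NR_IRQS:

    #include <stdio.h>

    /* Stand-in values; the real ones come from mach/irqs.h. */
    #define IRQ_DOVE_PMU_START 64
    #define NR_IRQS 96
    #define EINVAL 22

    static int irq_to_pmu_old(int irq)
    {
        if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS)  /* rejects pin 0 */
            return irq - IRQ_DOVE_PMU_START;
        return -EINVAL;
    }

    static int irq_to_pmu_new(int irq)
    {
        if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) /* accepts pin 0 */
            return irq - IRQ_DOVE_PMU_START;
        return -EINVAL;
    }

    int main(void)
    {
        printf("old: %d, new: %d\n",
               irq_to_pmu_old(IRQ_DOVE_PMU_START),
               irq_to_pmu_new(IRQ_DOVE_PMU_START)); /* old: -22, new: 0 */
        return 0;
    }
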
26     diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c
27     index f07fd16..9f2fd10 100644
28     --- a/arch/arm/mach-dove/irq.c
29     +++ b/arch/arm/mach-dove/irq.c
30     @@ -61,8 +61,20 @@ static void pmu_irq_ack(struct irq_data *d)
31     int pin = irq_to_pmu(d->irq);
32     u32 u;
33    
34     + /*
35     + * The PMU mask register is not RW0C: it is RW. This means that
36     + * the bits take whatever value is written to them; if you write
37     + * a '1', you will set the interrupt.
38     + *
39     + * Unfortunately this means there is NO race free way to clear
40     + * these interrupts.
41     + *
42     + * So, let's structure the code so that the window is as small as
43     + * possible.
44     + */
45     u = ~(1 << (pin & 31));
46     - writel(u, PMU_INTERRUPT_CAUSE);
47     + u &= readl_relaxed(PMU_INTERRUPT_CAUSE);
48     + writel_relaxed(u, PMU_INTERRUPT_CAUSE);
49     }
50    
51     static struct irq_chip pmu_irq_chip = {
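
As the new comment explains, the cause register is plain RW rather than write-zero-to-clear, so the old unconditional write of ~(1 << pin) stored 1s into every other bit. A small userspace model of the register (a plain variable standing in for the MMIO word) showing why the read-modify-write sequence clears only the intended bit:

    #include <stdio.h>
    #include <stdint.h>

    /* Model PMU_INTERRUPT_CAUSE as plain read/write storage: it is RW,
     * not RW0C/W1C, so whatever is written replaces the contents. */
    static uint32_t cause;

    static void ack_old(int pin)
    {
        /* Writes 1 to every bit except 'pin': on an RW register this
         * spuriously asserts all other interrupt sources. */
        cause = ~(1u << (pin & 31));
    }

    static void ack_new(int pin)
    {
        /* Read-modify-write: only the target bit changes.  The race
         * window is now just the gap between the read and the write. */
        uint32_t u = ~(1u << (pin & 31));
        u &= cause;          /* readl_relaxed(PMU_INTERRUPT_CAUSE) */
        cause = u;           /* writel_relaxed(u, PMU_INTERRUPT_CAUSE) */
    }

    int main(void)
    {
        cause = 0x5;             /* pins 0 and 2 pending */
        ack_old(0);
        printf("old ack of pin 0: cause = %#x\n", cause); /* 0xfffffffe */
        cause = 0x5;
        ack_new(0);
        printf("new ack of pin 0: cause = %#x\n", cause); /* 0x4 */
        return 0;
    }
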
52     diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
53     index f56a011..c46d20e 100644
54     --- a/arch/arm/mach-kirkwood/pcie.c
55     +++ b/arch/arm/mach-kirkwood/pcie.c
56     @@ -212,14 +212,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
57     return 1;
58     }
59    
60     +/*
61     + * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it
62     + * is operating as a root complex this needs to be switched to
63     + * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on
64     + * the device. Decoding setup is handled by the orion code.
65     + */
66     static void __devinit rc_pci_fixup(struct pci_dev *dev)
67     {
68     - /*
69     - * Prevent enumeration of root complex.
70     - */
71     if (dev->bus->parent == NULL && dev->devfn == 0) {
72     int i;
73    
74     + dev->class &= 0xff;
75     + dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
76     for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
77     dev->resource[i].start = 0;
78     dev->resource[i].end = 0;
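
The fixup works because dev->class packs base class, subclass and prog-if into three bytes; rewriting the top two bytes to PCI_CLASS_BRIDGE_HOST makes the core treat the device as a host bridge and skip BAR processing. A sketch of the byte arithmetic, with class constants as defined in pci_ids.h:

    #include <stdio.h>

    /* Values from include/linux/pci_ids.h */
    #define PCI_CLASS_MEMORY_OTHER 0x0580
    #define PCI_CLASS_BRIDGE_HOST  0x0600

    int main(void)
    {
        /* dev->class layout: (base << 16) | (sub << 8) | prog-if. */
        unsigned int class = (PCI_CLASS_MEMORY_OTHER << 8) | 0x00;

        class &= 0xff;                        /* keep the prog-if byte */
        class |= PCI_CLASS_BRIDGE_HOST << 8;  /* rewrite base/subclass */

        printf("fixed-up class: %#x\n", class); /* 0x60000 */
        return 0;
    }
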
79     diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
80     index 4fa8815..92e05b6 100644
81     --- a/arch/x86/include/asm/fpu-internal.h
82     +++ b/arch/x86/include/asm/fpu-internal.h
83     @@ -334,14 +334,17 @@ static inline void __thread_fpu_begin(struct task_struct *tsk)
84     typedef struct { int preload; } fpu_switch_t;
85    
86     /*
87     - * FIXME! We could do a totally lazy restore, but we need to
88     - * add a per-cpu "this was the task that last touched the FPU
89     - * on this CPU" variable, and the task needs to have a "I last
90     - * touched the FPU on this CPU" and check them.
91     + * Must be run with preemption disabled: this clears the fpu_owner_task,
92     + * on this CPU.
93     *
94     - * We don't do that yet, so "fpu_lazy_restore()" always returns
95     - * false, but some day..
96     + * This will disable any lazy FPU state restore of the current FPU state,
97     + * but if the current thread owns the FPU, it will still be saved by.
98     */
99     +static inline void __cpu_disable_lazy_restore(unsigned int cpu)
100     +{
101     + per_cpu(fpu_owner_task, cpu) = NULL;
102     +}
103     +
104     static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
105     {
106     return new == percpu_read_stable(fpu_owner_task) &&
107     diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
108     index 6e1e406..849cdcf 100644
109     --- a/arch/x86/kernel/smpboot.c
110     +++ b/arch/x86/kernel/smpboot.c
111     @@ -66,6 +66,8 @@
112     #include <asm/mwait.h>
113     #include <asm/apic.h>
114     #include <asm/io_apic.h>
115     +#include <asm/i387.h>
116     +#include <asm/fpu-internal.h>
117     #include <asm/setup.h>
118     #include <asm/uv/uv.h>
119     #include <linux/mc146818rtc.h>
120     @@ -851,6 +853,9 @@ int __cpuinit native_cpu_up(unsigned int cpu)
121    
122     per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
123    
124     + /* the FPU context is blank, nobody can own it */
125     + __cpu_disable_lazy_restore(cpu);
126     +
127     err = do_boot_cpu(apicid, cpu);
128     if (err) {
129     pr_debug("do_boot_cpu failed %d\n", err);
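
Together, these two hunks invalidate the per-CPU FPU owner before a CPU is (re)booted, so a task can no longer skip restoring FPU registers that were lost across the offline/online cycle. A simplified userspace model of the ownership check (the real fpu_lazy_restore() additionally compares the task's recorded last_cpu):

    #include <stdio.h>
    #include <stddef.h>

    #define NR_CPUS 4

    struct task_struct { const char *comm; };

    /* Userspace stand-in for the kernel's per-CPU fpu_owner_task. */
    static struct task_struct *fpu_owner_task[NR_CPUS];

    /* Mirrors __cpu_disable_lazy_restore(): run before a CPU (re)boots
     * so a stale owner cannot match after the FPU registers were lost. */
    static void cpu_disable_lazy_restore(unsigned int cpu)
    {
        fpu_owner_task[cpu] = NULL;
    }

    /* Simplified fpu_lazy_restore(): skip reloading the FPU registers
     * only if this task last used the FPU on this CPU. */
    static int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
    {
        return new == fpu_owner_task[cpu];
    }

    int main(void)
    {
        struct task_struct a = { "taskA" };

        fpu_owner_task[1] = &a;      /* taskA last used the FPU on CPU1 */
        printf("before: %d\n", fpu_lazy_restore(&a, 1)); /* 1: may skip */

        cpu_disable_lazy_restore(1); /* CPU1 re-onlined, FPU state blank */
        printf("after:  %d\n", fpu_lazy_restore(&a, 1)); /* 0: must restore */
        return 0;
    }
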
130     diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
131     index bbac51e..4a2c131 100644
132     --- a/drivers/acpi/processor_driver.c
133     +++ b/drivers/acpi/processor_driver.c
134     @@ -407,6 +407,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event)
135     acpi_bus_generate_proc_event(device, event, 0);
136     acpi_bus_generate_netlink_event(device->pnp.device_class,
137     dev_name(&device->dev), event, 0);
138     + break;
139     default:
140     ACPI_DEBUG_PRINT((ACPI_DB_INFO,
141     "Unsupported event [0x%x]\n", event));
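
The single added line is a missing break: without it, a handled notification falls through into the default branch and is also logged as unsupported. A trivial illustration of the fall-through:

    #include <stdio.h>

    int main(void)
    {
        int event = 1;

        switch (event) {
        case 1:
            printf("handled event 1\n");
            /* without this break, control falls into 'default' and
             * the handled event is also reported as unsupported */
            break;
        default:
            printf("unsupported event %d\n", event);
        }
        return 0;
    }
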
142     diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
143     index 3bafa3b..f4059e9 100644
144     --- a/drivers/edac/i7300_edac.c
145     +++ b/drivers/edac/i7300_edac.c
146     @@ -215,8 +215,8 @@ static const char *ferr_fat_fbd_name[] = {
147     [0] = "Memory Write error on non-redundant retry or "
148     "FBD configuration Write error on retry",
149     };
150     -#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28))
151     -#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3))
152     +#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3)
153     +#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))
154    
155     #define FERR_NF_FBD 0xa0
156     static const char *ferr_nf_fbd_name[] = {
157     @@ -243,7 +243,7 @@ static const char *ferr_nf_fbd_name[] = {
158     [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
159     [0] = "Uncorrectable Data ECC on Replay",
160     };
161     -#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28))
162     +#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3)
163     #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
164     (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
165     (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
166     @@ -485,7 +485,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
167     errnum = find_first_bit(&errors,
168     ARRAY_SIZE(ferr_nf_fbd_name));
169     specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
170     - branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;
171     + branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;
172    
173     pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
174     REDMEMA, &syndrome);
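
Both index macros previously masked the field in place instead of shifting it down, so a comparison such as `== 2` could never be true (and the non-fatal path used the fatal macro altogether). A compilable before/after demonstration of the extraction:

    #include <stdio.h>
    #include <stdint.h>

    #define GET_FBD_FAT_IDX_OLD(fbderr) (fbderr & (3 << 28))
    #define GET_FBD_FAT_IDX_NEW(fbderr) (((fbderr) >> 28) & 3)

    int main(void)
    {
        uint32_t error_reg = 2u << 28;  /* index field holds the value 2 */

        /* The old macro leaves the field in place, yielding 0x20000000,
         * so '== 2' is never true. */
        printf("old: %#x -> (== 2) is %d\n",
               GET_FBD_FAT_IDX_OLD(error_reg),
               GET_FBD_FAT_IDX_OLD(error_reg) == 2);

        /* The fixed macro shifts the field down first. */
        printf("new: %#x -> (== 2) is %d\n",
               GET_FBD_FAT_IDX_NEW(error_reg),
               GET_FBD_FAT_IDX_NEW(error_reg) == 2);
        return 0;
    }
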
175     diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
176     index 4ff7d5f..802fec2 100644
177     --- a/drivers/gpu/drm/i915/intel_lvds.c
178     +++ b/drivers/gpu/drm/i915/intel_lvds.c
179     @@ -785,6 +785,22 @@ static const struct dmi_system_id intel_no_lvds[] = {
180     DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
181     },
182     },
183     + {
184     + .callback = intel_no_lvds_dmi_callback,
185     + .ident = "Gigabyte GA-D525TUD",
186     + .matches = {
187     + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
188     + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"),
189     + },
190     + },
191     + {
192     + .callback = intel_no_lvds_dmi_callback,
193     + .ident = "Supermicro X7SPA-H",
194     + .matches = {
195     + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
196     + DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
197     + },
198     + },
199    
200     { } /* terminating entry */
201     };
202     diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
203     index e5328da..4a1d8f3 100644
204     --- a/drivers/gpu/drm/radeon/evergreen.c
205     +++ b/drivers/gpu/drm/radeon/evergreen.c
206     @@ -37,6 +37,16 @@
207     #define EVERGREEN_PFP_UCODE_SIZE 1120
208     #define EVERGREEN_PM4_UCODE_SIZE 1376
209    
210     +static const u32 crtc_offsets[6] =
211     +{
212     + EVERGREEN_CRTC0_REGISTER_OFFSET,
213     + EVERGREEN_CRTC1_REGISTER_OFFSET,
214     + EVERGREEN_CRTC2_REGISTER_OFFSET,
215     + EVERGREEN_CRTC3_REGISTER_OFFSET,
216     + EVERGREEN_CRTC4_REGISTER_OFFSET,
217     + EVERGREEN_CRTC5_REGISTER_OFFSET
218     +};
219     +
220     static void evergreen_gpu_init(struct radeon_device *rdev);
221     void evergreen_fini(struct radeon_device *rdev);
222     void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
223     @@ -101,17 +111,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
224    
225     void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
226     {
227     - struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
228     int i;
229    
230     - if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) {
231     + if (crtc >= rdev->num_crtc)
232     + return;
233     +
234     + if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
235     for (i = 0; i < rdev->usec_timeout; i++) {
236     - if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK))
237     + if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
238     break;
239     udelay(1);
240     }
241     for (i = 0; i < rdev->usec_timeout; i++) {
242     - if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)
243     + if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
244     break;
245     udelay(1);
246     }
247     @@ -1117,116 +1129,105 @@ void evergreen_agp_enable(struct radeon_device *rdev)
248    
249     void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
250     {
251     + u32 crtc_enabled, tmp, frame_count, blackout;
252     + int i, j;
253     +
254     save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
255     save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
256    
257     - /* Stop all video */
258     + /* disable VGA render */
259     WREG32(VGA_RENDER_CONTROL, 0);
260     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
261     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
262     - if (rdev->num_crtc >= 4) {
263     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
264     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
265     - }
266     - if (rdev->num_crtc >= 6) {
267     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
268     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
269     - }
270     - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
271     - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
272     - if (rdev->num_crtc >= 4) {
273     - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
274     - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
275     - }
276     - if (rdev->num_crtc >= 6) {
277     - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
278     - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
279     - }
280     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
281     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
282     - if (rdev->num_crtc >= 4) {
283     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
284     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
285     - }
286     - if (rdev->num_crtc >= 6) {
287     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
288     - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
289     + /* blank the display controllers */
290     + for (i = 0; i < rdev->num_crtc; i++) {
291     + crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
292     + if (crtc_enabled) {
293     + save->crtc_enabled[i] = true;
294     + if (ASIC_IS_DCE6(rdev)) {
295     + tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
296     + if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
297     + radeon_wait_for_vblank(rdev, i);
298     + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
299     + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
300     + }
301     + } else {
302     + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
303     + if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
304     + radeon_wait_for_vblank(rdev, i);
305     + tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
306     + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
307     + }
308     + }
309     + /* wait for the next frame */
310     + frame_count = radeon_get_vblank_counter(rdev, i);
311     + for (j = 0; j < rdev->usec_timeout; j++) {
312     + if (radeon_get_vblank_counter(rdev, i) != frame_count)
313     + break;
314     + udelay(1);
315     + }
316     + } else {
317     + save->crtc_enabled[i] = false;
318     + }
319     }
320    
321     - WREG32(D1VGA_CONTROL, 0);
322     - WREG32(D2VGA_CONTROL, 0);
323     - if (rdev->num_crtc >= 4) {
324     - WREG32(EVERGREEN_D3VGA_CONTROL, 0);
325     - WREG32(EVERGREEN_D4VGA_CONTROL, 0);
326     - }
327     - if (rdev->num_crtc >= 6) {
328     - WREG32(EVERGREEN_D5VGA_CONTROL, 0);
329     - WREG32(EVERGREEN_D6VGA_CONTROL, 0);
330     + radeon_mc_wait_for_idle(rdev);
331     +
332     + blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
333     + if ((blackout & BLACKOUT_MODE_MASK) != 1) {
334     + /* Block CPU access */
335     + WREG32(BIF_FB_EN, 0);
336     + /* blackout the MC */
337     + blackout &= ~BLACKOUT_MODE_MASK;
338     + WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
339     }
340     }
341    
342     void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
343     {
344     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
345     - upper_32_bits(rdev->mc.vram_start));
346     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
347     - upper_32_bits(rdev->mc.vram_start));
348     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
349     - (u32)rdev->mc.vram_start);
350     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
351     - (u32)rdev->mc.vram_start);
352     -
353     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
354     - upper_32_bits(rdev->mc.vram_start));
355     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
356     - upper_32_bits(rdev->mc.vram_start));
357     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
358     - (u32)rdev->mc.vram_start);
359     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
360     - (u32)rdev->mc.vram_start);
361     -
362     - if (rdev->num_crtc >= 4) {
363     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
364     - upper_32_bits(rdev->mc.vram_start));
365     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
366     - upper_32_bits(rdev->mc.vram_start));
367     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
368     - (u32)rdev->mc.vram_start);
369     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
370     - (u32)rdev->mc.vram_start);
371     -
372     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
373     - upper_32_bits(rdev->mc.vram_start));
374     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
375     - upper_32_bits(rdev->mc.vram_start));
376     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
377     - (u32)rdev->mc.vram_start);
378     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
379     - (u32)rdev->mc.vram_start);
380     - }
381     - if (rdev->num_crtc >= 6) {
382     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
383     - upper_32_bits(rdev->mc.vram_start));
384     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
385     - upper_32_bits(rdev->mc.vram_start));
386     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
387     - (u32)rdev->mc.vram_start);
388     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
389     - (u32)rdev->mc.vram_start);
390     + u32 tmp, frame_count;
391     + int i, j;
392    
393     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
394     + /* update crtc base addresses */
395     + for (i = 0; i < rdev->num_crtc; i++) {
396     + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
397     upper_32_bits(rdev->mc.vram_start));
398     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
399     + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
400     upper_32_bits(rdev->mc.vram_start));
401     - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
402     + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
403     (u32)rdev->mc.vram_start);
404     - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
405     + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
406     (u32)rdev->mc.vram_start);
407     }
408     -
409     WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
410     WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
411     - /* Unlock host access */
412     +
413     + /* unblackout the MC */
414     + tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
415     + tmp &= ~BLACKOUT_MODE_MASK;
416     + WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
417     + /* allow CPU access */
418     + WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
419     +
420     + for (i = 0; i < rdev->num_crtc; i++) {
421     + if (save->crtc_enabled) {
422     + if (ASIC_IS_DCE6(rdev)) {
423     + tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
424     + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
425     + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
426     + } else {
427     + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
428     + tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
429     + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
430     + }
431     + /* wait for the next frame */
432     + frame_count = radeon_get_vblank_counter(rdev, i);
433     + for (j = 0; j < rdev->usec_timeout; j++) {
434     + if (radeon_get_vblank_counter(rdev, i) != frame_count)
435     + break;
436     + udelay(1);
437     + }
438     + }
439     + }
440     + /* Unlock vga access */
441     WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
442     mdelay(1);
443     WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
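
The crtc_offsets[] table is what lets one bounded loop replace the unrolled per-CRTC register writes and the `num_crtc >= 4/6` ladders throughout mc_stop/mc_resume. A sketch of the table-driven pattern, with made-up offset values and a printf standing in for WREG32:

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-in offsets; the real EVERGREEN_CRTC*_REGISTER_OFFSET
     * values live in evergreen_reg.h. */
    static const uint32_t crtc_offsets[6] = {
        0x0000, 0x0800, 0x1000, 0x1800, 0x2000, 0x2800
    };

    #define EVERGREEN_CRTC_CONTROL 0x6e70

    static void WREG32(uint32_t reg, uint32_t val)
    {
        printf("write %#06x <- %#x\n", reg, val);
    }

    int main(void)
    {
        int num_crtc = 4;  /* rdev->num_crtc on a 4-CRTC part */
        int i;

        /* One loop replaces the unrolled CRTC0..CRTC5 writes plus the
         * 'if (rdev->num_crtc >= 4/6)' ladders in the old code. */
        for (i = 0; i < num_crtc; i++)
            WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], 0);
        return 0;
    }
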
444     diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
445     index 96c10b3..34a0e85 100644
446     --- a/drivers/gpu/drm/radeon/evergreen_reg.h
447     +++ b/drivers/gpu/drm/radeon/evergreen_reg.h
448     @@ -218,6 +218,8 @@
449     #define EVERGREEN_CRTC_CONTROL 0x6e70
450     # define EVERGREEN_CRTC_MASTER_EN (1 << 0)
451     # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
452     +#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
453     +# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
454     #define EVERGREEN_CRTC_STATUS 0x6e8c
455     # define EVERGREEN_CRTC_V_BLANK (1 << 0)
456     #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
457     diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
458     index 2eaaea0..81e744f 100644
459     --- a/drivers/gpu/drm/radeon/evergreend.h
460     +++ b/drivers/gpu/drm/radeon/evergreend.h
461     @@ -77,6 +77,10 @@
462    
463     #define CONFIG_MEMSIZE 0x5428
464    
465     +#define BIF_FB_EN 0x5490
466     +#define FB_READ_EN (1 << 0)
467     +#define FB_WRITE_EN (1 << 1)
468     +
469     #define CP_STRMOUT_CNTL 0x84FC
470    
471     #define CP_COHER_CNTL 0x85F0
472     @@ -200,6 +204,9 @@
473     #define NOOFCHAN_MASK 0x00003000
474     #define MC_SHARED_CHREMAP 0x2008
475    
476     +#define MC_SHARED_BLACKOUT_CNTL 0x20ac
477     +#define BLACKOUT_MODE_MASK 0x00000007
478     +
479     #define MC_ARB_RAMCFG 0x2760
480     #define NOOFBANK_SHIFT 0
481     #define NOOFBANK_MASK 0x00000003
482     diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
483     index 665df87..917e49c 100644
484     --- a/drivers/gpu/drm/radeon/radeon_asic.h
485     +++ b/drivers/gpu/drm/radeon/radeon_asic.h
486     @@ -400,6 +400,7 @@ void r700_cp_fini(struct radeon_device *rdev);
487     struct evergreen_mc_save {
488     u32 vga_render_control;
489     u32 vga_hdp_control;
490     + bool crtc_enabled[RADEON_MAX_CRTCS];
491     };
492    
493     void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
494     diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
495     index a2b5304..6137d00 100644
496     --- a/drivers/md/raid10.c
497     +++ b/drivers/md/raid10.c
498     @@ -1182,18 +1182,21 @@ retry_write:
499     blocked_rdev = rrdev;
500     break;
501     }
502     + if (rdev && (test_bit(Faulty, &rdev->flags)
503     + || test_bit(Unmerged, &rdev->flags)))
504     + rdev = NULL;
505     if (rrdev && (test_bit(Faulty, &rrdev->flags)
506     || test_bit(Unmerged, &rrdev->flags)))
507     rrdev = NULL;
508    
509     r10_bio->devs[i].bio = NULL;
510     r10_bio->devs[i].repl_bio = NULL;
511     - if (!rdev || test_bit(Faulty, &rdev->flags) ||
512     - test_bit(Unmerged, &rdev->flags)) {
513     +
514     + if (!rdev && !rrdev) {
515     set_bit(R10BIO_Degraded, &r10_bio->state);
516     continue;
517     }
518     - if (test_bit(WriteErrorSeen, &rdev->flags)) {
519     + if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
520     sector_t first_bad;
521     sector_t dev_sector = r10_bio->devs[i].addr;
522     int bad_sectors;
523     @@ -1235,8 +1238,10 @@ retry_write:
524     max_sectors = good_sectors;
525     }
526     }
527     - r10_bio->devs[i].bio = bio;
528     - atomic_inc(&rdev->nr_pending);
529     + if (rdev) {
530     + r10_bio->devs[i].bio = bio;
531     + atomic_inc(&rdev->nr_pending);
532     + }
533     if (rrdev) {
534     r10_bio->devs[i].repl_bio = bio;
535     atomic_inc(&rrdev->nr_pending);
536     @@ -1292,51 +1297,52 @@ retry_write:
537     for (i = 0; i < conf->copies; i++) {
538     struct bio *mbio;
539     int d = r10_bio->devs[i].devnum;
540     - if (!r10_bio->devs[i].bio)
541     - continue;
542     -
543     - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
544     - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
545     - max_sectors);
546     - r10_bio->devs[i].bio = mbio;
547     -
548     - mbio->bi_sector = (r10_bio->devs[i].addr+
549     - conf->mirrors[d].rdev->data_offset);
550     - mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
551     - mbio->bi_end_io = raid10_end_write_request;
552     - mbio->bi_rw = WRITE | do_sync | do_fua;
553     - mbio->bi_private = r10_bio;
554     -
555     - atomic_inc(&r10_bio->remaining);
556     - spin_lock_irqsave(&conf->device_lock, flags);
557     - bio_list_add(&conf->pending_bio_list, mbio);
558     - conf->pending_count++;
559     - spin_unlock_irqrestore(&conf->device_lock, flags);
560     -
561     - if (!r10_bio->devs[i].repl_bio)
562     - continue;
563     + if (r10_bio->devs[i].bio) {
564     + struct md_rdev *rdev = conf->mirrors[d].rdev;
565     + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
566     + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
567     + max_sectors);
568     + r10_bio->devs[i].bio = mbio;
569     +
570     + mbio->bi_sector = (r10_bio->devs[i].addr+
571     + rdev->data_offset);
572     + mbio->bi_bdev = rdev->bdev;
573     + mbio->bi_end_io = raid10_end_write_request;
574     + mbio->bi_rw = WRITE | do_sync | do_fua;
575     + mbio->bi_private = r10_bio;
576    
577     - mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
578     - md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
579     - max_sectors);
580     - r10_bio->devs[i].repl_bio = mbio;
581     + atomic_inc(&r10_bio->remaining);
582     + spin_lock_irqsave(&conf->device_lock, flags);
583     + bio_list_add(&conf->pending_bio_list, mbio);
584     + conf->pending_count++;
585     + spin_unlock_irqrestore(&conf->device_lock, flags);
586     + }
587    
588     - /* We are actively writing to the original device
589     - * so it cannot disappear, so the replacement cannot
590     - * become NULL here
591     - */
592     - mbio->bi_sector = (r10_bio->devs[i].addr+
593     - conf->mirrors[d].replacement->data_offset);
594     - mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
595     - mbio->bi_end_io = raid10_end_write_request;
596     - mbio->bi_rw = WRITE | do_sync | do_fua;
597     - mbio->bi_private = r10_bio;
598     + if (r10_bio->devs[i].repl_bio) {
599     + struct md_rdev *rdev = conf->mirrors[d].replacement;
600     + if (rdev == NULL) {
601     + /* Replacement just got moved to main 'rdev' */
602     + smp_mb();
603     + rdev = conf->mirrors[d].rdev;
604     + }
605     + mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
606     + md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
607     + max_sectors);
608     + r10_bio->devs[i].repl_bio = mbio;
609     +
610     + mbio->bi_sector = (r10_bio->devs[i].addr+
611     + rdev->data_offset);
612     + mbio->bi_bdev = rdev->bdev;
613     + mbio->bi_end_io = raid10_end_write_request;
614     + mbio->bi_rw = WRITE | do_sync | do_fua;
615     + mbio->bi_private = r10_bio;
616    
617     - atomic_inc(&r10_bio->remaining);
618     - spin_lock_irqsave(&conf->device_lock, flags);
619     - bio_list_add(&conf->pending_bio_list, mbio);
620     - conf->pending_count++;
621     - spin_unlock_irqrestore(&conf->device_lock, flags);
622     + atomic_inc(&r10_bio->remaining);
623     + spin_lock_irqsave(&conf->device_lock, flags);
624     + bio_list_add(&conf->pending_bio_list, mbio);
625     + conf->pending_count++;
626     + spin_unlock_irqrestore(&conf->device_lock, flags);
627     + }
628     }
629    
630     /* Don't remove the bias on 'remaining' (one_write_done) until
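
The rewritten write path treats the primary and replacement devices symmetrically: a faulty or unmerged primary is dropped to NULL, a clone is issued only for a device that actually exists, and the copy counts as degraded only when both are gone. A simplified model of that selection logic:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdbool.h>

    struct md_rdev { const char *name; bool faulty; };

    /* Simplified device selection from the write path above. */
    static void choose_devs(struct md_rdev *rdev, struct md_rdev *rrdev)
    {
        if (rdev && rdev->faulty)
            rdev = NULL;
        if (rrdev && rrdev->faulty)
            rrdev = NULL;

        if (!rdev && !rrdev) {
            printf("copy degraded\n");
            return;
        }
        if (rdev)
            printf("write to %s\n", rdev->name);
        if (rrdev)
            printf("write to %s\n", rrdev->name);
    }

    int main(void)
    {
        struct md_rdev primary = { "primary", true };
        struct md_rdev repl    = { "replacement", false };

        /* The old code dereferenced rdev unconditionally; here the
         * faulty primary is skipped and the replacement still gets
         * the data. */
        choose_devs(&primary, &repl);
        return 0;
    }
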
631     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
632     index 6af3101..b8e7f3e 100644
633     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
634     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
635     @@ -9131,10 +9131,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
636     */
637     static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
638     {
639     - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
640     - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
641     - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
642     - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
643     + if (!CHIP_IS_E1x(bp)) {
644     + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
645     + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
646     + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
647     + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
648     + 1 << BP_FUNC(bp));
649     + }
650     }
651     }
652    
653     diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
654     index 7f6a23f..d16dae2 100644
655     --- a/fs/nfs/blocklayout/blocklayout.c
656     +++ b/fs/nfs/blocklayout/blocklayout.c
657     @@ -162,25 +162,39 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
658     return bio;
659     }
660    
661     -static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
662     +static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
663     sector_t isect, struct page *page,
664     struct pnfs_block_extent *be,
665     void (*end_io)(struct bio *, int err),
666     - struct parallel_io *par)
667     + struct parallel_io *par,
668     + unsigned int offset, int len)
669     {
670     + isect = isect + (offset >> SECTOR_SHIFT);
671     + dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
672     + npg, rw, (unsigned long long)isect, offset, len);
673     retry:
674     if (!bio) {
675     bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
676     if (!bio)
677     return ERR_PTR(-ENOMEM);
678     }
679     - if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
680     + if (bio_add_page(bio, page, len, offset) < len) {
681     bio = bl_submit_bio(rw, bio);
682     goto retry;
683     }
684     return bio;
685     }
686    
687     +static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
688     + sector_t isect, struct page *page,
689     + struct pnfs_block_extent *be,
690     + void (*end_io)(struct bio *, int err),
691     + struct parallel_io *par)
692     +{
693     + return do_add_page_to_bio(bio, npg, rw, isect, page, be,
694     + end_io, par, 0, PAGE_CACHE_SIZE);
695     +}
696     +
697     /* This is basically copied from mpage_end_io_read */
698     static void bl_end_io_read(struct bio *bio, int err)
699     {
700     @@ -443,6 +457,107 @@ map_block(struct buffer_head *bh, sector_t isect, struct pnfs_block_extent *be)
701     return;
702     }
703    
704     +static void
705     +bl_read_single_end_io(struct bio *bio, int error)
706     +{
707     + struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
708     + struct page *page = bvec->bv_page;
709     +
710     + /* Only one page in bvec */
711     + unlock_page(page);
712     +}
713     +
714     +static int
715     +bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
716     + unsigned int offset, unsigned int len)
717     +{
718     + struct bio *bio;
719     + struct page *shadow_page;
720     + sector_t isect;
721     + char *kaddr, *kshadow_addr;
722     + int ret = 0;
723     +
724     + dprintk("%s: offset %u len %u\n", __func__, offset, len);
725     +
726     + shadow_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
727     + if (shadow_page == NULL)
728     + return -ENOMEM;
729     +
730     + bio = bio_alloc(GFP_NOIO, 1);
731     + if (bio == NULL)
732     + return -ENOMEM;
733     +
734     + isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
735     + (offset / SECTOR_SIZE);
736     +
737     + bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
738     + bio->bi_bdev = be->be_mdev;
739     + bio->bi_end_io = bl_read_single_end_io;
740     +
741     + lock_page(shadow_page);
742     + if (bio_add_page(bio, shadow_page,
743     + SECTOR_SIZE, round_down(offset, SECTOR_SIZE)) == 0) {
744     + unlock_page(shadow_page);
745     + bio_put(bio);
746     + return -EIO;
747     + }
748     +
749     + submit_bio(READ, bio);
750     + wait_on_page_locked(shadow_page);
751     + if (unlikely(!test_bit(BIO_UPTODATE, &bio->bi_flags))) {
752     + ret = -EIO;
753     + } else {
754     + kaddr = kmap_atomic(page);
755     + kshadow_addr = kmap_atomic(shadow_page);
756     + memcpy(kaddr + offset, kshadow_addr + offset, len);
757     + kunmap_atomic(kshadow_addr);
758     + kunmap_atomic(kaddr);
759     + }
760     + __free_page(shadow_page);
761     + bio_put(bio);
762     +
763     + return ret;
764     +}
765     +
766     +static int
767     +bl_read_partial_page_sync(struct page *page, struct pnfs_block_extent *be,
768     + unsigned int dirty_offset, unsigned int dirty_len,
769     + bool full_page)
770     +{
771     + int ret = 0;
772     + unsigned int start, end;
773     +
774     + if (full_page) {
775     + start = 0;
776     + end = PAGE_CACHE_SIZE;
777     + } else {
778     + start = round_down(dirty_offset, SECTOR_SIZE);
779     + end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);
780     + }
781     +
782     + dprintk("%s: offset %u len %d\n", __func__, dirty_offset, dirty_len);
783     + if (!be) {
784     + zero_user_segments(page, start, dirty_offset,
785     + dirty_offset + dirty_len, end);
786     + if (start == 0 && end == PAGE_CACHE_SIZE &&
787     + trylock_page(page)) {
788     + SetPageUptodate(page);
789     + unlock_page(page);
790     + }
791     + return ret;
792     + }
793     +
794     + if (start != dirty_offset)
795     + ret = bl_do_readpage_sync(page, be, start,
796     + dirty_offset - start);
797     +
798     + if (!ret && (dirty_offset + dirty_len < end))
799     + ret = bl_do_readpage_sync(page, be, dirty_offset + dirty_len,
800     + end - dirty_offset - dirty_len);
801     +
802     + return ret;
803     +}
804     +
805     /* Given an unmapped page, zero it or read in page for COW, page is locked
806     * by caller.
807     */
808     @@ -476,7 +591,6 @@ init_page_for_write(struct page *page, struct pnfs_block_extent *cow_read)
809     SetPageUptodate(page);
810    
811     cleanup:
812     - bl_put_extent(cow_read);
813     if (bh)
814     free_buffer_head(bh);
815     if (ret) {
816     @@ -547,6 +661,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
817     struct parallel_io *par;
818     loff_t offset = wdata->args.offset;
819     size_t count = wdata->args.count;
820     + unsigned int pg_offset, pg_len, saved_len;
821     struct page **pages = wdata->args.pages;
822     struct page *page;
823     pgoff_t index;
824     @@ -651,10 +766,11 @@ next_page:
825     if (!extent_length) {
826     /* We've used up the previous extent */
827     bl_put_extent(be);
828     + bl_put_extent(cow_read);
829     bio = bl_submit_bio(WRITE, bio);
830     /* Get the next one */
831     be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg),
832     - isect, NULL);
833     + isect, &cow_read);
834     if (!be || !is_writable(be, isect)) {
835     wdata->pnfs_error = -EINVAL;
836     goto out;
837     @@ -671,7 +787,26 @@ next_page:
838     extent_length = be->be_length -
839     (isect - be->be_f_offset);
840     }
841     - if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
842     +
843     + dprintk("%s offset %lld count %Zu\n", __func__, offset, count);
844     + pg_offset = offset & ~PAGE_CACHE_MASK;
845     + if (pg_offset + count > PAGE_CACHE_SIZE)
846     + pg_len = PAGE_CACHE_SIZE - pg_offset;
847     + else
848     + pg_len = count;
849     +
850     + saved_len = pg_len;
851     + if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
852     + !bl_is_sector_init(be->be_inval, isect)) {
853     + ret = bl_read_partial_page_sync(pages[i], cow_read,
854     + pg_offset, pg_len, true);
855     + if (ret) {
856     + dprintk("%s bl_read_partial_page_sync fail %d\n",
857     + __func__, ret);
858     + wdata->pnfs_error = ret;
859     + goto out;
860     + }
861     +
862     ret = bl_mark_sectors_init(be->be_inval, isect,
863     PAGE_CACHE_SECTORS);
864     if (unlikely(ret)) {
865     @@ -680,15 +815,33 @@ next_page:
866     wdata->pnfs_error = ret;
867     goto out;
868     }
869     +
870     + /* Expand to full page write */
871     + pg_offset = 0;
872     + pg_len = PAGE_CACHE_SIZE;
873     + } else if ((pg_offset & (SECTOR_SIZE - 1)) ||
874     + (pg_len & (SECTOR_SIZE - 1))) {
875     + /* ahh, nasty case. We have to do sync full sector
876     + * read-modify-write cycles.
877     + */
878     + unsigned int saved_offset = pg_offset;
879     + ret = bl_read_partial_page_sync(pages[i], be, pg_offset,
880     + pg_len, false);
881     + pg_offset = round_down(pg_offset, SECTOR_SIZE);
882     + pg_len = round_up(saved_offset + pg_len, SECTOR_SIZE)
883     + - pg_offset;
884     }
885     - bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE,
886     + bio = do_add_page_to_bio(bio, wdata->npages - i, WRITE,
887     isect, pages[i], be,
888     - bl_end_io_write, par);
889     + bl_end_io_write, par,
890     + pg_offset, pg_len);
891     if (IS_ERR(bio)) {
892     wdata->pnfs_error = PTR_ERR(bio);
893     bio = NULL;
894     goto out;
895     }
896     + offset += saved_len;
897     + count -= saved_len;
898     isect += PAGE_CACHE_SECTORS;
899     last_isect = isect;
900     extent_length -= PAGE_CACHE_SECTORS;
901     @@ -706,17 +859,16 @@ next_page:
902     }
903    
904     write_done:
905     - wdata->res.count = (last_isect << SECTOR_SHIFT) - (offset);
906     - if (count < wdata->res.count) {
907     - wdata->res.count = count;
908     - }
909     + wdata->res.count = wdata->args.count;
910     out:
911     bl_put_extent(be);
912     + bl_put_extent(cow_read);
913     bl_submit_bio(WRITE, bio);
914     put_parallel(par);
915     return PNFS_ATTEMPTED;
916     out_mds:
917     bl_put_extent(be);
918     + bl_put_extent(cow_read);
919     kfree(par);
920     return PNFS_NOT_ATTEMPTED;
921     }
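
bl_read_partial_page_sync() widens an unaligned dirty range to whole sectors and reads back the flanking bytes before the write goes out. A sketch of the rounding arithmetic, with power-of-two-only reimplementations of the kernel's round_down/round_up:

    #include <stdio.h>

    #define SECTOR_SHIFT 9
    #define SECTOR_SIZE (1 << SECTOR_SHIFT)

    /* Power-of-two-only versions of the kernel helpers. */
    #define round_down(x, y) ((x) & ~((y) - 1))
    #define round_up(x, y)   round_down((x) + (y) - 1, (y))

    int main(void)
    {
        /* A write that is not sector-aligned within the page ... */
        unsigned int dirty_offset = 700, dirty_len = 100;

        /* ... must be widened to whole sectors; the bytes between
         * start/end and the dirty range are read from disk first. */
        unsigned int start = round_down(dirty_offset, SECTOR_SIZE);
        unsigned int end = round_up(dirty_offset + dirty_len, SECTOR_SIZE);

        printf("dirty [%u,%u) -> rmw [%u,%u)\n",
               dirty_offset, dirty_offset + dirty_len, start, end);
        /* dirty [700,800) -> rmw [512,1024) */
        return 0;
    }
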
922     diff --git a/fs/nfs/blocklayout/blocklayout.h b/fs/nfs/blocklayout/blocklayout.h
923     index 0335069..39bb51a 100644
924     --- a/fs/nfs/blocklayout/blocklayout.h
925     +++ b/fs/nfs/blocklayout/blocklayout.h
926     @@ -41,6 +41,7 @@
927    
928     #define PAGE_CACHE_SECTORS (PAGE_CACHE_SIZE >> SECTOR_SHIFT)
929     #define PAGE_CACHE_SECTOR_SHIFT (PAGE_CACHE_SHIFT - SECTOR_SHIFT)
930     +#define SECTOR_SIZE (1 << SECTOR_SHIFT)
931    
932     struct block_mount_id {
933     spinlock_t bm_lock; /* protects list */
934     diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
935     index 0984a21..15f60d0 100644
936     --- a/kernel/sched/auto_group.c
937     +++ b/kernel/sched/auto_group.c
938     @@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
939    
940     p->signal->autogroup = autogroup_kref_get(ag);
941    
942     - if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
943     - goto out;
944     -
945     t = p;
946     do {
947     sched_move_task(t);
948     } while_each_thread(p, t);
949    
950     -out:
951     unlock_task_sighand(p, &flags);
952     autogroup_kref_put(prev);
953     }
954     diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h
955     index 8bd0471..443232e 100644
956     --- a/kernel/sched/auto_group.h
957     +++ b/kernel/sched/auto_group.h
958     @@ -4,11 +4,6 @@
959     #include <linux/rwsem.h>
960    
961     struct autogroup {
962     - /*
963     - * reference doesn't mean how many thread attach to this
964     - * autogroup now. It just stands for the number of task
965     - * could use this autogroup.
966     - */
967     struct kref kref;
968     struct task_group *tg;
969     struct rw_semaphore lock;
970     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
971     index 56f793d..bcb9d34 100644
972     --- a/kernel/workqueue.c
973     +++ b/kernel/workqueue.c
974     @@ -2040,8 +2040,10 @@ static int rescuer_thread(void *__wq)
975     repeat:
976     set_current_state(TASK_INTERRUPTIBLE);
977    
978     - if (kthread_should_stop())
979     + if (kthread_should_stop()) {
980     + __set_current_state(TASK_RUNNING);
981     return 0;
982     + }
983    
984     /*
985     * See whether any cpu is asking for help. Unbounded
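
The workqueue fix restores the invariant that a kthread returns in TASK_RUNNING: after set_current_state(TASK_INTERRUPTIBLE), any early exit must flip the state back before returning. A toy model of the fixed loop shape (both state setters reduced to plain assignment; the kernel variants differ only in memory barriers):

    #include <stdio.h>

    #define TASK_RUNNING       0
    #define TASK_INTERRUPTIBLE 1

    static int task_state = TASK_RUNNING;

    static void set_current_state(int s) { task_state = s; }
    #define __set_current_state set_current_state

    /* Shape of the fixed rescuer loop: if the thread bails out after
     * announcing it is about to sleep, it must set itself back to
     * TASK_RUNNING or it exits while nominally asleep. */
    static int rescuer_thread(int should_stop)
    {
        set_current_state(TASK_INTERRUPTIBLE);

        if (should_stop) {
            __set_current_state(TASK_RUNNING);  /* the added fix */
            return 0;
        }
        /* ... schedule() and do the real rescue work here ... */
        __set_current_state(TASK_RUNNING);
        return 0;
    }

    int main(void)
    {
        rescuer_thread(1);
        printf("state at exit: %d\n", task_state); /* 0 == TASK_RUNNING */
        return 0;
    }
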
986     diff --git a/mm/memory-failure.c b/mm/memory-failure.c
987     index 274c3cc..d86fb20 100644
988     --- a/mm/memory-failure.c
989     +++ b/mm/memory-failure.c
990     @@ -1481,9 +1481,17 @@ int soft_offline_page(struct page *page, int flags)
991     {
992     int ret;
993     unsigned long pfn = page_to_pfn(page);
994     + struct page *hpage = compound_trans_head(page);
995    
996     if (PageHuge(page))
997     return soft_offline_huge_page(page, flags);
998     + if (PageTransHuge(hpage)) {
999     + if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) {
1000     + pr_info("soft offline: %#lx: failed to split THP\n",
1001     + pfn);
1002     + return -EBUSY;
1003     + }
1004     + }
1005    
1006     ret = get_any_page(page, pfn, flags);
1007     if (ret < 0)
1008     diff --git a/mm/sparse.c b/mm/sparse.c
1009     index a8bc7d3..290dba2 100644
1010     --- a/mm/sparse.c
1011     +++ b/mm/sparse.c
1012     @@ -619,7 +619,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
1013     {
1014     return; /* XXX: Not implemented yet */
1015     }
1016     -static void free_map_bootmem(struct page *page, unsigned long nr_pages)
1017     +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
1018     {
1019     }
1020     #else
1021     @@ -660,10 +660,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
1022     get_order(sizeof(struct page) * nr_pages));
1023     }
1024    
1025     -static void free_map_bootmem(struct page *page, unsigned long nr_pages)
1026     +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages)
1027     {
1028     unsigned long maps_section_nr, removing_section_nr, i;
1029     unsigned long magic;
1030     + struct page *page = virt_to_page(memmap);
1031    
1032     for (i = 0; i < nr_pages; i++, page++) {
1033     magic = (unsigned long) page->lru.next;
1034     @@ -712,13 +713,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap)
1035     */
1036    
1037     if (memmap) {
1038     - struct page *memmap_page;
1039     - memmap_page = virt_to_page(memmap);
1040     -
1041     nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
1042     >> PAGE_SHIFT;
1043    
1044     - free_map_bootmem(memmap_page, nr_pages);
1045     + free_map_bootmem(memmap, nr_pages);
1046     }
1047     }
1048    
1049     diff --git a/scripts/package/buildtar b/scripts/package/buildtar
1050     index 8a7b155..d0d748e 100644
1051     --- a/scripts/package/buildtar
1052     +++ b/scripts/package/buildtar
1053     @@ -109,7 +109,7 @@ esac
1054     if tar --owner=root --group=root --help >/dev/null 2>&1; then
1055     opts="--owner=root --group=root"
1056     fi
1057     - tar cf - . $opts | ${compress} > "${tarball}${file_ext}"
1058     + tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
1059     )
1060    
1061     echo "Tarball successfully created in ${tarball}${file_ext}"