Annotation of /trunk/kernel-magellan/patches-3.6/0109-3.6.10-all-fixes.patch
Parent Directory | Revision Log
Revision 2014 -
(hide annotations)
(download)
Tue Jan 8 09:12:21 2013 UTC (11 years, 5 months ago) by niro
File size: 34525 byte(s)
-linux 3.6.10
1 | niro | 2014 | diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig |
2 | index 48c19d4..589bdba 100644 | ||
3 | --- a/arch/arm/Kconfig | ||
4 | +++ b/arch/arm/Kconfig | ||
5 | @@ -585,6 +585,7 @@ config ARCH_KIRKWOOD | ||
6 | bool "Marvell Kirkwood" | ||
7 | select CPU_FEROCEON | ||
8 | select PCI | ||
9 | + select PCI_QUIRKS | ||
10 | select ARCH_REQUIRE_GPIOLIB | ||
11 | select GENERIC_CLOCKEVENTS | ||
12 | select NEED_MACH_IO_H | ||
13 | diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h | ||
14 | index 3ad9f94..11799c3 100644 | ||
15 | --- a/arch/arm/mach-dove/include/mach/pm.h | ||
16 | +++ b/arch/arm/mach-dove/include/mach/pm.h | ||
17 | @@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin) | ||
18 | |||
19 | static inline int irq_to_pmu(int irq) | ||
20 | { | ||
21 | - if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS) | ||
22 | + if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) | ||
23 | return irq - IRQ_DOVE_PMU_START; | ||
24 | |||
25 | return -EINVAL; | ||
26 | diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c | ||
27 | index 9bc97a5..8c861ae 100644 | ||
28 | --- a/arch/arm/mach-dove/irq.c | ||
29 | +++ b/arch/arm/mach-dove/irq.c | ||
30 | @@ -45,8 +45,20 @@ static void pmu_irq_ack(struct irq_data *d) | ||
31 | int pin = irq_to_pmu(d->irq); | ||
32 | u32 u; | ||
33 | |||
34 | + /* | ||
35 | + * The PMU mask register is not RW0C: it is RW. This means that | ||
36 | + * the bits take whatever value is written to them; if you write | ||
37 | + * a '1', you will set the interrupt. | ||
38 | + * | ||
39 | + * Unfortunately this means there is NO race free way to clear | ||
40 | + * these interrupts. | ||
41 | + * | ||
42 | + * So, let's structure the code so that the window is as small as | ||
43 | + * possible. | ||
44 | + */ | ||
45 | u = ~(1 << (pin & 31)); | ||
46 | - writel(u, PMU_INTERRUPT_CAUSE); | ||
47 | + u &= readl_relaxed(PMU_INTERRUPT_CAUSE); | ||
48 | + writel_relaxed(u, PMU_INTERRUPT_CAUSE); | ||
49 | } | ||
50 | |||
51 | static struct irq_chip pmu_irq_chip = { | ||
52 | diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c | ||
53 | index 6e8b2ef..f3f55b2 100644 | ||
54 | --- a/arch/arm/mach-kirkwood/pcie.c | ||
55 | +++ b/arch/arm/mach-kirkwood/pcie.c | ||
56 | @@ -225,14 +225,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys) | ||
57 | return 1; | ||
58 | } | ||
59 | |||
60 | +/* | ||
61 | + * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it | ||
62 | + * is operating as a root complex this needs to be switched to | ||
63 | + * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on | ||
64 | + * the device. Decoding setup is handled by the orion code. | ||
65 | + */ | ||
66 | static void __devinit rc_pci_fixup(struct pci_dev *dev) | ||
67 | { | ||
68 | - /* | ||
69 | - * Prevent enumeration of root complex. | ||
70 | - */ | ||
71 | if (dev->bus->parent == NULL && dev->devfn == 0) { | ||
72 | int i; | ||
73 | |||
74 | + dev->class &= 0xff; | ||
75 | + dev->class |= PCI_CLASS_BRIDGE_HOST << 8; | ||
76 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
77 | dev->resource[i].start = 0; | ||
78 | dev->resource[i].end = 0; | ||
79 | diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h | ||
80 | index 75f4c6d..04cb0f8 100644 | ||
81 | --- a/arch/x86/include/asm/fpu-internal.h | ||
82 | +++ b/arch/x86/include/asm/fpu-internal.h | ||
83 | @@ -334,14 +334,17 @@ static inline void __thread_fpu_begin(struct task_struct *tsk) | ||
84 | typedef struct { int preload; } fpu_switch_t; | ||
85 | |||
86 | /* | ||
87 | - * FIXME! We could do a totally lazy restore, but we need to | ||
88 | - * add a per-cpu "this was the task that last touched the FPU | ||
89 | - * on this CPU" variable, and the task needs to have a "I last | ||
90 | - * touched the FPU on this CPU" and check them. | ||
91 | + * Must be run with preemption disabled: this clears the fpu_owner_task, | ||
92 | + * on this CPU. | ||
93 | * | ||
94 | - * We don't do that yet, so "fpu_lazy_restore()" always returns | ||
95 | - * false, but some day.. | ||
96 | + * This will disable any lazy FPU state restore of the current FPU state, | ||
97 | + * but if the current thread owns the FPU, it will still be saved by. | ||
98 | */ | ||
99 | +static inline void __cpu_disable_lazy_restore(unsigned int cpu) | ||
100 | +{ | ||
101 | + per_cpu(fpu_owner_task, cpu) = NULL; | ||
102 | +} | ||
103 | + | ||
104 | static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu) | ||
105 | { | ||
106 | return new == this_cpu_read_stable(fpu_owner_task) && | ||
107 | diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c | ||
108 | index 9d92e19..e4da438 100644 | ||
109 | --- a/arch/x86/kernel/cpu/amd.c | ||
110 | +++ b/arch/x86/kernel/cpu/amd.c | ||
111 | @@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | ||
112 | } | ||
113 | } | ||
114 | |||
115 | + /* | ||
116 | + * The way access filter has a performance penalty on some workloads. | ||
117 | + * Disable it on the affected CPUs. | ||
118 | + */ | ||
119 | + if ((c->x86 == 0x15) && | ||
120 | + (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { | ||
121 | + u64 val; | ||
122 | + | ||
123 | + if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { | ||
124 | + val |= 0x1E; | ||
125 | + wrmsrl_safe(0xc0011021, val); | ||
126 | + } | ||
127 | + } | ||
128 | + | ||
129 | cpu_detect_cache_sizes(c); | ||
130 | |||
131 | /* Multi core CPU? */ | ||
132 | diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c | ||
133 | index 7c5a8c3..23c39cf 100644 | ||
134 | --- a/arch/x86/kernel/smpboot.c | ||
135 | +++ b/arch/x86/kernel/smpboot.c | ||
136 | @@ -68,6 +68,8 @@ | ||
137 | #include <asm/mwait.h> | ||
138 | #include <asm/apic.h> | ||
139 | #include <asm/io_apic.h> | ||
140 | +#include <asm/i387.h> | ||
141 | +#include <asm/fpu-internal.h> | ||
142 | #include <asm/setup.h> | ||
143 | #include <asm/uv/uv.h> | ||
144 | #include <linux/mc146818rtc.h> | ||
145 | @@ -817,6 +819,9 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) | ||
146 | |||
147 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
148 | |||
149 | + /* the FPU context is blank, nobody can own it */ | ||
150 | + __cpu_disable_lazy_restore(cpu); | ||
151 | + | ||
152 | err = do_boot_cpu(apicid, cpu, tidle); | ||
153 | if (err) { | ||
154 | pr_debug("do_boot_cpu failed %d\n", err); | ||
155 | diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c | ||
156 | index bfc31cb..bebe17f 100644 | ||
157 | --- a/drivers/acpi/processor_driver.c | ||
158 | +++ b/drivers/acpi/processor_driver.c | ||
159 | @@ -409,6 +409,7 @@ static void acpi_processor_notify(struct acpi_device *device, u32 event) | ||
160 | acpi_bus_generate_proc_event(device, event, 0); | ||
161 | acpi_bus_generate_netlink_event(device->pnp.device_class, | ||
162 | dev_name(&device->dev), event, 0); | ||
163 | + break; | ||
164 | default: | ||
165 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | ||
166 | "Unsupported event [0x%x]\n", event)); | ||
167 | diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c | ||
168 | index a09d066..9d669cd 100644 | ||
169 | --- a/drivers/edac/i7300_edac.c | ||
170 | +++ b/drivers/edac/i7300_edac.c | ||
171 | @@ -197,8 +197,8 @@ static const char *ferr_fat_fbd_name[] = { | ||
172 | [0] = "Memory Write error on non-redundant retry or " | ||
173 | "FBD configuration Write error on retry", | ||
174 | }; | ||
175 | -#define GET_FBD_FAT_IDX(fbderr) (fbderr & (3 << 28)) | ||
176 | -#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)) | ||
177 | +#define GET_FBD_FAT_IDX(fbderr) (((fbderr) >> 28) & 3) | ||
178 | +#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22)) | ||
179 | |||
180 | #define FERR_NF_FBD 0xa0 | ||
181 | static const char *ferr_nf_fbd_name[] = { | ||
182 | @@ -225,7 +225,7 @@ static const char *ferr_nf_fbd_name[] = { | ||
183 | [1] = "Aliased Uncorrectable Non-Mirrored Demand Data ECC", | ||
184 | [0] = "Uncorrectable Data ECC on Replay", | ||
185 | }; | ||
186 | -#define GET_FBD_NF_IDX(fbderr) (fbderr & (3 << 28)) | ||
187 | +#define GET_FBD_NF_IDX(fbderr) (((fbderr) >> 28) & 3) | ||
188 | #define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\ | ||
189 | (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\ | ||
190 | (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\ | ||
191 | @@ -464,7 +464,7 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci) | ||
192 | errnum = find_first_bit(&errors, | ||
193 | ARRAY_SIZE(ferr_nf_fbd_name)); | ||
194 | specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum); | ||
195 | - branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0; | ||
196 | + branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0; | ||
197 | |||
198 | pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, | ||
199 | REDMEMA, &syndrome); | ||
200 | diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c | ||
201 | index 3672101..10c8c00 100644 | ||
202 | --- a/drivers/edac/i7core_edac.c | ||
203 | +++ b/drivers/edac/i7core_edac.c | ||
204 | @@ -816,7 +816,7 @@ static ssize_t i7core_inject_store_##param( \ | ||
205 | struct device_attribute *mattr, \ | ||
206 | const char *data, size_t count) \ | ||
207 | { \ | ||
208 | - struct mem_ctl_info *mci = to_mci(dev); \ | ||
209 | + struct mem_ctl_info *mci = dev_get_drvdata(dev); \ | ||
210 | struct i7core_pvt *pvt; \ | ||
211 | long value; \ | ||
212 | int rc; \ | ||
213 | @@ -845,7 +845,7 @@ static ssize_t i7core_inject_show_##param( \ | ||
214 | struct device_attribute *mattr, \ | ||
215 | char *data) \ | ||
216 | { \ | ||
217 | - struct mem_ctl_info *mci = to_mci(dev); \ | ||
218 | + struct mem_ctl_info *mci = dev_get_drvdata(dev); \ | ||
219 | struct i7core_pvt *pvt; \ | ||
220 | \ | ||
221 | pvt = mci->pvt_info; \ | ||
222 | @@ -1052,7 +1052,7 @@ static ssize_t i7core_show_counter_##param( \ | ||
223 | struct device_attribute *mattr, \ | ||
224 | char *data) \ | ||
225 | { \ | ||
226 | - struct mem_ctl_info *mci = to_mci(dev); \ | ||
227 | + struct mem_ctl_info *mci = dev_get_drvdata(dev); \ | ||
228 | struct i7core_pvt *pvt = mci->pvt_info; \ | ||
229 | \ | ||
230 | edac_dbg(1, "\n"); \ | ||
231 | diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c | ||
232 | index e9a6f6a..0c52448 100644 | ||
233 | --- a/drivers/gpu/drm/i915/intel_lvds.c | ||
234 | +++ b/drivers/gpu/drm/i915/intel_lvds.c | ||
235 | @@ -788,6 +788,14 @@ static const struct dmi_system_id intel_no_lvds[] = { | ||
236 | DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), | ||
237 | }, | ||
238 | }, | ||
239 | + { | ||
240 | + .callback = intel_no_lvds_dmi_callback, | ||
241 | + .ident = "Supermicro X7SPA-H", | ||
242 | + .matches = { | ||
243 | + DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), | ||
244 | + DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), | ||
245 | + }, | ||
246 | + }, | ||
247 | |||
248 | { } /* terminating entry */ | ||
249 | }; | ||
250 | diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c | ||
251 | index e93b80a..5528fea 100644 | ||
252 | --- a/drivers/gpu/drm/radeon/evergreen.c | ||
253 | +++ b/drivers/gpu/drm/radeon/evergreen.c | ||
254 | @@ -37,6 +37,16 @@ | ||
255 | #define EVERGREEN_PFP_UCODE_SIZE 1120 | ||
256 | #define EVERGREEN_PM4_UCODE_SIZE 1376 | ||
257 | |||
258 | +static const u32 crtc_offsets[6] = | ||
259 | +{ | ||
260 | + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
261 | + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
262 | + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
263 | + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
264 | + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
265 | + EVERGREEN_CRTC5_REGISTER_OFFSET | ||
266 | +}; | ||
267 | + | ||
268 | static void evergreen_gpu_init(struct radeon_device *rdev); | ||
269 | void evergreen_fini(struct radeon_device *rdev); | ||
270 | void evergreen_pcie_gen2_enable(struct radeon_device *rdev); | ||
271 | @@ -109,17 +119,19 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev) | ||
272 | */ | ||
273 | void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc) | ||
274 | { | ||
275 | - struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
276 | int i; | ||
277 | |||
278 | - if (RREG32(EVERGREEN_CRTC_CONTROL + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_MASTER_EN) { | ||
279 | + if (crtc >= rdev->num_crtc) | ||
280 | + return; | ||
281 | + | ||
282 | + if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) { | ||
283 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
284 | - if (!(RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK)) | ||
285 | + if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)) | ||
286 | break; | ||
287 | udelay(1); | ||
288 | } | ||
289 | for (i = 0; i < rdev->usec_timeout; i++) { | ||
290 | - if (RREG32(EVERGREEN_CRTC_STATUS + radeon_crtc->crtc_offset) & EVERGREEN_CRTC_V_BLANK) | ||
291 | + if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK) | ||
292 | break; | ||
293 | udelay(1); | ||
294 | } | ||
295 | @@ -1229,116 +1241,105 @@ void evergreen_agp_enable(struct radeon_device *rdev) | ||
296 | |||
297 | void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) | ||
298 | { | ||
299 | + u32 crtc_enabled, tmp, frame_count, blackout; | ||
300 | + int i, j; | ||
301 | + | ||
302 | save->vga_render_control = RREG32(VGA_RENDER_CONTROL); | ||
303 | save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); | ||
304 | |||
305 | - /* Stop all video */ | ||
306 | + /* disable VGA render */ | ||
307 | WREG32(VGA_RENDER_CONTROL, 0); | ||
308 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); | ||
309 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); | ||
310 | - if (rdev->num_crtc >= 4) { | ||
311 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); | ||
312 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); | ||
313 | - } | ||
314 | - if (rdev->num_crtc >= 6) { | ||
315 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); | ||
316 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); | ||
317 | - } | ||
318 | - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | ||
319 | - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | ||
320 | - if (rdev->num_crtc >= 4) { | ||
321 | - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | ||
322 | - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | ||
323 | - } | ||
324 | - if (rdev->num_crtc >= 6) { | ||
325 | - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
326 | - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
327 | - } | ||
328 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); | ||
329 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); | ||
330 | - if (rdev->num_crtc >= 4) { | ||
331 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); | ||
332 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); | ||
333 | - } | ||
334 | - if (rdev->num_crtc >= 6) { | ||
335 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); | ||
336 | - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); | ||
337 | + /* blank the display controllers */ | ||
338 | + for (i = 0; i < rdev->num_crtc; i++) { | ||
339 | + crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; | ||
340 | + if (crtc_enabled) { | ||
341 | + save->crtc_enabled[i] = true; | ||
342 | + if (ASIC_IS_DCE6(rdev)) { | ||
343 | + tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); | ||
344 | + if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) { | ||
345 | + radeon_wait_for_vblank(rdev, i); | ||
346 | + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; | ||
347 | + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); | ||
348 | + } | ||
349 | + } else { | ||
350 | + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); | ||
351 | + if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) { | ||
352 | + radeon_wait_for_vblank(rdev, i); | ||
353 | + tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; | ||
354 | + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); | ||
355 | + } | ||
356 | + } | ||
357 | + /* wait for the next frame */ | ||
358 | + frame_count = radeon_get_vblank_counter(rdev, i); | ||
359 | + for (j = 0; j < rdev->usec_timeout; j++) { | ||
360 | + if (radeon_get_vblank_counter(rdev, i) != frame_count) | ||
361 | + break; | ||
362 | + udelay(1); | ||
363 | + } | ||
364 | + } else { | ||
365 | + save->crtc_enabled[i] = false; | ||
366 | + } | ||
367 | } | ||
368 | |||
369 | - WREG32(D1VGA_CONTROL, 0); | ||
370 | - WREG32(D2VGA_CONTROL, 0); | ||
371 | - if (rdev->num_crtc >= 4) { | ||
372 | - WREG32(EVERGREEN_D3VGA_CONTROL, 0); | ||
373 | - WREG32(EVERGREEN_D4VGA_CONTROL, 0); | ||
374 | - } | ||
375 | - if (rdev->num_crtc >= 6) { | ||
376 | - WREG32(EVERGREEN_D5VGA_CONTROL, 0); | ||
377 | - WREG32(EVERGREEN_D6VGA_CONTROL, 0); | ||
378 | + radeon_mc_wait_for_idle(rdev); | ||
379 | + | ||
380 | + blackout = RREG32(MC_SHARED_BLACKOUT_CNTL); | ||
381 | + if ((blackout & BLACKOUT_MODE_MASK) != 1) { | ||
382 | + /* Block CPU access */ | ||
383 | + WREG32(BIF_FB_EN, 0); | ||
384 | + /* blackout the MC */ | ||
385 | + blackout &= ~BLACKOUT_MODE_MASK; | ||
386 | + WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) | ||
391 | { | ||
392 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
393 | - upper_32_bits(rdev->mc.vram_start)); | ||
394 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
395 | - upper_32_bits(rdev->mc.vram_start)); | ||
396 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
397 | - (u32)rdev->mc.vram_start); | ||
398 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET, | ||
399 | - (u32)rdev->mc.vram_start); | ||
400 | - | ||
401 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
402 | - upper_32_bits(rdev->mc.vram_start)); | ||
403 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
404 | - upper_32_bits(rdev->mc.vram_start)); | ||
405 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
406 | - (u32)rdev->mc.vram_start); | ||
407 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET, | ||
408 | - (u32)rdev->mc.vram_start); | ||
409 | - | ||
410 | - if (rdev->num_crtc >= 4) { | ||
411 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
412 | - upper_32_bits(rdev->mc.vram_start)); | ||
413 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
414 | - upper_32_bits(rdev->mc.vram_start)); | ||
415 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
416 | - (u32)rdev->mc.vram_start); | ||
417 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET, | ||
418 | - (u32)rdev->mc.vram_start); | ||
419 | - | ||
420 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
421 | - upper_32_bits(rdev->mc.vram_start)); | ||
422 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
423 | - upper_32_bits(rdev->mc.vram_start)); | ||
424 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
425 | - (u32)rdev->mc.vram_start); | ||
426 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET, | ||
427 | - (u32)rdev->mc.vram_start); | ||
428 | - } | ||
429 | - if (rdev->num_crtc >= 6) { | ||
430 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
431 | - upper_32_bits(rdev->mc.vram_start)); | ||
432 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
433 | - upper_32_bits(rdev->mc.vram_start)); | ||
434 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
435 | - (u32)rdev->mc.vram_start); | ||
436 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET, | ||
437 | - (u32)rdev->mc.vram_start); | ||
438 | + u32 tmp, frame_count; | ||
439 | + int i, j; | ||
440 | |||
441 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
442 | + /* update crtc base addresses */ | ||
443 | + for (i = 0; i < rdev->num_crtc; i++) { | ||
444 | + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], | ||
445 | upper_32_bits(rdev->mc.vram_start)); | ||
446 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
447 | + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], | ||
448 | upper_32_bits(rdev->mc.vram_start)); | ||
449 | - WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
450 | + WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], | ||
451 | (u32)rdev->mc.vram_start); | ||
452 | - WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET, | ||
453 | + WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], | ||
454 | (u32)rdev->mc.vram_start); | ||
455 | } | ||
456 | - | ||
457 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); | ||
458 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); | ||
459 | - /* Unlock host access */ | ||
460 | + | ||
461 | + /* unblackout the MC */ | ||
462 | + tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); | ||
463 | + tmp &= ~BLACKOUT_MODE_MASK; | ||
464 | + WREG32(MC_SHARED_BLACKOUT_CNTL, tmp); | ||
465 | + /* allow CPU access */ | ||
466 | + WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); | ||
467 | + | ||
468 | + for (i = 0; i < rdev->num_crtc; i++) { | ||
469 | + if (save->crtc_enabled[i]) { | ||
470 | + if (ASIC_IS_DCE6(rdev)) { | ||
471 | + tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]); | ||
472 | + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; | ||
473 | + WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); | ||
474 | + } else { | ||
475 | + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); | ||
476 | + tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE; | ||
477 | + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp); | ||
478 | + } | ||
479 | + /* wait for the next frame */ | ||
480 | + frame_count = radeon_get_vblank_counter(rdev, i); | ||
481 | + for (j = 0; j < rdev->usec_timeout; j++) { | ||
482 | + if (radeon_get_vblank_counter(rdev, i) != frame_count) | ||
483 | + break; | ||
484 | + udelay(1); | ||
485 | + } | ||
486 | + } | ||
487 | + } | ||
488 | + /* Unlock vga access */ | ||
489 | WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); | ||
490 | mdelay(1); | ||
491 | WREG32(VGA_RENDER_CONTROL, save->vga_render_control); | ||
492 | diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h | ||
493 | index 8beac10..034f4c2 100644 | ||
494 | --- a/drivers/gpu/drm/radeon/evergreen_reg.h | ||
495 | +++ b/drivers/gpu/drm/radeon/evergreen_reg.h | ||
496 | @@ -218,6 +218,8 @@ | ||
497 | #define EVERGREEN_CRTC_CONTROL 0x6e70 | ||
498 | # define EVERGREEN_CRTC_MASTER_EN (1 << 0) | ||
499 | # define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24) | ||
500 | +#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74 | ||
501 | +# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8) | ||
502 | #define EVERGREEN_CRTC_STATUS 0x6e8c | ||
503 | # define EVERGREEN_CRTC_V_BLANK (1 << 0) | ||
504 | #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 | ||
505 | diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h | ||
506 | index 302af4f..2bc0f6a 100644 | ||
507 | --- a/drivers/gpu/drm/radeon/evergreend.h | ||
508 | +++ b/drivers/gpu/drm/radeon/evergreend.h | ||
509 | @@ -87,6 +87,10 @@ | ||
510 | |||
511 | #define CONFIG_MEMSIZE 0x5428 | ||
512 | |||
513 | +#define BIF_FB_EN 0x5490 | ||
514 | +#define FB_READ_EN (1 << 0) | ||
515 | +#define FB_WRITE_EN (1 << 1) | ||
516 | + | ||
517 | #define CP_STRMOUT_CNTL 0x84FC | ||
518 | |||
519 | #define CP_COHER_CNTL 0x85F0 | ||
520 | @@ -434,6 +438,9 @@ | ||
521 | #define NOOFCHAN_MASK 0x00003000 | ||
522 | #define MC_SHARED_CHREMAP 0x2008 | ||
523 | |||
524 | +#define MC_SHARED_BLACKOUT_CNTL 0x20ac | ||
525 | +#define BLACKOUT_MODE_MASK 0x00000007 | ||
526 | + | ||
527 | #define MC_ARB_RAMCFG 0x2760 | ||
528 | #define NOOFBANK_SHIFT 0 | ||
529 | #define NOOFBANK_MASK 0x00000003 | ||
530 | diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h | ||
531 | index 18c38d1..132429e 100644 | ||
532 | --- a/drivers/gpu/drm/radeon/radeon_asic.h | ||
533 | +++ b/drivers/gpu/drm/radeon/radeon_asic.h | ||
534 | @@ -389,6 +389,7 @@ void r700_cp_fini(struct radeon_device *rdev); | ||
535 | struct evergreen_mc_save { | ||
536 | u32 vga_render_control; | ||
537 | u32 vga_hdp_control; | ||
538 | + bool crtc_enabled[RADEON_MAX_CRTCS]; | ||
539 | }; | ||
540 | |||
541 | void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); | ||
542 | diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c | ||
543 | index 05bb49e..7077dcf 100644 | ||
544 | --- a/drivers/md/raid1.c | ||
545 | +++ b/drivers/md/raid1.c | ||
546 | @@ -958,7 +958,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) | ||
547 | struct r1conf *conf = mddev->private; | ||
548 | struct bio *bio; | ||
549 | |||
550 | - if (from_schedule) { | ||
551 | + if (from_schedule || current->bio_list) { | ||
552 | spin_lock_irq(&conf->device_lock); | ||
553 | bio_list_merge(&conf->pending_bio_list, &plug->pending); | ||
554 | conf->pending_count += plug->pending_cnt; | ||
555 | diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | ||
556 | index 0875ecf..b028b5e 100644 | ||
557 | --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | ||
558 | +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | ||
559 | @@ -9545,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp) | ||
560 | */ | ||
561 | static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp) | ||
562 | { | ||
563 | - u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); | ||
564 | - if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { | ||
565 | - BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); | ||
566 | - REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp)); | ||
567 | + if (!CHIP_IS_E1x(bp)) { | ||
568 | + u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS); | ||
569 | + if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) { | ||
570 | + BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing"); | ||
571 | + REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, | ||
572 | + 1 << BP_FUNC(bp)); | ||
573 | + } | ||
574 | } | ||
575 | } | ||
576 | |||
577 | diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c | ||
578 | index 995d0cf..a2679d8 100644 | ||
579 | --- a/drivers/net/ethernet/realtek/8139cp.c | ||
580 | +++ b/drivers/net/ethernet/realtek/8139cp.c | ||
581 | @@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp) | ||
582 | cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); | ||
583 | cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); | ||
584 | |||
585 | - cpw32_f(HiTxRingAddr, 0); | ||
586 | - cpw32_f(HiTxRingAddr + 4, 0); | ||
587 | - | ||
588 | - ring_dma = cp->ring_dma; | ||
589 | - cpw32_f(RxRingAddr, ring_dma & 0xffffffff); | ||
590 | - cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
591 | - | ||
592 | - ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; | ||
593 | - cpw32_f(TxRingAddr, ring_dma & 0xffffffff); | ||
594 | - cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
595 | - | ||
596 | cp_start_hw(cp); | ||
597 | cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ | ||
598 | |||
599 | @@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp) | ||
600 | |||
601 | cpw8(Config5, cpr8(Config5) & PMEStatus); | ||
602 | |||
603 | + cpw32_f(HiTxRingAddr, 0); | ||
604 | + cpw32_f(HiTxRingAddr + 4, 0); | ||
605 | + | ||
606 | + ring_dma = cp->ring_dma; | ||
607 | + cpw32_f(RxRingAddr, ring_dma & 0xffffffff); | ||
608 | + cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
609 | + | ||
610 | + ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE; | ||
611 | + cpw32_f(TxRingAddr, ring_dma & 0xffffffff); | ||
612 | + cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16); | ||
613 | + | ||
614 | cpw16(MultiIntr, 0); | ||
615 | |||
616 | cpw8_f(Cfg9346, Cfg9346_Lock); | ||
617 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c | ||
618 | index 3585f93..1391f8d 100644 | ||
619 | --- a/drivers/net/usb/qmi_wwan.c | ||
620 | +++ b/drivers/net/usb/qmi_wwan.c | ||
621 | @@ -398,16 +398,58 @@ static const struct usb_device_id products[] = { | ||
622 | }, | ||
623 | |||
624 | /* 3. Combined interface devices matching on interface number */ | ||
625 | + {QMI_FIXED_INTF(0x12d1, 0x140c, 1)}, /* Huawei E173 */ | ||
626 | + {QMI_FIXED_INTF(0x19d2, 0x0002, 1)}, | ||
627 | + {QMI_FIXED_INTF(0x19d2, 0x0012, 1)}, | ||
628 | + {QMI_FIXED_INTF(0x19d2, 0x0017, 3)}, | ||
629 | + {QMI_FIXED_INTF(0x19d2, 0x0021, 4)}, | ||
630 | + {QMI_FIXED_INTF(0x19d2, 0x0025, 1)}, | ||
631 | + {QMI_FIXED_INTF(0x19d2, 0x0031, 4)}, | ||
632 | + {QMI_FIXED_INTF(0x19d2, 0x0042, 4)}, | ||
633 | + {QMI_FIXED_INTF(0x19d2, 0x0049, 5)}, | ||
634 | + {QMI_FIXED_INTF(0x19d2, 0x0052, 4)}, | ||
635 | {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */ | ||
636 | + {QMI_FIXED_INTF(0x19d2, 0x0058, 4)}, | ||
637 | {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */ | ||
638 | {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */ | ||
639 | + {QMI_FIXED_INTF(0x19d2, 0x0113, 5)}, | ||
640 | + {QMI_FIXED_INTF(0x19d2, 0x0118, 5)}, | ||
641 | + {QMI_FIXED_INTF(0x19d2, 0x0121, 5)}, | ||
642 | + {QMI_FIXED_INTF(0x19d2, 0x0123, 4)}, | ||
643 | + {QMI_FIXED_INTF(0x19d2, 0x0124, 5)}, | ||
644 | + {QMI_FIXED_INTF(0x19d2, 0x0125, 6)}, | ||
645 | + {QMI_FIXED_INTF(0x19d2, 0x0126, 5)}, | ||
646 | + {QMI_FIXED_INTF(0x19d2, 0x0130, 1)}, | ||
647 | + {QMI_FIXED_INTF(0x19d2, 0x0133, 3)}, | ||
648 | + {QMI_FIXED_INTF(0x19d2, 0x0141, 5)}, | ||
649 | {QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */ | ||
650 | + {QMI_FIXED_INTF(0x19d2, 0x0158, 3)}, | ||
651 | {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */ | ||
652 | + {QMI_FIXED_INTF(0x19d2, 0x0168, 4)}, | ||
653 | + {QMI_FIXED_INTF(0x19d2, 0x0176, 3)}, | ||
654 | + {QMI_FIXED_INTF(0x19d2, 0x0178, 3)}, | ||
655 | + {QMI_FIXED_INTF(0x19d2, 0x0191, 4)}, /* ZTE EuFi890 */ | ||
656 | + {QMI_FIXED_INTF(0x19d2, 0x0199, 1)}, /* ZTE MF820S */ | ||
657 | + {QMI_FIXED_INTF(0x19d2, 0x0200, 1)}, | ||
658 | + {QMI_FIXED_INTF(0x19d2, 0x0257, 3)}, /* ZTE MF821 */ | ||
659 | {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ | ||
660 | {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ | ||
661 | {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ | ||
662 | + {QMI_FIXED_INTF(0x19d2, 0x1012, 4)}, | ||
663 | {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */ | ||
664 | + {QMI_FIXED_INTF(0x19d2, 0x1021, 2)}, | ||
665 | + {QMI_FIXED_INTF(0x19d2, 0x1245, 4)}, | ||
666 | + {QMI_FIXED_INTF(0x19d2, 0x1247, 4)}, | ||
667 | + {QMI_FIXED_INTF(0x19d2, 0x1252, 4)}, | ||
668 | + {QMI_FIXED_INTF(0x19d2, 0x1254, 4)}, | ||
669 | + {QMI_FIXED_INTF(0x19d2, 0x1255, 3)}, | ||
670 | + {QMI_FIXED_INTF(0x19d2, 0x1255, 4)}, | ||
671 | + {QMI_FIXED_INTF(0x19d2, 0x1256, 4)}, | ||
672 | + {QMI_FIXED_INTF(0x19d2, 0x1401, 2)}, | ||
673 | {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ | ||
674 | + {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, | ||
675 | + {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, | ||
676 | + {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ | ||
677 | {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ | ||
678 | {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ | ||
679 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | ||
680 | diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c | ||
681 | index 1089639..2830ea2 100644 | ||
682 | --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c | ||
683 | +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c | ||
684 | @@ -1012,12 +1012,12 @@ static void iwl_calc_basic_rates(struct iwl_priv *priv, | ||
685 | * As a consequence, it's not as complicated as it sounds, just add | ||
686 | * any lower rates to the ACK rate bitmap. | ||
687 | */ | ||
688 | - if (IWL_RATE_11M_INDEX < lowest_present_ofdm) | ||
689 | - ofdm |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; | ||
690 | - if (IWL_RATE_5M_INDEX < lowest_present_ofdm) | ||
691 | - ofdm |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; | ||
692 | - if (IWL_RATE_2M_INDEX < lowest_present_ofdm) | ||
693 | - ofdm |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; | ||
694 | + if (IWL_RATE_11M_INDEX < lowest_present_cck) | ||
695 | + cck |= IWL_RATE_11M_MASK >> IWL_FIRST_CCK_RATE; | ||
696 | + if (IWL_RATE_5M_INDEX < lowest_present_cck) | ||
697 | + cck |= IWL_RATE_5M_MASK >> IWL_FIRST_CCK_RATE; | ||
698 | + if (IWL_RATE_2M_INDEX < lowest_present_cck) | ||
699 | + cck |= IWL_RATE_2M_MASK >> IWL_FIRST_CCK_RATE; | ||
700 | /* 1M already there or needed so always add */ | ||
701 | cck |= IWL_RATE_1M_MASK >> IWL_FIRST_CCK_RATE; | ||
702 | |||
703 | diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c | ||
704 | index 7502660..c87ef74 100644 | ||
705 | --- a/drivers/target/target_core_transport.c | ||
706 | +++ b/drivers/target/target_core_transport.c | ||
707 | @@ -1756,8 +1756,10 @@ void target_execute_cmd(struct se_cmd *cmd) | ||
708 | /* | ||
709 | * If the received CDB has aleady been aborted stop processing it here. | ||
710 | */ | ||
711 | - if (transport_check_aborted_status(cmd, 1)) | ||
712 | + if (transport_check_aborted_status(cmd, 1)) { | ||
713 | + complete(&cmd->t_transport_stop_comp); | ||
714 | return; | ||
715 | + } | ||
716 | |||
717 | /* | ||
718 | * Determine if IOCTL context caller in requesting the stopping of this | ||
719 | @@ -3029,7 +3031,7 @@ void transport_send_task_abort(struct se_cmd *cmd) | ||
720 | unsigned long flags; | ||
721 | |||
722 | spin_lock_irqsave(&cmd->t_state_lock, flags); | ||
723 | - if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { | ||
724 | + if (cmd->se_cmd_flags & (SCF_SENT_CHECK_CONDITION | SCF_SENT_DELAYED_TAS)) { | ||
725 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | ||
726 | return; | ||
727 | } | ||
728 | diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c | ||
729 | index 0984a21..15f60d0 100644 | ||
730 | --- a/kernel/sched/auto_group.c | ||
731 | +++ b/kernel/sched/auto_group.c | ||
732 | @@ -143,15 +143,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) | ||
733 | |||
734 | p->signal->autogroup = autogroup_kref_get(ag); | ||
735 | |||
736 | - if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled)) | ||
737 | - goto out; | ||
738 | - | ||
739 | t = p; | ||
740 | do { | ||
741 | sched_move_task(t); | ||
742 | } while_each_thread(p, t); | ||
743 | |||
744 | -out: | ||
745 | unlock_task_sighand(p, &flags); | ||
746 | autogroup_kref_put(prev); | ||
747 | } | ||
748 | diff --git a/kernel/sched/auto_group.h b/kernel/sched/auto_group.h | ||
749 | index 8bd0471..443232e 100644 | ||
750 | --- a/kernel/sched/auto_group.h | ||
751 | +++ b/kernel/sched/auto_group.h | ||
752 | @@ -4,11 +4,6 @@ | ||
753 | #include <linux/rwsem.h> | ||
754 | |||
755 | struct autogroup { | ||
756 | - /* | ||
757 | - * reference doesn't mean how many thread attach to this | ||
758 | - * autogroup now. It just stands for the number of task | ||
759 | - * could use this autogroup. | ||
760 | - */ | ||
761 | struct kref kref; | ||
762 | struct task_group *tg; | ||
763 | struct rw_semaphore lock; | ||
764 | diff --git a/kernel/workqueue.c b/kernel/workqueue.c | ||
765 | index 872bd6d..6d42247 100644 | ||
766 | --- a/kernel/workqueue.c | ||
767 | +++ b/kernel/workqueue.c | ||
768 | @@ -2266,8 +2266,10 @@ static int rescuer_thread(void *__wq) | ||
769 | repeat: | ||
770 | set_current_state(TASK_INTERRUPTIBLE); | ||
771 | |||
772 | - if (kthread_should_stop()) | ||
773 | + if (kthread_should_stop()) { | ||
774 | + __set_current_state(TASK_RUNNING); | ||
775 | return 0; | ||
776 | + } | ||
777 | |||
778 | /* | ||
779 | * See whether any cpu is asking for help. Unbounded | ||
780 | diff --git a/mm/memory-failure.c b/mm/memory-failure.c | ||
781 | index a6e2141..3dd21e2 100644 | ||
782 | --- a/mm/memory-failure.c | ||
783 | +++ b/mm/memory-failure.c | ||
784 | @@ -1474,9 +1474,17 @@ int soft_offline_page(struct page *page, int flags) | ||
785 | { | ||
786 | int ret; | ||
787 | unsigned long pfn = page_to_pfn(page); | ||
788 | + struct page *hpage = compound_trans_head(page); | ||
789 | |||
790 | if (PageHuge(page)) | ||
791 | return soft_offline_huge_page(page, flags); | ||
792 | + if (PageTransHuge(hpage)) { | ||
793 | + if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { | ||
794 | + pr_info("soft offline: %#lx: failed to split THP\n", | ||
795 | + pfn); | ||
796 | + return -EBUSY; | ||
797 | + } | ||
798 | + } | ||
799 | |||
800 | ret = get_any_page(page, pfn, flags); | ||
801 | if (ret < 0) | ||
802 | diff --git a/mm/sparse.c b/mm/sparse.c | ||
803 | index fac95f2..a83de2f 100644 | ||
804 | --- a/mm/sparse.c | ||
805 | +++ b/mm/sparse.c | ||
806 | @@ -617,7 +617,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | ||
807 | { | ||
808 | return; /* XXX: Not implemented yet */ | ||
809 | } | ||
810 | -static void free_map_bootmem(struct page *page, unsigned long nr_pages) | ||
811 | +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) | ||
812 | { | ||
813 | } | ||
814 | #else | ||
815 | @@ -658,10 +658,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) | ||
816 | get_order(sizeof(struct page) * nr_pages)); | ||
817 | } | ||
818 | |||
819 | -static void free_map_bootmem(struct page *page, unsigned long nr_pages) | ||
820 | +static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) | ||
821 | { | ||
822 | unsigned long maps_section_nr, removing_section_nr, i; | ||
823 | unsigned long magic; | ||
824 | + struct page *page = virt_to_page(memmap); | ||
825 | |||
826 | for (i = 0; i < nr_pages; i++, page++) { | ||
827 | magic = (unsigned long) page->lru.next; | ||
828 | @@ -710,13 +711,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap) | ||
829 | */ | ||
830 | |||
831 | if (memmap) { | ||
832 | - struct page *memmap_page; | ||
833 | - memmap_page = virt_to_page(memmap); | ||
834 | - | ||
835 | nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) | ||
836 | >> PAGE_SHIFT; | ||
837 | |||
838 | - free_map_bootmem(memmap_page, nr_pages); | ||
839 | + free_map_bootmem(memmap, nr_pages); | ||
840 | } | ||
841 | } | ||
842 | |||
843 | diff --git a/mm/vmscan.c b/mm/vmscan.c | ||
844 | index 40db7d1..be5a9c1 100644 | ||
845 | --- a/mm/vmscan.c | ||
846 | +++ b/mm/vmscan.c | ||
847 | @@ -2383,6 +2383,19 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc) | ||
848 | } while (memcg); | ||
849 | } | ||
850 | |||
851 | +static bool zone_balanced(struct zone *zone, int order, | ||
852 | + unsigned long balance_gap, int classzone_idx) | ||
853 | +{ | ||
854 | + if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) + | ||
855 | + balance_gap, classzone_idx, 0)) | ||
856 | + return false; | ||
857 | + | ||
858 | + if (COMPACTION_BUILD && order && !compaction_suitable(zone, order)) | ||
859 | + return false; | ||
860 | + | ||
861 | + return true; | ||
862 | +} | ||
863 | + | ||
864 | /* | ||
865 | * pgdat_balanced is used when checking if a node is balanced for high-order | ||
866 | * allocations. Only zones that meet watermarks and are in a zone allowed | ||
867 | @@ -2461,8 +2474,7 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, | ||
868 | continue; | ||
869 | } | ||
870 | |||
871 | - if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), | ||
872 | - i, 0)) | ||
873 | + if (!zone_balanced(zone, order, 0, i)) | ||
874 | all_zones_ok = false; | ||
875 | else | ||
876 | balanced += zone->present_pages; | ||
877 | @@ -2571,8 +2583,7 @@ loop_again: | ||
878 | break; | ||
879 | } | ||
880 | |||
881 | - if (!zone_watermark_ok_safe(zone, order, | ||
882 | - high_wmark_pages(zone), 0, 0)) { | ||
883 | + if (!zone_balanced(zone, order, 0, 0)) { | ||
884 | end_zone = i; | ||
885 | break; | ||
886 | } else { | ||
887 | @@ -2648,9 +2659,8 @@ loop_again: | ||
888 | testorder = 0; | ||
889 | |||
890 | if ((buffer_heads_over_limit && is_highmem_idx(i)) || | ||
891 | - !zone_watermark_ok_safe(zone, testorder, | ||
892 | - high_wmark_pages(zone) + balance_gap, | ||
893 | - end_zone, 0)) { | ||
894 | + !zone_balanced(zone, testorder, | ||
895 | + balance_gap, end_zone)) { | ||
896 | shrink_zone(zone, &sc); | ||
897 | |||
898 | reclaim_state->reclaimed_slab = 0; | ||
899 | @@ -2677,8 +2687,7 @@ loop_again: | ||
900 | continue; | ||
901 | } | ||
902 | |||
903 | - if (!zone_watermark_ok_safe(zone, testorder, | ||
904 | - high_wmark_pages(zone), end_zone, 0)) { | ||
905 | + if (!zone_balanced(zone, testorder, 0, end_zone)) { | ||
906 | all_zones_ok = 0; | ||
907 | /* | ||
908 | * We are still under min water mark. This | ||
909 | diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c | ||
910 | index 635c325..2138dc3 100644 | ||
911 | --- a/net/mac80211/offchannel.c | ||
912 | +++ b/net/mac80211/offchannel.c | ||
913 | @@ -453,8 +453,6 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata) | ||
914 | list_move_tail(&roc->list, &tmp_list); | ||
915 | roc->abort = true; | ||
916 | } | ||
917 | - | ||
918 | - ieee80211_start_next_roc(local); | ||
919 | mutex_unlock(&local->mtx); | ||
920 | |||
921 | list_for_each_entry_safe(roc, tmp, &tmp_list, list) { |