Magellan Linux

Contents of /trunk/kernel-alx/patches-3.8/0112-3.8.13-all-fixes.patch



Revision 2176
Thu May 16 13:33:08 2013 UTC by niro
File size: 112173 bytes
-linux-3.8.13
1 diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
2 index 7a32976..01b20a2 100644
3 --- a/arch/arm/xen/enlighten.c
4 +++ b/arch/arm/xen/enlighten.c
5 @@ -237,7 +237,7 @@ static int __init xen_init_events(void)
6 xen_init_IRQ();
7
8 if (request_percpu_irq(xen_events_irq, xen_arm_callback,
9 - "events", xen_vcpu)) {
10 + "events", &xen_vcpu)) {
11 pr_err("Error requesting IRQ %d\n", xen_events_irq);
12 return -EINVAL;
13 }
14 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
15 index afadae6..0782eaf 100644
16 --- a/arch/arm64/mm/fault.c
17 +++ b/arch/arm64/mm/fault.c
18 @@ -148,6 +148,7 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
19 #define VM_FAULT_BADACCESS 0x020000
20
21 #define ESR_WRITE (1 << 6)
22 +#define ESR_CM (1 << 8)
23 #define ESR_LNX_EXEC (1 << 24)
24
25 /*
26 @@ -206,7 +207,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
27 struct task_struct *tsk;
28 struct mm_struct *mm;
29 int fault, sig, code;
30 - int write = esr & ESR_WRITE;
31 + bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
32 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
33 (write ? FAULT_FLAG_WRITE : 0);
34
35 diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
36 index 51fb00a..4f440a65 100644
37 --- a/arch/powerpc/include/asm/ppc-opcode.h
38 +++ b/arch/powerpc/include/asm/ppc-opcode.h
39 @@ -112,6 +112,10 @@
40 #define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
41 #define PPC_INST_MTSPR_DSCR 0x7c1103a6
42 #define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
43 +#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
44 +#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
45 +#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
46 +#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
47 #define PPC_INST_SLBFEE 0x7c0007a7
48
49 #define PPC_INST_STRING 0x7c00042a
50 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
51 index 3251840..6686794 100644
52 --- a/arch/powerpc/kernel/traps.c
53 +++ b/arch/powerpc/kernel/traps.c
54 @@ -961,7 +961,10 @@ static int emulate_instruction(struct pt_regs *regs)
55
56 #ifdef CONFIG_PPC64
57 /* Emulate the mfspr rD, DSCR. */
58 - if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
59 + if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
60 + PPC_INST_MFSPR_DSCR_USER) ||
61 + ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
62 + PPC_INST_MFSPR_DSCR)) &&
63 cpu_has_feature(CPU_FTR_DSCR)) {
64 PPC_WARN_EMULATED(mfdscr, regs);
65 rd = (instword >> 21) & 0x1f;
66 @@ -969,7 +972,10 @@ static int emulate_instruction(struct pt_regs *regs)
67 return 0;
68 }
69 /* Emulate the mtspr DSCR, rD. */
70 - if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
71 + if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
72 + PPC_INST_MTSPR_DSCR_USER) ||
73 + ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
74 + PPC_INST_MTSPR_DSCR)) &&
75 cpu_has_feature(CPU_FTR_DSCR)) {
76 PPC_WARN_EMULATED(mtdscr, regs);
77 rd = (instword >> 21) & 0x1f;
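
The traps.c hunks above extend the privileged mfspr/mtspr DSCR match with a second mask-and-compare for the user SPR encoding. Below is a minimal standalone C sketch of that decoding, not part of the patch itself; it reuses the constants from the ppc-opcode.h hunk and assumes 0x7c1102a6 for PPC_INST_MFSPR_DSCR, which this excerpt does not show.

#include <stdint.h>
#include <stdio.h>

/* Constants from the ppc-opcode.h hunk; MFSPR_DSCR itself is assumed. */
#define PPC_INST_MFSPR_DSCR           0x7c1102a6  /* assumed, not in excerpt */
#define PPC_INST_MFSPR_DSCR_MASK      0xfc1fffff
#define PPC_INST_MFSPR_DSCR_USER      0x7c0302a6
#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff

/* Matches mfspr rD,DSCR in either the privileged (SPR 17) or the user
 * (SPR 3) encoding; the mask zeroes the rD field (bits 21-25) so any
 * destination register matches. */
static int is_mfspr_dscr(uint32_t instword)
{
    return ((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
            PPC_INST_MFSPR_DSCR_USER) ||
           ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
            PPC_INST_MFSPR_DSCR);
}

int main(void)
{
    uint32_t inst = PPC_INST_MFSPR_DSCR_USER | (5 << 21); /* mfspr r5,3 */
    if (is_mfspr_dscr(inst))
        printf("rd = %u\n", (unsigned)((inst >> 21) & 0x1f)); /* rd = 5 */
    return 0;
}

Masking before comparing is what lets one constant cover all 32 destination registers; the rd extraction afterwards is the same shift seen in the emulation code above.
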
78 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
79 index bba87ca..6a252c4 100644
80 --- a/arch/powerpc/mm/numa.c
81 +++ b/arch/powerpc/mm/numa.c
82 @@ -201,7 +201,7 @@ int __node_distance(int a, int b)
83 int distance = LOCAL_DISTANCE;
84
85 if (!form1_affinity)
86 - return distance;
87 + return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
88
89 for (i = 0; i < distance_ref_points_depth; i++) {
90 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
91 diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
92 index da02e9c..d978353 100644
93 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
94 +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
95 @@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void)
96 * - in case there is no HW filter
97 * - in case the HW filter has errata or limitations
98 */
99 -static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
100 +static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
101 {
102 u64 br_type = event->attr.branch_sample_type;
103 int mask = 0;
104 @@ -318,8 +318,11 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
105 if (br_type & PERF_SAMPLE_BRANCH_USER)
106 mask |= X86_BR_USER;
107
108 - if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
109 + if (br_type & PERF_SAMPLE_BRANCH_KERNEL) {
110 + if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
111 + return -EACCES;
112 mask |= X86_BR_KERNEL;
113 + }
114
115 /* we ignore BRANCH_HV here */
116
117 @@ -339,6 +342,8 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
118 * be used by fixup code for some CPU
119 */
120 event->hw.branch_reg.reg = mask;
121 +
122 + return 0;
123 }
124
125 /*
126 @@ -386,7 +391,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
127 /*
128 * setup SW LBR filter
129 */
130 - intel_pmu_setup_sw_lbr_filter(event);
131 + ret = intel_pmu_setup_sw_lbr_filter(event);
132 + if (ret)
133 + return ret;
134
135 /*
136 * setup HW LBR filter, if any
137 @@ -442,8 +449,18 @@ static int branch_type(unsigned long from, unsigned long to)
138 return X86_BR_NONE;
139
140 addr = buf;
141 - } else
142 - addr = (void *)from;
143 + } else {
144 + /*
145 + * The LBR logs any address in the IP, even if the IP just
146 + * faulted. This means userspace can control the from address.
147 + * Ensure we don't blindy read any address by validating it is
148 + * a known text address.
149 + */
150 + if (kernel_text_address(from))
151 + addr = (void *)from;
152 + else
153 + return X86_BR_NONE;
154 + }
155
156 /*
157 * decoder needs to know the ABI especially
158 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
159 index b43200d..3e091f0 100644
160 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
161 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
162 @@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
163 static int __init uncore_type_init(struct intel_uncore_type *type)
164 {
165 struct intel_uncore_pmu *pmus;
166 - struct attribute_group *events_group;
167 + struct attribute_group *attr_group;
168 struct attribute **attrs;
169 int i, j;
170
171 @@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
172 while (type->event_descs[i].attr.attr.name)
173 i++;
174
175 - events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
176 - sizeof(*events_group), GFP_KERNEL);
177 - if (!events_group)
178 + attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
179 + sizeof(*attr_group), GFP_KERNEL);
180 + if (!attr_group)
181 goto fail;
182
183 - attrs = (struct attribute **)(events_group + 1);
184 - events_group->name = "events";
185 - events_group->attrs = attrs;
186 + attrs = (struct attribute **)(attr_group + 1);
187 + attr_group->name = "events";
188 + attr_group->attrs = attrs;
189
190 for (j = 0; j < i; j++)
191 attrs[j] = &type->event_descs[j].attr.attr;
192
193 - type->events_group = events_group;
194 + type->events_group = attr_group;
195 }
196
197 type->pmu_group = &uncore_pmu_attr_group;
198 @@ -2853,6 +2853,7 @@ static int __init uncore_cpu_init(void)
199 msr_uncores = nhm_msr_uncores;
200 break;
201 case 42: /* Sandy Bridge */
202 + case 58: /* Ivy Bridge */
203 if (snb_uncore_cbox.num_boxes > max_cores)
204 snb_uncore_cbox.num_boxes = max_cores;
205 msr_uncores = snb_msr_uncores;
206 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
207 index d7aea41..7d7a36d 100644
208 --- a/arch/x86/mm/init.c
209 +++ b/arch/x86/mm/init.c
210 @@ -45,11 +45,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
211 int i;
212 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
213 unsigned long start = 0, good_end;
214 + unsigned long pgd_extra = 0;
215 phys_addr_t base;
216
217 for (i = 0; i < nr_range; i++) {
218 unsigned long range, extra;
219
220 + if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT))
221 + pgd_extra++;
222 +
223 range = mr[i].end - mr[i].start;
224 puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
225
226 @@ -74,6 +78,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
227 tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
228 tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
229 tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
230 + tables += (pgd_extra * PAGE_SIZE);
231
232 #ifdef CONFIG_X86_32
233 /* for fixmap */
234 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
235 index 0ca1ca7..c9303ed 100644
236 --- a/drivers/edac/edac_mc_sysfs.c
237 +++ b/drivers/edac/edac_mc_sysfs.c
238 @@ -330,17 +330,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = {
239 };
240
241 /* possible dynamic channel ce_count attribute files */
242 -DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
243 +DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
244 channel_ce_count_show, NULL, 0);
245 -DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
246 +DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
247 channel_ce_count_show, NULL, 1);
248 -DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
249 +DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
250 channel_ce_count_show, NULL, 2);
251 -DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
252 +DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
253 channel_ce_count_show, NULL, 3);
254 -DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
255 +DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
256 channel_ce_count_show, NULL, 4);
257 -DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
258 +DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
259 channel_ce_count_show, NULL, 5);
260
261 /* Total possible dynamic ce_count attribute file table */
262 diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
263 index 5ccf984..cac9c9a 100644
264 --- a/drivers/gpu/drm/ast/ast_drv.h
265 +++ b/drivers/gpu/drm/ast/ast_drv.h
266 @@ -239,6 +239,8 @@ struct ast_fbdev {
267 void *sysram;
268 int size;
269 struct ttm_bo_kmap_obj mapping;
270 + int x1, y1, x2, y2; /* dirty rect */
271 + spinlock_t dirty_lock;
272 };
273
274 #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
275 diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
276 index d9ec779..9138678 100644
277 --- a/drivers/gpu/drm/ast/ast_fb.c
278 +++ b/drivers/gpu/drm/ast/ast_fb.c
279 @@ -52,16 +52,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
280 int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
281 int ret;
282 bool unmap = false;
283 + bool store_for_later = false;
284 + int x2, y2;
285 + unsigned long flags;
286
287 obj = afbdev->afb.obj;
288 bo = gem_to_ast_bo(obj);
289
290 + /*
291 + * try and reserve the BO, if we fail with busy
292 + * then the BO is being moved and we should
293 + * store up the damage until later.
294 + */
295 ret = ast_bo_reserve(bo, true);
296 if (ret) {
297 - DRM_ERROR("failed to reserve fb bo\n");
298 + if (ret != -EBUSY)
299 + return;
300 +
301 + store_for_later = true;
302 + }
303 +
304 + x2 = x + width - 1;
305 + y2 = y + height - 1;
306 + spin_lock_irqsave(&afbdev->dirty_lock, flags);
307 +
308 + if (afbdev->y1 < y)
309 + y = afbdev->y1;
310 + if (afbdev->y2 > y2)
311 + y2 = afbdev->y2;
312 + if (afbdev->x1 < x)
313 + x = afbdev->x1;
314 + if (afbdev->x2 > x2)
315 + x2 = afbdev->x2;
316 +
317 + if (store_for_later) {
318 + afbdev->x1 = x;
319 + afbdev->x2 = x2;
320 + afbdev->y1 = y;
321 + afbdev->y2 = y2;
322 + spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
323 return;
324 }
325
326 + afbdev->x1 = afbdev->y1 = INT_MAX;
327 + afbdev->x2 = afbdev->y2 = 0;
328 + spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
329 +
330 if (!bo->kmap.virtual) {
331 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
332 if (ret) {
333 @@ -71,10 +107,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
334 }
335 unmap = true;
336 }
337 - for (i = y; i < y + height; i++) {
338 + for (i = y; i <= y2; i++) {
339 /* assume equal stride for now */
340 src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
341 - memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
342 + memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);
343
344 }
345 if (unmap)
346 @@ -305,6 +341,7 @@ int ast_fbdev_init(struct drm_device *dev)
347
348 ast->fbdev = afbdev;
349 afbdev->helper.funcs = &ast_fb_helper_funcs;
350 + spin_lock_init(&afbdev->dirty_lock);
351 ret = drm_fb_helper_init(dev, &afbdev->helper,
352 1, 1);
353 if (ret) {
354 diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
355 index 3602731..09da339 100644
356 --- a/drivers/gpu/drm/ast/ast_ttm.c
357 +++ b/drivers/gpu/drm/ast/ast_ttm.c
358 @@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)
359
360 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
361 if (ret) {
362 - if (ret != -ERESTARTSYS)
363 + if (ret != -ERESTARTSYS && ret != -EBUSY)
364 DRM_ERROR("reserve failed %p\n", bo);
365 return ret;
366 }
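
The ast changes above (mirrored for cirrus and mgag200 below) defer framebuffer damage while the BO is busy: the dirty rectangle is widened under a spinlock and flushed on a later successful reserve. Here is a user-space C sketch of that accumulate-and-flush pattern, not part of the patch; a pthread mutex stands in for the kernel's spin_lock_irqsave.

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fbdev {
    /* empty-rect convention from the patch: x1/y1 = INT_MAX, x2/y2 = 0 */
    int x1, y1, x2, y2;
    pthread_mutex_t dirty_lock;
};

/* Grow the stored rect to cover [x,y]..[x2,y2]; if the buffer is busy
 * (store_for_later), keep the union for a later flush and return false. */
static bool dirty_update(struct fbdev *fb, int x, int y, int w, int h,
                         bool store_for_later)
{
    int x2 = x + w - 1, y2 = y + h - 1;

    pthread_mutex_lock(&fb->dirty_lock);
    if (fb->y1 < y)  y  = fb->y1;
    if (fb->y2 > y2) y2 = fb->y2;
    if (fb->x1 < x)  x  = fb->x1;
    if (fb->x2 > x2) x2 = fb->x2;

    if (store_for_later) {
        fb->x1 = x; fb->x2 = x2; fb->y1 = y; fb->y2 = y2;
        pthread_mutex_unlock(&fb->dirty_lock);
        return false;
    }

    /* reset to "empty" and copy rows y..y2, columns x..x2 */
    fb->x1 = fb->y1 = INT_MAX;
    fb->x2 = fb->y2 = 0;
    pthread_mutex_unlock(&fb->dirty_lock);
    printf("flush rows %d..%d, cols %d..%d\n", y, y2, x, x2);
    return true;
}

int main(void)
{
    struct fbdev fb = { INT_MAX, INT_MAX, 0, 0, PTHREAD_MUTEX_INITIALIZER };
    dirty_update(&fb, 10, 10, 4, 4, true);  /* busy: union stored */
    dirty_update(&fb, 0, 0, 2, 2, false);   /* flushes union 0..13 */
    return 0;
}

Encoding "empty" as x1/y1 = INT_MAX and x2/y2 = 0 means the min/max merge needs no special case for the first rectangle, which is why the drivers reset to exactly those values after each flush.
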
367 diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
368 index 6e0cc72..7ca0595 100644
369 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h
370 +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
371 @@ -154,6 +154,8 @@ struct cirrus_fbdev {
372 struct list_head fbdev_list;
373 void *sysram;
374 int size;
375 + int x1, y1, x2, y2; /* dirty rect */
376 + spinlock_t dirty_lock;
377 };
378
379 struct cirrus_bo {
380 diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
381 index 6c6b4c8..1e64d6f 100644
382 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
383 +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
384 @@ -26,16 +26,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
385 int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
386 int ret;
387 bool unmap = false;
388 + bool store_for_later = false;
389 + int x2, y2;
390 + unsigned long flags;
391
392 obj = afbdev->gfb.obj;
393 bo = gem_to_cirrus_bo(obj);
394
395 + /*
396 + * try and reserve the BO, if we fail with busy
397 + * then the BO is being moved and we should
398 + * store up the damage until later.
399 + */
400 ret = cirrus_bo_reserve(bo, true);
401 if (ret) {
402 - DRM_ERROR("failed to reserve fb bo\n");
403 + if (ret != -EBUSY)
404 + return;
405 + store_for_later = true;
406 + }
407 +
408 + x2 = x + width - 1;
409 + y2 = y + height - 1;
410 + spin_lock_irqsave(&afbdev->dirty_lock, flags);
411 +
412 + if (afbdev->y1 < y)
413 + y = afbdev->y1;
414 + if (afbdev->y2 > y2)
415 + y2 = afbdev->y2;
416 + if (afbdev->x1 < x)
417 + x = afbdev->x1;
418 + if (afbdev->x2 > x2)
419 + x2 = afbdev->x2;
420 +
421 + if (store_for_later) {
422 + afbdev->x1 = x;
423 + afbdev->x2 = x2;
424 + afbdev->y1 = y;
425 + afbdev->y2 = y2;
426 + spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
427 return;
428 }
429
430 + afbdev->x1 = afbdev->y1 = INT_MAX;
431 + afbdev->x2 = afbdev->y2 = 0;
432 + spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
433 +
434 if (!bo->kmap.virtual) {
435 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
436 if (ret) {
437 @@ -282,6 +317,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
438
439 cdev->mode_info.gfbdev = gfbdev;
440 gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
441 + spin_lock_init(&gfbdev->dirty_lock);
442
443 ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
444 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
445 diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
446 index 1413a26..2ed8cfc 100644
447 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
448 +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
449 @@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)
450
451 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
452 if (ret) {
453 - if (ret != -ERESTARTSYS)
454 + if (ret != -ERESTARTSYS && ret != -EBUSY)
455 DRM_ERROR("reserve failed %p\n", bo);
456 return ret;
457 }
458 diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
459 index 24efae4..539bae9 100644
460 --- a/drivers/gpu/drm/drm_gem.c
461 +++ b/drivers/gpu/drm/drm_gem.c
462 @@ -205,11 +205,11 @@ static void
463 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
464 {
465 if (obj->import_attach) {
466 - drm_prime_remove_imported_buf_handle(&filp->prime,
467 + drm_prime_remove_buf_handle(&filp->prime,
468 obj->import_attach->dmabuf);
469 }
470 if (obj->export_dma_buf) {
471 - drm_prime_remove_imported_buf_handle(&filp->prime,
472 + drm_prime_remove_buf_handle(&filp->prime,
473 obj->export_dma_buf);
474 }
475 }
476 diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
477 index 7f12573..4f6439d 100644
478 --- a/drivers/gpu/drm/drm_prime.c
479 +++ b/drivers/gpu/drm/drm_prime.c
480 @@ -61,6 +61,7 @@ struct drm_prime_member {
481 struct dma_buf *dma_buf;
482 uint32_t handle;
483 };
484 +static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
485
486 int drm_gem_prime_handle_to_fd(struct drm_device *dev,
487 struct drm_file *file_priv, uint32_t handle, uint32_t flags,
488 @@ -68,7 +69,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
489 {
490 struct drm_gem_object *obj;
491 void *buf;
492 - int ret;
493 + int ret = 0;
494 + struct dma_buf *dmabuf;
495
496 obj = drm_gem_object_lookup(dev, file_priv, handle);
497 if (!obj)
498 @@ -77,43 +79,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
499 mutex_lock(&file_priv->prime.lock);
500 /* re-export the original imported object */
501 if (obj->import_attach) {
502 - get_dma_buf(obj->import_attach->dmabuf);
503 - *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
504 - drm_gem_object_unreference_unlocked(obj);
505 - mutex_unlock(&file_priv->prime.lock);
506 - return 0;
507 + dmabuf = obj->import_attach->dmabuf;
508 + goto out_have_obj;
509 }
510
511 if (obj->export_dma_buf) {
512 - get_dma_buf(obj->export_dma_buf);
513 - *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
514 - drm_gem_object_unreference_unlocked(obj);
515 - } else {
516 - buf = dev->driver->gem_prime_export(dev, obj, flags);
517 - if (IS_ERR(buf)) {
518 - /* normally the created dma-buf takes ownership of the ref,
519 - * but if that fails then drop the ref
520 - */
521 - drm_gem_object_unreference_unlocked(obj);
522 - mutex_unlock(&file_priv->prime.lock);
523 - return PTR_ERR(buf);
524 - }
525 - obj->export_dma_buf = buf;
526 - *prime_fd = dma_buf_fd(buf, flags);
527 + dmabuf = obj->export_dma_buf;
528 + goto out_have_obj;
529 }
530 +
531 + buf = dev->driver->gem_prime_export(dev, obj, flags);
532 + if (IS_ERR(buf)) {
533 + /* normally the created dma-buf takes ownership of the ref,
534 + * but if that fails then drop the ref
535 + */
536 + ret = PTR_ERR(buf);
537 + goto out;
538 + }
539 + obj->export_dma_buf = buf;
540 +
541 /* if we've exported this buffer the cheat and add it to the import list
542 * so we get the correct handle back
543 */
544 - ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
545 - obj->export_dma_buf, handle);
546 - if (ret) {
547 - drm_gem_object_unreference_unlocked(obj);
548 - mutex_unlock(&file_priv->prime.lock);
549 - return ret;
550 - }
551 + ret = drm_prime_add_buf_handle(&file_priv->prime,
552 + obj->export_dma_buf, handle);
553 + if (ret)
554 + goto out;
555
556 + *prime_fd = dma_buf_fd(buf, flags);
557 mutex_unlock(&file_priv->prime.lock);
558 return 0;
559 +
560 +out_have_obj:
561 + get_dma_buf(dmabuf);
562 + *prime_fd = dma_buf_fd(dmabuf, flags);
563 +out:
564 + drm_gem_object_unreference_unlocked(obj);
565 + mutex_unlock(&file_priv->prime.lock);
566 + return ret;
567 }
568 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
569
570 @@ -130,7 +133,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
571
572 mutex_lock(&file_priv->prime.lock);
573
574 - ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
575 + ret = drm_prime_lookup_buf_handle(&file_priv->prime,
576 dma_buf, handle);
577 if (!ret) {
578 ret = 0;
579 @@ -149,7 +152,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
580 if (ret)
581 goto out_put;
582
583 - ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
584 + ret = drm_prime_add_buf_handle(&file_priv->prime,
585 dma_buf, *handle);
586 if (ret)
587 goto fail;
588 @@ -307,7 +310,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
589 }
590 EXPORT_SYMBOL(drm_prime_destroy_file_private);
591
592 -int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
593 +static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
594 {
595 struct drm_prime_member *member;
596
597 @@ -315,14 +318,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv
598 if (!member)
599 return -ENOMEM;
600
601 + get_dma_buf(dma_buf);
602 member->dma_buf = dma_buf;
603 member->handle = handle;
604 list_add(&member->entry, &prime_fpriv->head);
605 return 0;
606 }
607 -EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);
608
609 -int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
610 +int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
611 {
612 struct drm_prime_member *member;
613
614 @@ -334,19 +337,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fp
615 }
616 return -ENOENT;
617 }
618 -EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
619 +EXPORT_SYMBOL(drm_prime_lookup_buf_handle);
620
621 -void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
622 +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
623 {
624 struct drm_prime_member *member, *safe;
625
626 mutex_lock(&prime_fpriv->lock);
627 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
628 if (member->dma_buf == dma_buf) {
629 + dma_buf_put(dma_buf);
630 list_del(&member->entry);
631 kfree(member);
632 }
633 }
634 mutex_unlock(&prime_fpriv->lock);
635 }
636 -EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
637 +EXPORT_SYMBOL(drm_prime_remove_buf_handle);
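
The drm_prime.c rework above makes the per-file handle list own a reference on each tracked dma_buf: drm_prime_add_buf_handle() now takes a reference and drm_prime_remove_buf_handle() drops it. A self-contained C sketch of that ownership rule follows, not part of the patch; a plain counter stands in for get_dma_buf()/dma_buf_put().

#include <stdio.h>
#include <stdlib.h>

struct buf { int refcount; };

static void buf_get(struct buf *b) { b->refcount++; }
static void buf_put(struct buf *b)
{
    if (--b->refcount == 0) {
        printf("freeing buf\n");
        free(b);
    }
}

struct member {
    struct buf *buf;
    unsigned handle;
    struct member *next;
};

/* The list owns a reference: take it on add... */
static struct member *add_handle(struct member *head, struct buf *b,
                                 unsigned handle)
{
    struct member *m = malloc(sizeof(*m));
    if (!m)
        return head;              /* caller keeps its own reference */
    buf_get(b);
    m->buf = b;
    m->handle = handle;
    m->next = head;
    return m;
}

/* ...and drop it on remove, mirroring the dma_buf_put() added above. */
static struct member *remove_handle(struct member *head, struct buf *b)
{
    struct member **pp = &head, *m;
    while ((m = *pp)) {
        if (m->buf == b) {
            *pp = m->next;
            buf_put(m->buf);
            free(m);
        } else {
            pp = &m->next;
        }
    }
    return head;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b));
    b->refcount = 1;                   /* creator's reference */
    struct member *list = add_handle(NULL, b, 42);
    list = remove_handle(list, b);     /* list's reference dropped */
    buf_put(b);                        /* creator's reference; freed here */
    return 0;
}
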
638 diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
639 index 8652cdf..029eccf 100644
640 --- a/drivers/gpu/drm/gma500/psb_irq.c
641 +++ b/drivers/gpu/drm/gma500/psb_irq.c
642 @@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)
643
644 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);
645
646 - if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
647 + if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
648 dsp_int = 1;
649
650 /* FIXME: Handle Medfield
651 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
652 index 7339a4b..e78419f 100644
653 --- a/drivers/gpu/drm/i915/i915_drv.h
654 +++ b/drivers/gpu/drm/i915/i915_drv.h
655 @@ -711,6 +711,7 @@ typedef struct drm_i915_private {
656 unsigned int int_crt_support:1;
657 unsigned int lvds_use_ssc:1;
658 unsigned int display_clock_mode:1;
659 + unsigned int fdi_rx_polarity_inverted:1;
660 int lvds_ssc_freq;
661 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
662 unsigned int lvds_val; /* used for checking LVDS channel mode */
663 @@ -774,6 +775,7 @@ typedef struct drm_i915_private {
664 unsigned long gtt_start;
665 unsigned long gtt_mappable_end;
666 unsigned long gtt_end;
667 + unsigned long stolen_base; /* limited to low memory (32-bit) */
668
669 struct io_mapping *gtt_mapping;
670 phys_addr_t gtt_base_addr;
671 diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
672 index de45b60..3b9d18b 100644
673 --- a/drivers/gpu/drm/i915/i915_gem.c
674 +++ b/drivers/gpu/drm/i915/i915_gem.c
675 @@ -2662,17 +2662,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
676 return fence - dev_priv->fence_regs;
677 }
678
679 +static void i915_gem_write_fence__ipi(void *data)
680 +{
681 + wbinvd();
682 +}
683 +
684 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
685 struct drm_i915_fence_reg *fence,
686 bool enable)
687 {
688 - struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
689 - int reg = fence_number(dev_priv, fence);
690 -
691 - i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
692 + struct drm_device *dev = obj->base.dev;
693 + struct drm_i915_private *dev_priv = dev->dev_private;
694 + int fence_reg = fence_number(dev_priv, fence);
695 +
696 + /* In order to fully serialize access to the fenced region and
697 + * the update to the fence register we need to take extreme
698 + * measures on SNB+. In theory, the write to the fence register
699 + * flushes all memory transactions before, and coupled with the
700 + * mb() placed around the register write we serialise all memory
701 + * operations with respect to the changes in the tiler. Yet, on
702 + * SNB+ we need to take a step further and emit an explicit wbinvd()
703 + * on each processor in order to manually flush all memory
704 + * transactions before updating the fence register.
705 + */
706 + if (HAS_LLC(obj->base.dev))
707 + on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
708 + i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);
709
710 if (enable) {
711 - obj->fence_reg = reg;
712 + obj->fence_reg = fence_reg;
713 fence->obj = obj;
714 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
715 } else {
716 diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
717 index a3f06bc..d8ac0a3 100644
718 --- a/drivers/gpu/drm/i915/i915_gem_context.c
719 +++ b/drivers/gpu/drm/i915/i915_gem_context.c
720 @@ -157,6 +157,13 @@ create_hw_context(struct drm_device *dev,
721 return ERR_PTR(-ENOMEM);
722 }
723
724 + if (INTEL_INFO(dev)->gen >= 7) {
725 + ret = i915_gem_object_set_cache_level(ctx->obj,
726 + I915_CACHE_LLC_MLC);
727 + if (ret)
728 + goto err_out;
729 + }
730 +
731 /* The ring associated with the context object is handled by the normal
732 * object tracking code. We give an initial ring value simple to pass an
733 * assertion in the context switch code.
734 diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
735 index 8e91083..be24312 100644
736 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c
737 +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
738 @@ -42,56 +42,50 @@
739 * for is a boon.
740 */
741
742 -#define PTE_ADDRESS_MASK 0xfffff000
743 -#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
744 -#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
745 -#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
746 -#define PTE_MAPPING_TYPE_CACHED (3 << 1)
747 -#define PTE_MAPPING_TYPE_MASK (3 << 1)
748 -#define PTE_VALID (1 << 0)
749 -
750 -/**
751 - * i915_stolen_to_phys - take an offset into stolen memory and turn it into
752 - * a physical one
753 - * @dev: drm device
754 - * @offset: address to translate
755 - *
756 - * Some chip functions require allocations from stolen space and need the
757 - * physical address of the memory in question.
758 - */
759 -static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
760 +static unsigned long i915_stolen_to_physical(struct drm_device *dev)
761 {
762 struct drm_i915_private *dev_priv = dev->dev_private;
763 struct pci_dev *pdev = dev_priv->bridge_dev;
764 u32 base;
765
766 -#if 0
767 /* On the machines I have tested the Graphics Base of Stolen Memory
768 - * is unreliable, so compute the base by subtracting the stolen memory
769 - * from the Top of Low Usable DRAM which is where the BIOS places
770 - * the graphics stolen memory.
771 + * is unreliable, so on those compute the base by subtracting the
772 + * stolen memory from the Top of Low Usable DRAM which is where the
773 + * BIOS places the graphics stolen memory.
774 + *
775 + * On gen2, the layout is slightly different with the Graphics Segment
776 + * immediately following Top of Memory (or Top of Usable DRAM). Note
777 + * it appears that TOUD is only reported by 865g, so we just use the
778 + * top of memory as determined by the e820 probe.
779 + *
780 + * XXX gen2 requires an unavailable symbol and 945gm fails with
781 + * its value of TOLUD.
782 */
783 - if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
784 - /* top 32bits are reserved = 0 */
785 + base = 0;
786 + if (INTEL_INFO(dev)->gen >= 6) {
787 + /* Read Base Data of Stolen Memory Register (BDSM) directly.
788 + * Note that there is also a MCHBAR miror at 0x1080c0 or
789 + * we could use device 2:0x5c instead.
790 + */
791 + pci_read_config_dword(pdev, 0xB0, &base);
792 + base &= ~4095; /* lower bits used for locking register */
793 + } else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
794 + /* Read Graphics Base of Stolen Memory directly */
795 pci_read_config_dword(pdev, 0xA4, &base);
796 - } else {
797 - /* XXX presume 8xx is the same as i915 */
798 - pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
799 - }
800 -#else
801 - if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
802 - u16 val;
803 - pci_read_config_word(pdev, 0xb0, &val);
804 - base = val >> 4 << 20;
805 - } else {
806 +#if 0
807 + } else if (IS_GEN3(dev)) {
808 u8 val;
809 + /* Stolen is immediately below Top of Low Usable DRAM */
810 pci_read_config_byte(pdev, 0x9c, &val);
811 base = val >> 3 << 27;
812 - }
813 - base -= dev_priv->mm.gtt->stolen_size;
814 + base -= dev_priv->mm.gtt->stolen_size;
815 + } else {
816 + /* Stolen is immediately above Top of Memory */
817 + base = max_low_pfn_mapped << PAGE_SHIFT;
818 #endif
819 + }
820
821 - return base + offset;
822 + return base;
823 }
824
825 static void i915_warn_stolen(struct drm_device *dev)
826 @@ -116,7 +110,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
827 if (!compressed_fb)
828 goto err;
829
830 - cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
831 + cfb_base = dev_priv->mm.stolen_base + compressed_fb->start;
832 if (!cfb_base)
833 goto err_fb;
834
835 @@ -129,7 +123,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
836 if (!compressed_llb)
837 goto err_fb;
838
839 - ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
840 + ll_base = dev_priv->mm.stolen_base + compressed_llb->start;
841 if (!ll_base)
842 goto err_llb;
843 }
844 @@ -148,7 +142,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
845 }
846
847 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
848 - cfb_base, ll_base, size >> 20);
849 + (long)cfb_base, (long)ll_base, size >> 20);
850 return;
851
852 err_llb:
853 @@ -180,6 +174,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
854 struct drm_i915_private *dev_priv = dev->dev_private;
855 unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
856
857 + dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
858 + if (dev_priv->mm.stolen_base == 0)
859 + return 0;
860 +
861 + DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
862 + dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
863 +
864 /* Basic memrange allocator for stolen space */
865 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
866
867 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
868 index 2bfd05a..ce70f0a 100644
869 --- a/drivers/gpu/drm/i915/i915_reg.h
870 +++ b/drivers/gpu/drm/i915/i915_reg.h
871 @@ -3839,7 +3839,7 @@
872 #define _TRANSB_CHICKEN2 0xf1064
873 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
874 #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
875 -
876 +#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)
877
878 #define SOUTH_CHICKEN1 0xc2000
879 #define FDIA_PHASE_SYNC_SHIFT_OVR 19
880 diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
881 index 55ffba1..bd83391 100644
882 --- a/drivers/gpu/drm/i915/intel_bios.c
883 +++ b/drivers/gpu/drm/i915/intel_bios.c
884 @@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
885 dev_priv->lvds_ssc_freq =
886 intel_bios_ssc_frequency(dev, general->ssc_freq);
887 dev_priv->display_clock_mode = general->display_clock_mode;
888 - DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
889 + dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
890 + DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
891 dev_priv->int_tv_support,
892 dev_priv->int_crt_support,
893 dev_priv->lvds_use_ssc,
894 dev_priv->lvds_ssc_freq,
895 - dev_priv->display_clock_mode);
896 + dev_priv->display_clock_mode,
897 + dev_priv->fdi_rx_polarity_inverted);
898 }
899 }
900
901 diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
902 index 36e57f9..e088d6f 100644
903 --- a/drivers/gpu/drm/i915/intel_bios.h
904 +++ b/drivers/gpu/drm/i915/intel_bios.h
905 @@ -127,7 +127,9 @@ struct bdb_general_features {
906 /* bits 3 */
907 u8 disable_smooth_vision:1;
908 u8 single_dvi:1;
909 - u8 rsvd9:6; /* finish byte */
910 + u8 rsvd9:1;
911 + u8 fdi_rx_polarity_inverted:1;
912 + u8 rsvd10:4; /* finish byte */
913
914 /* bits 4 */
915 u8 legacy_monitor_detect;
916 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
917 index d3f834a..faeaebc 100644
918 --- a/drivers/gpu/drm/i915/intel_display.c
919 +++ b/drivers/gpu/drm/i915/intel_display.c
920 @@ -7732,22 +7732,25 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
921 if (crtc->enabled)
922 *prepare_pipes |= 1 << intel_crtc->pipe;
923
924 - /* We only support modeset on one single crtc, hence we need to do that
925 - * only for the passed in crtc iff we change anything else than just
926 - * disable crtcs.
927 - *
928 - * This is actually not true, to be fully compatible with the old crtc
929 - * helper we automatically disable _any_ output (i.e. doesn't need to be
930 - * connected to the crtc we're modesetting on) if it's disconnected.
931 - * Which is a rather nutty api (since changed the output configuration
932 - * without userspace's explicit request can lead to confusion), but
933 - * alas. Hence we currently need to modeset on all pipes we prepare. */
934 + /*
935 + * For simplicity do a full modeset on any pipe where the output routing
936 + * changed. We could be more clever, but that would require us to be
937 + * more careful with calling the relevant encoder->mode_set functions.
938 + */
939 if (*prepare_pipes)
940 *modeset_pipes = *prepare_pipes;
941
942 /* ... and mask these out. */
943 *modeset_pipes &= ~(*disable_pipes);
944 *prepare_pipes &= ~(*disable_pipes);
945 +
946 + /*
947 + * HACK: We don't (yet) fully support global modesets. intel_set_config
948 + * obies this rule, but the modeset restore mode of
949 + * intel_modeset_setup_hw_state does not.
950 + */
951 + *modeset_pipes &= 1 << intel_crtc->pipe;
952 + *prepare_pipes &= 1 << intel_crtc->pipe;
953 }
954
955 static bool intel_crtc_in_use(struct drm_crtc *crtc)
956 @@ -9388,6 +9391,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
957 /* flush any delayed tasks or pending work */
958 flush_scheduled_work();
959
960 + /* destroy backlight, if any, before the connectors */
961 + intel_panel_destroy_backlight(dev);
962 +
963 drm_mode_config_cleanup(dev);
964 }
965
966 diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
967 index 73ce6e9..cbe1ec3 100644
968 --- a/drivers/gpu/drm/i915/intel_dp.c
969 +++ b/drivers/gpu/drm/i915/intel_dp.c
970 @@ -2467,17 +2467,14 @@ done:
971 static void
972 intel_dp_destroy(struct drm_connector *connector)
973 {
974 - struct drm_device *dev = connector->dev;
975 struct intel_dp *intel_dp = intel_attached_dp(connector);
976 struct intel_connector *intel_connector = to_intel_connector(connector);
977
978 if (!IS_ERR_OR_NULL(intel_connector->edid))
979 kfree(intel_connector->edid);
980
981 - if (is_edp(intel_dp)) {
982 - intel_panel_destroy_backlight(dev);
983 + if (is_edp(intel_dp))
984 intel_panel_fini(&intel_connector->panel);
985 - }
986
987 drm_sysfs_connector_remove(connector);
988 drm_connector_cleanup(connector);
989 diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
990 index 15da995..ba96e04 100644
991 --- a/drivers/gpu/drm/i915/intel_dvo.c
992 +++ b/drivers/gpu/drm/i915/intel_dvo.c
993 @@ -449,6 +449,7 @@ void intel_dvo_init(struct drm_device *dev)
994 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
995 struct i2c_adapter *i2c;
996 int gpio;
997 + bool dvoinit;
998
999 /* Allow the I2C driver info to specify the GPIO to be used in
1000 * special cases, but otherwise default to what's defined
1001 @@ -468,7 +469,17 @@ void intel_dvo_init(struct drm_device *dev)
1002 i2c = intel_gmbus_get_adapter(dev_priv, gpio);
1003
1004 intel_dvo->dev = *dvo;
1005 - if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
1006 +
1007 + /* GMBUS NAK handling seems to be unstable, hence let the
1008 + * transmitter detection run in bit banging mode for now.
1009 + */
1010 + intel_gmbus_force_bit(i2c, true);
1011 +
1012 + dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
1013 +
1014 + intel_gmbus_force_bit(i2c, false);
1015 +
1016 + if (!dvoinit)
1017 continue;
1018
1019 intel_encoder->type = INTEL_OUTPUT_DVO;
1020 diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
1021 index 17aee74..8b383a6 100644
1022 --- a/drivers/gpu/drm/i915/intel_lvds.c
1023 +++ b/drivers/gpu/drm/i915/intel_lvds.c
1024 @@ -556,7 +556,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
1025 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
1026 kfree(lvds_connector->base.edid);
1027
1028 - intel_panel_destroy_backlight(connector->dev);
1029 intel_panel_fini(&lvds_connector->base.panel);
1030
1031 drm_sysfs_connector_remove(connector);
1032 @@ -790,6 +789,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
1033 DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
1034 },
1035 },
1036 + {
1037 + .callback = intel_no_lvds_dmi_callback,
1038 + .ident = "Fujitsu Esprimo Q900",
1039 + .matches = {
1040 + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1041 + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
1042 + },
1043 + },
1044
1045 { } /* terminating entry */
1046 };
1047 diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
1048 index bee8cb6..94d895b 100644
1049 --- a/drivers/gpu/drm/i915/intel_panel.c
1050 +++ b/drivers/gpu/drm/i915/intel_panel.c
1051 @@ -422,6 +422,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1052
1053 intel_panel_init_backlight(dev);
1054
1055 + if (WARN_ON(dev_priv->backlight))
1056 + return -ENODEV;
1057 +
1058 memset(&props, 0, sizeof(props));
1059 props.type = BACKLIGHT_RAW;
1060 props.max_brightness = _intel_panel_get_max_backlight(dev);
1061 @@ -447,8 +450,10 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1062 void intel_panel_destroy_backlight(struct drm_device *dev)
1063 {
1064 struct drm_i915_private *dev_priv = dev->dev_private;
1065 - if (dev_priv->backlight)
1066 + if (dev_priv->backlight) {
1067 backlight_device_unregister(dev_priv->backlight);
1068 + dev_priv->backlight = NULL;
1069 + }
1070 }
1071 #else
1072 int intel_panel_setup_backlight(struct drm_connector *connector)
1073 diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
1074 index dde0ded..253bcf3 100644
1075 --- a/drivers/gpu/drm/i915/intel_pm.c
1076 +++ b/drivers/gpu/drm/i915/intel_pm.c
1077 @@ -3560,6 +3560,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
1078 {
1079 struct drm_i915_private *dev_priv = dev->dev_private;
1080 int pipe;
1081 + uint32_t val;
1082
1083 /*
1084 * On Ibex Peak and Cougar Point, we need to disable clock
1085 @@ -3572,8 +3573,12 @@ static void cpt_init_clock_gating(struct drm_device *dev)
1086 /* The below fixes the weird display corruption, a few pixels shifted
1087 * downward, on (only) LVDS of some HP laptops with IVY.
1088 */
1089 - for_each_pipe(pipe)
1090 - I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
1091 + for_each_pipe(pipe) {
1092 + val = TRANS_CHICKEN2_TIMING_OVERRIDE;
1093 + if (dev_priv->fdi_rx_polarity_inverted)
1094 + val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
1095 + I915_WRITE(TRANS_CHICKEN2(pipe), val);
1096 + }
1097 /* WADP0ClockGatingDisable */
1098 for_each_pipe(pipe) {
1099 I915_WRITE(TRANS_CHICKEN1(pipe),
1100 diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
1101 index c275bf0..506c331 100644
1102 --- a/drivers/gpu/drm/i915/intel_sdvo.c
1103 +++ b/drivers/gpu/drm/i915/intel_sdvo.c
1104 @@ -1213,11 +1213,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1105 struct drm_device *dev = encoder->base.dev;
1106 struct drm_i915_private *dev_priv = dev->dev_private;
1107 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
1108 + u16 active_outputs;
1109 u32 tmp;
1110
1111 tmp = I915_READ(intel_sdvo->sdvo_reg);
1112 + intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);
1113
1114 - if (!(tmp & SDVO_ENABLE))
1115 + if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
1116 return false;
1117
1118 if (HAS_PCH_CPT(dev))
1119 @@ -2704,7 +2706,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
1120 struct intel_sdvo *intel_sdvo;
1121 u32 hotplug_mask;
1122 int i;
1123 -
1124 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
1125 if (!intel_sdvo)
1126 return false;
1127 diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
1128 index 5ea5033..a657709 100644
1129 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h
1130 +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
1131 @@ -116,6 +116,8 @@ struct mga_fbdev {
1132 void *sysram;
1133 int size;
1134 struct ttm_bo_kmap_obj mapping;
1135 + int x1, y1, x2, y2; /* dirty rect */
1136 + spinlock_t dirty_lock;
1137 };
1138
1139 struct mga_crtc {
1140 diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
1141 index 2f48648..41eefc4 100644
1142 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c
1143 +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
1144 @@ -28,16 +28,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
1145 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
1146 int ret;
1147 bool unmap = false;
1148 + bool store_for_later = false;
1149 + int x2, y2;
1150 + unsigned long flags;
1151
1152 obj = mfbdev->mfb.obj;
1153 bo = gem_to_mga_bo(obj);
1154
1155 + /*
1156 + * try and reserve the BO, if we fail with busy
1157 + * then the BO is being moved and we should
1158 + * store up the damage until later.
1159 + */
1160 ret = mgag200_bo_reserve(bo, true);
1161 if (ret) {
1162 - DRM_ERROR("failed to reserve fb bo\n");
1163 + if (ret != -EBUSY)
1164 + return;
1165 +
1166 + store_for_later = true;
1167 + }
1168 +
1169 + x2 = x + width - 1;
1170 + y2 = y + height - 1;
1171 + spin_lock_irqsave(&mfbdev->dirty_lock, flags);
1172 +
1173 + if (mfbdev->y1 < y)
1174 + y = mfbdev->y1;
1175 + if (mfbdev->y2 > y2)
1176 + y2 = mfbdev->y2;
1177 + if (mfbdev->x1 < x)
1178 + x = mfbdev->x1;
1179 + if (mfbdev->x2 > x2)
1180 + x2 = mfbdev->x2;
1181 +
1182 + if (store_for_later) {
1183 + mfbdev->x1 = x;
1184 + mfbdev->x2 = x2;
1185 + mfbdev->y1 = y;
1186 + mfbdev->y2 = y2;
1187 + spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
1188 return;
1189 }
1190
1191 + mfbdev->x1 = mfbdev->y1 = INT_MAX;
1192 + mfbdev->x2 = mfbdev->y2 = 0;
1193 + spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
1194 +
1195 if (!bo->kmap.virtual) {
1196 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
1197 if (ret) {
1198 @@ -47,10 +83,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
1199 }
1200 unmap = true;
1201 }
1202 - for (i = y; i < y + height; i++) {
1203 + for (i = y; i <= y2; i++) {
1204 /* assume equal stride for now */
1205 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
1206 - memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
1207 + memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);
1208
1209 }
1210 if (unmap)
1211 @@ -269,6 +305,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)
1212
1213 mdev->mfbdev = mfbdev;
1214 mfbdev->helper.funcs = &mga_fb_helper_funcs;
1215 + spin_lock_init(&mfbdev->dirty_lock);
1216
1217 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
1218 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
1219 diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
1220 index 8fc9d92..401c989 100644
1221 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
1222 +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
1223 @@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)
1224
1225 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
1226 if (ret) {
1227 - if (ret != -ERESTARTSYS)
1228 - DRM_ERROR("reserve failed %p\n", bo);
1229 + if (ret != -ERESTARTSYS && ret != -EBUSY)
1230 + DRM_ERROR("reserve failed %p %d\n", bo, ret);
1231 return ret;
1232 }
1233 return 0;
1234 diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
1235 index 5ce9bf5..43672b6 100644
1236 --- a/drivers/gpu/drm/radeon/atom.c
1237 +++ b/drivers/gpu/drm/radeon/atom.c
1238 @@ -1389,10 +1389,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
1239 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
1240
1241 DRM_DEBUG("atom firmware requested %08x %dkb\n",
1242 - firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
1243 - firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
1244 + le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
1245 + le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));
1246
1247 - usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
1248 + usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
1249 }
1250 ctx->scratch_size_bytes = 0;
1251 if (usage_bytes == 0)
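
The atom.c hunk above wraps the firmware-reserved VRAM fields in le32_to_cpu()/le16_to_cpu(), since ATOM BIOS tables are little-endian regardless of host byte order. A small C sketch of the equivalent portable loads (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Portable little-endian loads: what le16_to_cpu()/le32_to_cpu() make
 * explicit in the hunk above. On a big-endian host a plain struct-field
 * load of these firmware values would come back byte-swapped. */
static uint16_t load_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t load_le32(const uint8_t *p)
{
    return p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) |
           ((uint32_t)p[3] << 24);
}

int main(void)
{
    /* 0x0200 little-endian = 512 KiB requested by firmware */
    const uint8_t fw[] = { 0x00, 0x02 };
    printf("%u KiB\n", load_le16(fw)); /* prints 512 on any host */
    return 0;
}
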
1252 diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
1253 index 21a892c..6d6fdb3 100644
1254 --- a/drivers/gpu/drm/radeon/atombios_crtc.c
1255 +++ b/drivers/gpu/drm/radeon/atombios_crtc.c
1256 @@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
1257 /* use frac fb div on APUs */
1258 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
1259 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
1260 + /* use frac fb div on RS780/RS880 */
1261 + if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
1262 + radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
1263 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
1264 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
1265 } else {
1266 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
1267 index 1b0a4ec..90dc470 100644
1268 --- a/drivers/gpu/drm/radeon/evergreen.c
1269 +++ b/drivers/gpu/drm/radeon/evergreen.c
1270 @@ -105,6 +105,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
1271 }
1272 }
1273
1274 +static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
1275 +{
1276 + if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
1277 + return true;
1278 + else
1279 + return false;
1280 +}
1281 +
1282 +static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
1283 +{
1284 + u32 pos1, pos2;
1285 +
1286 + pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1287 + pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1288 +
1289 + if (pos1 != pos2)
1290 + return true;
1291 + else
1292 + return false;
1293 +}
1294 +
1295 /**
1296 * dce4_wait_for_vblank - vblank wait asic callback.
1297 *
1298 @@ -115,21 +136,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
1299 */
1300 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
1301 {
1302 - int i;
1303 + unsigned i = 0;
1304
1305 if (crtc >= rdev->num_crtc)
1306 return;
1307
1308 - if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
1309 - for (i = 0; i < rdev->usec_timeout; i++) {
1310 - if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
1311 + if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
1312 + return;
1313 +
1314 + /* depending on when we hit vblank, we may be close to active; if so,
1315 + * wait for another frame.
1316 + */
1317 + while (dce4_is_in_vblank(rdev, crtc)) {
1318 + if (i++ % 100 == 0) {
1319 + if (!dce4_is_counter_moving(rdev, crtc))
1320 break;
1321 - udelay(1);
1322 }
1323 - for (i = 0; i < rdev->usec_timeout; i++) {
1324 - if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
1325 + }
1326 +
1327 + while (!dce4_is_in_vblank(rdev, crtc)) {
1328 + if (i++ % 100 == 0) {
1329 + if (!dce4_is_counter_moving(rdev, crtc))
1330 break;
1331 - udelay(1);
1332 }
1333 }
1334 }
1335 @@ -608,6 +636,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)
1336
1337 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1338 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1339 +
1340 + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
1341 + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
1342 + /* don't try to enable hpd on eDP or LVDS avoid breaking the
1343 + * aux dp channel on imac and help (but not completely fix)
1344 + * https://bugzilla.redhat.com/show_bug.cgi?id=726143
1345 + * also avoid interrupt storms during dpms.
1346 + */
1347 + continue;
1348 + }
1349 switch (radeon_connector->hpd.hpd) {
1350 case RADEON_HPD_1:
1351 WREG32(DC_HPD1_CONTROL, tmp);
1352 @@ -1325,17 +1363,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1353 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
1354 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
1355 radeon_wait_for_vblank(rdev, i);
1356 - tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1357 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1358 + tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
1359 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
1360 - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1361 }
1362 } else {
1363 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1364 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
1365 radeon_wait_for_vblank(rdev, i);
1366 - tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1367 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1368 + tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
1369 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1370 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1371 }
1372 @@ -1347,6 +1384,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1373 break;
1374 udelay(1);
1375 }
1376 +
1377 + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
1378 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1379 + tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
1380 + tmp &= ~EVERGREEN_CRTC_MASTER_EN;
1381 + WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
1382 + WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1383 + save->crtc_enabled[i] = false;
1384 + /* ***** */
1385 } else {
1386 save->crtc_enabled[i] = false;
1387 }
1388 @@ -1364,6 +1410,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1389 }
1390 /* wait for the MC to settle */
1391 udelay(100);
1392 +
1393 + /* lock double buffered regs */
1394 + for (i = 0; i < rdev->num_crtc; i++) {
1395 + if (save->crtc_enabled[i]) {
1396 + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1397 + if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) {
1398 + tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
1399 + WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
1400 + }
1401 + tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1402 + if (!(tmp & 1)) {
1403 + tmp |= 1;
1404 + WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1405 + }
1406 + }
1407 + }
1408 }
1409
1410 void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
1411 @@ -1385,6 +1447,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
1412 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
1413 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1414
1415 + /* unlock regs and wait for update */
1416 + for (i = 0; i < rdev->num_crtc; i++) {
1417 + if (save->crtc_enabled[i]) {
1418 + tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
1419 + if ((tmp & 0x3) != 0) {
1420 + tmp &= ~0x3;
1421 + WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
1422 + }
1423 + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1424 + if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) {
1425 + tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK;
1426 + WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp);
1427 + }
1428 + tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1429 + if (tmp & 1) {
1430 + tmp &= ~1;
1431 + WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1432 + }
1433 + for (j = 0; j < rdev->usec_timeout; j++) {
1434 + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
1435 + if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0)
1436 + break;
1437 + udelay(1);
1438 + }
1439 + }
1440 + }
1441 +
1442 /* unblackout the MC */
1443 tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
1444 tmp &= ~BLACKOUT_MODE_MASK;
1445 diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
1446 index 034f4c2..3e9773a 100644
1447 --- a/drivers/gpu/drm/radeon/evergreen_reg.h
1448 +++ b/drivers/gpu/drm/radeon/evergreen_reg.h
1449 @@ -225,6 +225,8 @@
1450 #define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
1451 #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
1452 #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
1453 +#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
1454 +#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
1455
1456 #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
1457 #define EVERGREEN_DC_GPIO_HPD_A 0x64b4
1458 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
1459 index b64e55d..10e1bd1 100644
1460 --- a/drivers/gpu/drm/radeon/ni.c
1461 +++ b/drivers/gpu/drm/radeon/ni.c
1462 @@ -471,7 +471,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1463 (rdev->pdev->device == 0x990F) ||
1464 (rdev->pdev->device == 0x9910) ||
1465 (rdev->pdev->device == 0x9917) ||
1466 - (rdev->pdev->device == 0x9999)) {
1467 + (rdev->pdev->device == 0x9999) ||
1468 + (rdev->pdev->device == 0x999C)) {
1469 rdev->config.cayman.max_simds_per_se = 6;
1470 rdev->config.cayman.max_backends_per_se = 2;
1471 } else if ((rdev->pdev->device == 0x9903) ||
1472 @@ -480,7 +481,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1473 (rdev->pdev->device == 0x990D) ||
1474 (rdev->pdev->device == 0x990E) ||
1475 (rdev->pdev->device == 0x9913) ||
1476 - (rdev->pdev->device == 0x9918)) {
1477 + (rdev->pdev->device == 0x9918) ||
1478 + (rdev->pdev->device == 0x999D)) {
1479 rdev->config.cayman.max_simds_per_se = 4;
1480 rdev->config.cayman.max_backends_per_se = 2;
1481 } else if ((rdev->pdev->device == 0x9919) ||
1482 @@ -619,6 +621,8 @@ static void cayman_gpu_init(struct radeon_device *rdev)
1483
1484 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1485 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1486 + if (ASIC_IS_DCE6(rdev))
1487 + WREG32(DMIF_ADDR_CALC, gb_addr_config);
1488 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1489 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1490 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
1491 diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
1492 index 48e5022..e045f8c 100644
1493 --- a/drivers/gpu/drm/radeon/nid.h
1494 +++ b/drivers/gpu/drm/radeon/nid.h
1495 @@ -45,6 +45,10 @@
1496 #define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
1497
1498 #define DMIF_ADDR_CONFIG 0xBD4
1499 +
1500 +/* DCE6 only */
1501 +#define DMIF_ADDR_CALC 0xC00
1502 +
1503 #define SRBM_GFX_CNTL 0x0E44
1504 #define RINGID(x) (((x) & 0x3) << 0)
1505 #define VMID(x) (((x) & 0x7) << 0)
1506 diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
1507 index 8ff7cac..62719ec 100644
1508 --- a/drivers/gpu/drm/radeon/r100.c
1509 +++ b/drivers/gpu/drm/radeon/r100.c
1510 @@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520);
1511 * and others in some cases.
1512 */
1513
1514 +static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc)
1515 +{
1516 + if (crtc == 0) {
1517 + if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
1518 + return true;
1519 + else
1520 + return false;
1521 + } else {
1522 + if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
1523 + return true;
1524 + else
1525 + return false;
1526 + }
1527 +}
1528 +
1529 +static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc)
1530 +{
1531 + u32 vline1, vline2;
1532 +
1533 + if (crtc == 0) {
1534 + vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
1535 + vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
1536 + } else {
1537 + vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
1538 + vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL;
1539 + }
1540 + if (vline1 != vline2)
1541 + return true;
1542 + else
1543 + return false;
1544 +}
1545 +
1546 /**
1547 * r100_wait_for_vblank - vblank wait asic callback.
1548 *
1549 @@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520);
1550 */
1551 void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
1552 {
1553 - int i;
1554 + unsigned i = 0;
1555
1556 if (crtc >= rdev->num_crtc)
1557 return;
1558
1559 if (crtc == 0) {
1560 - if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
1561 - for (i = 0; i < rdev->usec_timeout; i++) {
1562 - if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
1563 - break;
1564 - udelay(1);
1565 - }
1566 - for (i = 0; i < rdev->usec_timeout; i++) {
1567 - if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
1568 - break;
1569 - udelay(1);
1570 - }
1571 - }
1572 + if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN))
1573 + return;
1574 } else {
1575 - if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
1576 - for (i = 0; i < rdev->usec_timeout; i++) {
1577 - if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
1578 - break;
1579 - udelay(1);
1580 - }
1581 - for (i = 0; i < rdev->usec_timeout; i++) {
1582 - if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
1583 - break;
1584 - udelay(1);
1585 - }
1586 + if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN))
1587 + return;
1588 + }
1589 +
1590 + /* depending on when we hit vblank, we may be close to active; if so,
1591 + * wait for another frame.
1592 + */
1593 + while (r100_is_in_vblank(rdev, crtc)) {
1594 + if (i++ % 100 == 0) {
1595 + if (!r100_is_counter_moving(rdev, crtc))
1596 + break;
1597 + }
1598 + }
1599 +
1600 + while (!r100_is_in_vblank(rdev, crtc)) {
1601 + if (i++ % 100 == 0) {
1602 + if (!r100_is_counter_moving(rdev, crtc))
1603 + break;
1604 }
1605 }
1606 }
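The rewritten r100_wait_for_vblank() above replaces the fixed udelay loops with two helpers: an in-vblank test and a counter-motion test that reads the current-scanline register twice and treats unequal values as proof the pipe is alive. A standalone sketch of that shape, with a simulated scanline counter in place of RADEON_CRTC_VLINE_CRNT_VLINE:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated "current vertical line" register; a real driver would RREG32 it. */
static uint32_t vline_sim;
static uint32_t read_vline(void) { return vline_sim++ & 0x3ff; }

/* Two back-to-back reads: if they differ, the CRTC timing is advancing. */
static bool counter_moving(void)
{
    uint32_t a = read_vline();
    uint32_t b = read_vline();
    return a != b;
}

static bool in_vblank(void) { return read_vline() < 32; /* toy vblank window */ }

/*
 * The wait's shape: spin on the vblank condition, but every N iterations
 * verify the counter still moves so a disabled or stuck pipe cannot spin
 * the caller forever.
 */
static void wait_for_vblank(void)
{
    unsigned int i = 0;

    while (in_vblank())        /* near the end of vblank? wait a full frame */
        if (i++ % 100 == 0 && !counter_moving())
            return;
    i = 0;
    while (!in_vblank())
        if (i++ % 100 == 0 && !counter_moving())
            return;
}

int main(void) { wait_for_vblank(); puts("vblank reached"); return 0; }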
1607 diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
1608 index ec576aa..8ec2376 100644
1609 --- a/drivers/gpu/drm/radeon/r500_reg.h
1610 +++ b/drivers/gpu/drm/radeon/r500_reg.h
1611 @@ -357,7 +357,9 @@
1612 #define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
1613 #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
1614
1615 +#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0
1616 #define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4
1617 +#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8
1618
1619 /* master controls */
1620 #define AVIVO_DC_CRTC_MASTER_EN 0x60f8
1621 diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
1622 index 95970ec..d89a1f8 100644
1623 --- a/drivers/gpu/drm/radeon/r600_hdmi.c
1624 +++ b/drivers/gpu/drm/radeon/r600_hdmi.c
1625 @@ -489,7 +489,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
1626 offset = dig->afmt->offset;
1627
1628 /* Older chipsets require setting HDMI and routing manually */
1629 - if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
1630 + if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
1631 hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
1632 switch (radeon_encoder->encoder_id) {
1633 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1634 @@ -557,7 +557,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
1635 radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
1636
1637 /* Older chipsets not handled by AtomBIOS */
1638 - if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
1639 + if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) {
1640 switch (radeon_encoder->encoder_id) {
1641 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
1642 WREG32_P(AVIVO_TMDSA_CNTL, 0,
1643 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
1644 index f22eb57..96168ef 100644
1645 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
1646 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
1647 @@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1648 num_modes = power_info->info.ucNumOfPowerModeEntries;
1649 if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
1650 num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
1651 + if (num_modes == 0)
1652 + return state_index;
1653 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
1654 if (!rdev->pm.power_state)
1655 return state_index;
1656 @@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
1657 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
1658
1659 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
1660 + if (power_info->pplib.ucNumStates == 0)
1661 + return state_index;
1662 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
1663 power_info->pplib.ucNumStates, GFP_KERNEL);
1664 if (!rdev->pm.power_state)
1665 @@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
1666 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
1667 u16 data_offset;
1668 u8 frev, crev;
1669 + u8 *power_state_offset;
1670
1671 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
1672 &frev, &crev, &data_offset))
1673 @@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
1674 non_clock_info_array = (struct _NonClockInfoArray *)
1675 (mode_info->atom_context->bios + data_offset +
1676 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
1677 + if (state_array->ucNumEntries == 0)
1678 + return state_index;
1679 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
1680 state_array->ucNumEntries, GFP_KERNEL);
1681 if (!rdev->pm.power_state)
1682 return state_index;
1683 + power_state_offset = (u8 *)state_array->states;
1684 for (i = 0; i < state_array->ucNumEntries; i++) {
1685 mode_index = 0;
1686 - power_state = (union pplib_power_state *)&state_array->states[i];
1687 - /* XXX this might be an inagua bug... */
1688 - non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
1689 + power_state = (union pplib_power_state *)power_state_offset;
1690 + non_clock_array_index = power_state->v2.nonClockInfoIndex;
1691 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
1692 &non_clock_info_array->nonClockInfo[non_clock_array_index];
1693 rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
1694 @@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
1695 if (power_state->v2.ucNumDPMLevels) {
1696 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
1697 clock_array_index = power_state->v2.clockInfoIndex[j];
1698 - /* XXX this might be an inagua bug... */
1699 - if (clock_array_index >= clock_info_array->ucNumEntries)
1700 - continue;
1701 clock_info = (union pplib_clock_info *)
1702 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
1703 valid = radeon_atombios_parse_pplib_clock_info(rdev,
1704 @@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
1705 non_clock_info);
1706 state_index++;
1707 }
1708 + power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
1709 }
1710 /* if multiple clock modes, mark the lowest as no display */
1711 for (i = 0; i < state_index; i++) {
1712 @@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
1713 default:
1714 break;
1715 }
1716 - } else {
1717 + }
1718 +
1719 + if (state_index == 0) {
1720 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
1721 if (rdev->pm.power_state) {
1722 rdev->pm.power_state[0].clock_info =
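The radeon_atombios.c hunk above stops indexing state_array->states[i] as a fixed-size array and instead advances a byte pointer by 2 + ucNumDPMLevels per record, since each power state carries a variable number of clock-info indices. A compilable toy version of that variable-length walk (the record layout here is invented for illustration):

#include <stdint.h>
#include <stdio.h>

/*
 * Model of the table shape: each state record is 2 fixed bytes (non-clock
 * index, level count) followed by a variable number of clock-info indices,
 * so states[] cannot be indexed as an array of equal-sized elements.
 */
struct state_hdr {
    uint8_t non_clock_index;
    uint8_t num_levels;
    /* uint8_t clock_index[num_levels]; follows in the byte stream */
};

static void walk_states(const uint8_t *buf, unsigned int num_states)
{
    const uint8_t *p = buf;

    for (unsigned int i = 0; i < num_states; i++) {
        const struct state_hdr *s = (const struct state_hdr *)p;

        printf("state %u: non_clock=%u levels=%u\n",
               i, s->non_clock_index, s->num_levels);
        for (uint8_t j = 0; j < s->num_levels; j++)
            printf("  clock index %u\n", p[2 + j]);

        p += 2 + s->num_levels;   /* advance by this record's true size */
    }
}

int main(void)
{
    /* two records: (idx 0, 2 levels: 5,6) then (idx 1, 1 level: 7) */
    const uint8_t table[] = { 0, 2, 5, 6, 1, 1, 7 };
    walk_states(table, 2);
    return 0;
}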
1723 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
1724 index 9c312f9..bc36922 100644
1725 --- a/drivers/gpu/drm/radeon/radeon_kms.c
1726 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
1727 @@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev)
1728
1729 if (rdev == NULL)
1730 return 0;
1731 + if (rdev->rmmio == NULL)
1732 + goto done_free;
1733 radeon_acpi_fini(rdev);
1734 radeon_modeset_fini(rdev);
1735 radeon_device_fini(rdev);
1736 +
1737 +done_free:
1738 kfree(rdev);
1739 dev->dev_private = NULL;
1740 return 0;
1741 diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
1742 index 338fd6a..788c64c 100644
1743 --- a/drivers/gpu/drm/radeon/radeon_pm.c
1744 +++ b/drivers/gpu/drm/radeon/radeon_pm.c
1745 @@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
1746 struct radeon_device *rdev = dev->dev_private;
1747
1748 seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk);
1749 - seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1750 + /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */
1751 + if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP))
1752 + seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk);
1753 + else
1754 + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
1755 seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk);
1756 if (rdev->asic->pm.get_memory_clock)
1757 seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
1758 diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
1759 index cd72062..8adc5b5 100644
1760 --- a/drivers/gpu/drm/radeon/radeon_ring.c
1761 +++ b/drivers/gpu/drm/radeon/radeon_ring.c
1762 @@ -161,7 +161,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
1763 radeon_semaphore_free(rdev, &ib->semaphore, NULL);
1764 }
1765 /* if we can't remember our last VM flush then flush now! */
1766 - if (ib->vm && !ib->vm->last_flush) {
1767 + /* XXX figure out why we have to flush for every IB */
1768 + if (ib->vm /*&& !ib->vm->last_flush*/) {
1769 radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
1770 }
1771 if (const_ib) {
1772 diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
1773 index 5a0fc74..46fa1b0 100644
1774 --- a/drivers/gpu/drm/radeon/rs600.c
1775 +++ b/drivers/gpu/drm/radeon/rs600.c
1776 @@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] =
1777 AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
1778 };
1779
1780 +static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc)
1781 +{
1782 + if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
1783 + return true;
1784 + else
1785 + return false;
1786 +}
1787 +
1788 +static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc)
1789 +{
1790 + u32 pos1, pos2;
1791 +
1792 + pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1793 + pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]);
1794 +
1795 + if (pos1 != pos2)
1796 + return true;
1797 + else
1798 + return false;
1799 +}
1800 +
1801 +/**
1802 + * avivo_wait_for_vblank - vblank wait asic callback.
1803 + *
1804 + * @rdev: radeon_device pointer
1805 + * @crtc: crtc to wait for vblank on
1806 + *
1807 + * Wait for vblank on the requested crtc (r5xx-r7xx).
1808 + */
1809 void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
1810 {
1811 - int i;
1812 + unsigned i = 0;
1813
1814 if (crtc >= rdev->num_crtc)
1815 return;
1816
1817 - if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
1818 - for (i = 0; i < rdev->usec_timeout; i++) {
1819 - if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
1820 + if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN))
1821 + return;
1822 +
1823 + /* depending on when we hit vblank, we may be close to active; if so,
1824 + * wait for another frame.
1825 + */
1826 + while (avivo_is_in_vblank(rdev, crtc)) {
1827 + if (i++ % 100 == 0) {
1828 + if (!avivo_is_counter_moving(rdev, crtc))
1829 break;
1830 - udelay(1);
1831 }
1832 - for (i = 0; i < rdev->usec_timeout; i++) {
1833 - if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
1834 + }
1835 +
1836 + while (!avivo_is_in_vblank(rdev, crtc)) {
1837 + if (i++ % 100 == 0) {
1838 + if (!avivo_is_counter_moving(rdev, crtc))
1839 break;
1840 - udelay(1);
1841 }
1842 }
1843 }
1844 diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
1845 index 435ed35..ffcba73 100644
1846 --- a/drivers/gpu/drm/radeon/rv515.c
1847 +++ b/drivers/gpu/drm/radeon/rv515.c
1848 @@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
1849 tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
1850 if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) {
1851 radeon_wait_for_vblank(rdev, i);
1852 + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1853 tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE;
1854 WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
1855 + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1856 }
1857 /* wait for the next frame */
1858 frame_count = radeon_get_vblank_counter(rdev, i);
1859 @@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
1860 break;
1861 udelay(1);
1862 }
1863 +
1864 + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
1865 + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
1866 + tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]);
1867 + tmp &= ~AVIVO_CRTC_EN;
1868 + WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp);
1869 + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
1870 + save->crtc_enabled[i] = false;
1871 + /* ***** */
1872 } else {
1873 save->crtc_enabled[i] = false;
1874 }
1875 @@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
1876 }
1877 /* wait for the MC to settle */
1878 udelay(100);
1879 +
1880 + /* lock double buffered regs */
1881 + for (i = 0; i < rdev->num_crtc; i++) {
1882 + if (save->crtc_enabled[i]) {
1883 + tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
1884 + if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) {
1885 + tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
1886 + WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
1887 + }
1888 + tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1889 + if (!(tmp & 1)) {
1890 + tmp |= 1;
1891 + WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1892 + }
1893 + }
1894 + }
1895 }
1896
1897 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
1898 @@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
1899 /* update crtc base addresses */
1900 for (i = 0; i < rdev->num_crtc; i++) {
1901 if (rdev->family >= CHIP_RV770) {
1902 - if (i == 1) {
1903 + if (i == 0) {
1904 WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH,
1905 upper_32_bits(rdev->mc.vram_start));
1906 WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH,
1907 @@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
1908 }
1909 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
1910
1911 + /* unlock regs and wait for update */
1912 + for (i = 0; i < rdev->num_crtc; i++) {
1913 + if (save->crtc_enabled[i]) {
1914 + tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
1915 + if ((tmp & 0x3) != 0) {
1916 + tmp &= ~0x3;
1917 + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
1918 + }
1919 + tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
1920 + if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) {
1921 + tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK;
1922 + WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp);
1923 + }
1924 + tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]);
1925 + if (tmp & 1) {
1926 + tmp &= ~1;
1927 + WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
1928 + }
1929 + for (j = 0; j < rdev->usec_timeout; j++) {
1930 + tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
1931 + if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0)
1932 + break;
1933 + udelay(1);
1934 + }
1935 + }
1936 + }
1937 +
1938 if (rdev->family >= CHIP_R600) {
1939 /* unblackout the MC */
1940 if (rdev->family >= CHIP_RV770)
1941 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1942 index dd00721..40d766e 100644
1943 --- a/drivers/gpu/drm/radeon/si.c
1944 +++ b/drivers/gpu/drm/radeon/si.c
1945 @@ -1374,7 +1374,7 @@ static void si_select_se_sh(struct radeon_device *rdev,
1946 u32 data = INSTANCE_BROADCAST_WRITES;
1947
1948 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1949 - data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1950 + data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1951 else if (se_num == 0xffffffff)
1952 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1953 else if (sh_num == 0xffffffff)
1954 @@ -1659,6 +1659,7 @@ static void si_gpu_init(struct radeon_device *rdev)
1955
1956 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1957 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1958 + WREG32(DMIF_ADDR_CALC, gb_addr_config);
1959 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1960 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
1961 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
1962 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
1963 index c056aae..e9a01f0 100644
1964 --- a/drivers/gpu/drm/radeon/sid.h
1965 +++ b/drivers/gpu/drm/radeon/sid.h
1966 @@ -60,6 +60,8 @@
1967
1968 #define DMIF_ADDR_CONFIG 0xBD4
1969
1970 +#define DMIF_ADDR_CALC 0xC00
1971 +
1972 #define SRBM_STATUS 0xE50
1973
1974 #define SRBM_SOFT_RESET 0x0E60
1975 diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
1976 index 05bfe53..892cd87 100644
1977 --- a/drivers/infiniband/hw/cxgb4/qp.c
1978 +++ b/drivers/infiniband/hw/cxgb4/qp.c
1979 @@ -100,6 +100,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
1980 return 0;
1981 }
1982
1983 +static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
1984 +{
1985 + int ret = -ENOSYS;
1986 + if (user)
1987 + ret = alloc_oc_sq(rdev, sq);
1988 + if (ret)
1989 + ret = alloc_host_sq(rdev, sq);
1990 + return ret;
1991 +}
1992 +
1993 static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
1994 struct c4iw_dev_ucontext *uctx)
1995 {
1996 @@ -168,18 +178,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
1997 goto free_sw_rq;
1998 }
1999
2000 - if (user) {
2001 - ret = alloc_oc_sq(rdev, &wq->sq);
2002 - if (ret)
2003 - goto free_hwaddr;
2004 -
2005 - ret = alloc_host_sq(rdev, &wq->sq);
2006 - if (ret)
2007 - goto free_sq;
2008 - } else
2009 - ret = alloc_host_sq(rdev, &wq->sq);
2010 - if (ret)
2011 - goto free_hwaddr;
2012 + ret = alloc_sq(rdev, &wq->sq, user);
2013 + if (ret)
2014 + goto free_hwaddr;
2015 memset(wq->sq.queue, 0, wq->sq.memsize);
2016 dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);
2017
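The cxgb4 hunk above folds the nested on-chip/host allocation branches into one alloc_sq() helper: try the preferred allocator when permitted, fall back to the ordinary one on any failure, and hand the caller a single return code. The same shape in a self-contained sketch with dummy allocators:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for alloc_oc_sq()/alloc_host_sq(); 0 means success. */
static int alloc_fast(void)  { return -ENOMEM; /* pretend on-chip memory is full */ }
static int alloc_plain(void) { return 0; }

/*
 * Try the fast path only when allowed; any failure (including "not
 * attempted") falls through to the plain path, so callers see exactly
 * one error path instead of duplicated goto chains.
 */
static int alloc_sq(int want_fast)
{
    int ret = -ENOSYS;          /* "not attempted" sentinel */

    if (want_fast)
        ret = alloc_fast();
    if (ret)
        ret = alloc_plain();
    return ret;
}

int main(void)
{
    printf("user alloc:   %d\n", alloc_sq(1));
    printf("kernel alloc: %d\n", alloc_sq(0));
    return 0;
}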
2018 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
2019 index 4647b50..4c867f4 100644
2020 --- a/drivers/iommu/amd_iommu.c
2021 +++ b/drivers/iommu/amd_iommu.c
2022 @@ -3948,6 +3948,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
2023 if (!table)
2024 goto out;
2025
2026 + /* Initialize table spin-lock */
2027 + spin_lock_init(&table->lock);
2028 +
2029 if (ioapic)
2030 /* Keep the first 32 indexes free for IOAPIC interrupts */
2031 table->min_index = 32;
2032 diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
2033 index f2fdbb7..82c63ac 100644
2034 --- a/drivers/net/ethernet/ibm/ibmveth.c
2035 +++ b/drivers/net/ethernet/ibm/ibmveth.c
2036 @@ -1326,7 +1326,7 @@ static const struct net_device_ops ibmveth_netdev_ops = {
2037
2038 static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
2039 {
2040 - int rc, i;
2041 + int rc, i, mac_len;
2042 struct net_device *netdev;
2043 struct ibmveth_adapter *adapter;
2044 unsigned char *mac_addr_p;
2045 @@ -1336,11 +1336,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
2046 dev->unit_address);
2047
2048 mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
2049 - NULL);
2050 + &mac_len);
2051 if (!mac_addr_p) {
2052 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
2053 return -EINVAL;
2054 }
2055 + /* Workaround for old/broken pHyp */
2056 + if (mac_len == 8)
2057 + mac_addr_p += 2;
2058 + else if (mac_len != 6) {
2059 + dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
2060 + mac_len);
2061 + return -EINVAL;
2062 + }
2063
2064 mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev,
2065 VETH_MCAST_FILTER_SIZE, NULL);
2066 @@ -1365,17 +1373,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
2067
2068 netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
2069
2070 - /*
2071 - * Some older boxes running PHYP non-natively have an OF that returns
2072 - * a 8-byte local-mac-address field (and the first 2 bytes have to be
2073 - * ignored) while newer boxes' OF return a 6-byte field. Note that
2074 - * IEEE 1275 specifies that local-mac-address must be a 6-byte field.
2075 - * The RPA doc specifies that the first byte must be 10b, so we'll
2076 - * just look for it to solve this 8 vs. 6 byte field issue
2077 - */
2078 - if ((*mac_addr_p & 0x3) != 0x02)
2079 - mac_addr_p += 2;
2080 -
2081 adapter->mac_addr = 0;
2082 memcpy(&adapter->mac_addr, mac_addr_p, 6);
2083
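The ibmveth hunk above swaps the old address-bit heuristic for a length check on the VETH_MAC_ADDR attribute: an 8-byte value from old pHyp firmware has two pad bytes to skip, 6 bytes is used directly, and anything else is rejected. Sketched standalone (the buffer contents are made up):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAC_LEN 6

/*
 * Trust the reported attribute length instead of sniffing address bits:
 * 8 bytes means two pad bytes precede the real address, 6 bytes is used
 * as-is, anything else is malformed.
 */
static int copy_mac(const unsigned char *attr, int attr_len,
                    unsigned char out[MAC_LEN])
{
    if (attr_len == 8)
        attr += 2;              /* old firmware: skip the 2 pad bytes */
    else if (attr_len != 6)
        return -EINVAL;         /* wrong length: reject */

    memcpy(out, attr, MAC_LEN);
    return 0;
}

int main(void)
{
    const unsigned char padded[8] = { 0, 0, 0x02, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE };
    unsigned char mac[MAC_LEN];

    if (copy_mac(padded, sizeof(padded), mac) == 0)
        printf("mac starts %02x:%02x\n", mac[0], mac[1]);
    return 0;
}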
2084 diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
2085 index fd4772a..522fb10 100644
2086 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c
2087 +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
2088 @@ -35,6 +35,7 @@
2089 #include <linux/slab.h>
2090 #include <linux/delay.h>
2091 #include <linux/vmalloc.h>
2092 +#include <linux/pm_runtime.h>
2093
2094 #include "e1000.h"
2095
2096 @@ -2053,7 +2054,19 @@ static int e1000_get_rxnfc(struct net_device *netdev,
2097 }
2098 }
2099
2100 +static int e1000e_ethtool_begin(struct net_device *netdev)
2101 +{
2102 + return pm_runtime_get_sync(netdev->dev.parent);
2103 +}
2104 +
2105 +static void e1000e_ethtool_complete(struct net_device *netdev)
2106 +{
2107 + pm_runtime_put_sync(netdev->dev.parent);
2108 +}
2109 +
2110 static const struct ethtool_ops e1000_ethtool_ops = {
2111 + .begin = e1000e_ethtool_begin,
2112 + .complete = e1000e_ethtool_complete,
2113 .get_settings = e1000_get_settings,
2114 .set_settings = e1000_set_settings,
2115 .get_drvinfo = e1000_get_drvinfo,
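The ethtool.c hunk above hooks .begin/.complete so every ethtool operation runs with a runtime-PM reference held, keeping the device out of runtime suspend while its registers are touched. A toy model of that bracketing, with a plain reference count standing in for pm_runtime_get_sync()/pm_runtime_put_sync():

#include <stdio.h>

/* Toy runtime-PM reference count; zero means the device may sleep. */
static int pm_refs;

static int  dev_get(void) { if (pm_refs++ == 0) puts("device resumed");   return 0; }
static void dev_put(void) { if (--pm_refs == 0) puts("device suspended"); }

/* ethtool-style bracket: every op runs with the device guaranteed awake. */
static int  op_begin(void)    { return dev_get(); }
static void op_complete(void) { dev_put(); }

static void some_ethtool_op(void)
{
    puts("touching registers safely");
}

int main(void)
{
    if (op_begin() == 0) {
        some_ethtool_op();
        op_complete();
    }
    return 0;
}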
2116 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2117 index 1f93880..8692eca 100644
2118 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
2119 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2120 @@ -3952,6 +3952,7 @@ static int e1000_open(struct net_device *netdev)
2121 netif_start_queue(netdev);
2122
2123 adapter->idle_check = true;
2124 + hw->mac.get_link_status = true;
2125 pm_runtime_put(&pdev->dev);
2126
2127 /* fire a link status change interrupt to start the watchdog */
2128 @@ -4312,6 +4313,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2129 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
2130 int ret_val;
2131
2132 + pm_runtime_get_sync(&adapter->pdev->dev);
2133 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
2134 ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
2135 ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
2136 @@ -4322,6 +4324,7 @@ static void e1000_phy_read_status(struct e1000_adapter *adapter)
2137 ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
2138 if (ret_val)
2139 e_warn("Error reading PHY register\n");
2140 + pm_runtime_put_sync(&adapter->pdev->dev);
2141 } else {
2142 /* Do not read PHY registers if link is not up
2143 * Set values to typical power-on defaults
2144 @@ -5450,8 +5453,7 @@ release:
2145 return retval;
2146 }
2147
2148 -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
2149 - bool runtime)
2150 +static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
2151 {
2152 struct net_device *netdev = pci_get_drvdata(pdev);
2153 struct e1000_adapter *adapter = netdev_priv(netdev);
2154 @@ -5475,10 +5477,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
2155 }
2156 e1000e_reset_interrupt_capability(adapter);
2157
2158 - retval = pci_save_state(pdev);
2159 - if (retval)
2160 - return retval;
2161 -
2162 status = er32(STATUS);
2163 if (status & E1000_STATUS_LU)
2164 wufc &= ~E1000_WUFC_LNKC;
2165 @@ -5534,13 +5532,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
2166 ew32(WUFC, 0);
2167 }
2168
2169 - *enable_wake = !!wufc;
2170 -
2171 - /* make sure adapter isn't asleep if manageability is enabled */
2172 - if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
2173 - (hw->mac.ops.check_mng_mode(hw)))
2174 - *enable_wake = true;
2175 -
2176 if (adapter->hw.phy.type == e1000_phy_igp_3)
2177 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
2178
2179 @@ -5551,26 +5542,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
2180
2181 pci_clear_master(pdev);
2182
2183 - return 0;
2184 -}
2185 -
2186 -static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
2187 -{
2188 - if (sleep && wake) {
2189 - pci_prepare_to_sleep(pdev);
2190 - return;
2191 - }
2192 -
2193 - pci_wake_from_d3(pdev, wake);
2194 - pci_set_power_state(pdev, PCI_D3hot);
2195 -}
2196 -
2197 -static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
2198 - bool wake)
2199 -{
2200 - struct net_device *netdev = pci_get_drvdata(pdev);
2201 - struct e1000_adapter *adapter = netdev_priv(netdev);
2202 -
2203 /* The pci-e switch on some quad port adapters will report a
2204 * correctable error when the MAC transitions from D0 to D3. To
2205 * prevent this we need to mask off the correctable errors on the
2206 @@ -5584,12 +5555,13 @@ static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
2207 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
2208 (devctl & ~PCI_EXP_DEVCTL_CERE));
2209
2210 - e1000_power_off(pdev, sleep, wake);
2211 + pci_save_state(pdev);
2212 + pci_prepare_to_sleep(pdev);
2213
2214 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
2215 - } else {
2216 - e1000_power_off(pdev, sleep, wake);
2217 }
2218 +
2219 + return 0;
2220 }
2221
2222 #ifdef CONFIG_PCIEASPM
2223 @@ -5640,9 +5612,7 @@ static int __e1000_resume(struct pci_dev *pdev)
2224 if (aspm_disable_flag)
2225 e1000e_disable_aspm(pdev, aspm_disable_flag);
2226
2227 - pci_set_power_state(pdev, PCI_D0);
2228 - pci_restore_state(pdev);
2229 - pci_save_state(pdev);
2230 + pci_set_master(pdev);
2231
2232 e1000e_set_interrupt_capability(adapter);
2233 if (netif_running(netdev)) {
2234 @@ -5708,14 +5678,8 @@ static int __e1000_resume(struct pci_dev *pdev)
2235 static int e1000_suspend(struct device *dev)
2236 {
2237 struct pci_dev *pdev = to_pci_dev(dev);
2238 - int retval;
2239 - bool wake;
2240 -
2241 - retval = __e1000_shutdown(pdev, &wake, false);
2242 - if (!retval)
2243 - e1000_complete_shutdown(pdev, true, wake);
2244
2245 - return retval;
2246 + return __e1000_shutdown(pdev, false);
2247 }
2248
2249 static int e1000_resume(struct device *dev)
2250 @@ -5738,13 +5702,10 @@ static int e1000_runtime_suspend(struct device *dev)
2251 struct net_device *netdev = pci_get_drvdata(pdev);
2252 struct e1000_adapter *adapter = netdev_priv(netdev);
2253
2254 - if (e1000e_pm_ready(adapter)) {
2255 - bool wake;
2256 -
2257 - __e1000_shutdown(pdev, &wake, true);
2258 - }
2259 + if (!e1000e_pm_ready(adapter))
2260 + return 0;
2261
2262 - return 0;
2263 + return __e1000_shutdown(pdev, true);
2264 }
2265
2266 static int e1000_idle(struct device *dev)
2267 @@ -5782,12 +5743,7 @@ static int e1000_runtime_resume(struct device *dev)
2268
2269 static void e1000_shutdown(struct pci_dev *pdev)
2270 {
2271 - bool wake = false;
2272 -
2273 - __e1000_shutdown(pdev, &wake, false);
2274 -
2275 - if (system_state == SYSTEM_POWER_OFF)
2276 - e1000_complete_shutdown(pdev, false, wake);
2277 + __e1000_shutdown(pdev, false);
2278 }
2279
2280 #ifdef CONFIG_NET_POLL_CONTROLLER
2281 @@ -5908,9 +5864,9 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
2282 "Cannot re-enable PCI device after reset.\n");
2283 result = PCI_ERS_RESULT_DISCONNECT;
2284 } else {
2285 - pci_set_master(pdev);
2286 pdev->state_saved = true;
2287 pci_restore_state(pdev);
2288 + pci_set_master(pdev);
2289
2290 pci_enable_wake(pdev, PCI_D3hot, 0);
2291 pci_enable_wake(pdev, PCI_D3cold, 0);
2292 @@ -6341,7 +6297,11 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2293
2294 /* initialize the wol settings based on the eeprom settings */
2295 adapter->wol = adapter->eeprom_wol;
2296 - device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
2297 +
2298 + /* make sure adapter isn't asleep if manageability is enabled */
2299 + if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
2300 + (hw->mac.ops.check_mng_mode(hw)))
2301 + device_wakeup_enable(&pdev->dev);
2302
2303 /* save off EEPROM version number */
2304 e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
2305 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
2306 index 2d849da..2d56d71 100644
2307 --- a/drivers/net/ethernet/realtek/r8169.c
2308 +++ b/drivers/net/ethernet/realtek/r8169.c
2309 @@ -5779,6 +5779,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
2310 goto err_stop_0;
2311 }
2312
2313 + /* 8168evl does not automatically pad to minimum length. */
2314 + if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
2315 + skb->len < ETH_ZLEN)) {
2316 + if (skb_padto(skb, ETH_ZLEN))
2317 + goto err_update_stats;
2318 + skb_put(skb, ETH_ZLEN - skb->len);
2319 + }
2320 +
2321 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
2322 goto err_stop_0;
2323
2324 @@ -5850,6 +5858,7 @@ err_dma_1:
2325 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
2326 err_dma_0:
2327 dev_kfree_skb(skb);
2328 +err_update_stats:
2329 dev->stats.tx_dropped++;
2330 return NETDEV_TX_OK;
2331
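The r8169 hunk above zero-pads frames shorter than ETH_ZLEN before queueing them, because the 8168evl MAC will not pad to the Ethernet minimum itself. The padding step in isolation, on a plain byte buffer rather than an skb:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60   /* minimum Ethernet frame length, excluding FCS */

/*
 * If the frame is shorter than ETH_ZLEN, extend it with zero bytes before
 * handing it to hardware that does not auto-pad. `buf` must have room for
 * ETH_ZLEN bytes; returns the length to transmit.
 */
static size_t pad_to_min(unsigned char *buf, size_t len)
{
    if (len < ETH_ZLEN) {
        memset(buf + len, 0, ETH_ZLEN - len);  /* zero-fill the tail */
        len = ETH_ZLEN;
    }
    return len;
}

int main(void)
{
    unsigned char frame[ETH_ZLEN] = { 0xAA, 0xBB };
    printf("tx length: %zu\n", pad_to_min(frame, 2));
    return 0;
}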
2332 diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
2333 index f79cbcd..8c1ecc5 100644
2334 --- a/drivers/pci/pci-driver.c
2335 +++ b/drivers/pci/pci-driver.c
2336 @@ -628,6 +628,7 @@ static int pci_pm_suspend(struct device *dev)
2337 goto Fixup;
2338 }
2339
2340 + pci_dev->state_saved = false;
2341 if (pm->suspend) {
2342 pci_power_t prev = pci_dev->current_state;
2343 int error;
2344 @@ -774,6 +775,7 @@ static int pci_pm_freeze(struct device *dev)
2345 return 0;
2346 }
2347
2348 + pci_dev->state_saved = false;
2349 if (pm->freeze) {
2350 int error;
2351
2352 @@ -862,6 +864,7 @@ static int pci_pm_poweroff(struct device *dev)
2353 goto Fixup;
2354 }
2355
2356 + pci_dev->state_saved = false;
2357 if (pm->poweroff) {
2358 int error;
2359
2360 @@ -987,6 +990,7 @@ static int pci_pm_runtime_suspend(struct device *dev)
2361 if (!pm || !pm->runtime_suspend)
2362 return -ENOSYS;
2363
2364 + pci_dev->state_saved = false;
2365 pci_dev->no_d3cold = false;
2366 error = pm->runtime_suspend(dev);
2367 suspend_report_result(pm->runtime_suspend, error);
2368 diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c
2369 index 83b21d9..0c644e7 100644
2370 --- a/drivers/pwm/pwm-spear.c
2371 +++ b/drivers/pwm/pwm-spear.c
2372 @@ -143,7 +143,7 @@ static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
2373 u32 val;
2374
2375 rc = clk_enable(pc->clk);
2376 - if (!rc)
2377 + if (rc)
2378 return rc;
2379
2380 val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR);
2381 @@ -209,12 +209,12 @@ static int spear_pwm_probe(struct platform_device *pdev)
2382 pc->chip.npwm = NUM_PWM;
2383
2384 ret = clk_prepare(pc->clk);
2385 - if (!ret)
2386 + if (ret)
2387 return ret;
2388
2389 if (of_device_is_compatible(np, "st,spear1340-pwm")) {
2390 ret = clk_enable(pc->clk);
2391 - if (!ret) {
2392 + if (ret) {
2393 clk_unprepare(pc->clk);
2394 return ret;
2395 }
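The pwm-spear hunk above is a pure polarity fix: clk_prepare()/clk_enable() return 0 on success, so the error path must be taken when the return code is non-zero, not when it is zero. The corrected idiom, demonstrated with a stub clock:

#include <errno.h>
#include <stdio.h>

/* 0 = success, negative errno = failure, mirroring clk_prepare()/clk_enable(). */
static int fake_clk_enable(int should_fail)
{
    return should_fail ? -EIO : 0;
}

/* The fixed pattern: bail out when the return code is NON-zero. */
static int start_block(int should_fail)
{
    int ret = fake_clk_enable(should_fail);
    if (ret)            /* the bug was `if (!ret)`, which bailed on success */
        return ret;

    puts("clock running, programming the block");
    return 0;
}

int main(void)
{
    printf("good path: %d\n", start_block(0));
    printf("fail path: %d\n", start_block(1));
    return 0;
}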
2396 diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
2397 index f2845f1..2573cf4 100644
2398 --- a/drivers/usb/host/xhci-ring.c
2399 +++ b/drivers/usb/host/xhci-ring.c
2400 @@ -2461,14 +2461,21 @@ static int handle_tx_event(struct xhci_hcd *xhci,
2401 * TD list.
2402 */
2403 if (list_empty(&ep_ring->td_list)) {
2404 - xhci_warn(xhci, "WARN Event TRB for slot %d ep %d "
2405 - "with no TDs queued?\n",
2406 - TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2407 - ep_index);
2408 - xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2409 - (le32_to_cpu(event->flags) &
2410 - TRB_TYPE_BITMASK)>>10);
2411 - xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2412 + /*
2413 + * A stopped endpoint may generate an extra completion
2414 + * event if the device was suspended. Don't print
2415 + * warnings.
2416 + */
2417 + if (!(trb_comp_code == COMP_STOP ||
2418 + trb_comp_code == COMP_STOP_INVAL)) {
2419 + xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
2420 + TRB_TO_SLOT_ID(le32_to_cpu(event->flags)),
2421 + ep_index);
2422 + xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
2423 + (le32_to_cpu(event->flags) &
2424 + TRB_TYPE_BITMASK)>>10);
2425 + xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
2426 + }
2427 if (ep->skip) {
2428 ep->skip = false;
2429 xhci_dbg(xhci, "td_list is empty while skip "
2430 diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
2431 index 01443ce..13ddec9 100644
2432 --- a/fs/autofs4/expire.c
2433 +++ b/fs/autofs4/expire.c
2434 @@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry)
2435 /* This is an autofs submount, we can't expire it */
2436 if (autofs_type_indirect(sbi->type))
2437 goto done;
2438 -
2439 - /*
2440 - * Otherwise it's an offset mount and we need to check
2441 - * if we can umount its mount, if there is one.
2442 - */
2443 - if (!d_mountpoint(path.dentry)) {
2444 - status = 0;
2445 - goto done;
2446 - }
2447 }
2448
2449 /* Update the expiry counter if fs is busy */
2450 diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
2451 index ae94117..105b265 100644
2452 --- a/fs/btrfs/delayed-ref.c
2453 +++ b/fs/btrfs/delayed-ref.c
2454 @@ -36,16 +36,19 @@
2455 * compare two delayed tree backrefs with same bytenr and type
2456 */
2457 static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2,
2458 - struct btrfs_delayed_tree_ref *ref1)
2459 + struct btrfs_delayed_tree_ref *ref1, int type)
2460 {
2461 - if (ref1->root < ref2->root)
2462 - return -1;
2463 - if (ref1->root > ref2->root)
2464 - return 1;
2465 - if (ref1->parent < ref2->parent)
2466 - return -1;
2467 - if (ref1->parent > ref2->parent)
2468 - return 1;
2469 + if (type == BTRFS_TREE_BLOCK_REF_KEY) {
2470 + if (ref1->root < ref2->root)
2471 + return -1;
2472 + if (ref1->root > ref2->root)
2473 + return 1;
2474 + } else {
2475 + if (ref1->parent < ref2->parent)
2476 + return -1;
2477 + if (ref1->parent > ref2->parent)
2478 + return 1;
2479 + }
2480 return 0;
2481 }
2482
2483 @@ -109,7 +112,8 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
2484 if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
2485 ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
2486 return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
2487 - btrfs_delayed_node_to_tree_ref(ref1));
2488 + btrfs_delayed_node_to_tree_ref(ref1),
2489 + ref1->type);
2490 } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY ||
2491 ref1->type == BTRFS_SHARED_DATA_REF_KEY) {
2492 return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2),
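The delayed-ref hunk above makes the comparator type-aware: tree-block refs are keyed by root and shared-block refs by parent, instead of always comparing both fields, one of which is meaningless for the given type. A minimal model of a comparator that switches key fields on type:

#include <stdio.h>

enum ref_type { TREE_BLOCK_REF, SHARED_BLOCK_REF };

struct tree_ref {
    unsigned long long root;    /* meaningful for TREE_BLOCK_REF   */
    unsigned long long parent;  /* meaningful for SHARED_BLOCK_REF */
};

/*
 * Only the field that is actually valid for the given type participates
 * in the ordering; comparing the stale field would interleave unrelated
 * refs and break the sorted structure.
 */
static int comp_tree_refs(const struct tree_ref *a, const struct tree_ref *b,
                          enum ref_type type)
{
    if (type == TREE_BLOCK_REF) {
        if (a->root != b->root)
            return a->root < b->root ? -1 : 1;
    } else {
        if (a->parent != b->parent)
            return a->parent < b->parent ? -1 : 1;
    }
    return 0;
}

int main(void)
{
    struct tree_ref a = { .root = 5, .parent = 0 };
    struct tree_ref b = { .root = 5, .parent = 9 };

    /* equal as tree-block refs, ordered as shared-block refs */
    printf("%d %d\n", comp_tree_refs(&a, &b, TREE_BLOCK_REF),
                      comp_tree_refs(&a, &b, SHARED_BLOCK_REF));
    return 0;
}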
2493 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
2494 index 7c4e6cc..4b5398c 100644
2495 --- a/fs/btrfs/inode.c
2496 +++ b/fs/btrfs/inode.c
2497 @@ -5794,7 +5794,9 @@ out:
2498 * block must be cow'd
2499 */
2500 static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
2501 - struct inode *inode, u64 offset, u64 len)
2502 + struct inode *inode, u64 offset, u64 *len,
2503 + u64 *orig_start, u64 *orig_block_len,
2504 + u64 *ram_bytes)
2505 {
2506 struct btrfs_path *path;
2507 int ret;
2508 @@ -5851,8 +5853,12 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
2509 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2510 backref_offset = btrfs_file_extent_offset(leaf, fi);
2511
2512 + *orig_start = key.offset - backref_offset;
2513 + *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
2514 + *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
2515 +
2516 extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
2517 - if (extent_end < offset + len) {
2518 + if (extent_end < offset + *len) {
2519 /* extent doesn't include our full range, must cow */
2520 goto out;
2521 }
2522 @@ -5876,13 +5882,14 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans,
2523 */
2524 disk_bytenr += backref_offset;
2525 disk_bytenr += offset - key.offset;
2526 - num_bytes = min(offset + len, extent_end) - offset;
2527 + num_bytes = min(offset + *len, extent_end) - offset;
2528 if (csum_exist_in_range(root, disk_bytenr, num_bytes))
2529 goto out;
2530 /*
2531 * all of the above have passed, it is safe to overwrite this extent
2532 * without cow
2533 */
2534 + *len = num_bytes;
2535 ret = 1;
2536 out:
2537 btrfs_free_path(path);
2538 @@ -6092,7 +6099,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
2539 em->block_start != EXTENT_MAP_HOLE)) {
2540 int type;
2541 int ret;
2542 - u64 block_start;
2543 + u64 block_start, orig_start, orig_block_len, ram_bytes;
2544
2545 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
2546 type = BTRFS_ORDERED_PREALLOC;
2547 @@ -6110,10 +6117,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
2548 if (IS_ERR(trans))
2549 goto must_cow;
2550
2551 - if (can_nocow_odirect(trans, inode, start, len) == 1) {
2552 - u64 orig_start = em->orig_start;
2553 - u64 orig_block_len = em->orig_block_len;
2554 -
2555 + if (can_nocow_odirect(trans, inode, start, &len, &orig_start,
2556 + &orig_block_len, &ram_bytes) == 1) {
2557 if (type == BTRFS_ORDERED_PREALLOC) {
2558 free_extent_map(em);
2559 em = create_pinned_em(inode, start, len,
2560 diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
2561 index a2fe9f5..0cfa2f4 100644
2562 --- a/fs/ext4/resize.c
2563 +++ b/fs/ext4/resize.c
2564 @@ -1880,6 +1880,10 @@ retry:
2565 return 0;
2566
2567 ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset);
2568 + if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) {
2569 + ext4_warning(sb, "resize would cause inodes_count overflow");
2570 + return -EINVAL;
2571 + }
2572 ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset);
2573
2574 n_desc_blocks = num_desc_blocks(sb, n_group + 1);
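The ext4 hunk above guards the resize against a 32-bit overflow of inodes_count by bounding the new group count with 0xFFFFFFFF / EXT4_INODES_PER_GROUP(sb), i.e. dividing the limit instead of multiplying the operands. The guard in isolation:

#include <stdint.h>
#include <stdio.h>

/*
 * Before computing groups * inodes_per_group into a 32-bit counter,
 * reject any group count that would push the product past UINT32_MAX;
 * dividing the limit avoids performing the overflowing multiply at all.
 */
static int check_inode_count(uint64_t n_groups, uint32_t inodes_per_group)
{
    if (n_groups > UINT32_MAX / inodes_per_group)
        return -1;                       /* would overflow inodes_count */
    return 0;
}

int main(void)
{
    printf("%d\n", check_inode_count(1000, 8192));     /* fits      */
    printf("%d\n", check_inode_count(600000, 8192));   /* overflows */
    return 0;
}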
2575 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
2576 index ccee8cc..d0de769 100644
2577 --- a/fs/hugetlbfs/inode.c
2578 +++ b/fs/hugetlbfs/inode.c
2579 @@ -908,19 +908,19 @@ static int can_do_hugetlb_shm(void)
2580
2581 static int get_hstate_idx(int page_size_log)
2582 {
2583 - struct hstate *h;
2584 + struct hstate *h = hstate_sizelog(page_size_log);
2585
2586 - if (!page_size_log)
2587 - return default_hstate_idx;
2588 - h = size_to_hstate(1 << page_size_log);
2589 if (!h)
2590 return -1;
2591 return h - hstates;
2592 }
2593
2594 -struct file *hugetlb_file_setup(const char *name, unsigned long addr,
2595 - size_t size, vm_flags_t acctflag,
2596 - struct user_struct **user,
2597 +/*
2598 + * Note that size should be aligned to proper hugepage size in caller side,
2599 + * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
2600 + */
2601 +struct file *hugetlb_file_setup(const char *name, size_t size,
2602 + vm_flags_t acctflag, struct user_struct **user,
2603 int creat_flags, int page_size_log)
2604 {
2605 int error = -ENOMEM;
2606 @@ -929,8 +929,6 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
2607 struct path path;
2608 struct dentry *root;
2609 struct qstr quick_string;
2610 - struct hstate *hstate;
2611 - unsigned long num_pages;
2612 int hstate_idx;
2613
2614 hstate_idx = get_hstate_idx(page_size_log);
2615 @@ -969,12 +967,10 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr,
2616 if (!inode)
2617 goto out_dentry;
2618
2619 - hstate = hstate_inode(inode);
2620 - size += addr & ~huge_page_mask(hstate);
2621 - num_pages = ALIGN(size, huge_page_size(hstate)) >>
2622 - huge_page_shift(hstate);
2623 error = -ENOMEM;
2624 - if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag))
2625 + if (hugetlb_reserve_pages(inode, 0,
2626 + size >> huge_page_shift(hstate_inode(inode)), NULL,
2627 + acctflag))
2628 goto out_inode;
2629
2630 d_instantiate(path.dentry, inode);
2631 diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
2632 index 51d53a4..e3c6121 100644
2633 --- a/fs/nfs/nfs4proc.c
2634 +++ b/fs/nfs/nfs4proc.c
2635 @@ -4513,9 +4513,9 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
2636 if (status != 0)
2637 goto out;
2638 /* Is this a delegated lock? */
2639 - if (test_bit(NFS_DELEGATED_STATE, &state->flags))
2640 - goto out;
2641 lsp = request->fl_u.nfs4_fl.owner;
2642 + if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0)
2643 + goto out;
2644 seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL);
2645 status = -ENOMEM;
2646 if (seqid == NULL)
2647 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
2648 index fad21c9..881fb15 100644
2649 --- a/include/drm/drmP.h
2650 +++ b/include/drm/drmP.h
2651 @@ -1559,9 +1559,8 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s
2652
2653 void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv);
2654 void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv);
2655 -int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);
2656 -int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
2657 -void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
2658 +int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle);
2659 +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf);
2660
2661 int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj);
2662 int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf,
2663 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
2664 index 1cdbfe9..d7da55c 100644
2665 --- a/include/drm/drm_pciids.h
2666 +++ b/include/drm/drm_pciids.h
2667 @@ -227,6 +227,7 @@
2668 {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
2669 {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2670 {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2671 + {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2672 {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2673 {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2674 {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2675 @@ -234,11 +235,13 @@
2676 {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2677 {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2678 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2679 + {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2680 {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2681 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2682 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2683 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2684 {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
2685 + {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2686 {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2687 {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2688 {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
2689 @@ -590,6 +593,8 @@
2690 {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2691 {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2692 {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2693 + {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2694 + {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2695 {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2696 {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2697 {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
2698 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
2699 index f94bc83..1dbdb1a 100644
2700 --- a/include/linux/blkdev.h
2701 +++ b/include/linux/blkdev.h
2702 @@ -836,7 +836,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
2703 unsigned int cmd_flags)
2704 {
2705 if (unlikely(cmd_flags & REQ_DISCARD))
2706 - return q->limits.max_discard_sectors;
2707 + return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
2708
2709 if (unlikely(cmd_flags & REQ_WRITE_SAME))
2710 return q->limits.max_write_same_sectors;
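The blkdev.h hunk above clamps the advertised discard limit to UINT_MAX >> 9 sectors so that converting the request size to bytes (a left shift by 9) cannot overflow a 32-bit byte count. The same clamp standalone:

#include <stdint.h>
#include <stdio.h>

/*
 * Request sizes are tracked in bytes in a 32-bit field, so a sector
 * count above UINT32_MAX >> 9 would overflow once shifted into bytes;
 * taking the min keeps the advertised limit representable.
 */
static uint32_t max_discard_sectors(uint32_t hw_limit_sectors)
{
    uint32_t cap = UINT32_MAX >> 9;  /* largest sector count whose byte size fits */
    return hw_limit_sectors < cap ? hw_limit_sectors : cap;
}

int main(void)
{
    printf("%u\n", max_discard_sectors(1u << 30));  /* clamped to 8388607 */
    printf("%u\n", max_discard_sectors(65536));     /* passed through     */
    return 0;
}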
2711 diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
2712 index 0c80d3f..db695d5 100644
2713 --- a/include/linux/hugetlb.h
2714 +++ b/include/linux/hugetlb.h
2715 @@ -185,8 +185,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
2716
2717 extern const struct file_operations hugetlbfs_file_operations;
2718 extern const struct vm_operations_struct hugetlb_vm_ops;
2719 -struct file *hugetlb_file_setup(const char *name, unsigned long addr,
2720 - size_t size, vm_flags_t acct,
2721 +struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
2722 struct user_struct **user, int creat_flags,
2723 int page_size_log);
2724
2725 @@ -205,8 +204,8 @@ static inline int is_file_hugepages(struct file *file)
2726
2727 #define is_file_hugepages(file) 0
2728 static inline struct file *
2729 -hugetlb_file_setup(const char *name, unsigned long addr, size_t size,
2730 - vm_flags_t acctflag, struct user_struct **user, int creat_flags,
2731 +hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
2732 + struct user_struct **user, int creat_flags,
2733 int page_size_log)
2734 {
2735 return ERR_PTR(-ENOSYS);
2736 @@ -284,6 +283,13 @@ static inline struct hstate *hstate_file(struct file *f)
2737 return hstate_inode(f->f_dentry->d_inode);
2738 }
2739
2740 +static inline struct hstate *hstate_sizelog(int page_size_log)
2741 +{
2742 + if (!page_size_log)
2743 + return &default_hstate;
2744 + return size_to_hstate(1 << page_size_log);
2745 +}
2746 +
2747 static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
2748 {
2749 return hstate_file(vma->vm_file);
2750 @@ -348,11 +354,12 @@ static inline int hstate_index(struct hstate *h)
2751 return h - hstates;
2752 }
2753
2754 -#else
2755 +#else /* CONFIG_HUGETLB_PAGE */
2756 struct hstate {};
2757 #define alloc_huge_page_node(h, nid) NULL
2758 #define alloc_bootmem_huge_page(h) NULL
2759 #define hstate_file(f) NULL
2760 +#define hstate_sizelog(s) NULL
2761 #define hstate_vma(v) NULL
2762 #define hstate_inode(i) NULL
2763 #define huge_page_size(h) PAGE_SIZE
2764 @@ -367,6 +374,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h)
2765 }
2766 #define hstate_index_to_shift(index) 0
2767 #define hstate_index(h) 0
2768 -#endif
2769 +#endif /* CONFIG_HUGETLB_PAGE */
2770
2771 #endif /* _LINUX_HUGETLB_H */
2772 diff --git a/ipc/shm.c b/ipc/shm.c
2773 index 9bab650..9ec2316 100644
2774 --- a/ipc/shm.c
2775 +++ b/ipc/shm.c
2776 @@ -491,10 +491,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
2777
2778 sprintf (name, "SYSV%08x", key);
2779 if (shmflg & SHM_HUGETLB) {
2780 + struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
2781 + & SHM_HUGE_MASK);
2782 + size_t hugesize = ALIGN(size, huge_page_size(hs));
2783 +
2784 /* hugetlb_file_setup applies strict accounting */
2785 if (shmflg & SHM_NORESERVE)
2786 acctflag = VM_NORESERVE;
2787 - file = hugetlb_file_setup(name, 0, size, acctflag,
2788 + file = hugetlb_file_setup(name, hugesize, acctflag,
2789 &shp->mlock_user, HUGETLB_SHMFS_INODE,
2790 (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
2791 } else {
2792 diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
2793 index 642a89c..a291aa2 100644
2794 --- a/kernel/audit_tree.c
2795 +++ b/kernel/audit_tree.c
2796 @@ -617,9 +617,9 @@ void audit_trim_trees(void)
2797 }
2798 spin_unlock(&hash_lock);
2799 trim_marked(tree);
2800 - put_tree(tree);
2801 drop_collected_mounts(root_mnt);
2802 skip_it:
2803 + put_tree(tree);
2804 mutex_lock(&audit_filter_mutex);
2805 }
2806 list_del(&cursor);
2807 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
2808 index 1c82852..55a9d05 100644
2809 --- a/kernel/trace/trace.c
2810 +++ b/kernel/trace/trace.c
2811 @@ -5020,36 +5020,32 @@ void trace_init_global_iter(struct trace_iterator *iter)
2812 iter->cpu_file = TRACE_PIPE_ALL_CPU;
2813 }
2814
2815 -static void
2816 -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
2817 +void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
2818 {
2819 - static arch_spinlock_t ftrace_dump_lock =
2820 - (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
2821 /* use static because iter can be a bit big for the stack */
2822 static struct trace_iterator iter;
2823 + static atomic_t dump_running;
2824 unsigned int old_userobj;
2825 - static int dump_ran;
2826 unsigned long flags;
2827 int cnt = 0, cpu;
2828
2829 - /* only one dump */
2830 - local_irq_save(flags);
2831 - arch_spin_lock(&ftrace_dump_lock);
2832 - if (dump_ran)
2833 - goto out;
2834 -
2835 - dump_ran = 1;
2836 + /* Only allow one dump user at a time. */
2837 + if (atomic_inc_return(&dump_running) != 1) {
2838 + atomic_dec(&dump_running);
2839 + return;
2840 + }
2841
2842 + /*
2843 + * Always turn off tracing when we dump.
2844 + * We don't need to show trace output of what happens
2845 + * between multiple crashes.
2846 + *
2847 + * If the user does a sysrq-z, then they can re-enable
2848 + * tracing with echo 1 > tracing_on.
2849 + */
2850 tracing_off();
2851
2852 - /* Did function tracer already get disabled? */
2853 - if (ftrace_is_dead()) {
2854 - printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2855 - printk("# MAY BE MISSING FUNCTION EVENTS\n");
2856 - }
2857 -
2858 - if (disable_tracing)
2859 - ftrace_kill();
2860 + local_irq_save(flags);
2861
2862 trace_init_global_iter(&iter);
2863
2864 @@ -5082,6 +5078,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
2865
2866 printk(KERN_TRACE "Dumping ftrace buffer:\n");
2867
2868 + /* Did function tracer already get disabled? */
2869 + if (ftrace_is_dead()) {
2870 + printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2871 + printk("# MAY BE MISSING FUNCTION EVENTS\n");
2872 + }
2873 +
2874 /*
2875 * We need to stop all tracing on all CPUS to read the
2876 * the next buffer. This is a bit expensive, but is
2877 @@ -5121,26 +5123,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
2878 printk(KERN_TRACE "---------------------------------\n");
2879
2880 out_enable:
2881 - /* Re-enable tracing if requested */
2882 - if (!disable_tracing) {
2883 - trace_flags |= old_userobj;
2884 + trace_flags |= old_userobj;
2885
2886 - for_each_tracing_cpu(cpu) {
2887 - atomic_dec(&iter.tr->data[cpu]->disabled);
2888 - }
2889 - tracing_on();
2890 + for_each_tracing_cpu(cpu) {
2891 + atomic_dec(&iter.tr->data[cpu]->disabled);
2892 }
2893 -
2894 - out:
2895 - arch_spin_unlock(&ftrace_dump_lock);
2896 + atomic_dec(&dump_running);
2897 local_irq_restore(flags);
2898 }
2899 -
2900 -/* By default: disable tracing after the dump */
2901 -void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
2902 -{
2903 - __ftrace_dump(true, oops_dump_mode);
2904 -}
2905 EXPORT_SYMBOL_GPL(ftrace_dump);
2906
2907 __init static int tracer_alloc_buffers(void)
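The trace.c hunk above retires the one-shot dump_ran flag and its arch spinlock in favour of an atomic counter: the first caller to raise it from zero proceeds, racers back out, and the decrement at the end re-arms dumping instead of disabling it forever. A C11-atomics sketch of that guard:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int dump_running = 0;

/*
 * atomic_inc_return-style exclusion: the first caller sees the count go
 * 0 -> 1 and proceeds; concurrent callers see a higher value, undo their
 * increment, and leave. Unlike the old one-shot flag, the count returns
 * to zero afterwards, so later dumps remain possible.
 */
static void do_dump(void)
{
    if (atomic_fetch_add(&dump_running, 1) + 1 != 1) {
        atomic_fetch_sub(&dump_running, 1);
        return;                         /* someone else is already dumping */
    }

    puts("dumping trace buffers");

    atomic_fetch_sub(&dump_running, 1); /* re-arm for the next crash */
}

int main(void)
{
    do_dump();
    do_dump();   /* runs again: the guard is reusable, not one-shot */
    return 0;
}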
2908 diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
2909 index 5fc7aa5..81f6275 100644
2910 --- a/kernel/trace/trace_selftest.c
2911 +++ b/kernel/trace/trace_selftest.c
2912 @@ -702,8 +702,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
2913 /* Maximum number of functions to trace before diagnosing a hang */
2914 #define GRAPH_MAX_FUNC_TEST 100000000
2915
2916 -static void
2917 -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode);
2918 static unsigned int graph_hang_thresh;
2919
2920 /* Wrap the real function entry probe to avoid possible hanging */
2921 @@ -713,8 +711,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
2922 if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
2923 ftrace_graph_stop();
2924 printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
2925 - if (ftrace_dump_on_oops)
2926 - __ftrace_dump(false, DUMP_ALL);
2927 + if (ftrace_dump_on_oops) {
2928 + ftrace_dump(DUMP_ALL);
2929 + /* ftrace_dump() disables tracing */
2930 + tracing_on();
2931 + }
2932 return 0;
2933 }
2934
2935 diff --git a/mm/mmap.c b/mm/mmap.c
2936 index 32f3372..e6beac4 100644
2937 --- a/mm/mmap.c
2938 +++ b/mm/mmap.c
2939 @@ -1296,15 +1296,20 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
2940 file = fget(fd);
2941 if (!file)
2942 goto out;
2943 + if (is_file_hugepages(file))
2944 + len = ALIGN(len, huge_page_size(hstate_file(file)));
2945 } else if (flags & MAP_HUGETLB) {
2946 struct user_struct *user = NULL;
2947 +
2948 + len = ALIGN(len, huge_page_size(hstate_sizelog(
2949 + (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)));
2950 /*
2951 * VM_NORESERVE is used because the reservations will be
2952 * taken when vm_ops->mmap() is called
2953 * A dummy user value is used because we are not locking
2954 * memory so no accounting is necessary
2955 */
2956 - file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len,
2957 + file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
2958 VM_NORESERVE,
2959 &user, HUGETLB_ANONHUGE_INODE,
2960 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
2961 diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
2962 index c301300..c49dcd0 100644
2963 --- a/net/ipv4/netfilter/ipt_rpfilter.c
2964 +++ b/net/ipv4/netfilter/ipt_rpfilter.c
2965 @@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,
2966 return dev_match;
2967 }
2968
2969 +static bool rpfilter_is_local(const struct sk_buff *skb)
2970 +{
2971 + const struct rtable *rt = skb_rtable(skb);
2972 + return rt && (rt->rt_flags & RTCF_LOCAL);
2973 +}
2974 +
2975 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
2976 {
2977 const struct xt_rpfilter_info *info;
2978 @@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
2979 info = par->matchinfo;
2980 invert = info->flags & XT_RPFILTER_INVERT;
2981
2982 - if (par->in->flags & IFF_LOOPBACK)
2983 + if (rpfilter_is_local(skb))
2984 return true ^ invert;
2985
2986 iph = ip_hdr(skb);
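
rpfilter previously treated only packets arriving on a loopback device as trivially acceptable; the fix instead asks the routing decision already attached to the skb whether the destination is local (RTCF_LOCAL), which also covers locally-destined packets received on regular interfaces. The equivalent IPv6 change below tests RTF_LOCAL. A stand-alone sketch of the new predicate; the flag value matches include/uapi/linux/in_route.h, but the struct here is a stand-in for the kernel's rtable:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define RTCF_LOCAL 0x80000000 /* as in include/uapi/linux/in_route.h */

    /* Stand-in for the kernel's struct rtable. */
    struct rtable_stub { unsigned int rt_flags; };

    static bool rpfilter_is_local(const struct rtable_stub *rt)
    {
            return rt && (rt->rt_flags & RTCF_LOCAL);
    }

    int main(void)
    {
            struct rtable_stub local = { RTCF_LOCAL }, forwarded = { 0 };

            printf("local=%d forwarded=%d no-route=%d\n",
                   rpfilter_is_local(&local),
                   rpfilter_is_local(&forwarded),
                   rpfilter_is_local(NULL));
            return 0;
    }
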
2987 diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c
2988 index 83acc14..0ea43c7 100644
2989 --- a/net/ipv6/netfilter/ip6t_NPT.c
2990 +++ b/net/ipv6/netfilter/ip6t_NPT.c
2991 @@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,
2992 if (pfx_len - i >= 32)
2993 mask = 0;
2994 else
2995 - mask = htonl(~((1 << (pfx_len - i)) - 1));
2996 + mask = htonl((1 << (i - pfx_len + 32)) - 1);
2997
2998 idx = i / 32;
2999 addr->s6_addr32[idx] &= mask;
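
The ip6t_NPT one-liner fixes which end of each 32-bit word the prefix mask preserves. For word i of the address, the first pfx_len - i bits belong to the prefix being rewritten and must be cleared, while the remaining low bits are host bits that must survive; the old expression kept high bits and cleared low bits, exactly backwards. Worked in host byte order for readability (the kernel applies htonl() to the result), with i = 32 and pfx_len = 57, so 25 prefix bits land in this word and 7 host bits must be kept:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int i = 32, pfx_len = 57; /* 25 prefix bits in this word */

            /* Old (buggy): keeps high bits, wipes the host suffix. */
            uint32_t old_mask = ~((1u << (pfx_len - i)) - 1);
            /* New: keeps exactly the 32 - (pfx_len - i) low host bits. */
            uint32_t new_mask = (1u << (i - pfx_len + 32)) - 1;

            /* Prints old=0xfe000000 new=0x0000007f: only the new mask
             * preserves the 7 host bits across the prefix rewrite. */
            printf("old=0x%08x new=0x%08x\n",
                   (unsigned)old_mask, (unsigned)new_mask);
            return 0;
    }
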
3000 diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
3001 index 5060d54..e0983f3 100644
3002 --- a/net/ipv6/netfilter/ip6t_rpfilter.c
3003 +++ b/net/ipv6/netfilter/ip6t_rpfilter.c
3004 @@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,
3005 return ret;
3006 }
3007
3008 +static bool rpfilter_is_local(const struct sk_buff *skb)
3009 +{
3010 + const struct rt6_info *rt = (const void *) skb_dst(skb);
3011 + return rt && (rt->rt6i_flags & RTF_LOCAL);
3012 +}
3013 +
3014 static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
3015 {
3016 const struct xt_rpfilter_info *info = par->matchinfo;
3017 @@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
3018 struct ipv6hdr *iph;
3019 bool invert = info->flags & XT_RPFILTER_INVERT;
3020
3021 - if (par->in->flags & IFF_LOOPBACK)
3022 + if (rpfilter_is_local(skb))
3023 return true ^ invert;
3024
3025 iph = ipv6_hdr(skb);
3026 diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
3027 index 6d6d8f2..38ca630 100644
3028 --- a/net/netfilter/ipset/ip_set_core.c
3029 +++ b/net/netfilter/ipset/ip_set_core.c
3030 @@ -1470,7 +1470,8 @@ ip_set_utest(struct sock *ctnl, struct sk_buff *skb,
3031 if (ret == -EAGAIN)
3032 ret = 1;
3033
3034 - return ret < 0 ? ret : ret > 0 ? 0 : -IPSET_ERR_EXIST;
3035 + return (ret < 0 && ret != -ENOTEMPTY) ? ret :
3036 + ret > 0 ? 0 : -IPSET_ERR_EXIST;
3037 }
3038
3039 /* Get headed data of a set */
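
The ip_set_utest() change stops reporting -ENOTEMPTY as a hard error: a set's test routine can answer "the element is not in the set" that way, and that case should surface as IPSET_ERR_EXIST like a plain miss. A small table of the mapping, with the error constant stubbed out (the real value lives in include/linux/netfilter/ipset/ip_set.h):

    #include <errno.h>
    #include <stdio.h>

    #define IPSET_ERR_EXIST 4103 /* illustrative stand-in for the uapi value */

    static int utest_result(int ret)
    {
            /* New mapping: real errors pass through, except -ENOTEMPTY,
             * which now means "element not in the set". */
            return (ret < 0 && ret != -ENOTEMPTY) ? ret :
                   ret > 0 ? 0 : -IPSET_ERR_EXIST;
    }

    int main(void)
    {
            printf("found:      %d\n", utest_result(1));          /* 0 */
            printf("not found:  %d\n", utest_result(0));          /* -IPSET_ERR_EXIST */
            printf("not member: %d\n", utest_result(-ENOTEMPTY)); /* -IPSET_ERR_EXIST */
            printf("error:      %d\n", utest_result(-EINVAL));    /* -EINVAL */
            return 0;
    }
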
3040 diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
3041 index 8371c2b..09c744a 100644
3042 --- a/net/netfilter/ipset/ip_set_list_set.c
3043 +++ b/net/netfilter/ipset/ip_set_list_set.c
3044 @@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
3045 {
3046 const struct set_elem *e = list_set_elem(map, i);
3047
3048 - if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
3049 - /* Last element replaced: e.g. add new,before,last */
3050 - ip_set_put_byindex(e->id);
3051 + if (e->id != IPSET_INVALID_ID) {
3052 + const struct set_elem *x = list_set_elem(map, map->size - 1);
3053 +
3054 + /* Last element replaced or pushed off */
3055 + if (x->id != IPSET_INVALID_ID)
3056 + ip_set_put_byindex(x->id);
3057 + }
3058 if (with_timeout(map->timeout))
3059 list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
3060 else
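
In list_set_add(), inserting into an occupied slot shifts the tail of the list one position to the right, so the element in the last slot is pushed off whenever the target slot is occupied, not only when the insertion point is the last slot itself; the fix drops the set reference of that pushed-off element instead of leaking it. A toy model of the shift, with plain ints standing in for set ids:

    #include <stdio.h>

    #define SIZE 4
    #define INVALID -1

    static int slots[SIZE] = { 10, 11, 12, 13 }; /* all occupied */

    static void add_at(int i, int id)
    {
            /* Fixed rule: whenever slot i is occupied, whatever sits in
             * the last slot will fall off, so drop its reference. */
            if (slots[i] != INVALID && slots[SIZE - 1] != INVALID)
                    printf("drop reference on pushed-off id %d\n",
                           slots[SIZE - 1]);
            for (int j = SIZE - 1; j > i; j--) /* shift right */
                    slots[j] = slots[j - 1];
            slots[i] = id;
    }

    int main(void)
    {
            add_at(1, 99); /* id 13 is pushed off, not the one in slot 1 */
            for (int j = 0; j < SIZE; j++)
                    printf("%d ", slots[j]);
            printf("\n"); /* 10 99 11 12 */
            return 0;
    }
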
3061 diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c
3062 index 12475ef..e5920fb 100644
3063 --- a/net/netfilter/ipvs/ip_vs_pe_sip.c
3064 +++ b/net/netfilter/ipvs/ip_vs_pe_sip.c
3065 @@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff,
3066 if (ret > 0)
3067 break;
3068 if (!ret)
3069 - return 0;
3070 + return -EINVAL;
3071 dataoff += *matchoff;
3072 }
3073
3074 - /* Empty callid is useless */
3075 - if (!*matchlen)
3076 - return -EINVAL;
3077 -
3078 /* Too large is useless */
3079 if (*matchlen > IP_VS_PEDATA_MAXLEN)
3080 return -EINVAL;
3081 diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
3082 index 884f2b3..91527d5 100644
3083 --- a/net/netfilter/nf_conntrack_helper.c
3084 +++ b/net/netfilter/nf_conntrack_helper.c
3085 @@ -236,7 +236,9 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
3086 /* We only allow helper re-assignment of the same sort since
3087 * we cannot reallocate the helper extension area.
3088 */
3089 - if (help->helper != helper) {
3090 + struct nf_conntrack_helper *tmp = rcu_dereference(help->helper);
3091 +
3092 + if (tmp && tmp->help != helper->help) {
3093 RCU_INIT_POINTER(help->helper, NULL);
3094 goto out;
3095 }
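
__nf_ct_try_assign_helper() used to drop a conntrack's helper whenever the looked-up helper object differed from the assigned one, even if both were of the same sort, i.e. shared the same help routine and hence the same extension layout. The fix fetches the current helper under RCU and compares the help function pointers instead. A toy illustration of why object identity is the wrong test (stand-in types, not the kernel structures):

    #include <stdio.h>

    /* Stand-in for struct nf_conntrack_helper. */
    struct helper { const char *name; int (*help)(void); };

    static int sip_help(void) { return 0; }

    int main(void)
    {
            /* Two helper objects of the same sort, e.g. one helper
             * registered for two ports, share one help routine. */
            struct helper a = { "sip-5060", sip_help };
            struct helper b = { "sip-5061", sip_help };

            printf("object compare differs: %d\n", &a != &b);         /* 1 */
            printf("help-routine compare:   %d\n", a.help != b.help); /* 0 */
            return 0;
    }
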
3096 diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
3097 index 627b0e5..a081915 100644
3098 --- a/net/netfilter/nf_conntrack_netlink.c
3099 +++ b/net/netfilter/nf_conntrack_netlink.c
3100 @@ -1705,6 +1705,9 @@ ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
3101 if (nlh->nlmsg_flags & NLM_F_CREATE) {
3102 enum ip_conntrack_events events;
3103
3104 + if (!cda[CTA_TUPLE_ORIG] || !cda[CTA_TUPLE_REPLY])
3105 + return -EINVAL;
3106 +
3107 ct = ctnetlink_create_conntrack(net, zone, cda, &otuple,
3108 &rtuple, u3);
3109 if (IS_ERR(ct))
3110 diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
3111 index df8f4f2..b4e0d1c 100644
3112 --- a/net/netfilter/nf_conntrack_sip.c
3113 +++ b/net/netfilter/nf_conntrack_sip.c
3114 @@ -1547,7 +1547,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
3115
3116 msglen = origlen = end - dptr;
3117 if (msglen > datalen)
3118 - return NF_DROP;
3119 + return NF_ACCEPT;
3120
3121 ret = process_sip_msg(skb, ct, protoff, dataoff,
3122 &dptr, &msglen);
3123 diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
3124 index 5f2f910..4bc2aaf 100644
3125 --- a/net/netfilter/nf_nat_core.c
3126 +++ b/net/netfilter/nf_nat_core.c
3127 @@ -468,33 +468,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);
3128 struct nf_nat_proto_clean {
3129 u8 l3proto;
3130 u8 l4proto;
3131 - bool hash;
3132 };
3133
3134 -/* Clear NAT section of all conntracks, in case we're loaded again. */
3135 -static int nf_nat_proto_clean(struct nf_conn *i, void *data)
3136 +/* kill conntracks with affected NAT section */
3137 +static int nf_nat_proto_remove(struct nf_conn *i, void *data)
3138 {
3139 const struct nf_nat_proto_clean *clean = data;
3140 struct nf_conn_nat *nat = nfct_nat(i);
3141
3142 if (!nat)
3143 return 0;
3144 - if (!(i->status & IPS_SRC_NAT_DONE))
3145 - return 0;
3146 +
3147 if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||
3148 (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))
3149 return 0;
3150
3151 - if (clean->hash) {
3152 - spin_lock_bh(&nf_nat_lock);
3153 - hlist_del_rcu(&nat->bysource);
3154 - spin_unlock_bh(&nf_nat_lock);
3155 - } else {
3156 - memset(nat, 0, sizeof(*nat));
3157 - i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK |
3158 - IPS_SEQ_ADJUST);
3159 - }
3160 - return 0;
3161 + return i->status & IPS_NAT_MASK ? 1 : 0;
3162 }
3163
3164 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
3165 @@ -506,16 +495,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
3166 struct net *net;
3167
3168 rtnl_lock();
3169 - /* Step 1 - remove from bysource hash */
3170 - clean.hash = true;
3171 for_each_net(net)
3172 - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
3173 - synchronize_rcu();
3174 -
3175 - /* Step 2 - clean NAT section */
3176 - clean.hash = false;
3177 - for_each_net(net)
3178 - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
3179 + nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
3180 rtnl_unlock();
3181 }
3182
3183 @@ -527,16 +508,9 @@ static void nf_nat_l3proto_clean(u8 l3proto)
3184 struct net *net;
3185
3186 rtnl_lock();
3187 - /* Step 1 - remove from bysource hash */
3188 - clean.hash = true;
3189 - for_each_net(net)
3190 - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
3191 - synchronize_rcu();
3192
3193 - /* Step 2 - clean NAT section */
3194 - clean.hash = false;
3195 for_each_net(net)
3196 - nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
3197 + nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);
3198 rtnl_unlock();
3199 }
3200
3201 @@ -774,7 +748,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
3202 {
3203 struct nf_nat_proto_clean clean = {};
3204
3205 - nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean);
3206 + nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
3207 synchronize_rcu();
3208 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
3209 }
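
The nf_nat_core rework drops the fragile two-pass cleanup (unhash from the bysource table, synchronize RCU, then scrub the NAT area of live conntracks) in favour of simply killing every affected conntrack: nf_ct_iterate_cleanup() treats a non-zero return from its callback as "kill this entry", which is what the renamed nf_nat_proto_remove() now relies on. A toy model of that iterate-and-kill contract (illustrative structures, not the kernel API):

    #include <stdio.h>

    struct conn { int l4proto; int natted; int dead; };

    /* Callback contract: return non-zero and the iterator kills the entry
     * (the kernel version tests i->status & IPS_NAT_MASK for "natted"). */
    static int proto_remove(struct conn *c, void *data)
    {
            int l4proto = *(int *)data;

            if (!c->natted)
                    return 0;
            if (l4proto && c->l4proto != l4proto)
                    return 0;
            return 1;
    }

    static void iterate_cleanup(struct conn *tbl, int n,
                                int (*iter)(struct conn *, void *), void *data)
    {
            for (int i = 0; i < n; i++)
                    if (iter(&tbl[i], data))
                            tbl[i].dead = 1;
    }

    int main(void)
    {
            struct conn tbl[] = { { 6, 1, 0 }, { 17, 1, 0 }, { 6, 0, 0 } };
            int tcp = 6;

            iterate_cleanup(tbl, 3, proto_remove, &tcp);
            for (int i = 0; i < 3; i++) /* only the NATted TCP entry dies */
                    printf("conn %d dead=%d\n", i, tbl[i].dead);
            return 0;
    }
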