Contents of /trunk/kernel-magellan/patches-3.9/0101-3.9.2-all-fixes.patch
Revision 2178 - Wed May 22 06:42:34 2013 UTC (11 years, 4 months ago) by niro
File size: 103386 byte(s)
-linux-3.9.1-3
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8dc0605..99ce189 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -239,7 +239,7 @@ static int __init xen_init_events(void)
 xen_init_IRQ();

 if (request_percpu_irq(xen_events_irq, xen_arm_callback,
- "events", xen_vcpu)) {
+ "events", &xen_vcpu)) {
 pr_err("Error requesting IRQ %d\n", xen_events_irq);
 return -EINVAL;
 }
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index afadae6..0782eaf 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -148,6 +148,7 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 #define VM_FAULT_BADACCESS 0x020000

 #define ESR_WRITE (1 << 6)
+#define ESR_CM (1 << 8)
 #define ESR_LNX_EXEC (1 << 24)

 /*
@@ -206,7 +207,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 struct task_struct *tsk;
 struct mm_struct *mm;
 int fault, sig, code;
- int write = esr & ESR_WRITE;
+ bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
 (write ? FAULT_FLAG_WRITE : 0);

diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index 8752bc8..8cbc6e5 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -113,6 +113,10 @@
 #define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
 #define PPC_INST_MTSPR_DSCR 0x7c1103a6
 #define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
 #define PPC_INST_SLBFEE 0x7c0007a7

 #define PPC_INST_STRING 0x7c00042a
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 37cc40e..83efa2f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -970,7 +970,10 @@ static int emulate_instruction(struct pt_regs *regs)

 #ifdef CONFIG_PPC64
 /* Emulate the mfspr rD, DSCR. */
- if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) &&
+ if ((((instword & PPC_INST_MFSPR_DSCR_USER_MASK) ==
+ PPC_INST_MFSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MFSPR_DSCR_MASK) ==
+ PPC_INST_MFSPR_DSCR)) &&
 cpu_has_feature(CPU_FTR_DSCR)) {
 PPC_WARN_EMULATED(mfdscr, regs);
 rd = (instword >> 21) & 0x1f;
@@ -978,7 +981,10 @@ static int emulate_instruction(struct pt_regs *regs)
 return 0;
 }
 /* Emulate the mtspr DSCR, rD. */
- if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) &&
+ if ((((instword & PPC_INST_MTSPR_DSCR_USER_MASK) ==
+ PPC_INST_MTSPR_DSCR_USER) ||
+ ((instword & PPC_INST_MTSPR_DSCR_MASK) ==
+ PPC_INST_MTSPR_DSCR)) &&
 cpu_has_feature(CPU_FTR_DSCR)) {
 PPC_WARN_EMULATED(mtdscr, regs);
 rd = (instword >> 21) & 0x1f;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index f410c3e..b75c52f 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1191,6 +1191,7 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
 * unmapping it first, it may see the speculated version.
 */
 if (local && cpu_has_feature(CPU_FTR_TM) &&
+ current->thread.regs &&
 MSR_TM_ACTIVE(current->thread.regs->msr)) {
 tm_enable();
 tm_abort(TM_CAUSE_TLBI);
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bba87ca..6a252c4 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -201,7 +201,7 @@ int __node_distance(int a, int b)
 int distance = LOCAL_DISTANCE;

 if (!form1_affinity)
- return distance;
+ return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

 for (i = 0; i < distance_ref_points_depth; i++) {
 if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index cc45deb..4a0a462 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -125,10 +125,15 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly =
 INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
 INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
 INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
- INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
- INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
+ /*
+ * Errata BV98 -- MEM_*_RETIRED events can leak between counters of SMT
+ * siblings; disable these events because they can corrupt unrelated
+ * counters.
+ */
+ INTEL_EVENT_CONSTRAINT(0xd0, 0x0), /* MEM_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd1, 0x0), /* MEM_LOAD_UOPS_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd2, 0x0), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
+ INTEL_EVENT_CONSTRAINT(0xd3, 0x0), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */
 EVENT_CONSTRAINT_END
 };

diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index da02e9c..d978353 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -310,7 +310,7 @@ void intel_pmu_lbr_read(void)
 * - in case there is no HW filter
 * - in case the HW filter has errata or limitations
 */
-static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
+static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
 {
 u64 br_type = event->attr.branch_sample_type;
 int mask = 0;
@@ -318,8 +318,11 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
 if (br_type & PERF_SAMPLE_BRANCH_USER)
 mask |= X86_BR_USER;

- if (br_type & PERF_SAMPLE_BRANCH_KERNEL)
+ if (br_type & PERF_SAMPLE_BRANCH_KERNEL) {
+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
 mask |= X86_BR_KERNEL;
+ }

 /* we ignore BRANCH_HV here */

@@ -339,6 +342,8 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
 * be used by fixup code for some CPU
 */
 event->hw.branch_reg.reg = mask;
+
+ return 0;
 }

 /*
@@ -386,7 +391,9 @@ int intel_pmu_setup_lbr_filter(struct perf_event *event)
 /*
 * setup SW LBR filter
 */
- intel_pmu_setup_sw_lbr_filter(event);
+ ret = intel_pmu_setup_sw_lbr_filter(event);
+ if (ret)
+ return ret;

 /*
 * setup HW LBR filter, if any
@@ -442,8 +449,18 @@ static int branch_type(unsigned long from, unsigned long to)
 return X86_BR_NONE;

 addr = buf;
- } else
- addr = (void *)from;
+ } else {
+ /*
+ * The LBR logs any address in the IP, even if the IP just
+ * faulted. This means userspace can control the from address.
+ * Ensure we don't blindy read any address by validating it is
+ * a known text address.
+ */
+ if (kernel_text_address(from))
+ addr = (void *)from;
+ else
+ return X86_BR_NONE;
+ }

 /*
 * decoder needs to know the ABI especially
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index b43200d..3e091f0 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
 static int __init uncore_type_init(struct intel_uncore_type *type)
 {
 struct intel_uncore_pmu *pmus;
- struct attribute_group *events_group;
+ struct attribute_group *attr_group;
 struct attribute **attrs;
 int i, j;

@@ -2455,19 +2455,19 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
 while (type->event_descs[i].attr.attr.name)
 i++;

- events_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
- sizeof(*events_group), GFP_KERNEL);
- if (!events_group)
+ attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) +
+ sizeof(*attr_group), GFP_KERNEL);
+ if (!attr_group)
 goto fail;

- attrs = (struct attribute **)(events_group + 1);
- events_group->name = "events";
- events_group->attrs = attrs;
+ attrs = (struct attribute **)(attr_group + 1);
+ attr_group->name = "events";
+ attr_group->attrs = attrs;

 for (j = 0; j < i; j++)
 attrs[j] = &type->event_descs[j].attr.attr;

- type->events_group = events_group;
+ type->events_group = attr_group;
 }

 type->pmu_group = &uncore_pmu_attr_group;
@@ -2853,6 +2853,7 @@ static int __init uncore_cpu_init(void)
 msr_uncores = nhm_msr_uncores;
 break;
 case 42: /* Sandy Bridge */
+ case 58: /* Ivy Bridge */
 if (snb_uncore_cbox.num_boxes > max_cores)
 snb_uncore_cbox.num_boxes = max_cores;
 msr_uncores = snb_msr_uncores;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b2b9837..e8918ff 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -972,10 +972,10 @@ int blkcg_activate_policy(struct request_queue *q,
 if (!new_blkg)
 return -ENOMEM;

- preloaded = !radix_tree_preload(GFP_KERNEL);
-
 blk_queue_bypass_start(q);

+ preloaded = !radix_tree_preload(GFP_KERNEL);
+
 /*
 * Make sure the root blkg exists and count the existing blkgs. As
 * @q is bypassing at this point, blkg_lookup_create() can't be
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 5899a76..769d92e 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -327,17 +327,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = {
 };

 /* possible dynamic channel ce_count attribute files */
-DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch0_ce_count, S_IRUGO,
 channel_ce_count_show, NULL, 0);
-DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch1_ce_count, S_IRUGO,
 channel_ce_count_show, NULL, 1);
-DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch2_ce_count, S_IRUGO,
 channel_ce_count_show, NULL, 2);
-DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch3_ce_count, S_IRUGO,
 channel_ce_count_show, NULL, 3);
-DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch4_ce_count, S_IRUGO,
 channel_ce_count_show, NULL, 4);
-DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR,
+DEVICE_CHANNEL(ch5_ce_count, S_IRUGO,
 channel_ce_count_show, NULL, 5);

 /* Total possible dynamic ce_count attribute file table */
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 5284292..02e52d5 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -241,6 +241,8 @@ struct ast_fbdev {
 void *sysram;
 int size;
 struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
 };

 #define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 34931fe..fbc0823 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -53,16 +53,52 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8;
 int ret;
 bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;

 obj = afbdev->afb.obj;
 bo = gem_to_ast_bo(obj);

+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
 ret = ast_bo_reserve(bo, true);
 if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 return;
 }

+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 if (!bo->kmap.virtual) {
 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 if (ret) {
@@ -72,10 +108,10 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 }
 unmap = true;
 }
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
 /* assume equal stride for now */
 src_offset = dst_offset = i * afbdev->afb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, (x2 - x + 1) * bpp);

 }
 if (unmap)
@@ -292,6 +328,7 @@ int ast_fbdev_init(struct drm_device *dev)

 ast->fbdev = afbdev;
 afbdev->helper.funcs = &ast_fb_helper_funcs;
+ spin_lock_init(&afbdev->dirty_lock);
 ret = drm_fb_helper_init(dev, &afbdev->helper,
 1, 1);
 if (ret) {
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 3602731..09da339 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -316,7 +316,7 @@ int ast_bo_reserve(struct ast_bo *bo, bool no_wait)

 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
 DRM_ERROR("reserve failed %p\n", bo);
 return ret;
 }
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 6e0cc72..7ca0595 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -154,6 +154,8 @@ struct cirrus_fbdev {
 struct list_head fbdev_list;
 void *sysram;
 int size;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
 };

 struct cirrus_bo {
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index e25afcc..3541b56 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -27,16 +27,51 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8;
 int ret;
 bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;

 obj = afbdev->gfb.obj;
 bo = gem_to_cirrus_bo(obj);

+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
 ret = cirrus_bo_reserve(bo, true);
 if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&afbdev->dirty_lock, flags);
+
+ if (afbdev->y1 < y)
+ y = afbdev->y1;
+ if (afbdev->y2 > y2)
+ y2 = afbdev->y2;
+ if (afbdev->x1 < x)
+ x = afbdev->x1;
+ if (afbdev->x2 > x2)
+ x2 = afbdev->x2;
+
+ if (store_for_later) {
+ afbdev->x1 = x;
+ afbdev->x2 = x2;
+ afbdev->y1 = y;
+ afbdev->y2 = y2;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
 return;
 }

+ afbdev->x1 = afbdev->y1 = INT_MAX;
+ afbdev->x2 = afbdev->y2 = 0;
+ spin_unlock_irqrestore(&afbdev->dirty_lock, flags);
+
 if (!bo->kmap.virtual) {
 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 if (ret) {
@@ -268,6 +303,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)

 cdev->mode_info.gfbdev = gfbdev;
 gfbdev->helper.funcs = &cirrus_fb_helper_funcs;
+ spin_lock_init(&gfbdev->dirty_lock);

 ret = drm_fb_helper_init(cdev->dev, &gfbdev->helper,
 cdev->num_crtc, CIRRUSFB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 1413a26..2ed8cfc 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -321,7 +321,7 @@ int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait)

 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 if (ret) {
- if (ret != -ERESTARTSYS)
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
 DRM_ERROR("reserve failed %p\n", bo);
 return ret;
 }
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index af779ae..cf919e3 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -205,11 +205,11 @@ static void
 drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
 {
 if (obj->import_attach) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
 obj->import_attach->dmabuf);
 }
 if (obj->export_dma_buf) {
- drm_prime_remove_imported_buf_handle(&filp->prime,
+ drm_prime_remove_buf_handle(&filp->prime,
 obj->export_dma_buf);
 }
 }
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 366910d..db767ca 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -62,6 +62,7 @@ struct drm_prime_member {
 struct dma_buf *dma_buf;
 uint32_t handle;
 };
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle);

 static struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
 enum dma_data_direction dir)
@@ -200,7 +201,8 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 {
 struct drm_gem_object *obj;
 void *buf;
- int ret;
+ int ret = 0;
+ struct dma_buf *dmabuf;

 obj = drm_gem_object_lookup(dev, file_priv, handle);
 if (!obj)
@@ -209,43 +211,44 @@ int drm_gem_prime_handle_to_fd(struct drm_device *dev,
 mutex_lock(&file_priv->prime.lock);
 /* re-export the original imported object */
 if (obj->import_attach) {
- get_dma_buf(obj->import_attach->dmabuf);
- *prime_fd = dma_buf_fd(obj->import_attach->dmabuf, flags);
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return 0;
+ dmabuf = obj->import_attach->dmabuf;
+ goto out_have_obj;
 }

 if (obj->export_dma_buf) {
- get_dma_buf(obj->export_dma_buf);
- *prime_fd = dma_buf_fd(obj->export_dma_buf, flags);
- drm_gem_object_unreference_unlocked(obj);
- } else {
- buf = dev->driver->gem_prime_export(dev, obj, flags);
- if (IS_ERR(buf)) {
- /* normally the created dma-buf takes ownership of the ref,
- * but if that fails then drop the ref
- */
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return PTR_ERR(buf);
- }
- obj->export_dma_buf = buf;
- *prime_fd = dma_buf_fd(buf, flags);
+ dmabuf = obj->export_dma_buf;
+ goto out_have_obj;
 }
+
+ buf = dev->driver->gem_prime_export(dev, obj, flags);
+ if (IS_ERR(buf)) {
+ /* normally the created dma-buf takes ownership of the ref,
+ * but if that fails then drop the ref
+ */
+ ret = PTR_ERR(buf);
+ goto out;
+ }
+ obj->export_dma_buf = buf;
+
 /* if we've exported this buffer the cheat and add it to the import list
 * so we get the correct handle back
 */
- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
- obj->export_dma_buf, handle);
- if (ret) {
- drm_gem_object_unreference_unlocked(obj);
- mutex_unlock(&file_priv->prime.lock);
- return ret;
- }
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
+ obj->export_dma_buf, handle);
+ if (ret)
+ goto out;

+ *prime_fd = dma_buf_fd(buf, flags);
 mutex_unlock(&file_priv->prime.lock);
 return 0;
+
+out_have_obj:
+ get_dma_buf(dmabuf);
+ *prime_fd = dma_buf_fd(dmabuf, flags);
+out:
+ drm_gem_object_unreference_unlocked(obj);
+ mutex_unlock(&file_priv->prime.lock);
+ return ret;
 }
 EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

@@ -268,7 +271,6 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 * refcount on gem itself instead of f_count of dmabuf.
 */
 drm_gem_object_reference(obj);
- dma_buf_put(dma_buf);
 return obj;
 }
 }
@@ -277,6 +279,8 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
 if (IS_ERR(attach))
 return ERR_PTR(PTR_ERR(attach));

+ get_dma_buf(dma_buf);
+
 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 if (IS_ERR_OR_NULL(sgt)) {
 ret = PTR_ERR(sgt);
@@ -297,6 +301,8 @@ fail_unmap:
 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 fail_detach:
 dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
 return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(drm_gem_prime_import);
@@ -314,7 +320,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,

 mutex_lock(&file_priv->prime.lock);

- ret = drm_prime_lookup_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_lookup_buf_handle(&file_priv->prime,
 dma_buf, handle);
 if (!ret) {
 ret = 0;
@@ -333,12 +339,15 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
 if (ret)
 goto out_put;

- ret = drm_prime_add_imported_buf_handle(&file_priv->prime,
+ ret = drm_prime_add_buf_handle(&file_priv->prime,
 dma_buf, *handle);
 if (ret)
 goto fail;

 mutex_unlock(&file_priv->prime.lock);
+
+ dma_buf_put(dma_buf);
+
 return 0;

 fail:
@@ -491,7 +500,7 @@ void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
 }
 EXPORT_SYMBOL(drm_prime_destroy_file_private);

-int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
+static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle)
 {
 struct drm_prime_member *member;

@@ -499,14 +508,14 @@ int drm_prime_add_imported_buf_handle(struct drm_prime_file_priv
 if (!member)
 return -ENOMEM;

+ get_dma_buf(dma_buf);
 member->dma_buf = dma_buf;
 member->handle = handle;
 list_add(&member->entry, &prime_fpriv->head);
 return 0;
 }
-EXPORT_SYMBOL(drm_prime_add_imported_buf_handle);

-int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
+int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle)
 {
 struct drm_prime_member *member;

@@ -518,19 +527,20 @@ int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_fp
 }
 return -ENOENT;
 }
-EXPORT_SYMBOL(drm_prime_lookup_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_lookup_buf_handle);

-void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
+void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf)
 {
 struct drm_prime_member *member, *safe;

 mutex_lock(&prime_fpriv->lock);
 list_for_each_entry_safe(member, safe, &prime_fpriv->head, entry) {
 if (member->dma_buf == dma_buf) {
+ dma_buf_put(dma_buf);
 list_del(&member->entry);
 kfree(member);
 }
 }
 mutex_unlock(&prime_fpriv->lock);
 }
-EXPORT_SYMBOL(drm_prime_remove_imported_buf_handle);
+EXPORT_SYMBOL(drm_prime_remove_buf_handle);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
index ba0a3aa..ff7f2a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
@@ -235,7 +235,6 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 * refcount on gem itself instead of f_count of dmabuf.
 */
 drm_gem_object_reference(obj);
- dma_buf_put(dma_buf);
 return obj;
 }
 }
@@ -244,6 +243,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
 if (IS_ERR(attach))
 return ERR_PTR(-EINVAL);

+ get_dma_buf(dma_buf);

 sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 if (IS_ERR_OR_NULL(sgt)) {
@@ -298,6 +298,8 @@ err_unmap_attach:
 dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 err_buf_detach:
 dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
 return ERR_PTR(ret);
 }

diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 8652cdf..029eccf 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -211,7 +211,7 @@ irqreturn_t psb_irq_handler(DRM_IRQ_ARGS)

 vdc_stat = PSB_RVDC32(PSB_INT_IDENTITY_R);

- if (vdc_stat & _PSB_PIPE_EVENT_FLAG)
+ if (vdc_stat & (_PSB_PIPE_EVENT_FLAG|_PSB_IRQ_ASLE))
 dsp_int = 1;

 /* FIXME: Handle Medfield
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 01769e2..ef99b1c 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -941,6 +941,7 @@ typedef struct drm_i915_private {
 unsigned int int_crt_support:1;
 unsigned int lvds_use_ssc:1;
 unsigned int display_clock_mode:1;
+ unsigned int fdi_rx_polarity_inverted:1;
 int lvds_ssc_freq;
 unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
 struct {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0e207e6..73cb479 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2678,17 +2678,35 @@ static inline int fence_number(struct drm_i915_private *dev_priv,
 return fence - dev_priv->fence_regs;
 }

+static void i915_gem_write_fence__ipi(void *data)
+{
+ wbinvd();
+}
+
 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
 struct drm_i915_fence_reg *fence,
 bool enable)
 {
- struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
- int reg = fence_number(dev_priv, fence);
-
- i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int fence_reg = fence_number(dev_priv, fence);
+
+ /* In order to fully serialize access to the fenced region and
+ * the update to the fence register we need to take extreme
+ * measures on SNB+. In theory, the write to the fence register
+ * flushes all memory transactions before, and coupled with the
+ * mb() placed around the register write we serialise all memory
+ * operations with respect to the changes in the tiler. Yet, on
+ * SNB+ we need to take a step further and emit an explicit wbinvd()
+ * on each processor in order to manually flush all memory
+ * transactions before updating the fence register.
+ */
+ if (HAS_LLC(obj->base.dev))
+ on_each_cpu(i915_gem_write_fence__ipi, NULL, 1);
+ i915_gem_write_fence(dev, fence_reg, enable ? obj : NULL);

 if (enable) {
- obj->fence_reg = reg;
+ obj->fence_reg = fence_reg;
 fence->obj = obj;
 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
 } else {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 94d873a..a1e8ecb 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -152,6 +152,13 @@ create_hw_context(struct drm_device *dev,
 return ERR_PTR(-ENOMEM);
 }

+ if (INTEL_INFO(dev)->gen >= 7) {
+ ret = i915_gem_object_set_cache_level(ctx->obj,
+ I915_CACHE_LLC_MLC);
+ if (ret)
+ goto err_out;
+ }
+
 /* The ring associated with the context object is handled by the normal
 * object tracking code. We give an initial ring value simple to pass an
 * assertion in the context switch code.
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 6a5af68..c303de1 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -271,7 +271,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 * refcount on gem itself instead of f_count of dmabuf.
 */
 drm_gem_object_reference(&obj->base);
- dma_buf_put(dma_buf);
 return &obj->base;
 }
 }
@@ -281,6 +280,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 if (IS_ERR(attach))
 return ERR_CAST(attach);

+ get_dma_buf(dma_buf);
+
 obj = i915_gem_object_alloc(dev);
 if (obj == NULL) {
 ret = -ENOMEM;
@@ -300,5 +301,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,

 fail_detach:
 dma_buf_detach(dma_buf, attach);
+ dma_buf_put(dma_buf);
+
 return ERR_PTR(ret);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 926a1e2..193c8d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -182,8 +182,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
 * entries. For aliasing ppgtt support we just steal them at the end for
 * now. */
- first_pd_entry_in_global_pt =
- gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
+ first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

 ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
 ppgtt->clear_range = gen6_ppgtt_clear_range;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 848992f..c91124f 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3827,7 +3827,7 @@
 #define _TRANSB_CHICKEN2 0xf1064
 #define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
 #define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
-
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED (1<<29)

 #define SOUTH_CHICKEN1 0xc2000
 #define FDIA_PHASE_SYNC_SHIFT_OVR 19
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 55ffba1..bd83391 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -351,12 +351,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
 dev_priv->lvds_ssc_freq =
 intel_bios_ssc_frequency(dev, general->ssc_freq);
 dev_priv->display_clock_mode = general->display_clock_mode;
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+ dev_priv->fdi_rx_polarity_inverted = general->fdi_rx_polarity_inverted;
+ DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
 dev_priv->int_tv_support,
 dev_priv->int_crt_support,
 dev_priv->lvds_use_ssc,
 dev_priv->lvds_ssc_freq,
- dev_priv->display_clock_mode);
+ dev_priv->display_clock_mode,
+ dev_priv->fdi_rx_polarity_inverted);
 }
 }

diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 36e57f9..e088d6f 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -127,7 +127,9 @@ struct bdb_general_features {
 /* bits 3 */
 u8 disable_smooth_vision:1;
 u8 single_dvi:1;
- u8 rsvd9:6; /* finish byte */
+ u8 rsvd9:1;
+ u8 fdi_rx_polarity_inverted:1;
+ u8 rsvd10:4; /* finish byte */

 /* bits 4 */
 u8 legacy_monitor_detect;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b20d501..c2d173a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -7589,22 +7589,25 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes,
 if (crtc->enabled)
 *prepare_pipes |= 1 << intel_crtc->pipe;

- /* We only support modeset on one single crtc, hence we need to do that
- * only for the passed in crtc iff we change anything else than just
- * disable crtcs.
- *
- * This is actually not true, to be fully compatible with the old crtc
- * helper we automatically disable _any_ output (i.e. doesn't need to be
- * connected to the crtc we're modesetting on) if it's disconnected.
- * Which is a rather nutty api (since changed the output configuration
- * without userspace's explicit request can lead to confusion), but
- * alas. Hence we currently need to modeset on all pipes we prepare. */
+ /*
+ * For simplicity do a full modeset on any pipe where the output routing
+ * changed. We could be more clever, but that would require us to be
+ * more careful with calling the relevant encoder->mode_set functions.
+ */
 if (*prepare_pipes)
 *modeset_pipes = *prepare_pipes;

 /* ... and mask these out. */
 *modeset_pipes &= ~(*disable_pipes);
 *prepare_pipes &= ~(*disable_pipes);
+
+ /*
+ * HACK: We don't (yet) fully support global modesets. intel_set_config
+ * obies this rule, but the modeset restore mode of
+ * intel_modeset_setup_hw_state does not.
+ */
+ *modeset_pipes &= 1 << intel_crtc->pipe;
+ *prepare_pipes &= 1 << intel_crtc->pipe;
 }

 static bool intel_crtc_in_use(struct drm_crtc *crtc)
@@ -7771,9 +7774,9 @@ intel_modeset_check_state(struct drm_device *dev)
 }
 }

-int intel_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y, struct drm_framebuffer *fb)
+static int __intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
 {
 struct drm_device *dev = crtc->dev;
 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -7863,8 +7866,6 @@ done:
 if (ret && crtc->enabled) {
 crtc->hwmode = *saved_hwmode;
 crtc->mode = *saved_mode;
- } else {
- intel_modeset_check_state(dev);
 }

 out:
@@ -7872,6 +7873,20 @@ out:
 return ret;
 }

+int intel_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y, struct drm_framebuffer *fb)
+{
+ int ret;
+
+ ret = __intel_set_mode(crtc, mode, x, y, fb);
+
+ if (ret == 0)
+ intel_modeset_check_state(crtc->dev);
+
+ return ret;
+}
+
 void intel_crtc_restore_mode(struct drm_crtc *crtc)
 {
 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
@@ -8314,7 +8329,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 I915_WRITE(PFIT_CONTROL, 0);
 }

- if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
+ if (!IS_ULT(dev))
 intel_crt_init(dev);

 if (HAS_DDI(dev)) {
@@ -9172,8 +9187,16 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
 }

 if (force_restore) {
+ /*
+ * We need to use raw interfaces for restoring state to avoid
+ * checking (bogus) intermediate states.
+ */
 for_each_pipe(pipe) {
- intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
+ struct drm_crtc *crtc =
+ dev_priv->pipe_to_crtc_mapping[pipe];
+
+ __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
+ crtc->fb);
 }

 i915_redisable_vga(dev);
@@ -9236,6 +9259,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 /* flush any delayed tasks or pending work */
 flush_scheduled_work();

+ /* destroy backlight, if any, before the connectors */
+ intel_panel_destroy_backlight(dev);
+
 drm_mode_config_cleanup(dev);

 intel_cleanup_overlay(dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8fc93f9..b8e17e5 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2538,17 +2538,14 @@ done:
 static void
 intel_dp_destroy(struct drm_connector *connector)
 {
- struct drm_device *dev = connector->dev;
 struct intel_dp *intel_dp = intel_attached_dp(connector);
 struct intel_connector *intel_connector = to_intel_connector(connector);

 if (!IS_ERR_OR_NULL(intel_connector->edid))
 kfree(intel_connector->edid);

- if (is_edp(intel_dp)) {
- intel_panel_destroy_backlight(dev);
+ if (is_edp(intel_dp))
 intel_panel_fini(&intel_connector->panel);
- }

 drm_sysfs_connector_remove(connector);
 drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 00e70db..cc70b16 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -448,6 +448,7 @@ void intel_dvo_init(struct drm_device *dev)
 const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
 struct i2c_adapter *i2c;
 int gpio;
+ bool dvoinit;

 /* Allow the I2C driver info to specify the GPIO to be used in
 * special cases, but otherwise default to what's defined
@@ -467,7 +468,17 @@ void intel_dvo_init(struct drm_device *dev)
 i2c = intel_gmbus_get_adapter(dev_priv, gpio);

 intel_dvo->dev = *dvo;
- if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
+
+ /* GMBUS NAK handling seems to be unstable, hence let the
+ * transmitter detection run in bit banging mode for now.
+ */
+ intel_gmbus_force_bit(i2c, true);
+
+ dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
+
+ intel_gmbus_force_bit(i2c, false);
+
+ if (!dvoinit)
 continue;

 intel_encoder->type = INTEL_OUTPUT_DVO;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 3d1d974..e0d6985 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -618,7 +618,6 @@ static void intel_lvds_destroy(struct drm_connector *connector)
 if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
 kfree(lvds_connector->base.edid);

- intel_panel_destroy_backlight(connector->dev);
 intel_panel_fini(&lvds_connector->base.panel);

 drm_sysfs_connector_remove(connector);
@@ -850,6 +849,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
 DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"),
 },
 },
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Fujitsu Esprimo Q900",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"),
+ },
+ },

 { } /* terminating entry */
 };
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index bee8cb6..94d895b 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -422,6 +422,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)

 intel_panel_init_backlight(dev);

+ if (WARN_ON(dev_priv->backlight))
+ return -ENODEV;
+
 memset(&props, 0, sizeof(props));
 props.type = BACKLIGHT_RAW;
 props.max_brightness = _intel_panel_get_max_backlight(dev);
@@ -447,8 +450,10 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
 void intel_panel_destroy_backlight(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
- if (dev_priv->backlight)
+ if (dev_priv->backlight) {
 backlight_device_unregister(dev_priv->backlight);
+ dev_priv->backlight = NULL;
+ }
 }
 #else
 int intel_panel_setup_backlight(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index adca007..332b29e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3562,6 +3562,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 {
 struct drm_i915_private *dev_priv = dev->dev_private;
 int pipe;
+ uint32_t val;

 /*
 * On Ibex Peak and Cougar Point, we need to disable clock
@@ -3574,8 +3575,12 @@ static void cpt_init_clock_gating(struct drm_device *dev)
 /* The below fixes the weird display corruption, a few pixels shifted
 * downward, on (only) LVDS of some HP laptops with IVY.
 */
- for_each_pipe(pipe)
- I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
+ for_each_pipe(pipe) {
+ val = TRANS_CHICKEN2_TIMING_OVERRIDE;
+ if (dev_priv->fdi_rx_polarity_inverted)
+ val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
+ I915_WRITE(TRANS_CHICKEN2(pipe), val);
+ }
 /* WADP0ClockGatingDisable */
 for_each_pipe(pipe) {
 I915_WRITE(TRANS_CHICKEN1(pipe),
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index d07a8cd..d6df786 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1235,11 +1235,13 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
 struct drm_device *dev = encoder->base.dev;
 struct drm_i915_private *dev_priv = dev->dev_private;
 struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base);
+ u16 active_outputs;
 u32 tmp;

 tmp = I915_READ(intel_sdvo->sdvo_reg);
+ intel_sdvo_get_active_outputs(intel_sdvo, &active_outputs);

- if (!(tmp & SDVO_ENABLE))
+ if (!(tmp & SDVO_ENABLE) && (active_outputs == 0))
 return false;

 if (HAS_PCH_CPT(dev))
@@ -2739,7 +2741,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
 struct intel_sdvo *intel_sdvo;
 u32 hotplug_mask;
 int i;
-
 intel_sdvo = kzalloc(sizeof(struct intel_sdvo), GFP_KERNEL);
 if (!intel_sdvo)
 return false;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 4d932c4..8065919 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -115,6 +115,8 @@ struct mga_fbdev {
 void *sysram;
 int size;
 struct ttm_bo_kmap_obj mapping;
+ int x1, y1, x2, y2; /* dirty rect */
+ spinlock_t dirty_lock;
 };

 struct mga_crtc {
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index d2253f6..b0dad27 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -29,16 +29,52 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
 int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8;
 int ret;
 bool unmap = false;
+ bool store_for_later = false;
+ int x2, y2;
+ unsigned long flags;

 obj = mfbdev->mfb.obj;
 bo = gem_to_mga_bo(obj);

+ /*
+ * try and reserve the BO, if we fail with busy
+ * then the BO is being moved and we should
+ * store up the damage until later.
+ */
 ret = mgag200_bo_reserve(bo, true);
 if (ret) {
- DRM_ERROR("failed to reserve fb bo\n");
+ if (ret != -EBUSY)
+ return;
+
+ store_for_later = true;
+ }
+
+ x2 = x + width - 1;
+ y2 = y + height - 1;
+ spin_lock_irqsave(&mfbdev->dirty_lock, flags);
+
+ if (mfbdev->y1 < y)
+ y = mfbdev->y1;
+ if (mfbdev->y2 > y2)
+ y2 = mfbdev->y2;
+ if (mfbdev->x1 < x)
+ x = mfbdev->x1;
+ if (mfbdev->x2 > x2)
+ x2 = mfbdev->x2;
+
+ if (store_for_later) {
+ mfbdev->x1 = x;
+ mfbdev->x2 = x2;
+ mfbdev->y1 = y;
+ mfbdev->y2 = y2;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
 return;
 }

+ mfbdev->x1 = mfbdev->y1 = INT_MAX;
+ mfbdev->x2 = mfbdev->y2 = 0;
+ spin_unlock_irqrestore(&mfbdev->dirty_lock, flags);
+
 if (!bo->kmap.virtual) {
 ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
 if (ret) {
@@ -48,10 +84,10 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
 }
 unmap = true;
 }
- for (i = y; i < y + height; i++) {
+ for (i = y; i <= y2; i++) {
 /* assume equal stride for now */
 src_offset = dst_offset = i * mfbdev->mfb.base.pitches[0] + (x * bpp);
- memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, width * bpp);
+ memcpy_toio(bo->kmap.virtual + src_offset, mfbdev->sysram + src_offset, (x2 - x + 1) * bpp);

 }
 if (unmap)
@@ -255,6 +291,7 @@ int mgag200_fbdev_init(struct mga_device *mdev)

 mdev->mfbdev = mfbdev;
 mfbdev->helper.funcs = &mga_fb_helper_funcs;
+ spin_lock_init(&mfbdev->dirty_lock);

 ret = drm_fb_helper_init(mdev->dev, &mfbdev->helper,
 mdev->num_crtc, MGAG200FB_CONN_LIMIT);
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 8fc9d92..401c989 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -315,8 +315,8 @@ int mgag200_bo_reserve(struct mgag200_bo *bo, bool no_wait)

 ret = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);
 if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("reserve failed %p\n", bo);
+ if (ret != -ERESTARTSYS && ret != -EBUSY)
+ DRM_ERROR("reserve failed %p %d\n", bo, ret);
 return ret;
 }
 return 0;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
index ac74d1b..1bdf7e1 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
@@ -212,7 +212,6 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
 * refcount on gem itself instead of f_count of dmabuf.
 */
 drm_gem_object_reference(obj);
- dma_buf_put(buffer);
 return obj;
 }
 }
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 46a9c37..fb441a7 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1394,10 +1394,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx)
 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

 DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+ le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
+ le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
 }
 ctx->scratch_size_bytes = 0;
 if (usage_bytes == 0)
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 21a892c..6d6fdb3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -557,6 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
 /* use frac fb div on APUs */
 if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
+ /* use frac fb div on RS780/RS880 */
+ if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+ radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
 radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
 } else {
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 305a657..aeaa386 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -105,6 +105,27 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 }
 }

+static bool dce4_is_in_vblank(struct radeon_device *rdev, int crtc)
+{
+ if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ return true;
+ else
+ return false;
+}
+
+static bool dce4_is_counter_moving(struct radeon_device *rdev, int crtc)
+{
+ u32 pos1, pos2;
+
+ pos1 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+ pos2 = RREG32(EVERGREEN_CRTC_STATUS_POSITION + crtc_offsets[crtc]);
+
+ if (pos1 != pos2)
+ return true;
+ else
+ return false;
+}
+
 /**
 * dce4_wait_for_vblank - vblank wait asic callback.
 *
@@ -115,21 +136,28 @@ void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
 */
 void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
 {
- int i;
+ unsigned i = 0;

 if (crtc >= rdev->num_crtc)
 return;

- if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
+ if (!(RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN))
+ return;
+
+ /* depending on when we hit vblank, we may be close to active; if so,
+ * wait for another frame.
+ */
+ while (dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
 break;
- udelay(1);
 }
- for (i = 0; i < rdev->usec_timeout; i++) {
- if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
+ }
+
+ while (!dce4_is_in_vblank(rdev, crtc)) {
+ if (i++ % 100 == 0) {
+ if (!dce4_is_counter_moving(rdev, crtc))
 break;
- udelay(1);
 }
 }
 }
@@ -608,6 +636,16 @@ void evergreen_hpd_init(struct radeon_device *rdev)

 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ /* don't try to enable hpd on eDP or LVDS avoid breaking the
+ * aux dp channel on imac and help (but not completely fix)
+ * https://bugzilla.redhat.com/show_bug.cgi?id=726143
+ * also avoid interrupt storms during dpms.
+ */
+ continue;
+ }
 switch (radeon_connector->hpd.hpd) {
 case RADEON_HPD_1:
 WREG32(DC_HPD1_CONTROL, tmp);
@@ -1325,17 +1363,16 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
 tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
 if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
 radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 }
 } else {
 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
 if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
 radeon_wait_for_vblank(rdev, i);
- tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
 WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
 }
@@ -1347,6 +1384,15 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
 break;
 udelay(1);
 }
+
+ /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
+ tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
+ tmp &= ~EVERGREEN_CRTC_MASTER_EN;
+ WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
+ save->crtc_enabled[i] = false;
+ /* ***** */
 } else {
 save->crtc_enabled[i] = false;
 }
@@ -1364,6 +1410,22 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
1461 | } |
1462 | /* wait for the MC to settle */ |
1463 | udelay(100); |
1464 | + |
1465 | + /* lock double buffered regs */ |
1466 | + for (i = 0; i < rdev->num_crtc; i++) { |
1467 | + if (save->crtc_enabled[i]) { |
1468 | + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); |
1469 | + if (!(tmp & EVERGREEN_GRPH_UPDATE_LOCK)) { |
1470 | + tmp |= EVERGREEN_GRPH_UPDATE_LOCK; |
1471 | + WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp); |
1472 | + } |
1473 | + tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]); |
1474 | + if (!(tmp & 1)) { |
1475 | + tmp |= 1; |
1476 | + WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); |
1477 | + } |
1478 | + } |
1479 | + } |
1480 | } |
1481 | |
1482 | void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save) |
1483 | @@ -1385,6 +1447,33 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s |
1484 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); |
1485 | WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); |
1486 | |
1487 | + /* unlock regs and wait for update */ |
1488 | + for (i = 0; i < rdev->num_crtc; i++) { |
1489 | + if (save->crtc_enabled[i]) { |
1490 | + tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); |
1491 | + if ((tmp & 0x3) != 0) { |
1492 | + tmp &= ~0x3; |
1493 | + WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); |
1494 | + } |
1495 | + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); |
1496 | + if (tmp & EVERGREEN_GRPH_UPDATE_LOCK) { |
1497 | + tmp &= ~EVERGREEN_GRPH_UPDATE_LOCK; |
1498 | + WREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i], tmp); |
1499 | + } |
1500 | + tmp = RREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i]); |
1501 | + if (tmp & 1) { |
1502 | + tmp &= ~1; |
1503 | + WREG32(EVERGREEN_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); |
1504 | + } |
1505 | + for (j = 0; j < rdev->usec_timeout; j++) { |
1506 | + tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); |
1507 | + if ((tmp & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING) == 0) |
1508 | + break; |
1509 | + udelay(1); |
1510 | + } |
1511 | + } |
1512 | + } |
1513 | + |
1514 | /* unblackout the MC */ |
1515 | tmp = RREG32(MC_SHARED_BLACKOUT_CNTL); |
1516 | tmp &= ~BLACKOUT_MODE_MASK; |
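
The dce4_wait_for_vblank() rework above replaces the two fixed usec_timeout polling loops with helpers: spin while (or until) the CRTC reports vblank, and roughly every 100 iterations read the position register twice; if the two reads match, the counter has stalled (CRTC disabled or stuck) and the wait gives up instead of hanging. A self-contained sketch of that pattern, with simulated register reads (the 1000-line/50-line frame geometry is invented for the demo):

#include <stdbool.h>
#include <stdio.h>

static unsigned fake_pos;                                  /* simulated scanline counter */
static unsigned read_position(void) { return fake_pos++; } /* stand-in for RREG32 */

static bool in_vblank(void)
{
	return (read_position() % 1000) < 50;  /* pretend lines 0..49 are vblank */
}

static bool counter_moving(void)
{
	unsigned p1 = read_position();
	unsigned p2 = read_position();

	return p1 != p2;                       /* two reads differ -> scanout alive */
}

static void wait_for_vblank(void)
{
	unsigned i = 0;

	/* If we start inside vblank we may be about to leave it, so wait
	 * for the next full one; bail out if the counter ever stalls. */
	while (in_vblank()) {
		if (i++ % 100 == 0 && !counter_moving())
			return;
	}
	while (!in_vblank()) {
		if (i++ % 100 == 0 && !counter_moving())
			return;
	}
}

int main(void)
{
	wait_for_vblank();
	printf("reached vblank near position %u\n", fake_pos % 1000);
	return 0;
}
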
1517 | diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h |
1518 | index f585be1..881aba2 100644 |
1519 | --- a/drivers/gpu/drm/radeon/evergreen_reg.h |
1520 | +++ b/drivers/gpu/drm/radeon/evergreen_reg.h |
1521 | @@ -226,6 +226,8 @@ |
1522 | #define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 |
1523 | #define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 |
1524 | #define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 |
1525 | +#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 |
1526 | +#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 |
1527 | |
1528 | #define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0 |
1529 | #define EVERGREEN_DC_GPIO_HPD_A 0x64b4 |
1530 | diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c |
1531 | index 27769e7..0a32d89 100644 |
1532 | --- a/drivers/gpu/drm/radeon/ni.c |
1533 | +++ b/drivers/gpu/drm/radeon/ni.c |
1534 | @@ -473,7 +473,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) |
1535 | (rdev->pdev->device == 0x990F) || |
1536 | (rdev->pdev->device == 0x9910) || |
1537 | (rdev->pdev->device == 0x9917) || |
1538 | - (rdev->pdev->device == 0x9999)) { |
1539 | + (rdev->pdev->device == 0x9999) || |
1540 | + (rdev->pdev->device == 0x999C)) { |
1541 | rdev->config.cayman.max_simds_per_se = 6; |
1542 | rdev->config.cayman.max_backends_per_se = 2; |
1543 | } else if ((rdev->pdev->device == 0x9903) || |
1544 | @@ -482,7 +483,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) |
1545 | (rdev->pdev->device == 0x990D) || |
1546 | (rdev->pdev->device == 0x990E) || |
1547 | (rdev->pdev->device == 0x9913) || |
1548 | - (rdev->pdev->device == 0x9918)) { |
1549 | + (rdev->pdev->device == 0x9918) || |
1550 | + (rdev->pdev->device == 0x999D)) { |
1551 | rdev->config.cayman.max_simds_per_se = 4; |
1552 | rdev->config.cayman.max_backends_per_se = 2; |
1553 | } else if ((rdev->pdev->device == 0x9919) || |
1554 | @@ -621,6 +623,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) |
1555 | |
1556 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
1557 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
1558 | + if (ASIC_IS_DCE6(rdev)) |
1559 | + WREG32(DMIF_ADDR_CALC, gb_addr_config); |
1560 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
1561 | WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); |
1562 | WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); |
1563 | diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h |
1564 | index 079dee2..445b235 100644 |
1565 | --- a/drivers/gpu/drm/radeon/nid.h |
1566 | +++ b/drivers/gpu/drm/radeon/nid.h |
1567 | @@ -45,6 +45,10 @@ |
1568 | #define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001 |
1569 | |
1570 | #define DMIF_ADDR_CONFIG 0xBD4 |
1571 | + |
1572 | +/* DCE6 only */ |
1573 | +#define DMIF_ADDR_CALC 0xC00 |
1574 | + |
1575 | #define SRBM_GFX_CNTL 0x0E44 |
1576 | #define RINGID(x) (((x) & 0x3) << 0) |
1577 | #define VMID(x) (((x) & 0x7) << 0) |
1578 | diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c |
1579 | index 9db5853..4973bff 100644 |
1580 | --- a/drivers/gpu/drm/radeon/r100.c |
1581 | +++ b/drivers/gpu/drm/radeon/r100.c |
1582 | @@ -69,6 +69,38 @@ MODULE_FIRMWARE(FIRMWARE_R520); |
1583 | * and others in some cases. |
1584 | */ |
1585 | |
1586 | +static bool r100_is_in_vblank(struct radeon_device *rdev, int crtc) |
1587 | +{ |
1588 | + if (crtc == 0) { |
1589 | + if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) |
1590 | + return true; |
1591 | + else |
1592 | + return false; |
1593 | + } else { |
1594 | + if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) |
1595 | + return true; |
1596 | + else |
1597 | + return false; |
1598 | + } |
1599 | +} |
1600 | + |
1601 | +static bool r100_is_counter_moving(struct radeon_device *rdev, int crtc) |
1602 | +{ |
1603 | + u32 vline1, vline2; |
1604 | + |
1605 | + if (crtc == 0) { |
1606 | + vline1 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
1607 | + vline2 = (RREG32(RADEON_CRTC_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
1608 | + } else { |
1609 | + vline1 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
1610 | + vline2 = (RREG32(RADEON_CRTC2_VLINE_CRNT_VLINE) >> 16) & RADEON_CRTC_V_TOTAL; |
1611 | + } |
1612 | + if (vline1 != vline2) |
1613 | + return true; |
1614 | + else |
1615 | + return false; |
1616 | +} |
1617 | + |
1618 | /** |
1619 | * r100_wait_for_vblank - vblank wait asic callback. |
1620 | * |
1621 | @@ -79,36 +111,33 @@ MODULE_FIRMWARE(FIRMWARE_R520); |
1622 | */ |
1623 | void r100_wait_for_vblank(struct radeon_device *rdev, int crtc) |
1624 | { |
1625 | - int i; |
1626 | + unsigned i = 0; |
1627 | |
1628 | if (crtc >= rdev->num_crtc) |
1629 | return; |
1630 | |
1631 | if (crtc == 0) { |
1632 | - if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) { |
1633 | - for (i = 0; i < rdev->usec_timeout; i++) { |
1634 | - if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)) |
1635 | - break; |
1636 | - udelay(1); |
1637 | - } |
1638 | - for (i = 0; i < rdev->usec_timeout; i++) { |
1639 | - if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR) |
1640 | - break; |
1641 | - udelay(1); |
1642 | - } |
1643 | - } |
1644 | + if (!(RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN)) |
1645 | + return; |
1646 | } else { |
1647 | - if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) { |
1648 | - for (i = 0; i < rdev->usec_timeout; i++) { |
1649 | - if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)) |
1650 | - break; |
1651 | - udelay(1); |
1652 | - } |
1653 | - for (i = 0; i < rdev->usec_timeout; i++) { |
1654 | - if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR) |
1655 | - break; |
1656 | - udelay(1); |
1657 | - } |
1658 | + if (!(RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN)) |
1659 | + return; |
1660 | + } |
1661 | + |
1662 | + /* depending on when we hit vblank, we may be close to active; if so, |
1663 | + * wait for another frame. |
1664 | + */ |
1665 | + while (r100_is_in_vblank(rdev, crtc)) { |
1666 | + if (i++ % 100 == 0) { |
1667 | + if (!r100_is_counter_moving(rdev, crtc)) |
1668 | + break; |
1669 | + } |
1670 | + } |
1671 | + |
1672 | + while (!r100_is_in_vblank(rdev, crtc)) { |
1673 | + if (i++ % 100 == 0) { |
1674 | + if (!r100_is_counter_moving(rdev, crtc)) |
1675 | + break; |
1676 | } |
1677 | } |
1678 | } |
1679 | diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h |
1680 | index c0dc8d3..1dd0d32 100644 |
1681 | --- a/drivers/gpu/drm/radeon/r500_reg.h |
1682 | +++ b/drivers/gpu/drm/radeon/r500_reg.h |
1683 | @@ -358,7 +358,9 @@ |
1684 | #define AVIVO_D1CRTC_STATUS_HV_COUNT 0x60ac |
1685 | #define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 |
1686 | |
1687 | +#define AVIVO_D1MODE_MASTER_UPDATE_LOCK 0x60e0 |
1688 | #define AVIVO_D1MODE_MASTER_UPDATE_MODE 0x60e4 |
1689 | +#define AVIVO_D1CRTC_UPDATE_LOCK 0x60e8 |
1690 | |
1691 | /* master controls */ |
1692 | #define AVIVO_DC_CRTC_MASTER_EN 0x60f8 |
1693 | diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c |
1694 | index 21ecc0e..8520833 100644 |
1695 | --- a/drivers/gpu/drm/radeon/r600_hdmi.c |
1696 | +++ b/drivers/gpu/drm/radeon/r600_hdmi.c |
1697 | @@ -433,7 +433,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder) |
1698 | offset = dig->afmt->offset; |
1699 | |
1700 | /* Older chipsets require setting HDMI and routing manually */ |
1701 | - if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { |
1702 | + if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) { |
1703 | hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE; |
1704 | switch (radeon_encoder->encoder_id) { |
1705 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
1706 | @@ -501,7 +501,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder) |
1707 | radeon_irq_kms_disable_afmt(rdev, dig->afmt->id); |
1708 | |
1709 | /* Older chipsets not handled by AtomBIOS */ |
1710 | - if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) { |
1711 | + if (ASIC_IS_DCE2(rdev) && !ASIC_IS_DCE3(rdev)) { |
1712 | switch (radeon_encoder->encoder_id) { |
1713 | case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
1714 | WREG32_P(AVIVO_TMDSA_CNTL, 0, |
1715 | diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c |
1716 | index f22eb57..96168ef 100644 |
1717 | --- a/drivers/gpu/drm/radeon/radeon_atombios.c |
1718 | +++ b/drivers/gpu/drm/radeon/radeon_atombios.c |
1719 | @@ -2028,6 +2028,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) |
1720 | num_modes = power_info->info.ucNumOfPowerModeEntries; |
1721 | if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) |
1722 | num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; |
1723 | + if (num_modes == 0) |
1724 | + return state_index; |
1725 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); |
1726 | if (!rdev->pm.power_state) |
1727 | return state_index; |
1728 | @@ -2432,6 +2434,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) |
1729 | power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); |
1730 | |
1731 | radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); |
1732 | + if (power_info->pplib.ucNumStates == 0) |
1733 | + return state_index; |
1734 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * |
1735 | power_info->pplib.ucNumStates, GFP_KERNEL); |
1736 | if (!rdev->pm.power_state) |
1737 | @@ -2514,6 +2518,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) |
1738 | int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
1739 | u16 data_offset; |
1740 | u8 frev, crev; |
1741 | + u8 *power_state_offset; |
1742 | |
1743 | if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
1744 | &frev, &crev, &data_offset)) |
1745 | @@ -2530,15 +2535,17 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) |
1746 | non_clock_info_array = (struct _NonClockInfoArray *) |
1747 | (mode_info->atom_context->bios + data_offset + |
1748 | le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); |
1749 | + if (state_array->ucNumEntries == 0) |
1750 | + return state_index; |
1751 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * |
1752 | state_array->ucNumEntries, GFP_KERNEL); |
1753 | if (!rdev->pm.power_state) |
1754 | return state_index; |
1755 | + power_state_offset = (u8 *)state_array->states; |
1756 | for (i = 0; i < state_array->ucNumEntries; i++) { |
1757 | mode_index = 0; |
1758 | - power_state = (union pplib_power_state *)&state_array->states[i]; |
1759 | - /* XXX this might be an inagua bug... */ |
1760 | - non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */ |
1761 | + power_state = (union pplib_power_state *)power_state_offset; |
1762 | + non_clock_array_index = power_state->v2.nonClockInfoIndex; |
1763 | non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) |
1764 | &non_clock_info_array->nonClockInfo[non_clock_array_index]; |
1765 | rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) * |
1766 | @@ -2550,9 +2557,6 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) |
1767 | if (power_state->v2.ucNumDPMLevels) { |
1768 | for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { |
1769 | clock_array_index = power_state->v2.clockInfoIndex[j]; |
1770 | - /* XXX this might be an inagua bug... */ |
1771 | - if (clock_array_index >= clock_info_array->ucNumEntries) |
1772 | - continue; |
1773 | clock_info = (union pplib_clock_info *) |
1774 | &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; |
1775 | valid = radeon_atombios_parse_pplib_clock_info(rdev, |
1776 | @@ -2574,6 +2578,7 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) |
1777 | non_clock_info); |
1778 | state_index++; |
1779 | } |
1780 | + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; |
1781 | } |
1782 | /* if multiple clock modes, mark the lowest as no display */ |
1783 | for (i = 0; i < state_index; i++) { |
1784 | @@ -2620,7 +2625,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) |
1785 | default: |
1786 | break; |
1787 | } |
1788 | - } else { |
1789 | + } |
1790 | + |
1791 | + if (state_index == 0) { |
1792 | rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); |
1793 | if (rdev->pm.power_state) { |
1794 | rdev->pm.power_state[0].clock_info = |
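
The parse_power_table_6 change above stops indexing state_array->states[] as if entries were fixed size: v6 pplib states are variable-length records (a 2-byte header followed by ucNumDPMLevels one-byte clock-info indices), so the loop now advances a byte cursor by 2 + ucNumDPMLevels per state. A sketch of walking such records (the field layout here is simplified for illustration, not the exact ATOM structure):

#include <stdint.h>
#include <stdio.h>

/* Illustrative record: { u8 num_levels; u8 non_clock_index;
 *                        u8 level_index[num_levels]; } */
int main(void)
{
	const uint8_t table[] = {
		2, 0, 10, 11,   /* state 0: two DPM levels */
		1, 1, 12,       /* state 1: one DPM level */
	};
	const uint8_t *p = table;
	unsigned num_states = 2;

	for (unsigned i = 0; i < num_states; i++) {
		uint8_t num_levels = p[0];

		printf("state %u: %u levels, non-clock idx %u\n",
		       i, num_levels, p[1]);
		p += 2 + num_levels;   /* header + per-level index bytes */
	}
	return 0;
}
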
1795 | diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c |
1796 | index c75cb2c..c5b2765 100644 |
1797 | --- a/drivers/gpu/drm/radeon/radeon_kms.c |
1798 | +++ b/drivers/gpu/drm/radeon/radeon_kms.c |
1799 | @@ -50,9 +50,13 @@ int radeon_driver_unload_kms(struct drm_device *dev) |
1800 | |
1801 | if (rdev == NULL) |
1802 | return 0; |
1803 | + if (rdev->rmmio == NULL) |
1804 | + goto done_free; |
1805 | radeon_acpi_fini(rdev); |
1806 | radeon_modeset_fini(rdev); |
1807 | radeon_device_fini(rdev); |
1808 | + |
1809 | +done_free: |
1810 | kfree(rdev); |
1811 | dev->dev_private = NULL; |
1812 | return 0; |
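
The unload fix above guards teardown on rdev->rmmio: if early init failed before the register BAR was mapped, radeon_modeset_fini()/radeon_device_fini() would touch unmapped registers, so the code now jumps straight to freeing the device. A sketch of the same guard-and-skip shape (struct and field names are stand-ins):

#include <stdlib.h>

struct dev { void *rmmio; };

/* If early init never mapped the registers, skip the teardown that
 * would dereference the mapping and just free the structure. */
static int unload(struct dev *d)
{
	if (!d)
		return 0;
	if (!d->rmmio)
		goto done_free;
	/* ... full teardown that needs the register mapping ... */
done_free:
	free(d);
	return 0;
}

int main(void)
{
	struct dev *d = calloc(1, sizeof(*d)); /* rmmio == NULL: early failure */
	return unload(d);
}
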
1813 | diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c |
1814 | index 338fd6a..788c64c 100644 |
1815 | --- a/drivers/gpu/drm/radeon/radeon_pm.c |
1816 | +++ b/drivers/gpu/drm/radeon/radeon_pm.c |
1817 | @@ -843,7 +843,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) |
1818 | struct radeon_device *rdev = dev->dev_private; |
1819 | |
1820 | seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); |
1821 | - seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
1822 | + /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ |
1823 | + if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) |
1824 | + seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); |
1825 | + else |
1826 | + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); |
1827 | seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); |
1828 | if (rdev->asic->pm.get_memory_clock) |
1829 | seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); |
1830 | diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c |
1831 | index 8d58e26..1ef5eaa 100644 |
1832 | --- a/drivers/gpu/drm/radeon/radeon_ring.c |
1833 | +++ b/drivers/gpu/drm/radeon/radeon_ring.c |
1834 | @@ -180,7 +180,8 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, |
1835 | radeon_semaphore_free(rdev, &ib->semaphore, NULL); |
1836 | } |
1837 | /* if we can't remember our last VM flush then flush now! */ |
1838 | - if (ib->vm && !ib->vm->last_flush) { |
1839 | + /* XXX figure out why we have to flush for every IB */ |
1840 | + if (ib->vm /*&& !ib->vm->last_flush*/) { |
1841 | radeon_ring_vm_flush(rdev, ib->ring, ib->vm); |
1842 | } |
1843 | if (const_ib) { |
1844 | diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c |
1845 | index 5a0fc74..46fa1b0 100644 |
1846 | --- a/drivers/gpu/drm/radeon/rs600.c |
1847 | +++ b/drivers/gpu/drm/radeon/rs600.c |
1848 | @@ -52,23 +52,59 @@ static const u32 crtc_offsets[2] = |
1849 | AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL |
1850 | }; |
1851 | |
1852 | +static bool avivo_is_in_vblank(struct radeon_device *rdev, int crtc) |
1853 | +{ |
1854 | + if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK) |
1855 | + return true; |
1856 | + else |
1857 | + return false; |
1858 | +} |
1859 | + |
1860 | +static bool avivo_is_counter_moving(struct radeon_device *rdev, int crtc) |
1861 | +{ |
1862 | + u32 pos1, pos2; |
1863 | + |
1864 | + pos1 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]); |
1865 | + pos2 = RREG32(AVIVO_D1CRTC_STATUS_POSITION + crtc_offsets[crtc]); |
1866 | + |
1867 | + if (pos1 != pos2) |
1868 | + return true; |
1869 | + else |
1870 | + return false; |
1871 | +} |
1872 | + |
1873 | +/** |
1874 | + * avivo_wait_for_vblank - vblank wait asic callback. |
1875 | + * |
1876 | + * @rdev: radeon_device pointer |
1877 | + * @crtc: crtc to wait for vblank on |
1878 | + * |
1879 | + * Wait for vblank on the requested crtc (r5xx-r7xx). |
1880 | + */ |
1881 | void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc) |
1882 | { |
1883 | - int i; |
1884 | + unsigned i = 0; |
1885 | |
1886 | if (crtc >= rdev->num_crtc) |
1887 | return; |
1888 | |
1889 | - if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) { |
1890 | - for (i = 0; i < rdev->usec_timeout; i++) { |
1891 | - if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)) |
1892 | + if (!(RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN)) |
1893 | + return; |
1894 | + |
1895 | + /* depending on when we hit vblank, we may be close to active; if so, |
1896 | + * wait for another frame. |
1897 | + */ |
1898 | + while (avivo_is_in_vblank(rdev, crtc)) { |
1899 | + if (i++ % 100 == 0) { |
1900 | + if (!avivo_is_counter_moving(rdev, crtc)) |
1901 | break; |
1902 | - udelay(1); |
1903 | } |
1904 | - for (i = 0; i < rdev->usec_timeout; i++) { |
1905 | - if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK) |
1906 | + } |
1907 | + |
1908 | + while (!avivo_is_in_vblank(rdev, crtc)) { |
1909 | + if (i++ % 100 == 0) { |
1910 | + if (!avivo_is_counter_moving(rdev, crtc)) |
1911 | break; |
1912 | - udelay(1); |
1913 | } |
1914 | } |
1915 | } |
1916 | diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c |
1917 | index 435ed35..ffcba73 100644 |
1918 | --- a/drivers/gpu/drm/radeon/rv515.c |
1919 | +++ b/drivers/gpu/drm/radeon/rv515.c |
1920 | @@ -303,8 +303,10 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) |
1921 | tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]); |
1922 | if (!(tmp & AVIVO_CRTC_DISP_READ_REQUEST_DISABLE)) { |
1923 | radeon_wait_for_vblank(rdev, i); |
1924 | + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1); |
1925 | tmp |= AVIVO_CRTC_DISP_READ_REQUEST_DISABLE; |
1926 | WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp); |
1927 | + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0); |
1928 | } |
1929 | /* wait for the next frame */ |
1930 | frame_count = radeon_get_vblank_counter(rdev, i); |
1931 | @@ -313,6 +315,15 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) |
1932 | break; |
1933 | udelay(1); |
1934 | } |
1935 | + |
1936 | + /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ |
1937 | + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 1); |
1938 | + tmp = RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i]); |
1939 | + tmp &= ~AVIVO_CRTC_EN; |
1940 | + WREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[i], tmp); |
1941 | + WREG32(AVIVO_D1CRTC_UPDATE_LOCK + crtc_offsets[i], 0); |
1942 | + save->crtc_enabled[i] = false; |
1943 | + /* ***** */ |
1944 | } else { |
1945 | save->crtc_enabled[i] = false; |
1946 | } |
1947 | @@ -338,6 +349,22 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save) |
1948 | } |
1949 | /* wait for the MC to settle */ |
1950 | udelay(100); |
1951 | + |
1952 | + /* lock double buffered regs */ |
1953 | + for (i = 0; i < rdev->num_crtc; i++) { |
1954 | + if (save->crtc_enabled[i]) { |
1955 | + tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); |
1956 | + if (!(tmp & AVIVO_D1GRPH_UPDATE_LOCK)) { |
1957 | + tmp |= AVIVO_D1GRPH_UPDATE_LOCK; |
1958 | + WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp); |
1959 | + } |
1960 | + tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]); |
1961 | + if (!(tmp & 1)) { |
1962 | + tmp |= 1; |
1963 | + WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); |
1964 | + } |
1965 | + } |
1966 | + } |
1967 | } |
1968 | |
1969 | void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) |
1970 | @@ -348,7 +375,7 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) |
1971 | /* update crtc base addresses */ |
1972 | for (i = 0; i < rdev->num_crtc; i++) { |
1973 | if (rdev->family >= CHIP_RV770) { |
1974 | - if (i == 1) { |
1975 | + if (i == 0) { |
1976 | WREG32(R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH, |
1977 | upper_32_bits(rdev->mc.vram_start)); |
1978 | WREG32(R700_D1GRPH_SECONDARY_SURFACE_ADDRESS_HIGH, |
1979 | @@ -367,6 +394,33 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) |
1980 | } |
1981 | WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); |
1982 | |
1983 | + /* unlock regs and wait for update */ |
1984 | + for (i = 0; i < rdev->num_crtc; i++) { |
1985 | + if (save->crtc_enabled[i]) { |
1986 | + tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); |
1987 | + if ((tmp & 0x3) != 0) { |
1988 | + tmp &= ~0x3; |
1989 | + WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); |
1990 | + } |
1991 | + tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); |
1992 | + if (tmp & AVIVO_D1GRPH_UPDATE_LOCK) { |
1993 | + tmp &= ~AVIVO_D1GRPH_UPDATE_LOCK; |
1994 | + WREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i], tmp); |
1995 | + } |
1996 | + tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i]); |
1997 | + if (tmp & 1) { |
1998 | + tmp &= ~1; |
1999 | + WREG32(AVIVO_D1MODE_MASTER_UPDATE_LOCK + crtc_offsets[i], tmp); |
2000 | + } |
2001 | + for (j = 0; j < rdev->usec_timeout; j++) { |
2002 | + tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); |
2003 | + if ((tmp & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING) == 0) |
2004 | + break; |
2005 | + udelay(1); |
2006 | + } |
2007 | + } |
2008 | + } |
2009 | + |
2010 | if (rdev->family >= CHIP_R600) { |
2011 | /* unblackout the MC */ |
2012 | if (rdev->family >= CHIP_RV770) |
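
rv515_mc_stop()/rv515_mc_resume() above gain the same discipline as the evergreen versions: take the per-CRTC and master update locks around register changes so double-buffered values latch in one shot, and on resume clear the locks and wait for the SURFACE_UPDATE_PENDING bit to drop. A toy sketch of the lock/modify/unlock pairing (registers simulated as an array, bit values invented):

#include <stdio.h>

static unsigned regs[2];
#define UPDATE_LOCK  0
#define CRTC_CONTROL 1

static void wreg(int r, unsigned v) { regs[r] = v; }   /* WREG32 stand-in */
static unsigned rreg(int r) { return regs[r]; }        /* RREG32 stand-in */

int main(void)
{
	unsigned tmp;

	regs[CRTC_CONTROL] = 0x3;

	wreg(UPDATE_LOCK, 1);       /* freeze double-buffered updates */
	tmp = rreg(CRTC_CONTROL);
	tmp &= ~(1u << 0);          /* e.g. clear an enable bit */
	wreg(CRTC_CONTROL, tmp);
	wreg(UPDATE_LOCK, 0);       /* release: hardware latches atomically */

	printf("control=%#x\n", rreg(CRTC_CONTROL));
	return 0;
}
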
2013 | diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c |
2014 | index bafbe32..3dd7ecc 100644 |
2015 | --- a/drivers/gpu/drm/radeon/si.c |
2016 | +++ b/drivers/gpu/drm/radeon/si.c |
2017 | @@ -1463,7 +1463,7 @@ static void si_select_se_sh(struct radeon_device *rdev, |
2018 | u32 data = INSTANCE_BROADCAST_WRITES; |
2019 | |
2020 | if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) |
2021 | - data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; |
2022 | + data |= SH_BROADCAST_WRITES | SE_BROADCAST_WRITES; |
2023 | else if (se_num == 0xffffffff) |
2024 | data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num); |
2025 | else if (sh_num == 0xffffffff) |
2026 | @@ -1765,6 +1765,7 @@ static void si_gpu_init(struct radeon_device *rdev) |
2027 | |
2028 | WREG32(GB_ADDR_CONFIG, gb_addr_config); |
2029 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
2030 | + WREG32(DMIF_ADDR_CALC, gb_addr_config); |
2031 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
2032 | WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); |
2033 | WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); |
2034 | diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h |
2035 | index 23fc08f..f84cff0 100644 |
2036 | --- a/drivers/gpu/drm/radeon/sid.h |
2037 | +++ b/drivers/gpu/drm/radeon/sid.h |
2038 | @@ -65,6 +65,8 @@ |
2039 | |
2040 | #define DMIF_ADDR_CONFIG 0xBD4 |
2041 | |
2042 | +#define DMIF_ADDR_CALC 0xC00 |
2043 | + |
2044 | #define SRBM_STATUS 0xE50 |
2045 | #define GRBM_RQ_PENDING (1 << 5) |
2046 | #define VMC_BUSY (1 << 8) |
2047 | diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c |
2048 | index c5b592d..bfac582 100644 |
2049 | --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c |
2050 | +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c |
2051 | @@ -75,7 +75,7 @@ static int modeset_init(struct drm_device *dev) |
2052 | mod->funcs->modeset_init(mod, dev); |
2053 | } |
2054 | |
2055 | - if ((priv->num_encoders = 0) || (priv->num_connectors == 0)) { |
2056 | + if ((priv->num_encoders == 0) || (priv->num_connectors == 0)) { |
2057 | /* oh nos! */ |
2058 | dev_err(dev->dev, "no encoders/connectors found\n"); |
2059 | return -ENXIO; |
2060 | diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c |
2061 | index 3816270..ef034fa 100644 |
2062 | --- a/drivers/gpu/drm/udl/udl_gem.c |
2063 | +++ b/drivers/gpu/drm/udl/udl_gem.c |
2064 | @@ -303,6 +303,8 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, |
2065 | if (IS_ERR(attach)) |
2066 | return ERR_CAST(attach); |
2067 | |
2068 | + get_dma_buf(dma_buf); |
2069 | + |
2070 | sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); |
2071 | if (IS_ERR(sg)) { |
2072 | ret = PTR_ERR(sg); |
2073 | @@ -322,5 +324,7 @@ fail_unmap: |
2074 | dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL); |
2075 | fail_detach: |
2076 | dma_buf_detach(dma_buf, attach); |
2077 | + dma_buf_put(dma_buf); |
2078 | + |
2079 | return ERR_PTR(ret); |
2080 | } |
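
The udl_gem_prime_import() fix above takes its own reference on the dma_buf with get_dma_buf() once the import keeps a pointer to it, and drops it with dma_buf_put() on the failure path; without the pair the buffer can be freed under the driver or leaked. A generic sketch of that balance rule (toy refcount, not the dma-buf API):

#include <stdio.h>

struct buf { int refs; };

static void get_buf(struct buf *b) { b->refs++; } /* get_dma_buf analogue */
static void put_buf(struct buf *b) { b->refs--; } /* dma_buf_put analogue */

static int import(struct buf *b, int fail)
{
	get_buf(b);              /* we are about to keep b around */
	if (fail) {
		put_buf(b);      /* error path: balance the reference */
		return -1;
	}
	return 0;                /* success: reference dropped at release time */
}

int main(void)
{
	struct buf b = { .refs = 1 };

	import(&b, 1);
	printf("refs=%d\n", b.refs); /* back to 1: no leak on failure */
	return 0;
}
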
2081 | diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c |
2082 | index 70b1808..ed49ab3 100644 |
2083 | --- a/drivers/infiniband/hw/cxgb4/qp.c |
2084 | +++ b/drivers/infiniband/hw/cxgb4/qp.c |
2085 | @@ -100,6 +100,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) |
2086 | return 0; |
2087 | } |
2088 | |
2089 | +static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) |
2090 | +{ |
2091 | + int ret = -ENOSYS; |
2092 | + if (user) |
2093 | + ret = alloc_oc_sq(rdev, sq); |
2094 | + if (ret) |
2095 | + ret = alloc_host_sq(rdev, sq); |
2096 | + return ret; |
2097 | +} |
2098 | + |
2099 | static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, |
2100 | struct c4iw_dev_ucontext *uctx) |
2101 | { |
2102 | @@ -168,18 +178,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, |
2103 | goto free_sw_rq; |
2104 | } |
2105 | |
2106 | - if (user) { |
2107 | - ret = alloc_oc_sq(rdev, &wq->sq); |
2108 | - if (ret) |
2109 | - goto free_hwaddr; |
2110 | - |
2111 | - ret = alloc_host_sq(rdev, &wq->sq); |
2112 | - if (ret) |
2113 | - goto free_sq; |
2114 | - } else |
2115 | - ret = alloc_host_sq(rdev, &wq->sq); |
2116 | - if (ret) |
2117 | - goto free_hwaddr; |
2118 | + ret = alloc_sq(rdev, &wq->sq, user); |
2119 | + if (ret) |
2120 | + goto free_hwaddr; |
2121 | memset(wq->sq.queue, 0, wq->sq.memsize); |
2122 | dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); |
2123 | |
2124 | diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c |
2125 | index b287ca3..cbb1645 100644 |
2126 | --- a/drivers/iommu/amd_iommu.c |
2127 | +++ b/drivers/iommu/amd_iommu.c |
2128 | @@ -3947,6 +3947,9 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic) |
2129 | if (!table) |
2130 | goto out; |
2131 | |
2132 | + /* Initialize table spin-lock */ |
2133 | + spin_lock_init(&table->lock); |
2134 | + |
2135 | if (ioapic) |
2136 | /* Keep the first 32 indexes free for IOAPIC interrupts */ |
2137 | table->min_index = 32; |
2138 | diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c |
2139 | index c859771..f46dbef 100644 |
2140 | --- a/drivers/net/ethernet/ibm/ibmveth.c |
2141 | +++ b/drivers/net/ethernet/ibm/ibmveth.c |
2142 | @@ -1324,7 +1324,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { |
2143 | |
2144 | static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
2145 | { |
2146 | - int rc, i; |
2147 | + int rc, i, mac_len; |
2148 | struct net_device *netdev; |
2149 | struct ibmveth_adapter *adapter; |
2150 | unsigned char *mac_addr_p; |
2151 | @@ -1334,11 +1334,19 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
2152 | dev->unit_address); |
2153 | |
2154 | mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR, |
2155 | - NULL); |
2156 | + &mac_len); |
2157 | if (!mac_addr_p) { |
2158 | dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n"); |
2159 | return -EINVAL; |
2160 | } |
2161 | + /* Workaround for old/broken pHyp */ |
2162 | + if (mac_len == 8) |
2163 | + mac_addr_p += 2; |
2164 | + else if (mac_len != 6) { |
2165 | + dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n", |
2166 | + mac_len); |
2167 | + return -EINVAL; |
2168 | + } |
2169 | |
2170 | mcastFilterSize_p = (unsigned int *)vio_get_attribute(dev, |
2171 | VETH_MCAST_FILTER_SIZE, NULL); |
2172 | @@ -1363,17 +1371,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) |
2173 | |
2174 | netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); |
2175 | |
2176 | - /* |
2177 | - * Some older boxes running PHYP non-natively have an OF that returns |
2178 | - * a 8-byte local-mac-address field (and the first 2 bytes have to be |
2179 | - * ignored) while newer boxes' OF return a 6-byte field. Note that |
2180 | - * IEEE 1275 specifies that local-mac-address must be a 6-byte field. |
2181 | - * The RPA doc specifies that the first byte must be 10b, so we'll |
2182 | - * just look for it to solve this 8 vs. 6 byte field issue |
2183 | - */ |
2184 | - if ((*mac_addr_p & 0x3) != 0x02) |
2185 | - mac_addr_p += 2; |
2186 | - |
2187 | adapter->mac_addr = 0; |
2188 | memcpy(&adapter->mac_addr, mac_addr_p, 6); |
2189 | |
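
The ibmveth probe change above asks vio_get_attribute() for the attribute length and keys the old-pHyp workaround on it: an 8-byte local-mac-address carries 2 leading pad bytes to skip, a 6-byte one is used as-is, and any other length is rejected, replacing the fragile test on the first address byte. A sketch (values invented):

#include <stdio.h>

static const unsigned char *fixup_mac(const unsigned char *p, int len)
{
	if (len == 8)
		return p + 2;   /* old/broken firmware: skip the 2 pad bytes */
	if (len != 6)
		return NULL;    /* malformed attribute */
	return p;
}

int main(void)
{
	unsigned char attr[8] = { 0, 0, 0x02, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE };
	const unsigned char *mac = fixup_mac(attr, sizeof(attr));

	if (mac)
		printf("%02x:%02x:...\n", mac[0], mac[1]); /* 02:aa:... */
	return 0;
}
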
2190 | diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c |
2191 | index 4ecbe64..15ba8c4 100644 |
2192 | --- a/drivers/net/ethernet/realtek/r8169.c |
2193 | +++ b/drivers/net/ethernet/realtek/r8169.c |
2194 | @@ -5787,6 +5787,14 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, |
2195 | goto err_stop_0; |
2196 | } |
2197 | |
2198 | + /* 8168evl does not automatically pad to minimum length. */ |
2199 | + if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 && |
2200 | + skb->len < ETH_ZLEN)) { |
2201 | + if (skb_padto(skb, ETH_ZLEN)) |
2202 | + goto err_update_stats; |
2203 | + skb_put(skb, ETH_ZLEN - skb->len); |
2204 | + } |
2205 | + |
2206 | if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) |
2207 | goto err_stop_0; |
2208 | |
2209 | @@ -5858,6 +5866,7 @@ err_dma_1: |
2210 | rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd); |
2211 | err_dma_0: |
2212 | dev_kfree_skb(skb); |
2213 | +err_update_stats: |
2214 | dev->stats.tx_dropped++; |
2215 | return NETDEV_TX_OK; |
2216 | |
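
The r8169 hunk above pads 8168evl frames to the 60-byte Ethernet minimum in software: skb_padto() zeroes the buffer tail but does not change skb->len, so the explicit skb_put() is needed to account for the padding, and the new err_update_stats label keeps the drop counter honest when padding fails. The same two-step idea on a plain buffer:

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60

int main(void)
{
	unsigned char frame[ETH_ZLEN];
	size_t len = 42;                         /* short packet */

	memset(frame + len, 0, ETH_ZLEN - len);  /* step 1: zero the tail (skb_padto) */
	len = ETH_ZLEN;                          /* step 2: extend the length (skb_put) */
	printf("%zu\n", len);
	return 0;
}
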
2217 | diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c |
2218 | index 57136dc..299c53b 100644 |
2219 | --- a/drivers/net/usb/cdc_ether.c |
2220 | +++ b/drivers/net/usb/cdc_ether.c |
2221 | @@ -615,6 +615,13 @@ static const struct usb_device_id products [] = { |
2222 | .driver_info = 0, |
2223 | }, |
2224 | |
2225 | +/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */ |
2226 | +{ |
2227 | + USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM, |
2228 | + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), |
2229 | + .driver_info = 0, |
2230 | +}, |
2231 | + |
2232 | /* AnyDATA ADU960S - handled by qmi_wwan */ |
2233 | { |
2234 | USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, |
2235 | diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c |
2236 | index 2a3579f..a7cafe4 100644 |
2237 | --- a/drivers/net/usb/qmi_wwan.c |
2238 | +++ b/drivers/net/usb/qmi_wwan.c |
2239 | @@ -496,6 +496,13 @@ static const struct usb_device_id products[] = { |
2240 | USB_CDC_PROTO_NONE), |
2241 | .driver_info = (unsigned long)&qmi_wwan_info, |
2242 | }, |
2243 | + { /* Dell Wireless 5804 (Novatel E371) */ |
2244 | + USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b, |
2245 | + USB_CLASS_COMM, |
2246 | + USB_CDC_SUBCLASS_ETHERNET, |
2247 | + USB_CDC_PROTO_NONE), |
2248 | + .driver_info = (unsigned long)&qmi_wwan_info, |
2249 | + }, |
2250 | { /* ADU960S */ |
2251 | USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, |
2252 | USB_CLASS_COMM, |
2253 | diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c |
2254 | index 8647dc6..f9c61fb 100644 |
2255 | --- a/drivers/pci/bus.c |
2256 | +++ b/drivers/pci/bus.c |
2257 | @@ -174,6 +174,7 @@ int pci_bus_add_device(struct pci_dev *dev) |
2258 | * Can not put in pci_device_add yet because resources |
2259 | * are not assigned yet for some devices. |
2260 | */ |
2261 | + pci_fixup_device(pci_fixup_final, dev); |
2262 | pci_create_sysfs_dev_files(dev); |
2263 | |
2264 | dev->match_driver = true; |
2265 | diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c |
2266 | index b494066..5427787 100644 |
2267 | --- a/drivers/pci/probe.c |
2268 | +++ b/drivers/pci/probe.c |
2269 | @@ -1339,7 +1339,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) |
2270 | list_add_tail(&dev->bus_list, &bus->devices); |
2271 | up_write(&pci_bus_sem); |
2272 | |
2273 | - pci_fixup_device(pci_fixup_final, dev); |
2274 | ret = pcibios_add_device(dev); |
2275 | WARN_ON(ret < 0); |
2276 | |
2277 | diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c |
2278 | index 69a2d9e..3223b57 100644 |
2279 | --- a/drivers/pwm/pwm-spear.c |
2280 | +++ b/drivers/pwm/pwm-spear.c |
2281 | @@ -143,7 +143,7 @@ static int spear_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) |
2282 | u32 val; |
2283 | |
2284 | rc = clk_enable(pc->clk); |
2285 | - if (!rc) |
2286 | + if (rc) |
2287 | return rc; |
2288 | |
2289 | val = spear_pwm_readl(pc, pwm->hwpwm, PWMCR); |
2290 | @@ -209,12 +209,12 @@ static int spear_pwm_probe(struct platform_device *pdev) |
2291 | pc->chip.npwm = NUM_PWM; |
2292 | |
2293 | ret = clk_prepare(pc->clk); |
2294 | - if (!ret) |
2295 | + if (ret) |
2296 | return ret; |
2297 | |
2298 | if (of_device_is_compatible(np, "st,spear1340-pwm")) { |
2299 | ret = clk_enable(pc->clk); |
2300 | - if (!ret) { |
2301 | + if (ret) { |
2302 | clk_unprepare(pc->clk); |
2303 | return ret; |
2304 | } |
2305 | diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig |
2306 | index c6d77e2..be6e121 100644 |
2307 | --- a/drivers/remoteproc/Kconfig |
2308 | +++ b/drivers/remoteproc/Kconfig |
2309 | @@ -6,6 +6,7 @@ config REMOTEPROC |
2310 | depends on HAS_DMA |
2311 | select FW_LOADER |
2312 | select VIRTIO |
2313 | + select VIRTUALIZATION |
2314 | |
2315 | config OMAP_REMOTEPROC |
2316 | tristate "OMAP remoteproc support" |
2317 | diff --git a/drivers/rpmsg/Kconfig b/drivers/rpmsg/Kconfig |
2318 | index f6e0ea6..69a2193 100644 |
2319 | --- a/drivers/rpmsg/Kconfig |
2320 | +++ b/drivers/rpmsg/Kconfig |
2321 | @@ -4,5 +4,6 @@ menu "Rpmsg drivers" |
2322 | config RPMSG |
2323 | tristate |
2324 | select VIRTIO |
2325 | + select VIRTUALIZATION |
2326 | |
2327 | endmenu |
2328 | diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c |
2329 | index 01443ce..13ddec9 100644 |
2330 | --- a/fs/autofs4/expire.c |
2331 | +++ b/fs/autofs4/expire.c |
2332 | @@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry) |
2333 | /* This is an autofs submount, we can't expire it */ |
2334 | if (autofs_type_indirect(sbi->type)) |
2335 | goto done; |
2336 | - |
2337 | - /* |
2338 | - * Otherwise it's an offset mount and we need to check |
2339 | - * if we can umount its mount, if there is one. |
2340 | - */ |
2341 | - if (!d_mountpoint(path.dentry)) { |
2342 | - status = 0; |
2343 | - goto done; |
2344 | - } |
2345 | } |
2346 | |
2347 | /* Update the expiry counter if fs is busy */ |
2348 | diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c |
2349 | index b7a0641..116abec 100644 |
2350 | --- a/fs/btrfs/delayed-ref.c |
2351 | +++ b/fs/btrfs/delayed-ref.c |
2352 | @@ -40,16 +40,19 @@ struct kmem_cache *btrfs_delayed_extent_op_cachep; |
2353 | * compare two delayed tree backrefs with same bytenr and type |
2354 | */ |
2355 | static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, |
2356 | - struct btrfs_delayed_tree_ref *ref1) |
2357 | + struct btrfs_delayed_tree_ref *ref1, int type) |
2358 | { |
2359 | - if (ref1->root < ref2->root) |
2360 | - return -1; |
2361 | - if (ref1->root > ref2->root) |
2362 | - return 1; |
2363 | - if (ref1->parent < ref2->parent) |
2364 | - return -1; |
2365 | - if (ref1->parent > ref2->parent) |
2366 | - return 1; |
2367 | + if (type == BTRFS_TREE_BLOCK_REF_KEY) { |
2368 | + if (ref1->root < ref2->root) |
2369 | + return -1; |
2370 | + if (ref1->root > ref2->root) |
2371 | + return 1; |
2372 | + } else { |
2373 | + if (ref1->parent < ref2->parent) |
2374 | + return -1; |
2375 | + if (ref1->parent > ref2->parent) |
2376 | + return 1; |
2377 | + } |
2378 | return 0; |
2379 | } |
2380 | |
2381 | @@ -113,7 +116,8 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2, |
2382 | if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || |
2383 | ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { |
2384 | return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), |
2385 | - btrfs_delayed_node_to_tree_ref(ref1)); |
2386 | + btrfs_delayed_node_to_tree_ref(ref1), |
2387 | + ref1->type); |
2388 | } else if (ref1->type == BTRFS_EXTENT_DATA_REF_KEY || |
2389 | ref1->type == BTRFS_SHARED_DATA_REF_KEY) { |
2390 | return comp_data_refs(btrfs_delayed_node_to_data_ref(ref2), |
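
The comp_tree_refs() change above compares on the field that is actually meaningful for the ref type: keyed refs (TREE_BLOCK_REF) compare by root, shared refs by parent; comparing both allowed a stale value in the unused field to make otherwise identical refs look different. A sketch of a type-dispatched comparator:

#include <stdio.h>

struct tree_ref { unsigned long long root, parent; };

/* Compare only the field that is valid for this ref type;
 * the other one may hold stale data. */
static int comp(const struct tree_ref *a, const struct tree_ref *b, int keyed)
{
	unsigned long long x = keyed ? a->root : a->parent;
	unsigned long long y = keyed ? b->root : b->parent;

	return (x > y) - (x < y);
}

int main(void)
{
	struct tree_ref r1 = { .root = 5, .parent = 0 };
	struct tree_ref r2 = { .root = 5, .parent = 77 }; /* stale parent */

	printf("%d\n", comp(&r1, &r2, 1)); /* 0: same ref when keyed by root */
	return 0;
}
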
2391 | diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c |
2392 | index 09c58a3..cc6ce3e 100644 |
2393 | --- a/fs/btrfs/inode.c |
2394 | +++ b/fs/btrfs/inode.c |
2395 | @@ -6502,7 +6502,9 @@ out: |
2396 | * block must be cow'd |
2397 | */ |
2398 | static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, |
2399 | - struct inode *inode, u64 offset, u64 len) |
2400 | + struct inode *inode, u64 offset, u64 *len, |
2401 | + u64 *orig_start, u64 *orig_block_len, |
2402 | + u64 *ram_bytes) |
2403 | { |
2404 | struct btrfs_path *path; |
2405 | int ret; |
2406 | @@ -6559,8 +6561,12 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, |
2407 | disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); |
2408 | backref_offset = btrfs_file_extent_offset(leaf, fi); |
2409 | |
2410 | + *orig_start = key.offset - backref_offset; |
2411 | + *orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi); |
2412 | + *ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); |
2413 | + |
2414 | extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi); |
2415 | - if (extent_end < offset + len) { |
2416 | + if (extent_end < offset + *len) { |
2417 | /* extent doesn't include our full range, must cow */ |
2418 | goto out; |
2419 | } |
2420 | @@ -6584,13 +6590,14 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, |
2421 | */ |
2422 | disk_bytenr += backref_offset; |
2423 | disk_bytenr += offset - key.offset; |
2424 | - num_bytes = min(offset + len, extent_end) - offset; |
2425 | + num_bytes = min(offset + *len, extent_end) - offset; |
2426 | if (csum_exist_in_range(root, disk_bytenr, num_bytes)) |
2427 | goto out; |
2428 | /* |
2429 | * all of the above have passed, it is safe to overwrite this extent |
2430 | * without cow |
2431 | */ |
2432 | + *len = num_bytes; |
2433 | ret = 1; |
2434 | out: |
2435 | btrfs_free_path(path); |
2436 | @@ -6789,7 +6796,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
2437 | em->block_start != EXTENT_MAP_HOLE)) { |
2438 | int type; |
2439 | int ret; |
2440 | - u64 block_start; |
2441 | + u64 block_start, orig_start, orig_block_len, ram_bytes; |
2442 | |
2443 | if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) |
2444 | type = BTRFS_ORDERED_PREALLOC; |
2445 | @@ -6807,10 +6814,8 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, |
2446 | if (IS_ERR(trans)) |
2447 | goto must_cow; |
2448 | |
2449 | - if (can_nocow_odirect(trans, inode, start, len) == 1) { |
2450 | - u64 orig_start = em->orig_start; |
2451 | - u64 orig_block_len = em->orig_block_len; |
2452 | - |
2453 | + if (can_nocow_odirect(trans, inode, start, &len, &orig_start, |
2454 | + &orig_block_len, &ram_bytes) == 1) { |
2455 | if (type == BTRFS_ORDERED_PREALLOC) { |
2456 | free_extent_map(em); |
2457 | em = create_pinned_em(inode, start, len, |
2458 | diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c |
2459 | index 1357260..3beae6a 100644 |
2460 | --- a/fs/ext4/resize.c |
2461 | +++ b/fs/ext4/resize.c |
2462 | @@ -1882,6 +1882,10 @@ retry: |
2463 | return 0; |
2464 | |
2465 | ext4_get_group_no_and_offset(sb, n_blocks_count - 1, &n_group, &offset); |
2466 | + if (n_group > (0xFFFFFFFFUL / EXT4_INODES_PER_GROUP(sb))) { |
2467 | + ext4_warning(sb, "resize would cause inodes_count overflow"); |
2468 | + return -EINVAL; |
2469 | + } |
2470 | ext4_get_group_no_and_offset(sb, o_blocks_count - 1, &o_group, &offset); |
2471 | |
2472 | n_desc_blocks = num_desc_blocks(sb, n_group + 1); |
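
The resize check above rejects a new group count that would push inodes_count past 32 bits, using the divide-instead-of-multiply idiom so the test itself cannot overflow. A sketch (values invented):

#include <stdint.h>
#include <stdio.h>

/* Checking "a * b would overflow u32" without doing the multiply:
 * reject when a > UINT32_MAX / b. */
int main(void)
{
	uint32_t inodes_per_group = 8192;   /* illustrative value */
	uint64_t n_group = 700000;          /* proposed group count */

	if (n_group > UINT32_MAX / inodes_per_group)
		puts("resize rejected: inodes_count would overflow");
	return 0;
}
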
2473 | diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c |
2474 | index 523464e..a3f868a 100644 |
2475 | --- a/fs/hugetlbfs/inode.c |
2476 | +++ b/fs/hugetlbfs/inode.c |
2477 | @@ -909,11 +909,8 @@ static int can_do_hugetlb_shm(void) |
2478 | |
2479 | static int get_hstate_idx(int page_size_log) |
2480 | { |
2481 | - struct hstate *h; |
2482 | + struct hstate *h = hstate_sizelog(page_size_log); |
2483 | |
2484 | - if (!page_size_log) |
2485 | - return default_hstate_idx; |
2486 | - h = size_to_hstate(1 << page_size_log); |
2487 | if (!h) |
2488 | return -1; |
2489 | return h - hstates; |
2490 | @@ -929,9 +926,12 @@ static struct dentry_operations anon_ops = { |
2491 | .d_dname = hugetlb_dname |
2492 | }; |
2493 | |
2494 | -struct file *hugetlb_file_setup(const char *name, unsigned long addr, |
2495 | - size_t size, vm_flags_t acctflag, |
2496 | - struct user_struct **user, |
2497 | +/* |
2498 | + * Note that size should be aligned to proper hugepage size in caller side, |
2499 | + * otherwise hugetlb_reserve_pages reserves one less hugepages than intended. |
2500 | + */ |
2501 | +struct file *hugetlb_file_setup(const char *name, size_t size, |
2502 | + vm_flags_t acctflag, struct user_struct **user, |
2503 | int creat_flags, int page_size_log) |
2504 | { |
2505 | struct file *file = ERR_PTR(-ENOMEM); |
2506 | @@ -939,8 +939,6 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr, |
2507 | struct path path; |
2508 | struct super_block *sb; |
2509 | struct qstr quick_string; |
2510 | - struct hstate *hstate; |
2511 | - unsigned long num_pages; |
2512 | int hstate_idx; |
2513 | |
2514 | hstate_idx = get_hstate_idx(page_size_log); |
2515 | @@ -980,12 +978,10 @@ struct file *hugetlb_file_setup(const char *name, unsigned long addr, |
2516 | if (!inode) |
2517 | goto out_dentry; |
2518 | |
2519 | - hstate = hstate_inode(inode); |
2520 | - size += addr & ~huge_page_mask(hstate); |
2521 | - num_pages = ALIGN(size, huge_page_size(hstate)) >> |
2522 | - huge_page_shift(hstate); |
2523 | file = ERR_PTR(-ENOMEM); |
2524 | - if (hugetlb_reserve_pages(inode, 0, num_pages, NULL, acctflag)) |
2525 | + if (hugetlb_reserve_pages(inode, 0, |
2526 | + size >> huge_page_shift(hstate_inode(inode)), NULL, |
2527 | + acctflag)) |
2528 | goto out_inode; |
2529 | |
2530 | d_instantiate(path.dentry, inode); |
2531 | diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c |
2532 | index c7856a1..0086401 100644 |
2533 | --- a/fs/nfs/nfs4proc.c |
2534 | +++ b/fs/nfs/nfs4proc.c |
2535 | @@ -4553,9 +4553,9 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock * |
2536 | if (status != 0) |
2537 | goto out; |
2538 | /* Is this a delegated lock? */ |
2539 | - if (test_bit(NFS_DELEGATED_STATE, &state->flags)) |
2540 | - goto out; |
2541 | lsp = request->fl_u.nfs4_fl.owner; |
2542 | + if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) == 0) |
2543 | + goto out; |
2544 | seqid = nfs_alloc_seqid(&lsp->ls_seqid, GFP_KERNEL); |
2545 | status = -ENOMEM; |
2546 | if (seqid == NULL) |
2547 | diff --git a/include/drm/drmP.h b/include/drm/drmP.h |
2548 | index 2d94d74..f1ce786 100644 |
2549 | --- a/include/drm/drmP.h |
2550 | +++ b/include/drm/drmP.h |
2551 | @@ -1593,9 +1593,8 @@ extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *s |
2552 | |
2553 | void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); |
2554 | void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); |
2555 | -int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle); |
2556 | -int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); |
2557 | -void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); |
2558 | +int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); |
2559 | +void drm_prime_remove_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); |
2560 | |
2561 | int drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj); |
2562 | int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf, |
2563 | diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h |
2564 | index 918e8fe..c2af598 100644 |
2565 | --- a/include/drm/drm_pciids.h |
2566 | +++ b/include/drm/drm_pciids.h |
2567 | @@ -240,6 +240,7 @@ |
2568 | {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ |
2569 | {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2570 | {0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2571 | + {0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2572 | {0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2573 | {0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2574 | {0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2575 | @@ -247,11 +248,13 @@ |
2576 | {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2577 | {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
2578 | {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
2579 | + {0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2580 | {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2581 | {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2582 | {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2583 | {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2584 | {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
2585 | + {0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
2586 | {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
2587 | {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
2588 | {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ |
2589 | @@ -603,6 +606,8 @@ |
2590 | {0x1002, 0x9999, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2591 | {0x1002, 0x999A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2592 | {0x1002, 0x999B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2593 | + {0x1002, 0x999C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2594 | + {0x1002, 0x999D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2595 | {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2596 | {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2597 | {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ |
2598 | diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h |
2599 | index 78feda9..33f358f 100644 |
2600 | --- a/include/linux/blkdev.h |
2601 | +++ b/include/linux/blkdev.h |
2602 | @@ -838,7 +838,7 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q, |
2603 | unsigned int cmd_flags) |
2604 | { |
2605 | if (unlikely(cmd_flags & REQ_DISCARD)) |
2606 | - return q->limits.max_discard_sectors; |
2607 | + return min(q->limits.max_discard_sectors, UINT_MAX >> 9); |
2608 | |
2609 | if (unlikely(cmd_flags & REQ_WRITE_SAME)) |
2610 | return q->limits.max_write_same_sectors; |
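
The blk_queue_get_max_sectors() change above clamps max_discard_sectors to UINT_MAX >> 9: request sizes are carried as a 32-bit byte count, so any sector count above that limit would overflow once shifted into bytes. A sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t max_discard_sectors = UINT32_MAX; /* "unlimited" from the device */
	uint32_t limit = UINT32_MAX >> 9;          /* max sectors whose byte count fits u32 */
	uint32_t sectors = max_discard_sectors < limit ? max_discard_sectors : limit;

	printf("clamped to %u sectors (%llu bytes)\n",
	       sectors, (unsigned long long)sectors << 9);
	return 0;
}
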
2611 | diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h |
2612 | index 16e4e9a..df1ff7c 100644 |
2613 | --- a/include/linux/hugetlb.h |
2614 | +++ b/include/linux/hugetlb.h |
2615 | @@ -185,8 +185,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) |
2616 | |
2617 | extern const struct file_operations hugetlbfs_file_operations; |
2618 | extern const struct vm_operations_struct hugetlb_vm_ops; |
2619 | -struct file *hugetlb_file_setup(const char *name, unsigned long addr, |
2620 | - size_t size, vm_flags_t acct, |
2621 | +struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, |
2622 | struct user_struct **user, int creat_flags, |
2623 | int page_size_log); |
2624 | |
2625 | @@ -205,8 +204,8 @@ static inline int is_file_hugepages(struct file *file) |
2626 | |
2627 | #define is_file_hugepages(file) 0 |
2628 | static inline struct file * |
2629 | -hugetlb_file_setup(const char *name, unsigned long addr, size_t size, |
2630 | - vm_flags_t acctflag, struct user_struct **user, int creat_flags, |
2631 | +hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, |
2632 | + struct user_struct **user, int creat_flags, |
2633 | int page_size_log) |
2634 | { |
2635 | return ERR_PTR(-ENOSYS); |
2636 | @@ -284,6 +283,13 @@ static inline struct hstate *hstate_file(struct file *f) |
2637 | return hstate_inode(file_inode(f)); |
2638 | } |
2639 | |
2640 | +static inline struct hstate *hstate_sizelog(int page_size_log) |
2641 | +{ |
2642 | + if (!page_size_log) |
2643 | + return &default_hstate; |
2644 | + return size_to_hstate(1 << page_size_log); |
2645 | +} |
2646 | + |
2647 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) |
2648 | { |
2649 | return hstate_file(vma->vm_file); |
2650 | @@ -348,11 +354,12 @@ static inline int hstate_index(struct hstate *h) |
2651 | return h - hstates; |
2652 | } |
2653 | |
2654 | -#else |
2655 | +#else /* CONFIG_HUGETLB_PAGE */ |
2656 | struct hstate {}; |
2657 | #define alloc_huge_page_node(h, nid) NULL |
2658 | #define alloc_bootmem_huge_page(h) NULL |
2659 | #define hstate_file(f) NULL |
2660 | +#define hstate_sizelog(s) NULL |
2661 | #define hstate_vma(v) NULL |
2662 | #define hstate_inode(i) NULL |
2663 | #define huge_page_size(h) PAGE_SIZE |
2664 | @@ -367,6 +374,6 @@ static inline unsigned int pages_per_huge_page(struct hstate *h) |
2665 | } |
2666 | #define hstate_index_to_shift(index) 0 |
2667 | #define hstate_index(h) 0 |
2668 | -#endif |
2669 | +#endif /* CONFIG_HUGETLB_PAGE */ |
2670 | |
2671 | #endif /* _LINUX_HUGETLB_H */ |
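The header change drops the unused addr argument from hugetlb_file_setup() and adds hstate_sizelog(), which maps the log2 page size carried in the SHM_HUGE_*/MAP_HUGE_* flag bits to the matching hstate (0 selects the default huge page size); the size rounding thereby moves out of hugetlb_file_setup() and into its callers in ipc/shm.c and mm/mmap.c below. A standalone sketch of the log2 encoding (values illustrative):

    /* page_size_log carries log2 of the requested huge page size:
     * e.g. 21 selects 2 MB pages, 30 selects 1 GB, 0 means the default. */
    #include <stdio.h>

    int main(void)
    {
        int page_size_log = 21;
        unsigned long size = 1UL << page_size_log;   /* 2097152 == 2 MB */

        printf("page_size_log %d -> %lu bytes\n", page_size_log, size);
        return 0;
    }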
2672 | diff --git a/ipc/shm.c b/ipc/shm.c |
2673 | index 8247c49..34af1fe 100644 |
2674 | --- a/ipc/shm.c |
2675 | +++ b/ipc/shm.c |
2676 | @@ -491,10 +491,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) |
2677 | |
2678 | sprintf (name, "SYSV%08x", key); |
2679 | if (shmflg & SHM_HUGETLB) { |
2680 | + struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) |
2681 | + & SHM_HUGE_MASK); |
2682 | + size_t hugesize = ALIGN(size, huge_page_size(hs)); |
2683 | + |
2684 | /* hugetlb_file_setup applies strict accounting */ |
2685 | if (shmflg & SHM_NORESERVE) |
2686 | acctflag = VM_NORESERVE; |
2687 | - file = hugetlb_file_setup(name, 0, size, acctflag, |
2688 | + file = hugetlb_file_setup(name, hugesize, acctflag, |
2689 | &shp->mlock_user, HUGETLB_SHMFS_INODE, |
2690 | (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); |
2691 | } else { |
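Here newseg() performs that rounding itself: the requested size is aligned up to the selected huge page size before the segment file is created, so the hugetlbfs file always gets a page-aligned size. A userspace sketch of the kernel's ALIGN() round-up for a power-of-two boundary:

    /* ALIGN(x, a) for power-of-two a: round x up to the next multiple of a. */
    #include <stdio.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long size = 5UL * 1024 * 1024;             /* request: 5 MB */
        unsigned long hugesize = ALIGN_UP(size, 2UL * 1024 * 1024);

        printf("%lu -> %lu\n", size, hugesize);             /* 5 MB -> 6 MB */
        return 0;
    }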
2692 | diff --git a/kernel/Makefile b/kernel/Makefile |
2693 | index bbde5f1..5a51e6c 100644 |
2694 | --- a/kernel/Makefile |
2695 | +++ b/kernel/Makefile |
2696 | @@ -175,7 +175,7 @@ signing_key.priv signing_key.x509: x509.genkey |
2697 | openssl req -new -nodes -utf8 -$(CONFIG_MODULE_SIG_HASH) -days 36500 \ |
2698 | -batch -x509 -config x509.genkey \ |
2699 | -outform DER -out signing_key.x509 \ |
2700 | - -keyout signing_key.priv |
2701 | + -keyout signing_key.priv 2>&1 |
2702 | @echo "###" |
2703 | @echo "### Key pair generated." |
2704 | @echo "###" |
2705 | diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c |
2706 | index 642a89c..a291aa2 100644 |
2707 | --- a/kernel/audit_tree.c |
2708 | +++ b/kernel/audit_tree.c |
2709 | @@ -617,9 +617,9 @@ void audit_trim_trees(void) |
2710 | } |
2711 | spin_unlock(&hash_lock); |
2712 | trim_marked(tree); |
2713 | - put_tree(tree); |
2714 | drop_collected_mounts(root_mnt); |
2715 | skip_it: |
2716 | + put_tree(tree); |
2717 | mutex_lock(&audit_filter_mutex); |
2718 | } |
2719 | list_del(&cursor); |
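The audit_trim_trees() change is a reference-count fix: put_tree() moves below the skip_it label so the reference taken on the tree earlier in the loop is dropped on the skip path as well, where it previously leaked. A minimal sketch of the pattern, with a hypothetical refcounted object rather than the audit structures:

    /* The put must sit after the label so every path that took a
     * reference also drops it, including the goto path. */
    #include <stdio.h>

    struct obj { int refcount; };

    static void get(struct obj *o) { o->refcount++; }
    static void put(struct obj *o) { o->refcount--; }

    static void process(struct obj *o, int skip)
    {
        get(o);
        if (skip)
            goto skip_it;
        /* ... work that needs the object pinned ... */
    skip_it:
        put(o);                           /* balanced on both paths */
    }

    int main(void)
    {
        struct obj o = { .refcount = 1 };

        process(&o, 1);
        printf("refcount after skip path: %d\n", o.refcount);   /* still 1 */
        return 0;
    }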
2720 | diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c |
2721 | index 7713d1b..3f28192 100644 |
2722 | --- a/kernel/trace/trace.c |
2723 | +++ b/kernel/trace/trace.c |
2724 | @@ -5168,36 +5168,32 @@ void trace_init_global_iter(struct trace_iterator *iter) |
2725 | iter->cpu_file = TRACE_PIPE_ALL_CPU; |
2726 | } |
2727 | |
2728 | -static void |
2729 | -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) |
2730 | +void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) |
2731 | { |
2732 | - static arch_spinlock_t ftrace_dump_lock = |
2733 | - (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
2734 | /* use static because iter can be a bit big for the stack */ |
2735 | static struct trace_iterator iter; |
2736 | + static atomic_t dump_running; |
2737 | unsigned int old_userobj; |
2738 | - static int dump_ran; |
2739 | unsigned long flags; |
2740 | int cnt = 0, cpu; |
2741 | |
2742 | - /* only one dump */ |
2743 | - local_irq_save(flags); |
2744 | - arch_spin_lock(&ftrace_dump_lock); |
2745 | - if (dump_ran) |
2746 | - goto out; |
2747 | - |
2748 | - dump_ran = 1; |
2749 | + /* Only allow one dump user at a time. */ |
2750 | + if (atomic_inc_return(&dump_running) != 1) { |
2751 | + atomic_dec(&dump_running); |
2752 | + return; |
2753 | + } |
2754 | |
2755 | + /* |
2756 | + * Always turn off tracing when we dump. |
2757 | + * We don't need to show trace output of what happens |
2758 | + * between multiple crashes. |
2759 | + * |
2760 | + * If the user does a sysrq-z, then they can re-enable |
2761 | + * tracing with echo 1 > tracing_on. |
2762 | + */ |
2763 | tracing_off(); |
2764 | |
2765 | - /* Did function tracer already get disabled? */ |
2766 | - if (ftrace_is_dead()) { |
2767 | - printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); |
2768 | - printk("# MAY BE MISSING FUNCTION EVENTS\n"); |
2769 | - } |
2770 | - |
2771 | - if (disable_tracing) |
2772 | - ftrace_kill(); |
2773 | + local_irq_save(flags); |
2774 | |
2775 | /* Simulate the iterator */ |
2776 | trace_init_global_iter(&iter); |
2777 | @@ -5227,6 +5223,12 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) |
2778 | |
2779 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); |
2780 | |
2781 | + /* Did function tracer already get disabled? */ |
2782 | + if (ftrace_is_dead()) { |
2783 | + printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n"); |
2784 | + printk("# MAY BE MISSING FUNCTION EVENTS\n"); |
2785 | + } |
2786 | + |
2787 | /* |
2788 | * We need to stop all tracing on all CPUs to read |
2789 | * the next buffer. This is a bit expensive, but is |
2790 | @@ -5266,26 +5268,14 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) |
2791 | printk(KERN_TRACE "---------------------------------\n"); |
2792 | |
2793 | out_enable: |
2794 | - /* Re-enable tracing if requested */ |
2795 | - if (!disable_tracing) { |
2796 | - trace_flags |= old_userobj; |
2797 | + trace_flags |= old_userobj; |
2798 | |
2799 | - for_each_tracing_cpu(cpu) { |
2800 | - atomic_dec(&iter.tr->data[cpu]->disabled); |
2801 | - } |
2802 | - tracing_on(); |
2803 | + for_each_tracing_cpu(cpu) { |
2804 | + atomic_dec(&iter.tr->data[cpu]->disabled); |
2805 | } |
2806 | - |
2807 | - out: |
2808 | - arch_spin_unlock(&ftrace_dump_lock); |
2809 | + atomic_dec(&dump_running); |
2810 | local_irq_restore(flags); |
2811 | } |
2812 | - |
2813 | -/* By default: disable tracing after the dump */ |
2814 | -void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) |
2815 | -{ |
2816 | - __ftrace_dump(true, oops_dump_mode); |
2817 | -} |
2818 | EXPORT_SYMBOL_GPL(ftrace_dump); |
2819 | |
2820 | __init static int tracer_alloc_buffers(void) |
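The ftrace_dump() rewrite folds __ftrace_dump() into the exported function and replaces the arch spinlock plus one-shot dump_ran flag with an atomic counter: the first caller sees atomic_inc_return() yield 1 and proceeds, concurrent callers undo their increment and return, and the winner decrements on exit so a later dump can run. A sketch of that guard using C11 atomics in place of the kernel's atomic_t:

    /* Single-user guard shaped like the new ftrace_dump() logic. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int dump_running;

    static void dump(const char *who)
    {
        /* Only allow one dump user at a time. */
        if (atomic_fetch_add(&dump_running, 1) + 1 != 1) {
            atomic_fetch_sub(&dump_running, 1);
            printf("%s: dump already running, backing off\n", who);
            return;
        }

        printf("%s: dumping\n", who);
        /* ... walk and print the trace buffers ... */

        atomic_fetch_sub(&dump_running, 1);   /* unlike dump_ran, re-armable */
    }

    int main(void)
    {
        dump("first");
        dump("second");   /* also runs, since the first already finished */
        return 0;
    }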
2821 | diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c |
2822 | index 51c819c..eedc297 100644 |
2823 | --- a/kernel/trace/trace_selftest.c |
2824 | +++ b/kernel/trace/trace_selftest.c |
2825 | @@ -703,8 +703,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) |
2826 | /* Maximum number of functions to trace before diagnosing a hang */ |
2827 | #define GRAPH_MAX_FUNC_TEST 100000000 |
2828 | |
2829 | -static void |
2830 | -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode); |
2831 | static unsigned int graph_hang_thresh; |
2832 | |
2833 | /* Wrap the real function entry probe to avoid possible hanging */ |
2834 | @@ -714,8 +712,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) |
2835 | if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) { |
2836 | ftrace_graph_stop(); |
2837 | printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); |
2838 | - if (ftrace_dump_on_oops) |
2839 | - __ftrace_dump(false, DUMP_ALL); |
2840 | + if (ftrace_dump_on_oops) { |
2841 | + ftrace_dump(DUMP_ALL); |
2842 | + /* ftrace_dump() disables tracing */ |
2843 | + tracing_on(); |
2844 | + } |
2845 | return 0; |
2846 | } |
2847 | |
2848 | diff --git a/mm/mmap.c b/mm/mmap.c |
2849 | index 033094b..e17fc06 100644 |
2850 | --- a/mm/mmap.c |
2851 | +++ b/mm/mmap.c |
2852 | @@ -1327,15 +1327,20 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, |
2853 | file = fget(fd); |
2854 | if (!file) |
2855 | goto out; |
2856 | + if (is_file_hugepages(file)) |
2857 | + len = ALIGN(len, huge_page_size(hstate_file(file))); |
2858 | } else if (flags & MAP_HUGETLB) { |
2859 | struct user_struct *user = NULL; |
2860 | + |
2861 | + len = ALIGN(len, huge_page_size(hstate_sizelog( |
2862 | + (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK))); |
2863 | /* |
2864 | * VM_NORESERVE is used because the reservations will be |
2865 | * taken when vm_ops->mmap() is called |
2866 | * A dummy user value is used because we are not locking |
2867 | * memory so no accounting is necessary |
2868 | */ |
2869 | - file = hugetlb_file_setup(HUGETLB_ANON_FILE, addr, len, |
2870 | + file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, |
2871 | VM_NORESERVE, |
2872 | &user, HUGETLB_ANONHUGE_INODE, |
2873 | (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK); |
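mmap_pgoff() now does the huge-page rounding for both paths: a hugetlbfs fd uses the file's own hstate, while MAP_HUGETLB decodes the requested page size from the high mmap flag bits. A userspace sketch of that flag decoding (MAP_HUGE_SHIFT is 26 and MAP_HUGE_MASK is 0x3f in the uapi headers):

    /* Extract log2 of the requested huge page size from mmap flags. */
    #include <stdio.h>

    #define MAP_HUGE_SHIFT 26
    #define MAP_HUGE_MASK  0x3f

    int main(void)
    {
        unsigned long flags = 21UL << MAP_HUGE_SHIFT;   /* ask for 2 MB pages */
        int log = (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK;

        printf("page_size_log=%d -> %lu bytes\n", log, 1UL << log);
        return 0;
    }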
2874 | diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c |
2875 | index 12475ef..e5920fb 100644 |
2876 | --- a/net/netfilter/ipvs/ip_vs_pe_sip.c |
2877 | +++ b/net/netfilter/ipvs/ip_vs_pe_sip.c |
2878 | @@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff, |
2879 | if (ret > 0) |
2880 | break; |
2881 | if (!ret) |
2882 | - return 0; |
2883 | + return -EINVAL; |
2884 | dataoff += *matchoff; |
2885 | } |
2886 | |
2887 | - /* Empty callid is useless */ |
2888 | - if (!*matchlen) |
2889 | - return -EINVAL; |
2890 | - |
2891 | /* Too large is useless */ |
2892 | if (*matchlen > IP_VS_PEDATA_MAXLEN) |
2893 | return -EINVAL; |
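The get_callid() change tightens the contract: an empty Call-ID now fails with -EINVAL directly from the search loop, instead of returning success with a zero-length match that had to be rejected afterwards. A standalone sketch of that early-failure shape, with a hypothetical string parser standing in for the conntrack SIP helper:

    /* Empty or absent Call-ID is an error, never a zero-length success. */
    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    static int find_callid(const char *hdrs, size_t *matchlen)
    {
        const char *p = strstr(hdrs, "Call-ID: ");

        if (!p)
            return -EINVAL;                 /* not found: fail immediately */
        p += strlen("Call-ID: ");
        *matchlen = strcspn(p, "\r\n");
        if (!*matchlen)
            return -EINVAL;                 /* empty callid is useless */
        return 0;
    }

    int main(void)
    {
        size_t len = 0;

        printf("%d\n", find_callid("Call-ID: abc123\r\n", &len));  /* 0 */
        printf("%d\n", find_callid("Call-ID: \r\n", &len));        /* -EINVAL */
        return 0;
    }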
2894 | diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h |
2895 | index 0ae730b..b87206c 100644 |
2896 | --- a/scripts/kconfig/list.h |
2897 | +++ b/scripts/kconfig/list.h |
2898 | @@ -51,6 +51,19 @@ struct list_head { |
2899 | pos = list_entry(pos->member.next, typeof(*pos), member)) |
2900 | |
2901 | /** |
2902 | + * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry |
2903 | + * @pos: the type * to use as a loop cursor. |
2904 | + * @n: another type * to use as temporary storage |
2905 | + * @head: the head for your list. |
2906 | + * @member: the name of the list_struct within the struct. |
2907 | + */ |
2908 | +#define list_for_each_entry_safe(pos, n, head, member) \ |
2909 | + for (pos = list_entry((head)->next, typeof(*pos), member), \ |
2910 | + n = list_entry(pos->member.next, typeof(*pos), member); \ |
2911 | + &pos->member != (head); \ |
2912 | + pos = n, n = list_entry(n->member.next, typeof(*n), member)) |
2913 | + |
2914 | +/** |
2915 | * list_empty - tests whether a list is empty |
2916 | * @head: the list to test. |
2917 | */ |
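list_for_each_entry_safe() caches the next entry in n before the loop body runs, so the body may free pos without breaking the traversal; the mconf.c hunk below relies on exactly that to free the jump keys after the search dialog closes. A standalone sketch of the idiom on a simplified singly-linked list:

    /* "Safe" traversal: capture ->next before freeing the current node. */
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
        int val;
        struct node *next;
    };

    int main(void)
    {
        struct node *head = NULL, *pos, *tmp;

        /* Build 2 -> 1 -> 0 by pushing at the head. */
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->val = i;
            n->next = head;
            head = n;
        }

        for (pos = head; pos; pos = tmp) {
            tmp = pos->next;              /* saved before pos is freed */
            printf("freeing %d\n", pos->val);
            free(pos);
        }
        return 0;
    }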
2918 | diff --git a/scripts/kconfig/mconf.c b/scripts/kconfig/mconf.c |
2919 | index 566288a..c5418d6 100644 |
2920 | --- a/scripts/kconfig/mconf.c |
2921 | +++ b/scripts/kconfig/mconf.c |
2922 | @@ -389,6 +389,7 @@ again: |
2923 | .targets = targets, |
2924 | .keys = keys, |
2925 | }; |
2926 | + struct jump_key *pos, *tmp; |
2927 | |
2928 | res = get_relations_str(sym_arr, &head); |
2929 | dres = show_textbox_ext(_("Search Results"), (char *) |
2930 | @@ -402,6 +403,8 @@ again: |
2931 | again = true; |
2932 | } |
2933 | str_free(&res); |
2934 | + list_for_each_entry_safe(pos, tmp, &head, entries) |
2935 | + free(pos); |
2936 | } while (again); |
2937 | free(sym_arr); |
2938 | str_free(&title); |