Magellan Linux

Contents of /trunk/kernel26-magellan/patches-2.6.35-r1/0100-2.6.35.1-all-fixes.patch

Revision 1089
Wed Aug 18 13:22:01 2010 UTC by niro
File size: 57210 byte(s)
2.6.35-magellan-r1

1 diff --git a/arch/arm/plat-mxc/gpio.c b/arch/arm/plat-mxc/gpio.c
2 index 71437c6..9ebbd31 100644
3 --- a/arch/arm/plat-mxc/gpio.c
4 +++ b/arch/arm/plat-mxc/gpio.c
5 @@ -214,13 +214,16 @@ static void _set_gpio_direction(struct gpio_chip *chip, unsigned offset,
6 struct mxc_gpio_port *port =
7 container_of(chip, struct mxc_gpio_port, chip);
8 u32 l;
9 + unsigned long flags;
10
11 + spin_lock_irqsave(&port->lock, flags);
12 l = __raw_readl(port->base + GPIO_GDIR);
13 if (dir)
14 l |= 1 << offset;
15 else
16 l &= ~(1 << offset);
17 __raw_writel(l, port->base + GPIO_GDIR);
18 + spin_unlock_irqrestore(&port->lock, flags);
19 }
20
21 static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
22 @@ -229,9 +232,12 @@ static void mxc_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
23 container_of(chip, struct mxc_gpio_port, chip);
24 void __iomem *reg = port->base + GPIO_DR;
25 u32 l;
26 + unsigned long flags;
27
28 + spin_lock_irqsave(&port->lock, flags);
29 l = (__raw_readl(reg) & (~(1 << offset))) | (value << offset);
30 __raw_writel(l, reg);
31 + spin_unlock_irqrestore(&port->lock, flags);
32 }
33
34 static int mxc_gpio_get(struct gpio_chip *chip, unsigned offset)
35 @@ -285,6 +291,8 @@ int __init mxc_gpio_init(struct mxc_gpio_port *port, int cnt)
36 port[i].chip.base = i * 32;
37 port[i].chip.ngpio = 32;
38
39 + spin_lock_init(&port[i].lock);
40 +
41 /* its a serious configuration bug when it fails */
42 BUG_ON( gpiochip_add(&port[i].chip) < 0 );
43
44 diff --git a/arch/arm/plat-mxc/include/mach/gpio.h b/arch/arm/plat-mxc/include/mach/gpio.h
45 index 894d2f8..6bd932c 100644
46 --- a/arch/arm/plat-mxc/include/mach/gpio.h
47 +++ b/arch/arm/plat-mxc/include/mach/gpio.h
48 @@ -36,6 +36,7 @@ struct mxc_gpio_port {
49 int virtual_irq_start;
50 struct gpio_chip chip;
51 u32 both_edges;
52 + spinlock_t lock;
53 };
54
55 int mxc_gpio_init(struct mxc_gpio_port*, int);
56 diff --git a/arch/parisc/kernel/firmware.c b/arch/parisc/kernel/firmware.c
57 index 4c247e0..df971fa 100644
58 --- a/arch/parisc/kernel/firmware.c
59 +++ b/arch/parisc/kernel/firmware.c
60 @@ -1123,7 +1123,6 @@ static char __attribute__((aligned(64))) iodc_dbuf[4096];
61 */
62 int pdc_iodc_print(const unsigned char *str, unsigned count)
63 {
64 - static int posx; /* for simple TAB-Simulation... */
65 unsigned int i;
66 unsigned long flags;
67
68 @@ -1133,19 +1132,12 @@ int pdc_iodc_print(const unsigned char *str, unsigned count)
69 iodc_dbuf[i+0] = '\r';
70 iodc_dbuf[i+1] = '\n';
71 i += 2;
72 - posx = 0;
73 goto print;
74 - case '\t':
75 - while (posx & 7) {
76 - iodc_dbuf[i] = ' ';
77 - i++, posx++;
78 - }
79 - break;
80 case '\b': /* BS */
81 - posx -= 2;
82 + i--; /* overwrite last */
83 default:
84 iodc_dbuf[i] = str[i];
85 - i++, posx++;
86 + i++;
87 break;
88 }
89 }
90 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
91 index 65d8d79..27dff06 100644
92 --- a/arch/x86/xen/enlighten.c
93 +++ b/arch/x86/xen/enlighten.c
94 @@ -927,7 +927,7 @@ static const struct pv_init_ops xen_init_ops __initdata = {
95 };
96
97 static const struct pv_time_ops xen_time_ops __initdata = {
98 - .sched_clock = xen_sched_clock,
99 + .sched_clock = xen_clocksource_read,
100 };
101
102 static const struct pv_cpu_ops xen_cpu_ops __initdata = {
103 diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
104 index b3c6c59..a86df42 100644
105 --- a/arch/x86/xen/time.c
106 +++ b/arch/x86/xen/time.c
107 @@ -155,45 +155,6 @@ static void do_stolen_accounting(void)
108 account_idle_ticks(ticks);
109 }
110
111 -/*
112 - * Xen sched_clock implementation. Returns the number of unstolen
113 - * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED
114 - * states.
115 - */
116 -unsigned long long xen_sched_clock(void)
117 -{
118 - struct vcpu_runstate_info state;
119 - cycle_t now;
120 - u64 ret;
121 - s64 offset;
122 -
123 - /*
124 - * Ideally sched_clock should be called on a per-cpu basis
125 - * anyway, so preempt should already be disabled, but that's
126 - * not current practice at the moment.
127 - */
128 - preempt_disable();
129 -
130 - now = xen_clocksource_read();
131 -
132 - get_runstate_snapshot(&state);
133 -
134 - WARN_ON(state.state != RUNSTATE_running);
135 -
136 - offset = now - state.state_entry_time;
137 - if (offset < 0)
138 - offset = 0;
139 -
140 - ret = state.time[RUNSTATE_blocked] +
141 - state.time[RUNSTATE_running] +
142 - offset;
143 -
144 - preempt_enable();
145 -
146 - return ret;
147 -}
148 -
149 -
150 /* Get the TSC speed from Xen */
151 unsigned long xen_tsc_khz(void)
152 {
153 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
154 index ac9f798..c20a6c9 100644
155 --- a/drivers/edac/amd64_edac.c
156 +++ b/drivers/edac/amd64_edac.c
157 @@ -178,7 +178,7 @@ static int amd64_set_scrub_rate(struct mem_ctl_info *mci, u32 *bandwidth)
158
159 default:
160 amd64_printk(KERN_ERR, "Unsupported family!\n");
161 - break;
162 + return -EINVAL;
163 }
164 return amd64_search_set_scrub_rate(pvt->misc_f3_ctl, *bandwidth,
165 min_scrubrate);
166 @@ -1430,7 +1430,7 @@ static inline u64 f10_get_base_addr_offset(u64 sys_addr, int hi_range_sel,
167 u64 chan_off;
168
169 if (hi_range_sel) {
170 - if (!(dct_sel_base_addr & 0xFFFFF800) &&
171 + if (!(dct_sel_base_addr & 0xFFFF0000) &&
172 hole_valid && (sys_addr >= 0x100000000ULL))
173 chan_off = hole_off << 16;
174 else
175 @@ -1679,7 +1679,7 @@ static void f10_map_sysaddr_to_csrow(struct mem_ctl_info *mci,
176 * ganged. Otherwise @chan should already contain the channel at
177 * this point.
178 */
179 - if (dct_ganging_enabled(pvt) && pvt->nbcfg & K8_NBCFG_CHIPKILL)
180 + if (dct_ganging_enabled(pvt) && (pvt->nbcfg & K8_NBCFG_CHIPKILL))
181 chan = get_channel_from_ecc_syndrome(mci, syndrome);
182
183 if (chan >= 0)
184 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
185 index 5e21b31..8a84306 100644
186 --- a/drivers/gpu/drm/i915/intel_display.c
187 +++ b/drivers/gpu/drm/i915/intel_display.c
188 @@ -42,6 +42,7 @@
189 bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
190 static void intel_update_watermarks(struct drm_device *dev);
191 static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
192 +static void intel_crtc_update_cursor(struct drm_crtc *crtc);
193
194 typedef struct {
195 /* given values */
196 @@ -3403,6 +3404,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
197 return -EINVAL;
198 }
199
200 + /* Ensure that the cursor is valid for the new mode before changing... */
201 + intel_crtc_update_cursor(crtc);
202 +
203 if (is_lvds && dev_priv->lvds_downclock_avail) {
204 has_reduced_clock = limit->find_pll(limit, crtc,
205 dev_priv->lvds_downclock,
206 @@ -3939,6 +3943,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
207 }
208 }
209
210 +/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
211 +static void intel_crtc_update_cursor(struct drm_crtc *crtc)
212 +{
213 + struct drm_device *dev = crtc->dev;
214 + struct drm_i915_private *dev_priv = dev->dev_private;
215 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
216 + int pipe = intel_crtc->pipe;
217 + int x = intel_crtc->cursor_x;
218 + int y = intel_crtc->cursor_y;
219 + uint32_t base, pos;
220 + bool visible;
221 +
222 + pos = 0;
223 +
224 + if (crtc->fb) {
225 + base = intel_crtc->cursor_addr;
226 + if (x > (int) crtc->fb->width)
227 + base = 0;
228 +
229 + if (y > (int) crtc->fb->height)
230 + base = 0;
231 + } else
232 + base = 0;
233 +
234 + if (x < 0) {
235 + if (x + intel_crtc->cursor_width < 0)
236 + base = 0;
237 +
238 + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
239 + x = -x;
240 + }
241 + pos |= x << CURSOR_X_SHIFT;
242 +
243 + if (y < 0) {
244 + if (y + intel_crtc->cursor_height < 0)
245 + base = 0;
246 +
247 + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
248 + y = -y;
249 + }
250 + pos |= y << CURSOR_Y_SHIFT;
251 +
252 + visible = base != 0;
253 + if (!visible && !intel_crtc->cursor_visble)
254 + return;
255 +
256 + I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
257 + if (intel_crtc->cursor_visble != visible) {
258 + uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
259 + if (base) {
260 + /* Hooray for CUR*CNTR differences */
261 + if (IS_MOBILE(dev) || IS_I9XX(dev)) {
262 + cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
263 + cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
264 + cntl |= pipe << 28; /* Connect to correct pipe */
265 + } else {
266 + cntl &= ~(CURSOR_FORMAT_MASK);
267 + cntl |= CURSOR_ENABLE;
268 + cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
269 + }
270 + } else {
271 + if (IS_MOBILE(dev) || IS_I9XX(dev)) {
272 + cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
273 + cntl |= CURSOR_MODE_DISABLE;
274 + } else {
275 + cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
276 + }
277 + }
278 + I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
279 +
280 + intel_crtc->cursor_visble = visible;
281 + }
282 + /* and commit changes on next vblank */
283 + I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
284 +
285 + if (visible)
286 + intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
287 +}
288 +
289 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
290 struct drm_file *file_priv,
291 uint32_t handle,
292 @@ -3949,11 +4032,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
293 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
294 struct drm_gem_object *bo;
295 struct drm_i915_gem_object *obj_priv;
296 - int pipe = intel_crtc->pipe;
297 - uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
298 - uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
299 - uint32_t temp = I915_READ(control);
300 - size_t addr;
301 + uint32_t addr;
302 int ret;
303
304 DRM_DEBUG_KMS("\n");
305 @@ -3961,12 +4040,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
306 /* if we want to turn off the cursor ignore width and height */
307 if (!handle) {
308 DRM_DEBUG_KMS("cursor off\n");
309 - if (IS_MOBILE(dev) || IS_I9XX(dev)) {
310 - temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
311 - temp |= CURSOR_MODE_DISABLE;
312 - } else {
313 - temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
314 - }
315 addr = 0;
316 bo = NULL;
317 mutex_lock(&dev->struct_mutex);
318 @@ -4008,7 +4081,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
319
320 addr = obj_priv->gtt_offset;
321 } else {
322 - ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
323 + ret = i915_gem_attach_phys_object(dev, bo,
324 + (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
325 if (ret) {
326 DRM_ERROR("failed to attach phys object\n");
327 goto fail_locked;
328 @@ -4019,21 +4093,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
329 if (!IS_I9XX(dev))
330 I915_WRITE(CURSIZE, (height << 12) | width);
331
332 - /* Hooray for CUR*CNTR differences */
333 - if (IS_MOBILE(dev) || IS_I9XX(dev)) {
334 - temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
335 - temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
336 - temp |= (pipe << 28); /* Connect to correct pipe */
337 - } else {
338 - temp &= ~(CURSOR_FORMAT_MASK);
339 - temp |= CURSOR_ENABLE;
340 - temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
341 - }
342 -
343 finish:
344 - I915_WRITE(control, temp);
345 - I915_WRITE(base, addr);
346 -
347 if (intel_crtc->cursor_bo) {
348 if (dev_priv->info->cursor_needs_physical) {
349 if (intel_crtc->cursor_bo != bo)
350 @@ -4047,6 +4107,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
351
352 intel_crtc->cursor_addr = addr;
353 intel_crtc->cursor_bo = bo;
354 + intel_crtc->cursor_width = width;
355 + intel_crtc->cursor_height = height;
356 +
357 + intel_crtc_update_cursor(crtc);
358
359 return 0;
360 fail_unpin:
361 @@ -4060,34 +4124,12 @@ fail:
362
363 static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
364 {
365 - struct drm_device *dev = crtc->dev;
366 - struct drm_i915_private *dev_priv = dev->dev_private;
367 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
368 - struct intel_framebuffer *intel_fb;
369 - int pipe = intel_crtc->pipe;
370 - uint32_t temp = 0;
371 - uint32_t adder;
372 -
373 - if (crtc->fb) {
374 - intel_fb = to_intel_framebuffer(crtc->fb);
375 - intel_mark_busy(dev, intel_fb->obj);
376 - }
377 -
378 - if (x < 0) {
379 - temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
380 - x = -x;
381 - }
382 - if (y < 0) {
383 - temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
384 - y = -y;
385 - }
386
387 - temp |= x << CURSOR_X_SHIFT;
388 - temp |= y << CURSOR_Y_SHIFT;
389 + intel_crtc->cursor_x = x;
390 + intel_crtc->cursor_y = y;
391
392 - adder = intel_crtc->cursor_addr;
393 - I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
394 - I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
395 + intel_crtc_update_cursor(crtc);
396
397 return 0;
398 }
399 diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
400 index 2f7970b..2702652 100644
401 --- a/drivers/gpu/drm/i915/intel_drv.h
402 +++ b/drivers/gpu/drm/i915/intel_drv.h
403 @@ -143,8 +143,6 @@ struct intel_crtc {
404 struct drm_crtc base;
405 enum pipe pipe;
406 enum plane plane;
407 - struct drm_gem_object *cursor_bo;
408 - uint32_t cursor_addr;
409 u8 lut_r[256], lut_g[256], lut_b[256];
410 int dpms_mode;
411 bool busy; /* is scanout buffer being updated frequently? */
412 @@ -153,6 +151,12 @@ struct intel_crtc {
413 struct intel_overlay *overlay;
414 struct intel_unpin_work *unpin_work;
415 int fdi_lanes;
416 +
417 + struct drm_gem_object *cursor_bo;
418 + uint32_t cursor_addr;
419 + int16_t cursor_x, cursor_y;
420 + int16_t cursor_width, cursor_height;
421 + bool cursor_visble;
422 };
423
424 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
425 diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
426 index d7ad513..fe05ba2 100644
427 --- a/drivers/gpu/drm/i915/intel_overlay.c
428 +++ b/drivers/gpu/drm/i915/intel_overlay.c
429 @@ -958,7 +958,7 @@ static int check_overlay_src(struct drm_device *dev,
430 || rec->src_width < N_HORIZ_Y_TAPS*4)
431 return -EINVAL;
432
433 - /* check alingment constrains */
434 + /* check alignment constraints */
435 switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
436 case I915_OVERLAY_RGB:
437 /* not implemented */
438 @@ -990,7 +990,10 @@ static int check_overlay_src(struct drm_device *dev,
439 return -EINVAL;
440
441 /* stride checking */
442 - stride_mask = 63;
443 + if (IS_I830(dev) || IS_845G(dev))
444 + stride_mask = 255;
445 + else
446 + stride_mask = 63;
447
448 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
449 return -EINVAL;
450 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
451 index e100f69..bb3de01 100644
452 --- a/drivers/gpu/drm/radeon/r600.c
453 +++ b/drivers/gpu/drm/radeon/r600.c
454 @@ -869,7 +869,17 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
455 u32 tmp;
456
457 /* flush hdp cache so updates hit vram */
458 - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
459 + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
460 + void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
461 + u32 tmp;
462 +
463 + /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
464 + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
465 + */
466 + WREG32(HDP_DEBUG1, 0);
467 + tmp = readl((void __iomem *)ptr);
468 + } else
469 + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
470
471 WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
472 WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
473 @@ -3512,5 +3522,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
474 */
475 void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
476 {
477 - WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
478 + /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
479 + * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
480 + */
481 + if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
482 + void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
483 + u32 tmp;
484 +
485 + WREG32(HDP_DEBUG1, 0);
486 + tmp = readl((void __iomem *)ptr);
487 + } else
488 + WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
489 }
490 diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
491 index 59c1f87..84bc28e 100644
492 --- a/drivers/gpu/drm/radeon/r600d.h
493 +++ b/drivers/gpu/drm/radeon/r600d.h
494 @@ -245,6 +245,7 @@
495 #define HDP_NONSURFACE_SIZE 0x2C0C
496 #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
497 #define HDP_TILING_CONFIG 0x2F3C
498 +#define HDP_DEBUG1 0x2F34
499
500 #define MC_VM_AGP_TOP 0x2184
501 #define MC_VM_AGP_BOT 0x2188
502 diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
503 index d5b9373..d33b6c9 100644
504 --- a/drivers/gpu/drm/radeon/radeon_object.c
505 +++ b/drivers/gpu/drm/radeon/radeon_object.c
506 @@ -110,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
507 bo->surface_reg = -1;
508 INIT_LIST_HEAD(&bo->list);
509
510 +retry:
511 radeon_ttm_placement_from_domain(bo, domain);
512 /* Kernel allocation are uninterruptible */
513 mutex_lock(&rdev->vram_mutex);
514 @@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
515 &radeon_ttm_bo_destroy);
516 mutex_unlock(&rdev->vram_mutex);
517 if (unlikely(r != 0)) {
518 - if (r != -ERESTARTSYS)
519 + if (r != -ERESTARTSYS) {
520 + if (domain == RADEON_GEM_DOMAIN_VRAM) {
521 + domain |= RADEON_GEM_DOMAIN_GTT;
522 + goto retry;
523 + }
524 dev_err(rdev->dev,
525 "object_init failed for (%lu, 0x%08X)\n",
526 size, domain);
527 + }
528 return r;
529 }
530 *bo_ptr = bo;
531 @@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head)
532 {
533 struct radeon_bo_list *lobj;
534 struct radeon_bo *bo;
535 + u32 domain;
536 int r;
537
538 list_for_each_entry(lobj, head, list) {
539 @@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head)
540 list_for_each_entry(lobj, head, list) {
541 bo = lobj->bo;
542 if (!bo->pin_count) {
543 - if (lobj->wdomain) {
544 - radeon_ttm_placement_from_domain(bo,
545 - lobj->wdomain);
546 - } else {
547 - radeon_ttm_placement_from_domain(bo,
548 - lobj->rdomain);
549 - }
550 + domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
551 +
552 + retry:
553 + radeon_ttm_placement_from_domain(bo, domain);
554 r = ttm_bo_validate(&bo->tbo, &bo->placement,
555 true, false, false);
556 - if (unlikely(r))
557 + if (unlikely(r)) {
558 + if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
559 + domain |= RADEON_GEM_DOMAIN_GTT;
560 + goto retry;
561 + }
562 return r;
563 + }
564 }
565 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
566 lobj->tiling_flags = bo->tiling_flags;
567 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
568 index ce4ecbe..76c768b 100644
569 --- a/drivers/gpu/drm/radeon/rs690.c
570 +++ b/drivers/gpu/drm/radeon/rs690.c
571 @@ -398,7 +398,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
572 struct drm_display_mode *mode1 = NULL;
573 struct rs690_watermark wm0;
574 struct rs690_watermark wm1;
575 - u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
576 + u32 tmp;
577 + u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
578 + u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
579 fixed20_12 priority_mark02, priority_mark12, fill_rate;
580 fixed20_12 a, b;
581
582 @@ -495,10 +497,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
583 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
584 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
585 }
586 - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
587 - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
588 - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
589 - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
590 } else if (mode0) {
591 if (dfixed_trunc(wm0.dbpp) > 64)
592 a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
593 @@ -528,13 +526,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
594 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
595 if (rdev->disp_priority == 2)
596 d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
597 - WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
598 - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
599 - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
600 - S_006D48_D2MODE_PRIORITY_A_OFF(1));
601 - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
602 - S_006D4C_D2MODE_PRIORITY_B_OFF(1));
603 - } else {
604 + } else if (mode1) {
605 if (dfixed_trunc(wm1.dbpp) > 64)
606 a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
607 else
608 @@ -563,13 +555,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
609 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
610 if (rdev->disp_priority == 2)
611 d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
612 - WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
613 - S_006548_D1MODE_PRIORITY_A_OFF(1));
614 - WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
615 - S_00654C_D1MODE_PRIORITY_B_OFF(1));
616 - WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
617 - WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
618 }
619 +
620 + WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
621 + WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
622 + WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
623 + WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
624 }
625
626 uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
627 diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
628 index 0c9c169..7e4fbdb 100644
629 --- a/drivers/gpu/drm/radeon/rv515.c
630 +++ b/drivers/gpu/drm/radeon/rv515.c
631 @@ -925,7 +925,9 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
632 struct drm_display_mode *mode1 = NULL;
633 struct rv515_watermark wm0;
634 struct rv515_watermark wm1;
635 - u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
636 + u32 tmp;
637 + u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
638 + u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
639 fixed20_12 priority_mark02, priority_mark12, fill_rate;
640 fixed20_12 a, b;
641
642 @@ -999,10 +1001,6 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
643 d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
644 d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
645 }
646 - WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
647 - WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
648 - WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
649 - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
650 } else if (mode0) {
651 if (dfixed_trunc(wm0.dbpp) > 64)
652 a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
653 @@ -1032,11 +1030,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
654 d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
655 if (rdev->disp_priority == 2)
656 d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
657 - WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
658 - WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
659 - WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
660 - WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
661 - } else {
662 + } else if (mode1) {
663 if (dfixed_trunc(wm1.dbpp) > 64)
664 a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
665 else
666 @@ -1065,11 +1059,12 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
667 d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
668 if (rdev->disp_priority == 2)
669 d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
670 - WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
671 - WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
672 - WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
673 - WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
674 }
675 +
676 + WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
677 + WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
678 + WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
679 + WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
680 }
681
682 void rv515_bandwidth_update(struct radeon_device *rdev)
683 diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
684 index b7fd820..1d6fb79 100644
685 --- a/drivers/gpu/drm/radeon/rv770.c
686 +++ b/drivers/gpu/drm/radeon/rv770.c
687 @@ -189,7 +189,10 @@ static void rv770_mc_program(struct radeon_device *rdev)
688 WREG32((0x2c20 + j), 0x00000000);
689 WREG32((0x2c24 + j), 0x00000000);
690 }
691 - WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
692 + /* r7xx hw bug. Read from HDP_DEBUG1 rather
693 + * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
694 + */
695 + tmp = RREG32(HDP_DEBUG1);
696
697 rv515_mc_stop(rdev, &save);
698 if (r600_mc_wait_for_idle(rdev)) {
699 diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
700 index 9506f8c..6111a02 100644
701 --- a/drivers/gpu/drm/radeon/rv770d.h
702 +++ b/drivers/gpu/drm/radeon/rv770d.h
703 @@ -128,6 +128,7 @@
704 #define HDP_NONSURFACE_SIZE 0x2C0C
705 #define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
706 #define HDP_TILING_CONFIG 0x2F3C
707 +#define HDP_DEBUG1 0x2F34
708
709 #define MC_SHARED_CHMAP 0x2004
710 #define NOOFCHAN_SHIFT 12
711 diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
712 index 48c84a5..00e5fcac8 100644
713 --- a/drivers/misc/enclosure.c
714 +++ b/drivers/misc/enclosure.c
715 @@ -285,8 +285,11 @@ enclosure_component_register(struct enclosure_device *edev,
716 cdev->groups = enclosure_groups;
717
718 err = device_register(cdev);
719 - if (err)
720 - ERR_PTR(err);
721 + if (err) {
722 + ecomp->number = -1;
723 + put_device(cdev);
724 + return ERR_PTR(err);
725 + }
726
727 return ecomp;
728 }
729 diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
730 index 5d1220d..664ed58 100644
731 --- a/drivers/net/e1000e/hw.h
732 +++ b/drivers/net/e1000e/hw.h
733 @@ -308,7 +308,7 @@ enum e1e_registers {
734 #define E1000_KMRNCTRLSTA_INBAND_PARAM 0x9 /* Kumeran InBand Parameters */
735 #define E1000_KMRNCTRLSTA_DIAG_NELPBK 0x1000 /* Nearend Loopback mode */
736 #define E1000_KMRNCTRLSTA_K1_CONFIG 0x7
737 -#define E1000_KMRNCTRLSTA_K1_ENABLE 0x140E
738 +#define E1000_KMRNCTRLSTA_K1_ENABLE 0x0002
739 #define E1000_KMRNCTRLSTA_K1_DISABLE 0x1400
740
741 #define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
742 diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
743 index 57a7e41..79e38dc 100644
744 --- a/drivers/net/e1000e/netdev.c
745 +++ b/drivers/net/e1000e/netdev.c
746 @@ -3419,13 +3419,18 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
747
748 /* disable SERR in case the MSI write causes a master abort */
749 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
750 - pci_write_config_word(adapter->pdev, PCI_COMMAND,
751 - pci_cmd & ~PCI_COMMAND_SERR);
752 + if (pci_cmd & PCI_COMMAND_SERR)
753 + pci_write_config_word(adapter->pdev, PCI_COMMAND,
754 + pci_cmd & ~PCI_COMMAND_SERR);
755
756 err = e1000_test_msi_interrupt(adapter);
757
758 - /* restore previous setting of command word */
759 - pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
760 + /* re-enable SERR */
761 + if (pci_cmd & PCI_COMMAND_SERR) {
762 + pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
763 + pci_cmd |= PCI_COMMAND_SERR;
764 + pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
765 + }
766
767 /* success ! */
768 if (!err)
769 diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
770 index cea37e0..df2a6d7 100644
771 --- a/drivers/net/igb/igb_main.c
772 +++ b/drivers/net/igb/igb_main.c
773 @@ -630,9 +630,6 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
774 for (; i < adapter->rss_queues; i++)
775 adapter->rx_ring[i]->reg_idx = rbase_offset +
776 Q_IDX_82576(i);
777 - for (; j < adapter->rss_queues; j++)
778 - adapter->tx_ring[j]->reg_idx = rbase_offset +
779 - Q_IDX_82576(j);
780 }
781 case e1000_82575:
782 case e1000_82580:
783 @@ -996,7 +993,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
784
785 /* Number of supported queues. */
786 adapter->num_rx_queues = adapter->rss_queues;
787 - adapter->num_tx_queues = adapter->rss_queues;
788 + if (adapter->vfs_allocated_count)
789 + adapter->num_tx_queues = 1;
790 + else
791 + adapter->num_tx_queues = adapter->rss_queues;
792
793 /* start with one vector for every rx queue */
794 numvecs = adapter->num_rx_queues;
795 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
796 index 5fdbb53..dabafb8 100644
797 --- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
798 +++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
799 @@ -239,7 +239,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
800 if (qCoff > 15)
801 qCoff = 15;
802 else if (qCoff <= -16)
803 - qCoff = 16;
804 + qCoff = -16;
805
806 ath_print(common, ATH_DBG_CALIBRATE,
807 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
808 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
809 index 23eb60e..cb4e7da 100644
810 --- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
811 +++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
812 @@ -944,7 +944,7 @@ static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
813 return 1;
814 }
815
816 -static u16 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
817 +static u32 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
818 struct ath9k_channel *chan)
819 {
820 return -EINVAL;
821 diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
822 index 21354c1..5f01a0f 100644
823 --- a/drivers/net/wireless/ath/ath9k/eeprom.h
824 +++ b/drivers/net/wireless/ath/ath9k/eeprom.h
825 @@ -669,7 +669,7 @@ struct eeprom_ops {
826 int (*get_eeprom_ver)(struct ath_hw *hw);
827 int (*get_eeprom_rev)(struct ath_hw *hw);
828 u8 (*get_num_ant_config)(struct ath_hw *hw, enum ieee80211_band band);
829 - u16 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
830 + u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
831 struct ath9k_channel *chan);
832 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
833 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
834 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
835 index 41a77d1..1576bbb 100644
836 --- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
837 +++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
838 @@ -1149,13 +1149,13 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
839 }
840 }
841
842 -static u16 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
843 +static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
844 struct ath9k_channel *chan)
845 {
846 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
847 struct modal_eep_4k_header *pModal = &eep->modalHeader;
848
849 - return pModal->antCtrlCommon & 0xFFFF;
850 + return pModal->antCtrlCommon;
851 }
852
853 static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
854 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
855 index b471db5..2705eb0 100644
856 --- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
857 +++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
858 @@ -1131,13 +1131,13 @@ static u8 ath9k_hw_AR9287_get_num_ant_config(struct ath_hw *ah,
859 return 1;
860 }
861
862 -static u16 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
863 +static u32 ath9k_hw_AR9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
864 struct ath9k_channel *chan)
865 {
866 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
867 struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
868
869 - return pModal->antCtrlCommon & 0xFFFF;
870 + return pModal->antCtrlCommon;
871 }
872
873 static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
874 diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
875 index 7e1ed78..54ce34e 100644
876 --- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
877 +++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
878 @@ -729,7 +729,7 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
879 vpdTableI[i][sizeCurrVpdTable - 2]);
880 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
881
882 - if (tgtIndex > maxIndex) {
883 + if (tgtIndex >= maxIndex) {
884 while ((ss <= tgtIndex) &&
885 (k < (AR5416_NUM_PDADC_VALUES - 1))) {
886 tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
887 @@ -1437,14 +1437,14 @@ static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
888 return num_ant_config;
889 }
890
891 -static u16 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
892 +static u32 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
893 struct ath9k_channel *chan)
894 {
895 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
896 struct modal_eep_header *pModal =
897 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
898
899 - return pModal->antCtrlCommon & 0xFFFF;
900 + return pModal->antCtrlCommon;
901 }
902
903 static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
904 diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
905 index c33f17d..2feee1d 100644
906 --- a/drivers/net/wireless/ath/ath9k/hw.c
907 +++ b/drivers/net/wireless/ath/ath9k/hw.c
908 @@ -537,7 +537,8 @@ static int __ath9k_hw_init(struct ath_hw *ah)
909
910 if (ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
911 if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
912 - (AR_SREV_9280(ah) && !ah->is_pciexpress)) {
913 + ((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
914 + !ah->is_pciexpress)) {
915 ah->config.serialize_regmode =
916 SER_REG_MODE_ON;
917 } else {
918 @@ -1232,9 +1233,11 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
919
920 if (!ah->chip_fullsleep) {
921 ath9k_hw_abortpcurecv(ah);
922 - if (!ath9k_hw_stopdmarecv(ah))
923 + if (!ath9k_hw_stopdmarecv(ah)) {
924 ath_print(common, ATH_DBG_XMIT,
925 "Failed to stop receive dma\n");
926 + bChannelChange = false;
927 + }
928 }
929
930 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
931 @@ -1265,7 +1268,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
932 macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
933
934 /* For chips on which RTC reset is done, save TSF before it gets cleared */
935 - if (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
936 + if (AR_SREV_9100(ah) ||
937 + (AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
938 tsf = ath9k_hw_gettsf64(ah);
939
940 saveLedState = REG_READ(ah, AR_CFG_LED) &
941 @@ -1297,7 +1301,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
942 }
943
944 /* Restore TSF */
945 - if (tsf && AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
946 + if (tsf)
947 ath9k_hw_settsf64(ah, tsf);
948
949 if (AR_SREV_9280_10_OR_LATER(ah))
950 @@ -1307,6 +1311,17 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
951 if (r)
952 return r;
953
954 + /*
955 + * Some AR91xx SoC devices frequently fail to accept TSF writes
956 + * right after the chip reset. When that happens, write a new
957 + * value after the initvals have been applied, with an offset
958 + * based on measured time difference
959 + */
960 + if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
961 + tsf += 1500;
962 + ath9k_hw_settsf64(ah, tsf);
963 + }
964 +
965 /* Setup MFP options for CCMP */
966 if (AR_SREV_9280_20_OR_LATER(ah)) {
967 /* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
968 diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
969 index 859aa4a..d8dd503 100644
970 --- a/drivers/net/wireless/ath/ath9k/xmit.c
971 +++ b/drivers/net/wireless/ath/ath9k/xmit.c
972 @@ -328,6 +328,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
973 u32 ba[WME_BA_BMP_SIZE >> 5];
974 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
975 bool rc_update = true;
976 + struct ieee80211_tx_rate rates[4];
977
978 skb = bf->bf_mpdu;
979 hdr = (struct ieee80211_hdr *)skb->data;
980 @@ -335,12 +336,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
981 tx_info = IEEE80211_SKB_CB(skb);
982 hw = bf->aphy->hw;
983
984 + memcpy(rates, tx_info->control.rates, sizeof(rates));
985 +
986 rcu_read_lock();
987
988 /* XXX: use ieee80211_find_sta! */
989 sta = ieee80211_find_sta_by_hw(hw, hdr->addr1);
990 if (!sta) {
991 rcu_read_unlock();
992 +
993 + INIT_LIST_HEAD(&bf_head);
994 + while (bf) {
995 + bf_next = bf->bf_next;
996 +
997 + bf->bf_state.bf_type |= BUF_XRETRY;
998 + if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) ||
999 + !bf->bf_stale || bf_next != NULL)
1000 + list_move_tail(&bf->list, &bf_head);
1001 +
1002 + ath_tx_rc_status(bf, ts, 0, 0, false);
1003 + ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
1004 + 0, 0);
1005 +
1006 + bf = bf_next;
1007 + }
1008 return;
1009 }
1010
1011 @@ -375,6 +394,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
1012 txfail = txpending = 0;
1013 bf_next = bf->bf_next;
1014
1015 + skb = bf->bf_mpdu;
1016 + tx_info = IEEE80211_SKB_CB(skb);
1017 +
1018 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
1019 /* transmit completion, subframe is
1020 * acked by block ack */
1021 @@ -428,6 +450,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
1022 spin_unlock_bh(&txq->axq_lock);
1023
1024 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
1025 + memcpy(tx_info->control.rates, rates, sizeof(rates));
1026 ath_tx_rc_status(bf, ts, nbad, txok, true);
1027 rc_update = false;
1028 } else {
1029 @@ -487,6 +510,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
1030 bf = bf_next;
1031 }
1032
1033 + /* prepend un-acked frames to the beginning of the pending frame queue */
1034 + if (!list_empty(&bf_pending)) {
1035 + spin_lock_bh(&txq->axq_lock);
1036 + list_splice(&bf_pending, &tid->buf_q);
1037 + ath_tx_queue_tid(txq, tid);
1038 + spin_unlock_bh(&txq->axq_lock);
1039 + }
1040 +
1041 if (tid->state & AGGR_CLEANUP) {
1042 if (tid->baw_head == tid->baw_tail) {
1043 tid->state &= ~AGGR_ADDBA_COMPLETE;
1044 @@ -499,14 +530,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
1045 return;
1046 }
1047
1048 - /* prepend un-acked frames to the beginning of the pending frame queue */
1049 - if (!list_empty(&bf_pending)) {
1050 - spin_lock_bh(&txq->axq_lock);
1051 - list_splice(&bf_pending, &tid->buf_q);
1052 - ath_tx_queue_tid(txq, tid);
1053 - spin_unlock_bh(&txq->axq_lock);
1054 - }
1055 -
1056 rcu_read_unlock();
1057
1058 if (needreset)
1059 @@ -2050,7 +2073,7 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1060 tx_info->status.rates[i].idx = -1;
1061 }
1062
1063 - tx_info->status.rates[tx_rateindex].count = bf->bf_retries + 1;
1064 + tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
1065 }
1066
1067 static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
1068 @@ -2161,7 +2184,6 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1069 * This frame is sent out as a single frame.
1070 * Use hardware retry status for this frame.
1071 */
1072 - bf->bf_retries = ts.ts_longretry;
1073 if (ts.ts_status & ATH9K_TXERR_XRETRY)
1074 bf->bf_state.bf_type |= BUF_XRETRY;
1075 ath_tx_rc_status(bf, &ts, 0, txok, true);
1076 @@ -2280,7 +2302,6 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
1077 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
1078
1079 if (!bf_isampdu(bf)) {
1080 - bf->bf_retries = txs.ts_longretry;
1081 if (txs.ts_status & ATH9K_TXERR_XRETRY)
1082 bf->bf_state.bf_type |= BUF_XRETRY;
1083 ath_tx_rc_status(bf, &txs, 0, txok, true);
1084 @@ -2449,37 +2470,37 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
1085
1086 void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
1087 {
1088 - int i;
1089 - struct ath_atx_ac *ac, *ac_tmp;
1090 - struct ath_atx_tid *tid, *tid_tmp;
1091 + struct ath_atx_ac *ac;
1092 + struct ath_atx_tid *tid;
1093 struct ath_txq *txq;
1094 + int i, tidno;
1095
1096 - for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1097 - if (ATH_TXQ_SETUP(sc, i)) {
1098 - txq = &sc->tx.txq[i];
1099 + for (tidno = 0, tid = &an->tid[tidno];
1100 + tidno < WME_NUM_TID; tidno++, tid++) {
1101 + i = tid->ac->qnum;
1102
1103 - spin_lock_bh(&txq->axq_lock);
1104 + if (!ATH_TXQ_SETUP(sc, i))
1105 + continue;
1106
1107 - list_for_each_entry_safe(ac,
1108 - ac_tmp, &txq->axq_acq, list) {
1109 - tid = list_first_entry(&ac->tid_q,
1110 - struct ath_atx_tid, list);
1111 - if (tid && tid->an != an)
1112 - continue;
1113 - list_del(&ac->list);
1114 - ac->sched = false;
1115 -
1116 - list_for_each_entry_safe(tid,
1117 - tid_tmp, &ac->tid_q, list) {
1118 - list_del(&tid->list);
1119 - tid->sched = false;
1120 - ath_tid_drain(sc, txq, tid);
1121 - tid->state &= ~AGGR_ADDBA_COMPLETE;
1122 - tid->state &= ~AGGR_CLEANUP;
1123 - }
1124 - }
1125 + txq = &sc->tx.txq[i];
1126 + ac = tid->ac;
1127
1128 - spin_unlock_bh(&txq->axq_lock);
1129 + spin_lock_bh(&txq->axq_lock);
1130 +
1131 + if (tid->sched) {
1132 + list_del(&tid->list);
1133 + tid->sched = false;
1134 + }
1135 +
1136 + if (ac->sched) {
1137 + list_del(&ac->list);
1138 + tid->ac->sched = false;
1139 }
1140 +
1141 + ath_tid_drain(sc, txq, tid);
1142 + tid->state &= ~AGGR_ADDBA_COMPLETE;
1143 + tid->state &= ~AGGR_CLEANUP;
1144 +
1145 + spin_unlock_bh(&txq->axq_lock);
1146 }
1147 }
1148 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
1149 index 386c5f9..e1af9fd 100644
1150 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
1151 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
1152 @@ -420,11 +420,10 @@ void iwl_bg_scan_check(struct work_struct *data)
1153 return;
1154
1155 mutex_lock(&priv->mutex);
1156 - if (test_bit(STATUS_SCANNING, &priv->status) ||
1157 - test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1158 - IWL_DEBUG_SCAN(priv, "Scan completion watchdog resetting "
1159 - "adapter (%dms)\n",
1160 - jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
1161 + if (test_bit(STATUS_SCANNING, &priv->status) &&
1162 + !test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1163 + IWL_DEBUG_SCAN(priv, "Scan completion watchdog (%dms)\n",
1164 + jiffies_to_msecs(IWL_SCAN_CHECK_WATCHDOG));
1165
1166 if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
1167 iwl_send_scan_abort(priv);
1168 @@ -489,12 +488,11 @@ void iwl_bg_abort_scan(struct work_struct *work)
1169 !test_bit(STATUS_GEO_CONFIGURED, &priv->status))
1170 return;
1171
1172 - mutex_lock(&priv->mutex);
1173 -
1174 - cancel_delayed_work_sync(&priv->scan_check);
1175 - set_bit(STATUS_SCAN_ABORTING, &priv->status);
1176 - iwl_send_scan_abort(priv);
1177 + cancel_delayed_work(&priv->scan_check);
1178
1179 + mutex_lock(&priv->mutex);
1180 + if (test_bit(STATUS_SCAN_ABORTING, &priv->status))
1181 + iwl_send_scan_abort(priv);
1182 mutex_unlock(&priv->mutex);
1183 }
1184 EXPORT_SYMBOL(iwl_bg_abort_scan);
1185 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
1186 index d504e2b..b50fedc 100644
1187 --- a/drivers/net/xen-netfront.c
1188 +++ b/drivers/net/xen-netfront.c
1189 @@ -1621,6 +1621,7 @@ static void backend_changed(struct xenbus_device *dev,
1190 if (xennet_connect(netdev) != 0)
1191 break;
1192 xenbus_switch_state(dev, XenbusStateConnected);
1193 + netif_notify_peers(netdev);
1194 break;
1195
1196 case XenbusStateClosing:
1197 diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
1198 index 188bc84..d02be78 100644
1199 --- a/drivers/parisc/led.c
1200 +++ b/drivers/parisc/led.c
1201 @@ -176,16 +176,18 @@ static ssize_t led_proc_write(struct file *file, const char *buf,
1202 size_t count, loff_t *pos)
1203 {
1204 void *data = PDE(file->f_path.dentry->d_inode)->data;
1205 - char *cur, lbuf[count + 1];
1206 + char *cur, lbuf[32];
1207 int d;
1208
1209 if (!capable(CAP_SYS_ADMIN))
1210 return -EACCES;
1211
1212 - memset(lbuf, 0, count + 1);
1213 + if (count >= sizeof(lbuf))
1214 + count = sizeof(lbuf)-1;
1215
1216 if (copy_from_user(lbuf, buf, count))
1217 return -EFAULT;
1218 + lbuf[count] = 0;
1219
1220 cur = lbuf;
1221
1222 diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
1223 index 59ae76b..d9c7e54 100644
1224 --- a/drivers/ssb/driver_chipcommon.c
1225 +++ b/drivers/ssb/driver_chipcommon.c
1226 @@ -235,6 +235,7 @@ void ssb_chipcommon_init(struct ssb_chipcommon *cc)
1227 return; /* We don't have a ChipCommon */
1228 if (cc->dev->id.revision >= 11)
1229 cc->status = chipco_read32(cc, SSB_CHIPCO_CHIPSTAT);
1230 + ssb_dprintk(KERN_INFO PFX "chipcommon status is 0x%x\n", cc->status);
1231 ssb_pmu_init(cc);
1232 chipco_powercontrol_init(cc);
1233 ssb_chipco_set_clockmode(cc, SSB_CLKMODE_FAST);
1234 diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
1235 index 6dcda86..6e88d2b 100644
1236 --- a/drivers/ssb/pci.c
1237 +++ b/drivers/ssb/pci.c
1238 @@ -626,11 +626,22 @@ static int ssb_pci_sprom_get(struct ssb_bus *bus,
1239 return -ENODEV;
1240 }
1241 if (bus->chipco.dev) { /* can be unavailible! */
1242 - bus->sprom_offset = (bus->chipco.dev->id.revision < 31) ?
1243 - SSB_SPROM_BASE1 : SSB_SPROM_BASE31;
1244 + /*
1245 + * get SPROM offset: SSB_SPROM_BASE1 except for
1246 + * chipcommon rev >= 31 or chip ID is 0x4312 and
1247 + * chipcommon status & 3 == 2
1248 + */
1249 + if (bus->chipco.dev->id.revision >= 31)
1250 + bus->sprom_offset = SSB_SPROM_BASE31;
1251 + else if (bus->chip_id == 0x4312 &&
1252 + (bus->chipco.status & 0x03) == 2)
1253 + bus->sprom_offset = SSB_SPROM_BASE31;
1254 + else
1255 + bus->sprom_offset = SSB_SPROM_BASE1;
1256 } else {
1257 bus->sprom_offset = SSB_SPROM_BASE1;
1258 }
1259 + ssb_dprintk(KERN_INFO PFX "SPROM offset is 0x%x\n", bus->sprom_offset);
1260
1261 buf = kcalloc(SSB_SPROMSIZE_WORDS_R123, sizeof(u16), GFP_KERNEL);
1262 if (!buf)
1263 diff --git a/fs/block_dev.c b/fs/block_dev.c
1264 index 99d6af8..b3171fb 100644
1265 --- a/fs/block_dev.c
1266 +++ b/fs/block_dev.c
1267 @@ -681,8 +681,8 @@ retry:
1268 if (!bd_may_claim(bdev, whole, holder))
1269 return -EBUSY;
1270
1271 - /* if someone else is claiming, wait for it to finish */
1272 - if (whole->bd_claiming && whole->bd_claiming != holder) {
1273 + /* if claiming is already in progress, wait for it to finish */
1274 + if (whole->bd_claiming) {
1275 wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
1276 DEFINE_WAIT(wait);
1277
1278 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
1279 index 2d428b0..3a9940e 100644
1280 --- a/include/drm/drm_pciids.h
1281 +++ b/include/drm/drm_pciids.h
1282 @@ -146,6 +146,8 @@
1283 {0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
1284 {0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
1285 {0x1002, 0x688A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
1286 + {0x1002, 0x688C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
1287 + {0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
1288 {0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
1289 {0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
1290 {0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
1291 @@ -161,6 +163,7 @@
1292 {0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
1293 {0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1294 {0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1295 + {0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1296 {0x1002, 0x68c8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
1297 {0x1002, 0x68c9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
1298 {0x1002, 0x68d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_NEW_MEMMAP}, \
1299 @@ -174,6 +177,7 @@
1300 {0x1002, 0x68e8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
1301 {0x1002, 0x68e9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
1302 {0x1002, 0x68f1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
1303 + {0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
1304 {0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
1305 {0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
1306 {0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
1307 @@ -314,6 +318,7 @@
1308 {0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
1309 {0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1310 {0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1311 + {0x1002, 0x945E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1312 {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
1313 {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
1314 {0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1315 @@ -324,6 +329,7 @@
1316 {0x1002, 0x9487, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
1317 {0x1002, 0x9488, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1318 {0x1002, 0x9489, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1319 + {0x1002, 0x948A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1320 {0x1002, 0x948F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
1321 {0x1002, 0x9490, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_NEW_MEMMAP}, \
1322 {0x1002, 0x9491, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV730|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1323 @@ -366,6 +372,7 @@
1324 {0x1002, 0x9553, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1325 {0x1002, 0x9555, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1326 {0x1002, 0x9557, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1327 + {0x1002, 0x955f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV710|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1328 {0x1002, 0x9580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_NEW_MEMMAP}, \
1329 {0x1002, 0x9581, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1330 {0x1002, 0x9583, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV630|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
1331 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
1332 index b21e405..142bf18 100644
1333 --- a/include/linux/netdevice.h
1334 +++ b/include/linux/netdevice.h
1335 @@ -1775,6 +1775,8 @@ extern void netif_carrier_on(struct net_device *dev);
1336
1337 extern void netif_carrier_off(struct net_device *dev);
1338
1339 +extern void netif_notify_peers(struct net_device *dev);
1340 +
1341 /**
1342 * netif_dormant_on - mark device as dormant.
1343 * @dev: network device
1344 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
1345 index 540703b..22c2abb 100644
1346 --- a/include/linux/notifier.h
1347 +++ b/include/linux/notifier.h
1348 @@ -210,6 +210,7 @@ static inline int notifier_to_errno(int ret)
1349 #define NETDEV_POST_INIT 0x0010
1350 #define NETDEV_UNREGISTER_BATCH 0x0011
1351 #define NETDEV_BONDING_DESLAVE 0x0012
1352 +#define NETDEV_NOTIFY_PEERS 0x0012
1353
1354 #define SYS_DOWN 0x0001 /* Notify of system down */
1355 #define SYS_RESTART SYS_DOWN
1356 diff --git a/kernel/signal.c b/kernel/signal.c
1357 index 906ae5a..bded651 100644
1358 --- a/kernel/signal.c
1359 +++ b/kernel/signal.c
1360 @@ -637,7 +637,7 @@ static inline bool si_fromuser(const struct siginfo *info)
1361
1362 /*
1363 * Bad permissions for sending the signal
1364 - * - the caller must hold at least the RCU read lock
1365 + * - the caller must hold the RCU read lock
1366 */
1367 static int check_kill_permission(int sig, struct siginfo *info,
1368 struct task_struct *t)
1369 @@ -1127,11 +1127,14 @@ struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long
1370
1371 /*
1372 * send signal info to all the members of a group
1373 - * - the caller must hold the RCU read lock at least
1374 */
1375 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1376 {
1377 - int ret = check_kill_permission(sig, info, p);
1378 + int ret;
1379 +
1380 + rcu_read_lock();
1381 + ret = check_kill_permission(sig, info, p);
1382 + rcu_read_unlock();
1383
1384 if (!ret && sig)
1385 ret = do_send_sig_info(sig, info, p, true);
1386 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
1387 index 98ce9bc..c85109d 100644
1388 --- a/net/9p/trans_fd.c
1389 +++ b/net/9p/trans_fd.c
1390 @@ -948,7 +948,7 @@ p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
1391
1392 csocket = NULL;
1393
1394 - if (strlen(addr) > UNIX_PATH_MAX) {
1395 + if (strlen(addr) >= UNIX_PATH_MAX) {
1396 P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
1397 addr);
1398 return -ENAMETOOLONG;
1399 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
1400 index 382bc76..da14c49 100644
1401 --- a/net/ipv4/devinet.c
1402 +++ b/net/ipv4/devinet.c
1403 @@ -1081,6 +1081,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1404 }
1405 ip_mc_up(in_dev);
1406 /* fall through */
1407 + case NETDEV_NOTIFY_PEERS:
1408 case NETDEV_CHANGEADDR:
1409 /* Send gratuitous ARP to notify of link change */
1410 if (IN_DEV_ARP_NOTIFY(in_dev)) {
1411 diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
1412 index 3cd5f7b..ea13a80 100644
1413 --- a/net/mac80211/mesh_plink.c
1414 +++ b/net/mac80211/mesh_plink.c
1415 @@ -65,7 +65,6 @@ void mesh_plink_inc_estab_count(struct ieee80211_sub_if_data *sdata)
1416 {
1417 atomic_inc(&sdata->u.mesh.mshstats.estab_plinks);
1418 mesh_accept_plinks_update(sdata);
1419 - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
1420 }
1421
1422 static inline
1423 @@ -73,7 +72,6 @@ void mesh_plink_dec_estab_count(struct ieee80211_sub_if_data *sdata)
1424 {
1425 atomic_dec(&sdata->u.mesh.mshstats.estab_plinks);
1426 mesh_accept_plinks_update(sdata);
1427 - ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
1428 }
1429
1430 /**
1431 @@ -115,7 +113,7 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
1432 }
1433
1434 /**
1435 - * mesh_plink_deactivate - deactivate mesh peer link
1436 + * __mesh_plink_deactivate - deactivate mesh peer link
1437 *
1438 * @sta: mesh peer link to deactivate
1439 *
1440 @@ -123,18 +121,23 @@ static struct sta_info *mesh_plink_alloc(struct ieee80211_sub_if_data *sdata,
1441 *
1442 * Locking: the caller must hold sta->lock
1443 */
1444 -static void __mesh_plink_deactivate(struct sta_info *sta)
1445 +static bool __mesh_plink_deactivate(struct sta_info *sta)
1446 {
1447 struct ieee80211_sub_if_data *sdata = sta->sdata;
1448 + bool deactivated = false;
1449
1450 - if (sta->plink_state == PLINK_ESTAB)
1451 + if (sta->plink_state == PLINK_ESTAB) {
1452 mesh_plink_dec_estab_count(sdata);
1453 + deactivated = true;
1454 + }
1455 sta->plink_state = PLINK_BLOCKED;
1456 mesh_path_flush_by_nexthop(sta);
1457 +
1458 + return deactivated;
1459 }
1460
1461 /**
1462 - * __mesh_plink_deactivate - deactivate mesh peer link
1463 + * mesh_plink_deactivate - deactivate mesh peer link
1464 *
1465 * @sta: mesh peer link to deactivate
1466 *
1467 @@ -142,9 +145,15 @@ static void __mesh_plink_deactivate(struct sta_info *sta)
1468 */
1469 void mesh_plink_deactivate(struct sta_info *sta)
1470 {
1471 + struct ieee80211_sub_if_data *sdata = sta->sdata;
1472 + bool deactivated;
1473 +
1474 spin_lock_bh(&sta->lock);
1475 - __mesh_plink_deactivate(sta);
1476 + deactivated = __mesh_plink_deactivate(sta);
1477 spin_unlock_bh(&sta->lock);
1478 +
1479 + if (deactivated)
1480 + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
1481 }
1482
1483 static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
1484 @@ -381,10 +390,16 @@ int mesh_plink_open(struct sta_info *sta)
1485
1486 void mesh_plink_block(struct sta_info *sta)
1487 {
1488 + struct ieee80211_sub_if_data *sdata = sta->sdata;
1489 + bool deactivated;
1490 +
1491 spin_lock_bh(&sta->lock);
1492 - __mesh_plink_deactivate(sta);
1493 + deactivated = __mesh_plink_deactivate(sta);
1494 sta->plink_state = PLINK_BLOCKED;
1495 spin_unlock_bh(&sta->lock);
1496 +
1497 + if (deactivated)
1498 + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
1499 }
1500
1501
1502 @@ -397,6 +412,7 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
1503 enum plink_event event;
1504 enum plink_frame_type ftype;
1505 size_t baselen;
1506 + bool deactivated;
1507 u8 ie_len;
1508 u8 *baseaddr;
1509 __le16 plid, llid, reason;
1510 @@ -651,8 +667,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
1511 case CNF_ACPT:
1512 del_timer(&sta->plink_timer);
1513 sta->plink_state = PLINK_ESTAB;
1514 - mesh_plink_inc_estab_count(sdata);
1515 spin_unlock_bh(&sta->lock);
1516 + mesh_plink_inc_estab_count(sdata);
1517 + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
1518 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
1519 sta->sta.addr);
1520 break;
1521 @@ -684,8 +701,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
1522 case OPN_ACPT:
1523 del_timer(&sta->plink_timer);
1524 sta->plink_state = PLINK_ESTAB;
1525 - mesh_plink_inc_estab_count(sdata);
1526 spin_unlock_bh(&sta->lock);
1527 + mesh_plink_inc_estab_count(sdata);
1528 + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
1529 mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
1530 sta->sta.addr);
1531 mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
1532 @@ -702,11 +720,13 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_m
1533 case CLS_ACPT:
1534 reason = cpu_to_le16(MESH_CLOSE_RCVD);
1535 sta->reason = reason;
1536 - __mesh_plink_deactivate(sta);
1537 + deactivated = __mesh_plink_deactivate(sta);
1538 sta->plink_state = PLINK_HOLDING;
1539 llid = sta->llid;
1540 mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
1541 spin_unlock_bh(&sta->lock);
1542 + if (deactivated)
1543 + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
1544 mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
1545 plid, reason);
1546 break;
1547 diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
1548 index a63029e..bd1892f 100644
1549 --- a/net/sched/sch_generic.c
1550 +++ b/net/sched/sch_generic.c
1551 @@ -327,6 +327,24 @@ void netif_carrier_off(struct net_device *dev)
1552 }
1553 EXPORT_SYMBOL(netif_carrier_off);
1554
1555 +/**
1556 + * netif_notify_peers - notify network peers about existence of @dev
1557 + * @dev: network device
1558 + *
1559 + * Generate traffic such that interested network peers are aware of
1560 + * @dev, such as by generating a gratuitous ARP. This may be used when
1561 + * a device wants to inform the rest of the network about some sort of
1562 + * reconfiguration such as a failover event or virtual machine
1563 + * migration.
1564 + */
1565 +void netif_notify_peers(struct net_device *dev)
1566 +{
1567 + rtnl_lock();
1568 + call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1569 + rtnl_unlock();
1570 +}
1571 +EXPORT_SYMBOL(netif_notify_peers);
1572 +
1573 /* "NOOP" scheduler: the best scheduler, recommended for all interfaces
1574 under all circumstances. It is difficult to invent anything faster or
1575 cheaper.
1576 diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
1577 index 48ead6f..ef17fcf 100644
1578 --- a/net/wireless/mlme.c
1579 +++ b/net/wireless/mlme.c
1580 @@ -44,10 +44,10 @@ void cfg80211_send_rx_auth(struct net_device *dev, const u8 *buf, size_t len)
1581 }
1582 }
1583
1584 - WARN_ON(!done);
1585 -
1586 - nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
1587 - cfg80211_sme_rx_auth(dev, buf, len);
1588 + if (done) {
1589 + nl80211_send_rx_auth(rdev, dev, buf, len, GFP_KERNEL);
1590 + cfg80211_sme_rx_auth(dev, buf, len);
1591 + }
1592
1593 wdev_unlock(wdev);
1594 }
1595 diff --git a/net/wireless/scan.c b/net/wireless/scan.c
1596 index 58401d2..5ca8c71 100644
1597 --- a/net/wireless/scan.c
1598 +++ b/net/wireless/scan.c
1599 @@ -275,6 +275,7 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
1600 {
1601 struct cfg80211_registered_device *dev = wiphy_to_dev(wiphy);
1602 struct cfg80211_internal_bss *bss, *res = NULL;
1603 + unsigned long now = jiffies;
1604
1605 spin_lock_bh(&dev->bss_lock);
1606
1607 @@ -283,6 +284,10 @@ struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
1608 continue;
1609 if (channel && bss->pub.channel != channel)
1610 continue;
1611 + /* Don't get expired BSS structs */
1612 + if (time_after(now, bss->ts + IEEE80211_SCAN_RESULT_EXPIRE) &&
1613 + !atomic_read(&bss->hold))
1614 + continue;
1615 if (is_bss(&bss->pub, bssid, ssid, ssid_len)) {
1616 res = bss;
1617 kref_get(&res->ref);