Magellan Linux

Contents of /trunk/kernel-magellan/patches-3.11/0102-3.11.3-all-fixes.patch

Parent Directory | Revision Log


Revision 2295
Mon Oct 7 12:07:03 2013 UTC by niro
File size: 118131 bytes
-linux-3.11.3
1 diff --git a/Makefile b/Makefile
2 index aede319..4f91b99 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,6 +1,6 @@
6 VERSION = 3
7 PATCHLEVEL = 11
8 -SUBLEVEL = 2
9 +SUBLEVEL = 3
10 EXTRAVERSION =
11 NAME = Linux for Workgroups
12
13 diff --git a/arch/arm/mach-omap2/cclock44xx_data.c b/arch/arm/mach-omap2/cclock44xx_data.c
14 index 88e37a4..7adf7f1 100644
15 --- a/arch/arm/mach-omap2/cclock44xx_data.c
16 +++ b/arch/arm/mach-omap2/cclock44xx_data.c
17 @@ -1632,7 +1632,7 @@ static struct omap_clk omap44xx_clks[] = {
18 CLK(NULL, "auxclk5_src_ck", &auxclk5_src_ck),
19 CLK(NULL, "auxclk5_ck", &auxclk5_ck),
20 CLK(NULL, "auxclkreq5_ck", &auxclkreq5_ck),
21 - CLK("omap-gpmc", "fck", &dummy_ck),
22 + CLK("50000000.gpmc", "fck", &dummy_ck),
23 CLK("omap_i2c.1", "ick", &dummy_ck),
24 CLK("omap_i2c.2", "ick", &dummy_ck),
25 CLK("omap_i2c.3", "ick", &dummy_ck),
26 diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
27 index d5bbdcf..c410752 100644
28 --- a/block/cfq-iosched.c
29 +++ b/block/cfq-iosched.c
30 @@ -1803,7 +1803,7 @@ static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
31
32 if (samples) {
33 v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
34 - do_div(v, samples);
35 + v = div64_u64(v, samples);
36 }
37 __blkg_prfill_u64(sf, pd, v);
38 return 0;
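The cfq-iosched change above is a 64-bit division fix: the kernel's do_div(n, base) divides a 64-bit dividend in place but takes its divisor as a 32-bit value, whereas div64_u64() accepts a full 64-bit divisor. The sample counter read from the blkcg stats is a u64, so a value above 2^32 would be silently truncated before the divide. A minimal sketch with made-up values:

	u64 v = 1000000ULL;
	u64 samples = 0x100000001ULL;	/* divisor wider than 32 bits */

	/* do_div(v, samples) would truncate the divisor to 1 and leave v as-is;
	 * div64_u64() performs a true 64-by-64 divide: */
	v = div64_u64(v, samples);	/* v == 0, as expected */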
39 diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
40 index 622d4ae..daf95fc 100644
41 --- a/drivers/gpu/drm/ast/ast_drv.h
42 +++ b/drivers/gpu/drm/ast/ast_drv.h
43 @@ -177,7 +177,7 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast,
44
45 static inline void ast_open_key(struct ast_private *ast)
46 {
47 - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xA1, 0xFF, 0x04);
48 + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x80, 0xA8);
49 }
50
51 #define AST_VIDMEM_SIZE_8M 0x00800000
52 diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
53 index fc83bb9..877b892 100644
54 --- a/drivers/gpu/drm/drm_crtc.c
55 +++ b/drivers/gpu/drm/drm_crtc.c
56 @@ -2604,10 +2604,22 @@ int drm_mode_getfb(struct drm_device *dev,
57 r->depth = fb->depth;
58 r->bpp = fb->bits_per_pixel;
59 r->pitch = fb->pitches[0];
60 - if (fb->funcs->create_handle)
61 - ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
62 - else
63 + if (fb->funcs->create_handle) {
64 + if (file_priv->is_master || capable(CAP_SYS_ADMIN)) {
65 + ret = fb->funcs->create_handle(fb, file_priv,
66 + &r->handle);
67 + } else {
68 + /* GET_FB() is an unprivileged ioctl so we must not
69 + * return a buffer-handle to non-master processes! For
70 + * backwards-compatibility reasons, we cannot make
71 + * GET_FB() privileged, so just return an invalid handle
72 + * for non-masters. */
73 + r->handle = 0;
74 + ret = 0;
75 + }
76 + } else {
77 ret = -ENODEV;
78 + }
79
80 drm_framebuffer_unreference(fb);
81
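The drm_crtc.c hunk above is a security fix. GEM handles act as capabilities on a DRM file descriptor, and GET_FB() is an unprivileged ioctl, so handing back a handle to the scanout buffer would let any client map and read the screen. After the change, non-master callers still receive the framebuffer metadata but a zero handle. A hedged userspace sketch using libdrm, where fd and fb_id are placeholders:

	#include <xf86drmMode.h>

	drmModeFBPtr fb = drmModeGetFB(fd, fb_id);
	if (fb && fb->handle == 0) {
		/* non-master caller: width/height/pitch are valid,
		 * but the kernel withheld the GEM handle */
	}
	drmModeFreeFB(fb);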
82 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
83 index 1929bff..2f09e80 100644
84 --- a/drivers/gpu/drm/i915/i915_drv.h
85 +++ b/drivers/gpu/drm/i915/i915_drv.h
86 @@ -1091,6 +1091,13 @@ typedef struct drm_i915_private {
87
88 unsigned int fsb_freq, mem_freq, is_ddr3;
89
90 + /**
91 + * wq - Driver workqueue for GEM.
92 + *
93 + * NOTE: Work items scheduled here are not allowed to grab any modeset
94 + * locks, for otherwise the flushing done in the pageflip code will
95 + * result in deadlocks.
96 + */
97 struct workqueue_struct *wq;
98
99 /* Display functions */
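The comment added above documents a locking rule rather than a behavior change: i915's pageflip path flushes dev_priv->wq while holding modeset locks, so any work item on that queue that itself takes a modeset lock completes an ABBA deadlock. Schematically (a sketch of the two paths, not the actual i915 code):

	/* thread A: pageflip path */
	mutex_lock(&modeset_lock);
	flush_workqueue(dev_priv->wq);	/* waits for every queued item */

	/* work item wrongly queued on dev_priv->wq */
	static void bad_work(struct work_struct *work)
	{
		mutex_lock(&modeset_lock);	/* blocks on thread A: deadlock */
	}

The i915_irq.c hunks that follow apply the rule by moving the hotplug and GPU-error work onto the system workqueue via schedule_work().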
100 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
101 index 3d92a7c..46d46ba 100644
102 --- a/drivers/gpu/drm/i915/i915_irq.c
103 +++ b/drivers/gpu/drm/i915/i915_irq.c
104 @@ -910,8 +910,13 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
105 dev_priv->display.hpd_irq_setup(dev);
106 spin_unlock(&dev_priv->irq_lock);
107
108 - queue_work(dev_priv->wq,
109 - &dev_priv->hotplug_work);
110 + /*
111 + * Our hotplug handler can grab modeset locks (by calling down into the
112 + * fb helpers). Hence it must not be run on our own dev_priv->wq work
113 + * queue for otherwise the flush_work in the pageflip code will
114 + * deadlock.
115 + */
116 + schedule_work(&dev_priv->hotplug_work);
117 }
118
119 static void gmbus_irq_handler(struct drm_device *dev)
120 @@ -1402,6 +1407,34 @@ done:
121 return ret;
122 }
123
124 +static void i915_error_wake_up(struct drm_i915_private *dev_priv,
125 + bool reset_completed)
126 +{
127 + struct intel_ring_buffer *ring;
128 + int i;
129 +
130 + /*
131 + * Notify all waiters for GPU completion events that reset state has
132 + * been changed, and that they need to restart their wait after
133 + * checking for potential errors (and bail out to drop locks if there is
134 + * a gpu reset pending so that i915_error_work_func can acquire them).
135 + */
136 +
137 + /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
138 + for_each_ring(ring, dev_priv, i)
139 + wake_up_all(&ring->irq_queue);
140 +
141 + /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
142 + wake_up_all(&dev_priv->pending_flip_queue);
143 +
144 + /*
145 + * Signal tasks blocked in i915_gem_wait_for_error that the pending
146 + * reset state is cleared.
147 + */
148 + if (reset_completed)
149 + wake_up_all(&dev_priv->gpu_error.reset_queue);
150 +}
151 +
152 /**
153 * i915_error_work_func - do process context error handling work
154 * @work: work struct
155 @@ -1416,11 +1449,10 @@ static void i915_error_work_func(struct work_struct *work)
156 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
157 gpu_error);
158 struct drm_device *dev = dev_priv->dev;
159 - struct intel_ring_buffer *ring;
160 char *error_event[] = { "ERROR=1", NULL };
161 char *reset_event[] = { "RESET=1", NULL };
162 char *reset_done_event[] = { "ERROR=0", NULL };
163 - int i, ret;
164 + int ret;
165
166 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
167
168 @@ -1439,8 +1471,16 @@ static void i915_error_work_func(struct work_struct *work)
169 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
170 reset_event);
171
172 + /*
173 + * All state reset _must_ be completed before we update the
174 + * reset counter, for otherwise waiters might miss the reset
175 + * pending state and not properly drop locks, resulting in
176 + * deadlocks with the reset work.
177 + */
178 ret = i915_reset(dev);
179
180 + intel_display_handle_reset(dev);
181 +
182 if (ret == 0) {
183 /*
184 * After all the gem state is reset, increment the reset
185 @@ -1461,12 +1501,11 @@ static void i915_error_work_func(struct work_struct *work)
186 atomic_set(&error->reset_counter, I915_WEDGED);
187 }
188
189 - for_each_ring(ring, dev_priv, i)
190 - wake_up_all(&ring->irq_queue);
191 -
192 - intel_display_handle_reset(dev);
193 -
194 - wake_up_all(&dev_priv->gpu_error.reset_queue);
195 + /*
196 + * Note: The wake_up also serves as a memory barrier so that
197 + * waiters see the updated value of the reset counter atomic_t.
198 + */
199 + i915_error_wake_up(dev_priv, true);
200 }
201 }
202
203 @@ -2104,8 +2143,6 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
204 void i915_handle_error(struct drm_device *dev, bool wedged)
205 {
206 struct drm_i915_private *dev_priv = dev->dev_private;
207 - struct intel_ring_buffer *ring;
208 - int i;
209
210 i915_capture_error_state(dev);
211 i915_report_and_clear_eir(dev);
212 @@ -2115,14 +2152,28 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
213 &dev_priv->gpu_error.reset_counter);
214
215 /*
216 - * Wakeup waiting processes so that the reset work item
217 - * doesn't deadlock trying to grab various locks.
218 + * Wakeup waiting processes so that the reset work function
219 + * i915_error_work_func doesn't deadlock trying to grab various
220 + * locks. By bumping the reset counter first, the woken
221 + * processes will see a reset in progress and back off,
222 + * releasing their locks and then wait for the reset completion.
223 + * We must do this for _all_ gpu waiters that might hold locks
224 + * that the reset work needs to acquire.
225 + *
226 + * Note: The wake_up serves as the required memory barrier to
227 + * ensure that the waiters see the updated value of the reset
228 + * counter atomic_t.
229 */
230 - for_each_ring(ring, dev_priv, i)
231 - wake_up_all(&ring->irq_queue);
232 + i915_error_wake_up(dev_priv, false);
233 }
234
235 - queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
236 + /*
237 + * Our reset work can grab modeset locks (since it needs to reset the
238 + * state of outstanding pageflips). Hence it must not be run on our own
239 + * dev_priv->wq work queue for otherwise the flush_work in the pageflip
240 + * code will deadlock.
241 + */
242 + schedule_work(&dev_priv->gpu_error.work);
243 }
244
245 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
246 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
247 index ca40d1b..bedf15a 100644
248 --- a/drivers/gpu/drm/i915/intel_display.c
249 +++ b/drivers/gpu/drm/i915/intel_display.c
250 @@ -4837,9 +4837,6 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
251 return -EINVAL;
252 }
253
254 - /* Ensure that the cursor is valid for the new mode before changing... */
255 - intel_crtc_update_cursor(crtc, true);
256 -
257 if (is_lvds && dev_priv->lvds_downclock_avail) {
258 /*
259 * Ensure we match the reduced clock's P to the target clock.
260 @@ -5688,9 +5685,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
261 intel_crtc->config.dpll.p2 = clock.p2;
262 }
263
264 - /* Ensure that the cursor is valid for the new mode before changing... */
265 - intel_crtc_update_cursor(crtc, true);
266 -
267 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
268 if (intel_crtc->config.has_pch_encoder) {
269 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll);
270 @@ -5897,9 +5891,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
271 if (!intel_ddi_pll_mode_set(crtc))
272 return -EINVAL;
273
274 - /* Ensure that the cursor is valid for the new mode before changing... */
275 - intel_crtc_update_cursor(crtc, true);
276 -
277 if (intel_crtc->config.has_dp_encoder)
278 intel_dp_set_m_n(intel_crtc);
279
280 @@ -6581,7 +6572,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
281 intel_crtc->cursor_width = width;
282 intel_crtc->cursor_height = height;
283
284 - intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
285 + if (intel_crtc->active)
286 + intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
287
288 return 0;
289 fail_unpin:
290 @@ -6600,7 +6592,8 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
291 intel_crtc->cursor_x = x;
292 intel_crtc->cursor_y = y;
293
294 - intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
295 + if (intel_crtc->active)
296 + intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
297
298 return 0;
299 }
300 diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
301 index cfb8fb6..119771f 100644
302 --- a/drivers/gpu/drm/i915/intel_opregion.c
303 +++ b/drivers/gpu/drm/i915/intel_opregion.c
304 @@ -173,7 +173,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
305 return ASLE_BACKLIGHT_FAILED;
306
307 intel_panel_set_backlight(dev, bclp, 255);
308 - iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv);
309 + iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv);
310
311 return 0;
312 }
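The opregion hunk above fixes truncating integer math in the backlight level reported back to firmware: (bclp * 0x64) / 0xff rounds down, so requested levels 1 and 2 out of 255 read back as 0%. DIV_ROUND_UP(n, d) is the kernel's (n + d - 1) / d, which keeps any nonzero level nonzero. Worked examples:

	bclp = 1:   (1 * 100) / 255            == 0	/* old: reports 0% */
	            DIV_ROUND_UP(1 * 100, 255) == 1	/* new: reports 1% */
	bclp = 255: both forms yield 100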
313 diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
314 index f02fd9f..a66b27c 100644
315 --- a/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
316 +++ b/drivers/gpu/drm/nouveau/core/engine/disp/dacnv50.c
317 @@ -49,18 +49,23 @@ int
318 nv50_dac_sense(struct nv50_disp_priv *priv, int or, u32 loadval)
319 {
320 const u32 doff = (or * 0x800);
321 - int load = -EINVAL;
322 +
323 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80150000);
324 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
325 +
326 nv_wr32(priv, 0x61a00c + doff, 0x00100000 | loadval);
327 mdelay(9);
328 udelay(500);
329 - nv_wr32(priv, 0x61a00c + doff, 0x80000000);
330 - load = (nv_rd32(priv, 0x61a00c + doff) & 0x38000000) >> 27;
331 - nv_wr32(priv, 0x61a00c + doff, 0x00000000);
332 + loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
333 +
334 nv_mask(priv, 0x61a004 + doff, 0x807f0000, 0x80550000);
335 nv_wait(priv, 0x61a004 + doff, 0x80000000, 0x00000000);
336 - return load;
337 +
338 + nv_debug(priv, "DAC%d sense: 0x%08x\n", or, loadval);
339 + if (!(loadval & 0x80000000))
340 + return -ETIMEDOUT;
341 +
342 + return (loadval & 0x38000000) >> 27;
343 }
344
345 int
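The rewritten nv50_dac_sense() above leans on a property of nv_mask(): it applies the masked write and returns the register's previous contents, so a single call with mask 0xffffffff both clears the sense register and captures the result it held. That in turn lets the function tell a completed sense (bit 31 set, per the new check) from a timeout, which the old version never detected. The key lines, annotated under that reading:

	/* stop the sense and atomically capture the final status */
	loadval = nv_mask(priv, 0x61a00c + doff, 0xffffffff, 0x00000000);
	if (!(loadval & 0x80000000))		/* sense never completed */
		return -ETIMEDOUT;
	return (loadval & 0x38000000) >> 27;	/* 3-bit load result */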
346 diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
347 index 32501f6..1602398 100644
348 --- a/drivers/gpu/drm/radeon/atombios_dp.c
349 +++ b/drivers/gpu/drm/radeon/atombios_dp.c
350 @@ -50,7 +50,7 @@ static char *pre_emph_names[] = {
351 * or from atom. Note that atom operates on
352 * dw units.
353 */
354 -static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
355 +void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
356 {
357 #ifdef __BIG_ENDIAN
358 u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
359 @@ -100,7 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
360
361 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
362
363 - radeon_copy_swap(base, send, send_bytes, true);
364 + radeon_atom_copy_swap(base, send, send_bytes, true);
365
366 args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4));
367 args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4));
368 @@ -137,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
369 recv_bytes = recv_size;
370
371 if (recv && recv_size)
372 - radeon_copy_swap(recv, base + 16, recv_bytes, false);
373 + radeon_atom_copy_swap(recv, base + 16, recv_bytes, false);
374
375 return recv_bytes;
376 }
377 diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
378 index 092275d..7c2a285 100644
379 --- a/drivers/gpu/drm/radeon/atombios_encoders.c
380 +++ b/drivers/gpu/drm/radeon/atombios_encoders.c
381 @@ -1652,8 +1652,12 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
382 atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
383 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
384 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
385 - /* some early dce3.2 boards have a bug in their transmitter control table */
386 - if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
387 + /* some dce3.x boards have a bug in their transmitter control table.
388 + * ACTION_ENABLE_OUTPUT can probably be dropped since ACTION_ENABLE
389 + * does the same thing and more.
390 + */
391 + if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730) &&
392 + (rdev->family != CHIP_RS880))
393 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
394 }
395 if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
396 diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
397 index 082338d..2ca389d 100644
398 --- a/drivers/gpu/drm/radeon/atombios_i2c.c
399 +++ b/drivers/gpu/drm/radeon/atombios_i2c.c
400 @@ -27,6 +27,8 @@
401 #include "radeon.h"
402 #include "atom.h"
403
404 +extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
405 +
406 #define TARGET_HW_I2C_CLOCK 50
407
408 /* these are a limitation of ProcessI2cChannelTransaction not the hw */
409 @@ -77,7 +79,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
410 }
411
412 if (!(flags & HW_I2C_WRITE))
413 - memcpy(buf, base, num);
414 + radeon_atom_copy_swap(buf, base, num, false);
415
416 return 0;
417 }
418 diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
419 index 9953e1f..084e694 100644
420 --- a/drivers/gpu/drm/radeon/btc_dpm.c
421 +++ b/drivers/gpu/drm/radeon/btc_dpm.c
422 @@ -2699,6 +2699,12 @@ int btc_dpm_init(struct radeon_device *rdev)
423 else
424 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 10000;
425
426 + /* make sure dc limits are valid */
427 + if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
428 + (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
429 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
430 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
431 +
432 return 0;
433 }
434
435 diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
436 index 8928bd1..7a96842 100644
437 --- a/drivers/gpu/drm/radeon/cik.c
438 +++ b/drivers/gpu/drm/radeon/cik.c
439 @@ -1880,7 +1880,47 @@ static void cik_gpu_init(struct radeon_device *rdev)
440 gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
441 break;
442 case CHIP_KAVERI:
443 - /* TODO */
444 + rdev->config.cik.max_shader_engines = 1;
445 + rdev->config.cik.max_tile_pipes = 4;
446 + if ((rdev->pdev->device == 0x1304) ||
447 + (rdev->pdev->device == 0x1305) ||
448 + (rdev->pdev->device == 0x130C) ||
449 + (rdev->pdev->device == 0x130F) ||
450 + (rdev->pdev->device == 0x1310) ||
451 + (rdev->pdev->device == 0x1311) ||
452 + (rdev->pdev->device == 0x131C)) {
453 + rdev->config.cik.max_cu_per_sh = 8;
454 + rdev->config.cik.max_backends_per_se = 2;
455 + } else if ((rdev->pdev->device == 0x1309) ||
456 + (rdev->pdev->device == 0x130A) ||
457 + (rdev->pdev->device == 0x130D) ||
458 + (rdev->pdev->device == 0x1313) ||
459 + (rdev->pdev->device == 0x131D)) {
460 + rdev->config.cik.max_cu_per_sh = 6;
461 + rdev->config.cik.max_backends_per_se = 2;
462 + } else if ((rdev->pdev->device == 0x1306) ||
463 + (rdev->pdev->device == 0x1307) ||
464 + (rdev->pdev->device == 0x130B) ||
465 + (rdev->pdev->device == 0x130E) ||
466 + (rdev->pdev->device == 0x1315) ||
467 + (rdev->pdev->device == 0x131B)) {
468 + rdev->config.cik.max_cu_per_sh = 4;
469 + rdev->config.cik.max_backends_per_se = 1;
470 + } else {
471 + rdev->config.cik.max_cu_per_sh = 3;
472 + rdev->config.cik.max_backends_per_se = 1;
473 + }
474 + rdev->config.cik.max_sh_per_se = 1;
475 + rdev->config.cik.max_texture_channel_caches = 4;
476 + rdev->config.cik.max_gprs = 256;
477 + rdev->config.cik.max_gs_threads = 16;
478 + rdev->config.cik.max_hw_contexts = 8;
479 +
480 + rdev->config.cik.sc_prim_fifo_size_frontend = 0x20;
481 + rdev->config.cik.sc_prim_fifo_size_backend = 0x100;
482 + rdev->config.cik.sc_hiz_tile_fifo_size = 0x30;
483 + rdev->config.cik.sc_earlyz_tile_fifo_size = 0x130;
484 + gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
485 break;
486 case CHIP_KABINI:
487 default:
488 @@ -5763,6 +5803,10 @@ restart_ih:
489 break;
490 }
491 break;
492 + case 124: /* UVD */
493 + DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
494 + radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
495 + break;
496 case 146:
497 case 147:
498 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
499 @@ -5964,6 +6008,11 @@ static int cik_startup(struct radeon_device *rdev)
500 struct radeon_ring *ring;
501 int r;
502
503 + /* scratch needs to be initialized before MC */
504 + r = r600_vram_scratch_init(rdev);
505 + if (r)
506 + return r;
507 +
508 cik_mc_program(rdev);
509
510 if (rdev->flags & RADEON_IS_IGP) {
511 @@ -5993,10 +6042,6 @@ static int cik_startup(struct radeon_device *rdev)
512 }
513 }
514
515 - r = r600_vram_scratch_init(rdev);
516 - if (r)
517 - return r;
518 -
519 r = cik_pcie_gart_enable(rdev);
520 if (r)
521 return r;
522 @@ -6398,8 +6443,8 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
523 struct radeon_crtc *radeon_crtc,
524 struct drm_display_mode *mode)
525 {
526 - u32 tmp;
527 -
528 + u32 tmp, buffer_alloc, i;
529 + u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
530 /*
531 * Line Buffer Setup
532 * There are 6 line buffers, one for each display controller.
533 @@ -6409,22 +6454,37 @@ static u32 dce8_line_buffer_adjust(struct radeon_device *rdev,
534 * them using the stereo blender.
535 */
536 if (radeon_crtc->base.enabled && mode) {
537 - if (mode->crtc_hdisplay < 1920)
538 + if (mode->crtc_hdisplay < 1920) {
539 tmp = 1;
540 - else if (mode->crtc_hdisplay < 2560)
541 + buffer_alloc = 2;
542 + } else if (mode->crtc_hdisplay < 2560) {
543 tmp = 2;
544 - else if (mode->crtc_hdisplay < 4096)
545 + buffer_alloc = 2;
546 + } else if (mode->crtc_hdisplay < 4096) {
547 tmp = 0;
548 - else {
549 + buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
550 + } else {
551 DRM_DEBUG_KMS("Mode too big for LB!\n");
552 tmp = 0;
553 + buffer_alloc = (rdev->flags & RADEON_IS_IGP) ? 2 : 4;
554 }
555 - } else
556 + } else {
557 tmp = 1;
558 + buffer_alloc = 0;
559 + }
560
561 WREG32(LB_MEMORY_CTRL + radeon_crtc->crtc_offset,
562 LB_MEMORY_CONFIG(tmp) | LB_MEMORY_SIZE(0x6B0));
563
564 + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
565 + DMIF_BUFFERS_ALLOCATED(buffer_alloc));
566 + for (i = 0; i < rdev->usec_timeout; i++) {
567 + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
568 + DMIF_BUFFERS_ALLOCATED_COMPLETED)
569 + break;
570 + udelay(1);
571 + }
572 +
573 if (radeon_crtc->base.enabled && mode) {
574 switch (tmp) {
575 case 0:
576 diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
577 index 7e9275e..ade318e 100644
578 --- a/drivers/gpu/drm/radeon/cikd.h
579 +++ b/drivers/gpu/drm/radeon/cikd.h
580 @@ -43,6 +43,10 @@
581
582 #define DMIF_ADDR_CALC 0xC00
583
584 +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
585 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
586 +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
587 +
588 #define SRBM_GFX_CNTL 0xE44
589 #define PIPEID(x) ((x) << 0)
590 #define MEID(x) ((x) << 2)
591 diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
592 index d5b49e3..94dab1e 100644
593 --- a/drivers/gpu/drm/radeon/evergreen.c
594 +++ b/drivers/gpu/drm/radeon/evergreen.c
595 @@ -1807,7 +1807,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
596 struct drm_display_mode *mode,
597 struct drm_display_mode *other_mode)
598 {
599 - u32 tmp;
600 + u32 tmp, buffer_alloc, i;
601 + u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
602 /*
603 * Line Buffer Setup
604 * There are 3 line buffers, each one shared by 2 display controllers.
605 @@ -1830,18 +1831,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
606 * non-linked crtcs for maximum line buffer allocation.
607 */
608 if (radeon_crtc->base.enabled && mode) {
609 - if (other_mode)
610 + if (other_mode) {
611 tmp = 0; /* 1/2 */
612 - else
613 + buffer_alloc = 1;
614 + } else {
615 tmp = 2; /* whole */
616 - } else
617 + buffer_alloc = 2;
618 + }
619 + } else {
620 tmp = 0;
621 + buffer_alloc = 0;
622 + }
623
624 /* second controller of the pair uses second half of the lb */
625 if (radeon_crtc->crtc_id % 2)
626 tmp += 4;
627 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
628
629 + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
630 + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
631 + DMIF_BUFFERS_ALLOCATED(buffer_alloc));
632 + for (i = 0; i < rdev->usec_timeout; i++) {
633 + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
634 + DMIF_BUFFERS_ALLOCATED_COMPLETED)
635 + break;
636 + udelay(1);
637 + }
638 + }
639 +
640 if (radeon_crtc->base.enabled && mode) {
641 switch (tmp) {
642 case 0:
643 @@ -5106,6 +5123,11 @@ static int evergreen_startup(struct radeon_device *rdev)
644 /* enable aspm */
645 evergreen_program_aspm(rdev);
646
647 + /* scratch needs to be initialized before MC */
648 + r = r600_vram_scratch_init(rdev);
649 + if (r)
650 + return r;
651 +
652 evergreen_mc_program(rdev);
653
654 if (ASIC_IS_DCE5(rdev)) {
655 @@ -5131,10 +5153,6 @@ static int evergreen_startup(struct radeon_device *rdev)
656 }
657 }
658
659 - r = r600_vram_scratch_init(rdev);
660 - if (r)
661 - return r;
662 -
663 if (rdev->flags & RADEON_IS_AGP) {
664 evergreen_agp_enable(rdev);
665 } else {
666 diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
667 index 0d582ac..20fd17c 100644
668 --- a/drivers/gpu/drm/radeon/evergreend.h
669 +++ b/drivers/gpu/drm/radeon/evergreend.h
670 @@ -1153,6 +1153,10 @@
671 # define LATENCY_LOW_WATERMARK(x) ((x) << 0)
672 # define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
673
674 +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
675 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
676 +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
677 +
678 #define IH_RB_CNTL 0x3e00
679 # define IH_RB_ENABLE (1 << 0)
680 # define IH_IB_SIZE(x) ((x) << 1) /* log2 */
681 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
682 index ccb4f8b5..fc55256 100644
683 --- a/drivers/gpu/drm/radeon/ni.c
684 +++ b/drivers/gpu/drm/radeon/ni.c
685 @@ -2083,6 +2083,11 @@ static int cayman_startup(struct radeon_device *rdev)
686 /* enable aspm */
687 evergreen_program_aspm(rdev);
688
689 + /* scratch needs to be initialized before MC */
690 + r = r600_vram_scratch_init(rdev);
691 + if (r)
692 + return r;
693 +
694 evergreen_mc_program(rdev);
695
696 if (rdev->flags & RADEON_IS_IGP) {
697 @@ -2109,10 +2114,6 @@ static int cayman_startup(struct radeon_device *rdev)
698 }
699 }
700
701 - r = r600_vram_scratch_init(rdev);
702 - if (r)
703 - return r;
704 -
705 r = cayman_pcie_gart_enable(rdev);
706 if (r)
707 return r;
708 diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
709 index f0f5f74..56d0d95 100644
710 --- a/drivers/gpu/drm/radeon/ni_dpm.c
711 +++ b/drivers/gpu/drm/radeon/ni_dpm.c
712 @@ -4270,6 +4270,12 @@ int ni_dpm_init(struct radeon_device *rdev)
713
714 ni_pi->use_power_boost_limit = true;
715
716 + /* make sure dc limits are valid */
717 + if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
718 + (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
719 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
720 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
721 +
722 return 0;
723 }
724
725 diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
726 index b5564a3..2f7ad27 100644
727 --- a/drivers/gpu/drm/radeon/ppsmc.h
728 +++ b/drivers/gpu/drm/radeon/ppsmc.h
729 @@ -106,6 +106,8 @@ typedef uint8_t PPSMC_Result;
730 #define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
731 #define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
732 #define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
733 +#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
734 +#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121)
735 #define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124)
736
737
738 diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
739 index e66e720..739ffbe 100644
740 --- a/drivers/gpu/drm/radeon/r600.c
741 +++ b/drivers/gpu/drm/radeon/r600.c
742 @@ -119,6 +119,11 @@ u32 r600_get_xclk(struct radeon_device *rdev)
743 return rdev->clock.spll.reference_freq;
744 }
745
746 +int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
747 +{
748 + return 0;
749 +}
750 +
751 /* get temperature in millidegrees */
752 int rv6xx_get_temp(struct radeon_device *rdev)
753 {
754 @@ -3334,6 +3339,11 @@ static int r600_startup(struct radeon_device *rdev)
755 /* enable pcie gen2 link */
756 r600_pcie_gen2_enable(rdev);
757
758 + /* scratch needs to be initialized before MC */
759 + r = r600_vram_scratch_init(rdev);
760 + if (r)
761 + return r;
762 +
763 r600_mc_program(rdev);
764
765 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
766 @@ -3344,10 +3354,6 @@ static int r600_startup(struct radeon_device *rdev)
767 }
768 }
769
770 - r = r600_vram_scratch_init(rdev);
771 - if (r)
772 - return r;
773 -
774 if (rdev->flags & RADEON_IS_AGP) {
775 r600_agp_enable(rdev);
776 } else {
777 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
778 index f8f8b31..38317b9 100644
779 --- a/drivers/gpu/drm/radeon/radeon_asic.c
780 +++ b/drivers/gpu/drm/radeon/radeon_asic.c
781 @@ -1146,6 +1146,7 @@ static struct radeon_asic rv6xx_asic = {
782 .set_pcie_lanes = &r600_set_pcie_lanes,
783 .set_clock_gating = NULL,
784 .get_temperature = &rv6xx_get_temp,
785 + .set_uvd_clocks = &r600_set_uvd_clocks,
786 },
787 .dpm = {
788 .init = &rv6xx_dpm_init,
789 @@ -1257,6 +1258,7 @@ static struct radeon_asic rs780_asic = {
790 .set_pcie_lanes = NULL,
791 .set_clock_gating = NULL,
792 .get_temperature = &rv6xx_get_temp,
793 + .set_uvd_clocks = &r600_set_uvd_clocks,
794 },
795 .dpm = {
796 .init = &rs780_dpm_init,
797 diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
798 index 3d61d5a..ddbd2b8 100644
799 --- a/drivers/gpu/drm/radeon/radeon_asic.h
800 +++ b/drivers/gpu/drm/radeon/radeon_asic.h
801 @@ -405,6 +405,7 @@ int r600_mc_wait_for_idle(struct radeon_device *rdev);
802 u32 r600_get_xclk(struct radeon_device *rdev);
803 uint64_t r600_get_gpu_clock_counter(struct radeon_device *rdev);
804 int rv6xx_get_temp(struct radeon_device *rdev);
805 +int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
806 int r600_dpm_pre_set_power_state(struct radeon_device *rdev);
807 void r600_dpm_post_set_power_state(struct radeon_device *rdev);
808 /* rv6xx dpm */
809 diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
810 index 4ccd61f..11dc5c8 100644
811 --- a/drivers/gpu/drm/radeon/radeon_atombios.c
812 +++ b/drivers/gpu/drm/radeon/radeon_atombios.c
813 @@ -711,13 +711,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
814 (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
815 (ctx->bios + data_offset +
816 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
817 + u8 *num_dst_objs = (u8 *)
818 + ((u8 *)router_src_dst_table + 1 +
819 + (router_src_dst_table->ucNumberOfSrc * 2));
820 + u16 *dst_objs = (u16 *)(num_dst_objs + 1);
821 int enum_id;
822
823 router.router_id = router_obj_id;
824 - for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
825 - enum_id++) {
826 + for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
827 if (le16_to_cpu(path->usConnObjectId) ==
828 - le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
829 + le16_to_cpu(dst_objs[enum_id]))
830 break;
831 }
832
833 @@ -1672,7 +1675,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
834 kfree(edid);
835 }
836 }
837 - record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
838 + record += fake_edid_record->ucFakeEDIDLength ?
839 + fake_edid_record->ucFakeEDIDLength + 2 :
840 + sizeof(ATOM_FAKE_EDID_PATCH_RECORD);
841 break;
842 case LCD_PANEL_RESOLUTION_RECORD_TYPE:
843 panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record;
844 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
845 index 2399f25..5a87c9f 100644
846 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
847 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
848 @@ -1489,6 +1489,24 @@ static const struct drm_connector_funcs radeon_dp_connector_funcs = {
849 .force = radeon_dvi_force,
850 };
851
852 +static const struct drm_connector_funcs radeon_edp_connector_funcs = {
853 + .dpms = drm_helper_connector_dpms,
854 + .detect = radeon_dp_detect,
855 + .fill_modes = drm_helper_probe_single_connector_modes,
856 + .set_property = radeon_lvds_set_property,
857 + .destroy = radeon_dp_connector_destroy,
858 + .force = radeon_dvi_force,
859 +};
860 +
861 +static const struct drm_connector_funcs radeon_lvds_bridge_connector_funcs = {
862 + .dpms = drm_helper_connector_dpms,
863 + .detect = radeon_dp_detect,
864 + .fill_modes = drm_helper_probe_single_connector_modes,
865 + .set_property = radeon_lvds_set_property,
866 + .destroy = radeon_dp_connector_destroy,
867 + .force = radeon_dvi_force,
868 +};
869 +
870 void
871 radeon_add_atom_connector(struct drm_device *dev,
872 uint32_t connector_id,
873 @@ -1580,8 +1598,6 @@ radeon_add_atom_connector(struct drm_device *dev,
874 goto failed;
875 radeon_dig_connector->igp_lane_info = igp_lane_info;
876 radeon_connector->con_priv = radeon_dig_connector;
877 - drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
878 - drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
879 if (i2c_bus->valid) {
880 /* add DP i2c bus */
881 if (connector_type == DRM_MODE_CONNECTOR_eDP)
882 @@ -1598,6 +1614,10 @@ radeon_add_atom_connector(struct drm_device *dev,
883 case DRM_MODE_CONNECTOR_VGA:
884 case DRM_MODE_CONNECTOR_DVIA:
885 default:
886 + drm_connector_init(dev, &radeon_connector->base,
887 + &radeon_dp_connector_funcs, connector_type);
888 + drm_connector_helper_add(&radeon_connector->base,
889 + &radeon_dp_connector_helper_funcs);
890 connector->interlace_allowed = true;
891 connector->doublescan_allowed = true;
892 radeon_connector->dac_load_detect = true;
893 @@ -1610,6 +1630,10 @@ radeon_add_atom_connector(struct drm_device *dev,
894 case DRM_MODE_CONNECTOR_HDMIA:
895 case DRM_MODE_CONNECTOR_HDMIB:
896 case DRM_MODE_CONNECTOR_DisplayPort:
897 + drm_connector_init(dev, &radeon_connector->base,
898 + &radeon_dp_connector_funcs, connector_type);
899 + drm_connector_helper_add(&radeon_connector->base,
900 + &radeon_dp_connector_helper_funcs);
901 drm_object_attach_property(&radeon_connector->base.base,
902 rdev->mode_info.underscan_property,
903 UNDERSCAN_OFF);
904 @@ -1634,6 +1658,10 @@ radeon_add_atom_connector(struct drm_device *dev,
905 break;
906 case DRM_MODE_CONNECTOR_LVDS:
907 case DRM_MODE_CONNECTOR_eDP:
908 + drm_connector_init(dev, &radeon_connector->base,
909 + &radeon_lvds_bridge_connector_funcs, connector_type);
910 + drm_connector_helper_add(&radeon_connector->base,
911 + &radeon_dp_connector_helper_funcs);
912 drm_object_attach_property(&radeon_connector->base.base,
913 dev->mode_config.scaling_mode_property,
914 DRM_MODE_SCALE_FULLSCREEN);
915 @@ -1797,7 +1825,7 @@ radeon_add_atom_connector(struct drm_device *dev,
916 goto failed;
917 radeon_dig_connector->igp_lane_info = igp_lane_info;
918 radeon_connector->con_priv = radeon_dig_connector;
919 - drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
920 + drm_connector_init(dev, &radeon_connector->base, &radeon_edp_connector_funcs, connector_type);
921 drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
922 if (i2c_bus->valid) {
923 /* add DP i2c bus */
924 diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
925 index 13a130f..7c110ef 100644
926 --- a/drivers/gpu/drm/radeon/radeon_cs.c
927 +++ b/drivers/gpu/drm/radeon/radeon_cs.c
928 @@ -80,9 +80,11 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
929 p->relocs[i].lobj.bo = p->relocs[i].robj;
930 p->relocs[i].lobj.written = !!r->write_domain;
931
932 - /* the first reloc of an UVD job is the
933 - msg and that must be in VRAM */
934 - if (p->ring == R600_RING_TYPE_UVD_INDEX && i == 0) {
935 + /* the first reloc of a UVD job is the msg and that must be in
936 + VRAM; also put everything into VRAM on AGP cards to avoid
937 + image corruption */
938 + if (p->ring == R600_RING_TYPE_UVD_INDEX &&
939 + (i == 0 || p->rdev->flags & RADEON_IS_AGP)) {
940 /* TODO: is this still needed for NI+ ? */
941 p->relocs[i].lobj.domain =
942 RADEON_GEM_DOMAIN_VRAM;
943 diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
944 index 081886b..cc9e848 100644
945 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
946 +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
947 @@ -275,17 +275,19 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
948 dev_info(rdev->dev, "radeon: using MSI.\n");
949 }
950 }
951 +
952 + INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
953 + INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
954 + INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
955 +
956 rdev->irq.installed = true;
957 r = drm_irq_install(rdev->ddev);
958 if (r) {
959 rdev->irq.installed = false;
960 + flush_work(&rdev->hotplug_work);
961 return r;
962 }
963
964 - INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
965 - INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
966 - INIT_WORK(&rdev->reset_work, radeon_irq_reset_work_func);
967 -
968 DRM_INFO("radeon: irq initialized.\n");
969 return 0;
970 }
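The radeon_irq_kms.c reordering above closes an initialization race: drm_irq_install() arms the interrupt handler, and an IRQ arriving immediately (a hotplug event during bring-up, say) may try to schedule these work items, which is undefined before INIT_WORK() has run. Initialization therefore has to precede the install, and the error path must drain anything an early IRQ already queued. The resulting pattern, with the reasoning as comments:

	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func); /* before IRQs */
	r = drm_irq_install(rdev->ddev);	/* interrupts may fire from here on */
	if (r) {
		flush_work(&rdev->hotplug_work);	/* drain early-queued work */
		return r;
	}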
971 diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
972 index 49ff3d1..cc2ca38 100644
973 --- a/drivers/gpu/drm/radeon/radeon_kms.c
974 +++ b/drivers/gpu/drm/radeon/radeon_kms.c
975 @@ -433,6 +433,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
976 return -EINVAL;
977 }
978 break;
979 + case RADEON_INFO_SI_CP_DMA_COMPUTE:
980 + *value = 1;
981 + break;
982 default:
983 DRM_DEBUG_KMS("Invalid request %d\n", info->request);
984 return -EINVAL;
985 diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
986 index 233a9b9..b8074a8 100644
987 --- a/drivers/gpu/drm/radeon/rs400.c
988 +++ b/drivers/gpu/drm/radeon/rs400.c
989 @@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev)
990 /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0,
991 * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */
992 if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
993 - WREG32_MC(RS480_MC_MISC_CNTL,
994 - (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN));
995 + tmp = RREG32_MC(RS480_MC_MISC_CNTL);
996 + tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN;
997 + WREG32_MC(RS480_MC_MISC_CNTL, tmp);
998 } else {
999 - WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN);
1000 + tmp = RREG32_MC(RS480_MC_MISC_CNTL);
1001 + tmp |= RS480_GART_INDEX_REG_EN;
1002 + WREG32_MC(RS480_MC_MISC_CNTL, tmp);
1003 }
1004 /* Enable gart */
1005 WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
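The rs400 hunk above replaces a blind register write with a read-modify-write: writing RS480_MC_MISC_CNTL with only the GART bits zeroes whatever else the BIOS programmed into that MC register. The general pattern, using this driver's accessors:

	u32 tmp = RREG32_MC(RS480_MC_MISC_CNTL);	/* read current contents */
	tmp |= RS480_GART_INDEX_REG_EN;			/* set only the needed bit */
	WREG32_MC(RS480_MC_MISC_CNTL, tmp);		/* write back, rest intact */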
1006 diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
1007 index d1a1ce7..4296723 100644
1008 --- a/drivers/gpu/drm/radeon/rs780_dpm.c
1009 +++ b/drivers/gpu/drm/radeon/rs780_dpm.c
1010 @@ -486,6 +486,9 @@ static void rs780_activate_engine_clk_scaling(struct radeon_device *rdev,
1011 (new_state->sclk_low == old_state->sclk_low))
1012 return;
1013
1014 + if (new_state->sclk_high == new_state->sclk_low)
1015 + return;
1016 +
1017 rs780_clk_scaling_enable(rdev, true);
1018 }
1019
1020 @@ -717,14 +720,18 @@ static void rs780_parse_pplib_non_clock_info(struct radeon_device *rdev,
1021 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
1022 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
1023 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
1024 - } else if (r600_is_uvd_state(rps->class, rps->class2)) {
1025 - rps->vclk = RS780_DEFAULT_VCLK_FREQ;
1026 - rps->dclk = RS780_DEFAULT_DCLK_FREQ;
1027 } else {
1028 rps->vclk = 0;
1029 rps->dclk = 0;
1030 }
1031
1032 + if (r600_is_uvd_state(rps->class, rps->class2)) {
1033 + if ((rps->vclk == 0) || (rps->dclk == 0)) {
1034 + rps->vclk = RS780_DEFAULT_VCLK_FREQ;
1035 + rps->dclk = RS780_DEFAULT_DCLK_FREQ;
1036 + }
1037 + }
1038 +
1039 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
1040 rdev->pm.dpm.boot_ps = rps;
1041 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
1042 diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
1043 index f5e92cf..73529c9 100644
1044 --- a/drivers/gpu/drm/radeon/rv770.c
1045 +++ b/drivers/gpu/drm/radeon/rv770.c
1046 @@ -1829,6 +1829,11 @@ static int rv770_startup(struct radeon_device *rdev)
1047 /* enable pcie gen2 link */
1048 rv770_pcie_gen2_enable(rdev);
1049
1050 + /* scratch needs to be initialized before MC */
1051 + r = r600_vram_scratch_init(rdev);
1052 + if (r)
1053 + return r;
1054 +
1055 rv770_mc_program(rdev);
1056
1057 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
1058 @@ -1839,10 +1844,6 @@ static int rv770_startup(struct radeon_device *rdev)
1059 }
1060 }
1061
1062 - r = r600_vram_scratch_init(rdev);
1063 - if (r)
1064 - return r;
1065 -
1066 if (rdev->flags & RADEON_IS_AGP) {
1067 rv770_agp_enable(rdev);
1068 } else {
1069 diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
1070 index 094c67a..4d50ca3 100644
1071 --- a/drivers/gpu/drm/radeon/rv770_dpm.c
1072 +++ b/drivers/gpu/drm/radeon/rv770_dpm.c
1073 @@ -2147,14 +2147,18 @@ static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
1074 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
1075 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
1076 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
1077 - } else if (r600_is_uvd_state(rps->class, rps->class2)) {
1078 - rps->vclk = RV770_DEFAULT_VCLK_FREQ;
1079 - rps->dclk = RV770_DEFAULT_DCLK_FREQ;
1080 } else {
1081 rps->vclk = 0;
1082 rps->dclk = 0;
1083 }
1084
1085 + if (r600_is_uvd_state(rps->class, rps->class2)) {
1086 + if ((rps->vclk == 0) || (rps->dclk == 0)) {
1087 + rps->vclk = RV770_DEFAULT_VCLK_FREQ;
1088 + rps->dclk = RV770_DEFAULT_DCLK_FREQ;
1089 + }
1090 + }
1091 +
1092 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
1093 rdev->pm.dpm.boot_ps = rps;
1094 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
1095 @@ -2517,8 +2521,16 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
1096 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
1097 {
1098 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
1099 + u32 switch_limit = 300;
1100 +
1101 + /* quirks */
1102 + /* ASUS K70AF */
1103 + if ((rdev->pdev->device == 0x9553) &&
1104 + (rdev->pdev->subsystem_vendor == 0x1043) &&
1105 + (rdev->pdev->subsystem_device == 0x1c42))
1106 + switch_limit = 200;
1107
1108 - if (vblank_time < 300)
1109 + if (vblank_time < switch_limit)
1110 return true;
1111 else
1112 return false;
1113 diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
1114 index daa8d2d..7af2113 100644
1115 --- a/drivers/gpu/drm/radeon/si.c
1116 +++ b/drivers/gpu/drm/radeon/si.c
1117 @@ -1704,7 +1704,8 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1118 struct drm_display_mode *mode,
1119 struct drm_display_mode *other_mode)
1120 {
1121 - u32 tmp;
1122 + u32 tmp, buffer_alloc, i;
1123 + u32 pipe_offset = radeon_crtc->crtc_id * 0x20;
1124 /*
1125 * Line Buffer Setup
1126 * There are 3 line buffers, each one shared by 2 display controllers.
1127 @@ -1719,16 +1720,30 @@ static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
1128 * non-linked crtcs for maximum line buffer allocation.
1129 */
1130 if (radeon_crtc->base.enabled && mode) {
1131 - if (other_mode)
1132 + if (other_mode) {
1133 tmp = 0; /* 1/2 */
1134 - else
1135 + buffer_alloc = 1;
1136 + } else {
1137 tmp = 2; /* whole */
1138 - } else
1139 + buffer_alloc = 2;
1140 + }
1141 + } else {
1142 tmp = 0;
1143 + buffer_alloc = 0;
1144 + }
1145
1146 WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
1147 DC_LB_MEMORY_CONFIG(tmp));
1148
1149 + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset,
1150 + DMIF_BUFFERS_ALLOCATED(buffer_alloc));
1151 + for (i = 0; i < rdev->usec_timeout; i++) {
1152 + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) &
1153 + DMIF_BUFFERS_ALLOCATED_COMPLETED)
1154 + break;
1155 + udelay(1);
1156 + }
1157 +
1158 if (radeon_crtc->base.enabled && mode) {
1159 switch (tmp) {
1160 case 0:
1161 @@ -4083,13 +4098,64 @@ static int si_vm_packet3_ce_check(struct radeon_device *rdev,
1162 return 0;
1163 }
1164
1165 +static int si_vm_packet3_cp_dma_check(u32 *ib, u32 idx)
1166 +{
1167 + u32 start_reg, reg, i;
1168 + u32 command = ib[idx + 4];
1169 + u32 info = ib[idx + 1];
1170 + u32 idx_value = ib[idx];
1171 + if (command & PACKET3_CP_DMA_CMD_SAS) {
1172 + /* src address space is register */
1173 + if (((info & 0x60000000) >> 29) == 0) {
1174 + start_reg = idx_value << 2;
1175 + if (command & PACKET3_CP_DMA_CMD_SAIC) {
1176 + reg = start_reg;
1177 + if (!si_vm_reg_valid(reg)) {
1178 + DRM_ERROR("CP DMA Bad SRC register\n");
1179 + return -EINVAL;
1180 + }
1181 + } else {
1182 + for (i = 0; i < (command & 0x1fffff); i++) {
1183 + reg = start_reg + (4 * i);
1184 + if (!si_vm_reg_valid(reg)) {
1185 + DRM_ERROR("CP DMA Bad SRC register\n");
1186 + return -EINVAL;
1187 + }
1188 + }
1189 + }
1190 + }
1191 + }
1192 + if (command & PACKET3_CP_DMA_CMD_DAS) {
1193 + /* dst address space is register */
1194 + if (((info & 0x00300000) >> 20) == 0) {
1195 + start_reg = ib[idx + 2];
1196 + if (command & PACKET3_CP_DMA_CMD_DAIC) {
1197 + reg = start_reg;
1198 + if (!si_vm_reg_valid(reg)) {
1199 + DRM_ERROR("CP DMA Bad DST register\n");
1200 + return -EINVAL;
1201 + }
1202 + } else {
1203 + for (i = 0; i < (command & 0x1fffff); i++) {
1204 + reg = start_reg + (4 * i);
1205 + if (!si_vm_reg_valid(reg)) {
1206 + DRM_ERROR("CP DMA Bad DST register\n");
1207 + return -EINVAL;
1208 + }
1209 + }
1210 + }
1211 + }
1212 + }
1213 + return 0;
1214 +}
1215 +
1216 static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
1217 u32 *ib, struct radeon_cs_packet *pkt)
1218 {
1219 + int r;
1220 u32 idx = pkt->idx + 1;
1221 u32 idx_value = ib[idx];
1222 u32 start_reg, end_reg, reg, i;
1223 - u32 command, info;
1224
1225 switch (pkt->opcode) {
1226 case PACKET3_NOP:
1227 @@ -4190,50 +4256,9 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
1228 }
1229 break;
1230 case PACKET3_CP_DMA:
1231 - command = ib[idx + 4];
1232 - info = ib[idx + 1];
1233 - if (command & PACKET3_CP_DMA_CMD_SAS) {
1234 - /* src address space is register */
1235 - if (((info & 0x60000000) >> 29) == 0) {
1236 - start_reg = idx_value << 2;
1237 - if (command & PACKET3_CP_DMA_CMD_SAIC) {
1238 - reg = start_reg;
1239 - if (!si_vm_reg_valid(reg)) {
1240 - DRM_ERROR("CP DMA Bad SRC register\n");
1241 - return -EINVAL;
1242 - }
1243 - } else {
1244 - for (i = 0; i < (command & 0x1fffff); i++) {
1245 - reg = start_reg + (4 * i);
1246 - if (!si_vm_reg_valid(reg)) {
1247 - DRM_ERROR("CP DMA Bad SRC register\n");
1248 - return -EINVAL;
1249 - }
1250 - }
1251 - }
1252 - }
1253 - }
1254 - if (command & PACKET3_CP_DMA_CMD_DAS) {
1255 - /* dst address space is register */
1256 - if (((info & 0x00300000) >> 20) == 0) {
1257 - start_reg = ib[idx + 2];
1258 - if (command & PACKET3_CP_DMA_CMD_DAIC) {
1259 - reg = start_reg;
1260 - if (!si_vm_reg_valid(reg)) {
1261 - DRM_ERROR("CP DMA Bad DST register\n");
1262 - return -EINVAL;
1263 - }
1264 - } else {
1265 - for (i = 0; i < (command & 0x1fffff); i++) {
1266 - reg = start_reg + (4 * i);
1267 - if (!si_vm_reg_valid(reg)) {
1268 - DRM_ERROR("CP DMA Bad DST register\n");
1269 - return -EINVAL;
1270 - }
1271 - }
1272 - }
1273 - }
1274 - }
1275 + r = si_vm_packet3_cp_dma_check(ib, idx);
1276 + if (r)
1277 + return r;
1278 break;
1279 default:
1280 DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
1281 @@ -4245,6 +4270,7 @@ static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
1282 static int si_vm_packet3_compute_check(struct radeon_device *rdev,
1283 u32 *ib, struct radeon_cs_packet *pkt)
1284 {
1285 + int r;
1286 u32 idx = pkt->idx + 1;
1287 u32 idx_value = ib[idx];
1288 u32 start_reg, reg, i;
1289 @@ -4317,6 +4343,11 @@ static int si_vm_packet3_compute_check(struct radeon_device *rdev,
1290 return -EINVAL;
1291 }
1292 break;
1293 + case PACKET3_CP_DMA:
1294 + r = si_vm_packet3_cp_dma_check(ib, idx);
1295 + if (r)
1296 + return r;
1297 + break;
1298 default:
1299 DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
1300 return -EINVAL;
1301 @@ -6422,6 +6453,11 @@ static int si_startup(struct radeon_device *rdev)
1302 /* enable aspm */
1303 si_program_aspm(rdev);
1304
1305 + /* scratch needs to be initialized before MC */
1306 + r = r600_vram_scratch_init(rdev);
1307 + if (r)
1308 + return r;
1309 +
1310 si_mc_program(rdev);
1311
1312 if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
1313 @@ -6439,10 +6475,6 @@ static int si_startup(struct radeon_device *rdev)
1314 return r;
1315 }
1316
1317 - r = r600_vram_scratch_init(rdev);
1318 - if (r)
1319 - return r;
1320 -
1321 r = si_pcie_gart_enable(rdev);
1322 if (r)
1323 return r;
1324 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1325 index 88699e3..1cfba39 100644
1326 --- a/drivers/gpu/drm/radeon/si_dpm.c
1327 +++ b/drivers/gpu/drm/radeon/si_dpm.c
1328 @@ -6401,6 +6401,12 @@ int si_dpm_init(struct radeon_device *rdev)
1329
1330 si_initialize_powertune_defaults(rdev);
1331
1332 + /* make sure dc limits are valid */
1333 + if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
1334 + (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
1335 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
1336 + rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
1337 +
1338 return 0;
1339 }
1340
1341 diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
1342 index 2c8da27..2010d6b 100644
1343 --- a/drivers/gpu/drm/radeon/sid.h
1344 +++ b/drivers/gpu/drm/radeon/sid.h
1345 @@ -282,6 +282,10 @@
1346
1347 #define DMIF_ADDR_CALC 0xC00
1348
1349 +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0
1350 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0)
1351 +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4)
1352 +
1353 #define SRBM_STATUS 0xE50
1354 #define GRBM_RQ_PENDING (1 << 5)
1355 #define VMC_BUSY (1 << 8)
1356 diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
1357 index a1eb5f5..28f4380 100644
1358 --- a/drivers/gpu/drm/radeon/trinity_dpm.c
1359 +++ b/drivers/gpu/drm/radeon/trinity_dpm.c
1360 @@ -1091,6 +1091,7 @@ int trinity_dpm_enable(struct radeon_device *rdev)
1361 trinity_program_sclk_dpm(rdev);
1362 trinity_start_dpm(rdev);
1363 trinity_wait_for_dpm_enabled(rdev);
1364 + trinity_dpm_bapm_enable(rdev, false);
1365 trinity_release_mutex(rdev);
1366
1367 if (rdev->irq.installed &&
1368 @@ -1116,6 +1117,7 @@ void trinity_dpm_disable(struct radeon_device *rdev)
1369 trinity_release_mutex(rdev);
1370 return;
1371 }
1372 + trinity_dpm_bapm_enable(rdev, false);
1373 trinity_disable_clock_power_gating(rdev);
1374 sumo_clear_vc(rdev);
1375 trinity_wait_for_level_0(rdev);
1376 diff --git a/drivers/gpu/drm/radeon/trinity_dpm.h b/drivers/gpu/drm/radeon/trinity_dpm.h
1377 index e82df07..259d9e8 100644
1378 --- a/drivers/gpu/drm/radeon/trinity_dpm.h
1379 +++ b/drivers/gpu/drm/radeon/trinity_dpm.h
1380 @@ -118,6 +118,7 @@ struct trinity_power_info {
1381 #define TRINITY_AT_DFLT 30
1382
1383 /* trinity_smc.c */
1384 +int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable);
1385 int trinity_dpm_config(struct radeon_device *rdev, bool enable);
1386 int trinity_uvd_dpm_config(struct radeon_device *rdev);
1387 int trinity_dpm_force_state(struct radeon_device *rdev, u32 n);
1388 diff --git a/drivers/gpu/drm/radeon/trinity_smc.c b/drivers/gpu/drm/radeon/trinity_smc.c
1389 index a42d89f..9672bcb 100644
1390 --- a/drivers/gpu/drm/radeon/trinity_smc.c
1391 +++ b/drivers/gpu/drm/radeon/trinity_smc.c
1392 @@ -56,6 +56,14 @@ static int trinity_notify_message_to_smu(struct radeon_device *rdev, u32 id)
1393 return 0;
1394 }
1395
1396 +int trinity_dpm_bapm_enable(struct radeon_device *rdev, bool enable)
1397 +{
1398 + if (enable)
1399 + return trinity_notify_message_to_smu(rdev, PPSMC_MSG_EnableBAPM);
1400 + else
1401 + return trinity_notify_message_to_smu(rdev, PPSMC_MSG_DisableBAPM);
1402 +}
1403 +
1404 int trinity_dpm_config(struct radeon_device *rdev, bool enable)
1405 {
1406 if (enable)
1407 diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
1408 index 5e93a52..210d503 100644
1409 --- a/drivers/gpu/drm/ttm/ttm_tt.c
1410 +++ b/drivers/gpu/drm/ttm/ttm_tt.c
1411 @@ -170,7 +170,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
1412 ttm_tt_unbind(ttm);
1413 }
1414
1415 - if (likely(ttm->pages != NULL)) {
1416 + if (ttm->state == tt_unbound) {
1417 ttm->bdev->driver->ttm_tt_unpopulate(ttm);
1418 }
1419
1420 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1421 index 5956445..ee75486 100644
1422 --- a/drivers/hid/hid-core.c
1423 +++ b/drivers/hid/hid-core.c
1424 @@ -94,7 +94,6 @@ EXPORT_SYMBOL_GPL(hid_register_report);
1425 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages, unsigned values)
1426 {
1427 struct hid_field *field;
1428 - int i;
1429
1430 if (report->maxfield == HID_MAX_FIELDS) {
1431 hid_err(report->device, "too many fields in report\n");
1432 @@ -113,9 +112,6 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
1433 field->value = (s32 *)(field->usage + usages);
1434 field->report = report;
1435
1436 - for (i = 0; i < usages; i++)
1437 - field->usage[i].usage_index = i;
1438 -
1439 return field;
1440 }
1441
1442 @@ -226,9 +222,9 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
1443 {
1444 struct hid_report *report;
1445 struct hid_field *field;
1446 - int usages;
1447 + unsigned usages;
1448 unsigned offset;
1449 - int i;
1450 + unsigned i;
1451
1452 report = hid_register_report(parser->device, report_type, parser->global.report_id);
1453 if (!report) {
1454 @@ -255,7 +251,8 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
1455 if (!parser->local.usage_index) /* Ignore padding fields */
1456 return 0;
1457
1458 - usages = max_t(int, parser->local.usage_index, parser->global.report_count);
1459 + usages = max_t(unsigned, parser->local.usage_index,
1460 + parser->global.report_count);
1461
1462 field = hid_register_field(report, usages, parser->global.report_count);
1463 if (!field)
1464 @@ -266,13 +263,14 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
1465 field->application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
1466
1467 for (i = 0; i < usages; i++) {
1468 - int j = i;
1469 + unsigned j = i;
1470 /* Duplicate the last usage we parsed if we have excess values */
1471 if (i >= parser->local.usage_index)
1472 j = parser->local.usage_index - 1;
1473 field->usage[i].hid = parser->local.usage[j];
1474 field->usage[i].collection_index =
1475 parser->local.collection_index[j];
1476 + field->usage[i].usage_index = i;
1477 }
1478
1479 field->maxusage = usages;
1480 @@ -759,6 +757,64 @@ int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
1481 }
1482 EXPORT_SYMBOL_GPL(hid_parse_report);
1483
1484 +static const char * const hid_report_names[] = {
1485 + "HID_INPUT_REPORT",
1486 + "HID_OUTPUT_REPORT",
1487 + "HID_FEATURE_REPORT",
1488 +};
1489 +/**
1490 + * hid_validate_values - validate existing device report's value indexes
1491 + *
1492 + * @hid: hid device
1493 + * @type: which report type to examine
1494 + * @id: which report ID to examine (0 for first)
1495 + * @field_index: which report field to examine
1496 + * @report_counts: expected number of values
1497 + *
1498 + * Validate the number of values in a given field of a given report, after
1499 + * parsing.
1500 + */
1501 +struct hid_report *hid_validate_values(struct hid_device *hid,
1502 + unsigned int type, unsigned int id,
1503 + unsigned int field_index,
1504 + unsigned int report_counts)
1505 +{
1506 + struct hid_report *report;
1507 +
1508 + if (type > HID_FEATURE_REPORT) {
1509 + hid_err(hid, "invalid HID report type %u\n", type);
1510 + return NULL;
1511 + }
1512 +
1513 + if (id >= HID_MAX_IDS) {
1514 + hid_err(hid, "invalid HID report id %u\n", id);
1515 + return NULL;
1516 + }
1517 +
1518 + /*
1519 + * Explicitly not using hid_get_report() here since it depends on
1520 + * ->numbered being checked, which may not always be the case when
1521 + * drivers go to access report values.
1522 + */
1523 + report = hid->report_enum[type].report_id_hash[id];
1524 + if (!report) {
1525 + hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1526 + return NULL;
1527 + }
1528 + if (report->maxfield <= field_index) {
1529 + hid_err(hid, "not enough fields in %s %u\n",
1530 + hid_report_names[type], id);
1531 + return NULL;
1532 + }
1533 + if (report->field[field_index]->report_count < report_counts) {
1534 + hid_err(hid, "not enough values in %s %u field %u\n",
1535 + hid_report_names[type], id, field_index);
1536 + return NULL;
1537 + }
1538 + return report;
1539 +}
1540 +EXPORT_SYMBOL_GPL(hid_validate_values);
1541 +
1542 /**
1543 * hid_open_report - open a driver-specific device report
1544 *
1545 @@ -1237,7 +1293,7 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1546 goto out;
1547 }
1548
1549 - if (hid->claimed != HID_CLAIMED_HIDRAW) {
1550 + if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
1551 for (a = 0; a < report->maxfield; a++)
1552 hid_input_field(hid, report->field[a], cdata, interrupt);
1553 hdrv = hid->driver;
1554 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1555 index 3fc4034..e30dddc 100644
1556 --- a/drivers/hid/hid-input.c
1557 +++ b/drivers/hid/hid-input.c
1558 @@ -485,6 +485,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
1559 if (field->flags & HID_MAIN_ITEM_CONSTANT)
1560 goto ignore;
1561
1562 + /* Ignore if report count is out of bounds. */
1563 + if (field->report_count < 1)
1564 + goto ignore;
1565 +
1566 /* only LED usages are supported in output fields */
1567 if (field->report_type == HID_OUTPUT_REPORT &&
1568 (usage->hid & HID_USAGE_PAGE) != HID_UP_LED) {
1569 @@ -1168,7 +1172,11 @@ static void report_features(struct hid_device *hid)
1570
1571 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1572 list_for_each_entry(rep, &rep_enum->report_list, list)
1573 - for (i = 0; i < rep->maxfield; i++)
1574 + for (i = 0; i < rep->maxfield; i++) {
1575 + /* Ignore if report count is out of bounds. */
1576 + if (rep->field[i]->report_count < 1)
1577 + continue;
1578 +
1579 for (j = 0; j < rep->field[i]->maxusage; j++) {
1580 /* Verify if Battery Strength feature is available */
1581 hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
1582 @@ -1177,6 +1185,7 @@ static void report_features(struct hid_device *hid)
1583 drv->feature_mapping(hid, rep->field[i],
1584 rep->field[i]->usage + j);
1585 }
1586 + }
1587 }
1588
1589 static struct hid_input *hidinput_allocate(struct hid_device *hid)
1590 diff --git a/drivers/hid/hid-lenovo-tpkbd.c b/drivers/hid/hid-lenovo-tpkbd.c
1591 index 07837f5..31cf29a 100644
1592 --- a/drivers/hid/hid-lenovo-tpkbd.c
1593 +++ b/drivers/hid/hid-lenovo-tpkbd.c
1594 @@ -339,7 +339,15 @@ static int tpkbd_probe_tp(struct hid_device *hdev)
1595 struct tpkbd_data_pointer *data_pointer;
1596 size_t name_sz = strlen(dev_name(dev)) + 16;
1597 char *name_mute, *name_micmute;
1598 - int ret;
1599 + int i, ret;
1600 +
1601 + /* Validate required reports. */
1602 + for (i = 0; i < 4; i++) {
1603 + if (!hid_validate_values(hdev, HID_FEATURE_REPORT, 4, i, 1))
1604 + return -ENODEV;
1605 + }
1606 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 3, 0, 2))
1607 + return -ENODEV;
1608
1609 if (sysfs_create_group(&hdev->dev.kobj,
1610 &tpkbd_attr_group_pointer)) {
1611 @@ -406,22 +414,27 @@ static int tpkbd_probe(struct hid_device *hdev,
1612 ret = hid_parse(hdev);
1613 if (ret) {
1614 hid_err(hdev, "hid_parse failed\n");
1615 - goto err_free;
1616 + goto err;
1617 }
1618
1619 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
1620 if (ret) {
1621 hid_err(hdev, "hid_hw_start failed\n");
1622 - goto err_free;
1623 + goto err;
1624 }
1625
1626 uhdev = (struct usbhid_device *) hdev->driver_data;
1627
1628 - if (uhdev->ifnum == 1)
1629 - return tpkbd_probe_tp(hdev);
1630 + if (uhdev->ifnum == 1) {
1631 + ret = tpkbd_probe_tp(hdev);
1632 + if (ret)
1633 + goto err_hid;
1634 + }
1635
1636 return 0;
1637 -err_free:
1638 +err_hid:
1639 + hid_hw_stop(hdev);
1640 +err:
1641 return ret;
1642 }
1643
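The probe rework also replaces the old single err_free label with the standard two-level unwind: each label undoes only what had succeeded before the jump. Distilled from the hunk above:

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		goto err;	/* nothing to undo yet */

	ret = tpkbd_probe_tp(hdev);
	if (ret)
		goto err_hid;	/* hid_hw_start() succeeded, undo it */

	return 0;
err_hid:
	hid_hw_stop(hdev);
err:
	return ret;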
1644 diff --git a/drivers/hid/hid-lg2ff.c b/drivers/hid/hid-lg2ff.c
1645 index b3cd150..1a42eaa 100644
1646 --- a/drivers/hid/hid-lg2ff.c
1647 +++ b/drivers/hid/hid-lg2ff.c
1648 @@ -64,26 +64,13 @@ int lg2ff_init(struct hid_device *hid)
1649 struct hid_report *report;
1650 struct hid_input *hidinput = list_entry(hid->inputs.next,
1651 struct hid_input, list);
1652 - struct list_head *report_list =
1653 - &hid->report_enum[HID_OUTPUT_REPORT].report_list;
1654 struct input_dev *dev = hidinput->input;
1655 int error;
1656
1657 - if (list_empty(report_list)) {
1658 - hid_err(hid, "no output report found\n");
1659 + /* Check that the report looks ok */
1660 + report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
1661 + if (!report)
1662 return -ENODEV;
1663 - }
1664 -
1665 - report = list_entry(report_list->next, struct hid_report, list);
1666 -
1667 - if (report->maxfield < 1) {
1668 - hid_err(hid, "output report is empty\n");
1669 - return -ENODEV;
1670 - }
1671 - if (report->field[0]->report_count < 7) {
1672 - hid_err(hid, "not enough values in the field\n");
1673 - return -ENODEV;
1674 - }
1675
1676 lg2ff = kmalloc(sizeof(struct lg2ff_device), GFP_KERNEL);
1677 if (!lg2ff)
1678 diff --git a/drivers/hid/hid-lg3ff.c b/drivers/hid/hid-lg3ff.c
1679 index e52f181..8c2da18 100644
1680 --- a/drivers/hid/hid-lg3ff.c
1681 +++ b/drivers/hid/hid-lg3ff.c
1682 @@ -66,10 +66,11 @@ static int hid_lg3ff_play(struct input_dev *dev, void *data,
1683 int x, y;
1684
1685 /*
1686 - * Maxusage should always be 63 (maximum fields)
1687 - * likely a better way to ensure this data is clean
1688 + * Available values in the field should always be 63, but we only use up to
1689 + * 35. Instead, clear the entire area, however big it is.
1690 */
1691 - memset(report->field[0]->value, 0, sizeof(__s32)*report->field[0]->maxusage);
1692 + memset(report->field[0]->value, 0,
1693 + sizeof(__s32) * report->field[0]->report_count);
1694
1695 switch (effect->type) {
1696 case FF_CONSTANT:
1697 @@ -129,32 +130,14 @@ static const signed short ff3_joystick_ac[] = {
1698 int lg3ff_init(struct hid_device *hid)
1699 {
1700 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
1701 - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
1702 struct input_dev *dev = hidinput->input;
1703 - struct hid_report *report;
1704 - struct hid_field *field;
1705 const signed short *ff_bits = ff3_joystick_ac;
1706 int error;
1707 int i;
1708
1709 - /* Find the report to use */
1710 - if (list_empty(report_list)) {
1711 - hid_err(hid, "No output report found\n");
1712 - return -1;
1713 - }
1714 -
1715 /* Check that the report looks ok */
1716 - report = list_entry(report_list->next, struct hid_report, list);
1717 - if (!report) {
1718 - hid_err(hid, "NULL output report\n");
1719 - return -1;
1720 - }
1721 -
1722 - field = report->field[0];
1723 - if (!field) {
1724 - hid_err(hid, "NULL field\n");
1725 - return -1;
1726 - }
1727 + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 35))
1728 + return -ENODEV;
1729
1730 /* Assume single fixed device G940 */
1731 for (i = 0; ff_bits[i] >= 0; i++)
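The new memset bound matters because, after the max_t() change in hid-core.c above, a field can carry more usages than value slots: hid_register_field(report, usages, values) sizes usage[] by its second argument (which becomes maxusage) and value[] by its third (the report count). Schematically, not patch code:

	/* usage[] has 'usages' entries, value[] has report_count entries,
	 * and usages >= report_count is now possible, so maxusage is the
	 * wrong bound for clearing value[]: */
	memset(field->value, 0, sizeof(__s32) * field->report_count);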
1732 diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
1733 index 0ddae2a..8782fe1 100644
1734 --- a/drivers/hid/hid-lg4ff.c
1735 +++ b/drivers/hid/hid-lg4ff.c
1736 @@ -484,34 +484,16 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde
1737 int lg4ff_init(struct hid_device *hid)
1738 {
1739 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
1740 - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
1741 struct input_dev *dev = hidinput->input;
1742 - struct hid_report *report;
1743 - struct hid_field *field;
1744 struct lg4ff_device_entry *entry;
1745 struct lg_drv_data *drv_data;
1746 struct usb_device_descriptor *udesc;
1747 int error, i, j;
1748 __u16 bcdDevice, rev_maj, rev_min;
1749
1750 - /* Find the report to use */
1751 - if (list_empty(report_list)) {
1752 - hid_err(hid, "No output report found\n");
1753 - return -1;
1754 - }
1755 -
1756 /* Check that the report looks ok */
1757 - report = list_entry(report_list->next, struct hid_report, list);
1758 - if (!report) {
1759 - hid_err(hid, "NULL output report\n");
1760 + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
1761 return -1;
1762 - }
1763 -
1764 - field = report->field[0];
1765 - if (!field) {
1766 - hid_err(hid, "NULL field\n");
1767 - return -1;
1768 - }
1769
1770 /* Check what wheel has been connected */
1771 for (i = 0; i < ARRAY_SIZE(lg4ff_devices); i++) {
1772 diff --git a/drivers/hid/hid-lgff.c b/drivers/hid/hid-lgff.c
1773 index d7ea8c8..e1394af 100644
1774 --- a/drivers/hid/hid-lgff.c
1775 +++ b/drivers/hid/hid-lgff.c
1776 @@ -128,27 +128,14 @@ static void hid_lgff_set_autocenter(struct input_dev *dev, u16 magnitude)
1777 int lgff_init(struct hid_device* hid)
1778 {
1779 struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
1780 - struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list;
1781 struct input_dev *dev = hidinput->input;
1782 - struct hid_report *report;
1783 - struct hid_field *field;
1784 const signed short *ff_bits = ff_joystick;
1785 int error;
1786 int i;
1787
1788 - /* Find the report to use */
1789 - if (list_empty(report_list)) {
1790 - hid_err(hid, "No output report found\n");
1791 - return -1;
1792 - }
1793 -
1794 /* Check that the report looks ok */
1795 - report = list_entry(report_list->next, struct hid_report, list);
1796 - field = report->field[0];
1797 - if (!field) {
1798 - hid_err(hid, "NULL field\n");
1799 - return -1;
1800 - }
1801 + if (!hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7))
1802 + return -ENODEV;
1803
1804 for (i = 0; i < ARRAY_SIZE(devices); i++) {
1805 if (dev->id.vendor == devices[i].idVendor &&
1806 diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
1807 index cd33084..a2469b5 100644
1808 --- a/drivers/hid/hid-logitech-dj.c
1809 +++ b/drivers/hid/hid-logitech-dj.c
1810 @@ -461,7 +461,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
1811 struct hid_report *report;
1812 struct hid_report_enum *output_report_enum;
1813 u8 *data = (u8 *)(&dj_report->device_index);
1814 - int i;
1815 + unsigned int i;
1816
1817 output_report_enum = &hdev->report_enum[HID_OUTPUT_REPORT];
1818 report = output_report_enum->report_id_hash[REPORT_ID_DJ_SHORT];
1819 @@ -471,7 +471,7 @@ static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
1820 return -ENODEV;
1821 }
1822
1823 - for (i = 0; i < report->field[0]->report_count; i++)
1824 + for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
1825 report->field[0]->value[i] = data[i];
1826
1827 hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
1828 @@ -783,6 +783,12 @@ static int logi_dj_probe(struct hid_device *hdev,
1829 goto hid_parse_fail;
1830 }
1831
1832 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, REPORT_ID_DJ_SHORT,
1833 + 0, DJREPORT_SHORT_LENGTH - 1)) {
1834 + retval = -ENODEV;
1835 + goto hid_parse_fail;
1836 + }
1837 +
1838 /* Starts the usb device and connects to upper interfaces hiddev and
1839 * hidraw */
1840 retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
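Two halves of one fix: probe refuses to bind unless the DJ short output report really provides DJREPORT_SHORT_LENGTH - 1 value slots, and the send path then copies by that driver-owned constant instead of the device-supplied report_count. The source buffer is the tail of a struct dj_report after the report ID, which is presumably why the bound is the length minus one; a descriptor advertising a larger count could otherwise walk past it. Condensed:

	/* probe: validate the layout once */
	if (!hid_validate_values(hdev, HID_OUTPUT_REPORT,
				 REPORT_ID_DJ_SHORT, 0,
				 DJREPORT_SHORT_LENGTH - 1))
		return -ENODEV;

	/* send path: fixed, trusted bound */
	for (i = 0; i < DJREPORT_SHORT_LENGTH - 1; i++)
		report->field[0]->value[i] = data[i];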
1841 diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1842 index cb0e361..2d3677c 100644
1843 --- a/drivers/hid/hid-multitouch.c
1844 +++ b/drivers/hid/hid-multitouch.c
1845 @@ -101,9 +101,9 @@ struct mt_device {
1846 unsigned last_slot_field; /* the last field of a slot */
1847 unsigned mt_report_id; /* the report ID of the multitouch device */
1848 unsigned pen_report_id; /* the report ID of the pen device */
1849 - __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
1850 - __s8 inputmode_index; /* InputMode HID feature index in the report */
1851 - __s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
1852 + __s16 inputmode; /* InputMode HID feature, -1 if non-existent */
1853 + __s16 inputmode_index; /* InputMode HID feature index in the report */
1854 + __s16 maxcontact_report_id; /* Maximum Contact Number HID feature,
1855 -1 if non-existent */
1856 __u8 num_received; /* how many contacts we received */
1857 __u8 num_expected; /* expected last contact index */
1858 @@ -317,20 +317,18 @@ static void mt_feature_mapping(struct hid_device *hdev,
1859 struct hid_field *field, struct hid_usage *usage)
1860 {
1861 struct mt_device *td = hid_get_drvdata(hdev);
1862 - int i;
1863
1864 switch (usage->hid) {
1865 case HID_DG_INPUTMODE:
1866 - td->inputmode = field->report->id;
1867 - td->inputmode_index = 0; /* has to be updated below */
1868 -
1869 - for (i=0; i < field->maxusage; i++) {
1870 - if (field->usage[i].hid == usage->hid) {
1871 - td->inputmode_index = i;
1872 - break;
1873 - }
1874 + /* Ignore if value index is out of bounds. */
1875 + if (usage->usage_index >= field->report_count) {
1876 + dev_err(&hdev->dev, "HID_DG_INPUTMODE out of range\n");
1877 + break;
1878 }
1879
1880 + td->inputmode = field->report->id;
1881 + td->inputmode_index = usage->usage_index;
1882 +
1883 break;
1884 case HID_DG_CONTACTMAX:
1885 td->maxcontact_report_id = field->report->id;
1886 @@ -536,6 +534,10 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
1887 mt_store_field(usage, td, hi);
1888 return 1;
1889 case HID_DG_CONTACTCOUNT:
1890 + /* Ignore if indexes are out of bounds. */
1891 + if (field->index >= field->report->maxfield ||
1892 + usage->usage_index >= field->report_count)
1893 + return 1;
1894 td->cc_index = field->index;
1895 td->cc_value_index = usage->usage_index;
1896 return 1;
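The type widening is a fix in its own right: HID report IDs run 0..255 and these fields double as a -1 "not present" sentinel, so a signed 8-bit field cannot hold both. For example:

	__s8  id8  = 200;	/* wraps to -56 on common ABIs */
	__s16 id16 = 200;	/* holds 0..255 plus the -1 sentinel */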
1897 diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
1898 index 87fbe29..334a4b5 100644
1899 --- a/drivers/hid/hid-sony.c
1900 +++ b/drivers/hid/hid-sony.c
1901 @@ -537,6 +537,10 @@ static int buzz_init(struct hid_device *hdev)
1902 drv_data = hid_get_drvdata(hdev);
1903 BUG_ON(!(drv_data->quirks & BUZZ_CONTROLLER));
1904
1905 + /* Validate expected report characteristics. */
1906 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 7))
1907 + return -ENODEV;
1908 +
1909 buzz = kzalloc(sizeof(*buzz), GFP_KERNEL);
1910 if (!buzz) {
1911 hid_err(hdev, "Insufficient memory, cannot allocate driver data\n");
1912 diff --git a/drivers/hid/hid-steelseries.c b/drivers/hid/hid-steelseries.c
1913 index d164911..29f328f 100644
1914 --- a/drivers/hid/hid-steelseries.c
1915 +++ b/drivers/hid/hid-steelseries.c
1916 @@ -249,6 +249,11 @@ static int steelseries_srws1_probe(struct hid_device *hdev,
1917 goto err_free;
1918 }
1919
1920 + if (!hid_validate_values(hdev, HID_OUTPUT_REPORT, 0, 0, 16)) {
1921 + ret = -ENODEV;
1922 + goto err_free;
1923 + }
1924 +
1925 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
1926 if (ret) {
1927 hid_err(hdev, "hw start failed\n");
1928 diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c
1929 index 6ec28a3..a29756c 100644
1930 --- a/drivers/hid/hid-zpff.c
1931 +++ b/drivers/hid/hid-zpff.c
1932 @@ -68,21 +68,13 @@ static int zpff_init(struct hid_device *hid)
1933 struct hid_report *report;
1934 struct hid_input *hidinput = list_entry(hid->inputs.next,
1935 struct hid_input, list);
1936 - struct list_head *report_list =
1937 - &hid->report_enum[HID_OUTPUT_REPORT].report_list;
1938 struct input_dev *dev = hidinput->input;
1939 - int error;
1940 + int i, error;
1941
1942 - if (list_empty(report_list)) {
1943 - hid_err(hid, "no output report found\n");
1944 - return -ENODEV;
1945 - }
1946 -
1947 - report = list_entry(report_list->next, struct hid_report, list);
1948 -
1949 - if (report->maxfield < 4) {
1950 - hid_err(hid, "not enough fields in report\n");
1951 - return -ENODEV;
1952 + for (i = 0; i < 4; i++) {
1953 + report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
1954 + if (!report)
1955 + return -ENODEV;
1956 }
1957
1958 zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
1959 diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
1960 index eec0af4..1c6bc96 100644
1961 --- a/drivers/net/ethernet/broadcom/bgmac.c
1962 +++ b/drivers/net/ethernet/broadcom/bgmac.c
1963 @@ -908,7 +908,7 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
1964 struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
1965 u8 et_swtype = 0;
1966 u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
1967 - BGMAC_CHIPCTL_1_IF_TYPE_RMII;
1968 + BGMAC_CHIPCTL_1_IF_TYPE_MII;
1969 char buf[2];
1970
1971 if (bcm47xx_nvram_getenv("et_swtype", buf, 1) > 0) {
1972 diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
1973 index 98d4b5f..12a35cf 100644
1974 --- a/drivers/net/ethernet/broadcom/bgmac.h
1975 +++ b/drivers/net/ethernet/broadcom/bgmac.h
1976 @@ -333,7 +333,7 @@
1977
1978 #define BGMAC_CHIPCTL_1_IF_TYPE_MASK 0x00000030
1979 #define BGMAC_CHIPCTL_1_IF_TYPE_RMII 0x00000000
1980 -#define BGMAC_CHIPCTL_1_IF_TYPE_MI 0x00000010
1981 +#define BGMAC_CHIPCTL_1_IF_TYPE_MII 0x00000010
1982 #define BGMAC_CHIPCTL_1_IF_TYPE_RGMII 0x00000020
1983 #define BGMAC_CHIPCTL_1_SW_TYPE_MASK 0x000000C0
1984 #define BGMAC_CHIPCTL_1_SW_TYPE_EPHY 0x00000000
1985 diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
1986 index 0da2214..a04d2da 100644
1987 --- a/drivers/net/ethernet/broadcom/tg3.c
1988 +++ b/drivers/net/ethernet/broadcom/tg3.c
1989 @@ -3030,6 +3030,20 @@ static bool tg3_phy_power_bug(struct tg3 *tp)
1990 return false;
1991 }
1992
1993 +static bool tg3_phy_led_bug(struct tg3 *tp)
1994 +{
1995 + switch (tg3_asic_rev(tp)) {
1996 + case ASIC_REV_5719:
1997 + case ASIC_REV_5720:
1998 + if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
1999 + !tp->pci_fn)
2000 + return true;
2001 + return false;
2002 + }
2003 +
2004 + return false;
2005 +}
2006 +
2007 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2008 {
2009 u32 val;
2010 @@ -3077,8 +3091,9 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2011 }
2012 return;
2013 } else if (do_low_power) {
2014 - tg3_writephy(tp, MII_TG3_EXT_CTRL,
2015 - MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2016 + if (!tg3_phy_led_bug(tp))
2017 + tg3_writephy(tp, MII_TG3_EXT_CTRL,
2018 + MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2019
2020 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2021 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2022 diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
2023 index ef94a59..1a9c4f6 100644
2024 --- a/drivers/net/ethernet/marvell/skge.c
2025 +++ b/drivers/net/ethernet/marvell/skge.c
2026 @@ -3092,6 +3092,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
2027 if (!nskb)
2028 goto resubmit;
2029
2030 + skb = e->skb;
2031 + prefetch(skb->data);
2032 +
2033 if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
2034 dev_kfree_skb(nskb);
2035 goto resubmit;
2036 @@ -3101,8 +3104,6 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
2037 dma_unmap_addr(e, mapaddr),
2038 dma_unmap_len(e, maplen),
2039 PCI_DMA_FROMDEVICE);
2040 - skb = e->skb;
2041 - prefetch(skb->data);
2042 }
2043
2044 skb_put(skb, len);
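The reorder decides which buffer gets delivered: skge_rx_setup() installs nskb into the ring element, so reading e->skb after it succeeds yields the fresh replacement instead of the skb holding the received frame. The fixed flow, condensed:

	skb = e->skb;			/* frame we are about to deliver */
	prefetch(skb->data);

	if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) {
		dev_kfree_skb(nskb);	/* element keeps its old buffer */
		goto resubmit;
	}
	/* e->skb is now nskb; 'skb' still names the received frame */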
2045 diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
2046 index 03ad4dc..98aef3b 100644
2047 --- a/drivers/net/usb/cdc_ether.c
2048 +++ b/drivers/net/usb/cdc_ether.c
2049 @@ -726,6 +726,11 @@ static const struct usb_device_id products [] = {
2050 .bInterfaceProtocol = USB_CDC_PROTO_NONE,
2051 .driver_info = (unsigned long)&wwan_info,
2052 }, {
2053 + /* Telit modules */
2054 + USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM,
2055 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
2056 + .driver_info = (kernel_ulong_t) &wwan_info,
2057 +}, {
2058 USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
2059 USB_CDC_PROTO_NONE),
2060 .driver_info = (unsigned long) &cdc_info,
2061 diff --git a/drivers/net/wireless/cw1200/cw1200_spi.c b/drivers/net/wireless/cw1200/cw1200_spi.c
2062 index d063760..f5e6b48 100644
2063 --- a/drivers/net/wireless/cw1200/cw1200_spi.c
2064 +++ b/drivers/net/wireless/cw1200/cw1200_spi.c
2065 @@ -40,7 +40,9 @@ struct hwbus_priv {
2066 struct cw1200_common *core;
2067 const struct cw1200_platform_data_spi *pdata;
2068 spinlock_t lock; /* Serialize all bus operations */
2069 + wait_queue_head_t wq;
2070 int claimed;
2071 + int irq_disabled;
2072 };
2073
2074 #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2)
2075 @@ -197,8 +199,11 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
2076 {
2077 unsigned long flags;
2078
2079 + DECLARE_WAITQUEUE(wait, current);
2080 +
2081 might_sleep();
2082
2083 + add_wait_queue(&self->wq, &wait);
2084 spin_lock_irqsave(&self->lock, flags);
2085 while (1) {
2086 set_current_state(TASK_UNINTERRUPTIBLE);
2087 @@ -211,6 +216,7 @@ static void cw1200_spi_lock(struct hwbus_priv *self)
2088 set_current_state(TASK_RUNNING);
2089 self->claimed = 1;
2090 spin_unlock_irqrestore(&self->lock, flags);
2091 + remove_wait_queue(&self->wq, &wait);
2092
2093 return;
2094 }
2095 @@ -222,6 +228,8 @@ static void cw1200_spi_unlock(struct hwbus_priv *self)
2096 spin_lock_irqsave(&self->lock, flags);
2097 self->claimed = 0;
2098 spin_unlock_irqrestore(&self->lock, flags);
2099 + wake_up(&self->wq);
2100 +
2101 return;
2102 }
2103
2104 @@ -230,6 +238,8 @@ static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id)
2105 struct hwbus_priv *self = dev_id;
2106
2107 if (self->core) {
2108 + disable_irq_nosync(self->func->irq);
2109 + self->irq_disabled = 1;
2110 cw1200_irq_handler(self->core);
2111 return IRQ_HANDLED;
2112 } else {
2113 @@ -263,13 +273,22 @@ exit:
2114
2115 static int cw1200_spi_irq_unsubscribe(struct hwbus_priv *self)
2116 {
2117 - int ret = 0;
2118 -
2119 pr_debug("SW IRQ unsubscribe\n");
2120 disable_irq_wake(self->func->irq);
2121 free_irq(self->func->irq, self);
2122
2123 - return ret;
2124 + return 0;
2125 +}
2126 +
2127 +static int cw1200_spi_irq_enable(struct hwbus_priv *self, int enable)
2128 +{
2129 + /* Disables are handled by the interrupt handler */
2130 + if (enable && self->irq_disabled) {
2131 + enable_irq(self->func->irq);
2132 + self->irq_disabled = 0;
2133 + }
2134 +
2135 + return 0;
2136 }
2137
2138 static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata)
2139 @@ -349,6 +368,7 @@ static struct hwbus_ops cw1200_spi_hwbus_ops = {
2140 .unlock = cw1200_spi_unlock,
2141 .align_size = cw1200_spi_align_size,
2142 .power_mgmt = cw1200_spi_pm,
2143 + .irq_enable = cw1200_spi_irq_enable,
2144 };
2145
2146 /* Probe Function to be called by SPI stack when device is discovered */
2147 @@ -400,6 +420,8 @@ static int cw1200_spi_probe(struct spi_device *func)
2148
2149 spi_set_drvdata(func, self);
2150
2151 + init_waitqueue_head(&self->wq);
2152 +
2153 status = cw1200_spi_irq_subscribe(self);
2154
2155 status = cw1200_core_probe(&cw1200_spi_hwbus_ops,
2156 diff --git a/drivers/net/wireless/cw1200/fwio.c b/drivers/net/wireless/cw1200/fwio.c
2157 index acdff0f..0b2061b 100644
2158 --- a/drivers/net/wireless/cw1200/fwio.c
2159 +++ b/drivers/net/wireless/cw1200/fwio.c
2160 @@ -485,7 +485,7 @@ int cw1200_load_firmware(struct cw1200_common *priv)
2161
2162 /* Enable interrupt signalling */
2163 priv->hwbus_ops->lock(priv->hwbus_priv);
2164 - ret = __cw1200_irq_enable(priv, 1);
2165 + ret = __cw1200_irq_enable(priv, 2);
2166 priv->hwbus_ops->unlock(priv->hwbus_priv);
2167 if (ret < 0)
2168 goto unsubscribe;
2169 diff --git a/drivers/net/wireless/cw1200/hwbus.h b/drivers/net/wireless/cw1200/hwbus.h
2170 index 8b2fc83..51dfb3a 100644
2171 --- a/drivers/net/wireless/cw1200/hwbus.h
2172 +++ b/drivers/net/wireless/cw1200/hwbus.h
2173 @@ -28,6 +28,7 @@ struct hwbus_ops {
2174 void (*unlock)(struct hwbus_priv *self);
2175 size_t (*align_size)(struct hwbus_priv *self, size_t size);
2176 int (*power_mgmt)(struct hwbus_priv *self, bool suspend);
2177 + int (*irq_enable)(struct hwbus_priv *self, int enable);
2178 };
2179
2180 #endif /* CW1200_HWBUS_H */
2181 diff --git a/drivers/net/wireless/cw1200/hwio.c b/drivers/net/wireless/cw1200/hwio.c
2182 index ff230b7..41bd761 100644
2183 --- a/drivers/net/wireless/cw1200/hwio.c
2184 +++ b/drivers/net/wireless/cw1200/hwio.c
2185 @@ -273,6 +273,21 @@ int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
2186 u16 val16;
2187 int ret;
2188
2189 + /* We need to do this hack because the SPI layer can sleep on I/O
2190 + and the general path involves I/O to the device in interrupt
2191 + context.
2192 +
2193 + However, the initial enable call needs to go to the hardware.
2194 +
2195 + We don't worry about shutdown because we do a full reset which
2196 + clears the interrupt enabled bits.
2197 + */
2198 + if (priv->hwbus_ops->irq_enable) {
2199 + ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
2200 + if (ret || enable < 2)
2201 + return ret;
2202 + }
2203 +
2204 if (HIF_8601_SILICON == priv->hw_type) {
2205 ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
2206 if (ret < 0) {
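The enable argument is now effectively tri-state, which is why the fwio.c hunk above changes the initial call from 1 to 2: routine enables and disables stay host-side in the new bus op (SPI register I/O can sleep and is unsafe from the IRQ path), and only the one-time firmware-load enable falls through to program the device. Roughly:

	/* enable == 0/1: host-side IRQ toggle only, via the bus op
	 * enable == 2:   initial setup, must also reach the hardware */
	if (priv->hwbus_ops->irq_enable) {
		ret = priv->hwbus_ops->irq_enable(priv->hwbus_priv, enable);
		if (ret || enable < 2)
			return ret;
	}
	/* fall through to the ST90TDS_CONFIG_REG_ID read/write */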
2207 diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
2208 index 1b41c8e..39d8863 100644
2209 --- a/drivers/net/wireless/rt2x00/rt2800lib.c
2210 +++ b/drivers/net/wireless/rt2x00/rt2800lib.c
2211 @@ -2790,6 +2790,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev)
2212 int i;
2213
2214 /*
2215 + * First check if temperature compensation is supported.
2216 + */
2217 + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
2218 + if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC))
2219 + return 0;
2220 +
2221 + /*
2222 * Read TSSI boundaries for temperature compensation from
2223 * the EEPROM.
2224 *
2225 @@ -5404,19 +5411,20 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
2226 rt2800_init_registers(rt2x00dev)))
2227 return -EIO;
2228
2229 + if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev)))
2230 + return -EIO;
2231 +
2232 /*
2233 * Send signal to firmware during boot time.
2234 */
2235 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
2236 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
2237 - if (rt2x00_is_usb(rt2x00dev)) {
2238 + if (rt2x00_is_usb(rt2x00dev))
2239 rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0);
2240 - rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
2241 - }
2242 + rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0);
2243 msleep(1);
2244
2245 - if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
2246 - rt2800_wait_bbp_ready(rt2x00dev)))
2247 + if (unlikely(rt2800_wait_bbp_ready(rt2x00dev)))
2248 return -EIO;
2249
2250 rt2800_init_bbp(rt2x00dev);
2251 diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
2252 index 01e264f..6e83e42 100644
2253 --- a/drivers/pci/pci-acpi.c
2254 +++ b/drivers/pci/pci-acpi.c
2255 @@ -47,6 +47,9 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
2256 if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
2257 return;
2258
2259 + if (pci_dev->pme_poll)
2260 + pci_dev->pme_poll = false;
2261 +
2262 if (pci_dev->current_state == PCI_D3cold) {
2263 pci_wakeup_event(pci_dev);
2264 pm_runtime_resume(&pci_dev->dev);
2265 @@ -57,9 +60,6 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
2266 if (pci_dev->pme_support)
2267 pci_check_pme_status(pci_dev);
2268
2269 - if (pci_dev->pme_poll)
2270 - pci_dev->pme_poll = false;
2271 -
2272 pci_wakeup_event(pci_dev);
2273 pm_runtime_resume(&pci_dev->dev);
2274
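The move matters because of the early return above it: a device signalling wakeup from D3cold took that branch before pme_poll was ever cleared, so the periodic PME poll kept treating it as poll-only. Hoisting the clear covers both paths:

	if (pci_dev->pme_poll)
		pci_dev->pme_poll = false;	/* now runs for D3cold too */

	if (pci_dev->current_state == PCI_D3cold) {
		pci_wakeup_event(pci_dev);
		pm_runtime_resume(&pci_dev->dev);
		return;		/* old clear site was below this return */
	}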
2275 diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
2276 index c588e8e..ac0e79e 100644
2277 --- a/drivers/usb/gadget/dummy_hcd.c
2278 +++ b/drivers/usb/gadget/dummy_hcd.c
2279 @@ -923,8 +923,9 @@ static int dummy_udc_stop(struct usb_gadget *g,
2280 struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
2281 struct dummy *dum = dum_hcd->dum;
2282
2283 - dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
2284 - driver->driver.name);
2285 + if (driver)
2286 + dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
2287 + driver->driver.name);
2288
2289 dum->driver = NULL;
2290
2291 @@ -1000,8 +1001,8 @@ static int dummy_udc_remove(struct platform_device *pdev)
2292 {
2293 struct dummy *dum = platform_get_drvdata(pdev);
2294
2295 - usb_del_gadget_udc(&dum->gadget);
2296 device_remove_file(&dum->gadget.dev, &dev_attr_function);
2297 + usb_del_gadget_udc(&dum->gadget);
2298 return 0;
2299 }
2300
2301 diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
2302 index 8fb4291..45e944f 100644
2303 --- a/fs/bio-integrity.c
2304 +++ b/fs/bio-integrity.c
2305 @@ -734,7 +734,7 @@ void bioset_integrity_free(struct bio_set *bs)
2306 mempool_destroy(bs->bio_integrity_pool);
2307
2308 if (bs->bvec_integrity_pool)
2309 - mempool_destroy(bs->bio_integrity_pool);
2310 + mempool_destroy(bs->bvec_integrity_pool);
2311 }
2312 EXPORT_SYMBOL(bioset_integrity_free);
2313
2314 diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
2315 index d62ce0d..4c019f4 100644
2316 --- a/fs/cifs/dir.c
2317 +++ b/fs/cifs/dir.c
2318 @@ -499,6 +499,7 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
2319 if (server->ops->close)
2320 server->ops->close(xid, tcon, &fid);
2321 cifs_del_pending_open(&open);
2322 + fput(file);
2323 rc = -ENOMEM;
2324 }
2325
2326 diff --git a/fs/udf/super.c b/fs/udf/super.c
2327 index 9ac4057..839a2ba 100644
2328 --- a/fs/udf/super.c
2329 +++ b/fs/udf/super.c
2330 @@ -630,6 +630,12 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
2331 struct udf_sb_info *sbi = UDF_SB(sb);
2332 int error = 0;
2333
2334 + if (sbi->s_lvid_bh) {
2335 + int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
2336 + if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
2337 + return -EACCES;
2338 + }
2339 +
2340 uopt.flags = sbi->s_flags;
2341 uopt.uid = sbi->s_uid;
2342 uopt.gid = sbi->s_gid;
2343 @@ -649,12 +655,6 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
2344 sbi->s_dmode = uopt.dmode;
2345 write_unlock(&sbi->s_cred_lock);
2346
2347 - if (sbi->s_lvid_bh) {
2348 - int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
2349 - if (write_rev > UDF_MAX_WRITE_VERSION)
2350 - *flags |= MS_RDONLY;
2351 - }
2352 -
2353 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
2354 goto out_unlock;
2355
2356 @@ -843,27 +843,38 @@ static int udf_find_fileset(struct super_block *sb,
2357 return 1;
2358 }
2359
2360 +/*
2361 + * Load primary Volume Descriptor Sequence
2362 + *
2363 + * Return <0 on error, 0 on success. -EAGAIN is special meaning next sequence
2364 + * should be tried.
2365 + */
2366 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
2367 {
2368 struct primaryVolDesc *pvoldesc;
2369 struct ustr *instr, *outstr;
2370 struct buffer_head *bh;
2371 uint16_t ident;
2372 - int ret = 1;
2373 + int ret = -ENOMEM;
2374
2375 instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
2376 if (!instr)
2377 - return 1;
2378 + return -ENOMEM;
2379
2380 outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
2381 if (!outstr)
2382 goto out1;
2383
2384 bh = udf_read_tagged(sb, block, block, &ident);
2385 - if (!bh)
2386 + if (!bh) {
2387 + ret = -EAGAIN;
2388 goto out2;
2389 + }
2390
2391 - BUG_ON(ident != TAG_IDENT_PVD);
2392 + if (ident != TAG_IDENT_PVD) {
2393 + ret = -EIO;
2394 + goto out_bh;
2395 + }
2396
2397 pvoldesc = (struct primaryVolDesc *)bh->b_data;
2398
2399 @@ -889,8 +900,9 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
2400 if (udf_CS0toUTF8(outstr, instr))
2401 udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
2402
2403 - brelse(bh);
2404 ret = 0;
2405 +out_bh:
2406 + brelse(bh);
2407 out2:
2408 kfree(outstr);
2409 out1:
2410 @@ -947,7 +959,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
2411
2412 if (mdata->s_mirror_fe == NULL) {
2413 udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
2414 - goto error_exit;
2415 + return -EIO;
2416 }
2417 }
2418
2419 @@ -964,23 +976,18 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
2420 addr.logicalBlockNum, addr.partitionReferenceNum);
2421
2422 mdata->s_bitmap_fe = udf_iget(sb, &addr);
2423 -
2424 if (mdata->s_bitmap_fe == NULL) {
2425 if (sb->s_flags & MS_RDONLY)
2426 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
2427 else {
2428 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
2429 - goto error_exit;
2430 + return -EIO;
2431 }
2432 }
2433 }
2434
2435 udf_debug("udf_load_metadata_files Ok\n");
2436 -
2437 return 0;
2438 -
2439 -error_exit:
2440 - return 1;
2441 }
2442
2443 static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
2444 @@ -1069,7 +1076,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
2445 if (!map->s_uspace.s_table) {
2446 udf_debug("cannot load unallocSpaceTable (part %d)\n",
2447 p_index);
2448 - return 1;
2449 + return -EIO;
2450 }
2451 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
2452 udf_debug("unallocSpaceTable (part %d) @ %ld\n",
2453 @@ -1079,7 +1086,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
2454 if (phd->unallocSpaceBitmap.extLength) {
2455 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
2456 if (!bitmap)
2457 - return 1;
2458 + return -ENOMEM;
2459 map->s_uspace.s_bitmap = bitmap;
2460 bitmap->s_extPosition = le32_to_cpu(
2461 phd->unallocSpaceBitmap.extPosition);
2462 @@ -1102,7 +1109,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
2463 if (!map->s_fspace.s_table) {
2464 udf_debug("cannot load freedSpaceTable (part %d)\n",
2465 p_index);
2466 - return 1;
2467 + return -EIO;
2468 }
2469
2470 map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
2471 @@ -1113,7 +1120,7 @@ static int udf_fill_partdesc_info(struct super_block *sb,
2472 if (phd->freedSpaceBitmap.extLength) {
2473 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
2474 if (!bitmap)
2475 - return 1;
2476 + return -ENOMEM;
2477 map->s_fspace.s_bitmap = bitmap;
2478 bitmap->s_extPosition = le32_to_cpu(
2479 phd->freedSpaceBitmap.extPosition);
2480 @@ -1165,7 +1172,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
2481 udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
2482 }
2483 if (!sbi->s_vat_inode)
2484 - return 1;
2485 + return -EIO;
2486
2487 if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
2488 map->s_type_specific.s_virtual.s_start_offset = 0;
2489 @@ -1177,7 +1184,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
2490 pos = udf_block_map(sbi->s_vat_inode, 0);
2491 bh = sb_bread(sb, pos);
2492 if (!bh)
2493 - return 1;
2494 + return -EIO;
2495 vat20 = (struct virtualAllocationTable20 *)bh->b_data;
2496 } else {
2497 vat20 = (struct virtualAllocationTable20 *)
2498 @@ -1195,6 +1202,12 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
2499 return 0;
2500 }
2501
2502 +/*
2503 + * Load partition descriptor block
2504 + *
2505 + * Returns <0 on error, 0 on success, -EAGAIN is special - try next descriptor
2506 + * sequence.
2507 + */
2508 static int udf_load_partdesc(struct super_block *sb, sector_t block)
2509 {
2510 struct buffer_head *bh;
2511 @@ -1204,13 +1217,15 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
2512 int i, type1_idx;
2513 uint16_t partitionNumber;
2514 uint16_t ident;
2515 - int ret = 0;
2516 + int ret;
2517
2518 bh = udf_read_tagged(sb, block, block, &ident);
2519 if (!bh)
2520 - return 1;
2521 - if (ident != TAG_IDENT_PD)
2522 + return -EAGAIN;
2523 + if (ident != TAG_IDENT_PD) {
2524 + ret = 0;
2525 goto out_bh;
2526 + }
2527
2528 p = (struct partitionDesc *)bh->b_data;
2529 partitionNumber = le16_to_cpu(p->partitionNumber);
2530 @@ -1229,10 +1244,13 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
2531 if (i >= sbi->s_partitions) {
2532 udf_debug("Partition (%d) not found in partition map\n",
2533 partitionNumber);
2534 + ret = 0;
2535 goto out_bh;
2536 }
2537
2538 ret = udf_fill_partdesc_info(sb, p, i);
2539 + if (ret < 0)
2540 + goto out_bh;
2541
2542 /*
2543 * Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
2544 @@ -1249,32 +1267,37 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
2545 break;
2546 }
2547
2548 - if (i >= sbi->s_partitions)
2549 + if (i >= sbi->s_partitions) {
2550 + ret = 0;
2551 goto out_bh;
2552 + }
2553
2554 ret = udf_fill_partdesc_info(sb, p, i);
2555 - if (ret)
2556 + if (ret < 0)
2557 goto out_bh;
2558
2559 if (map->s_partition_type == UDF_METADATA_MAP25) {
2560 ret = udf_load_metadata_files(sb, i);
2561 - if (ret) {
2562 + if (ret < 0) {
2563 udf_err(sb, "error loading MetaData partition map %d\n",
2564 i);
2565 goto out_bh;
2566 }
2567 } else {
2568 - ret = udf_load_vat(sb, i, type1_idx);
2569 - if (ret)
2570 - goto out_bh;
2571 /*
2572 - * Mark filesystem read-only if we have a partition with
2573 - * virtual map since we don't handle writing to it (we
2574 - * overwrite blocks instead of relocating them).
2575 + * If we have a partition with virtual map, we don't handle
2576 + * writing to it (we overwrite blocks instead of relocating
2577 + * them).
2578 */
2579 - sb->s_flags |= MS_RDONLY;
2580 - pr_notice("Filesystem marked read-only because writing to pseudooverwrite partition is not implemented\n");
2581 + if (!(sb->s_flags & MS_RDONLY)) {
2582 + ret = -EACCES;
2583 + goto out_bh;
2584 + }
2585 + ret = udf_load_vat(sb, i, type1_idx);
2586 + if (ret < 0)
2587 + goto out_bh;
2588 }
2589 + ret = 0;
2590 out_bh:
2591 /* In case loading failed, we handle cleanup in udf_fill_super */
2592 brelse(bh);
2593 @@ -1340,11 +1363,11 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2594 uint16_t ident;
2595 struct buffer_head *bh;
2596 unsigned int table_len;
2597 - int ret = 0;
2598 + int ret;
2599
2600 bh = udf_read_tagged(sb, block, block, &ident);
2601 if (!bh)
2602 - return 1;
2603 + return -EAGAIN;
2604 BUG_ON(ident != TAG_IDENT_LVD);
2605 lvd = (struct logicalVolDesc *)bh->b_data;
2606 table_len = le32_to_cpu(lvd->mapTableLength);
2607 @@ -1352,7 +1375,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2608 udf_err(sb, "error loading logical volume descriptor: "
2609 "Partition table too long (%u > %lu)\n", table_len,
2610 sb->s_blocksize - sizeof(*lvd));
2611 - ret = 1;
2612 + ret = -EIO;
2613 goto out_bh;
2614 }
2615
2616 @@ -1396,11 +1419,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2617 } else if (!strncmp(upm2->partIdent.ident,
2618 UDF_ID_SPARABLE,
2619 strlen(UDF_ID_SPARABLE))) {
2620 - if (udf_load_sparable_map(sb, map,
2621 - (struct sparablePartitionMap *)gpm) < 0) {
2622 - ret = 1;
2623 + ret = udf_load_sparable_map(sb, map,
2624 + (struct sparablePartitionMap *)gpm);
2625 + if (ret < 0)
2626 goto out_bh;
2627 - }
2628 } else if (!strncmp(upm2->partIdent.ident,
2629 UDF_ID_METADATA,
2630 strlen(UDF_ID_METADATA))) {
2631 @@ -1465,7 +1487,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
2632 }
2633 if (lvd->integritySeqExt.extLength)
2634 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
2635 -
2636 + ret = 0;
2637 out_bh:
2638 brelse(bh);
2639 return ret;
2640 @@ -1503,22 +1525,18 @@ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_
2641 }
2642
2643 /*
2644 - * udf_process_sequence
2645 - *
2646 - * PURPOSE
2647 - * Process a main/reserve volume descriptor sequence.
2648 - *
2649 - * PRE-CONDITIONS
2650 - * sb Pointer to _locked_ superblock.
2651 - * block First block of first extent of the sequence.
2652 - * lastblock Lastblock of first extent of the sequence.
2653 + * Process a main/reserve volume descriptor sequence.
2654 + * @block First block of first extent of the sequence.
2655 + * @lastblock Lastblock of first extent of the sequence.
2656 + * @fileset There we store extent containing root fileset
2657 *
2658 - * HISTORY
2659 - * July 1, 1997 - Andrew E. Mileski
2660 - * Written, tested, and released.
2661 + * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
2662 + * sequence
2663 */
2664 -static noinline int udf_process_sequence(struct super_block *sb, long block,
2665 - long lastblock, struct kernel_lb_addr *fileset)
2666 +static noinline int udf_process_sequence(
2667 + struct super_block *sb,
2668 + sector_t block, sector_t lastblock,
2669 + struct kernel_lb_addr *fileset)
2670 {
2671 struct buffer_head *bh = NULL;
2672 struct udf_vds_record vds[VDS_POS_LENGTH];
2673 @@ -1529,6 +1547,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
2674 uint32_t vdsn;
2675 uint16_t ident;
2676 long next_s = 0, next_e = 0;
2677 + int ret;
2678
2679 memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
2680
2681 @@ -1543,7 +1562,7 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
2682 udf_err(sb,
2683 "Block %llu of volume descriptor sequence is corrupted or we could not read it\n",
2684 (unsigned long long)block);
2685 - return 1;
2686 + return -EAGAIN;
2687 }
2688
2689 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
2690 @@ -1616,14 +1635,19 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
2691 */
2692 if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
2693 udf_err(sb, "Primary Volume Descriptor not found!\n");
2694 - return 1;
2695 + return -EAGAIN;
2696 + }
2697 + ret = udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block);
2698 + if (ret < 0)
2699 + return ret;
2700 +
2701 + if (vds[VDS_POS_LOGICAL_VOL_DESC].block) {
2702 + ret = udf_load_logicalvol(sb,
2703 + vds[VDS_POS_LOGICAL_VOL_DESC].block,
2704 + fileset);
2705 + if (ret < 0)
2706 + return ret;
2707 }
2708 - if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
2709 - return 1;
2710 -
2711 - if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
2712 - vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
2713 - return 1;
2714
2715 if (vds[VDS_POS_PARTITION_DESC].block) {
2716 /*
2717 @@ -1632,19 +1656,27 @@ static noinline int udf_process_sequence(struct super_block *sb, long block,
2718 */
2719 for (block = vds[VDS_POS_PARTITION_DESC].block;
2720 block < vds[VDS_POS_TERMINATING_DESC].block;
2721 - block++)
2722 - if (udf_load_partdesc(sb, block))
2723 - return 1;
2724 + block++) {
2725 + ret = udf_load_partdesc(sb, block);
2726 + if (ret < 0)
2727 + return ret;
2728 + }
2729 }
2730
2731 return 0;
2732 }
2733
2734 +/*
2735 + * Load Volume Descriptor Sequence described by anchor in bh
2736 + *
2737 + * Returns <0 on error, 0 on success
2738 + */
2739 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
2740 struct kernel_lb_addr *fileset)
2741 {
2742 struct anchorVolDescPtr *anchor;
2743 - long main_s, main_e, reserve_s, reserve_e;
2744 + sector_t main_s, main_e, reserve_s, reserve_e;
2745 + int ret;
2746
2747 anchor = (struct anchorVolDescPtr *)bh->b_data;
2748
2749 @@ -1662,18 +1694,26 @@ static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
2750
2751 /* Process the main & reserve sequences */
2752 /* responsible for finding the PartitionDesc(s) */
2753 - if (!udf_process_sequence(sb, main_s, main_e, fileset))
2754 - return 1;
2755 - udf_sb_free_partitions(sb);
2756 - if (!udf_process_sequence(sb, reserve_s, reserve_e, fileset))
2757 - return 1;
2758 + ret = udf_process_sequence(sb, main_s, main_e, fileset);
2759 + if (ret != -EAGAIN)
2760 + return ret;
2761 udf_sb_free_partitions(sb);
2762 - return 0;
2763 + ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
2764 + if (ret < 0) {
2765 + udf_sb_free_partitions(sb);
2766 + /* No sequence was OK, return -EIO */
2767 + if (ret == -EAGAIN)
2768 + ret = -EIO;
2769 + }
2770 + return ret;
2771 }
2772
2773 /*
2774 * Check whether there is an anchor block in the given block and
2775 * load Volume Descriptor Sequence if so.
2776 + *
2777 + * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
2778 + * block
2779 */
2780 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
2781 struct kernel_lb_addr *fileset)
2782 @@ -1685,33 +1725,40 @@ static int udf_check_anchor_block(struct super_block *sb, sector_t block,
2783 if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
2784 udf_fixed_to_variable(block) >=
2785 sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
2786 - return 0;
2787 + return -EAGAIN;
2788
2789 bh = udf_read_tagged(sb, block, block, &ident);
2790 if (!bh)
2791 - return 0;
2792 + return -EAGAIN;
2793 if (ident != TAG_IDENT_AVDP) {
2794 brelse(bh);
2795 - return 0;
2796 + return -EAGAIN;
2797 }
2798 ret = udf_load_sequence(sb, bh, fileset);
2799 brelse(bh);
2800 return ret;
2801 }
2802
2803 -/* Search for an anchor volume descriptor pointer */
2804 -static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
2805 - struct kernel_lb_addr *fileset)
2806 +/*
2807 + * Search for an anchor volume descriptor pointer.
2808 + *
2809 + * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
2810 + * of anchors.
2811 + */
2812 +static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
2813 + struct kernel_lb_addr *fileset)
2814 {
2815 sector_t last[6];
2816 int i;
2817 struct udf_sb_info *sbi = UDF_SB(sb);
2818 int last_count = 0;
2819 + int ret;
2820
2821 /* First try user provided anchor */
2822 if (sbi->s_anchor) {
2823 - if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
2824 - return lastblock;
2825 + ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
2826 + if (ret != -EAGAIN)
2827 + return ret;
2828 }
2829 /*
2830 * according to spec, anchor is in either:
2831 @@ -1720,39 +1767,46 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
2832 * lastblock
2833 * however, if the disc isn't closed, it could be 512.
2834 */
2835 - if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
2836 - return lastblock;
2837 + ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
2838 + if (ret != -EAGAIN)
2839 + return ret;
2840 /*
2841 * The trouble is which block is the last one. Drives often misreport
2842 * this so we try various possibilities.
2843 */
2844 - last[last_count++] = lastblock;
2845 - if (lastblock >= 1)
2846 - last[last_count++] = lastblock - 1;
2847 - last[last_count++] = lastblock + 1;
2848 - if (lastblock >= 2)
2849 - last[last_count++] = lastblock - 2;
2850 - if (lastblock >= 150)
2851 - last[last_count++] = lastblock - 150;
2852 - if (lastblock >= 152)
2853 - last[last_count++] = lastblock - 152;
2854 + last[last_count++] = *lastblock;
2855 + if (*lastblock >= 1)
2856 + last[last_count++] = *lastblock - 1;
2857 + last[last_count++] = *lastblock + 1;
2858 + if (*lastblock >= 2)
2859 + last[last_count++] = *lastblock - 2;
2860 + if (*lastblock >= 150)
2861 + last[last_count++] = *lastblock - 150;
2862 + if (*lastblock >= 152)
2863 + last[last_count++] = *lastblock - 152;
2864
2865 for (i = 0; i < last_count; i++) {
2866 if (last[i] >= sb->s_bdev->bd_inode->i_size >>
2867 sb->s_blocksize_bits)
2868 continue;
2869 - if (udf_check_anchor_block(sb, last[i], fileset))
2870 - return last[i];
2871 + ret = udf_check_anchor_block(sb, last[i], fileset);
2872 + if (ret != -EAGAIN) {
2873 + if (!ret)
2874 + *lastblock = last[i];
2875 + return ret;
2876 + }
2877 if (last[i] < 256)
2878 continue;
2879 - if (udf_check_anchor_block(sb, last[i] - 256, fileset))
2880 - return last[i];
2881 + ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
2882 + if (ret != -EAGAIN) {
2883 + if (!ret)
2884 + *lastblock = last[i];
2885 + return ret;
2886 + }
2887 }
2888
2889 /* Finally try block 512 in case media is open */
2890 - if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
2891 - return last[0];
2892 - return 0;
2893 + return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
2894 }
2895
2896 /*
2897 @@ -1760,54 +1814,59 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
2898 * area specified by it. The function expects sbi->s_lastblock to be the last
2899 * block on the media.
2900 *
2901 - * Return 1 if ok, 0 if not found.
2902 - *
2903 + * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
2904 + * was not found.
2905 */
2906 static int udf_find_anchor(struct super_block *sb,
2907 struct kernel_lb_addr *fileset)
2908 {
2909 - sector_t lastblock;
2910 struct udf_sb_info *sbi = UDF_SB(sb);
2911 + sector_t lastblock = sbi->s_last_block;
2912 + int ret;
2913
2914 - lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
2915 - if (lastblock)
2916 + ret = udf_scan_anchors(sb, &lastblock, fileset);
2917 + if (ret != -EAGAIN)
2918 goto out;
2919
2920 /* No anchor found? Try VARCONV conversion of block numbers */
2921 UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
2922 + lastblock = udf_variable_to_fixed(sbi->s_last_block);
2923 /* Firstly, we try to not convert number of the last block */
2924 - lastblock = udf_scan_anchors(sb,
2925 - udf_variable_to_fixed(sbi->s_last_block),
2926 - fileset);
2927 - if (lastblock)
2928 + ret = udf_scan_anchors(sb, &lastblock, fileset);
2929 + if (ret != -EAGAIN)
2930 goto out;
2931
2932 + lastblock = sbi->s_last_block;
2933 /* Secondly, we try with converted number of the last block */
2934 - lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
2935 - if (!lastblock) {
2936 + ret = udf_scan_anchors(sb, &lastblock, fileset);
2937 + if (ret < 0) {
2938 /* VARCONV didn't help. Clear it. */
2939 UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
2940 - return 0;
2941 }
2942 out:
2943 - sbi->s_last_block = lastblock;
2944 - return 1;
2945 + if (ret == 0)
2946 + sbi->s_last_block = lastblock;
2947 + return ret;
2948 }
2949
2950 /*
2951 * Check Volume Structure Descriptor, find Anchor block and load Volume
2952 - * Descriptor Sequence
2953 + * Descriptor Sequence.
2954 + *
2955 + * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
2956 + * block was not found.
2957 */
2958 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
2959 int silent, struct kernel_lb_addr *fileset)
2960 {
2961 struct udf_sb_info *sbi = UDF_SB(sb);
2962 loff_t nsr_off;
2963 + int ret;
2964
2965 if (!sb_set_blocksize(sb, uopt->blocksize)) {
2966 if (!silent)
2967 udf_warn(sb, "Bad block size\n");
2968 - return 0;
2969 + return -EINVAL;
2970 }
2971 sbi->s_last_block = uopt->lastblock;
2972 if (!uopt->novrs) {
2973 @@ -1828,12 +1887,13 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
2974
2975 /* Look for anchor block and load Volume Descriptor Sequence */
2976 sbi->s_anchor = uopt->anchor;
2977 - if (!udf_find_anchor(sb, fileset)) {
2978 - if (!silent)
2979 + ret = udf_find_anchor(sb, fileset);
2980 + if (ret < 0) {
2981 + if (!silent && ret == -EAGAIN)
2982 udf_warn(sb, "No anchor found\n");
2983 - return 0;
2984 + return ret;
2985 }
2986 - return 1;
2987 + return 0;
2988 }
2989
2990 static void udf_open_lvid(struct super_block *sb)
2991 @@ -1939,7 +1999,7 @@ u64 lvid_get_unique_id(struct super_block *sb)
2992
2993 static int udf_fill_super(struct super_block *sb, void *options, int silent)
2994 {
2995 - int ret;
2996 + int ret = -EINVAL;
2997 struct inode *inode = NULL;
2998 struct udf_options uopt;
2999 struct kernel_lb_addr rootdir, fileset;
3000 @@ -2011,7 +2071,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3001 } else {
3002 uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
3003 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
3004 - if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
3005 + if (ret == -EAGAIN && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
3006 if (!silent)
3007 pr_notice("Rescanning with blocksize %d\n",
3008 UDF_DEFAULT_BLOCKSIZE);
3009 @@ -2021,8 +2081,11 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3010 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
3011 }
3012 }
3013 - if (!ret) {
3014 - udf_warn(sb, "No partition found (1)\n");
3015 + if (ret < 0) {
3016 + if (ret == -EAGAIN) {
3017 + udf_warn(sb, "No partition found (1)\n");
3018 + ret = -EINVAL;
3019 + }
3020 goto error_out;
3021 }
3022
3023 @@ -2040,9 +2103,13 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3024 udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
3025 le16_to_cpu(lvidiu->minUDFReadRev),
3026 UDF_MAX_READ_VERSION);
3027 + ret = -EINVAL;
3028 + goto error_out;
3029 + } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION &&
3030 + !(sb->s_flags & MS_RDONLY)) {
3031 + ret = -EACCES;
3032 goto error_out;
3033 - } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
3034 - sb->s_flags |= MS_RDONLY;
3035 + }
3036
3037 sbi->s_udfrev = minUDFWriteRev;
3038
3039 @@ -2054,17 +2121,20 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3040
3041 if (!sbi->s_partitions) {
3042 udf_warn(sb, "No partition found (2)\n");
3043 + ret = -EINVAL;
3044 goto error_out;
3045 }
3046
3047 if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
3048 - UDF_PART_FLAG_READ_ONLY) {
3049 - pr_notice("Partition marked readonly; forcing readonly mount\n");
3050 - sb->s_flags |= MS_RDONLY;
3051 + UDF_PART_FLAG_READ_ONLY &&
3052 + !(sb->s_flags & MS_RDONLY)) {
3053 + ret = -EACCES;
3054 + goto error_out;
3055 }
3056
3057 if (udf_find_fileset(sb, &fileset, &rootdir)) {
3058 udf_warn(sb, "No fileset found\n");
3059 + ret = -EINVAL;
3060 goto error_out;
3061 }
3062
3063 @@ -2086,6 +2156,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3064 if (!inode) {
3065 udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n",
3066 rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
3067 + ret = -EIO;
3068 goto error_out;
3069 }
3070
3071 @@ -2093,6 +2164,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
3072 sb->s_root = d_make_root(inode);
3073 if (!sb->s_root) {
3074 udf_err(sb, "Couldn't allocate root dentry\n");
3075 + ret = -ENOMEM;
3076 goto error_out;
3077 }
3078 sb->s_maxbytes = MAX_LFS_FILESIZE;
3079 @@ -2113,7 +2185,7 @@ error_out:
3080 kfree(sbi);
3081 sb->s_fs_info = NULL;
3082
3083 - return -EINVAL;
3084 + return ret;
3085 }
3086
3087 void _udf_err(struct super_block *sb, const char *function,
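The thread running through all of these fs/udf hunks is a single calling convention: 0 for success, a negative errno for hard failure, and -EAGAIN reserved for "this candidate block or sequence was not it, try the next one", with the outermost caller converting a terminal -EAGAIN into a user-visible error. The resulting idiom, condensed from udf_load_sequence above:

	ret = udf_process_sequence(sb, main_s, main_e, fileset);
	if (ret != -EAGAIN)
		return ret;		/* 0 == loaded, <0 == hard error */
	udf_sb_free_partitions(sb);
	ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
	if (ret == -EAGAIN)
		ret = -EIO;		/* neither sequence was usable */
	return ret;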
3088 diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
3089 index 34efaf6..961013a 100644
3090 --- a/include/drm/drm_pciids.h
3091 +++ b/include/drm/drm_pciids.h
3092 @@ -1,4 +1,25 @@
3093 #define radeon_PCI_IDS \
3094 + {0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3095 + {0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3096 + {0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3097 + {0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3098 + {0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3099 + {0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3100 + {0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3101 + {0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3102 + {0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3103 + {0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3104 + {0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3105 + {0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3106 + {0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3107 + {0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3108 + {0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3109 + {0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3110 + {0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3111 + {0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3112 + {0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3113 + {0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3114 + {0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
3115 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
3116 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
3117 {0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
3118 diff --git a/include/linux/hid.h b/include/linux/hid.h
3119 index ff545cc..6e18550 100644
3120 --- a/include/linux/hid.h
3121 +++ b/include/linux/hid.h
3122 @@ -749,6 +749,10 @@ void hid_output_report(struct hid_report *report, __u8 *data);
3123 struct hid_device *hid_allocate_device(void);
3124 struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id);
3125 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size);
3126 +struct hid_report *hid_validate_values(struct hid_device *hid,
3127 + unsigned int type, unsigned int id,
3128 + unsigned int field_index,
3129 + unsigned int report_counts);
3130 int hid_open_report(struct hid_device *device);
3131 int hid_check_keys_pressed(struct hid_device *hid);
3132 int hid_connect(struct hid_device *hid, unsigned int connect_mask);
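
hid_validate_values(), declared above, looks up a report of the given type and id and checks that field_index exists with at least report_counts values, returning the report on success and NULL otherwise. A usage sketch; the function name, report id and value count below are invented for illustration:

/* Hypothetical force-feedback init: refuse to bind unless output
 * report 0 really has a field 0 with at least 7 values, instead of
 * dereferencing report->field[0]->value[] blindly. */
static int example_ff_init(struct hid_device *hid)
{
        struct hid_report *report;

        report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
        if (!report)
                return -ENODEV;

        report->field[0]->value[0] = 0x51;      /* safe to index now */
        return 0;
}
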
3133 diff --git a/include/linux/timex.h b/include/linux/timex.h
3134 index b3726e6..dd3edd7 100644
3135 --- a/include/linux/timex.h
3136 +++ b/include/linux/timex.h
3137 @@ -141,6 +141,7 @@ extern int do_adjtimex(struct timex *);
3138 extern void hardpps(const struct timespec *, const struct timespec *);
3139
3140 int read_current_timer(unsigned long *timer_val);
3141 +void ntp_notify_cmos_timer(void);
3142
3143 /* The clock frequency of the i8253/i8254 PIT */
3144 #define PIT_TICK_RATE 1193182ul
3145 diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
3146 index 321d4ac..fa8b3ad 100644
3147 --- a/include/uapi/drm/radeon_drm.h
3148 +++ b/include/uapi/drm/radeon_drm.h
3149 @@ -979,6 +979,8 @@ struct drm_radeon_cs {
3150 #define RADEON_INFO_RING_WORKING 0x15
3151 /* SI tile mode array */
3152 #define RADEON_INFO_SI_TILE_MODE_ARRAY 0x16
3153 +/* query if CP DMA is supported on the compute ring */
3154 +#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
3155
3156
3157 struct drm_radeon_info {
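
RADEON_INFO_SI_CP_DMA_COMPUTE is queried through the existing DRM_RADEON_INFO ioctl on struct drm_radeon_info, whose value member carries a userspace pointer to the result. A userspace sketch, assuming the common libdrm wrapper and header paths; error handling is trimmed:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Returns nonzero if the kernel reports CP DMA support on the compute
 * ring; older kernels reject the request, which we treat as "no". */
static int si_has_cp_dma_compute(int fd)
{
        uint32_t value = 0;
        struct drm_radeon_info info;

        memset(&info, 0, sizeof(info));
        info.request = RADEON_INFO_SI_CP_DMA_COMPUTE;
        info.value = (uintptr_t)&value;

        if (drmCommandWriteRead(fd, DRM_RADEON_INFO, &info,
                                sizeof(info)) != 0)
                return 0;
        return value != 0;
}
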
3158 diff --git a/kernel/audit.c b/kernel/audit.c
3159 index 91e53d0..7b0e23a 100644
3160 --- a/kernel/audit.c
3161 +++ b/kernel/audit.c
3162 @@ -1117,9 +1117,10 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
3163
3164 sleep_time = timeout_start + audit_backlog_wait_time -
3165 jiffies;
3166 - if ((long)sleep_time > 0)
3167 + if ((long)sleep_time > 0) {
3168 wait_for_auditd(sleep_time);
3169 - continue;
3170 + continue;
3171 + }
3172 }
3173 if (audit_rate_check() && printk_ratelimit())
3174 printk(KERN_WARNING
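
The audit change is a bracing fix: in the old code the continue was indented as if guarded by the sleep_time test but actually ran unconditionally, so once the backlog timeout had expired the loop spun without ever reaching the rate-limited warning below or giving up. With the braces, only a real sleep retries. The corrected control flow in miniature; helper names other than wait_for_auditd() are invented:

while (backlog_is_full()) {
        long sleep_time = timeout_start + backlog_wait_time - jiffies;

        if (sleep_time > 0) {
                wait_for_auditd(sleep_time);
                continue;       /* retry only after actually sleeping */
        }
        warn_rate_limited();    /* reachable again once the wait expires */
        break;
}
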
3175 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
3176 index a7959e0..25cc35d 100644
3177 --- a/kernel/sched/cputime.c
3178 +++ b/kernel/sched/cputime.c
3179 @@ -557,7 +557,7 @@ static void cputime_adjust(struct task_cputime *curr,
3180 struct cputime *prev,
3181 cputime_t *ut, cputime_t *st)
3182 {
3183 - cputime_t rtime, stime, utime, total;
3184 + cputime_t rtime, stime, utime;
3185
3186 if (vtime_accounting_enabled()) {
3187 *ut = curr->utime;
3188 @@ -565,9 +565,6 @@ static void cputime_adjust(struct task_cputime *curr,
3189 return;
3190 }
3191
3192 - stime = curr->stime;
3193 - total = stime + curr->utime;
3194 -
3195 /*
3196 * Tick based cputime accounting depend on random scheduling
3197 * timeslices of a task to be interrupted or not by the timer.
3198 @@ -588,13 +585,19 @@ static void cputime_adjust(struct task_cputime *curr,
3199 if (prev->stime + prev->utime >= rtime)
3200 goto out;
3201
3202 - if (total) {
3203 + stime = curr->stime;
3204 + utime = curr->utime;
3205 +
3206 + if (utime == 0) {
3207 + stime = rtime;
3208 + } else if (stime == 0) {
3209 + utime = rtime;
3210 + } else {
3211 + cputime_t total = stime + utime;
3212 +
3213 stime = scale_stime((__force u64)stime,
3214 (__force u64)rtime, (__force u64)total);
3215 utime = rtime - stime;
3216 - } else {
3217 - stime = rtime;
3218 - utime = 0;
3219 }
3220
3221 /*
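
The cputime_adjust() rework replaces the single total-based branch with explicit corner cases: when utime is 0 all of rtime is attributed to stime, when stime is 0 all of it to utime, and only the mixed case pays for the scaled division. Worked numbers (invented): stime = 1, utime = 3, rtime = 8 gives total = 4, so the scaled stime is 1 * 8 / 4 = 2 and utime = 8 - 2 = 6. A sketch of that proportion, with scale_stime() approximated by a plain 64-bit division:

#include <linux/math64.h>

/* Sketch only: the real scale_stime() computes the same
 * stime * rtime / total proportion while guarding against
 * overflow of the 64-bit multiplication. */
static u64 scaled_stime(u64 stime, u64 rtime, u64 total)
{
        return div64_u64(stime * rtime, total);
}
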
3222 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
3223 index 68f1609..31cbc15 100644
3224 --- a/kernel/sched/fair.c
3225 +++ b/kernel/sched/fair.c
3226 @@ -5818,11 +5818,15 @@ static void task_fork_fair(struct task_struct *p)
3227 cfs_rq = task_cfs_rq(current);
3228 curr = cfs_rq->curr;
3229
3230 - if (unlikely(task_cpu(p) != this_cpu)) {
3231 - rcu_read_lock();
3232 - __set_task_cpu(p, this_cpu);
3233 - rcu_read_unlock();
3234 - }
3235 + /*
3236 + * Not only the cpu but also the task_group of the parent might have
3237 + * been changed after parent->se.parent,cfs_rq were copied to
3238 + * child->se.parent,cfs_rq. So call __set_task_cpu() to make those
3239 + * of child point to valid ones.
3240 + */
3241 + rcu_read_lock();
3242 + __set_task_cpu(p, this_cpu);
3243 + rcu_read_unlock();
3244
3245 update_curr(cfs_rq);
3246
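
The task_fork_fair() hunk makes the __set_task_cpu() call unconditional: as the new comment says, the parent's task_group, not just its cpu, may have changed after its scheduling entities were copied into the child, so the child's se.cfs_rq and se.parent pointers must be re-derived even when task_cpu(p) == this_cpu. A trimmed sketch of the helper being relied on, as it appears in kernel/sched/sched.h of this kernel series:

static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
{
        set_task_rq(p, cpu);    /* refresh se.cfs_rq / se.parent for
                                 * p's current task_group */
#ifdef CONFIG_SMP
        smp_wmb();
        task_thread_info(p)->cpu = cpu;
#endif
}
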
3247 diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
3248 index 8f5b3b9..bb22151 100644
3249 --- a/kernel/time/ntp.c
3250 +++ b/kernel/time/ntp.c
3251 @@ -516,13 +516,13 @@ static void sync_cmos_clock(struct work_struct *work)
3252 schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
3253 }
3254
3255 -static void notify_cmos_timer(void)
3256 +void ntp_notify_cmos_timer(void)
3257 {
3258 schedule_delayed_work(&sync_cmos_work, 0);
3259 }
3260
3261 #else
3262 -static inline void notify_cmos_timer(void) { }
3263 +void ntp_notify_cmos_timer(void) { }
3264 #endif
3265
3266
3267 @@ -687,8 +687,6 @@ int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
3268 if (!(time_status & STA_NANO))
3269 txc->time.tv_usec /= NSEC_PER_USEC;
3270
3271 - notify_cmos_timer();
3272 -
3273 return result;
3274 }
3275
3276 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
3277 index 48b9fff..947ba25 100644
3278 --- a/kernel/time/timekeeping.c
3279 +++ b/kernel/time/timekeeping.c
3280 @@ -1703,6 +1703,8 @@ int do_adjtimex(struct timex *txc)
3281 write_seqcount_end(&timekeeper_seq);
3282 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
3283
3284 + ntp_notify_cmos_timer();
3285 +
3286 return ret;
3287 }
3288
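
Taken together, the timex.h, ntp.c and timekeeping.c hunks move the CMOS-sync kick out from under the time locks: __do_adjtimex() runs with timekeeper_lock held and timekeeper_seq write-held, and schedule_delayed_work() can wake the worker, which risks deadlocking against code that reads the time under that seqcount. Renaming the helper to ntp_notify_cmos_timer() and declaring it in timex.h lets do_adjtimex() issue it after both locks are dropped. The resulting ordering, in miniature:

        raw_spin_lock_irqsave(&timekeeper_lock, flags);
        write_seqcount_begin(&timekeeper_seq);

        ret = __do_adjtimex(txc, &ts, &tai);    /* adjust under the locks */

        write_seqcount_end(&timekeeper_seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);

        ntp_notify_cmos_timer();        /* schedule work, no time locks held */
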
3289 diff --git a/mm/swap.c b/mm/swap.c
3290 index 62b78a6..c899502 100644
3291 --- a/mm/swap.c
3292 +++ b/mm/swap.c
3293 @@ -31,6 +31,7 @@
3294 #include <linux/memcontrol.h>
3295 #include <linux/gfp.h>
3296 #include <linux/uio.h>
3297 +#include <linux/hugetlb.h>
3298
3299 #include "internal.h"
3300
3301 @@ -81,6 +82,19 @@ static void __put_compound_page(struct page *page)
3302
3303 static void put_compound_page(struct page *page)
3304 {
3305 + /*
3306 + * hugetlbfs pages cannot be split from under us. If this is a
3307 + * hugetlbfs page, check refcount on head page and release the page if
3308 + * the refcount becomes zero.
3309 + */
3310 + if (PageHuge(page)) {
3311 + page = compound_head(page);
3312 + if (put_page_testzero(page))
3313 + __put_compound_page(page);
3314 +
3315 + return;
3316 + }
3317 +
3318 if (unlikely(PageTail(page))) {
3319 /* __split_huge_page_refcount can run under us */
3320 struct page *page_head = compound_trans_head(page);
3321 @@ -184,38 +198,51 @@ bool __get_page_tail(struct page *page)
3322 * proper PT lock that already serializes against
3323 * split_huge_page().
3324 */
3325 - unsigned long flags;
3326 bool got = false;
3327 - struct page *page_head = compound_trans_head(page);
3328 + struct page *page_head;
3329
3330 - if (likely(page != page_head && get_page_unless_zero(page_head))) {
3331 + /*
3332 + * If this is a hugetlbfs page it cannot be split under us. Simply
3333 + * increment refcount for the head page.
3334 + */
3335 + if (PageHuge(page)) {
3336 + page_head = compound_head(page);
3337 + atomic_inc(&page_head->_count);
3338 + got = true;
3339 + } else {
3340 + unsigned long flags;
3341 +
3342 + page_head = compound_trans_head(page);
3343 + if (likely(page != page_head &&
3344 + get_page_unless_zero(page_head))) {
3345 +
3346 + /* Ref to put_compound_page() comment. */
3347 + if (PageSlab(page_head)) {
3348 + if (likely(PageTail(page))) {
3349 + __get_page_tail_foll(page, false);
3350 + return true;
3351 + } else {
3352 + put_page(page_head);
3353 + return false;
3354 + }
3355 + }
3356
3357 - /* Ref to put_compound_page() comment. */
3358 - if (PageSlab(page_head)) {
3359 + /*
3360 + * page_head wasn't a dangling pointer but it
3361 + * may not be a head page anymore by the time
3362 + * we obtain the lock. That is ok as long as it
3363 + * can't be freed from under us.
3364 + */
3365 + flags = compound_lock_irqsave(page_head);
3366 + /* here __split_huge_page_refcount won't run anymore */
3367 if (likely(PageTail(page))) {
3368 __get_page_tail_foll(page, false);
3369 - return true;
3370 - } else {
3371 - put_page(page_head);
3372 - return false;
3373 + got = true;
3374 }
3375 + compound_unlock_irqrestore(page_head, flags);
3376 + if (unlikely(!got))
3377 + put_page(page_head);
3378 }
3379 -
3380 - /*
3381 - * page_head wasn't a dangling pointer but it
3382 - * may not be a head page anymore by the time
3383 - * we obtain the lock. That is ok as long as it
3384 - * can't be freed from under us.
3385 - */
3386 - flags = compound_lock_irqsave(page_head);
3387 - /* here __split_huge_page_refcount won't run anymore */
3388 - if (likely(PageTail(page))) {
3389 - __get_page_tail_foll(page, false);
3390 - got = true;
3391 - }
3392 - compound_unlock_irqrestore(page_head, flags);
3393 - if (unlikely(!got))
3394 - put_page(page_head);
3395 }
3396 return got;
3397 }
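
Both swap.c hunks add the same short-circuit: a hugetlbfs compound page can never be split by __split_huge_page_refcount(), so its head/tail relationship is stable and both helpers may act on the head page's refcount directly, skipping the compound_lock sequence that transparent huge pages still need. The get-side fast path, distilled from the hunks (put_compound_page() mirrors it with put_page_testzero()):

        if (PageHuge(page)) {
                /* the head pointer cannot change under us for hugetlbfs */
                struct page *head = compound_head(page);

                atomic_inc(&head->_count);      /* head refcount suffices */
                return true;
        }
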
3398 diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
3399 index 57beb17..707bc52 100644
3400 --- a/net/netfilter/ipset/ip_set_hash_gen.h
3401 +++ b/net/netfilter/ipset/ip_set_hash_gen.h
3402 @@ -325,18 +325,22 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length)
3403 static void
3404 mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length)
3405 {
3406 - u8 i, j;
3407 -
3408 - for (i = 0; i < nets_length - 1 && h->nets[i].cidr != cidr; i++)
3409 - ;
3410 - h->nets[i].nets--;
3411 -
3412 - if (h->nets[i].nets != 0)
3413 - return;
3414 -
3415 - for (j = i; j < nets_length - 1 && h->nets[j].nets; j++) {
3416 - h->nets[j].cidr = h->nets[j + 1].cidr;
3417 - h->nets[j].nets = h->nets[j + 1].nets;
3418 + u8 i, j, net_end = nets_length - 1;
3419 +
3420 + for (i = 0; i < nets_length; i++) {
3421 + if (h->nets[i].cidr != cidr)
3422 + continue;
3423 + if (h->nets[i].nets > 1 || i == net_end ||
3424 + h->nets[i + 1].nets == 0) {
3425 + h->nets[i].nets--;
3426 + return;
3427 + }
3428 + for (j = i; j < net_end && h->nets[j].nets; j++) {
3429 + h->nets[j].cidr = h->nets[j + 1].cidr;
3430 + h->nets[j].nets = h->nets[j + 1].nets;
3431 + }
3432 + h->nets[j].nets = 0;
3433 + return;
3434 }
3435 }
3436 #endif
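
The rewritten mtype_del_cidr() scans every slot (the old search could fall through to the last slot without ever matching cidr and then decremented whatever was there) and keeps the nets[] bookkeeping packed: a slot whose count stays above zero after the delete, or that has no live successor, is simply decremented, while an emptied slot followed by live entries is removed by shifting them down and zeroing the tail. A standalone userspace rendition of the new loop; the type and the values are invented:

#include <stdio.h>

struct net_count {
        unsigned char cidr;     /* prefix length being tracked */
        unsigned int nets;      /* how many set entries use it */
};

/* Userspace rendition of the fixed deletion/compaction loop. */
static void del_cidr(struct net_count *n, unsigned int len, unsigned char cidr)
{
        unsigned int i, j, end = len - 1;

        for (i = 0; i < len; i++) {
                if (n[i].cidr != cidr)
                        continue;
                if (n[i].nets > 1 || i == end || n[i + 1].nets == 0) {
                        n[i].nets--;    /* slot stays, possibly emptied */
                        return;
                }
                for (j = i; j < end && n[j].nets; j++)
                        n[j] = n[j + 1];        /* close the hole */
                n[j].nets = 0;
                return;
        }
}

int main(void)
{
        struct net_count nets[4] = { {24, 2}, {16, 1}, {8, 1}, {0, 0} };
        unsigned int i;

        del_cidr(nets, 4, 16);
        for (i = 0; i < 4; i++)         /* prints: 24x2 8x1 0x0 0x0 */
                printf("%ux%u ", (unsigned)nets[i].cidr, nets[i].nets);
        printf("\n");
        return 0;
}
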
3437 diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c
3438 index af7ffd4..f1eb0d1 100644
3439 --- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
3440 +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
3441 @@ -213,6 +213,26 @@ static int gssp_call(struct net *net, struct rpc_message *msg)
3442 return status;
3443 }
3444
3445 +static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
3446 +{
3447 + int i;
3448 +
3449 + for (i = 0; i < arg->npages && arg->pages[i]; i++)
3450 + __free_page(arg->pages[i]);
3451 +}
3452 +
3453 +static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
3454 +{
3455 + arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
3456 + arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
3457 + /*
3458 + * XXX: actual pages are allocated by xdr layer in
3459 + * xdr_partial_copy_from_skb.
3460 + */
3461 + if (!arg->pages)
3462 + return -ENOMEM;
3463 + return 0;
3464 +}
3465
3466 /*
3467 * Public functions
3468 @@ -261,10 +281,16 @@ int gssp_accept_sec_context_upcall(struct net *net,
3469 arg.context_handle = &ctxh;
3470 res.output_token->len = GSSX_max_output_token_sz;
3471
3472 + ret = gssp_alloc_receive_pages(&arg);
3473 + if (ret)
3474 + return ret;
3475 +
3476 /* use nfs/ for targ_name ? */
3477
3478 ret = gssp_call(net, &msg);
3479
3480 + gssp_free_receive_pages(&arg);
3481 +
3482 /* we need to fetch all data even in case of error so
3483 * that we can free special strctures is they have been allocated */
3484 data->major_status = res.status.major_status;
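
gssp_alloc_receive_pages() sizes the receive window for the worst case the decoder now accepts: NGROUPS_MAX (65536) supplementary gids at 4 bytes each is 256 KiB, so with 4 KiB pages DIV_ROUND_UP(65536 * 4, 4096) = 64 page pointers are reserved. Only the pointer array is allocated eagerly; the pages themselves are attached by the xdr layer as the reply streams in, which is why gssp_free_receive_pages() stops at the first NULL entry. The call pattern around the upcall, in miniature:

        ret = gssp_alloc_receive_pages(&arg);   /* pointer array only */
        if (ret)
                return ret;

        ret = gssp_call(net, &msg);     /* xdr layer fills pages on demand */

        gssp_free_receive_pages(&arg);  /* releases whatever was attached */
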
3485 diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c
3486 index 3c85d1c..f0f78c5 100644
3487 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
3488 +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
3489 @@ -166,14 +166,15 @@ static int dummy_dec_opt_array(struct xdr_stream *xdr,
3490 return 0;
3491 }
3492
3493 -static int get_s32(void **p, void *max, s32 *res)
3494 +static int get_host_u32(struct xdr_stream *xdr, u32 *res)
3495 {
3496 - void *base = *p;
3497 - void *next = (void *)((char *)base + sizeof(s32));
3498 - if (unlikely(next > max || next < base))
3499 + __be32 *p;
3500 +
3501 + p = xdr_inline_decode(xdr, 4);
3502 + if (!p)
3503 return -EINVAL;
3504 - memcpy(res, base, sizeof(s32));
3505 - *p = next;
3506 + /* Contents of linux creds are all host-endian: */
3507 + memcpy(res, p, sizeof(u32));
3508 return 0;
3509 }
3510
3511 @@ -182,9 +183,9 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
3512 {
3513 u32 length;
3514 __be32 *p;
3515 - void *q, *end;
3516 - s32 tmp;
3517 - int N, i, err;
3518 + u32 tmp;
3519 + u32 N;
3520 + int i, err;
3521
3522 p = xdr_inline_decode(xdr, 4);
3523 if (unlikely(p == NULL))
3524 @@ -192,33 +193,28 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
3525
3526 length = be32_to_cpup(p);
3527
3528 - /* FIXME: we do not want to use the scratch buffer for this one
3529 - * may need to use functions that allows us to access an io vector
3530 - * directly */
3531 - p = xdr_inline_decode(xdr, length);
3532 - if (unlikely(p == NULL))
3533 + if (length > (3 + NGROUPS_MAX) * sizeof(u32))
3534 return -ENOSPC;
3535
3536 - q = p;
3537 - end = q + length;
3538 -
3539 /* uid */
3540 - err = get_s32(&q, end, &tmp);
3541 + err = get_host_u32(xdr, &tmp);
3542 if (err)
3543 return err;
3544 creds->cr_uid = make_kuid(&init_user_ns, tmp);
3545
3546 /* gid */
3547 - err = get_s32(&q, end, &tmp);
3548 + err = get_host_u32(xdr, &tmp);
3549 if (err)
3550 return err;
3551 creds->cr_gid = make_kgid(&init_user_ns, tmp);
3552
3553 /* number of additional gid's */
3554 - err = get_s32(&q, end, &tmp);
3555 + err = get_host_u32(xdr, &tmp);
3556 if (err)
3557 return err;
3558 N = tmp;
3559 + if ((3 + N) * sizeof(u32) != length)
3560 + return -EINVAL;
3561 creds->cr_group_info = groups_alloc(N);
3562 if (creds->cr_group_info == NULL)
3563 return -ENOMEM;
3564 @@ -226,7 +222,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr,
3565 /* gid's */
3566 for (i = 0; i < N; i++) {
3567 kgid_t kgid;
3568 - err = get_s32(&q, end, &tmp);
3569 + err = get_host_u32(xdr, &tmp);
3570 if (err)
3571 goto out_free_groups;
3572 err = -EINVAL;
3573 @@ -784,6 +780,9 @@ void gssx_enc_accept_sec_context(struct rpc_rqst *req,
3574 /* arg->options */
3575 err = dummy_enc_opt_array(xdr, &arg->options);
3576
3577 + xdr_inline_pages(&req->rq_rcv_buf,
3578 + PAGE_SIZE/2 /* pretty arbitrary */,
3579 + arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
3580 done:
3581 if (err)
3582 dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err);
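
On the decode side, gssx_dec_linux_creds() now pulls the creds through xdr_inline_decode() one 32-bit word at a time instead of demanding the whole blob sit contiguously in the scratch buffer, and it checks the advertised length twice: it may not exceed (3 + NGROUPS_MAX) * 4 bytes before any reads, and once N is known it must equal exactly (3 + N) * 4, that is uid + gid + count + N gids. Note that get_host_u32() copies without byte-swapping on purpose, since, per the new comment, these particular fields arrive in host order. Wire layout for a worked N = 5 example:

/*
 * creds blob, host-endian u32 words, N = 5:
 *
 *   | uid | gid | N=5 | gid0 | gid1 | gid2 | gid3 | gid4 |
 *
 * length = (3 + 5) * 4 = 32 bytes; anything else is rejected
 * with -EINVAL.
 */
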
3583 diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.h b/net/sunrpc/auth_gss/gss_rpc_xdr.h
3584 index 1c98b27..685a688 100644
3585 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.h
3586 +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h
3587 @@ -147,6 +147,8 @@ struct gssx_arg_accept_sec_context {
3588 struct gssx_cb *input_cb;
3589 u32 ret_deleg_cred;
3590 struct gssx_option_array options;
3591 + struct page **pages;
3592 + unsigned int npages;
3593 };
3594
3595 struct gssx_res_accept_sec_context {
3596 @@ -240,7 +242,8 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
3597 2 * GSSX_max_princ_sz + \
3598 8 + 8 + 4 + 4 + 4)
3599 #define GSSX_max_output_token_sz 1024
3600 -#define GSSX_max_creds_sz (4 + 4 + 4 + NGROUPS_MAX * 4)
3601 +/* grouplist not included; we allocate separate pages for that: */
3602 +#define GSSX_max_creds_sz (4 + 4 + 4 /* + NGROUPS_MAX*4 */)
3603 #define GSSX_RES_accept_sec_context_sz (GSSX_default_status_sz + \
3604 GSSX_default_ctx_sz + \
3605 GSSX_max_output_token_sz + \