Contents of /trunk/kernel26-alx/patches-2.6.27-r3/0145-2.6.27.46-all-fixes.patch
Parent Directory | Revision Log
Revision 1176 -
(show annotations)
(download)
Thu Oct 14 15:11:06 2010 UTC (13 years, 11 months ago) by niro
File size: 44616 byte(s)
-2.6.27-alx-r3: new magellan 0.5.2 kernel
1 | diff --git a/Documentation/filesystems/tmpfs.txt b/Documentation/filesystems/tmpfs.txt |
2 | index 222437e..a94fede 100644 |
3 | --- a/Documentation/filesystems/tmpfs.txt |
4 | +++ b/Documentation/filesystems/tmpfs.txt |
5 | @@ -82,11 +82,13 @@ tmpfs has a mount option to set the NUMA memory allocation policy for |
6 | all files in that instance (if CONFIG_NUMA is enabled) - which can be |
7 | adjusted on the fly via 'mount -o remount ...' |
8 | |
9 | -mpol=default prefers to allocate memory from the local node |
10 | +mpol=default use the process allocation policy |
11 | + (see set_mempolicy(2)) |
12 | mpol=prefer:Node prefers to allocate memory from the given Node |
13 | mpol=bind:NodeList allocates memory only from nodes in NodeList |
14 | mpol=interleave prefers to allocate from each node in turn |
15 | mpol=interleave:NodeList allocates from each node of NodeList in turn |
16 | +mpol=local prefers to allocate memory from the local node |
17 | |
18 | NodeList format is a comma-separated list of decimal numbers and ranges, |
19 | a range being two hyphen-separated decimal numbers, the smallest and |
20 | @@ -134,3 +136,5 @@ Author: |
21 | Christoph Rohland <cr@sap.com>, 1.12.01 |
22 | Updated: |
23 | Hugh Dickins <hugh@veritas.com>, 4 June 2007 |
24 | +Updated: |
25 | + KOSAKI Motohiro, 16 Mar 2010 |
26 | diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c |
27 | index a0e1dbe..5efd5b2 100644 |
28 | --- a/arch/x86/ia32/ia32_aout.c |
29 | +++ b/arch/x86/ia32/ia32_aout.c |
30 | @@ -324,7 +324,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs) |
31 | current->mm->free_area_cache = TASK_UNMAPPED_BASE; |
32 | current->mm->cached_hole_size = 0; |
33 | |
34 | - current->mm->mmap = NULL; |
35 | compute_creds(bprm); |
36 | current->flags &= ~PF_FORKNOEXEC; |
37 | |
38 | diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c |
39 | index 4cee61a..7981dbe 100644 |
40 | --- a/arch/x86/kvm/vmx.c |
41 | +++ b/arch/x86/kvm/vmx.c |
42 | @@ -2464,6 +2464,9 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
43 | unsigned long val; |
44 | int dr, reg; |
45 | |
46 | + if (!kvm_require_cpl(vcpu, 0)) |
47 | + return 1; |
48 | + |
49 | /* |
50 | * FIXME: this code assumes the host is debugging the guest. |
51 | * need to deal with guest debugging itself too. |
52 | diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c |
53 | index bf872f2..80ffc99 100644 |
54 | --- a/arch/x86/kvm/x86.c |
55 | +++ b/arch/x86/kvm/x86.c |
56 | @@ -198,6 +198,19 @@ static void __queue_exception(struct kvm_vcpu *vcpu) |
57 | } |
58 | |
59 | /* |
60 | + * Checks if cpl <= required_cpl; if true, return true. Otherwise queue |
61 | + * a #GP and return false. |
62 | + */ |
63 | +bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl) |
64 | +{ |
65 | + if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl) |
66 | + return true; |
67 | + kvm_queue_exception_e(vcpu, GP_VECTOR, 0); |
68 | + return false; |
69 | +} |
70 | +EXPORT_SYMBOL_GPL(kvm_require_cpl); |
71 | + |
72 | +/* |
73 | * Load the pae pdptrs. Return true is they are all valid. |
74 | */ |
75 | int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3) |
76 | @@ -3645,7 +3658,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
77 | |
78 | vcpu->arch.cr2 = sregs->cr2; |
79 | mmu_reset_needed |= vcpu->arch.cr3 != sregs->cr3; |
80 | - vcpu->arch.cr3 = sregs->cr3; |
81 | + |
82 | + down_read(&vcpu->kvm->slots_lock); |
83 | + if (gfn_to_memslot(vcpu->kvm, sregs->cr3 >> PAGE_SHIFT)) |
84 | + vcpu->arch.cr3 = sregs->cr3; |
85 | + else |
86 | + set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests); |
87 | + up_read(&vcpu->kvm->slots_lock); |
88 | |
89 | kvm_set_cr8(vcpu, sregs->cr8); |
90 | |
91 | diff --git a/arch/x86/kvm/x86_emulate.c b/arch/x86/kvm/x86_emulate.c |
92 | index f2f9046..1dc1cfd 100644 |
93 | --- a/arch/x86/kvm/x86_emulate.c |
94 | +++ b/arch/x86/kvm/x86_emulate.c |
95 | @@ -581,6 +581,9 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, |
96 | { |
97 | int rc = 0; |
98 | |
99 | + /* x86 instructions are limited to 15 bytes. */ |
100 | + if (eip + size - ctxt->decode.eip_orig > 15) |
101 | + return X86EMUL_UNHANDLEABLE; |
102 | eip += ctxt->cs_base; |
103 | while (size--) { |
104 | rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++); |
105 | @@ -839,7 +842,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) |
106 | /* Shadow copy of register state. Committed on successful emulation. */ |
107 | |
108 | memset(c, 0, sizeof(struct decode_cache)); |
109 | - c->eip = ctxt->vcpu->arch.rip; |
110 | + c->eip = c->eip_orig = ctxt->vcpu->arch.rip; |
111 | ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS); |
112 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); |
113 | |
114 | diff --git a/drivers/char/mem.c b/drivers/char/mem.c |
115 | index 672b08e..3191fc8 100644 |
116 | --- a/drivers/char/mem.c |
117 | +++ b/drivers/char/mem.c |
118 | @@ -724,6 +724,9 @@ static ssize_t read_zero(struct file * file, char __user * buf, |
119 | written += chunk - unwritten; |
120 | if (unwritten) |
121 | break; |
122 | + /* Consider changing this to just 'signal_pending()' with lots of testing */ |
123 | + if (fatal_signal_pending(current)) |
124 | + return written ? written : -EINTR; |
125 | buf += chunk; |
126 | count -= chunk; |
127 | cond_resched(); |
128 | diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c |
129 | index c4b82c7..e6788f4 100644 |
130 | --- a/drivers/char/tty_io.c |
131 | +++ b/drivers/char/tty_io.c |
132 | @@ -2437,8 +2437,10 @@ static int tty_fasync(int fd, struct file *filp, int on) |
133 | pid = task_pid(current); |
134 | type = PIDTYPE_PID; |
135 | } |
136 | - retval = __f_setown(filp, pid, type, 0); |
137 | + get_pid(pid); |
138 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); |
139 | + retval = __f_setown(filp, pid, type, 0); |
140 | + put_pid(pid); |
141 | if (retval) |
142 | goto out; |
143 | } else { |
144 | diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c |
145 | index c31afbd..11bb1fd 100644 |
146 | --- a/drivers/gpu/drm/r128/r128_cce.c |
147 | +++ b/drivers/gpu/drm/r128/r128_cce.c |
148 | @@ -353,6 +353,11 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init) |
149 | |
150 | DRM_DEBUG("\n"); |
151 | |
152 | + if (dev->dev_private) { |
153 | + DRM_DEBUG("called when already initialized\n"); |
154 | + return -EINVAL; |
155 | + } |
156 | + |
157 | dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER); |
158 | if (dev_priv == NULL) |
159 | return -ENOMEM; |
160 | @@ -651,6 +656,8 @@ int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_pri |
161 | |
162 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
163 | |
164 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
165 | + |
166 | if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { |
167 | DRM_DEBUG("while CCE running\n"); |
168 | return 0; |
169 | @@ -673,6 +680,8 @@ int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv |
170 | |
171 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
172 | |
173 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
174 | + |
175 | /* Flush any pending CCE commands. This ensures any outstanding |
176 | * commands are exectuted by the engine before we turn it off. |
177 | */ |
178 | @@ -710,10 +719,7 @@ int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_pri |
179 | |
180 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
181 | |
182 | - if (!dev_priv) { |
183 | - DRM_DEBUG("called before init done\n"); |
184 | - return -EINVAL; |
185 | - } |
186 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
187 | |
188 | r128_do_cce_reset(dev_priv); |
189 | |
190 | @@ -730,6 +736,8 @@ int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv |
191 | |
192 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
193 | |
194 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
195 | + |
196 | if (dev_priv->cce_running) { |
197 | r128_do_cce_flush(dev_priv); |
198 | } |
199 | @@ -743,6 +751,8 @@ int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_ |
200 | |
201 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
202 | |
203 | + DEV_INIT_TEST_WITH_RETURN(dev->dev_private); |
204 | + |
205 | return r128_do_engine_reset(dev); |
206 | } |
207 | |
208 | diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h |
209 | index 011105e..bc030f6 100644 |
210 | --- a/drivers/gpu/drm/r128/r128_drv.h |
211 | +++ b/drivers/gpu/drm/r128/r128_drv.h |
212 | @@ -418,6 +418,14 @@ static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv) |
213 | * Misc helper macros |
214 | */ |
215 | |
216 | +#define DEV_INIT_TEST_WITH_RETURN(_dev_priv) \ |
217 | +do { \ |
218 | + if (!_dev_priv) { \ |
219 | + DRM_ERROR("called with no initialization\n"); \ |
220 | + return -EINVAL; \ |
221 | + } \ |
222 | +} while (0) |
223 | + |
224 | #define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \ |
225 | do { \ |
226 | drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \ |
227 | diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c |
228 | index 51a9afc..7cd107f 100644 |
229 | --- a/drivers/gpu/drm/r128/r128_state.c |
230 | +++ b/drivers/gpu/drm/r128/r128_state.c |
231 | @@ -1244,14 +1244,18 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple) |
232 | static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv) |
233 | { |
234 | drm_r128_private_t *dev_priv = dev->dev_private; |
235 | - drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; |
236 | + drm_r128_sarea_t *sarea_priv; |
237 | drm_r128_clear_t *clear = data; |
238 | DRM_DEBUG("\n"); |
239 | |
240 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
241 | |
242 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
243 | + |
244 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
245 | |
246 | + sarea_priv = dev_priv->sarea_priv; |
247 | + |
248 | if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) |
249 | sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS; |
250 | |
251 | @@ -1312,6 +1316,8 @@ static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *fi |
252 | |
253 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
254 | |
255 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
256 | + |
257 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
258 | |
259 | if (!dev_priv->page_flipping) |
260 | @@ -1331,6 +1337,8 @@ static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *fi |
261 | |
262 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
263 | |
264 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
265 | + |
266 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
267 | |
268 | if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS) |
269 | @@ -1354,10 +1362,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file * |
270 | |
271 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
272 | |
273 | - if (!dev_priv) { |
274 | - DRM_ERROR("called with no initialization\n"); |
275 | - return -EINVAL; |
276 | - } |
277 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
278 | |
279 | DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n", |
280 | DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard); |
281 | @@ -1410,10 +1415,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file |
282 | |
283 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
284 | |
285 | - if (!dev_priv) { |
286 | - DRM_ERROR("called with no initialization\n"); |
287 | - return -EINVAL; |
288 | - } |
289 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
290 | |
291 | DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID, |
292 | elts->idx, elts->start, elts->end, elts->discard); |
293 | @@ -1476,6 +1478,8 @@ static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *fi |
294 | |
295 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
296 | |
297 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
298 | + |
299 | DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx); |
300 | |
301 | if (blit->idx < 0 || blit->idx >= dma->buf_count) { |
302 | @@ -1501,6 +1505,8 @@ static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *f |
303 | |
304 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
305 | |
306 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
307 | + |
308 | RING_SPACE_TEST_WITH_RETURN(dev_priv); |
309 | |
310 | ret = -EINVAL; |
311 | @@ -1531,6 +1537,8 @@ static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file |
312 | |
313 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
314 | |
315 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
316 | + |
317 | if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32))) |
318 | return -EFAULT; |
319 | |
320 | @@ -1555,10 +1563,7 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file |
321 | |
322 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
323 | |
324 | - if (!dev_priv) { |
325 | - DRM_ERROR("called with no initialization\n"); |
326 | - return -EINVAL; |
327 | - } |
328 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
329 | |
330 | DRM_DEBUG("idx=%d s=%d e=%d d=%d\n", |
331 | indirect->idx, indirect->start, indirect->end, |
332 | @@ -1620,10 +1625,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi |
333 | drm_r128_getparam_t *param = data; |
334 | int value; |
335 | |
336 | - if (!dev_priv) { |
337 | - DRM_ERROR("called with no initialization\n"); |
338 | - return -EINVAL; |
339 | - } |
340 | + DEV_INIT_TEST_WITH_RETURN(dev_priv); |
341 | |
342 | DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); |
343 | |
344 | diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c |
345 | index 93c1722..2b8f439 100644 |
346 | --- a/drivers/hwmon/coretemp.c |
347 | +++ b/drivers/hwmon/coretemp.c |
348 | @@ -191,7 +191,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device * |
349 | if (err) { |
350 | dev_warn(dev, |
351 | "Unable to access MSR 0xEE, for Tjmax, left" |
352 | - " at default"); |
353 | + " at default\n"); |
354 | } else if (eax & 0x40000000) { |
355 | tjmax = 85000; |
356 | } |
357 | diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c |
358 | index affee01..488e867 100644 |
359 | --- a/drivers/hwmon/lm78.c |
360 | +++ b/drivers/hwmon/lm78.c |
361 | @@ -655,7 +655,7 @@ static int __devinit lm78_isa_probe(struct platform_device *pdev) |
362 | |
363 | /* Reserve the ISA region */ |
364 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
365 | - if (!request_region(res->start, LM78_EXTENT, "lm78")) { |
366 | + if (!request_region(res->start + LM78_ADDR_REG_OFFSET, 2, "lm78")) { |
367 | err = -EBUSY; |
368 | goto exit; |
369 | } |
370 | @@ -699,7 +699,7 @@ static int __devinit lm78_isa_probe(struct platform_device *pdev) |
371 | device_remove_file(&pdev->dev, &dev_attr_name); |
372 | kfree(data); |
373 | exit_release_region: |
374 | - release_region(res->start, LM78_EXTENT); |
375 | + release_region(res->start + LM78_ADDR_REG_OFFSET, 2); |
376 | exit: |
377 | return err; |
378 | } |
379 | @@ -711,7 +711,7 @@ static int __devexit lm78_isa_remove(struct platform_device *pdev) |
380 | hwmon_device_unregister(data->hwmon_dev); |
381 | sysfs_remove_group(&pdev->dev.kobj, &lm78_group); |
382 | device_remove_file(&pdev->dev, &dev_attr_name); |
383 | - release_region(data->client.addr, LM78_EXTENT); |
384 | + release_region(data->client.addr + LM78_ADDR_REG_OFFSET, 2); |
385 | kfree(data); |
386 | |
387 | return 0; |
388 | @@ -836,9 +836,17 @@ static struct lm78_data *lm78_update_device(struct device *dev) |
389 | static int __init lm78_isa_found(unsigned short address) |
390 | { |
391 | int val, save, found = 0; |
392 | - |
393 | - if (!request_region(address, LM78_EXTENT, "lm78")) |
394 | - return 0; |
395 | + int port; |
396 | + |
397 | + /* Some boards declare base+0 to base+7 as a PNP device, some base+4 |
398 | + * to base+7 and some base+5 to base+6. So we better request each port |
399 | + * individually for the probing phase. */ |
400 | + for (port = address; port < address + LM78_EXTENT; port++) { |
401 | + if (!request_region(port, 1, "lm78")) { |
402 | + pr_debug("lm78: Failed to request port 0x%x\n", port); |
403 | + goto release; |
404 | + } |
405 | + } |
406 | |
407 | #define REALLY_SLOW_IO |
408 | /* We need the timeouts for at least some LM78-like |
409 | @@ -901,7 +909,8 @@ static int __init lm78_isa_found(unsigned short address) |
410 | val & 0x80 ? "LM79" : "LM78", (int)address); |
411 | |
412 | release: |
413 | - release_region(address, LM78_EXTENT); |
414 | + for (port--; port >= address; port--) |
415 | + release_region(port, 1); |
416 | return found; |
417 | } |
418 | |
419 | diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c |
420 | index a10d0d2..3d3fb00 100644 |
421 | --- a/drivers/i2c/i2c-core.c |
422 | +++ b/drivers/i2c/i2c-core.c |
423 | @@ -644,6 +644,9 @@ int i2c_del_adapter(struct i2c_adapter *adap) |
424 | } |
425 | } |
426 | |
427 | + /* device name is gone after device_unregister */ |
428 | + dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); |
429 | + |
430 | /* clean up the sysfs representation */ |
431 | init_completion(&adap->dev_released); |
432 | device_unregister(&adap->dev); |
433 | @@ -654,8 +657,6 @@ int i2c_del_adapter(struct i2c_adapter *adap) |
434 | /* free bus id */ |
435 | idr_remove(&i2c_adapter_idr, adap->nr); |
436 | |
437 | - dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); |
438 | - |
439 | /* Clear the device structure in case this adapter is ever going to be |
440 | added again */ |
441 | memset(&adap->dev, 0, sizeof(adap->dev)); |
442 | diff --git a/drivers/media/video/em28xx/em28xx-dvb.c b/drivers/media/video/em28xx/em28xx-dvb.c |
443 | index d2b1a1a..72894f4 100644 |
444 | --- a/drivers/media/video/em28xx/em28xx-dvb.c |
445 | +++ b/drivers/media/video/em28xx/em28xx-dvb.c |
446 | @@ -501,6 +501,7 @@ static int dvb_fini(struct em28xx *dev) |
447 | |
448 | if (dev->dvb) { |
449 | unregister_dvb(dev->dvb); |
450 | + kfree(dev->dvb); |
451 | dev->dvb = NULL; |
452 | } |
453 | |
454 | diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c |
455 | index 03c759b..82dfbe5 100644 |
456 | --- a/drivers/mtd/ubi/cdev.c |
457 | +++ b/drivers/mtd/ubi/cdev.c |
458 | @@ -793,7 +793,6 @@ static int ubi_cdev_ioctl(struct inode *inode, struct file *file, |
459 | break; |
460 | } |
461 | |
462 | - req.name[req.name_len] = '\0'; |
463 | err = verify_mkvol_req(ubi, &req); |
464 | if (err) |
465 | break; |
466 | diff --git a/drivers/net/b44.c b/drivers/net/b44.c |
467 | index f1521c6..012614e 100644 |
468 | --- a/drivers/net/b44.c |
469 | +++ b/drivers/net/b44.c |
470 | @@ -1502,8 +1502,7 @@ static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset) |
471 | for (k = 0; k< ethaddr_bytes; k++) { |
472 | ppattern[offset + magicsync + |
473 | (j * ETH_ALEN) + k] = macaddr[k]; |
474 | - len++; |
475 | - set_bit(len, (unsigned long *) pmask); |
476 | + set_bit(len++, (unsigned long *) pmask); |
477 | } |
478 | } |
479 | return len - 1; |
480 | diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c |
481 | index 5a07b50..598d2e9 100644 |
482 | --- a/drivers/net/bonding/bond_main.c |
483 | +++ b/drivers/net/bonding/bond_main.c |
484 | @@ -2228,6 +2228,9 @@ static int bond_miimon_inspect(struct bonding *bond) |
485 | { |
486 | struct slave *slave; |
487 | int i, link_state, commit = 0; |
488 | + bool ignore_updelay; |
489 | + |
490 | + ignore_updelay = !bond->curr_active_slave ? true : false; |
491 | |
492 | bond_for_each_slave(bond, slave, i) { |
493 | slave->new_link = BOND_LINK_NOCHANGE; |
494 | @@ -2292,6 +2295,7 @@ static int bond_miimon_inspect(struct bonding *bond) |
495 | ": %s: link status up for " |
496 | "interface %s, enabling it in %d ms.\n", |
497 | bond->dev->name, slave->dev->name, |
498 | + ignore_updelay ? 0 : |
499 | bond->params.updelay * |
500 | bond->params.miimon); |
501 | } |
502 | @@ -2310,9 +2314,13 @@ static int bond_miimon_inspect(struct bonding *bond) |
503 | continue; |
504 | } |
505 | |
506 | + if (ignore_updelay) |
507 | + slave->delay = 0; |
508 | + |
509 | if (slave->delay <= 0) { |
510 | slave->new_link = BOND_LINK_UP; |
511 | commit++; |
512 | + ignore_updelay = false; |
513 | continue; |
514 | } |
515 | |
516 | diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c |
517 | index f718215..68bba7f 100644 |
518 | --- a/drivers/net/r8169.c |
519 | +++ b/drivers/net/r8169.c |
520 | @@ -1842,9 +1842,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev) |
521 | static void rtl8169_set_rxbufsize(struct rtl8169_private *tp, |
522 | struct net_device *dev) |
523 | { |
524 | - unsigned int mtu = dev->mtu; |
525 | + unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN; |
526 | |
527 | - tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; |
528 | + tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE; |
529 | } |
530 | |
531 | static int rtl8169_open(struct net_device *dev) |
532 | diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c |
533 | index 42963a9..aa9ff46 100644 |
534 | --- a/drivers/net/sky2.c |
535 | +++ b/drivers/net/sky2.c |
536 | @@ -1438,7 +1438,6 @@ static int sky2_up(struct net_device *dev) |
537 | if (ramsize > 0) { |
538 | u32 rxspace; |
539 | |
540 | - hw->flags |= SKY2_HW_RAM_BUFFER; |
541 | pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize); |
542 | if (ramsize < 16) |
543 | rxspace = ramsize / 2; |
544 | @@ -2846,6 +2845,9 @@ static int __devinit sky2_init(struct sky2_hw *hw) |
545 | ++hw->ports; |
546 | } |
547 | |
548 | + if (sky2_read8(hw, B2_E_0)) |
549 | + hw->flags |= SKY2_HW_RAM_BUFFER; |
550 | + |
551 | return 0; |
552 | } |
553 | |
554 | diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c |
555 | index 5ac2079..7eee236 100644 |
556 | --- a/drivers/parisc/eisa_eeprom.c |
557 | +++ b/drivers/parisc/eisa_eeprom.c |
558 | @@ -55,7 +55,7 @@ static ssize_t eisa_eeprom_read(struct file * file, |
559 | ssize_t ret; |
560 | int i; |
561 | |
562 | - if (*ppos >= HPEE_MAX_LENGTH) |
563 | + if (*ppos < 0 || *ppos >= HPEE_MAX_LENGTH) |
564 | return 0; |
565 | |
566 | count = *ppos + count < HPEE_MAX_LENGTH ? count : HPEE_MAX_LENGTH - *ppos; |
567 | diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c |
568 | index 3499a9d..f84f068 100644 |
569 | --- a/drivers/serial/8250.c |
570 | +++ b/drivers/serial/8250.c |
571 | @@ -70,6 +70,9 @@ static unsigned int nr_uarts = CONFIG_SERIAL_8250_RUNTIME_UARTS; |
572 | |
573 | #define PASS_LIMIT 256 |
574 | |
575 | +#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) |
576 | + |
577 | + |
578 | /* |
579 | * We default to IRQ0 for the "no irq" hack. Some |
580 | * machine types want others as well - they're free |
581 | @@ -1656,7 +1659,7 @@ static unsigned int serial8250_tx_empty(struct uart_port *port) |
582 | up->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; |
583 | spin_unlock_irqrestore(&up->port.lock, flags); |
584 | |
585 | - return lsr & UART_LSR_TEMT ? TIOCSER_TEMT : 0; |
586 | + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; |
587 | } |
588 | |
589 | static unsigned int serial8250_get_mctrl(struct uart_port *port) |
590 | @@ -1714,8 +1717,6 @@ static void serial8250_break_ctl(struct uart_port *port, int break_state) |
591 | spin_unlock_irqrestore(&up->port.lock, flags); |
592 | } |
593 | |
594 | -#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) |
595 | - |
596 | /* |
597 | * Wait for transmitter & holding register to empty |
598 | */ |
599 | diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c |
600 | index 33b2935..8657266 100644 |
601 | --- a/drivers/usb/core/devio.c |
602 | +++ b/drivers/usb/core/devio.c |
603 | @@ -1123,6 +1123,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb, |
604 | free_async(as); |
605 | return -ENOMEM; |
606 | } |
607 | + /* Isochronous input data may end up being discontiguous |
608 | + * if some of the packets are short. Clear the buffer so |
609 | + * that the gaps don't leak kernel data to userspace. |
610 | + */ |
611 | + if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO) |
612 | + memset(as->urb->transfer_buffer, 0, |
613 | + uurb->buffer_length); |
614 | } |
615 | as->urb->dev = ps->dev; |
616 | as->urb->pipe = (uurb->type << 30) | |
617 | @@ -1224,10 +1231,14 @@ static int processcompl(struct async *as, void __user * __user *arg) |
618 | void __user *addr = as->userurb; |
619 | unsigned int i; |
620 | |
621 | - if (as->userbuffer) |
622 | - if (copy_to_user(as->userbuffer, urb->transfer_buffer, |
623 | - urb->transfer_buffer_length)) |
624 | + if (as->userbuffer && urb->actual_length) { |
625 | + if (urb->number_of_packets > 0) /* Isochronous */ |
626 | + i = urb->transfer_buffer_length; |
627 | + else /* Non-Isoc */ |
628 | + i = urb->actual_length; |
629 | + if (copy_to_user(as->userbuffer, urb->transfer_buffer, i)) |
630 | goto err_out; |
631 | + } |
632 | if (put_user(as->status, &userurb->status)) |
633 | goto err_out; |
634 | if (put_user(urb->actual_length, &userurb->actual_length)) |
635 | @@ -1246,14 +1257,11 @@ static int processcompl(struct async *as, void __user * __user *arg) |
636 | } |
637 | } |
638 | |
639 | - free_async(as); |
640 | - |
641 | if (put_user(addr, (void __user * __user *)arg)) |
642 | return -EFAULT; |
643 | return 0; |
644 | |
645 | err_out: |
646 | - free_async(as); |
647 | return -EFAULT; |
648 | } |
649 | |
650 | @@ -1283,8 +1291,11 @@ static struct async *reap_as(struct dev_state *ps) |
651 | static int proc_reapurb(struct dev_state *ps, void __user *arg) |
652 | { |
653 | struct async *as = reap_as(ps); |
654 | - if (as) |
655 | - return processcompl(as, (void __user * __user *)arg); |
656 | + if (as) { |
657 | + int retval = processcompl(as, (void __user * __user *)arg); |
658 | + free_async(as); |
659 | + return retval; |
660 | + } |
661 | if (signal_pending(current)) |
662 | return -EINTR; |
663 | return -EIO; |
664 | @@ -1292,11 +1303,16 @@ static int proc_reapurb(struct dev_state *ps, void __user *arg) |
665 | |
666 | static int proc_reapurbnonblock(struct dev_state *ps, void __user *arg) |
667 | { |
668 | + int retval; |
669 | struct async *as; |
670 | |
671 | - if (!(as = async_getcompleted(ps))) |
672 | - return -EAGAIN; |
673 | - return processcompl(as, (void __user * __user *)arg); |
674 | + as = async_getcompleted(ps); |
675 | + retval = -EAGAIN; |
676 | + if (as) { |
677 | + retval = processcompl(as, (void __user * __user *)arg); |
678 | + free_async(as); |
679 | + } |
680 | + return retval; |
681 | } |
682 | |
683 | #ifdef CONFIG_COMPAT |
684 | @@ -1347,9 +1363,9 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) |
685 | void __user *addr = as->userurb; |
686 | unsigned int i; |
687 | |
688 | - if (as->userbuffer) |
689 | + if (as->userbuffer && urb->actual_length) |
690 | if (copy_to_user(as->userbuffer, urb->transfer_buffer, |
691 | - urb->transfer_buffer_length)) |
692 | + urb->actual_length)) |
693 | return -EFAULT; |
694 | if (put_user(as->status, &userurb->status)) |
695 | return -EFAULT; |
696 | @@ -1369,7 +1385,6 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) |
697 | } |
698 | } |
699 | |
700 | - free_async(as); |
701 | if (put_user(ptr_to_compat(addr), (u32 __user *)arg)) |
702 | return -EFAULT; |
703 | return 0; |
704 | @@ -1378,8 +1393,11 @@ static int processcompl_compat(struct async *as, void __user * __user *arg) |
705 | static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) |
706 | { |
707 | struct async *as = reap_as(ps); |
708 | - if (as) |
709 | - return processcompl_compat(as, (void __user * __user *)arg); |
710 | + if (as) { |
711 | + int retval = processcompl_compat(as, (void __user * __user *)arg); |
712 | + free_async(as); |
713 | + return retval; |
714 | + } |
715 | if (signal_pending(current)) |
716 | return -EINTR; |
717 | return -EIO; |
718 | @@ -1387,11 +1405,16 @@ static int proc_reapurb_compat(struct dev_state *ps, void __user *arg) |
719 | |
720 | static int proc_reapurbnonblock_compat(struct dev_state *ps, void __user *arg) |
721 | { |
722 | + int retval; |
723 | struct async *as; |
724 | |
725 | - if (!(as = async_getcompleted(ps))) |
726 | - return -EAGAIN; |
727 | - return processcompl_compat(as, (void __user * __user *)arg); |
728 | + retval = -EAGAIN; |
729 | + as = async_getcompleted(ps); |
730 | + if (as) { |
731 | + retval = processcompl_compat(as, (void __user * __user *)arg); |
732 | + free_async(as); |
733 | + } |
734 | + return retval; |
735 | } |
736 | |
737 | #endif |
738 | diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c |
739 | index ef9b038..f1a29e2 100644 |
740 | --- a/drivers/usb/host/ehci-hub.c |
741 | +++ b/drivers/usb/host/ehci-hub.c |
742 | @@ -254,10 +254,8 @@ static int ehci_bus_resume (struct usb_hcd *hcd) |
743 | temp = ehci_readl(ehci, &ehci->regs->port_status [i]); |
744 | temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); |
745 | if (test_bit(i, &ehci->bus_suspended) && |
746 | - (temp & PORT_SUSPEND)) { |
747 | - ehci->reset_done [i] = jiffies + msecs_to_jiffies (20); |
748 | + (temp & PORT_SUSPEND)) |
749 | temp |= PORT_RESUME; |
750 | - } |
751 | ehci_writel(ehci, temp, &ehci->regs->port_status [i]); |
752 | } |
753 | i = HCS_N_PORTS (ehci->hcs_params); |
754 | @@ -752,6 +750,9 @@ static int ehci_hub_control ( |
755 | ehci_readl(ehci, status_reg)); |
756 | } |
757 | |
758 | + if (!(temp & (PORT_RESUME|PORT_RESET))) |
759 | + ehci->reset_done[wIndex] = 0; |
760 | + |
761 | /* transfer dedicated ports to the companion hc */ |
762 | if ((temp & PORT_CONNECT) && |
763 | test_bit(wIndex, &ehci->companion_ports)) { |
764 | diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c |
765 | index 097dd55..da88a80 100644 |
766 | --- a/drivers/usb/host/ehci-q.c |
767 | +++ b/drivers/usb/host/ehci-q.c |
768 | @@ -346,12 +346,11 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh) |
769 | */ |
770 | if ((token & QTD_STS_XACT) && |
771 | QTD_CERR(token) == 0 && |
772 | - --qh->xacterrs > 0 && |
773 | + ++qh->xacterrs < QH_XACTERR_MAX && |
774 | !urb->unlinked) { |
775 | ehci_dbg(ehci, |
776 | - "detected XactErr len %d/%d retry %d\n", |
777 | - qtd->length - QTD_LENGTH(token), qtd->length, |
778 | - QH_XACTERR_MAX - qh->xacterrs); |
779 | + "detected XactErr len %zu/%zu retry %d\n", |
780 | + qtd->length - QTD_LENGTH(token), qtd->length, qh->xacterrs); |
781 | |
782 | /* reset the token in the qtd and the |
783 | * qh overlay (which still contains |
784 | @@ -451,7 +450,7 @@ halt: |
785 | last = qtd; |
786 | |
787 | /* reinit the xacterr counter for the next qtd */ |
788 | - qh->xacterrs = QH_XACTERR_MAX; |
789 | + qh->xacterrs = 0; |
790 | } |
791 | |
792 | /* last urb's completion might still need calling */ |
793 | @@ -898,7 +897,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh) |
794 | head->qh_next.qh = qh; |
795 | head->hw_next = dma; |
796 | |
797 | - qh->xacterrs = QH_XACTERR_MAX; |
798 | + qh->xacterrs = 0; |
799 | qh->qh_state = QH_STATE_LINKED; |
800 | /* qtd completions reported later by interrupt */ |
801 | } |
802 | diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c |
803 | index 18e8741..e813ca8 100644 |
804 | --- a/drivers/usb/host/ehci-sched.c |
805 | +++ b/drivers/usb/host/ehci-sched.c |
806 | @@ -542,6 +542,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) |
807 | } |
808 | } |
809 | qh->qh_state = QH_STATE_LINKED; |
810 | + qh->xacterrs = 0; |
811 | qh_get (qh); |
812 | |
813 | /* update per-qh bandwidth for usbfs */ |
814 | diff --git a/fs/exec.c b/fs/exec.c |
815 | index 5ec0f56..50da182 100644 |
816 | --- a/fs/exec.c |
817 | +++ b/fs/exec.c |
818 | @@ -1826,8 +1826,9 @@ int do_coredump(long signr, int exit_code, struct pt_regs * regs) |
819 | /* |
820 | * Dont allow local users get cute and trick others to coredump |
821 | * into their pre-created files: |
822 | + * Note, this is not relevant for pipes |
823 | */ |
824 | - if (inode->i_uid != current->fsuid) |
825 | + if (!ispipe && (inode->i_uid != current->fsuid)) |
826 | goto close_fail; |
827 | if (!file->f_op) |
828 | goto close_fail; |
829 | diff --git a/fs/ext4/super.c b/fs/ext4/super.c |
830 | index db2642a..baacaf8 100644 |
831 | --- a/fs/ext4/super.c |
832 | +++ b/fs/ext4/super.c |
833 | @@ -254,7 +254,8 @@ static const char *ext4_decode_error(struct super_block *sb, int errno, |
834 | errstr = "Out of memory"; |
835 | break; |
836 | case -EROFS: |
837 | - if (!sb || EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT) |
838 | + if (!sb || (EXT4_SB(sb)->s_journal && |
839 | + EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT)) |
840 | errstr = "Journal has aborted"; |
841 | else |
842 | errstr = "Readonly filesystem"; |
843 | diff --git a/fs/fcntl.c b/fs/fcntl.c |
844 | index 4eed4d6..ac79b7e 100644 |
845 | --- a/fs/fcntl.c |
846 | +++ b/fs/fcntl.c |
847 | @@ -200,9 +200,7 @@ static int setfl(int fd, struct file * filp, unsigned long arg) |
848 | static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, |
849 | uid_t uid, uid_t euid, int force) |
850 | { |
851 | - unsigned long flags; |
852 | - |
853 | - write_lock_irqsave(&filp->f_owner.lock, flags); |
854 | + write_lock_irq(&filp->f_owner.lock); |
855 | if (force || !filp->f_owner.pid) { |
856 | put_pid(filp->f_owner.pid); |
857 | filp->f_owner.pid = get_pid(pid); |
858 | @@ -210,7 +208,7 @@ static void f_modown(struct file *filp, struct pid *pid, enum pid_type type, |
859 | filp->f_owner.uid = uid; |
860 | filp->f_owner.euid = euid; |
861 | } |
862 | - write_unlock_irqrestore(&filp->f_owner.lock, flags); |
863 | + write_unlock_irq(&filp->f_owner.lock); |
864 | } |
865 | |
866 | int __f_setown(struct file *filp, struct pid *pid, enum pid_type type, |
867 | diff --git a/fs/namei.c b/fs/namei.c |
868 | index e6c73de..832cd4b 100644 |
869 | --- a/fs/namei.c |
870 | +++ b/fs/namei.c |
871 | @@ -841,6 +841,17 @@ fail: |
872 | } |
873 | |
874 | /* |
875 | + * This is a temporary kludge to deal with "automount" symlinks; proper |
876 | + * solution is to trigger them on follow_mount(), so that do_lookup() |
877 | + * would DTRT. To be killed before 2.6.34-final. |
878 | + */ |
879 | +static inline int follow_on_final(struct inode *inode, unsigned lookup_flags) |
880 | +{ |
881 | + return inode && unlikely(inode->i_op->follow_link) && |
882 | + ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode)); |
883 | +} |
884 | + |
885 | +/* |
886 | * Name resolution. |
887 | * This is the basic name resolution function, turning a pathname into |
888 | * the final dentry. We expect 'base' to be positive and a directory. |
889 | @@ -984,8 +995,7 @@ last_component: |
890 | if (err) |
891 | break; |
892 | inode = next.dentry->d_inode; |
893 | - if ((lookup_flags & LOOKUP_FOLLOW) |
894 | - && inode && inode->i_op && inode->i_op->follow_link) { |
895 | + if (follow_on_final(inode, lookup_flags)) { |
896 | err = do_follow_link(&next, nd); |
897 | if (err) |
898 | goto return_err; |
899 | diff --git a/include/asm-x86/checksum_32.h b/include/asm-x86/checksum_32.h |
900 | index 52bbb0d..0fbf77e 100644 |
901 | --- a/include/asm-x86/checksum_32.h |
902 | +++ b/include/asm-x86/checksum_32.h |
903 | @@ -161,7 +161,8 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr, |
904 | "adcl $0, %0 ;\n" |
905 | : "=&r" (sum) |
906 | : "r" (saddr), "r" (daddr), |
907 | - "r" (htonl(len)), "r" (htonl(proto)), "0" (sum)); |
908 | + "r" (htonl(len)), "r" (htonl(proto)), "0" (sum) |
909 | + : "memory"); |
910 | |
911 | return csum_fold(sum); |
912 | } |
913 | diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h |
914 | index cf7c887..69d4de9 100644 |
915 | --- a/include/asm-x86/kvm_host.h |
916 | +++ b/include/asm-x86/kvm_host.h |
917 | @@ -537,6 +537,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr); |
918 | void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code); |
919 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2, |
920 | u32 error_code); |
921 | +bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl); |
922 | |
923 | void kvm_inject_nmi(struct kvm_vcpu *vcpu); |
924 | |
925 | diff --git a/include/asm-x86/kvm_x86_emulate.h b/include/asm-x86/kvm_x86_emulate.h |
926 | index 4e8c1e4..fcbb680 100644 |
927 | --- a/include/asm-x86/kvm_x86_emulate.h |
928 | +++ b/include/asm-x86/kvm_x86_emulate.h |
929 | @@ -128,7 +128,7 @@ struct decode_cache { |
930 | u8 seg_override; |
931 | unsigned int d; |
932 | unsigned long regs[NR_VCPU_REGS]; |
933 | - unsigned long eip; |
934 | + unsigned long eip, eip_orig; |
935 | /* modrm */ |
936 | u8 modrm; |
937 | u8 modrm_mod; |
938 | diff --git a/include/linux/sched.h b/include/linux/sched.h |
939 | index ebe801e..6d32974 100644 |
940 | --- a/include/linux/sched.h |
941 | +++ b/include/linux/sched.h |
942 | @@ -901,7 +901,7 @@ struct sched_class { |
943 | void (*yield_task) (struct rq *rq); |
944 | int (*select_task_rq)(struct task_struct *p, int sync); |
945 | |
946 | - void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); |
947 | + void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync); |
948 | |
949 | struct task_struct * (*pick_next_task) (struct rq *rq); |
950 | void (*put_prev_task) (struct rq *rq, struct task_struct *p); |
951 | @@ -2232,6 +2232,28 @@ static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) |
952 | |
953 | #define TASK_STATE_TO_CHAR_STR "RSDTtZX" |
954 | |
955 | +static inline unsigned long task_rlimit(const struct task_struct *tsk, |
956 | + unsigned int limit) |
957 | +{ |
958 | + return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur); |
959 | +} |
960 | + |
961 | +static inline unsigned long task_rlimit_max(const struct task_struct *tsk, |
962 | + unsigned int limit) |
963 | +{ |
964 | + return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max); |
965 | +} |
966 | + |
967 | +static inline unsigned long rlimit(unsigned int limit) |
968 | +{ |
969 | + return task_rlimit(current, limit); |
970 | +} |
971 | + |
972 | +static inline unsigned long rlimit_max(unsigned int limit) |
973 | +{ |
974 | + return task_rlimit_max(current, limit); |
975 | +} |
976 | + |
977 | #endif /* __KERNEL__ */ |
978 | |
979 | #endif |
980 | diff --git a/include/linux/topology.h b/include/linux/topology.h |
981 | index 2158fc0..2565f4a 100644 |
982 | --- a/include/linux/topology.h |
983 | +++ b/include/linux/topology.h |
984 | @@ -99,7 +99,7 @@ void arch_update_cpu_topology(void); |
985 | | SD_BALANCE_FORK \ |
986 | | SD_BALANCE_EXEC \ |
987 | | SD_WAKE_AFFINE \ |
988 | - | SD_WAKE_IDLE \ |
989 | + | SD_WAKE_BALANCE \ |
990 | | SD_SHARE_CPUPOWER, \ |
991 | .last_balance = jiffies, \ |
992 | .balance_interval = 1, \ |
993 | @@ -120,10 +120,10 @@ void arch_update_cpu_topology(void); |
994 | .wake_idx = 1, \ |
995 | .forkexec_idx = 1, \ |
996 | .flags = SD_LOAD_BALANCE \ |
997 | - | SD_BALANCE_NEWIDLE \ |
998 | | SD_BALANCE_FORK \ |
999 | | SD_BALANCE_EXEC \ |
1000 | | SD_WAKE_AFFINE \ |
1001 | + | SD_WAKE_BALANCE \ |
1002 | | SD_SHARE_PKG_RESOURCES\ |
1003 | | BALANCE_FOR_MC_POWER, \ |
1004 | .last_balance = jiffies, \ |
1005 | diff --git a/kernel/futex.c b/kernel/futex.c |
1006 | index ec84da5..02d07e4 100644 |
1007 | --- a/kernel/futex.c |
1008 | +++ b/kernel/futex.c |
1009 | @@ -533,8 +533,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, |
1010 | return -EINVAL; |
1011 | |
1012 | WARN_ON(!atomic_read(&pi_state->refcount)); |
1013 | - WARN_ON(pid && pi_state->owner && |
1014 | - pi_state->owner->pid != pid); |
1015 | + |
1016 | + /* |
1017 | + * When pi_state->owner is NULL then the owner died |
1018 | + * and another waiter is on the fly. pi_state->owner |
1019 | + * is fixed up by the task which acquires |
1020 | + * pi_state->rt_mutex. |
1021 | + * |
1022 | + * We do not check for pid == 0 which can happen when |
1023 | + * the owner died and robust_list_exit() cleared the |
1024 | + * TID. |
1025 | + */ |
1026 | + if (pid && pi_state->owner) { |
1027 | + /* |
1028 | + * Bail out if user space manipulated the |
1029 | + * futex value. |
1030 | + */ |
1031 | + if (pid != task_pid_vnr(pi_state->owner)) |
1032 | + return -EINVAL; |
1033 | + } |
1034 | |
1035 | atomic_inc(&pi_state->refcount); |
1036 | *ps = pi_state; |
1037 | @@ -630,6 +647,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) |
1038 | if (!pi_state) |
1039 | return -EINVAL; |
1040 | |
1041 | + /* |
1042 | + * If current does not own the pi_state then the futex is |
1043 | + * inconsistent and user space fiddled with the futex value. |
1044 | + */ |
1045 | + if (pi_state->owner != current) |
1046 | + return -EINVAL; |
1047 | + |
1048 | spin_lock(&pi_state->pi_mutex.wait_lock); |
1049 | new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); |
1050 | |
1051 | diff --git a/kernel/printk.c b/kernel/printk.c |
1052 | index 204660d..b9df41b 100644 |
1053 | --- a/kernel/printk.c |
1054 | +++ b/kernel/printk.c |
1055 | @@ -995,7 +995,7 @@ int printk_needs_cpu(int cpu) |
1056 | void wake_up_klogd(void) |
1057 | { |
1058 | if (waitqueue_active(&log_wait)) |
1059 | - __get_cpu_var(printk_pending) = 1; |
1060 | + __raw_get_cpu_var(printk_pending) = 1; |
1061 | } |
1062 | |
1063 | /** |
1064 | diff --git a/kernel/sched.c b/kernel/sched.c |
1065 | index 98c0cdc..f01ff16 100644 |
1066 | --- a/kernel/sched.c |
1067 | +++ b/kernel/sched.c |
1068 | @@ -604,9 +604,9 @@ struct rq { |
1069 | |
1070 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
1071 | |
1072 | -static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) |
1073 | +static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync) |
1074 | { |
1075 | - rq->curr->sched_class->check_preempt_curr(rq, p); |
1076 | + rq->curr->sched_class->check_preempt_curr(rq, p, sync); |
1077 | } |
1078 | |
1079 | static inline int cpu_of(struct rq *rq) |
1080 | @@ -2285,7 +2285,7 @@ out_running: |
1081 | trace_mark(kernel_sched_wakeup, |
1082 | "pid %d state %ld ## rq %p task %p rq->curr %p", |
1083 | p->pid, p->state, rq, p, rq->curr); |
1084 | - check_preempt_curr(rq, p); |
1085 | + check_preempt_curr(rq, p, sync); |
1086 | |
1087 | p->state = TASK_RUNNING; |
1088 | #ifdef CONFIG_SMP |
1089 | @@ -2420,7 +2420,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) |
1090 | trace_mark(kernel_sched_wakeup_new, |
1091 | "pid %d state %ld ## rq %p task %p rq->curr %p", |
1092 | p->pid, p->state, rq, p, rq->curr); |
1093 | - check_preempt_curr(rq, p); |
1094 | + check_preempt_curr(rq, p, 0); |
1095 | #ifdef CONFIG_SMP |
1096 | if (p->sched_class->task_wake_up) |
1097 | p->sched_class->task_wake_up(rq, p); |
1098 | @@ -2880,7 +2880,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, |
1099 | * Note that idle threads have a prio of MAX_PRIO, for this test |
1100 | * to be always true for them. |
1101 | */ |
1102 | - check_preempt_curr(this_rq, p); |
1103 | + check_preempt_curr(this_rq, p, 0); |
1104 | } |
1105 | |
1106 | /* |
1107 | @@ -5957,7 +5957,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) |
1108 | set_task_cpu(p, dest_cpu); |
1109 | if (on_rq) { |
1110 | activate_task(rq_dest, p, 0); |
1111 | - check_preempt_curr(rq_dest, p); |
1112 | + check_preempt_curr(rq_dest, p, 0); |
1113 | } |
1114 | done: |
1115 | ret = 1; |
1116 | diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c |
1117 | index fb8994c..ab8e6f3 100644 |
1118 | --- a/kernel/sched_fair.c |
1119 | +++ b/kernel/sched_fair.c |
1120 | @@ -1331,7 +1331,7 @@ static inline int depth_se(struct sched_entity *se) |
1121 | /* |
1122 | * Preempt the current task with a newly woken task if needed: |
1123 | */ |
1124 | -static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) |
1125 | +static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) |
1126 | { |
1127 | struct task_struct *curr = rq->curr; |
1128 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
1129 | @@ -1360,6 +1360,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) |
1130 | if (!sched_feat(WAKEUP_PREEMPT)) |
1131 | return; |
1132 | |
1133 | + if (sched_feat(WAKEUP_OVERLAP) && sync && |
1134 | + se->avg_overlap < sysctl_sched_migration_cost && |
1135 | + pse->avg_overlap < sysctl_sched_migration_cost) { |
1136 | + resched_task(curr); |
1137 | + return; |
1138 | + } |
1139 | + |
1140 | /* |
1141 | * preemption test can be made between sibling entities who are in the |
1142 | * same cfs_rq i.e who have a common parent. Walk up the hierarchy of |
1143 | @@ -1642,7 +1649,7 @@ static void prio_changed_fair(struct rq *rq, struct task_struct *p, |
1144 | if (p->prio > oldprio) |
1145 | resched_task(rq->curr); |
1146 | } else |
1147 | - check_preempt_curr(rq, p); |
1148 | + check_preempt_curr(rq, p, 0); |
1149 | } |
1150 | |
1151 | /* |
1152 | @@ -1659,7 +1666,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p, |
1153 | if (running) |
1154 | resched_task(rq->curr); |
1155 | else |
1156 | - check_preempt_curr(rq, p); |
1157 | + check_preempt_curr(rq, p, 0); |
1158 | } |
1159 | |
1160 | /* Account for a task changing its policy or group. |
1161 | diff --git a/kernel/sched_features.h b/kernel/sched_features.h |
1162 | index c4c88ae..4e51893 100644 |
1163 | --- a/kernel/sched_features.h |
1164 | +++ b/kernel/sched_features.h |
1165 | @@ -11,3 +11,4 @@ SCHED_FEAT(ASYM_GRAN, 1) |
1166 | SCHED_FEAT(LB_BIAS, 1) |
1167 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) |
1168 | SCHED_FEAT(ASYM_EFF_LOAD, 1) |
1169 | +SCHED_FEAT(WAKEUP_OVERLAP, 1) |
1170 | diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c |
1171 | index 3a4f92d..dec4cca 100644 |
1172 | --- a/kernel/sched_idletask.c |
1173 | +++ b/kernel/sched_idletask.c |
1174 | @@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sync) |
1175 | /* |
1176 | * Idle tasks are unconditionally rescheduled: |
1177 | */ |
1178 | -static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p) |
1179 | +static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync) |
1180 | { |
1181 | resched_task(rq->idle); |
1182 | } |
1183 | @@ -76,7 +76,7 @@ static void switched_to_idle(struct rq *rq, struct task_struct *p, |
1184 | if (running) |
1185 | resched_task(rq->curr); |
1186 | else |
1187 | - check_preempt_curr(rq, p); |
1188 | + check_preempt_curr(rq, p, 0); |
1189 | } |
1190 | |
1191 | static void prio_changed_idle(struct rq *rq, struct task_struct *p, |
1192 | @@ -93,7 +93,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p, |
1193 | if (p->prio > oldprio) |
1194 | resched_task(rq->curr); |
1195 | } else |
1196 | - check_preempt_curr(rq, p); |
1197 | + check_preempt_curr(rq, p, 0); |
1198 | } |
1199 | |
1200 | /* |
1201 | diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c |
1202 | index 37f0721..68c4745 100644 |
1203 | --- a/kernel/sched_rt.c |
1204 | +++ b/kernel/sched_rt.c |
1205 | @@ -784,7 +784,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) |
1206 | /* |
1207 | * Preempt the current task with a newly woken task if needed: |
1208 | */ |
1209 | -static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) |
1210 | +static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync) |
1211 | { |
1212 | if (p->prio < rq->curr->prio) { |
1213 | resched_task(rq->curr); |
1214 | diff --git a/mm/mempolicy.c b/mm/mempolicy.c |
1215 | index 7acf81c..b759b7d 100644 |
1216 | --- a/mm/mempolicy.c |
1217 | +++ b/mm/mempolicy.c |
1218 | @@ -2029,8 +2029,8 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) |
1219 | char *rest = nodelist; |
1220 | while (isdigit(*rest)) |
1221 | rest++; |
1222 | - if (!*rest) |
1223 | - err = 0; |
1224 | + if (*rest) |
1225 | + goto out; |
1226 | } |
1227 | break; |
1228 | case MPOL_INTERLEAVE: |
1229 | @@ -2039,7 +2039,6 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) |
1230 | */ |
1231 | if (!nodelist) |
1232 | nodes = node_states[N_HIGH_MEMORY]; |
1233 | - err = 0; |
1234 | break; |
1235 | case MPOL_LOCAL: |
1236 | /* |
1237 | @@ -2049,11 +2048,19 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) |
1238 | goto out; |
1239 | mode = MPOL_PREFERRED; |
1240 | break; |
1241 | - |
1242 | - /* |
1243 | - * case MPOL_BIND: mpol_new() enforces non-empty nodemask. |
1244 | - * case MPOL_DEFAULT: mpol_new() enforces empty nodemask, ignores flags. |
1245 | - */ |
1246 | + case MPOL_DEFAULT: |
1247 | + /* |
1248 | + * Insist on a empty nodelist |
1249 | + */ |
1250 | + if (!nodelist) |
1251 | + err = 0; |
1252 | + goto out; |
1253 | + case MPOL_BIND: |
1254 | + /* |
1255 | + * Insist on a nodelist |
1256 | + */ |
1257 | + if (!nodelist) |
1258 | + goto out; |
1259 | } |
1260 | |
1261 | mode_flags = 0; |
1262 | @@ -2067,14 +2074,17 @@ int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context) |
1263 | else if (!strcmp(flags, "relative")) |
1264 | mode_flags |= MPOL_F_RELATIVE_NODES; |
1265 | else |
1266 | - err = 1; |
1267 | + goto out; |
1268 | } |
1269 | |
1270 | new = mpol_new(mode, mode_flags, &nodes); |
1271 | if (IS_ERR(new)) |
1272 | - err = 1; |
1273 | - else if (no_context) |
1274 | - new->w.user_nodemask = nodes; /* save for contextualization */ |
1275 | + goto out; |
1276 | + err = 0; |
1277 | + if (no_context) { |
1278 | + /* save for contextualization */ |
1279 | + new->w.user_nodemask = nodes; |
1280 | + } |
1281 | |
1282 | out: |
1283 | /* Restore string for error message */ |
1284 | diff --git a/mm/migrate.c b/mm/migrate.c |
1285 | index d493c02..96178f4 100644 |
1286 | --- a/mm/migrate.c |
1287 | +++ b/mm/migrate.c |
1288 | @@ -1062,6 +1062,9 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, |
1289 | goto out; |
1290 | |
1291 | err = -ENODEV; |
1292 | + if (node < 0 || node >= MAX_NUMNODES) |
1293 | + goto out; |
1294 | + |
1295 | if (!node_state(node, N_HIGH_MEMORY)) |
1296 | goto out; |
1297 | |
1298 | diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c |
1299 | index cd9d526..2268a7e 100644 |
1300 | --- a/net/ax25/af_ax25.c |
1301 | +++ b/net/ax25/af_ax25.c |
1302 | @@ -894,7 +894,6 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) |
1303 | |
1304 | sock_init_data(NULL, sk); |
1305 | |
1306 | - sk->sk_destruct = ax25_free_sock; |
1307 | sk->sk_type = osk->sk_type; |
1308 | sk->sk_priority = osk->sk_priority; |
1309 | sk->sk_protocol = osk->sk_protocol; |
1310 | @@ -932,6 +931,7 @@ struct sock *ax25_make_new(struct sock *osk, struct ax25_dev *ax25_dev) |
1311 | } |
1312 | |
1313 | sk->sk_protinfo = ax25; |
1314 | + sk->sk_destruct = ax25_free_sock; |
1315 | ax25->sk = sk; |
1316 | |
1317 | return sk; |
1318 | diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c |
1319 | index f976fc5..6d108fb 100644 |
1320 | --- a/net/ipv4/tcp_minisocks.c |
1321 | +++ b/net/ipv4/tcp_minisocks.c |
1322 | @@ -362,7 +362,7 @@ void tcp_twsk_destructor(struct sock *sk) |
1323 | #ifdef CONFIG_TCP_MD5SIG |
1324 | struct tcp_timewait_sock *twsk = tcp_twsk(sk); |
1325 | if (twsk->tw_md5_keylen) |
1326 | - tcp_put_md5sig_pool(); |
1327 | + tcp_free_md5sig_pool(); |
1328 | #endif |
1329 | } |
1330 | |
1331 | diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c |
1332 | index b7a50e9..0a913c9 100644 |
1333 | --- a/net/ipv6/sit.c |
1334 | +++ b/net/ipv6/sit.c |
1335 | @@ -260,7 +260,7 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, |
1336 | |
1337 | c = 0; |
1338 | for (prl = t->prl; prl; prl = prl->next) { |
1339 | - if (c > cmax) |
1340 | + if (c >= cmax) |
1341 | break; |
1342 | if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) |
1343 | continue; |
1344 | diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c |
1345 | index 1122c95..2b801a0 100644 |
1346 | --- a/net/sched/sch_api.c |
1347 | +++ b/net/sched/sch_api.c |
1348 | @@ -1453,6 +1453,8 @@ static int tc_fill_tclass(struct sk_buff *skb, struct Qdisc *q, |
1349 | nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); |
1350 | tcm = NLMSG_DATA(nlh); |
1351 | tcm->tcm_family = AF_UNSPEC; |
1352 | + tcm->tcm__pad1 = 0; |
1353 | + tcm->tcm__pad2 = 0; |
1354 | tcm->tcm_ifindex = qdisc_dev(q)->ifindex; |
1355 | tcm->tcm_parent = q->handle; |
1356 | tcm->tcm_handle = q->handle; |
1357 | diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c |
1358 | index facdaa9..3c33817 100644 |
1359 | --- a/net/unix/af_unix.c |
1360 | +++ b/net/unix/af_unix.c |
1361 | @@ -1491,6 +1491,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, |
1362 | struct sk_buff *skb; |
1363 | int sent=0; |
1364 | struct scm_cookie tmp_scm; |
1365 | + bool fds_sent = false; |
1366 | |
1367 | if (NULL == siocb->scm) |
1368 | siocb->scm = &tmp_scm; |
1369 | @@ -1552,12 +1553,14 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, |
1370 | size = min_t(int, size, skb_tailroom(skb)); |
1371 | |
1372 | memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); |
1373 | - if (siocb->scm->fp) { |
1374 | + /* Only send the fds in the first buffer */ |
1375 | + if (siocb->scm->fp && !fds_sent) { |
1376 | err = unix_attach_fds(siocb->scm, skb); |
1377 | if (err) { |
1378 | kfree_skb(skb); |
1379 | goto out_err; |
1380 | } |
1381 | + fds_sent = true; |
1382 | } |
1383 | |
1384 | if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) { |