Contents of /trunk/kernel-lts/patches-3.4/0151-3.4.52-all-fixes.patch
Revision 2238
Mon Jul 15 12:13:41 2013 UTC by niro
File size: 22670 byte(s)
-linux-3.4.52
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 42dec04..0a5e8a5 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -305,9 +305,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }

 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);

 #define flush_dcache_mmap_lock(mapping) \
         spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 40ca11e..8f0d285 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -299,6 +299,39 @@ void flush_dcache_page(struct page *page)
 EXPORT_SYMBOL(flush_dcache_page);

 /*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation. Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+        if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+                struct address_space *mapping;
+
+                mapping = page_mapping(page);
+
+                if (!mapping || mapping_mapped(mapping)) {
+                        void *addr;
+
+                        addr = page_address(page);
+                        /*
+                         * kmap_atomic() doesn't set the page virtual
+                         * address for highmem pages, and
+                         * kunmap_atomic() takes care of cache
+                         * flushing already.
+                         */
+                        if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+                                __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                }
+        }
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+/*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data. The expected sequence is:
  *
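
The ARM change above turns flush_kernel_dcache_page() from a no-op into a real flush on aliasing VIVT/VIPT caches. For context, the driver-side pattern that depends on it (per Documentation/cachetlb.txt) looks roughly like the sketch below; the helper name is hypothetical and this is not part of the patch:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical driver helper: write into a page-cache page through the
 * kernel mapping, then make the data visible to user-space mappings. */
static void fill_page_for_userspace(struct page *page,
                                    const void *src, size_t len)
{
        void *vaddr = kmap_atomic(page);

        memcpy(vaddr, src, len);
        /* Push dirty kernel-mapping cache lines out while the page is
         * still pinned; a no-op before this fix, a real flush now. */
        flush_kernel_dcache_page(page);
        kunmap_atomic(vaddr);
}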
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d51225f..eb5293a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
 }
 EXPORT_SYMBOL(flush_dcache_page);

+void flush_kernel_dcache_page(struct page *page)
+{
+        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long uaddr, void *dst, const void *src,
                        unsigned long len)
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 147614e..6a8a382 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -384,21 +384,37 @@ static int dlci_del(struct dlci_add *dlci)
         struct frad_local *flp;
         struct net_device *master, *slave;
         int err;
+        bool found = false;
+
+        rtnl_lock();

         /* validate slave device */
         master = __dev_get_by_name(&init_net, dlci->devname);
-        if (!master)
-                return -ENODEV;
+        if (!master) {
+                err = -ENODEV;
+                goto out;
+        }
+
+        list_for_each_entry(dlp, &dlci_devs, list) {
+                if (dlp->master == master) {
+                        found = true;
+                        break;
+                }
+        }
+        if (!found) {
+                err = -ENODEV;
+                goto out;
+        }

         if (netif_running(master)) {
-                return -EBUSY;
+                err = -EBUSY;
+                goto out;
         }

         dlp = netdev_priv(master);
         slave = dlp->slave;
         flp = netdev_priv(slave);

-        rtnl_lock();
         err = (*flp->deassoc)(slave, master);
         if (!err) {
                 list_del(&dlp->list);
@@ -407,8 +423,8 @@ static int dlci_del(struct dlci_add *dlci)

                 dev_put(slave);
         }
+out:
         rtnl_unlock();
-
         return err;
 }

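The dlci_del() fix matters because __dev_get_by_name() returns whatever net_device carries that name; the old code then called netdev_priv() on it and interpreted foreign private data whenever the name did not belong to a DLCI master. The rewrite scans dlci_devs under rtnl_lock() before trusting the device. A minimal userspace sketch of the same validate-before-trusting pattern (all names hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct device { const char *name; void *priv; struct device *next; };

static struct device *registry;   /* every device in the system */
static struct device *ours[16];   /* devices this driver created */
static size_t nours;

static struct device *find_by_name(const char *name)
{
        for (struct device *d = registry; d; d = d->next)
                if (strcmp(d->name, name) == 0)
                        return d;
        return NULL;
}

static void *get_priv_checked(const char *name)
{
        struct device *d = find_by_name(name);

        if (!d)
                return NULL;
        for (size_t i = 0; i < nours; i++)
                if (ours[i] == d)
                        return d->priv;   /* confirmed ours: safe to use */
        return NULL;                      /* same name, wrong device type */
}
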
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 7d47514..2e99f79 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1034,22 +1034,37 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
 static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr)
 {
         u8 fcr = ioread8(priv->membase + UART_FCR);
+        struct uart_port *port = &priv->port;
+        struct tty_struct *tty = tty_port_tty_get(&port->state->port);
+        char *error_msg[5] = {};
+        int i = 0;

         /* Reset FIFO */
         fcr |= UART_FCR_CLEAR_RCVR;
         iowrite8(fcr, priv->membase + UART_FCR);

         if (lsr & PCH_UART_LSR_ERR)
-                dev_err(&priv->pdev->dev, "Error data in FIFO\n");
+                error_msg[i++] = "Error data in FIFO\n";

-        if (lsr & UART_LSR_FE)
-                dev_err(&priv->pdev->dev, "Framing Error\n");
+        if (lsr & UART_LSR_FE) {
+                port->icount.frame++;
+                error_msg[i++] = " Framing Error\n";
+        }

-        if (lsr & UART_LSR_PE)
-                dev_err(&priv->pdev->dev, "Parity Error\n");
+        if (lsr & UART_LSR_PE) {
+                port->icount.parity++;
+                error_msg[i++] = " Parity Error\n";
+        }

-        if (lsr & UART_LSR_OE)
-                dev_err(&priv->pdev->dev, "Overrun Error\n");
+        if (lsr & UART_LSR_OE) {
+                port->icount.overrun++;
+                error_msg[i++] = " Overrun Error\n";
+        }
+
+        if (tty == NULL) {
+                for (i = 0; error_msg[i] != NULL; i++)
+                        dev_err(&priv->pdev->dev, error_msg[i]);
+        }
 }

 static irqreturn_t pch_uart_interrupt(int irq, void *dev_id)
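
In the pch_uart change the error counters are now bumped unconditionally, while the log messages are first collected and only written out when no tty is attached to the port, presumably so that dev_err() is not re-entered from the UART's own interrupt path when that UART is also the console. A userspace sketch of the collect-now-emit-later idiom (the bit masks mirror the 16550 LSR definitions; everything else is hypothetical):

#include <stdio.h>

static void handle_lsr(unsigned int lsr, int safe_to_print)
{
        const char *msg[5] = { 0 };   /* up to 4 messages + NULL sentinel */
        int i = 0;

        if (lsr & 0x80) msg[i++] = "Error data in FIFO";   /* FIFO error */
        if (lsr & 0x08) msg[i++] = "Framing Error";        /* UART_LSR_FE */
        if (lsr & 0x04) msg[i++] = "Parity Error";         /* UART_LSR_PE */
        if (lsr & 0x02) msg[i++] = "Overrun Error";        /* UART_LSR_OE */

        if (safe_to_print)
                for (i = 0; msg[i]; i++)
                        puts(msg[i]);
}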
diff --git a/fs/exec.c b/fs/exec.c
index 2b7f5ff..0ea0b4c 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1163,13 +1163,6 @@ void setup_new_exec(struct linux_binprm * bprm)
                 set_dumpable(current->mm, suid_dumpable);
         }

-        /*
-         * Flush performance counters when crossing a
-         * security domain:
-         */
-        if (!get_dumpable(current->mm))
-                perf_event_exit_task(current);
-
         /* An exec changes our domain. We are no longer part of the thread
            group */

@@ -1233,6 +1226,15 @@ void install_exec_creds(struct linux_binprm *bprm)

         commit_creds(bprm->cred);
         bprm->cred = NULL;
+
+        /*
+         * Disable monitoring for regular users
+         * when executing setuid binaries. Must
+         * wait until new credentials are committed
+         * by commit_creds() above
+         */
+        if (get_dumpable(current->mm) != SUID_DUMP_USER)
+                perf_event_exit_task(current);
         /*
          * cred_guard_mutex must be held at least to this point to prevent
          * ptrace_attach() from altering our determination of the task's
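
The fs/exec.c hunks move the perf teardown from setup_new_exec() to install_exec_creds(): dumpability is only settled once commit_creds() has installed the new credentials, so checking earlier could leave a counter owned by the original user attached across a setuid exec. A hypothetical userspace program showing the observable effect (perf_event_open(2) and its attr fields are real; the target binary is arbitrary):

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;
        attr.enable_on_exec = 1;

        /* Self-monitoring counter that would normally follow the exec. */
        syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);

        /* With this fix, the counter is torn down after commit_creds()
         * because the setuid image is not dumpable for this user. */
        execl("/usr/bin/passwd", "passwd", "-S", (char *)NULL);
        return 1;
}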
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 8640a12..25c472b 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -357,31 +357,50 @@ static unsigned int vfs_dent_type(uint8_t type)
 static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 {
         int err, over = 0;
+        loff_t pos = file->f_pos;
         struct qstr nm;
         union ubifs_key key;
         struct ubifs_dent_node *dent;
         struct inode *dir = file->f_path.dentry->d_inode;
         struct ubifs_info *c = dir->i_sb->s_fs_info;

-        dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+        dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);

-        if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
+        if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
                 /*
                  * The directory was seek'ed to a senseless position or there
                  * are no more entries.
                  */
                 return 0;

+        if (file->f_version == 0) {
+                /*
+                 * The file was seek'ed, which means that @file->private_data
+                 * is now invalid. This may also be just the first
+                 * 'ubifs_readdir()' invocation, in which case
+                 * @file->private_data is NULL, and the below code is
+                 * basically a no-op.
+                 */
+                kfree(file->private_data);
+                file->private_data = NULL;
+        }
+
+        /*
+         * 'generic_file_llseek()' unconditionally sets @file->f_version to
+         * zero, and we use this for detecting whether the file was seek'ed.
+         */
+        file->f_version = 1;
+
         /* File positions 0 and 1 correspond to "." and ".." */
-        if (file->f_pos == 0) {
+        if (pos == 0) {
                 ubifs_assert(!file->private_data);
                 over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
                 if (over)
                         return 0;
-                file->f_pos = 1;
+                file->f_pos = pos = 1;
         }

-        if (file->f_pos == 1) {
+        if (pos == 1) {
                 ubifs_assert(!file->private_data);
                 over = filldir(dirent, "..", 2, 1,
                                parent_ino(file->f_path.dentry), DT_DIR);
@@ -397,7 +416,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                         goto out;
                 }

-                file->f_pos = key_hash_flash(c, &dent->key);
+                file->f_pos = pos = key_hash_flash(c, &dent->key);
                 file->private_data = dent;
         }

@@ -405,17 +424,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
         if (!dent) {
                 /*
                  * The directory was seek'ed to and is now readdir'ed.
-                 * Find the entry corresponding to @file->f_pos or the
-                 * closest one.
+                 * Find the entry corresponding to @pos or the closest one.
                  */
-                dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
+                dent_key_init_hash(c, &key, dir->i_ino, pos);
                 nm.name = NULL;
                 dent = ubifs_tnc_next_ent(c, &key, &nm);
                 if (IS_ERR(dent)) {
                         err = PTR_ERR(dent);
                         goto out;
                 }
-                file->f_pos = key_hash_flash(c, &dent->key);
+                file->f_pos = pos = key_hash_flash(c, &dent->key);
                 file->private_data = dent;
         }

@@ -427,7 +445,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                                ubifs_inode(dir)->creat_sqnum);

                 nm.len = le16_to_cpu(dent->nlen);
-                over = filldir(dirent, dent->name, nm.len, file->f_pos,
+                over = filldir(dirent, dent->name, nm.len, pos,
                                le64_to_cpu(dent->inum),
                                vfs_dent_type(dent->type));
                 if (over)
@@ -443,9 +461,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
                 }

                 kfree(file->private_data);
-                file->f_pos = key_hash_flash(c, &dent->key);
+                file->f_pos = pos = key_hash_flash(c, &dent->key);
                 file->private_data = dent;
                 cond_resched();
+
+                if (file->f_version == 0)
+                        /*
+                         * The file was seek'ed meanwhile, lets return and start
+                         * reading direntries from the new position on the next
+                         * invocation.
+                         */
+                        return 0;
         }

 out:
@@ -456,15 +482,13 @@ out:

         kfree(file->private_data);
         file->private_data = NULL;
+        /* 2 is a special value indicating that there are no more direntries */
         file->f_pos = 2;
         return 0;
 }

-/* If a directory is seeked, we have to free saved readdir() state */
 static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin)
 {
-        kfree(file->private_data);
-        file->private_data = NULL;
         return generic_file_llseek(file, offset, origin);
 }

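The UBIFS fix replaces freeing the saved readdir state in llseek(), which could free file->private_data while a concurrent ubifs_readdir() was still using it, with a lock-free version stamp: generic_file_llseek() zeroes file->f_version, readdir sets it back to 1, and a zero observed mid-walk means a seek slipped in and the cached cursor is stale. A minimal single-threaded analog of that protocol, with hypothetical names:

#include <stdlib.h>

struct dir_stream {
        unsigned long version;  /* 0 == "seek happened / first call" */
        void *cursor;           /* saved readdir state, owned here   */
        long long pos;
};

static void stream_seek(struct dir_stream *s, long long pos)
{
        s->pos = pos;
        s->version = 0;         /* only invalidate; never free here  */
}

/* Returns 1 while entries remain, 0 when the caller should re-enter. */
static int stream_read_one(struct dir_stream *s)
{
        if (s->version == 0) {  /* stale (or first call): drop state */
                free(s->cursor);
                s->cursor = NULL;
        }
        s->version = 1;

        /* ... look up the entry at s->pos, emit it, advance s->pos,
         * update s->cursor ... */

        if (s->version == 0)    /* a seek raced in: restart next call */
                return 0;
        return 1;
}
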
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 8e9a069..89fb74e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -950,8 +950,7 @@ struct perf_event {
         /* mmap bits */
         struct mutex                    mmap_mutex;
         atomic_t                        mmap_count;
-        int                             mmap_locked;
-        struct user_struct              *mmap_user;
+
         struct ring_buffer              *rb;
         struct list_head                rb_entry;

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 839a24f..7ceb270 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -193,9 +193,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);

-static void ring_buffer_attach(struct perf_event *event,
-                               struct ring_buffer *rb);
-
 void __weak perf_event_print_debug(void) { }

 extern __weak const char *perf_pmu_name(void)
@@ -2849,6 +2846,7 @@ static void free_event_rcu(struct rcu_head *head)
 }

 static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);

 static void free_event(struct perf_event *event)
 {
@@ -2873,15 +2871,30 @@ static void free_event(struct perf_event *event)
                 if (has_branch_stack(event)) {
                         static_key_slow_dec_deferred(&perf_sched_events);
                         /* is system-wide event */
-                        if (!(event->attach_state & PERF_ATTACH_TASK))
+                        if (!(event->attach_state & PERF_ATTACH_TASK)) {
                                 atomic_dec(&per_cpu(perf_branch_stack_events,
                                                     event->cpu));
+                        }
                 }
         }

         if (event->rb) {
-                ring_buffer_put(event->rb);
-                event->rb = NULL;
+                struct ring_buffer *rb;
+
+                /*
+                 * Can happen when we close an event with re-directed output.
+                 *
+                 * Since we have a 0 refcount, perf_mmap_close() will skip
+                 * over us; possibly making our ring_buffer_put() the last.
+                 */
+                mutex_lock(&event->mmap_mutex);
+                rb = event->rb;
+                if (rb) {
+                        rcu_assign_pointer(event->rb, NULL);
+                        ring_buffer_detach(event, rb);
+                        ring_buffer_put(rb); /* could be last */
+                }
+                mutex_unlock(&event->mmap_mutex);
         }

         if (is_cgroup_event(event))
@@ -3119,30 +3132,13 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
         unsigned int events = POLL_HUP;

         /*
-         * Race between perf_event_set_output() and perf_poll(): perf_poll()
-         * grabs the rb reference but perf_event_set_output() overrides it.
-         * Here is the timeline for two threads T1, T2:
-         * t0: T1, rb = rcu_dereference(event->rb)
-         * t1: T2, old_rb = event->rb
-         * t2: T2, event->rb = new rb
-         * t3: T2, ring_buffer_detach(old_rb)
-         * t4: T1, ring_buffer_attach(rb1)
-         * t5: T1, poll_wait(event->waitq)
-         *
-         * To avoid this problem, we grab mmap_mutex in perf_poll()
-         * thereby ensuring that the assignment of the new ring buffer
-         * and the detachment of the old buffer appear atomic to perf_poll()
+         * Pin the event->rb by taking event->mmap_mutex; otherwise
+         * perf_event_set_output() can swizzle our rb and make us miss wakeups.
          */
         mutex_lock(&event->mmap_mutex);
-
-        rcu_read_lock();
-        rb = rcu_dereference(event->rb);
-        if (rb) {
-                ring_buffer_attach(event, rb);
+        rb = event->rb;
+        if (rb)
                 events = atomic_xchg(&rb->poll, 0);
-        }
-        rcu_read_unlock();
-
         mutex_unlock(&event->mmap_mutex);

         poll_wait(file, &event->waitq, wait);
@@ -3459,16 +3455,12 @@ static void ring_buffer_attach(struct perf_event *event,
                 return;

         spin_lock_irqsave(&rb->event_lock, flags);
-        if (!list_empty(&event->rb_entry))
-                goto unlock;
-
-        list_add(&event->rb_entry, &rb->event_list);
-unlock:
+        if (list_empty(&event->rb_entry))
+                list_add(&event->rb_entry, &rb->event_list);
         spin_unlock_irqrestore(&rb->event_lock, flags);
 }

-static void ring_buffer_detach(struct perf_event *event,
-                               struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
 {
         unsigned long flags;

@@ -3487,13 +3479,10 @@ static void ring_buffer_wakeup(struct perf_event *event)

         rcu_read_lock();
         rb = rcu_dereference(event->rb);
-        if (!rb)
-                goto unlock;
-
-        list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
-                wake_up_all(&event->waitq);
-
-unlock:
+        if (rb) {
+                list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+                        wake_up_all(&event->waitq);
+        }
         rcu_read_unlock();
 }

@@ -3522,18 +3511,10 @@ static struct ring_buffer *ring_buffer_get(struct perf_event *event)

 static void ring_buffer_put(struct ring_buffer *rb)
 {
-        struct perf_event *event, *n;
-        unsigned long flags;
-
         if (!atomic_dec_and_test(&rb->refcount))
                 return;

-        spin_lock_irqsave(&rb->event_lock, flags);
-        list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-                list_del_init(&event->rb_entry);
-                wake_up_all(&event->waitq);
-        }
-        spin_unlock_irqrestore(&rb->event_lock, flags);
+        WARN_ON_ONCE(!list_empty(&rb->event_list));

         call_rcu(&rb->rcu_head, rb_free_rcu);
 }
@@ -3543,26 +3524,100 @@ static void perf_mmap_open(struct vm_area_struct *vma)
         struct perf_event *event = vma->vm_file->private_data;

         atomic_inc(&event->mmap_count);
+        atomic_inc(&event->rb->mmap_count);
 }

+/*
+ * A buffer can be mmap()ed multiple times; either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
         struct perf_event *event = vma->vm_file->private_data;

-        if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-                unsigned long size = perf_data_size(event->rb);
-                struct user_struct *user = event->mmap_user;
-                struct ring_buffer *rb = event->rb;
+        struct ring_buffer *rb = event->rb;
+        struct user_struct *mmap_user = rb->mmap_user;
+        int mmap_locked = rb->mmap_locked;
+        unsigned long size = perf_data_size(rb);
+
+        atomic_dec(&rb->mmap_count);
+
+        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+                return;
+
+        /* Detach current event from the buffer. */
+        rcu_assign_pointer(event->rb, NULL);
+        ring_buffer_detach(event, rb);
+        mutex_unlock(&event->mmap_mutex);
+
+        /* If there's still other mmap()s of this buffer, we're done. */
+        if (atomic_read(&rb->mmap_count)) {
+                ring_buffer_put(rb); /* can't be last */
+                return;
+        }
+
+        /*
+         * No other mmap()s, detach from all other events that might redirect
+         * into the now unreachable buffer. Somewhat complicated by the
+         * fact that rb::event_lock otherwise nests inside mmap_mutex.
+         */
+again:
+        rcu_read_lock();
+        list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+                if (!atomic_long_inc_not_zero(&event->refcount)) {
+                        /*
+                         * This event is en-route to free_event() which will
+                         * detach it and remove it from the list.
+                         */
+                        continue;
+                }
+                rcu_read_unlock();

-                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-                vma->vm_mm->pinned_vm -= event->mmap_locked;
-                rcu_assign_pointer(event->rb, NULL);
-                ring_buffer_detach(event, rb);
+                mutex_lock(&event->mmap_mutex);
+                /*
+                 * Check we didn't race with perf_event_set_output() which can
+                 * swizzle the rb from under us while we were waiting to
+                 * acquire mmap_mutex.
+                 *
+                 * If we find a different rb; ignore this event, a next
+                 * iteration will no longer find it on the list. We have to
+                 * still restart the iteration to make sure we're not now
+                 * iterating the wrong list.
+                 */
+                if (event->rb == rb) {
+                        rcu_assign_pointer(event->rb, NULL);
+                        ring_buffer_detach(event, rb);
+                        ring_buffer_put(rb); /* can't be last, we still have one */
+                }
                 mutex_unlock(&event->mmap_mutex);
+                put_event(event);

-                ring_buffer_put(rb);
-                free_uid(user);
+                /*
+                 * Restart the iteration; either we're on the wrong list or
+                 * destroyed its integrity by doing a deletion.
+                 */
+                goto again;
         }
+        rcu_read_unlock();
+
+        /*
+         * It could be there's still a few 0-ref events on the list; they'll
+         * get cleaned up by free_event() -- they'll also still have their
+         * ref on the rb and will free it whenever they are done with it.
+         *
+         * Aside from that, this buffer is 'fully' detached and unmapped,
+         * undo the VM accounting.
+         */
+
+        atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+        vma->vm_mm->pinned_vm -= mmap_locked;
+        free_uid(mmap_user);
+
+        ring_buffer_put(rb); /* could be last */
 }

 static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3612,12 +3667,24 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                 return -EINVAL;

         WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
         mutex_lock(&event->mmap_mutex);
         if (event->rb) {
-                if (event->rb->nr_pages == nr_pages)
-                        atomic_inc(&event->rb->refcount);
-                else
+                if (event->rb->nr_pages != nr_pages) {
                         ret = -EINVAL;
+                        goto unlock;
+                }
+
+                if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+                        /*
+                         * Raced against perf_mmap_close() through
+                         * perf_event_set_output(). Try again, hope for better
+                         * luck.
+                         */
+                        mutex_unlock(&event->mmap_mutex);
+                        goto again;
+                }
+
                 goto unlock;
         }

@@ -3658,12 +3725,16 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                 ret = -ENOMEM;
                 goto unlock;
         }
-        rcu_assign_pointer(event->rb, rb);
+
+        atomic_set(&rb->mmap_count, 1);
+        rb->mmap_locked = extra;
+        rb->mmap_user = get_current_user();

         atomic_long_add(user_extra, &user->locked_vm);
-        event->mmap_locked = extra;
-        event->mmap_user = get_current_user();
-        vma->vm_mm->pinned_vm += event->mmap_locked;
+        vma->vm_mm->pinned_vm += extra;
+
+        ring_buffer_attach(event, rb);
+        rcu_assign_pointer(event->rb, rb);

         perf_event_update_userpage(event);

@@ -3672,7 +3743,11 @@ unlock:
                 atomic_inc(&event->mmap_count);
         mutex_unlock(&event->mmap_mutex);

-        vma->vm_flags |= VM_RESERVED;
+        /*
+         * Since pinned accounting is per vm we cannot allow fork() to copy our
+         * vma.
+         */
+        vma->vm_flags |= VM_DONTCOPY | VM_RESERVED;
         vma->vm_ops = &perf_mmap_vmops;

         return ret;
@@ -6161,6 +6236,8 @@ set:
         if (atomic_read(&event->mmap_count))
                 goto unlock;

+        old_rb = event->rb;
+
         if (output_event) {
                 /* get the rb we want to redirect to */
                 rb = ring_buffer_get(output_event);
@@ -6168,16 +6245,28 @@ set:
                         goto unlock;
         }

-        old_rb = event->rb;
-        rcu_assign_pointer(event->rb, rb);
         if (old_rb)
                 ring_buffer_detach(event, old_rb);
+
+        if (rb)
+                ring_buffer_attach(event, rb);
+
+        rcu_assign_pointer(event->rb, rb);
+
+        if (old_rb) {
+                ring_buffer_put(old_rb);
+                /*
+                 * Since we detached before setting the new rb, so that we
+                 * could attach the new rb, we could have missed a wakeup.
+                 * Provide it now.
+                 */
+                wake_up_all(&event->waitq);
+        }
+
         ret = 0;
 unlock:
         mutex_unlock(&event->mmap_mutex);

-        if (old_rb)
-                ring_buffer_put(old_rb);
 out:
         return ret;
 }
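
The heart of the perf change: the mmap accounting fields (mmap_locked, mmap_user) move from the event into the ring buffer itself, which gains its own mmap_count, so a buffer shared between events via perf_event_set_output() is unaccounted exactly once, by the last munmap(). perf_mmap() can now race with a buffer that perf_mmap_close() is tearing down, which is why it takes its reference with atomic_inc_not_zero() and retries on failure. That idiom in portable C11, as a sketch with hypothetical names:

#include <stdatomic.h>
#include <stdbool.h>

struct buffer {
        atomic_int mmap_count;   /* 0 means "being destroyed" */
        /* ... pages, accounting ... */
};

/* Take a reference only while the object is demonstrably alive;
 * never resurrect a count that has already hit zero. */
static bool buffer_tryget(struct buffer *b)
{
        int old = atomic_load(&b->mmap_count);

        while (old != 0)
                if (atomic_compare_exchange_weak(&b->mmap_count,
                                                 &old, old + 1))
                        return true;
        return false;            /* caller must retry or rebuild */
}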
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index bb38c4d..fc8bfcf 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -147,7 +147,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                 return;
         }

-        for_each_online_cpu(cpu) {
+        for_each_possible_cpu(cpu) {
                 unsigned int nr;

                 nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -233,7 +233,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
         if (cpu >= 0) {
                 toggle_bp_task_slot(bp, cpu, enable, type, weight);
         } else {
-                for_each_online_cpu(cpu)
+                for_each_possible_cpu(cpu)
                         toggle_bp_task_slot(bp, cpu, enable, type, weight);
         }

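The hw_breakpoint change makes slot accounting cover every CPU that may ever come online, not just those online at check time; counts toggled while a CPU was offline were previously missed, letting the slot limit be overcommitted after hotplug. An illustrative kernel-style fragment of the possible-CPU iteration (not taken from the patch):

#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, slots_used);

static unsigned int max_slots_used(void)
{
        unsigned int cpu, most = 0;

        /* Offline CPUs keep their per-CPU data and may return, so
         * iterate possible CPUs, not for_each_online_cpu(). */
        for_each_possible_cpu(cpu)
                most = max(most, per_cpu(slots_used, cpu));

        return most;
}
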
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index b0b107f..b400e64 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -30,6 +30,10 @@ struct ring_buffer {
         spinlock_t                      event_lock;
         struct list_head                event_list;

+        atomic_t                        mmap_count;
+        unsigned long                   mmap_locked;
+        struct user_struct              *mmap_user;
+
         struct perf_event_mmap_page     *user_page;
         void                            *data_pages[0];
 };
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index fa07aed..932420d 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1880,6 +1880,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
         BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
                conn, code, ident, dlen);

+        if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+                return NULL;
+
         len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
         count = min_t(unsigned int, conn->mtu, len);