Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/kernel/exit.c
Revision 630
Wed Mar 4 11:03:09 2009 UTC (15 years, 6 months ago) by niro
File MIME type: text/plain
File size: 39394 byte(s)
Tag kernel26-2.6.12-alx-r9
/*
 * linux/kernel/exit.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);
		if (p->pid)
			__get_cpu_var(process_counts)--;
	}

	REMOVE_LINKS(p);
}

void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	if (unlikely(p->ptrace))
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	__exit_sighand(p);
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/* we are using it only for SMP init */

void unhash_process(struct task_struct *p)
{
	struct dentry *proc_dentry;

	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	__unhash_process(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52. Orphaned process groups are not to be affected
 * by terminal-generated stop signals. Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}
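
/*
 * Worked example (illustrative, not from the original source): a shell
 * in pgrp 100 starts a pipeline in pgrp 200, same session.  While the
 * shell lives, pgrp 200 is not orphaned: its members have a parent in
 * a different pgrp of the same session.  Once the shell exits, no such
 * parent remains, pgrp 200 becomes orphaned, and per POSIX it must no
 * longer be stopped by terminal-generated stop signals.
 */
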
187 | |
188 | static inline int has_stopped_jobs(int pgrp) |
189 | { |
190 | int retval = 0; |
191 | struct task_struct *p; |
192 | |
193 | do_each_task_pid(pgrp, PIDTYPE_PGID, p) { |
194 | if (p->state != TASK_STOPPED) |
195 | continue; |
196 | |
197 | /* If p is stopped by a debugger on a signal that won't |
198 | stop it, then don't count p as stopped. This isn't |
199 | perfect but it's a good approximation. */ |
200 | if (unlikely (p->ptrace) |
201 | && p->exit_code != SIGSTOP |
202 | && p->exit_code != SIGTSTP |
203 | && p->exit_code != SIGTTOU |
204 | && p->exit_code != SIGTTIN) |
205 | continue; |
206 | |
207 | retval = 1; |
208 | break; |
209 | } while_each_task_pid(pgrp, PIDTYPE_PGID, p); |
210 | return retval; |
211 | } |
212 | |
213 | /** |
214 | * reparent_to_init - Reparent the calling kernel thread to the init task. |
215 | * |
216 | * If a kernel thread is launched as a result of a system call, or if |
217 | * it ever exits, it should generally reparent itself to init so that |
218 | * it is correctly cleaned up on exit. |
219 | * |
220 | * The various task state such as scheduling policy and priority may have |
221 | * been inherited from a user process, so we reset them to sane values here. |
222 | * |
223 | * NOTE that reparent_to_init() gives the caller full capabilities. |
224 | */ |
225 | static inline void reparent_to_init(void) |
226 | { |
227 | write_lock_irq(&tasklist_lock); |
228 | |
229 | ptrace_unlink(current); |
230 | /* Reparent to init */ |
231 | REMOVE_LINKS(current); |
232 | current->parent = child_reaper; |
233 | current->real_parent = child_reaper; |
234 | SET_LINKS(current); |
235 | |
236 | /* Set the exit signal to SIGCHLD so we signal init on exit */ |
237 | current->exit_signal = SIGCHLD; |
238 | |
239 | if ((current->policy == SCHED_NORMAL) && (task_nice(current) < 0)) |
240 | set_user_nice(current, 0); |
241 | /* cpus_allowed? */ |
242 | /* rt_priority? */ |
243 | /* signals? */ |
244 | security_task_reparent_to_init(current); |
245 | memcpy(current->signal->rlim, init_task.signal->rlim, |
246 | sizeof(current->signal->rlim)); |
247 | atomic_inc(&(INIT_USER->__count)); |
248 | write_unlock_irq(&tasklist_lock); |
249 | switch_uid(INIT_USER); |
250 | } |
251 | |
252 | void __set_special_pids(pid_t session, pid_t pgrp) |
253 | { |
254 | struct task_struct *curr = current; |
255 | |
256 | if (curr->signal->session != session) { |
257 | detach_pid(curr, PIDTYPE_SID); |
258 | curr->signal->session = session; |
259 | attach_pid(curr, PIDTYPE_SID, session); |
260 | } |
261 | if (process_group(curr) != pgrp) { |
262 | detach_pid(curr, PIDTYPE_PGID); |
263 | curr->signal->pgrp = pgrp; |
264 | attach_pid(curr, PIDTYPE_PGID, pgrp); |
265 | } |
266 | } |
267 | |
268 | void set_special_pids(pid_t session, pid_t pgrp) |
269 | { |
270 | write_lock_irq(&tasklist_lock); |
271 | __set_special_pids(session, pgrp); |
272 | write_unlock_irq(&tasklist_lock); |
273 | } |
274 | |
275 | /* |
276 | * Let kernel threads use this to say that they |
277 | * allow a certain signal (since daemonize() will |
278 | * have disabled all of them by default). |
279 | */ |
280 | int allow_signal(int sig) |
281 | { |
282 | if (!valid_signal(sig) || sig < 1) |
283 | return -EINVAL; |
284 | |
285 | spin_lock_irq(¤t->sighand->siglock); |
286 | sigdelset(¤t->blocked, sig); |
287 | if (!current->mm) { |
288 | /* Kernel threads handle their own signals. |
289 | Let the signal code know it'll be handled, so |
290 | that they don't get converted to SIGKILL or |
291 | just silently dropped */ |
292 | current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; |
293 | } |
294 | recalc_sigpending(); |
295 | spin_unlock_irq(¤t->sighand->siglock); |
296 | return 0; |
297 | } |
298 | |
299 | EXPORT_SYMBOL(allow_signal); |
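
/*
 * Illustrative usage (a sketch, not from this file): a kernel thread
 * that wants to be killable unblocks the signal right after
 * daemonize(), since daemonize() blocks everything:
 *
 *	daemonize("mythread");
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current))
 *		do_work();		// do_work() is hypothetical
 *
 * Without the allow_signal() call the thread could never be
 * interrupted by a signal.
 */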

int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as a result of loading a module, close all of
	 * the user space pages.  We don't need them, and if we didn't close
	 * them they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	down(&tty_sem);
	current->signal->tty = NULL;
	up(&tty_sem);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);
	exit_files(current);
	current->files = init_task.files;
	atomic_inc(&current->files->count);

	reparent_to_init();
}

EXPORT_SYMBOL(daemonize);

static inline void close_files(struct files_struct * files)
{
	int i, j;

	j = 0;
	for (;;) {
		unsigned long set;
		i = j * __NFDBITS;
		if (i >= files->max_fdset || i >= files->max_fds)
			break;
		set = files->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file * file = xchg(&files->fd[i], NULL);
				if (file)
					filp_close(file, files);
			}
			i++;
			set >>= 1;
		}
	}
}
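
/*
 * Note on close_files() above: open_fds is a bitmap of open file
 * descriptors, walked one word (__NFDBITS bits) at a time.  With
 * 32-bit words, j == 1 covers fds 32..63; each set bit at position i
 * names an fd whose struct file is xchg()'d out and filp_close()'d.
 */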

struct files_struct *get_files_struct(struct task_struct *task)
{
	struct files_struct *files;

	task_lock(task);
	files = task->files;
	if (files)
		atomic_inc(&files->count);
	task_unlock(task);

	return files;
}

void fastcall put_files_struct(struct files_struct *files)
{
	if (atomic_dec_and_test(&files->count)) {
		close_files(files);
		/*
		 * Free the fd and fdset arrays if we expanded them.
		 */
		if (files->fd != &files->fd_array[0])
			free_fd_array(files->fd, files->max_fds);
		if (files->max_fdset > __FD_SETSIZE) {
			free_fdset(files->open_fds, files->max_fdset);
			free_fdset(files->close_on_exec, files->max_fdset);
		}
		kmem_cache_free(files_cachep, files);
	}
}

EXPORT_SYMBOL(put_files_struct);

static inline void __exit_files(struct task_struct *tsk)
{
	struct files_struct * files = tsk->files;

	if (files) {
		task_lock(tsk);
		tsk->files = NULL;
		task_unlock(tsk);
		put_files_struct(files);
	}
}

void exit_files(struct task_struct *tsk)
{
	__exit_files(tsk);
}

static inline void __put_fs_struct(struct fs_struct *fs)
{
	/* No need to hold fs->lock if we are killing it */
	if (atomic_dec_and_test(&fs->count)) {
		dput(fs->root);
		mntput(fs->rootmnt);
		dput(fs->pwd);
		mntput(fs->pwdmnt);
		if (fs->altroot) {
			dput(fs->altroot);
			mntput(fs->altrootmnt);
		}
		kmem_cache_free(fs_cachep, fs);
	}
}

void put_fs_struct(struct fs_struct *fs)
{
	__put_fs_struct(fs);
}

static inline void __exit_fs(struct task_struct *tsk)
{
	struct fs_struct * fs = tsk->fs;

	if (fs) {
		task_lock(tsk);
		tsk->fs = NULL;
		task_unlock(tsk);
		__put_fs_struct(fs);
	}
}

void exit_fs(struct task_struct *tsk)
{
	__exit_fs(tsk);
}

EXPORT_SYMBOL_GPL(exit_fs);

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(struct task_struct * tsk)
{
	struct mm_struct *mm = tsk->mm;

	mm_release(tsk, mm);
	if (!mm)
		return;
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_sem around checking core_waiters
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment core_waiters for each thread in the
	 * group with ->mm != NULL.
	 */
	down_read(&mm->mmap_sem);
	if (mm->core_waiters) {
		up_read(&mm->mmap_sem);
		down_write(&mm->mmap_sem);
		if (!--mm->core_waiters)
			complete(mm->core_startup_done);
		up_write(&mm->mmap_sem);

		wait_for_completion(&mm->core_done);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	if (mm != tsk->active_mm) BUG();
	/* more a memory barrier than a real lock */
	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mmput(mm);
}

static inline void choose_new_parent(task_t *p, task_t *reaper, task_t *child_reaper)
{
	/*
	 * Make sure we're not reparenting to ourselves and that
	 * the parent is not a zombie.
	 */
	BUG_ON(p == reaper || reaper->exit_state >= EXIT_ZOMBIE);
	p->real_parent = reaper;
}

static inline void reparent_thread(task_t *p, task_t *father, int traced)
{
	/* We don't want people slaying init. */
	if (p->exit_signal != -1)
		p->exit_signal = SIGCHLD;

	if (p->pdeath_signal)
		/* We already hold the tasklist_lock here. */
		group_send_sig_info(p->pdeath_signal, (void *) 0, p);

	/* Move the child from its dying parent to the new one. */
	if (unlikely(traced)) {
		/* Preserve ptrace links if someone else is tracing this child. */
		list_del_init(&p->ptrace_list);
		if (p->parent != p->real_parent)
			list_add(&p->ptrace_list, &p->real_parent->ptrace_children);
	} else {
		/* If this child is being traced, then we're the one tracing it
		 * anyway, so let go of it.
		 */
		p->ptrace = 0;
		list_del_init(&p->sibling);
		p->parent = p->real_parent;
		list_add_tail(&p->sibling, &p->parent->children);

		/* If we'd notified the old parent about this child's death,
		 * also notify the new parent.
		 */
		if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
		    thread_group_empty(p))
			do_notify_parent(p, p->exit_signal);
		else if (p->state == TASK_TRACED) {
			/*
			 * If it was at a trace stop, turn it into
			 * a normal stop since it's no longer being
			 * traced.
			 */
			ptrace_untrace(p);
		}
	}

	/*
	 * process group orphan check
	 * Case ii: Our child is in a different pgrp
	 * than we are, and it was the only connection
	 * outside, so the child pgrp is now orphaned.
	 */
	if ((process_group(p) != process_group(father)) &&
	    (p->signal->session == father->signal->session)) {
		int pgrp = process_group(p);

		if (will_become_orphaned_pgrp(pgrp, NULL) && has_stopped_jobs(pgrp)) {
			__kill_pg_info(SIGHUP, (void *)1, pgrp);
			__kill_pg_info(SIGCONT, (void *)1, pgrp);
		}
	}
}

/*
 * When we die, we re-parent all our children.
 * Try to give them to another thread in our thread
 * group, and if no such member exists, give it to
 * the global child reaper process (ie "init")
 */
static inline void forget_original_parent(struct task_struct * father,
					  struct list_head *to_release)
{
	struct task_struct *p, *reaper = father;
	struct list_head *_p, *_n;

	do {
		reaper = next_thread(reaper);
		if (reaper == father) {
			reaper = child_reaper;
			break;
		}
	} while (reaper->exit_state);

	/*
	 * There are only two places where our children can be:
	 *
	 * - in our child list
	 * - in our ptraced child list
	 *
	 * Search them and reparent children.
	 */
	list_for_each_safe(_p, _n, &father->children) {
		int ptrace;
		p = list_entry(_p, struct task_struct, sibling);

		ptrace = p->ptrace;

		/* if father isn't the real parent, then ptrace must be enabled */
		BUG_ON(father != p->real_parent && !ptrace);

		if (father == p->real_parent) {
			/* we are the real parent, so reparent to a reaper */
			choose_new_parent(p, reaper, child_reaper);
			reparent_thread(p, father, 0);
		} else {
			/* reparent ptraced task to its real parent */
			__ptrace_unlink (p);
			if (p->exit_state == EXIT_ZOMBIE && p->exit_signal != -1 &&
			    thread_group_empty(p))
				do_notify_parent(p, p->exit_signal);
		}

		/*
		 * If the ptraced child is a zombie with exit_signal == -1,
		 * we must collect it before we exit, or it will remain a
		 * zombie forever, since we prevented it from reaping itself
		 * while we were tracing it (so that we could see it in wait4).
		 */
		if (unlikely(ptrace && p->exit_state == EXIT_ZOMBIE && p->exit_signal == -1))
			list_add(&p->ptrace_list, to_release);
	}
	list_for_each_safe(_p, _n, &father->ptrace_children) {
		p = list_entry(_p, struct task_struct, ptrace_list);
		choose_new_parent(p, reaper, child_reaper);
		reparent_thread(p, father, 1);
	}
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk)
{
	int state;
	struct task_struct *t;
	struct list_head ptrace_dead, *_p, *_n;

	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
	    && !thread_group_empty(tsk)) {
		/*
		 * This occurs when there was a race between our exit
		 * syscall and a group signal choosing us as the one to
		 * wake up.  It could be that we are the only thread
		 * alerted to check for pending signals, but another thread
		 * should be woken now to take the signal since we will not.
		 * Now we'll wake all the threads in the group just to make
		 * sure someone gets all the pending signals.
		 */
		read_lock(&tasklist_lock);
		spin_lock_irq(&tsk->sighand->siglock);
		for (t = next_thread(tsk); t != tsk; t = next_thread(t))
			if (!signal_pending(t) && !(t->flags & PF_EXITING)) {
				recalc_sigpending_tsk(t);
				if (signal_pending(t))
					signal_wake_up(t, 0);
			}
		spin_unlock_irq(&tsk->sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This does two things:
	 *
	 * A.  Make init inherit all the child processes
	 * B.  Check to see if any process groups have become orphaned
	 *	as a result of our exiting, and if they have any stopped
	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 */

	INIT_LIST_HEAD(&ptrace_dead);
	forget_original_parent(tsk, &ptrace_dead);
	BUG_ON(!list_empty(&tsk->children));
	BUG_ON(!list_empty(&tsk->ptrace_children));

	/*
	 * Check to see if any process groups have become orphaned
	 * as a result of our exiting, and if they have any stopped
	 * jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
	 *
	 * Case i: Our father is in a different pgrp than we are
	 * and we were the only connection outside, so our pgrp
	 * is about to become orphaned.
	 */

	t = tsk->real_parent;

	if ((process_group(t) != process_group(tsk)) &&
	    (t->signal->session == tsk->signal->session) &&
	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
	    has_stopped_jobs(process_group(tsk))) {
		__kill_pg_info(SIGHUP, (void *)1, process_group(tsk));
		__kill_pg_info(SIGCONT, (void *)1, process_group(tsk));
	}

	/* Let father know we died
	 *
	 * Thread signals are configurable, but you aren't going to use
	 * that to send signals to arbitrary processes.
	 * That stops right now.
	 *
	 * If the parent exec id doesn't match the exec id we saved
	 * when we started then we know the parent has changed security
	 * domain.
	 *
	 * If our self_exec id doesn't match our parent_exec_id then
	 * we have changed execution domain as these two values started
	 * the same after a fork.
	 *
	 */

	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
	    ( tsk->parent_exec_id != t->self_exec_id ||
	      tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;


	/* If something other than our normal parent is ptracing us, then
	 * send it a SIGCHLD instead of honoring exit_signal.  exit_signal
	 * only has special meaning to our real parent.
	 */
	if (tsk->exit_signal != -1 && thread_group_empty(tsk)) {
		int signal = tsk->parent == tsk->real_parent ? tsk->exit_signal : SIGCHLD;
		do_notify_parent(tsk, signal);
	} else if (tsk->ptrace) {
		do_notify_parent(tsk, SIGCHLD);
	}

	state = EXIT_ZOMBIE;
	if (tsk->exit_signal == -1 &&
	    (likely(tsk->ptrace == 0) ||
	     unlikely(tsk->parent->signal->flags & SIGNAL_GROUP_EXIT)))
		state = EXIT_DEAD;
	tsk->exit_state = state;

	write_unlock_irq(&tasklist_lock);

	list_for_each_safe(_p, _n, &ptrace_dead) {
		list_del_init(_p);
		t = list_entry(_p, struct task_struct, ptrace_list);
		release_task(t);
	}

	/* If the process is dead, release it - nobody will wait for it */
	if (state == EXIT_DEAD)
		release_task(tsk);

	/* PF_DEAD causes final put_task_struct after we schedule. */
	preempt_disable();
	tsk->flags |= PF_DEAD;
}

fastcall NORET_TYPE void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	profile_task_exit(tsk);

	WARN_ON(atomic_read(&tsk->fs_excl));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");
	if (unlikely(tsk->pid == 1))
		panic("Attempted to kill init!");
	if (tsk->io_context)
		exit_io_context();

	if (unlikely(current->ptrace & PT_TRACE_EXIT)) {
		current->ptrace_message = code;
		ptrace_notify((PTRACE_EVENT_EXIT << 8) | SIGTRAP);
	}

	tsk->flags |= PF_EXITING;

	/*
	 * Make sure we don't try to process any timer firings
	 * while we are already exiting.
	 */
	tsk->it_virt_expires = cputime_zero;
	tsk->it_prof_expires = cputime_zero;
	tsk->it_sched_expires = 0;

	if (unlikely(in_atomic()))
		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
		       current->comm, current->pid,
		       preempt_count());

	acct_update_integrals(tsk);
	update_mem_hiwater(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead)
		acct_process(code);
	exit_mm(tsk);

	exit_sem(tsk);
	__exit_files(tsk);
	__exit_fs(tsk);
	exit_namespace(tsk);
	exit_thread();
	cpuset_exit(tsk);
	exit_keys(tsk);

	if (group_dead && tsk->signal->leader)
		disassociate_ctty(1);

	module_put(tsk->thread_info->exec_domain->module);
	if (tsk->binfmt)
		module_put(tsk->binfmt->module);

	tsk->exit_code = code;
	exit_notify(tsk);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
#endif

	BUG_ON(!(current->flags & PF_DEAD));
	schedule();
	BUG();
	/* Avoid "noreturn function does return". */
	for (;;) ;
}

EXPORT_SYMBOL_GPL(do_exit);

NORET_TYPE void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}

EXPORT_SYMBOL(complete_and_exit);
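
/*
 * Illustrative pattern (a sketch, not from this file; names are
 * hypothetical): module code typically pairs complete_and_exit()
 * with a completion so the unload path can wait until the thread is
 * really gone:
 *
 *	static DECLARE_COMPLETION(thread_done);
 *
 *	static int my_thread(void *unused)
 *	{
 *		daemonize("my_thread");
 *		// ... work ...
 *		complete_and_exit(&thread_done, 0);
 *	}
 *
 * and the module's cleanup routine calls
 * wait_for_completion(&thread_done) before returning.
 */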

asmlinkage long sys_exit(int error_code)
{
	do_exit((error_code&0xff)<<8);
}
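
/*
 * Status encoding, worked through: sys_exit(1) passes (1 & 0xff) << 8
 * == 0x100 to do_exit(), so the parent's wait*() sees status 0x100 and
 * WEXITSTATUS(status) == (status & 0xff00) >> 8 recovers the 1, while
 * the low seven bits (the terminating signal) stay zero.
 */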

task_t fastcall *next_thread(const task_t *p)
{
	return pid_task(p->pids[PIDTYPE_TGID].pid_list.next, PIDTYPE_TGID);
}

EXPORT_SYMBOL(next_thread);
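
/*
 * next_thread() steps along the circular PIDTYPE_TGID pid list, so
 * the idiom used in exit_notify() above,
 *
 *	for (t = next_thread(tsk); t != tsk; t = next_thread(t))
 *		...;
 *
 * visits every other thread in tsk's thread group exactly once.
 */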

/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
NORET_TYPE void
do_group_exit(int exit_code)
{
	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		exit_code = current->signal->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct signal_struct *const sig = current->signal;
		struct sighand_struct *const sighand = current->sighand;
		read_lock(&tasklist_lock);
		spin_lock_irq(&sighand->siglock);
		if (sig->flags & SIGNAL_GROUP_EXIT)
			/* Another thread got here before we took the lock. */
			exit_code = sig->group_exit_code;
		else {
			sig->flags = SIGNAL_GROUP_EXIT;
			sig->group_exit_code = exit_code;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
		read_unlock(&tasklist_lock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
asmlinkage void sys_exit_group(int error_code)
{
	do_group_exit((error_code & 0xff) << 8);
}

static int eligible_child(pid_t pid, int options, task_t *p)
{
	if (pid > 0) {
		if (p->pid != pid)
			return 0;
	} else if (!pid) {
		if (process_group(p) != process_group(current))
			return 0;
	} else if (pid != -1) {
		if (process_group(p) != -pid)
			return 0;
	}

	/*
	 * Do not consider detached threads that are
	 * not ptraced:
	 */
	if (p->exit_signal == -1 && !p->ptrace)
		return 0;

	/* Wait for all children (clone and not) if __WALL is set;
	 * otherwise, wait for clone children *only* if __WCLONE is
	 * set; otherwise, wait for non-clone children *only*.  (Note:
	 * A "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD.) */
	if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
	    && !(options & __WALL))
		return 0;
	/*
	 * Do not consider thread group leaders that are
	 * in a non-empty thread group:
	 */
	if (current->tgid != p->tgid && delay_group_leader(p))
		return 2;

	if (security_task_wait(p))
		return 0;

	return 1;
}
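
/*
 * The pid argument decoded above follows the wait4() convention:
 *
 *	pid >  0	this specific pid only
 *	pid == 0	any child in the caller's process group
 *	pid == -1	any child at all
 *	pid < -1	any child in process group -pid
 */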

static int wait_noreap_copyout(task_t *p, pid_t pid, uid_t uid,
			       int why, int status,
			       struct siginfo __user *infop,
			       struct rusage __user *rusagep)
{
	int retval = rusagep ? getrusage(p, RUSAGE_BOTH, rusagep) : 0;
	put_task_struct(p);
	if (!retval)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval)
		retval = put_user(0, &infop->si_errno);
	if (!retval)
		retval = put_user((short)why, &infop->si_code);
	if (!retval)
		retval = put_user(pid, &infop->si_pid);
	if (!retval)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = put_user(status, &infop->si_status);
	if (!retval)
		retval = pid;
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_zombie(task_t *p, int noreap,
			    struct siginfo __user *infop,
			    int __user *stat_addr, struct rusage __user *ru)
{
	unsigned long state;
	int retval;
	int status;

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int exit_code = p->exit_code;
		int why, status;

		if (unlikely(p->exit_state != EXIT_ZOMBIE))
			return 0;
		if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
			return 0;
		get_task_struct(p);
		read_unlock(&tasklist_lock);
		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(p, pid, uid, why,
					   status, infop, ru);
	}

	/*
	 * Try to move the task's state to DEAD
	 * only one thread is allowed to do this:
	 */
	state = xchg(&p->exit_state, EXIT_DEAD);
	if (state != EXIT_ZOMBIE) {
		BUG_ON(state != EXIT_DEAD);
		return 0;
	}
	if (unlikely(p->exit_signal == -1 && p->ptrace == 0)) {
		/*
		 * This can only happen in a race with a ptraced thread
		 * dying on another processor.
		 */
		return 0;
	}

	if (likely(p->real_parent == p->parent) && likely(p->signal)) {
		/*
		 * The resource counters for the group leader are in its
		 * own task_struct.  Those for dead threads in the group
		 * are in its signal_struct, as are those for the child
		 * processes it has previously reaped.  All these
		 * accumulate in the parent's signal_struct c* fields.
		 *
		 * We don't bother to take a lock here to protect these
		 * p->signal fields, because they are only touched by
		 * __exit_signal, which runs with tasklist_lock
		 * write-locked anyway, and so is excluded here.  We do
		 * need to protect the access to p->parent->signal fields,
		 * as other threads in the parent group can be right
		 * here reaping other children at the same time.
		 */
		spin_lock_irq(&p->parent->sighand->siglock);
		p->parent->signal->cutime =
			cputime_add(p->parent->signal->cutime,
			cputime_add(p->utime,
			cputime_add(p->signal->utime,
				    p->signal->cutime)));
		p->parent->signal->cstime =
			cputime_add(p->parent->signal->cstime,
			cputime_add(p->stime,
			cputime_add(p->signal->stime,
				    p->signal->cstime)));
		p->parent->signal->cmin_flt +=
			p->min_flt + p->signal->min_flt + p->signal->cmin_flt;
		p->parent->signal->cmaj_flt +=
			p->maj_flt + p->signal->maj_flt + p->signal->cmaj_flt;
		p->parent->signal->cnvcsw +=
			p->nvcsw + p->signal->nvcsw + p->signal->cnvcsw;
		p->parent->signal->cnivcsw +=
			p->nivcsw + p->signal->nivcsw + p->signal->cnivcsw;
		spin_unlock_irq(&p->parent->sighand->siglock);
	}

	/*
	 * Now we are sure this task is interesting, and no other
	 * thread can reap it because we set its state to EXIT_DEAD.
	 */
	read_unlock(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && stat_addr)
		retval = put_user(status, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (retval) {
		// TODO: is this safe?
		p->exit_state = EXIT_ZOMBIE;
		return retval;
	}
	retval = p->pid;
	if (p->real_parent != p->parent) {
		write_lock_irq(&tasklist_lock);
		/* Double-check with lock held. */
		if (p->real_parent != p->parent) {
			__ptrace_unlink(p);
			// TODO: is this safe?
			p->exit_state = EXIT_ZOMBIE;
			/*
			 * If this is not a detached task, notify the parent.
			 * If it's still not detached after that, don't release
			 * it now.
			 */
			if (p->exit_signal != -1) {
				do_notify_parent(p, p->exit_signal);
				if (p->exit_signal != -1)
					p = NULL;
			}
		}
		write_unlock_irq(&tasklist_lock);
	}
	if (p != NULL)
		release_task(p);
	BUG_ON(!retval);
	return retval;
}

/*
 * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_stopped(task_t *p, int delayed_group_leader, int noreap,
			     struct siginfo __user *infop,
			     int __user *stat_addr, struct rusage __user *ru)
{
	int retval, exit_code;

	if (!p->exit_code)
		return 0;
	if (delayed_group_leader && !(p->ptrace & PT_PTRACED) &&
	    p->signal && p->signal->group_stop_count > 0)
		/*
		 * A group stop is in progress and this is the group leader.
		 * We won't report until all threads have stopped.
		 */
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (unlikely(noreap)) {
		pid_t pid = p->pid;
		uid_t uid = p->uid;
		int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;

		exit_code = p->exit_code;
		if (unlikely(!exit_code) ||
		    unlikely(p->state > TASK_STOPPED))
			goto bail_ref;
		return wait_noreap_copyout(p, pid, uid,
					   why, (exit_code << 8) | 0x7f,
					   infop, ru);
	}

	write_lock_irq(&tasklist_lock);

	/*
	 * This uses xchg to be atomic with the thread resuming and setting
	 * it.  It must also be done with the write lock held to prevent a
	 * race with the EXIT_ZOMBIE case.
	 */
	exit_code = xchg(&p->exit_code, 0);
	if (unlikely(p->exit_state)) {
		/*
		 * The task resumed and then died.  Let the next iteration
		 * catch it in EXIT_ZOMBIE.  Note that exit_code might
		 * already be zero here if it resumed and did _exit(0).
		 * The task itself is dead and won't touch exit_code again;
		 * other processors in this function are locked out.
		 */
		p->exit_code = exit_code;
		exit_code = 0;
	}
	if (unlikely(exit_code == 0)) {
		/*
		 * Another thread in this function got to it first, or it
		 * resumed, or it resumed and then died.
		 */
		write_unlock_irq(&tasklist_lock);
bail_ref:
		put_task_struct(p);
		/*
		 * We are returning to the wait loop without having successfully
		 * removed the process and having released the lock.  We cannot
		 * continue, since the "p" task pointer is potentially stale.
		 *
		 * Return -EAGAIN, and do_wait() will restart the loop from the
		 * beginning.  Do _not_ re-acquire the lock.
		 */
		return -EAGAIN;
	}

	/* move to end of parent's list to avoid starvation */
	remove_parent(p);
	add_parent(p, p->parent);

	write_unlock_irq(&tasklist_lock);

	retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
	if (!retval && stat_addr)
		retval = put_user((exit_code << 8) | 0x7f, stat_addr);
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)((p->ptrace & PT_PTRACED)
					  ? CLD_TRAPPED : CLD_STOPPED),
				  &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(p->pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(p->uid, &infop->si_uid);
	if (!retval)
		retval = p->pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(task_t *p, int noreap,
			       struct siginfo __user *infop,
			       int __user *stat_addr, struct rusage __user *ru)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (unlikely(!p->signal))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held. */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!noreap)
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	spin_unlock_irq(&p->sighand->siglock);

	pid = p->pid;
	uid = p->uid;
	get_task_struct(p);
	read_unlock(&tasklist_lock);

	if (!infop) {
		retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
		put_task_struct(p);
		if (!retval && stat_addr)
			retval = put_user(0xffff, stat_addr);
		if (!retval)
			retval = p->pid;
	} else {
		retval = wait_noreap_copyout(p, pid, uid,
					     CLD_CONTINUED, SIGCONT,
					     infop, ru);
		BUG_ON(retval == 0);
	}

	return retval;
}


static inline int my_ptrace_child(struct task_struct *p)
{
	if (!(p->ptrace & PT_PTRACED))
		return 0;
	if (!(p->ptrace & PT_ATTACHED))
		return 1;
	/*
	 * This child was PTRACE_ATTACH'd.  We should be seeing it only if
	 * we are the attacher.  If we are the real parent, this is a race
	 * inside ptrace_attach.  It is waiting for the tasklist_lock,
	 * under which we have to switch the parent links, but it has
	 * already set the flags in p->ptrace.
	 */
	return (p->parent != p->real_parent);
}

static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
		    int __user *stat_addr, struct rusage __user *ru)
{
	DECLARE_WAITQUEUE(wait, current);
	struct task_struct *tsk;
	int flag, retval;

	add_wait_queue(&current->signal->wait_chldexit,&wait);
repeat:
	/*
	 * We will set this flag if we see any child that might later
	 * match our criteria, even if we are not able to reap it yet.
	 */
	flag = 0;
	current->state = TASK_INTERRUPTIBLE;
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		struct task_struct *p;
		struct list_head *_p;
		int ret;

		list_for_each(_p,&tsk->children) {
			p = list_entry(_p, struct task_struct, sibling);

			ret = eligible_child(pid, options, p);
			if (!ret)
				continue;

			switch (p->state) {
			case TASK_TRACED:
				if (!my_ptrace_child(p))
					continue;
				/*FALLTHROUGH*/
			case TASK_STOPPED:
				/*
				 * It's stopped now, so it might later
				 * continue, exit, or stop again.
				 */
				flag = 1;
				if (!(options & WUNTRACED) &&
				    !my_ptrace_child(p))
					continue;
				retval = wait_task_stopped(p, ret == 2,
							   (options & WNOWAIT),
							   infop,
							   stat_addr, ru);
				if (retval == -EAGAIN)
					goto repeat;
				if (retval != 0) /* He released the lock. */
					goto end;
				break;
			default:
			// case EXIT_DEAD:
				if (p->exit_state == EXIT_DEAD)
					continue;
			// case EXIT_ZOMBIE:
				if (p->exit_state == EXIT_ZOMBIE) {
					/*
					 * Eligible but we cannot release
					 * it yet:
					 */
					if (ret == 2)
						goto check_continued;
					if (!likely(options & WEXITED))
						continue;
					retval = wait_task_zombie(
						p, (options & WNOWAIT),
						infop, stat_addr, ru);
					/* He released the lock. */
					if (retval != 0)
						goto end;
					break;
				}
check_continued:
				/*
				 * It's running now, so it might later
				 * exit, stop, or stop and then continue.
				 */
				flag = 1;
				if (!unlikely(options & WCONTINUED))
					continue;
				retval = wait_task_continued(
					p, (options & WNOWAIT),
					infop, stat_addr, ru);
				if (retval != 0) /* He released the lock. */
					goto end;
				break;
			}
		}
		if (!flag) {
			list_for_each(_p, &tsk->ptrace_children) {
				p = list_entry(_p, struct task_struct,
					       ptrace_list);
				if (!eligible_child(pid, options, p))
					continue;
				flag = 1;
				break;
			}
		}
		if (options & __WNOTHREAD)
			break;
		tsk = next_thread(tsk);
		if (tsk->signal != current->signal)
			BUG();
	} while (tsk != current);

	read_unlock(&tasklist_lock);
	if (flag) {
		retval = 0;
		if (options & WNOHANG)
			goto end;
		retval = -ERESTARTSYS;
		if (signal_pending(current))
			goto end;
		schedule();
		goto repeat;
	}
	retval = -ECHILD;
end:
	current->state = TASK_RUNNING;
	remove_wait_queue(&current->signal->wait_chldexit,&wait);
	if (infop) {
		if (retval > 0)
			retval = 0;
		else {
			/*
			 * For a WNOHANG return, clear out all the fields
			 * we would set so the user can easily tell the
			 * difference.
			 */
			if (!retval)
				retval = put_user(0, &infop->si_signo);
			if (!retval)
				retval = put_user(0, &infop->si_errno);
			if (!retval)
				retval = put_user(0, &infop->si_code);
			if (!retval)
				retval = put_user(0, &infop->si_pid);
			if (!retval)
				retval = put_user(0, &infop->si_uid);
			if (!retval)
				retval = put_user(0, &infop->si_status);
		}
	}
	return retval;
}

asmlinkage long sys_waitid(int which, pid_t pid,
			   struct siginfo __user *infop, int options,
			   struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		pid = -1;
		break;
	case P_PID:
		if (pid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		if (pid <= 0)
			return -EINVAL;
		pid = -pid;
		break;
	default:
		return -EINVAL;
	}

	ret = do_wait(pid, options, infop, NULL, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
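
/*
 * Note: the switch above folds the waitid() idtype into the signed-pid
 * convention that eligible_child() decodes; e.g. waitid(P_PGID, 5, ...)
 * becomes do_wait(-5, ...), and P_ALL becomes do_wait(-1, ...).
 */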

asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr,
			  int options, struct rusage __user *ru)
{
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru);

	/* avoid REGPARM breakage on x86: */
	prevent_tail_call(ret);
	return ret;
}
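
/*
 * Userspace view (illustrative): a libc waitpid() wrapper ends up
 * here, e.g.
 *
 *	int status;
 *	pid_t pid = waitpid(-1, &status, WNOHANG);
 *
 * maps to sys_wait4(-1, &status, WNOHANG, NULL); note how WEXITED is
 * added unconditionally above before calling do_wait().
 */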

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
asmlinkage long sys_waitpid(pid_t pid, int __user *stat_addr, int options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif