Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/kernel/signal.c
Parent Directory | Revision Log
Revision 630 -
(show annotations)
(download)
Wed Mar 4 11:03:09 2009 UTC (15 years, 3 months ago) by niro
File MIME type: text/plain
File size: 70856 byte(s)
Wed Mar 4 11:03:09 2009 UTC (15 years, 3 months ago) by niro
File MIME type: text/plain
File size: 70856 byte(s)
Tag kernel26-2.6.12-alx-r9
1 | /* |
2 | * linux/kernel/signal.c |
3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | * |
6 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson |
7 | * |
8 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. |
9 | * Changes to use preallocated sigqueue structures |
10 | * to allow signals to be sent reliably. |
11 | */ |
12 | |
13 | #include <linux/config.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/module.h> |
16 | #include <linux/smp_lock.h> |
17 | #include <linux/init.h> |
18 | #include <linux/sched.h> |
19 | #include <linux/fs.h> |
20 | #include <linux/tty.h> |
21 | #include <linux/binfmts.h> |
22 | #include <linux/security.h> |
23 | #include <linux/syscalls.h> |
24 | #include <linux/ptrace.h> |
25 | #include <linux/posix-timers.h> |
26 | #include <linux/signal.h> |
27 | #include <asm/param.h> |
28 | #include <asm/uaccess.h> |
29 | #include <asm/unistd.h> |
30 | #include <asm/siginfo.h> |
31 | |
32 | /* |
33 | * SLAB caches for signal bits. |
34 | */ |
35 | |
36 | static kmem_cache_t *sigqueue_cachep; |
37 | |
38 | /* |
39 | * In POSIX a signal is sent either to a specific thread (Linux task) |
40 | * or to the process as a whole (Linux thread group). How the signal |
41 | * is sent determines whether it's to one thread or the whole group, |
42 | * which determines which signal mask(s) are involved in blocking it |
43 | * from being delivered until later. When the signal is delivered, |
44 | * either it's caught or ignored by a user handler or it has a default |
45 | * effect that applies to the whole thread group (POSIX process). |
46 | * |
47 | * The possible effects an unblocked signal set to SIG_DFL can have are: |
48 | * ignore - Nothing Happens |
49 | * terminate - kill the process, i.e. all threads in the group, |
50 | * similar to exit_group. The group leader (only) reports |
51 | * WIFSIGNALED status to its parent. |
52 | * coredump - write a core dump file describing all threads using |
53 | * the same mm and then kill all those threads |
54 | * stop - stop all the threads in the group, i.e. TASK_STOPPED state |
55 | * |
56 | * SIGKILL and SIGSTOP cannot be caught, blocked, or ignored. |
57 | * Other signals when not blocked and set to SIG_DFL behaves as follows. |
58 | * The job control signals also have other special effects. |
59 | * |
60 | * +--------------------+------------------+ |
61 | * | POSIX signal | default action | |
62 | * +--------------------+------------------+ |
63 | * | SIGHUP | terminate | |
64 | * | SIGINT | terminate | |
65 | * | SIGQUIT | coredump | |
66 | * | SIGILL | coredump | |
67 | * | SIGTRAP | coredump | |
68 | * | SIGABRT/SIGIOT | coredump | |
69 | * | SIGBUS | coredump | |
70 | * | SIGFPE | coredump | |
71 | * | SIGKILL | terminate(+) | |
72 | * | SIGUSR1 | terminate | |
73 | * | SIGSEGV | coredump | |
74 | * | SIGUSR2 | terminate | |
75 | * | SIGPIPE | terminate | |
76 | * | SIGALRM | terminate | |
77 | * | SIGTERM | terminate | |
78 | * | SIGCHLD | ignore | |
79 | * | SIGCONT | ignore(*) | |
80 | * | SIGSTOP | stop(*)(+) | |
81 | * | SIGTSTP | stop(*) | |
82 | * | SIGTTIN | stop(*) | |
83 | * | SIGTTOU | stop(*) | |
84 | * | SIGURG | ignore | |
85 | * | SIGXCPU | coredump | |
86 | * | SIGXFSZ | coredump | |
87 | * | SIGVTALRM | terminate | |
88 | * | SIGPROF | terminate | |
89 | * | SIGPOLL/SIGIO | terminate | |
90 | * | SIGSYS/SIGUNUSED | coredump | |
91 | * | SIGSTKFLT | terminate | |
92 | * | SIGWINCH | ignore | |
93 | * | SIGPWR | terminate | |
94 | * | SIGRTMIN-SIGRTMAX | terminate | |
95 | * +--------------------+------------------+ |
96 | * | non-POSIX signal | default action | |
97 | * +--------------------+------------------+ |
98 | * | SIGEMT | coredump | |
99 | * +--------------------+------------------+ |
100 | * |
101 | * (+) For SIGKILL and SIGSTOP the action is "always", not just "default". |
102 | * (*) Special job control effects: |
103 | * When SIGCONT is sent, it resumes the process (all threads in the group) |
104 | * from TASK_STOPPED state and also clears any pending/queued stop signals |
105 | * (any of those marked with "stop(*)"). This happens regardless of blocking, |
106 | * catching, or ignoring SIGCONT. When any stop signal is sent, it clears |
107 | * any pending/queued SIGCONT signals; this happens regardless of blocking, |
108 | * catching, or ignored the stop signal, though (except for SIGSTOP) the |
109 | * default action of stopping the process may happen later or never. |
110 | */ |
111 | |
112 | #ifdef SIGEMT |
113 | #define M_SIGEMT M(SIGEMT) |
114 | #else |
115 | #define M_SIGEMT 0 |
116 | #endif |
117 | |
118 | #if SIGRTMIN > BITS_PER_LONG |
119 | #define M(sig) (1ULL << ((sig)-1)) |
120 | #else |
121 | #define M(sig) (1UL << ((sig)-1)) |
122 | #endif |
123 | #define T(sig, mask) (M(sig) & (mask)) |
124 | |
125 | #define SIG_KERNEL_ONLY_MASK (\ |
126 | M(SIGKILL) | M(SIGSTOP) ) |
127 | |
128 | #define SIG_KERNEL_STOP_MASK (\ |
129 | M(SIGSTOP) | M(SIGTSTP) | M(SIGTTIN) | M(SIGTTOU) ) |
130 | |
131 | #define SIG_KERNEL_COREDUMP_MASK (\ |
132 | M(SIGQUIT) | M(SIGILL) | M(SIGTRAP) | M(SIGABRT) | \ |
133 | M(SIGFPE) | M(SIGSEGV) | M(SIGBUS) | M(SIGSYS) | \ |
134 | M(SIGXCPU) | M(SIGXFSZ) | M_SIGEMT ) |
135 | |
136 | #define SIG_KERNEL_IGNORE_MASK (\ |
137 | M(SIGCONT) | M(SIGCHLD) | M(SIGWINCH) | M(SIGURG) ) |
138 | |
139 | #define sig_kernel_only(sig) \ |
140 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_ONLY_MASK)) |
141 | #define sig_kernel_coredump(sig) \ |
142 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_COREDUMP_MASK)) |
143 | #define sig_kernel_ignore(sig) \ |
144 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_IGNORE_MASK)) |
145 | #define sig_kernel_stop(sig) \ |
146 | (((sig) < SIGRTMIN) && T(sig, SIG_KERNEL_STOP_MASK)) |
147 | |
148 | #define sig_user_defined(t, signr) \ |
149 | (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \ |
150 | ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN)) |
151 | |
152 | #define sig_fatal(t, signr) \ |
153 | (!T(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \ |
154 | (t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL) |
155 | |
156 | static int sig_ignored(struct task_struct *t, int sig) |
157 | { |
158 | void __user * handler; |
159 | |
160 | /* |
161 | * Tracers always want to know about signals.. |
162 | */ |
163 | if (t->ptrace & PT_PTRACED) |
164 | return 0; |
165 | |
166 | /* |
167 | * Blocked signals are never ignored, since the |
168 | * signal handler may change by the time it is |
169 | * unblocked. |
170 | */ |
171 | if (sigismember(&t->blocked, sig)) |
172 | return 0; |
173 | |
174 | /* Is it explicitly or implicitly ignored? */ |
175 | handler = t->sighand->action[sig-1].sa.sa_handler; |
176 | return handler == SIG_IGN || |
177 | (handler == SIG_DFL && sig_kernel_ignore(sig)); |
178 | } |
179 | |
180 | /* |
181 | * Re-calculate pending state from the set of locally pending |
182 | * signals, globally pending signals, and blocked signals. |
183 | */ |
184 | static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked) |
185 | { |
186 | unsigned long ready; |
187 | long i; |
188 | |
189 | switch (_NSIG_WORDS) { |
190 | default: |
191 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) |
192 | ready |= signal->sig[i] &~ blocked->sig[i]; |
193 | break; |
194 | |
195 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; |
196 | ready |= signal->sig[2] &~ blocked->sig[2]; |
197 | ready |= signal->sig[1] &~ blocked->sig[1]; |
198 | ready |= signal->sig[0] &~ blocked->sig[0]; |
199 | break; |
200 | |
201 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; |
202 | ready |= signal->sig[0] &~ blocked->sig[0]; |
203 | break; |
204 | |
205 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; |
206 | } |
207 | return ready != 0; |
208 | } |
209 | |
210 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) |
211 | |
212 | fastcall void recalc_sigpending_tsk(struct task_struct *t) |
213 | { |
214 | if (t->signal->group_stop_count > 0 || |
215 | PENDING(&t->pending, &t->blocked) || |
216 | PENDING(&t->signal->shared_pending, &t->blocked)) |
217 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
218 | else |
219 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
220 | } |
221 | |
222 | void recalc_sigpending(void) |
223 | { |
224 | recalc_sigpending_tsk(current); |
225 | } |
226 | |
227 | /* Given the mask, find the first available signal that should be serviced. */ |
228 | |
229 | static int |
230 | next_signal(struct sigpending *pending, sigset_t *mask) |
231 | { |
232 | unsigned long i, *s, *m, x; |
233 | int sig = 0; |
234 | |
235 | s = pending->signal.sig; |
236 | m = mask->sig; |
237 | switch (_NSIG_WORDS) { |
238 | default: |
239 | for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m) |
240 | if ((x = *s &~ *m) != 0) { |
241 | sig = ffz(~x) + i*_NSIG_BPW + 1; |
242 | break; |
243 | } |
244 | break; |
245 | |
246 | case 2: if ((x = s[0] &~ m[0]) != 0) |
247 | sig = 1; |
248 | else if ((x = s[1] &~ m[1]) != 0) |
249 | sig = _NSIG_BPW + 1; |
250 | else |
251 | break; |
252 | sig += ffz(~x); |
253 | break; |
254 | |
255 | case 1: if ((x = *s &~ *m) != 0) |
256 | sig = ffz(~x) + 1; |
257 | break; |
258 | } |
259 | |
260 | return sig; |
261 | } |
262 | |
263 | static struct sigqueue *__sigqueue_alloc(struct task_struct *t, unsigned int __nocast flags, |
264 | int override_rlimit) |
265 | { |
266 | struct sigqueue *q = NULL; |
267 | |
268 | atomic_inc(&t->user->sigpending); |
269 | if (override_rlimit || |
270 | atomic_read(&t->user->sigpending) <= |
271 | t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) |
272 | q = kmem_cache_alloc(sigqueue_cachep, flags); |
273 | if (unlikely(q == NULL)) { |
274 | atomic_dec(&t->user->sigpending); |
275 | } else { |
276 | INIT_LIST_HEAD(&q->list); |
277 | q->flags = 0; |
278 | q->lock = NULL; |
279 | q->user = get_uid(t->user); |
280 | } |
281 | return(q); |
282 | } |
283 | |
284 | static inline void __sigqueue_free(struct sigqueue *q) |
285 | { |
286 | if (q->flags & SIGQUEUE_PREALLOC) |
287 | return; |
288 | atomic_dec(&q->user->sigpending); |
289 | free_uid(q->user); |
290 | kmem_cache_free(sigqueue_cachep, q); |
291 | } |
292 | |
293 | static void flush_sigqueue(struct sigpending *queue) |
294 | { |
295 | struct sigqueue *q; |
296 | |
297 | sigemptyset(&queue->signal); |
298 | while (!list_empty(&queue->list)) { |
299 | q = list_entry(queue->list.next, struct sigqueue , list); |
300 | list_del_init(&q->list); |
301 | __sigqueue_free(q); |
302 | } |
303 | } |
304 | |
305 | /* |
306 | * Flush all pending signals for a task. |
307 | */ |
308 | |
309 | void |
310 | flush_signals(struct task_struct *t) |
311 | { |
312 | unsigned long flags; |
313 | |
314 | spin_lock_irqsave(&t->sighand->siglock, flags); |
315 | clear_tsk_thread_flag(t,TIF_SIGPENDING); |
316 | flush_sigqueue(&t->pending); |
317 | flush_sigqueue(&t->signal->shared_pending); |
318 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
319 | } |
320 | |
321 | /* |
322 | * This function expects the tasklist_lock write-locked. |
323 | */ |
324 | void __exit_sighand(struct task_struct *tsk) |
325 | { |
326 | struct sighand_struct * sighand = tsk->sighand; |
327 | |
328 | /* Ok, we're done with the signal handlers */ |
329 | tsk->sighand = NULL; |
330 | if (atomic_dec_and_test(&sighand->count)) |
331 | kmem_cache_free(sighand_cachep, sighand); |
332 | } |
333 | |
334 | void exit_sighand(struct task_struct *tsk) |
335 | { |
336 | write_lock_irq(&tasklist_lock); |
337 | __exit_sighand(tsk); |
338 | write_unlock_irq(&tasklist_lock); |
339 | } |
340 | |
341 | /* |
342 | * This function expects the tasklist_lock write-locked. |
343 | */ |
344 | void __exit_signal(struct task_struct *tsk) |
345 | { |
346 | struct signal_struct * sig = tsk->signal; |
347 | struct sighand_struct * sighand = tsk->sighand; |
348 | |
349 | if (!sig) |
350 | BUG(); |
351 | if (!atomic_read(&sig->count)) |
352 | BUG(); |
353 | spin_lock(&sighand->siglock); |
354 | posix_cpu_timers_exit(tsk); |
355 | if (atomic_dec_and_test(&sig->count)) { |
356 | posix_cpu_timers_exit_group(tsk); |
357 | if (tsk == sig->curr_target) |
358 | sig->curr_target = next_thread(tsk); |
359 | tsk->signal = NULL; |
360 | spin_unlock(&sighand->siglock); |
361 | flush_sigqueue(&sig->shared_pending); |
362 | } else { |
363 | /* |
364 | * If there is any task waiting for the group exit |
365 | * then notify it: |
366 | */ |
367 | if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) { |
368 | wake_up_process(sig->group_exit_task); |
369 | sig->group_exit_task = NULL; |
370 | } |
371 | if (tsk == sig->curr_target) |
372 | sig->curr_target = next_thread(tsk); |
373 | tsk->signal = NULL; |
374 | /* |
375 | * Accumulate here the counters for all threads but the |
376 | * group leader as they die, so they can be added into |
377 | * the process-wide totals when those are taken. |
378 | * The group leader stays around as a zombie as long |
379 | * as there are other threads. When it gets reaped, |
380 | * the exit.c code will add its counts into these totals. |
381 | * We won't ever get here for the group leader, since it |
382 | * will have been the last reference on the signal_struct. |
383 | */ |
384 | sig->utime = cputime_add(sig->utime, tsk->utime); |
385 | sig->stime = cputime_add(sig->stime, tsk->stime); |
386 | sig->min_flt += tsk->min_flt; |
387 | sig->maj_flt += tsk->maj_flt; |
388 | sig->nvcsw += tsk->nvcsw; |
389 | sig->nivcsw += tsk->nivcsw; |
390 | sig->sched_time += tsk->sched_time; |
391 | spin_unlock(&sighand->siglock); |
392 | sig = NULL; /* Marker for below. */ |
393 | } |
394 | clear_tsk_thread_flag(tsk,TIF_SIGPENDING); |
395 | flush_sigqueue(&tsk->pending); |
396 | if (sig) { |
397 | /* |
398 | * We are cleaning up the signal_struct here. We delayed |
399 | * calling exit_itimers until after flush_sigqueue, just in |
400 | * case our thread-local pending queue contained a queued |
401 | * timer signal that would have been cleared in |
402 | * exit_itimers. When that called sigqueue_free, it would |
403 | * attempt to re-take the tasklist_lock and deadlock. This |
404 | * can never happen if we ensure that all queues the |
405 | * timer's signal might be queued on have been flushed |
406 | * first. The shared_pending queue, and our own pending |
407 | * queue are the only queues the timer could be on, since |
408 | * there are no other threads left in the group and timer |
409 | * signals are constrained to threads inside the group. |
410 | */ |
411 | exit_itimers(sig); |
412 | exit_thread_group_keys(sig); |
413 | kmem_cache_free(signal_cachep, sig); |
414 | } |
415 | } |
416 | |
417 | void exit_signal(struct task_struct *tsk) |
418 | { |
419 | write_lock_irq(&tasklist_lock); |
420 | __exit_signal(tsk); |
421 | write_unlock_irq(&tasklist_lock); |
422 | } |
423 | |
424 | /* |
425 | * Flush all handlers for a task. |
426 | */ |
427 | |
428 | void |
429 | flush_signal_handlers(struct task_struct *t, int force_default) |
430 | { |
431 | int i; |
432 | struct k_sigaction *ka = &t->sighand->action[0]; |
433 | for (i = _NSIG ; i != 0 ; i--) { |
434 | if (force_default || ka->sa.sa_handler != SIG_IGN) |
435 | ka->sa.sa_handler = SIG_DFL; |
436 | ka->sa.sa_flags = 0; |
437 | sigemptyset(&ka->sa.sa_mask); |
438 | ka++; |
439 | } |
440 | } |
441 | |
442 | |
443 | /* Notify the system that a driver wants to block all signals for this |
444 | * process, and wants to be notified if any signals at all were to be |
445 | * sent/acted upon. If the notifier routine returns non-zero, then the |
446 | * signal will be acted upon after all. If the notifier routine returns 0, |
447 | * then then signal will be blocked. Only one block per process is |
448 | * allowed. priv is a pointer to private data that the notifier routine |
449 | * can use to determine if the signal should be blocked or not. */ |
450 | |
451 | void |
452 | block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) |
453 | { |
454 | unsigned long flags; |
455 | |
456 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
457 | current->notifier_mask = mask; |
458 | current->notifier_data = priv; |
459 | current->notifier = notifier; |
460 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
461 | } |
462 | |
463 | /* Notify the system that blocking has ended. */ |
464 | |
465 | void |
466 | unblock_all_signals(void) |
467 | { |
468 | unsigned long flags; |
469 | |
470 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
471 | current->notifier = NULL; |
472 | current->notifier_data = NULL; |
473 | recalc_sigpending(); |
474 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
475 | } |
476 | |
477 | static inline int collect_signal(int sig, struct sigpending *list, siginfo_t *info) |
478 | { |
479 | struct sigqueue *q, *first = NULL; |
480 | int still_pending = 0; |
481 | |
482 | if (unlikely(!sigismember(&list->signal, sig))) |
483 | return 0; |
484 | |
485 | /* |
486 | * Collect the siginfo appropriate to this signal. Check if |
487 | * there is another siginfo for the same signal. |
488 | */ |
489 | list_for_each_entry(q, &list->list, list) { |
490 | if (q->info.si_signo == sig) { |
491 | if (first) { |
492 | still_pending = 1; |
493 | break; |
494 | } |
495 | first = q; |
496 | } |
497 | } |
498 | if (first) { |
499 | list_del_init(&first->list); |
500 | copy_siginfo(info, &first->info); |
501 | __sigqueue_free(first); |
502 | if (!still_pending) |
503 | sigdelset(&list->signal, sig); |
504 | } else { |
505 | |
506 | /* Ok, it wasn't in the queue. This must be |
507 | a fast-pathed signal or we must have been |
508 | out of queue space. So zero out the info. |
509 | */ |
510 | sigdelset(&list->signal, sig); |
511 | info->si_signo = sig; |
512 | info->si_errno = 0; |
513 | info->si_code = 0; |
514 | info->si_pid = 0; |
515 | info->si_uid = 0; |
516 | } |
517 | return 1; |
518 | } |
519 | |
520 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, |
521 | siginfo_t *info) |
522 | { |
523 | int sig = 0; |
524 | |
525 | /* SIGKILL must have priority, otherwise it is quite easy |
526 | * to create an unkillable process, sending sig < SIGKILL |
527 | * to self */ |
528 | if (unlikely(sigismember(&pending->signal, SIGKILL))) { |
529 | if (!sigismember(mask, SIGKILL)) |
530 | sig = SIGKILL; |
531 | } |
532 | |
533 | if (likely(!sig)) |
534 | sig = next_signal(pending, mask); |
535 | if (sig) { |
536 | if (current->notifier) { |
537 | if (sigismember(current->notifier_mask, sig)) { |
538 | if (!(current->notifier)(current->notifier_data)) { |
539 | clear_thread_flag(TIF_SIGPENDING); |
540 | return 0; |
541 | } |
542 | } |
543 | } |
544 | |
545 | if (!collect_signal(sig, pending, info)) |
546 | sig = 0; |
547 | |
548 | } |
549 | recalc_sigpending(); |
550 | |
551 | return sig; |
552 | } |
553 | |
554 | /* |
555 | * Dequeue a signal and return the element to the caller, which is |
556 | * expected to free it. |
557 | * |
558 | * All callers have to hold the siglock. |
559 | */ |
560 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) |
561 | { |
562 | int signr = __dequeue_signal(&tsk->pending, mask, info); |
563 | if (!signr) |
564 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
565 | mask, info); |
566 | if (signr && unlikely(sig_kernel_stop(signr))) { |
567 | /* |
568 | * Set a marker that we have dequeued a stop signal. Our |
569 | * caller might release the siglock and then the pending |
570 | * stop signal it is about to process is no longer in the |
571 | * pending bitmasks, but must still be cleared by a SIGCONT |
572 | * (and overruled by a SIGKILL). So those cases clear this |
573 | * shared flag after we've set it. Note that this flag may |
574 | * remain set after the signal we return is ignored or |
575 | * handled. That doesn't matter because its only purpose |
576 | * is to alert stop-signal processing code when another |
577 | * processor has come along and cleared the flag. |
578 | */ |
579 | tsk->signal->flags |= SIGNAL_STOP_DEQUEUED; |
580 | } |
581 | if ( signr && |
582 | ((info->si_code & __SI_MASK) == __SI_TIMER) && |
583 | info->si_sys_private){ |
584 | /* |
585 | * Release the siglock to ensure proper locking order |
586 | * of timer locks outside of siglocks. Note, we leave |
587 | * irqs disabled here, since the posix-timers code is |
588 | * about to disable them again anyway. |
589 | */ |
590 | spin_unlock(&tsk->sighand->siglock); |
591 | do_schedule_next_timer(info); |
592 | spin_lock(&tsk->sighand->siglock); |
593 | } |
594 | return signr; |
595 | } |
596 | |
597 | /* |
598 | * Tell a process that it has a new active signal.. |
599 | * |
600 | * NOTE! we rely on the previous spin_lock to |
601 | * lock interrupts for us! We can only be called with |
602 | * "siglock" held, and the local interrupt must |
603 | * have been disabled when that got acquired! |
604 | * |
605 | * No need to set need_resched since signal event passing |
606 | * goes through ->blocked |
607 | */ |
608 | void signal_wake_up(struct task_struct *t, int resume) |
609 | { |
610 | unsigned int mask; |
611 | |
612 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
613 | |
614 | /* |
615 | * For SIGKILL, we want to wake it up in the stopped/traced case. |
616 | * We don't check t->state here because there is a race with it |
617 | * executing another processor and just now entering stopped state. |
618 | * By using wake_up_state, we ensure the process will wake up and |
619 | * handle its death signal. |
620 | */ |
621 | mask = TASK_INTERRUPTIBLE; |
622 | if (resume) |
623 | mask |= TASK_STOPPED | TASK_TRACED; |
624 | if (!wake_up_state(t, mask)) |
625 | kick_process(t); |
626 | } |
627 | |
628 | /* |
629 | * Remove signals in mask from the pending set and queue. |
630 | * Returns 1 if any signals were found. |
631 | * |
632 | * All callers must be holding the siglock. |
633 | */ |
634 | static int rm_from_queue(unsigned long mask, struct sigpending *s) |
635 | { |
636 | struct sigqueue *q, *n; |
637 | |
638 | if (!sigtestsetmask(&s->signal, mask)) |
639 | return 0; |
640 | |
641 | sigdelsetmask(&s->signal, mask); |
642 | list_for_each_entry_safe(q, n, &s->list, list) { |
643 | if (q->info.si_signo < SIGRTMIN && |
644 | (mask & sigmask(q->info.si_signo))) { |
645 | list_del_init(&q->list); |
646 | __sigqueue_free(q); |
647 | } |
648 | } |
649 | return 1; |
650 | } |
651 | |
652 | /* |
653 | * Bad permissions for sending the signal |
654 | */ |
655 | static int check_kill_permission(int sig, struct siginfo *info, |
656 | struct task_struct *t) |
657 | { |
658 | int error = -EINVAL; |
659 | if (!valid_signal(sig)) |
660 | return error; |
661 | error = -EPERM; |
662 | if ((!info || ((unsigned long)info != 1 && |
663 | (unsigned long)info != 2 && SI_FROMUSER(info))) |
664 | && ((sig != SIGCONT) || |
665 | (current->signal->session != t->signal->session)) |
666 | && (current->euid ^ t->suid) && (current->euid ^ t->uid) |
667 | && (current->uid ^ t->suid) && (current->uid ^ t->uid) |
668 | && !capable(CAP_KILL)) |
669 | return error; |
670 | return security_task_kill(t, info, sig); |
671 | } |
672 | |
673 | /* forward decl */ |
674 | static void do_notify_parent_cldstop(struct task_struct *tsk, |
675 | struct task_struct *parent, |
676 | int why); |
677 | |
678 | /* |
679 | * Handle magic process-wide effects of stop/continue signals. |
680 | * Unlike the signal actions, these happen immediately at signal-generation |
681 | * time regardless of blocking, ignoring, or handling. This does the |
682 | * actual continuing for SIGCONT, but not the actual stopping for stop |
683 | * signals. The process stop is done as a signal action for SIG_DFL. |
684 | */ |
685 | static void handle_stop_signal(int sig, struct task_struct *p) |
686 | { |
687 | struct task_struct *t; |
688 | |
689 | if (p->signal->flags & SIGNAL_GROUP_EXIT) |
690 | /* |
691 | * The process is in the middle of dying already. |
692 | */ |
693 | return; |
694 | |
695 | if (sig_kernel_stop(sig)) { |
696 | /* |
697 | * This is a stop signal. Remove SIGCONT from all queues. |
698 | */ |
699 | rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending); |
700 | t = p; |
701 | do { |
702 | rm_from_queue(sigmask(SIGCONT), &t->pending); |
703 | t = next_thread(t); |
704 | } while (t != p); |
705 | } else if (sig == SIGCONT) { |
706 | /* |
707 | * Remove all stop signals from all queues, |
708 | * and wake all threads. |
709 | */ |
710 | if (unlikely(p->signal->group_stop_count > 0)) { |
711 | /* |
712 | * There was a group stop in progress. We'll |
713 | * pretend it finished before we got here. We are |
714 | * obliged to report it to the parent: if the |
715 | * SIGSTOP happened "after" this SIGCONT, then it |
716 | * would have cleared this pending SIGCONT. If it |
717 | * happened "before" this SIGCONT, then the parent |
718 | * got the SIGCHLD about the stop finishing before |
719 | * the continue happened. We do the notification |
720 | * now, and it's as if the stop had finished and |
721 | * the SIGCHLD was pending on entry to this kill. |
722 | */ |
723 | p->signal->group_stop_count = 0; |
724 | p->signal->flags = SIGNAL_STOP_CONTINUED; |
725 | spin_unlock(&p->sighand->siglock); |
726 | if (p->ptrace & PT_PTRACED) |
727 | do_notify_parent_cldstop(p, p->parent, |
728 | CLD_STOPPED); |
729 | else |
730 | do_notify_parent_cldstop( |
731 | p->group_leader, |
732 | p->group_leader->real_parent, |
733 | CLD_STOPPED); |
734 | spin_lock(&p->sighand->siglock); |
735 | } |
736 | rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); |
737 | t = p; |
738 | do { |
739 | unsigned int state; |
740 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
741 | |
742 | /* |
743 | * If there is a handler for SIGCONT, we must make |
744 | * sure that no thread returns to user mode before |
745 | * we post the signal, in case it was the only |
746 | * thread eligible to run the signal handler--then |
747 | * it must not do anything between resuming and |
748 | * running the handler. With the TIF_SIGPENDING |
749 | * flag set, the thread will pause and acquire the |
750 | * siglock that we hold now and until we've queued |
751 | * the pending signal. |
752 | * |
753 | * Wake up the stopped thread _after_ setting |
754 | * TIF_SIGPENDING |
755 | */ |
756 | state = TASK_STOPPED; |
757 | if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) { |
758 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
759 | state |= TASK_INTERRUPTIBLE; |
760 | } |
761 | wake_up_state(t, state); |
762 | |
763 | t = next_thread(t); |
764 | } while (t != p); |
765 | |
766 | if (p->signal->flags & SIGNAL_STOP_STOPPED) { |
767 | /* |
768 | * We were in fact stopped, and are now continued. |
769 | * Notify the parent with CLD_CONTINUED. |
770 | */ |
771 | p->signal->flags = SIGNAL_STOP_CONTINUED; |
772 | p->signal->group_exit_code = 0; |
773 | spin_unlock(&p->sighand->siglock); |
774 | if (p->ptrace & PT_PTRACED) |
775 | do_notify_parent_cldstop(p, p->parent, |
776 | CLD_CONTINUED); |
777 | else |
778 | do_notify_parent_cldstop( |
779 | p->group_leader, |
780 | p->group_leader->real_parent, |
781 | CLD_CONTINUED); |
782 | spin_lock(&p->sighand->siglock); |
783 | } else { |
784 | /* |
785 | * We are not stopped, but there could be a stop |
786 | * signal in the middle of being processed after |
787 | * being removed from the queue. Clear that too. |
788 | */ |
789 | p->signal->flags = 0; |
790 | } |
791 | } else if (sig == SIGKILL) { |
792 | /* |
793 | * Make sure that any pending stop signal already dequeued |
794 | * is undone by the wakeup for SIGKILL. |
795 | */ |
796 | p->signal->flags = 0; |
797 | } |
798 | } |
799 | |
800 | static int send_signal(int sig, struct siginfo *info, struct task_struct *t, |
801 | struct sigpending *signals) |
802 | { |
803 | struct sigqueue * q = NULL; |
804 | int ret = 0; |
805 | |
806 | /* |
807 | * fast-pathed signals for kernel-internal things like SIGSTOP |
808 | * or SIGKILL. |
809 | */ |
810 | if ((unsigned long)info == 2) |
811 | goto out_set; |
812 | |
813 | /* Real-time signals must be queued if sent by sigqueue, or |
814 | some other real-time mechanism. It is implementation |
815 | defined whether kill() does so. We attempt to do so, on |
816 | the principle of least surprise, but since kill is not |
817 | allowed to fail with EAGAIN when low on memory we just |
818 | make sure at least one signal gets delivered and don't |
819 | pass on the info struct. */ |
820 | |
821 | q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN && |
822 | ((unsigned long) info < 2 || |
823 | info->si_code >= 0))); |
824 | if (q) { |
825 | list_add_tail(&q->list, &signals->list); |
826 | switch ((unsigned long) info) { |
827 | case 0: |
828 | q->info.si_signo = sig; |
829 | q->info.si_errno = 0; |
830 | q->info.si_code = SI_USER; |
831 | q->info.si_pid = current->pid; |
832 | q->info.si_uid = current->uid; |
833 | break; |
834 | case 1: |
835 | q->info.si_signo = sig; |
836 | q->info.si_errno = 0; |
837 | q->info.si_code = SI_KERNEL; |
838 | q->info.si_pid = 0; |
839 | q->info.si_uid = 0; |
840 | break; |
841 | default: |
842 | copy_siginfo(&q->info, info); |
843 | break; |
844 | } |
845 | } else { |
846 | if (sig >= SIGRTMIN && info && (unsigned long)info != 1 |
847 | && info->si_code != SI_USER) |
848 | /* |
849 | * Queue overflow, abort. We may abort if the signal was rt |
850 | * and sent by user using something other than kill(). |
851 | */ |
852 | return -EAGAIN; |
853 | if (((unsigned long)info > 1) && (info->si_code == SI_TIMER)) |
854 | /* |
855 | * Set up a return to indicate that we dropped |
856 | * the signal. |
857 | */ |
858 | ret = info->si_sys_private; |
859 | } |
860 | |
861 | out_set: |
862 | sigaddset(&signals->signal, sig); |
863 | return ret; |
864 | } |
865 | |
866 | #define LEGACY_QUEUE(sigptr, sig) \ |
867 | (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig))) |
868 | |
869 | |
870 | static int |
871 | specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
872 | { |
873 | int ret = 0; |
874 | |
875 | if (!irqs_disabled()) |
876 | BUG(); |
877 | assert_spin_locked(&t->sighand->siglock); |
878 | |
879 | if (((unsigned long)info > 2) && (info->si_code == SI_TIMER)) |
880 | /* |
881 | * Set up a return to indicate that we dropped the signal. |
882 | */ |
883 | ret = info->si_sys_private; |
884 | |
885 | /* Short-circuit ignored signals. */ |
886 | if (sig_ignored(t, sig)) |
887 | goto out; |
888 | |
889 | /* Support queueing exactly one non-rt signal, so that we |
890 | can get more detailed information about the cause of |
891 | the signal. */ |
892 | if (LEGACY_QUEUE(&t->pending, sig)) |
893 | goto out; |
894 | |
895 | ret = send_signal(sig, info, t, &t->pending); |
896 | if (!ret && !sigismember(&t->blocked, sig)) |
897 | signal_wake_up(t, sig == SIGKILL); |
898 | out: |
899 | return ret; |
900 | } |
901 | |
902 | /* |
903 | * Force a signal that the process can't ignore: if necessary |
904 | * we unblock the signal and change any SIG_IGN to SIG_DFL. |
905 | */ |
906 | |
907 | int |
908 | force_sig_info(int sig, struct siginfo *info, struct task_struct *t) |
909 | { |
910 | unsigned long int flags; |
911 | int ret; |
912 | |
913 | spin_lock_irqsave(&t->sighand->siglock, flags); |
914 | if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) { |
915 | t->sighand->action[sig-1].sa.sa_handler = SIG_DFL; |
916 | sigdelset(&t->blocked, sig); |
917 | recalc_sigpending_tsk(t); |
918 | } |
919 | ret = specific_send_sig_info(sig, info, t); |
920 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
921 | |
922 | return ret; |
923 | } |
924 | |
925 | void |
926 | force_sig_specific(int sig, struct task_struct *t) |
927 | { |
928 | unsigned long int flags; |
929 | |
930 | spin_lock_irqsave(&t->sighand->siglock, flags); |
931 | if (t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) |
932 | t->sighand->action[sig-1].sa.sa_handler = SIG_DFL; |
933 | sigdelset(&t->blocked, sig); |
934 | recalc_sigpending_tsk(t); |
935 | specific_send_sig_info(sig, (void *)2, t); |
936 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
937 | } |
938 | |
939 | /* |
940 | * Test if P wants to take SIG. After we've checked all threads with this, |
941 | * it's equivalent to finding no threads not blocking SIG. Any threads not |
942 | * blocking SIG were ruled out because they are not running and already |
943 | * have pending signals. Such threads will dequeue from the shared queue |
944 | * as soon as they're available, so putting the signal on the shared queue |
945 | * will be equivalent to sending it to one such thread. |
946 | */ |
947 | #define wants_signal(sig, p, mask) \ |
948 | (!sigismember(&(p)->blocked, sig) \ |
949 | && !((p)->state & mask) \ |
950 | && !((p)->flags & PF_EXITING) \ |
951 | && (task_curr(p) || !signal_pending(p))) |
952 | |
953 | |
954 | static void |
955 | __group_complete_signal(int sig, struct task_struct *p) |
956 | { |
957 | unsigned int mask; |
958 | struct task_struct *t; |
959 | |
960 | /* |
961 | * Don't bother traced and stopped tasks (but |
962 | * SIGKILL will punch through that). |
963 | */ |
964 | mask = TASK_STOPPED | TASK_TRACED; |
965 | if (sig == SIGKILL) |
966 | mask = 0; |
967 | |
968 | /* |
969 | * Now find a thread we can wake up to take the signal off the queue. |
970 | * |
971 | * If the main thread wants the signal, it gets first crack. |
972 | * Probably the least surprising to the average bear. |
973 | */ |
974 | if (wants_signal(sig, p, mask)) |
975 | t = p; |
976 | else if (thread_group_empty(p)) |
977 | /* |
978 | * There is just one thread and it does not need to be woken. |
979 | * It will dequeue unblocked signals before it runs again. |
980 | */ |
981 | return; |
982 | else { |
983 | /* |
984 | * Otherwise try to find a suitable thread. |
985 | */ |
986 | t = p->signal->curr_target; |
987 | if (t == NULL) |
988 | /* restart balancing at this thread */ |
989 | t = p->signal->curr_target = p; |
990 | BUG_ON(t->tgid != p->tgid); |
991 | |
992 | while (!wants_signal(sig, t, mask)) { |
993 | t = next_thread(t); |
994 | if (t == p->signal->curr_target) |
995 | /* |
996 | * No thread needs to be woken. |
997 | * Any eligible threads will see |
998 | * the signal in the queue soon. |
999 | */ |
1000 | return; |
1001 | } |
1002 | p->signal->curr_target = t; |
1003 | } |
1004 | |
1005 | /* |
1006 | * Found a killable thread. If the signal will be fatal, |
1007 | * then start taking the whole group down immediately. |
1008 | */ |
1009 | if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) && |
1010 | !sigismember(&t->real_blocked, sig) && |
1011 | (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) { |
1012 | /* |
1013 | * This signal will be fatal to the whole group. |
1014 | */ |
1015 | if (!sig_kernel_coredump(sig)) { |
1016 | /* |
1017 | * Start a group exit and wake everybody up. |
1018 | * This way we don't have other threads |
1019 | * running and doing things after a slower |
1020 | * thread has the fatal signal pending. |
1021 | */ |
1022 | p->signal->flags = SIGNAL_GROUP_EXIT; |
1023 | p->signal->group_exit_code = sig; |
1024 | p->signal->group_stop_count = 0; |
1025 | t = p; |
1026 | do { |
1027 | sigaddset(&t->pending.signal, SIGKILL); |
1028 | signal_wake_up(t, 1); |
1029 | t = next_thread(t); |
1030 | } while (t != p); |
1031 | return; |
1032 | } |
1033 | |
1034 | /* |
1035 | * There will be a core dump. We make all threads other |
1036 | * than the chosen one go into a group stop so that nothing |
1037 | * happens until it gets scheduled, takes the signal off |
1038 | * the shared queue, and does the core dump. This is a |
1039 | * little more complicated than strictly necessary, but it |
1040 | * keeps the signal state that winds up in the core dump |
1041 | * unchanged from the death state, e.g. which thread had |
1042 | * the core-dump signal unblocked. |
1043 | */ |
1044 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
1045 | rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending); |
1046 | p->signal->group_stop_count = 0; |
1047 | p->signal->group_exit_task = t; |
1048 | t = p; |
1049 | do { |
1050 | p->signal->group_stop_count++; |
1051 | signal_wake_up(t, 0); |
1052 | t = next_thread(t); |
1053 | } while (t != p); |
1054 | wake_up_process(p->signal->group_exit_task); |
1055 | return; |
1056 | } |
1057 | |
1058 | /* |
1059 | * The signal is already in the shared-pending queue. |
1060 | * Tell the chosen thread to wake up and dequeue it. |
1061 | */ |
1062 | signal_wake_up(t, sig == SIGKILL); |
1063 | return; |
1064 | } |
1065 | |
1066 | int |
1067 | __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1068 | { |
1069 | int ret = 0; |
1070 | |
1071 | assert_spin_locked(&p->sighand->siglock); |
1072 | handle_stop_signal(sig, p); |
1073 | |
1074 | if (((unsigned long)info > 2) && (info->si_code == SI_TIMER)) |
1075 | /* |
1076 | * Set up a return to indicate that we dropped the signal. |
1077 | */ |
1078 | ret = info->si_sys_private; |
1079 | |
1080 | /* Short-circuit ignored signals. */ |
1081 | if (sig_ignored(p, sig)) |
1082 | return ret; |
1083 | |
1084 | if (LEGACY_QUEUE(&p->signal->shared_pending, sig)) |
1085 | /* This is a non-RT signal and we already have one queued. */ |
1086 | return ret; |
1087 | |
1088 | /* |
1089 | * Put this signal on the shared-pending queue, or fail with EAGAIN. |
1090 | * We always use the shared queue for process-wide signals, |
1091 | * to avoid several races. |
1092 | */ |
1093 | ret = send_signal(sig, info, p, &p->signal->shared_pending); |
1094 | if (unlikely(ret)) |
1095 | return ret; |
1096 | |
1097 | __group_complete_signal(sig, p); |
1098 | return 0; |
1099 | } |
1100 | |
1101 | /* |
1102 | * Nuke all other threads in the group. |
1103 | */ |
1104 | void zap_other_threads(struct task_struct *p) |
1105 | { |
1106 | struct task_struct *t; |
1107 | |
1108 | p->signal->flags = SIGNAL_GROUP_EXIT; |
1109 | p->signal->group_stop_count = 0; |
1110 | |
1111 | if (thread_group_empty(p)) |
1112 | return; |
1113 | |
1114 | for (t = next_thread(p); t != p; t = next_thread(t)) { |
1115 | /* |
1116 | * Don't bother with already dead threads |
1117 | */ |
1118 | if (t->exit_state) |
1119 | continue; |
1120 | |
1121 | /* |
1122 | * We don't want to notify the parent, since we are |
1123 | * killed as part of a thread group due to another |
1124 | * thread doing an execve() or similar. So set the |
1125 | * exit signal to -1 to allow immediate reaping of |
1126 | * the process. But don't detach the thread group |
1127 | * leader. |
1128 | */ |
1129 | if (t != p->group_leader) |
1130 | t->exit_signal = -1; |
1131 | |
1132 | sigaddset(&t->pending.signal, SIGKILL); |
1133 | rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending); |
1134 | signal_wake_up(t, 1); |
1135 | } |
1136 | } |
1137 | |
1138 | /* |
1139 | * Must be called with the tasklist_lock held for reading! |
1140 | */ |
1141 | int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1142 | { |
1143 | unsigned long flags; |
1144 | int ret; |
1145 | |
1146 | ret = check_kill_permission(sig, info, p); |
1147 | if (!ret && sig && p->sighand) { |
1148 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1149 | ret = __group_send_sig_info(sig, info, p); |
1150 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1151 | } |
1152 | |
1153 | return ret; |
1154 | } |
1155 | |
1156 | /* |
1157 | * kill_pg_info() sends a signal to a process group: this is what the tty |
1158 | * control characters do (^C, ^Z etc) |
1159 | */ |
1160 | |
1161 | int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) |
1162 | { |
1163 | struct task_struct *p = NULL; |
1164 | int retval, success; |
1165 | |
1166 | if (pgrp <= 0) |
1167 | return -EINVAL; |
1168 | |
1169 | success = 0; |
1170 | retval = -ESRCH; |
1171 | do_each_task_pid(pgrp, PIDTYPE_PGID, p) { |
1172 | int err = group_send_sig_info(sig, info, p); |
1173 | success |= !err; |
1174 | retval = err; |
1175 | } while_each_task_pid(pgrp, PIDTYPE_PGID, p); |
1176 | return success ? 0 : retval; |
1177 | } |
1178 | |
1179 | int |
1180 | kill_pg_info(int sig, struct siginfo *info, pid_t pgrp) |
1181 | { |
1182 | int retval; |
1183 | |
1184 | read_lock(&tasklist_lock); |
1185 | retval = __kill_pg_info(sig, info, pgrp); |
1186 | read_unlock(&tasklist_lock); |
1187 | |
1188 | return retval; |
1189 | } |
1190 | |
1191 | int |
1192 | kill_proc_info(int sig, struct siginfo *info, pid_t pid) |
1193 | { |
1194 | int error; |
1195 | struct task_struct *p; |
1196 | |
1197 | read_lock(&tasklist_lock); |
1198 | p = find_task_by_pid(pid); |
1199 | error = -ESRCH; |
1200 | if (p) |
1201 | error = group_send_sig_info(sig, info, p); |
1202 | read_unlock(&tasklist_lock); |
1203 | return error; |
1204 | } |
1205 | |
1206 | |
1207 | /* |
1208 | * kill_something_info() interprets pid in interesting ways just like kill(2). |
1209 | * |
1210 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have |
1211 | * is probably wrong. Should make it like BSD or SYSV. |
1212 | */ |
1213 | |
1214 | static int kill_something_info(int sig, struct siginfo *info, int pid) |
1215 | { |
1216 | if (!pid) { |
1217 | return kill_pg_info(sig, info, process_group(current)); |
1218 | } else if (pid == -1) { |
1219 | int retval = 0, count = 0; |
1220 | struct task_struct * p; |
1221 | |
1222 | read_lock(&tasklist_lock); |
1223 | for_each_process(p) { |
1224 | if (p->pid > 1 && p->tgid != current->tgid) { |
1225 | int err = group_send_sig_info(sig, info, p); |
1226 | ++count; |
1227 | if (err != -EPERM) |
1228 | retval = err; |
1229 | } |
1230 | } |
1231 | read_unlock(&tasklist_lock); |
1232 | return count ? retval : -ESRCH; |
1233 | } else if (pid < 0) { |
1234 | return kill_pg_info(sig, info, -pid); |
1235 | } else { |
1236 | return kill_proc_info(sig, info, pid); |
1237 | } |
1238 | } |
1239 | |
1240 | /* |
1241 | * These are for backward compatibility with the rest of the kernel source. |
1242 | */ |
1243 | |
1244 | /* |
1245 | * These two are the most common entry points. They send a signal |
1246 | * just to the specific thread. |
1247 | */ |
1248 | int |
1249 | send_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1250 | { |
1251 | int ret; |
1252 | unsigned long flags; |
1253 | |
1254 | /* |
1255 | * Make sure legacy kernel users don't send in bad values |
1256 | * (normal paths check this in check_kill_permission). |
1257 | */ |
1258 | if (!valid_signal(sig)) |
1259 | return -EINVAL; |
1260 | |
1261 | /* |
1262 | * We need the tasklist lock even for the specific |
1263 | * thread case (when we don't need to follow the group |
1264 | * lists) in order to avoid races with "p->sighand" |
1265 | * going away or changing from under us. |
1266 | */ |
1267 | read_lock(&tasklist_lock); |
1268 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1269 | ret = specific_send_sig_info(sig, info, p); |
1270 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1271 | read_unlock(&tasklist_lock); |
1272 | return ret; |
1273 | } |
1274 | |
1275 | int |
1276 | send_sig(int sig, struct task_struct *p, int priv) |
1277 | { |
1278 | return send_sig_info(sig, (void*)(long)(priv != 0), p); |
1279 | } |
1280 | |
1281 | /* |
1282 | * This is the entry point for "process-wide" signals. |
1283 | * They will go to an appropriate thread in the thread group. |
1284 | */ |
1285 | int |
1286 | send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p) |
1287 | { |
1288 | int ret; |
1289 | read_lock(&tasklist_lock); |
1290 | ret = group_send_sig_info(sig, info, p); |
1291 | read_unlock(&tasklist_lock); |
1292 | return ret; |
1293 | } |
1294 | |
1295 | void |
1296 | force_sig(int sig, struct task_struct *p) |
1297 | { |
1298 | force_sig_info(sig, (void*)1L, p); |
1299 | } |
1300 | |
1301 | /* |
1302 | * When things go south during signal handling, we |
1303 | * will force a SIGSEGV. And if the signal that caused |
1304 | * the problem was already a SIGSEGV, we'll want to |
1305 | * make sure we don't even try to deliver the signal.. |
1306 | */ |
1307 | int |
1308 | force_sigsegv(int sig, struct task_struct *p) |
1309 | { |
1310 | if (sig == SIGSEGV) { |
1311 | unsigned long flags; |
1312 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1313 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; |
1314 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1315 | } |
1316 | force_sig(SIGSEGV, p); |
1317 | return 0; |
1318 | } |
1319 | |
1320 | int |
1321 | kill_pg(pid_t pgrp, int sig, int priv) |
1322 | { |
1323 | return kill_pg_info(sig, (void *)(long)(priv != 0), pgrp); |
1324 | } |
1325 | |
1326 | int |
1327 | kill_proc(pid_t pid, int sig, int priv) |
1328 | { |
1329 | return kill_proc_info(sig, (void *)(long)(priv != 0), pid); |
1330 | } |
1331 | |
1332 | /* |
1333 | * These functions support sending signals using preallocated sigqueue |
1334 | * structures. This is needed "because realtime applications cannot |
1335 | * afford to lose notifications of asynchronous events, like timer |
1336 | * expirations or I/O completions". In the case of Posix Timers |
1337 | * we allocate the sigqueue structure from the timer_create. If this |
1338 | * allocation fails we are able to report the failure to the application |
1339 | * with an EAGAIN error. |
1340 | */ |
1341 | |
1342 | struct sigqueue *sigqueue_alloc(void) |
1343 | { |
1344 | struct sigqueue *q; |
1345 | |
1346 | if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0))) |
1347 | q->flags |= SIGQUEUE_PREALLOC; |
1348 | return(q); |
1349 | } |
1350 | |
1351 | void sigqueue_free(struct sigqueue *q) |
1352 | { |
1353 | unsigned long flags; |
1354 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1355 | /* |
1356 | * If the signal is still pending remove it from the |
1357 | * pending queue. |
1358 | */ |
1359 | if (unlikely(!list_empty(&q->list))) { |
1360 | read_lock(&tasklist_lock); |
1361 | spin_lock_irqsave(q->lock, flags); |
1362 | if (!list_empty(&q->list)) |
1363 | list_del_init(&q->list); |
1364 | spin_unlock_irqrestore(q->lock, flags); |
1365 | read_unlock(&tasklist_lock); |
1366 | } |
1367 | q->flags &= ~SIGQUEUE_PREALLOC; |
1368 | __sigqueue_free(q); |
1369 | } |
1370 | |
1371 | int |
1372 | send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) |
1373 | { |
1374 | unsigned long flags; |
1375 | int ret = 0; |
1376 | |
1377 | /* |
1378 | * We need the tasklist lock even for the specific |
1379 | * thread case (when we don't need to follow the group |
1380 | * lists) in order to avoid races with "p->sighand" |
1381 | * going away or changing from under us. |
1382 | */ |
1383 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1384 | read_lock(&tasklist_lock); |
1385 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1386 | |
1387 | if (unlikely(!list_empty(&q->list))) { |
1388 | /* |
1389 | * If an SI_TIMER entry is already queue just increment |
1390 | * the overrun count. |
1391 | */ |
1392 | if (q->info.si_code != SI_TIMER) |
1393 | BUG(); |
1394 | q->info.si_overrun++; |
1395 | goto out; |
1396 | } |
1397 | /* Short-circuit ignored signals. */ |
1398 | if (sig_ignored(p, sig)) { |
1399 | ret = 1; |
1400 | goto out; |
1401 | } |
1402 | |
1403 | q->lock = &p->sighand->siglock; |
1404 | list_add_tail(&q->list, &p->pending.list); |
1405 | sigaddset(&p->pending.signal, sig); |
1406 | if (!sigismember(&p->blocked, sig)) |
1407 | signal_wake_up(p, sig == SIGKILL); |
1408 | |
1409 | out: |
1410 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1411 | read_unlock(&tasklist_lock); |
1412 | return(ret); |
1413 | } |
1414 | |
1415 | int |
1416 | send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p) |
1417 | { |
1418 | unsigned long flags; |
1419 | int ret = 0; |
1420 | |
1421 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1422 | read_lock(&tasklist_lock); |
1423 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1424 | handle_stop_signal(sig, p); |
1425 | |
1426 | /* Short-circuit ignored signals. */ |
1427 | if (sig_ignored(p, sig)) { |
1428 | ret = 1; |
1429 | goto out; |
1430 | } |
1431 | |
1432 | if (unlikely(!list_empty(&q->list))) { |
1433 | /* |
1434 | * If an SI_TIMER entry is already queue just increment |
1435 | * the overrun count. Other uses should not try to |
1436 | * send the signal multiple times. |
1437 | */ |
1438 | if (q->info.si_code != SI_TIMER) |
1439 | BUG(); |
1440 | q->info.si_overrun++; |
1441 | goto out; |
1442 | } |
1443 | |
1444 | /* |
1445 | * Put this signal on the shared-pending queue. |
1446 | * We always use the shared queue for process-wide signals, |
1447 | * to avoid several races. |
1448 | */ |
1449 | q->lock = &p->sighand->siglock; |
1450 | list_add_tail(&q->list, &p->signal->shared_pending.list); |
1451 | sigaddset(&p->signal->shared_pending.signal, sig); |
1452 | |
1453 | __group_complete_signal(sig, p); |
1454 | out: |
1455 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1456 | read_unlock(&tasklist_lock); |
1457 | return(ret); |
1458 | } |
1459 | |
1460 | /* |
1461 | * Wake up any threads in the parent blocked in wait* syscalls. |
1462 | */ |
1463 | static inline void __wake_up_parent(struct task_struct *p, |
1464 | struct task_struct *parent) |
1465 | { |
1466 | wake_up_interruptible_sync(&parent->signal->wait_chldexit); |
1467 | } |
1468 | |
1469 | /* |
1470 | * Let a parent know about the death of a child. |
1471 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. |
1472 | */ |
1473 | |
1474 | void do_notify_parent(struct task_struct *tsk, int sig) |
1475 | { |
1476 | struct siginfo info; |
1477 | unsigned long flags; |
1478 | struct sighand_struct *psig; |
1479 | |
1480 | BUG_ON(sig == -1); |
1481 | |
1482 | /* do_notify_parent_cldstop should have been called instead. */ |
1483 | BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED)); |
1484 | |
1485 | BUG_ON(!tsk->ptrace && |
1486 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); |
1487 | |
1488 | info.si_signo = sig; |
1489 | info.si_errno = 0; |
1490 | info.si_pid = tsk->pid; |
1491 | info.si_uid = tsk->uid; |
1492 | |
1493 | /* FIXME: find out whether or not this is supposed to be c*time. */ |
1494 | info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime, |
1495 | tsk->signal->utime)); |
1496 | info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime, |
1497 | tsk->signal->stime)); |
1498 | |
1499 | info.si_status = tsk->exit_code & 0x7f; |
1500 | if (tsk->exit_code & 0x80) |
1501 | info.si_code = CLD_DUMPED; |
1502 | else if (tsk->exit_code & 0x7f) |
1503 | info.si_code = CLD_KILLED; |
1504 | else { |
1505 | info.si_code = CLD_EXITED; |
1506 | info.si_status = tsk->exit_code >> 8; |
1507 | } |
1508 | |
1509 | psig = tsk->parent->sighand; |
1510 | spin_lock_irqsave(&psig->siglock, flags); |
1511 | if (sig == SIGCHLD && |
1512 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
1513 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { |
1514 | /* |
1515 | * We are exiting and our parent doesn't care. POSIX.1 |
1516 | * defines special semantics for setting SIGCHLD to SIG_IGN |
1517 | * or setting the SA_NOCLDWAIT flag: we should be reaped |
1518 | * automatically and not left for our parent's wait4 call. |
1519 | * Rather than having the parent do it as a magic kind of |
1520 | * signal handler, we just set this to tell do_exit that we |
1521 | * can be cleaned up without becoming a zombie. Note that |
1522 | * we still call __wake_up_parent in this case, because a |
1523 | * blocked sys_wait4 might now return -ECHILD. |
1524 | * |
1525 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT |
1526 | * is implementation-defined: we do (if you don't want |
1527 | * it, just use SIG_IGN instead). |
1528 | */ |
1529 | tsk->exit_signal = -1; |
1530 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
1531 | sig = 0; |
1532 | } |
1533 | if (valid_signal(sig) && sig > 0) |
1534 | __group_send_sig_info(sig, &info, tsk->parent); |
1535 | __wake_up_parent(tsk, tsk->parent); |
1536 | spin_unlock_irqrestore(&psig->siglock, flags); |
1537 | } |
1538 | |
1539 | static void |
1540 | do_notify_parent_cldstop(struct task_struct *tsk, struct task_struct *parent, |
1541 | int why) |
1542 | { |
1543 | struct siginfo info; |
1544 | unsigned long flags; |
1545 | struct sighand_struct *sighand; |
1546 | |
1547 | info.si_signo = SIGCHLD; |
1548 | info.si_errno = 0; |
1549 | info.si_pid = tsk->pid; |
1550 | info.si_uid = tsk->uid; |
1551 | |
1552 | /* FIXME: find out whether or not this is supposed to be c*time. */ |
1553 | info.si_utime = cputime_to_jiffies(tsk->utime); |
1554 | info.si_stime = cputime_to_jiffies(tsk->stime); |
1555 | |
1556 | info.si_code = why; |
1557 | switch (why) { |
1558 | case CLD_CONTINUED: |
1559 | info.si_status = SIGCONT; |
1560 | break; |
1561 | case CLD_STOPPED: |
1562 | info.si_status = tsk->signal->group_exit_code & 0x7f; |
1563 | break; |
1564 | case CLD_TRAPPED: |
1565 | info.si_status = tsk->exit_code & 0x7f; |
1566 | break; |
1567 | default: |
1568 | BUG(); |
1569 | } |
1570 | |
1571 | sighand = parent->sighand; |
1572 | spin_lock_irqsave(&sighand->siglock, flags); |
1573 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && |
1574 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) |
1575 | __group_send_sig_info(SIGCHLD, &info, parent); |
1576 | /* |
1577 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. |
1578 | */ |
1579 | __wake_up_parent(tsk, parent); |
1580 | spin_unlock_irqrestore(&sighand->siglock, flags); |
1581 | } |
1582 | |
1583 | /* |
1584 | * This must be called with current->sighand->siglock held. |
1585 | * |
1586 | * This should be the path for all ptrace stops. |
1587 | * We always set current->last_siginfo while stopped here. |
1588 | * That makes it a way to test a stopped process for |
1589 | * being ptrace-stopped vs being job-control-stopped. |
1590 | * |
1591 | * If we actually decide not to stop at all because the tracer is gone, |
1592 | * we leave nostop_code in current->exit_code. |
1593 | */ |
1594 | static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) |
1595 | { |
1596 | /* |
1597 | * If there is a group stop in progress, |
1598 | * we must participate in the bookkeeping. |
1599 | */ |
1600 | if (current->signal->group_stop_count > 0) |
1601 | --current->signal->group_stop_count; |
1602 | |
1603 | current->last_siginfo = info; |
1604 | current->exit_code = exit_code; |
1605 | |
1606 | /* Let the debugger run. */ |
1607 | set_current_state(TASK_TRACED); |
1608 | spin_unlock_irq(¤t->sighand->siglock); |
1609 | read_lock(&tasklist_lock); |
1610 | if (likely(current->ptrace & PT_PTRACED) && |
1611 | likely(current->parent != current->real_parent || |
1612 | !(current->ptrace & PT_ATTACHED)) && |
1613 | (likely(current->parent->signal != current->signal) || |
1614 | !unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))) { |
1615 | do_notify_parent_cldstop(current, current->parent, |
1616 | CLD_TRAPPED); |
1617 | read_unlock(&tasklist_lock); |
1618 | schedule(); |
1619 | } else { |
1620 | /* |
1621 | * By the time we got the lock, our tracer went away. |
1622 | * Don't stop here. |
1623 | */ |
1624 | read_unlock(&tasklist_lock); |
1625 | set_current_state(TASK_RUNNING); |
1626 | current->exit_code = nostop_code; |
1627 | } |
1628 | |
1629 | /* |
1630 | * We are back. Now reacquire the siglock before touching |
1631 | * last_siginfo, so that we are sure to have synchronized with |
1632 | * any signal-sending on another CPU that wants to examine it. |
1633 | */ |
1634 | spin_lock_irq(¤t->sighand->siglock); |
1635 | current->last_siginfo = NULL; |
1636 | |
1637 | /* |
1638 | * Queued signals ignored us while we were stopped for tracing. |
1639 | * So check for any that we should take before resuming user mode. |
1640 | */ |
1641 | recalc_sigpending(); |
1642 | } |
1643 | |
1644 | void ptrace_notify(int exit_code) |
1645 | { |
1646 | siginfo_t info; |
1647 | |
1648 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); |
1649 | |
1650 | memset(&info, 0, sizeof info); |
1651 | info.si_signo = SIGTRAP; |
1652 | info.si_code = exit_code; |
1653 | info.si_pid = current->pid; |
1654 | info.si_uid = current->uid; |
1655 | |
1656 | /* Let the debugger run. */ |
1657 | spin_lock_irq(¤t->sighand->siglock); |
1658 | ptrace_stop(exit_code, 0, &info); |
1659 | spin_unlock_irq(¤t->sighand->siglock); |
1660 | } |
1661 | |
1662 | static void |
1663 | finish_stop(int stop_count) |
1664 | { |
1665 | /* |
1666 | * If there are no other threads in the group, or if there is |
1667 | * a group stop in progress and we are the last to stop, |
1668 | * report to the parent. When ptraced, every thread reports itself. |
1669 | */ |
1670 | if (stop_count < 0 || (current->ptrace & PT_PTRACED)) { |
1671 | read_lock(&tasklist_lock); |
1672 | do_notify_parent_cldstop(current, current->parent, |
1673 | CLD_STOPPED); |
1674 | read_unlock(&tasklist_lock); |
1675 | } |
1676 | else if (stop_count == 0) { |
1677 | read_lock(&tasklist_lock); |
1678 | do_notify_parent_cldstop(current->group_leader, |
1679 | current->group_leader->real_parent, |
1680 | CLD_STOPPED); |
1681 | read_unlock(&tasklist_lock); |
1682 | } |
1683 | |
1684 | schedule(); |
1685 | /* |
1686 | * Now we don't run again until continued. |
1687 | */ |
1688 | current->exit_code = 0; |
1689 | } |
1690 | |
1691 | /* |
1692 | * This performs the stopping for SIGSTOP and other stop signals. |
1693 | * We have to stop all threads in the thread group. |
1694 | * Returns nonzero if we've actually stopped and released the siglock. |
1695 | * Returns zero if we didn't stop and still hold the siglock. |
1696 | */ |
1697 | static int |
1698 | do_signal_stop(int signr) |
1699 | { |
1700 | struct signal_struct *sig = current->signal; |
1701 | struct sighand_struct *sighand = current->sighand; |
1702 | int stop_count = -1; |
1703 | |
1704 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) |
1705 | return 0; |
1706 | |
1707 | if (sig->group_stop_count > 0) { |
1708 | /* |
1709 | * There is a group stop in progress. We don't need to |
1710 | * start another one. |
1711 | */ |
1712 | signr = sig->group_exit_code; |
1713 | stop_count = --sig->group_stop_count; |
1714 | current->exit_code = signr; |
1715 | set_current_state(TASK_STOPPED); |
1716 | if (stop_count == 0) |
1717 | sig->flags = SIGNAL_STOP_STOPPED; |
1718 | spin_unlock_irq(&sighand->siglock); |
1719 | } |
1720 | else if (thread_group_empty(current)) { |
1721 | /* |
1722 | * Lock must be held through transition to stopped state. |
1723 | */ |
1724 | current->exit_code = current->signal->group_exit_code = signr; |
1725 | set_current_state(TASK_STOPPED); |
1726 | sig->flags = SIGNAL_STOP_STOPPED; |
1727 | spin_unlock_irq(&sighand->siglock); |
1728 | } |
1729 | else { |
1730 | /* |
1731 | * There is no group stop already in progress. |
1732 | * We must initiate one now, but that requires |
1733 | * dropping siglock to get both the tasklist lock |
1734 | * and siglock again in the proper order. Note that |
1735 | * this allows an intervening SIGCONT to be posted. |
1736 | * We need to check for that and bail out if necessary. |
1737 | */ |
1738 | struct task_struct *t; |
1739 | |
1740 | spin_unlock_irq(&sighand->siglock); |
1741 | |
1742 | /* signals can be posted during this window */ |
1743 | |
1744 | read_lock(&tasklist_lock); |
1745 | spin_lock_irq(&sighand->siglock); |
1746 | |
1747 | if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED)) { |
1748 | /* |
1749 | * Another stop or continue happened while we |
1750 | * didn't have the lock. We can just swallow this |
1751 | * signal now. If we raced with a SIGCONT, that |
1752 | * should have just cleared it now. If we raced |
1753 | * with another processor delivering a stop signal, |
1754 | * then the SIGCONT that wakes us up should clear it. |
1755 | */ |
1756 | read_unlock(&tasklist_lock); |
1757 | return 0; |
1758 | } |
1759 | |
1760 | if (sig->group_stop_count == 0) { |
1761 | sig->group_exit_code = signr; |
1762 | stop_count = 0; |
1763 | for (t = next_thread(current); t != current; |
1764 | t = next_thread(t)) |
1765 | /* |
1766 | * Setting state to TASK_STOPPED for a group |
1767 | * stop is always done with the siglock held, |
1768 | * so this check has no races. |
1769 | */ |
1770 | if (t->state < TASK_STOPPED) { |
1771 | stop_count++; |
1772 | signal_wake_up(t, 0); |
1773 | } |
1774 | sig->group_stop_count = stop_count; |
1775 | } |
1776 | else { |
1777 | /* A race with another thread while unlocked. */ |
1778 | signr = sig->group_exit_code; |
1779 | stop_count = --sig->group_stop_count; |
1780 | } |
1781 | |
1782 | current->exit_code = signr; |
1783 | set_current_state(TASK_STOPPED); |
1784 | if (stop_count == 0) |
1785 | sig->flags = SIGNAL_STOP_STOPPED; |
1786 | |
1787 | spin_unlock_irq(&sighand->siglock); |
1788 | read_unlock(&tasklist_lock); |
1789 | } |
1790 | |
1791 | finish_stop(stop_count); |
1792 | return 1; |
1793 | } |
1794 | |
1795 | /* |
1796 | * Do appropriate magic when group_stop_count > 0. |
1797 | * We return nonzero if we stopped, after releasing the siglock. |
1798 | * We return zero if we still hold the siglock and should look |
1799 | * for another signal without checking group_stop_count again. |
1800 | */ |
1801 | static inline int handle_group_stop(void) |
1802 | { |
1803 | int stop_count; |
1804 | |
1805 | if (current->signal->group_exit_task == current) { |
1806 | /* |
1807 | * Group stop is so we can do a core dump, |
1808 | * We are the initiating thread, so get on with it. |
1809 | */ |
1810 | current->signal->group_exit_task = NULL; |
1811 | return 0; |
1812 | } |
1813 | |
1814 | if (current->signal->flags & SIGNAL_GROUP_EXIT) |
1815 | /* |
1816 | * Group stop is so another thread can do a core dump, |
1817 | * or else we are racing against a death signal. |
1818 | * Just punt the stop so we can get the next signal. |
1819 | */ |
1820 | return 0; |
1821 | |
1822 | /* |
1823 | * There is a group stop in progress. We stop |
1824 | * without any associated signal being in our queue. |
1825 | */ |
1826 | stop_count = --current->signal->group_stop_count; |
1827 | if (stop_count == 0) |
1828 | current->signal->flags = SIGNAL_STOP_STOPPED; |
1829 | current->exit_code = current->signal->group_exit_code; |
1830 | set_current_state(TASK_STOPPED); |
1831 | spin_unlock_irq(¤t->sighand->siglock); |
1832 | finish_stop(stop_count); |
1833 | return 1; |
1834 | } |
1835 | |
1836 | int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, |
1837 | struct pt_regs *regs, void *cookie) |
1838 | { |
1839 | sigset_t *mask = ¤t->blocked; |
1840 | int signr = 0; |
1841 | |
1842 | relock: |
1843 | spin_lock_irq(¤t->sighand->siglock); |
1844 | for (;;) { |
1845 | struct k_sigaction *ka; |
1846 | |
1847 | if (unlikely(current->signal->group_stop_count > 0) && |
1848 | handle_group_stop()) |
1849 | goto relock; |
1850 | |
1851 | signr = dequeue_signal(current, mask, info); |
1852 | |
1853 | if (!signr) |
1854 | break; /* will return 0 */ |
1855 | |
1856 | if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) { |
1857 | ptrace_signal_deliver(regs, cookie); |
1858 | |
1859 | /* Let the debugger run. */ |
1860 | ptrace_stop(signr, signr, info); |
1861 | |
1862 | /* We're back. Did the debugger cancel the sig? */ |
1863 | signr = current->exit_code; |
1864 | if (signr == 0) |
1865 | continue; |
1866 | |
1867 | current->exit_code = 0; |
1868 | |
1869 | /* Update the siginfo structure if the signal has |
1870 | changed. If the debugger wanted something |
1871 | specific in the siginfo structure then it should |
1872 | have updated *info via PTRACE_SETSIGINFO. */ |
1873 | if (signr != info->si_signo) { |
1874 | info->si_signo = signr; |
1875 | info->si_errno = 0; |
1876 | info->si_code = SI_USER; |
1877 | info->si_pid = current->parent->pid; |
1878 | info->si_uid = current->parent->uid; |
1879 | } |
1880 | |
1881 | /* If the (new) signal is now blocked, requeue it. */ |
1882 | if (sigismember(¤t->blocked, signr)) { |
1883 | specific_send_sig_info(signr, info, current); |
1884 | continue; |
1885 | } |
1886 | } |
1887 | |
1888 | ka = ¤t->sighand->action[signr-1]; |
1889 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
1890 | continue; |
1891 | if (ka->sa.sa_handler != SIG_DFL) { |
1892 | /* Run the handler. */ |
1893 | *return_ka = *ka; |
1894 | |
1895 | if (ka->sa.sa_flags & SA_ONESHOT) |
1896 | ka->sa.sa_handler = SIG_DFL; |
1897 | |
1898 | break; /* will return non-zero "signr" value */ |
1899 | } |
1900 | |
1901 | /* |
1902 | * Now we are doing the default action for this signal. |
1903 | */ |
1904 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ |
1905 | continue; |
1906 | |
1907 | /* Init gets no signals it doesn't want. */ |
1908 | if (current->pid == 1) |
1909 | continue; |
1910 | |
1911 | if (sig_kernel_stop(signr)) { |
1912 | /* |
1913 | * The default action is to stop all threads in |
1914 | * the thread group. The job control signals |
1915 | * do nothing in an orphaned pgrp, but SIGSTOP |
1916 | * always works. Note that siglock needs to be |
1917 | * dropped during the call to is_orphaned_pgrp() |
1918 | * because of lock ordering with tasklist_lock. |
1919 | * This allows an intervening SIGCONT to be posted. |
1920 | * We need to check for that and bail out if necessary. |
1921 | */ |
1922 | if (signr != SIGSTOP) { |
1923 | spin_unlock_irq(¤t->sighand->siglock); |
1924 | |
1925 | /* signals can be posted during this window */ |
1926 | |
1927 | if (is_orphaned_pgrp(process_group(current))) |
1928 | goto relock; |
1929 | |
1930 | spin_lock_irq(¤t->sighand->siglock); |
1931 | } |
1932 | |
1933 | if (likely(do_signal_stop(signr))) { |
1934 | /* It released the siglock. */ |
1935 | goto relock; |
1936 | } |
1937 | |
1938 | /* |
1939 | * We didn't actually stop, due to a race |
1940 | * with SIGCONT or something like that. |
1941 | */ |
1942 | continue; |
1943 | } |
1944 | |
1945 | spin_unlock_irq(¤t->sighand->siglock); |
1946 | |
1947 | /* |
1948 | * Anything else is fatal, maybe with a core dump. |
1949 | */ |
1950 | current->flags |= PF_SIGNALED; |
1951 | if (sig_kernel_coredump(signr)) { |
1952 | /* |
1953 | * If it was able to dump core, this kills all |
1954 | * other threads in the group and synchronizes with |
1955 | * their demise. If we lost the race with another |
1956 | * thread getting here, it set group_exit_code |
1957 | * first and our do_group_exit call below will use |
1958 | * that value and ignore the one we pass it. |
1959 | */ |
1960 | do_coredump((long)signr, signr, regs); |
1961 | } |
1962 | |
1963 | /* |
1964 | * Death signals, no core dump. |
1965 | */ |
1966 | do_group_exit(signr); |
1967 | /* NOTREACHED */ |
1968 | } |
1969 | spin_unlock_irq(¤t->sighand->siglock); |
1970 | return signr; |
1971 | } |
1972 | |
1973 | EXPORT_SYMBOL(recalc_sigpending); |
1974 | EXPORT_SYMBOL_GPL(dequeue_signal); |
1975 | EXPORT_SYMBOL(flush_signals); |
1976 | EXPORT_SYMBOL(force_sig); |
1977 | EXPORT_SYMBOL(kill_pg); |
1978 | EXPORT_SYMBOL(kill_proc); |
1979 | EXPORT_SYMBOL(ptrace_notify); |
1980 | EXPORT_SYMBOL(send_sig); |
1981 | EXPORT_SYMBOL(send_sig_info); |
1982 | EXPORT_SYMBOL(sigprocmask); |
1983 | EXPORT_SYMBOL(block_all_signals); |
1984 | EXPORT_SYMBOL(unblock_all_signals); |
1985 | |
1986 | |
1987 | /* |
1988 | * System call entry points. |
1989 | */ |
1990 | |
1991 | asmlinkage long sys_restart_syscall(void) |
1992 | { |
1993 | struct restart_block *restart = ¤t_thread_info()->restart_block; |
1994 | return restart->fn(restart); |
1995 | } |
1996 | |
1997 | long do_no_restart_syscall(struct restart_block *param) |
1998 | { |
1999 | return -EINTR; |
2000 | } |
2001 | |
2002 | /* |
2003 | * We don't need to get the kernel lock - this is all local to this |
2004 | * particular thread.. (and that's good, because this is _heavily_ |
2005 | * used by various programs) |
2006 | */ |
2007 | |
2008 | /* |
2009 | * This is also useful for kernel threads that want to temporarily |
2010 | * (or permanently) block certain signals. |
2011 | * |
2012 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel |
2013 | * interface happily blocks "unblockable" signals like SIGKILL |
2014 | * and friends. |
2015 | */ |
2016 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) |
2017 | { |
2018 | int error; |
2019 | sigset_t old_block; |
2020 | |
2021 | spin_lock_irq(¤t->sighand->siglock); |
2022 | old_block = current->blocked; |
2023 | error = 0; |
2024 | switch (how) { |
2025 | case SIG_BLOCK: |
2026 | sigorsets(¤t->blocked, ¤t->blocked, set); |
2027 | break; |
2028 | case SIG_UNBLOCK: |
2029 | signandsets(¤t->blocked, ¤t->blocked, set); |
2030 | break; |
2031 | case SIG_SETMASK: |
2032 | current->blocked = *set; |
2033 | break; |
2034 | default: |
2035 | error = -EINVAL; |
2036 | } |
2037 | recalc_sigpending(); |
2038 | spin_unlock_irq(¤t->sighand->siglock); |
2039 | if (oldset) |
2040 | *oldset = old_block; |
2041 | return error; |
2042 | } |
2043 | |
2044 | asmlinkage long |
2045 | sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize) |
2046 | { |
2047 | int error = -EINVAL; |
2048 | sigset_t old_set, new_set; |
2049 | |
2050 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2051 | if (sigsetsize != sizeof(sigset_t)) |
2052 | goto out; |
2053 | |
2054 | if (set) { |
2055 | error = -EFAULT; |
2056 | if (copy_from_user(&new_set, set, sizeof(*set))) |
2057 | goto out; |
2058 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2059 | |
2060 | error = sigprocmask(how, &new_set, &old_set); |
2061 | if (error) |
2062 | goto out; |
2063 | if (oset) |
2064 | goto set_old; |
2065 | } else if (oset) { |
2066 | spin_lock_irq(¤t->sighand->siglock); |
2067 | old_set = current->blocked; |
2068 | spin_unlock_irq(¤t->sighand->siglock); |
2069 | |
2070 | set_old: |
2071 | error = -EFAULT; |
2072 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
2073 | goto out; |
2074 | } |
2075 | error = 0; |
2076 | out: |
2077 | return error; |
2078 | } |
2079 | |
2080 | long do_sigpending(void __user *set, unsigned long sigsetsize) |
2081 | { |
2082 | long error = -EINVAL; |
2083 | sigset_t pending; |
2084 | |
2085 | if (sigsetsize > sizeof(sigset_t)) |
2086 | goto out; |
2087 | |
2088 | spin_lock_irq(¤t->sighand->siglock); |
2089 | sigorsets(&pending, ¤t->pending.signal, |
2090 | ¤t->signal->shared_pending.signal); |
2091 | spin_unlock_irq(¤t->sighand->siglock); |
2092 | |
2093 | /* Outside the lock because only this thread touches it. */ |
2094 | sigandsets(&pending, ¤t->blocked, &pending); |
2095 | |
2096 | error = -EFAULT; |
2097 | if (!copy_to_user(set, &pending, sigsetsize)) |
2098 | error = 0; |
2099 | |
2100 | out: |
2101 | return error; |
2102 | } |
2103 | |
2104 | asmlinkage long |
2105 | sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize) |
2106 | { |
2107 | return do_sigpending(set, sigsetsize); |
2108 | } |
2109 | |
2110 | #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER |
2111 | |
2112 | int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) |
2113 | { |
2114 | int err; |
2115 | |
2116 | if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t))) |
2117 | return -EFAULT; |
2118 | if (from->si_code < 0) |
2119 | return __copy_to_user(to, from, sizeof(siginfo_t)) |
2120 | ? -EFAULT : 0; |
2121 | /* |
2122 | * If you change siginfo_t structure, please be sure |
2123 | * this code is fixed accordingly. |
2124 | * It should never copy any pad contained in the structure |
2125 | * to avoid security leaks, but must copy the generic |
2126 | * 3 ints plus the relevant union member. |
2127 | */ |
2128 | err = __put_user(from->si_signo, &to->si_signo); |
2129 | err |= __put_user(from->si_errno, &to->si_errno); |
2130 | err |= __put_user((short)from->si_code, &to->si_code); |
2131 | switch (from->si_code & __SI_MASK) { |
2132 | case __SI_KILL: |
2133 | err |= __put_user(from->si_pid, &to->si_pid); |
2134 | err |= __put_user(from->si_uid, &to->si_uid); |
2135 | break; |
2136 | case __SI_TIMER: |
2137 | err |= __put_user(from->si_tid, &to->si_tid); |
2138 | err |= __put_user(from->si_overrun, &to->si_overrun); |
2139 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2140 | break; |
2141 | case __SI_POLL: |
2142 | err |= __put_user(from->si_band, &to->si_band); |
2143 | err |= __put_user(from->si_fd, &to->si_fd); |
2144 | break; |
2145 | case __SI_FAULT: |
2146 | err |= __put_user(from->si_addr, &to->si_addr); |
2147 | #ifdef __ARCH_SI_TRAPNO |
2148 | err |= __put_user(from->si_trapno, &to->si_trapno); |
2149 | #endif |
2150 | break; |
2151 | case __SI_CHLD: |
2152 | err |= __put_user(from->si_pid, &to->si_pid); |
2153 | err |= __put_user(from->si_uid, &to->si_uid); |
2154 | err |= __put_user(from->si_status, &to->si_status); |
2155 | err |= __put_user(from->si_utime, &to->si_utime); |
2156 | err |= __put_user(from->si_stime, &to->si_stime); |
2157 | break; |
2158 | case __SI_RT: /* This is not generated by the kernel as of now. */ |
2159 | case __SI_MESGQ: /* But this is */ |
2160 | err |= __put_user(from->si_pid, &to->si_pid); |
2161 | err |= __put_user(from->si_uid, &to->si_uid); |
2162 | err |= __put_user(from->si_ptr, &to->si_ptr); |
2163 | break; |
2164 | default: /* this is just in case for now ... */ |
2165 | err |= __put_user(from->si_pid, &to->si_pid); |
2166 | err |= __put_user(from->si_uid, &to->si_uid); |
2167 | break; |
2168 | } |
2169 | return err; |
2170 | } |
2171 | |
2172 | #endif |
2173 | |
2174 | asmlinkage long |
2175 | sys_rt_sigtimedwait(const sigset_t __user *uthese, |
2176 | siginfo_t __user *uinfo, |
2177 | const struct timespec __user *uts, |
2178 | size_t sigsetsize) |
2179 | { |
2180 | int ret, sig; |
2181 | sigset_t these; |
2182 | struct timespec ts; |
2183 | siginfo_t info; |
2184 | long timeout = 0; |
2185 | |
2186 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2187 | if (sigsetsize != sizeof(sigset_t)) |
2188 | return -EINVAL; |
2189 | |
2190 | if (copy_from_user(&these, uthese, sizeof(these))) |
2191 | return -EFAULT; |
2192 | |
2193 | /* |
2194 | * Invert the set of allowed signals to get those we |
2195 | * want to block. |
2196 | */ |
2197 | sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
2198 | signotset(&these); |
2199 | |
2200 | if (uts) { |
2201 | if (copy_from_user(&ts, uts, sizeof(ts))) |
2202 | return -EFAULT; |
2203 | if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 |
2204 | || ts.tv_sec < 0) |
2205 | return -EINVAL; |
2206 | } |
2207 | |
2208 | spin_lock_irq(¤t->sighand->siglock); |
2209 | sig = dequeue_signal(current, &these, &info); |
2210 | if (!sig) { |
2211 | timeout = MAX_SCHEDULE_TIMEOUT; |
2212 | if (uts) |
2213 | timeout = (timespec_to_jiffies(&ts) |
2214 | + (ts.tv_sec || ts.tv_nsec)); |
2215 | |
2216 | if (timeout) { |
2217 | /* None ready -- temporarily unblock those we're |
2218 | * interested while we are sleeping in so that we'll |
2219 | * be awakened when they arrive. */ |
2220 | current->real_blocked = current->blocked; |
2221 | sigandsets(¤t->blocked, ¤t->blocked, &these); |
2222 | recalc_sigpending(); |
2223 | spin_unlock_irq(¤t->sighand->siglock); |
2224 | |
2225 | current->state = TASK_INTERRUPTIBLE; |
2226 | timeout = schedule_timeout(timeout); |
2227 | |
2228 | if (current->flags & PF_FREEZE) |
2229 | refrigerator(PF_FREEZE); |
2230 | spin_lock_irq(¤t->sighand->siglock); |
2231 | sig = dequeue_signal(current, &these, &info); |
2232 | current->blocked = current->real_blocked; |
2233 | siginitset(¤t->real_blocked, 0); |
2234 | recalc_sigpending(); |
2235 | } |
2236 | } |
2237 | spin_unlock_irq(¤t->sighand->siglock); |
2238 | |
2239 | if (sig) { |
2240 | ret = sig; |
2241 | if (uinfo) { |
2242 | if (copy_siginfo_to_user(uinfo, &info)) |
2243 | ret = -EFAULT; |
2244 | } |
2245 | } else { |
2246 | ret = -EAGAIN; |
2247 | if (timeout) |
2248 | ret = -EINTR; |
2249 | } |
2250 | |
2251 | return ret; |
2252 | } |
2253 | |
2254 | asmlinkage long |
2255 | sys_kill(int pid, int sig) |
2256 | { |
2257 | struct siginfo info; |
2258 | |
2259 | info.si_signo = sig; |
2260 | info.si_errno = 0; |
2261 | info.si_code = SI_USER; |
2262 | info.si_pid = current->tgid; |
2263 | info.si_uid = current->uid; |
2264 | |
2265 | return kill_something_info(sig, &info, pid); |
2266 | } |
2267 | |
2268 | /** |
2269 | * sys_tgkill - send signal to one specific thread |
2270 | * @tgid: the thread group ID of the thread |
2271 | * @pid: the PID of the thread |
2272 | * @sig: signal to be sent |
2273 | * |
2274 | * This syscall also checks the tgid and returns -ESRCH even if the PID |
2275 | * exists but it's not belonging to the target process anymore. This |
2276 | * method solves the problem of threads exiting and PIDs getting reused. |
2277 | */ |
2278 | asmlinkage long sys_tgkill(int tgid, int pid, int sig) |
2279 | { |
2280 | struct siginfo info; |
2281 | int error; |
2282 | struct task_struct *p; |
2283 | |
2284 | /* This is only valid for single tasks */ |
2285 | if (pid <= 0 || tgid <= 0) |
2286 | return -EINVAL; |
2287 | |
2288 | info.si_signo = sig; |
2289 | info.si_errno = 0; |
2290 | info.si_code = SI_TKILL; |
2291 | info.si_pid = current->tgid; |
2292 | info.si_uid = current->uid; |
2293 | |
2294 | read_lock(&tasklist_lock); |
2295 | p = find_task_by_pid(pid); |
2296 | error = -ESRCH; |
2297 | if (p && (p->tgid == tgid)) { |
2298 | error = check_kill_permission(sig, &info, p); |
2299 | /* |
2300 | * The null signal is a permissions and process existence |
2301 | * probe. No signal is actually delivered. |
2302 | */ |
2303 | if (!error && sig && p->sighand) { |
2304 | spin_lock_irq(&p->sighand->siglock); |
2305 | handle_stop_signal(sig, p); |
2306 | error = specific_send_sig_info(sig, &info, p); |
2307 | spin_unlock_irq(&p->sighand->siglock); |
2308 | } |
2309 | } |
2310 | read_unlock(&tasklist_lock); |
2311 | return error; |
2312 | } |
2313 | |
2314 | /* |
2315 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
2316 | */ |
2317 | asmlinkage long |
2318 | sys_tkill(int pid, int sig) |
2319 | { |
2320 | struct siginfo info; |
2321 | int error; |
2322 | struct task_struct *p; |
2323 | |
2324 | /* This is only valid for single tasks */ |
2325 | if (pid <= 0) |
2326 | return -EINVAL; |
2327 | |
2328 | info.si_signo = sig; |
2329 | info.si_errno = 0; |
2330 | info.si_code = SI_TKILL; |
2331 | info.si_pid = current->tgid; |
2332 | info.si_uid = current->uid; |
2333 | |
2334 | read_lock(&tasklist_lock); |
2335 | p = find_task_by_pid(pid); |
2336 | error = -ESRCH; |
2337 | if (p) { |
2338 | error = check_kill_permission(sig, &info, p); |
2339 | /* |
2340 | * The null signal is a permissions and process existence |
2341 | * probe. No signal is actually delivered. |
2342 | */ |
2343 | if (!error && sig && p->sighand) { |
2344 | spin_lock_irq(&p->sighand->siglock); |
2345 | handle_stop_signal(sig, p); |
2346 | error = specific_send_sig_info(sig, &info, p); |
2347 | spin_unlock_irq(&p->sighand->siglock); |
2348 | } |
2349 | } |
2350 | read_unlock(&tasklist_lock); |
2351 | return error; |
2352 | } |
2353 | |
2354 | asmlinkage long |
2355 | sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo) |
2356 | { |
2357 | siginfo_t info; |
2358 | |
2359 | if (copy_from_user(&info, uinfo, sizeof(siginfo_t))) |
2360 | return -EFAULT; |
2361 | |
2362 | /* Not even root can pretend to send signals from the kernel. |
2363 | Nor can they impersonate a kill(), which adds source info. */ |
2364 | if (info.si_code >= 0) |
2365 | return -EPERM; |
2366 | info.si_signo = sig; |
2367 | |
2368 | /* POSIX.1b doesn't mention process groups. */ |
2369 | return kill_proc_info(sig, &info, pid); |
2370 | } |
2371 | |
2372 | int |
2373 | do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact) |
2374 | { |
2375 | struct k_sigaction *k; |
2376 | |
2377 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
2378 | return -EINVAL; |
2379 | |
2380 | k = ¤t->sighand->action[sig-1]; |
2381 | |
2382 | spin_lock_irq(¤t->sighand->siglock); |
2383 | if (signal_pending(current)) { |
2384 | /* |
2385 | * If there might be a fatal signal pending on multiple |
2386 | * threads, make sure we take it before changing the action. |
2387 | */ |
2388 | spin_unlock_irq(¤t->sighand->siglock); |
2389 | return -ERESTARTNOINTR; |
2390 | } |
2391 | |
2392 | if (oact) |
2393 | *oact = *k; |
2394 | |
2395 | if (act) { |
2396 | /* |
2397 | * POSIX 3.3.1.3: |
2398 | * "Setting a signal action to SIG_IGN for a signal that is |
2399 | * pending shall cause the pending signal to be discarded, |
2400 | * whether or not it is blocked." |
2401 | * |
2402 | * "Setting a signal action to SIG_DFL for a signal that is |
2403 | * pending and whose default action is to ignore the signal |
2404 | * (for example, SIGCHLD), shall cause the pending signal to |
2405 | * be discarded, whether or not it is blocked" |
2406 | */ |
2407 | if (act->sa.sa_handler == SIG_IGN || |
2408 | (act->sa.sa_handler == SIG_DFL && |
2409 | sig_kernel_ignore(sig))) { |
2410 | /* |
2411 | * This is a fairly rare case, so we only take the |
2412 | * tasklist_lock once we're sure we'll need it. |
2413 | * Now we must do this little unlock and relock |
2414 | * dance to maintain the lock hierarchy. |
2415 | */ |
2416 | struct task_struct *t = current; |
2417 | spin_unlock_irq(&t->sighand->siglock); |
2418 | read_lock(&tasklist_lock); |
2419 | spin_lock_irq(&t->sighand->siglock); |
2420 | *k = *act; |
2421 | sigdelsetmask(&k->sa.sa_mask, |
2422 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2423 | rm_from_queue(sigmask(sig), &t->signal->shared_pending); |
2424 | do { |
2425 | rm_from_queue(sigmask(sig), &t->pending); |
2426 | recalc_sigpending_tsk(t); |
2427 | t = next_thread(t); |
2428 | } while (t != current); |
2429 | spin_unlock_irq(¤t->sighand->siglock); |
2430 | read_unlock(&tasklist_lock); |
2431 | return 0; |
2432 | } |
2433 | |
2434 | *k = *act; |
2435 | sigdelsetmask(&k->sa.sa_mask, |
2436 | sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2437 | } |
2438 | |
2439 | spin_unlock_irq(¤t->sighand->siglock); |
2440 | return 0; |
2441 | } |
2442 | |
2443 | int |
2444 | do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp) |
2445 | { |
2446 | stack_t oss; |
2447 | int error; |
2448 | |
2449 | if (uoss) { |
2450 | oss.ss_sp = (void __user *) current->sas_ss_sp; |
2451 | oss.ss_size = current->sas_ss_size; |
2452 | oss.ss_flags = sas_ss_flags(sp); |
2453 | } |
2454 | |
2455 | if (uss) { |
2456 | void __user *ss_sp; |
2457 | size_t ss_size; |
2458 | int ss_flags; |
2459 | |
2460 | error = -EFAULT; |
2461 | if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) |
2462 | || __get_user(ss_sp, &uss->ss_sp) |
2463 | || __get_user(ss_flags, &uss->ss_flags) |
2464 | || __get_user(ss_size, &uss->ss_size)) |
2465 | goto out; |
2466 | |
2467 | error = -EPERM; |
2468 | if (on_sig_stack(sp)) |
2469 | goto out; |
2470 | |
2471 | error = -EINVAL; |
2472 | /* |
2473 | * |
2474 | * Note - this code used to test ss_flags incorrectly |
2475 | * old code may have been written using ss_flags==0 |
2476 | * to mean ss_flags==SS_ONSTACK (as this was the only |
2477 | * way that worked) - this fix preserves that older |
2478 | * mechanism |
2479 | */ |
2480 | if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) |
2481 | goto out; |
2482 | |
2483 | if (ss_flags == SS_DISABLE) { |
2484 | ss_size = 0; |
2485 | ss_sp = NULL; |
2486 | } else { |
2487 | error = -ENOMEM; |
2488 | if (ss_size < MINSIGSTKSZ) |
2489 | goto out; |
2490 | } |
2491 | |
2492 | current->sas_ss_sp = (unsigned long) ss_sp; |
2493 | current->sas_ss_size = ss_size; |
2494 | } |
2495 | |
2496 | if (uoss) { |
2497 | error = -EFAULT; |
2498 | if (copy_to_user(uoss, &oss, sizeof(oss))) |
2499 | goto out; |
2500 | } |
2501 | |
2502 | error = 0; |
2503 | out: |
2504 | return error; |
2505 | } |
2506 | |
2507 | #ifdef __ARCH_WANT_SYS_SIGPENDING |
2508 | |
2509 | asmlinkage long |
2510 | sys_sigpending(old_sigset_t __user *set) |
2511 | { |
2512 | return do_sigpending(set, sizeof(*set)); |
2513 | } |
2514 | |
2515 | #endif |
2516 | |
2517 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK |
2518 | /* Some platforms have their own version with special arguments others |
2519 | support only sys_rt_sigprocmask. */ |
2520 | |
2521 | asmlinkage long |
2522 | sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset) |
2523 | { |
2524 | int error; |
2525 | old_sigset_t old_set, new_set; |
2526 | |
2527 | if (set) { |
2528 | error = -EFAULT; |
2529 | if (copy_from_user(&new_set, set, sizeof(*set))) |
2530 | goto out; |
2531 | new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP)); |
2532 | |
2533 | spin_lock_irq(¤t->sighand->siglock); |
2534 | old_set = current->blocked.sig[0]; |
2535 | |
2536 | error = 0; |
2537 | switch (how) { |
2538 | default: |
2539 | error = -EINVAL; |
2540 | break; |
2541 | case SIG_BLOCK: |
2542 | sigaddsetmask(¤t->blocked, new_set); |
2543 | break; |
2544 | case SIG_UNBLOCK: |
2545 | sigdelsetmask(¤t->blocked, new_set); |
2546 | break; |
2547 | case SIG_SETMASK: |
2548 | current->blocked.sig[0] = new_set; |
2549 | break; |
2550 | } |
2551 | |
2552 | recalc_sigpending(); |
2553 | spin_unlock_irq(¤t->sighand->siglock); |
2554 | if (error) |
2555 | goto out; |
2556 | if (oset) |
2557 | goto set_old; |
2558 | } else if (oset) { |
2559 | old_set = current->blocked.sig[0]; |
2560 | set_old: |
2561 | error = -EFAULT; |
2562 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
2563 | goto out; |
2564 | } |
2565 | error = 0; |
2566 | out: |
2567 | return error; |
2568 | } |
2569 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ |
2570 | |
2571 | #ifdef __ARCH_WANT_SYS_RT_SIGACTION |
2572 | asmlinkage long |
2573 | sys_rt_sigaction(int sig, |
2574 | const struct sigaction __user *act, |
2575 | struct sigaction __user *oact, |
2576 | size_t sigsetsize) |
2577 | { |
2578 | struct k_sigaction new_sa, old_sa; |
2579 | int ret = -EINVAL; |
2580 | |
2581 | /* XXX: Don't preclude handling different sized sigset_t's. */ |
2582 | if (sigsetsize != sizeof(sigset_t)) |
2583 | goto out; |
2584 | |
2585 | if (act) { |
2586 | if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
2587 | return -EFAULT; |
2588 | } |
2589 | |
2590 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); |
2591 | |
2592 | if (!ret && oact) { |
2593 | if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
2594 | return -EFAULT; |
2595 | } |
2596 | out: |
2597 | return ret; |
2598 | } |
2599 | #endif /* __ARCH_WANT_SYS_RT_SIGACTION */ |
2600 | |
2601 | #ifdef __ARCH_WANT_SYS_SGETMASK |
2602 | |
2603 | /* |
2604 | * For backwards compatibility. Functionality superseded by sigprocmask. |
2605 | */ |
2606 | asmlinkage long |
2607 | sys_sgetmask(void) |
2608 | { |
2609 | /* SMP safe */ |
2610 | return current->blocked.sig[0]; |
2611 | } |
2612 | |
2613 | asmlinkage long |
2614 | sys_ssetmask(int newmask) |
2615 | { |
2616 | int old; |
2617 | |
2618 | spin_lock_irq(¤t->sighand->siglock); |
2619 | old = current->blocked.sig[0]; |
2620 | |
2621 | siginitset(¤t->blocked, newmask & ~(sigmask(SIGKILL)| |
2622 | sigmask(SIGSTOP))); |
2623 | recalc_sigpending(); |
2624 | spin_unlock_irq(¤t->sighand->siglock); |
2625 | |
2626 | return old; |
2627 | } |
2628 | #endif /* __ARCH_WANT_SGETMASK */ |
2629 | |
2630 | #ifdef __ARCH_WANT_SYS_SIGNAL |
2631 | /* |
2632 | * For backwards compatibility. Functionality superseded by sigaction. |
2633 | */ |
2634 | asmlinkage unsigned long |
2635 | sys_signal(int sig, __sighandler_t handler) |
2636 | { |
2637 | struct k_sigaction new_sa, old_sa; |
2638 | int ret; |
2639 | |
2640 | new_sa.sa.sa_handler = handler; |
2641 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; |
2642 | |
2643 | ret = do_sigaction(sig, &new_sa, &old_sa); |
2644 | |
2645 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; |
2646 | } |
2647 | #endif /* __ARCH_WANT_SYS_SIGNAL */ |
2648 | |
2649 | #ifdef __ARCH_WANT_SYS_PAUSE |
2650 | |
2651 | asmlinkage long |
2652 | sys_pause(void) |
2653 | { |
2654 | current->state = TASK_INTERRUPTIBLE; |
2655 | schedule(); |
2656 | return -ERESTARTNOHAND; |
2657 | } |
2658 | |
2659 | #endif |
2660 | |
2661 | void __init signals_init(void) |
2662 | { |
2663 | sigqueue_cachep = |
2664 | kmem_cache_create("sigqueue", |
2665 | sizeof(struct sigqueue), |
2666 | __alignof__(struct sigqueue), |
2667 | SLAB_PANIC, NULL, NULL); |
2668 | } |