Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/kernel/posix-cpu-timers.c
Revision 630
Wed Mar 4 11:03:09 2009 UTC (15 years, 6 months ago) by niro
File MIME type: text/plain
File size: 41819 byte(s)
Tag kernel26-2.6.12-alx-r9
/*
 * Implement CPU time clocks for the POSIX clock interface.
 */

#include <linux/sched.h>
#include <linux/posix-timers.h>
#include <asm/uaccess.h>
#include <linux/errno.h>

static int check_clock(clockid_t which_clock)
{
	int error = 0;
	struct task_struct *p;
	const pid_t pid = CPUCLOCK_PID(which_clock);

	if (CPUCLOCK_WHICH(which_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	if (pid == 0)
		return 0;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (!p || (CPUCLOCK_PERTHREAD(which_clock) ?
		   p->tgid != current->tgid : p->tgid != pid)) {
		error = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return error;
}
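
/*
 * Usage sketch (not part of this kernel file): the pid validation in
 * check_clock() is what a userspace caller exercises when it builds a
 * CPU clockid for some process and reads it. A minimal, hedged example
 * using the standard POSIX calls clock_getcpuclockid() and
 * clock_gettime():
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		clockid_t cid;
 *		struct timespec ts;
 *
 *		if (clock_getcpuclockid(getpid(), &cid) != 0)
 *			return 1;	// no such pid, or no permission
 *		if (clock_gettime(cid, &ts) != 0)
 *			return 1;	// e.g. the pid went away
 *		printf("process CPU time: %ld.%09ld s\n",
 *		       (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 */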

static inline union cpu_time_count
timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
{
	union cpu_time_count ret;
	ret.sched = 0;		/* high half always zero when .cpu used */
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
	} else {
		ret.cpu = timespec_to_cputime(tp);
	}
	return ret;
}

static void sample_to_timespec(clockid_t which_clock,
			       union cpu_time_count cpu,
			       struct timespec *tp)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		tp->tv_sec = div_long_long_rem(cpu.sched,
					       NSEC_PER_SEC, &tp->tv_nsec);
	} else {
		cputime_to_timespec(cpu.cpu, tp);
	}
}

static inline int cpu_time_before(clockid_t which_clock,
				  union cpu_time_count now,
				  union cpu_time_count then)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		return now.sched < then.sched;
	} else {
		return cputime_lt(now.cpu, then.cpu);
	}
}
static inline void cpu_time_add(clockid_t which_clock,
				union cpu_time_count *acc,
				union cpu_time_count val)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		acc->sched += val.sched;
	} else {
		acc->cpu = cputime_add(acc->cpu, val.cpu);
	}
}
static inline union cpu_time_count cpu_time_sub(clockid_t which_clock,
						union cpu_time_count a,
						union cpu_time_count b)
{
	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
		a.sched -= b.sched;
	} else {
		a.cpu = cputime_sub(a.cpu, b.cpu);
	}
	return a;
}

/*
 * Update expiry time from increment, and increase overrun count,
 * given the current clock sample.
 */
static inline void bump_cpu_timer(struct k_itimer *timer,
				  union cpu_time_count now)
{
	int i;

	if (timer->it.cpu.incr.sched == 0)
		return;

	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		unsigned long long delta, incr;

		if (now.sched < timer->it.cpu.expires.sched)
			return;
		incr = timer->it.cpu.incr.sched;
		delta = now.sched + incr - timer->it.cpu.expires.sched;
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; incr < delta - incr; i++)
			incr = incr << 1;
		for (; i >= 0; incr >>= 1, i--) {
			if (delta <= incr)
				continue;
			timer->it.cpu.expires.sched += incr;
			timer->it_overrun += 1 << i;
			delta -= incr;
		}
	} else {
		cputime_t delta, incr;

		if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
			return;
		incr = timer->it.cpu.incr.cpu;
		delta = cputime_sub(cputime_add(now.cpu, incr),
				    timer->it.cpu.expires.cpu);
		/* Don't use (incr*2 < delta), incr*2 might overflow. */
		for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
			incr = cputime_add(incr, incr);
		for (; i >= 0; incr = cputime_halve(incr), i--) {
			if (cputime_le(delta, incr))
				continue;
			timer->it.cpu.expires.cpu =
				cputime_add(timer->it.cpu.expires.cpu, incr);
			timer->it_overrun += 1 << i;
			delta = cputime_sub(delta, incr);
		}
	}
}
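
/*
 * Note on the algorithm above (not part of this kernel file): the two
 * loops count elapsed timer periods in O(log(overruns)) rather than one
 * pass per period. A hedged standalone sketch of the same idea on plain
 * 64-bit counters, with hypothetical names, assuming incr > 0 and
 * now >= *expires:
 *
 *	#include <stdint.h>
 *
 *	// Advance *expires past now in whole incr steps and return how
 *	// many steps (overruns) were taken.
 *	static uint64_t bump(uint64_t *expires, uint64_t incr, uint64_t now)
 *	{
 *		uint64_t delta = now + incr - *expires;
 *		uint64_t overruns = 0;
 *		int i;
 *
 *		// Double incr until one more doubling would pass delta.
 *		for (i = 0; incr < delta - incr; i++)
 *			incr <<= 1;
 *		// Walk back down, taking each power-of-two chunk that fits.
 *		for (; i >= 0; incr >>= 1, i--) {
 *			if (delta <= incr)
 *				continue;
 *			*expires += incr;
 *			overruns += (uint64_t)1 << i;
 *			delta -= incr;
 *		}
 *		return overruns;
 *	}
 */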

static inline cputime_t prof_ticks(struct task_struct *p)
{
	return cputime_add(p->utime, p->stime);
}
static inline cputime_t virt_ticks(struct task_struct *p)
{
	return p->utime;
}
static inline unsigned long long sched_ns(struct task_struct *p)
{
	return (p == current) ? current_sched_time(p) : p->sched_time;
}

int posix_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
	int error = check_clock(which_clock);
	if (!error) {
		tp->tv_sec = 0;
		tp->tv_nsec = ((NSEC_PER_SEC + HZ - 1) / HZ);
		if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
			/*
			 * If sched_clock is using a cycle counter, we
			 * don't have any idea of its true resolution
			 * exported, but it is much finer than 1s/HZ.
			 */
			tp->tv_nsec = 1;
		}
	}
	return error;
}
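
/*
 * Usage sketch (not part of this kernel file): the resolution chosen
 * above is what clock_getres() reports to userspace. Since
 * CLOCK_PROCESS_CPUTIME_ID is registered at the bottom of this file as
 * a CPUCLOCK_SCHED clock, this kernel reports 1 ns for it. Hedged
 * example:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec res;
 *
 *		if (clock_getres(CLOCK_PROCESS_CPUTIME_ID, &res) != 0)
 *			return 1;
 *		printf("resolution: %ld.%09ld s\n",
 *		       (long)res.tv_sec, res.tv_nsec);
 *		return 0;
 *	}
 */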

int posix_cpu_clock_set(clockid_t which_clock, const struct timespec *tp)
{
	/*
	 * You can never reset a CPU clock, but we check for other errors
	 * in the call before failing with EPERM.
	 */
	int error = check_clock(which_clock);
	if (error == 0) {
		error = -EPERM;
	}
	return error;
}


/*
 * Sample a per-thread clock for the given task.
 */
static int cpu_clock_sample(clockid_t which_clock, struct task_struct *p,
			    union cpu_time_count *cpu)
{
	switch (CPUCLOCK_WHICH(which_clock)) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = prof_ticks(p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = virt_ticks(p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = sched_ns(p);
		break;
	}
	return 0;
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
 */
static int cpu_clock_sample_group_locked(unsigned int clock_idx,
					 struct task_struct *p,
					 union cpu_time_count *cpu)
{
	struct task_struct *t = p;
	switch (clock_idx) {
	default:
		return -EINVAL;
	case CPUCLOCK_PROF:
		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
		do {
			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		cpu->cpu = p->signal->utime;
		do {
			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		cpu->sched = p->signal->sched_time;
		/* Add in each other live thread. */
		while ((t = next_thread(t)) != p) {
			cpu->sched += t->sched_time;
		}
		if (p->tgid == current->tgid) {
			/*
			 * We're sampling ourselves, so include the
			 * cycles not yet banked. We still omit
			 * other threads running on other CPUs,
			 * so the total can always be behind as
			 * much as max(nthreads-1,ncpus) * (NSEC_PER_SEC/HZ).
			 */
			cpu->sched += current_sched_time(current);
		} else {
			cpu->sched += p->sched_time;
		}
		break;
	}
	return 0;
}

/*
 * Sample a process (thread group) clock for the given group_leader task.
 * Must be called with tasklist_lock held for reading.
 */
static int cpu_clock_sample_group(clockid_t which_clock,
				  struct task_struct *p,
				  union cpu_time_count *cpu)
{
	int ret;
	unsigned long flags;
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
					    cpu);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	return ret;
}


int posix_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
	const pid_t pid = CPUCLOCK_PID(which_clock);
	int error = -EINVAL;
	union cpu_time_count rtn;

	if (pid == 0) {
		/*
		 * Special case constant value for our own clocks.
		 * We don't have to do any lookup to find ourselves.
		 */
		if (CPUCLOCK_PERTHREAD(which_clock)) {
			/*
			 * Sampling just ourselves we can do with no locking.
			 */
			error = cpu_clock_sample(which_clock,
						 current, &rtn);
		} else {
			read_lock(&tasklist_lock);
			error = cpu_clock_sample_group(which_clock,
						       current, &rtn);
			read_unlock(&tasklist_lock);
		}
	} else {
		/*
		 * Find the given PID, and validate that the caller
		 * should be able to see it.
		 */
		struct task_struct *p;
		read_lock(&tasklist_lock);
		p = find_task_by_pid(pid);
		if (p) {
			if (CPUCLOCK_PERTHREAD(which_clock)) {
				if (p->tgid == current->tgid) {
					error = cpu_clock_sample(which_clock,
								 p, &rtn);
				}
			} else if (p->tgid == pid && p->signal) {
				error = cpu_clock_sample_group(which_clock,
							       p, &rtn);
			}
		}
		read_unlock(&tasklist_lock);
	}

	if (error)
		return error;
	sample_to_timespec(which_clock, rtn, tp);
	return 0;
}
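
/*
 * Usage sketch (not part of this kernel file): both lookup paths above
 * are reachable through the standard clockids registered at the bottom
 * of this file. Hedged example reading the caller's own clocks:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	static void show(clockid_t id, const char *name)
 *	{
 *		struct timespec ts;
 *
 *		if (clock_gettime(id, &ts) == 0)
 *			printf("%s: %ld.%09ld s\n", name,
 *			       (long)ts.tv_sec, ts.tv_nsec);
 *	}
 *
 *	int main(void)
 *	{
 *		show(CLOCK_PROCESS_CPUTIME_ID, "process CPU");
 *		show(CLOCK_THREAD_CPUTIME_ID, "thread CPU");
 *		return 0;
 *	}
 */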


/*
 * Validate the clockid_t for a new CPU-clock timer, and initialize the timer.
 * This is called from sys_timer_create with the new timer already locked.
 */
int posix_cpu_timer_create(struct k_itimer *new_timer)
{
	int ret = 0;
	const pid_t pid = CPUCLOCK_PID(new_timer->it_clock);
	struct task_struct *p;

	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
		return -EINVAL;

	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
	new_timer->it.cpu.incr.sched = 0;
	new_timer->it.cpu.expires.sched = 0;

	read_lock(&tasklist_lock);
	if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
		if (pid == 0) {
			p = current;
		} else {
			p = find_task_by_pid(pid);
			if (p && p->tgid != current->tgid)
				p = NULL;
		}
	} else {
		if (pid == 0) {
			p = current->group_leader;
		} else {
			p = find_task_by_pid(pid);
			if (p && p->tgid != pid)
				p = NULL;
		}
	}
	new_timer->it.cpu.task = p;
	if (p) {
		get_task_struct(p);
	} else {
		ret = -EINVAL;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
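
/*
 * Usage sketch (not part of this kernel file): sys_timer_create lands
 * here for CPU clocks. A hedged userspace example creating and then
 * destroying a process CPU-time timer (older glibc may need -lrt):
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		timer_t tid;
 *		struct sigevent sev = {
 *			.sigev_notify = SIGEV_SIGNAL,
 *			.sigev_signo  = SIGALRM,
 *		};
 *
 *		if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid)) {
 *			perror("timer_create");
 *			return 1;
 *		}
 *		timer_delete(tid);	// reaches posix_cpu_timer_del()
 *		return 0;
 *	}
 */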

/*
 * Clean up a CPU-clock timer that is about to be destroyed.
 * This is called from timer deletion with the timer already locked.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_del(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;

	if (timer->it.cpu.firing)
		return TIMER_RETRY;

	if (unlikely(p == NULL))
		return 0;

	if (!list_empty(&timer->it.cpu.entry)) {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * We raced with the reaping of the task.
			 * The deletion should have cleared us off the list.
			 */
			BUG_ON(!list_empty(&timer->it.cpu.entry));
		} else {
			/*
			 * Take us off the task's timer list.
			 */
			spin_lock(&p->sighand->siglock);
			list_del(&timer->it.cpu.entry);
			spin_unlock(&p->sighand->siglock);
		}
		read_unlock(&tasklist_lock);
	}
	put_task_struct(p);

	return 0;
}

/*
 * Clean out CPU timers still ticking when a thread exited. The task
 * pointer is cleared, and the expiry time is replaced with the residual
 * time for later timer_gettime calls to return.
 * This must be called with the siglock held.
 */
static void cleanup_timers(struct list_head *head,
			   cputime_t utime, cputime_t stime,
			   unsigned long long sched_time)
{
	struct cpu_timer_list *timer, *next;
	cputime_t ptime = cputime_add(utime, stime);

	list_for_each_entry_safe(timer, next, head, entry) {
		timer->task = NULL;
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, ptime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 ptime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		timer->task = NULL;
		list_del_init(&timer->entry);
		if (cputime_lt(timer->expires.cpu, utime)) {
			timer->expires.cpu = cputime_zero;
		} else {
			timer->expires.cpu = cputime_sub(timer->expires.cpu,
							 utime);
		}
	}

	++head;
	list_for_each_entry_safe(timer, next, head, entry) {
		timer->task = NULL;
		list_del_init(&timer->entry);
		if (timer->expires.sched < sched_time) {
			timer->expires.sched = 0;
		} else {
			timer->expires.sched -= sched_time;
		}
	}
}

/*
 * These are both called with the siglock held, when the current thread
 * is being reaped. When the final (leader) thread in the group is reaped,
 * posix_cpu_timers_exit_group will be called after posix_cpu_timers_exit.
 */
void posix_cpu_timers_exit(struct task_struct *tsk)
{
	cleanup_timers(tsk->cpu_timers,
		       tsk->utime, tsk->stime, tsk->sched_time);

}
void posix_cpu_timers_exit_group(struct task_struct *tsk)
{
	cleanup_timers(tsk->signal->cpu_timers,
		       cputime_add(tsk->utime, tsk->signal->utime),
		       cputime_add(tsk->stime, tsk->signal->stime),
		       tsk->sched_time + tsk->signal->sched_time);
}


/*
 * Set the expiry times of all the threads in the process so one of them
 * will go off before the process cumulative expiry total is reached.
 */
static void process_timer_rebalance(struct task_struct *p,
				    unsigned int clock_idx,
				    union cpu_time_count expires,
				    union cpu_time_count val)
{
	cputime_t ticks, left;
	unsigned long long ns, nsleft;
	struct task_struct *t = p;
	unsigned int nthreads = atomic_read(&p->signal->live);

	switch (clock_idx) {
	default:
		BUG();
		break;
	case CPUCLOCK_PROF:
		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
				   nthreads);
		do {
			if (!unlikely(t->exit_state)) {
				ticks = cputime_add(prof_ticks(t), left);
				if (cputime_eq(t->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_prof_expires, ticks)) {
					t->it_prof_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_VIRT:
		left = cputime_div(cputime_sub(expires.cpu, val.cpu),
				   nthreads);
		do {
			if (!unlikely(t->exit_state)) {
				ticks = cputime_add(virt_ticks(t), left);
				if (cputime_eq(t->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(t->it_virt_expires, ticks)) {
					t->it_virt_expires = ticks;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	case CPUCLOCK_SCHED:
		nsleft = expires.sched - val.sched;
		do_div(nsleft, nthreads);
		do {
			if (!unlikely(t->exit_state)) {
				ns = t->sched_time + nsleft;
				if (t->it_sched_expires == 0 ||
				    t->it_sched_expires > ns) {
					t->it_sched_expires = ns;
				}
			}
			t = next_thread(t);
		} while (t != p);
		break;
	}
}

static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
{
	/*
	 * That's all for this thread or process.
	 * We leave our residual in expires to be reported.
	 */
	put_task_struct(timer->it.cpu.task);
	timer->it.cpu.task = NULL;
	timer->it.cpu.expires = cpu_time_sub(timer->it_clock,
					     timer->it.cpu.expires,
					     now);
}

/*
 * Insert the timer on the appropriate list before any timers that
 * expire later. This must be called with the tasklist_lock held
 * for reading, and interrupts disabled.
 */
static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
{
	struct task_struct *p = timer->it.cpu.task;
	struct list_head *head, *listpos;
	struct cpu_timer_list *const nt = &timer->it.cpu;
	struct cpu_timer_list *next;
	unsigned long i;

	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
		p->cpu_timers : p->signal->cpu_timers);
	head += CPUCLOCK_WHICH(timer->it_clock);

	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);

	listpos = head;
	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
		list_for_each_entry(next, head, entry) {
			if (next->expires.sched > nt->expires.sched) {
				listpos = &next->entry;
				break;
			}
		}
	} else {
		list_for_each_entry(next, head, entry) {
			if (cputime_gt(next->expires.cpu, nt->expires.cpu)) {
				listpos = &next->entry;
				break;
			}
		}
	}
	list_add(&nt->entry, listpos);

	if (listpos == head) {
		/*
		 * We are the new earliest-expiring timer.
		 * If we are a thread timer, there can always
		 * be a process timer telling us to stop earlier.
		 */

		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_PROF:
				if (cputime_eq(p->it_prof_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_prof_expires,
					       nt->expires.cpu))
					p->it_prof_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_VIRT:
				if (cputime_eq(p->it_virt_expires,
					       cputime_zero) ||
				    cputime_gt(p->it_virt_expires,
					       nt->expires.cpu))
					p->it_virt_expires = nt->expires.cpu;
				break;
			case CPUCLOCK_SCHED:
				if (p->it_sched_expires == 0 ||
				    p->it_sched_expires > nt->expires.sched)
					p->it_sched_expires = nt->expires.sched;
				break;
			}
		} else {
			/*
			 * For a process timer, we must balance
			 * all the live threads' expirations.
			 */
			switch (CPUCLOCK_WHICH(timer->it_clock)) {
			default:
				BUG();
			case CPUCLOCK_VIRT:
				if (!cputime_eq(p->signal->it_virt_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_virt_expires,
					       timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_PROF:
				if (!cputime_eq(p->signal->it_prof_expires,
						cputime_zero) &&
				    cputime_lt(p->signal->it_prof_expires,
					       timer->it.cpu.expires.cpu))
					break;
				i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
				if (i != RLIM_INFINITY &&
				    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
					break;
				goto rebalance;
			case CPUCLOCK_SCHED:
			rebalance:
				process_timer_rebalance(
					timer->it.cpu.task,
					CPUCLOCK_WHICH(timer->it_clock),
					timer->it.cpu.expires, now);
				break;
			}
		}
	}

	spin_unlock(&p->sighand->siglock);
}

/*
 * The timer is locked, fire it and arrange for its reload.
 */
static void cpu_timer_fire(struct k_itimer *timer)
{
	if (unlikely(timer->sigq == NULL)) {
		/*
		 * This is a special case for clock_nanosleep,
		 * not a normal timer from sys_timer_create.
		 */
		wake_up_process(timer->it_process);
		timer->it.cpu.expires.sched = 0;
	} else if (timer->it.cpu.incr.sched == 0) {
		/*
		 * One-shot timer. Clear it as soon as it's fired.
		 */
		posix_timer_event(timer, 0);
		timer->it.cpu.expires.sched = 0;
	} else if (posix_timer_event(timer, ++timer->it_requeue_pending)) {
		/*
		 * The signal did not get queued because the signal
		 * was ignored, so we won't get any callback to
		 * reload the timer. But we need to keep it
		 * ticking in case the signal is deliverable next time.
		 */
		posix_cpu_timer_schedule(timer);
	}
}

/*
 * Guts of sys_timer_settime for CPU timers.
 * This is called with the timer locked and interrupts disabled.
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
int posix_cpu_timer_set(struct k_itimer *timer, int flags,
			struct itimerspec *new, struct itimerspec *old)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count old_expires, new_expires, val;
	int ret;

	if (unlikely(p == NULL)) {
		/*
		 * Timer refers to a dead task's clock.
		 */
		return -ESRCH;
	}

	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);

	read_lock(&tasklist_lock);
	/*
	 * We need the tasklist_lock to protect against reaping that
	 * clears p->signal. If p has just been reaped, we can no
	 * longer get any information about it at all.
	 */
	if (unlikely(p->signal == NULL)) {
		read_unlock(&tasklist_lock);
		put_task_struct(p);
		timer->it.cpu.task = NULL;
		return -ESRCH;
	}

	/*
	 * Disarm any old timer after extracting its expiry time.
	 */
	BUG_ON(!irqs_disabled());
	spin_lock(&p->sighand->siglock);
	old_expires = timer->it.cpu.expires;
	list_del_init(&timer->it.cpu.entry);
	spin_unlock(&p->sighand->siglock);

	/*
	 * We need to sample the current value to convert the new
	 * value from relative to absolute, and to convert the
	 * old value from absolute to relative. To set a process
	 * timer, we need a sample to balance the thread expiry
	 * times (in arm_timer). With an absolute time, we must
	 * check if it's already passed. In short, we need a sample.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &val);
	} else {
		cpu_clock_sample_group(timer->it_clock, p, &val);
	}

	if (old) {
		if (old_expires.sched == 0) {
			old->it_value.tv_sec = 0;
			old->it_value.tv_nsec = 0;
		} else {
			/*
			 * Update the timer in case it has
			 * overrun already. If it has,
			 * we'll report it as having overrun
			 * and with the next reloaded timer
			 * already ticking, though we are
			 * swallowing that pending
			 * notification here to install the
			 * new setting.
			 */
			bump_cpu_timer(timer, val);
			if (cpu_time_before(timer->it_clock, val,
					    timer->it.cpu.expires)) {
				old_expires = cpu_time_sub(
					timer->it_clock,
					timer->it.cpu.expires, val);
				sample_to_timespec(timer->it_clock,
						   old_expires,
						   &old->it_value);
			} else {
				old->it_value.tv_nsec = 1;
				old->it_value.tv_sec = 0;
			}
		}
	}

	if (unlikely(timer->it.cpu.firing)) {
		/*
		 * We are colliding with the timer actually firing.
		 * Punt after filling in the timer's old value, and
		 * disable this firing since we are already reporting
		 * it as an overrun (thanks to bump_cpu_timer above).
		 */
		read_unlock(&tasklist_lock);
		timer->it.cpu.firing = -1;
		ret = TIMER_RETRY;
		goto out;
	}

	if (new_expires.sched != 0 && !(flags & TIMER_ABSTIME)) {
		cpu_time_add(timer->it_clock, &new_expires, val);
	}

	/*
	 * Install the new expiry time (or zero).
	 * For a timer with no notification action, we don't actually
	 * arm the timer (we'll just fake it for timer_gettime).
	 */
	timer->it.cpu.expires = new_expires;
	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    cpu_time_before(timer->it_clock, val, new_expires)) {
		arm_timer(timer, val);
	}

	read_unlock(&tasklist_lock);

	/*
	 * Install the new reload setting, and
	 * set up the signal and overrun bookkeeping.
	 */
	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
						&new->it_interval);

	/*
	 * This acts as a modification timestamp for the timer,
	 * so any automatic reload attempt will punt on seeing
	 * that we have reset the timer manually.
	 */
	timer->it_requeue_pending = (timer->it_requeue_pending + 2) &
		~REQUEUE_PENDING;
	timer->it_overrun_last = 0;
	timer->it_overrun = -1;

	if (new_expires.sched != 0 &&
	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
	    !cpu_time_before(timer->it_clock, val, new_expires)) {
		/*
		 * The designated time already passed, so we notify
		 * immediately, even if the thread never runs to
		 * accumulate more time on this clock.
		 */
		cpu_timer_fire(timer);
	}

	ret = 0;
 out:
	if (old) {
		sample_to_timespec(timer->it_clock,
				   timer->it.cpu.incr, &old->it_interval);
	}
	return ret;
}
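
/*
 * Usage sketch (not part of this kernel file): timer_settime() and
 * timer_gettime() funnel into posix_cpu_timer_set() above and
 * posix_cpu_timer_get() below. Hedged example; tid is assumed to come
 * from timer_create() on a CPU clock as sketched earlier:
 *
 *	#include <time.h>
 *
 *	static int arm_one_second(timer_t tid)
 *	{
 *		struct itimerspec its = {
 *			// first expiry after 1 s of CPU time, then
 *			// every further second (flags 0 = relative)
 *			.it_value    = { .tv_sec = 1, .tv_nsec = 0 },
 *			.it_interval = { .tv_sec = 1, .tv_nsec = 0 },
 *		};
 *		struct itimerspec left;
 *
 *		if (timer_settime(tid, 0, &its, NULL) != 0)
 *			return -1;
 *		// remaining time, computed by posix_cpu_timer_get()
 *		return timer_gettime(tid, &left);
 *	}
 */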

void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
{
	union cpu_time_count now;
	struct task_struct *p = timer->it.cpu.task;
	int clear_dead;

	/*
	 * Easy part: convert the reload time.
	 */
	sample_to_timespec(timer->it_clock,
			   timer->it.cpu.incr, &itp->it_interval);

	if (timer->it.cpu.expires.sched == 0) {	/* Timer not armed at all. */
		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
		return;
	}

	if (unlikely(p == NULL)) {
		/*
		 * This task already died and the timer will never fire.
		 * In this case, expires is actually the dead value.
		 */
 dead:
		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
				   &itp->it_value);
		return;
	}

	/*
	 * Sample the clock to take the difference with the expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		clear_dead = p->exit_state;
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 * Call the timer disarmed, nothing else to do.
			 */
			put_task_struct(p);
			timer->it.cpu.task = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			goto dead;
		} else {
			cpu_clock_sample_group(timer->it_clock, p, &now);
			clear_dead = (unlikely(p->exit_state) &&
				      thread_group_empty(p));
		}
		read_unlock(&tasklist_lock);
	}

	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
		if (timer->it.cpu.incr.sched == 0 &&
		    cpu_time_before(timer->it_clock,
				    timer->it.cpu.expires, now)) {
			/*
			 * Do-nothing timer expired and has no reload,
			 * so it's as if it was never set.
			 */
			timer->it.cpu.expires.sched = 0;
			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
			return;
		}
		/*
		 * Account for any expirations and reloads that should
		 * have happened.
		 */
		bump_cpu_timer(timer, now);
	}

	if (unlikely(clear_dead)) {
		/*
		 * We've noticed that the thread is dead, but
		 * not yet reaped. Take this opportunity to
		 * drop our task ref.
		 */
		clear_dead_task(timer, now);
		goto dead;
	}

	if (cpu_time_before(timer->it_clock, now, timer->it.cpu.expires)) {
		sample_to_timespec(timer->it_clock,
				   cpu_time_sub(timer->it_clock,
						timer->it.cpu.expires, now),
				   &itp->it_value);
	} else {
		/*
		 * The timer should have expired already, but the firing
		 * hasn't taken place yet. Say it's just about to expire.
		 */
		itp->it_value.tv_nsec = 1;
		itp->it_value.tv_sec = 0;
	}
}

/*
 * Check for any per-thread CPU timers that have fired and move them off
 * the tsk->cpu_timers[N] list onto the firing list. Here we update the
 * tsk->it_*_expires values to reflect the remaining thread CPU timers.
 */
static void check_thread_timers(struct task_struct *tsk,
				struct list_head *firing)
{
	struct list_head *timers = tsk->cpu_timers;

	tsk->it_prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
			tsk->it_prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	tsk->it_virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
			tsk->it_virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	tsk->it_sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (tsk->sched_time < t->expires.sched) {
			tsk->it_sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}
}

/*
 * Check for any per-process CPU timers that have fired and move them
 * off the tsk->signal->cpu_timers list onto the firing list. Per-thread
 * timers have already been taken off.
 */
static void check_process_timers(struct task_struct *tsk,
				 struct list_head *firing)
{
	struct signal_struct *const sig = tsk->signal;
	cputime_t utime, stime, ptime, virt_expires, prof_expires;
	unsigned long long sched_time, sched_expires;
	struct task_struct *t;
	struct list_head *timers = sig->cpu_timers;

	/*
	 * Don't sample the current process CPU clocks if there are no timers.
	 */
	if (list_empty(&timers[CPUCLOCK_PROF]) &&
	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
	    list_empty(&timers[CPUCLOCK_VIRT]) &&
	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
	    list_empty(&timers[CPUCLOCK_SCHED]))
		return;

	/*
	 * Collect the current process totals.
	 */
	utime = sig->utime;
	stime = sig->stime;
	sched_time = sig->sched_time;
	t = tsk;
	do {
		utime = cputime_add(utime, t->utime);
		stime = cputime_add(stime, t->stime);
		sched_time += t->sched_time;
		t = next_thread(t);
	} while (t != tsk);
	ptime = cputime_add(utime, stime);

	prof_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (cputime_lt(ptime, t->expires.cpu)) {
			prof_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	virt_expires = cputime_zero;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (cputime_lt(utime, t->expires.cpu)) {
			virt_expires = t->expires.cpu;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	++timers;
	sched_expires = 0;
	while (!list_empty(timers)) {
		struct cpu_timer_list *t = list_entry(timers->next,
						      struct cpu_timer_list,
						      entry);
		if (sched_time < t->expires.sched) {
			sched_expires = t->expires.sched;
			break;
		}
		t->firing = 1;
		list_move_tail(&t->entry, firing);
	}

	/*
	 * Check for the special case process timers.
	 */
	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
		if (cputime_ge(ptime, sig->it_prof_expires)) {
			/* ITIMER_PROF fires and reloads. */
			sig->it_prof_expires = sig->it_prof_incr;
			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
				sig->it_prof_expires = cputime_add(
					sig->it_prof_expires, ptime);
			}
			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
		    (cputime_eq(prof_expires, cputime_zero) ||
		     cputime_lt(sig->it_prof_expires, prof_expires))) {
			prof_expires = sig->it_prof_expires;
		}
	}
	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
		if (cputime_ge(utime, sig->it_virt_expires)) {
			/* ITIMER_VIRTUAL fires and reloads. */
			sig->it_virt_expires = sig->it_virt_incr;
			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
				sig->it_virt_expires = cputime_add(
					sig->it_virt_expires, utime);
			}
			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
		}
		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
		    (cputime_eq(virt_expires, cputime_zero) ||
		     cputime_lt(sig->it_virt_expires, virt_expires))) {
			virt_expires = sig->it_virt_expires;
		}
	}
	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
		unsigned long psecs = cputime_to_secs(ptime);
		cputime_t x;
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
			/*
			 * At the hard limit, we just die.
			 * No need to calculate anything else now.
			 */
			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
			return;
		}
		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
			/*
			 * At the soft limit, send a SIGXCPU every second.
			 */
			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
			if (sig->rlim[RLIMIT_CPU].rlim_cur
			    < sig->rlim[RLIMIT_CPU].rlim_max) {
				sig->rlim[RLIMIT_CPU].rlim_cur++;
			}
		}
		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
		if (cputime_eq(prof_expires, cputime_zero) ||
		    cputime_lt(x, prof_expires)) {
			prof_expires = x;
		}
	}

	if (!cputime_eq(prof_expires, cputime_zero) ||
	    !cputime_eq(virt_expires, cputime_zero) ||
	    sched_expires != 0) {
		/*
		 * Rebalance the threads' expiry times for the remaining
		 * process CPU timers.
		 */

		cputime_t prof_left, virt_left, ticks;
		unsigned long long sched_left, sched;
		const unsigned int nthreads = atomic_read(&sig->live);

		prof_left = cputime_sub(prof_expires, utime);
		prof_left = cputime_sub(prof_left, stime);
		prof_left = cputime_div(prof_left, nthreads);
		virt_left = cputime_sub(virt_expires, utime);
		virt_left = cputime_div(virt_left, nthreads);
		if (sched_expires) {
			sched_left = sched_expires - sched_time;
			do_div(sched_left, nthreads);
		} else {
			sched_left = 0;
		}
		t = tsk;
		do {
			ticks = cputime_add(cputime_add(t->utime, t->stime),
					    prof_left);
			if (!cputime_eq(prof_expires, cputime_zero) &&
			    (cputime_eq(t->it_prof_expires, cputime_zero) ||
			     cputime_gt(t->it_prof_expires, ticks))) {
				t->it_prof_expires = ticks;
			}

			ticks = cputime_add(t->utime, virt_left);
			if (!cputime_eq(virt_expires, cputime_zero) &&
			    (cputime_eq(t->it_virt_expires, cputime_zero) ||
			     cputime_gt(t->it_virt_expires, ticks))) {
				t->it_virt_expires = ticks;
			}

			sched = t->sched_time + sched_left;
			if (sched_expires && (t->it_sched_expires == 0 ||
					      t->it_sched_expires > sched)) {
				t->it_sched_expires = sched;
			}

			do {
				t = next_thread(t);
			} while (unlikely(t->exit_state));
		} while (t != tsk);
	}
}

/*
 * This is called from the signal code (via do_schedule_next_timer)
 * when the last timer signal was delivered and we have to reload the timer.
 */
void posix_cpu_timer_schedule(struct k_itimer *timer)
{
	struct task_struct *p = timer->it.cpu.task;
	union cpu_time_count now;

	if (unlikely(p == NULL))
		/*
		 * The task was cleaned up already, no future firings.
		 */
		return;

	/*
	 * Fetch the current sample and update the timer's expiry time.
	 */
	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
		cpu_clock_sample(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		if (unlikely(p->exit_state)) {
			clear_dead_task(timer, now);
			return;
		}
		read_lock(&tasklist_lock); /* arm_timer needs it. */
	} else {
		read_lock(&tasklist_lock);
		if (unlikely(p->signal == NULL)) {
			/*
			 * The process has been reaped.
			 * We can't even collect a sample any more.
			 */
			put_task_struct(p);
			timer->it.cpu.task = p = NULL;
			timer->it.cpu.expires.sched = 0;
			read_unlock(&tasklist_lock);
			return;
		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
			/*
			 * We've noticed that the thread is dead, but
			 * not yet reaped. Take this opportunity to
			 * drop our task ref.
			 */
			clear_dead_task(timer, now);
			read_unlock(&tasklist_lock);
			return;
		}
		cpu_clock_sample_group(timer->it_clock, p, &now);
		bump_cpu_timer(timer, now);
		/* Leave the tasklist_lock locked for the call below. */
	}

	/*
	 * Now re-arm for the new expiry time.
	 */
	arm_timer(timer, now);

	read_unlock(&tasklist_lock);
}

/*
 * This is called from the timer interrupt handler. The irq handler has
 * already updated our counts. We need to check if any timers fire now.
 * Interrupts are disabled.
 */
void run_posix_cpu_timers(struct task_struct *tsk)
{
	LIST_HEAD(firing);
	struct k_itimer *timer, *next;

	BUG_ON(!irqs_disabled());

#define UNEXPIRED(clock) \
	(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
	 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))

	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
	    (tsk->it_sched_expires == 0 ||
	     tsk->sched_time < tsk->it_sched_expires))
		return;

#undef UNEXPIRED

	BUG_ON(tsk->exit_state);

	/*
	 * Double-check with locks held.
	 */
	read_lock(&tasklist_lock);
	spin_lock(&tsk->sighand->siglock);

	/*
	 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
	 * all the timers that are firing, and put them on the firing list.
	 */
	check_thread_timers(tsk, &firing);
	check_process_timers(tsk, &firing);

	/*
	 * We must release these locks before taking any timer's lock.
	 * There is a potential race with timer deletion here, as the
	 * siglock now protects our private firing list. We have set
	 * the firing flag in each timer, so that a deletion attempt
	 * that gets the timer lock before we do will give it up and
	 * spin until we've taken care of that timer below.
	 */
	spin_unlock(&tsk->sighand->siglock);
	read_unlock(&tasklist_lock);

	/*
	 * Now that all the timers on our list have the firing flag,
	 * no one will touch their list entries but us. We'll take
	 * each timer's lock before clearing its firing flag, so no
	 * timer call will interfere.
	 */
	list_for_each_entry_safe(timer, next, &firing, it.cpu.entry) {
		int firing;
		spin_lock(&timer->it_lock);
		list_del_init(&timer->it.cpu.entry);
		firing = timer->it.cpu.firing;
		timer->it.cpu.firing = 0;
		/*
		 * The firing flag is -1 if we collided with a reset
		 * of the timer, which already reported this
		 * almost-firing as an overrun. So don't generate an event.
		 */
		if (likely(firing >= 0)) {
			cpu_timer_fire(timer);
		}
		spin_unlock(&timer->it_lock);
	}
}

/*
 * Set one of the process-wide special case CPU timers.
 * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
 * absolute; non-null for ITIMER_*, where *newval is relative and we update
 * it to be absolute, *oldval is absolute and we update it to be relative.
 */
void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
			   cputime_t *newval, cputime_t *oldval)
{
	union cpu_time_count now;
	struct list_head *head;

	BUG_ON(clock_idx == CPUCLOCK_SCHED);
	cpu_clock_sample_group_locked(clock_idx, tsk, &now);

	if (oldval) {
		if (!cputime_eq(*oldval, cputime_zero)) {
			if (cputime_le(*oldval, now.cpu)) {
				/* Just about to fire. */
				*oldval = jiffies_to_cputime(1);
			} else {
				*oldval = cputime_sub(*oldval, now.cpu);
			}
		}

		if (cputime_eq(*newval, cputime_zero))
			return;
		*newval = cputime_add(*newval, now.cpu);

		/*
		 * If the RLIMIT_CPU timer will expire before the
		 * ITIMER_PROF timer, we have nothing else to do.
		 */
		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
		    < cputime_to_secs(*newval))
			return;
	}

	/*
	 * Check whether there are any process timers already set to fire
	 * before this one. If so, we don't have anything more to do.
	 */
	head = &tsk->signal->cpu_timers[clock_idx];
	if (list_empty(head) ||
	    cputime_ge(list_entry(head->next,
				  struct cpu_timer_list, entry)->expires.cpu,
		       *newval)) {
		/*
		 * Rejigger each thread's expiry time so that one will
		 * notice before we hit the process-cumulative expiry time.
		 */
		union cpu_time_count expires = { .sched = 0 };
		expires.cpu = *newval;
		process_timer_rebalance(tsk, clock_idx, expires, now);
	}
}
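
/*
 * Usage sketch (not part of this kernel file): set_process_cpu_timer()
 * is the path behind setitimer() for the CPU itimers, which is where
 * the relative/absolute *oldval handling above matters. Hedged example:
 *
 *	#include <stdio.h>
 *	#include <sys/time.h>
 *
 *	int main(void)
 *	{
 *		struct itimerval val = {
 *			.it_value    = { .tv_sec = 2, .tv_usec = 0 },
 *			.it_interval = { .tv_sec = 2, .tv_usec = 0 },
 *		};
 *
 *		// SIGPROF after every 2 s of process CPU time.
 *		if (setitimer(ITIMER_PROF, &val, NULL) != 0) {
 *			perror("setitimer");
 *			return 1;
 *		}
 *		// Burn CPU; SIGPROF's default action then ends us.
 *		for (;;)
 *			;
 *	}
 */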

static long posix_cpu_clock_nanosleep_restart(struct restart_block *);

int posix_cpu_nsleep(clockid_t which_clock, int flags,
		     struct timespec *rqtp)
{
	struct restart_block *restart_block =
		&current_thread_info()->restart_block;
	struct k_itimer timer;
	int error;

	/*
	 * Diagnose required errors first.
	 */
	if (CPUCLOCK_PERTHREAD(which_clock) &&
	    (CPUCLOCK_PID(which_clock) == 0 ||
	     CPUCLOCK_PID(which_clock) == current->pid))
		return -EINVAL;

	/*
	 * Set up a temporary timer and then wait for it to go off.
	 */
	memset(&timer, 0, sizeof timer);
	spin_lock_init(&timer.it_lock);
	timer.it_clock = which_clock;
	timer.it_overrun = -1;
	error = posix_cpu_timer_create(&timer);
	timer.it_process = current;
	if (!error) {
		struct timespec __user *rmtp;
		static struct itimerspec zero_it;
		struct itimerspec it = { .it_value = *rqtp,
					 .it_interval = {} };

		spin_lock_irq(&timer.it_lock);
		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
		if (error) {
			spin_unlock_irq(&timer.it_lock);
			return error;
		}

		while (!signal_pending(current)) {
			if (timer.it.cpu.expires.sched == 0) {
				/*
				 * Our timer fired and was reset.
				 */
				spin_unlock_irq(&timer.it_lock);
				return 0;
			}

			/*
			 * Block until cpu_timer_fire (or a signal) wakes us.
			 */
			__set_current_state(TASK_INTERRUPTIBLE);
			spin_unlock_irq(&timer.it_lock);
			schedule();
			spin_lock_irq(&timer.it_lock);
		}

		/*
		 * We were interrupted by a signal.
		 */
		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
		posix_cpu_timer_set(&timer, 0, &zero_it, &it);
		spin_unlock_irq(&timer.it_lock);

		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
			/*
			 * It actually did fire already.
			 */
			return 0;
		}

		/*
		 * Report back to the user the time still remaining.
		 */
		rmtp = (struct timespec __user *) restart_block->arg1;
		if (rmtp != NULL && !(flags & TIMER_ABSTIME) &&
		    copy_to_user(rmtp, &it.it_value, sizeof *rmtp))
			return -EFAULT;

		restart_block->fn = posix_cpu_clock_nanosleep_restart;
		/* Caller already set restart_block->arg1 */
		restart_block->arg0 = which_clock;
		restart_block->arg2 = rqtp->tv_sec;
		restart_block->arg3 = rqtp->tv_nsec;

		error = -ERESTART_RESTARTBLOCK;
	}

	return error;
}
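
/*
 * Usage sketch (not part of this kernel file): posix_cpu_nsleep() backs
 * clock_nanosleep() on the process CPU clock; sleeping on your own
 * thread clock is rejected (see the EINVAL check above and
 * thread_cpu_nsleep below). Hedged example; note that in a
 * single-threaded process this never wakes, since the sleeping thread
 * itself burns no CPU, so it only makes sense with other runnable
 * threads:
 *
 *	#include <time.h>
 *
 *	int wait_one_cpu_second(void)
 *	{
 *		// Relative sleep: return once the whole process has
 *		// accumulated one more second of CPU time.
 *		struct timespec t = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *		return clock_nanosleep(CLOCK_PROCESS_CPUTIME_ID, 0,
 *				       &t, NULL);
 *	}
 */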

static long
posix_cpu_clock_nanosleep_restart(struct restart_block *restart_block)
{
	clockid_t which_clock = restart_block->arg0;
	struct timespec t = { .tv_sec = restart_block->arg2,
			      .tv_nsec = restart_block->arg3 };
	restart_block->fn = do_no_restart_syscall;
	return posix_cpu_nsleep(which_clock, TIMER_ABSTIME, &t);
}


#define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
#define THREAD_CLOCK	MAKE_THREAD_CPUCLOCK(0, CPUCLOCK_SCHED)

static int process_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_getres(PROCESS_CLOCK, tp);
}
static int process_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_get(PROCESS_CLOCK, tp);
}
static int process_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = PROCESS_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int process_cpu_nsleep(clockid_t which_clock, int flags,
			      struct timespec *rqtp)
{
	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
}
static int thread_cpu_clock_getres(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_getres(THREAD_CLOCK, tp);
}
static int thread_cpu_clock_get(clockid_t which_clock, struct timespec *tp)
{
	return posix_cpu_clock_get(THREAD_CLOCK, tp);
}
static int thread_cpu_timer_create(struct k_itimer *timer)
{
	timer->it_clock = THREAD_CLOCK;
	return posix_cpu_timer_create(timer);
}
static int thread_cpu_nsleep(clockid_t which_clock, int flags,
			     struct timespec *rqtp)
{
	return -EINVAL;
}

static __init int init_posix_cpu_timers(void)
{
	struct k_clock process = {
		.clock_getres = process_cpu_clock_getres,
		.clock_get = process_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = process_cpu_timer_create,
		.nsleep = process_cpu_nsleep,
	};
	struct k_clock thread = {
		.clock_getres = thread_cpu_clock_getres,
		.clock_get = thread_cpu_clock_get,
		.clock_set = do_posix_clock_nosettime,
		.timer_create = thread_cpu_timer_create,
		.nsleep = thread_cpu_nsleep,
	};

	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);

	return 0;
}
__initcall(init_posix_cpu_timers);