Contents of /alx-src/tags/kernel26-2.6.12-alx-r9/kernel/sys.c
Revision 630 - Wed Mar 4 11:03:09 2009 UTC by niro
File MIME type: text/plain
File size: 40816 byte(s)
Tag kernel26-2.6.12-alx-r9
1 | /* |
2 | * linux/kernel/sys.c |
3 | * |
4 | * Copyright (C) 1991, 1992 Linus Torvalds |
5 | */ |
6 | |
7 | #include <linux/config.h> |
8 | #include <linux/module.h> |
9 | #include <linux/mm.h> |
10 | #include <linux/utsname.h> |
11 | #include <linux/mman.h> |
12 | #include <linux/smp_lock.h> |
13 | #include <linux/notifier.h> |
14 | #include <linux/reboot.h> |
15 | #include <linux/prctl.h> |
16 | #include <linux/init.h> |
17 | #include <linux/highuid.h> |
18 | #include <linux/fs.h> |
19 | #include <linux/workqueue.h> |
20 | #include <linux/device.h> |
21 | #include <linux/key.h> |
22 | #include <linux/times.h> |
23 | #include <linux/posix-timers.h> |
24 | #include <linux/security.h> |
25 | #include <linux/dcookies.h> |
26 | #include <linux/suspend.h> |
27 | #include <linux/tty.h> |
28 | #include <linux/signal.h> |
29 | |
30 | #include <linux/compat.h> |
31 | #include <linux/syscalls.h> |
32 | |
33 | #include <asm/uaccess.h> |
34 | #include <asm/io.h> |
35 | #include <asm/unistd.h> |
36 | |
37 | #ifndef SET_UNALIGN_CTL |
38 | # define SET_UNALIGN_CTL(a,b) (-EINVAL) |
39 | #endif |
40 | #ifndef GET_UNALIGN_CTL |
41 | # define GET_UNALIGN_CTL(a,b) (-EINVAL) |
42 | #endif |
43 | #ifndef SET_FPEMU_CTL |
44 | # define SET_FPEMU_CTL(a,b) (-EINVAL) |
45 | #endif |
46 | #ifndef GET_FPEMU_CTL |
47 | # define GET_FPEMU_CTL(a,b) (-EINVAL) |
48 | #endif |
49 | #ifndef SET_FPEXC_CTL |
50 | # define SET_FPEXC_CTL(a,b) (-EINVAL) |
51 | #endif |
52 | #ifndef GET_FPEXC_CTL |
53 | # define GET_FPEXC_CTL(a,b) (-EINVAL) |
54 | #endif |
55 | |
56 | /* |
57 | * this is where the system-wide overflow UID and GID are defined, for |
58 | * architectures that now have 32-bit UID/GID but didn't in the past |
59 | */ |
60 | |
61 | int overflowuid = DEFAULT_OVERFLOWUID; |
62 | int overflowgid = DEFAULT_OVERFLOWGID; |
63 | |
64 | #ifdef CONFIG_UID16 |
65 | EXPORT_SYMBOL(overflowuid); |
66 | EXPORT_SYMBOL(overflowgid); |
67 | #endif |
68 | |
69 | /* |
70 | * the same as above, but for filesystems which can only store a 16-bit |
71 | * UID and GID. as such, this is needed on all architectures |
72 | */ |
73 | |
74 | int fs_overflowuid = DEFAULT_FS_OVERFLOWUID; |
75 | int fs_overflowgid = DEFAULT_FS_OVERFLOWGID; |
76 | |
77 | EXPORT_SYMBOL(fs_overflowuid); |
78 | EXPORT_SYMBOL(fs_overflowgid); |
79 | |
80 | /* |
81 | * this indicates whether you can reboot with ctrl-alt-del: the default is yes |
82 | */ |
83 | |
84 | int C_A_D = 1; |
85 | int cad_pid = 1; |
86 | |
87 | /* |
88 | * Notifier list for kernel code which wants to be called |
89 | * at shutdown. This is used to stop any idling DMA operations |
90 | * and the like. |
91 | */ |
92 | |
93 | static struct notifier_block *reboot_notifier_list; |
94 | static DEFINE_RWLOCK(notifier_lock); |
95 | |
96 | /** |
97 | * notifier_chain_register - Add notifier to a notifier chain |
98 | * @list: Pointer to root list pointer |
99 | * @n: New entry in notifier chain |
100 | * |
101 | * Adds a notifier to a notifier chain. |
102 | * |
103 | * Currently always returns zero. |
104 | */ |
105 | |
106 | int notifier_chain_register(struct notifier_block **list, struct notifier_block *n) |
107 | { |
108 | write_lock(&notifier_lock); |
109 | while(*list) |
110 | { |
111 | if(n->priority > (*list)->priority) |
112 | break; |
113 | list= &((*list)->next); |
114 | } |
115 | n->next = *list; |
116 | *list=n; |
117 | write_unlock(&notifier_lock); |
118 | return 0; |
119 | } |
120 | |
121 | EXPORT_SYMBOL(notifier_chain_register); |
122 | |
123 | /** |
124 | * notifier_chain_unregister - Remove notifier from a notifier chain |
125 | * @nl: Pointer to root list pointer |
126 | * @n: New entry in notifier chain |
127 | * |
128 | * Removes a notifier from a notifier chain. |
129 | * |
130 | * Returns zero on success, or %-ENOENT on failure. |
131 | */ |
132 | |
133 | int notifier_chain_unregister(struct notifier_block **nl, struct notifier_block *n) |
134 | { |
135 | write_lock(&notifier_lock); |
136 | while((*nl)!=NULL) |
137 | { |
138 | if((*nl)==n) |
139 | { |
140 | *nl=n->next; |
141 | write_unlock(&notifier_lock); |
142 | return 0; |
143 | } |
144 | nl=&((*nl)->next); |
145 | } |
146 | write_unlock(&notifier_lock); |
147 | return -ENOENT; |
148 | } |
149 | |
150 | EXPORT_SYMBOL(notifier_chain_unregister); |
151 | |
152 | /** |
153 | * notifier_call_chain - Call functions in a notifier chain |
154 | * @n: Pointer to root pointer of notifier chain |
155 | * @val: Value passed unmodified to notifier function |
156 | * @v: Pointer passed unmodified to notifier function |
157 | * |
158 | * Calls each function in a notifier chain in turn. |
159 | * |
160 | * If the return value of the notifier can be and'd |
161 | * with %NOTIFY_STOP_MASK, then notifier_call_chain |
162 | * will return immediately, with the return value of |
163 | * the notifier function which halted execution. |
164 | * Otherwise, the return value is the return value |
165 | * of the last notifier function called. |
166 | */ |
167 | |
168 | int notifier_call_chain(struct notifier_block **n, unsigned long val, void *v) |
169 | { |
170 | int ret=NOTIFY_DONE; |
171 | struct notifier_block *nb = *n; |
172 | |
173 | while(nb) |
174 | { |
175 | ret=nb->notifier_call(nb,val,v); |
176 | if(ret&NOTIFY_STOP_MASK) |
177 | { |
178 | return ret; |
179 | } |
180 | nb=nb->next; |
181 | } |
182 | return ret; |
183 | } |
184 | |
185 | EXPORT_SYMBOL(notifier_call_chain); |
186 | |
187 | /** |
188 | * register_reboot_notifier - Register function to be called at reboot time |
189 | * @nb: Info about notifier function to be called |
190 | * |
191 | * Registers a function with the list of functions |
192 | * to be called at reboot time. |
193 | * |
194 | * Currently always returns zero, as notifier_chain_register |
195 | * always returns zero. |
196 | */ |
197 | |
198 | int register_reboot_notifier(struct notifier_block * nb) |
199 | { |
200 | return notifier_chain_register(&reboot_notifier_list, nb); |
201 | } |
202 | |
203 | EXPORT_SYMBOL(register_reboot_notifier); |
204 | |
205 | /** |
206 | * unregister_reboot_notifier - Unregister previously registered reboot notifier |
207 | * @nb: Hook to be unregistered |
208 | * |
209 | * Unregisters a previously registered reboot |
210 | * notifier function. |
211 | * |
212 | * Returns zero on success, or %-ENOENT on failure. |
213 | */ |
214 | |
215 | int unregister_reboot_notifier(struct notifier_block * nb) |
216 | { |
217 | return notifier_chain_unregister(&reboot_notifier_list, nb); |
218 | } |
219 | |
220 | EXPORT_SYMBOL(unregister_reboot_notifier); |
221 | |
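For context, a minimal driver-side sketch (not part of this file; the mydrv_* names are hypothetical) of how the reboot notifier chain above is typically used, assuming the 2.6-era <linux/notifier.h> and <linux/reboot.h> declarations:

#include <linux/notifier.h>
#include <linux/reboot.h>

/* Called with SYS_RESTART, SYS_HALT or SYS_POWER_OFF; for
 * LINUX_REBOOT_CMD_RESTART2 the data pointer is the command string. */
static int mydrv_reboot_event(struct notifier_block *self,
                              unsigned long event, void *data)
{
        /* quiesce DMA, flush hardware state, etc. */
        return NOTIFY_DONE;
}

static struct notifier_block mydrv_reboot_nb = {
        .notifier_call = mydrv_reboot_event,
        .priority      = 0,
};

/* module init:  register_reboot_notifier(&mydrv_reboot_nb);   */
/* module exit:  unregister_reboot_notifier(&mydrv_reboot_nb); */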
222 | static int set_one_prio(struct task_struct *p, int niceval, int error) |
223 | { |
224 | int no_nice; |
225 | |
226 | if (p->uid != current->euid && |
227 | p->euid != current->euid && !capable(CAP_SYS_NICE)) { |
228 | error = -EPERM; |
229 | goto out; |
230 | } |
231 | if (niceval < task_nice(p) && !can_nice(p, niceval)) { |
232 | error = -EACCES; |
233 | goto out; |
234 | } |
235 | no_nice = security_task_setnice(p, niceval); |
236 | if (no_nice) { |
237 | error = no_nice; |
238 | goto out; |
239 | } |
240 | if (error == -ESRCH) |
241 | error = 0; |
242 | set_user_nice(p, niceval); |
243 | out: |
244 | return error; |
245 | } |
246 | |
247 | asmlinkage long sys_setpriority(int which, int who, int niceval) |
248 | { |
249 | struct task_struct *g, *p; |
250 | struct user_struct *user; |
251 | int error = -EINVAL; |
252 | |
253 | if (which > 2 || which < 0) |
254 | goto out; |
255 | |
256 | /* normalize: avoid signed division (rounding problems) */ |
257 | error = -ESRCH; |
258 | if (niceval < -20) |
259 | niceval = -20; |
260 | if (niceval > 19) |
261 | niceval = 19; |
262 | |
263 | read_lock(&tasklist_lock); |
264 | switch (which) { |
265 | case PRIO_PROCESS: |
266 | if (!who) |
267 | who = current->pid; |
268 | p = find_task_by_pid(who); |
269 | if (p) |
270 | error = set_one_prio(p, niceval, error); |
271 | break; |
272 | case PRIO_PGRP: |
273 | if (!who) |
274 | who = process_group(current); |
275 | do_each_task_pid(who, PIDTYPE_PGID, p) { |
276 | error = set_one_prio(p, niceval, error); |
277 | } while_each_task_pid(who, PIDTYPE_PGID, p); |
278 | break; |
279 | case PRIO_USER: |
280 | user = current->user; |
281 | if (!who) |
282 | who = current->uid; |
283 | else |
284 | if ((who != current->uid) && !(user = find_user(who))) |
285 | goto out_unlock; /* No processes for this user */ |
286 | |
287 | do_each_thread(g, p) |
288 | if (p->uid == who) |
289 | error = set_one_prio(p, niceval, error); |
290 | while_each_thread(g, p); |
291 | if (who != current->uid) |
292 | free_uid(user); /* For find_user() */ |
293 | break; |
294 | } |
295 | out_unlock: |
296 | read_unlock(&tasklist_lock); |
297 | out: |
298 | return error; |
299 | } |
300 | |
301 | /* |
302 | * Ugh. To avoid negative return values, "getpriority()" will |
303 | * not return the normal nice-value, but a negated value that |
304 | * has been offset by 20 (ie it returns 40..1 instead of -20..19) |
305 | * to stay compatible. |
306 | */ |
307 | asmlinkage long sys_getpriority(int which, int who) |
308 | { |
309 | struct task_struct *g, *p; |
310 | struct user_struct *user; |
311 | long niceval, retval = -ESRCH; |
312 | |
313 | if (which > 2 || which < 0) |
314 | return -EINVAL; |
315 | |
316 | read_lock(&tasklist_lock); |
317 | switch (which) { |
318 | case PRIO_PROCESS: |
319 | if (!who) |
320 | who = current->pid; |
321 | p = find_task_by_pid(who); |
322 | if (p) { |
323 | niceval = 20 - task_nice(p); |
324 | if (niceval > retval) |
325 | retval = niceval; |
326 | } |
327 | break; |
328 | case PRIO_PGRP: |
329 | if (!who) |
330 | who = process_group(current); |
331 | do_each_task_pid(who, PIDTYPE_PGID, p) { |
332 | niceval = 20 - task_nice(p); |
333 | if (niceval > retval) |
334 | retval = niceval; |
335 | } while_each_task_pid(who, PIDTYPE_PGID, p); |
336 | break; |
337 | case PRIO_USER: |
338 | user = current->user; |
339 | if (!who) |
340 | who = current->uid; |
341 | else |
342 | if ((who != current->uid) && !(user = find_user(who))) |
343 | goto out_unlock; /* No processes for this user */ |
344 | |
345 | do_each_thread(g, p) |
346 | if (p->uid == who) { |
347 | niceval = 20 - task_nice(p); |
348 | if (niceval > retval) |
349 | retval = niceval; |
350 | } |
351 | while_each_thread(g, p); |
352 | if (who != current->uid) |
353 | free_uid(user); /* for find_user() */ |
354 | break; |
355 | } |
356 | out_unlock: |
357 | read_unlock(&tasklist_lock); |
358 | |
359 | return retval; |
360 | } |
361 | |
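A hedged userspace sketch (not part of this file) of the offset convention documented above: the raw syscall returns 20 - nice, so a wrapper has to convert it back to the usual -20..19 range, much as the C library does.

#include <limits.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/resource.h>

/* Hypothetical helper: the raw syscall returns 20 - nice (1..40),
 * never negative, so convert back to the conventional -20..19 range. */
static int my_getnice(void)
{
        long raw = syscall(SYS_getpriority, PRIO_PROCESS, 0);
        return raw < 0 ? INT_MIN : (int)(20 - raw);
}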
362 | |
363 | /* |
364 | * Reboot system call: for obvious reasons only root may call it, |
365 | * and even root needs to set up some magic numbers in the registers |
366 | * so that some mistake won't make this reboot the whole machine. |
367 | * You can also set the meaning of the ctrl-alt-del-key here. |
368 | * |
369 | * reboot doesn't sync: do that yourself before calling this. |
370 | */ |
371 | asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg) |
372 | { |
373 | char buffer[256]; |
374 | |
375 | /* We only trust the superuser with rebooting the system. */ |
376 | if (!capable(CAP_SYS_BOOT)) |
377 | return -EPERM; |
378 | |
379 | /* For safety, we require "magic" arguments. */ |
380 | if (magic1 != LINUX_REBOOT_MAGIC1 || |
381 | (magic2 != LINUX_REBOOT_MAGIC2 && |
382 | magic2 != LINUX_REBOOT_MAGIC2A && |
383 | magic2 != LINUX_REBOOT_MAGIC2B && |
384 | magic2 != LINUX_REBOOT_MAGIC2C)) |
385 | return -EINVAL; |
386 | |
387 | lock_kernel(); |
388 | switch (cmd) { |
389 | case LINUX_REBOOT_CMD_RESTART: |
390 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); |
391 | system_state = SYSTEM_RESTART; |
392 | device_shutdown(); |
393 | printk(KERN_EMERG "Restarting system.\n"); |
394 | machine_restart(NULL); |
395 | break; |
396 | |
397 | case LINUX_REBOOT_CMD_CAD_ON: |
398 | C_A_D = 1; |
399 | break; |
400 | |
401 | case LINUX_REBOOT_CMD_CAD_OFF: |
402 | C_A_D = 0; |
403 | break; |
404 | |
405 | case LINUX_REBOOT_CMD_HALT: |
406 | notifier_call_chain(&reboot_notifier_list, SYS_HALT, NULL); |
407 | system_state = SYSTEM_HALT; |
408 | device_shutdown(); |
409 | printk(KERN_EMERG "System halted.\n"); |
410 | machine_halt(); |
411 | unlock_kernel(); |
412 | do_exit(0); |
413 | break; |
414 | |
415 | case LINUX_REBOOT_CMD_POWER_OFF: |
416 | notifier_call_chain(&reboot_notifier_list, SYS_POWER_OFF, NULL); |
417 | system_state = SYSTEM_POWER_OFF; |
418 | device_shutdown(); |
419 | printk(KERN_EMERG "Power down.\n"); |
420 | machine_power_off(); |
421 | unlock_kernel(); |
422 | do_exit(0); |
423 | break; |
424 | |
425 | case LINUX_REBOOT_CMD_RESTART2: |
426 | if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) { |
427 | unlock_kernel(); |
428 | return -EFAULT; |
429 | } |
430 | buffer[sizeof(buffer) - 1] = '\0'; |
431 | |
432 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, buffer); |
433 | system_state = SYSTEM_RESTART; |
434 | device_shutdown(); |
435 | printk(KERN_EMERG "Restarting system with command '%s'.\n", buffer); |
436 | machine_restart(buffer); |
437 | break; |
438 | |
439 | #ifdef CONFIG_SOFTWARE_SUSPEND |
440 | case LINUX_REBOOT_CMD_SW_SUSPEND: |
441 | { |
442 | int ret = software_suspend(); |
443 | unlock_kernel(); |
444 | return ret; |
445 | } |
446 | #endif |
447 | |
448 | default: |
449 | unlock_kernel(); |
450 | return -EINVAL; |
451 | } |
452 | unlock_kernel(); |
453 | return 0; |
454 | } |
455 | |
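A hedged userspace sketch (not part of this file) of calling this interface: both magic values must match, the caller needs CAP_SYS_BOOT, and, as the comment above notes, the kernel does not sync first.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

/* Hypothetical helper: power the machine off via sys_reboot(). */
static int power_off_now(void)
{
        sync();         /* sys_reboot() does not sync for us */
        return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
                       LINUX_REBOOT_CMD_POWER_OFF, NULL);
}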
456 | static void deferred_cad(void *dummy) |
457 | { |
458 | notifier_call_chain(&reboot_notifier_list, SYS_RESTART, NULL); |
459 | machine_restart(NULL); |
460 | } |
461 | |
462 | /* |
463 | * This function gets called by ctrl-alt-del - ie the keyboard interrupt. |
464 | * As it's called within an interrupt, it may NOT sync: the only choice |
465 | * is whether to reboot at once, or just ignore the ctrl-alt-del. |
466 | */ |
467 | void ctrl_alt_del(void) |
468 | { |
469 | static DECLARE_WORK(cad_work, deferred_cad, NULL); |
470 | |
471 | if (C_A_D) |
472 | schedule_work(&cad_work); |
473 | else |
474 | kill_proc(cad_pid, SIGINT, 1); |
475 | } |
476 | |
477 | |
478 | /* |
479 | * Unprivileged users may change the real gid to the effective gid |
480 | * or vice versa. (BSD-style) |
481 | * |
482 | * If you set the real gid at all, or set the effective gid to a value not |
483 | * equal to the real gid, then the saved gid is set to the new effective gid. |
484 | * |
485 | * This makes it possible for a setgid program to completely drop its |
486 | * privileges, which is often a useful assertion to make when you are doing |
487 | * a security audit over a program. |
488 | * |
489 | * The general idea is that a program which uses just setregid() will be |
490 | * 100% compatible with BSD. A program which uses just setgid() will be |
491 | * 100% compatible with POSIX with saved IDs. |
492 | * |
493 | * SMP: There are no races; the GIDs are checked only by filesystem |
494 | * operations (as far as semantic preservation is concerned). |
495 | */ |
496 | asmlinkage long sys_setregid(gid_t rgid, gid_t egid) |
497 | { |
498 | int old_rgid = current->gid; |
499 | int old_egid = current->egid; |
500 | int new_rgid = old_rgid; |
501 | int new_egid = old_egid; |
502 | int retval; |
503 | |
504 | retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE); |
505 | if (retval) |
506 | return retval; |
507 | |
508 | if (rgid != (gid_t) -1) { |
509 | if ((old_rgid == rgid) || |
510 | (current->egid==rgid) || |
511 | capable(CAP_SETGID)) |
512 | new_rgid = rgid; |
513 | else |
514 | return -EPERM; |
515 | } |
516 | if (egid != (gid_t) -1) { |
517 | if ((old_rgid == egid) || |
518 | (current->egid == egid) || |
519 | (current->sgid == egid) || |
520 | capable(CAP_SETGID)) |
521 | new_egid = egid; |
522 | else { |
523 | return -EPERM; |
524 | } |
525 | } |
526 | if (new_egid != old_egid) |
527 | { |
528 | current->mm->dumpable = 0; |
529 | smp_wmb(); |
530 | } |
531 | if (rgid != (gid_t) -1 || |
532 | (egid != (gid_t) -1 && egid != old_rgid)) |
533 | current->sgid = new_egid; |
534 | current->fsgid = new_egid; |
535 | current->egid = new_egid; |
536 | current->gid = new_rgid; |
537 | key_fsgid_changed(current); |
538 | return 0; |
539 | } |
540 | |
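A hedged userspace sketch (not part of this file) of the privilege drop described in the comment above: because setting the real gid also resets the saved gid to the new effective gid, the change cannot be undone.

#include <unistd.h>
#include <sys/types.h>

/* Hypothetical helper for a setgid program: permanently drop the
 * elevated group by making real, effective and saved gid all equal. */
static int drop_setgid_priv(void)
{
        gid_t real = getgid();
        return setregid(real, real);
}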
541 | /* |
542 | * setgid() is implemented like SysV w/ SAVED_IDS |
543 | * |
544 | * SMP: Same implicit races as above. |
545 | */ |
546 | asmlinkage long sys_setgid(gid_t gid) |
547 | { |
548 | int old_egid = current->egid; |
549 | int retval; |
550 | |
551 | retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID); |
552 | if (retval) |
553 | return retval; |
554 | |
555 | if (capable(CAP_SETGID)) |
556 | { |
557 | if(old_egid != gid) |
558 | { |
559 | current->mm->dumpable=0; |
560 | smp_wmb(); |
561 | } |
562 | current->gid = current->egid = current->sgid = current->fsgid = gid; |
563 | } |
564 | else if ((gid == current->gid) || (gid == current->sgid)) |
565 | { |
566 | if(old_egid != gid) |
567 | { |
568 | current->mm->dumpable=0; |
569 | smp_wmb(); |
570 | } |
571 | current->egid = current->fsgid = gid; |
572 | } |
573 | else |
574 | return -EPERM; |
575 | |
576 | key_fsgid_changed(current); |
577 | return 0; |
578 | } |
579 | |
580 | static int set_user(uid_t new_ruid, int dumpclear) |
581 | { |
582 | struct user_struct *new_user; |
583 | |
584 | new_user = alloc_uid(new_ruid); |
585 | if (!new_user) |
586 | return -EAGAIN; |
587 | |
588 | if (atomic_read(&new_user->processes) >= |
589 | current->signal->rlim[RLIMIT_NPROC].rlim_cur && |
590 | new_user != &root_user) { |
591 | free_uid(new_user); |
592 | return -EAGAIN; |
593 | } |
594 | |
595 | switch_uid(new_user); |
596 | |
597 | if(dumpclear) |
598 | { |
599 | current->mm->dumpable = 0; |
600 | smp_wmb(); |
601 | } |
602 | current->uid = new_ruid; |
603 | return 0; |
604 | } |
605 | |
606 | /* |
607 | * Unprivileged users may change the real uid to the effective uid |
608 | * or vice versa. (BSD-style) |
609 | * |
610 | * If you set the real uid at all, or set the effective uid to a value not |
611 | * equal to the real uid, then the saved uid is set to the new effective uid. |
612 | * |
613 | * This makes it possible for a setuid program to completely drop its |
614 | * privileges, which is often a useful assertion to make when you are doing |
615 | * a security audit over a program. |
616 | * |
617 | * The general idea is that a program which uses just setreuid() will be |
618 | * 100% compatible with BSD. A program which uses just setuid() will be |
619 | * 100% compatible with POSIX with saved IDs. |
620 | */ |
621 | asmlinkage long sys_setreuid(uid_t ruid, uid_t euid) |
622 | { |
623 | int old_ruid, old_euid, old_suid, new_ruid, new_euid; |
624 | int retval; |
625 | |
626 | retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE); |
627 | if (retval) |
628 | return retval; |
629 | |
630 | new_ruid = old_ruid = current->uid; |
631 | new_euid = old_euid = current->euid; |
632 | old_suid = current->suid; |
633 | |
634 | if (ruid != (uid_t) -1) { |
635 | new_ruid = ruid; |
636 | if ((old_ruid != ruid) && |
637 | (current->euid != ruid) && |
638 | !capable(CAP_SETUID)) |
639 | return -EPERM; |
640 | } |
641 | |
642 | if (euid != (uid_t) -1) { |
643 | new_euid = euid; |
644 | if ((old_ruid != euid) && |
645 | (current->euid != euid) && |
646 | (current->suid != euid) && |
647 | !capable(CAP_SETUID)) |
648 | return -EPERM; |
649 | } |
650 | |
651 | if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0) |
652 | return -EAGAIN; |
653 | |
654 | if (new_euid != old_euid) |
655 | { |
656 | current->mm->dumpable=0; |
657 | smp_wmb(); |
658 | } |
659 | current->fsuid = current->euid = new_euid; |
660 | if (ruid != (uid_t) -1 || |
661 | (euid != (uid_t) -1 && euid != old_ruid)) |
662 | current->suid = current->euid; |
663 | current->fsuid = current->euid; |
664 | |
665 | key_fsuid_changed(current); |
666 | |
667 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE); |
668 | } |
669 | |
670 | |
671 | |
672 | /* |
673 | * setuid() is implemented like SysV with SAVED_IDS |
674 | * |
675 | * Note that SAVED_IDS is deficient in that a setuid root program |
676 | * like sendmail, for example, cannot set its uid to be a normal |
677 | * user and then switch back, because if you're root, setuid() sets |
678 | * the saved uid too. If you don't like this, blame the bright people |
679 | * in the POSIX committee and/or USG. Note that the BSD-style setreuid() |
680 | * will allow a root program to temporarily drop privileges and be able to |
681 | * regain them by swapping the real and effective uid. |
682 | */ |
683 | asmlinkage long sys_setuid(uid_t uid) |
684 | { |
685 | int old_euid = current->euid; |
686 | int old_ruid, old_suid, new_ruid, new_suid; |
687 | int retval; |
688 | |
689 | retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID); |
690 | if (retval) |
691 | return retval; |
692 | |
693 | old_ruid = new_ruid = current->uid; |
694 | old_suid = current->suid; |
695 | new_suid = old_suid; |
696 | |
697 | if (capable(CAP_SETUID)) { |
698 | if (uid != old_ruid && set_user(uid, old_euid != uid) < 0) |
699 | return -EAGAIN; |
700 | new_suid = uid; |
701 | } else if ((uid != current->uid) && (uid != new_suid)) |
702 | return -EPERM; |
703 | |
704 | if (old_euid != uid) |
705 | { |
706 | current->mm->dumpable = 0; |
707 | smp_wmb(); |
708 | } |
709 | current->fsuid = current->euid = uid; |
710 | current->suid = new_suid; |
711 | |
712 | key_fsuid_changed(current); |
713 | |
714 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID); |
715 | } |
716 | |
717 | |
718 | /* |
719 | * This function implements a generic ability to update ruid, euid, |
720 | * and suid. This allows you to implement the 4.4 compatible seteuid(). |
721 | */ |
722 | asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid) |
723 | { |
724 | int old_ruid = current->uid; |
725 | int old_euid = current->euid; |
726 | int old_suid = current->suid; |
727 | int retval; |
728 | |
729 | retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES); |
730 | if (retval) |
731 | return retval; |
732 | |
733 | if (!capable(CAP_SETUID)) { |
734 | if ((ruid != (uid_t) -1) && (ruid != current->uid) && |
735 | (ruid != current->euid) && (ruid != current->suid)) |
736 | return -EPERM; |
737 | if ((euid != (uid_t) -1) && (euid != current->uid) && |
738 | (euid != current->euid) && (euid != current->suid)) |
739 | return -EPERM; |
740 | if ((suid != (uid_t) -1) && (suid != current->uid) && |
741 | (suid != current->euid) && (suid != current->suid)) |
742 | return -EPERM; |
743 | } |
744 | if (ruid != (uid_t) -1) { |
745 | if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0) |
746 | return -EAGAIN; |
747 | } |
748 | if (euid != (uid_t) -1) { |
749 | if (euid != current->euid) |
750 | { |
751 | current->mm->dumpable = 0; |
752 | smp_wmb(); |
753 | } |
754 | current->euid = euid; |
755 | } |
756 | current->fsuid = current->euid; |
757 | if (suid != (uid_t) -1) |
758 | current->suid = suid; |
759 | |
760 | key_fsuid_changed(current); |
761 | |
762 | return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES); |
763 | } |
764 | |
765 | asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid) |
766 | { |
767 | int retval; |
768 | |
769 | if (!(retval = put_user(current->uid, ruid)) && |
770 | !(retval = put_user(current->euid, euid))) |
771 | retval = put_user(current->suid, suid); |
772 | |
773 | return retval; |
774 | } |
775 | |
776 | /* |
777 | * Same as above, but for rgid, egid, sgid. |
778 | */ |
779 | asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid) |
780 | { |
781 | int retval; |
782 | |
783 | retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES); |
784 | if (retval) |
785 | return retval; |
786 | |
787 | if (!capable(CAP_SETGID)) { |
788 | if ((rgid != (gid_t) -1) && (rgid != current->gid) && |
789 | (rgid != current->egid) && (rgid != current->sgid)) |
790 | return -EPERM; |
791 | if ((egid != (gid_t) -1) && (egid != current->gid) && |
792 | (egid != current->egid) && (egid != current->sgid)) |
793 | return -EPERM; |
794 | if ((sgid != (gid_t) -1) && (sgid != current->gid) && |
795 | (sgid != current->egid) && (sgid != current->sgid)) |
796 | return -EPERM; |
797 | } |
798 | if (egid != (gid_t) -1) { |
799 | if (egid != current->egid) |
800 | { |
801 | current->mm->dumpable = 0; |
802 | smp_wmb(); |
803 | } |
804 | current->egid = egid; |
805 | } |
806 | current->fsgid = current->egid; |
807 | if (rgid != (gid_t) -1) |
808 | current->gid = rgid; |
809 | if (sgid != (gid_t) -1) |
810 | current->sgid = sgid; |
811 | |
812 | key_fsgid_changed(current); |
813 | return 0; |
814 | } |
815 | |
816 | asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid) |
817 | { |
818 | int retval; |
819 | |
820 | if (!(retval = put_user(current->gid, rgid)) && |
821 | !(retval = put_user(current->egid, egid))) |
822 | retval = put_user(current->sgid, sgid); |
823 | |
824 | return retval; |
825 | } |
826 | |
827 | |
828 | /* |
829 | * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This |
830 | * is used for "access()" and for the NFS daemon (letting nfsd stay at |
831 | * whatever uid it wants to). It normally shadows "euid", except when |
832 | * explicitly set by setfsuid() or for access.. |
833 | */ |
834 | asmlinkage long sys_setfsuid(uid_t uid) |
835 | { |
836 | int old_fsuid; |
837 | |
838 | old_fsuid = current->fsuid; |
839 | if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS)) |
840 | return old_fsuid; |
841 | |
842 | if (uid == current->uid || uid == current->euid || |
843 | uid == current->suid || uid == current->fsuid || |
844 | capable(CAP_SETUID)) |
845 | { |
846 | if (uid != old_fsuid) |
847 | { |
848 | current->mm->dumpable = 0; |
849 | smp_wmb(); |
850 | } |
851 | current->fsuid = uid; |
852 | } |
853 | |
854 | key_fsuid_changed(current); |
855 | |
856 | security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS); |
857 | |
858 | return old_fsuid; |
859 | } |
860 | |
861 | /* |
862 | * The same as above, but for the fsgid. ("Samma på svenska..") |
863 | */ |
864 | asmlinkage long sys_setfsgid(gid_t gid) |
865 | { |
866 | int old_fsgid; |
867 | |
868 | old_fsgid = current->fsgid; |
869 | if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS)) |
870 | return old_fsgid; |
871 | |
872 | if (gid == current->gid || gid == current->egid || |
873 | gid == current->sgid || gid == current->fsgid || |
874 | capable(CAP_SETGID)) |
875 | { |
876 | if (gid != old_fsgid) |
877 | { |
878 | current->mm->dumpable = 0; |
879 | smp_wmb(); |
880 | } |
881 | current->fsgid = gid; |
882 | key_fsgid_changed(current); |
883 | } |
884 | return old_fsgid; |
885 | } |
886 | |
887 | asmlinkage long sys_times(struct tms __user * tbuf) |
888 | { |
889 | /* |
890 | * In the SMP world we might just be unlucky and have one of |
891 | * the times increment as we use it. Since the value is an |
892 | * atomically safe type this is just fine. Conceptually it's |
893 | * as if the syscall took an instant longer to occur. |
894 | */ |
895 | if (tbuf) { |
896 | struct tms tmp; |
897 | struct task_struct *tsk = current; |
898 | struct task_struct *t; |
899 | cputime_t utime, stime, cutime, cstime; |
900 | |
901 | read_lock(&tasklist_lock); |
902 | utime = tsk->signal->utime; |
903 | stime = tsk->signal->stime; |
904 | t = tsk; |
905 | do { |
906 | utime = cputime_add(utime, t->utime); |
907 | stime = cputime_add(stime, t->stime); |
908 | t = next_thread(t); |
909 | } while (t != tsk); |
910 | |
911 | /* |
912 | * While we have tasklist_lock read-locked, no dying thread |
913 | * can be updating current->signal->[us]time. Instead, |
914 | * we got their counts included in the live thread loop. |
915 | * However, another thread can come in right now and |
916 | * do a wait call that updates current->signal->c[us]time. |
917 | * To make sure we always see that pair updated atomically, |
918 | * we take the siglock around fetching them. |
919 | */ |
920 | spin_lock_irq(&tsk->sighand->siglock); |
921 | cutime = tsk->signal->cutime; |
922 | cstime = tsk->signal->cstime; |
923 | spin_unlock_irq(&tsk->sighand->siglock); |
924 | read_unlock(&tasklist_lock); |
925 | |
926 | tmp.tms_utime = cputime_to_clock_t(utime); |
927 | tmp.tms_stime = cputime_to_clock_t(stime); |
928 | tmp.tms_cutime = cputime_to_clock_t(cutime); |
929 | tmp.tms_cstime = cputime_to_clock_t(cstime); |
930 | if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) |
931 | return -EFAULT; |
932 | } |
933 | return (long) jiffies_64_to_clock_t(get_jiffies_64()); |
934 | } |
935 | |
936 | /* |
937 | * This needs some heavy checking ... |
938 | * I just haven't the stomach for it. I also don't fully |
939 | * understand sessions/pgrp etc. Let somebody who does explain it. |
940 | * |
941 | * OK, I think I have the protection semantics right.... this is really |
942 | * only important on a multi-user system anyway, to make sure one user |
943 | * can't send a signal to a process owned by another. -TYT, 12/12/91 |
944 | * |
945 | * Ouch. Had to add the 'did_exec' flag to conform completely to POSIX. |
946 | * LBT 04.03.94 |
947 | */ |
948 | |
949 | asmlinkage long sys_setpgid(pid_t pid, pid_t pgid) |
950 | { |
951 | struct task_struct *p; |
952 | int err = -EINVAL; |
953 | |
954 | if (!pid) |
955 | pid = current->pid; |
956 | if (!pgid) |
957 | pgid = pid; |
958 | if (pgid < 0) |
959 | return -EINVAL; |
960 | |
961 | /* From this point forward we keep holding onto the tasklist lock |
962 | * so that our parent does not change from under us. -DaveM |
963 | */ |
964 | write_lock_irq(&tasklist_lock); |
965 | |
966 | err = -ESRCH; |
967 | p = find_task_by_pid(pid); |
968 | if (!p) |
969 | goto out; |
970 | |
971 | err = -EINVAL; |
972 | if (!thread_group_leader(p)) |
973 | goto out; |
974 | |
975 | if (p->parent == current || p->real_parent == current) { |
976 | err = -EPERM; |
977 | if (p->signal->session != current->signal->session) |
978 | goto out; |
979 | err = -EACCES; |
980 | if (p->did_exec) |
981 | goto out; |
982 | } else { |
983 | err = -ESRCH; |
984 | if (p != current) |
985 | goto out; |
986 | } |
987 | |
988 | err = -EPERM; |
989 | if (p->signal->leader) |
990 | goto out; |
991 | |
992 | if (pgid != pid) { |
993 | struct task_struct *p; |
994 | |
995 | do_each_task_pid(pgid, PIDTYPE_PGID, p) { |
996 | if (p->signal->session == current->signal->session) |
997 | goto ok_pgid; |
998 | } while_each_task_pid(pgid, PIDTYPE_PGID, p); |
999 | goto out; |
1000 | } |
1001 | |
1002 | ok_pgid: |
1003 | err = security_task_setpgid(p, pgid); |
1004 | if (err) |
1005 | goto out; |
1006 | |
1007 | if (process_group(p) != pgid) { |
1008 | detach_pid(p, PIDTYPE_PGID); |
1009 | p->signal->pgrp = pgid; |
1010 | attach_pid(p, PIDTYPE_PGID, pgid); |
1011 | } |
1012 | |
1013 | err = 0; |
1014 | out: |
1015 | /* All paths lead to here, thus we are safe. -DaveM */ |
1016 | write_unlock_irq(&tasklist_lock); |
1017 | return err; |
1018 | } |
1019 | |
1020 | asmlinkage long sys_getpgid(pid_t pid) |
1021 | { |
1022 | if (!pid) { |
1023 | return process_group(current); |
1024 | } else { |
1025 | int retval; |
1026 | struct task_struct *p; |
1027 | |
1028 | read_lock(&tasklist_lock); |
1029 | p = find_task_by_pid(pid); |
1030 | |
1031 | retval = -ESRCH; |
1032 | if (p) { |
1033 | retval = security_task_getpgid(p); |
1034 | if (!retval) |
1035 | retval = process_group(p); |
1036 | } |
1037 | read_unlock(&tasklist_lock); |
1038 | return retval; |
1039 | } |
1040 | } |
1041 | |
1042 | #ifdef __ARCH_WANT_SYS_GETPGRP |
1043 | |
1044 | asmlinkage long sys_getpgrp(void) |
1045 | { |
1046 | /* SMP - assuming writes are word atomic this is fine */ |
1047 | return process_group(current); |
1048 | } |
1049 | |
1050 | #endif |
1051 | |
1052 | asmlinkage long sys_getsid(pid_t pid) |
1053 | { |
1054 | if (!pid) { |
1055 | return current->signal->session; |
1056 | } else { |
1057 | int retval; |
1058 | struct task_struct *p; |
1059 | |
1060 | read_lock(&tasklist_lock); |
1061 | p = find_task_by_pid(pid); |
1062 | |
1063 | retval = -ESRCH; |
1064 | if(p) { |
1065 | retval = security_task_getsid(p); |
1066 | if (!retval) |
1067 | retval = p->signal->session; |
1068 | } |
1069 | read_unlock(&tasklist_lock); |
1070 | return retval; |
1071 | } |
1072 | } |
1073 | |
1074 | asmlinkage long sys_setsid(void) |
1075 | { |
1076 | struct pid *pid; |
1077 | int err = -EPERM; |
1078 | |
1079 | if (!thread_group_leader(current)) |
1080 | return -EINVAL; |
1081 | |
1082 | down(&tty_sem); |
1083 | write_lock_irq(&tasklist_lock); |
1084 | |
1085 | pid = find_pid(PIDTYPE_PGID, current->pid); |
1086 | if (pid) |
1087 | goto out; |
1088 | |
1089 | current->signal->leader = 1; |
1090 | __set_special_pids(current->pid, current->pid); |
1091 | current->signal->tty = NULL; |
1092 | current->signal->tty_old_pgrp = 0; |
1093 | err = process_group(current); |
1094 | out: |
1095 | write_unlock_irq(&tasklist_lock); |
1096 | up(&tty_sem); |
1097 | return err; |
1098 | } |
1099 | |
1100 | /* |
1101 | * Supplementary group IDs |
1102 | */ |
1103 | |
1104 | /* init to 2 - one for init_task, one to ensure it is never freed */ |
1105 | struct group_info init_groups = { .usage = ATOMIC_INIT(2) }; |
1106 | |
1107 | struct group_info *groups_alloc(int gidsetsize) |
1108 | { |
1109 | struct group_info *group_info; |
1110 | int nblocks; |
1111 | int i; |
1112 | |
1113 | nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK; |
1114 | /* Make sure we always allocate at least one indirect block pointer */ |
1115 | nblocks = nblocks ? : 1; |
1116 | group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER); |
1117 | if (!group_info) |
1118 | return NULL; |
1119 | group_info->ngroups = gidsetsize; |
1120 | group_info->nblocks = nblocks; |
1121 | atomic_set(&group_info->usage, 1); |
1122 | |
1123 | if (gidsetsize <= NGROUPS_SMALL) { |
1124 | group_info->blocks[0] = group_info->small_block; |
1125 | } else { |
1126 | for (i = 0; i < nblocks; i++) { |
1127 | gid_t *b; |
1128 | b = (void *)__get_free_page(GFP_USER); |
1129 | if (!b) |
1130 | goto out_undo_partial_alloc; |
1131 | group_info->blocks[i] = b; |
1132 | } |
1133 | } |
1134 | return group_info; |
1135 | |
1136 | out_undo_partial_alloc: |
1137 | while (--i >= 0) { |
1138 | free_page((unsigned long)group_info->blocks[i]); |
1139 | } |
1140 | kfree(group_info); |
1141 | return NULL; |
1142 | } |
1143 | |
1144 | EXPORT_SYMBOL(groups_alloc); |
1145 | |
1146 | void groups_free(struct group_info *group_info) |
1147 | { |
1148 | if (group_info->blocks[0] != group_info->small_block) { |
1149 | int i; |
1150 | for (i = 0; i < group_info->nblocks; i++) |
1151 | free_page((unsigned long)group_info->blocks[i]); |
1152 | } |
1153 | kfree(group_info); |
1154 | } |
1155 | |
1156 | EXPORT_SYMBOL(groups_free); |
1157 | |
1158 | /* export the group_info to a user-space array */ |
1159 | static int groups_to_user(gid_t __user *grouplist, |
1160 | struct group_info *group_info) |
1161 | { |
1162 | int i; |
1163 | int count = group_info->ngroups; |
1164 | |
1165 | for (i = 0; i < group_info->nblocks; i++) { |
1166 | int cp_count = min(NGROUPS_PER_BLOCK, count); |
1167 | int off = i * NGROUPS_PER_BLOCK; |
1168 | int len = cp_count * sizeof(*grouplist); |
1169 | |
1170 | if (copy_to_user(grouplist+off, group_info->blocks[i], len)) |
1171 | return -EFAULT; |
1172 | |
1173 | count -= cp_count; |
1174 | } |
1175 | return 0; |
1176 | } |
1177 | |
1178 | /* fill a group_info from a user-space array - it must be allocated already */ |
1179 | static int groups_from_user(struct group_info *group_info, |
1180 | gid_t __user *grouplist) |
1181 | { |
1182 | int i; |
1183 | int count = group_info->ngroups; |
1184 | |
1185 | for (i = 0; i < group_info->nblocks; i++) { |
1186 | int cp_count = min(NGROUPS_PER_BLOCK, count); |
1187 | int off = i * NGROUPS_PER_BLOCK; |
1188 | int len = cp_count * sizeof(*grouplist); |
1189 | |
1190 | if (copy_from_user(group_info->blocks[i], grouplist+off, len)) |
1191 | return -EFAULT; |
1192 | |
1193 | count -= cp_count; |
1194 | } |
1195 | return 0; |
1196 | } |
1197 | |
1198 | /* a simple Shell sort */ |
1199 | static void groups_sort(struct group_info *group_info) |
1200 | { |
1201 | int base, max, stride; |
1202 | int gidsetsize = group_info->ngroups; |
1203 | |
1204 | for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1) |
1205 | ; /* nothing */ |
1206 | stride /= 3; |
1207 | |
1208 | while (stride) { |
1209 | max = gidsetsize - stride; |
1210 | for (base = 0; base < max; base++) { |
1211 | int left = base; |
1212 | int right = left + stride; |
1213 | gid_t tmp = GROUP_AT(group_info, right); |
1214 | |
1215 | while (left >= 0 && GROUP_AT(group_info, left) > tmp) { |
1216 | GROUP_AT(group_info, right) = |
1217 | GROUP_AT(group_info, left); |
1218 | right = left; |
1219 | left -= stride; |
1220 | } |
1221 | GROUP_AT(group_info, right) = tmp; |
1222 | } |
1223 | stride /= 3; |
1224 | } |
1225 | } |
1226 | |
1227 | /* a simple bsearch */ |
1228 | static int groups_search(struct group_info *group_info, gid_t grp) |
1229 | { |
1230 | int left, right; |
1231 | |
1232 | if (!group_info) |
1233 | return 0; |
1234 | |
1235 | left = 0; |
1236 | right = group_info->ngroups; |
1237 | while (left < right) { |
1238 | int mid = (left+right)/2; |
1239 | int cmp = grp - GROUP_AT(group_info, mid); |
1240 | if (cmp > 0) |
1241 | left = mid + 1; |
1242 | else if (cmp < 0) |
1243 | right = mid; |
1244 | else |
1245 | return 1; |
1246 | } |
1247 | return 0; |
1248 | } |
1249 | |
1250 | /* validate and set current->group_info */ |
1251 | int set_current_groups(struct group_info *group_info) |
1252 | { |
1253 | int retval; |
1254 | struct group_info *old_info; |
1255 | |
1256 | retval = security_task_setgroups(group_info); |
1257 | if (retval) |
1258 | return retval; |
1259 | |
1260 | groups_sort(group_info); |
1261 | get_group_info(group_info); |
1262 | |
1263 | task_lock(current); |
1264 | old_info = current->group_info; |
1265 | current->group_info = group_info; |
1266 | task_unlock(current); |
1267 | |
1268 | put_group_info(old_info); |
1269 | |
1270 | return 0; |
1271 | } |
1272 | |
1273 | EXPORT_SYMBOL(set_current_groups); |
1274 | |
1275 | asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist) |
1276 | { |
1277 | int i = 0; |
1278 | |
1279 | /* |
1280 | * SMP: Nobody else can change our grouplist. Thus we are |
1281 | * safe. |
1282 | */ |
1283 | |
1284 | if (gidsetsize < 0) |
1285 | return -EINVAL; |
1286 | |
1287 | /* no need to grab task_lock here; it cannot change */ |
1288 | get_group_info(current->group_info); |
1289 | i = current->group_info->ngroups; |
1290 | if (gidsetsize) { |
1291 | if (i > gidsetsize) { |
1292 | i = -EINVAL; |
1293 | goto out; |
1294 | } |
1295 | if (groups_to_user(grouplist, current->group_info)) { |
1296 | i = -EFAULT; |
1297 | goto out; |
1298 | } |
1299 | } |
1300 | out: |
1301 | put_group_info(current->group_info); |
1302 | return i; |
1303 | } |
1304 | |
1305 | /* |
1306 | * SMP: Our groups are copy-on-write. We can set them safely |
1307 | * without another task interfering. |
1308 | */ |
1309 | |
1310 | asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist) |
1311 | { |
1312 | struct group_info *group_info; |
1313 | int retval; |
1314 | |
1315 | if (!capable(CAP_SETGID)) |
1316 | return -EPERM; |
1317 | if ((unsigned)gidsetsize > NGROUPS_MAX) |
1318 | return -EINVAL; |
1319 | |
1320 | group_info = groups_alloc(gidsetsize); |
1321 | if (!group_info) |
1322 | return -ENOMEM; |
1323 | retval = groups_from_user(group_info, grouplist); |
1324 | if (retval) { |
1325 | put_group_info(group_info); |
1326 | return retval; |
1327 | } |
1328 | |
1329 | retval = set_current_groups(group_info); |
1330 | put_group_info(group_info); |
1331 | |
1332 | return retval; |
1333 | } |
1334 | |
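A hedged userspace sketch (not part of this file) of driving this pair of syscalls, e.g. while dropping privileges; like the kernel side above, it requires CAP_SETGID.

#include <grp.h>
#include <unistd.h>
#include <sys/types.h>

/* Hypothetical helper: reduce the supplementary group list to the
 * single group 'gid' before giving up CAP_SETGID. */
static int keep_single_group(gid_t gid)
{
        gid_t list[1] = { gid };
        return setgroups(1, list);
}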
1335 | /* |
1336 | * Check whether we're fsgid/egid or in the supplemental group.. |
1337 | */ |
1338 | int in_group_p(gid_t grp) |
1339 | { |
1340 | int retval = 1; |
1341 | if (grp != current->fsgid) { |
1342 | get_group_info(current->group_info); |
1343 | retval = groups_search(current->group_info, grp); |
1344 | put_group_info(current->group_info); |
1345 | } |
1346 | return retval; |
1347 | } |
1348 | |
1349 | EXPORT_SYMBOL(in_group_p); |
1350 | |
1351 | int in_egroup_p(gid_t grp) |
1352 | { |
1353 | int retval = 1; |
1354 | if (grp != current->egid) { |
1355 | get_group_info(current->group_info); |
1356 | retval = groups_search(current->group_info, grp); |
1357 | put_group_info(current->group_info); |
1358 | } |
1359 | return retval; |
1360 | } |
1361 | |
1362 | EXPORT_SYMBOL(in_egroup_p); |
1363 | |
1364 | DECLARE_RWSEM(uts_sem); |
1365 | |
1366 | EXPORT_SYMBOL(uts_sem); |
1367 | |
1368 | asmlinkage long sys_newuname(struct new_utsname __user * name) |
1369 | { |
1370 | int errno = 0; |
1371 | |
1372 | down_read(&uts_sem); |
1373 | if (copy_to_user(name,&system_utsname,sizeof *name)) |
1374 | errno = -EFAULT; |
1375 | up_read(&uts_sem); |
1376 | return errno; |
1377 | } |
1378 | |
1379 | asmlinkage long sys_sethostname(char __user *name, int len) |
1380 | { |
1381 | int errno; |
1382 | char tmp[__NEW_UTS_LEN]; |
1383 | |
1384 | if (!capable(CAP_SYS_ADMIN)) |
1385 | return -EPERM; |
1386 | if (len < 0 || len > __NEW_UTS_LEN) |
1387 | return -EINVAL; |
1388 | down_write(&uts_sem); |
1389 | errno = -EFAULT; |
1390 | if (!copy_from_user(tmp, name, len)) { |
1391 | memcpy(system_utsname.nodename, tmp, len); |
1392 | system_utsname.nodename[len] = 0; |
1393 | errno = 0; |
1394 | } |
1395 | up_write(&uts_sem); |
1396 | return errno; |
1397 | } |
1398 | |
1399 | #ifdef __ARCH_WANT_SYS_GETHOSTNAME |
1400 | |
1401 | asmlinkage long sys_gethostname(char __user *name, int len) |
1402 | { |
1403 | int i, errno; |
1404 | |
1405 | if (len < 0) |
1406 | return -EINVAL; |
1407 | down_read(&uts_sem); |
1408 | i = 1 + strlen(system_utsname.nodename); |
1409 | if (i > len) |
1410 | i = len; |
1411 | errno = 0; |
1412 | if (copy_to_user(name, system_utsname.nodename, i)) |
1413 | errno = -EFAULT; |
1414 | up_read(&uts_sem); |
1415 | return errno; |
1416 | } |
1417 | |
1418 | #endif |
1419 | |
1420 | /* |
1421 | * Only setdomainname; getdomainname can be implemented by calling |
1422 | * uname() |
1423 | */ |
1424 | asmlinkage long sys_setdomainname(char __user *name, int len) |
1425 | { |
1426 | int errno; |
1427 | char tmp[__NEW_UTS_LEN]; |
1428 | |
1429 | if (!capable(CAP_SYS_ADMIN)) |
1430 | return -EPERM; |
1431 | if (len < 0 || len > __NEW_UTS_LEN) |
1432 | return -EINVAL; |
1433 | |
1434 | down_write(&uts_sem); |
1435 | errno = -EFAULT; |
1436 | if (!copy_from_user(tmp, name, len)) { |
1437 | memcpy(system_utsname.domainname, tmp, len); |
1438 | system_utsname.domainname[len] = 0; |
1439 | errno = 0; |
1440 | } |
1441 | up_write(&uts_sem); |
1442 | return errno; |
1443 | } |
1444 | |
1445 | asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim) |
1446 | { |
1447 | if (resource >= RLIM_NLIMITS) |
1448 | return -EINVAL; |
1449 | else { |
1450 | struct rlimit value; |
1451 | task_lock(current->group_leader); |
1452 | value = current->signal->rlim[resource]; |
1453 | task_unlock(current->group_leader); |
1454 | return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0; |
1455 | } |
1456 | } |
1457 | |
1458 | #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT |
1459 | |
1460 | /* |
1461 | * Back compatibility for getrlimit. Needed for some apps. |
1462 | */ |
1463 | |
1464 | asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim) |
1465 | { |
1466 | struct rlimit x; |
1467 | if (resource >= RLIM_NLIMITS) |
1468 | return -EINVAL; |
1469 | |
1470 | task_lock(current->group_leader); |
1471 | x = current->signal->rlim[resource]; |
1472 | task_unlock(current->group_leader); |
1473 | if(x.rlim_cur > 0x7FFFFFFF) |
1474 | x.rlim_cur = 0x7FFFFFFF; |
1475 | if(x.rlim_max > 0x7FFFFFFF) |
1476 | x.rlim_max = 0x7FFFFFFF; |
1477 | return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0; |
1478 | } |
1479 | |
1480 | #endif |
1481 | |
1482 | asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) |
1483 | { |
1484 | struct rlimit new_rlim, *old_rlim; |
1485 | int retval; |
1486 | |
1487 | if (resource >= RLIM_NLIMITS) |
1488 | return -EINVAL; |
1489 | if(copy_from_user(&new_rlim, rlim, sizeof(*rlim))) |
1490 | return -EFAULT; |
1491 | if (new_rlim.rlim_cur > new_rlim.rlim_max) |
1492 | return -EINVAL; |
1493 | old_rlim = current->signal->rlim + resource; |
1494 | if ((new_rlim.rlim_max > old_rlim->rlim_max) && |
1495 | !capable(CAP_SYS_RESOURCE)) |
1496 | return -EPERM; |
1497 | if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN) |
1498 | return -EPERM; |
1499 | |
1500 | retval = security_task_setrlimit(resource, &new_rlim); |
1501 | if (retval) |
1502 | return retval; |
1503 | |
1504 | task_lock(current->group_leader); |
1505 | *old_rlim = new_rlim; |
1506 | task_unlock(current->group_leader); |
1507 | |
1508 | if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY && |
1509 | (cputime_eq(current->signal->it_prof_expires, cputime_zero) || |
1510 | new_rlim.rlim_cur <= cputime_to_secs( |
1511 | current->signal->it_prof_expires))) { |
1512 | cputime_t cputime = secs_to_cputime(new_rlim.rlim_cur); |
1513 | read_lock(&tasklist_lock); |
1514 | spin_lock_irq(&current->sighand->siglock); |
1515 | set_process_cpu_timer(current, CPUCLOCK_PROF, |
1516 | &cputime, NULL); |
1517 | spin_unlock_irq(&current->sighand->siglock); |
1518 | read_unlock(&tasklist_lock); |
1519 | } |
1520 | |
1521 | return 0; |
1522 | } |
1523 | |
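A hedged userspace sketch (not part of this file): raising a soft limit up to the hard limit is always allowed by the check above, while raising rlim_max itself would need CAP_SYS_RESOURCE.

#include <sys/resource.h>

/* Hypothetical helper: bump the soft open-file limit to the hard limit. */
static int raise_nofile_soft_limit(void)
{
        struct rlimit rl;

        if (getrlimit(RLIMIT_NOFILE, &rl) < 0)
                return -1;
        rl.rlim_cur = rl.rlim_max;
        return setrlimit(RLIMIT_NOFILE, &rl);
}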
1524 | /* |
1525 | * It would make sense to put struct rusage in the task_struct, |
1526 | * except that would make the task_struct be *really big*. After |
1527 | * task_struct gets moved into malloc'ed memory, it would |
1528 | * make sense to do this. It will make moving the rest of the information |
1529 | * a lot simpler! (Which we're not doing right now because we're not |
1530 | * measuring them yet). |
1531 | * |
1532 | * This expects to be called with tasklist_lock read-locked or better, |
1533 | * and the siglock not locked. It may momentarily take the siglock. |
1534 | * |
1535 | * When sampling multiple threads for RUSAGE_SELF, under SMP we might have |
1536 | * races with threads incrementing their own counters. But since word |
1537 | * reads are atomic, we either get new values or old values and we don't |
1538 | * care which for the sums. We always take the siglock to protect reading |
1539 | * the c* fields from p->signal from races with exit.c updating those |
1540 | * fields when reaping, so a sample either gets all the additions of a |
1541 | * given child after it's reaped, or none so this sample is before reaping. |
1542 | */ |
1543 | |
1544 | static void k_getrusage(struct task_struct *p, int who, struct rusage *r) |
1545 | { |
1546 | struct task_struct *t; |
1547 | unsigned long flags; |
1548 | cputime_t utime, stime; |
1549 | |
1550 | memset((char *) r, 0, sizeof *r); |
1551 | |
1552 | if (unlikely(!p->signal)) |
1553 | return; |
1554 | |
1555 | switch (who) { |
1556 | case RUSAGE_CHILDREN: |
1557 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1558 | utime = p->signal->cutime; |
1559 | stime = p->signal->cstime; |
1560 | r->ru_nvcsw = p->signal->cnvcsw; |
1561 | r->ru_nivcsw = p->signal->cnivcsw; |
1562 | r->ru_minflt = p->signal->cmin_flt; |
1563 | r->ru_majflt = p->signal->cmaj_flt; |
1564 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1565 | cputime_to_timeval(utime, &r->ru_utime); |
1566 | cputime_to_timeval(stime, &r->ru_stime); |
1567 | break; |
1568 | case RUSAGE_SELF: |
1569 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1570 | utime = stime = cputime_zero; |
1571 | goto sum_group; |
1572 | case RUSAGE_BOTH: |
1573 | spin_lock_irqsave(&p->sighand->siglock, flags); |
1574 | utime = p->signal->cutime; |
1575 | stime = p->signal->cstime; |
1576 | r->ru_nvcsw = p->signal->cnvcsw; |
1577 | r->ru_nivcsw = p->signal->cnivcsw; |
1578 | r->ru_minflt = p->signal->cmin_flt; |
1579 | r->ru_majflt = p->signal->cmaj_flt; |
1580 | sum_group: |
1581 | utime = cputime_add(utime, p->signal->utime); |
1582 | stime = cputime_add(stime, p->signal->stime); |
1583 | r->ru_nvcsw += p->signal->nvcsw; |
1584 | r->ru_nivcsw += p->signal->nivcsw; |
1585 | r->ru_minflt += p->signal->min_flt; |
1586 | r->ru_majflt += p->signal->maj_flt; |
1587 | t = p; |
1588 | do { |
1589 | utime = cputime_add(utime, t->utime); |
1590 | stime = cputime_add(stime, t->stime); |
1591 | r->ru_nvcsw += t->nvcsw; |
1592 | r->ru_nivcsw += t->nivcsw; |
1593 | r->ru_minflt += t->min_flt; |
1594 | r->ru_majflt += t->maj_flt; |
1595 | t = next_thread(t); |
1596 | } while (t != p); |
1597 | spin_unlock_irqrestore(&p->sighand->siglock, flags); |
1598 | cputime_to_timeval(utime, &r->ru_utime); |
1599 | cputime_to_timeval(stime, &r->ru_stime); |
1600 | break; |
1601 | default: |
1602 | BUG(); |
1603 | } |
1604 | } |
1605 | |
1606 | int getrusage(struct task_struct *p, int who, struct rusage __user *ru) |
1607 | { |
1608 | struct rusage r; |
1609 | read_lock(&tasklist_lock); |
1610 | k_getrusage(p, who, &r); |
1611 | read_unlock(&tasklist_lock); |
1612 | return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0; |
1613 | } |
1614 | |
1615 | asmlinkage long sys_getrusage(int who, struct rusage __user *ru) |
1616 | { |
1617 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) |
1618 | return -EINVAL; |
1619 | return getrusage(current, who, ru); |
1620 | } |
1621 | |
1622 | asmlinkage long sys_umask(int mask) |
1623 | { |
1624 | mask = xchg(&current->fs->umask, mask & S_IRWXUGO); |
1625 | return mask; |
1626 | } |
1627 | |
1628 | asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3, |
1629 | unsigned long arg4, unsigned long arg5) |
1630 | { |
1631 | long error; |
1632 | int sig; |
1633 | |
1634 | error = security_task_prctl(option, arg2, arg3, arg4, arg5); |
1635 | if (error) |
1636 | return error; |
1637 | |
1638 | switch (option) { |
1639 | case PR_SET_PDEATHSIG: |
1640 | sig = arg2; |
1641 | if (!valid_signal(sig)) { |
1642 | error = -EINVAL; |
1643 | break; |
1644 | } |
1645 | current->pdeath_signal = sig; |
1646 | break; |
1647 | case PR_GET_PDEATHSIG: |
1648 | error = put_user(current->pdeath_signal, (int __user *)arg2); |
1649 | break; |
1650 | case PR_GET_DUMPABLE: |
1651 | if (current->mm->dumpable) |
1652 | error = 1; |
1653 | break; |
1654 | case PR_SET_DUMPABLE: |
1655 | if (arg2 != 0 && arg2 != 1) { |
1656 | error = -EINVAL; |
1657 | break; |
1658 | } |
1659 | current->mm->dumpable = arg2; |
1660 | break; |
1661 | |
1662 | case PR_SET_UNALIGN: |
1663 | error = SET_UNALIGN_CTL(current, arg2); |
1664 | break; |
1665 | case PR_GET_UNALIGN: |
1666 | error = GET_UNALIGN_CTL(current, arg2); |
1667 | break; |
1668 | case PR_SET_FPEMU: |
1669 | error = SET_FPEMU_CTL(current, arg2); |
1670 | break; |
1671 | case PR_GET_FPEMU: |
1672 | error = GET_FPEMU_CTL(current, arg2); |
1673 | break; |
1674 | case PR_SET_FPEXC: |
1675 | error = SET_FPEXC_CTL(current, arg2); |
1676 | break; |
1677 | case PR_GET_FPEXC: |
1678 | error = GET_FPEXC_CTL(current, arg2); |
1679 | break; |
1680 | case PR_GET_TIMING: |
1681 | error = PR_TIMING_STATISTICAL; |
1682 | break; |
1683 | case PR_SET_TIMING: |
1684 | if (arg2 == PR_TIMING_STATISTICAL) |
1685 | error = 0; |
1686 | else |
1687 | error = -EINVAL; |
1688 | break; |
1689 | |
1690 | case PR_GET_KEEPCAPS: |
1691 | if (current->keep_capabilities) |
1692 | error = 1; |
1693 | break; |
1694 | case PR_SET_KEEPCAPS: |
1695 | if (arg2 != 0 && arg2 != 1) { |
1696 | error = -EINVAL; |
1697 | break; |
1698 | } |
1699 | current->keep_capabilities = arg2; |
1700 | break; |
1701 | case PR_SET_NAME: { |
1702 | struct task_struct *me = current; |
1703 | unsigned char ncomm[sizeof(me->comm)]; |
1704 | |
1705 | ncomm[sizeof(me->comm)-1] = 0; |
1706 | if (strncpy_from_user(ncomm, (char __user *)arg2, |
1707 | sizeof(me->comm)-1) < 0) |
1708 | return -EFAULT; |
1709 | set_task_comm(me, ncomm); |
1710 | return 0; |
1711 | } |
1712 | case PR_GET_NAME: { |
1713 | struct task_struct *me = current; |
1714 | unsigned char tcomm[sizeof(me->comm)]; |
1715 | |
1716 | get_task_comm(tcomm, me); |
1717 | if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm))) |
1718 | return -EFAULT; |
1719 | return 0; |
1720 | } |
1721 | default: |
1722 | error = -EINVAL; |
1723 | break; |
1724 | } |
1725 | return error; |
1726 | } |
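Finally, a hedged userspace sketch (not part of this file) of the PR_SET_NAME/PR_GET_NAME options handled above; names are truncated to fit the 16-byte comm field.

#include <stdio.h>
#include <sys/prctl.h>

/* Hypothetical usage: set and read back the task name (comm). */
static void show_task_name(void)
{
        char name[17] = { 0 };

        prctl(PR_SET_NAME, (unsigned long)"worker-0", 0, 0, 0);
        prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
        printf("comm = %s\n", name);
}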