Commit b34d8915 authored by Linus Torvalds

Merge branch 'writable_limits' of git://decibel.fi.muni.cz/~xslaby/linux

* 'writable_limits' of git://decibel.fi.muni.cz/~xslaby/linux:
  unistd: add __NR_prlimit64 syscall numbers
  rlimits: implement prlimit64 syscall
  rlimits: switch more rlimit syscalls to do_prlimit
  rlimits: redo do_setrlimit to more generic do_prlimit
  rlimits: add rlimit64 structure
  rlimits: do security check under task_lock
  rlimits: allow setrlimit to non-current tasks
  rlimits: split sys_setrlimit
  rlimits: selinux, do rlimits changes under task_lock
  rlimits: make sure ->rlim_max never grows in sys_setrlimit
  rlimits: add task_struct to update_rlimit_cpu
  rlimits: security, add task_struct to setrlimit

Fix up various system call number conflicts.  We not only added fanotify
system calls in the meantime, but asm-generic/unistd.h added a wait4
along with a range of reserved per-architecture system calls.
Parents: e8a89ceb f33ebbe9
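For orientation, the new interface is prlimit64(pid, resource, new_rlim, old_rlim): either limit pointer may be NULL, pid 0 means the calling task, and raising rlim_max above the current hard limit still requires CAP_SYS_RESOURCE. Below is a minimal userspace sketch of invoking it through syscall(2); the local struct definition, the my_/ _lim names and the fallback syscall numbers (340 on x86-32, 302 on x86-64, taken from the tables in this diff) are illustrative assumptions, not part of the commit.

/*
 * Hypothetical userspace sketch (not part of this commit): read and raise
 * the RLIMIT_NOFILE limits of a target process via the new prlimit64 call.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <sys/resource.h>	/* RLIMIT_NOFILE */

#ifndef __NR_prlimit64		/* fallback numbers taken from this merge */
# if defined(__x86_64__)
#  define __NR_prlimit64 302
# else
#  define __NR_prlimit64 340	/* x86-32 */
# endif
#endif

/* Mirrors the new struct rlimit64: two 64-bit fields. */
struct my_rlimit64 {
	uint64_t rlim_cur;
	uint64_t rlim_max;
};

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 0;	/* 0 == calling task */
	struct my_rlimit64 new_lim = { .rlim_cur = 4096, .rlim_max = 4096 };
	struct my_rlimit64 old_lim;

	/* Either pointer may be NULL; here we both set and read back. */
	if (syscall(__NR_prlimit64, pid, RLIMIT_NOFILE, &new_lim, &old_lim)) {
		perror("prlimit64");
		return 1;
	}
	printf("previous RLIMIT_NOFILE: cur=%llu max=%llu\n",
	       (unsigned long long)old_lim.rlim_cur,
	       (unsigned long long)old_lim.rlim_max);
	return 0;
}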
@@ -844,4 +844,5 @@ ia32_sys_call_table:
 	.quad compat_sys_recvmmsg
 	.quad sys_fanotify_init
 	.quad sys32_fanotify_mark
+	.quad sys_prlimit64
 ia32_syscall_end:
@@ -345,10 +345,11 @@
 #define __NR_recvmmsg 337
 #define __NR_fanotify_init 338
 #define __NR_fanotify_mark 339
+#define __NR_prlimit64 340
 #ifdef __KERNEL__
-#define NR_syscalls 340
+#define NR_syscalls 341
 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
...
@@ -667,6 +667,8 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
 __SYSCALL(__NR_fanotify_init, sys_fanotify_init)
 #define __NR_fanotify_mark 301
 __SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
+#define __NR_prlimit64 302
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
 #ifndef __NO_STUBS
 #define __ARCH_WANT_OLD_READDIR
...
@@ -339,3 +339,4 @@ ENTRY(sys_call_table)
 	.long sys_recvmmsg
 	.long sys_fanotify_init
 	.long sys_fanotify_mark
+	.long sys_prlimit64
@@ -640,9 +640,11 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
 #define __NR_wait4 260
 __SYSCALL(__NR_wait4, sys_wait4)
+#define __NR_prlimit64 261
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
 #undef __NR_syscalls
-#define __NR_syscalls 261
+#define __NR_syscalls 262
 /*
  * All syscalls below here should go away really,
...
@@ -117,6 +117,6 @@ void set_process_cpu_timer(struct task_struct *task, unsigned int clock_idx,
 long clock_nanosleep_restart(struct restart_block *restart_block);
-void update_rlimit_cpu(unsigned long rlim_new);
+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new);
 #endif
@@ -43,6 +43,13 @@ struct rlimit {
 	unsigned long rlim_max;
 };
+#define RLIM64_INFINITY (~0ULL)
+struct rlimit64 {
+	__u64 rlim_cur;
+	__u64 rlim_max;
+};
 #define PRIO_MIN (-20)
 #define PRIO_MAX 20
@@ -73,6 +80,8 @@ struct rlimit {
 struct task_struct;
 int getrusage(struct task_struct *p, int who, struct rusage __user *ru);
+int do_prlimit(struct task_struct *tsk, unsigned int resource,
+		struct rlimit *new_rlim, struct rlimit *old_rlim);
 #endif /* __KERNEL__ */
...
@@ -1499,7 +1499,8 @@ struct security_operations {
 	int (*task_setnice) (struct task_struct *p, int nice);
 	int (*task_setioprio) (struct task_struct *p, int ioprio);
 	int (*task_getioprio) (struct task_struct *p);
-	int (*task_setrlimit) (unsigned int resource, struct rlimit *new_rlim);
+	int (*task_setrlimit) (struct task_struct *p, unsigned int resource,
+			struct rlimit *new_rlim);
 	int (*task_setscheduler) (struct task_struct *p, int policy,
 				  struct sched_param *lp);
 	int (*task_getscheduler) (struct task_struct *p);
@@ -1749,7 +1750,8 @@ void security_task_getsecid(struct task_struct *p, u32 *secid);
 int security_task_setnice(struct task_struct *p, int nice);
 int security_task_setioprio(struct task_struct *p, int ioprio);
 int security_task_getioprio(struct task_struct *p);
-int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim);
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+		struct rlimit *new_rlim);
 int security_task_setscheduler(struct task_struct *p,
 				int policy, struct sched_param *lp);
 int security_task_getscheduler(struct task_struct *p);
@@ -2311,7 +2313,8 @@ static inline int security_task_getioprio(struct task_struct *p)
 	return 0;
 }
-static inline int security_task_setrlimit(unsigned int resource,
+static inline int security_task_setrlimit(struct task_struct *p,
+					  unsigned int resource,
 					  struct rlimit *new_rlim)
 {
 	return 0;
...
@@ -35,6 +35,7 @@ struct oldold_utsname;
 struct old_utsname;
 struct pollfd;
 struct rlimit;
+struct rlimit64;
 struct rusage;
 struct sched_param;
 struct sel_arg_struct;
@@ -644,6 +645,9 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
 #endif
 asmlinkage long sys_setrlimit(unsigned int resource,
 				struct rlimit __user *rlim);
+asmlinkage long sys_prlimit64(pid_t pid, unsigned int resource,
+				const struct rlimit64 __user *new_rlim,
+				struct rlimit64 __user *old_rlim);
 asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
 asmlinkage long sys_umask(int mask);
...
@@ -279,11 +279,6 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
 		struct compat_rlimit __user *rlim)
 {
 	struct rlimit r;
-	int ret;
-	mm_segment_t old_fs = get_fs ();
-	if (resource >= RLIM_NLIMITS)
-		return -EINVAL;
 	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
 	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
@@ -294,10 +289,7 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
 		r.rlim_cur = RLIM_INFINITY;
 	if (r.rlim_max == COMPAT_RLIM_INFINITY)
 		r.rlim_max = RLIM_INFINITY;
-	set_fs(KERNEL_DS);
-	ret = sys_setrlimit(resource, (struct rlimit __user *) &r);
-	set_fs(old_fs);
-	return ret;
+	return do_prlimit(current, resource, &r, NULL);
 }
 #ifdef COMPAT_RLIM_OLD_INFINITY
@@ -329,16 +321,13 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
 #endif
-asmlinkage long compat_sys_getrlimit (unsigned int resource,
+asmlinkage long compat_sys_getrlimit(unsigned int resource,
 		struct compat_rlimit __user *rlim)
 {
 	struct rlimit r;
 	int ret;
-	mm_segment_t old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	ret = sys_getrlimit(resource, (struct rlimit __user *) &r);
-	set_fs(old_fs);
+	ret = do_prlimit(current, resource, NULL, &r);
 	if (!ret) {
 		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
 			r.rlim_cur = COMPAT_RLIM_INFINITY;
...
@@ -16,13 +16,13 @@
  * siglock protection since other code may update expiration cache as
  * well.
  */
-void update_rlimit_cpu(unsigned long rlim_new)
+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
 {
 	cputime_t cputime = secs_to_cputime(rlim_new);
-	spin_lock_irq(&current->sighand->siglock);
-	set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-	spin_unlock_irq(&current->sighand->siglock);
+	spin_lock_irq(&task->sighand->siglock);
+	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
+	spin_unlock_irq(&task->sighand->siglock);
 }
 static int check_clock(const clockid_t which_clock)
...
@@ -1236,15 +1236,14 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
 {
-	if (resource >= RLIM_NLIMITS)
-		return -EINVAL;
-	else {
-		struct rlimit value;
-		task_lock(current->group_leader);
-		value = current->signal->rlim[resource];
-		task_unlock(current->group_leader);
-		return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
-	}
+	struct rlimit value;
+	int ret;
+	ret = do_prlimit(current, resource, NULL, &value);
+	if (!ret)
+		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
+	return ret;
 }
 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
@@ -1272,44 +1271,89 @@ SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
 #endif
-SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
+static inline bool rlim64_is_infinity(__u64 rlim64)
 {
-	struct rlimit new_rlim, *old_rlim;
-	int retval;
+#if BITS_PER_LONG < 64
+	return rlim64 >= ULONG_MAX;
+#else
+	return rlim64 == RLIM64_INFINITY;
+#endif
+}
+
+static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
+{
+	if (rlim->rlim_cur == RLIM_INFINITY)
+		rlim64->rlim_cur = RLIM64_INFINITY;
+	else
+		rlim64->rlim_cur = rlim->rlim_cur;
+	if (rlim->rlim_max == RLIM_INFINITY)
+		rlim64->rlim_max = RLIM64_INFINITY;
+	else
+		rlim64->rlim_max = rlim->rlim_max;
+}
+
+static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
+{
+	if (rlim64_is_infinity(rlim64->rlim_cur))
+		rlim->rlim_cur = RLIM_INFINITY;
+	else
+		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
+	if (rlim64_is_infinity(rlim64->rlim_max))
+		rlim->rlim_max = RLIM_INFINITY;
+	else
+		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
+}
+
+/* make sure you are allowed to change @tsk limits before calling this */
+int do_prlimit(struct task_struct *tsk, unsigned int resource,
+		struct rlimit *new_rlim, struct rlimit *old_rlim)
+{
+	struct rlimit *rlim;
+	int retval = 0;
 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;
-	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
-		return -EFAULT;
-	if (new_rlim.rlim_cur > new_rlim.rlim_max)
-		return -EINVAL;
-	old_rlim = current->signal->rlim + resource;
-	if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
-	    !capable(CAP_SYS_RESOURCE))
-		return -EPERM;
-	if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
-		return -EPERM;
-	retval = security_task_setrlimit(resource, &new_rlim);
-	if (retval)
-		return retval;
-	if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
-		/*
-		 * The caller is asking for an immediate RLIMIT_CPU
-		 * expiry. But we use the zero value to mean "it was
-		 * never set". So let's cheat and make it one second
-		 * instead
-		 */
-		new_rlim.rlim_cur = 1;
+	if (new_rlim) {
+		if (new_rlim->rlim_cur > new_rlim->rlim_max)
+			return -EINVAL;
+		if (resource == RLIMIT_NOFILE &&
+				new_rlim->rlim_max > sysctl_nr_open)
+			return -EPERM;
 	}
-	task_lock(current->group_leader);
-	*old_rlim = new_rlim;
-	task_unlock(current->group_leader);
-	if (resource != RLIMIT_CPU)
+	/* protect tsk->signal and tsk->sighand from disappearing */
+	read_lock(&tasklist_lock);
+	if (!tsk->sighand) {
+		retval = -ESRCH;
 		goto out;
+	}
+	rlim = tsk->signal->rlim + resource;
+	task_lock(tsk->group_leader);
+	if (new_rlim) {
+		if (new_rlim->rlim_max > rlim->rlim_max &&
+				!capable(CAP_SYS_RESOURCE))
+			retval = -EPERM;
+		if (!retval)
+			retval = security_task_setrlimit(tsk->group_leader,
+					resource, new_rlim);
+		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
+			/*
+			 * The caller is asking for an immediate RLIMIT_CPU
+			 * expiry. But we use the zero value to mean "it was
+			 * never set". So let's cheat and make it one second
+			 * instead
+			 */
+			new_rlim->rlim_cur = 1;
+		}
+	}
+	if (!retval) {
+		if (old_rlim)
+			*old_rlim = *rlim;
+		if (new_rlim)
+			*rlim = *new_rlim;
+	}
+	task_unlock(tsk->group_leader);
 	/*
 	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
@@ -1317,14 +1361,84 @@ SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
 	 * very long-standing error, and fixing it now risks breakage of
 	 * applications, so we live with it
 	 */
-	if (new_rlim.rlim_cur == RLIM_INFINITY)
-		goto out;
-	update_rlimit_cpu(new_rlim.rlim_cur);
+	if (!retval && new_rlim && resource == RLIMIT_CPU &&
+			new_rlim->rlim_cur != RLIM_INFINITY)
+		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
 out:
+	read_unlock(&tasklist_lock);
+	return retval;
+}
+
+/* rcu lock must be held */
+static int check_prlimit_permission(struct task_struct *task)
+{
+	const struct cred *cred = current_cred(), *tcred;
+
+	tcred = __task_cred(task);
+	if ((cred->uid != tcred->euid ||
+	     cred->uid != tcred->suid ||
+	     cred->uid != tcred->uid ||
+	     cred->gid != tcred->egid ||
+	     cred->gid != tcred->sgid ||
+	     cred->gid != tcred->gid) &&
+	     !capable(CAP_SYS_RESOURCE)) {
+		return -EPERM;
+	}
 	return 0;
 }
+
+SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
+		const struct rlimit64 __user *, new_rlim,
+		struct rlimit64 __user *, old_rlim)
+{
+	struct rlimit64 old64, new64;
+	struct rlimit old, new;
+	struct task_struct *tsk;
+	int ret;
+
+	if (new_rlim) {
+		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
+			return -EFAULT;
+		rlim64_to_rlim(&new64, &new);
+	}
+
+	rcu_read_lock();
+	tsk = pid ? find_task_by_vpid(pid) : current;
+	if (!tsk) {
+		rcu_read_unlock();
+		return -ESRCH;
+	}
+	ret = check_prlimit_permission(tsk);
+	if (ret) {
+		rcu_read_unlock();
+		return ret;
+	}
+	get_task_struct(tsk);
+	rcu_read_unlock();
+
+	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
+			old_rlim ? &old : NULL);
+
+	if (!ret && old_rlim) {
+		rlim_to_rlim64(&old, &old64);
+		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
+			ret = -EFAULT;
+	}
+
+	put_task_struct(tsk);
+	return ret;
+}
+
+SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
+{
+	struct rlimit new_rlim;
+
+	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
+		return -EFAULT;
+	return do_prlimit(current, resource, &new_rlim, NULL);
+}
+
 /*
  * It would make sense to put struct rusage in the task_struct,
  * except that would make the task_struct be *really big*. After
...
@@ -411,7 +411,8 @@ static int cap_task_getioprio(struct task_struct *p)
 	return 0;
 }
-static int cap_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+static int cap_task_setrlimit(struct task_struct *p, unsigned int resource,
+		struct rlimit *new_rlim)
 {
 	return 0;
 }
...
@@ -780,9 +780,10 @@ int security_task_getioprio(struct task_struct *p)
 	return security_ops->task_getioprio(p);
 }
-int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+		struct rlimit *new_rlim)
 {
-	return security_ops->task_setrlimit(resource, new_rlim);
+	return security_ops->task_setrlimit(p, resource, new_rlim);
 }
 int security_task_setscheduler(struct task_struct *p,
...
@@ -2284,12 +2284,15 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
 		rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
 				  PROCESS__RLIMITINH, NULL);
 		if (rc) {
+			/* protect against do_prlimit() */
+			task_lock(current);
 			for (i = 0; i < RLIM_NLIMITS; i++) {
 				rlim = current->signal->rlim + i;
 				initrlim = init_task.signal->rlim + i;
 				rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
 			}
-			update_rlimit_cpu(current->signal->rlim[RLIMIT_CPU].rlim_cur);
+			task_unlock(current);
+			update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
 		}
 	}
@@ -3333,16 +3336,17 @@ static int selinux_task_getioprio(struct task_struct *p)
 	return current_has_perm(p, PROCESS__GETSCHED);
 }
-static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
+		struct rlimit *new_rlim)
 {
-	struct rlimit *old_rlim = current->signal->rlim + resource;
+	struct rlimit *old_rlim = p->signal->rlim + resource;
 	/* Control the ability to change the hard limit (whether
 	   lowering or raising it), so that the hard limit can
 	   later be used as a safe reset point for the soft limit
 	   upon context transitions. See selinux_bprm_committing_creds. */
 	if (old_rlim->rlim_max != new_rlim->rlim_max)
-		return current_has_perm(current, PROCESS__SETRLIMIT);
+		return current_has_perm(p, PROCESS__SETRLIMIT);
 	return 0;
 }
...