Commit 684c531c authored by Chris Wright, committed by Linus Torvalds

[PATCH] RLIM: enforce rlimits on queued signals

Add a user_struct pointer to the sigqueue structure.  Charge sigqueue
allocation and destruction to the user_struct rather than to a global pool.
This per-user rlimit accounting obsoletes the global queued_signals
accounting.

As written, the patch charges the sigqueue struct allocation to the queue it
is pending on (the receiver of the signal).  So the owner of the queue is
charged for whoever writes to it (much like quota for a mode-777 file).

The patch started out charging the task which allocated the sigqueue struct.
In most cases the sender and the receiver are the same user (permission is
needed to send a signal), so those cases are moot.  In the cases where they
are not the same user, it is a privileged user sending a signal to another
user.

It seems wrong to charge the allocation to the privileged user when the other
user could block receipt for as long as it likes.  The flip side is that
someone else can fill your queue (the expectation being that this someone
else is privileged).  I think it's right the way it is.  The change to revert
is very small.
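To make the two policies concrete, here is a minimal sketch of what each
choice amounts to; charge_receiver() and charge_sender() are hypothetical
helpers for illustration only (the patch itself open-codes the receiver case
in send_signal(), where t is the receiving task):

/* Hypothetical helpers contrasting the two accounting policies. */
static void charge_receiver(struct sigqueue *q, struct task_struct *t)
{
	/* Charge the owner of the queue the signal will pend on. */
	q->user = get_uid(t->user);
	atomic_inc(&q->user->sigpending);
}

static void charge_sender(struct sigqueue *q)
{
	/* The rejected alternative: charge whoever allocates. */
	q->user = get_uid(current->user);
	atomic_inc(&q->user->sigpending);
}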
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent cefb76b6
@@ -19,6 +19,7 @@ struct sigqueue {
 	spinlock_t *lock;
 	int flags;
 	siginfo_t info;
+	struct user_struct *user;
 };
 
 /* flags values. */
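For reference, the sigpending counter charged below lives in user_struct; the
hunk adding it is not shown in this excerpt.  A rough sketch of the fields
this diff relies on (an approximation of the 2.6-era structure, not the exact
definition):

struct user_struct {
	atomic_t __count;	/* reference count; taken by get_uid(),
				 * dropped by free_uid() */
	atomic_t sigpending;	/* queued signals charged to this user,
				 * compared against RLIMIT_SIGPENDING */
	/* ... other per-user counters (processes, files, ...) ... */
	uid_t uid;
};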
@@ -265,17 +265,19 @@ next_signal(struct sigpending *pending, sigset_t *mask)
 	return sig;
 }
 
-struct sigqueue *__sigqueue_alloc(void)
+static struct sigqueue *__sigqueue_alloc(void)
 {
 	struct sigqueue *q = 0;
 
-	if (atomic_read(&nr_queued_signals) < max_queued_signals)
+	if (atomic_read(&current->user->sigpending) <
+			current->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 	if (q) {
-		atomic_inc(&nr_queued_signals);
 		INIT_LIST_HEAD(&q->list);
 		q->flags = 0;
 		q->lock = 0;
+		q->user = get_uid(current->user);
+		atomic_inc(&q->user->sigpending);
 	}
 	return(q);
 }
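The rlim_cur consulted above is an ordinary per-process resource limit, so it
can be inspected and lowered from userspace with getrlimit(2)/setrlimit(2).
A small standalone illustration (standard POSIX API, not part of the patch):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	/* Read the current limit on queued signals. */
	if (getrlimit(RLIMIT_SIGPENDING, &rl) == 0)
		printf("soft %llu hard %llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);

	/* Lower the soft limit; sigqueue allocations beyond it fail. */
	rl.rlim_cur = 16;
	if (setrlimit(RLIMIT_SIGPENDING, &rl) != 0)
		perror("setrlimit");
	return 0;
}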
@@ -284,8 +286,9 @@ static inline void __sigqueue_free(struct sigqueue *q)
 {
 	if (q->flags & SIGQUEUE_PREALLOC)
 		return;
+	atomic_dec(&q->user->sigpending);
+	free_uid(q->user);
 	kmem_cache_free(sigqueue_cachep, q);
-	atomic_dec(&nr_queued_signals);
 }
 
 static void flush_sigqueue(struct sigpending *queue)
@@ -720,12 +723,14 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	   make sure at least one signal gets delivered and don't
 	   pass on the info struct. */
 
-	if (atomic_read(&nr_queued_signals) < max_queued_signals)
+	if (atomic_read(&t->user->sigpending) <
+			t->rlim[RLIMIT_SIGPENDING].rlim_cur)
 		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
 
 	if (q) {
-		atomic_inc(&nr_queued_signals);
 		q->flags = 0;
+		q->user = get_uid(t->user);
+		atomic_inc(&q->user->sigpending);
 		list_add_tail(&q->list, &signals->list);
 		switch ((unsigned long) info) {
 		case 0:
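Once a user's sigpending count reaches the limit, send_signal() gets no
sigqueue struct, and for a queued real-time signal the sender sees EAGAIN.
A rough userspace demonstration of hitting the limit (error handling trimmed;
pairs with the setrlimit() sketch above):

#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	union sigval val = { .sival_int = 0 };
	sigset_t set;
	int i;

	/* Block SIGRTMIN so every queued signal stays pending. */
	sigemptyset(&set);
	sigaddset(&set, SIGRTMIN);
	sigprocmask(SIG_BLOCK, &set, NULL);

	/* Queue signals to ourselves until the rlimit bites. */
	for (i = 0; ; i++) {
		if (sigqueue(getpid(), SIGRTMIN, val) != 0) {
			if (errno == EAGAIN)
				printf("queue full after %d signals\n", i);
			break;
		}
	}
	return 0;
}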