Commit 8733e611 authored by Linus Torvalds

revert broken select optimizations

Cset exclude: torvalds@penguin.transmeta.com|ChangeSet|20020619003306|07760
Cset exclude: ak@muc.de|ChangeSet|20020618172743|19150
parent ab6094f9
fs/select.c
@@ -12,9 +12,6 @@
  * 24 January 2000
  * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
  * of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
- *
- * Dec 2001
- * Stack allocation and fast path (Andi Kleen)
  */
 #include <linux/slab.h>
@@ -29,6 +26,21 @@
 #define ROUND_UP(x,y) (((x)+(y)-1)/(y))
 #define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
 
+struct poll_table_entry {
+    struct file * filp;
+    wait_queue_t wait;
+    wait_queue_head_t * wait_address;
+};
+
+struct poll_table_page {
+    struct poll_table_page * next;
+    struct poll_table_entry * entry;
+    struct poll_table_entry entries[0];
+};
+
+#define POLL_TABLE_FULL(table) \
+    ((unsigned long)((table)->entry+1) > PAGE_SIZE + (unsigned long)(table))
+
 /*
  * Ok, Peter made a complicated, but straightforward multiple_wait() function.
  * I have rewritten this, taking some shortcuts: This code may not be easy to
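For reference, the page layout these restored definitions describe can be exercised in plain userspace C. This is a minimal sketch, not kernel code: PAGE_SIZE, the entry fields and the struct names are assumptions, and TABLE_FULL() simply mirrors the POLL_TABLE_FULL() pointer comparison against the end of the page.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096                        /* assumed page size */

struct entry { void *filp; void *wait; void *wait_address; };

struct page_hdr {
    struct page_hdr *next;
    struct entry *entry;                      /* next free slot */
    struct entry entries[0];                  /* zero-length array, as in the kernel struct */
};

#define TABLE_FULL(t) \
    ((unsigned long)((t)->entry + 1) > PAGE_SIZE + (unsigned long)(t))

int main(void)
{
    struct page_hdr *p = calloc(1, PAGE_SIZE);
    int n = 0;

    if (!p)
        return 1;
    p->entry = p->entries;
    while (!TABLE_FULL(p)) {                  /* count how many entries fit in one page */
        p->entry++;
        n++;
    }
    printf("entries per %d-byte page: %d\n", PAGE_SIZE, n);
    free(p);
    return 0;
}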
@@ -50,39 +62,30 @@ void poll_freewait(poll_table* pt)
         struct poll_table_page *old;
 
         entry = p->entry;
-        while (entry > p->entries) {
+        do {
             entry--;
             remove_wait_queue(entry->wait_address,&entry->wait);
             fput(entry->filp);
-        }
+        } while (entry > p->entries);
         old = p;
         p = p->next;
-        if (old != &pt->inline_page)
-            free_page((unsigned long) old);
+        free_page((unsigned long) old);
     }
 }
 
 void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
 {
     struct poll_table_page *table = p->table;
-    struct poll_table_page *new_table = NULL;
-    int sz;
-
-    if (!table) {
-        new_table = &p->inline_page;
-    } else {
-        sz = (table == &p->inline_page) ? POLL_INLINE_TABLE_LEN : PAGE_SIZE;
-        if ((char*)table->entry >= (char*)table + sz) {
-            new_table = (struct poll_table_page *)__get_free_page(GFP_KERNEL);
-            if (!new_table) {
-                p->error = -ENOMEM;
-                __set_current_state(TASK_RUNNING);
-                return;
-            }
-        }
-    }
 
-    if (new_table) {
+    if (!table || POLL_TABLE_FULL(table)) {
+        struct poll_table_page *new_table;
+
+        new_table = (struct poll_table_page *) __get_free_page(GFP_KERNEL);
+        if (!new_table) {
+            p->error = -ENOMEM;
+            __set_current_state(TASK_RUNNING);
+            return;
+        }
         new_table->entry = new_table->entries;
         new_table->next = table;
         p->table = new_table;
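The allocation pattern going back into __pollwait() and poll_freewait() is easier to see in isolation. A rough userspace sketch follows, with malloc()/free() standing in for __get_free_page()/free_page() and a deliberately tiny, made-up per-page capacity:

#include <stdlib.h>

#define ENTRIES_PER_PAGE 4                    /* illustrative, not the kernel's value */

struct entry { int fd; };

struct page {
    struct page *next;
    int used;
    struct entry entries[ENTRIES_PER_PAGE];
};

static struct entry *add_entry(struct page **head)
{
    struct page *p = *head;

    if (!p || p->used == ENTRIES_PER_PAGE) {  /* current page full: chain a new one */
        p = calloc(1, sizeof(*p));
        if (!p)
            return NULL;                      /* caller records the -ENOMEM, as __pollwait() does */
        p->next = *head;
        *head = p;
    }
    return &p->entries[p->used++];
}

static void free_all(struct page *head)
{
    while (head) {                            /* mirrors poll_freewait(): walk the chain, free whole pages */
        struct page *old = head;
        head = head->next;
        free(old);
    }
}

int main(void)
{
    struct page *head = NULL;

    for (int fd = 0; fd < 10; fd++) {         /* 10 entries force chaining past one "page" */
        struct entry *e = add_entry(&head);
        if (e)
            e->fd = fd;
    }
    free_all(head);
    return 0;
}

The do/while in the restored poll_freewait() is safe for the same reason it is here: a page is only chained onto the list once at least one entry has been placed in it, so every page on the list has something to unwind.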
@@ -110,6 +113,48 @@ void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table
 
 #define BITS(fds, n) (*__IN(fds, n)|*__OUT(fds, n)|*__EX(fds, n))
 
+static int max_select_fd(unsigned long n, fd_set_bits *fds)
+{
+    unsigned long *open_fds;
+    unsigned long set;
+    int max;
+
+    /* handle last in-complete long-word first */
+    set = ~(~0UL << (n & (__NFDBITS-1)));
+    n /= __NFDBITS;
+    open_fds = current->files->open_fds->fds_bits+n;
+    max = 0;
+    if (set) {
+        set &= BITS(fds, n);
+        if (set) {
+            if (!(set & ~*open_fds))
+                goto get_max;
+            return -EBADF;
+        }
+    }
+    while (n) {
+        open_fds--;
+        n--;
+        set = BITS(fds, n);
+        if (!set)
+            continue;
+        if (set & ~*open_fds)
+            return -EBADF;
+        if (max)
+            continue;
+    get_max:
+        do {
+            max++;
+            set >>= 1;
+        } while (set);
+        max += n * __NFDBITS;
+    }
+
+    return max;
+}
+
+#define BIT(i) (1UL << ((i)&(__NFDBITS-1)))
+#define MEM(i,m) ((m)+(unsigned)(i)/__NFDBITS)
 #define ISSET(i,m) (((i)&*(m)) != 0)
 #define SET(i,m) (*(m) |= (i))
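The word and bit arithmetic that max_select_fd() leans on can be checked with a stand-alone program. In this sketch __NFDBITS is taken to be the number of bits in an unsigned long, and the check against current->files->open_fds is left out:

#include <stdio.h>

#define NFDBITS (8 * sizeof(unsigned long))   /* stands in for __NFDBITS */

int main(void)
{
    unsigned long n = 70;                     /* select() called with n = 70 */
    unsigned long set = 0x21;                 /* fds 64 and 69 requested in word 1 */
    unsigned long mask = ~(~0UL << (n & (NFDBITS - 1)));
    int max = 0;

    set &= mask;                              /* drop bits >= n in the partial last word */
    n /= NFDBITS;                             /* word index of that partial word */
    while (set) {                             /* highest set bit + 1 ... */
        max++;
        set >>= 1;
    }
    max += n * NFDBITS;                       /* ... plus all the full words below it */
    printf("scan fds 0..%d (returned max = %d)\n", max - 1, max);
    return 0;
}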
@@ -120,106 +165,84 @@ void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table
 int do_select(int n, fd_set_bits *fds, long *timeout)
 {
     poll_table table, *wait;
-    int retval, off, maxoff;
+    int retval, i, off;
     long __timeout = *timeout;
 
+    read_lock(&current->files->file_lock);
+    retval = max_select_fd(n, fds);
+    read_unlock(&current->files->file_lock);
+
+    if (retval < 0)
+        return retval;
+    n = retval;
+
     poll_initwait(&table);
     wait = &table;
     if (!__timeout)
         wait = NULL;
     retval = 0;
-    maxoff = FDS_LONGS(n);
     for (;;) {
         set_current_state(TASK_INTERRUPTIBLE);
-        for (off = 0; off <= maxoff; off++) {
-            unsigned long val = BITS(fds, off);
-
-            while (val) {
-                int k = ffz(~val), index;
-                unsigned long mask, bit;
-                struct file *file;
-
-                bit = (1UL << k);
-                val &= ~bit;
-
-                index = off*BITS_PER_LONG + k;
-                if (index >= n)
-                    break;
-
-                file = fget(index);
-                mask = POLLNVAL;
-                if (file) {
-                    mask = DEFAULT_POLLMASK;
-                    if (file->f_op && file->f_op->poll)
-                        mask = file->f_op->poll(file, wait);
-                    fput(file);
-                } else {
-                    /* This error will shadow all other results.
-                     * This matches previous linux behaviour */
-                    retval = -EBADF;
-                    goto out;
-                }
-                if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds,off))) {
-                    SET(bit, __RES_IN(fds,off));
-                    retval++;
-                    wait = NULL;
-                }
-                if ((mask & POLLOUT_SET) && ISSET(bit,__OUT(fds,off))) {
-                    SET(bit, __RES_OUT(fds,off));
-                    retval++;
-                    wait = NULL;
-                }
-                if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds,off))) {
-                    SET(bit, __RES_EX(fds,off));
-                    retval++;
-                    wait = NULL;
-                }
-            }
+        for (i = 0 ; i < n; i++) {
+            unsigned long bit = BIT(i);
+            unsigned long mask;
+            struct file *file;
+
+            off = i / __NFDBITS;
+            if (!(bit & BITS(fds, off)))
+                continue;
+            file = fget(i);
+            mask = POLLNVAL;
+            if (file) {
+                mask = DEFAULT_POLLMASK;
+                if (file->f_op && file->f_op->poll)
+                    mask = file->f_op->poll(file, wait);
+                fput(file);
+            }
+            if ((mask & POLLIN_SET) && ISSET(bit, __IN(fds,off))) {
+                SET(bit, __RES_IN(fds,off));
+                retval++;
+                wait = NULL;
+            }
+            if ((mask & POLLOUT_SET) && ISSET(bit, __OUT(fds,off))) {
+                SET(bit, __RES_OUT(fds,off));
+                retval++;
+                wait = NULL;
+            }
+            if ((mask & POLLEX_SET) && ISSET(bit, __EX(fds,off))) {
+                SET(bit, __RES_EX(fds,off));
+                retval++;
+                wait = NULL;
+            }
         }
         wait = NULL;
         if (retval || !__timeout || signal_pending(current))
             break;
-        if (table.error) {
+        if(table.error) {
             retval = table.error;
             break;
         }
         __timeout = schedule_timeout(__timeout);
     }
-out:
     current->state = TASK_RUNNING;
     poll_freewait(&table);
 
     /*
-     * Update the caller timeout.
+     * Up-to-date the caller timeout.
      */
     *timeout = __timeout;
     return retval;
 }
 
-/*
- * We do a VERIFY_WRITE here even though we are only reading this time:
- * we'll write to it eventually..
- */
-static int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
+static void *select_bits_alloc(int size)
 {
-    unsigned long rounded = FDS_BYTES(nr);
-    if (ufdset) {
-        int error = verify_area(VERIFY_WRITE, ufdset, rounded);
-        if (!error && __copy_from_user(fdset, ufdset, rounded))
-            error = -EFAULT;
-        if (nr % __NFDBITS) {
-            unsigned long mask = ~(~0UL << (nr % __NFDBITS));
-            fdset[nr/__NFDBITS] &= mask;
-        }
-        return error;
-    }
-    memset(fdset, 0, rounded);
-    return 0;
+    return kmalloc(6 * size, GFP_KERNEL);
+}
+
+static void select_bits_free(void *bits, int size)
+{
+    kfree(bits);
 }
 
 /*
@@ -237,10 +260,9 @@ asmlinkage long
 sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
 {
     fd_set_bits fds;
-    unsigned long *bits;
+    char *bits;
     long timeout;
     int ret, size, max_fdset;
-    unsigned long stack_bits[FDS_LONGS(FAST_SELECT_MAX) * 6];
 
     timeout = MAX_SCHEDULE_TIMEOUT;
     if (tvp) {
@@ -275,27 +297,25 @@ sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
      * since we used fdset we need to allocate memory in units of
      * long-words.
      */
-    size = FDS_LONGS(n);
-    bits = stack_bits;
-    if (n >= FAST_SELECT_MAX) {
-        ret = -ENOMEM;
-        bits = kmalloc(sizeof(unsigned long)*6*size, GFP_KERNEL);
-        if (!bits)
-            goto out_nofds;
-    }
-
-    fds.in = bits;
-    fds.out = bits + size;
-    fds.ex = bits + 2*size;
-    fds.res_in = bits + 3*size;
-    fds.res_out = bits + 4*size;
-    fds.res_ex = bits + 5*size;
+    ret = -ENOMEM;
+    size = FDS_BYTES(n);
+    bits = select_bits_alloc(size);
+    if (!bits)
+        goto out_nofds;
+    fds.in = (unsigned long *) bits;
+    fds.out = (unsigned long *) (bits + size);
+    fds.ex = (unsigned long *) (bits + 2*size);
+    fds.res_in = (unsigned long *) (bits + 3*size);
+    fds.res_out = (unsigned long *) (bits + 4*size);
+    fds.res_ex = (unsigned long *) (bits + 5*size);
 
     if ((ret = get_fd_set(n, inp, fds.in)) ||
         (ret = get_fd_set(n, outp, fds.out)) ||
         (ret = get_fd_set(n, exp, fds.ex)))
         goto out;
-    memset(fds.res_in, 0, 3*size);
+    zero_fd_set(n, fds.res_in);
+    zero_fd_set(n, fds.res_out);
+    zero_fd_set(n, fds.res_ex);
 
     ret = do_select(n, &fds, &timeout);
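The six-way split of one allocation that sys_select() returns to is easy to model in userspace. FDS_BYTES() is reproduced so the sketch stands alone, and malloc() stands in for select_bits_alloc()/kmalloc(); only the pointer carving matters here:

#include <stdlib.h>
#include <string.h>

#define FDS_BITPERLONG  (8 * sizeof(long))
#define FDS_LONGS(nr)   (((nr) + FDS_BITPERLONG - 1) / FDS_BITPERLONG)
#define FDS_BYTES(nr)   (FDS_LONGS(nr) * sizeof(long))

typedef struct {
    unsigned long *in, *out, *ex;
    unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

static int layout_fds(fd_set_bits *fds, int n, char **bits_out)
{
    size_t size = FDS_BYTES(n);
    char *bits = malloc(6 * size);            /* one block, as select_bits_alloc() does */

    if (!bits)
        return -1;                            /* the kernel path returns -ENOMEM */
    fds->in      = (unsigned long *) bits;
    fds->out     = (unsigned long *)(bits + size);
    fds->ex      = (unsigned long *)(bits + 2 * size);
    fds->res_in  = (unsigned long *)(bits + 3 * size);
    fds->res_out = (unsigned long *)(bits + 4 * size);
    fds->res_ex  = (unsigned long *)(bits + 5 * size);
    memset(fds->res_in, 0, 3 * size);         /* the three result sets are contiguous */
    *bits_out = bits;
    return 0;
}

int main(void)
{
    fd_set_bits fds;
    char *bits;

    if (layout_fds(&fds, 100, &bits) == 0) {  /* room for fds 0..99 */
        fds.in[0] |= 1UL;                     /* mark fd 0 for read interest */
        free(bits);
    }
    return 0;
}

The restored kernel code clears the result sets with three zero_fd_set() calls; the single memset() above covers the same bytes only because the three result regions sit back to back at the end of the block.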
@@ -306,8 +326,8 @@ sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
             usec = timeout % HZ;
             usec *= (1000000/HZ);
         }
-        __put_user(sec, &tvp->tv_sec);
-        __put_user(usec, &tvp->tv_usec);
+        put_user(sec, &tvp->tv_sec);
+        put_user(usec, &tvp->tv_usec);
     }
 
     if (ret < 0)
@@ -324,10 +344,8 @@ sys_select(int n, fd_set *inp, fd_set *outp, fd_set *exp, struct timeval *tvp)
     set_fd_set(n, exp, fds.res_ex);
 
 out:
-    if (n >= FAST_SELECT_MAX)
-        kfree(bits);
-
+    select_bits_free(bits, size);
 out_nofds:
     return ret;
 }
@@ -392,42 +410,12 @@ static int do_poll(unsigned int nfds, unsigned int nchunks, unsigned int nleft,
     return count;
 }
 
-static int fast_poll(poll_table *table, poll_table *wait, struct pollfd *ufds,
-                     unsigned int nfds, long timeout)
-{
-    poll_table *pt = wait;
-    struct pollfd fds[FAST_POLL_MAX];
-    int count, i;
-
-    if (copy_from_user(fds, ufds, nfds * sizeof(struct pollfd)))
-        return -EFAULT;
-
-    for (;;) {
-        set_current_state(TASK_INTERRUPTIBLE);
-        count = 0;
-        do_pollfd(nfds, fds, &pt, &count);
-        pt = NULL;
-        if (count || !timeout || signal_pending(current))
-            break;
-        count = wait->error;
-        if (count)
-            break;
-        timeout = schedule_timeout(timeout);
-    }
-    current->state = TASK_RUNNING;
-    for (i = 0; i < nfds; i++)
-        __put_user(fds[i].revents, &ufds[i].revents);
-    poll_freewait(table);
-    if (!count && signal_pending(current))
-        return -EINTR;
-    return count;
-}
-
 asmlinkage long sys_poll(struct pollfd * ufds, unsigned int nfds, long timeout)
 {
-    int i, j, err, fdcount;
+    int i, j, fdcount, err;
     struct pollfd **fds;
     poll_table table, *wait;
     int nchunks, nleft;
 
     /* Do a sanity check on nfds ... */
     if (nfds > NR_OPEN)
@@ -441,45 +429,43 @@ asmlinkage long sys_poll(struct pollfd * ufds, unsigned int nfds, long timeout)
         timeout = MAX_SCHEDULE_TIMEOUT;
     }
 
     poll_initwait(&table);
     wait = &table;
     if (!timeout)
         wait = NULL;
 
-    if (nfds < FAST_POLL_MAX)
-        return fast_poll(&table, wait, ufds, nfds, timeout);
-
     err = -ENOMEM;
-    fds = (struct pollfd **)kmalloc(
-        (1 + (nfds - 1) / POLLFD_PER_PAGE) * sizeof(struct pollfd *),
-        GFP_KERNEL);
-    if (fds == NULL)
-        goto out;
+    fds = NULL;
+    if (nfds != 0) {
+        fds = (struct pollfd **)kmalloc(
+            (1 + (nfds - 1) / POLLFD_PER_PAGE) * sizeof(struct pollfd *),
+            GFP_KERNEL);
+        if (fds == NULL)
+            goto out;
+    }
 
     nchunks = 0;
     nleft = nfds;
-    while (nleft > POLLFD_PER_PAGE) {
+    while (nleft > POLLFD_PER_PAGE) { /* allocate complete PAGE_SIZE chunks */
         fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
         if (fds[nchunks] == NULL)
             goto out_fds;
         nchunks++;
         nleft -= POLLFD_PER_PAGE;
     }
-    if (nleft) {
+    if (nleft) { /* allocate last PAGE_SIZE chunk, only nleft elements used */
         fds[nchunks] = (struct pollfd *)__get_free_page(GFP_KERNEL);
         if (fds[nchunks] == NULL)
             goto out_fds;
     }
 
     err = -EFAULT;
     for (i=0; i < nchunks; i++)
         if (copy_from_user(fds[i], ufds + i*POLLFD_PER_PAGE, PAGE_SIZE))
             goto out_fds1;
     if (nleft) {
         if (copy_from_user(fds[nchunks], ufds + nchunks*POLLFD_PER_PAGE,
                            nleft * sizeof(struct pollfd)))
             goto out_fds1;
     }
@@ -503,7 +489,8 @@ asmlinkage long sys_poll(struct pollfd * ufds, unsigned int nfds, long timeout)
 out_fds:
     for (i=0; i < nchunks; i++)
         free_page((unsigned long)(fds[i]));
-    kfree(fds);
+    if (nfds != 0)
+        kfree(fds);
 out:
     poll_freewait(&table);
     return err;
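The chunking arithmetic that sys_poll() keeps using can be sanity-checked in userspace. This sketch assumes a 4096-byte page and the usual 8-byte struct pollfd:

#include <stdio.h>
#include <poll.h>

#define PAGE_SIZE        4096                 /* assumed page size */
#define POLLFD_PER_PAGE  (PAGE_SIZE / sizeof(struct pollfd))

int main(void)
{
    unsigned int nfds = 1300;                 /* example descriptor count */
    unsigned int nchunks = 0, nleft = nfds;
    unsigned int ptrs = 1 + (nfds - 1) / POLLFD_PER_PAGE;

    while (nleft > POLLFD_PER_PAGE) {         /* full PAGE_SIZE chunks */
        nchunks++;
        nleft -= POLLFD_PER_PAGE;
    }
    printf("nfds=%u: %u chunk pointers, %u full pages, %u entries in the last chunk\n",
           nfds, ptrs, nchunks, nleft);
    return 0;
}

Note the nfds != 0 guard added above: with an unsigned nfds of zero, 1 + (nfds - 1) / POLLFD_PER_PAGE wraps around rather than evaluating to zero.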
include/linux/poll.h
@@ -10,32 +10,13 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 
-#define POLL_INLINE_BYTES 256
-#define FAST_SELECT_MAX 128
-#define FAST_POLL_MAX 128
-
-#define POLL_INLINE_ENTRIES (1+(POLL_INLINE_BYTES / sizeof(struct poll_table_entry)))
-
-struct poll_table_entry {
-    struct file * filp;
-    wait_queue_t wait;
-    wait_queue_head_t * wait_address;
-};
-
-struct poll_table_page {
-    struct poll_table_page * next;
-    struct poll_table_entry * entry;
-    struct poll_table_entry entries[0];
-};
+struct poll_table_page;
 
 typedef struct poll_table_struct {
     int error;
     struct poll_table_page * table;
-    struct poll_table_page inline_page;
-    struct poll_table_entry inline_table[POLL_INLINE_ENTRIES];
 } poll_table;
 
-#define POLL_INLINE_TABLE_LEN (sizeof(poll_table) - offsetof(poll_table, inline_page))
-
 extern void __pollwait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p);
 
 static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
@@ -49,7 +30,6 @@ static inline void poll_initwait(poll_table* pt)
     pt->error = 0;
     pt->table = NULL;
 }
-
 extern void poll_freewait(poll_table* pt);
@@ -69,6 +49,27 @@ typedef struct {
 #define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
 #define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
 
+/*
+ * We do a VERIFY_WRITE here even though we are only reading this time:
+ * we'll write to it eventually..
+ *
+ * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
+ */
+static inline
+int get_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
+{
+    nr = FDS_BYTES(nr);
+    if (ufdset) {
+        int error;
+        error = verify_area(VERIFY_WRITE, ufdset, nr);
+        if (!error && __copy_from_user(fdset, ufdset, nr))
+            error = -EFAULT;
+        return error;
+    }
+    memset(fdset, 0, nr);
+    return 0;
+}
+
 static inline
 void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
 {
@@ -76,6 +77,12 @@ void set_fd_set(unsigned long nr, void *ufdset, unsigned long *fdset)
         __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
 }
 
+static inline
+void zero_fd_set(unsigned long nr, unsigned long *fdset)
+{
+    memset(fdset, 0, FDS_BYTES(nr));
+}
+
 extern int do_select(int n, fd_set_bits *fds, long *timeout);
 
 #endif /* KERNEL */
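For context, the poll_table declared here is what a driver's ->poll method feeds through poll_wait(). The sketch below shows the usual consumer side; my_dev, my_dev_readable() and my_dev_writable() are hypothetical stand-ins, and the wait queue head is assumed to live in the device structure:

static unsigned int my_dev_poll(struct file *file, poll_table *wait)
{
    struct my_dev *dev = file->private_data;  /* my_dev is hypothetical */
    unsigned int mask = 0;

    poll_wait(file, &dev->wq, wait);          /* registers via __pollwait() unless wait is NULL */
    if (my_dev_readable(dev))
        mask |= POLLIN | POLLRDNORM;
    if (my_dev_writable(dev))
        mask |= POLLOUT | POLLWRNORM;
    return mask;                              /* do_select()/do_poll() test this against the requested sets */
}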