Commit b884a92a authored by Linus Torvalds

Import 2.1.91pre2

parent f26125cb
......@@ -242,7 +242,7 @@ newversion:
include/linux/compile.h: $(CONFIGURATION) include/linux/version.h newversion
@echo -n \#define UTS_VERSION \"\#`cat .version` > .ver
@if [ -z "$(SMP)" ] ; then echo -n " SMP" >> .ver; fi
@if [ -n "$(SMP)" ] ; then echo -n " SMP" >> .ver; fi
@if [ -f .name ]; then echo -n \-`cat .name` >> .ver; fi
@echo ' '`date`'"' >> .ver
@echo \#define LINUX_COMPILE_TIME \"`date +%T`\" >> .ver
......
......@@ -171,6 +171,8 @@ int cpu_idle(void *unused)
asmlinkage int sys_idle(void)
{
if (current->pid != 0)
return -EPERM;
cpu_idle(NULL);
return 0;
}
......
......@@ -55,12 +55,9 @@ static char buffersize_index[17] =
number of unused buffer heads */
/*
* How large a hash table do we need?
* Hash table mask..
*/
#define HASH_PAGES_ORDER 4
#define HASH_PAGES (1UL << HASH_PAGES_ORDER)
#define NR_HASH (HASH_PAGES*PAGE_SIZE/sizeof(struct buffer_head *))
#define HASH_MASK (NR_HASH-1)
static unsigned long bh_hash_mask = 0;
static int grow_buffers(int pri, int size);
......@@ -421,7 +418,7 @@ void invalidate_buffers(kdev_t dev)
}
}
#define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block))&HASH_MASK)
#define _hashfn(dev,block) (((unsigned)(HASHDEV(dev)^block)) & bh_hash_mask)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
static inline void remove_from_hash_queue(struct buffer_head * bh)
......@@ -732,7 +729,7 @@ static void refill_freelist(int size)
needed = bdf_prm.b_un.nrefill * size;
while ((nr_free_pages > freepages.min*2) &&
BUFFER_MEM < (buffer_mem.max_percent * num_physpages / 100) &&
(buffermem >> PAGE_SHIFT) * 100 < (buffer_mem.max_percent * num_physpages) &&
grow_buffers(GFP_BUFFER, size)) {
obtained += PAGE_SIZE;
if (obtained >= needed)
......@@ -817,7 +814,6 @@ static void refill_freelist(int size)
*/
while (obtained < (needed >> 1) &&
nr_free_pages > freepages.min + 5 &&
BUFFER_MEM < (buffer_mem.max_percent * num_physpages / 100) &&
grow_buffers(GFP_BUFFER, size))
obtained += PAGE_SIZE;
......@@ -1707,11 +1703,16 @@ void show_buffers(void)
*/
void buffer_init(void)
{
hash_table = (struct buffer_head **)
__get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER);
int order = 5; /* Currently maximum order.. */
unsigned int nr_hash;
nr_hash = (1UL << order) * PAGE_SIZE / sizeof(struct buffer_head *);
hash_table = (struct buffer_head **) __get_free_pages(GFP_ATOMIC, order);
if (!hash_table)
panic("Failed to allocate buffer hash table\n");
memset(hash_table,0,NR_HASH*sizeof(struct buffer_head *));
memset(hash_table, 0, nr_hash * sizeof(struct buffer_head *));
bh_hash_mask = nr_hash-1;
bh_cachep = kmem_cache_create("buffer_head",
sizeof(struct buffer_head),
......
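The buffer-cache hunks above drop the compile-time HASH_PAGES_ORDER/NR_HASH/HASH_MASK constants: buffer_init() now allocates an order-5 chunk of pages, derives the bucket count from it, and stores nr_hash - 1 in bh_hash_mask so _hashfn() masks against a value computed at boot. A minimal userspace sketch of that scheme, under the assumption of 4 kB pages; table_init() and hash_slot() are illustrative names, not kernel functions:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL                /* assumption: 4 kB pages */

    static void **hash_table;               /* bucket heads */
    static unsigned long bh_hash_mask;      /* bucket count - 1 (power of two) */

    /* Size the table from an allocation order the way buffer_init() now
     * does: the buckets fill (1 << order) pages, and because the bucket
     * count is a power of two the mask is simply count - 1. */
    static int table_init(int order)
    {
            unsigned long nr_hash = (1UL << order) * PAGE_SIZE / sizeof(void *);

            hash_table = calloc(nr_hash, sizeof(void *));
            if (hash_table == NULL)
                    return -1;
            bh_hash_mask = nr_hash - 1;
            return 0;
    }

    /* Counterpart of _hashfn(): mix device and block, then mask. */
    static unsigned long hash_slot(unsigned long dev, unsigned long block)
    {
            return (dev ^ block) & bh_hash_mask;
    }

    int main(void)
    {
            if (table_init(5))              /* order 5, as in the patch */
                    return 1;
            printf("buckets=%lu  slot(3,1234)=%lu\n",
                   bh_hash_mask + 1, hash_slot(3, 1234));
            free(hash_table);
            return 0;
    }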
......@@ -39,7 +39,6 @@ extern atomic_t nr_async_pages;
extern struct inode swapper_inode;
extern unsigned long page_cache_size;
extern int buffermem;
#define BUFFER_MEM ((buffermem >> PAGE_SHIFT) + page_cache_size)
/* Incomplete types for prototype declarations: */
struct task_struct;
......
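With the BUFFER_MEM macro removed from the header, the call sites in refill_freelist(), shrink_mmap() and do_try_to_free_page() compare buffer memory alone against a percentage of num_physpages, written as a cross-multiplication so the hot path never divides by 100. A small sketch of that comparison; buffer_over_percent() is a made-up helper, and the numbers in main() are arbitrary:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long buffermem;         /* buffer cache size in bytes */
    static unsigned long num_physpages;     /* total pages of RAM */

    /* Is buffer memory above `percent` of physical memory?  Written as
     * pages * 100 > percent * num_physpages, the same integer
     * cross-multiplication the patch uses instead of dividing by 100. */
    static int buffer_over_percent(unsigned int percent)
    {
            return (buffermem >> PAGE_SHIFT) * 100 > percent * num_physpages;
    }

    int main(void)
    {
            num_physpages = 8192;           /* 32 MB of 4 kB pages */
            buffermem = 3 * 1024 * 1024;    /* 3 MB of buffers */
            /* min/borrow/max of 3/10/30 percent, matching the new defaults */
            printf("over 3%%: %d, over 10%%: %d, over 30%%: %d\n",
                   buffer_over_percent(3), buffer_over_percent(10),
                   buffer_over_percent(30));
            return 0;
    }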
......@@ -191,7 +191,7 @@ static ctl_table vm_table[] = {
{VM_SWAPCTL, "swapctl",
&swap_control, sizeof(swap_control_t), 0600, NULL, &proc_dointvec},
{VM_SWAPOUT, "swapout_interval",
&swapout_interval, sizeof(int), 0600, NULL, &proc_dointvec_jiffies},
&swapout_interval, sizeof(int), 0600, NULL, &proc_dointvec},
{VM_FREEPG, "freepages",
&freepages, sizeof(freepages_t), 0600, NULL, &proc_dointvec},
{VM_BDFLUSH, "bdflush", &bdf_prm, 9*sizeof(int), 0600, NULL,
......
......@@ -150,6 +150,10 @@ int shrink_mmap(int priority, int gfp_mask)
}
tmp = tmp->b_this_page;
} while (tmp != bh);
/* Refuse to swap out all buffer pages */
if ((buffermem >> PAGE_SHIFT) * 100 > (buffer_mem.min_percent * num_physpages))
goto next;
}
/* We can't throw away shared pages, but we do mark
......
......@@ -124,6 +124,7 @@ static spinlock_t page_alloc_lock;
*/
int free_memory_available(int nr)
{
int retval = 0;
unsigned long flags;
struct free_area_struct * list = NULL;
......@@ -141,10 +142,11 @@ int free_memory_available(int nr)
if (list->next->next == memory_head(list))
continue;
/* More than one item? We're ok */
retval = nr + 1;
break;
} while (--nr >= 0);
spin_unlock_irqrestore(&page_alloc_lock, flags);
return nr + 1;
return retval;
}
static inline void free_pages_ok(unsigned long map_nr, unsigned long order)
......
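free_memory_available() now records its answer in retval inside the loop and breaks out as soon as some free-area order holds more than one free block, rather than reconstructing the result from the loop counter afterwards. A rough userspace sketch of that scan, with a per-order counter standing in for the kernel's circular free_area lists:

    #include <stdio.h>

    #define NR_MEM_LISTS 10

    /* Free blocks on each order's list (a plain count is enough here). */
    static int free_count[NR_MEM_LISTS];

    /* Walk from the largest order downwards, looking at at most nr extra
     * orders: an empty list means memory is tight, a single block means
     * "look further down", more than one block means we are fine.  The
     * answer is set inside the loop, as in the patched function. */
    static int free_memory_available(int nr)
    {
            int retval = 0;
            int order = NR_MEM_LISTS;

            do {
                    order--;
                    if (free_count[order] == 0)     /* empty list: memory is tight */
                            break;
                    if (free_count[order] == 1)     /* one block: look further */
                            continue;
                    retval = nr + 1;                /* more than one: we're ok */
                    break;
            } while (--nr >= 0 && order > 0);
            return retval;
    }

    int main(void)
    {
            free_count[NR_MEM_LISTS - 1] = 1;       /* top order: one block */
            free_count[NR_MEM_LISTS - 2] = 3;       /* next order: plenty */
            printf("available(0)=%d available(1)=%d\n",
                   free_memory_available(0), free_memory_available(1));
            return 0;
    }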
......@@ -67,8 +67,8 @@ swap_control_t swap_control = {
swapstat_t swapstats = {0};
buffer_mem_t buffer_mem = {
6, /* minimum percent buffer + cache memory */
20, /* borrow percent buffer + cache memory */
90 /* maximum percent buffer + cache memory */
3, /* minimum percent buffer */
10, /* borrow percent buffer */
30 /* maximum percent buffer */
};
......@@ -31,7 +31,7 @@
/*
* When are we next due for a page scan?
*/
static int next_swap_jiffies = 0;
static unsigned long next_swap_jiffies = 0;
/*
* How often do we do a pageout scan during normal conditions?
......@@ -451,14 +451,13 @@ static inline int do_try_to_free_page(int gfp_mask)
stop = 3;
if (gfp_mask & __GFP_WAIT)
stop = 0;
if (BUFFER_MEM > buffer_mem.borrow_percent * num_physpages / 100)
if ((buffermem >> PAGE_SHIFT) * 100 > buffer_mem.borrow_percent * num_physpages)
state = 0;
switch (state) {
do {
case 0:
if (BUFFER_MEM > (buffer_mem.min_percent * num_physpages /100) &&
shrink_mmap(i, gfp_mask))
if (shrink_mmap(i, gfp_mask))
return 1;
state = 1;
case 1:
......@@ -547,18 +546,30 @@ int kswapd(void *unused)
run_task_queue(&tq_disk);
schedule();
swapstats.wakeups++;
/* Do the background pageout:
* When we've got loads of memory, we try
* (freepages.high - nr_free_pages) times to
* free memory. As memory gets tighter, kswapd
* gets more and more agressive. -- Rik.
/*
* Do the background pageout: be
* more aggressive if we're really
* low on free memory.
*
* Normally this is called 4 times
* a second if we need more memory,
* so this has a normal rate of
* X*4 pages of memory free'd per
* second. That rate goes up when
*
* - we're really low on memory (we get woken
* up a lot more)
* - other processes fail to allocate memory,
* at which time they try to do their own
* freeing.
*
* A "tries" value of 50 means up to 200 pages
* per second (1.6MB/s). This should be a /proc
* thing.
*/
tries = freepages.high - nr_free_pages;
if (tries < freepages.min) {
tries = freepages.min;
}
if (nr_free_pages < freepages.low)
tries <<= 1;
tries = 50;
while (tries--) {
int gfp_mask;
......@@ -583,7 +594,6 @@ int kswapd(void *unused)
/*
* The swap_tick function gets called on every clock tick.
*/
void swap_tick(void)
{
unsigned long now, want;
......@@ -604,13 +614,13 @@ void swap_tick(void)
case 0:
want = now;
/* Fall through */
case 1 ... 2:
case 1 ... 3:
want_wakeup = 1;
default:
}
if ((long) (now - want) >= 0) {
if (want_wakeup || (num_physpages * buffer_mem.max_percent / 100) < BUFFER_MEM) {
if (want_wakeup || (num_physpages * buffer_mem.max_percent) < (buffermem >> PAGE_SHIFT) * 100) {
/* Set the next wake-up time */
next_swap_jiffies = now + swapout_interval;
wake_up(&kswapd_wait);
......
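In the vmscan hunks, kswapd now always makes a fixed 50 passes per wakeup (the new comment's own arithmetic: roughly four wakeups a second gives on the order of 200 freed pages a second), and next_swap_jiffies becomes an unsigned long so swap_tick() can test (long)(now - want) >= 0, which stays correct when the jiffies counter wraps. A tiny sketch of that wrap-safe comparison; reached() is a name chosen for this example only:

    #include <stdio.h>

    /* Wrap-safe "has `now` reached `want`?" test: take the unsigned
     * difference and look at its sign as a signed long, exactly the
     * (long)(now - want) >= 0 comparison used in swap_tick(). */
    static int reached(unsigned long now, unsigned long want)
    {
            return (long)(now - want) >= 0;
    }

    int main(void)
    {
            unsigned long near_wrap = ~0UL - 5;     /* counter just before wrap */

            /* A deadline scheduled across the wrap point still compares
             * correctly, where a plain `now >= want` test would not. */
            printf("before deadline: %d\n", reached(near_wrap, near_wrap + 10));
            printf("after deadline:  %d\n", reached(near_wrap + 20, near_wrap + 10));
            return 0;
    }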
......@@ -132,15 +132,13 @@ struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
unsigned long flags;
save_flags(flags);
cli();
skb=skb_peek(&sk->receive_queue);
skb = skb_peek(&sk->receive_queue);
if(skb!=NULL)
atomic_inc(&skb->users);
restore_flags(flags);
if(skb==NULL) /* shouldn't happen but .. */
goto restart;
return skb;
}
skb = skb_dequeue(&sk->receive_queue);
} else
skb = skb_dequeue(&sk->receive_queue);
if (!skb) /* Avoid race if someone beats us to the data */
goto restart;
return skb;
......@@ -163,30 +161,23 @@ void skb_free_datagram(struct sock * sk, struct sk_buff *skb)
int skb_copy_datagram(struct sk_buff *skb, int offset, char *to, int size)
{
int err;
err = copy_to_user(to, skb->h.raw+offset, size);
if (err)
{
err = -EFAULT;
}
int err = -EFAULT;
if (!copy_to_user(to, skb->h.raw + offset, size))
err = 0;
return err;
}
/*
* Copy a datagram to an iovec.
* Note: the iovec is modified during the copy.
*/
int skb_copy_datagram_iovec(struct sk_buff *skb, int offset, struct iovec *to,
int size)
{
int err;
err = memcpy_toiovec(to, skb->h.raw+offset, size);
if (err)
{
err = -EFAULT;
}
return err;
return memcpy_toiovec(to, skb->h.raw + offset, size);
}
/*
......
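The datagram.c changes switch to pre-loading err with -EFAULT and clearing it only when the user copy reports nothing left to copy, since copy_to_user() returns a residual byte count rather than an errno. A userspace sketch of that shape; copy_to_user_sim() and copy_packet() are stand-ins invented for the example:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for copy_to_user(): returns the number of bytes it could
     * NOT copy (0 on full success), which is why the caller must map a
     * nonzero result to -EFAULT itself. */
    static unsigned long copy_to_user_sim(void *to, const void *from,
                                          unsigned long n, unsigned long faulted)
    {
            memcpy(to, from, n - faulted);
            return faulted;
    }

    /* Same control flow as the patched skb_copy_datagram(): start from
     * -EFAULT and only clear the error when the copy fully succeeded. */
    static int copy_packet(char *to, const char *pkt, int size,
                           unsigned long faulted)
    {
            int err = -EFAULT;

            if (!copy_to_user_sim(to, pkt, size, faulted))
                    err = 0;
            return err;
    }

    int main(void)
    {
            char buf[16];

            printf("ok:    %d\n", copy_packet(buf, "datagram", 8, 0));
            printf("fault: %d\n", copy_packet(buf, "datagram", 8, 3));
            return 0;
    }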
......@@ -30,7 +30,6 @@
/*
* Verify iovec
* verify area does a simple check for completly bogus addresses
*
* Save time not doing verify_area. copy_*_user will make this work
* in any case.
......@@ -79,22 +78,21 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, char *address, int mode)
}
/*
* Copy kernel to iovec.
* Copy kernel to iovec. Returns -EFAULT on error.
*
* Note: this modifies the original iovec.
*/
int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
{
int err;
int err = -EFAULT;
while(len>0)
{
if(iov->iov_len)
{
int copy = min(iov->iov_len, len);
err = copy_to_user(iov->iov_base, kdata, copy);
if (err)
if (copy_to_user(iov->iov_base, kdata, copy))
goto out;
kdata+=copy;
len-=copy;
......@@ -109,7 +107,7 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
}
/*
* Copy iovec to kernel.
* Copy iovec to kernel. Returns -EFAULT on error.
*
* Note: this modifies the original iovec.
*/
......@@ -147,35 +145,23 @@ int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset,
{
int err = -EFAULT;
while(offset>0)
/* Skip over the finished iovecs */
while(offset >= iov->iov_len)
{
if (offset > iov->iov_len)
{
offset -= iov->iov_len;
}
else
{
u8 *base = iov->iov_base + offset;
int copy = min(len, iov->iov_len - offset);
offset = 0;
if (copy_from_user(kdata, base, copy))
goto out;
len-=copy;
kdata+=copy;
}
offset -= iov->iov_len;
iov++;
}
while (len>0)
while (len > 0)
{
int copy = min(len, iov->iov_len);
u8 *base = iov->iov_base + offset;
int copy = min(len, iov->iov_len - offset);
if (copy_from_user(kdata, iov->iov_base, copy))
offset = 0;
if (copy_from_user(kdata, base, copy))
goto out;
len-=copy;
kdata+=copy;
len -= copy;
kdata += copy;
iov++;
}
err = 0;
......@@ -195,51 +181,22 @@ int memcpy_fromiovecend(unsigned char *kdata, struct iovec *iov, int offset,
int csum_partial_copy_fromiovecend(unsigned char *kdata, struct iovec *iov,
int offset, unsigned int len, int *csump)
{
int partial_cnt = 0;
int err = 0;
int csum;
int csum = *csump;
int partial_cnt = 0, err = 0;
do {
int copy = iov->iov_len - offset;
if (copy > 0) {
u8 *base = iov->iov_base + offset;
/* Normal case (single iov component) is fastly detected */
if (len <= copy) {
*csump = csum_and_copy_from_user(base, kdata,
len, *csump, &err);
goto out;
}
partial_cnt = copy % 4;
if (partial_cnt) {
copy -= partial_cnt;
if (copy_from_user(kdata + copy, base + copy,
partial_cnt))
goto out_fault;
}
*csump = csum_and_copy_from_user(base, kdata, copy,
*csump, &err);
if (err)
goto out;
len -= copy + partial_cnt;
kdata += copy + partial_cnt;
iov++;
break;
}
/* Skip over the finished iovecs */
while (offset >= iov->iov_len)
{
offset -= iov->iov_len;
iov++;
offset = -copy;
} while (offset > 0);
csum = *csump;
}
while (len > 0)
{
u8 *base = iov->iov_base;
unsigned int copy = min(len, iov->iov_len);
u8 *base = iov->iov_base + offset;
unsigned int copy = min(len, iov->iov_len - offset);
offset = 0;
/* There is a remnant from previous iov. */
if (partial_cnt)
{
......
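The rewritten memcpy_fromiovecend() first steps past iovec entries that the starting offset completely covers, then copies chunk by chunk, applying the leftover offset only to the first chunk. A userspace rendering of that loop, with plain memcpy() standing in for copy_from_user(), so there is no fault handling here:

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    /* Gather `len` bytes starting `offset` bytes into the iovec array:
     * skip finished iovecs first, then copy chunk by chunk, honouring
     * the leftover offset only in the first chunk. */
    static void gather(unsigned char *kdata, const struct iovec *iov,
                       size_t offset, size_t len)
    {
            /* Skip over the finished iovecs */
            while (offset >= iov->iov_len) {
                    offset -= iov->iov_len;
                    iov++;
            }

            while (len > 0) {
                    unsigned char *base = (unsigned char *)iov->iov_base + offset;
                    size_t copy = min(len, iov->iov_len - offset);

                    offset = 0;             /* only the first chunk is offset */
                    memcpy(kdata, base, copy);
                    kdata += copy;
                    len -= copy;
                    iov++;
            }
    }

    int main(void)
    {
            char a[] = "hello ", b[] = "iovec ", c[] = "world";
            struct iovec iov[3] = {
                    { .iov_base = a, .iov_len = 6 },
                    { .iov_base = b, .iov_len = 6 },
                    { .iov_base = c, .iov_len = 5 },
            };
            unsigned char out[16] = { 0 };

            gather(out, iov, 8, 9);         /* skips "hello " and "io" */
            printf("%s\n", (char *)out);    /* prints "vec world" */
            return 0;
    }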
......@@ -262,9 +262,9 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, int len,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
struct sk_buff *skb;
struct device *dev;
struct sockaddr_pkt *saddr=(struct sockaddr_pkt *)msg->msg_name;
unsigned short proto=0;
int err;
......@@ -309,6 +309,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, int len,
return -EMSGSIZE;
dev_lock_list();
err = -ENOBUFS;
skb = sock_wmalloc(sk, len+dev->hard_header_len+15, 0, GFP_KERNEL);
/*
......@@ -318,10 +319,7 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, int len,
*/
if (skb == NULL)
{
dev_unlock_list();
return(-ENOBUFS);
}
goto out_unlock;
/*
* Fill it in
......@@ -339,36 +337,32 @@ static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg, int len,
skb->data -= dev->hard_header_len;
skb->tail -= dev->hard_header_len;
}
/* Returns -EFAULT on error */
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->priority;
dev_unlock_list();
if (err)
goto out_free;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_free;
/*
* Now send it
*/
if (err)
{
err = -EFAULT;
}
else
{
if (!(dev->flags & IFF_UP))
{
err = -ENETDOWN;
}
}
if (err)
{
kfree_skb(skb);
return err;
}
dev_unlock_list();
dev_queue_xmit(skb);
return(len);
out_free:
kfree_skb(skb);
out_unlock:
dev_unlock_list();
return err;
}
#endif
......@@ -434,13 +428,12 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
struct sk_buff *skb;
struct device *dev;
struct sockaddr_ll *saddr=(struct sockaddr_ll *)msg->msg_name;
unsigned short proto;
int ifindex;
int err;
int reserve = 0;
unsigned char *addr;
int ifindex, err, reserve = 0;
/*
* Check the flags.
......@@ -454,13 +447,15 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
*/
if (saddr == NULL) {
ifindex = sk->protinfo.af_packet->ifindex;
proto = sk->num;
ifindex = sk->protinfo.af_packet->ifindex;
proto = sk->num;
addr = NULL;
} else {
if (msg->msg_namelen < sizeof(struct sockaddr_ll))
return -EINVAL;
ifindex = saddr->sll_ifindex;
proto = saddr->sll_protocol;
ifindex = saddr->sll_ifindex;
proto = saddr->sll_protocol;
addr = saddr->sll_addr;
}
dev = dev_get_by_index(ifindex);
......@@ -474,55 +469,50 @@ static int packet_sendmsg(struct socket *sock, struct msghdr *msg, int len,
dev_lock_list();
skb = sock_alloc_send_skb(sk, len+dev->hard_header_len+15, 0, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb==NULL) {
dev_unlock_list();
return err;
}
skb = sock_alloc_send_skb(sk, len+dev->hard_header_len+15, 0,
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb==NULL)
goto out_unlock;
skb_reserve(skb, (dev->hard_header_len+15)&~15);
skb->nh.raw = skb->data;
if (dev->hard_header) {
if (dev->hard_header(skb, dev, ntohs(proto),
saddr ? saddr->sll_addr : NULL,
NULL, len) < 0
&& sock->type == SOCK_DGRAM) {
kfree_skb(skb);
dev_unlock_list();
return -EINVAL;
}
int res;
err = -EINVAL;
res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len);
if (sock->type != SOCK_DGRAM) {
skb->tail = skb->data;
skb->len = 0;
}
} else if (res < 0)
goto out_free;
}
/* Returns -EFAULT on error */
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->priority;
dev_unlock_list();
if (err)
goto out_free;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_free;
/*
* Now send it
*/
if (err) {
err = -EFAULT;
} else {
if (!(dev->flags & IFF_UP))
err = -ENETDOWN;
}
if (err) {
kfree_skb(skb);
return err;
}
dev_unlock_list();
dev_queue_xmit(skb);
return(len);
out_free:
kfree_skb(skb);
out_unlock:
dev_unlock_list();
return err;
}
static void packet_destroy_timer(unsigned long data)
......@@ -699,6 +689,7 @@ static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len
static int packet_create(struct socket *sock, int protocol)
{
struct sock *sk;
int err;
if (!suser())
return -EPERM;
......@@ -711,27 +702,23 @@ static int packet_create(struct socket *sock, int protocol)
sock->state = SS_UNCONNECTED;
MOD_INC_USE_COUNT;
err = -ENOBUFS;
sk = sk_alloc(AF_PACKET, GFP_KERNEL, 1);
if (sk == NULL) {
MOD_DEC_USE_COUNT;
return -ENOBUFS;
}
if (sk == NULL)
goto out;
sk->reuse = 1;
sock->ops = &packet_ops;
#ifdef CONFIG_SOCK_PACKET
if (sock->type == SOCK_PACKET)
sock->ops = &packet_ops_spkt;
else
#endif
sock->ops = &packet_ops;
sock_init_data(sock,sk);
sk->protinfo.af_packet = kmalloc(sizeof(struct packet_opt), GFP_KERNEL);
if (sk->protinfo.af_packet == NULL) {
sk_free(sk);
MOD_DEC_USE_COUNT;
return -ENOBUFS;
}
if (sk->protinfo.af_packet == NULL)
goto out_free;
memset(sk->protinfo.af_packet, 0, sizeof(struct packet_opt));
sk->zapped=0;
sk->family = AF_PACKET;
......@@ -741,13 +728,11 @@ static int packet_create(struct socket *sock, int protocol)
* Attach a protocol block
*/
sk->protinfo.af_packet->prot_hook.func = packet_rcv;
#ifdef CONFIG_SOCK_PACKET
if (sock->type == SOCK_PACKET)
sk->protinfo.af_packet->prot_hook.func = packet_rcv_spkt;
else
#endif
sk->protinfo.af_packet->prot_hook.func = packet_rcv;
sk->protinfo.af_packet->prot_hook.data = (void *)sk;
if (protocol) {
......@@ -758,6 +743,12 @@ static int packet_create(struct socket *sock, int protocol)
sklist_insert_socket(&packet_sklist, sk);
return(0);
out_free:
sk_free(sk);
out:
MOD_DEC_USE_COUNT;
return err;
}
/*
......@@ -832,10 +823,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, int len,
/* We can't use skb_copy_datagram here */
err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
if (err) {
err = -EFAULT;
if (err)
goto out_free;
}
sk->stamp=skb->stamp;
if (msg->msg_name)
......@@ -932,37 +921,39 @@ static void packet_dev_mclist(struct device *dev, struct packet_mclist *i, int w
static int packet_mc_add(struct sock *sk, struct packet_mreq *mreq)
{
int err;
struct packet_mclist *ml, *i;
struct device *dev;
int err;
rtnl_shlock();
dev = dev_get_by_index(mreq->mr_ifindex);
i = NULL;
err = -ENODEV;
dev = dev_get_by_index(mreq->mr_ifindex);
if (!dev)
goto done;
err = -EINVAL;
if (mreq->mr_alen > dev->addr_len)
goto done;
err = -ENOBUFS;
i = (struct packet_mclist *)kmalloc(sizeof(*i), GFP_KERNEL);
if (i == NULL)
goto done;
err = 0;
for (ml=sk->protinfo.af_packet->mclist; ml; ml=ml->next) {
if (ml->ifindex == mreq->mr_ifindex &&
ml->type == mreq->mr_type &&
ml->alen == mreq->mr_alen &&
memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
ml->count++;
err = 0;
/* Free the new element ... */
kfree(i);
goto done;
}
}
err = -ENOBUFS;
if (i == NULL)
goto done;
i->type = mreq->mr_type;
i->ifindex = mreq->mr_ifindex;
i->alen = mreq->mr_alen;
......@@ -971,13 +962,9 @@ static int packet_mc_add(struct sock *sk, struct packet_mreq *mreq)
i->next = sk->protinfo.af_packet->mclist;
sk->protinfo.af_packet->mclist = i;
packet_dev_mc(dev, i, +1);
i = NULL;
err = 0;
done:
rtnl_shunlock();
if (i)
kfree(i);
return err;
}
......@@ -1109,13 +1096,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg
case FIOGETOWN:
case SIOCGPGRP:
return put_user(sk->proc, (int *)arg);
return(0);
case SIOCGSTAMP:
if(sk->stamp.tv_sec==0)
return -ENOENT;
err = copy_to_user((void *)arg,&sk->stamp,sizeof(struct timeval));
if (err)
err = -EFAULT;
err = -EFAULT;
if (!copy_to_user((void *)arg, &sk->stamp, sizeof(struct timeval)))
err = 0;
return err;
case SIOCGIFFLAGS:
#ifndef CONFIG_INET
......
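Most of the af_packet.c churn replaces early returns that each had to repeat kfree_skb(), dev_unlock_list() or MOD_DEC_USE_COUNT with single labelled exits reached by goto, so cleanup runs once and in reverse order of acquisition. A generic sketch of that unwinding shape with invented resource names; it is not the real send path:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Invented stand-ins for the resources the real send path holds. */
    static void lock_list(void)       { puts("lock"); }
    static void unlock_list(void)     { puts("unlock"); }
    static void *alloc_skb(int fail)  { return fail ? NULL : malloc(64); }
    static void free_skb(void *skb)   { puts("free skb"); free(skb); }
    static int fill_skb(void *skb, int fail) { (void)skb; return fail ? -EFAULT : 0; }

    /* One success path and labelled exits for each failure point: the
     * same unwinding shape the patch gives packet_sendmsg() and
     * packet_create().  Cleanup happens once, in reverse order. */
    static int send_sketch(int alloc_fails, int copy_fails)
    {
            void *skb;
            int err;

            lock_list();

            err = -ENOBUFS;
            skb = alloc_skb(alloc_fails);
            if (skb == NULL)
                    goto out_unlock;

            err = fill_skb(skb, copy_fails);
            if (err)
                    goto out_free;

            unlock_list();
            free_skb(skb);          /* the real code hands the skb to dev_queue_xmit() */
            return 0;

    out_free:
            free_skb(skb);
    out_unlock:
            unlock_list();
            return err;
    }

    int main(void)
    {
            printf("ok=%d\n", send_sketch(0, 0));
            printf("nobufs=%d\n", send_sketch(1, 0));
            printf("fault=%d\n", send_sketch(0, 1));
            return 0;
    }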
......@@ -1142,19 +1142,21 @@ asmlinkage int sys_sendmsg(int fd, struct msghdr *msg, unsigned flags)
char address[MAX_SOCK_ADDR];
struct iovec iov[UIO_FASTIOV];
unsigned char ctl[sizeof(struct cmsghdr) + 20]; /* 20 is size of ipv6_pktinfo */
struct msghdr msg_sys;
int err= -EINVAL;
int total_len;
unsigned char *ctl_buf = ctl;
struct msghdr msg_sys;
int err, total_len;
lock_kernel();
err=-EFAULT;
err = -EFAULT;
if (copy_from_user(&msg_sys,msg,sizeof(struct msghdr)))
goto out;
/* do not move before msg_sys is valid */
if (msg_sys.msg_iovlen>UIO_MAXIOV)
err = -EINVAL;
if (msg_sys.msg_iovlen > UIO_MAXIOV)
goto out;
/* This will also move the address data into kernel space */
err = verify_iovec(&msg_sys, iov, address, VERIFY_READ);
if (err < 0)
......@@ -1164,7 +1166,7 @@ asmlinkage int sys_sendmsg(int fd, struct msghdr *msg, unsigned flags)
sock = sockfd_lookup(fd, &err);
if (!sock)
goto out;
goto out_freeiov;
if (msg_sys.msg_controllen)
{
......@@ -1198,9 +1200,10 @@ asmlinkage int sys_sendmsg(int fd, struct msghdr *msg, unsigned flags)
if (ctl_buf != ctl)
sock_kfree_s(sock->sk, ctl_buf, msg_sys.msg_controllen);
failed2:
sockfd_put(sock);
out_freeiov:
if (msg_sys.msg_iov != iov)
kfree(msg_sys.msg_iov);
sockfd_put(sock);
out:
unlock_kernel();
return err;
......@@ -1229,16 +1232,13 @@ asmlinkage int sys_recvmsg(int fd, struct msghdr *msg, unsigned int flags)
int *uaddr_len;
lock_kernel();
err=-EFAULT;
if (copy_from_user(&msg_sys,msg,sizeof(struct msghdr)))
{
err=-EFAULT;
goto out;
}
if (msg_sys.msg_iovlen>UIO_MAXIOV)
{
err=-EINVAL;
err=-EINVAL;
if (msg_sys.msg_iovlen > UIO_MAXIOV)
goto out;
}
/*
* Save the user-mode address (verify_iovec will change the
......
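In sys_sendmsg(), the msg_iovlen sanity check now runs only after the msghdr has been copied in (hence the new "do not move before msg_sys is valid" comment), and a failed sockfd_lookup() goes through out_freeiov so a verified iovec is not leaked. A cut-down sketch of that prologue ordering; msg_hdr_sim, copy_hdr() and sendmsg_prologue() are invented for the example:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define UIO_MAXIOV 1024

    struct msg_hdr_sim {                    /* cut-down stand-in for struct msghdr */
            void *iov;
            unsigned long iovlen;
    };

    /* Stand-in for copy_from_user(): returns bytes NOT copied. */
    static unsigned long copy_hdr(struct msg_hdr_sim *dst,
                                  const struct msg_hdr_sim *usr, int fault)
    {
            if (fault)
                    return sizeof(*dst);
            memcpy(dst, usr, sizeof(*dst));
            return 0;
    }

    /* Mirror of the reordered prologue: first bring the header into
     * local memory, only then look at its msg_iovlen field. */
    static int sendmsg_prologue(const struct msg_hdr_sim *user_msg, int fault)
    {
            struct msg_hdr_sim msg_sys;
            int err;

            err = -EFAULT;
            if (copy_hdr(&msg_sys, user_msg, fault))
                    goto out;

            err = -EINVAL;
            if (msg_sys.iovlen > UIO_MAXIOV)
                    goto out;

            err = 0;                /* the real code goes on to verify_iovec() */
    out:
            return err;
    }

    int main(void)
    {
            struct msg_hdr_sim good = { NULL, 4 }, huge = { NULL, 1UL << 20 };

            printf("good=%d huge=%d fault=%d\n",
                   sendmsg_prologue(&good, 0),
                   sendmsg_prologue(&huge, 0),
                   sendmsg_prologue(&good, 1));
            return 0;
    }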
......@@ -687,17 +687,19 @@ static int unix_stream_connect1(struct socket *sock, struct msghdr *msg,
skb=sock_alloc_send_skb(sk, len, 0, nonblock, &err); /* Marker object */
if(skb==NULL)
return err;
goto out;
memcpy(&UNIXCB(skb), cmsg, sizeof(*cmsg));
if (len)
memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
if (len) {
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov,
len);
if (err)
goto out_free;
}
sk->state=TCP_CLOSE;
other=unix_find_other(sunaddr, addr_len, sk->type, hash, &err);
if(other==NULL)
{
kfree_skb(skb);
return err;
}
goto out_free;
other->ack_backlog++;
unix_peer(sk)=other;
skb_queue_tail(&other->receive_queue,skb);
......@@ -738,6 +740,11 @@ static int unix_stream_connect1(struct socket *sock, struct msghdr *msg,
if (!sk->protinfo.af_unix.addr)
unix_autobind(sock);
return 0;
out_free:
kfree_skb(skb);
out:
return err;
}
......@@ -908,8 +915,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, int len,
struct scm_cookie *scm)
{
struct sock *sk = sock->sk;
unix_socket *other;
struct sockaddr_un *sunaddr=msg->msg_name;
unix_socket *other;
int namelen = 0; /* fake GCC */
int err;
unsigned hash;
......@@ -935,9 +942,8 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, int len,
unix_autobind(sock);
skb = sock_alloc_send_skb(sk, len, 0, msg->msg_flags&MSG_DONTWAIT, &err);
if (skb==NULL)
return err;
goto out;
memcpy(UNIXCREDS(skb), &scm->creds, sizeof(struct ucred));
UNIXCB(skb).attr = msg->msg_flags;
......@@ -945,7 +951,9 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, int len,
unix_attach_fds(scm, skb);
skb->h.raw = skb->data;
memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
if (err)
goto out_free;
other = unix_peer(sk);
if (other && other->dead)
......@@ -957,26 +965,18 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, int len,
unix_unlock(other);
unix_peer(sk)=NULL;
other = NULL;
if (sunaddr == NULL) {
kfree_skb(skb);
return -ECONNRESET;
}
err = -ECONNRESET;
if (sunaddr == NULL)
goto out_free;
}
if (!other)
{
other = unix_find_other(sunaddr, namelen, sk->type, hash, &err);
if (other==NULL)
{
kfree_skb(skb);
return err;
}
goto out_free;
err = -EINVAL;
if (!unix_may_send(sk, other))
{
unix_unlock(other);
kfree_skb(skb);
return -EINVAL;
}
goto out_unlock;
}
skb_queue_tail(&other->receive_queue, skb);
......@@ -985,6 +985,13 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, int len,
if (!unix_peer(sk))
unix_unlock(other);
return len;
out_unlock:
unix_unlock(other);
out_free:
kfree_skb(skb);
out:
return err;
}
......@@ -1267,9 +1274,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, int size
}
chunk = min(skb->len, size);
/* N.B. This could fail with a non-zero value (which means -EFAULT
* and the non-zero value is the number of bytes not copied).
*/
/* N.B. This could fail with -EFAULT */
memcpy_toiovec(msg->msg_iov, skb->data, chunk);
copied += chunk;
size -= chunk;
......
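The af_unix hunks make the send paths check memcpy_fromiovec()'s return value and unwind through out_free/out labels, while the stream receive path keeps its chunk = min(skb->len, size) copy, now with a shorter note that memcpy_toiovec() can still fail with -EFAULT. A sketch of that chunked receive loop with plain buffers; unlike the real code, which re-queues a partly read skb, this version simply stops when the caller's buffer is full:

    #include <stdio.h>
    #include <string.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    struct msg_sim {                        /* stand-in for a queued sk_buff */
            const char *data;
            size_t len;
    };

    /* Drain queued messages into `to`, at most `size` bytes in total:
     * each pass copies min(message length, remaining space), the same
     * chunk = min(skb->len, size) step the stream recvmsg path uses. */
    static size_t stream_recv(char *to, size_t size,
                              const struct msg_sim *queue, int nmsgs)
    {
            size_t copied = 0;
            int i;

            for (i = 0; i < nmsgs && size > 0; i++) {
                    size_t chunk = min(queue[i].len, size);

                    memcpy(to + copied, queue[i].data, chunk);
                    copied += chunk;
                    size -= chunk;
            }
            return copied;
    }

    int main(void)
    {
            struct msg_sim queue[] = {
                    { "hello ", 6 }, { "unix ", 5 }, { "stream", 6 },
            };
            char buf[10];
            size_t n = stream_recv(buf, sizeof(buf) - 1, queue, 3);

            buf[n] = '\0';
            printf("%zu bytes: \"%s\"\n", n, buf);  /* 9 bytes: "hello uni" */
            return 0;
    }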