Commit 28609d40 authored by Jim Sung, committed by Greg Kroah-Hartman

usb: subtle increased memory usage in u_serial

OK, the USB gadget serial driver actually has a couple of problems.  On
gs_open(), it always allocates and queues an additional QUEUE_SIZE (16)
requests, so with a loop like this (each echo opens and closes the port):

    i=1 ; while echo $i > /dev/ttyGS0 ; do let i++ ; done

eventually we run into OOM (Out of Memory).

Technically it is not a leak: everything does get freed when the USB
connection is broken, but nothing is ever freed on gs_close().
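
The pre-patch behavior is easy to model in plain user-space C.  The
sketch below is purely illustrative (the fake_* names and the single
free-list are hypothetical stand-ins, not the driver's real data
structures): every open adds another QUEUE_SIZE requests to the pool,
close leaves them alone, and the pool grows without bound:

    #include <stdio.h>
    #include <stdlib.h>

    #define QUEUE_SIZE 16

    /* toy stand-in for struct usb_request */
    struct fake_req {
            struct fake_req *next;
    };

    static struct fake_req *pool;   /* models port->read_pool/write_pool */
    static int pool_size;

    /* models the old gs_open()/gs_start_io() path: unconditionally adds
     * QUEUE_SIZE requests on every open, with no cap and no accounting */
    static void fake_gs_open(void)
    {
            for (int i = 0; i < QUEUE_SIZE; i++) {
                    struct fake_req *req = malloc(sizeof(*req));
                    if (!req)
                            return;
                    req->next = pool;
                    pool = req;
                    pool_size++;
            }
    }

    /* models the old gs_close(): the pool is left untouched; only a USB
     * disconnect (gserial_disconnect) ever frees it */
    static void fake_gs_close(void) { }

    int main(void)
    {
            for (int i = 1; i <= 4; i++) {
                    fake_gs_open();         /* e.g. stty -F /dev/ttyGS0 */
                    fake_gs_close();
                    printf("after open/close #%d: %d requests held\n",
                           i, pool_size);
            }
            /* prints 16, 32, 48, 64 held requests: exactly the
             * unbounded growth described above */
            return 0;
    }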

With a USB device/gadget controller driver that has limited resources
(e.g., Marvell has a MAX_XDS_FOR_TR_CALLS limit of 64 descriptors for
transmit and receive), even after just 4 invocations of

    stty -F /dev/ttyGS0

we cannot transmit anymore.  We can still receive (not necessarily
reliably), since by then 16 * 4 = 64 descriptors/buffers are queued and
ready, but the device is otherwise not usable.
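
The patch fixes both sides of this with two counters per direction.
Continuing the toy sketch above (again with hypothetical fake_*/pool_*
names, not the driver's real code), *_allocated makes a re-open top the
pool up to QUEUE_SIZE instead of always adding QUEUE_SIZE more, and
*_started caps how many requests may be outstanding at once:

    static int pool_allocated;  /* models port->read_allocated */
    static int pool_started;    /* models port->read_started */

    /* patched open path: mirrors the new gs_alloc_requests(), which
     * computes n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE */
    static void fake_gs_open_fixed(void)
    {
            int n = QUEUE_SIZE - pool_allocated;    /* 0 on a re-open */

            for (int i = 0; i < n; i++) {
                    struct fake_req *req = malloc(sizeof(*req));
                    if (!req)
                            return;
                    req->next = pool;
                    pool = req;
                    pool_allocated++;
            }
    }

    /* mirrors the new cap in gs_start_rx()/gs_start_tx(): never queue
     * more than QUEUE_SIZE transfers per direction; the completion
     * handlers decrement the counter again */
    static int fake_gs_start_one(void)
    {
            if (pool_started >= QUEUE_SIZE)
                    return 0;
            pool_started++;
            return 1;
    }

On disconnect, the real patch frees everything as before and resets all
four counters to zero, so the next connection starts from a clean slate.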
Signed-off-by: Jim Sung <jsung@syncadence.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 58c0d9d7
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@ struct gs_port {
         wait_queue_head_t close_wait;   /* wait for last close */
 
         struct list_head read_pool;
+        int read_started;
+        int read_allocated;
         struct list_head read_queue;
         unsigned n_read;
         struct tasklet_struct push;
 
         struct list_head write_pool;
+        int write_started;
+        int write_allocated;
         struct gs_buf port_write_buf;
         wait_queue_head_t drain_wait;   /* wait while writes drain */
@@ -363,6 +367,9 @@ __acquires(&port->port_lock)
                 struct usb_request *req;
                 int len;
 
+                if (port->write_started >= QUEUE_SIZE)
+                        break;
+
                 req = list_entry(pool->next, struct usb_request, list);
                 len = gs_send_packet(port, req->buf, in->maxpacket);
                 if (len == 0) {
@@ -397,6 +404,8 @@ __acquires(&port->port_lock)
                         break;
                 }
 
+                port->write_started++;
+
                 /* abort immediately after disconnect */
                 if (!port->port_usb)
                         break;
@@ -418,7 +427,6 @@ __acquires(&port->port_lock)
 {
         struct list_head *pool = &port->read_pool;
         struct usb_ep *out = port->port_usb->out;
-        unsigned started = 0;
 
         while (!list_empty(pool)) {
                 struct usb_request *req;
@@ -430,6 +438,9 @@ __acquires(&port->port_lock)
                 if (!tty)
                         break;
 
+                if (port->read_started >= QUEUE_SIZE)
+                        break;
+
                 req = list_entry(pool->next, struct usb_request, list);
                 list_del(&req->list);
                 req->length = out->maxpacket;
@@ -447,13 +458,13 @@ __acquires(&port->port_lock)
                         list_add(&req->list, pool);
                         break;
                 }
-                started++;
+                port->read_started++;
 
                 /* abort immediately after disconnect */
                 if (!port->port_usb)
                         break;
         }
-        return started;
+        return port->read_started;
 }
 
 /*
@@ -535,6 +546,7 @@ static void gs_rx_push(unsigned long _port)
                 }
 recycle:
                 list_move(&req->list, &port->read_pool);
+                port->read_started--;
         }
 
         /* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
 
         spin_lock(&port->port_lock);
         list_add(&req->list, &port->write_pool);
+        port->write_started--;
 
         switch (req->status) {
         default:
@@ -608,7 +621,8 @@ static void gs_write_complete(struct usb_ep *ep, struct usb_request *req)
         spin_unlock(&port->port_lock);
 }
 
-static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
+static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
+                             int *allocated)
 {
         struct usb_request *req;
 
@@ -616,25 +630,31 @@ static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
                 req = list_entry(head->next, struct usb_request, list);
                 list_del(&req->list);
                 gs_free_req(ep, req);
+                if (allocated)
+                        (*allocated)--;
         }
 }
 
 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
-                void (*fn)(struct usb_ep *, struct usb_request *))
+                void (*fn)(struct usb_ep *, struct usb_request *),
+                int *allocated)
 {
         int i;
         struct usb_request *req;
+        int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
 
         /* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
          * do quite that many this time, don't fail ... we just won't
         * be as speedy as we might otherwise be.
          */
-        for (i = 0; i < QUEUE_SIZE; i++) {
+        for (i = 0; i < n; i++) {
                 req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
                 if (!req)
                         return list_empty(head) ? -ENOMEM : 0;
                 req->complete = fn;
                 list_add_tail(&req->list, head);
+                if (allocated)
+                        (*allocated)++;
         }
         return 0;
 }
@@ -661,14 +681,15 @@ static int gs_start_io(struct gs_port *port)
          * configurations may use different endpoints with a given port;
          * and high speed vs full speed changes packet sizes too.
          */
-        status = gs_alloc_requests(ep, head, gs_read_complete);
+        status = gs_alloc_requests(ep, head, gs_read_complete,
+                &port->read_allocated);
         if (status)
                 return status;
 
         status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
-                        gs_write_complete);
+                        gs_write_complete, &port->write_allocated);
         if (status) {
-                gs_free_requests(ep, head);
+                gs_free_requests(ep, head, &port->read_allocated);
                 return status;
         }
@@ -680,8 +701,9 @@ static int gs_start_io(struct gs_port *port)
         if (started) {
                 tty_wakeup(port->port_tty);
         } else {
-                gs_free_requests(ep, head);
-                gs_free_requests(port->port_usb->in, &port->write_pool);
+                gs_free_requests(ep, head, &port->read_allocated);
+                gs_free_requests(port->port_usb->in, &port->write_pool,
+                        &port->write_allocated);
                 status = -EIO;
         }
@@ -1315,8 +1337,12 @@ void gserial_disconnect(struct gserial *gser)
         spin_lock_irqsave(&port->port_lock, flags);
         if (port->open_count == 0 && !port->openclose)
                 gs_buf_free(&port->port_write_buf);
-        gs_free_requests(gser->out, &port->read_pool);
-        gs_free_requests(gser->out, &port->read_queue);
-        gs_free_requests(gser->in, &port->write_pool);
+        gs_free_requests(gser->out, &port->read_pool, NULL);
+        gs_free_requests(gser->out, &port->read_queue, NULL);
+        gs_free_requests(gser->in, &port->write_pool, NULL);
+        port->read_allocated = port->read_started =
+                port->write_allocated = port->write_started = 0;
         spin_unlock_irqrestore(&port->port_lock, flags);
 }