Commit c8d647a3 authored by Juergen Gross

xen/pvcallsback: use lateeoi irq binding

In order to reduce the chance for the system becoming unresponsive due
to event storms triggered by a misbehaving pvcallsfront use the lateeoi
irq binding for pvcallsback and unmask the event channel only after
handling all write requests, which are the ones coming in via an irq.

This requires modifying the logic a little bit to not require an event
for each write request, but to keep the ioworker running until no
further data is found on the ring page to be processed.

This is part of XSA-332.

Cc: stable@vger.kernel.org
Reported-by: Julien Grall <julien@xen.org>
Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
Reviewed-by: Wei Liu <wl@xen.org>
parent 86991b6e
...@@ -66,6 +66,7 @@ struct sock_mapping { ...@@ -66,6 +66,7 @@ struct sock_mapping {
atomic_t write; atomic_t write;
atomic_t io; atomic_t io;
atomic_t release; atomic_t release;
atomic_t eoi;
void (*saved_data_ready)(struct sock *sk); void (*saved_data_ready)(struct sock *sk);
struct pvcalls_ioworker ioworker; struct pvcalls_ioworker ioworker;
}; };
...@@ -87,7 +88,7 @@ static int pvcalls_back_release_active(struct xenbus_device *dev, ...@@ -87,7 +88,7 @@ static int pvcalls_back_release_active(struct xenbus_device *dev,
struct pvcalls_fedata *fedata, struct pvcalls_fedata *fedata,
struct sock_mapping *map); struct sock_mapping *map);
static void pvcalls_conn_back_read(void *opaque) static bool pvcalls_conn_back_read(void *opaque)
{ {
struct sock_mapping *map = (struct sock_mapping *)opaque; struct sock_mapping *map = (struct sock_mapping *)opaque;
struct msghdr msg; struct msghdr msg;
...@@ -107,17 +108,17 @@ static void pvcalls_conn_back_read(void *opaque) ...@@ -107,17 +108,17 @@ static void pvcalls_conn_back_read(void *opaque)
virt_mb(); virt_mb();
if (error) if (error)
return; return false;
size = pvcalls_queued(prod, cons, array_size); size = pvcalls_queued(prod, cons, array_size);
if (size >= array_size) if (size >= array_size)
return; return false;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) { if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
atomic_set(&map->read, 0); atomic_set(&map->read, 0);
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
flags); flags);
return; return true;
} }
spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags); spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
wanted = array_size - size; wanted = array_size - size;
...@@ -141,7 +142,7 @@ static void pvcalls_conn_back_read(void *opaque) ...@@ -141,7 +142,7 @@ static void pvcalls_conn_back_read(void *opaque)
ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT); ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
WARN_ON(ret > wanted); WARN_ON(ret > wanted);
if (ret == -EAGAIN) /* shouldn't happen */ if (ret == -EAGAIN) /* shouldn't happen */
return; return true;
if (!ret) if (!ret)
ret = -ENOTCONN; ret = -ENOTCONN;
spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags); spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
...@@ -160,10 +161,10 @@ static void pvcalls_conn_back_read(void *opaque) ...@@ -160,10 +161,10 @@ static void pvcalls_conn_back_read(void *opaque)
virt_wmb(); virt_wmb();
notify_remote_via_irq(map->irq); notify_remote_via_irq(map->irq);
return; return true;
} }
static void pvcalls_conn_back_write(struct sock_mapping *map) static bool pvcalls_conn_back_write(struct sock_mapping *map)
{ {
struct pvcalls_data_intf *intf = map->ring; struct pvcalls_data_intf *intf = map->ring;
struct pvcalls_data *data = &map->data; struct pvcalls_data *data = &map->data;
...@@ -180,7 +181,7 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) ...@@ -180,7 +181,7 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
array_size = XEN_FLEX_RING_SIZE(map->ring_order); array_size = XEN_FLEX_RING_SIZE(map->ring_order);
size = pvcalls_queued(prod, cons, array_size); size = pvcalls_queued(prod, cons, array_size);
if (size == 0) if (size == 0)
return; return false;
memset(&msg, 0, sizeof(msg)); memset(&msg, 0, sizeof(msg));
msg.msg_flags |= MSG_DONTWAIT; msg.msg_flags |= MSG_DONTWAIT;
...@@ -198,12 +199,11 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) ...@@ -198,12 +199,11 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
atomic_set(&map->write, 0); atomic_set(&map->write, 0);
ret = inet_sendmsg(map->sock, &msg, size); ret = inet_sendmsg(map->sock, &msg, size);
if (ret == -EAGAIN || (ret >= 0 && ret < size)) { if (ret == -EAGAIN) {
atomic_inc(&map->write); atomic_inc(&map->write);
atomic_inc(&map->io); atomic_inc(&map->io);
return true;
} }
if (ret == -EAGAIN)
return;
/* write the data, then update the indexes */ /* write the data, then update the indexes */
virt_wmb(); virt_wmb();
...@@ -216,9 +216,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map) ...@@ -216,9 +216,13 @@ static void pvcalls_conn_back_write(struct sock_mapping *map)
} }
/* update the indexes, then notify the other end */ /* update the indexes, then notify the other end */
virt_wmb(); virt_wmb();
if (prod != cons + ret) if (prod != cons + ret) {
atomic_inc(&map->write); atomic_inc(&map->write);
atomic_inc(&map->io);
}
notify_remote_via_irq(map->irq); notify_remote_via_irq(map->irq);
return true;
} }
static void pvcalls_back_ioworker(struct work_struct *work) static void pvcalls_back_ioworker(struct work_struct *work)
...@@ -227,6 +231,7 @@ static void pvcalls_back_ioworker(struct work_struct *work) ...@@ -227,6 +231,7 @@ static void pvcalls_back_ioworker(struct work_struct *work)
struct pvcalls_ioworker, register_work); struct pvcalls_ioworker, register_work);
struct sock_mapping *map = container_of(ioworker, struct sock_mapping, struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
ioworker); ioworker);
unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
while (atomic_read(&map->io) > 0) { while (atomic_read(&map->io) > 0) {
if (atomic_read(&map->release) > 0) { if (atomic_read(&map->release) > 0) {
...@@ -234,10 +239,18 @@ static void pvcalls_back_ioworker(struct work_struct *work) ...@@ -234,10 +239,18 @@ static void pvcalls_back_ioworker(struct work_struct *work)
return; return;
} }
if (atomic_read(&map->read) > 0) if (atomic_read(&map->read) > 0 &&
pvcalls_conn_back_read(map); pvcalls_conn_back_read(map))
if (atomic_read(&map->write) > 0) eoi_flags = 0;
pvcalls_conn_back_write(map); if (atomic_read(&map->write) > 0 &&
pvcalls_conn_back_write(map))
eoi_flags = 0;
if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
atomic_set(&map->eoi, 0);
xen_irq_lateeoi(map->irq, eoi_flags);
eoi_flags = XEN_EOI_FLAG_SPURIOUS;
}
atomic_dec(&map->io); atomic_dec(&map->io);
} }
...@@ -334,12 +347,9 @@ static struct sock_mapping *pvcalls_new_active_socket( ...@@ -334,12 +347,9 @@ static struct sock_mapping *pvcalls_new_active_socket(
goto out; goto out;
map->bytes = page; map->bytes = page;
ret = bind_interdomain_evtchn_to_irqhandler(fedata->dev->otherend_id, ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
evtchn, fedata->dev->otherend_id, evtchn,
pvcalls_back_conn_event, pvcalls_back_conn_event, 0, "pvcalls-backend", map);
0,
"pvcalls-backend",
map);
if (ret < 0) if (ret < 0)
goto out; goto out;
map->irq = ret; map->irq = ret;
...@@ -873,15 +883,18 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id) ...@@ -873,15 +883,18 @@ static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{ {
struct xenbus_device *dev = dev_id; struct xenbus_device *dev = dev_id;
struct pvcalls_fedata *fedata = NULL; struct pvcalls_fedata *fedata = NULL;
unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
if (dev == NULL) if (dev) {
return IRQ_HANDLED; fedata = dev_get_drvdata(&dev->dev);
if (fedata) {
pvcalls_back_work(fedata);
eoi_flags = 0;
}
}
fedata = dev_get_drvdata(&dev->dev); xen_irq_lateeoi(irq, eoi_flags);
if (fedata == NULL)
return IRQ_HANDLED;
pvcalls_back_work(fedata);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -891,12 +904,15 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map) ...@@ -891,12 +904,15 @@ static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
struct pvcalls_ioworker *iow; struct pvcalls_ioworker *iow;
if (map == NULL || map->sock == NULL || map->sock->sk == NULL || if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
map->sock->sk->sk_user_data != map) map->sock->sk->sk_user_data != map) {
xen_irq_lateeoi(irq, 0);
return IRQ_HANDLED; return IRQ_HANDLED;
}
iow = &map->ioworker; iow = &map->ioworker;
atomic_inc(&map->write); atomic_inc(&map->write);
atomic_inc(&map->eoi);
atomic_inc(&map->io); atomic_inc(&map->io);
queue_work(iow->wq, &iow->register_work); queue_work(iow->wq, &iow->register_work);
...@@ -932,7 +948,7 @@ static int backend_connect(struct xenbus_device *dev) ...@@ -932,7 +948,7 @@ static int backend_connect(struct xenbus_device *dev)
goto error; goto error;
} }
err = bind_interdomain_evtchn_to_irq(dev->otherend_id, evtchn); err = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
if (err < 0) if (err < 0)
goto error; goto error;
fedata->irq = err; fedata->irq = err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment