Commit fbd029df authored by David S. Miller

Merge branch 'xen-harden-netfront'

Juergen Gross says:

====================
xen: harden netfront against malicious backends

Xen backends of para-virtualized devices can live in the dom0 kernel,
in dom0 user land, or in a driver domain. This means that a backend
might reside in a less trusted environment than the Xen core
components, so a backend should not be able to do harm to a Xen guest
(it can still mess up I/O data, but it shouldn't be able to e.g. crash
a guest by other means or cause a privilege escalation in the guest).

Unfortunately netfront in the Linux kernel fully trusts its backend.
This series fixes netfront in this regard.

Handling this as a security problem was considered, but the topic had
already been discussed in public, so it isn't a real secret.

It should be mentioned that a similar series was posted some years ago
by Marek Marczykowski-Górecki, but that series was not applied because
a needed Xen header was not yet available in the Xen git repo at that
time. Additionally my series fixes some more DoS cases.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 755f9053 a884daa6
@@ -126,21 +126,17 @@ struct netfront_queue {
 	/*
 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
-	 * are linked from tx_skb_freelist through skb_entry.link.
-	 *
-	 * NB. Freelist index entries are always going to be less than
-	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
-	 * greater than PAGE_OFFSET: we use this property to distinguish
-	 * them.
+	 * are linked from tx_skb_freelist through tx_link.
 	 */
-	union skb_entry {
-		struct sk_buff *skb;
-		unsigned long link;
-	} tx_skbs[NET_TX_RING_SIZE];
+	struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
+	unsigned short tx_link[NET_TX_RING_SIZE];
+#define TX_LINK_NONE 0xffff
+#define TX_PENDING   0xfffe
 	grant_ref_t gref_tx_head;
 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
 	struct page *grant_tx_page[NET_TX_RING_SIZE];
 	unsigned tx_skb_freelist;
+	unsigned int tx_pend_queue;
 
 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
 	struct xen_netif_rx_front_ring rx;
@@ -173,6 +169,9 @@ struct netfront_info {
 	bool netback_has_xdp_headroom;
 	bool netfront_xdp_enabled;
 
+	/* Is device behaving sane? */
+	bool broken;
+
 	atomic_t rx_gso_checksum_fixup;
 };
@@ -181,33 +180,25 @@ struct netfront_rx_info {
 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
 };
 
-static void skb_entry_set_link(union skb_entry *list, unsigned short id)
-{
-	list->link = id;
-}
-
-static int skb_entry_is_link(const union skb_entry *list)
-{
-	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
-	return (unsigned long)list->skb < PAGE_OFFSET;
-}
-
 /*
  * Access macros for acquiring freeing slots in tx_skbs[].
  */
 
-static void add_id_to_freelist(unsigned *head, union skb_entry *list,
-			       unsigned short id)
+static void add_id_to_list(unsigned *head, unsigned short *list,
+			   unsigned short id)
 {
-	skb_entry_set_link(&list[id], *head);
+	list[id] = *head;
 	*head = id;
 }
 
-static unsigned short get_id_from_freelist(unsigned *head,
-					   union skb_entry *list)
+static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
 {
 	unsigned int id = *head;
-	*head = list[id].link;
+
+	if (id != TX_LINK_NONE) {
+		*head = list[id];
+		list[id] = TX_LINK_NONE;
+	}
+
 	return id;
 }
@@ -363,7 +354,7 @@ static int xennet_open(struct net_device *dev)
 	unsigned int i = 0;
 	struct netfront_queue *queue = NULL;
 
-	if (!np->queues)
+	if (!np->queues || np->broken)
 		return -ENODEV;
 
 	for (i = 0; i < num_queues; ++i) {
@@ -391,27 +382,47 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	unsigned short id;
 	struct sk_buff *skb;
 	bool more_to_do;
+	const struct device *dev = &queue->info->netdev->dev;
 
 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
 
 	do {
 		prod = queue->tx.sring->rsp_prod;
+		if (RING_RESPONSE_PROD_OVERFLOW(&queue->tx, prod)) {
+			dev_alert(dev, "Illegal number of responses %u\n",
+				  prod - queue->tx.rsp_cons);
+			goto err;
+		}
 		rmb(); /* Ensure we see responses up to 'rp'. */
 
 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
-			struct xen_netif_tx_response *txrsp;
+			struct xen_netif_tx_response txrsp;
 
-			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
-			if (txrsp->status == XEN_NETIF_RSP_NULL)
+			RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
+			if (txrsp.status == XEN_NETIF_RSP_NULL)
 				continue;
 
-			id  = txrsp->id;
-			skb = queue->tx_skbs[id].skb;
+			id = txrsp.id;
+			if (id >= RING_SIZE(&queue->tx)) {
+				dev_alert(dev,
+					  "Response has incorrect id (%u)\n",
+					  id);
+				goto err;
+			}
+			if (queue->tx_link[id] != TX_PENDING) {
+				dev_alert(dev,
+					  "Response for inactive request\n");
+				goto err;
+			}
+
+			queue->tx_link[id] = TX_LINK_NONE;
+			skb = queue->tx_skbs[id];
+			queue->tx_skbs[id] = NULL;
 			if (unlikely(gnttab_query_foreign_access(
 				queue->grant_tx_ref[id]) != 0)) {
-				pr_alert("%s: warning -- grant still in use by backend domain\n",
-					 __func__);
-				BUG();
+				dev_alert(dev,
+					  "Grant still in use by backend domain\n");
+				goto err;
 			}
 			gnttab_end_foreign_access_ref(
 				queue->grant_tx_ref[id], GNTMAP_readonly);
@@ -419,7 +430,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
 			queue->grant_tx_page[id] = NULL;
-			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
+			add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
 			dev_kfree_skb_irq(skb);
 		}
@@ -429,13 +440,20 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
 	} while (more_to_do);
 
 	xennet_maybe_wake_tx(queue);
+
+	return;
+
+ err:
+	queue->info->broken = true;
+	dev_alert(dev, "Disabled for further use\n");
 }
 
 struct xennet_gnttab_make_txreq {
 	struct netfront_queue *queue;
 	struct sk_buff *skb;
 	struct page *page;
-	struct xen_netif_tx_request *tx; /* Last request */
+	struct xen_netif_tx_request *tx;      /* Last request on ring page */
+	struct xen_netif_tx_request tx_local; /* Last request local copy*/
 	unsigned int size;
 };
@@ -451,7 +469,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	struct netfront_queue *queue = info->queue;
 	struct sk_buff *skb = info->skb;
 
-	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
+	id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
 	WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
@@ -459,34 +477,37 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
 	gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
 					gfn, GNTMAP_readonly);
 
-	queue->tx_skbs[id].skb = skb;
+	queue->tx_skbs[id] = skb;
 	queue->grant_tx_page[id] = page;
 	queue->grant_tx_ref[id] = ref;
 
-	tx->id = id;
-	tx->gref = ref;
-	tx->offset = offset;
-	tx->size = len;
-	tx->flags = 0;
+	info->tx_local.id = id;
+	info->tx_local.gref = ref;
+	info->tx_local.offset = offset;
+	info->tx_local.size = len;
+	info->tx_local.flags = 0;
+
+	*tx = info->tx_local;
+
+	/*
+	 * Put the request in the pending queue, it will be set to be pending
+	 * when the producer index is about to be raised.
+	 */
+	add_id_to_list(&queue->tx_pend_queue, queue->tx_link, id);
 
 	info->tx = tx;
-	info->size += tx->size;
+	info->size += info->tx_local.size;
 }
 
 static struct xen_netif_tx_request *xennet_make_first_txreq(
-	struct netfront_queue *queue, struct sk_buff *skb,
-	struct page *page, unsigned int offset, unsigned int len)
+	struct xennet_gnttab_make_txreq *info,
+	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.page = page,
-		.size = 0,
-	};
+	info->size = 0;
 
-	gnttab_for_one_grant(page, offset, len, xennet_tx_setup_grant, &info);
+	gnttab_for_one_grant(info->page, offset, len, xennet_tx_setup_grant, info);
 
-	return info.tx;
+	return info->tx;
 }
 
 static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
@@ -499,35 +520,27 @@ static void xennet_make_one_txreq(unsigned long gfn, unsigned int offset,
 	xennet_tx_setup_grant(gfn, offset, len, data);
 }
 
-static struct xen_netif_tx_request *xennet_make_txreqs(
-	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
-	struct sk_buff *skb, struct page *page,
+static void xennet_make_txreqs(
+	struct xennet_gnttab_make_txreq *info,
+	struct page *page,
 	unsigned int offset, unsigned int len)
 {
-	struct xennet_gnttab_make_txreq info = {
-		.queue = queue,
-		.skb = skb,
-		.tx = tx,
-	};
-
 	/* Skip unused frames from start of page */
 	page += offset >> PAGE_SHIFT;
 	offset &= ~PAGE_MASK;
 
 	while (len) {
-		info.page = page;
-		info.size = 0;
+		info->page = page;
+		info->size = 0;
 
 		gnttab_foreach_grant_in_range(page, offset, len,
 					      xennet_make_one_txreq,
-					      &info);
+					      info);
 
 		page++;
 		offset = 0;
-		len -= info.size;
+		len -= info->size;
 	}
-
-	return info.tx;
 }
 
 /*
@@ -574,19 +587,34 @@ static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
 	return queue_idx;
 }
 
+static void xennet_mark_tx_pending(struct netfront_queue *queue)
+{
+	unsigned int i;
+
+	while ((i = get_id_from_list(&queue->tx_pend_queue, queue->tx_link)) !=
+	       TX_LINK_NONE)
+		queue->tx_link[i] = TX_PENDING;
+}
+
 static int xennet_xdp_xmit_one(struct net_device *dev,
 			       struct netfront_queue *queue,
 			       struct xdp_frame *xdpf)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
+	struct xennet_gnttab_make_txreq info = {
+		.queue = queue,
+		.skb = NULL,
+		.page = virt_to_page(xdpf->data),
+	};
 	int notify;
 
-	xennet_make_first_txreq(queue, NULL,
-				virt_to_page(xdpf->data),
+	xennet_make_first_txreq(&info,
 				offset_in_page(xdpf->data),
 				xdpf->len);
 
+	xennet_mark_tx_pending(queue);
+
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->tx_irq);
@@ -611,6 +639,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
 	int nxmit = 0;
 	int i;
 
+	if (unlikely(np->broken))
+		return -ENODEV;
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
@@ -638,7 +668,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 {
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
-	struct xen_netif_tx_request *tx, *first_tx;
+	struct xen_netif_tx_request *first_tx;
 	unsigned int i;
 	int notify;
 	int slots;
@@ -647,6 +677,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	unsigned int len;
 	unsigned long flags;
 	struct netfront_queue *queue = NULL;
+	struct xennet_gnttab_make_txreq info = { };
 	unsigned int num_queues = dev->real_num_tx_queues;
 	u16 queue_index;
 	struct sk_buff *nskb;
@@ -654,6 +685,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	/* Drop the packet if no queues are set up */
 	if (num_queues < 1)
 		goto drop;
+	if (unlikely(np->broken))
+		goto drop;
 	/* Determine which queue to transmit this SKB on */
 	queue_index = skb_get_queue_mapping(skb);
 	queue = &np->queues[queue_index];
@@ -704,21 +737,24 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 
 	/* First request for the linear area. */
-	first_tx = tx = xennet_make_first_txreq(queue, skb,
-						page, offset, len);
-	offset += tx->size;
+	info.queue = queue;
+	info.skb = skb;
+	info.page = page;
+	first_tx = xennet_make_first_txreq(&info, offset, len);
+	offset += info.tx_local.size;
 	if (offset == PAGE_SIZE) {
 		page++;
 		offset = 0;
 	}
-	len -= tx->size;
+	len -= info.tx_local.size;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL)
 		/* local packet? */
-		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_csum_blank |
+				   XEN_NETTXF_data_validated;
 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
 		/* remote but checksummed. */
-		tx->flags |= XEN_NETTXF_data_validated;
+		first_tx->flags |= XEN_NETTXF_data_validated;
 
 	/* Optional extra info after the first request. */
 	if (skb_shinfo(skb)->gso_size) {
@@ -727,7 +763,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 		gso = (struct xen_netif_extra_info *)
 			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
 
-		tx->flags |= XEN_NETTXF_extra_info;
+		first_tx->flags |= XEN_NETTXF_extra_info;
 
 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
@@ -741,12 +777,12 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	}
 
 	/* Requests for the rest of the linear area. */
-	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
+	xennet_make_txreqs(&info, page, offset, len);
 
 	/* Requests for all the frags. */
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-		tx = xennet_make_txreqs(queue, tx, skb, skb_frag_page(frag),
+		xennet_make_txreqs(&info, skb_frag_page(frag),
 				   skb_frag_off(frag),
 				   skb_frag_size(frag));
 	}
@@ -757,6 +793,8 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
 	/* timestamp packet in software */
 	skb_tx_timestamp(skb);
 
+	xennet_mark_tx_pending(queue);
+
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->tx_irq);
@@ -814,7 +852,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			     RING_IDX rp)
 
 {
-	struct xen_netif_extra_info *extra;
+	struct xen_netif_extra_info extra;
 	struct device *dev = &queue->info->netdev->dev;
 	RING_IDX cons = queue->rx.rsp_cons;
 	int err = 0;
@@ -830,24 +868,22 @@ static int xennet_get_extras(struct netfront_queue *queue,
 			break;
 		}
 
-		extra = (struct xen_netif_extra_info *)
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &extra);
 
-		if (unlikely(!extra->type ||
-			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
+		if (unlikely(!extra.type ||
+			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
 			if (net_ratelimit())
 				dev_warn(dev, "Invalid extra type: %d\n",
-					 extra->type);
+					 extra.type);
 			err = -EINVAL;
 		} else {
-			memcpy(&extras[extra->type - 1], extra,
-			       sizeof(*extra));
+			extras[extra.type - 1] = extra;
 		}
 
 		skb = xennet_get_rx_skb(queue, cons);
 		ref = xennet_get_rx_ref(queue, cons);
 		xennet_move_rx_slot(queue, skb, ref);
-	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
+	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
 
 	queue->rx.rsp_cons = cons;
 	return err;
@@ -905,7 +941,7 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			       struct sk_buff_head *list,
 			       bool *need_xdp_flush)
 {
-	struct xen_netif_rx_response *rx = &rinfo->rx;
+	struct xen_netif_rx_response *rx = &rinfo->rx, rx_local;
 	int max = XEN_NETIF_NR_SLOTS_MIN + (rx->status <= RX_COPY_THRESHOLD);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
@@ -989,7 +1025,8 @@ static int xennet_get_responses(struct netfront_queue *queue,
 			break;
 		}
 
-		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
+		RING_COPY_RESPONSE(&queue->rx, cons + slots, &rx_local);
+		rx = &rx_local;
 		skb = xennet_get_rx_skb(queue, cons + slots);
 		ref = xennet_get_rx_ref(queue, cons + slots);
 		slots++;
@@ -1044,10 +1081,11 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 	struct sk_buff *nskb;
 
 	while ((nskb = __skb_dequeue(list))) {
-		struct xen_netif_rx_response *rx =
-			RING_GET_RESPONSE(&queue->rx, ++cons);
+		struct xen_netif_rx_response rx;
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
+		RING_COPY_RESPONSE(&queue->rx, ++cons, &rx);
+
 		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
@@ -1062,7 +1100,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
 				skb_frag_page(nfrag),
-				rx->offset, rx->status, PAGE_SIZE);
+				rx.offset, rx.status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
@@ -1156,12 +1194,19 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 	skb_queue_head_init(&tmpq);
 
 	rp = queue->rx.sring->rsp_prod;
+	if (RING_RESPONSE_PROD_OVERFLOW(&queue->rx, rp)) {
+		dev_alert(&dev->dev, "Illegal number of responses %u\n",
+			  rp - queue->rx.rsp_cons);
+		queue->info->broken = true;
+		spin_unlock(&queue->rx_lock);
+		return 0;
+	}
 	rmb(); /* Ensure we see queued responses up to 'rp'. */
 
 	i = queue->rx.rsp_cons;
 	work_done = 0;
 	while ((i != rp) && (work_done < budget)) {
-		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
+		RING_COPY_RESPONSE(&queue->rx, i, rx);
 		memset(extras, 0, sizeof(rinfo.extras));
 
 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq,
@@ -1286,17 +1331,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
 		/* Skip over entries which are actually freelist references */
-		if (skb_entry_is_link(&queue->tx_skbs[i]))
+		if (!queue->tx_skbs[i])
 			continue;
 
-		skb = queue->tx_skbs[i].skb;
+		skb = queue->tx_skbs[i];
+		queue->tx_skbs[i] = NULL;
 		get_page(queue->grant_tx_page[i]);
 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
 					  GNTMAP_readonly,
 					  (unsigned long)page_address(queue->grant_tx_page[i]));
 		queue->grant_tx_page[i] = NULL;
 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
-		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
+		add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
 		dev_kfree_skb_irq(skb);
 	}
 }
@@ -1376,6 +1422,9 @@ static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
 	struct netfront_queue *queue = dev_id;
 	unsigned long flags;
 
+	if (queue->info->broken)
+		return IRQ_HANDLED;
+
 	spin_lock_irqsave(&queue->tx_lock, flags);
 	xennet_tx_buf_gc(queue);
 	spin_unlock_irqrestore(&queue->tx_lock, flags);
@@ -1388,6 +1437,9 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 	struct netfront_queue *queue = dev_id;
 	struct net_device *dev = queue->info->netdev;
 
+	if (queue->info->broken)
+		return IRQ_HANDLED;
+
 	if (likely(netif_carrier_ok(dev) &&
 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
 		napi_schedule(&queue->napi);
@@ -1409,6 +1461,10 @@ static void xennet_poll_controller(struct net_device *dev)
 	struct netfront_info *info = netdev_priv(dev);
 	unsigned int num_queues = dev->real_num_tx_queues;
 	unsigned int i;
+
+	if (info->broken)
+		return;
+
 	for (i = 0; i < num_queues; ++i)
 		xennet_interrupt(0, &info->queues[i]);
 }
@@ -1480,6 +1536,11 @@ static int xennet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
 
 static int xennet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 {
+	struct netfront_info *np = netdev_priv(dev);
+
+	if (np->broken)
+		return -ENODEV;
+
 	switch (xdp->command) {
 	case XDP_SETUP_PROG:
 		return xennet_xdp_set(dev, xdp->prog, xdp->extack);
@@ -1853,13 +1914,15 @@ static int xennet_init_queue(struct netfront_queue *queue)
 	snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
 		 devid, queue->id);
 
-	/* Initialise tx_skbs as a free chain containing every entry. */
+	/* Initialise tx_skb_freelist as a free chain containing every entry. */
 	queue->tx_skb_freelist = 0;
+	queue->tx_pend_queue = TX_LINK_NONE;
 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
-		skb_entry_set_link(&queue->tx_skbs[i], i+1);
+		queue->tx_link[i] = i + 1;
 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
 		queue->grant_tx_page[i] = NULL;
 	}
+	queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
 
 	/* Clear out rx_skbs */
 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
@@ -2128,6 +2191,9 @@ static int talk_to_netback(struct xenbus_device *dev,
 	if (info->queues)
 		xennet_destroy_queues(info);
 
+	/* For the case of a reconnect reset the "broken" indicator. */
+	info->broken = false;
+
 	err = xennet_create_queues(info, &num_queues);
 	if (err < 0) {
 		xenbus_dev_fatal(dev, err, "creating queues");
......