Commit 21631d2d authored by Juergen Gross, committed by David S. Miller

xen/netfront: disentangle tx_skb_freelist

The tx_skb_freelist elements are in a single linked list with the
request id used as link reference. The per element link field is in a
union with the skb pointer of an in use request.

Move the link reference out of the union in order to enable a later
reuse of it for requests which need a populated skb pointer.

Rename add_id_to_freelist() and get_id_from_freelist() to
add_id_to_list() and get_id_from_list() in order to prepare using
those for other lists as well. Define ~0 as the value indicating the
end of a list, and place that value into the link of any request that
is not on the list.

When freeing an skb, zero the skb pointer in the request. Use a NULL
skb pointer instead of skb_entry_is_link() to decide whether a
request has an skb linked to it.

Remove skb_entry_set_link() and open code it instead as it is really
trivial now.
Signed-off-by: Juergen Gross <jgross@suse.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 162081ec
...@@ -126,17 +126,11 @@ struct netfront_queue { ...@@ -126,17 +126,11 @@ struct netfront_queue {
/* /*
* {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
* are linked from tx_skb_freelist through skb_entry.link. * are linked from tx_skb_freelist through tx_link.
*
* NB. Freelist index entries are always going to be less than
* PAGE_OFFSET, whereas pointers to skbs will always be equal or
* greater than PAGE_OFFSET: we use this property to distinguish
* them.
*/ */
union skb_entry { struct sk_buff *tx_skbs[NET_TX_RING_SIZE];
struct sk_buff *skb; unsigned short tx_link[NET_TX_RING_SIZE];
unsigned long link; #define TX_LINK_NONE 0xffff
} tx_skbs[NET_TX_RING_SIZE];
grant_ref_t gref_tx_head; grant_ref_t gref_tx_head;
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
struct page *grant_tx_page[NET_TX_RING_SIZE]; struct page *grant_tx_page[NET_TX_RING_SIZE];
...@@ -181,33 +175,25 @@ struct netfront_rx_info { ...@@ -181,33 +175,25 @@ struct netfront_rx_info {
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1]; struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
}; };
/* Pre-patch helper (removed by this commit): store a free-list link id
 * in the link member of the skb/link union for entry @list. */
static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
list->link = id;
}
/* Pre-patch helper (removed by this commit): distinguish a free-list
 * link id from an in-use skb pointer sharing the same union. Link ids
 * are small ring indices, always < PAGE_OFFSET, whereas kernel skb
 * pointers are always >= PAGE_OFFSET — see the comment this commit
 * deletes from struct netfront_queue. */
static int skb_entry_is_link(const union skb_entry *list)
{
BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
return (unsigned long)list->skb < PAGE_OFFSET;
}
/* /*
* Access macros for acquiring freeing slots in tx_skbs[]. * Access macros for acquiring freeing slots in tx_skbs[].
*/ */
/*
 * Push @id onto the front of the singly linked id list whose head index
 * is *@head and whose per-entry links live in @list (e.g. tx_link[]).
 * The entry's link takes over the old head, then @id becomes the head.
 */
static void add_id_to_list(unsigned *head, unsigned short *list,
			   unsigned short id)
{
	list[id] = *head;
	*head = id;
}
static unsigned short get_id_from_freelist(unsigned *head, static unsigned short get_id_from_list(unsigned *head, unsigned short *list)
union skb_entry *list)
{ {
unsigned int id = *head; unsigned int id = *head;
*head = list[id].link;
if (id != TX_LINK_NONE) {
*head = list[id];
list[id] = TX_LINK_NONE;
}
return id; return id;
} }
...@@ -406,7 +392,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) ...@@ -406,7 +392,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
continue; continue;
id = txrsp.id; id = txrsp.id;
skb = queue->tx_skbs[id].skb; skb = queue->tx_skbs[id];
queue->tx_skbs[id] = NULL;
if (unlikely(gnttab_query_foreign_access( if (unlikely(gnttab_query_foreign_access(
queue->grant_tx_ref[id]) != 0)) { queue->grant_tx_ref[id]) != 0)) {
pr_alert("%s: warning -- grant still in use by backend domain\n", pr_alert("%s: warning -- grant still in use by backend domain\n",
...@@ -419,7 +406,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue) ...@@ -419,7 +406,7 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
&queue->gref_tx_head, queue->grant_tx_ref[id]); &queue->gref_tx_head, queue->grant_tx_ref[id]);
queue->grant_tx_ref[id] = GRANT_INVALID_REF; queue->grant_tx_ref[id] = GRANT_INVALID_REF;
queue->grant_tx_page[id] = NULL; queue->grant_tx_page[id] = NULL;
add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id); add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, id);
dev_kfree_skb_irq(skb); dev_kfree_skb_irq(skb);
} }
...@@ -452,7 +439,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, ...@@ -452,7 +439,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
struct netfront_queue *queue = info->queue; struct netfront_queue *queue = info->queue;
struct sk_buff *skb = info->skb; struct sk_buff *skb = info->skb;
id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); id = get_id_from_list(&queue->tx_skb_freelist, queue->tx_link);
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
ref = gnttab_claim_grant_reference(&queue->gref_tx_head); ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
...@@ -460,7 +447,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, ...@@ -460,7 +447,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset,
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
gfn, GNTMAP_readonly); gfn, GNTMAP_readonly);
queue->tx_skbs[id].skb = skb; queue->tx_skbs[id] = skb;
queue->grant_tx_page[id] = page; queue->grant_tx_page[id] = page;
queue->grant_tx_ref[id] = ref; queue->grant_tx_ref[id] = ref;
...@@ -1284,17 +1271,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue) ...@@ -1284,17 +1271,18 @@ static void xennet_release_tx_bufs(struct netfront_queue *queue)
for (i = 0; i < NET_TX_RING_SIZE; i++) { for (i = 0; i < NET_TX_RING_SIZE; i++) {
/* Skip over entries which are actually freelist references */ /* Skip over entries which are actually freelist references */
if (skb_entry_is_link(&queue->tx_skbs[i])) if (!queue->tx_skbs[i])
continue; continue;
skb = queue->tx_skbs[i].skb; skb = queue->tx_skbs[i];
queue->tx_skbs[i] = NULL;
get_page(queue->grant_tx_page[i]); get_page(queue->grant_tx_page[i]);
gnttab_end_foreign_access(queue->grant_tx_ref[i], gnttab_end_foreign_access(queue->grant_tx_ref[i],
GNTMAP_readonly, GNTMAP_readonly,
(unsigned long)page_address(queue->grant_tx_page[i])); (unsigned long)page_address(queue->grant_tx_page[i]));
queue->grant_tx_page[i] = NULL; queue->grant_tx_page[i] = NULL;
queue->grant_tx_ref[i] = GRANT_INVALID_REF; queue->grant_tx_ref[i] = GRANT_INVALID_REF;
add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i); add_id_to_list(&queue->tx_skb_freelist, queue->tx_link, i);
dev_kfree_skb_irq(skb); dev_kfree_skb_irq(skb);
} }
} }
...@@ -1851,13 +1839,14 @@ static int xennet_init_queue(struct netfront_queue *queue) ...@@ -1851,13 +1839,14 @@ static int xennet_init_queue(struct netfront_queue *queue)
snprintf(queue->name, sizeof(queue->name), "vif%s-q%u", snprintf(queue->name, sizeof(queue->name), "vif%s-q%u",
devid, queue->id); devid, queue->id);
/* Initialise tx_skbs as a free chain containing every entry. */ /* Initialise tx_skb_freelist as a free chain containing every entry. */
queue->tx_skb_freelist = 0; queue->tx_skb_freelist = 0;
for (i = 0; i < NET_TX_RING_SIZE; i++) { for (i = 0; i < NET_TX_RING_SIZE; i++) {
skb_entry_set_link(&queue->tx_skbs[i], i+1); queue->tx_link[i] = i + 1;
queue->grant_tx_ref[i] = GRANT_INVALID_REF; queue->grant_tx_ref[i] = GRANT_INVALID_REF;
queue->grant_tx_page[i] = NULL; queue->grant_tx_page[i] = NULL;
} }
queue->tx_link[NET_TX_RING_SIZE - 1] = TX_LINK_NONE;
/* Clear out rx_skbs */ /* Clear out rx_skbs */
for (i = 0; i < NET_RX_RING_SIZE; i++) { for (i = 0; i < NET_RX_RING_SIZE; i++) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment