Commit 121fa4b7 authored by Zoltan Kiss, committed by David S. Miller

xen-netback: Minor refactoring of netback code

This patch contains a few bits of refactoring before introducing the grant
mapping changes:
- introducing xenvif_tx_pending_slots_available(), as this is used several
  times, and will be used more often
- rename the thread to vifX.Y-guest-rx, to signify it does RX work from the
  guest's point of view
Signed-off-by: Zoltan Kiss <zoltan.kiss@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 8f13dd96
...@@ -108,6 +108,15 @@ struct xenvif_rx_meta { ...@@ -108,6 +108,15 @@ struct xenvif_rx_meta {
*/ */
#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE) #define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
#define NETBACK_INVALID_HANDLE -1
/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
* the maximum slots a valid packet can use. Now this value is defined
* to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
* all backends.
*/
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
struct xenvif { struct xenvif {
/* Unique identifier for this interface. */ /* Unique identifier for this interface. */
domid_t domid; domid_t domid;
...@@ -216,7 +225,7 @@ void xenvif_carrier_off(struct xenvif *vif); ...@@ -216,7 +225,7 @@ void xenvif_carrier_off(struct xenvif *vif);
int xenvif_tx_action(struct xenvif *vif, int budget); int xenvif_tx_action(struct xenvif *vif, int budget);
int xenvif_kthread(void *data); int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif *vif); void xenvif_kick_thread(struct xenvif *vif);
/* Determine whether the needed number of slots (req) are available, /* Determine whether the needed number of slots (req) are available,
...@@ -226,6 +235,18 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed); ...@@ -226,6 +235,18 @@ bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed);
void xenvif_stop_queue(struct xenvif *vif); void xenvif_stop_queue(struct xenvif *vif);
/* Number of tx pending-ring entries currently consumed: computed as
 * MAX_PENDING_REQS - pending_prod + pending_cons.
 * NOTE(review): pending_prod/pending_cons appear to be free-running
 * unsigned indices (wraparound-safe subtraction) -- confirm against the
 * struct xenvif definition, which is outside this hunk.
 */
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
return MAX_PENDING_REQS -
vif->pending_prod + vif->pending_cons;
}
/* Return true if the pending ring still has room for one worst-case
 * packet, i.e. XEN_NETBK_LEGACY_SLOTS_MAX more slots on top of the
 * currently used entries. Introduced by this commit to replace the
 * open-coded comparison in xenvif_tx_build_gops() and tx_work_todo().
 */
static inline bool xenvif_tx_pending_slots_available(struct xenvif *vif)
{
return nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
< MAX_PENDING_REQS;
}
extern bool separate_tx_rx_irq; extern bool separate_tx_rx_irq;
#endif /* __XEN_NETBACK__COMMON_H__ */ #endif /* __XEN_NETBACK__COMMON_H__ */
...@@ -421,8 +421,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, ...@@ -421,8 +421,8 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
disable_irq(vif->rx_irq); disable_irq(vif->rx_irq);
} }
task = kthread_create(xenvif_kthread, task = kthread_create(xenvif_kthread_guest_rx,
(void *)vif, "%s", vif->dev->name); (void *)vif, "%s-guest-rx", vif->dev->name);
if (IS_ERR(task)) { if (IS_ERR(task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name); pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
err = PTR_ERR(task); err = PTR_ERR(task);
......
...@@ -62,14 +62,6 @@ module_param(separate_tx_rx_irq, bool, 0644); ...@@ -62,14 +62,6 @@ module_param(separate_tx_rx_irq, bool, 0644);
static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT; static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
module_param(fatal_skb_slots, uint, 0444); module_param(fatal_skb_slots, uint, 0444);
/*
* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
* the maximum slots a valid packet can use. Now this value is defined
* to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported by
* all backends.
*/
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
/* /*
* If head != INVALID_PENDING_RING_IDX, it means this tx request is head of * If head != INVALID_PENDING_RING_IDX, it means this tx request is head of
* one or more merged tx requests, otherwise it is the continuation of * one or more merged tx requests, otherwise it is the continuation of
...@@ -131,12 +123,6 @@ static inline pending_ring_idx_t pending_index(unsigned i) ...@@ -131,12 +123,6 @@ static inline pending_ring_idx_t pending_index(unsigned i)
return i & (MAX_PENDING_REQS-1); return i & (MAX_PENDING_REQS-1);
} }
/* Count of consumed pending-ring entries; this copy is being removed
 * from netback.c by this commit (it moves to common.h so the new
 * xenvif_tx_pending_slots_available() helper can use it).
 */
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
{
return MAX_PENDING_REQS -
vif->pending_prod + vif->pending_cons;
}
bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed) bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
{ {
RING_IDX prod, cons; RING_IDX prod, cons;
...@@ -1116,8 +1102,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget) ...@@ -1116,8 +1102,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
struct sk_buff *skb; struct sk_buff *skb;
int ret; int ret;
while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX while (xenvif_tx_pending_slots_available(vif) &&
< MAX_PENDING_REQS) &&
(skb_queue_len(&vif->tx_queue) < budget)) { (skb_queue_len(&vif->tx_queue) < budget)) {
struct xen_netif_tx_request txreq; struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX]; struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
...@@ -1487,8 +1472,7 @@ static inline int tx_work_todo(struct xenvif *vif) ...@@ -1487,8 +1472,7 @@ static inline int tx_work_todo(struct xenvif *vif)
{ {
if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) && if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
(nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX xenvif_tx_pending_slots_available(vif))
< MAX_PENDING_REQS))
return 1; return 1;
return 0; return 0;
...@@ -1551,7 +1535,7 @@ static void xenvif_start_queue(struct xenvif *vif) ...@@ -1551,7 +1535,7 @@ static void xenvif_start_queue(struct xenvif *vif)
netif_wake_queue(vif->dev); netif_wake_queue(vif->dev);
} }
int xenvif_kthread(void *data) int xenvif_kthread_guest_rx(void *data)
{ {
struct xenvif *vif = data; struct xenvif *vif = data;
struct sk_buff *skb; struct sk_buff *skb;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment