Commit f942dc25, authored by Ian Campbell, committed by David S. Miller

xen network backend driver

netback is the host side counterpart to the frontend driver in
drivers/net/xen-netfront.c. The PV protocol is also implemented by
frontend drivers in other OSes too, such as the BSDs and even Windows.

The patch is based on the driver from the xen.git pvops kernel tree but
has been put through the checkpatch.pl wringer plus several manual
cleanup passes and review iterations. The driver has been moved from
drivers/xen/netback to drivers/net/xen-netback.

One major change from xen.git is that the guest transmit path (i.e. what
looks like receive to netback) has been significantly reworked to remove
the dependency on the out of tree PageForeign page flag (a core kernel
patch which enables a per page destructor callback on the final
put_page). This page flag was used in order to implement a grant map
based transmit path (where guest pages are mapped directly into SKB
frags). Instead this version of netback uses grant copy operations into
regular memory belonging to the backend domain. Reinstating the grant
map functionality is something which I would like to revisit in the
future.

Note that this driver depends on 2e820f58 "xen/irq: implement
bind_interdomain_evtchn_to_irqhandler for backend drivers" which is in
linux next via the "xen-two" tree and is intended for the 2.6.39 merge
window:
        git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen.git stable/backends
this branch has only that single commit since 2.6.38-rc2 and is safe for
cross merging into the net branch.
Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
Reviewed-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e0da2481
...@@ -2953,12 +2953,38 @@ config XEN_NETDEV_FRONTEND ...@@ -2953,12 +2953,38 @@ config XEN_NETDEV_FRONTEND
select XEN_XENBUS_FRONTEND select XEN_XENBUS_FRONTEND
default y default y
help help
The network device frontend driver allows the kernel to This driver provides support for Xen paravirtual network
access network devices exported exported by a virtual devices exported by a Xen network driver domain (often
machine containing a physical network device driver. The domain 0).
frontend driver is intended for unprivileged guest domains;
if you are compiling a kernel for a Xen guest, you almost The corresponding Linux backend driver is enabled by the
certainly want to enable this. CONFIG_XEN_NETDEV_BACKEND option.
If you are compiling a kernel for use as Xen guest, you
should say Y here. To compile this driver as a module, chose
M here: the module will be called xen-netfront.
config XEN_NETDEV_BACKEND
tristate "Xen backend network device"
depends on XEN_BACKEND
help
This driver allows the kernel to act as a Xen network driver
domain which exports paravirtual network devices to other
Xen domains. These devices can be accessed by any operating
system that implements a compatible front end.
The corresponding Linux frontend driver is enabled by the
CONFIG_XEN_NETDEV_FRONTEND configuration option.
The backend driver presents a standard network device
endpoint for each paravirtual network device to the driver
domain network stack. These can then be bridged or routed
etc in order to provide full network connectivity.
If you are compiling a kernel to run in a Xen network driver
domain (often this is domain 0) you should say Y here. To
compile this driver as a module, chose M here: the module
will be called xen-netback.
config ISERIES_VETH config ISERIES_VETH
tristate "iSeries Virtual Ethernet driver support" tristate "iSeries Virtual Ethernet driver support"
......
...@@ -172,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o ...@@ -172,6 +172,7 @@ obj-$(CONFIG_SLIP) += slip.o
obj-$(CONFIG_SLHC) += slhc.o obj-$(CONFIG_SLHC) += slhc.o
obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_XEN_NETDEV_BACKEND) += xen-netback/
obj-$(CONFIG_DUMMY) += dummy.o obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o obj-$(CONFIG_IFB) += ifb.o
......
obj-$(CONFIG_XEN_NETDEV_BACKEND) := xen-netback.o
xen-netback-y := netback.o xenbus.o interface.o
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__
#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
struct xen_netbk;
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
unsigned int handle;
/* Reference to netback processing backend. */
struct xen_netbk *netbk;
u8 fe_dev_addr[6];
/* Physical parameters of the comms window. */
grant_handle_t tx_shmem_handle;
grant_ref_t tx_shmem_ref;
grant_handle_t rx_shmem_handle;
grant_ref_t rx_shmem_ref;
unsigned int irq;
/* List of frontends to notify after a batch of frames sent. */
struct list_head notify_list;
/* The shared rings and indexes. */
struct xen_netif_tx_back_ring tx;
struct xen_netif_rx_back_ring rx;
struct vm_struct *tx_comms_area;
struct vm_struct *rx_comms_area;
/* Flags that must not be set in dev->features */
u32 features_disabled;
/* Frontend feature information. */
u8 can_sg:1;
u8 gso:1;
u8 gso_prefix:1;
u8 csum:1;
/* Internal feature information. */
u8 can_queue:1; /* can queue packets for receiver? */
/*
* Allow xenvif_start_xmit() to peek ahead in the rx request
* ring. This is a prediction of what rx_req_cons will be
* once all queued skbs are put on the ring.
*/
RING_IDX rx_req_cons_peek;
/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
unsigned long credit_bytes;
unsigned long credit_usec;
unsigned long remaining_credit;
struct timer_list credit_timeout;
/* Statistics */
unsigned long rx_gso_checksum_fixup;
/* Miscellaneous private stuff. */
struct list_head schedule_list;
atomic_t refcnt;
struct net_device *dev;
wait_queue_head_t waiting_to_free;
};
#define XEN_NETIF_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
struct xenvif *xenvif_alloc(struct device *parent,
domid_t domid,
unsigned int handle);
int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int evtchn);
void xenvif_disconnect(struct xenvif *vif);
void xenvif_get(struct xenvif *vif);
void xenvif_put(struct xenvif *vif);
int xenvif_xenbus_init(void);
int xenvif_schedulable(struct xenvif *vif);
int xen_netbk_rx_ring_full(struct xenvif *vif);
int xen_netbk_must_stop_queue(struct xenvif *vif);
/* (Un)Map communication rings. */
void xen_netbk_unmap_frontend_rings(struct xenvif *vif);
int xen_netbk_map_frontend_rings(struct xenvif *vif,
grant_ref_t tx_ring_ref,
grant_ref_t rx_ring_ref);
/* (De)Register a xenvif with the netback backend. */
void xen_netbk_add_xenvif(struct xenvif *vif);
void xen_netbk_remove_xenvif(struct xenvif *vif);
/* (De)Schedule backend processing for a xenvif */
void xen_netbk_schedule_xenvif(struct xenvif *vif);
void xen_netbk_deschedule_xenvif(struct xenvif *vif);
/* Check for SKBs from frontend and schedule backend processing */
void xen_netbk_check_rx_xenvif(struct xenvif *vif);
/* Receive an SKB from the frontend */
void xenvif_receive_skb(struct xenvif *vif, struct sk_buff *skb);
/* Queue an SKB for transmission to the frontend */
void xen_netbk_queue_tx_skb(struct xenvif *vif, struct sk_buff *skb);
/* Notify xenvif that ring now has space to send an skb to the frontend */
void xenvif_notify_tx_completion(struct xenvif *vif);
/* Returns number of ring slots required to send an skb to the frontend */
unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb);
#endif /* __XEN_NETBACK__COMMON_H__ */
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -359,7 +359,7 @@ static void xennet_tx_buf_gc(struct net_device *dev) ...@@ -359,7 +359,7 @@ static void xennet_tx_buf_gc(struct net_device *dev)
struct xen_netif_tx_response *txrsp; struct xen_netif_tx_response *txrsp;
txrsp = RING_GET_RESPONSE(&np->tx, cons); txrsp = RING_GET_RESPONSE(&np->tx, cons);
if (txrsp->status == NETIF_RSP_NULL) if (txrsp->status == XEN_NETIF_RSP_NULL)
continue; continue;
id = txrsp->id; id = txrsp->id;
...@@ -416,7 +416,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, ...@@ -416,7 +416,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
larger than a page), split it it into page-sized chunks. */ larger than a page), split it it into page-sized chunks. */
while (len > PAGE_SIZE - offset) { while (len > PAGE_SIZE - offset) {
tx->size = PAGE_SIZE - offset; tx->size = PAGE_SIZE - offset;
tx->flags |= NETTXF_more_data; tx->flags |= XEN_NETTXF_more_data;
len -= tx->size; len -= tx->size;
data += tx->size; data += tx->size;
offset = 0; offset = 0;
...@@ -442,7 +442,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev, ...@@ -442,7 +442,7 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
for (i = 0; i < frags; i++) { for (i = 0; i < frags; i++) {
skb_frag_t *frag = skb_shinfo(skb)->frags + i; skb_frag_t *frag = skb_shinfo(skb)->frags + i;
tx->flags |= NETTXF_more_data; tx->flags |= XEN_NETTXF_more_data;
id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs); id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
np->tx_skbs[id].skb = skb_get(skb); np->tx_skbs[id].skb = skb_get(skb);
...@@ -517,10 +517,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -517,10 +517,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx->flags = 0; tx->flags = 0;
if (skb->ip_summed == CHECKSUM_PARTIAL) if (skb->ip_summed == CHECKSUM_PARTIAL)
/* local packet? */ /* local packet? */
tx->flags |= NETTXF_csum_blank | NETTXF_data_validated; tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
else if (skb->ip_summed == CHECKSUM_UNNECESSARY) else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
/* remote but checksummed. */ /* remote but checksummed. */
tx->flags |= NETTXF_data_validated; tx->flags |= XEN_NETTXF_data_validated;
if (skb_shinfo(skb)->gso_size) { if (skb_shinfo(skb)->gso_size) {
struct xen_netif_extra_info *gso; struct xen_netif_extra_info *gso;
...@@ -531,7 +531,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -531,7 +531,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (extra) if (extra)
extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE; extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
else else
tx->flags |= NETTXF_extra_info; tx->flags |= XEN_NETTXF_extra_info;
gso->u.gso.size = skb_shinfo(skb)->gso_size; gso->u.gso.size = skb_shinfo(skb)->gso_size;
gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4; gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
...@@ -651,7 +651,7 @@ static int xennet_get_responses(struct netfront_info *np, ...@@ -651,7 +651,7 @@ static int xennet_get_responses(struct netfront_info *np,
int err = 0; int err = 0;
unsigned long ret; unsigned long ret;
if (rx->flags & NETRXF_extra_info) { if (rx->flags & XEN_NETRXF_extra_info) {
err = xennet_get_extras(np, extras, rp); err = xennet_get_extras(np, extras, rp);
cons = np->rx.rsp_cons; cons = np->rx.rsp_cons;
} }
...@@ -688,7 +688,7 @@ static int xennet_get_responses(struct netfront_info *np, ...@@ -688,7 +688,7 @@ static int xennet_get_responses(struct netfront_info *np,
__skb_queue_tail(list, skb); __skb_queue_tail(list, skb);
next: next:
if (!(rx->flags & NETRXF_more_data)) if (!(rx->flags & XEN_NETRXF_more_data))
break; break;
if (cons + frags == rp) { if (cons + frags == rp) {
...@@ -983,9 +983,9 @@ static int xennet_poll(struct napi_struct *napi, int budget) ...@@ -983,9 +983,9 @@ static int xennet_poll(struct napi_struct *napi, int budget)
skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
skb->len += skb->data_len; skb->len += skb->data_len;
if (rx->flags & NETRXF_csum_blank) if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL; skb->ip_summed = CHECKSUM_PARTIAL;
else if (rx->flags & NETRXF_data_validated) else if (rx->flags & XEN_NETRXF_data_validated)
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
__skb_queue_tail(&rxq, skb); __skb_queue_tail(&rxq, skb);
......
...@@ -22,50 +22,50 @@ ...@@ -22,50 +22,50 @@
/* /*
* This is the 'wire' format for packets: * This is the 'wire' format for packets:
* Request 1: netif_tx_request -- NETTXF_* (any flags) * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags)
* [Request 2: netif_tx_extra] (only if request 1 has NETTXF_extra_info) * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info)
* [Request 3: netif_tx_extra] (only if request 2 has XEN_NETIF_EXTRA_MORE) * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE)
* Request 4: netif_tx_request -- NETTXF_more_data * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data
* Request 5: netif_tx_request -- NETTXF_more_data * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data
* ... * ...
* Request N: netif_tx_request -- 0 * Request N: xen_netif_tx_request -- 0
*/ */
/* Protocol checksum field is blank in the packet (hardware offload)? */ /* Protocol checksum field is blank in the packet (hardware offload)? */
#define _NETTXF_csum_blank (0) #define _XEN_NETTXF_csum_blank (0)
#define NETTXF_csum_blank (1U<<_NETTXF_csum_blank) #define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank)
/* Packet data has been validated against protocol checksum. */ /* Packet data has been validated against protocol checksum. */
#define _NETTXF_data_validated (1) #define _XEN_NETTXF_data_validated (1)
#define NETTXF_data_validated (1U<<_NETTXF_data_validated) #define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated)
/* Packet continues in the next request descriptor. */ /* Packet continues in the next request descriptor. */
#define _NETTXF_more_data (2) #define _XEN_NETTXF_more_data (2)
#define NETTXF_more_data (1U<<_NETTXF_more_data) #define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data)
/* Packet to be followed by extra descriptor(s). */ /* Packet to be followed by extra descriptor(s). */
#define _NETTXF_extra_info (3) #define _XEN_NETTXF_extra_info (3)
#define NETTXF_extra_info (1U<<_NETTXF_extra_info) #define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info)
struct xen_netif_tx_request { struct xen_netif_tx_request {
grant_ref_t gref; /* Reference to buffer page */ grant_ref_t gref; /* Reference to buffer page */
uint16_t offset; /* Offset within buffer page */ uint16_t offset; /* Offset within buffer page */
uint16_t flags; /* NETTXF_* */ uint16_t flags; /* XEN_NETTXF_* */
uint16_t id; /* Echoed in response message. */ uint16_t id; /* Echoed in response message. */
uint16_t size; /* Packet size in bytes. */ uint16_t size; /* Packet size in bytes. */
}; };
/* Types of netif_extra_info descriptors. */ /* Types of xen_netif_extra_info descriptors. */
#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MAX (2) #define XEN_NETIF_EXTRA_TYPE_MAX (2)
/* netif_extra_info flags. */ /* xen_netif_extra_info flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define _XEN_NETIF_EXTRA_FLAG_MORE (0)
#define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE)
/* GSO types - only TCPv4 currently supported. */ /* GSO types - only TCPv4 currently supported. */
#define XEN_NETIF_GSO_TYPE_TCPV4 (1) #define XEN_NETIF_GSO_TYPE_TCPV4 (1)
/* /*
* This structure needs to fit within both netif_tx_request and * This structure needs to fit within both netif_tx_request and
...@@ -107,7 +107,7 @@ struct xen_netif_extra_info { ...@@ -107,7 +107,7 @@ struct xen_netif_extra_info {
struct xen_netif_tx_response { struct xen_netif_tx_response {
uint16_t id; uint16_t id;
int16_t status; /* NETIF_RSP_* */ int16_t status; /* XEN_NETIF_RSP_* */
}; };
struct xen_netif_rx_request { struct xen_netif_rx_request {
...@@ -116,25 +116,29 @@ struct xen_netif_rx_request { ...@@ -116,25 +116,29 @@ struct xen_netif_rx_request {
}; };
/* Packet data has been validated against protocol checksum. */ /* Packet data has been validated against protocol checksum. */
#define _NETRXF_data_validated (0) #define _XEN_NETRXF_data_validated (0)
#define NETRXF_data_validated (1U<<_NETRXF_data_validated) #define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated)
/* Protocol checksum field is blank in the packet (hardware offload)? */ /* Protocol checksum field is blank in the packet (hardware offload)? */
#define _NETRXF_csum_blank (1) #define _XEN_NETRXF_csum_blank (1)
#define NETRXF_csum_blank (1U<<_NETRXF_csum_blank) #define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank)
/* Packet continues in the next request descriptor. */ /* Packet continues in the next request descriptor. */
#define _NETRXF_more_data (2) #define _XEN_NETRXF_more_data (2)
#define NETRXF_more_data (1U<<_NETRXF_more_data) #define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data)
/* Packet to be followed by extra descriptor(s). */ /* Packet to be followed by extra descriptor(s). */
#define _NETRXF_extra_info (3) #define _XEN_NETRXF_extra_info (3)
#define NETRXF_extra_info (1U<<_NETRXF_extra_info) #define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info)
/* GSO Prefix descriptor. */
#define _XEN_NETRXF_gso_prefix (4)
#define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix)
struct xen_netif_rx_response { struct xen_netif_rx_response {
uint16_t id; uint16_t id;
uint16_t offset; /* Offset in page of start of received packet */ uint16_t offset; /* Offset in page of start of received packet */
uint16_t flags; /* NETRXF_* */ uint16_t flags; /* XEN_NETRXF_* */
int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */
}; };
...@@ -149,10 +153,10 @@ DEFINE_RING_TYPES(xen_netif_rx, ...@@ -149,10 +153,10 @@ DEFINE_RING_TYPES(xen_netif_rx,
struct xen_netif_rx_request, struct xen_netif_rx_request,
struct xen_netif_rx_response); struct xen_netif_rx_response);
#define NETIF_RSP_DROPPED -2 #define XEN_NETIF_RSP_DROPPED -2
#define NETIF_RSP_ERROR -1 #define XEN_NETIF_RSP_ERROR -1
#define NETIF_RSP_OKAY 0 #define XEN_NETIF_RSP_OKAY 0
/* No response: used for auxiliary requests (e.g., netif_tx_extra). */ /* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */
#define NETIF_RSP_NULL 1 #define XEN_NETIF_RSP_NULL 1
#endif #endif
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment