Commit 210c34dc authored by Paul Durrant, committed by David S. Miller

xen-netback: add support for multicast control

Xen's PV network protocol includes messages to add/remove ethernet
multicast addresses to/from a filter list in the backend. This allows
the frontend to request the backend only forward multicast packets
which are of interest thus preventing unnecessary noise on the shared
ring.

The canonical netif header in git://xenbits.xen.org/xen.git specifies
the message format (two more XEN_NETIF_EXTRA_TYPEs) so the minimal
necessary changes have been pulled into include/xen/interface/io/netif.h.

To prevent the frontend from extending the multicast filter list
arbitrarily a limit (XEN_NETBK_MCAST_MAX) has been set to 64 entries.
This limit is not specified by the protocol and so may change in future.
If the limit is reached then the next XEN_NETIF_EXTRA_TYPE_MCAST_ADD
sent by the frontend will be failed with NETIF_RSP_ERROR.
Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Cc: Ian Campbell <ian.campbell@citrix.com>
Cc: Wei Liu <wei.liu2@citrix.com>
Acked-by: Wei Liu <wei.liu2@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4db78d31
...@@ -210,12 +210,22 @@ enum state_bit_shift { ...@@ -210,12 +210,22 @@ enum state_bit_shift {
VIF_STATUS_CONNECTED, VIF_STATUS_CONNECTED,
}; };
/* One entry in a vif's frontend-requested multicast filter list
 * (vif->fe_mcast_addr). Entries are added/removed from NAPI poll
 * context and looked up under RCU from start_xmit, so freeing is
 * deferred via the rcu head.
 */
struct xenvif_mcast_addr {
	struct list_head entry;	/* link in vif->fe_mcast_addr */
	struct rcu_head rcu;	/* for kfree_rcu() on removal */
	u8 addr[6];		/* ethernet multicast address */
};

/* Cap on the filter list length. Not part of the PV protocol, so it
 * may change; once reached, further MCAST_ADD requests are failed.
 */
#define XEN_NETBK_MCAST_MAX 64
struct xenvif { struct xenvif {
/* Unique identifier for this interface. */ /* Unique identifier for this interface. */
domid_t domid; domid_t domid;
unsigned int handle; unsigned int handle;
u8 fe_dev_addr[6]; u8 fe_dev_addr[6];
struct list_head fe_mcast_addr;
unsigned int fe_mcast_count;
/* Frontend feature information. */ /* Frontend feature information. */
int gso_mask; int gso_mask;
...@@ -224,6 +234,7 @@ struct xenvif { ...@@ -224,6 +234,7 @@ struct xenvif {
u8 can_sg:1; u8 can_sg:1;
u8 ip_csum:1; u8 ip_csum:1;
u8 ipv6_csum:1; u8 ipv6_csum:1;
u8 multicast_control:1;
/* Is this interface disabled? True when backend discovers /* Is this interface disabled? True when backend discovers
* frontend is rogue. * frontend is rogue.
...@@ -341,4 +352,8 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, ...@@ -341,4 +352,8 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
struct sk_buff *skb); struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue); void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);
/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);
#endif /* __XEN_NETBACK__COMMON_H__ */ #endif /* __XEN_NETBACK__COMMON_H__ */
...@@ -171,6 +171,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) ...@@ -171,6 +171,13 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
!xenvif_schedulable(vif)) !xenvif_schedulable(vif))
goto drop; goto drop;
if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
struct ethhdr *eth = (struct ethhdr *)skb->data;
if (!xenvif_mcast_match(vif, eth->h_dest))
goto drop;
}
cb = XENVIF_RX_CB(skb); cb = XENVIF_RX_CB(skb);
cb->expires = jiffies + vif->drain_timeout; cb->expires = jiffies + vif->drain_timeout;
...@@ -427,6 +434,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, ...@@ -427,6 +434,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
vif->num_queues = 0; vif->num_queues = 0;
spin_lock_init(&vif->lock); spin_lock_init(&vif->lock);
INIT_LIST_HEAD(&vif->fe_mcast_addr);
dev->netdev_ops = &xenvif_netdev_ops; dev->netdev_ops = &xenvif_netdev_ops;
dev->hw_features = NETIF_F_SG | dev->hw_features = NETIF_F_SG |
...@@ -661,6 +669,8 @@ void xenvif_disconnect(struct xenvif *vif) ...@@ -661,6 +669,8 @@ void xenvif_disconnect(struct xenvif *vif)
xenvif_unmap_frontend_rings(queue); xenvif_unmap_frontend_rings(queue);
} }
xenvif_mcast_addr_list_free(vif);
} }
/* Reverse the relevant parts of xenvif_init_queue(). /* Reverse the relevant parts of xenvif_init_queue().
......
...@@ -1157,6 +1157,80 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size) ...@@ -1157,6 +1157,80 @@ static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
return false; return false;
} }
/* No locking is required in xenvif_mcast_add/del() as they are
 * only ever invoked from NAPI poll. An RCU list is used because
 * xenvif_mcast_match() is called asynchronously, during start_xmit.
 */

/* Add @addr to @vif's multicast filter list.
 *
 * Return: 0 on success, -ENOSPC if the list already holds
 * XEN_NETBK_MCAST_MAX entries, -ENOMEM on allocation failure.
 */
static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mcast;

	/* Use >= rather than == so the cap still holds even if the
	 * count were ever to overshoot the maximum.
	 */
	if (vif->fe_mcast_count >= XEN_NETBK_MCAST_MAX) {
		if (net_ratelimit())
			netdev_err(vif->dev,
				   "Too many multicast addresses\n");
		return -ENOSPC;
	}

	/* GFP_ATOMIC: we are in NAPI poll (softirq) context. */
	mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
	if (!mcast)
		return -ENOMEM;

	ether_addr_copy(mcast->addr, addr);
	list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
	vif->fe_mcast_count++;

	return 0;
}
/* Remove @addr from @vif's multicast filter list, if present.
 * Invoked only from NAPI poll (see comment on xenvif_mcast_add);
 * concurrent readers are excluded via RCU, so the entry is freed
 * with kfree_rcu() after a grace period.
 */
static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mc;

	list_for_each_entry_rcu(mc, &vif->fe_mcast_addr, entry) {
		if (!ether_addr_equal(addr, mc->addr))
			continue;

		list_del_rcu(&mc->entry);
		--vif->fe_mcast_count;
		kfree_rcu(mc, rcu);
		return;
	}
}
/* Return true if @addr is on @vif's multicast filter list.
 * Called from start_xmit, concurrently with add/del — hence the
 * RCU read-side protection around the list walk.
 */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
{
	struct xenvif_mcast_addr *mc;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(mc, &vif->fe_mcast_addr, entry) {
		if (ether_addr_equal(addr, mc->addr)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
void xenvif_mcast_addr_list_free(struct xenvif *vif)
{
/* No need for locking or RCU here. NAPI poll and TX queue
* are stopped.
*/
while (!list_empty(&vif->fe_mcast_addr)) {
struct xenvif_mcast_addr *mcast;
mcast = list_first_entry(&vif->fe_mcast_addr,
struct xenvif_mcast_addr,
entry);
--vif->fe_mcast_count;
list_del(&mcast->entry);
kfree(mcast);
}
}
static void xenvif_tx_build_gops(struct xenvif_queue *queue, static void xenvif_tx_build_gops(struct xenvif_queue *queue,
int budget, int budget,
unsigned *copy_ops, unsigned *copy_ops,
...@@ -1215,6 +1289,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue, ...@@ -1215,6 +1289,31 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
break; break;
} }
if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
struct xen_netif_extra_info *extra;
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
make_tx_response(queue, &txreq,
(ret == 0) ?
XEN_NETIF_RSP_OKAY :
XEN_NETIF_RSP_ERROR);
push_tx_responses(queue);
continue;
}
if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
struct xen_netif_extra_info *extra;
extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
make_tx_response(queue, &txreq, XEN_NETIF_RSP_OKAY);
push_tx_responses(queue);
continue;
}
ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do); ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
if (unlikely(ret < 0)) if (unlikely(ret < 0))
break; break;
......
...@@ -327,6 +327,14 @@ static int netback_probe(struct xenbus_device *dev, ...@@ -327,6 +327,14 @@ static int netback_probe(struct xenbus_device *dev,
goto abort_transaction; goto abort_transaction;
} }
/* We support multicast-control. */
err = xenbus_printf(xbt, dev->nodename,
"feature-multicast-control", "%d", 1);
if (err) {
message = "writing feature-multicast-control";
goto abort_transaction;
}
err = xenbus_transaction_end(xbt, 0); err = xenbus_transaction_end(xbt, 0);
} while (err == -EAGAIN); } while (err == -EAGAIN);
...@@ -1016,6 +1024,11 @@ static int read_xenbus_vif_flags(struct backend_info *be) ...@@ -1016,6 +1024,11 @@ static int read_xenbus_vif_flags(struct backend_info *be)
val = 0; val = 0;
vif->ipv6_csum = !!val; vif->ipv6_csum = !!val;
if (xenbus_scanf(XBT_NIL, dev->otherend, "request-multicast-control",
"%d", &val) < 0)
val = 0;
vif->multicast_control = !!val;
return 0; return 0;
} }
......
...@@ -156,7 +156,9 @@ struct xen_netif_tx_request { ...@@ -156,7 +156,9 @@ struct xen_netif_tx_request {
/* Types of xen_netif_extra_info descriptors. */ /* Types of xen_netif_extra_info descriptors. */
#define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */
#define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MAX (2) #define XEN_NETIF_EXTRA_TYPE_MCAST_ADD (2) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MCAST_DEL (3) /* u.mcast */
#define XEN_NETIF_EXTRA_TYPE_MAX (4)
/* xen_netif_extra_info flags. */ /* xen_netif_extra_info flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE (0) #define _XEN_NETIF_EXTRA_FLAG_MORE (0)
...@@ -201,6 +203,10 @@ struct xen_netif_extra_info { ...@@ -201,6 +203,10 @@ struct xen_netif_extra_info {
uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ uint16_t features; /* XEN_NETIF_GSO_FEAT_* */
} gso; } gso;
struct {
uint8_t addr[6]; /* Address to add/remove. */
} mcast;
uint16_t pad[3]; uint16_t pad[3];
} u; } u;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment