Commit aac3942c authored by David S. Miller

Merge branch 'team_multiq'

Jiri Pirko says:

====================
This patchset represents the path I took while adding multiqueue
support to the team driver.

Jiri Pirko (6):
  net: honour netif_set_real_num_tx_queues() retval
  rtnl: allow to specify different num for rx and tx queue count
  rtnl: allow to specify number of rx and tx queues on device creation
  net: rename bond_queue_mapping to slave_dev_queue_mapping
  bond_sysfs: use real_num_tx_queues rather than params.tx_queues
  team: add multiqueue support
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6f458dfb 6c85f2bd
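
Note on the new rtnl_link_ops hooks this series introduces: a driver can now advertise per-type default queue counts, which rtnl_create_link() uses whenever userspace does not pass IFLA_NUM_TX_QUEUES / IFLA_NUM_RX_QUEUES. A minimal driver-side sketch follows; the "foo" name, foo_setup() helper and default counts are purely illustrative and not part of the patchset (the team driver itself uses 16/16):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <net/rtnetlink.h>

/* Hypothetical per-type defaults; pick whatever suits the device type. */
#define FOO_DEFAULT_NUM_TX_QUEUES 8
#define FOO_DEFAULT_NUM_RX_QUEUES 8

static void foo_setup(struct net_device *dev)
{
        ether_setup(dev);               /* illustrative setup only */
}

/* Called by rtnl_create_link() when IFLA_NUM_TX_QUEUES is absent. */
static unsigned int foo_get_num_tx_queues(void)
{
        return FOO_DEFAULT_NUM_TX_QUEUES;
}

/* Called by rtnl_create_link() when IFLA_NUM_RX_QUEUES is absent. */
static unsigned int foo_get_num_rx_queues(void)
{
        return FOO_DEFAULT_NUM_RX_QUEUES;
}

static struct rtnl_link_ops foo_link_ops __read_mostly = {
        .kind                   = "foo",
        .setup                  = foo_setup,
        .get_num_tx_queues      = foo_get_num_tx_queues,
        .get_num_rx_queues      = foo_get_num_rx_queues,
};

Without these hooks, and without explicit attributes from userspace, rtnl_create_link() falls back to a single queue in each direction, as the rtnl_create_link() hunk at the end of the diff shows.
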
@@ -395,8 +395,8 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
 	skb->dev = slave_dev;

 	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
-		     sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
-	skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
+		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

 	if (unlikely(netpoll_tx_running(slave_dev)))
 		bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -4184,7 +4184,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
 	/*
 	 * Save the original txq to restore before passing to the driver
 	 */
-	qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
+	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
 	if (unlikely(txq >= dev->real_num_tx_queues)) {
 		do {
@@ -4845,17 +4845,19 @@ static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }

-static int bond_get_tx_queues(struct net *net, struct nlattr *tb[])
+static unsigned int bond_get_num_tx_queues(void)
 {
 	return tx_queues;
 }

 static struct rtnl_link_ops bond_link_ops __read_mostly = {
 	.kind			= "bond",
 	.priv_size		= sizeof(struct bonding),
 	.setup			= bond_setup,
 	.validate		= bond_validate,
-	.get_tx_queues		= bond_get_tx_queues,
+	.get_num_tx_queues	= bond_get_num_tx_queues,
+	.get_num_rx_queues	= bond_get_num_tx_queues, /* Use the same number
+							     as for TX queues */
 };

 /* Create a new bond based on the specified name and bonding parameters.
...
@@ -1495,7 +1495,7 @@ static ssize_t bonding_store_queue_id(struct device *d,
 	/* Check buffer length, valid ifname and queue id */
 	if (strlen(buffer) > IFNAMSIZ ||
 	    !dev_valid_name(buffer) ||
-	    qid > bond->params.tx_queues)
+	    qid > bond->dev->real_num_tx_queues)
 		goto err_no_cmd;

 	/* Get the pointer to that interface if it exists */
...
@@ -27,6 +27,7 @@
 #include <net/rtnetlink.h>
 #include <net/genetlink.h>
 #include <net/netlink.h>
+#include <net/sch_generic.h>
 #include <linux/if_team.h>

 #define DRV_NAME "team"
@@ -1121,6 +1122,22 @@ static const struct team_option team_options[] = {
 	},
 };

+static struct lock_class_key team_netdev_xmit_lock_key;
+static struct lock_class_key team_netdev_addr_lock_key;
+
+static void team_set_lockdep_class_one(struct net_device *dev,
+				       struct netdev_queue *txq,
+				       void *unused)
+{
+	lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
+}
+
+static void team_set_lockdep_class(struct net_device *dev)
+{
+	lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
+	netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
+}
+
 static int team_init(struct net_device *dev)
 {
 	struct team *team = netdev_priv(dev);
@@ -1148,6 +1165,8 @@ static int team_init(struct net_device *dev)
 		goto err_options_register;
 	netif_carrier_off(dev);

+	team_set_lockdep_class(dev);
+
 	return 0;

 err_options_register:
@@ -1216,6 +1235,29 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }

+static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+	/*
+	 * This helper function exists to help dev_pick_tx get the correct
+	 * destination queue.  Using a helper function skips a call to
+	 * skb_tx_hash and will put the skbs in the queue we expect on their
+	 * way down to the team driver.
+	 */
+	u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
+
+	/*
+	 * Save the original txq to restore before passing to the driver
+	 */
+	qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
+
+	if (unlikely(txq >= dev->real_num_tx_queues)) {
+		do {
+			txq -= dev->real_num_tx_queues;
+		} while (txq >= dev->real_num_tx_queues);
+	}
+	return txq;
+}
+
 static void team_change_rx_flags(struct net_device *dev, int change)
 {
 	struct team *team = netdev_priv(dev);
@@ -1469,6 +1511,7 @@ static const struct net_device_ops team_netdev_ops = {
 	.ndo_open		= team_open,
 	.ndo_stop		= team_close,
 	.ndo_start_xmit		= team_xmit,
+	.ndo_select_queue	= team_select_queue,
 	.ndo_change_rx_flags	= team_change_rx_flags,
 	.ndo_set_rx_mode	= team_set_rx_mode,
 	.ndo_set_mac_address	= team_set_mac_address,
@@ -1543,12 +1586,24 @@ static int team_validate(struct nlattr *tb[], struct nlattr *data[])
 	return 0;
 }

+static unsigned int team_get_num_tx_queues(void)
+{
+	return TEAM_DEFAULT_NUM_TX_QUEUES;
+}
+
+static unsigned int team_get_num_rx_queues(void)
+{
+	return TEAM_DEFAULT_NUM_RX_QUEUES;
+}
+
 static struct rtnl_link_ops team_link_ops __read_mostly = {
 	.kind			= DRV_NAME,
 	.priv_size		= sizeof(struct team),
 	.setup			= team_setup,
 	.newlink		= team_newlink,
 	.validate		= team_validate,
+	.get_num_tx_queues	= team_get_num_tx_queues,
+	.get_num_rx_queues	= team_get_num_rx_queues,
 };
...
@@ -140,6 +140,8 @@ enum {
 	IFLA_EXT_MASK,		/* Extended info mask, VFs, etc */
 	IFLA_PROMISCUITY,	/* Promiscuity count: > 0 means acts PROMISC */
 #define IFLA_PROMISCUITY IFLA_PROMISCUITY
+	IFLA_NUM_TX_QUEUES,
+	IFLA_NUM_RX_QUEUES,
 	__IFLA_MAX
 };
...
@@ -14,6 +14,7 @@
 #ifdef __KERNEL__

 #include <linux/netpoll.h>
+#include <net/sch_generic.h>

 struct team_pcpu_stats {
 	u64			rx_packets;
@@ -98,6 +99,10 @@ static inline void team_netpoll_send_skb(struct team_port *port,
 static inline int team_dev_queue_xmit(struct team *team, struct team_port *port,
				       struct sk_buff *skb)
 {
+	BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
+		     sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping));
+	skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
+
 	skb->dev = port->dev;
 	if (unlikely(netpoll_tx_running(port->dev))) {
 		team_netpoll_send_skb(port, skb);
@@ -236,6 +241,9 @@ extern void team_options_unregister(struct team *team,
 extern int team_mode_register(const struct team_mode *mode);
 extern void team_mode_unregister(const struct team_mode *mode);

+#define TEAM_DEFAULT_NUM_TX_QUEUES 16
+#define TEAM_DEFAULT_NUM_RX_QUEUES 16
+
 #endif /* __KERNEL__ */

 #define TEAM_STRING_MAX_LEN 32
...
@@ -2110,7 +2110,12 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev,
 static inline int netif_copy_real_num_queues(struct net_device *to_dev,
					      const struct net_device *from_dev)
 {
-	netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues);
+	int err;
+
+	err = netif_set_real_num_tx_queues(to_dev,
+					   from_dev->real_num_tx_queues);
+	if (err)
+		return err;
 #ifdef CONFIG_RPS
 	return netif_set_real_num_rx_queues(to_dev,
					     from_dev->real_num_rx_queues);
...
@@ -44,8 +44,10 @@ static inline int rtnl_msg_family(const struct nlmsghdr *nlh)
  * @get_xstats_size: Function to calculate required room for dumping device
  *		     specific statistics
  * @fill_xstats: Function to dump device specific statistics
- * @get_tx_queues: Function to determine number of transmit queues to create when
- *		   creating a new device.
+ * @get_num_tx_queues: Function to determine number of transmit queues
+ *		       to create when creating a new device.
+ * @get_num_rx_queues: Function to determine number of receive queues
+ *		       to create when creating a new device.
  */
 struct rtnl_link_ops {
 	struct list_head	list;
@@ -77,8 +79,8 @@ struct rtnl_link_ops {
 	size_t			(*get_xstats_size)(const struct net_device *dev);
 	int			(*fill_xstats)(struct sk_buff *skb,
					       const struct net_device *dev);
-	int			(*get_tx_queues)(struct net *net,
-						 struct nlattr *tb[]);
+	unsigned int		(*get_num_tx_queues)(void);
+	unsigned int		(*get_num_rx_queues)(void);
 };

 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
...
@@ -220,7 +220,7 @@ struct tcf_proto {
 struct qdisc_skb_cb {
 	unsigned int		pkt_len;
-	u16			bond_queue_mapping;
+	u16			slave_dev_queue_mapping;
 	u16			_pad;
 	unsigned char		data[20];
 };
...
@@ -771,6 +771,8 @@ static noinline size_t if_nlmsg_size(const struct net_device *dev,
 	       + nla_total_size(4) /* IFLA_LINK */
 	       + nla_total_size(4) /* IFLA_MASTER */
 	       + nla_total_size(4) /* IFLA_PROMISCUITY */
+	       + nla_total_size(4) /* IFLA_NUM_TX_QUEUES */
+	       + nla_total_size(4) /* IFLA_NUM_RX_QUEUES */
 	       + nla_total_size(1) /* IFLA_OPERSTATE */
 	       + nla_total_size(1) /* IFLA_LINKMODE */
 	       + nla_total_size(ext_filter_mask
@@ -889,6 +891,8 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 	    nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
 	    nla_put_u32(skb, IFLA_GROUP, dev->group) ||
 	    nla_put_u32(skb, IFLA_PROMISCUITY, dev->promiscuity) ||
+	    nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) ||
+	    nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) ||
 	    (dev->ifindex != dev->iflink &&
	     nla_put_u32(skb, IFLA_LINK, dev->iflink)) ||
 	    (dev->master &&
@@ -1106,6 +1110,8 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
 	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
 	[IFLA_EXT_MASK]		= { .type = NLA_U32 },
 	[IFLA_PROMISCUITY]	= { .type = NLA_U32 },
+	[IFLA_NUM_TX_QUEUES]	= { .type = NLA_U32 },
+	[IFLA_NUM_RX_QUEUES]	= { .type = NLA_U32 },
 };

 EXPORT_SYMBOL(ifla_policy);
@@ -1624,17 +1630,22 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
 {
 	int err;
 	struct net_device *dev;
-	unsigned int num_queues = 1;
+	unsigned int num_tx_queues = 1;
+	unsigned int num_rx_queues = 1;

-	if (ops->get_tx_queues) {
-		err = ops->get_tx_queues(src_net, tb);
-		if (err < 0)
-			goto err;
-		num_queues = err;
-	}
+	if (tb[IFLA_NUM_TX_QUEUES])
+		num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]);
+	else if (ops->get_num_tx_queues)
+		num_tx_queues = ops->get_num_tx_queues();
+
+	if (tb[IFLA_NUM_RX_QUEUES])
+		num_rx_queues = nla_get_u32(tb[IFLA_NUM_RX_QUEUES]);
+	else if (ops->get_num_rx_queues)
+		num_rx_queues = ops->get_num_rx_queues();

 	err = -ENOMEM;
-	dev = alloc_netdev_mq(ops->priv_size, ifname, ops->setup, num_queues);
+	dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
+			       num_tx_queues, num_rx_queues);
 	if (!dev)
 		goto err;
...
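
For completeness, a userspace sketch of how the new IFLA_NUM_TX_QUEUES / IFLA_NUM_RX_QUEUES attributes could be exercised directly over rtnetlink. Only the attribute names and their NLA_U32 type come from this series; the "team0" name, the queue counts and the use of libmnl are assumptions for illustration. Later iproute2 releases expose roughly the same thing as "ip link add team0 numtxqueues 4 numrxqueues 4 type team", but that option naming belongs to iproute2, not to this kernel series.

/* Create "team0" with explicit queue counts via RTM_NEWLINK.
 * Sketch only: assumes libmnl (gcc new_team.c -lmnl), root privileges,
 * and a kernel with the team driver and this series applied.
 * Error reporting via the netlink ACK is omitted for brevity. */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/socket.h>
#include <libmnl/libmnl.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>

int main(void)
{
        char buf[MNL_SOCKET_BUFFER_SIZE];
        struct mnl_socket *nl;
        struct nlmsghdr *nlh;
        struct ifinfomsg *ifm;
        struct nlattr *linkinfo;

        /* Build RTM_NEWLINK: create a new "team" link named team0. */
        nlh = mnl_nlmsg_put_header(buf);
        nlh->nlmsg_type = RTM_NEWLINK;
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL | NLM_F_ACK;
        nlh->nlmsg_seq = time(NULL);

        ifm = mnl_nlmsg_put_extra_header(nlh, sizeof(*ifm));
        ifm->ifi_family = AF_UNSPEC;

        mnl_attr_put_strz(nlh, IFLA_IFNAME, "team0");   /* hypothetical name */
        mnl_attr_put_u32(nlh, IFLA_NUM_TX_QUEUES, 4);   /* new in this series */
        mnl_attr_put_u32(nlh, IFLA_NUM_RX_QUEUES, 4);   /* new in this series */

        linkinfo = mnl_attr_nest_start(nlh, IFLA_LINKINFO);
        mnl_attr_put_strz(nlh, IFLA_INFO_KIND, "team");
        mnl_attr_nest_end(nlh, linkinfo);

        /* Send it over a NETLINK_ROUTE socket. */
        nl = mnl_socket_open(NETLINK_ROUTE);
        if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0) {
                perror("mnl_socket");
                return EXIT_FAILURE;
        }
        if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0) {
                perror("mnl_socket_sendto");
                return EXIT_FAILURE;
        }
        mnl_socket_close(nl);
        return EXIT_SUCCESS;
}

If the attributes are left out, rtnl_create_link() falls back to the driver's get_num_tx_queues()/get_num_rx_queues() defaults (16/16 for team), or to one queue each when the driver provides no hooks.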