Commit 0da2b183 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2019-01-22

This series contains updates to i40e and xsk.

Jan exports xdp_get_umem_from_qid() for other drivers/modules to use.
Refactored the code to use the netdev-provided umems, instead of containing
them inside our i40e_vsi.
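
For context, the exported helper hands back the UMEM that the net core has bound to a given queue id. A minimal sketch of how a driver can use it (the example_queue_is_zc() wrapper below is illustrative, not actual i40e code):

#include <linux/netdevice.h>
#include <net/xdp_sock.h>

/* Illustrative only: ask the net core whether an AF_XDP UMEM is bound
 * to this queue id; NULL means the queue is not zero-copy enabled.
 */
static bool example_queue_is_zc(struct net_device *netdev, u16 qid)
{
	return xdp_get_umem_from_qid(netdev, qid) != NULL;
}

This mirrors how i40e_xsk_umem() in the diff below now resolves the UMEM per queue instead of indexing a driver-private vsi->xsk_umems array.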

Aleksandr fixes an issue where RSS queues were misconfigured by limiting
the RSS queue number to the number of online CPUs.
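
A sketch of the clamp, mirroring the hunk in i40e_reconfig_rss_queues() further down (the wrapper function is illustrative, not the driver's actual code):

#include <linux/cpumask.h>
#include <linux/kernel.h>

/* Illustrative only: never spread RSS over more queues than there are
 * online CPUs, then apply the hardware limit as before.
 */
static int example_clamp_rss_queues(int requested, int rss_size_max)
{
	int queue_count = min_t(int, requested, num_online_cpus());

	return min_t(int, queue_count, rss_size_max);
}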

Damian adds support for getting and setting the FEC configuration via
ethtool.
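
The ethtool get_fecparam/set_fecparam handlers themselves live outside this excerpt; a rough sketch of how a get handler could map the new I40E_FLAG_RS_FEC/I40E_FLAG_BASE_R_FEC flags onto the ethtool encoding bits (the mapping shown is an assumption, not the driver's actual code) follows:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include "i40e.h"

/* Illustrative only: report the configured FEC modes from the PF flags.
 * A real handler would also fill in the currently active FEC mode
 * (fecparam->active_fec), omitted here.
 */
static int example_get_fecparam(struct net_device *netdev,
				struct ethtool_fecparam *fecparam)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	fecparam->fec = 0;
	if (pf->flags & I40E_FLAG_RS_FEC)
		fecparam->fec |= ETHTOOL_FEC_RS;
	if (pf->flags & I40E_FLAG_BASE_R_FEC)
		fecparam->fec |= ETHTOOL_FEC_BASER;
	if (!(pf->flags & (I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC)))
		fecparam->fec |= ETHTOOL_FEC_OFF;

	return 0;
}

From user space this surfaces through "ethtool --show-fec <dev>" and "ethtool --set-fec <dev> encoding auto|rs|baser|off".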

Grzegorz fixes a type mismatch where the return value did not match the
function declaration.

Sergey adds checks in the queue configuration handler to ensure the
number of queue pairs requested by the VF does not exceed the maximum
possible.

Lihong cleans up code left over from earlier silicon validation in the
i40e debugfs code.

Julia Lawall and Colin Ian King clean up whitespace and indentation
issues that were found.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents cc648f85 d1b3fa86
@@ -34,6 +34,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/xdp_sock.h>
#include "i40e_type.h"
#include "i40e_prototype.h"
#include "i40e_client.h"
@@ -523,6 +524,8 @@ struct i40e_pf {
#define I40E_FLAG_FD_SB_INACTIVE BIT(22)
#define I40E_FLAG_FD_SB_TO_CLOUD_FILTER BIT(23)
#define I40E_FLAG_DISABLE_FW_LLDP BIT(24)
#define I40E_FLAG_RS_FEC BIT(25)
#define I40E_FLAG_BASE_R_FEC BIT(26)
struct i40e_client_instance *cinst;
bool stat_offsets_loaded;
@@ -787,11 +790,6 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
/* AF_XDP zero-copy */
struct xdp_umem **xsk_umems;
u16 num_xsk_umems_used;
u16 num_xsk_umems;
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1091,6 +1089,8 @@ i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf);
i40e_status i40e_commit_partition_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags);
static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
{
return !!vsi->xdp_prog;
@@ -1104,10 +1104,10 @@ static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
if (ring_is_xdp(ring))
qid -= ring->vsi->alloc_queue_pairs;
if (!ring->vsi->xsk_umems || !ring->vsi->xsk_umems[qid] || !xdp_on)
if (!xdp_on)
return NULL;
return ring->vsi->xsk_umems[qid];
return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
}
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
@@ -1642,30 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
count = buf_tmp - i40e_dbg_netdev_ops_buf + 1;
}
if (strncmp(i40e_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) {
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n");
goto netdev_ops_write_done;
}
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev,
"tx_timeout: VSI %d not found\n", vsi_seid);
} else if (!vsi->netdev) {
dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n",
vsi_seid);
} else if (test_bit(__I40E_VSI_DOWN, vsi->state)) {
dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n",
vsi_seid);
} else if (rtnl_trylock()) {
vsi->netdev->netdev_ops->ndo_tx_timeout(vsi->netdev);
rtnl_unlock();
dev_info(&pf->pdev->dev, "tx_timeout called\n");
} else {
dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n");
}
} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
int mtu;
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
@@ -1733,7 +1710,6 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
dev_info(&pf->pdev->dev, "unknown command '%s'\n",
i40e_dbg_netdev_ops_buf);
dev_info(&pf->pdev->dev, "available commands\n");
dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n");
dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n");
dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n");
dev_info(&pf->pdev->dev, " napi <vsi_seid>\n");
@@ -26,8 +26,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 2
#define DRV_VERSION_MINOR 7
#define DRV_VERSION_BUILD 6
#define DRV_VERSION_MINOR 8
#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -11042,6 +11042,7 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
return 0;
queue_count = min_t(int, queue_count, num_online_cpus());
new_rss_size = min_t(int, queue_count, pf->rss_size_max);
if (queue_count != vsi->num_queue_pairs) {
@@ -13859,6 +13860,29 @@ static void i40e_get_platform_mac_addr(struct pci_dev *pdev, struct i40e_pf *pf)
i40e_get_mac_addr(&pf->hw, pf->hw.mac.addr);
}
/**
* i40e_set_fec_in_flags - helper function for setting FEC options in flags
* @fec_cfg: FEC option to set in flags
* @flags: ptr to flags in which we set FEC option
**/
void i40e_set_fec_in_flags(u8 fec_cfg, u32 *flags)
{
if (fec_cfg & I40E_AQ_SET_FEC_AUTO)
*flags |= I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC;
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_RS) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_RS)) {
*flags |= I40E_FLAG_RS_FEC;
*flags &= ~I40E_FLAG_BASE_R_FEC;
}
if ((fec_cfg & I40E_AQ_SET_FEC_REQUEST_KR) ||
(fec_cfg & I40E_AQ_SET_FEC_ABILITY_KR)) {
*flags |= I40E_FLAG_BASE_R_FEC;
*flags &= ~I40E_FLAG_RS_FEC;
}
if (fec_cfg == 0)
*flags &= ~(I40E_FLAG_RS_FEC | I40E_FLAG_BASE_R_FEC);
}
/**
* i40e_probe - Device initialization routine
* @pdev: PCI device information struct
@@ -14350,6 +14374,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
pf->hw.phy.link_info.requested_speeds = abilities.link_speed;
/* set the FEC config due to the board capabilities */
i40e_set_fec_in_flags(abilities.fec_cfg_curr_mod_ext_info, &pf->flags);
/* get the supported phy types from the fw */
err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
if (err)
@@ -2069,6 +2069,11 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
if (qci->num_queue_pairs > I40E_MAX_VF_QUEUES) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
@@ -3656,7 +3661,7 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
int ret;
pf->vf_aq_requests++;
if (local_vf_id >= pf->num_alloc_vfs)
if (local_vf_id < 0 || local_vf_id >= pf->num_alloc_vfs)
return -EINVAL;
vf = &(pf->vf[local_vf_id]);
@@ -9,69 +9,6 @@
#include "i40e_txrx_common.h"
#include "i40e_xsk.h"
/**
* i40e_alloc_xsk_umems - Allocate an array to store per ring UMEMs
* @vsi: Current VSI
*
* Returns 0 on success, <0 on failure
**/
static int i40e_alloc_xsk_umems(struct i40e_vsi *vsi)
{
if (vsi->xsk_umems)
return 0;
vsi->num_xsk_umems_used = 0;
vsi->num_xsk_umems = vsi->alloc_queue_pairs;
vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
GFP_KERNEL);
if (!vsi->xsk_umems) {
vsi->num_xsk_umems = 0;
return -ENOMEM;
}
return 0;
}
/**
* i40e_add_xsk_umem - Store a UMEM for a certain ring/qid
* @vsi: Current VSI
* @umem: UMEM to store
* @qid: Ring/qid to associate with the UMEM
*
* Returns 0 on success, <0 on failure
**/
static int i40e_add_xsk_umem(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid)
{
int err;
err = i40e_alloc_xsk_umems(vsi);
if (err)
return err;
vsi->xsk_umems[qid] = umem;
vsi->num_xsk_umems_used++;
return 0;
}
/**
* i40e_remove_xsk_umem - Remove a UMEM for a certain ring/qid
* @vsi: Current VSI
* @qid: Ring/qid associated with the UMEM
**/
static void i40e_remove_xsk_umem(struct i40e_vsi *vsi, u16 qid)
{
vsi->xsk_umems[qid] = NULL;
vsi->num_xsk_umems_used--;
if (vsi->num_xsk_umems == 0) {
kfree(vsi->xsk_umems);
vsi->xsk_umems = NULL;
vsi->num_xsk_umems = 0;
}
}
/**
* i40e_xsk_umem_dma_map - DMA maps all UMEM memory for the netdev
* @vsi: Current VSI
@@ -140,6 +77,7 @@ static void i40e_xsk_umem_dma_unmap(struct i40e_vsi *vsi, struct xdp_umem *umem)
static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
struct xdp_umem_fq_reuse *reuseq;
bool if_running;
int err;
@@ -150,12 +88,9 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (qid >= vsi->num_queue_pairs)
return -EINVAL;
if (vsi->xsk_umems) {
if (qid >= vsi->num_xsk_umems)
if (qid >= netdev->real_num_rx_queues ||
qid >= netdev->real_num_tx_queues)
return -EINVAL;
if (vsi->xsk_umems[qid])
return -EBUSY;
}
reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
if (!reuseq)
@@ -173,13 +108,7 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
err = i40e_queue_pair_disable(vsi, qid);
if (err)
return err;
}
err = i40e_add_xsk_umem(vsi, umem, qid);
if (err)
return err;
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
@@ -197,11 +126,13 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
**/
static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
{
struct net_device *netdev = vsi->netdev;
struct xdp_umem *umem;
bool if_running;
int err;
if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
!vsi->xsk_umems[qid])
umem = xdp_get_umem_from_qid(netdev, qid);
if (!umem)
return -EINVAL;
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
@@ -212,8 +143,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
i40e_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
i40e_remove_xsk_umem(vsi, qid);
i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
err = i40e_queue_pair_enable(vsi, qid);
@@ -237,20 +167,18 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
int i40e_xsk_umem_query(struct i40e_vsi *vsi, struct xdp_umem **umem,
u16 qid)
{
struct net_device *netdev = vsi->netdev;
struct xdp_umem *queried_umem;
if (vsi->type != I40E_VSI_MAIN)
return -EINVAL;
if (qid >= vsi->num_queue_pairs)
return -EINVAL;
queried_umem = xdp_get_umem_from_qid(netdev, qid);
if (vsi->xsk_umems) {
if (qid >= vsi->num_xsk_umems)
if (!queried_umem)
return -EINVAL;
*umem = vsi->xsk_umems[qid];
return 0;
}
*umem = NULL;
*umem = queried_umem;
return 0;
}
@@ -945,13 +873,11 @@ void i40e_xsk_clean_tx_ring(struct i40e_ring *tx_ring)
**/
bool i40e_xsk_any_rx_ring_enabled(struct i40e_vsi *vsi)
{
struct net_device *netdev = vsi->netdev;
int i;
if (!vsi->xsk_umems)
return false;
for (i = 0; i < vsi->num_queue_pairs; i++) {
if (vsi->xsk_umems[i])
if (xdp_get_umem_from_qid(netdev, i))
return true;
}
@@ -67,6 +67,7 @@ struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{