Commit efde611b authored by David S. Miller

Merge branch 'nfp-next'

Jakub Kicinski says:

====================
nfp: cleanups and improvements

The main purpose of this set is to get rid of potentially long
mdelay()s, but it also contains some trivial changes I've accumulated.
The first two patches fix harmless copy-paste errors, the next two clean
up the documentation and remove unused defines.  Patch 5 clarifies the
interpretation of RX descriptor fields.  Patch 6, by far the biggest,
adds the ability to perform FW reconfig asynchronously, thanks to which
we can stop using mdelay().  (A minimal illustrative sketch of the
posted-reconfig pattern follows the commit header below.)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7a7c1705 3d780b92
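To make the mechanism described in the cover letter concrete, here is a minimal, self-contained sketch of the posted-request-plus-timer pattern. This is not the driver code itself: demo_dev, demo_fire, demo_timer_cb and demo_post are invented names, the 5 second expiry is arbitrary, and the pre-4.15 timer API is used only because the diff below uses it. The idea is that a request is recorded under a spinlock and either started immediately or merged into a pending mask that a timer callback picks up later, so no caller has to busy-wait.

/* Hypothetical sketch of a posted-request + timer pattern; not the nfp code. */
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_dev {
        spinlock_t lock;                /* protects posted/busy */
        u32 posted;                     /* requests accumulated while HW is busy */
        bool busy;                      /* request in flight, timer pending */
        struct timer_list timer;        /* polls for completion asynchronously */
};

/* Called with dd->lock held: kick the hardware, then arm a timer
 * instead of mdelay()ing until the request completes.
 */
static void demo_fire(struct demo_dev *dd, u32 req)
{
        /* write 'req' to the device here */
        dd->busy = true;
        mod_timer(&dd->timer, jiffies + 5 * HZ);
}

static void demo_timer_cb(unsigned long data)
{
        struct demo_dev *dd = (void *)data;

        spin_lock_bh(&dd->lock);
        dd->busy = false;
        /* read completion/error status from the device here */
        if (dd->posted) {               /* start requests that arrived meanwhile */
                u32 req = dd->posted;

                dd->posted = 0;
                demo_fire(dd, req);
        }
        spin_unlock_bh(&dd->lock);
}

static void demo_post(struct demo_dev *dd, u32 req)
{
        spin_lock_bh(&dd->lock);
        if (dd->busy)
                dd->posted |= req;      /* merge with other pending requests */
        else
                demo_fire(dd, req);
        spin_unlock_bh(&dd->lock);
}

At init time something like setup_timer(&dd->timer, demo_timer_cb, (unsigned long)dd) would wire up the callback, which is what the last hunk of the main patch does for the real nfp_net structure.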
@@ -59,8 +59,8 @@
        netdev_warn((nn)->netdev, fmt, ## args);        \
} while (0)

-/* Max time to wait for NFP to respond on updates (in ms) */
-#define NFP_NET_POLL_TIMEOUT 5000
+/* Max time to wait for NFP to respond on updates (in seconds) */
+#define NFP_NET_POLL_TIMEOUT 5

/* Bar allocation */
#define NFP_NET_CRTL_BAR 0
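The unit changes from milliseconds to seconds because the constant is no longer a count of 1 ms mdelay() iterations; the new code below multiplies it by HZ to form jiffies deadlines and timer expiries (jiffies + NFP_NET_POLL_TIMEOUT * HZ), i.e. the same 5 second budget expressed in jiffies.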
@@ -447,6 +447,10 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
 * @shared_name: Name for shared interrupt
 * @me_freq_mhz: ME clock_freq (MHz)
 * @reconfig_lock: Protects HW reconfiguration request regs/machinery
+ * @reconfig_posted: Pending reconfig bits coming from async sources
+ * @reconfig_timer_active: Timer for reading reconfiguration results is pending
+ * @reconfig_sync_present: Some thread is performing synchronous reconfig
+ * @reconfig_timer: Timer for async reading of reconfig results
 * @link_up: Is the link up?
 * @link_status_lock: Protects @link_up and ensures atomicity with BAR reading
 * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -531,6 +535,10 @@ struct nfp_net {
        spinlock_t link_status_lock;

        spinlock_t reconfig_lock;
+       u32 reconfig_posted;
+       bool reconfig_timer_active;
+       bool reconfig_sync_present;
+       struct timer_list reconfig_timer;

        u32 rx_coalesce_usecs;
        u32 rx_coalesce_max_frames;
@@ -80,6 +80,116 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
        put_unaligned_le32(reg, fw_ver);
}

+/* Firmware reconfig
+ *
+ * Firmware reconfig may take a while so we have two versions of it -
+ * synchronous and asynchronous (posted). All synchronous callers are holding
+ * RTNL so we don't have to worry about serializing them.
+ */
+static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
+{
+       nn_writel(nn, NFP_NET_CFG_UPDATE, update);
+       /* ensure update is written before pinging HW */
+       nn_pci_flush(nn);
+       nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+}
+
+/* Pass 0 as update to run posted reconfigs. */
+static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
+{
+       update |= nn->reconfig_posted;
+       nn->reconfig_posted = 0;
+
+       nfp_net_reconfig_start(nn, update);
+
+       nn->reconfig_timer_active = true;
+       mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
+}
+
+static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
+{
+       u32 reg;
+
+       reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
+       if (reg == 0)
+               return true;
+       if (reg & NFP_NET_CFG_UPDATE_ERR) {
+               nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+               return true;
+       } else if (last_check) {
+               nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+               return true;
+       }
+
+       return false;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+       bool timed_out = false;
+
+       /* Poll update field, waiting for NFP to ack the config */
+       while (!nfp_net_reconfig_check_done(nn, timed_out)) {
+               msleep(1);
+               timed_out = time_is_before_eq_jiffies(deadline);
+       }
+
+       if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
+               return -EIO;
+
+       return timed_out ? -EIO : 0;
+}
+
+static void nfp_net_reconfig_timer(unsigned long data)
+{
+       struct nfp_net *nn = (void *)data;
+
+       spin_lock_bh(&nn->reconfig_lock);
+
+       nn->reconfig_timer_active = false;
+
+       /* If sync caller is present it will take over from us */
+       if (nn->reconfig_sync_present)
+               goto done;
+
+       /* Read reconfig status and report errors */
+       nfp_net_reconfig_check_done(nn, true);
+
+       if (nn->reconfig_posted)
+               nfp_net_reconfig_start_async(nn, 0);
+done:
+       spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig_post() - Post async reconfig request
+ * @nn:      NFP Net device to reconfigure
+ * @update:  The value for the update field in the BAR config
+ *
+ * Record FW reconfiguration request. Reconfiguration will be kicked off
+ * whenever reconfiguration machinery is idle. Multiple requests can be
+ * merged together!
+ */
+static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
+{
+       spin_lock_bh(&nn->reconfig_lock);
+
+       /* Sync caller will kick off async reconf when it's done, just post */
+       if (nn->reconfig_sync_present) {
+               nn->reconfig_posted |= update;
+               goto done;
+       }
+
+       /* Opportunistically check if the previous command is done */
+       if (!nn->reconfig_timer_active ||
+           nfp_net_reconfig_check_done(nn, false))
+               nfp_net_reconfig_start_async(nn, update);
+       else
+               nn->reconfig_posted |= update;
+done:
+       spin_unlock_bh(&nn->reconfig_lock);
+}
+
/**
 * nfp_net_reconfig() - Reconfigure the firmware
 * @nn: NFP Net device to reconfigure
@@ -93,35 +203,45 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
 */
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
-       int cnt, ret = 0;
-       u32 new;
+       bool cancelled_timer = false;
+       u32 pre_posted_requests;
+       int ret;

        spin_lock_bh(&nn->reconfig_lock);

-       nn_writel(nn, NFP_NET_CFG_UPDATE, update);
-       /* ensure update is written before pinging HW */
-       nn_pci_flush(nn);
-       nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
-
-       /* Poll update field, waiting for NFP to ack the config */
-       for (cnt = 0; ; cnt++) {
-               new = nn_readl(nn, NFP_NET_CFG_UPDATE);
-               if (new == 0)
-                       break;
-               if (new & NFP_NET_CFG_UPDATE_ERR) {
-                       nn_err(nn, "Reconfig error: 0x%08x\n", new);
-                       ret = -EIO;
-                       break;
-               } else if (cnt >= NFP_NET_POLL_TIMEOUT) {
-                       nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n",
-                              update, cnt);
-                       ret = -EIO;
-                       break;
-               }
-               mdelay(1);
+       nn->reconfig_sync_present = true;
+
+       if (nn->reconfig_timer_active) {
+               del_timer(&nn->reconfig_timer);
+               nn->reconfig_timer_active = false;
+               cancelled_timer = true;
+       }
+       pre_posted_requests = nn->reconfig_posted;
+       nn->reconfig_posted = 0;
+
+       spin_unlock_bh(&nn->reconfig_lock);
+
+       if (cancelled_timer)
+               nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+
+       /* Run the posted reconfigs which were issued before we started */
+       if (pre_posted_requests) {
+               nfp_net_reconfig_start(nn, pre_posted_requests);
+               nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
        }

+       nfp_net_reconfig_start(nn, update);
+       ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+
+       spin_lock_bh(&nn->reconfig_lock);
+
+       if (nn->reconfig_posted)
+               nfp_net_reconfig_start_async(nn, 0);
+
+       nn->reconfig_sync_present = false;
+
        spin_unlock_bh(&nn->reconfig_lock);

        return ret;
}
@@ -1298,23 +1418,25 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
                nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);

+               /*         < meta_len >
+                *  <-- [rx_offset] -->
+                *  ---------------------------------------------------------
+                * | [XX] |  metadata  |            packet            | XXXX |
+                *  ---------------------------------------------------------
+                *         <---------------- data_len --------------->
+                *
+                * The rx_offset is fixed for all packets, the meta_len can vary
+                * on a packet by packet basis. If rx_offset is set to zero
+                * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+                * buffer and is immediately followed by the packet (no [XX]).
+                */
                meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
                data_len = le16_to_cpu(rxd->rxd.data_len);

-               if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) {
-                       dev_kfree_skb_any(skb);
-                       continue;
-               }
-
-               if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) {
-                       /* The packet data starts after the metadata */
+               if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
                        skb_reserve(skb, meta_len);
-               } else {
-                       /* The packet data starts at a fixed offset */
+               else
                        skb_reserve(skb, nn->rx_offset);
-               }
-
-               /* Adjust the SKB for the dynamic meta data pre-pended */
+
                skb_put(skb, data_len - meta_len);

                nfp_net_set_hash(nn->netdev, skb, rxd);
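As a concrete reading of the layout comment above (with made-up numbers): if the firmware advertises a fixed rx_offset of 32 bytes and a frame arrives with meta_len = 8 and data_len = 1000, the driver does skb_reserve(skb, 32) to skip the prepended area and then skb_put(skb, 1000 - 8) so the skb covers only the packet, since data_len counts the metadata as well. When rx_offset is NFP_NET_CFG_RX_OFFSET_DYNAMIC (zero), the metadata sits at the very start of the buffer, so the reserve becomes meta_len (8 in this example) instead.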
@@ -2094,8 +2216,7 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
                return;

        nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
-       if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN))
-               return;
+       nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);

        nn->ctrl = new_ctrl;
}
@@ -2403,7 +2524,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
                  be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
                  be16_to_cpu(nn->vxlan_ports[i]));

-       nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+       nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}

/**
@@ -2549,6 +2670,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
        spin_lock_init(&nn->reconfig_lock);
        spin_lock_init(&nn->link_status_lock);

+       setup_timer(&nn->reconfig_timer,
+                   nfp_net_reconfig_timer, (unsigned long)nn);
+
        return nn;
}
@@ -81,14 +81,10 @@
/**
 * @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_TXR_MASK: Mask for TX rings
 * @NFP_NET_RXR_MAX: Maximum number of RX rings
- * @NFP_NET_RXR_MASK: Mask for RX rings
 */
#define NFP_NET_TXR_MAX 64
-#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1)
#define NFP_NET_RXR_MAX 64
-#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1)

/**
 * Read/Write config words (0x0000 - 0x002c)
@@ -152,9 +148,9 @@
 * @NFP_NET_CFG_VERSION: Firmware version number
 * @NFP_NET_CFG_STS: Status
 * @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_MAX_MTU: Maximum support MTU
+ * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * @NFP_NET_CFG_MAX_MTU: Maximum support MTU
 * @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
 * @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
 *
@@ -187,7 +187,7 @@ static const struct file_operations nfp_tx_q_fops = {
void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
{
-       static struct dentry *queues, *tx, *rx;
+       struct dentry *queues, *tx, *rx;
        char int_name[16];
        int i;

@@ -200,7 +200,7 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
        /* Create queue debugging sub-tree */
        queues = debugfs_create_dir("queue", nn->debugfs_dir);
-       if (IS_ERR_OR_NULL(nn->debugfs_dir))
+       if (IS_ERR_OR_NULL(queues))
                return;

        rx = debugfs_create_dir("rx", queues);
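Finally, a caller-side sketch of how the two entry points divide the work (illustrative only; the example_* functions are invented and "update" stands for whatever NFP_NET_CFG_UPDATE_* bits a caller needs): paths that hold RTNL and may sleep use the synchronous helper and get the result back, while atomic paths such as ndo_set_rx_mode simply post the request and let the timer machinery pick it up, as the nfp_net_set_rx_mode hunk above now does.

/* Illustrative only -- not part of the patch. */
static int example_sleepable_path(struct nfp_net *nn, u32 update)
{
        /* under RTNL, may sleep: waits for the firmware to acknowledge */
        return nfp_net_reconfig(nn, update);
}

static void example_atomic_path(struct nfp_net *nn, u32 update)
{
        /* cannot sleep: record the request and return immediately */
        nfp_net_reconfig_post(nn, update);
}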