Commit 74630534 authored by David S. Miller

Merge branch 'thunderx-irq-hints'

Sunil Goutham says:

====================
net: thunderx: Setting IRQ affinity hints and other optimizations

This patch series contains the following changes:
- Add support for virtual function IRQ affinity hints
  (a sketch of the general pattern follows the commit metadata below)
- Replace napi_schedule() with napi_schedule_irqoff()
- Reduce page allocation overhead by allocating pages
  of a higher order when the page size is 4KB
- Add a couple of stats which help in debugging
- Some miscellaneous changes to the BGX driver

Changes from v1:
- As suggested, changed the invalid MAC address log message
  to use dev_err() instead of dev_warn().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 65411adb 6e4be8d6
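
For context on the affinity-hint item above, here is a minimal sketch of the general kernel pattern the series follows: allocate a cpumask per MSI-X vector, spread the vectors across CPUs local to the device's NUMA node, and register each mask as an affinity hint. It is illustrative only, not the driver's code; the function names and the entries/num_vec/masks/node parameters are assumptions.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

/* Sketch: hint one NUMA-local CPU per MSI-X vector. */
static void example_set_affinity_hints(struct msix_entry *entries, int num_vec,
				       cpumask_var_t *masks, int node)
{
	int vec, cpu;

	for (vec = 0; vec < num_vec; vec++) {
		if (!zalloc_cpumask_var(&masks[vec], GFP_KERNEL))
			return;

		/* Pick the vec-th CPU, preferring CPUs on the given node */
		cpu = cpumask_local_spread(vec, node);
		cpumask_set_cpu(cpu, masks[vec]);

		/* Only a hint; irqbalance/userspace may still override it */
		irq_set_affinity_hint(entries[vec].vector, masks[vec]);
	}
}

/* Sketch: teardown mirrors the unregister path in the diff below. */
static void example_clear_affinity_hints(struct msix_entry *entries, int num_vec,
					 cpumask_var_t *masks)
{
	int vec;

	for (vec = 0; vec < num_vec; vec++) {
		irq_set_affinity_hint(entries[vec].vector, NULL);
		free_cpumask_var(masks[vec]);
	}
}

In the hunks below, nicvf_set_irq_affinity() applies this pattern, offsetting CQ vectors by one so that CPU0 is left for RBDR and other interrupts.
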
@@ -248,10 +248,13 @@ struct nicvf_drv_stats {
 	u64 rx_frames_jumbo;
 	u64 rx_drops;
+	u64 rcv_buffer_alloc_failures;
 	/* Tx */
 	u64 tx_frames_ok;
 	u64 tx_drops;
 	u64 tx_tso;
+	u64 tx_timeout;
 	u64 txq_stop;
 	u64 txq_wake;
 };
@@ -306,6 +309,7 @@ struct nicvf {
 	struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
 	char irq_name[NIC_VF_MSIX_VECTORS][20];
 	bool irq_allocated[NIC_VF_MSIX_VECTORS];
+	cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
 	/* VF <-> PF mailbox communication */
 	bool pf_acked;
...
@@ -89,9 +89,11 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
 	NICVF_DRV_STAT(rx_frames_1518),
 	NICVF_DRV_STAT(rx_frames_jumbo),
 	NICVF_DRV_STAT(rx_drops),
+	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
 	NICVF_DRV_STAT(tx_frames_ok),
 	NICVF_DRV_STAT(tx_tso),
 	NICVF_DRV_STAT(tx_drops),
+	NICVF_DRV_STAT(tx_timeout),
 	NICVF_DRV_STAT(txq_stop),
 	NICVF_DRV_STAT(txq_wake),
 };
...
@@ -828,7 +828,7 @@ static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
 	nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
 	/* Schedule NAPI */
-	napi_schedule(&cq_poll->napi);
+	napi_schedule_irqoff(&cq_poll->napi);
 	/* Clear interrupt */
 	nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
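
A brief note on the switch above: napi_schedule() wraps the scheduling in local_irq_save()/local_irq_restore(), while napi_schedule_irqoff() assumes local interrupts are already disabled, which holds in a non-threaded hard IRQ handler such as this one. A minimal, hypothetical handler shape for illustration (example_cq_intr and passing the napi_struct as the cookie are assumptions, not the driver's code):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

static irqreturn_t example_cq_intr(int irq, void *data)
{
	struct napi_struct *napi = data;

	/* Hard IRQ context already runs with local interrupts disabled,
	 * so the _irqoff variant can skip local_irq_save()/restore().
	 */
	napi_schedule_irqoff(napi);
	return IRQ_HANDLED;
}
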
@@ -899,6 +899,31 @@ static void nicvf_disable_msix(struct nicvf *nic)
 	}
 }
+static void nicvf_set_irq_affinity(struct nicvf *nic)
+{
+	int vec, cpu;
+	int irqnum;
+
+	for (vec = 0; vec < nic->num_vec; vec++) {
+		if (!nic->irq_allocated[vec])
+			continue;
+
+		if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
+			return;
+		/* CQ interrupts */
+		if (vec < NICVF_INTR_ID_SQ)
+			/* Leave CPU0 for RBDR and other interrupts */
+			cpu = nicvf_netdev_qidx(nic, vec) + 1;
+		else
+			cpu = 0;
+
+		cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
+				nic->affinity_mask[vec]);
+		irqnum = nic->msix_entries[vec].vector;
+
+		irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
+	}
+}
+
 static int nicvf_register_interrupts(struct nicvf *nic)
 {
 	int irq, ret = 0;
@@ -944,9 +969,14 @@ static int nicvf_register_interrupts(struct nicvf *nic)
 	ret = request_irq(nic->msix_entries[irq].vector,
 			  nicvf_qs_err_intr_handler,
 			  0, nic->irq_name[irq], nic);
-	if (!ret)
-		nic->irq_allocated[irq] = true;
+	if (ret)
+		goto err;
+
+	nic->irq_allocated[irq] = true;
+
+	/* Set IRQ affinities */
+	nicvf_set_irq_affinity(nic);

 err:
 	if (ret)
 		netdev_err(nic->netdev, "request_irq failed, vector %d\n", irq);
@@ -963,6 +993,9 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
 		if (!nic->irq_allocated[irq])
 			continue;
+		irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
+		free_cpumask_var(nic->affinity_mask[irq]);
+
 		if (irq < NICVF_INTR_ID_SQ)
 			free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
 		else
@@ -1394,6 +1427,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
 	netdev_warn(dev, "%s: Transmit timed out, resetting\n",
 		    dev->name);
+	nic->drv_stats.tx_timeout++;
 	schedule_work(&nic->reset_task);
 }
...
@@ -78,7 +78,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 					  u32 buf_len, u64 **rbuf)
 {
-	int order = get_order(buf_len);
+	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
 	/* Check if request can be accomodated in previous allocated page */
 	if (nic->rb_page) {
@@ -96,8 +96,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 		nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
 					   order);
 		if (!nic->rb_page) {
-			netdev_err(nic->netdev,
-				   "Failed to allocate new rcv buffer\n");
+			nic->drv_stats.rcv_buffer_alloc_failures++;
 			return -ENOMEM;
 		}
 		nic->rb_page_offset = 0;
...
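
Rough arithmetic behind the receive-buffer hunk above: with a 4KB PAGE_SIZE, allocating at PAGE_ALLOC_COSTLY_ORDER (order 3) returns a 32KB compound page (2^3 * 4KB), so several receive buffers can be carved out of a single allocation via rb_page_offset instead of calling the page allocator per buffer; on larger page sizes an order-0 page already suffices. A hedged sketch of just the order selection and allocation call (the helper name is an assumption):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: pick a higher order only when pages are small (4KB). */
static struct page *example_alloc_rcv_page(gfp_t gfp)
{
	int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;

	/* __GFP_COMP makes it a compound page; __GFP_NOWARN keeps
	 * allocation failures quiet, they are counted in drv_stats instead.
	 */
	return alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, order);
}
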
@@ -886,7 +886,8 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
 #ifdef CONFIG_ACPI
-static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
+static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
+				u8 *dst)
 {
 	u8 mac[ETH_ALEN];
 	int ret;
@@ -897,10 +898,13 @@ static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
 		goto out;
 	if (!is_valid_ether_addr(mac)) {
+		dev_err(dev, "MAC address invalid: %pM\n", mac);
 		ret = -EINVAL;
 		goto out;
 	}
+
+	dev_info(dev, "MAC address set to: %pM\n", mac);
 	memcpy(dst, mac, ETH_ALEN);
 out:
 	return ret;
@@ -911,14 +915,15 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
 					 u32 lvl, void *context, void **rv)
 {
 	struct bgx *bgx = context;
+	struct device *dev = &bgx->pdev->dev;
 	struct acpi_device *adev;
 	if (acpi_bus_get_device(handle, &adev))
 		goto out;
-	acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
+	acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
-	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
+	SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
 	bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
 out:
@@ -968,26 +973,27 @@ static int bgx_init_acpi_phy(struct bgx *bgx)
 static int bgx_init_of_phy(struct bgx *bgx)
 {
-	struct device_node *np;
-	struct device_node *np_child;
+	struct fwnode_handle *fwn;
 	u8 lmac = 0;
-	char bgx_sel[5];
 	const char *mac;
-	/* Get BGX node from DT */
-	snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
-	np = of_find_node_by_name(NULL, bgx_sel);
-	if (!np)
-		return -ENODEV;
-	for_each_child_of_node(np, np_child) {
-		struct device_node *phy_np = of_parse_phandle(np_child,
-							      "phy-handle", 0);
+	device_for_each_child_node(&bgx->pdev->dev, fwn) {
+		struct device_node *phy_np;
+		struct device_node *node = to_of_node(fwn);
+
+		/* If it is not an OF node we cannot handle it yet, so
+		 * exit the loop.
+		 */
+		if (!node)
+			break;
+
+		phy_np = of_parse_phandle(node, "phy-handle", 0);
 		if (!phy_np)
 			continue;
 		bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
-		mac = of_get_mac_address(np_child);
+		mac = of_get_mac_address(node);
 		if (mac)
 			ether_addr_copy(bgx->lmac[lmac].mac, mac);
@@ -995,7 +1001,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
 		bgx->lmac[lmac].lmacid = lmac;
 		lmac++;
 		if (lmac == MAX_LMAC_PER_BGX) {
-			of_node_put(np_child);
+			of_node_put(node);
 			break;
 		}
 	}
...