Commit 74630534 authored by David S. Miller

Merge branch 'thunderx-irq-hints'

Sunil Goutham says:

====================
net: thunderx: Setting IRQ affinity hints and other optimizations

This patch series contains the following changes:
- Add support for IRQ affinity hints for the virtual function's
  interrupts (sketched below, just before the diff)
- Replace napi_schedule() with napi_schedule_irqoff()
- Reduce page allocation overhead by allocating pages
  of a higher order when the page size is 4KB
- Add a couple of stats that help with debugging
- Miscellaneous changes to the BGX driver

Changes from v1:
- As suggested, changed the invalid MAC address log message
  to use dev_err() instead of dev_warn().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 65411adb 6e4be8d6
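The first bullet of the cover letter (IRQ affinity hints) is the core of the
series. As context for the hunks below, this is a minimal, hedged sketch of the
pattern with hypothetical names; the driver's actual implementation is the
nicvf_set_irq_affinity() addition further down.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

/* Hypothetical per-vector state; the real driver keeps the equivalent fields
 * in struct nicvf (msix_entries[] and the new affinity_mask[]).
 */
struct example_vec {
	int		irq;	/* MSI-X vector number */
	cpumask_var_t	mask;	/* affinity hint mask   */
};

/* Hint one vector onto the idx-th CPU, preferring CPUs on NUMA node 'node' */
static int example_set_hint(struct example_vec *v, int idx, int node)
{
	/* cpumask_var_t may live off-stack (CONFIG_CPUMASK_OFFSTACK), so it
	 * must be allocated with zalloc_cpumask_var() and freed later.
	 */
	if (!zalloc_cpumask_var(&v->mask, GFP_KERNEL))
		return -ENOMEM;

	/* cpumask_local_spread() maps index idx to an online CPU, preferring
	 * CPUs local to the given NUMA node.
	 */
	cpumask_set_cpu(cpumask_local_spread(idx, node), v->mask);

	/* Publish the hint to the IRQ core (visible in
	 * /proc/irq/<irq>/affinity_hint and honoured by irqbalance).
	 */
	return irq_set_affinity_hint(v->irq, v->mask);
}

/* Teardown mirrors the setup: clear the hint, then free the mask */
static void example_clear_hint(struct example_vec *v)
{
	irq_set_affinity_hint(v->irq, NULL);
	free_cpumask_var(v->mask);
}
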
@@ -248,10 +248,13 @@ struct nicvf_drv_stats {
u64 rx_frames_jumbo;
u64 rx_drops;
u64 rcv_buffer_alloc_failures;
/* Tx */
u64 tx_frames_ok;
u64 tx_drops;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
};
@@ -306,6 +309,7 @@ struct nicvf {
struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
char irq_name[NIC_VF_MSIX_VECTORS][20];
bool irq_allocated[NIC_VF_MSIX_VECTORS];
cpumask_var_t affinity_mask[NIC_VF_MSIX_VECTORS];
/* VF <-> PF mailbox communication */
bool pf_acked;
......
@@ -89,9 +89,11 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
NICVF_DRV_STAT(rx_frames_1518),
NICVF_DRV_STAT(rx_frames_jumbo),
NICVF_DRV_STAT(rx_drops),
NICVF_DRV_STAT(rcv_buffer_alloc_failures),
NICVF_DRV_STAT(tx_frames_ok),
NICVF_DRV_STAT(tx_tso),
NICVF_DRV_STAT(tx_drops),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
};
......
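The NICVF_DRV_STAT() entries above are one-liners because the macro pairs the
stringified field name with the field's position inside struct nicvf_drv_stats
(extended in the first hunk). The macro definition itself is outside this hunk;
a plausible minimal equivalent, shown only as a hedged sketch, would be:

#include <linux/ethtool.h>	/* ETH_GSTRING_LEN */
#include <linux/stddef.h>	/* offsetof() */

/* Assumed shape of the per-stat descriptor; the driver's real struct
 * nicvf_stat may differ in detail.
 */
struct example_nicvf_stat {
	char		name[ETH_GSTRING_LEN];
	unsigned int	index;
};

/* Hypothetical equivalent of NICVF_DRV_STAT(): record the ethtool string and
 * the field's index (in u64 units) within struct nicvf_drv_stats, so a new
 * counter only needs the struct field plus one table entry.
 */
#define EXAMPLE_DRV_STAT(stat) {					\
	.name  = #stat,							\
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64),	\
}

With the table extended, the new counters show up in the ethtool -S output
alongside the existing ones.
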
@@ -828,7 +828,7 @@ static irqreturn_t nicvf_intr_handler(int irq, void *cq_irq)
nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
/* Schedule NAPI */
napi_schedule(&cq_poll->napi);
napi_schedule_irqoff(&cq_poll->napi);
/* Clear interrupt */
nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
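Replacing napi_schedule() with napi_schedule_irqoff() works here because the
call sits in the hard-IRQ handler, where local interrupts are already disabled;
the _irqoff variant skips the local_irq_save()/local_irq_restore() pair that
plain napi_schedule() performs on every interrupt. A hedged, self-contained
sketch of the calling pattern (hypothetical handler name):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical CQ interrupt handler: in hard-IRQ context interrupts are
 * already off, so napi_schedule_irqoff() can safely drop the flag
 * save/restore that napi_schedule() would do.
 */
static irqreturn_t example_cq_intr_handler(int irq, void *data)
{
	struct napi_struct *napi = data;	/* passed via request_irq() */

	napi_schedule_irqoff(napi);

	return IRQ_HANDLED;
}
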
@@ -899,6 +899,31 @@ static void nicvf_disable_msix(struct nicvf *nic)
}
}
static void nicvf_set_irq_affinity(struct nicvf *nic)
{
int vec, cpu;
int irqnum;
for (vec = 0; vec < nic->num_vec; vec++) {
if (!nic->irq_allocated[vec])
continue;
if (!zalloc_cpumask_var(&nic->affinity_mask[vec], GFP_KERNEL))
return;
/* CQ interrupts */
if (vec < NICVF_INTR_ID_SQ)
/* Leave CPU0 for RBDR and other interrupts */
cpu = nicvf_netdev_qidx(nic, vec) + 1;
else
cpu = 0;
cpumask_set_cpu(cpumask_local_spread(cpu, nic->node),
nic->affinity_mask[vec]);
irqnum = nic->msix_entries[vec].vector;
irq_set_affinity_hint(irqnum, nic->affinity_mask[vec]);
}
}
static int nicvf_register_interrupts(struct nicvf *nic)
{
int irq, ret = 0;
@@ -944,8 +969,13 @@ static int nicvf_register_interrupts(struct nicvf *nic)
ret = request_irq(nic->msix_entries[irq].vector,
nicvf_qs_err_intr_handler,
0, nic->irq_name[irq], nic);
if (!ret)
nic->irq_allocated[irq] = true;
if (ret)
goto err;
nic->irq_allocated[irq] = true;
/* Set IRQ affinities */
nicvf_set_irq_affinity(nic);
err:
if (ret)
@@ -963,6 +993,9 @@ static void nicvf_unregister_interrupts(struct nicvf *nic)
if (!nic->irq_allocated[irq])
continue;
irq_set_affinity_hint(nic->msix_entries[irq].vector, NULL);
free_cpumask_var(nic->affinity_mask[irq]);
if (irq < NICVF_INTR_ID_SQ)
free_irq(nic->msix_entries[irq].vector, nic->napi[irq]);
else
@@ -1394,6 +1427,7 @@ static void nicvf_tx_timeout(struct net_device *dev)
netdev_warn(dev, "%s: Transmit timed out, resetting\n",
dev->name);
nic->drv_stats.tx_timeout++;
schedule_work(&nic->reset_task);
}
......
@@ -78,7 +78,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
u32 buf_len, u64 **rbuf)
{
int order = get_order(buf_len);
int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
/* Check if request can be accommodated in previously allocated page */
if (nic->rb_page) {
@@ -96,8 +96,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
netdev_err(nic->netdev,
"Failed to allocate new rcv buffer\n");
nic->drv_stats.rcv_buffer_alloc_failures++;
return -ENOMEM;
}
nic->rb_page_offset = 0;
......
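The hunk above trades a per-buffer get_order(buf_len) allocation for a single
PAGE_ALLOC_COSTLY_ORDER (order-3, i.e. 32KB with 4KB pages) allocation, with
subsequent buffers carved out of the cached page via rb_page_offset, so
alloc_pages() is called far less often. It also turns an allocation failure
into the new rcv_buffer_alloc_failures counter instead of a log line. A hedged,
simplified sketch of the carving idea (hypothetical names, simplified
reference counting):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical buffer pool: one higher-order compound page is carved into
 * several receive buffers so alloc_pages() is hit far less often.
 */
struct example_rbuf_pool {
	struct page	*page;		/* cached compound page, or NULL   */
	unsigned int	offset;		/* next free byte within that page */
};

static void *example_alloc_rcv_buffer(struct example_rbuf_pool *p,
				       gfp_t gfp, unsigned int buf_len)
{
	/* Order 3 (32KB) when pages are 4KB, otherwise a single page */
	unsigned int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
	void *buf;

	/* Get a fresh compound page when none is cached or it is full */
	if (!p->page || p->offset + buf_len > (PAGE_SIZE << order)) {
		if (p->page)
			put_page(p->page);	/* drop the pool's reference */

		p->page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, order);
		if (!p->page)
			return NULL;	/* caller counts rcv_buffer_alloc_failures */
		p->offset = 0;
	}

	/* Carve the next buf_len bytes out of the cached page */
	buf = page_address(p->page) + p->offset;
	p->offset += buf_len;

	/* Each buffer handed out holds its own page reference; the consumer
	 * drops it with put_page() when the buffer is done.
	 */
	get_page(p->page);
	return buf;
}
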
@@ -886,7 +886,8 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
#ifdef CONFIG_ACPI
static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
u8 *dst)
{
u8 mac[ETH_ALEN];
int ret;
@@ -897,10 +898,13 @@ static int acpi_get_mac_address(struct acpi_device *adev, u8 *dst)
goto out;
if (!is_valid_ether_addr(mac)) {
dev_err(dev, "MAC address invalid: %pM\n", mac);
ret = -EINVAL;
goto out;
}
dev_info(dev, "MAC address set to: %pM\n", mac);
memcpy(dst, mac, ETH_ALEN);
out:
return ret;
@@ -911,14 +915,15 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
u32 lvl, void *context, void **rv)
{
struct bgx *bgx = context;
struct device *dev = &bgx->pdev->dev;
struct acpi_device *adev;
if (acpi_bus_get_device(handle, &adev))
goto out;
acpi_get_mac_address(adev, bgx->lmac[bgx->lmac_count].mac);
acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, &bgx->pdev->dev);
SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
out:
@@ -968,26 +973,27 @@ static int bgx_init_acpi_phy(struct bgx *bgx)
static int bgx_init_of_phy(struct bgx *bgx)
{
struct device_node *np;
struct device_node *np_child;
struct fwnode_handle *fwn;
u8 lmac = 0;
char bgx_sel[5];
const char *mac;
/* Get BGX node from DT */
snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
np = of_find_node_by_name(NULL, bgx_sel);
if (!np)
return -ENODEV;
device_for_each_child_node(&bgx->pdev->dev, fwn) {
struct device_node *phy_np;
struct device_node *node = to_of_node(fwn);
for_each_child_of_node(np, np_child) {
struct device_node *phy_np = of_parse_phandle(np_child,
"phy-handle", 0);
/* If it is not an OF node we cannot handle it yet, so
* exit the loop.
*/
if (!node)
break;
phy_np = of_parse_phandle(node, "phy-handle", 0);
if (!phy_np)
continue;
bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
mac = of_get_mac_address(np_child);
mac = of_get_mac_address(node);
if (mac)
ether_addr_copy(bgx->lmac[lmac].mac, mac);
@@ -995,7 +1001,7 @@ static int bgx_init_of_phy(struct bgx *bgx)
bgx->lmac[lmac].lmacid = lmac;
lmac++;
if (lmac == MAX_LMAC_PER_BGX) {
of_node_put(np_child);
of_node_put(node);
break;
}
}
......
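The bgx_init_of_phy() rework above moves from the DT-only
for_each_child_of_node() walk to the fwnode-based device_for_each_child_node(),
which enumerates children described by either device tree or ACPI;
to_of_node() yields NULL for a non-OF child, which is why the loop bails out in
that case for now. A hedged sketch of the generic pattern (hypothetical
function name):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/property.h>

/* Hypothetical walk over a device's firmware-described children using the
 * fwnode API; the same loop covers DT- and ACPI-described children.
 */
static void example_walk_children(struct device *dev)
{
	struct fwnode_handle *fwn;

	device_for_each_child_node(dev, fwn) {
		struct device_node *node = to_of_node(fwn);

		/* Children without an OF node (e.g. ACPI) are not handled by
		 * this DT-specific path yet, so stop the walk.
		 */
		if (!node)
			break;

		/* ... per-child DT parsing, e.g. of_parse_phandle(node, ...) */
	}
}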