Commit 4d192660 authored by Sameeh Jubran, committed by David S. Miller

net: ena: multiple queue creation related cleanups

- Rename ena_calc_queue_size() to ena_calc_io_queue_size() for clarity
  and consistency.
- Remove the redundant number-of-IO-queues parameter from
  ena_enable_msix() and ena_enable_msix_and_set_admin_interrupts();
  both already receive the adapter, so use adapter->num_io_queues
  inside the functions instead (illustrated in the sketch below).
- Use the local variable ena_dev instead of ctx->ena_dev in
  ena_calc_io_queue_size().
- Fix multi-row comment alignment.
Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
Signed-off-by: Sameeh Jubran <sameehj@amazon.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent faa615f9
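The second cleanup above is an instance of a general pattern: a function that already receives a struct should read a value from that struct rather than take the same value again as a separate argument. Below is a minimal, self-contained C sketch of that pattern; the struct and function names are simplified stand-ins for illustration, not the actual ena driver definitions.

#include <stdio.h>

/* Simplified stand-in for struct ena_adapter (illustration only). */
struct adapter {
	int num_io_queues;
};

/* Before: the queue count is passed in even though the adapter is too. */
static int enable_msix_old(struct adapter *adapter, int num_queues)
{
	(void)adapter;
	return num_queues + 1;	/* e.g. one vector per IO queue plus one for management */
}

/* After: derive the queue count from the adapter the function already gets. */
static int enable_msix_new(struct adapter *adapter)
{
	return adapter->num_io_queues + 1;
}

int main(void)
{
	struct adapter a = { .num_io_queues = 4 };

	/* Both forms compute the same vector count, but the new one cannot
	 * drift out of sync with adapter->num_io_queues at the call site.
	 */
	printf("old: %d, new: %d\n",
	       enable_msix_old(&a, a.num_io_queues),
	       enable_msix_new(&a));
	return 0;
}

This is purely illustrative; in the driver itself the value flows through ENA_MAX_MSIX_VEC(adapter->num_io_queues), as the diff below shows.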
@@ -1331,7 +1331,7 @@ static irqreturn_t ena_intr_msix_io(int irq, void *data)
  * the number of potential io queues is the minimum of what the device
  * supports and the number of vCPUs.
  */
-static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
+static int ena_enable_msix(struct ena_adapter *adapter)
 {
 	int msix_vecs, irq_cnt;
@@ -1342,7 +1342,7 @@ static int ena_enable_msix(struct ena_adapter *adapter, int num_queues)
 	}
 	/* Reserved the max msix vectors we might need */
-	msix_vecs = ENA_MAX_MSIX_VEC(num_queues);
+	msix_vecs = ENA_MAX_MSIX_VEC(adapter->num_io_queues);
 	netif_dbg(adapter, probe, adapter->netdev,
 		  "trying to enable MSI-X, vectors %d\n", msix_vecs);
@@ -2682,14 +2682,13 @@ static int ena_device_init(struct ena_com_dev *ena_dev, struct pci_dev *pdev,
 	return rc;
 }
-static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter,
-						     int io_vectors)
+static int ena_enable_msix_and_set_admin_interrupts(struct ena_adapter *adapter)
 {
 	struct ena_com_dev *ena_dev = adapter->ena_dev;
 	struct device *dev = &adapter->pdev->dev;
 	int rc;
-	rc = ena_enable_msix(adapter, io_vectors);
+	rc = ena_enable_msix(adapter);
 	if (rc) {
 		dev_err(dev, "Can not reserve msix vectors\n");
 		return rc;
@@ -2782,8 +2781,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
 		goto err_device_destroy;
 	}
-	rc = ena_enable_msix_and_set_admin_interrupts(adapter,
-						      adapter->num_io_queues);
+	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
 	if (rc) {
 		dev_err(&pdev->dev, "Enable MSI-X failed\n");
 		goto err_device_destroy;
@@ -3349,7 +3347,7 @@ static void set_default_llq_configurations(struct ena_llq_configurations *llq_co
 	llq_config->llq_ring_entry_size_value = 128;
 }
-static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
+static int ena_calc_io_queue_size(struct ena_calc_queue_size_ctx *ctx)
 {
 	struct ena_admin_feature_llq_desc *llq = &ctx->get_feat_ctx->llq;
 	struct ena_com_dev *ena_dev = ctx->ena_dev;
@@ -3358,7 +3356,7 @@ static int ena_calc_queue_size(struct ena_calc_queue_size_ctx *ctx)
 	u32 max_tx_queue_size;
 	u32 max_rx_queue_size;
-	if (ctx->ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+	if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
 		struct ena_admin_queue_ext_feature_fields *max_queue_ext =
 			&ctx->get_feat_ctx->max_queue_ext.max_queue_ext;
 		max_rx_queue_size = min_t(u32, max_queue_ext->max_rx_cq_depth,
@@ -3497,25 +3495,18 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	calc_queue_ctx.pdev = pdev;
 	/* Initial Tx and RX interrupt delay. Assumes 1 usec granularity.
 	 * Updated during device initialization with the real granularity
 	 */
 	ena_dev->intr_moder_tx_interval = ENA_INTR_INITIAL_TX_INTERVAL_USECS;
 	ena_dev->intr_moder_rx_interval = ENA_INTR_INITIAL_RX_INTERVAL_USECS;
 	ena_dev->intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
 	io_queue_num = ena_calc_io_queue_num(pdev, ena_dev, &get_feat_ctx);
-	rc = ena_calc_queue_size(&calc_queue_ctx);
+	rc = ena_calc_io_queue_size(&calc_queue_ctx);
 	if (rc || io_queue_num <= 0) {
 		rc = -EFAULT;
 		goto err_device_destroy;
 	}
-	dev_info(&pdev->dev, "creating %d io queues. rx queue size: %d tx queue size. %d LLQ is %s\n",
-		 io_queue_num,
-		 calc_queue_ctx.rx_queue_size,
-		 calc_queue_ctx.tx_queue_size,
-		 (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) ?
-		 "ENABLED" : "DISABLED");
 	/* dev zeroed in init_etherdev */
 	netdev = alloc_etherdev_mq(sizeof(struct ena_adapter), io_queue_num);
 	if (!netdev) {
@@ -3569,7 +3560,7 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	u64_stats_init(&adapter->syncp);
-	rc = ena_enable_msix_and_set_admin_interrupts(adapter, io_queue_num);
+	rc = ena_enable_msix_and_set_admin_interrupts(adapter);
 	if (rc) {
 		dev_err(&pdev->dev,
 			"Failed to enable and set the admin interrupts\n");