Commit 96ae48b7 authored by Raghu Vatsavayi, committed by David S. Miller

liquidio: RX queue alloc changes

This patch allocates each RX queue's memory on its local NUMA node and also
uses page-based buffers to improve RX traffic performance.
Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fcd2b5e3
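
Every allocation site in this patch derives its target NUMA node the same way: the queue index is mapped onto an online CPU, and that CPU's node is used. A minimal sketch of the mapping, with a hypothetical helper name (the patch open-codes the expression at each call site):

#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/types.h>

/* Hypothetical helper: the NUMA node of the CPU a queue index maps to.
 * The patch open-codes this as cpu_to_node(q_no % num_online_cpus()).
 */
static inline int lio_queue_to_node(u32 q_no)
{
	return cpu_to_node(q_no % num_online_cpus());
}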
drivers/net/ethernet/cavium/liquidio/octeon_device.c

@@ -783,14 +783,15 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
 int octeon_setup_output_queues(struct octeon_device *oct)
 {
-	u32 i, num_oqs = 0;
+	u32 num_oqs = 0;
 	u32 num_descs = 0;
 	u32 desc_size = 0;
+	u32 oq_no = 0;
+	int numa_node = cpu_to_node(oq_no % num_online_cpus());
 
+	num_oqs = 1;
 	/* this causes queue 0 to be default queue */
 	if (OCTEON_CN6XXX(oct)) {
-		/* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
-		num_oqs = 1;
 		num_descs =
 		    CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
 		desc_size =
@@ -798,19 +799,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
 	}
 
 	oct->num_oqs = 0;
+	oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
+	if (!oct->droq[0])
+		oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
+	if (!oct->droq[0])
+		return 1;
 
-	for (i = 0; i < num_oqs; i++) {
-		oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
-		if (!oct->droq[i])
-			return 1;
-
-		memset(oct->droq[i], 0, sizeof(struct octeon_droq));
-
-		if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
-			return 1;
-
-		oct->num_oqs++;
-	}
+	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+		return 1;
+	oct->num_oqs++;
 
 	return 0;
 }
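The two-step allocation above, node-local first and unconstrained second, is the idiom this patch applies to every RX-path buffer: a busy or small node degrades locality instead of failing device bring-up. A standalone sketch of the idiom, with a hypothetical helper name:

#include <linux/vmalloc.h>

/* Prefer memory on the given NUMA node, but accept any node rather
 * than failing outright when node-local memory is tight.
 */
static void *valloc_prefer_node(unsigned long size, int node)
{
	void *p = vmalloc_node(size, node);	/* node-local attempt */

	if (!p)
		p = vmalloc(size);		/* any-node fallback */
	return p;
}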
drivers/net/ethernet/cavium/liquidio/octeon_droq.c

@@ -242,6 +242,8 @@ int octeon_init_droq(struct octeon_device *oct,
 	struct octeon_droq *droq;
 	u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
 	u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+	int orig_node = dev_to_node(&oct->pci_dev->dev);
+	int numa_node = cpu_to_node(q_no % num_online_cpus());
 
 	dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
@@ -261,15 +263,23 @@ int octeon_init_droq(struct octeon_device *oct,
 		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
 
 		c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
-		c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+		c_refill_threshold =
+			(u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+	} else {
+		return 1;
 	}
 
 	droq->max_count = c_num_descs;
 	droq->buffer_size = c_buf_size;
 
 	desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+	set_dev_node(&oct->pci_dev->dev, numa_node);
 	droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
 					(dma_addr_t *)&droq->desc_ring_dma);
+	set_dev_node(&oct->pci_dev->dev, orig_node);
+	if (!droq->desc_ring)
+		droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+					(dma_addr_t *)&droq->desc_ring_dma);
 
 	if (!droq->desc_ring) {
 		dev_err(&oct->pci_dev->dev,
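The set_dev_node() bracket above is how the patch steers lio_dma_alloc(), a wrapper around dma_alloc_coherent(), toward the queue's node: coherent DMA allocations follow the device's node, so the device is temporarily repointed and then restored, with a second unconstrained attempt if the node-local one fails. A sketch of that pattern against a generic struct device (the helper name is illustrative):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate a coherent DMA buffer, preferring a given NUMA node by
 * temporarily retargeting the device's node.
 */
static void *dma_alloc_prefer_node(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, int node)
{
	int orig_node = dev_to_node(dev);
	void *buf;

	set_dev_node(dev, node);		/* steer the allocator */
	buf = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	set_dev_node(dev, orig_node);		/* always restore */

	if (!buf)				/* any-node fallback */
		buf = dma_alloc_coherent(dev, size, dma_handle, GFP_KERNEL);
	return buf;
}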
@@ -283,12 +293,11 @@ int octeon_init_droq(struct octeon_device *oct,
 		droq->max_count);
 
 	droq->info_list =
-		cnnic_alloc_aligned_dma(oct->pci_dev,
-					(droq->max_count * OCT_DROQ_INFO_SIZE),
-					&droq->info_alloc_size,
-					&droq->info_base_addr,
-					&droq->info_list_dma);
+		cnnic_numa_alloc_aligned_dma((droq->max_count *
+					      OCT_DROQ_INFO_SIZE),
+					     &droq->info_alloc_size,
+					     &droq->info_base_addr,
+					     numa_node);
 
 	if (!droq->info_list) {
 		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
 		lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -297,7 +306,12 @@ int octeon_init_droq(struct octeon_device *oct,
 	}
 
 	droq->recv_buf_list = (struct octeon_recv_buffer *)
-			      vmalloc(droq->max_count *
-					OCT_DROQ_RECVBUF_SIZE);
+			      vmalloc_node(droq->max_count *
+						OCT_DROQ_RECVBUF_SIZE,
+						numa_node);
+	if (!droq->recv_buf_list)
+		droq->recv_buf_list = (struct octeon_recv_buffer *)
+				      vmalloc(droq->max_count *
+						OCT_DROQ_RECVBUF_SIZE);
 	if (!droq->recv_buf_list) {
 		dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
@@ -949,6 +963,7 @@ int octeon_create_droq(struct octeon_device *oct,
 		       u32 desc_size, void *app_ctx)
 {
 	struct octeon_droq *droq;
+	int numa_node = cpu_to_node(q_no % num_online_cpus());
 
 	if (oct->droq[q_no]) {
 		dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,7 +972,9 @@ int octeon_create_droq(struct octeon_device *oct,
 	}
 
 	/* Allocate the DS for the new droq. */
-	droq = vmalloc(sizeof(*droq));
+	droq = vmalloc_node(sizeof(*droq), numa_node);
+	if (!droq)
+		droq = vmalloc(sizeof(*droq));
 	if (!droq)
 		goto create_droq_fail;
 	memset(droq, 0, sizeof(struct octeon_droq));
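One small observation on the hunk above: octeon_create_droq() still memsets the structure after allocating it, so the vmalloc_node()/vmalloc() pair plus memset() could be collapsed using the zeroing variants, assuming a kernel that provides vzalloc_node() (mainline has since 2.6.37). A sketch:

#include <linux/vmalloc.h>

/* Zeroed, node-preferred allocation; subsumes vmalloc_node() + memset(). */
static void *vzalloc_prefer_node(unsigned long size, int node)
{
	void *p = vzalloc_node(size, node);

	if (!p)
		p = vzalloc(size);
	return p;
}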
drivers/net/ethernet/cavium/liquidio/octeon_main.h

@@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
 }
 
 static inline void *
-cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
-			u32 size,
-			u32 *alloc_size,
-			size_t *orig_ptr,
-			size_t *dma_addr __attribute__((unused)))
+cnnic_numa_alloc_aligned_dma(u32 size,
+			     u32 *alloc_size,
+			     size_t *orig_ptr,
+			     int numa_node)
 {
 	int retries = 0;
 	void *ptr = NULL;
 
 #define OCTEON_MAX_ALLOC_RETRIES     1
 	do {
-		ptr =
-		    (void *)__get_free_pages(GFP_KERNEL,
-					     get_order(size));
+		struct page *page = NULL;
+
+		page = alloc_pages_node(numa_node,
+					GFP_KERNEL,
+					get_order(size));
+		if (!page)
+			page = alloc_pages(GFP_KERNEL,
+					   get_order(size));
+		ptr = (void *)page_address(page);
 		if ((unsigned long)ptr & 0x07) {
-			free_pages((unsigned long)ptr, get_order(size));
+			__free_pages(page, get_order(size));
 			ptr = NULL;
 			/* Increment the size required if the first
 			 * attempt failed.
 			 */
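The rewritten helper prefers node-local pages, falls back to any node, and keeps the old retry loop that grows the request if the returned address is not 8-byte aligned (page allocations are page-aligned in practice, so that check is defensive). Note, though, that page_address() runs without a NULL check after the fallback; a more defensive sketch of the allocation step, with an illustrative helper name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Node-preferred page allocation with an any-node fallback and an
 * explicit NULL check before taking the virtual address.
 */
static void *pages_alloc_prefer_node(int node, unsigned int order)
{
	struct page *page = alloc_pages_node(node, GFP_KERNEL, order);

	if (!page)
		page = alloc_pages(GFP_KERNEL, order);
	if (!page)			/* both attempts failed */
		return NULL;
	return page_address(page);	/* linear-map virtual address */
}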