Commit 515e752d authored by Raghu Vatsavayi, committed by David S. Miller

liquidio CN23XX: device states

Cleaned up resource leaks in octeon_destroy_resources() by
introducing more device states.
Signed-off-by: Raghu Vatsavayi <raghu.vatsavayi@caviumnetworks.com>
Signed-off-by: Derek Chickles <derek.chickles@caviumnetworks.com>
Signed-off-by: Satanand Burla <satananda.burla@caviumnetworks.com>
Signed-off-by: Felix Manlunas <felix.manlunas@caviumnetworks.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 86dea55b
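
For readers outside the driver, the shape of the fix is easier to see in isolation: initialization records how far it got in a device-state variable, and teardown switches on that state and falls through, so each resource is released exactly once no matter where init stopped. Below is a minimal, self-contained sketch of that pattern; the states, fields, and helper names are hypothetical stand-ins, not the liquidio definitions.

/*
 * Sketch only: init records its progress in a state variable, and
 * teardown switches on that state, falling through so every completed
 * stage is undone exactly once.  Hypothetical names, not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

enum dev_state {
	DEV_BEGIN_STATE,	/* nothing allocated yet */
	DEV_STAGE1_DONE,	/* first resource acquired */
	DEV_STAGE2_DONE,	/* second resource acquired */
	DEV_RUNNING,
};

struct dev {
	enum dev_state state;
	void *stage1;
	void *stage2;
};

static int dev_init(struct dev *d, int fail_after)
{
	d->state = DEV_BEGIN_STATE;

	if (fail_after < 1 || !(d->stage1 = malloc(64)))
		return -1;
	d->state = DEV_STAGE1_DONE;	/* record progress before moving on */

	if (fail_after < 2 || !(d->stage2 = malloc(64)))
		return -1;
	d->state = DEV_STAGE2_DONE;

	d->state = DEV_RUNNING;
	return 0;
}

static void dev_destroy(struct dev *d)
{
	switch (d->state) {
	case DEV_RUNNING:
		/* fallthrough */
	case DEV_STAGE2_DONE:
		free(d->stage2);	/* only reached if stage 2 completed */
		/* fallthrough */
	case DEV_STAGE1_DONE:
		free(d->stage1);
		/* fallthrough */
	case DEV_BEGIN_STATE:
		break;			/* nothing to undo */
	}
}

int main(void)
{
	struct dev d = { 0 };

	/* Simulate init failing after stage 1; teardown still frees stage 1. */
	if (dev_init(&d, 1))
		fprintf(stderr, "init failed in state %d\n", d.state);
	dev_destroy(&d);
	return 0;
}

The same idea drives the hunks below: each new OCT_DEV_*_DONE state marks another stage that octeon_destroy_resources() knows how to unwind.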
@@ -770,6 +770,7 @@ static void delete_glists(struct lio *lio)
 	}
 
 	kfree((void *)lio->glist);
+	kfree((void *)lio->glist_lock);
 }
 
 /**
@@ -1329,6 +1330,7 @@ liquidio_probe(struct pci_dev *pdev,
 	complete(&first_stage);
 
 	if (octeon_device_init(oct_dev)) {
+		complete(&hs->init);
 		liquidio_remove(pdev);
 		return -ENOMEM;
 	}
@@ -1353,7 +1355,15 @@ liquidio_probe(struct pci_dev *pdev,
 			oct_dev->watchdog_task = kthread_create(
 			    liquidio_watchdog, oct_dev,
 			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
-			wake_up_process(oct_dev->watchdog_task);
+			if (!IS_ERR(oct_dev->watchdog_task)) {
+				wake_up_process(oct_dev->watchdog_task);
+			} else {
+				oct_dev->watchdog_task = NULL;
+				dev_err(&oct_dev->pci_dev->dev,
+					"failed to create kernel_thread\n");
+				liquidio_remove(pdev);
+				return -1;
+			}
 		}
 	}
@@ -1417,6 +1427,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		if (lio_wait_for_oq_pkts(oct))
 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
 
+		/* fallthrough */
+	case OCT_DEV_INTR_SET_DONE:
 		/* Disable interrupts  */
 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
@@ -1443,6 +1455,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 				pci_disable_msi(oct->pci_dev);
 		}
 
+		/* fallthrough */
+	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
 		if (OCTEON_CN23XX_PF(oct))
 			octeon_free_ioq_vector(oct);
@@ -1508,10 +1522,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		octeon_unmap_pci_barx(oct, 1);
 
 		/* fallthrough */
-	case OCT_DEV_BEGIN_STATE:
+	case OCT_DEV_PCI_ENABLE_DONE:
+		pci_clear_master(oct->pci_dev);
 		/* Disable the device, releasing the PCI INT */
 		pci_disable_device(oct->pci_dev);
 
+		/* fallthrough */
+	case OCT_DEV_BEGIN_STATE:
 		/* Nothing to be done here either */
 		break;
 	}				/* end switch (oct->status) */
@@ -1781,6 +1798,7 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
 	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
 		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
+		pci_disable_device(oct->pci_dev);
 		return 1;
 	}
@@ -4434,6 +4452,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 	if (octeon_pci_os_setup(octeon_dev))
 		return 1;
 
+	atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
+
 	/* Identify the Octeon type and map the BAR address space. */
 	if (octeon_chip_specific_setup(octeon_dev)) {
 		dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
@@ -4505,9 +4525,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 	if (octeon_setup_instr_queues(octeon_dev)) {
 		dev_err(&octeon_dev->pci_dev->dev,
 			"instruction queue initialization failed\n");
-		/* On error, release any previously allocated queues */
-		for (j = 0; j < octeon_dev->num_iqs; j++)
-			octeon_delete_instr_queue(octeon_dev, j);
 		return 1;
 	}
 	atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
@@ -4523,9 +4540,6 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 	if (octeon_setup_output_queues(octeon_dev)) {
 		dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
-		/* Release any previously allocated queues */
-		for (j = 0; j < octeon_dev->num_oqs; j++)
-			octeon_delete_droq(octeon_dev, j);
 		return 1;
 	}
@@ -4542,6 +4556,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 			dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
 			return 1;
 		}
+		atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
 	} else {
 		/* The input and output queue registers were setup earlier (the
@@ -4569,6 +4584,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
 	/* Enable Octeon device interrupts */
 	octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
 
+	atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
+
 	/* Enable the input and output queues for this Octeon device */
 	ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
 	if (ret) {
@@ -822,6 +822,7 @@ int octeon_setup_instr_queues(struct octeon_device *oct)
 	if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
 		/* prevent memory leak */
 		vfree(oct->instr_queue[0]);
+		oct->instr_queue[0] = NULL;
 		return 1;
 	}
@@ -854,8 +855,11 @@ int octeon_setup_output_queues(struct octeon_device *oct)
 	if (!oct->droq[0])
 		return 1;
 
-	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+	if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL)) {
+		vfree(oct->droq[oq_no]);
+		oct->droq[oq_no] = NULL;
 		return 1;
+	}
 	oct->num_oqs++;
 
 	return 0;
@@ -72,20 +72,23 @@ enum octeon_pci_swap_mode {
  * as it is initialized.
  */
 #define OCT_DEV_BEGIN_STATE                   0x0
-#define OCT_DEV_PCI_MAP_DONE                  0x1
-#define OCT_DEV_DISPATCH_INIT_DONE            0x2
-#define OCT_DEV_INSTR_QUEUE_INIT_DONE         0x3
-#define OCT_DEV_SC_BUFF_POOL_INIT_DONE        0x4
-#define OCT_DEV_RESP_LIST_INIT_DONE           0x5
-#define OCT_DEV_DROQ_INIT_DONE                0x6
+#define OCT_DEV_PCI_ENABLE_DONE               0x1
+#define OCT_DEV_PCI_MAP_DONE                  0x2
+#define OCT_DEV_DISPATCH_INIT_DONE            0x3
+#define OCT_DEV_INSTR_QUEUE_INIT_DONE         0x4
+#define OCT_DEV_SC_BUFF_POOL_INIT_DONE        0x5
+#define OCT_DEV_RESP_LIST_INIT_DONE           0x6
+#define OCT_DEV_DROQ_INIT_DONE                0x7
 #define OCT_DEV_MBOX_SETUP_DONE               0x8
-#define OCT_DEV_IO_QUEUES_DONE                0x9
-#define OCT_DEV_CONSOLE_INIT_DONE             0xa
-#define OCT_DEV_HOST_OK                       0xb
-#define OCT_DEV_CORE_OK                       0xc
-#define OCT_DEV_RUNNING                       0xd
-#define OCT_DEV_IN_RESET                      0xe
-#define OCT_DEV_STATE_INVALID                 0xf
+#define OCT_DEV_MSIX_ALLOC_VECTOR_DONE        0x9
+#define OCT_DEV_INTR_SET_DONE                 0xa
+#define OCT_DEV_IO_QUEUES_DONE                0xb
+#define OCT_DEV_CONSOLE_INIT_DONE             0xc
+#define OCT_DEV_HOST_OK                       0xd
+#define OCT_DEV_CORE_OK                       0xe
+#define OCT_DEV_RUNNING                       0xf
+#define OCT_DEV_IN_RESET                      0x10
+#define OCT_DEV_STATE_INVALID                 0x11
 #define OCT_DEV_STATES                        OCT_DEV_STATE_INVALID
@@ -988,7 +988,8 @@ int octeon_create_droq(struct octeon_device *oct,
 	if (!droq)
 		droq = vmalloc(sizeof(*droq));
 	if (!droq)
-		goto create_droq_fail;
+		return -1;
+
 	memset(droq, 0, sizeof(struct octeon_droq));
 
 	/*Disable the pkt o/p for this Q  */
@@ -996,7 +997,11 @@ int octeon_create_droq(struct octeon_device *oct,
 	oct->droq[q_no] = droq;
 
 	/* Initialize the Droq */
-	octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx);
+	if (octeon_init_droq(oct, q_no, num_descs, desc_size, app_ctx)) {
+		vfree(oct->droq[q_no]);
+		oct->droq[q_no] = NULL;
+		return -1;
+	}
 
 	oct->num_oqs++;
@@ -1009,8 +1014,4 @@ int octeon_create_droq(struct octeon_device *oct,
 	 * the same time.
 	 */
 	return 0;
-
-create_droq_fail:
-	octeon_delete_droq(oct, q_no);
-	return -ENOMEM;
 }
@@ -116,7 +116,7 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
 	mapped_len = oct->mmio[baridx].len;
 	if (!mapped_len)
-		return 1;
+		goto err_release_region;
 
 	if (max_map_len && (mapped_len > max_map_len))
 		mapped_len = max_map_len;
@@ -132,11 +132,15 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
 	if (!oct->mmio[baridx].hw_addr) {
 		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
 			baridx);
-		return 1;
+		goto err_release_region;
 	}
 	oct->mmio[baridx].done = 1;
 
 	return 0;
+
+err_release_region:
+	pci_release_region(oct->pci_dev, baridx * 2);
+	return 1;
 }
 
 static inline void *
@@ -157,6 +157,8 @@ int octeon_init_instr_queue(struct octeon_device *oct,
 					 WQ_MEM_RECLAIM,
 					 0);
 	if (!oct->check_db_wq[iq_no].wq) {
+		vfree(iq->request_list);
+		iq->request_list = NULL;
 		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
 		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
 			iq_no);
@@ -749,8 +751,10 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
 			lio_dma_alloc(oct,
 				      SOFT_COMMAND_BUFFER_SIZE,
 				      (dma_addr_t *)&dma_addr);
-		if (!sc)
+		if (!sc) {
+			octeon_free_sc_buffer_pool(oct);
 			return 1;
+		}
 
 		sc->dma_addr = dma_addr;
 		sc->size = SOFT_COMMAND_BUFFER_SIZE;