Commit 25d0dbcb authored by Michal Kazior, committed by Kalle Valo

ath10k: split ce initialization and allocation

The definitions from which copy engine structures
are allocated do not change, so it doesn't make
much sense to re-create those structures each time
the device is booted (e.g. due to firmware
recovery).

This should decrease the chance of memory
allocation failures.
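The change boils down to "allocate once at probe time, initialize on every boot". Below is a minimal, self-contained userspace C sketch of that lifecycle; the struct pipe and pipe_* helpers are hypothetical stand-ins for the driver's alloc/init/free split, not the driver's actual code:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Hypothetical stand-in for a copy engine pipe. */
	struct pipe {
		unsigned int nentries;
		void **ring;		/* allocated once, reused across reboots */
	};

	/* Probe time: allocate everything whose size never changes. */
	static int pipe_alloc(struct pipe *p, unsigned int nentries)
	{
		p->nentries = nentries;
		p->ring = calloc(nentries, sizeof(*p->ring));
		return p->ring ? 0 : -1;
	}

	/* Boot / firmware-recovery time: reset state only, never allocate. */
	static void pipe_init(struct pipe *p)
	{
		memset(p->ring, 0, p->nentries * sizeof(*p->ring));
	}

	/* Driver removal: free what probe allocated. */
	static void pipe_free(struct pipe *p)
	{
		free(p->ring);
		p->ring = NULL;
	}

	int main(void)
	{
		struct pipe p;

		if (pipe_alloc(&p, 8))	/* probe             -> alloc        */
			return 1;
		pipe_init(&p);		/* first boot        -> init         */
		pipe_init(&p);		/* firmware recovery -> re-init only */
		pipe_free(&p);		/* remove            -> free         */

		puts("allocated once, initialized twice");
		return 0;
	}

Because the recovery path only re-initializes memory that already exists, it no longer needs to allocate and therefore cannot hit an allocation failure at that point.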

While at it, remove the per_transfer_context
pointer indirection. The array has been trailing
the copy engine ringbuffer structure anyway. This
also saves a pointer's worth of bytes for each
copy engine ringbuffer.
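A short userspace C sketch of that layout change, with illustrative struct names (the real structure is struct ath10k_ce_ring in ce.h): the separately stored void ** pointer is replaced by a trailing array member, so a single allocation covers the header and the per-entry context slots.

	#include <stdlib.h>

	/* Before: the context array still sat behind an extra pointer field. */
	struct ring_old {
		unsigned int nentries;
		void **per_transfer_context;	/* pointed just past the struct */
	};

	/* After: a trailing array member, no pointer field needed.
	 * (The patch itself uses the older zero-length form: void *ctx[0].) */
	struct ring_new {
		unsigned int nentries;
		void *per_transfer_context[];	/* keep last */
	};

	static struct ring_new *ring_alloc(unsigned int nentries)
	{
		/* One allocation covers the header plus nentries context slots. */
		struct ring_new *r;

		r = calloc(1, sizeof(*r) +
			      nentries * sizeof(*r->per_transfer_context));
		if (r)
			r->nentries = nentries;
		return r;
	}

	int main(void)
	{
		struct ring_new *r = ring_alloc(8);

		if (!r)
			return 1;
		r->per_transfer_context[0] = r;	/* use a slot */
		free(r);
		return 0;
	}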
Reported-by: Avery Pennarun <apenwarr@gmail.com>
Signed-off-by: Michal Kazior <michal.kazior@tieto.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 68c03249
@@ -840,34 +840,17 @@ void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
 static int ath10k_ce_init_src_ring(struct ath10k *ar,
 				   unsigned int ce_id,
-				   struct ath10k_ce_pipe *ce_state,
 				   const struct ce_attr *attr)
 {
-	struct ath10k_ce_ring *src_ring;
-	unsigned int nentries = attr->src_nentries;
-	unsigned int ce_nbytes;
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-	dma_addr_t base_addr;
-	char *ptr;
-
-	nentries = roundup_pow_of_two(nentries);
-
-	if (ce_state->src_ring) {
-		WARN_ON(ce_state->src_ring->nentries != nentries);
-		return 0;
-	}
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
 
-	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-	if (ptr == NULL)
-		return -ENOMEM;
+	nentries = roundup_pow_of_two(attr->src_nentries);
 
-	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
-	src_ring = ce_state->src_ring;
-
-	ptr += sizeof(struct ath10k_ce_ring);
-	src_ring->nentries = nentries;
-	src_ring->nentries_mask = nentries - 1;
+	memset(src_ring->per_transfer_context, 0,
+	       nentries * sizeof(*src_ring->per_transfer_context));
 
 	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
 	src_ring->sw_index &= src_ring->nentries_mask;
@@ -877,7 +860,74 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
 	src_ring->write_index &= src_ring->nentries_mask;
 
-	src_ring->per_transfer_context = (void **)ptr;
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+					 src_ring->base_addr_ce_space);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
+	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot init ce src ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, src_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static int ath10k_ce_init_dest_ring(struct ath10k *ar,
+				    unsigned int ce_id,
+				    const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
+	u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	memset(dest_ring->per_transfer_context, 0,
+	       nentries * sizeof(*dest_ring->per_transfer_context));
+
+	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
+	dest_ring->sw_index &= dest_ring->nentries_mask;
+	dest_ring->write_index =
+		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
+	dest_ring->write_index &= dest_ring->nentries_mask;
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+					  dest_ring->base_addr_ce_space);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
+	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
+
+	ath10k_dbg(ATH10K_DBG_BOOT,
+		   "boot ce dest ring id %d entries %d base_addr %p\n",
+		   ce_id, nentries, dest_ring->base_addr_owner_space);
+
+	return 0;
+}
+
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_ce_ring *src_ring;
+	u32 nentries = attr->src_nentries;
+	dma_addr_t base_addr;
+
+	nentries = roundup_pow_of_two(nentries);
+
+	src_ring = kzalloc(sizeof(*src_ring) +
+			   (nentries *
+			    sizeof(*src_ring->per_transfer_context)),
+			   GFP_KERNEL);
+	if (src_ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	src_ring->nentries = nentries;
+	src_ring->nentries_mask = nentries - 1;
 
 	/*
 	 * Legacy platforms that do not support cache
@@ -889,9 +939,8 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 						       CE_DESC_RING_ALIGN),
 				  &base_addr, GFP_KERNEL);
 	if (!src_ring->base_addr_owner_space_unaligned) {
-		kfree(ce_state->src_ring);
-		ce_state->src_ring = NULL;
-		return -ENOMEM;
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	src_ring->base_addr_ce_space_unaligned = base_addr;
@@ -916,69 +965,37 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
 						  CE_DESC_RING_ALIGN),
 				  src_ring->base_addr_owner_space,
 				  src_ring->base_addr_ce_space);
-		kfree(ce_state->src_ring);
-		ce_state->src_ring = NULL;
-		return -ENOMEM;
+		kfree(src_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	src_ring->shadow_base = PTR_ALIGN(
 			src_ring->shadow_base_unaligned,
 			CE_DESC_RING_ALIGN);
 
-	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
-					 src_ring->base_addr_ce_space);
-	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
-	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
-	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
-	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
-	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
-
-	ath10k_dbg(ATH10K_DBG_BOOT,
-		   "boot ce src ring id %d entries %d base_addr %p\n",
-		   ce_id, nentries, src_ring->base_addr_owner_space);
-
-	return 0;
+	return src_ring;
 }
 
-static int ath10k_ce_init_dest_ring(struct ath10k *ar,
-				    unsigned int ce_id,
-				    struct ath10k_ce_pipe *ce_state,
-				    const struct ce_attr *attr)
+static struct ath10k_ce_ring *
+ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
+			  const struct ce_attr *attr)
 {
 	struct ath10k_ce_ring *dest_ring;
-	unsigned int nentries = attr->dest_nentries;
-	unsigned int ce_nbytes;
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+	u32 nentries;
 	dma_addr_t base_addr;
-	char *ptr;
 
-	nentries = roundup_pow_of_two(nentries);
-
-	if (ce_state->dest_ring) {
-		WARN_ON(ce_state->dest_ring->nentries != nentries);
-		return 0;
-	}
-
-	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
-	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
-	if (ptr == NULL)
-		return -ENOMEM;
-
-	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
-	dest_ring = ce_state->dest_ring;
-
-	ptr += sizeof(struct ath10k_ce_ring);
+	nentries = roundup_pow_of_two(attr->dest_nentries);
+
+	dest_ring = kzalloc(sizeof(*dest_ring) +
+			    (nentries *
+			     sizeof(*dest_ring->per_transfer_context)),
+			    GFP_KERNEL);
+	if (dest_ring == NULL)
+		return ERR_PTR(-ENOMEM);
+
 	dest_ring->nentries = nentries;
 	dest_ring->nentries_mask = nentries - 1;
 
-	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
-	dest_ring->sw_index &= dest_ring->nentries_mask;
-	dest_ring->write_index =
-		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
-	dest_ring->write_index &= dest_ring->nentries_mask;
-
-	dest_ring->per_transfer_context = (void **)ptr;
-
 	/*
 	 * Legacy platforms that do not support cache
 	 * coherent DMA are unsupported
@@ -989,9 +1006,8 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 						       CE_DESC_RING_ALIGN),
 				  &base_addr, GFP_KERNEL);
 	if (!dest_ring->base_addr_owner_space_unaligned) {
-		kfree(ce_state->dest_ring);
-		ce_state->dest_ring = NULL;
-		return -ENOMEM;
+		kfree(dest_ring);
+		return ERR_PTR(-ENOMEM);
 	}
 
 	dest_ring->base_addr_ce_space_unaligned = base_addr;
@@ -1010,39 +1026,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
 					dest_ring->base_addr_ce_space_unaligned,
 					CE_DESC_RING_ALIGN);
 
-	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
-					  dest_ring->base_addr_ce_space);
-	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
-	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
-	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
-	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
-
-	ath10k_dbg(ATH10K_DBG_BOOT,
-		   "boot ce dest ring id %d entries %d base_addr %p\n",
-		   ce_id, nentries, dest_ring->base_addr_owner_space);
-
-	return 0;
-}
-
-static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
-						   unsigned int ce_id,
-						   const struct ce_attr *attr)
-{
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
-	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
-
-	spin_lock_bh(&ar_pci->ce_lock);
-	ce_state->ar = ar;
-	ce_state->id = ce_id;
-	ce_state->ctrl_addr = ctrl_addr;
-	ce_state->attr_flags = attr->flags;
-	ce_state->src_sz_max = attr->src_sz_max;
-	spin_unlock_bh(&ar_pci->ce_lock);
-
-	return ce_state;
+	return dest_ring;
 }
 
 /*
@@ -1052,11 +1036,11 @@ static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
  * initialization. It may be that only one side or the other is
  * initialized by software/firmware.
  */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-				      unsigned int ce_id,
-				      const struct ce_attr *attr)
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+			const struct ce_attr *attr)
 {
-	struct ath10k_ce_pipe *ce_state;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 	int ret;
 
 	/*
@@ -1072,44 +1056,109 @@ struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
 
 	ret = ath10k_pci_wake(ar);
 	if (ret)
-		return NULL;
+		return ret;
 
-	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
-	if (!ce_state) {
-		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
-		goto out;
-	}
+	spin_lock_bh(&ar_pci->ce_lock);
+	ce_state->ar = ar;
+	ce_state->id = ce_id;
+	ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
+	ce_state->attr_flags = attr->flags;
+	ce_state->src_sz_max = attr->src_sz_max;
+	spin_unlock_bh(&ar_pci->ce_lock);
 
 	if (attr->src_nentries) {
-		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
+		ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
 		if (ret) {
 			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
 				   ce_id, ret);
-			ath10k_ce_deinit(ce_state);
-			ce_state = NULL;
 			goto out;
 		}
 	}
 
 	if (attr->dest_nentries) {
-		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
+		ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
 		if (ret) {
 			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
 				   ce_id, ret);
-			ath10k_ce_deinit(ce_state);
-			ce_state = NULL;
 			goto out;
 		}
 	}
 
 out:
 	ath10k_pci_sleep(ar);
-	return ce_state;
+
+	return ret;
 }
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
+static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
+	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
+{
+	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
+
+	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
+	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
+}
+
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
+{
+	int ret;
+
+	ret = ath10k_pci_wake(ar);
+	if (ret)
+		return;
+
+	ath10k_ce_deinit_src_ring(ar, ce_id);
+	ath10k_ce_deinit_dest_ring(ar, ce_id);
+
+	ath10k_pci_sleep(ar);
+}
+
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr)
+{
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
+	int ret;
+
+	if (attr->src_nentries) {
+		ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
+		if (IS_ERR(ce_state->src_ring)) {
+			ret = PTR_ERR(ce_state->src_ring);
+			ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->src_ring = NULL;
+			return ret;
+		}
+	}
+
+	if (attr->dest_nentries) {
+		ce_state->dest_ring = ath10k_ce_alloc_dest_ring(ar, ce_id,
+								attr);
+		if (IS_ERR(ce_state->dest_ring)) {
+			ret = PTR_ERR(ce_state->dest_ring);
+			ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+				   ce_id, ret);
+			ce_state->dest_ring = NULL;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
 {
-	struct ath10k *ar = ce_state->ar;
+	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
 
 	if (ce_state->src_ring) {
 		kfree(ce_state->src_ring->shadow_base_unaligned);
......
@@ -104,7 +104,8 @@ struct ath10k_ce_ring {
 	void *shadow_base_unaligned;
 	struct ce_desc *shadow_base;
 
-	void **per_transfer_context;
+	/* keep last */
+	void *per_transfer_context[0];
 };
 
 struct ath10k_ce_pipe {
@@ -210,10 +211,12 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
 
 /*==================CE Engine Initialization=======================*/
 
-/* Initialize an instance of a CE */
-struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
-				      unsigned int ce_id,
-				      const struct ce_attr *attr);
+int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
+			const struct ce_attr *attr);
+void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
+int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
+			 const struct ce_attr *attr);
+void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
 
 /*==================CE Engine Shutdown=======================*/
 /*
@@ -236,8 +239,6 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 			       unsigned int *nbytesp,
 			       unsigned int *transfer_idp);
 
-void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state);
-
 /*==================CE Interrupt Handlers====================*/
 void ath10k_ce_per_engine_service_any(struct ath10k *ar);
 void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id);
......
@@ -1258,18 +1258,10 @@ static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
 
 static void ath10k_pci_ce_deinit(struct ath10k *ar)
 {
-	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-	struct ath10k_pci_pipe *pipe_info;
-	int pipe_num;
+	int i;
 
-	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
-		pipe_info = &ar_pci->pipe_info[pipe_num];
-		if (pipe_info->ce_hdl) {
-			ath10k_ce_deinit(pipe_info->ce_hdl);
-			pipe_info->ce_hdl = NULL;
-			pipe_info->buf_sz = 0;
-		}
-	}
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_ce_deinit_pipe(ar, i);
 }
 
 static void ath10k_pci_hif_stop(struct ath10k *ar)
@@ -1722,30 +1714,49 @@ static int ath10k_pci_init_config(struct ath10k *ar)
 	return 0;
 }
 
+static int ath10k_pci_alloc_ce(struct ath10k *ar)
+{
+	int i, ret;
+
+	for (i = 0; i < CE_COUNT; i++) {
+		ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
+		if (ret) {
+			ath10k_err("failed to allocate copy engine pipe %d: %d\n",
+				   i, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void ath10k_pci_free_ce(struct ath10k *ar)
+{
+	int i;
+
+	for (i = 0; i < CE_COUNT; i++)
+		ath10k_ce_free_pipe(ar, i);
+}
+
 static int ath10k_pci_ce_init(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
 	struct ath10k_pci_pipe *pipe_info;
 	const struct ce_attr *attr;
-	int pipe_num;
+	int pipe_num, ret;
 
 	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
 		pipe_info = &ar_pci->pipe_info[pipe_num];
+		pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
 		pipe_info->pipe_num = pipe_num;
 		pipe_info->hif_ce_state = ar;
 		attr = &host_ce_config_wlan[pipe_num];
 
-		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
-		if (pipe_info->ce_hdl == NULL) {
-			ath10k_err("failed to initialize CE for pipe: %d\n",
-				   pipe_num);
-
-			/* It is safe to call it here. It checks if ce_hdl is
-			 * valid for each pipe */
-			ath10k_pci_ce_deinit(ar);
-			return -1;
+		ret = ath10k_ce_init_pipe(ar, pipe_num, attr);
+		if (ret) {
+			ath10k_err("failed to initialize copy engine pipe %d: %d\n",
+				   pipe_num, ret);
+			return ret;
 		}
 
 		if (pipe_num == CE_COUNT - 1) {
@@ -2648,16 +2659,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
 	ath10k_do_pci_sleep(ar);
 
+	ret = ath10k_pci_alloc_ce(ar);
+	if (ret) {
+		ath10k_err("failed to allocate copy engine pipes: %d\n", ret);
+		goto err_iomap;
+	}
+
 	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
 
 	ret = ath10k_core_register(ar, chip_id);
 	if (ret) {
 		ath10k_err("failed to register driver core: %d\n", ret);
-		goto err_iomap;
+		goto err_free_ce;
 	}
 
 	return 0;
 
+err_free_ce:
+	ath10k_pci_free_ce(ar);
+
 err_iomap:
 	pci_iounmap(pdev, mem);
 err_master:
@@ -2693,6 +2712,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
 	tasklet_kill(&ar_pci->msi_fw_err);
 
 	ath10k_core_unregister(ar);
+	ath10k_pci_free_ce(ar);
 
 	pci_iounmap(pdev, ar_pci->mem);
 	pci_release_region(pdev, BAR_NUM);
......