Commit 1e6a1cd8 authored by David S. Miller

Merge branch 'qed-fixes'

Yuval Mintz says:

====================
qed: Fixes series

This addresses several different issues in qed.
The more significant portions:

Patch #1 fixes an issue that would cause a timeout when qedr utilizes the
highest CIDs available to it [or when future qede adapters would utilize
queues in certain configurations].

Patch #4 fixes a leak of mapped addresses; when the IOMMU is enabled,
offloaded storage protocols might eventually run out of resources
and fail to map additional buffers.

Patches #6 and #7 cover functionality that was missing from the initial
iSCSI infrastructure submission; without them, qedi's stability would be
hampered when it reaches out-of-order scenarios.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a8aa3953 6b116b1d
@@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
 		u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
 		u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
 		u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+		u32 align = elems_per_page * DQ_RANGE_ALIGN;
 
-		p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+		p_conn->cid_count = roundup(p_conn->cid_count, align);
 	}
 }
 
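The hunk above is the CID alignment fix referred to as patch #1. As a rough illustration (the numbers below are invented, not taken from the driver), rounding the CID count up to elems_per_page alone can leave it misaligned with respect to DQ_RANGE_ALIGN, while rounding to elems_per_page * DQ_RANGE_ALIGN satisfies both constraints at once:

#include <stdio.h>

/* Same definition as the kernel's roundup(): round x up to a multiple of y. */
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* Hypothetical values, chosen only to show the effect. */
	unsigned int elems_per_page = 10;	/* contexts per ILT page */
	unsigned int dq_range_align = 4;	/* stand-in for DQ_RANGE_ALIGN */
	unsigned int requested_cids = 25;

	unsigned int align = elems_per_page * dq_range_align;

	/* 30 is a multiple of elems_per_page but not of dq_range_align;
	 * 40 is a multiple of both, which is what the fixed code guarantees.
	 */
	printf("page-only roundup: %u, combined roundup: %u\n",
	       ROUNDUP(requested_cids, elems_per_page),
	       ROUNDUP(requested_cids, align));
	return 0;
}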
@@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
 	 * size/capacity fields are of a u32 type.
 	 */
 	if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
-	     chain_size > 0x10000) ||
-	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
-	     chain_size > 0x100000000ULL)) {
+	     chain_size > ((u32)U16_MAX + 1)) ||
+	    (cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
 		DP_NOTICE(cdev,
 			  "The actual chain size (0x%llx) is larger than the maximal possible value\n",
 			  chain_size);
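A worked boundary case for the check above (a sketch, not driver code): a u32-counted chain of exactly 2^32 elements slips past the old comparison against 0x100000000ULL and then wraps to 0 when stored in a u32 size/capacity field, whereas comparing against U32_MAX rejects it. The u16 limit keeps the same value and is only spelled more explicitly.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chain_size = 0x100000000ULL;	/* exactly 2^32 elements */

	/* Old check: 2^32 is not strictly greater than 2^32, so it passes. */
	int old_check_rejects = chain_size > 0x100000000ULL;

	/* New check: 2^32 is greater than U32_MAX (2^32 - 1), so it is caught. */
	int new_check_rejects = chain_size > UINT32_MAX;

	/* An accepted size silently truncates when written to a u32 field. */
	uint32_t stored_size = (uint32_t)chain_size;

	printf("old rejects: %d, new rejects: %d, stored u32: %u\n",
	       old_check_rejects, new_check_rejects, stored_size);
	return 0;
}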
@@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
 	p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
 	p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
 	p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
+	p_init->ooo_enable = p_params->ooo_enable;
+	p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
+				  p_params->ll2_ooo_queue_id;
 	p_init->func_params.log_page_size = p_params->log_page_size;
 	val = p_params->num_tasks;
 	p_init->func_params.num_tasks = cpu_to_le16(val);
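A hedged reading of the two added assignments (not taken from firmware documentation): the function-start ramrod appears to want an absolute, device-wide LL2 Rx queue id, so the OOO queue index, which is relative to this function, is offset by the function's first allocated QED_LL2_QUEUE resource. The translation is plain addition; with invented numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical resource layout: this function's LL2 queues start at
	 * absolute id 8 and the OOO connection uses relative queue 1.
	 */
	unsigned int resc_start_ll2_queue = 8;
	unsigned int ll2_ooo_queue_id = 1;

	/* Mirrors the arithmetic of the added ll2_rx_queue_id assignment. */
	unsigned int abs_rx_queue_id = resc_start_ll2_queue + ll2_ooo_queue_id;

	printf("absolute LL2 Rx queue id: %u\n", abs_rx_queue_id);
	return 0;
}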
@@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
 	spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
 }
 
+void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
+			       struct qed_iscsi_conn *p_conn)
+{
+	qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
+	qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
+	qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(struct tcp_upload_params),
+			  p_conn->tcp_upload_params_virt_addr,
+			  p_conn->tcp_upload_params_phys_addr);
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(struct scsi_terminate_extra_params),
+			  p_conn->queue_cnts_virt_addr,
+			  p_conn->queue_cnts_phys_addr);
+	kfree(p_conn);
+}
+
 struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_iscsi_info *p_iscsi_info;
@@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
 void qed_iscsi_free(struct qed_hwfn *p_hwfn,
 		    struct qed_iscsi_info *p_iscsi_info)
 {
+	struct qed_iscsi_conn *p_conn = NULL;
+
+	while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
+		p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
+					  struct qed_iscsi_conn, list_entry);
+		if (p_conn) {
+			list_del(&p_conn->list_entry);
+			qed_iscsi_free_connection(p_hwfn, p_conn);
+		}
+	}
+
 	kfree(p_iscsi_info);
 }
 
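The loop added to qed_iscsi_free() drains the connection free-list and releases each pooled connection through the new helper before the info structure itself is freed. A minimal userspace sketch of the same drain-and-free shape, using my own struct and names rather than the driver's:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for a pooled connection object; not the driver's struct. */
struct conn {
	int id;
	struct conn *next;
};

/* Pop entries one at a time and release them, mirroring the
 * while (!list_empty()) { list_del(); free(); } shape of the fix.
 */
static void drain_free_list(struct conn **head)
{
	while (*head) {
		struct conn *c = *head;

		*head = c->next;	/* the list_del() step */
		printf("freeing connection %d\n", c->id);
		free(c);		/* the qed_iscsi_free_connection() step */
	}
}

int main(void)
{
	struct conn *head = NULL;

	/* Build a small free list. */
	for (int i = 0; i < 3; i++) {
		struct conn *c = malloc(sizeof(*c));

		if (!c)
			return 1;
		c->id = i;
		c->next = head;
		head = c;
	}

	drain_free_list(&head);
	return 0;	/* only now would the containing info struct be freed */
}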
@@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
 	/* If need to reuse or there's no replacement buffer, repost this */
 	if (rc)
 		goto out_post;
+	dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
+			 cdev->ll2->rx_size, DMA_FROM_DEVICE);
 
 	skb = build_skb(buffer->data, 0);
 	if (!skb) {
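This is the mapping leak referred to as patch #4: the Rx buffer is a streaming DMA mapping, and handing it to the stack without dma_unmap_single() leaks one mapping per completed packet until no more buffers can be mapped. A toy userspace model of that exhaustion, with a made-up fixed-size mapping pool (nothing here is the real DMA API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for a fixed pool of IOMMU mapping entries. */
#define NUM_MAP_SLOTS 4
static bool slot_in_use[NUM_MAP_SLOTS];

static int map_buffer(void)		/* plays the role of dma_map_single() */
{
	for (int i = 0; i < NUM_MAP_SLOTS; i++) {
		if (!slot_in_use[i]) {
			slot_in_use[i] = true;
			return i;
		}
	}
	return -1;			/* no mapping resources left */
}

static void unmap_buffer(int slot)	/* plays the role of dma_unmap_single() */
{
	slot_in_use[slot] = false;
}

/* Complete a burst of Rx packets, optionally forgetting the unmap. */
static void complete_packets(bool do_unmap)
{
	for (int pkt = 0; pkt < 6; pkt++) {
		int slot = map_buffer();

		if (slot < 0) {
			printf("packet %d: map failed, pool exhausted\n", pkt);
			continue;
		}
		printf("packet %d: mapped in slot %d\n", pkt, slot);
		if (do_unmap)
			unmap_buffer(slot);	/* the step the fix adds */
	}
}

int main(void)
{
	printf("without unmap:\n");
	complete_packets(false);	/* leaks; later packets fail to map */

	memset(slot_in_use, 0, sizeof(slot_in_use));	/* reset the toy pool */

	printf("with unmap:\n");
	complete_packets(true);		/* every packet maps successfully */
	return 0;
}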
@@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
 static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 				      struct qed_ll2_info *p_ll2_conn,
 				      union core_rx_cqe_union *p_cqe,
-				      unsigned long lock_flags,
+				      unsigned long *p_lock_flags,
 				      bool b_last_cqe)
 {
 	struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
 		  "Mismatch between active_descq and the LL2 Rx chain\n");
 	list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
 
-	spin_unlock_irqrestore(&p_rx->lock, lock_flags);
+	spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
 	qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
 				    p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
-	spin_lock_irqsave(&p_rx->lock, lock_flags);
+	spin_lock_irqsave(&p_rx->lock, *p_lock_flags);
 
 	return 0;
 }
@@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
 			break;
 		case CORE_RX_CQE_TYPE_REGULAR:
 			rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
-							cqe, flags, b_last_cqe);
+							cqe, &flags,
+							b_last_cqe);
 			break;
 		default:
 			rc = -EIO;
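The three hunks above are one fix: spin_lock_irqsave() writes the saved interrupt state into the flags variable it is handed, so when the completion helper drops and re-takes the Rx lock through a by-value copy, the state saved by its own irqsave never reaches the caller, and the caller's final spin_unlock_irqrestore() restores stale flags. Passing unsigned long * keeps caller and helper working on the same variable. A userspace caricature of the by-value pitfall (invented names, not the kernel primitives):

#include <stdio.h>

/* Toy model: "save" captures the current interrupt state and disables it,
 * "restore" puts a saved state back.
 */
static unsigned long irq_state = 1;		/* 1 = interrupts enabled */

static void lock_irqsave(unsigned long *flags)
{
	*flags = irq_state;
	irq_state = 0;				/* off while the lock is held */
}

static void unlock_irqrestore(unsigned long flags)
{
	irq_state = flags;
}

/* Broken shape: the helper's re-save lands in a local copy and is lost. */
static void helper_by_value(unsigned long flags)
{
	unlock_irqrestore(flags);
	irq_state = 0;				/* state changes while unlocked */
	lock_irqsave(&flags);			/* caller never sees this save */
}

/* Fixed shape: caller and helper share one flags variable. */
static void helper_by_pointer(unsigned long *p_flags)
{
	unlock_irqrestore(*p_flags);
	irq_state = 0;
	lock_irqsave(p_flags);
}

int main(void)
{
	unsigned long flags;

	lock_irqsave(&flags);
	helper_by_value(flags);
	unlock_irqrestore(flags);	/* restores the original, now stale, state */
	printf("by-value:   final irq_state = %lu\n", irq_state);

	irq_state = 1;
	lock_irqsave(&flags);
	helper_by_pointer(&flags);
	unlock_irqrestore(flags);	/* restores the most recently saved state */
	printf("by-pointer: final irq_state = %lu\n", irq_state);
	return 0;
}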
@@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
 	if (!p_ooo_info->ooo_history.p_cqes)
 		goto no_history_mem;
 
+	p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;
+
 	return p_ooo_info;
 
 no_history_mem:
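The added line initializes the size of the OOO history buffer, which the allocation above sizes for QED_MAX_NUM_OOO_HISTORY_ENTRIES entries. A hedged sketch (my own field and function names, assuming the history is written as a ring whose index wraps at num_of_cqes): with the size left at zero the wrap condition never triggers and the writer walks past the end of the array.

#include <stdio.h>

#define MAX_HISTORY_ENTRIES 4	/* stand-in for QED_MAX_NUM_OOO_HISTORY_ENTRIES */

/* Simplified history ring; fields only loosely mirror the driver's. */
struct ooo_history {
	int entries[MAX_HISTORY_ENTRIES];
	unsigned int head_idx;
	unsigned int num_of_cqes;	/* ring size; must not stay 0 */
};

/* Record one entry, wrapping the write index at the ring size. */
static void save_history_entry(struct ooo_history *h, int cqe)
{
	h->entries[h->head_idx] = cqe;
	h->head_idx++;
	if (h->head_idx == h->num_of_cqes)	/* never true if size is 0 */
		h->head_idx = 0;
}

int main(void)
{
	struct ooo_history h = { .num_of_cqes = MAX_HISTORY_ENTRIES };

	/* With the size set, the index wraps and stays inside entries[];
	 * left at 0 (the bug), head_idx would only grow and the stores
	 * would run off the end of the array.
	 */
	for (int i = 0; i < 10; i++)
		save_history_entry(&h, i);

	printf("head_idx after 10 entries: %u\n", h.head_idx);
	return 0;
}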